Dataset schema (column name, dtype, observed range as reported by the dataset viewer):

| Column | Type | Observed range |
|---|---|---|
| instance_id | string | length 10 – 57 |
| base_commit | string | length 40 – 40 |
| created_at | date string | 2014-04-30 14:58:36 – 2025-04-30 20:14:11 |
| environment_setup_commit | string | length 40 – 40 |
| hints_text | string | length 0 – 273k |
| patch | string | length 251 – 7.06M |
| problem_statement | string | length 11 – 52.5k |
| repo | string | length 7 – 53 |
| test_patch | string | length 231 – 997k |
| meta | dict | – |
| version | string | 851 classes |
| install_config | dict | – |
| requirements | string, nullable | length 93 – 34.2k |
| environment | string, nullable | length 760 – 20.5k |
| FAIL_TO_PASS | list | length 1 – 9.39k |
| FAIL_TO_FAIL | list | length 0 – 2.69k |
| PASS_TO_PASS | list | length 0 – 7.87k |
| PASS_TO_FAIL | list | length 0 – 192 |
| license_name | string | 55 classes |
| `__index_level_0__` | int64 | 0 – 21.4k |
| before_filepaths | list | length 1 – 105 |
| after_filepaths | list | length 1 – 105 |

Sample rows follow, one field per labeled section.
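For downstream consumers, a minimal loading sketch; the file name `instances.jsonl` and the JSON Lines export format are assumptions for illustration, not something this card specifies:

```python
import json

# Hypothetical export: one dataset row per JSON line.
with open('instances.jsonl') as f:
    for line in f:
        inst = json.loads(line)
        # Field names follow the schema table above.
        print(inst['instance_id'], inst['repo'], len(inst['FAIL_TO_PASS']))
```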
---

**instance_id:** html5lib__html5lib-python-377
**base_commit:** d595d0a51253b8f9f5e1f14f15b285cadedf806e
**created_at:** 2017-11-30 16:21:04
**environment_setup_commit:** 41bd598ed867f3076223328e9db577c4366ad518

**patch:**
diff --git a/.gitignore b/.gitignore
index 6aed95b..ecd62df 100644
--- a/.gitignore
+++ b/.gitignore
@@ -80,3 +80,6 @@ target/
# Generated by parse.py -p
stats.prof
+
+# IDE
+.idea
diff --git a/html5lib/filters/sanitizer.py b/html5lib/filters/sanitizer.py
index dc80166..6315a1c 100644
--- a/html5lib/filters/sanitizer.py
+++ b/html5lib/filters/sanitizer.py
@@ -855,7 +855,7 @@ class Filter(base.Filter):
'padding']:
for keyword in value.split():
if keyword not in self.allowed_css_keywords and \
- not re.match(r"^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$", keyword): # noqa
+ not re.match(r"^(#[0-9a-fA-F]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$", keyword): # noqa
break
else:
clean.append(prop + ': ' + value + ';')
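As a quick illustration of this one-character-class fix, a minimal sketch using plain `re` (the pattern fragments below are extracted from the fuller sanitizer regex):

```python
import re

OLD_HEX = r"^#[0-9a-f]+$"     # hex-color fragment of the pre-patch pattern
NEW_HEX = r"^#[0-9a-fA-F]+$"  # post-patch: uppercase hex digits also match

assert re.match(NEW_HEX, "#a2a2a2")      # lowercase passed before and after
assert re.match(NEW_HEX, "#DA4534")      # uppercase passes only after the fix
assert not re.match(OLD_HEX, "#DA4534")  # the old fragment rejected it
```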
**problem_statement:**
support uppercase hex characters in css color check
This covers adding support for uppercase hex characters in the css color check regex.
From #287:
> Adds uppercase check (A-F) in the css regex to pass css of the following format:
> border-top: 2px #DA4534 solid;

**repo:** html5lib/html5lib-python

**test_patch:**
diff --git a/html5lib/tests/test_sanitizer.py b/html5lib/tests/test_sanitizer.py
index e19deea..45046d5 100644
--- a/html5lib/tests/test_sanitizer.py
+++ b/html5lib/tests/test_sanitizer.py
@@ -113,3 +113,15 @@ def test_sanitizer():
yield (runSanitizerTest, "test_should_allow_uppercase_%s_uris" % protocol,
"<img src=\"%s:%s\">foo</a>" % (protocol, rest_of_uri),
"""<img src="%s:%s">foo</a>""" % (protocol, rest_of_uri))
+
+
+def test_lowercase_color_codes_in_style():
+ sanitized = sanitize_html("<p style=\"border: 1px solid #a2a2a2;\"></p>")
+ expected = '<p style=\"border: 1px solid #a2a2a2;\"></p>'
+ assert expected == sanitized
+
+
+def test_uppercase_color_codes_in_style():
+ sanitized = sanitize_html("<p style=\"border: 1px solid #A2A2A2;\"></p>")
+ expected = '<p style=\"border: 1px solid #A2A2A2;\"></p>'
+ assert expected == sanitized
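Outside the test suite's `sanitize_html` helper, one way to exercise the sanitizer is html5lib's filter pattern; a sketch (API usage as documented for html5lib filters, output formatting may differ slightly):

```python
import html5lib
from html5lib.filters.sanitizer import Filter as SanitizerFilter
from html5lib.serializer import HTMLSerializer

fragment = html5lib.parseFragment('<p style="border: 1px solid #A2A2A2;"></p>')
walker = html5lib.getTreeWalker('etree')
# With the patch applied, the uppercase hex color survives sanitization.
print(HTMLSerializer().render(SanitizerFilter(walker(fragment))))
```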
**meta:**
{
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 2
}

**version:** 1.010

**install_config:**
{
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[all]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-expect",
"mock"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
}

**requirements:**
chardet==5.2.0
datrie==0.8.2
exceptiongroup==1.2.2
Genshi==0.7.9
-e git+https://github.com/html5lib/html5lib-python.git@d595d0a51253b8f9f5e1f14f15b285cadedf806e#egg=html5lib
iniconfig==2.1.0
lxml==5.3.1
mock==5.2.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytest-expect==1.1.0
six==1.17.0
tomli==2.2.1
u-msgpack-python==2.8.0
webencodings==0.5.1
**environment:**
name: html5lib-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- chardet==5.2.0
- datrie==0.8.2
- exceptiongroup==1.2.2
- genshi==0.7.9
- iniconfig==2.1.0
- lxml==5.3.1
- mock==5.2.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-expect==1.1.0
- six==1.17.0
- tomli==2.2.1
- u-msgpack-python==2.8.0
- webencodings==0.5.1
prefix: /opt/conda/envs/html5lib-python
**FAIL_TO_PASS:**
[
"html5lib/tests/test_sanitizer.py::test_uppercase_color_codes_in_style"
]
**FAIL_TO_FAIL:**
[]
**PASS_TO_PASS:**
[
"html5lib/tests/test_sanitizer.py::test_should_handle_astral_plane_characters",
"html5lib/tests/test_sanitizer.py::test_should_allow_relative_uris",
"html5lib/tests/test_sanitizer.py::test_invalid_data_uri",
"html5lib/tests/test_sanitizer.py::test_invalid_ipv6_url",
"html5lib/tests/test_sanitizer.py::test_data_uri_disallowed_type",
"html5lib/tests/test_sanitizer.py::test_lowercase_color_codes_in_style"
]
**PASS_TO_FAIL:**
[]
**license_name:** MIT License
`__index_level_0__`: 1930

**before_filepaths:**
[
".gitignore",
"html5lib/filters/sanitizer.py"
]
**after_filepaths:**
[
".gitignore",
"html5lib/filters/sanitizer.py"
]
---

**instance_id:** discos__simulators-103
**base_commit:** cec38fb58da280acfd66707e8c6438bb07b39d88
**created_at:** 2017-12-01 16:15:18
**environment_setup_commit:** cec38fb58da280acfd66707e8c6438bb07b39d88

**hints_text:**
coveralls:
[Coverage Status](https://coveralls.io/builds/14460334)
Coverage decreased (-0.4%) to 97.977% when pulling **7e009b99ec957c63ed43f3ad059a07d895642162 on fix-issue-102** into **cec38fb58da280acfd66707e8c6438bb07b39d88 on master**.
codecov-io: # [Codecov](https://codecov.io/gh/discos/simulators/pull/103?src=pr&el=h1) Report
> Merging [#103](https://codecov.io/gh/discos/simulators/pull/103?src=pr&el=desc) into [master](https://codecov.io/gh/discos/simulators/commit/cec38fb58da280acfd66707e8c6438bb07b39d88?src=pr&el=desc) will **increase** coverage by `0.23%`.
> The diff coverage is `100%`.
[Codecov report](https://codecov.io/gh/discos/simulators/pull/103?src=pr&el=tree)
```diff
@@ Coverage Diff @@
## master #103 +/- ##
==========================================
+ Coverage 98.41% 98.65% +0.23%
==========================================
Files 11 11
Lines 2209 2300 +91
==========================================
+ Hits 2174 2269 +95
+ Misses 35 31 -4
```
| [Impacted Files](https://codecov.io/gh/discos/simulators/pull/103?src=pr&el=tree) | Coverage Δ | |
|---|---|---|
| [simulators/utils.py](https://codecov.io/gh/discos/simulators/pull/103/diff?src=pr&el=tree#diff-c2ltdWxhdG9ycy91dGlscy5weQ==) | `100% <100%> (+5.79%)` | :arrow_up: |
| [tests/test\_utils.py](https://codecov.io/gh/discos/simulators/pull/103/diff?src=pr&el=tree#diff-dGVzdHMvdGVzdF91dGlscy5weQ==) | `100% <100%> (ø)` | :arrow_up: |
------
[Continue to review full report at Codecov](https://codecov.io/gh/discos/simulators/pull/103?src=pr&el=continue).
> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta)
> `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data`
> Powered by [Codecov](https://codecov.io/gh/discos/simulators/pull/103?src=pr&el=footer). Last update [cec38fb...313820c](https://codecov.io/gh/discos/simulators/pull/103?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).
coveralls:
[Coverage Status](https://coveralls.io/builds/14489780)
Coverage increased (+0.2%) to 98.652% when pulling **313820c0c109ef8f53c68f432379dd07a6b51b5a on fix-issue-102** into **cec38fb58da280acfd66707e8c6438bb07b39d88 on master**.
**patch:**
diff --git a/simulators/utils.py b/simulators/utils.py
index 1b7462c..35cf994 100644
--- a/simulators/utils.py
+++ b/simulators/utils.py
@@ -1,3 +1,4 @@
+#!/usr/bin/python
import math
import struct
from datetime import datetime
@@ -144,16 +145,59 @@ def real_to_binary(num, precision=1):
)
-def mjd():
- """Return the modified julian date.
- For more informations about modified julian date check the following link:
- https://bowie.gsfc.nasa.gov/time/"""
+def real_to_bytes(num, precision=1):
+ """Return the bytestring representation of a floating-point number
+ (IEEE 754 standard)."""
+ binary_number = real_to_binary(num, precision)
+ return binary_to_bytes(binary_number)
- utcnow = datetime.utcnow()
- year = utcnow.year
- month = utcnow.month
- day = utcnow.day
+def bytes_to_real(bytes_real, precision=1):
+ """Return the floating-point representation (IEEE 754 standard)
+ of bytestring number."""
+ if precision == 1:
+ return struct.unpack('!f', bytes_real)[0]
+ elif precision == 2:
+ return struct.unpack('!d', bytes_real)[0]
+ else:
+ raise ValueError(
+ "Unknown precision %d."
+ % (precision)
+ )
+
+
+def int_to_bytes(val, n_bytes=4):
+ """Return the bytestring representation of a given signed integer."""
+ return binary_to_bytes(int_to_twos(val, n_bytes))
+
+
+def uint_to_bytes(val, n_bytes=4):
+ """Return the bytestring representation of a given unsigned integer."""
+ n_bits = 8 * n_bytes
+ min_range = 0
+ max_range = int(math.pow(2, n_bits)) - 1
+
+ if val < min_range or val > max_range:
+ raise ValueError(
+ "%d out of range (%d, %d)."
+ % (val, min_range, max_range)
+ )
+
+ return binary_to_bytes(bin(val)[2:].zfill(n_bytes * 8))
+
+
+def mjd(time=datetime.utcnow()):
+ """Returns the modified julian date (MJD) of a given datetime object.
+ If no datetime object is given, it returns the current MJD.
+ For more information about the modified julian date, check the following link:
+ https://bowie.gsfc.nasa.gov/time/"""
+ year = time.year
+ month = time.month
+ day = time.day
+ hour = time.hour
+ minute = time.minute
+ second = time.second
+ microsecond = time.microsecond
if month == 1 or month == 2:
yearp = year - 1
@@ -173,11 +217,7 @@ def mjd():
a = math.trunc(yearp / 100.)
b = 2 - a + math.trunc(a / 4.)
- if yearp < 0:
- c = math.trunc((365.25 * yearp) - 0.75)
- else:
- c = math.trunc(365.25 * yearp)
-
+ c = math.trunc(365.25 * yearp)
d = math.trunc(30.6001 * (monthp + 1))
jd = b + c + d + day + 1720994.5
@@ -185,13 +225,13 @@ def mjd():
modified_julian_day = jd - 2400000.5
# Total UTC hours of the day
- day_hours = utcnow.hour
+ day_hours = hour
# Total minutes of the day
- day_minutes = (day_hours * 60) + utcnow.minute
+ day_minutes = (day_hours * 60) + minute
# Total seconds of the day
- day_seconds = (day_minutes * 60) + utcnow.second
+ day_seconds = (day_minutes * 60) + second
# Total microseconds of the day
- day_microseconds = (day_seconds * 1000000) + utcnow.microsecond
+ day_microseconds = (day_seconds * 1000000) + microsecond
# Day percentage, 00:00 = 0.0, 24:00=1.0
day_percentage = round(float(day_microseconds) / 86400000000, 6)
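The day-fraction arithmetic above can be cross-checked against a direct delta from the MJD epoch (1858-11-17 00:00 UTC). A sketch with our own helper name, reproducing the expected value from the test patch below:

```python
from datetime import datetime

MJD_EPOCH = datetime(1858, 11, 17)  # MJD 0.0 by definition

def mjd_check(t):
    # Whole days since the epoch, plus the same rounded day fraction
    # that utils.mjd computes from hours/minutes/seconds/microseconds.
    delta = t - MJD_EPOCH
    day_fraction = (delta.seconds * 1000000 + delta.microseconds) / 86400000000.0
    return delta.days + round(day_fraction, 6)

assert mjd_check(datetime(2017, 12, 4, 13, 51, 10, 162534)) == 58091.577201
```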
**problem_statement:**
Enhance `utils.py` to provide functions useful to the `acu.py` module.

**repo:** discos/simulators

**test_patch:**
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 65a8878..0f24101 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -1,4 +1,5 @@
import unittest
+from datetime import datetime
from simulators import utils
@@ -40,10 +41,23 @@ class TestServer(unittest.TestCase):
with self.assertRaises(ValueError):
utils.int_to_twos(4294967295)
- def test_mjd(self):
+ def test_mjd_now(self):
"""Make sure that the datatype of the response is the correct one."""
self.assertIsInstance(utils.mjd(), float)
+ def test_mjd_given_date(self):
+ """Return the modified julian date of a given datetime object."""
+ time = datetime(2017, 12, 4, 13, 51, 10, 162534)
+ result = utils.mjd(time)
+ expected_result = 58091.577201
+ self.assertEqual(result, expected_result)
+
+ def test_mjd_old_date(self):
+ time = datetime(1500, 1, 1, 12, 0, 0, 0)
+ result = utils.mjd(time)
+ expected_result = -131067.5
+ self.assertEqual(result, expected_result)
+
def test_day_milliseconds(self):
"""Make sure that the datatype of the response is the correct one.
Also make sure that the returned value is inside the expected range."""
@@ -88,7 +102,6 @@ class TestServer(unittest.TestCase):
self.assertEqual(result, expected_result)
def test_real_to_binary_double_precision(self):
- """Convert a real number to its binary representation."""
number = 3.14159265358979323846264338327950288419716939937510582097494
result = utils.real_to_binary(number, 2)
expected_result = (
@@ -109,5 +122,86 @@ class TestServer(unittest.TestCase):
with self.assertRaises(ValueError):
utils.real_to_binary(number, 3)
+ def test_real_to_bytes_single_precision(self):
+ """Convert a real number to a string of bytes."""
+ number = 45.12371938725634
+ result = utils.real_to_bytes(number)
+ expected_result = b'\x42\x34\x7E\xB0'
+ self.assertEqual(result, expected_result)
+
+ def test_real_to_bytes_double_precision(self):
+ number = 3.14159265358979323846264338327950288419716939937510582097494
+ result = utils.real_to_bytes(number, 2)
+ expected_result = b'\x40\x09\x21\xFB\x54\x44\x2D\x18'
+ self.assertEqual(result, expected_result)
+
+ def test_real_to_bytes_unknown_precision(self):
+ number = 3267.135248123736
+ with self.assertRaises(ValueError):
+ utils.real_to_binary(number, 3)
+
+ def test_bytes_to_real_single_precision(self):
+ """Convert a string of bytes to a floating point number."""
+ byte_string = b'\x42\x34\x7E\xB0'
+ result = utils.bytes_to_real(byte_string)
+ expected_result = 45.12371826171875
+ self.assertEqual(result, expected_result)
+
+ def test_bytes_to_real_double_precision(self):
+ byte_string = b'\x40\x09\x21\xFB\x54\x44\x2D\x18'
+ result = utils.bytes_to_real(byte_string, 2)
+ expected_result = (
+ 3.14159265358979323846264338327950288419716939937510582097494
+ )
+ self.assertEqual(result, expected_result)
+
+ def test_bytes_to_real_unknown_precision(self):
+ byte_string = b'\xDA\x35\xF7\x65'
+ with self.assertRaises(ValueError):
+ utils.bytes_to_real(byte_string, 3)
+
+ def test_int_to_bytes_positive(self):
+ """Convert a signed integer to a string of bytes."""
+ number = 232144
+ result = utils.int_to_bytes(number)
+ expected_result = b'\x00\x03\x8A\xD0'
+ self.assertEqual(result, expected_result)
+
+ def test_int_to_bytes_negative(self):
+ number = -4522764
+ result = utils.int_to_bytes(number)
+ expected_result = b'\xFF\xBA\xFC\xF4'
+ self.assertEqual(result, expected_result)
+
+ def test_int_to_bytes_out_of_range(self):
+ number = 36273463
+ with self.assertRaises(ValueError):
+ utils.int_to_bytes(number, 2)
+
+ def test_int_to_bytes_wrong(self):
+ number = 6814627
+ result = utils.int_to_bytes(number)
+ wrong_expected_result = b'\xFF\x98\x04\x5D'
+ self.assertNotEqual(result, wrong_expected_result)
+
+ def test_uint_to_bytes(self):
+ """Convert an unsigned integer to a string of bytes."""
+ number = 1284639736
+ result = utils.uint_to_bytes(number)
+ expected_result = b'\x4C\x92\x0B\xF8'
+ self.assertEqual(result, expected_result)
+
+ def test_uint_to_bytes_out_of_range(self):
+ number = 13463672713
+ with self.assertRaises(ValueError):
+ utils.uint_to_bytes(number)
+
+ def test_uint_to_bytes_wrong(self):
+ number = 1235326152
+ result = utils.uint_to_bytes(number)
+ wrong_expected_result = b'\x00\x34\xAE\xDD'
+ self.assertNotEqual(result, wrong_expected_result)
+
+
if __name__ == '__main__':
unittest.main()
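The expected byte strings in these tests line up with Python's `struct` encodings, which makes a few of them easy to sanity-check directly; a short sketch:

```python
import struct

# IEEE 754 single precision: pack rounds to the nearest float32.
packed = struct.pack('!f', 45.12371938725634)
assert packed == b'\x42\x34\x7E\xB0'
assert struct.unpack('!f', packed)[0] == 45.12371826171875

# 32-bit big-endian two's complement (signed) and plain unsigned:
assert struct.pack('!i', -4522764) == b'\xFF\xBA\xFC\xF4'
assert struct.pack('!I', 1284639736) == b'\x4C\x92\x0B\xF8'
```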
**meta:**
{
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 1
}

**version:** unknown

**install_config:**
{
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"coverage",
"prospector",
"sphinx",
"sphinx_rtd_theme",
"tox",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
}

**requirements:**
alabaster==0.7.16
astroid==3.3.9
babel==2.17.0
cachetools==5.5.2
certifi==2025.1.31
chardet==5.2.0
charset-normalizer==3.4.1
colorama==0.4.6
coverage==7.8.0
dill==0.3.9
-e git+https://github.com/discos/simulators.git@cec38fb58da280acfd66707e8c6438bb07b39d88#egg=discos_simulators
distlib==0.3.9
docutils==0.21.2
dodgy==0.2.1
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
filelock==3.18.0
flake8==7.2.0
flake8-polyfill==1.0.2
gitdb==4.0.12
GitPython==3.1.44
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
isort==6.0.1
Jinja2==3.1.6
MarkupSafe==3.0.2
mccabe==0.7.0
packaging @ file:///croot/packaging_1734472117206/work
pep8-naming==0.10.0
platformdirs==4.3.7
pluggy @ file:///croot/pluggy_1733169602837/work
prospector==1.16.1
pycodestyle==2.13.0
pydocstyle==6.3.0
pyflakes==3.3.2
Pygments==2.19.1
pylint==3.3.6
pylint-celery==0.3
pylint-django==2.6.1
pylint-plugin-utils==0.8.2
pyproject-api==1.9.0
pytest @ file:///croot/pytest_1738938843180/work
PyYAML==6.0.2
requests==2.32.3
requirements-detector==1.3.2
semver==3.0.4
setoptconf-tmp==0.3.1
smmap==5.0.2
snowballstemmer==2.2.0
Sphinx==7.4.7
sphinx-rtd-theme==3.0.2
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
toml==0.10.2
tomli==2.2.1
tomlkit==0.13.2
tox==4.25.0
typing_extensions==4.13.0
urllib3==2.3.0
virtualenv==20.29.3
zipp==3.21.0
**environment:**
name: simulators
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.16
- astroid==3.3.9
- babel==2.17.0
- cachetools==5.5.2
- certifi==2025.1.31
- chardet==5.2.0
- charset-normalizer==3.4.1
- colorama==0.4.6
- coverage==7.8.0
- dill==0.3.9
- distlib==0.3.9
- docutils==0.21.2
- dodgy==0.2.1
- filelock==3.18.0
- flake8==7.2.0
- flake8-polyfill==1.0.2
- gitdb==4.0.12
- gitpython==3.1.44
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- isort==6.0.1
- jinja2==3.1.6
- markupsafe==3.0.2
- mccabe==0.7.0
- pep8-naming==0.10.0
- platformdirs==4.3.7
- prospector==1.16.1
- pycodestyle==2.13.0
- pydocstyle==6.3.0
- pyflakes==3.3.2
- pygments==2.19.1
- pylint==3.3.6
- pylint-celery==0.3
- pylint-django==2.6.1
- pylint-plugin-utils==0.8.2
- pyproject-api==1.9.0
- pyyaml==6.0.2
- requests==2.32.3
- requirements-detector==1.3.2
- semver==3.0.4
- setoptconf-tmp==0.3.1
- smmap==5.0.2
- snowballstemmer==2.2.0
- sphinx==7.4.7
- sphinx-rtd-theme==3.0.2
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- toml==0.10.2
- tomli==2.2.1
- tomlkit==0.13.2
- tox==4.25.0
- typing-extensions==4.13.0
- urllib3==2.3.0
- virtualenv==20.29.3
- zipp==3.21.0
prefix: /opt/conda/envs/simulators
**FAIL_TO_PASS:**
[
"tests/test_utils.py::TestServer::test_bytes_to_real_double_precision",
"tests/test_utils.py::TestServer::test_bytes_to_real_single_precision",
"tests/test_utils.py::TestServer::test_bytes_to_real_unknown_precision",
"tests/test_utils.py::TestServer::test_int_to_bytes_out_of_range",
"tests/test_utils.py::TestServer::test_mjd_given_date",
"tests/test_utils.py::TestServer::test_mjd_old_date",
"tests/test_utils.py::TestServer::test_uint_to_bytes_out_of_range"
]
**FAIL_TO_FAIL:**
[
"tests/test_utils.py::TestServer::test_binary_to_bytes_correct",
"tests/test_utils.py::TestServer::test_binary_to_bytes_wrong",
"tests/test_utils.py::TestServer::test_bytes_to_int_correct",
"tests/test_utils.py::TestServer::test_bytes_to_int_wrong",
"tests/test_utils.py::TestServer::test_day_milliseconds",
"tests/test_utils.py::TestServer::test_int_to_bytes_negative",
"tests/test_utils.py::TestServer::test_int_to_bytes_positive",
"tests/test_utils.py::TestServer::test_int_to_bytes_wrong",
"tests/test_utils.py::TestServer::test_real_to_binary_double_precision",
"tests/test_utils.py::TestServer::test_real_to_binary_single_precision",
"tests/test_utils.py::TestServer::test_real_to_binary_wrong",
"tests/test_utils.py::TestServer::test_real_to_bytes_double_precision",
"tests/test_utils.py::TestServer::test_real_to_bytes_single_precision",
"tests/test_utils.py::TestServer::test_uint_to_bytes",
"tests/test_utils.py::TestServer::test_uint_to_bytes_wrong"
]
**PASS_TO_PASS:**
[
"tests/test_utils.py::TestServer::test_int_to_twos",
"tests/test_utils.py::TestServer::test_mjd_now",
"tests/test_utils.py::TestServer::test_out_of_range_int_to_twos",
"tests/test_utils.py::TestServer::test_real_to_binary_unknown_precision",
"tests/test_utils.py::TestServer::test_real_to_bytes_unknown_precision",
"tests/test_utils.py::TestServer::test_right_checksum",
"tests/test_utils.py::TestServer::test_right_twos_to_int",
"tests/test_utils.py::TestServer::test_wrong_checksum",
"tests/test_utils.py::TestServer::test_wrong_twos_to_int"
]
**PASS_TO_FAIL:**
[]
**license_name:** null
`__index_level_0__`: 1931

**before_filepaths:**
[
"simulators/utils.py"
]
**after_filepaths:**
[
"simulators/utils.py"
]
---

**instance_id:** falconry__falcon-1151
**base_commit:** 0c6b6deb8692b06c677893f97c89cc3870595853
**created_at:** 2017-12-01 16:35:22
**environment_setup_commit:** 919fd3f5a3129d04f1c7d23f5eff440ec4598e35

**hints_text:**
codecov[bot]: # [Codecov](https://codecov.io/gh/falconry/falcon/pull/1151?src=pr&el=h1) Report
> Merging [#1151](https://codecov.io/gh/falconry/falcon/pull/1151?src=pr&el=desc) into [master](https://codecov.io/gh/falconry/falcon/commit/0c6b6deb8692b06c677893f97c89cc3870595853?src=pr&el=desc) will **not change** coverage.
> The diff coverage is `100%`.
[Codecov report](https://codecov.io/gh/falconry/falcon/pull/1151?src=pr&el=tree)
```diff
@@ Coverage Diff @@
## master #1151 +/- ##
======================================
Coverage 100% 100%
======================================
Files 36 36
Lines 2378 2378
Branches 347 347
======================================
Hits 2378 2378
```
| [Impacted Files](https://codecov.io/gh/falconry/falcon/pull/1151?src=pr&el=tree) | Coverage Δ | |
|---|---|---|
| [falcon/hooks.py](https://codecov.io/gh/falconry/falcon/pull/1151/diff?src=pr&el=tree#diff-ZmFsY29uL2hvb2tzLnB5) | `100% <100%> (ø)` | :arrow_up: |
------
[Continue to review full report at Codecov](https://codecov.io/gh/falconry/falcon/pull/1151?src=pr&el=continue).
> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta)
> `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data`
> Powered by [Codecov](https://codecov.io/gh/falconry/falcon/pull/1151?src=pr&el=footer). Last update [0c6b6de...8bb3244](https://codecov.io/gh/falconry/falcon/pull/1151?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).
**patch:**
diff --git a/docs/api/hooks.rst b/docs/api/hooks.rst
index 16284c6..c8dbbd6 100644
--- a/docs/api/hooks.rst
+++ b/docs/api/hooks.rst
@@ -33,12 +33,43 @@ decorate the resource class:
@falcon.before(extract_project_id)
class Message(object):
- def on_post(self, req, resp):
+ def on_post(self, req, resp, project_id):
pass
- def on_get(self, req, resp):
+ def on_get(self, req, resp, project_id):
pass
+Note also that you can pass additional arguments to your hook function
+as needed:
+
+.. code:: python
+
+ def validate_image_type(req, resp, resource, params, allowed_types):
+ if req.content_type not in allowed_types:
+ msg = 'Image type not allowed.'
+ raise falcon.HTTPBadRequest('Bad request', msg)
+
+ @falcon.before(validate_image_type, ['image/png'])
+ def on_post(self, req, resp):
+ pass
+
+Falcon supports using any callable as a hook. This allows for using a class
+instead of a function:
+
+.. code:: python
+
+ class Authorize(object):
+ def __init__(self, roles):
+ self._roles = roles
+
+ def __call__(self, req, resp, resource, params):
+ pass
+
+ @falcon.before(Authorize(['admin']))
+ def on_post(self, req, resp):
+ pass
+
+
Falcon :ref:`middleware components <middleware>` can also be used to insert
logic before and after requests. However, unlike hooks,
:ref:`middleware components <middleware>` are triggered **globally** for all
diff --git a/falcon/hooks.py b/falcon/hooks.py
index e302b9a..d983759 100644
--- a/falcon/hooks.py
+++ b/falcon/hooks.py
@@ -22,7 +22,7 @@ from falcon import COMBINED_METHODS
from falcon.util.misc import get_argnames
-def before(action):
+def before(action, *args, **kwargs):
"""Decorator to execute the given action function *before* the responder.
Args:
@@ -45,6 +45,12 @@ def before(action):
params['answer'] = 42
+ *args: Any additional arguments will be passed to *action* in the
+ order given, immediately following the *req*, *resp*, *resource*,
+ and *params* arguments.
+
+ **kwargs: Any additional keyword arguments will be passed through to
+ *action*.
"""
def _before(responder_or_resource):
@@ -68,7 +74,7 @@ def before(action):
# variable that is shared between iterations of the
# for loop, above.
def let(responder=responder):
- do_before_all = _wrap_with_before(action, responder)
+ do_before_all = _wrap_with_before(responder, action, args, kwargs)
setattr(resource, responder_name, do_before_all)
@@ -78,14 +84,14 @@ def before(action):
else:
responder = responder_or_resource
- do_before_one = _wrap_with_before(action, responder)
+ do_before_one = _wrap_with_before(responder, action, args, kwargs)
return do_before_one
return _before
-def after(action):
+def after(action, *args, **kwargs):
"""Decorator to execute the given action function *after* the responder.
Args:
@@ -94,6 +100,12 @@ def after(action):
reference to the resource class instance associated with the
request
+ *args: Any additional arguments will be passed to *action* in the
+ order given, immediately following the *req*, *resp*, *resource*,
+ and *params* arguments.
+
+ **kwargs: Any additional keyword arguments will be passed through to
+ *action*.
"""
def _after(responder_or_resource):
@@ -113,7 +125,7 @@ def after(action):
if callable(responder):
def let(responder=responder):
- do_after_all = _wrap_with_after(action, responder)
+ do_after_all = _wrap_with_after(responder, action, args, kwargs)
setattr(resource, responder_name, do_after_all)
@@ -123,7 +135,7 @@ def after(action):
else:
responder = responder_or_resource
- do_after_one = _wrap_with_after(action, responder)
+ do_after_one = _wrap_with_after(responder, action, args, kwargs)
return do_after_one
@@ -135,13 +147,15 @@ def after(action):
# -----------------------------------------------------------------------------
-def _wrap_with_after(action, responder):
+def _wrap_with_after(responder, action, action_args, action_kwargs):
"""Execute the given action function after a responder method.
Args:
+ responder: The responder method to wrap.
action: A function with a signature similar to a resource responder
method, taking the form ``func(req, resp, resource)``.
- responder: The responder method to wrap.
+ action_args: Additional positional arguments to pass to *action*.
+ action_kwargs: Additional keyword arguments to pass to *action*.
"""
# NOTE(swistakm): create shim before checking what will be actually
@@ -153,8 +167,8 @@ def _wrap_with_after(action, responder):
# classes in Python vesions prior to 3.4.
#
# @wraps(action)
- def shim(req, resp, resource):
- action(req, resp)
+ def shim(req, resp, resource, *args, **kwargs):
+ action(req, resp, *args, **kwargs)
responder_argnames = get_argnames(responder)
extra_argnames = responder_argnames[2:] # Skip req, resp
@@ -165,18 +179,20 @@ def _wrap_with_after(action, responder):
_merge_responder_args(args, kwargs, extra_argnames)
responder(self, req, resp, **kwargs)
- shim(req, resp, self)
+ shim(req, resp, self, *action_args, **action_kwargs)
return do_after
-def _wrap_with_before(action, responder):
+def _wrap_with_before(responder, action, action_args, action_kwargs):
"""Execute the given action function before a responder method.
Args:
+ responder: The responder method to wrap.
action: A function with a similar signature to a resource responder
method, taking the form ``func(req, resp, resource, params)``.
- responder: The responder method to wrap
+ action_args: Additional positional arguments to pass to *action*.
+ action_kwargs: Additional keyword arguments to pass to *action*.
"""
# NOTE(swistakm): create shim before checking what will be actually
@@ -188,10 +204,10 @@ def _wrap_with_before(action, responder):
# classes in Python versions prior to 3.4.
#
# @wraps(action)
- def shim(req, resp, resource, kwargs):
+ def shim(req, resp, resource, params, *args, **kwargs):
# NOTE(kgriffs): Don't have to pass "self" even if has_self,
# since method is assumed to be bound.
- action(req, resp, kwargs)
+ action(req, resp, params, *args, **kwargs)
responder_argnames = get_argnames(responder)
extra_argnames = responder_argnames[2:] # Skip req, resp
@@ -201,7 +217,7 @@ def _wrap_with_before(action, responder):
if args:
_merge_responder_args(args, kwargs, extra_argnames)
- shim(req, resp, self, kwargs)
+ shim(req, resp, self, kwargs, *action_args, **action_kwargs)
responder(self, req, resp, **kwargs)
return do_before
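A self-contained usage sketch of the new decorator arguments (resource and route names are ours; the hook mirrors `validate_param` from the test patch below), exercised with Falcon's test client:

```python
import falcon
from falcon import testing

def validate_param(req, resp, resource, params, param_name, maxval=100):
    # Extra decorator arguments arrive after req, resp, resource, params.
    value = req.get_param_as_int(param_name)
    if value and value > maxval:
        raise falcon.HTTPBadRequest('Out of Range',
                                    '%s must be <= %d' % (param_name, maxval))

class Collection(object):
    @falcon.before(validate_param, 'limit', maxval=50)
    def on_get(self, req, resp):
        resp.body = '{"status": "ok"}'

app = falcon.API()
app.add_route('/collection', Collection())
client = testing.TestClient(app)

assert client.simulate_get('/collection', query_string='limit=10').status_code == 200
assert client.simulate_get('/collection', query_string='limit=51').status_code == 400
```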
**problem_statement:**
Support passing args to hooks
See also: https://stackoverflow.com/questions/27028029/how-to-pass-arguments-to-falcon-before-hook

**repo:** falconry/falcon

**test_patch:**
diff --git a/tests/test_after_hooks.py b/tests/test_after_hooks.py
index 8675604..b8cf3d2 100644
--- a/tests/test_after_hooks.py
+++ b/tests/test_after_hooks.py
@@ -48,8 +48,10 @@ def serialize_body(req, resp):
resp.body = 'Nothing to see here. Move along.'
-def fluffiness(req, resp):
+def fluffiness(req, resp, animal=''):
resp.body = 'fluffy'
+ if animal:
+ resp.set_header('X-Animal', animal)
def resource_aware_fluffiness(req, resp, resource):
@@ -63,14 +65,14 @@ class ResourceAwareFluffiness(object):
fluffiness(req, resp)
-def cuteness(req, resp):
- if resp.body == 'fluffy':
- resp.body += ' and cute'
+def cuteness(req, resp, check, postfix=' and cute'):
+ if resp.body == check:
+ resp.body += postfix
def resource_aware_cuteness(req, resp, resource):
assert resource
- cuteness(req, resp)
+ cuteness(req, resp, 'fluffy')
class Smartness(object):
@@ -95,8 +97,8 @@ cuteness_in_the_head = functools.partial(things_in_the_head,
'X-Cuteness', 'cute')
-def fluffiness_in_the_head(req, resp):
- resp.set_header('X-Fluffiness', 'fluffy')
+def fluffiness_in_the_head(req, resp, value='fluffy'):
+ resp.set_header('X-Fluffiness', value)
# --------------------------------------------------------------------
@@ -123,8 +125,8 @@ class WrappedRespondersResource(object):
pass
[email protected](cuteness)
[email protected](fluffiness)
[email protected](cuteness, 'fluffy', postfix=' and innocent')
[email protected](fluffiness, 'kitten')
class WrappedClassResource(object):
# Test that the decorator skips non-callables
@@ -153,7 +155,7 @@ class WrappedClassResourceChild(WrappedClassResource):
class ClassResourceWithURIFields(object):
- @falcon.after(fluffiness_in_the_head)
+ @falcon.after(fluffiness_in_the_head, 'fluffy')
def on_get(self, req, resp, field1, field2):
self.fields = (field1, field2)
@@ -255,12 +257,14 @@ def test_wrapped_resource(client, resource):
client.app.add_route('/wrapped', resource)
result = client.simulate_get('/wrapped')
assert result.status_code == 200
- assert result.text == 'fluffy and cute'
+ assert result.text == 'fluffy and innocent'
+ assert result.headers['X-Animal'] == 'kitten'
result = client.simulate_head('/wrapped')
assert result.status_code == 200
assert result.headers['X-Fluffiness'] == 'fluffy'
assert result.headers['X-Cuteness'] == 'cute'
+ assert result.headers['X-Animal'] == 'kitten'
result = client.simulate_post('/wrapped')
assert result.status_code == 405
@@ -272,6 +276,7 @@ def test_wrapped_resource(client, resource):
result = client.simulate_options('/wrapped')
assert result.status_code == 200
assert not result.text
+ assert 'X-Animal' not in result.headers
def test_wrapped_resource_with_hooks_aware_of_resource(client, wrapped_resource_aware):
diff --git a/tests/test_before_hooks.py b/tests/test_before_hooks.py
index 1d58df3..01bebb1 100644
--- a/tests/test_before_hooks.py
+++ b/tests/test_before_hooks.py
@@ -17,28 +17,29 @@ def validate(req, resp, params):
'formatted correctly.')
-def validate_param(req, resp, params):
- limit = req.get_param_as_int('limit')
- if limit and int(limit) > 100:
- raise falcon.HTTPBadRequest('Out of range', 'limit must be <= 100')
+def validate_param(req, resp, params, param_name, maxval=100):
+ limit = req.get_param_as_int(param_name)
+ if limit and int(limit) > maxval:
+ msg = '{0} must be <= {1}'.format(param_name, maxval)
+ raise falcon.HTTPBadRequest('Out of Range', msg)
-def resource_aware_validate_param(req, resp, resource, params):
+def resource_aware_validate_param(req, resp, resource, params, param_name, maxval=100):
assert resource
- validate_param(req, resp, params)
+ validate_param(req, resp, params, param_name, maxval)
class ResourceAwareValidateParam(object):
def __call__(self, req, resp, resource, params):
assert resource
- validate_param(req, resp, params)
+ validate_param(req, resp, params, 'limit')
-def validate_field(req, resp, params):
+def validate_field(req, resp, params, field_name='test'):
try:
- params['id'] = int(params['id'])
+ params[field_name] = int(params[field_name])
except ValueError:
- raise falcon.HTTPBadRequest('Invalid ID', 'ID was not valid.')
+ raise falcon.HTTPBadRequest()
def parse_body(req, resp, params):
@@ -92,7 +93,7 @@ frogs_in_the_head = functools.partial(
class WrappedRespondersResource(object):
- @falcon.before(validate_param)
+ @falcon.before(validate_param, 'limit', 100)
@falcon.before(parse_body)
def on_get(self, req, resp, doc):
self.req = req
@@ -107,6 +108,10 @@ class WrappedRespondersResource(object):
class WrappedRespondersResourceChild(WrappedRespondersResource):
+ @falcon.before(validate_param, 'x', maxval=1000)
+ def on_get(self, req, resp):
+ pass
+
def on_put(self, req, resp):
# Test passing no extra args
super(WrappedRespondersResourceChild, self).on_put(req, resp)
@@ -120,11 +125,11 @@ class WrappedClassResource(object):
# Test non-callable should be skipped by decorator
on_patch = {}
- @falcon.before(validate_param)
+ @falcon.before(validate_param, 'limit')
def on_get(self, req, resp, bunnies):
self._capture(req, resp, bunnies)
- @falcon.before(validate_param)
+ @falcon.before(validate_param, 'limit')
def on_head(self, req, resp, bunnies):
self._capture(req, resp, bunnies)
@@ -150,11 +155,11 @@ class WrappedClassResource(object):
class ClassResourceWithAwareHooks(object):
hook_as_class = ResourceAwareValidateParam()
- @falcon.before(resource_aware_validate_param)
+ @falcon.before(resource_aware_validate_param, 'limit', 10)
def on_get(self, req, resp, bunnies):
self._capture(req, resp, bunnies)
- @falcon.before(resource_aware_validate_param)
+ @falcon.before(resource_aware_validate_param, 'limit')
def on_head(self, req, resp, bunnies):
self._capture(req, resp, bunnies)
@@ -174,7 +179,7 @@ class ClassResourceWithAwareHooks(object):
class TestFieldResource(object):
- @falcon.before(validate_field)
+ @falcon.before(validate_field, field_name='id')
def on_get(self, req, resp, id):
self.id = id
@@ -266,6 +271,12 @@ def test_input_validator_inherited(client):
result = client.simulate_put('/')
assert result.status_code == 400
+ result = client.simulate_get('/', query_string='x=1000')
+ assert result.status_code == 200
+
+ result = client.simulate_get('/', query_string='x=1001')
+ assert result.status_code == 400
+
def test_param_validator(client):
result = client.simulate_get('/', query_string='limit=10', body='{}')
@@ -335,6 +346,6 @@ def test_wrapped_resource_with_hooks_aware_of_resource(client, wrapped_aware_res
assert result.status_code == 200
assert wrapped_aware_resource.bunnies == 'fuzzy'
- result = client.simulate_get('/wrapped_aware', query_string='limit=101')
+ result = client.simulate_get('/wrapped_aware', query_string='limit=11')
assert result.status_code == 400
assert wrapped_aware_resource.bunnies == 'fuzzy'
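And the `after` counterpart sketched the same way; for a non-resource-aware action, the extra decorator arguments follow `req` and `resp` (this mirrors `fluffiness` and `WrappedClassResource` in the tests above):

```python
import falcon

def fluffiness(req, resp, animal=''):
    resp.body = 'fluffy'
    if animal:
        resp.set_header('X-Animal', animal)

# Decorating the class applies the hook to every responder, so a GET on
# this resource returns 'fluffy' with an X-Animal: kitten header.
@falcon.after(fluffiness, 'kitten')
class Thing(object):
    def on_get(self, req, resp):
        pass
```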
**meta:**
{
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 2
}

**version:** 1.3

**install_config:**
{
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-randomly",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements/tests"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
}

**requirements:**
attrs==22.2.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
execnet==1.9.0
-e git+https://github.com/falconry/falcon.git@0c6b6deb8692b06c677893f97c89cc3870595853#egg=falcon
fixtures==4.0.1
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
jsonschema==3.2.0
msgpack-python==0.5.6
packaging==21.3
pbr==6.1.1
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
pytest-asyncio==0.16.0
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-randomly==3.10.3
pytest-xdist==3.0.2
python-mimeparse==1.6.0
PyYAML==3.11
requests==2.27.1
six==1.17.0
testtools==2.6.0
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
**environment:**
name: falcon
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- charset-normalizer==2.0.12
- coverage==6.2
- execnet==1.9.0
- fixtures==4.0.1
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jsonschema==3.2.0
- msgpack-python==0.5.6
- packaging==21.3
- pbr==6.1.1
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pytest-asyncio==0.16.0
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-randomly==3.10.3
- pytest-xdist==3.0.2
- python-mimeparse==1.6.0
- pyyaml==3.11
- requests==2.27.1
- six==1.17.0
- testtools==2.6.0
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/falcon
**FAIL_TO_PASS:**
[
"tests/test_after_hooks.py::test_resource_with_uri_fields[resource1]",
"tests/test_after_hooks.py::test_output_validator",
"tests/test_after_hooks.py::test_serializer",
"tests/test_after_hooks.py::test_wrapped_resource_with_hooks_aware_of_resource",
"tests/test_after_hooks.py::test_hook_as_callable_class",
"tests/test_after_hooks.py::test_resource_with_uri_fields[resource0]",
"tests/test_after_hooks.py::test_wrapped_resource[resource1]",
"tests/test_after_hooks.py::test_wrapped_resource[resource0]",
"tests/test_boundedstream.py::test_not_writeable",
"tests/test_response_media.py::test_msgpack[application/msgpack]",
"tests/test_response_media.py::test_use_cached_media",
"tests/test_response_media.py::test_unknown_media_type",
"tests/test_response_media.py::test_json[*/*]",
"tests/test_response_media.py::test_default_media_type[]",
"tests/test_response_media.py::test_json[application/json;",
"tests/test_response_media.py::test_msgpack[application/msgpack;",
"tests/test_response_media.py::test_msgpack[application/x-msgpack]",
"tests/test_before_hooks.py::test_input_validator",
"tests/test_before_hooks.py::test_parser",
"tests/test_before_hooks.py::test_multiple_resource_hooks[resource1]",
"tests/test_before_hooks.py::test_field_validator[resource1]",
"tests/test_before_hooks.py::test_input_validator_inherited",
"tests/test_before_hooks.py::test_field_validator[resource2]",
"tests/test_before_hooks.py::test_multiple_resource_hooks[resource0]",
"tests/test_before_hooks.py::test_wrapped_resource_with_hooks_aware_of_resource",
"tests/test_before_hooks.py::test_wrapped_resource",
"tests/test_before_hooks.py::test_param_validator",
"tests/test_before_hooks.py::test_field_validator[resource0]",
"tests/test_custom_router.py::test_custom_router_takes_req_positional_argument",
"tests/test_custom_router.py::test_custom_router_add_route_should_be_used",
"tests/test_custom_router.py::test_custom_router_find_should_be_used",
"tests/test_custom_router.py::test_custom_router_takes_req_keyword_argument",
"tests/test_custom_router.py::test_can_pass_additional_params_to_add_route",
"tests/test_default_router.py::test_user_regression_special_chars[/thing-2/something+{field}|{q}/notes-/thing-2/something+else|z/notes-expected_params16]",
"tests/test_default_router.py::test_invalid_field_name[/this\\tand\\tthat/this\\nand\\nthat/{thing",
"tests/test_default_router.py::test_options_converters_invalid_name[with-hyphen]",
"tests/test_default_router.py::test_invalid_field_name[/{\\nthing}/world]",
"tests/test_default_router.py::test_not_found[/cvt/teams/NaN]",
"tests/test_default_router.py::test_converters_malformed_specification[/foo/{bar:unknown}/baz]",
"tests/test_default_router.py::test_not_found[/teams/42/members/undefined]",
"tests/test_default_router.py::test_match_entire_path[/items/{x}|{y}|-/items/1080|768]",
"tests/test_default_router.py::test_converters_with_invalid_options[/foo/{bar:int(-1)}/baz]",
"tests/test_default_router.py::test_not_found[/teams/default/members/undefined]",
"tests/test_default_router.py::test_invalid_field_name[/repos/{org}/{repo}/compare/{th\\ting}]",
"tests/test_default_router.py::test_user_regression_special_chars[/serviceRoot/People({field})-/serviceRoot/People('hobbes')-expected_params2]",
"tests/test_default_router.py::test_not_found[/repos/racker/falcon/compare/johndoe:master...janedoe:dev/bogus]",
"tests/test_default_router.py::test_variable",
"tests/test_default_router.py::test_literal_vs_variable[/gists/first/pdf-21]",
"tests/test_default_router.py::test_converters_with_invalid_options[/foo/{bar:int(num_digits=-1)}/baz]",
"tests/test_default_router.py::test_literal_vs_variable[/teams/default-19]",
"tests/test_default_router.py::test_single_character_field_name",
"tests/test_default_router.py::test_invalid_field_name[/{thing}/wo",
"tests/test_default_router.py::test_conflict[/teams/{id:int}/settings]",
"tests/test_default_router.py::test_literal_vs_variable[/gists/first-20]",
"tests/test_default_router.py::test_converters_malformed_specification[/foo/{bar:}]",
"tests/test_default_router.py::test_converters[/cvt/repos/org/repo/compare/gunmachan:1234...kumamon:5678/part-expected_params4]",
"tests/test_default_router.py::test_literal_vs_variable[/teams/1234-6]",
"tests/test_default_router.py::test_invalid_field_name[/{th\\x0bing}/world]",
"tests/test_default_router.py::test_complex[-5]",
"tests/test_default_router.py::test_override",
"tests/test_default_router.py::test_options_converters_invalid_name[",
"tests/test_default_router.py::test_converters[/cvt/teams/1234/members-expected_params1]",
"tests/test_default_router.py::test_user_regression_special_chars[/thing-2/something+{field}+-/thing-2/something+42+-expected_params14]",
"tests/test_default_router.py::test_invalid_field_name[/{-kgriffs}]",
"tests/test_default_router.py::test_invalid_field_name[/{",
"tests/test_default_router.py::test_invalid_field_name[/this",
"tests/test_default_router.py::test_user_regression_special_chars[/serviceRoot/People[{field}]-/serviceRoot/People['calvin']-expected_params1]",
"tests/test_default_router.py::test_not_found[/emojis/signs/0/small]",
"tests/test_default_router.py::test_complex[/full-10]",
"tests/test_default_router.py::test_match_entire_path[/items/{x}*{y}foo-/items/1080*768foobar]",
"tests/test_default_router.py::test_user_regression_special_chars[/items/{x}^^{y}-/items/1080^^768-expected_params12]",
"tests/test_default_router.py::test_non_conflict[/repos/{complex}.{vs}.{simple}]",
"tests/test_default_router.py::test_invalid_field_name[/{524hello}/world]",
"tests/test_default_router.py::test_dead_segment[/teams]",
"tests/test_default_router.py::test_match_entire_path[/items/thing-/items/t]",
"tests/test_default_router.py::test_dead_segment[/gists/42]",
"tests/test_default_router.py::test_converters_with_invalid_options[/foo/{bar:int(num_digits=0)}]",
"tests/test_default_router.py::test_root_path",
"tests/test_default_router.py::test_literal_vs_variable[/gists/first/raw-18]",
"tests/test_default_router.py::test_non_conflict[/repos/{org}/{repo}/compare/{simple_vs_complex}]",
"tests/test_default_router.py::test_invalid_field_name[/hello/{1world}]",
"tests/test_default_router.py::test_complex[/part-15]",
"tests/test_default_router.py::test_invalid_field_name[/repos/{complex}.{-v}.{thing}/etc]",
"tests/test_default_router.py::test_not_found[/teams/42/undefined]",
"tests/test_default_router.py::test_literal_vs_variable[/cvt/teams/default/members/1234-10-32]",
"tests/test_default_router.py::test_user_regression_special_chars[/^{field}-/^42-expected_params6]",
"tests/test_default_router.py::test_user_regression_versioned_url",
"tests/test_default_router.py::test_not_found[/teams/42/undefined/segments]",
"tests/test_default_router.py::test_not_found[/teams/default/undefined/segments]",
"tests/test_default_router.py::test_converters[/cvt/teams/default/members/700-5-expected_params2]",
"tests/test_default_router.py::test_dead_segment[/gists]",
"tests/test_default_router.py::test_user_regression_recipes",
"tests/test_default_router.py::test_options_converters_invalid_name[42istheanswer]",
"tests/test_default_router.py::test_invalid_field_name[/{thing\\t}/world]",
"tests/test_default_router.py::test_malformed_pattern[/repos/racker/falcon/compare/foo]",
"tests/test_default_router.py::test_literal_vs_variable[/teams/1234/members-7]",
"tests/test_default_router.py::test_literal_vs_variable[/emojis/signs/78/small(png)-25]",
"tests/test_default_router.py::test_not_found[/emojis/signs/0/undefined/segments]",
"tests/test_default_router.py::test_duplicate_field_names[/{field}/something/something/{field}/something]",
"tests/test_default_router.py::test_options_converters_set",
"tests/test_default_router.py::test_options_converters_update[spam]",
"tests/test_default_router.py::test_user_regression_special_chars[/items/{x}|{y}-/items/1080|768-expected_params10]",
"tests/test_default_router.py::test_not_str[uri_template2]",
"tests/test_default_router.py::test_not_found[/teams/default/members/thing/undefined]",
"tests/test_default_router.py::test_literal_vs_variable[/emojis/signs/78-13]",
"tests/test_default_router.py::test_invalid_field_name[/repos/{simple-thing}/etc]",
"tests/test_default_router.py::test_dead_segment[/emojis/signs]",
"tests/test_default_router.py::test_invalid_field_name[/{*kgriffs}]",
"tests/test_default_router.py::test_converters[/cvt/repos/org/repo/compare/xkcd:353-expected_params3]",
"tests/test_default_router.py::test_user_regression_special_chars[/items/{x},{y}-/items/1080,768-expected_params11]",
"tests/test_default_router.py::test_not_found[/emojis/signs]",
"tests/test_default_router.py::test_not_found[/emojis/signs/0/undefined]",
"tests/test_default_router.py::test_not_found[/teams/default/undefined]",
"tests/test_default_router.py::test_not_found[/emojis/signs/20/undefined]",
"tests/test_default_router.py::test_not_found[/emojis/signs/20/small]",
"tests/test_default_router.py::test_user_regression_special_chars[/serviceRoot/People({field})-/serviceRoot/People('hob)bes')-expected_params3]",
"tests/test_default_router.py::test_conflict[/teams/{conflict}]",
"tests/test_default_router.py::test_complex_alt[/full-17-/repos/{org}/{repo}/compare/{usr0}:{branch0}/full]",
"tests/test_default_router.py::test_not_found[/teams]",
"tests/test_default_router.py::test_invalid_field_name[/{thing}",
"tests/test_default_router.py::test_user_regression_special_chars[/items/{x}?{y}-/items/1080?768-expected_params9]",
"tests/test_default_router.py::test_not_found[/cvt/teams/default/members/NaN]",
"tests/test_default_router.py::test_user_regression_special_chars[/thing-2/something*{field}/notes-/thing-2/something*42/notes-expected_params15]",
"tests/test_default_router.py::test_malformed_pattern[/repos/racker/falcon/compare/foo/full]",
"tests/test_default_router.py::test_invalid_field_name[/{9v}]",
"tests/test_default_router.py::test_multivar",
"tests/test_default_router.py::test_not_found[/emojis/signs/42/undefined]",
"tests/test_default_router.py::test_invalid_field_name[/repos/{org}/{repo}/compare/{}]",
"tests/test_default_router.py::test_print_src",
"tests/test_default_router.py::test_invalid_field_name[/repos/{or",
"tests/test_default_router.py::test_duplicate_field_names[/{field}{field}]",
"tests/test_default_router.py::test_not_found[/this/does/not/exist]",
"tests/test_default_router.py::test_not_found[/emojis/signs/78/undefined]",
"tests/test_default_router.py::test_user_regression_special_chars[/serviceRoot/People|{field}-/serviceRoot/People|susie-expected_params0]",
"tests/test_default_router.py::test_conflict[/repos/{org}/{repo}/compare/{complex}:{vs}...{complex2}:{conflict}]",
"tests/test_default_router.py::test_not_found[/cvt/teams/default/members]",
"tests/test_default_router.py::test_literal_vs_variable[/cvt/teams/default-31]",
"tests/test_default_router.py::test_options_converters_invalid_name[has",
"tests/test_default_router.py::test_subsegment_not_found",
"tests/test_default_router.py::test_user_regression_special_chars[serviceRoot/$metadata#Airports('{field}')/Name-serviceRoot/$metadata#Airports('KSFO')/Name-expected_params17]",
"tests/test_default_router.py::test_invalid_field_name[/repos/{complex}.{v}.{@thing}/etc]",
"tests/test_default_router.py::test_user_regression_special_chars[/items/{x}*{y}*-/items/1080*768*-expected_params13]",
"tests/test_default_router.py::test_invalid_field_name[/repos/{complex}.{9v}.{thing}/etc]",
"tests/test_default_router.py::test_converters_with_invalid_options[/foo/{bar:int(0)}]",
"tests/test_default_router.py::test_user_regression_special_chars[/+{field}-/+42-expected_params7]",
"tests/test_default_router.py::test_literal",
"tests/test_default_router.py::test_literal_vs_variable[/emojis/signs/78/small.png-24]",
"tests/test_default_router.py::test_invalid_field_name[/{@kgriffs}]",
"tests/test_default_router.py::test_duplicate_field_names[/{field}/{another}/{field}]",
"tests/test_default_router.py::test_options_converters_invalid_name[funky$character]",
"tests/test_default_router.py::test_converters[/cvt/teams/007-expected_params0]",
"tests/test_default_router.py::test_literal_vs_variable[/teams/default/members-7]",
"tests/test_default_router.py::test_conflict[/emojis/signs/{id_too}]",
"tests/test_default_router.py::test_non_conflict[/repos/{org}/{repo}/compare/{complex}:{vs}...{complex2}/full]",
"tests/test_default_router.py::test_literal_segment",
"tests/test_default_router.py::test_not_found[/user/bogus]",
"tests/test_default_router.py::test_options_converters_invalid_name_on_update",
"tests/test_default_router.py::test_options_converters_invalid_name[whitespace",
"tests/test_default_router.py::test_match_entire_path[/items/{x}*768*-/items/1080*768***]",
"tests/test_default_router.py::test_invalid_field_name[/{}]",
"tests/test_default_router.py::test_user_regression_special_chars[/foo/{first}_{second}/bar-/foo/abc_def_ghijk/bar-expected_params8]",
"tests/test_default_router.py::test_user_regression_special_chars[/serviceRoot/People('{field}')-/serviceRoot/People('rosalyn')-expected_params5]",
"tests/test_default_router.py::test_invalid_field_name[/repos/{complex}.{}.{thing}]",
"tests/test_default_router.py::test_not_str[uri_template0]",
"tests/test_default_router.py::test_complex_alt[-16-/repos/{org}/{repo}/compare/{usr0}:{branch0}]",
"tests/test_default_router.py::test_options_converters_update[spam_2]",
"tests/test_default_router.py::test_user_regression_special_chars[/serviceRoot/People({field})(z)-/serviceRoot/People(hobbes)(z)-expected_params4]",
"tests/test_default_router.py::test_literal_vs_variable[/emojis/signs/78/small_png-26]",
"tests/test_default_router.py::test_literal_vs_variable[/gists/1776/pdf-21]",
"tests/test_default_router.py::test_not_found[/teams/default/members/thing/undefined/segments]",
"tests/test_default_router.py::test_not_str[uri_template1]",
"tests/test_default_router.py::test_duplicate_field_names[/{field}...{field}]",
"tests/test_default_router.py::test_converters[/cvt/repos/xkcd/353/compare/susan:0001/full-expected_params5]",
"tests/test_httperror.py::TestHTTPError::test_429_no_retry_after",
"tests/test_httperror.py::TestHTTPError::test_invalid_param",
"tests/test_httperror.py::TestHTTPError::test_no_description_xml",
"tests/test_httperror.py::TestHTTPError::test_epic_fail_xml[text/xml]",
"tests/test_httperror.py::TestHTTPError::test_410_with_body",
"tests/test_httperror.py::TestHTTPError::test_405_without_body_with_extra_headers_double_check",
"tests/test_httperror.py::TestHTTPError::test_416",
"tests/test_httperror.py::TestHTTPError::test_unicode_json",
"tests/test_httperror.py::TestHTTPError::test_temporary_413_integer_retry_after",
"tests/test_httperror.py::TestHTTPError::test_413",
"tests/test_httperror.py::TestHTTPError::test_411",
"tests/test_httperror.py::TestHTTPError::test_414_with_custom_kwargs",
"tests/test_httperror.py::TestHTTPError::test_custom_old_error_serializer",
"tests/test_httperror.py::TestHTTPError::test_epic_fail_xml[application/vnd.company.system.project.resource+xml;v=1.1]",
"tests/test_httperror.py::TestHTTPError::test_414_with_title",
"tests/test_httperror.py::TestHTTPError::test_custom_new_error_serializer",
"tests/test_httperror.py::TestHTTPError::test_client_does_not_accept_json_or_xml",
"tests/test_httperror.py::TestHTTPError::test_misc",
"tests/test_httperror.py::TestHTTPError::test_414",
"tests/test_httperror.py::TestHTTPError::test_custom_old_error_serializer_no_body",
"tests/test_httperror.py::TestHTTPError::test_404_without_body",
"tests/test_httperror.py::TestHTTPError::test_401",
"tests/test_httperror.py::TestHTTPError::test_epic_fail_xml[application/atom+xml]",
"tests/test_httperror.py::TestHTTPError::test_epic_fail_xml[application/xml]",
"tests/test_httperror.py::TestHTTPError::test_405_without_body_with_extra_headers",
"tests/test_httperror.py::TestHTTPError::test_client_does_not_accept_anything",
"tests/test_httperror.py::TestHTTPError::test_429_datetime",
"tests/test_httperror.py::TestHTTPError::test_forbidden[application/vnd.company.system.project.resource+json;v=1.1]",
"tests/test_httperror.py::TestHTTPError::test_invalid_header",
"tests/test_httperror.py::TestHTTPError::test_429",
"tests/test_httperror.py::TestHTTPError::test_epic_fail_json",
"tests/test_httperror.py::TestHTTPError::test_title_default_message_if_none",
"tests/test_httperror.py::TestHTTPError::test_forbidden[application/json-patch+json]",
"tests/test_httperror.py::TestHTTPError::test_temporary_413_datetime_retry_after",
"tests/test_httperror.py::TestHTTPError::test_missing_header",
"tests/test_httperror.py::TestHTTPError::test_missing_param",
"tests/test_httperror.py::TestHTTPError::test_forbidden[application/json]",
"tests/test_httperror.py::TestHTTPError::test_404_with_body",
"tests/test_httperror.py::TestHTTPError::test_base_class",
"tests/test_httperror.py::TestHTTPError::test_414_with_description",
"tests/test_httperror.py::TestHTTPError::test_unicode_xml",
"tests/test_httperror.py::TestHTTPError::test_no_description_json",
"tests/test_httperror.py::TestHTTPError::test_405_without_body",
"tests/test_httperror.py::TestHTTPError::test_503_integer_retry_after",
"tests/test_httperror.py::TestHTTPError::test_503_datetime_retry_after",
"tests/test_httperror.py::TestHTTPError::test_405_with_body",
"tests/test_httperror.py::TestHTTPError::test_410_without_body",
"tests/test_request_attrs.py::TestRequestAttributes::test_netloc_default_port[HTTP/1.0]",
"tests/test_request_attrs.py::TestRequestAttributes::test_scheme_https[HTTP/1.0]",
"tests/test_request_attrs.py::TestRequestAttributes::test_scheme_http[HTTP/1.1-True]",
"tests/test_request_attrs.py::TestRequestAttributes::test_date_missing[if_unmodified_since]",
"tests/test_request_attrs.py::TestRequestAttributes::test_nonlatin_path[/test/%C3%A4%C3%B6%C3%BC%C3%9F%E2%82%AC]",
"tests/test_request_attrs.py::TestRequestAttributes::test_range_unit",
"tests/test_request_attrs.py::TestRequestAttributes::test_client_accepts_props",
"tests/test_request_attrs.py::TestRequestAttributes::test_netloc_default_port[HTTP/1.1]",
"tests/test_request_attrs.py::TestRequestAttributes::test_range",
"tests/test_request_attrs.py::TestRequestAttributes::test_app_blank",
"tests/test_request_attrs.py::TestRequestAttributes::test_attribute_headers",
"tests/test_request_attrs.py::TestRequestAttributes::test_uri_http_1_0",
"tests/test_request_attrs.py::TestRequestAttributes::test_missing_qs",
"tests/test_request_attrs.py::TestRequestAttributes::test_date_invalid[If-Modified-Since-if_modified_since]",
"tests/test_request_attrs.py::TestRequestAttributes::test_client_accepts",
"tests/test_request_attrs.py::TestRequestAttributes::test_bogus_content_length_neg",
"tests/test_request_attrs.py::TestRequestAttributes::test_range_invalid",
"tests/test_request_attrs.py::TestRequestAttributes::test_missing_attribute_header",
"tests/test_request_attrs.py::TestRequestAttributes::test_date_invalid[Date-date]",
"tests/test_request_attrs.py::TestRequestAttributes::test_empty_path",
"tests/test_request_attrs.py::TestRequestAttributes::test_date_missing[if_modified_since]",
"tests/test_request_attrs.py::TestRequestAttributes::test_date_missing[date]",
"tests/test_request_attrs.py::TestRequestAttributes::test_scheme_https[HTTP/1.1]",
"tests/test_request_attrs.py::TestRequestAttributes::test_uri_https",
"tests/test_request_attrs.py::TestRequestAttributes::test_subdomain",
"tests/test_request_attrs.py::TestRequestAttributes::test_date_invalid[If-Unmodified-Since-if_unmodified_since]",
"tests/test_request_attrs.py::TestRequestAttributes::test_app_missing",
"tests/test_request_attrs.py::TestRequestAttributes::test_method",
"tests/test_request_attrs.py::TestRequestAttributes::test_date[If-Unmodified-Since-if_unmodified_since]",
"tests/test_request_attrs.py::TestRequestAttributes::test_netloc_nondefault_port[HTTP/1.0]",
"tests/test_request_attrs.py::TestRequestAttributes::test_empty",
"tests/test_request_attrs.py::TestRequestAttributes::test_reconstruct_url",
"tests/test_request_attrs.py::TestRequestAttributes::test_client_accepts_bogus",
"tests/test_request_attrs.py::TestRequestAttributes::test_port_explicit[HTTP/1.0]",
"tests/test_request_attrs.py::TestRequestAttributes::test_content_length",
"tests/test_request_attrs.py::TestRequestAttributes::test_netloc_from_env[HTTP/1.1]",
"tests/test_request_attrs.py::TestRequestAttributes::test_content_type_method",
"tests/test_request_attrs.py::TestRequestAttributes::test_bogus_content_length_nan",
"tests/test_request_attrs.py::TestRequestAttributes::test_netloc_from_env[HTTP/1.0]",
"tests/test_request_attrs.py::TestRequestAttributes::test_host",
"tests/test_request_attrs.py::TestRequestAttributes::test_date[If-Modified-Since-if_modified_since]",
"tests/test_request_attrs.py::TestRequestAttributes::test_content_length_method",
"tests/test_request_attrs.py::TestRequestAttributes::test_scheme_http[HTTP/1.0-False]",
"tests/test_request_attrs.py::TestRequestAttributes::test_relative_uri",
"tests/test_request_attrs.py::TestRequestAttributes::test_port_explicit[HTTP/1.1]",
"tests/test_request_attrs.py::TestRequestAttributes::test_scheme_http[HTTP/1.0-True]",
"tests/test_request_attrs.py::TestRequestAttributes::test_scheme_http[HTTP/1.1-False]",
"tests/test_request_attrs.py::TestRequestAttributes::test_nonlatin_path[/test/%E5%BB%B6%E5%AE%89]",
"tests/test_request_attrs.py::TestRequestAttributes::test_app_present",
"tests/test_request_attrs.py::TestRequestAttributes::test_nonlatin_path[/hello_\\u043f\\u0440\\u0438\\u0432\\u0435\\u0442]",
"tests/test_request_attrs.py::TestRequestAttributes::test_uri",
"tests/test_request_attrs.py::TestRequestAttributes::test_netloc_nondefault_port[HTTP/1.1]",
"tests/test_request_attrs.py::TestRequestAttributes::test_client_prefers",
"tests/test_request_attrs.py::TestRequestAttributes::test_date[Date-date]",
"tests/test_utils.py::TestFalconTesting::test_decode_empty_result",
"tests/test_utils.py::TestFalconTesting::test_httpnow_alias_for_backwards_compat",
"tests/test_utils.py::TestFalconTesting::test_path_escape_chars_in_create_environ",
"tests/test_utils.py::TestFalconTesting::test_none_header_value_in_create_environ",
"tests/test_utils.py::TestFalconTesting::test_no_prefix_allowed_for_query_strings_in_create_environ",
"tests/test_utils.py::test_simulate_free_functions[simulate_put]",
"tests/test_utils.py::test_simulate_request_protocol[http-HEAD]",
"tests/test_utils.py::test_simulate_request_protocol[https-OPTIONS]",
"tests/test_utils.py::test_simulate_request_protocol[https-CONNECT]",
"tests/test_utils.py::test_simulate_request_protocol[http-POST]",
"tests/test_utils.py::test_simulate_request_protocol[http-PATCH]",
"tests/test_utils.py::test_simulate_free_functions[simulate_post]",
"tests/test_utils.py::test_simulate_request_protocol[http-OPTIONS]",
"tests/test_utils.py::test_simulate_request_protocol[https-TRACE]",
"tests/test_utils.py::test_simulate_request_protocol[https-POST]",
"tests/test_utils.py::test_simulate_request_protocol[http-TRACE]",
"tests/test_utils.py::test_simulate_request_protocol[http-DELETE]",
"tests/test_utils.py::test_simulate_request_protocol[https-DELETE]",
"tests/test_utils.py::test_simulate_free_functions[simulate_get]",
"tests/test_utils.py::test_simulate_request_protocol[https-PUT]",
"tests/test_utils.py::test_simulate_request_protocol[https-GET]",
"tests/test_utils.py::test_simulate_free_functions[simulate_patch]",
"tests/test_utils.py::test_simulate_free_functions[simulate_head]",
"tests/test_utils.py::test_simulate_request_protocol[http-PUT]",
"tests/test_utils.py::test_simulate_request_protocol[https-HEAD]",
"tests/test_utils.py::test_simulate_request_protocol[http-GET]",
"tests/test_utils.py::test_simulate_free_functions[simulate_delete]",
"tests/test_utils.py::test_simulate_free_functions[simulate_options]",
"tests/test_utils.py::test_simulate_request_protocol[http-CONNECT]",
"tests/test_utils.py::test_simulate_request_protocol[https-PATCH]",
"tests/test_utils.py::TestFalconTestCase::test_status",
"tests/test_utils.py::TestFalconTestCase::test_cached_text_in_result",
"tests/test_utils.py::TestFalconTestCase::test_query_string",
"tests/test_utils.py::TestFalconTestCase::test_path_must_start_with_slash",
"tests/test_utils.py::TestFalconTestCase::test_wsgi_iterable_not_closeable",
"tests/test_utils.py::TestFalconTestCase::test_query_string_in_path",
"tests/test_utils.py::TestFalconTestCase::test_simple_resource_body_json_xor",
"tests/test_utils.py::TestFalconTestCase::test_query_string_no_question",
"tests/test_utils.py::TestFalconUtils::test_uri_encode_value",
"tests/test_utils.py::TestFalconUtils::test_pack_query_params_one",
"tests/test_utils.py::TestFalconUtils::test_parse_query_string",
"tests/test_utils.py::TestFalconUtils::test_uri_encode",
"tests/test_utils.py::TestFalconUtils::test_pack_query_params_none",
"tests/test_utils.py::TestFalconUtils::test_uri_encode_double",
"tests/test_utils.py::TestFalconUtils::test_dt_to_http",
"tests/test_utils.py::TestFalconUtils::test_parse_host",
"tests/test_utils.py::TestFalconUtils::test_prop_uri_encode_models_stdlib_quote",
"tests/test_utils.py::TestFalconUtils::test_deprecated_decorator",
"tests/test_utils.py::TestFalconUtils::test_get_http_status",
"tests/test_utils.py::TestFalconUtils::test_http_now",
"tests/test_utils.py::TestFalconUtils::test_uri_decode",
"tests/test_utils.py::TestFalconUtils::test_pack_query_params_several",
"tests/test_utils.py::TestFalconUtils::test_prop_uri_encode_value_models_stdlib_quote_safe_tilde",
"tests/test_utils.py::TestFalconUtils::test_http_date_to_dt",
"tests/test_utils.py::TestFalconUtils::test_prop_uri_decode_models_stdlib_unquote_plus",
"tests/test_utils.py::TestNoApiClass::test_something",
"tests/test_utils.py::TestSetupApi::test_something",
"tests/test_utils.py::TestCaseFancyAPI::test_something",
"tests/test_response_body.py::TestResponseBody::test_append_body",
"tests/test_response_body.py::TestResponseBody::test_response_repr",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_template_may_not_contain_double_slash[a/b//]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_two_fields[]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_two_fields[/]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_three_fields",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_template_may_not_contain_double_slash[a//]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_string_type_required[API]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_root",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_template_must_start_with_slash[this/that]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_template_may_not_contain_double_slash[a//b]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_one_field",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_template_may_not_contain_double_slash[//]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_template_must_start_with_slash[this]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_string_type_required[42]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_malformed_field",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_template_may_not_contain_double_slash[a/b//c]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_one_field_with_digits",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_no_fields[/hello/world]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_template_may_not_contain_double_slash[//b]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_no_fields[/hello]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_no_fields[/hi/there/how/are/you]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_one_field_with_prefixed_digits",
"tests/test_error_handlers.py::TestErrorHandler::test_uncaught_error_else",
"tests/test_error_handlers.py::TestErrorHandler::test_error_order_subclass_masked",
"tests/test_error_handlers.py::TestErrorHandler::test_subclass_error",
"tests/test_error_handlers.py::TestErrorHandler::test_uncaught_error",
"tests/test_error_handlers.py::TestErrorHandler::test_error_order_subclass",
"tests/test_error_handlers.py::TestErrorHandler::test_handle_not_defined",
"tests/test_error_handlers.py::TestErrorHandler::test_caught_error",
"tests/test_error_handlers.py::TestErrorHandler::test_converted_error",
"tests/test_error_handlers.py::TestErrorHandler::test_error_order_duplicate",
"tests/test_deps.py::test_deps_mimeparse_correct_package",
"tests/test_error.py::test_with_default_title_and_desc[HTTPLengthRequired-411",
"tests/test_error.py::test_with_title_and_desc[HTTPInternalServerError]",
"tests/test_error.py::test_with_title_and_desc[HTTPPreconditionRequired]",
"tests/test_error.py::test_with_default_title_and_desc[HTTPBadRequest-400",
"tests/test_error.py::test_with_default_title_and_desc[HTTPLoopDetected-508",
"tests/test_error.py::test_with_title_and_desc[HTTPUriTooLong]",
"tests/test_error.py::test_with_default_title_and_desc[HTTPInternalServerError-500",
"tests/test_error.py::test_with_title_and_desc[HTTPBadGateway]",
"tests/test_error.py::test_with_default_title_and_desc[HTTPUriTooLong-414",
"tests/test_error.py::test_with_default_title_and_desc[HTTPInsufficientStorage-507",
"tests/test_error.py::test_with_title_and_desc[HTTPVersionNotSupported]",
"tests/test_error.py::test_with_title_and_desc[HTTPLengthRequired]",
"tests/test_error.py::test_with_retry_after[HTTPTooManyRequests]",
"tests/test_error.py::test_http_not_acceptable_with_title_and_desc_and_challenges",
"tests/test_error.py::test_with_default_title_and_desc[HTTPTooManyRequests-429",
"tests/test_error.py::test_with_default_title_and_desc[HTTPRequestHeaderFieldsTooLarge-431",
"tests/test_error.py::test_with_default_title_and_desc[HTTPNetworkAuthenticationRequired-511",
"tests/test_error.py::test_with_title_and_desc[HTTPFailedDependency]",
"tests/test_error.py::test_with_title_and_desc[HTTPBadRequest]",
"tests/test_error.py::test_http_unsupported_media_type_with_title_and_desc_and_challenges",
"tests/test_error.py::test_with_title_and_desc[HTTPInsufficientStorage]",
"tests/test_error.py::test_with_title_and_desc[HTTPGatewayTimeout]",
"tests/test_error.py::test_with_default_title_and_desc[HTTPPreconditionRequired-428",
"tests/test_error.py::test_with_title_and_desc[HTTPNetworkAuthenticationRequired]",
"tests/test_error.py::test_with_default_title_and_desc[HTTPRequestEntityTooLarge-413",
"tests/test_error.py::test_with_title_and_desc[HTTPUnavailableForLegalReasons]",
"tests/test_error.py::test_with_title_and_desc[HTTPNotImplemented]",
"tests/test_error.py::test_with_default_title_and_desc[HTTPVersionNotSupported-505",
"tests/test_error.py::test_with_default_title_and_desc[HTTPGatewayTimeout-504",
"tests/test_error.py::test_with_retry_after[HTTPServiceUnavailable]",
"tests/test_error.py::test_with_default_title_and_desc[HTTPLocked-423",
"tests/test_error.py::test_with_default_title_and_desc[HTTPFailedDependency-424",
"tests/test_error.py::test_with_title_and_desc[HTTPUnprocessableEntity]",
"tests/test_error.py::test_with_title_and_desc[HTTPConflict]",
"tests/test_error.py::test_with_default_title_and_desc[HTTPNotImplemented-501",
"tests/test_error.py::test_with_retry_after[HTTPRequestEntityTooLarge]",
"tests/test_error.py::test_http_unauthorized_with_title_and_desc_and_challenges",
"tests/test_error.py::test_http_not_acceptable_no_title_and_desc_and_challenges",
"tests/test_error.py::test_with_default_title_and_desc[HTTPForbidden-403",
"tests/test_error.py::test_with_title_and_desc[HTTPLoopDetected]",
"tests/test_error.py::test_with_title_and_desc[HTTPForbidden]",
"tests/test_error.py::test_http_error_repr",
"tests/test_error.py::test_with_default_title_and_desc[HTTPBadGateway-502",
"tests/test_error.py::test_with_title_and_desc[HTTPPreconditionFailed]",
"tests/test_error.py::test_with_default_title_and_desc[HTTPServiceUnavailable-503",
"tests/test_error.py::test_with_default_title_and_desc[HTTPUnprocessableEntity-422",
"tests/test_error.py::test_with_default_title_and_desc[HTTPPreconditionFailed-412",
"tests/test_error.py::test_with_default_title_and_desc[HTTPUnavailableForLegalReasons-451",
"tests/test_error.py::test_with_title_and_desc[HTTPServiceUnavailable]",
"tests/test_error.py::test_http_unsupported_media_type_no_title_and_desc_and_challenges",
"tests/test_error.py::test_with_default_title_and_desc[HTTPConflict-409",
"tests/test_error.py::test_with_title_and_desc[HTTPLocked]",
"tests/test_error.py::test_with_title_and_desc[HTTPRequestHeaderFieldsTooLarge]",
"tests/test_error.py::test_http_unauthorized_no_title_and_desc_and_challenges",
"tests/test_sinks.py::TestDefaultRouting::test_route_precedence",
"tests/test_sinks.py::TestDefaultRouting::test_named_groups",
"tests/test_sinks.py::TestDefaultRouting::test_route_precedence_with_both_id",
"tests/test_sinks.py::TestDefaultRouting::test_single_simple_pattern",
"tests/test_sinks.py::TestDefaultRouting::test_multiple_patterns",
"tests/test_sinks.py::TestDefaultRouting::test_single_compiled_pattern",
"tests/test_sinks.py::TestDefaultRouting::test_single_default_pattern",
"tests/test_sinks.py::TestDefaultRouting::test_with_route",
"tests/test_sinks.py::TestDefaultRouting::test_route_precedence_with_id",
"tests/test_slots.py::TestSlots::test_slots_request",
"tests/test_slots.py::TestSlots::test_slots_response",
"tests/test_response_context.py::TestRequestContext::test_custom_response_context_failure",
"tests/test_response_context.py::TestRequestContext::test_custom_response_context_factory",
"tests/test_response_context.py::TestRequestContext::test_custom_response_context",
"tests/test_response_context.py::TestRequestContext::test_default_response_context",
"tests/test_hello.py::TestHelloWorld::test_body[/body-resource0-<lambda>]",
"tests/test_hello.py::TestHelloWorld::test_no_body_on_head",
"tests/test_hello.py::TestHelloWorld::test_filelike_closing[NonClosingBytesIO-False]",
"tests/test_hello.py::TestHelloWorld::test_filelike_closing[ClosingBytesIO-True]",
"tests/test_hello.py::TestHelloWorld::test_body[/data-resource2-<lambda>]",
"tests/test_hello.py::TestHelloWorld::test_stream_chunked",
"tests/test_hello.py::TestHelloWorld::test_no_route",
"tests/test_hello.py::TestHelloWorld::test_filelike_using_helper",
"tests/test_hello.py::TestHelloWorld::test_status_not_set",
"tests/test_hello.py::TestHelloWorld::test_body[/bytes-resource1-<lambda>]",
"tests/test_hello.py::TestHelloWorld::test_root_route",
"tests/test_hello.py::TestHelloWorld::test_stream_known_len",
"tests/test_hello.py::TestHelloWorld::test_filelike",
"tests/test_hello.py::TestHelloWorld::test_env_headers_list_of_tuples",
"tests/test_request_forwarded.py::test_no_forwarded_headers",
"tests/test_request_forwarded.py::test_x_forwarded_proto",
"tests/test_request_forwarded.py::test_forwarded_missing_first_hop_host",
"tests/test_request_forwarded.py::test_forwarded_host",
"tests/test_request_forwarded.py::test_x_forwarded_host",
"tests/test_request_forwarded.py::test_forwarded_multiple_params",
"tests/test_options.py::TestRequestOptions::test_options_toggle[auto_parse_form_urlencoded]",
"tests/test_options.py::TestRequestOptions::test_option_defaults",
"tests/test_options.py::TestRequestOptions::test_options_toggle[auto_parse_qs_csv]",
"tests/test_options.py::TestRequestOptions::test_incorrect_options",
"tests/test_options.py::TestRequestOptions::test_options_toggle[keep_blank_qs_values]",
"tests/test_options.py::TestRequestOptions::test_options_toggle[strip_url_path_trailing_slash]",
"tests/test_validators.py::test_jsonschema_validation_success",
"tests/test_validators.py::test_jsonschema_validation_failure",
"tests/test_cookies.py::test_cookie_header_is_missing",
"tests/test_cookies.py::test_non_ascii_value[Unicode_\\xc3\\x83\\xc2\\xa6\\xc3\\x83\\xc2\\xb8]",
"tests/test_cookies.py::test_cookie_timezone",
"tests/test_cookies.py::test_cookie_max_age_float_and_string[foofloat]",
"tests/test_cookies.py::test_non_ascii_value[42]",
"tests/test_cookies.py::test_cookies_setable",
"tests/test_cookies.py::test_response_unset_cookie",
"tests/test_cookies.py::test_response_complex_case",
"tests/test_cookies.py::test_cookie_max_age_float_and_string[foostring]",
"tests/test_cookies.py::test_non_ascii_value[Unicode_\\xc3\\xa6\\xc3\\xb8]",
"tests/test_cookies.py::test_non_ascii_name[Unicode_\\xc3\\x83\\xc2\\xa6\\xc3\\x83\\xc2\\xb8]",
"tests/test_cookies.py::test_non_ascii_name[Unicode_\\xc3\\xa6\\xc3\\xb8]",
"tests/test_cookies.py::test_invalid_cookies_are_ignored",
"tests/test_cookies.py::test_cookie_expires_aware",
"tests/test_cookies.py::test_cookie_expires_naive",
"tests/test_cookies.py::test_request_cookie_parsing",
"tests/test_cookies.py::test_unicode_inside_ascii_range",
"tests/test_cookies.py::test_response_disable_secure_globally",
"tests/test_cookies.py::test_non_ascii_name[42]",
"tests/test_cookies.py::test_response_base_case",
"tests/test_request_media.py::test_use_cached_media",
"tests/test_request_media.py::test_msgpack[application/x-msgpack]",
"tests/test_request_media.py::test_json[application/json]",
"tests/test_request_media.py::test_msgpack[application/msgpack]",
"tests/test_request_media.py::test_invalid_json",
"tests/test_request_media.py::test_unknown_media_type[nope/json]",
"tests/test_request_media.py::test_json[None]",
"tests/test_request_media.py::test_json[application/json;",
"tests/test_request_media.py::test_invalid_msgpack",
"tests/test_request_media.py::test_msgpack[application/msgpack;",
"tests/test_request_media.py::test_invalid_stream_fails_gracefully",
"tests/test_request_media.py::test_json[*/*]",
"tests/test_request_context.py::TestRequestContext::test_default_request_context",
"tests/test_request_context.py::TestRequestContext::test_custom_request_context",
"tests/test_request_context.py::TestRequestContext::test_custom_request_context_request_access",
"tests/test_request_context.py::TestRequestContext::test_custom_request_context_failure",
"tests/test_httpstatus.py::TestHTTPStatus::test_raise_status_in_responder",
"tests/test_httpstatus.py::TestHTTPStatus::test_raise_status_in_before_hook",
"tests/test_httpstatus.py::TestHTTPStatus::test_raise_status_runs_after_hooks",
"tests/test_httpstatus.py::TestHTTPStatus::test_raise_status_empty_body",
"tests/test_httpstatus.py::TestHTTPStatus::test_raise_status_survives_after_hooks",
"tests/test_httpstatus.py::TestHTTPStatusWithMiddleware::test_raise_status_in_process_request",
"tests/test_httpstatus.py::TestHTTPStatusWithMiddleware::test_raise_status_runs_process_response",
"tests/test_httpstatus.py::TestHTTPStatusWithMiddleware::test_raise_status_in_process_resource",
"tests/test_media_handlers.py::test_base_handler_contract",
"tests/test_cmd_print_api.py::test_traverse_with_verbose",
"tests/test_cmd_print_api.py::test_traverse",
"tests/test_query_params.py::TestQueryParams::test_get_dict_valid[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_none[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_int_neg[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_option_auto_parse_qs_csv_simple_true[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_multiple_form_keys_as_list[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_invalid[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_option_auto_parse_qs_csv_complex_false[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_percent_encoded[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_multiple_keys_as_bool[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_required[get_param-simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_missing_param[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_multiple_keys_as_int[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_dict_missing_param[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_list_type[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_required[get_param_as_list-simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_int[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_none[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_valid[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_list_transformer[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_missing_param[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_dict_store[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_multiple_form_keys[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_list_type_blank[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_valid_with_format[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_required[get_param_as_bool-simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_valid[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_required[get_param-simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_valid_with_format[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_valid[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_allowed_names[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_dict_valid[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_required[get_param_as_bool-simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_required[get_param_as_int-simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_int[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_valid_with_format[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_required[get_param_as_list-simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_boolean[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_simple[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_store[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_bad_percentage[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_valid[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_bad_percentage[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_allowed_names[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_store[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_multiple_form_keys[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_required[get_param_as_int-simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_option_auto_parse_qs_csv_simple_false[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_invalid[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_dict_invalid[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_option_auto_parse_qs_csv_simple_true[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_multiple_keys_as_bool[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_dict_invalid[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_option_auto_parse_qs_csv_simple_false[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_multiple_keys_as_int[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_blank[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_multiple_form_keys_as_list[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_missing_param[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_missing_param[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_invalid[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_param_property[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_boolean_blank[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_valid_with_format[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_simple[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_store[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_option_auto_parse_qs_csv_complex_false[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_store[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_percent_encoded[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_int_neg[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_invalid[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_dict_store[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_list_transformer[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_boolean_blank[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_list_type_blank[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_blank[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_dict_missing_param[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_boolean[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_list_type[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_param_property[simulate_request_post_query_params]",
"tests/test_query_params.py::TestPostQueryParams::test_http_methods_body_expected[POST]",
"tests/test_query_params.py::TestPostQueryParams::test_http_methods_body_expected[DELETE]",
"tests/test_query_params.py::TestPostQueryParams::test_http_methods_body_expected[PUT]",
"tests/test_query_params.py::TestPostQueryParams::test_empty_body_no_content_length",
"tests/test_query_params.py::TestPostQueryParams::test_explicitly_disable_auto_parse",
"tests/test_query_params.py::TestPostQueryParams::test_http_methods_body_not_expected[HEAD]",
"tests/test_query_params.py::TestPostQueryParams::test_non_ascii",
"tests/test_query_params.py::TestPostQueryParams::test_empty_body",
"tests/test_query_params.py::TestPostQueryParams::test_http_methods_body_not_expected[GET]",
"tests/test_query_params.py::TestPostQueryParams::test_http_methods_body_expected[OPTIONS]",
"tests/test_query_params.py::TestPostQueryParams::test_http_methods_body_expected[PATCH]",
"tests/test_query_params.py::TestPostQueryParamsDefaultBehavior::test_dont_auto_parse_by_default",
"tests/test_wsgi_interface.py::TestWSGIInterface::test_srmock",
"tests/test_wsgi_interface.py::TestWSGIInterface::test_pep3333",
"tests/test_request_access_route.py::test_remote_addr_missing",
"tests/test_request_access_route.py::test_remote_addr",
"tests/test_request_access_route.py::test_x_forwarded_for",
"tests/test_request_access_route.py::test_x_real_ip",
"tests/test_request_access_route.py::test_rfc_forwarded",
"tests/test_request_access_route.py::test_malformed_rfc_forwarded",
"tests/test_request_access_route.py::test_remote_addr_only",
"tests/test_request_body.py::TestRequestBody::test_request_repr",
"tests/test_request_body.py::TestRequestBody::test_body_stream_wrapper",
"tests/test_request_body.py::TestRequestBody::test_bounded_stream_property_empty_body",
"tests/test_request_body.py::TestRequestBody::test_tiny_body_overflow",
"tests/test_request_body.py::TestRequestBody::test_tiny_body",
"tests/test_request_body.py::TestRequestBody::test_read_body",
"tests/test_request_body.py::TestRequestBody::test_empty_body",
"tests/test_uri_converters.py::test_int_converter_malformed[\\x0c123]",
"tests/test_uri_converters.py::test_int_converter[12-2-13-12-None]",
"tests/test_uri_converters.py::test_int_converter_malformed[]",
"tests/test_uri_converters.py::test_datetime_converter[2017-07-03T14:30:01-%Y-%m-%dT%H:%M:%SZ-None]",
"tests/test_uri_converters.py::test_uuid_converter[4edba55524ac41f48c1a3bbf09a76b0b-expected2]",
"tests/test_uri_converters.py::test_datetime_converter_default_format",
"tests/test_uri_converters.py::test_int_converter[12-2-12-12-12]",
"tests/test_uri_converters.py::test_datetime_converter[07",
"tests/test_uri_converters.py::test_uuid_converter[4edba555-24ac-41f4-8c1a-3bbf09a76b0g-None]",
"tests/test_uri_converters.py::test_datetime_converter[07-03-17-%m-%d-%y-expected0]",
"tests/test_uri_converters.py::test_int_converter[2-1-2-2-2]",
"tests/test_uri_converters.py::test_int_converter_malformed[123",
"tests/test_uri_converters.py::test_int_converter[1-1-1-None-1]",
"tests/test_uri_converters.py::test_int_converter_malformed[\\n123]",
"tests/test_uri_converters.py::test_int_converter[123-None-None-None-123]",
"tests/test_uri_converters.py::test_int_converter_malformed[\\x0b123]",
"tests/test_uri_converters.py::test_int_converter_malformed[123\\t]",
"tests/test_uri_converters.py::test_uuid_converter[4edba555-24ac-41f4-8c1a-3bbf09a76b0-None]",
"tests/test_uri_converters.py::test_uuid_converter[4edba55524ac-41f4-8c1a-3bbf09a76b0b-expected1]",
"tests/test_uri_converters.py::test_int_converter_malformed[123\\n]",
"tests/test_uri_converters.py::test_int_converter_malformed[",
"tests/test_uri_converters.py::test_uuid_converter[urn:uuid:4edba555-24ac-41f4-8c1a-3bbf09a76b0b-expected3]",
"tests/test_uri_converters.py::test_int_converter_invalid_config[-10]",
"tests/test_uri_converters.py::test_datetime_converter[2017_19-%Y_%H-expected4]",
"tests/test_uri_converters.py::test_int_converter[1-1-1-2-1]",
"tests/test_uri_converters.py::test_int_converter[0-None-None-None-0]",
"tests/test_uri_converters.py::test_uuid_converter[4edba555_24ac_41f4_8c1a_3bbf09a76b0b-None]",
"tests/test_uri_converters.py::test_int_converter[12-1-None-None-None1]",
"tests/test_uri_converters.py::test_uuid_converter[urn:uuid:4edba55524ac41f48c1a3bbf09a76b0b-expected4]",
"tests/test_uri_converters.py::test_int_converter_malformed[\\t123]",
"tests/test_uri_converters.py::test_datetime_converter[2017-07-03T14:30:01Z-%Y-%m-%dT%H:%M:%SZ-expected2]",
"tests/test_uri_converters.py::test_int_converter[12-2-1-12-12]",
"tests/test_uri_converters.py::test_datetime_converter[2017-07-03T14:30:01-%Y-%m-%dT%H:%M:%S-expected3]",
"tests/test_uri_converters.py::test_int_converter_malformed[123\\x0c]",
"tests/test_uri_converters.py::test_uuid_converter[4edba555-24ac-41f4-8c1a-3bbf09a76b0b-expected0]",
"tests/test_uri_converters.py::test_uuid_converter[",
"tests/test_uri_converters.py::test_int_converter[12-2-13-13-None]",
"tests/test_uri_converters.py::test_datetime_converter[07-03-17",
"tests/test_uri_converters.py::test_int_converter_invalid_config[0]",
"tests/test_uri_converters.py::test_int_converter[12-2-None-None-121]",
"tests/test_uri_converters.py::test_int_converter[2-1-1-2-2]",
"tests/test_uri_converters.py::test_int_converter[001-None-None-None-1]",
"tests/test_uri_converters.py::test_int_converter_malformed[123\\x0b]",
"tests/test_uri_converters.py::test_int_converter[1-1-1-1-1]",
"tests/test_uri_converters.py::test_int_converter_malformed[0x0F]",
"tests/test_uri_converters.py::test_int_converter[1-1-None-None-1]",
"tests/test_uri_converters.py::test_int_converter[1-1-2-None-None]",
"tests/test_uri_converters.py::test_int_converter[12-2-None-None-120]",
"tests/test_uri_converters.py::test_int_converter_malformed[something]",
"tests/test_uri_converters.py::test_int_converter_invalid_config[-1]",
"tests/test_uri_converters.py::test_datetime_converter[",
"tests/test_uri_converters.py::test_int_converter[3-1-1-2-None]",
"tests/test_uri_converters.py::test_int_converter[12-1-1-12-None]",
"tests/test_uri_converters.py::test_int_converter_malformed[123\\r]",
"tests/test_uri_converters.py::test_int_converter[1-1-2-1-None]",
"tests/test_uri_converters.py::test_int_converter[01-None-None-None-1]",
"tests/test_uri_converters.py::test_int_converter[00-None-None-None-0]",
"tests/test_uri_converters.py::test_int_converter_malformed[\\r123]",
"tests/test_uri_converters.py::test_uuid_converter[4edba555-24ac-41f4-8c1a-3bbf09a76b0b",
"tests/test_uri_converters.py::test_uuid_converter[4-None]",
"tests/test_uri_converters.py::test_int_converter[12-1-None-None-None0]",
"tests/test_wsgiref_inputwrapper_with_size.py::TestWsgiRefInputWrapper::test_resources_can_read_request_stream_during_tests",
"tests/test_redirects.py::TestRedirects::test_redirect[PUT-303",
"tests/test_redirects.py::TestRedirects::test_redirect[POST-302",
"tests/test_redirects.py::TestRedirects::test_redirect[GET-301",
"tests/test_redirects.py::TestRedirects::test_redirect[DELETE-307",
"tests/test_redirects.py::TestRedirects::test_redirect[HEAD-308",
"tests/test_wsgi_errors.py::TestWSGIError::test_responder_logged_bytestring",
"tests/test_uri_templates.py::test_uuid_converter[/widgets/{widget_id:uuid}/orders-/widgets/dcb0b536015a48509caf6d06c7e6c6e/orders-None]",
"tests/test_uri_templates.py::test_int_converter[/{id:int}]",
"tests/test_uri_templates.py::test_relative_path[]",
"tests/test_uri_templates.py::test_uuid_converter_complex_segment",
"tests/test_uri_templates.py::test_converter_custom[/{food:spam(\")\")}:{food_too:spam(\"()\")}-/bacon:eggs-expected1]",
"tests/test_uri_templates.py::test_datetime_converter[/{start_year:int}-to-{timestamp:dt(\"%Y-%m\")}-/1961-to-1969-07-21-None]",
"tests/test_uri_templates.py::test_same_level_complex_var[False]",
"tests/test_uri_templates.py::test_single[widget_id]",
"tests/test_uri_templates.py::test_single[id]",
"tests/test_uri_templates.py::test_uuid_converter[/versions/diff/{left:uuid}...{right:uuid()}-/versions/diff/dcb0b536-015a-4850-9caf-6d06c7e6c6eb...626a3038-ac0d-4a11-9921-857d9b81e6d6-expected3]",
"tests/test_uri_templates.py::test_converter_custom[/({food:spam()}){food_too:spam(\"()\")}-/(bacon)eggs-expected2]",
"tests/test_uri_templates.py::test_int_converter[/{id:int(min=123)}]",
"tests/test_uri_templates.py::test_no_vars",
"tests/test_uri_templates.py::test_int_converter_rejections[/{id:int(num_digits=3,",
"tests/test_uri_templates.py::test_multiple",
"tests/test_uri_templates.py::test_datetime_converter[/{start_year:int}-to-{timestamp:dt(\"%Y-%m-%d\")}-/1961-to-1969-07-21-dt_expected1]",
"tests/test_uri_templates.py::test_int_converter_rejections[/{id:int(2)}]",
"tests/test_uri_templates.py::test_single[id123]",
"tests/test_uri_templates.py::test_special_chars",
"tests/test_uri_templates.py::test_datetime_converter[/{start_year:int}-to-{timestamp:dt}-/1961-to-1969-07-21T02:56:00Z-dt_expected0]",
"tests/test_uri_templates.py::test_uuid_converter[/versions/diff/{left:uuid()}...{right:uuid()}-/versions/diff/dcb0b536-015a-4850-9caf-6d06c7e6c6eb...626a3038-ac0d-4a11-9921-857d9b81e6d6-expected2]",
"tests/test_uri_templates.py::test_uuid_converter[/widgets/{widget_id:uuid}-/widgets/dcb0b536-015a-4850-9caf-6d06c7e6c6eb-expected0]",
"tests/test_uri_templates.py::test_uuid_converter[/widgets/{widget_id:uuid}/orders-/widgets/dcb0b536015a48509caf6d06c7e6c6eb/orders-expected1]",
"tests/test_uri_templates.py::test_int_converter[/{id:int(min=123,",
"tests/test_uri_templates.py::test_empty_path_component[/in//side]",
"tests/test_uri_templates.py::test_int_converter_rejections[/{id:int(min=124)}]",
"tests/test_uri_templates.py::test_relative_path[no]",
"tests/test_uri_templates.py::test_empty_path_component[/end//]",
"tests/test_uri_templates.py::test_single_trailing_slash",
"tests/test_uri_templates.py::test_relative_path[no/leading_slash]",
"tests/test_uri_templates.py::test_uuid_converter[/versions/diff/{left:uuid()}...{right:uuid}-/versions/diff/dcb0b536-015a-4850-9caf-6d06c7e6c6eb...626a3038-ac0d-4a11-9921-857d9b81e6d6-expected4]",
"tests/test_uri_templates.py::test_root_path",
"tests/test_uri_templates.py::test_same_level_complex_var[True]",
"tests/test_uri_templates.py::test_datetime_converter[/{start_year:int}/{timestamp:dt(\"%Y-%m-%d",
"tests/test_uri_templates.py::test_empty_path_component[//begin]",
"tests/test_uri_templates.py::test_converter_custom[/{food:spam}-/something-expected0]",
"tests/test_uri_templates.py::test_int_converter[/{id:int(3)}]",
"tests/test_uri_templates.py::test_empty_path_component[//]",
"tests/test_response.py::test_response_set_content_type_set",
"tests/test_response.py::test_response_set_content_type_not_set",
"tests/test_headers.py::TestHeaders::test_add_link_single",
"tests/test_headers.py::TestHeaders::test_add_link_with_anchor",
"tests/test_headers.py::TestHeaders::test_content_type_no_body",
"tests/test_headers.py::TestHeaders::test_response_set_and_get_header",
"tests/test_headers.py::TestHeaders::test_add_link_with_title",
"tests/test_headers.py::TestHeaders::test_no_content_type[204",
"tests/test_headers.py::TestHeaders::test_headers_as_list",
"tests/test_headers.py::TestHeaders::test_vary_header[vary2-accept-encoding,",
"tests/test_headers.py::TestHeaders::test_add_link_with_hreflang_multi",
"tests/test_headers.py::TestHeaders::test_default_value",
"tests/test_headers.py::TestHeaders::test_add_link_with_type_hint",
"tests/test_headers.py::TestHeaders::test_no_content_length[304",
"tests/test_headers.py::TestHeaders::test_vary_header[vary1-accept-encoding,",
"tests/test_headers.py::TestHeaders::test_content_length_options",
"tests/test_headers.py::TestHeaders::test_custom_content_type",
"tests/test_headers.py::TestHeaders::test_response_header_helpers_on_get",
"tests/test_headers.py::TestHeaders::test_override_default_media_type[text/plain;",
"tests/test_headers.py::TestHeaders::test_no_content_length[204",
"tests/test_headers.py::TestHeaders::test_add_link_multiple",
"tests/test_headers.py::TestHeaders::test_override_default_media_type[text/plain-Hello",
"tests/test_headers.py::TestHeaders::test_passthrough_request_headers",
"tests/test_headers.py::TestHeaders::test_override_default_media_type_missing_encoding",
"tests/test_headers.py::TestHeaders::test_default_media_type",
"tests/test_headers.py::TestHeaders::test_vary_star",
"tests/test_headers.py::TestHeaders::test_unicode_headers_convertable",
"tests/test_headers.py::TestHeaders::test_unicode_location_headers",
"tests/test_headers.py::TestHeaders::test_add_link_with_hreflang",
"tests/test_headers.py::TestHeaders::test_add_link_with_title_star",
"tests/test_headers.py::TestHeaders::test_required_header",
"tests/test_headers.py::TestHeaders::test_add_link_complex",
"tests/test_headers.py::TestHeaders::test_content_header_missing",
"tests/test_headers.py::TestHeaders::test_no_content_type[304",
"tests/test_headers.py::TestHeaders::test_vary_header[vary0-accept-encoding]",
"tests/test_headers.py::TestHeaders::test_content_length",
"tests/test_headers.py::TestHeaders::test_response_append_header",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_default_on_options",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_methods_not_allowed_simple",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_bogus_method",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_report",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_post_not_allowed",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_on_options",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_method_not_allowed_with_param",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_put",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_get",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_misc",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_methods_not_allowed_complex",
"tests/test_middleware.py::TestResourceMiddleware::test_can_access_resource_params",
"tests/test_middleware.py::TestErrorHandling::test_error_composed_before_resp_middleware_called",
"tests/test_middleware.py::TestErrorHandling::test_http_status_raised_from_error_handler",
"tests/test_middleware.py::TestRequestTimeMiddleware::test_skip_process_resource",
"tests/test_middleware.py::TestRequestTimeMiddleware::test_log_get_request",
"tests/test_middleware.py::TestRequestTimeMiddleware::test_response_middleware_raises_exception",
"tests/test_middleware.py::TestRequestTimeMiddleware::test_add_invalid_middleware",
"tests/test_middleware.py::TestTransactionIdMiddleware::test_generate_trans_id_with_request",
"tests/test_middleware.py::TestRemoveBasePathMiddleware::test_base_path_is_removed_before_routing",
"tests/test_middleware.py::TestSeveralMiddlewares::test_generate_trans_id_and_time_with_request",
"tests/test_middleware.py::TestSeveralMiddlewares::test_order_mw_executed_when_exception_in_req",
"tests/test_middleware.py::TestSeveralMiddlewares::test_middleware_execution_order",
"tests/test_middleware.py::TestSeveralMiddlewares::test_multiple_reponse_mw_throw_exception",
"tests/test_middleware.py::TestSeveralMiddlewares::test_inner_mw_throw_exception",
"tests/test_middleware.py::TestSeveralMiddlewares::test_order_independent_mw_executed_when_exception_in_req",
"tests/test_middleware.py::TestSeveralMiddlewares::test_order_mw_executed_when_exception_in_resp",
"tests/test_middleware.py::TestSeveralMiddlewares::test_order_independent_mw_executed_when_exception_in_rsrc",
"tests/test_middleware.py::TestSeveralMiddlewares::test_inner_mw_with_ex_handler_throw_exception",
"tests/test_middleware.py::TestSeveralMiddlewares::test_order_mw_executed_when_exception_in_rsrc",
"tests/test_middleware.py::TestSeveralMiddlewares::test_outer_mw_with_ex_handler_throw_exception",
"tests/test_middleware.py::TestSeveralMiddlewares::test_order_independent_mw_executed_when_exception_in_resp",
"tests/test_middleware.py::TestSeveralMiddlewares::test_legacy_middleware_called_with_correct_args",
"tests/test_middleware.py::TestSeveralMiddlewares::test_independent_middleware_execution_order",
"tests/test_wsgi.py::TestWSGIServer::test_get",
"tests/test_wsgi.py::TestWSGIServer::test_post_read_bounded_stream",
"tests/test_wsgi.py::TestWSGIServer::test_head_405",
"tests/test_wsgi.py::TestWSGIServer::test_post",
"tests/test_wsgi.py::TestWSGIServer::test_post_invalid_content_length",
"tests/test_wsgi.py::TestWSGIServer::test_post_read_bounded_stream_no_body",
"tests/test_wsgi.py::TestWSGIServer::test_put"
]
| [
"tests/test_response_media.py::test_default_media_type[media_type1]"
]
| []
| []
| Apache License 2.0 | 1,932 | [
"docs/api/hooks.rst",
"falcon/hooks.py"
]
| [
"docs/api/hooks.rst",
"falcon/hooks.py"
]
|
lbl-srg__BuildingsPy-183 | ad2f3e7ffb0a01117e5f09ac498a87b5c02ca158 | 2017-12-05 15:34:13 | 923b1087e255f7f35224aa7c1653abf9c038f849 | diff --git a/buildingspy/development/error_dictionary.py b/buildingspy/development/error_dictionary.py
index 2304f63..4a3c8d3 100644
--- a/buildingspy/development/error_dictionary.py
+++ b/buildingspy/development/error_dictionary.py
@@ -138,6 +138,13 @@ class ErrorDictionary(object):
             'model_message': "\"inner Modelica.StateGraph.StateGraphRoot\" is missing in '{}'.\n",
             'summary_message': "Number of models with missing StateGraphRoot : {}\n"}

+        self._error_dict["mismatched displayUnits"] = {
+            'tool_message': "Mismatched displayUnit",
+            'counter': 0,
+            'buildingspy_var': "iMisDisUni",
+            'model_message': "\"Mismatched displayUnit in '{}'.\n",
+            'summary_message': "Number of models with mismatched displayUnit : {}\n"}
+
     def get_dictionary(self):
         """ Return the dictionary with all error data
         """
diff --git a/buildingspy/development/refactor.py b/buildingspy/development/refactor.py
index ba4d363..2c064b0 100644
--- a/buildingspy/development/refactor.py
+++ b/buildingspy/development/refactor.py
@@ -637,6 +637,10 @@ def move_class(source, target):
"""
##############################################################
+ # First, remove empty subdirectories
+ _remove_empty_folders(source.replace(".", os.path.sep),
+ removeRoot=False)
+ ##############################################################
# Check if it is a directory with a package.mo file
if os.path.isdir(source.replace(".", os.path.sep)):
_move_class_directory(source, target)
@@ -665,6 +669,26 @@ def move_class(source, target):
     _update_all_references(source, target)


+def _remove_empty_folders(path, removeRoot=True):
+    ''' Remove empty directories
+    '''
+    if not os.path.isdir(path):
+        return
+
+    # remove empty subfolders
+    files = os.listdir(path)
+    if len(files):
+        for f in files:
+            fullpath = os.path.join(path, f)
+            if os.path.isdir(fullpath):
+                _remove_empty_folders(fullpath)
+
+    # if folder empty, delete it
+    files = os.listdir(path)
+    if len(files) == 0 and removeRoot:
+        os.rmdir(path)
+
+
 def _update_all_references(source, target):
     """ Updates all references in `.mo` and `.mos` files.
diff --git a/buildingspy/fmi/__init__.py b/buildingspy/fmi/__init__.py
index 4efbae7..7bb1d9d 100644
--- a/buildingspy/fmi/__init__.py
+++ b/buildingspy/fmi/__init__.py
@@ -58,8 +58,6 @@ def get_dependencies(fmu_file_name):
             ]
         },
         "InitialUnknowns": {
-            "CPUtime": [],
-            "EventCounter": [],
             "der(x)": [
                 "u"
             ],
@@ -72,8 +70,6 @@ def get_dependencies(fmu_file_name):
             ]
         },
         "Outputs": {
-            "CPUtime": [],
-            "EventCounter": [],
             "y1": [
                 "x"
             ],
@@ -120,8 +116,13 @@ def get_dependencies(fmu_file_name):
         #this_root = outputs
         for child in children:
             variable = variable_names[int(child.attrib['index'])]
-            dependencies[typ][variable] = []
-            for ind_var in child.attrib['dependencies'].split(' '):
-                if ind_var.strip() != "":  # If variables depend on nothing, there will be an empty string
-                    dependencies[typ][variable].append(variable_names[int(ind_var)])
+            # Exclude CPUtime and EventCounter, which are written
+            # depending on the Dymola 2018FD01 configuration.
+            if variable not in ["CPUtime", "EventCounter"]:
+                dependencies[typ][variable] = []
+                for ind_var in child.attrib['dependencies'].split(' '):
+                    # If variables depend on nothing, there will be an empty string, these
+                    # are therefore excluded.
+                    if ind_var.strip() != "":
+                        dependencies[typ][variable].append(variable_names[int(ind_var)])
     return dependencies
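Editor's note (not part of the dataset row): the effect of the hunk above, seen from the caller's side. 'MyModel.fmu' is a placeholder (a real FMU translated with Dymola is required), and the asserted keys are taken from the docstring excerpt in this diff.

    import json
    import buildingspy.fmi as fmi

    dep = fmi.get_dependencies('MyModel.fmu')
    # After the patch, the solver bookkeeping variables never show up:
    assert 'CPUtime' not in dep['Outputs']
    assert 'EventCounter' not in dep['InitialUnknowns']
    print(json.dumps(dep, indent=2))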
| In reference results, exclude CPUtime and EventCounter in FMI dependencies
These are only written based on the configuration of Dymola. As they are only output of the solver, they should be excluded from the reference results. | lbl-srg/BuildingsPy | diff --git a/buildingspy/tests/test_development_error_dictionary.py b/buildingspy/tests/test_development_error_dictionary.py
index 393e2cc..ee9d12b 100644
--- a/buildingspy/tests/test_development_error_dictionary.py
+++ b/buildingspy/tests/test_development_error_dictionary.py
@@ -39,7 +39,8 @@ class Test_development_error_dictionary(unittest.TestCase):
                                'type inconsistent definition equations',
                                'unspecified initial conditions',
                                'unused connector',
-                               'stateGraphRoot missing'])
+                               'stateGraphRoot missing',
+                               'mismatched displayUnits'])

         self.assertEqual(len(k), len(k_expected), "Wrong number of keys.")
         for i in range(len(k)):
@@ -63,7 +64,8 @@ class Test_development_error_dictionary(unittest.TestCase):
                                'Type inconsistent definition equation',
                                'Dymola has selected default initial condition',
                                'Warning: The following connector variables are not used in the model',
-                               "A \\\"stateGraphRoot\\\" component was automatically introduced."])
+                               "A \\\"stateGraphRoot\\\" component was automatically introduced.",
+                               "Mismatched displayUnit"])

         self.assertEqual(len(k), len(k_expected), "Wrong number of tool messages.")
         for i in range(len(k)):
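Editor's note (not part of the dataset row): a standalone distillation of the assertion pattern used in both hunks above; 'mismatched displayUnits' is the key the patch introduces, the other values are copied from the test.

    expected = sorted(['stateGraphRoot missing', 'mismatched displayUnits'])
    actual = sorted(['mismatched displayUnits', 'stateGraphRoot missing'])
    assert len(actual) == len(expected), 'Wrong number of keys.'
    for i in range(len(actual)):
        assert actual[i] == expected[i], 'Wrong key.'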
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 3
} | 1.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"autopep8",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc tidy"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
autopep8==2.0.4
-e git+https://github.com/lbl-srg/BuildingsPy.git@ad2f3e7ffb0a01117e5f09ac498a87b5c02ca158#egg=buildingspy
certifi==2021.5.30
future==1.0.0
gitdb==4.0.9
GitPython==3.1.18
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==3.0.3
MarkupSafe==2.0.1
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pycodestyle==2.10.0
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytidylib==0.3.2
smmap==5.0.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
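Editor's note (not part of the dataset row): a hypothetical Python driver that replays this row's install_config. The command strings are copied verbatim from the JSON fields above (the pip install line is composed from the pip_packages list), and the script assumes it runs inside the row's Debian/conda container with the repository checked out at the base commit.

    import subprocess

    STEPS = [
        'apt-get update',                      # pre_install
        'apt-get install -y gcc tidy',
        'pip install -e .',                    # install
        'pip install autopep8 pytest',         # pip_packages
        'pytest --no-header -rA --tb=line --color=no -p no:cacheprovider'
        ' -W ignore::DeprecationWarning',      # test_cmd
    ]
    for cmd in STEPS:
        subprocess.run(cmd, shell=True, check=True)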
| name: BuildingsPy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
  - autopep8==2.0.4
  - future==1.0.0
  - gitdb==4.0.9
  - gitpython==3.1.18
  - jinja2==3.0.3
  - markupsafe==2.0.1
  - pycodestyle==2.10.0
  - pytidylib==0.3.2
  - smmap==5.0.0
  - tomli==1.2.3
prefix: /opt/conda/envs/BuildingsPy
| [
"buildingspy/tests/test_development_error_dictionary.py::Test_development_error_dictionary::test_keys",
"buildingspy/tests/test_development_error_dictionary.py::Test_development_error_dictionary::test_tool_messages"
]
| []
| []
| []
| null | 1,934 | [
"buildingspy/development/error_dictionary.py",
"buildingspy/fmi/__init__.py",
"buildingspy/development/refactor.py"
]
| [
"buildingspy/development/error_dictionary.py",
"buildingspy/fmi/__init__.py",
"buildingspy/development/refactor.py"
]
|
|
dpkp__kafka-python-1312 | 141b6b29609f9594ad9d3d3302a0123d1b831261 | 2017-12-06 00:48:48 | 618c5051493693c1305aa9f08e8a0583d5fcf0e3 | diff --git a/kafka/client_async.py b/kafka/client_async.py
index e36d78e..1350503 100644
--- a/kafka/client_async.py
+++ b/kafka/client_async.py
@@ -947,7 +947,7 @@ class DelayedTaskQueue(object):
"""Number of seconds until next task is ready."""
self._drop_removed()
if not self._tasks:
- return 9999999999
+ return float('inf')
else:
return max(self._tasks[0][0] - time.time(), 0)
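Editor's note (not part of the dataset row): a quick illustration of why float('inf') is a better "no task scheduled" value than the magic sentinel it replaces; the numbers below are illustrative only.

    no_task_delay = float('inf')
    next_io_timeout = 30.0
    print(min(no_task_delay, next_io_timeout))  # 30.0 -- a real bound always wins
    print(float('inf') > 9999999999)            # True -- no sentinel can be outgrown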
diff --git a/kafka/conn.py b/kafka/conn.py
index 51a007c..2926e2f 100644
--- a/kafka/conn.py
+++ b/kafka/conn.py
@@ -251,67 +251,42 @@ class BrokerConnection(object):
         self._sasl_auth_future = None
         self.last_attempt = 0
         self._gai = None
-        self._gai_index = 0
         self._sensors = None
         if self.config['metrics']:
             self._sensors = BrokerConnectionMetrics(self.config['metrics'],
                                                     self.config['metric_group_prefix'],
                                                     self.node_id)

+    def _next_afi_host_port(self):
+        if not self._gai:
+            self._gai = dns_lookup(self._init_host, self._init_port, self._init_afi)
+            if not self._gai:
+                log.error('DNS lookup failed for %s:%i (%s)',
+                          self._init_host, self._init_port, self._init_afi)
+                return
+
+        afi, _, __, ___, sockaddr = self._gai.pop(0)
+        host, port = sockaddr[:2]
+        return (afi, host, port)
+
     def connect(self):
         """Attempt to connect and return ConnectionState"""
         if self.state is ConnectionStates.DISCONNECTED:
-            log.debug('%s: creating new socket', self)
-            # if self.afi is set to AF_UNSPEC, then we need to do a name
-            # resolution and try all available address families
-            if self._init_afi == socket.AF_UNSPEC:
-                if self._gai is None:
-                    # XXX: all DNS functions in Python are blocking. If we really
-                    # want to be non-blocking here, we need to use a 3rd-party
-                    # library like python-adns, or move resolution onto its
-                    # own thread. This will be subject to the default libc
-                    # name resolution timeout (5s on most Linux boxes)
-                    try:
-                        self._gai = socket.getaddrinfo(self._init_host,
-                                                       self._init_port,
-                                                       socket.AF_UNSPEC,
-                                                       socket.SOCK_STREAM)
-                    except socket.gaierror as ex:
-                        log.warning('DNS lookup failed for %s:%d,'
-                                    ' exception was %s. Is your'
-                                    ' advertised.listeners (called'
-                                    ' advertised.host.name before Kafka 9)'
-                                    ' correct and resolvable?',
-                                    self._init_host, self._init_port, ex)
-                        self._gai = []
-                    self._gai_index = 0
-                else:
-                    # if self._gai already exists, then we should try the next
-                    # name
-                    self._gai_index += 1
-                while True:
-                    if self._gai_index >= len(self._gai):
-                        error = 'Unable to connect to any of the names for {0}:{1}'.format(
-                            self._init_host, self._init_port)
-                        log.error(error)
-                        self.close(Errors.ConnectionError(error))
-                        return
-                    afi, _, __, ___, sockaddr = self._gai[self._gai_index]
-                    if afi not in (socket.AF_INET, socket.AF_INET6):
-                        self._gai_index += 1
-                        continue
-                    break
-                self.host, self.port = sockaddr[:2]
-                self._sock = socket.socket(afi, socket.SOCK_STREAM)
+            self.last_attempt = time.time()
+            next_lookup = self._next_afi_host_port()
+            if not next_lookup:
+                self.close(Errors.ConnectionError('DNS failure'))
+                return
             else:
-                self._sock = socket.socket(self._init_afi, socket.SOCK_STREAM)
+                log.debug('%s: creating new socket', self)
+                self.afi, self.host, self.port = next_lookup
+                self._sock = socket.socket(self.afi, socket.SOCK_STREAM)

             for option in self.config['socket_options']:
                 log.debug('%s: setting socket option %s', self, option)
                 self._sock.setsockopt(*option)

             self._sock.setblocking(False)
-            self.last_attempt = time.time()
             self.state = ConnectionStates.CONNECTING
             if self.config['security_protocol'] in ('SSL', 'SASL_SSL'):
                 self._wrap_ssl()
@@ -328,11 +303,6 @@ class BrokerConnection(object):
ret = None
try:
ret = self._sock.connect_ex((self.host, self.port))
- # if we got here through a host lookup, we've found a host,port,af tuple
- # that works save it so we don't do a GAI lookup again
- if self._gai is not None:
- self.afi = self._sock.family
- self._gai = None
except socket.error as err:
ret = err.errno
@@ -607,7 +577,7 @@ class BrokerConnection(object):
elif self.connecting():
return 0
else:
- return 999999999
+ return float('inf')
def connected(self):
"""Return True iff socket is connected."""
@@ -645,23 +615,15 @@ class BrokerConnection(object):
will be failed with this exception.
Default: kafka.errors.ConnectionError.
"""
- if self.state is ConnectionStates.DISCONNECTED:
- if error is not None:
- if sys.version_info >= (3, 2):
- log.warning('%s: close() called on disconnected connection with error: %s', self, error, stack_info=True)
- else:
- log.warning('%s: close() called on disconnected connection with error: %s', self, error)
- return
-
log.info('%s: Closing connection. %s', self, error or '')
- self.state = ConnectionStates.DISCONNECTING
- self.config['state_change_callback'](self)
+ if self.state is not ConnectionStates.DISCONNECTED:
+ self.state = ConnectionStates.DISCONNECTING
+ self.config['state_change_callback'](self)
self._update_reconnect_backoff()
if self._sock:
self._sock.close()
self._sock = None
self.state = ConnectionStates.DISCONNECTED
- self.last_attempt = time.time()
self._sasl_auth_future = None
self._protocol = KafkaProtocol(
client_id=self.config['client_id'],
@@ -747,13 +709,12 @@ class BrokerConnection(object):
return ()
# augment respones w/ correlation_id, future, and timestamp
- for i in range(len(responses)):
+ for i, response in enumerate(responses):
(correlation_id, future, timestamp) = self.in_flight_requests.popleft()
latency_ms = (time.time() - timestamp) * 1000
if self._sensors:
self._sensors.request_time.record(latency_ms)
- response = responses[i]
log.debug('%s Response %d (%s ms): %s', self, correlation_id, latency_ms, response)
responses[i] = (response, future)
@@ -1171,3 +1132,29 @@ def collect_hosts(hosts, randomize=True):
shuffle(result)
return result
+
+
+def is_inet_4_or_6(gai):
+ """Given a getaddrinfo struct, return True iff ipv4 or ipv6"""
+ return gai[0] in (socket.AF_INET, socket.AF_INET6)
+
+
+def dns_lookup(host, port, afi=socket.AF_UNSPEC):
+ """Returns a list of getaddrinfo structs, optionally filtered to an afi (ipv4 / ipv6)"""
+ # XXX: all DNS functions in Python are blocking. If we really
+ # want to be non-blocking here, we need to use a 3rd-party
+ # library like python-adns, or move resolution onto its
+ # own thread. This will be subject to the default libc
+ # name resolution timeout (5s on most Linux boxes)
+ try:
+ return list(filter(is_inet_4_or_6,
+ socket.getaddrinfo(host, port, afi,
+ socket.SOCK_STREAM)))
+ except socket.gaierror as ex:
+ log.warning('DNS lookup failed for %s:%d,'
+ ' exception was %s. Is your'
+ ' advertised.listeners (called'
+ ' advertised.host.name before Kafka 9)'
+ ' correct and resolvable?',
+ host, port, ex)
+ return []
diff --git a/kafka/producer/kafka.py b/kafka/producer/kafka.py
index 646e773..5d32b13 100644
--- a/kafka/producer/kafka.py
+++ b/kafka/producer/kafka.py
@@ -437,7 +437,7 @@ class KafkaProducer(object):
return
if timeout is None:
# threading.TIMEOUT_MAX is available in Python3.3+
- timeout = getattr(threading, 'TIMEOUT_MAX', 999999999)
+ timeout = getattr(threading, 'TIMEOUT_MAX', float('inf'))
if getattr(threading, 'TIMEOUT_MAX', False):
assert 0 <= timeout <= getattr(threading, 'TIMEOUT_MAX')
else:
diff --git a/kafka/producer/sender.py b/kafka/producer/sender.py
index ffc67f8..48ad06e 100644
--- a/kafka/producer/sender.py
+++ b/kafka/producer/sender.py
@@ -103,7 +103,7 @@ class Sender(threading.Thread):
self._metadata.request_update()
# remove any nodes we aren't ready to send to
- not_ready_timeout = 999999999
+ not_ready_timeout = float('inf')
for node in list(ready_nodes):
if not self._client.ready(node):
log.debug('Node %s not ready; delaying produce of accumulated batch', node)
diff --git a/kafka/util.py b/kafka/util.py
index de8f228..385fd56 100644
--- a/kafka/util.py
+++ b/kafka/util.py
@@ -12,14 +12,21 @@ from kafka.vendor import six
from kafka.errors import BufferUnderflowError
-def crc32(data):
- crc = binascii.crc32(data)
- # py2 and py3 behave a little differently
- # CRC is encoded as a signed int in kafka protocol
- # so we'll convert the py3 unsigned result to signed
- if six.PY3 and crc >= 2**31:
- crc -= 2**32
- return crc
+if six.PY3:
+ MAX_INT = 2 ** 31
+ TO_SIGNED = 2 ** 32
+
+ def crc32(data):
+ crc = binascii.crc32(data)
+ # py2 and py3 behave a little differently
+ # CRC is encoded as a signed int in kafka protocol
+ # so we'll convert the py3 unsigned result to signed
+ if crc >= MAX_INT:
+ crc -= TO_SIGNED
+ return crc
+else:
+ def crc32(data):
+ return binascii.crc32(data)
def write_int_string(s):
| KafkaConsumer stuck in infinite loop on connection error
It seems to be stuck in this loop: https://github.com/dpkp/kafka-python/blob/34dc9dd2fe6b47f4542c5a131e0e0cbc1b00ed80/kafka/conn.py#L294
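For illustration, a minimal, self-contained reduction of that pattern (hypothetical helper names, not the actual kafka-python code): each attempt re-resolves the host, and a failed lookup simply returns to the caller, which retries and logs forever instead of failing the connection.

```python
# Hypothetical reduction of the reported behavior -- NOT kafka-python code.
import logging
import socket

log = logging.getLogger("kafka.conn")


def resolve(host, port):
    """Resolve host:port; a failed DNS lookup yields an empty list."""
    try:
        return socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM)
    except socket.gaierror:
        return []


def connect_once(host, port, gai_index=0):
    addrs = resolve(host, port)
    if gai_index >= len(addrs):
        # Logged on every pass; the caller simply retries from index 0,
        # so nothing ever marks the connection as failed -> unbounded logs.
        log.error("Unable to connect to any of the names for %s:%d", host, port)
        return None
    family, _, _, _, sockaddr = addrs[gai_index]
    return socket.socket(family, socket.SOCK_STREAM)
```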
The consumer filled up ~1TB of logs over the course of 3 days, but did not throw an exception. Example logs:
```
kafka.conn ERROR Unable to connect to any of the names for kafka-4-broker.example.com:9092
kafka.cluster INFO Group coordinator for my-group is BrokerMetadata(nodeId=102, host=u'kafka-2-broker.example.com', port=9092, rack=None)
kafka.cluster INFO Group coordinator for my-group is BrokerMetadata(nodeId=102, host=u'kafka-2-broker.example.com', port=9092, rack=None)
kafka.conn ERROR Unable to connect to any of the names for kafka-4-broker.example.com:9092
kafka.conn WARNING <BrokerConnection node_id=104 host=kafka-4-broker.example.com/kafka-4-broker.example.com port=9092>: close() called on disconnected connection with error: ConnectionError: Unable to connect to any of the names for kafka-4-broker.example.com:9092
kafka.cluster INFO Group coordinator for my-group is BrokerMetadata(nodeId=102, host=u'kafka-2-broker.example.com', port=9092, rack=None)
kafka.cluster INFO Group coordinator for my-group is BrokerMetadata(nodeId=102, host=u'kafka-2-broker.example.com', port=9092, rack=None)
kafka.coordinator INFO Discovered coordinator 102 for group my-group
kafka.conn ERROR Unable to connect to any of the names for kafka-1-broker.example.com:9092
kafka.coordinator INFO Discovered coordinator 102 for group my-group
kafka.cluster INFO Group coordinator for my-group is BrokerMetadata(nodeId=102, host=u'kafka-2-broker.example.com', port=9092, rack=None)
kafka.conn ERROR Unable to connect to any of the names for kafka-2-broker.example.com:9092
kafka.conn WARNING <BrokerConnection node_id=104 host=kafka-4-broker.example.com/kafka-4-broker.example.com port=9092>: close() called on disconnected connection with error: ConnectionError: Unable to connect to any of the names for kafka-4-broker.example.com:9092
kafka.cluster INFO Group coordinator for my-group is BrokerMetadata(nodeId=102, host=u'kafka-2-broker.example.com', port=9092, rack=None)
kafka.conn WARNING <BrokerConnection node_id=101 host=kafka-1-broker.example.com/kafka-1-broker.example.com port=9092>: close() called on disconnected connection with error: ConnectionError: Unable to connect to any of the names for kafka-1-broker.example.com:9092
kafka.conn ERROR Unable to connect to any of the names for kafka-2-broker.example.com:9092
kafka.conn ERROR Unable to connect to any of the names for kafka-2-broker.example.com:9092
kafka.coordinator INFO Discovered coordinator 102 for group my-group
kafka.conn ERROR Unable to connect to any of the names for kafka-3-broker.example.com:9092
kafka.coordinator INFO Discovered coordinator 102 for group my-group
kafka.conn WARNING <BrokerConnection node_id=102 host=kafka-2-broker.example.com/kafka-2-broker.example.com port=9092>: close() called on disconnected connection with error: ConnectionError: Unable to connect to any of the names for kafka-2-broker.example.com:9092
kafka.conn WARNING <BrokerConnection node_id=103 host=kafka-3-broker.example.com/kafka-3-broker.example.com port=9092>: close() called on disconnected connection with error: ConnectionError: Unable to connect to any of the names for kafka-3-broker.example.com:9092
kafka.coordinator INFO Discovered coordinator 102 for group my-group
kafka.coordinator INFO Discovered coordinator 102 for group my-group
kafka.conn WARNING <BrokerConnection node_id=102 host=kafka-2-broker.example.com/kafka-2-broker.example.com port=9092>: close() called on disconnected connection with error: ConnectionError: Unable to connect to any of the names for kafka-2-broker.example.com:9092
```
 | dpkp/kafka-python | diff --git a/test/test_conn.py b/test/test_conn.py
index 1621e60..ef7925a 100644
--- a/test/test_conn.py
+++ b/test/test_conn.py
@@ -267,3 +267,28 @@ def test_lookup_on_connect():
m.assert_called_once_with(hostname, port, 0, 1)
conn.close()
assert conn.host == ip2
+
+
+def test_relookup_on_failure():
+ hostname = 'example.org'
+ port = 9092
+ conn = BrokerConnection(hostname, port, socket.AF_UNSPEC)
+ assert conn.host == conn.hostname == hostname
+ mock_return1 = []
+ with mock.patch("socket.getaddrinfo", return_value=mock_return1) as m:
+ last_attempt = conn.last_attempt
+ conn.connect()
+ m.assert_called_once_with(hostname, port, 0, 1)
+ assert conn.disconnected()
+ assert conn.last_attempt > last_attempt
+
+ ip2 = '127.0.0.2'
+ mock_return2 = [
+ (2, 2, 17, '', (ip2, 9092)),
+ ]
+
+ with mock.patch("socket.getaddrinfo", return_value=mock_return2) as m:
+ conn.connect()
+ m.assert_called_once_with(hostname, port, 0, 1)
+ conn.close()
+ assert conn.host == ip2
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 5
} | 1.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest pytest-cov pytest-catchlog pytest-pylint pytest-sugar pytest-mock mock python-snappy lz4tools xxhash",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc libsnappy-dev"
],
"python": "3.6",
"reqs_path": [
"docs/requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
astroid==2.11.7
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
cramjam==2.5.0
dill==0.3.4
docutils==0.18.1
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
isort==5.10.1
Jinja2==3.0.3
-e git+https://github.com/dpkp/kafka-python.git@141b6b29609f9594ad9d3d3302a0123d1b831261#egg=kafka_python
lazy-object-proxy==1.7.1
lz4tools==1.3.1.2
MarkupSafe==2.0.1
mccabe==0.7.0
mock==5.2.0
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
pockets==0.9.1
py==1.11.0
Pygments==2.14.0
pylint==2.13.9
pyparsing==3.1.4
pytest==7.0.1
pytest-catchlog==1.2.2
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-pylint==0.18.0
pytest-sugar==0.9.6
python-snappy==0.7.3
pytz==2025.2
requests==2.27.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-napoleon==0.7
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
termcolor==1.1.0
toml==0.10.2
tomli==1.2.3
typed-ast==1.5.5
typing_extensions==4.1.1
urllib3==1.26.20
wrapt==1.16.0
xxhash==3.2.0
zipp==3.6.0
| name: kafka-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- astroid==2.11.7
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- coverage==6.2
- cramjam==2.5.0
- dill==0.3.4
- docutils==0.18.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- isort==5.10.1
- jinja2==3.0.3
- lazy-object-proxy==1.7.1
- lz4tools==1.3.1.2
- markupsafe==2.0.1
- mccabe==0.7.0
- mock==5.2.0
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- pockets==0.9.1
- py==1.11.0
- pygments==2.14.0
- pylint==2.13.9
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-catchlog==1.2.2
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-pylint==0.18.0
- pytest-sugar==0.9.6
- python-snappy==0.7.3
- pytz==2025.2
- requests==2.27.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-napoleon==0.7
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- termcolor==1.1.0
- toml==0.10.2
- tomli==1.2.3
- typed-ast==1.5.5
- typing-extensions==4.1.1
- urllib3==1.26.20
- wrapt==1.16.0
- xxhash==3.2.0
- zipp==3.6.0
prefix: /opt/conda/envs/kafka-python
| [
"test/test_conn.py::test_relookup_on_failure"
]
| []
| [
"test/test_conn.py::test_connect[states0]",
"test/test_conn.py::test_connect[states1]",
"test/test_conn.py::test_connect[states2]",
"test/test_conn.py::test_connect[states3]",
"test/test_conn.py::test_connect[states4]",
"test/test_conn.py::test_connect_timeout",
"test/test_conn.py::test_blacked_out",
"test/test_conn.py::test_connected",
"test/test_conn.py::test_connecting",
"test/test_conn.py::test_send_disconnected",
"test/test_conn.py::test_send_connecting",
"test/test_conn.py::test_send_max_ifr",
"test/test_conn.py::test_send_no_response",
"test/test_conn.py::test_send_response",
"test/test_conn.py::test_send_error",
"test/test_conn.py::test_can_send_more",
"test/test_conn.py::test_recv_disconnected",
"test/test_conn.py::test_recv",
"test/test_conn.py::test_close",
"test/test_conn.py::test_collect_hosts__happy_path",
"test/test_conn.py::test_collect_hosts__ipv6",
"test/test_conn.py::test_collect_hosts__string_list",
"test/test_conn.py::test_collect_hosts__with_spaces",
"test/test_conn.py::test_lookup_on_connect"
]
| []
| Apache License 2.0 | 1,935 | [
"kafka/client_async.py",
"kafka/producer/kafka.py",
"kafka/producer/sender.py",
"kafka/util.py",
"kafka/conn.py"
]
| [
"kafka/client_async.py",
"kafka/producer/kafka.py",
"kafka/producer/sender.py",
"kafka/util.py",
"kafka/conn.py"
]
|
|
conan-io__conan-2118 | ad94635b286e94e2468d2add00ba9081ad4be895 | 2017-12-06 12:18:05 | ad94635b286e94e2468d2add00ba9081ad4be895 | codecov-io: # [Codecov](https://codecov.io/gh/conan-io/conan/pull/2118?src=pr&el=h1) Report
> Merging [#2118](https://codecov.io/gh/conan-io/conan/pull/2118?src=pr&el=desc) into [develop](https://codecov.io/gh/conan-io/conan/commit/ad94635b286e94e2468d2add00ba9081ad4be895?src=pr&el=desc) will **decrease** coverage by `0.01%`.
> The diff coverage is `77.27%`.
```diff
@@ Coverage Diff @@
## develop #2118 +/- ##
===========================================
- Coverage 95.88% 95.87% -0.02%
===========================================
Files 350 350
Lines 27833 27837 +4
===========================================
- Hits 26689 26688 -1
- Misses 1144 1149 +5
```
| diff --git a/conans/client/cmd/new.py b/conans/client/cmd/new.py
index 20e3f01a7..9717ec679 100644
--- a/conans/client/cmd/new.py
+++ b/conans/client/cmd/new.py
@@ -138,7 +138,7 @@ class {package_name}TestConan(ConanFile):
def build(self):
cmake = CMake(self)
# Current dir is "test_package/build/<build_id>" and CMakeLists.txt is in "test_package"
- cmake.configure(source_dir=self.conanfile_directory, build_dir="./")
+ cmake.configure()
cmake.build()
def imports(self):
diff --git a/conans/client/command.py b/conans/client/command.py
index 6e3d0ee50..ebb6fc319 100644
--- a/conans/client/command.py
+++ b/conans/client/command.py
@@ -172,8 +172,6 @@ class Command(object):
parser.add_argument('--keep-source', '-k', default=False, action='store_true',
help='Optional. Do not remove the source folder in local cache. '
'Use for testing purposes only')
- parser.add_argument("--werror", action='store_true', default=False,
- help='Error instead of warnings for graph inconsistencies')
_add_manifests_arguments(parser)
_add_common_install_arguments(parser, build_help=_help_build_policies)
@@ -187,7 +185,7 @@ class Command(object):
args.build, args.keep_source, args.verify, args.manifests,
args.manifests_interactive, args.remote, args.update,
conan_file_path=args.cwd, name=name, version=version, user=user,
- channel=channel, filename=args.file, werror=args.werror)
+ channel=channel, filename=args.file)
def download(self, *args):
"""Downloads recipe and binaries to the local cache, without using settings.
@@ -229,8 +227,6 @@ class Command(object):
parser.add_argument("--file", "-f", help="specify conanfile filename", action=OnceArgument)
parser.add_argument("--generator", "-g", nargs=1, action=Extender,
help='Generators to use')
- parser.add_argument("--werror", action='store_true', default=False,
- help='Error instead of warnings for graph inconsistencies')
parser.add_argument("--install-folder", "--install_folder", "-if", action=OnceArgument,
help='Use this directory as the directory where to put the generator'
'files, conaninfo/conanbuildinfo.txt etc.')
@@ -250,7 +246,7 @@ class Command(object):
return self._conan.install(path=args.path,
settings=args.settings, options=args.options,
env=args.env,
- remote=args.remote, werror=args.werror,
+ remote=args.remote,
verify=args.verify, manifests=args.manifests,
manifests_interactive=args.manifests_interactive,
build=args.build, profile_name=args.profile,
@@ -261,7 +257,7 @@ class Command(object):
return self._conan.install_reference(reference, settings=args.settings,
options=args.options,
env=args.env,
- remote=args.remote, werror=args.werror,
+ remote=args.remote,
verify=args.verify, manifests=args.manifests,
manifests_interactive=args.manifests_interactive,
build=args.build, profile_name=args.profile,
@@ -337,6 +333,12 @@ class Command(object):
parser.add_argument("--graph", "-g", action=OnceArgument,
help='Creates file with project dependencies graph. It will generate '
'a DOT or HTML file depending on the filename extension')
+ parser.add_argument("--install-folder", "-if", action=OnceArgument,
+ help="local folder containing the conaninfo.txt and conanbuildinfo.txt "
+ "files (from a previous conan install execution). Defaulted to "
+ "current folder, unless --profile, -s or -o is specified. If you "
+ "specify both install-folder and any setting/option "
+ "it will raise an error.")
build_help = 'given a build policy (same install command "build" parameter), return an ' \
'ordered list of ' \
'packages that would be built from sources in install command (simulation)'
@@ -344,6 +346,10 @@ class Command(object):
_add_common_install_arguments(parser, build_help=build_help)
args = parser.parse_args(*args)
+ if args.install_folder and (args.profile or args.settings or args.options or args.env):
+ raise ArgumentError(None,
+ "--install-folder cannot be used together with -s, -o, -e or -pr")
+
# BUILD ORDER ONLY
if args.build_order:
ret = self._conan.info_build_order(args.reference, settings=args.settings,
@@ -352,7 +358,8 @@ class Command(object):
profile_name=args.profile,
filename=args.file, remote=args.remote,
build_order=args.build_order,
- check_updates=args.update)
+ check_updates=args.update,
+ install_folder=args.install_folder)
if args.json:
json_arg = True if args.json == "1" else args.json
self._outputer.json_build_order(ret, json_arg, os.getcwd())
@@ -367,7 +374,8 @@ class Command(object):
profile_name=args.profile,
filename=args.file,
remote=args.remote,
- check_updates=args.update)
+ check_updates=args.update,
+ install_folder=args.install_folder)
self._outputer.nodes_to_build(nodes)
# INFO ABOUT DEPS OF CURRENT PROJECT OR REFERENCE
else:
@@ -375,7 +383,8 @@ class Command(object):
settings=args.settings,
options=args.options, env=args.env,
profile_name=args.profile, update=args.update,
- filename=args.file)
+ filename=args.file,
+ install_folder=args.install_folder)
deps_graph, graph_updates_info, project_reference = data
only = args.only
if args.only == ["None"]:
diff --git a/conans/client/conan_api.py b/conans/client/conan_api.py
index 84516984a..82c0556e8 100644
--- a/conans/client/conan_api.py
+++ b/conans/client/conan_api.py
@@ -199,12 +199,11 @@ class ConanAPIV1(object):
keep_source=False, verify=None,
manifests=None, manifests_interactive=None,
remote=None, update=False, conan_file_path=None, filename=None,
- user=None, channel=None, name=None, version=None, werror=False):
+ user=None, channel=None, name=None, version=None):
settings = settings or []
options = options or []
env = env or []
- self._user_io.out.werror_active = werror
cwd = os.getcwd()
conanfile_folder = self._abs_relative_to(conan_file_path, cwd, default=cwd)
@@ -272,14 +271,6 @@ class ConanAPIV1(object):
raise ConanException("The specified --install-folder doesn't contain '%s' and '%s' "
"files" % (CONANINFO, BUILD_INFO))
- @staticmethod
- def _validate_one_settings_source(install_folder, profile_name, settings, options, env):
- if install_folder and existing_info_files(install_folder) and \
- (profile_name or settings or options or env):
- raise ConanException("%s and %s are found, at '%s' folder, so specifying profile, "
- "settings, options or env is not allowed" % (CONANINFO, BUILD_INFO,
- install_folder))
-
@api_method
def export_pkg(self, path, name, channel, source_folder=None, build_folder=None,
install_folder=None, profile_name=None, settings=None, options=None,
@@ -299,7 +290,12 @@ class ConanAPIV1(object):
source_folder = self._abs_relative_to(source_folder, cwd, default=build_folder)
# Checks that no both settings and info files are specified
- self._validate_one_settings_source(install_folder, profile_name, settings, options, env)
+ if install_folder and existing_info_files(install_folder) and \
+ (profile_name or settings or options or env):
+ raise ConanException("%s and %s are found, at '%s' folder, so specifying profile, "
+ "settings, options or env is not allowed" % (CONANINFO, BUILD_INFO,
+ install_folder))
+
infos_present = existing_info_files(install_folder)
if not infos_present:
@@ -332,11 +328,10 @@ class ConanAPIV1(object):
@api_method
def install_reference(self, reference, settings=None, options=None, env=None,
- remote=None, werror=False, verify=None, manifests=None,
+ remote=None, verify=None, manifests=None,
manifests_interactive=None, build=None, profile_name=None,
update=False, generators=None, install_folder=None):
- self._user_io.out.werror_active = werror
cwd = os.getcwd()
install_folder = self._abs_relative_to(install_folder, cwd, default=cwd)
@@ -360,13 +355,11 @@ class ConanAPIV1(object):
@api_method
def install(self, path="", settings=None, options=None, env=None,
- remote=None, werror=False, verify=None, manifests=None,
+ remote=None, verify=None, manifests=None,
manifests_interactive=None, build=None, profile_name=None,
update=False, generators=None, no_imports=False, filename=None,
install_folder=None):
- self._user_io.out.werror_active = werror
-
cwd = os.getcwd()
install_folder = self._abs_relative_to(install_folder, cwd, default=cwd)
conanfile_folder = self._abs_relative_to(path, cwd, default=cwd)
@@ -411,19 +404,29 @@ class ConanAPIV1(object):
from conans.client.conf.config_installer import configuration_install
return configuration_install(item, self._client_cache, self._user_io.out, self._runner)
+ def _info_get_profile(self, install_folder, profile_name, settings, options, env):
+ cwd = os.getcwd()
+
+ if install_folder or not (profile_name or settings or options or env):
+ # When not install folder is specified but neither any setting, we try to read the
+ # info from cwd
+ install_folder = self._abs_relative_to(install_folder, cwd, default=cwd)
+ if existing_info_files(install_folder):
+ return read_conaninfo_profile(install_folder)
+
+ return profile_from_args(profile_name, settings, options, env=env,
+ cwd=cwd, client_cache=self._client_cache)
+
@api_method
def info_build_order(self, reference, settings=None, options=None, env=None,
profile_name=None, filename=None, remote=None, build_order=None,
- check_updates=None, build_folder=None):
-
- current_path = os.getcwd()
+ check_updates=None, install_folder=None):
try:
reference = ConanFileReference.loads(reference)
except ConanException:
- reference = os.path.normpath(os.path.join(current_path, reference))
+ reference = os.path.normpath(os.path.join(os.getcwd(), reference))
- profile = profile_from_args(profile_name, settings, options, env, build_folder,
- self._client_cache)
+ profile = self._info_get_profile(install_folder, profile_name, settings, options, env)
graph = self._manager.info_build_order(reference, profile, filename, build_order,
remote, check_updates)
return graph
@@ -431,16 +434,13 @@ class ConanAPIV1(object):
@api_method
def info_nodes_to_build(self, reference, build_modes, settings=None, options=None, env=None,
profile_name=None, filename=None, remote=None,
- check_updates=None, build_folder=None):
-
- current_path = os.getcwd()
+ check_updates=None, install_folder=None):
try:
reference = ConanFileReference.loads(reference)
except ConanException:
- reference = os.path.normpath(os.path.join(current_path, reference))
+ reference = os.path.normpath(os.path.join(os.getcwd(), reference))
- profile = profile_from_args(profile_name, settings, options, env, build_folder,
- self._client_cache)
+ profile = self._info_get_profile(install_folder, profile_name, settings, options, env)
ret = self._manager.info_nodes_to_build(reference, profile, filename, build_modes, remote,
check_updates)
ref_list, project_reference = ret
@@ -449,16 +449,13 @@ class ConanAPIV1(object):
@api_method
def info_get_graph(self, reference, remote=None, settings=None, options=None, env=None,
profile_name=None, update=False, filename=None,
- build_folder=None):
-
- current_path = os.getcwd()
+ install_folder=None):
try:
reference = ConanFileReference.loads(reference)
except ConanException:
- reference = os.path.normpath(os.path.join(current_path, reference))
+ reference = os.path.normpath(os.path.join(os.getcwd(), reference))
- profile = profile_from_args(profile_name, settings, options, env, build_folder,
- self._client_cache)
+ profile = self._info_get_profile(install_folder, profile_name, settings, options, env)
ret = self._manager.info_get_graph(reference=reference,
remote=remote, profile=profile, check_updates=update,
filename=filename)
diff --git a/conans/client/deps_builder.py b/conans/client/deps_builder.py
index da3934ad2..facfdf0f2 100644
--- a/conans/client/deps_builder.py
+++ b/conans/client/deps_builder.py
@@ -337,12 +337,12 @@ class DepsGraphBuilder(object):
else: # a public node already exist with this name
previous_node, closure = previous
if previous_node.conan_ref != require.conan_reference:
- self._output.werror("Conflict in %s\n"
- " Requirement %s conflicts with already defined %s\n"
- " Keeping %s\n"
- " To change it, override it in your base requirements"
- % (conanref, require.conan_reference,
- previous_node.conan_ref, previous_node.conan_ref))
+ raise ConanException("Conflict in %s\n"
+ " Requirement %s conflicts with already defined %s\n"
+ " Keeping %s\n"
+ " To change it, override it in your base requirements"
+ % (conanref, require.conan_reference,
+ previous_node.conan_ref, previous_node.conan_ref))
dep_graph.add_edge(node, previous_node)
# RECURSION!
if closure is None:
diff --git a/conans/client/generators/__init__.py b/conans/client/generators/__init__.py
index 2e8a35a6f..679f4ae5c 100644
--- a/conans/client/generators/__init__.py
+++ b/conans/client/generators/__init__.py
@@ -68,36 +68,36 @@ def write_generators(conanfile, path, output):
""" produces auxiliary files, required to build a project or a package.
"""
for generator_name in conanfile.generators:
- if generator_name not in registered_generators:
- output.warn("Invalid generator '%s'. Available types: %s" %
- (generator_name, ", ".join(registered_generators.available)))
- else:
+ try:
generator_class = registered_generators[generator_name]
- try:
- generator = generator_class(conanfile)
- except TypeError:
- # To allow old-style generator packages to work (e.g. premake)
- output.warn("Generator %s failed with new __init__(), trying old one")
- generator = generator_class(conanfile.deps_cpp_info, conanfile.cpp_info)
+ except KeyError:
+ raise ConanException("Invalid generator '%s'. Available types: %s" %
+ (generator_name, ", ".join(registered_generators.available)))
+ try:
+ generator = generator_class(conanfile)
+ except TypeError:
+ # To allow old-style generator packages to work (e.g. premake)
+ output.warn("Generator %s failed with new __init__(), trying old one")
+ generator = generator_class(conanfile.deps_cpp_info, conanfile.cpp_info)
- try:
- generator.output_path = path
- content = generator.content
- if isinstance(content, dict):
- if generator.filename:
- output.warn("Generator %s is multifile. Property 'filename' not used"
- % (generator_name,))
- for k, v in content.items():
- v = normalize(v)
- output.info("Generator %s created %s" % (generator_name, k))
- save(join(path, k), v)
- else:
- content = normalize(content)
- output.info("Generator %s created %s" % (generator_name, generator.filename))
- save(join(path, generator.filename), content)
- except Exception as e:
- if get_env("CONAN_VERBOSE_TRACEBACK", False):
- output.error(traceback.format_exc())
- output.error("Generator %s(file:%s) failed\n%s"
- % (generator_name, generator.filename, str(e)))
- raise ConanException(e)
+ try:
+ generator.output_path = path
+ content = generator.content
+ if isinstance(content, dict):
+ if generator.filename:
+ output.warn("Generator %s is multifile. Property 'filename' not used"
+ % (generator_name,))
+ for k, v in content.items():
+ v = normalize(v)
+ output.info("Generator %s created %s" % (generator_name, k))
+ save(join(path, k), v)
+ else:
+ content = normalize(content)
+ output.info("Generator %s created %s" % (generator_name, generator.filename))
+ save(join(path, generator.filename), content)
+ except Exception as e:
+ if get_env("CONAN_VERBOSE_TRACEBACK", False):
+ output.error(traceback.format_exc())
+ output.error("Generator %s(file:%s) failed\n%s"
+ % (generator_name, generator.filename, str(e)))
+ raise ConanException(e)
diff --git a/conans/client/generators/cmake_common.py b/conans/client/generators/cmake_common.py
index c8c3aef22..cf8b16b0e 100644
--- a/conans/client/generators/cmake_common.py
+++ b/conans/client/generators/cmake_common.py
@@ -247,7 +247,8 @@ macro(conan_set_rpath)
set(CMAKE_SKIP_RPATH 1) # AVOID RPATH FOR *.dylib, ALL LIBS BETWEEN THEM AND THE EXE
# SHOULD BE ON THE LINKER RESOLVER PATH (./ IS ONE OF THEM)
# Policy CMP0068
- set(CMAKE_SKIP_INSTALL_RPATH 1) # In previous versions to 3.9 it is explicitly necessary
+ # We want the old behavior, in CMake >= 3.9 CMAKE_SKIP_RPATH won't affect the install_name in OSX
+ set(CMAKE_INSTALL_NAME_DIR "")
endif()
endmacro()
@@ -452,7 +453,7 @@ endmacro()
cmake_macros = """
macro(conan_basic_setup)
- set(options TARGETS NO_OUTPUT_DIRS SKIP_RPATH)
+ set(options TARGETS NO_OUTPUT_DIRS SKIP_RPATH KEEP_RPATHS)
cmake_parse_arguments(ARGUMENTS "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN} )
if(CONAN_EXPORTED)
message(STATUS "Conan: called by CMake conan helper")
@@ -469,7 +470,12 @@ macro(conan_basic_setup)
message(STATUS "Conan: Using cmake targets configuration")
conan_define_targets()
endif()
- if(NOT ARGUMENTS_SKIP_RPATH)
+ if(ARGUMENTS_SKIP_RPATH)
+ # Change by "DEPRECATION" or "SEND_ERROR" when we are ready
+ message(WARNING "Conan: SKIP_RPATH is deprecated, it has been renamed to KEEP_RPATHS")
+ endif()
+ if(NOT ARGUMENTS_SKIP_RPATH AND NOT ARGUMENTS_KEEP_RPATHS)
+ # Parameter has renamed, but we keep the compatibility with old SKIP_RPATH
message(STATUS "Conan: Adjusting default RPATHs Conan policies")
conan_set_rpath()
endif()
diff --git a/conans/client/generators/virtualenv.py b/conans/client/generators/virtualenv.py
index 35cb37378..687edab1a 100644
--- a/conans/client/generators/virtualenv.py
+++ b/conans/client/generators/virtualenv.py
@@ -84,7 +84,7 @@ class VirtualEnvGenerator(Generator):
def _ps1_lines(self, venv_name):
deactivate_lines = []
- activate_lines = ["function global:_old_conan_prompt {\"\"}",]
+ activate_lines = ["function global:_old_conan_prompt {\"\"}"]
activate_lines.append("$function:_old_conan_prompt = $function:prompt")
activate_lines.append(
"function global:prompt { write-host \"(%s) \" -nonewline; & $function:_old_conan_prompt }" % venv_name)
diff --git a/conans/client/output.py b/conans/client/output.py
index cf7897025..c5511ebe0 100644
--- a/conans/client/output.py
+++ b/conans/client/output.py
@@ -44,7 +44,6 @@ class ConanOutput(object):
def __init__(self, stream, color=False):
self._stream = stream
self._color = color
- self.werror_active = False
@property
def is_terminal(self):
@@ -85,12 +84,6 @@ class ConanOutput(object):
def warn(self, data):
self.writeln("WARN: " + data, Color.BRIGHT_YELLOW)
- def werror(self, data):
- if self.werror_active:
- raise ConanException(data)
- else:
- self.warn(data)
-
def error(self, data):
self.writeln("ERROR: " + data, Color.BRIGHT_RED)
@@ -114,7 +107,6 @@ class ScopedOutput(ConanOutput):
self.scope = scope
self._stream = output._stream
self._color = output._color
- self.werror_active = output.werror_active
def write(self, data, front=None, back=None, newline=False):
super(ScopedOutput, self).write("%s: " % self.scope, front, back, False)
diff --git a/conans/client/require_resolver.py b/conans/client/require_resolver.py
index cfcf5e545..a75bc4f95 100644
--- a/conans/client/require_resolver.py
+++ b/conans/client/require_resolver.py
@@ -36,9 +36,9 @@ class RequireResolver(object):
ref = require.conan_reference
resolved = self._resolve_version(version_range, [ref])
if not resolved:
- self._output.werror("Version range '%s' required by '%s' not valid for "
- "downstream requirement '%s'"
- % (version_range, base_conanref, str(ref)))
+ raise ConanException("Version range '%s' required by '%s' not valid for "
+ "downstream requirement '%s'"
+ % (version_range, base_conanref, str(ref)))
else:
self._output.success("Version range '%s' required by '%s' valid for "
"downstream requirement '%s'"
diff --git a/conans/model/options.py b/conans/model/options.py
index e61326f45..f3c5d7757 100644
--- a/conans/model/options.py
+++ b/conans/model/options.py
@@ -112,10 +112,10 @@ class PackageOptionValues(object):
modified = self._modified.get(name)
if modified is not None:
modified_value, modified_ref = modified
- output.werror("%s tried to change %s option %s:%s to %s\n"
- "but it was already assigned to %s by %s"
- % (down_ref, own_ref, package_name, name, value,
- modified_value, modified_ref))
+ raise ConanException("%s tried to change %s option %s:%s to %s\n"
+ "but it was already assigned to %s by %s"
+ % (down_ref, own_ref, package_name, name, value,
+ modified_value, modified_ref))
else:
self._modified[name] = (value, down_ref)
self._dict[name] = value
@@ -433,9 +433,9 @@ class PackageOptions(object):
modified = self._modified.get(name)
if modified is not None:
modified_value, modified_ref = modified
- output.werror("%s tried to change %s option %s to %s\n"
- "but it was already assigned to %s by %s"
- % (down_ref, own_ref, name, value, modified_value, modified_ref))
+ raise ConanException("%s tried to change %s option %s to %s\n"
+ "but it was already assigned to %s by %s"
+ % (down_ref, own_ref, name, value, modified_value, modified_ref))
else:
if ignore_unknown:
if name in self._data:
| CMAKE_SKIP_INSTALL_RPATH is not set on macOS
Hi,
According to the docs, `conan_basic_setup()` sets `CMAKE_SKIP_RPATH` to `1` on macOS.
However, `otool -L my_installed_lib.dylib` gives the following output:
> @rpath/libmy_installed_lib.dylib (compatibility version 0.0.0, current version 0.0.0)
The copy in the build folder, by contrast, doesn't have the `@rpath/` prefix.
Is this intended? I'm kind of a n00b with rpaths.
EDIT: setting `cmake.definitions["CMAKE_SKIP_INSTALL_RPATH"] = "ON"` in my recipe fixes the issue.
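For context, a minimal recipe sketch showing where that workaround would live; the package name and class are hypothetical, and only the `cmake.definitions` line comes from the report above:

```python
from conans import ConanFile, CMake


class MyLibConan(ConanFile):  # hypothetical recipe, for illustration only
    name = "mylib"
    version = "0.1"
    settings = "os", "compiler", "build_type", "arch"
    generators = "cmake"

    def build(self):
        cmake = CMake(self)
        # With CMake >= 3.9 (policy CMP0068), CMAKE_SKIP_RPATH no longer
        # affects the install name on macOS, so force this explicitly:
        cmake.definitions["CMAKE_SKIP_INSTALL_RPATH"] = "ON"
        cmake.configure()
        cmake.build()
```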
EDIT2: that's weird though, the CMake doc says that CMAKE_SKIP_RPATH includes CMAKE_SKIP_INSTALL_RPATH... | conan-io/conan | diff --git a/conans/test/build_helpers/cmake_test.py b/conans/test/build_helpers/cmake_test.py
index 2dac92708..45310e0f0 100644
--- a/conans/test/build_helpers/cmake_test.py
+++ b/conans/test/build_helpers/cmake_test.py
@@ -196,7 +196,6 @@ class CMakeTest(unittest.TestCase):
'-DCONAN_COMPILER="Visual Studio" -DCONAN_COMPILER_VERSION="12" -Wno-dev',
"--config Debug")
-
def deleted_os_test(self):
partial_settings = """
os: [Linux]
diff --git a/conans/test/command/create_test.py b/conans/test/command/create_test.py
index 4949e08af..68c49d79b 100644
--- a/conans/test/command/create_test.py
+++ b/conans/test/command/create_test.py
@@ -96,7 +96,7 @@ class Pkg(ConanFile):
class Pkg(ConanFile):
requires = "LibB/0.1@user/channel", "LibC/0.1@user/channel"
"""})
- error = client.run("create Consumer/0.1@lasote/testing --werror", ignore_error=True)
+ error = client.run("create Consumer/0.1@lasote/testing", ignore_error=True)
self.assertTrue(error)
self.assertIn("ERROR: Conflict in LibC/0.1@user/channel",
client.out)
diff --git a/conans/test/command/info_test.py b/conans/test/command/info_test.py
index 613d8aad1..35e815814 100644
--- a/conans/test/command/info_test.py
+++ b/conans/test/command/info_test.py
@@ -48,6 +48,46 @@ class InfoTest(unittest.TestCase):
self.client.run("export lasote/stable")
self.assertNotIn("WARN: Conanfile doesn't have 'url'", self.client.user_io.out)
+ def install_folder_test(self):
+
+ conanfile = """from conans import ConanFile
+from conans.util.files import save
+
+class MyTest(ConanFile):
+ name = "Pkg"
+ version = "0.1"
+ settings = "build_type"
+
+"""
+ client = TestClient()
+ client.save({"conanfile.py": conanfile})
+ client.run("info -s build_type=Debug")
+ self.assertNotIn("ID: 4024617540c4f240a6a5e8911b0de9ef38a11a72", client.user_io.out)
+ self.assertIn("ID: 5a67a79dbc25fd0fa149a0eb7a20715189a0d988", client.user_io.out)
+
+ client.run('install -s build_type=Debug')
+ client.run("info") # Re-uses debug from curdir
+ self.assertNotIn("ID: 4024617540c4f240a6a5e8911b0de9ef38a11a72", client.user_io.out)
+ self.assertIn("ID: 5a67a79dbc25fd0fa149a0eb7a20715189a0d988", client.user_io.out)
+
+ client.run('install -s build_type=Release --install-folder=MyInstall')
+ client.run("info --install-folder=MyInstall") # Re-uses debug from MyInstall folder
+
+ self.assertIn("ID: 4024617540c4f240a6a5e8911b0de9ef38a11a72", client.user_io.out)
+ self.assertNotIn("ID: 5a67a79dbc25fd0fa149a0eb7a20715189a0d988", client.user_io.out)
+
+ client.run('install -s build_type=Debug --install-folder=MyInstall')
+ client.run("info --install-folder=MyInstall") # Re-uses debug from MyInstall folder
+
+ self.assertNotIn("ID: 4024617540c4f240a6a5e8911b0de9ef38a11a72", client.user_io.out)
+ self.assertIn("ID: 5a67a79dbc25fd0fa149a0eb7a20715189a0d988", client.user_io.out)
+
+ # Both should raise
+ error = client.run("info --install-folder=MyInstall -s build_type=Release",
+ ignore_error=True) # Re-uses debug from MyInstall folder
+ self.assertTrue(error)
+ self.assertIn("--install-folder cannot be used together with -s, -o, -e or -pr", client.out)
+
def graph_test(self):
self.client = TestClient()
diff --git a/conans/test/functional/cmake_skip_rpath_test.py b/conans/test/functional/cmake_skip_rpath_test.py
index 699259217..ee362da6f 100644
--- a/conans/test/functional/cmake_skip_rpath_test.py
+++ b/conans/test/functional/cmake_skip_rpath_test.py
@@ -1,3 +1,4 @@
+import platform
import unittest
from conans.test.utils.tools import TestClient
@@ -21,10 +22,10 @@ project(MyHello CXX)
cmake_minimum_required(VERSION 2.8.12)
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
-conan_basic_setup(TARGETS SKIP_RPATH)
+conan_basic_setup(TARGETS %s)
IF(APPLE AND CMAKE_SKIP_RPATH)
- MESSAGE(FATAL_ERROR "RPath was not skipped")
+ MESSAGE(FATAL_ERROR "RPath was skipped")
ENDIF()
"""
@@ -32,31 +33,36 @@ ENDIF()
class CMakeSkipRpathTest(unittest.TestCase):
def test_skip_flag(self):
- client = TestClient()
- client.save({"conanfile.py": conanfile_py})
- client.run("export lasote/testing")
- client.save({"conanfile.txt": conanfile,
- "CMakeLists.txt": cmake}, clean_first=True)
-
- client.run('install -g cmake --build')
- client.runner("cmake .", cwd=client.current_folder)
- self.assertNotIn("Conan: Adjusting default RPATHs Conan policies", client.user_io.out)
- self.assertIn("Build files have been written", client.user_io.out)
-
- client.save({"conanfile.txt": conanfile,
- "CMakeLists.txt": cmake.replace("TARGETS SKIP_RPATH", "SKIP_RPATH")},
- clean_first=True)
-
- client.run('install -g cmake --build')
- client.runner("cmake .", cwd=client.current_folder)
- self.assertNotIn("Conan: Adjusting default RPATHs Conan policies", client.user_io.out)
- self.assertIn("Build files have been written", client.user_io.out)
-
- client.save({"conanfile.txt": conanfile,
- "CMakeLists.txt": cmake.replace("SKIP_RPATH", "")},
- clean_first=True)
-
- client.run('install -g cmake --build')
- client.runner("cmake .", cwd=client.current_folder)
- self.assertIn("Conan: Adjusting default RPATHs Conan policies", client.user_io.out)
- self.assertIn("Build files have been written", client.user_io.out)
+ for way_to_skip in ("SKIP_RPATH", "KEEP_RPATHS"):
+ client = TestClient()
+ client.save({"conanfile.py": conanfile_py})
+ client.run("export lasote/testing")
+ client.save({"conanfile.txt": conanfile,
+ "CMakeLists.txt": cmake % way_to_skip}, clean_first=True)
+ client.run('install -g cmake --build')
+ client.runner("cmake .", cwd=client.current_folder)
+ self.assertNotIn("Conan: Adjusting default RPATHs Conan policies", client.out)
+ self.assertIn("Build files have been written", client.out)
+ if way_to_skip == "SKIP_RPATH":
+ self.assertIn("Conan: SKIP_RPATH is deprecated, it has been renamed to KEEP_RPATHS",
+ client.out)
+
+ client.save({"conanfile.txt": conanfile,
+ "CMakeLists.txt": (cmake % way_to_skip).replace("TARGETS", "")},
+ clean_first=True)
+
+ client.run('install -g cmake --build')
+ client.runner("cmake .", cwd=client.current_folder)
+ self.assertNotIn("Conan: Adjusting default RPATHs Conan policies", client.out)
+ self.assertIn("Build files have been written", client.out)
+
+ client.save({"conanfile.txt": conanfile,
+ "CMakeLists.txt": (cmake % "").replace("FATAL_ERROR", "INFO")},
+ clean_first=True)
+
+ if platform.system() == "Darwin":
+ client.run('install -g cmake --build')
+ client.runner("cmake .", cwd=client.current_folder)
+ self.assertIn("Conan: Adjusting default RPATHs Conan policies", client.out)
+ self.assertIn("Build files have been written", client.out)
+ self.assertIn("RPath was skipped", client.out)
diff --git a/conans/test/generators/cmake_test.py b/conans/test/generators/cmake_test.py
index 85214df86..0ceb4530c 100644
--- a/conans/test/generators/cmake_test.py
+++ b/conans/test/generators/cmake_test.py
@@ -97,7 +97,7 @@ class CMakeGeneratorTest(unittest.TestCase):
# extract the conan_basic_setup macro
macro = self._extract_macro("conan_basic_setup", aux_cmake_test_setup)
self.assertEqual("""macro(conan_basic_setup)
- set(options TARGETS NO_OUTPUT_DIRS SKIP_RPATH)
+ set(options TARGETS NO_OUTPUT_DIRS SKIP_RPATH KEEP_RPATHS)
cmake_parse_arguments(ARGUMENTS "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN} )
if(CONAN_EXPORTED)
message(STATUS "Conan: called by CMake conan helper")
@@ -114,7 +114,12 @@ class CMakeGeneratorTest(unittest.TestCase):
message(STATUS "Conan: Using cmake targets configuration")
conan_define_targets()
endif()
- if(NOT ARGUMENTS_SKIP_RPATH)
+ if(ARGUMENTS_SKIP_RPATH)
+ # Change by "DEPRECATION" or "SEND_ERROR" when we are ready
+ message(WARNING "Conan: SKIP_RPATH is deprecated, it has been renamed to KEEP_RPATHS")
+ endif()
+ if(NOT ARGUMENTS_SKIP_RPATH AND NOT ARGUMENTS_KEEP_RPATHS)
+ # Parameter has renamed, but we keep the compatibility with old SKIP_RPATH
message(STATUS "Conan: Adjusting default RPATHs Conan policies")
conan_set_rpath()
endif()
diff --git a/conans/test/generators/generators_test.py b/conans/test/generators/generators_test.py
index eab8e6256..7393586c4 100644
--- a/conans/test/generators/generators_test.py
+++ b/conans/test/generators/generators_test.py
@@ -6,6 +6,17 @@ from conans.util.files import load
class GeneratorsTest(unittest.TestCase):
+ def test_error(self):
+ base = '''
+[generators]
+unknown
+'''
+ client = TestClient()
+ client.save({"conanfile.txt": base})
+ error = client.run("install --build", ignore_error=True)
+ self.assertTrue(error)
+ self.assertIn("ERROR: Invalid generator 'unknown'. Available types:", client.out)
+
def test_base(self):
base = '''
[generators]
diff --git a/conans/test/integration/conflict_diamond_test.py b/conans/test/integration/conflict_diamond_test.py
index f2f043fdb..c7a47c8fe 100644
--- a/conans/test/integration/conflict_diamond_test.py
+++ b/conans/test/integration/conflict_diamond_test.py
@@ -32,10 +32,7 @@ class HelloReuseConan(ConanFile):
self._export("Hello3", "0.1", ["Hello1/0.1@lasote/stable", "Hello2/0.1@lasote/stable"],
export=False)
- self.client.run("install . --build missing")
- self.assertIn("WARN: Conflict in Hello2/0.1@lasote/stable", self.client.user_io.out)
- self.assertIn("PROJECT: Generated conaninfo.txt", self.client.user_io.out)
-
- self.client.run("install . --build missing --werror", ignore_error=True)
- self.assertIn("ERROR: Conflict in Hello2/0.1@lasote/stable", self.client.user_io.out)
+ error = self.client.run("install . --build missing", ignore_error=True)
+ self.assertTrue(error)
+ self.assertIn("Conflict in Hello2/0.1@lasote/stable", self.client.user_io.out)
self.assertNotIn("PROJECT: Generated conaninfo.txt", self.client.user_io.out)
diff --git a/conans/test/integration/package_info_test.py b/conans/test/integration/package_info_test.py
new file mode 100644
index 000000000..36dc1b3c6
--- /dev/null
+++ b/conans/test/integration/package_info_test.py
@@ -0,0 +1,48 @@
+import unittest
+
+from conans.paths import CONANFILE, CONANFILE_TXT
+from conans.test.utils.tools import TestClient
+
+
+class TestPackageInfo(unittest.TestCase):
+
+ def package_info_called_in_local_cache_test(self):
+ client = TestClient()
+ conanfile_tmp = '''
+from conans import ConanFile
+import os
+
+class HelloConan(ConanFile):
+ name = "%s"
+ version = "1.0"
+ build_policy = "missing"
+ options = {"switch": ["1", "0"]}
+ default_options = "switch=0"
+ %s
+
+ def build(self):
+ self.output.warn("Env var MYVAR={0}.".format(os.getenv("MYVAR", "")))
+
+ def package_info(self):
+ if self.options.switch == "0":
+ self.env_info.MYVAR = "foo"
+ else:
+ self.env_info.MYVAR = "bar"
+
+'''
+ for index in range(4):
+ requires = "requires = 'Lib%s/1.0@conan/stable'" % index if index > 0 else ""
+ conanfile = conanfile_tmp % ("Lib%s" % (index + 1), requires)
+ client.save({CONANFILE: conanfile}, clean_first=True)
+ client.run("create conan/stable")
+
+ txt = "[requires]\nLib4/1.0@conan/stable"
+ client.save({CONANFILE_TXT: txt}, clean_first=True)
+ client.run("install . -o *:switch=1")
+ self.assertIn("Lib1/1.0@conan/stable: WARN: Env var MYVAR=.", client.out)
+ self.assertIn("Lib2/1.0@conan/stable: WARN: Env var MYVAR=bar.", client.out)
+ self.assertIn("Lib3/1.0@conan/stable: WARN: Env var MYVAR=bar.", client.out)
+ self.assertIn("Lib4/1.0@conan/stable: WARN: Env var MYVAR=bar.", client.out)
+
+ client.run("install . -o *:switch=0 --build Lib3")
+ self.assertIn("Lib3/1.0@conan/stable: WARN: Env var MYVAR=foo", client.out)
diff --git a/conans/test/integration/version_ranges_conflict_test.py b/conans/test/integration/version_ranges_conflict_test.py
index ab8b33c4a..a1d31b654 100644
--- a/conans/test/integration/version_ranges_conflict_test.py
+++ b/conans/test/integration/version_ranges_conflict_test.py
@@ -18,14 +18,8 @@ class VersionRangesConflictTest(unittest.TestCase):
add("MyPkg2", "0.1", ["MyPkg1/[~0.1]@user/testing"])
add("MyPkg3", "0.1", ["MyPkg1/[~0.2]@user/testing", "MyPkg2/[~0.1]@user/testing"])
- def werror_warn_test(self):
- self.client.run("info")
- self.assertIn("WARN: Version range '~0.1' required by 'MyPkg2/0.1@user/testing' "
- "not valid for downstream requirement 'MyPkg1/0.2.0@user/testing'",
- self.client.user_io.out)
-
def werror_fail_test(self):
- error = self.client.run("install --build --werror", ignore_error=True)
+ error = self.client.run("install --build", ignore_error=True)
self.assertTrue(error)
self.assertNotIn("WARN: Version range '~0.1' required", self.client.user_io.out)
self.assertIn("ERROR: Version range '~0.1' required by 'MyPkg2/0.1@user/testing' "
diff --git a/conans/test/model/options_test.py b/conans/test/model/options_test.py
index 983482614..816e3d7e0 100644
--- a/conans/test/model/options_test.py
+++ b/conans/test/model/options_test.py
@@ -111,19 +111,11 @@ class OptionsTest(unittest.TestCase):
"Poco": poco_values,
"Hello1": hello1_values}
down_ref = ConanFileReference.loads("Hello2/0.1@diego/testing")
- self.sut.propagate_upstream(options2, down_ref, own_ref, output)
- self.assertIn("""WARN: Hello2/0.1@diego/testing tried to change Hello1/0.1@diego/testing option optimized to 2
-but it was already assigned to 4 by Hello0/0.1@diego/testing
-WARN: Hello2/0.1@diego/testing tried to change Hello1/0.1@diego/testing option static to True
-but it was already assigned to False by Hello0/0.1@diego/testing
-WARN: Hello2/0.1@diego/testing tried to change Hello1/0.1@diego/testing option Boost:static to 2
-but it was already assigned to False by Hello0/0.1@diego/testing
-WARN: Hello2/0.1@diego/testing tried to change Hello1/0.1@diego/testing option Boost:thread to Any
-but it was already assigned to True by Hello0/0.1@diego/testing
-WARN: Hello2/0.1@diego/testing tried to change Hello1/0.1@diego/testing option Boost:thread.multi to on
-but it was already assigned to off by Hello0/0.1@diego/testing
-WARN: Hello2/0.1@diego/testing tried to change Hello1/0.1@diego/testing option Poco:deps_bundled to What
-but it was already assigned to True by Hello0/0.1@diego/testing""", str(output))
+
+ with self.assertRaisesRegexp(ConanException, "Hello2/0.1@diego/testing tried to change "
+ "Hello1/0.1@diego/testing option optimized to 2"):
+ self.sut.propagate_upstream(options2, down_ref, own_ref, output)
+
self.assertEqual(self.sut.values.dumps(),
"""optimized=4
path=NOTDEF
@@ -182,7 +174,6 @@ Poco:deps_bundled=True""")
own_ref = ConanFileReference.loads("Boost.Assert/0.1@diego/testing")
down_ref = ConanFileReference.loads("Consumer/0.1@diego/testing")
output = TestBufferConanOutput()
- output.werror_active = True
with self.assertRaises(ConanException):
self.sut.propagate_upstream(options, down_ref, own_ref, output)
diff --git a/conans/test/model/transitive_reqs_test.py b/conans/test/model/transitive_reqs_test.py
index 831458b9e..c12d51ff6 100644
--- a/conans/test/model/transitive_reqs_test.py
+++ b/conans/test/model/transitive_reqs_test.py
@@ -429,7 +429,6 @@ class ChatConan(ConanFile):
self.retriever.conan(say_ref2, say_content2)
self.retriever.conan(hello_ref, hello_content)
self.retriever.conan(bye_ref, bye_content2)
- self.output.werror_active = True
with self.assertRaisesRegexp(ConanException, "Conflict in Bye/0.2@user/testing"):
self.root(chat_content)
@@ -446,46 +445,9 @@ class ChatConan(ConanFile):
self.retriever.conan(say_ref2, say_content2)
self.retriever.conan(hello_ref, hello_content)
self.retriever.conan(bye_ref, bye_content2)
- deps_graph = self.root(chat_content)
-
- self.assertIn("""Conflict in Bye/0.2@user/testing
- Requirement Say/0.2@user/testing conflicts with already defined Say/0.1@user/testing
- Keeping Say/0.1@user/testing
- To change it, override it in your base requirements""", self.output)
- self.assertEqual(4, len(deps_graph.nodes))
- hello = _get_nodes(deps_graph, "Hello")[0]
- bye = _get_nodes(deps_graph, "Bye")[0]
- say = _get_nodes(deps_graph, "Say")[0]
- chat = _get_nodes(deps_graph, "Chat")[0]
- self.assertEqual(_get_edges(deps_graph), {Edge(hello, say), Edge(chat, hello),
- Edge(bye, say), Edge(chat, bye)})
- self.assertEqual(hello.conan_ref, hello_ref)
- self.assertEqual(say.conan_ref, say_ref)
- self.assertEqual(bye.conan_ref, bye_ref)
-
- self._check_say(say.conanfile)
- self._check_hello(hello, say_ref)
-
- conanfile = chat.conanfile
- self.assertEqual(conanfile.version, "2.3")
- self.assertEqual(conanfile.name, "Chat")
- self.assertEqual(conanfile.options.values.dumps(), "")
- self.assertEqual(conanfile.settings.fields, [])
- self.assertEqual(conanfile.settings.values.dumps(), "")
- self.assertEqual(conanfile.requires, Requirements(str(hello_ref),
- str(bye_ref)))
-
- conaninfo = conanfile.info
- self.assertEqual(conaninfo.settings.dumps(), "")
- self.assertEqual(conaninfo.full_settings.dumps(), "")
- self.assertEqual(conaninfo.options.dumps(), "")
- self.assertEqual(conaninfo.full_options.dumps(), "")
- self.assertEqual(conaninfo.requires.dumps(), "Bye/0.2\nHello/1.Y.Z")
- self.assertEqual(conaninfo.full_requires.dumps(),
- "Bye/0.2@user/testing:0b09634eb446bffb8d3042a3f19d813cfc162b9d\n"
- "Hello/1.2@user/testing:0b09634eb446bffb8d3042a3f19d813cfc162b9d\n"
- "Say/0.1@user/testing:5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9")
+ with self.assertRaisesRegexp(ConanException, "Conflict in Bye/0.2@user/testing"):
+ deps_graph = self.root(chat_content)
def test_diamond_conflict_solved(self):
chat_content = """
@@ -909,55 +871,9 @@ class ChatConan(ConanFile):
self.retriever.conan(hello_ref, hello_content)
self.retriever.conan(bye_ref, bye_content)
- self.output.werror_active = True
with self.assertRaisesRegexp(ConanException, "tried to change"):
self.root(chat_content)
- self.output.werror_active = False
- deps_graph = self.root(chat_content)
-
- self.assertEqual(4, len(deps_graph.nodes))
- hello = _get_nodes(deps_graph, "Hello")[0]
- bye = _get_nodes(deps_graph, "Bye")[0]
- say = _get_nodes(deps_graph, "Say")[0]
- chat = _get_nodes(deps_graph, "Chat")[0]
- self.assertEqual(_get_edges(deps_graph), {Edge(hello, say), Edge(chat, hello),
- Edge(bye, say), Edge(chat, bye)})
-
- self._check_say(say.conanfile, options="myoption=234")
- self.assertIn("Bye/0.2@user/testing tried to change Say/0.1@user/testing "
- "option myoption to 123 but it was already assigned to 234 "
- "by Hello/1.2@user/testing", str(self.output).replace("\n", " "))
- self.assertEqual(4, len(deps_graph.nodes))
- hello = _get_nodes(deps_graph, "Hello")[0]
- bye = _get_nodes(deps_graph, "Bye")[0]
- say = _get_nodes(deps_graph, "Say")[0]
- chat = _get_nodes(deps_graph, "Chat")[0]
- self.assertEqual(_get_edges(deps_graph), {Edge(hello, say), Edge(chat, hello),
- Edge(bye, say), Edge(chat, bye)})
-
- self._check_say(say.conanfile, options="myoption=234")
-
- conanfile = chat.conanfile
- self.assertEqual(conanfile.version, "2.3")
- self.assertEqual(conanfile.name, "Chat")
- self.assertEqual(conanfile.options.values.dumps(), "Say:myoption=234")
- self.assertEqual(conanfile.settings.fields, [])
- self.assertEqual(conanfile.settings.values.dumps(), "")
- self.assertEqual(conanfile.requires, Requirements(str(hello_ref),
- str(bye_ref)))
-
- conaninfo = conanfile.info
- self.assertEqual(conaninfo.settings.dumps(), "")
- self.assertEqual(conaninfo.full_settings.dumps(), "")
- self.assertEqual(conaninfo.options.dumps(), "")
- self.assertEqual(conaninfo.full_options.dumps(), "Say:myoption=234")
- self.assertEqual(conaninfo.requires.dumps(), "Bye/0.2\nHello/1.Y.Z")
- self.assertEqual(conaninfo.full_requires.dumps(),
- "Bye/0.2@user/testing:0b09634eb446bffb8d3042a3f19d813cfc162b9d\n"
- "Hello/1.2@user/testing:0b09634eb446bffb8d3042a3f19d813cfc162b9d\n"
- "Say/0.1@user/testing:48bb3c5cbdb4822ae87914437ca3cceb733c7e1d")
-
def test_diamond_conflict_options_solved(self):
say_content = """
from conans import ConanFile
@@ -1621,12 +1537,12 @@ class LibDConan(ConanFile):
libd_ref = ConanFileReference.loads("LibD/0.1@user/testing")
self.retriever.conan(libd_ref, libd_content)
- self.root(self.consumer_content)
+ with self.assertRaisesRegexp(ConanException, "Conflict in LibB/0.1@user/testing"):
+ self.root(self.consumer_content)
self.assertIn("LibB/0.1@user/testing requirement LibA/0.1@user/testing overriden by "
"LibD/0.1@user/testing to LibA/0.2@user/testing", str(self.output))
- self.assertIn("WARN: Conflict in LibB/0.1@user/testing", str(self.output))
- self.assertEqual(2, str(self.output).count("LibA requirements()"))
- self.assertEqual(2, str(self.output).count("LibA configure()"))
+ self.assertEqual(1, str(self.output).count("LibA requirements()"))
+ self.assertEqual(1, str(self.output).count("LibA configure()"))
def test_expand_requirements_direct(self):
libd_content = """
@@ -1640,12 +1556,10 @@ class LibDConan(ConanFile):
libd_ref = ConanFileReference.loads("LibD/0.1@user/testing")
self.retriever.conan(libd_ref, libd_content)
- self.root(self.consumer_content)
- self.assertIn("LibB/0.1@user/testing requirement LibA/0.1@user/testing overriden by "
- "LibD/0.1@user/testing to LibA/0.2@user/testing", str(self.output))
- self.assertIn("WARN: Conflict in LibB/0.1@user/testing", str(self.output))
- self.assertEqual(3, str(self.output).count("LibA requirements()"))
- self.assertEqual(3, str(self.output).count("LibA configure()"))
+ with self.assertRaisesRegexp(ConanException, "Conflict in LibB/0.1@user/testing"):
+ self.root(self.consumer_content)
+ self.assertEqual(1, str(self.output).count("LibA requirements()"))
+ self.assertEqual(1, str(self.output).count("LibA configure()"))
def test_expand_options(self):
""" if only one path changes the default option, it has to be expanded
@@ -1696,9 +1610,10 @@ class LibDConan(ConanFile):
libc_ref = ConanFileReference.loads("LibC/0.1@user/testing")
self.retriever.conan(libc_ref, libc_content)
- self.root(self.consumer_content)
- self.assertIn("WARN: LibD/0.1@user/testing tried to change LibB/0.1@user/testing "
- "option LibA:shared to True", str(self.output))
+ with self.assertRaisesRegexp(ConanException, "LibD/0.1@user/testing tried to change LibB/0.1@user/testing "
+ "option LibA:shared to True"):
+ self.root(self.consumer_content)
+
self.assertEqual(1, str(self.output).count("LibA requirements()"))
self.assertEqual(1, str(self.output).count("LibA configure()"))
diff --git a/conans/test/model/version_ranges_test.py b/conans/test/model/version_ranges_test.py
index b71c0777a..0aac1c8a5 100644
--- a/conans/test/model/version_ranges_test.py
+++ b/conans/test/model/version_ranges_test.py
@@ -14,6 +14,7 @@ from conans.client.require_resolver import RequireResolver, satisfying
import re
from nose_parameterized import parameterized
from conans.model.profile import Profile
+from conans.errors import ConanException
class BasicMaxVersionTest(unittest.TestCase):
@@ -247,6 +248,10 @@ class ChatConan(ConanFile):
version = "2.3"
requires = "Hello/1.2@memsharded/testing", %s
"""
+ if valid is False:
+ with self.assertRaisesRegexp(ConanException, "not valid"):
+ self.root(chat_content % version_range)
+ return
deps_graph = self.root(chat_content % version_range)
hello = _get_nodes(deps_graph, "Hello")[0]
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 10
} | 0.26 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"nose-cov",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"conans/requirements.txt",
"conans/requirements_osx.txt",
"conans/requirements_server.txt",
"conans/requirements_dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | asn1crypto==1.5.1
astroid==1.6.6
attrs==22.2.0
beautifulsoup4==4.12.3
bottle==0.12.25
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
codecov==2.1.13
colorama==0.3.9
-e git+https://github.com/conan-io/conan.git@ad94635b286e94e2468d2add00ba9081ad4be895#egg=conan
cov-core==1.15.0
coverage==4.2
cryptography==2.1.4
distro==1.0.4
fasteners==0.19
future==0.16.0
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
isort==5.10.1
lazy-object-proxy==1.7.1
mccabe==0.7.0
mock==1.3.0
ndg-httpsclient==0.4.4
node-semver==0.2.0
nose==1.3.7
nose-cov==1.6
nose-parameterized==0.5.0
packaging==21.3
patch==1.16
pbr==6.1.1
pluggy==1.0.0
pluginbase==0.7
py==1.11.0
pyasn==1.5.0b7
pyasn1==0.5.1
pycparser==2.21
Pygments==2.14.0
PyJWT==1.7.1
pylint==1.8.0
pyOpenSSL==17.5.0
pyparsing==3.1.4
pytest==7.0.1
PyYAML==3.12
requests==2.27.1
six==1.17.0
soupsieve==2.3.2.post1
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
waitress==2.0.0
WebOb==1.8.9
WebTest==2.0.35
wrapt==1.16.0
zipp==3.6.0
| name: conan
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- asn1crypto==1.5.1
- astroid==1.6.6
- attrs==22.2.0
- beautifulsoup4==4.12.3
- bottle==0.12.25
- cffi==1.15.1
- charset-normalizer==2.0.12
- codecov==2.1.13
- colorama==0.3.9
- cov-core==1.15.0
- coverage==4.2
- cryptography==2.1.4
- distro==1.0.4
- fasteners==0.19
- future==0.16.0
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- isort==5.10.1
- lazy-object-proxy==1.7.1
- mccabe==0.7.0
- mock==1.3.0
- ndg-httpsclient==0.4.4
- node-semver==0.2.0
- nose==1.3.7
- nose-cov==1.6
- nose-parameterized==0.5.0
- packaging==21.3
- patch==1.16
- pbr==6.1.1
- pluggy==1.0.0
- pluginbase==0.7
- py==1.11.0
- pyasn==1.5.0b7
- pyasn1==0.5.1
- pycparser==2.21
- pygments==2.14.0
- pyjwt==1.7.1
- pylint==1.8.0
- pyopenssl==17.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- pyyaml==3.12
- requests==2.27.1
- six==1.17.0
- soupsieve==2.3.2.post1
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- waitress==2.0.0
- webob==1.8.9
- webtest==2.0.35
- wrapt==1.16.0
- zipp==3.6.0
prefix: /opt/conda/envs/conan
| [
"conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_diamond_conflict",
"conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_diamond_conflict_error",
"conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_diamond_conflict_options",
"conans/test/model/transitive_reqs_test.py::ConanRequirementsOptimizerTest::test_expand_conflict_options",
"conans/test/model/transitive_reqs_test.py::ConanRequirementsOptimizerTest::test_expand_requirements",
"conans/test/model/transitive_reqs_test.py::ConanRequirementsOptimizerTest::test_expand_requirements_direct"
]
| [
"conans/test/command/create_test.py::CreateTest::test_error_create_name_version",
"conans/test/command/info_test.py::InfoTest::test_cwd",
"conans/test/functional/cmake_skip_rpath_test.py::CMakeSkipRpathTest::test_skip_flag",
"conans/test/generators/generators_test.py::GeneratorsTest::test_base",
"conans/test/generators/generators_test.py::GeneratorsTest::test_error",
"conans/test/generators/generators_test.py::GeneratorsTest::test_qmake"
]
| [
"conans/test/build_helpers/cmake_test.py::CMakeTest::test_clean_sh_path",
"conans/test/build_helpers/cmake_test.py::CMakeTest::test_cores_ancient_visual",
"conans/test/build_helpers/cmake_test.py::CMakeTest::test_deprecated_behaviour",
"conans/test/build_helpers/cmake_test.py::CMakeTest::test_run_tests",
"conans/test/build_helpers/cmake_test.py::CMakeTest::test_shared",
"conans/test/build_helpers/cmake_test.py::CMakeTest::test_sysroot",
"conans/test/build_helpers/cmake_test.py::CMakeTest::test_verbose",
"conans/test/model/options_test.py::OptionsTest::test_in",
"conans/test/model/options_test.py::OptionsValuesTest::test_dumps",
"conans/test/model/options_test.py::OptionsValuesTest::test_from_list",
"conans/test/model/options_test.py::OptionsValuesTest::test_sha_constant",
"conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_basic",
"conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_basic_option",
"conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_basic_transitive_option",
"conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_conditional",
"conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_conditional_diamond",
"conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_dep_requires_clear",
"conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_diamond_conflict_options_solved",
"conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_diamond_conflict_solved",
"conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_diamond_no_conflict",
"conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_diamond_no_conflict_options",
"conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_propagate_indirect_options",
"conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_remove_build_requires",
"conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_remove_two_build_requires",
"conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_simple_override",
"conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_transitive",
"conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_transitive_diamond_private",
"conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_transitive_pattern_options",
"conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_transitive_private",
"conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_transitive_two_levels",
"conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_transitive_two_levels_options",
"conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_transitive_two_levels_wrong_options",
"conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_version_requires2_change",
"conans/test/model/transitive_reqs_test.py::ConanRequirementsTest::test_version_requires_change",
"conans/test/model/transitive_reqs_test.py::ConanRequirementsOptimizerTest::test_avoid_duplicate_expansion",
"conans/test/model/transitive_reqs_test.py::ConanRequirementsOptimizerTest::test_expand_options",
"conans/test/model/transitive_reqs_test.py::CoreSettingsTest::test_basic",
"conans/test/model/transitive_reqs_test.py::CoreSettingsTest::test_config",
"conans/test/model/transitive_reqs_test.py::CoreSettingsTest::test_config_remove",
"conans/test/model/transitive_reqs_test.py::CoreSettingsTest::test_config_remove2",
"conans/test/model/transitive_reqs_test.py::CoreSettingsTest::test_errors",
"conans/test/model/transitive_reqs_test.py::CoreSettingsTest::test_new_configure",
"conans/test/model/transitive_reqs_test.py::CoreSettingsTest::test_transitive_two_levels_options",
"conans/test/model/version_ranges_test.py::VersionRangesTest::test_local_basic",
"conans/test/model/version_ranges_test.py::VersionRangesTest::test_remote_basic"
]
| []
| MIT License | 1,936 | [
"conans/client/generators/cmake_common.py",
"conans/client/generators/__init__.py",
"conans/client/command.py",
"conans/client/generators/virtualenv.py",
"conans/client/output.py",
"conans/client/require_resolver.py",
"conans/client/deps_builder.py",
"conans/client/cmd/new.py",
"conans/client/conan_api.py",
"conans/model/options.py"
]
| [
"conans/client/generators/cmake_common.py",
"conans/client/generators/__init__.py",
"conans/client/command.py",
"conans/client/generators/virtualenv.py",
"conans/client/output.py",
"conans/client/require_resolver.py",
"conans/client/deps_builder.py",
"conans/client/cmd/new.py",
"conans/client/conan_api.py",
"conans/model/options.py"
]
|
pycontribs__jenkinsapi-594 | cdd3f90e4ce5122db24e31038aeee098357313a4 | 2017-12-06 23:39:26 | 713bb4f589419455ad8e3cd3d67489b99554ce2e | rbtcollins: The failure is from other flaky tests :(. Could you please retest it? | diff --git a/jenkinsapi/build.py b/jenkinsapi/build.py
index 2e142da..e220b61 100644
--- a/jenkinsapi/build.py
+++ b/jenkinsapi/build.py
@@ -106,7 +106,7 @@ class Build(JenkinsBase):
if elem.get('_class') == 'hudson.model.ParametersAction':
parameters = elem.get('parameters', {})
break
- return {pair['name']: pair['value'] for pair in parameters}
+ return {pair['name']: pair.get('value') for pair in parameters}
def get_changeset_items(self):
"""
diff --git a/tox.ini b/tox.ini
index a5aac73..16b7cc1 100644
--- a/tox.ini
+++ b/tox.ini
@@ -22,7 +22,7 @@ usedevelop=
commands=
python -m pylint jenkinsapi
python -m pycodestyle
- py.test -sv --cov=jenkinsapi --cov-report=term-missing --cov-report=xml jenkinsapi_tests
+ py.test -sv --cov=jenkinsapi --cov-report=term-missing --cov-report=xml jenkinsapi_tests {posargs}
[testenv:args]
deps = -rtest-requirements.txt
| jenkinsapi.build.get_params() fails when a Job has a password parameter
##### ISSUE TYPE
- Bug Report
##### Jenkinsapi VERSION
0.3.4
##### Jenkins VERSION
Jenkins ver. 2.60.2
##### SUMMARY
When a Job has a Password Parameter and I try to get the build parameters of this job (jenkinsapi.build.get_params()), get_params() fails with the following traceback:
```
Traceback (most recent call last):
File "/opt/pycharm/helpers/pydev/pydevd.py", line 1599, in <module>
globals = debugger.run(setup['file'], None, None, is_module)
File "/opt/pycharm/helpers/pydev/pydevd.py", line 1026, in run
pydev_imports.execfile(file, globals, locals) # execute the script
File "/opt/pycharm/helpers/pydev/_pydev_imps/_pydev_execfile.py", line 18, in execfile
exec(compile(contents+"\n", file, 'exec'), glob, loc)
File "/home/tema/Pro/Jenins-job-restart/jjr.py", line 320, in <module>
jjr.start()
File "/home/tema/Pro/Jenins-job-restart/jjr.py", line 50, in start
self.__cycle()
File "/home/tema/Pro/Jenins-job-restart/jjr.py", line 100, in __cycle
job_params = job_build.get_params()
File "/home/tema/Pro/ve_Jenkins/lib/python3.5/site-packages/jenkinsapi/build.py", line 110, in get_params
return {pair['name']: pair['value'] for pair in parameters}
File "/home/tema/Pro/ve_Jenkins/lib/python3.5/site-packages/jenkinsapi/build.py", line 110, in <dictcomp>
return {pair['name']: pair['value'] for pair in parameters}
KeyError: 'value'
```
because the value of "parameters" inside get_params() is:
`[{'name': 'BRANCH', '_class': 'hudson.model.StringParameterValue', 'value': 'staging'}, {'name': 'REPO_URL', '_class': 'hudson.model.StringParameterValue', 'value': 'git.my.com:/BB.git'}, {'name': 'NODE', '_class': 'org.jvnet.jenkins.plugins.nodelabelparameter.NodeParameterValue', 'value': 'my11'}, {'name': 'DOMAIN', '_class': 'hudson.model.StringParameterValue', 'value': 'my.lan'}, {'name': 'SETUP', '_class': 'hudson.model.BooleanParameterValue', 'value': True}, {'name': 'LAUNCH', '_class': 'hudson.model.BooleanParameterValue', 'value': True}, {'name': 'REBUILD', '_class': 'hudson.model.StringParameterValue', 'value': 'No_Force'}, {'name': 'JENKINS_TESTS', '_class': 'hudson.model.BooleanParameterValue', 'value': False}, {'name': 'DR_REGISTRY', '_class': 'hudson.model.StringParameterValue', 'value': 'dr.my.lan:5000'}, {'name': 'TESTS_LISTS', '_class': 'com.cwctravel.hudson.plugins.extended_choice_parameter.ExtendedChoiceParameterValue', 'value': 'registration_actions'}, {'name': 'CURRENCY', '_class': 'hudson.model.StringParameterValue', 'value': 'EUR'}, {'name': 'DEFAULT_LANG_CODE', '_class': 'hudson.model.StringParameterValue', 'value': 'en'}, {'name': 'SELENIUM_HOST', '_class': 'hudson.model.StringParameterValue', 'value': 'my2.my.lan'}, {'name': 'GET_JS_ERRORS', '_class': 'hudson.model.BooleanParameterValue', 'value': False}, {'name': 'OPERATOR_ID', '_class': 'hudson.model.StringParameterValue', 'value': '15'}, {'name': 'OPERATOR_API_SERVER_URL', '_class': 'hudson.model.StringParameterValue', 'value': 'op.my.com:8043'}, {'name': 'USER_API_SERVER_HOST', '_class': 'hudson.model.StringParameterValue', 'value': 'cw-gm.my.com'}, {'name': 'EXTERNAL_OP_HOST', '_class': 'hudson.model.StringParameterValue', 'value': 'my.com'}, {'name': 'MOBILE_SITE_URL', '_class': 'hudson.model.StringParameterValue', 'value': 'm.my.com'}, {'name': 'BO_SERVER_HOST', '_class': 'hudson.model.StringParameterValue', 'value': 'bo.my.com'}, {'name': 'PREPROD', '_class': 'hudson.model.BooleanParameterValue', 'value': True}, {'name': 'OPERATOR_PASSWORD', '_class': 'hudson.model.PasswordParameterValue'}, {'name': 'FB_APPID', '_class': 'hudson.model.PasswordParameterValue'}, {'name': 'FB_SECRET', '_class': 'hudson.model.PasswordParameterValue'}]`
The last entries (OPERATOR_PASSWORD, FB_APPID, FB_SECRET) don't have a 'value' key because they are "Password Parameters".
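To make the failure concrete, here is a minimal sketch (using a reduced, hypothetical sample of the data above) showing that a defensive `dict.get()` lookup yields `None` for the password entries instead of raising:
```python
# Hypothetical reduced sample of the 'parameters' list returned by Jenkins;
# password-type entries carry no 'value' key at all.
parameters = [
    {'name': 'BRANCH', '_class': 'hudson.model.StringParameterValue',
     'value': 'staging'},
    {'name': 'OPERATOR_PASSWORD',
     '_class': 'hudson.model.PasswordParameterValue'},
]

# Current behavior: pair['value'] raises KeyError on the password entry.
# A defensive variant maps the missing value to None instead:
params = {pair['name']: pair.get('value') for pair in parameters}
assert params == {'BRANCH': 'staging', 'OPERATOR_PASSWORD': None}
```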
##### EXPECTED RESULTS
The 'value' key must return at least an empty string.
##### ACTUAL RESULTS
The call fails with the traceback shown above.
##### USEFUL INFORMATION
All relevant info is in the SUMMARY section above.
| pycontribs/jenkinsapi | diff --git a/jenkinsapi_tests/unittests/test_build.py b/jenkinsapi_tests/unittests/test_build.py
index fe31972..8df3adf 100644
--- a/jenkinsapi_tests/unittests/test_build.py
+++ b/jenkinsapi_tests/unittests/test_build.py
@@ -157,6 +157,25 @@ def test_only_ParametersAction_parameters_considered(build):
assert params == expected
+def test_ParametersWithNoValueSetValueNone_issue_583(build):
+ """SecretParameters don't share their value in the API."""
+ expected = {
+ 'some-secret': None,
+ }
+ build._data = {
+ 'actions': [
+ {
+ '_class': 'hudson.model.ParametersAction',
+ 'parameters': [
+ {'name': 'some-secret'},
+ ]
+ }
+ ]
+ }
+ params = build.get_params()
+ assert params == expected
+
+
def test_build_env_vars(monkeypatch, build):
def fake_get_data(cls, tree=None, params=None):
return configs.BUILD_ENV_VARS
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 2
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-mock",
"pytest-cov",
"pycodestyle>=2.3.1",
"astroid>=1.4.8",
"pylint>=1.7.1",
"tox>=2.3.1",
"mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | astroid==3.3.9
cachetools==5.5.2
certifi==2025.1.31
chardet==5.2.0
charset-normalizer==3.4.1
colorama==0.4.6
coverage==7.8.0
dill==0.3.9
distlib==0.3.9
exceptiongroup==1.2.2
filelock==3.18.0
idna==3.10
iniconfig==2.1.0
isort==6.0.1
-e git+https://github.com/pycontribs/jenkinsapi.git@cdd3f90e4ce5122db24e31038aeee098357313a4#egg=jenkinsapi
mccabe==0.7.0
mock==5.2.0
packaging==24.2
platformdirs==4.3.7
pluggy==1.5.0
pycodestyle==2.13.0
pylint==3.3.6
pyproject-api==1.9.0
pytest==8.3.5
pytest-cov==6.0.0
pytest-mock==3.14.0
pytz==2025.2
requests==2.32.3
six==1.17.0
tomli==2.2.1
tomlkit==0.13.2
tox==4.25.0
typing_extensions==4.13.0
urllib3==2.3.0
virtualenv==20.29.3
| name: jenkinsapi
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- astroid==3.3.9
- cachetools==5.5.2
- certifi==2025.1.31
- chardet==5.2.0
- charset-normalizer==3.4.1
- colorama==0.4.6
- coverage==7.8.0
- dill==0.3.9
- distlib==0.3.9
- exceptiongroup==1.2.2
- filelock==3.18.0
- idna==3.10
- iniconfig==2.1.0
- isort==6.0.1
- mccabe==0.7.0
- mock==5.2.0
- packaging==24.2
- platformdirs==4.3.7
- pluggy==1.5.0
- pycodestyle==2.13.0
- pylint==3.3.6
- pyproject-api==1.9.0
- pytest==8.3.5
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytz==2025.2
- requests==2.32.3
- six==1.17.0
- tomli==2.2.1
- tomlkit==0.13.2
- tox==4.25.0
- typing-extensions==4.13.0
- urllib3==2.3.0
- virtualenv==20.29.3
prefix: /opt/conda/envs/jenkinsapi
| [
"jenkinsapi_tests/unittests/test_build.py::test_ParametersWithNoValueSetValueNone_issue_583"
]
| [
"jenkinsapi_tests/unittests/test_build.py::test_build_env_vars_wo_injected_env_vars_plugin",
"jenkinsapi_tests/unittests/test_build.py::test_build_env_vars_other_exception"
]
| [
"jenkinsapi_tests/unittests/test_build.py::test_timestamp",
"jenkinsapi_tests/unittests/test_build.py::test_name",
"jenkinsapi_tests/unittests/test_build.py::test_duration",
"jenkinsapi_tests/unittests/test_build.py::test_get_causes",
"jenkinsapi_tests/unittests/test_build.py::test_get_description",
"jenkinsapi_tests/unittests/test_build.py::test_get_slave",
"jenkinsapi_tests/unittests/test_build.py::test_get_revision_no_scm",
"jenkinsapi_tests/unittests/test_build.py::test_downstream",
"jenkinsapi_tests/unittests/test_build.py::test_get_params",
"jenkinsapi_tests/unittests/test_build.py::test_get_params_different_order",
"jenkinsapi_tests/unittests/test_build.py::test_only_ParametersAction_parameters_considered",
"jenkinsapi_tests/unittests/test_build.py::test_build_env_vars"
]
| []
| MIT License | 1,937 | [
"tox.ini",
"jenkinsapi/build.py"
]
| [
"tox.ini",
"jenkinsapi/build.py"
]
|
falconry__falcon-1157 | 064232a826070c8ff528b739e560917e7da67e1e | 2017-12-07 00:46:11 | 919fd3f5a3129d04f1c7d23f5eff440ec4598e35 | codecov[bot]: # [Codecov](https://codecov.io/gh/falconry/falcon/pull/1157?src=pr&el=h1) Report
> Merging [#1157](https://codecov.io/gh/falconry/falcon/pull/1157?src=pr&el=desc) into [master](https://codecov.io/gh/falconry/falcon/commit/064232a826070c8ff528b739e560917e7da67e1e?src=pr&el=desc) will **not change** coverage.
> The diff coverage is `100%`.
[Impacted file tree graph](https://codecov.io/gh/falconry/falcon/pull/1157?src=pr&el=tree)
```diff
@@ Coverage Diff @@
## master #1157 +/- ##
======================================
Coverage 100% 100%
======================================
Files 36 37 +1
Lines 2378 2421 +43
Branches 347 353 +6
======================================
+ Hits 2378 2421 +43
```
| [Impacted Files](https://codecov.io/gh/falconry/falcon/pull/1157?src=pr&el=tree) | Coverage Δ | |
|---|---|---|
| [falcon/response\_helpers.py](https://codecov.io/gh/falconry/falcon/pull/1157/diff?src=pr&el=tree#diff-ZmFsY29uL3Jlc3BvbnNlX2hlbHBlcnMucHk=) | `100% <100%> (ø)` | :arrow_up: |
| [falcon/routing/static.py](https://codecov.io/gh/falconry/falcon/pull/1157/diff?src=pr&el=tree#diff-ZmFsY29uL3JvdXRpbmcvc3RhdGljLnB5) | `100% <100%> (ø)` | |
| [falcon/routing/\_\_init\_\_.py](https://codecov.io/gh/falconry/falcon/pull/1157/diff?src=pr&el=tree#diff-ZmFsY29uL3JvdXRpbmcvX19pbml0X18ucHk=) | `100% <100%> (ø)` | :arrow_up: |
| [falcon/response.py](https://codecov.io/gh/falconry/falcon/pull/1157/diff?src=pr&el=tree#diff-ZmFsY29uL3Jlc3BvbnNlLnB5) | `100% <100%> (ø)` | :arrow_up: |
------
[Continue to review full report at Codecov](https://codecov.io/gh/falconry/falcon/pull/1157?src=pr&el=continue).
> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta)
> `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data`
> Powered by [Codecov](https://codecov.io/gh/falconry/falcon/pull/1157?src=pr&el=footer). Last update [064232a...869d993](https://codecov.io/gh/falconry/falcon/pull/1157?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).
| diff --git a/falcon/api.py b/falcon/api.py
index beb4fff..a003f6d 100644
--- a/falcon/api.py
+++ b/falcon/api.py
@@ -140,7 +140,8 @@ class API(object):
__slots__ = ('_request_type', '_response_type',
'_error_handlers', '_media_type', '_router', '_sinks',
'_serialize_error', 'req_options', 'resp_options',
- '_middleware', '_independent_middleware', '_router_search')
+ '_middleware', '_independent_middleware', '_router_search',
+ '_static_routes')
def __init__(self, media_type=DEFAULT_MEDIA_TYPE,
request_type=Request, response_type=Response,
@@ -148,6 +149,7 @@ class API(object):
independent_middleware=False):
self._sinks = []
self._media_type = media_type
+ self._static_routes = []
# set middleware
self._middleware = helpers.prepare_middleware(
@@ -350,6 +352,53 @@ class API(object):
self._router.add_route(uri_template, method_map, resource, *args,
**kwargs)
+ def add_static_route(self, prefix, directory, downloadable=False):
+ """Add a route to a directory of static files.
+
+ Static routes provide a way to serve files directly. This
+ feature provides an alternative to serving files at the web server
+ level when you don't have that option, when authorization is
+ required, or for testing purposes.
+
+ Warning:
+ Serving files directly from the web server,
+ rather than through the Python app, will always be more efficient,
+ and therefore should be preferred in production deployments.
+
+ Static routes are matched in LIFO order. Therefore, if the same
+ prefix is used for two routes, the second one will override the
+ first. This also means that more specific routes should be added
+ *after* less specific ones. For example, the following sequence
+ would result in ``'/foo/bar/thing.js'`` being mapped to the
+ ``'/foo/bar'`` route, and ``'/foo/xyz/thing.js'`` being mapped to the
+ ``'/foo'`` route::
+
+ api.add_static_route('/foo', foo_path)
+ api.add_static_route('/foo/bar', foobar_path)
+
+ Args:
+ prefix (str): The path prefix to match for this route. If the
+ path in the requested URI starts with this string, the remainder
+ of the path will be appended to the source directory to
+ determine the file to serve. This is done in a secure manner
+ to prevent an attacker from requesting a file outside the
+ specified directory.
+
+ Note that static routes are matched in LIFO order, and are only
+ attempted after checking dynamic routes and sinks.
+
+ directory (str): The source directory from which to serve files.
+ downloadable (bool): Set to ``True`` to include a
+ Content-Disposition header in the response. The "filename"
+ directive is simply set to the name of the requested file.
+
+ """
+
+ self._static_routes.insert(
+ 0,
+ routing.StaticRoute(prefix, directory, downloadable=downloadable)
+ )
+
def add_sink(self, sink, prefix=r'/'):
"""Register a sink method for the API.
@@ -563,7 +612,13 @@ class API(object):
break
else:
- responder = falcon.responders.path_not_found
+
+ for sr in self._static_routes:
+ if sr.match(path):
+ responder = sr
+ break
+ else:
+ responder = falcon.responders.path_not_found
return (responder, params, resource, uri_template)
diff --git a/falcon/response.py b/falcon/response.py
index e10d778..9479468 100644
--- a/falcon/response.py
+++ b/falcon/response.py
@@ -14,6 +14,8 @@
"""Response class."""
+import mimetypes
+
from six import PY2
from six import string_types as STRING_TYPES
@@ -25,6 +27,7 @@ from six.moves import http_cookies # NOQA: I202
from falcon import DEFAULT_MEDIA_TYPE
from falcon.media import Handlers
from falcon.response_helpers import (
+ format_content_disposition,
format_header_value_list,
format_range,
header_property,
@@ -34,6 +37,7 @@ from falcon.util import dt_to_http, TimezoneGMT
from falcon.util.uri import encode as uri_encode
from falcon.util.uri import encode_value as uri_encode_value
+
SimpleCookie = http_cookies.SimpleCookie
CookieError = http_cookies.CookieError
@@ -679,6 +683,16 @@ class Response(object):
and ``falcon.MEDIA_GIF``.
""")
+ downloadable_as = header_property(
+ 'Content-Disposition',
+ """Set the Content-Disposition header using the given filename.
+
+ The value will be used for the "filename" directive. For example,
+ given 'report.pdf', the Content-Disposition header would be set
+ to ``'attachment; filename="report.pdf"'``.
+ """,
+ format_content_disposition)
+
etag = header_property(
'ETag',
'Set the ETag header.')
@@ -811,24 +825,32 @@ class ResponseOptions(object):
not requiring HTTPS. Note, however, that this setting can
be overridden via `set_cookie()`'s `secure` kwarg.
- default_media_type (str): The default media-type to use when
- deserializing a response. This value is normally set to the media
- type provided when a :class:`falcon.API` is initialized; however,
- if created independently, this will default to the
+ default_media_type (str): The default Internet media type (RFC 2046) to
+ use when deserializing a response. This value is normally set to the
+ media type provided when a :class:`falcon.API` is initialized;
+ however, if created independently, this will default to the
``DEFAULT_MEDIA_TYPE`` specified by Falcon.
media_handlers (Handlers): A dict-like object that allows you to
configure the media-types that you would like to handle.
By default, a handler is provided for the ``application/json``
media type.
+
+ static_media_types (dict): A mapping of dot-prefixed file extensions to
+ Internet media types (RFC 2046). Defaults to ``mimetypes.types_map``
+ after calling ``mimetypes.init()``.
"""
__slots__ = (
'secure_cookies_by_default',
'default_media_type',
'media_handlers',
+ 'static_media_types',
)
def __init__(self):
self.secure_cookies_by_default = True
self.default_media_type = DEFAULT_MEDIA_TYPE
self.media_handlers = Handlers()
+
+ mimetypes.init()
+ self.static_media_types = mimetypes.types_map
diff --git a/falcon/response_helpers.py b/falcon/response_helpers.py
index 47308c2..602eb5b 100644
--- a/falcon/response_helpers.py
+++ b/falcon/response_helpers.py
@@ -77,6 +77,12 @@ def format_range(value):
return result
+def format_content_disposition(value):
+ """Formats a Content-Disposition header given a filename."""
+
+ return 'attachment; filename="' + value + '"'
+
+
if six.PY2:
def format_header_value_list(iterable):
"""Join an iterable of strings with commas."""
diff --git a/falcon/routing/__init__.py b/falcon/routing/__init__.py
index abb9c87..51bc7d3 100644
--- a/falcon/routing/__init__.py
+++ b/falcon/routing/__init__.py
@@ -20,6 +20,7 @@ routers.
"""
from falcon.routing.compiled import CompiledRouter, CompiledRouterOptions # NOQA
+from falcon.routing.static import StaticRoute # NOQA
from falcon.routing.util import create_http_method_map # NOQA
from falcon.routing.util import map_http_methods # NOQA
from falcon.routing.util import set_default_responders # NOQA
diff --git a/falcon/routing/static.py b/falcon/routing/static.py
new file mode 100644
index 0000000..ba3fab2
--- /dev/null
+++ b/falcon/routing/static.py
@@ -0,0 +1,98 @@
+import io
+import os
+import re
+
+import falcon
+
+
+class StaticRoute(object):
+ """Represents a static route.
+
+ Args:
+ prefix (str): The path prefix to match for this route. If the
+ path in the requested URI starts with this string, the remainder
+ of the path will be appended to the source directory to
+ determine the file to serve. This is done in a secure manner
+ to prevent an attacker from requesting a file outside the
+ specified directory.
+
+ Note that static routes are matched in LIFO order, and are only
+ attempted after checking dynamic routes and sinks.
+
+ directory (str): The source directory from which to serve files. Must
+ be an absolute path.
+ downloadable (bool): Set to ``True`` to include a
+ Content-Disposition header in the response. The "filename"
+ directive is simply set to the name of the requested file.
+ """
+
+ # NOTE(kgriffs): Don't allow control characters and reserved chars
+ _DISALLOWED_CHARS_PATTERN = re.compile('[\x00-\x1f\x80-\x9f~?<>:*|\'"]')
+
+ # NOTE(kgriffs): If somehow an executable code exploit is triggerable, this
+ # minimizes how much can be included in the payload.
+ _MAX_NON_PREFIXED_LEN = 512
+
+ def __init__(self, prefix, directory, downloadable=False):
+ if not prefix.startswith('/'):
+ raise ValueError("prefix must start with '/'")
+
+ if not os.path.isabs(directory):
+ raise ValueError('directory must be an absolute path')
+
+ # NOTE(kgriffs): Ensure it ends with a path separator to ensure
+ # we only match on the complete segment. Don't raise an error
+ # because most people won't expect to have to append a slash.
+ if not prefix.endswith('/'):
+ prefix += '/'
+
+ self._prefix = prefix
+ self._directory = directory
+ self._downloadable = downloadable
+
+ def match(self, path):
+ """Check whether the given path matches this route."""
+ return path.startswith(self._prefix)
+
+ def __call__(self, req, resp):
+ """Resource responder for this route."""
+
+ without_prefix = req.path[len(self._prefix):]
+
+ # NOTE(kgriffs): Check surrounding whitespace and strip trailing
+ # periods, which are illegal on windows
+ if (not without_prefix or
+ without_prefix.strip().rstrip('.') != without_prefix or
+ self._DISALLOWED_CHARS_PATTERN.search(without_prefix) or
+ '\\' in without_prefix or
+ '//' in without_prefix or
+ len(without_prefix) > self._MAX_NON_PREFIXED_LEN):
+
+ raise falcon.HTTPNotFound()
+
+ normalized = os.path.normpath(without_prefix)
+
+ if normalized.startswith('../') or normalized.startswith('/'):
+ raise falcon.HTTPNotFound()
+
+ file_path = os.path.join(self._directory, normalized)
+
+ # NOTE(kgriffs): Final sanity-check just to be safe. This check
+ # should never succeed, but this should guard against us having
+ # overlooked something.
+ if '..' in file_path or not file_path.startswith(self._directory):
+ raise falcon.HTTPNotFound() # pragma: nocover
+
+ try:
+ resp.stream = io.open(file_path, 'rb')
+ except IOError:
+ raise falcon.HTTPNotFound()
+
+ suffix = os.path.splitext(file_path)[1]
+ resp.content_type = resp.options.static_media_types.get(
+ suffix,
+ 'application/octet-stream'
+ )
+
+ if self._downloadable:
+ resp.downloadable_as = os.path.basename(file_path)
| Route to static file
On a PaaS you may not be able to serve static files from the web server directly, so let's make it easy to do this in Falcon.
Depends on #181
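A rough sketch of the intended usage, based on the `add_static_route()` method added by the accompanying patch (the prefixes and directories below are placeholders):
```python
import falcon

app = falcon.API()

# Serve files under /static/* straight from an absolute directory on disk.
app.add_static_route('/static', '/var/www/statics')

# downloadable=True also sets a Content-Disposition header, so browsers
# offer the file as a download instead of rendering it inline.
app.add_static_route('/downloads', '/var/www/downloads', downloadable=True)
```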
| falconry/falcon | diff --git a/tests/test_headers.py b/tests/test_headers.py
index 0f03ecc..79ab1df 100644
--- a/tests/test_headers.py
+++ b/tests/test_headers.py
@@ -52,6 +52,7 @@ class HeaderHelpersResource(object):
# Relative URI's are OK per http://goo.gl/DbVqR
resp.location = '/things/87'
resp.content_location = '/things/78'
+ resp.downloadable_as = 'Some File.zip'
if req.range_unit is None or req.range_unit == 'bytes':
# bytes 0-499/10240
@@ -310,6 +311,7 @@ class TestHeaders(object):
content_type = 'x-falcon/peregrine'
assert resp.content_type == content_type
assert result.headers['Content-Type'] == content_type
+ assert result.headers['Content-Disposition'] == 'attachment; filename="Some File.zip"'
cache_control = ('public, private, no-cache, no-store, '
'must-revalidate, proxy-revalidate, max-age=3600, '
diff --git a/tests/test_static.py b/tests/test_static.py
new file mode 100644
index 0000000..2fbb55c
--- /dev/null
+++ b/tests/test_static.py
@@ -0,0 +1,201 @@
+# -*- coding: utf-8 -*-
+
+import io
+
+import pytest
+
+import falcon
+from falcon.request import Request
+from falcon.response import Response
+from falcon.routing import StaticRoute
+import falcon.testing as testing
+
+
[email protected]
+def client():
+ app = falcon.API()
+ return testing.TestClient(app)
+
+
[email protected]('uri', [
+ # Root
+ '/static',
+ '/static/',
+ '/static/.',
+
+ # Attempt to jump out of the directory
+ '/static/..',
+ '/static/../.',
+ '/static/.././etc/passwd',
+ '/static/../etc/passwd',
+ '/static/css/../../secret',
+ '/static/css/../../etc/passwd',
+ '/static/./../etc/passwd',
+
+ # The file system probably won't process escapes, but better safe than sorry
+ '/static/css/../.\\056/etc/passwd',
+ '/static/./\\056./etc/passwd',
+ '/static/\\056\\056/etc/passwd',
+
+ # Double slash
+ '/static//test.css',
+ '/static//COM10',
+ '/static/path//test.css',
+ '/static/path///test.css',
+ '/static/path////test.css',
+ '/static/path/foo//test.css',
+
+ # Control characters (0x00–0x1f and 0x80–0x9f)
+ '/static/.\x00ssh/authorized_keys',
+ '/static/.\x1fssh/authorized_keys',
+ '/static/.\x80ssh/authorized_keys',
+ '/static/.\x9fssh/authorized_keys',
+
+ # Reserved characters (~, ?, <, >, :, *, |, ', and ")
+ '/static/~/.ssh/authorized_keys',
+ '/static/.ssh/authorized_key?',
+ '/static/.ssh/authorized_key>foo',
+ '/static/.ssh/authorized_key|foo',
+ '/static/.ssh/authorized_key<foo',
+ '/static/something:something',
+ '/static/thing*.sql',
+ '/static/\'thing\'.sql',
+ '/static/"thing".sql',
+
+ # Trailing periods and spaces
+ '/static/something.',
+ '/static/something..',
+ '/static/something ',
+ '/static/ something ',
+ '/static/ something ',
+ '/static/something\t',
+ '/static/\tsomething',
+
+ # Too long
+ '/static/' + ('t' * StaticRoute._MAX_NON_PREFIXED_LEN) + 'x',
+
+])
+def test_bad_path(uri, monkeypatch):
+ monkeypatch.setattr(io, 'open', lambda path, mode: path)
+
+ sr = StaticRoute('/static', '/var/www/statics')
+
+ req = Request(testing.create_environ(
+ host='test.com',
+ path=uri,
+ app='statics'
+ ))
+
+ resp = Response()
+
+ with pytest.raises(falcon.HTTPNotFound):
+ sr(req, resp)
+
+
[email protected]('prefix, directory', [
+ ('static', '/var/www/statics'),
+ ('/static', './var/www/statics'),
+ ('/static', 'statics'),
+ ('/static', '../statics'),
+])
+def test_invalid_args(prefix, directory, client):
+ with pytest.raises(ValueError):
+ StaticRoute(prefix, directory)
+
+ with pytest.raises(ValueError):
+ client.app.add_static_route(prefix, directory)
+
+
[email protected]('uri_prefix, uri_path, expected_path, mtype', [
+ ('/static/', '/css/test.css', '/css/test.css', 'text/css'),
+ ('/static', '/css/test.css', '/css/test.css', 'text/css'),
+ (
+ '/static',
+ '/' + ('t' * StaticRoute._MAX_NON_PREFIXED_LEN),
+ '/' + ('t' * StaticRoute._MAX_NON_PREFIXED_LEN),
+ 'application/octet-stream',
+ ),
+ ('/static', '/.test.css', '/.test.css', 'text/css'),
+ ('/some/download/', '/report.pdf', '/report.pdf', 'application/pdf'),
+ ('/some/download/', '/Fancy Report.pdf', '/Fancy Report.pdf', 'application/pdf'),
+ ('/some/download', '/report.zip', '/report.zip', 'application/zip'),
+ ('/some/download', '/foo/../report.zip', '/report.zip', 'application/zip'),
+ ('/some/download', '/foo/../bar/../report.zip', '/report.zip', 'application/zip'),
+ ('/some/download', '/foo/bar/../../report.zip', '/report.zip', 'application/zip'),
+])
+def test_good_path(uri_prefix, uri_path, expected_path, mtype, monkeypatch):
+ monkeypatch.setattr(io, 'open', lambda path, mode: path)
+
+ sr = StaticRoute(uri_prefix, '/var/www/statics')
+
+ req_path = uri_prefix[:-1] if uri_prefix.endswith('/') else uri_prefix
+ req_path += uri_path
+
+ req = Request(testing.create_environ(
+ host='test.com',
+ path=req_path,
+ app='statics'
+ ))
+
+ resp = Response()
+
+ sr(req, resp)
+
+ assert resp.content_type == mtype
+ assert resp.stream == '/var/www/statics' + expected_path
+
+
+def test_lifo(client, monkeypatch):
+ monkeypatch.setattr(io, 'open', lambda path, mode: [path.encode('utf-8')])
+
+ client.app.add_static_route('/downloads', '/opt/somesite/downloads')
+ client.app.add_static_route('/downloads/archive', '/opt/somesite/x')
+
+ response = client.simulate_request(path='/downloads/thing.zip')
+ assert response.status == falcon.HTTP_200
+ assert response.text == '/opt/somesite/downloads/thing.zip'
+
+ response = client.simulate_request(path='/downloads/archive/thingtoo.zip')
+ assert response.status == falcon.HTTP_200
+ assert response.text == '/opt/somesite/x/thingtoo.zip'
+
+
+def test_lifo_negative(client, monkeypatch):
+ monkeypatch.setattr(io, 'open', lambda path, mode: [path.encode('utf-8')])
+
+ client.app.add_static_route('/downloads/archive', '/opt/somesite/x')
+ client.app.add_static_route('/downloads', '/opt/somesite/downloads')
+
+ response = client.simulate_request(path='/downloads/thing.zip')
+ assert response.status == falcon.HTTP_200
+ assert response.text == '/opt/somesite/downloads/thing.zip'
+
+ response = client.simulate_request(path='/downloads/archive/thingtoo.zip')
+ assert response.status == falcon.HTTP_200
+ assert response.text == '/opt/somesite/downloads/archive/thingtoo.zip'
+
+
+def test_downloadable(client, monkeypatch):
+ monkeypatch.setattr(io, 'open', lambda path, mode: [path.encode('utf-8')])
+
+ client.app.add_static_route('/downloads', '/opt/somesite/downloads', downloadable=True)
+ client.app.add_static_route('/assets/', '/opt/somesite/assets')
+
+ response = client.simulate_request(path='/downloads/thing.zip')
+ assert response.status == falcon.HTTP_200
+ assert response.headers['Content-Disposition'] == 'attachment; filename="thing.zip"'
+
+ response = client.simulate_request(path='/downloads/Some Report.zip')
+ assert response.status == falcon.HTTP_200
+ assert response.headers['Content-Disposition'] == 'attachment; filename="Some Report.zip"'
+
+ response = client.simulate_request(path='/assets/css/main.css')
+ assert response.status == falcon.HTTP_200
+ assert 'Content-Disposition' not in response.headers
+
+
+def test_downloadable_not_found(client):
+ client.app.add_static_route('/downloads', '/opt/somesite/downloads', downloadable=True)
+
+ response = client.simulate_request(path='/downloads/thing.zip')
+ assert response.status == falcon.HTTP_404
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 4
} | 1.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements/tests"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
execnet==1.9.0
-e git+https://github.com/falconry/falcon.git@064232a826070c8ff528b739e560917e7da67e1e#egg=falcon
fixtures==4.0.1
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
jsonschema==3.2.0
msgpack-python==0.5.6
packaging==21.3
pbr==6.1.1
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
pytest-asyncio==0.16.0
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-xdist==3.0.2
python-mimeparse==1.6.0
PyYAML==3.11
requests==2.27.1
six==1.17.0
testtools==2.6.0
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: falcon
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- charset-normalizer==2.0.12
- coverage==6.2
- execnet==1.9.0
- fixtures==4.0.1
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jsonschema==3.2.0
- msgpack-python==0.5.6
- packaging==21.3
- pbr==6.1.1
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pytest-asyncio==0.16.0
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-xdist==3.0.2
- python-mimeparse==1.6.0
- pyyaml==3.11
- requests==2.27.1
- six==1.17.0
- testtools==2.6.0
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/falcon
| [
"tests/test_after_hooks.py::test_output_validator",
"tests/test_after_hooks.py::test_serializer",
"tests/test_after_hooks.py::test_hook_as_callable_class",
"tests/test_after_hooks.py::test_resource_with_uri_fields[resource0]",
"tests/test_after_hooks.py::test_resource_with_uri_fields[resource1]",
"tests/test_after_hooks.py::test_wrapped_resource[resource0]",
"tests/test_after_hooks.py::test_wrapped_resource[resource1]",
"tests/test_after_hooks.py::test_wrapped_resource_with_hooks_aware_of_resource",
"tests/test_before_hooks.py::test_multiple_resource_hooks[resource0]",
"tests/test_before_hooks.py::test_multiple_resource_hooks[resource1]",
"tests/test_before_hooks.py::test_input_validator",
"tests/test_before_hooks.py::test_input_validator_inherited",
"tests/test_before_hooks.py::test_param_validator",
"tests/test_before_hooks.py::test_field_validator[resource0]",
"tests/test_before_hooks.py::test_field_validator[resource1]",
"tests/test_before_hooks.py::test_field_validator[resource2]",
"tests/test_before_hooks.py::test_parser",
"tests/test_before_hooks.py::test_wrapped_resource",
"tests/test_before_hooks.py::test_wrapped_resource_with_hooks_aware_of_resource",
"tests/test_boundedstream.py::test_not_writeable",
"tests/test_cmd_print_api.py::test_traverse_with_verbose",
"tests/test_cmd_print_api.py::test_traverse",
"tests/test_cookies.py::test_response_base_case",
"tests/test_cookies.py::test_response_disable_secure_globally",
"tests/test_cookies.py::test_response_complex_case",
"tests/test_cookies.py::test_cookie_expires_naive",
"tests/test_cookies.py::test_cookie_expires_aware",
"tests/test_cookies.py::test_cookies_setable",
"tests/test_cookies.py::test_cookie_max_age_float_and_string[foofloat]",
"tests/test_cookies.py::test_cookie_max_age_float_and_string[foostring]",
"tests/test_cookies.py::test_response_unset_cookie",
"tests/test_cookies.py::test_cookie_timezone",
"tests/test_cookies.py::test_request_cookie_parsing",
"tests/test_cookies.py::test_invalid_cookies_are_ignored",
"tests/test_cookies.py::test_cookie_header_is_missing",
"tests/test_cookies.py::test_unicode_inside_ascii_range",
"tests/test_cookies.py::test_non_ascii_name[Unicode_\\xc3\\xa6\\xc3\\xb8]",
"tests/test_cookies.py::test_non_ascii_name[Unicode_\\xc3\\x83\\xc2\\xa6\\xc3\\x83\\xc2\\xb8]",
"tests/test_cookies.py::test_non_ascii_name[42]",
"tests/test_cookies.py::test_non_ascii_value[Unicode_\\xc3\\xa6\\xc3\\xb8]",
"tests/test_cookies.py::test_non_ascii_value[Unicode_\\xc3\\x83\\xc2\\xa6\\xc3\\x83\\xc2\\xb8]",
"tests/test_cookies.py::test_non_ascii_value[42]",
"tests/test_custom_router.py::test_custom_router_add_route_should_be_used",
"tests/test_custom_router.py::test_custom_router_find_should_be_used",
"tests/test_custom_router.py::test_can_pass_additional_params_to_add_route",
"tests/test_custom_router.py::test_custom_router_takes_req_positional_argument",
"tests/test_custom_router.py::test_custom_router_takes_req_keyword_argument",
"tests/test_default_router.py::test_user_regression_versioned_url",
"tests/test_default_router.py::test_user_regression_recipes",
"tests/test_default_router.py::test_user_regression_special_chars[/serviceRoot/People|{field}-/serviceRoot/People|susie-expected_params0]",
"tests/test_default_router.py::test_user_regression_special_chars[/serviceRoot/People[{field}]-/serviceRoot/People['calvin']-expected_params1]",
"tests/test_default_router.py::test_user_regression_special_chars[/serviceRoot/People({field})-/serviceRoot/People('hobbes')-expected_params2]",
"tests/test_default_router.py::test_user_regression_special_chars[/serviceRoot/People({field})-/serviceRoot/People('hob)bes')-expected_params3]",
"tests/test_default_router.py::test_user_regression_special_chars[/serviceRoot/People({field})(z)-/serviceRoot/People(hobbes)(z)-expected_params4]",
"tests/test_default_router.py::test_user_regression_special_chars[/serviceRoot/People('{field}')-/serviceRoot/People('rosalyn')-expected_params5]",
"tests/test_default_router.py::test_user_regression_special_chars[/^{field}-/^42-expected_params6]",
"tests/test_default_router.py::test_user_regression_special_chars[/+{field}-/+42-expected_params7]",
"tests/test_default_router.py::test_user_regression_special_chars[/foo/{first}_{second}/bar-/foo/abc_def_ghijk/bar-expected_params8]",
"tests/test_default_router.py::test_user_regression_special_chars[/items/{x}?{y}-/items/1080?768-expected_params9]",
"tests/test_default_router.py::test_user_regression_special_chars[/items/{x}|{y}-/items/1080|768-expected_params10]",
"tests/test_default_router.py::test_user_regression_special_chars[/items/{x},{y}-/items/1080,768-expected_params11]",
"tests/test_default_router.py::test_user_regression_special_chars[/items/{x}^^{y}-/items/1080^^768-expected_params12]",
"tests/test_default_router.py::test_user_regression_special_chars[/items/{x}*{y}*-/items/1080*768*-expected_params13]",
"tests/test_default_router.py::test_user_regression_special_chars[/thing-2/something+{field}+-/thing-2/something+42+-expected_params14]",
"tests/test_default_router.py::test_user_regression_special_chars[/thing-2/something*{field}/notes-/thing-2/something*42/notes-expected_params15]",
"tests/test_default_router.py::test_user_regression_special_chars[/thing-2/something+{field}|{q}/notes-/thing-2/something+else|z/notes-expected_params16]",
"tests/test_default_router.py::test_user_regression_special_chars[serviceRoot/$metadata#Airports('{field}')/Name-serviceRoot/$metadata#Airports('KSFO')/Name-expected_params17]",
"tests/test_default_router.py::test_not_str[uri_template0]",
"tests/test_default_router.py::test_not_str[uri_template1]",
"tests/test_default_router.py::test_not_str[uri_template2]",
"tests/test_default_router.py::test_root_path",
"tests/test_default_router.py::test_duplicate_field_names[/{field}{field}]",
"tests/test_default_router.py::test_duplicate_field_names[/{field}...{field}]",
"tests/test_default_router.py::test_duplicate_field_names[/{field}/{another}/{field}]",
"tests/test_default_router.py::test_duplicate_field_names[/{field}/something/something/{field}/something]",
"tests/test_default_router.py::test_match_entire_path[/items/thing-/items/t]",
"tests/test_default_router.py::test_match_entire_path[/items/{x}|{y}|-/items/1080|768]",
"tests/test_default_router.py::test_match_entire_path[/items/{x}*{y}foo-/items/1080*768foobar]",
"tests/test_default_router.py::test_match_entire_path[/items/{x}*768*-/items/1080*768***]",
"tests/test_default_router.py::test_conflict[/teams/{conflict}]",
"tests/test_default_router.py::test_conflict[/emojis/signs/{id_too}]",
"tests/test_default_router.py::test_conflict[/repos/{org}/{repo}/compare/{complex}:{vs}...{complex2}:{conflict}]",
"tests/test_default_router.py::test_conflict[/teams/{id:int}/settings]",
"tests/test_default_router.py::test_non_conflict[/repos/{org}/{repo}/compare/{simple_vs_complex}]",
"tests/test_default_router.py::test_non_conflict[/repos/{complex}.{vs}.{simple}]",
"tests/test_default_router.py::test_non_conflict[/repos/{org}/{repo}/compare/{complex}:{vs}...{complex2}/full]",
"tests/test_default_router.py::test_invalid_field_name[/{}]",
"tests/test_default_router.py::test_invalid_field_name[/repos/{org}/{repo}/compare/{}]",
"tests/test_default_router.py::test_invalid_field_name[/repos/{complex}.{}.{thing}]",
"tests/test_default_router.py::test_invalid_field_name[/{9v}]",
"tests/test_default_router.py::test_invalid_field_name[/{524hello}/world]",
"tests/test_default_router.py::test_invalid_field_name[/hello/{1world}]",
"tests/test_default_router.py::test_invalid_field_name[/repos/{complex}.{9v}.{thing}/etc]",
"tests/test_default_router.py::test_invalid_field_name[/{*kgriffs}]",
"tests/test_default_router.py::test_invalid_field_name[/{@kgriffs}]",
"tests/test_default_router.py::test_invalid_field_name[/repos/{complex}.{v}.{@thing}/etc]",
"tests/test_default_router.py::test_invalid_field_name[/{-kgriffs}]",
"tests/test_default_router.py::test_invalid_field_name[/repos/{complex}.{-v}.{thing}/etc]",
"tests/test_default_router.py::test_invalid_field_name[/repos/{simple-thing}/etc]",
"tests/test_default_router.py::test_invalid_field_name[/this",
"tests/test_default_router.py::test_invalid_field_name[/this\\tand\\tthat/this\\nand\\nthat/{thing",
"tests/test_default_router.py::test_invalid_field_name[/{thing\\t}/world]",
"tests/test_default_router.py::test_invalid_field_name[/{\\nthing}/world]",
"tests/test_default_router.py::test_invalid_field_name[/{th\\x0bing}/world]",
"tests/test_default_router.py::test_invalid_field_name[/{",
"tests/test_default_router.py::test_invalid_field_name[/{thing}/wo",
"tests/test_default_router.py::test_invalid_field_name[/{thing}",
"tests/test_default_router.py::test_invalid_field_name[/repos/{or",
"tests/test_default_router.py::test_invalid_field_name[/repos/{org}/{repo}/compare/{th\\ting}]",
"tests/test_default_router.py::test_print_src",
"tests/test_default_router.py::test_override",
"tests/test_default_router.py::test_literal_segment",
"tests/test_default_router.py::test_dead_segment[/teams]",
"tests/test_default_router.py::test_dead_segment[/emojis/signs]",
"tests/test_default_router.py::test_dead_segment[/gists]",
"tests/test_default_router.py::test_dead_segment[/gists/42]",
"tests/test_default_router.py::test_malformed_pattern[/repos/racker/falcon/compare/foo]",
"tests/test_default_router.py::test_malformed_pattern[/repos/racker/falcon/compare/foo/full]",
"tests/test_default_router.py::test_literal",
"tests/test_default_router.py::test_converters[/cvt/teams/007-expected_params0]",
"tests/test_default_router.py::test_converters[/cvt/teams/1234/members-expected_params1]",
"tests/test_default_router.py::test_converters[/cvt/teams/default/members/700-5-expected_params2]",
"tests/test_default_router.py::test_converters[/cvt/repos/org/repo/compare/xkcd:353-expected_params3]",
"tests/test_default_router.py::test_converters[/cvt/repos/org/repo/compare/gunmachan:1234...kumamon:5678/part-expected_params4]",
"tests/test_default_router.py::test_converters[/cvt/repos/xkcd/353/compare/susan:0001/full-expected_params5]",
"tests/test_default_router.py::test_converters_with_invalid_options[/foo/{bar:int(0)}]",
"tests/test_default_router.py::test_converters_with_invalid_options[/foo/{bar:int(num_digits=0)}]",
"tests/test_default_router.py::test_converters_with_invalid_options[/foo/{bar:int(-1)}/baz]",
"tests/test_default_router.py::test_converters_with_invalid_options[/foo/{bar:int(num_digits=-1)}/baz]",
"tests/test_default_router.py::test_converters_malformed_specification[/foo/{bar:}]",
"tests/test_default_router.py::test_converters_malformed_specification[/foo/{bar:unknown}/baz]",
"tests/test_default_router.py::test_variable",
"tests/test_default_router.py::test_single_character_field_name",
"tests/test_default_router.py::test_literal_vs_variable[/teams/default-19]",
"tests/test_default_router.py::test_literal_vs_variable[/teams/default/members-7]",
"tests/test_default_router.py::test_literal_vs_variable[/cvt/teams/default-31]",
"tests/test_default_router.py::test_literal_vs_variable[/cvt/teams/default/members/1234-10-32]",
"tests/test_default_router.py::test_literal_vs_variable[/teams/1234-6]",
"tests/test_default_router.py::test_literal_vs_variable[/teams/1234/members-7]",
"tests/test_default_router.py::test_literal_vs_variable[/gists/first-20]",
"tests/test_default_router.py::test_literal_vs_variable[/gists/first/raw-18]",
"tests/test_default_router.py::test_literal_vs_variable[/gists/first/pdf-21]",
"tests/test_default_router.py::test_literal_vs_variable[/gists/1776/pdf-21]",
"tests/test_default_router.py::test_literal_vs_variable[/emojis/signs/78-13]",
"tests/test_default_router.py::test_literal_vs_variable[/emojis/signs/78/small.png-24]",
"tests/test_default_router.py::test_literal_vs_variable[/emojis/signs/78/small(png)-25]",
"tests/test_default_router.py::test_literal_vs_variable[/emojis/signs/78/small_png-26]",
"tests/test_default_router.py::test_not_found[/this/does/not/exist]",
"tests/test_default_router.py::test_not_found[/user/bogus]",
"tests/test_default_router.py::test_not_found[/repos/racker/falcon/compare/johndoe:master...janedoe:dev/bogus]",
"tests/test_default_router.py::test_not_found[/teams]",
"tests/test_default_router.py::test_not_found[/teams/42/members/undefined]",
"tests/test_default_router.py::test_not_found[/teams/42/undefined]",
"tests/test_default_router.py::test_not_found[/teams/42/undefined/segments]",
"tests/test_default_router.py::test_not_found[/teams/default/members/undefined]",
"tests/test_default_router.py::test_not_found[/teams/default/members/thing/undefined]",
"tests/test_default_router.py::test_not_found[/teams/default/members/thing/undefined/segments]",
"tests/test_default_router.py::test_not_found[/teams/default/undefined]",
"tests/test_default_router.py::test_not_found[/teams/default/undefined/segments]",
"tests/test_default_router.py::test_not_found[/cvt/teams/default/members]",
"tests/test_default_router.py::test_not_found[/cvt/teams/NaN]",
"tests/test_default_router.py::test_not_found[/cvt/teams/default/members/NaN]",
"tests/test_default_router.py::test_not_found[/emojis/signs]",
"tests/test_default_router.py::test_not_found[/emojis/signs/0/small]",
"tests/test_default_router.py::test_not_found[/emojis/signs/0/undefined]",
"tests/test_default_router.py::test_not_found[/emojis/signs/0/undefined/segments]",
"tests/test_default_router.py::test_not_found[/emojis/signs/20/small]",
"tests/test_default_router.py::test_not_found[/emojis/signs/20/undefined]",
"tests/test_default_router.py::test_not_found[/emojis/signs/42/undefined]",
"tests/test_default_router.py::test_not_found[/emojis/signs/78/undefined]",
"tests/test_default_router.py::test_subsegment_not_found",
"tests/test_default_router.py::test_multivar",
"tests/test_default_router.py::test_complex[-5]",
"tests/test_default_router.py::test_complex[/full-10]",
"tests/test_default_router.py::test_complex[/part-15]",
"tests/test_default_router.py::test_complex_alt[-16-/repos/{org}/{repo}/compare/{usr0}:{branch0}]",
"tests/test_default_router.py::test_complex_alt[/full-17-/repos/{org}/{repo}/compare/{usr0}:{branch0}/full]",
"tests/test_default_router.py::test_options_converters_set",
"tests/test_default_router.py::test_options_converters_update[spam]",
"tests/test_default_router.py::test_options_converters_update[spam_2]",
"tests/test_default_router.py::test_options_converters_invalid_name[has",
"tests/test_default_router.py::test_options_converters_invalid_name[whitespace",
"tests/test_default_router.py::test_options_converters_invalid_name[",
"tests/test_default_router.py::test_options_converters_invalid_name[funky$character]",
"tests/test_default_router.py::test_options_converters_invalid_name[42istheanswer]",
"tests/test_default_router.py::test_options_converters_invalid_name[with-hyphen]",
"tests/test_default_router.py::test_options_converters_invalid_name_on_update",
"tests/test_deps.py::test_deps_mimeparse_correct_package",
"tests/test_error.py::test_with_default_title_and_desc[HTTPBadRequest-400",
"tests/test_error.py::test_with_default_title_and_desc[HTTPForbidden-403",
"tests/test_error.py::test_with_default_title_and_desc[HTTPConflict-409",
"tests/test_error.py::test_with_default_title_and_desc[HTTPLengthRequired-411",
"tests/test_error.py::test_with_default_title_and_desc[HTTPPreconditionFailed-412",
"tests/test_error.py::test_with_default_title_and_desc[HTTPRequestEntityTooLarge-413",
"tests/test_error.py::test_with_default_title_and_desc[HTTPUriTooLong-414",
"tests/test_error.py::test_with_default_title_and_desc[HTTPUnprocessableEntity-422",
"tests/test_error.py::test_with_default_title_and_desc[HTTPLocked-423",
"tests/test_error.py::test_with_default_title_and_desc[HTTPFailedDependency-424",
"tests/test_error.py::test_with_default_title_and_desc[HTTPPreconditionRequired-428",
"tests/test_error.py::test_with_default_title_and_desc[HTTPTooManyRequests-429",
"tests/test_error.py::test_with_default_title_and_desc[HTTPRequestHeaderFieldsTooLarge-431",
"tests/test_error.py::test_with_default_title_and_desc[HTTPUnavailableForLegalReasons-451",
"tests/test_error.py::test_with_default_title_and_desc[HTTPInternalServerError-500",
"tests/test_error.py::test_with_default_title_and_desc[HTTPNotImplemented-501",
"tests/test_error.py::test_with_default_title_and_desc[HTTPBadGateway-502",
"tests/test_error.py::test_with_default_title_and_desc[HTTPServiceUnavailable-503",
"tests/test_error.py::test_with_default_title_and_desc[HTTPGatewayTimeout-504",
"tests/test_error.py::test_with_default_title_and_desc[HTTPVersionNotSupported-505",
"tests/test_error.py::test_with_default_title_and_desc[HTTPInsufficientStorage-507",
"tests/test_error.py::test_with_default_title_and_desc[HTTPLoopDetected-508",
"tests/test_error.py::test_with_default_title_and_desc[HTTPNetworkAuthenticationRequired-511",
"tests/test_error.py::test_with_title_and_desc[HTTPBadRequest]",
"tests/test_error.py::test_with_title_and_desc[HTTPForbidden]",
"tests/test_error.py::test_with_title_and_desc[HTTPConflict]",
"tests/test_error.py::test_with_title_and_desc[HTTPLengthRequired]",
"tests/test_error.py::test_with_title_and_desc[HTTPPreconditionFailed]",
"tests/test_error.py::test_with_title_and_desc[HTTPPreconditionRequired]",
"tests/test_error.py::test_with_title_and_desc[HTTPUriTooLong]",
"tests/test_error.py::test_with_title_and_desc[HTTPUnprocessableEntity]",
"tests/test_error.py::test_with_title_and_desc[HTTPLocked]",
"tests/test_error.py::test_with_title_and_desc[HTTPFailedDependency]",
"tests/test_error.py::test_with_title_and_desc[HTTPRequestHeaderFieldsTooLarge]",
"tests/test_error.py::test_with_title_and_desc[HTTPUnavailableForLegalReasons]",
"tests/test_error.py::test_with_title_and_desc[HTTPInternalServerError]",
"tests/test_error.py::test_with_title_and_desc[HTTPNotImplemented]",
"tests/test_error.py::test_with_title_and_desc[HTTPBadGateway]",
"tests/test_error.py::test_with_title_and_desc[HTTPServiceUnavailable]",
"tests/test_error.py::test_with_title_and_desc[HTTPGatewayTimeout]",
"tests/test_error.py::test_with_title_and_desc[HTTPVersionNotSupported]",
"tests/test_error.py::test_with_title_and_desc[HTTPInsufficientStorage]",
"tests/test_error.py::test_with_title_and_desc[HTTPLoopDetected]",
"tests/test_error.py::test_with_title_and_desc[HTTPNetworkAuthenticationRequired]",
"tests/test_error.py::test_with_retry_after[HTTPServiceUnavailable]",
"tests/test_error.py::test_with_retry_after[HTTPTooManyRequests]",
"tests/test_error.py::test_with_retry_after[HTTPRequestEntityTooLarge]",
"tests/test_error.py::test_http_unauthorized_no_title_and_desc_and_challenges",
"tests/test_error.py::test_http_unauthorized_with_title_and_desc_and_challenges",
"tests/test_error.py::test_http_not_acceptable_no_title_and_desc_and_challenges",
"tests/test_error.py::test_http_not_acceptable_with_title_and_desc_and_challenges",
"tests/test_error.py::test_http_unsupported_media_type_no_title_and_desc_and_challenges",
"tests/test_error.py::test_http_unsupported_media_type_with_title_and_desc_and_challenges",
"tests/test_error.py::test_http_error_repr",
"tests/test_error_handlers.py::TestErrorHandler::test_caught_error",
"tests/test_error_handlers.py::TestErrorHandler::test_uncaught_error",
"tests/test_error_handlers.py::TestErrorHandler::test_uncaught_error_else",
"tests/test_error_handlers.py::TestErrorHandler::test_converted_error",
"tests/test_error_handlers.py::TestErrorHandler::test_handle_not_defined",
"tests/test_error_handlers.py::TestErrorHandler::test_subclass_error",
"tests/test_error_handlers.py::TestErrorHandler::test_error_order_duplicate",
"tests/test_error_handlers.py::TestErrorHandler::test_error_order_subclass",
"tests/test_error_handlers.py::TestErrorHandler::test_error_order_subclass_masked",
"tests/test_headers.py::TestHeaders::test_content_length",
"tests/test_headers.py::TestHeaders::test_default_value",
"tests/test_headers.py::TestHeaders::test_required_header",
"tests/test_headers.py::TestHeaders::test_no_content_length[204",
"tests/test_headers.py::TestHeaders::test_no_content_length[304",
"tests/test_headers.py::TestHeaders::test_content_header_missing",
"tests/test_headers.py::TestHeaders::test_passthrough_request_headers",
"tests/test_headers.py::TestHeaders::test_headers_as_list",
"tests/test_headers.py::TestHeaders::test_default_media_type",
"tests/test_headers.py::TestHeaders::test_override_default_media_type[text/plain;",
"tests/test_headers.py::TestHeaders::test_override_default_media_type[text/plain-Hello",
"tests/test_headers.py::TestHeaders::test_override_default_media_type_missing_encoding",
"tests/test_headers.py::TestHeaders::test_response_header_helpers_on_get",
"tests/test_headers.py::TestHeaders::test_unicode_location_headers",
"tests/test_headers.py::TestHeaders::test_unicode_headers_convertable",
"tests/test_headers.py::TestHeaders::test_response_set_and_get_header",
"tests/test_headers.py::TestHeaders::test_response_append_header",
"tests/test_headers.py::TestHeaders::test_vary_star",
"tests/test_headers.py::TestHeaders::test_vary_header[vary0-accept-encoding]",
"tests/test_headers.py::TestHeaders::test_vary_header[vary1-accept-encoding,",
"tests/test_headers.py::TestHeaders::test_vary_header[vary2-accept-encoding,",
"tests/test_headers.py::TestHeaders::test_content_type_no_body",
"tests/test_headers.py::TestHeaders::test_no_content_type[204",
"tests/test_headers.py::TestHeaders::test_no_content_type[304",
"tests/test_headers.py::TestHeaders::test_custom_content_type",
"tests/test_headers.py::TestHeaders::test_add_link_single",
"tests/test_headers.py::TestHeaders::test_add_link_multiple",
"tests/test_headers.py::TestHeaders::test_add_link_with_title",
"tests/test_headers.py::TestHeaders::test_add_link_with_title_star",
"tests/test_headers.py::TestHeaders::test_add_link_with_anchor",
"tests/test_headers.py::TestHeaders::test_add_link_with_hreflang",
"tests/test_headers.py::TestHeaders::test_add_link_with_hreflang_multi",
"tests/test_headers.py::TestHeaders::test_add_link_with_type_hint",
"tests/test_headers.py::TestHeaders::test_add_link_complex",
"tests/test_headers.py::TestHeaders::test_content_length_options",
"tests/test_hello.py::TestHelloWorld::test_env_headers_list_of_tuples",
"tests/test_hello.py::TestHelloWorld::test_root_route",
"tests/test_hello.py::TestHelloWorld::test_no_route",
"tests/test_hello.py::TestHelloWorld::test_body[/body-resource0-<lambda>]",
"tests/test_hello.py::TestHelloWorld::test_body[/bytes-resource1-<lambda>]",
"tests/test_hello.py::TestHelloWorld::test_body[/data-resource2-<lambda>]",
"tests/test_hello.py::TestHelloWorld::test_no_body_on_head",
"tests/test_hello.py::TestHelloWorld::test_stream_chunked",
"tests/test_hello.py::TestHelloWorld::test_stream_known_len",
"tests/test_hello.py::TestHelloWorld::test_filelike",
"tests/test_hello.py::TestHelloWorld::test_filelike_closing[ClosingBytesIO-True]",
"tests/test_hello.py::TestHelloWorld::test_filelike_closing[NonClosingBytesIO-False]",
"tests/test_hello.py::TestHelloWorld::test_filelike_using_helper",
"tests/test_hello.py::TestHelloWorld::test_status_not_set",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_get",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_put",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_post_not_allowed",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_report",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_misc",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_methods_not_allowed_simple",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_methods_not_allowed_complex",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_method_not_allowed_with_param",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_default_on_options",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_on_options",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_bogus_method",
"tests/test_httperror.py::TestHTTPError::test_base_class",
"tests/test_httperror.py::TestHTTPError::test_no_description_json",
"tests/test_httperror.py::TestHTTPError::test_no_description_xml",
"tests/test_httperror.py::TestHTTPError::test_client_does_not_accept_json_or_xml",
"tests/test_httperror.py::TestHTTPError::test_custom_old_error_serializer",
"tests/test_httperror.py::TestHTTPError::test_custom_old_error_serializer_no_body",
"tests/test_httperror.py::TestHTTPError::test_custom_new_error_serializer",
"tests/test_httperror.py::TestHTTPError::test_client_does_not_accept_anything",
"tests/test_httperror.py::TestHTTPError::test_forbidden[application/json]",
"tests/test_httperror.py::TestHTTPError::test_forbidden[application/vnd.company.system.project.resource+json;v=1.1]",
"tests/test_httperror.py::TestHTTPError::test_forbidden[application/json-patch+json]",
"tests/test_httperror.py::TestHTTPError::test_epic_fail_json",
"tests/test_httperror.py::TestHTTPError::test_epic_fail_xml[text/xml]",
"tests/test_httperror.py::TestHTTPError::test_epic_fail_xml[application/xml]",
"tests/test_httperror.py::TestHTTPError::test_epic_fail_xml[application/vnd.company.system.project.resource+xml;v=1.1]",
"tests/test_httperror.py::TestHTTPError::test_epic_fail_xml[application/atom+xml]",
"tests/test_httperror.py::TestHTTPError::test_unicode_json",
"tests/test_httperror.py::TestHTTPError::test_unicode_xml",
"tests/test_httperror.py::TestHTTPError::test_401",
"tests/test_httperror.py::TestHTTPError::test_404_without_body",
"tests/test_httperror.py::TestHTTPError::test_404_with_body",
"tests/test_httperror.py::TestHTTPError::test_405_without_body",
"tests/test_httperror.py::TestHTTPError::test_405_without_body_with_extra_headers",
"tests/test_httperror.py::TestHTTPError::test_405_without_body_with_extra_headers_double_check",
"tests/test_httperror.py::TestHTTPError::test_405_with_body",
"tests/test_httperror.py::TestHTTPError::test_410_without_body",
"tests/test_httperror.py::TestHTTPError::test_410_with_body",
"tests/test_httperror.py::TestHTTPError::test_411",
"tests/test_httperror.py::TestHTTPError::test_413",
"tests/test_httperror.py::TestHTTPError::test_temporary_413_integer_retry_after",
"tests/test_httperror.py::TestHTTPError::test_temporary_413_datetime_retry_after",
"tests/test_httperror.py::TestHTTPError::test_414",
"tests/test_httperror.py::TestHTTPError::test_414_with_title",
"tests/test_httperror.py::TestHTTPError::test_414_with_description",
"tests/test_httperror.py::TestHTTPError::test_414_with_custom_kwargs",
"tests/test_httperror.py::TestHTTPError::test_416",
"tests/test_httperror.py::TestHTTPError::test_429_no_retry_after",
"tests/test_httperror.py::TestHTTPError::test_429",
"tests/test_httperror.py::TestHTTPError::test_429_datetime",
"tests/test_httperror.py::TestHTTPError::test_503_integer_retry_after",
"tests/test_httperror.py::TestHTTPError::test_503_datetime_retry_after",
"tests/test_httperror.py::TestHTTPError::test_invalid_header",
"tests/test_httperror.py::TestHTTPError::test_missing_header",
"tests/test_httperror.py::TestHTTPError::test_invalid_param",
"tests/test_httperror.py::TestHTTPError::test_missing_param",
"tests/test_httperror.py::TestHTTPError::test_misc",
"tests/test_httperror.py::TestHTTPError::test_title_default_message_if_none",
"tests/test_httpstatus.py::TestHTTPStatus::test_raise_status_in_before_hook",
"tests/test_httpstatus.py::TestHTTPStatus::test_raise_status_in_responder",
"tests/test_httpstatus.py::TestHTTPStatus::test_raise_status_runs_after_hooks",
"tests/test_httpstatus.py::TestHTTPStatus::test_raise_status_survives_after_hooks",
"tests/test_httpstatus.py::TestHTTPStatus::test_raise_status_empty_body",
"tests/test_httpstatus.py::TestHTTPStatusWithMiddleware::test_raise_status_in_process_request",
"tests/test_httpstatus.py::TestHTTPStatusWithMiddleware::test_raise_status_in_process_resource",
"tests/test_httpstatus.py::TestHTTPStatusWithMiddleware::test_raise_status_runs_process_response",
"tests/test_media_handlers.py::test_base_handler_contract",
"tests/test_middleware.py::TestRequestTimeMiddleware::test_skip_process_resource",
"tests/test_middleware.py::TestRequestTimeMiddleware::test_add_invalid_middleware",
"tests/test_middleware.py::TestRequestTimeMiddleware::test_response_middleware_raises_exception",
"tests/test_middleware.py::TestRequestTimeMiddleware::test_log_get_request",
"tests/test_middleware.py::TestTransactionIdMiddleware::test_generate_trans_id_with_request",
"tests/test_middleware.py::TestSeveralMiddlewares::test_generate_trans_id_and_time_with_request",
"tests/test_middleware.py::TestSeveralMiddlewares::test_legacy_middleware_called_with_correct_args",
"tests/test_middleware.py::TestSeveralMiddlewares::test_middleware_execution_order",
"tests/test_middleware.py::TestSeveralMiddlewares::test_independent_middleware_execution_order",
"tests/test_middleware.py::TestSeveralMiddlewares::test_multiple_reponse_mw_throw_exception",
"tests/test_middleware.py::TestSeveralMiddlewares::test_inner_mw_throw_exception",
"tests/test_middleware.py::TestSeveralMiddlewares::test_inner_mw_with_ex_handler_throw_exception",
"tests/test_middleware.py::TestSeveralMiddlewares::test_outer_mw_with_ex_handler_throw_exception",
"tests/test_middleware.py::TestSeveralMiddlewares::test_order_mw_executed_when_exception_in_resp",
"tests/test_middleware.py::TestSeveralMiddlewares::test_order_independent_mw_executed_when_exception_in_resp",
"tests/test_middleware.py::TestSeveralMiddlewares::test_order_mw_executed_when_exception_in_req",
"tests/test_middleware.py::TestSeveralMiddlewares::test_order_independent_mw_executed_when_exception_in_req",
"tests/test_middleware.py::TestSeveralMiddlewares::test_order_mw_executed_when_exception_in_rsrc",
"tests/test_middleware.py::TestSeveralMiddlewares::test_order_independent_mw_executed_when_exception_in_rsrc",
"tests/test_middleware.py::TestRemoveBasePathMiddleware::test_base_path_is_removed_before_routing",
"tests/test_middleware.py::TestResourceMiddleware::test_can_access_resource_params",
"tests/test_middleware.py::TestErrorHandling::test_error_composed_before_resp_middleware_called",
"tests/test_middleware.py::TestErrorHandling::test_http_status_raised_from_error_handler",
"tests/test_options.py::TestRequestOptions::test_option_defaults",
"tests/test_options.py::TestRequestOptions::test_options_toggle[keep_blank_qs_values]",
"tests/test_options.py::TestRequestOptions::test_options_toggle[auto_parse_form_urlencoded]",
"tests/test_options.py::TestRequestOptions::test_options_toggle[auto_parse_qs_csv]",
"tests/test_options.py::TestRequestOptions::test_options_toggle[strip_url_path_trailing_slash]",
"tests/test_options.py::TestRequestOptions::test_incorrect_options",
"tests/test_query_params.py::TestQueryParams::test_none[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_none[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_blank[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_blank[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_simple[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_simple[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_percent_encoded[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_percent_encoded[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_option_auto_parse_qs_csv_simple_false[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_option_auto_parse_qs_csv_simple_false[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_option_auto_parse_qs_csv_simple_true[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_option_auto_parse_qs_csv_simple_true[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_option_auto_parse_qs_csv_complex_false[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_option_auto_parse_qs_csv_complex_false[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_bad_percentage[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_bad_percentage[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_allowed_names[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_allowed_names[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_required[get_param-simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_required[get_param-simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_required[get_param_as_int-simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_required[get_param_as_int-simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_required[get_param_as_bool-simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_required[get_param_as_bool-simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_required[get_param_as_list-simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_required[get_param_as_list-simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_int[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_int[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_int_neg[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_int_neg[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_boolean[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_boolean[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_boolean_blank[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_boolean_blank[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_list_type[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_list_type[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_list_type_blank[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_list_type_blank[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_list_transformer[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_list_transformer[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_param_property[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_param_property[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_multiple_form_keys[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_multiple_form_keys[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_multiple_keys_as_bool[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_multiple_keys_as_bool[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_multiple_keys_as_int[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_multiple_keys_as_int[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_multiple_form_keys_as_list[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_multiple_form_keys_as_list[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_valid[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_valid[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_missing_param[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_missing_param[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_valid_with_format[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_valid_with_format[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_store[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_store[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_invalid[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_invalid[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_valid[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_valid[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_missing_param[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_missing_param[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_valid_with_format[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_valid_with_format[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_store[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_store[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_invalid[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_invalid[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_dict_valid[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_dict_valid[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_dict_missing_param[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_dict_missing_param[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_dict_store[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_dict_store[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_dict_invalid[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_dict_invalid[simulate_request_post_query_params]",
"tests/test_query_params.py::TestPostQueryParams::test_http_methods_body_expected[POST]",
"tests/test_query_params.py::TestPostQueryParams::test_http_methods_body_expected[PUT]",
"tests/test_query_params.py::TestPostQueryParams::test_http_methods_body_expected[PATCH]",
"tests/test_query_params.py::TestPostQueryParams::test_http_methods_body_expected[DELETE]",
"tests/test_query_params.py::TestPostQueryParams::test_http_methods_body_expected[OPTIONS]",
"tests/test_query_params.py::TestPostQueryParams::test_http_methods_body_not_expected[GET]",
"tests/test_query_params.py::TestPostQueryParams::test_http_methods_body_not_expected[HEAD]",
"tests/test_query_params.py::TestPostQueryParams::test_non_ascii",
"tests/test_query_params.py::TestPostQueryParams::test_empty_body",
"tests/test_query_params.py::TestPostQueryParams::test_empty_body_no_content_length",
"tests/test_query_params.py::TestPostQueryParams::test_explicitly_disable_auto_parse",
"tests/test_query_params.py::TestPostQueryParamsDefaultBehavior::test_dont_auto_parse_by_default",
"tests/test_redirects.py::TestRedirects::test_redirect[GET-301",
"tests/test_redirects.py::TestRedirects::test_redirect[POST-302",
"tests/test_redirects.py::TestRedirects::test_redirect[PUT-303",
"tests/test_redirects.py::TestRedirects::test_redirect[DELETE-307",
"tests/test_redirects.py::TestRedirects::test_redirect[HEAD-308",
"tests/test_request_access_route.py::test_remote_addr_only",
"tests/test_request_access_route.py::test_rfc_forwarded",
"tests/test_request_access_route.py::test_malformed_rfc_forwarded",
"tests/test_request_access_route.py::test_x_forwarded_for",
"tests/test_request_access_route.py::test_x_real_ip",
"tests/test_request_access_route.py::test_remote_addr",
"tests/test_request_access_route.py::test_remote_addr_missing",
"tests/test_request_attrs.py::TestRequestAttributes::test_missing_qs",
"tests/test_request_attrs.py::TestRequestAttributes::test_empty",
"tests/test_request_attrs.py::TestRequestAttributes::test_host",
"tests/test_request_attrs.py::TestRequestAttributes::test_subdomain",
"tests/test_request_attrs.py::TestRequestAttributes::test_reconstruct_url",
"tests/test_request_attrs.py::TestRequestAttributes::test_nonlatin_path[/hello_\\u043f\\u0440\\u0438\\u0432\\u0435\\u0442]",
"tests/test_request_attrs.py::TestRequestAttributes::test_nonlatin_path[/test/%E5%BB%B6%E5%AE%89]",
"tests/test_request_attrs.py::TestRequestAttributes::test_nonlatin_path[/test/%C3%A4%C3%B6%C3%BC%C3%9F%E2%82%AC]",
"tests/test_request_attrs.py::TestRequestAttributes::test_uri",
"tests/test_request_attrs.py::TestRequestAttributes::test_uri_https",
"tests/test_request_attrs.py::TestRequestAttributes::test_uri_http_1_0",
"tests/test_request_attrs.py::TestRequestAttributes::test_relative_uri",
"tests/test_request_attrs.py::TestRequestAttributes::test_client_accepts",
"tests/test_request_attrs.py::TestRequestAttributes::test_client_accepts_bogus",
"tests/test_request_attrs.py::TestRequestAttributes::test_client_accepts_props",
"tests/test_request_attrs.py::TestRequestAttributes::test_client_prefers",
"tests/test_request_attrs.py::TestRequestAttributes::test_range",
"tests/test_request_attrs.py::TestRequestAttributes::test_range_unit",
"tests/test_request_attrs.py::TestRequestAttributes::test_range_invalid",
"tests/test_request_attrs.py::TestRequestAttributes::test_missing_attribute_header",
"tests/test_request_attrs.py::TestRequestAttributes::test_content_length",
"tests/test_request_attrs.py::TestRequestAttributes::test_bogus_content_length_nan",
"tests/test_request_attrs.py::TestRequestAttributes::test_bogus_content_length_neg",
"tests/test_request_attrs.py::TestRequestAttributes::test_date[Date-date]",
"tests/test_request_attrs.py::TestRequestAttributes::test_date[If-Modified-Since-if_modified_since]",
"tests/test_request_attrs.py::TestRequestAttributes::test_date[If-Unmodified-Since-if_unmodified_since]",
"tests/test_request_attrs.py::TestRequestAttributes::test_date_invalid[Date-date]",
"tests/test_request_attrs.py::TestRequestAttributes::test_date_invalid[If-Modified-Since-if_modified_since]",
"tests/test_request_attrs.py::TestRequestAttributes::test_date_invalid[If-Unmodified-Since-if_unmodified_since]",
"tests/test_request_attrs.py::TestRequestAttributes::test_date_missing[date]",
"tests/test_request_attrs.py::TestRequestAttributes::test_date_missing[if_modified_since]",
"tests/test_request_attrs.py::TestRequestAttributes::test_date_missing[if_unmodified_since]",
"tests/test_request_attrs.py::TestRequestAttributes::test_attribute_headers",
"tests/test_request_attrs.py::TestRequestAttributes::test_method",
"tests/test_request_attrs.py::TestRequestAttributes::test_empty_path",
"tests/test_request_attrs.py::TestRequestAttributes::test_content_type_method",
"tests/test_request_attrs.py::TestRequestAttributes::test_content_length_method",
"tests/test_request_attrs.py::TestRequestAttributes::test_port_explicit[HTTP/1.0]",
"tests/test_request_attrs.py::TestRequestAttributes::test_port_explicit[HTTP/1.1]",
"tests/test_request_attrs.py::TestRequestAttributes::test_scheme_https[HTTP/1.0]",
"tests/test_request_attrs.py::TestRequestAttributes::test_scheme_https[HTTP/1.1]",
"tests/test_request_attrs.py::TestRequestAttributes::test_scheme_http[HTTP/1.0-True]",
"tests/test_request_attrs.py::TestRequestAttributes::test_scheme_http[HTTP/1.0-False]",
"tests/test_request_attrs.py::TestRequestAttributes::test_scheme_http[HTTP/1.1-True]",
"tests/test_request_attrs.py::TestRequestAttributes::test_scheme_http[HTTP/1.1-False]",
"tests/test_request_attrs.py::TestRequestAttributes::test_netloc_default_port[HTTP/1.0]",
"tests/test_request_attrs.py::TestRequestAttributes::test_netloc_default_port[HTTP/1.1]",
"tests/test_request_attrs.py::TestRequestAttributes::test_netloc_nondefault_port[HTTP/1.0]",
"tests/test_request_attrs.py::TestRequestAttributes::test_netloc_nondefault_port[HTTP/1.1]",
"tests/test_request_attrs.py::TestRequestAttributes::test_netloc_from_env[HTTP/1.0]",
"tests/test_request_attrs.py::TestRequestAttributes::test_netloc_from_env[HTTP/1.1]",
"tests/test_request_attrs.py::TestRequestAttributes::test_app_present",
"tests/test_request_attrs.py::TestRequestAttributes::test_app_blank",
"tests/test_request_attrs.py::TestRequestAttributes::test_app_missing",
"tests/test_request_body.py::TestRequestBody::test_empty_body",
"tests/test_request_body.py::TestRequestBody::test_tiny_body",
"tests/test_request_body.py::TestRequestBody::test_tiny_body_overflow",
"tests/test_request_body.py::TestRequestBody::test_read_body",
"tests/test_request_body.py::TestRequestBody::test_bounded_stream_property_empty_body",
"tests/test_request_body.py::TestRequestBody::test_body_stream_wrapper",
"tests/test_request_body.py::TestRequestBody::test_request_repr",
"tests/test_request_context.py::TestRequestContext::test_default_request_context",
"tests/test_request_context.py::TestRequestContext::test_custom_request_context",
"tests/test_request_context.py::TestRequestContext::test_custom_request_context_failure",
"tests/test_request_context.py::TestRequestContext::test_custom_request_context_request_access",
"tests/test_request_forwarded.py::test_no_forwarded_headers",
"tests/test_request_forwarded.py::test_x_forwarded_host",
"tests/test_request_forwarded.py::test_x_forwarded_proto",
"tests/test_request_forwarded.py::test_forwarded_host",
"tests/test_request_forwarded.py::test_forwarded_multiple_params",
"tests/test_request_forwarded.py::test_forwarded_missing_first_hop_host",
"tests/test_request_media.py::test_json[None]",
"tests/test_request_media.py::test_json[*/*]",
"tests/test_request_media.py::test_json[application/json]",
"tests/test_request_media.py::test_json[application/json;",
"tests/test_request_media.py::test_msgpack[application/msgpack]",
"tests/test_request_media.py::test_msgpack[application/msgpack;",
"tests/test_request_media.py::test_msgpack[application/x-msgpack]",
"tests/test_request_media.py::test_unknown_media_type[nope/json]",
"tests/test_request_media.py::test_invalid_json",
"tests/test_request_media.py::test_invalid_msgpack",
"tests/test_request_media.py::test_invalid_stream_fails_gracefully",
"tests/test_request_media.py::test_use_cached_media",
"tests/test_response.py::test_response_set_content_type_set",
"tests/test_response.py::test_response_set_content_type_not_set",
"tests/test_response_body.py::TestResponseBody::test_append_body",
"tests/test_response_body.py::TestResponseBody::test_response_repr",
"tests/test_response_context.py::TestRequestContext::test_default_response_context",
"tests/test_response_context.py::TestRequestContext::test_custom_response_context",
"tests/test_response_context.py::TestRequestContext::test_custom_response_context_failure",
"tests/test_response_context.py::TestRequestContext::test_custom_response_context_factory",
"tests/test_response_media.py::test_json[*/*]",
"tests/test_response_media.py::test_json[application/json;",
"tests/test_response_media.py::test_msgpack[application/msgpack]",
"tests/test_response_media.py::test_msgpack[application/msgpack;",
"tests/test_response_media.py::test_msgpack[application/x-msgpack]",
"tests/test_response_media.py::test_unknown_media_type",
"tests/test_response_media.py::test_use_cached_media",
"tests/test_response_media.py::test_default_media_type[]",
"tests/test_sinks.py::TestDefaultRouting::test_single_default_pattern",
"tests/test_sinks.py::TestDefaultRouting::test_single_simple_pattern",
"tests/test_sinks.py::TestDefaultRouting::test_single_compiled_pattern",
"tests/test_sinks.py::TestDefaultRouting::test_named_groups",
"tests/test_sinks.py::TestDefaultRouting::test_multiple_patterns",
"tests/test_sinks.py::TestDefaultRouting::test_with_route",
"tests/test_sinks.py::TestDefaultRouting::test_route_precedence",
"tests/test_sinks.py::TestDefaultRouting::test_route_precedence_with_id",
"tests/test_sinks.py::TestDefaultRouting::test_route_precedence_with_both_id",
"tests/test_slots.py::TestSlots::test_slots_request",
"tests/test_slots.py::TestSlots::test_slots_response",
"tests/test_static.py::test_bad_path[/static]",
"tests/test_static.py::test_bad_path[/static/]",
"tests/test_static.py::test_bad_path[/static/.]",
"tests/test_static.py::test_bad_path[/static/..]",
"tests/test_static.py::test_bad_path[/static/../.]",
"tests/test_static.py::test_bad_path[/static/.././etc/passwd]",
"tests/test_static.py::test_bad_path[/static/../etc/passwd]",
"tests/test_static.py::test_bad_path[/static/css/../../secret]",
"tests/test_static.py::test_bad_path[/static/css/../../etc/passwd]",
"tests/test_static.py::test_bad_path[/static/./../etc/passwd]",
"tests/test_static.py::test_bad_path[/static/css/../.\\\\056/etc/passwd]",
"tests/test_static.py::test_bad_path[/static/./\\\\056./etc/passwd]",
"tests/test_static.py::test_bad_path[/static/\\\\056\\\\056/etc/passwd]",
"tests/test_static.py::test_bad_path[/static//test.css]",
"tests/test_static.py::test_bad_path[/static//COM10]",
"tests/test_static.py::test_bad_path[/static/path//test.css]",
"tests/test_static.py::test_bad_path[/static/path///test.css]",
"tests/test_static.py::test_bad_path[/static/path////test.css]",
"tests/test_static.py::test_bad_path[/static/path/foo//test.css]",
"tests/test_static.py::test_bad_path[/static/.\\x00ssh/authorized_keys]",
"tests/test_static.py::test_bad_path[/static/.\\x1fssh/authorized_keys]",
"tests/test_static.py::test_bad_path[/static/.\\x80ssh/authorized_keys]",
"tests/test_static.py::test_bad_path[/static/.\\x9fssh/authorized_keys]",
"tests/test_static.py::test_bad_path[/static/~/.ssh/authorized_keys]",
"tests/test_static.py::test_bad_path[/static/.ssh/authorized_key?]",
"tests/test_static.py::test_bad_path[/static/.ssh/authorized_key>foo]",
"tests/test_static.py::test_bad_path[/static/.ssh/authorized_key|foo]",
"tests/test_static.py::test_bad_path[/static/.ssh/authorized_key<foo]",
"tests/test_static.py::test_bad_path[/static/something:something]",
"tests/test_static.py::test_bad_path[/static/thing*.sql]",
"tests/test_static.py::test_bad_path[/static/'thing'.sql]",
"tests/test_static.py::test_bad_path[/static/\"thing\".sql]",
"tests/test_static.py::test_bad_path[/static/something.]",
"tests/test_static.py::test_bad_path[/static/something..]",
"tests/test_static.py::test_bad_path[/static/something",
"tests/test_static.py::test_bad_path[/static/",
"tests/test_static.py::test_bad_path[/static/something\\t]",
"tests/test_static.py::test_bad_path[/static/\\tsomething]",
"tests/test_static.py::test_bad_path[/static/ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttx]",
"tests/test_static.py::test_invalid_args[static-/var/www/statics]",
"tests/test_static.py::test_invalid_args[/static-./var/www/statics]",
"tests/test_static.py::test_invalid_args[/static-statics]",
"tests/test_static.py::test_invalid_args[/static-../statics]",
"tests/test_static.py::test_good_path[/static/-/css/test.css-/css/test.css-text/css]",
"tests/test_static.py::test_good_path[/static-/css/test.css-/css/test.css-text/css]",
"tests/test_static.py::test_good_path[/static-/tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt-/tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt-application/octet-stream]",
"tests/test_static.py::test_good_path[/static-/.test.css-/.test.css-text/css]",
"tests/test_static.py::test_good_path[/some/download/-/report.pdf-/report.pdf-application/pdf]",
"tests/test_static.py::test_good_path[/some/download/-/Fancy",
"tests/test_static.py::test_good_path[/some/download-/report.zip-/report.zip-application/zip]",
"tests/test_static.py::test_good_path[/some/download-/foo/../report.zip-/report.zip-application/zip]",
"tests/test_static.py::test_good_path[/some/download-/foo/../bar/../report.zip-/report.zip-application/zip]",
"tests/test_static.py::test_good_path[/some/download-/foo/bar/../../report.zip-/report.zip-application/zip]",
"tests/test_static.py::test_lifo",
"tests/test_static.py::test_lifo_negative",
"tests/test_static.py::test_downloadable",
"tests/test_static.py::test_downloadable_not_found",
"tests/test_uri_converters.py::test_int_converter[123-None-None-None-123]",
"tests/test_uri_converters.py::test_int_converter[01-None-None-None-1]",
"tests/test_uri_converters.py::test_int_converter[001-None-None-None-1]",
"tests/test_uri_converters.py::test_int_converter[0-None-None-None-0]",
"tests/test_uri_converters.py::test_int_converter[00-None-None-None-0]",
"tests/test_uri_converters.py::test_int_converter[1-1-None-None-1]",
"tests/test_uri_converters.py::test_int_converter[12-1-None-None-None0]",
"tests/test_uri_converters.py::test_int_converter[12-2-None-None-120]",
"tests/test_uri_converters.py::test_int_converter[1-1-1-1-1]",
"tests/test_uri_converters.py::test_int_converter[1-1-1-None-1]",
"tests/test_uri_converters.py::test_int_converter[1-1-1-2-1]",
"tests/test_uri_converters.py::test_int_converter[1-1-2-None-None]",
"tests/test_uri_converters.py::test_int_converter[1-1-2-1-None]",
"tests/test_uri_converters.py::test_int_converter[2-1-1-2-2]",
"tests/test_uri_converters.py::test_int_converter[2-1-2-2-2]",
"tests/test_uri_converters.py::test_int_converter[3-1-1-2-None]",
"tests/test_uri_converters.py::test_int_converter[12-1-None-None-None1]",
"tests/test_uri_converters.py::test_int_converter[12-1-1-12-None]",
"tests/test_uri_converters.py::test_int_converter[12-2-None-None-121]",
"tests/test_uri_converters.py::test_int_converter[12-2-1-12-12]",
"tests/test_uri_converters.py::test_int_converter[12-2-12-12-12]",
"tests/test_uri_converters.py::test_int_converter[12-2-13-12-None]",
"tests/test_uri_converters.py::test_int_converter[12-2-13-13-None]",
"tests/test_uri_converters.py::test_int_converter_malformed[0x0F]",
"tests/test_uri_converters.py::test_int_converter_malformed[something]",
"tests/test_uri_converters.py::test_int_converter_malformed[]",
"tests/test_uri_converters.py::test_int_converter_malformed[",
"tests/test_uri_converters.py::test_int_converter_malformed[123",
"tests/test_uri_converters.py::test_int_converter_malformed[123\\t]",
"tests/test_uri_converters.py::test_int_converter_malformed[123\\n]",
"tests/test_uri_converters.py::test_int_converter_malformed[123\\r]",
"tests/test_uri_converters.py::test_int_converter_malformed[123\\x0b]",
"tests/test_uri_converters.py::test_int_converter_malformed[123\\x0c]",
"tests/test_uri_converters.py::test_int_converter_malformed[\\t123]",
"tests/test_uri_converters.py::test_int_converter_malformed[\\n123]",
"tests/test_uri_converters.py::test_int_converter_malformed[\\r123]",
"tests/test_uri_converters.py::test_int_converter_malformed[\\x0b123]",
"tests/test_uri_converters.py::test_int_converter_malformed[\\x0c123]",
"tests/test_uri_converters.py::test_int_converter_invalid_config[0]",
"tests/test_uri_converters.py::test_int_converter_invalid_config[-1]",
"tests/test_uri_converters.py::test_int_converter_invalid_config[-10]",
"tests/test_uri_converters.py::test_datetime_converter[07-03-17-%m-%d-%y-expected0]",
"tests/test_uri_converters.py::test_datetime_converter[07-03-17",
"tests/test_uri_converters.py::test_datetime_converter[2017-07-03T14:30:01Z-%Y-%m-%dT%H:%M:%SZ-expected2]",
"tests/test_uri_converters.py::test_datetime_converter[2017-07-03T14:30:01-%Y-%m-%dT%H:%M:%S-expected3]",
"tests/test_uri_converters.py::test_datetime_converter[2017_19-%Y_%H-expected4]",
"tests/test_uri_converters.py::test_datetime_converter[2017-07-03T14:30:01-%Y-%m-%dT%H:%M:%SZ-None]",
"tests/test_uri_converters.py::test_datetime_converter[",
"tests/test_uri_converters.py::test_datetime_converter[07",
"tests/test_uri_converters.py::test_datetime_converter_default_format",
"tests/test_uri_converters.py::test_uuid_converter[378360d3-4190-4f9f-a1ed-d1346d56fafe-expected0]",
"tests/test_uri_converters.py::test_uuid_converter[378360d34190-4f9f-a1ed-d1346d56fafe-expected1]",
"tests/test_uri_converters.py::test_uuid_converter[378360d341904f9fa1edd1346d56fafe-expected2]",
"tests/test_uri_converters.py::test_uuid_converter[urn:uuid:378360d3-4190-4f9f-a1ed-d1346d56fafe-expected3]",
"tests/test_uri_converters.py::test_uuid_converter[urn:uuid:378360d341904f9fa1edd1346d56fafe-expected4]",
"tests/test_uri_converters.py::test_uuid_converter[",
"tests/test_uri_converters.py::test_uuid_converter[378360d3-4190-4f9f-a1ed-d1346d56fafe",
"tests/test_uri_converters.py::test_uuid_converter[378360d3-4190-4f9f-a1ed-d1346d56faf-None]",
"tests/test_uri_converters.py::test_uuid_converter[3-None]",
"tests/test_uri_converters.py::test_uuid_converter[378360d3-4190-4f9f-a1ed-d1346d56fafg-None]",
"tests/test_uri_converters.py::test_uuid_converter[378360d3_4190_4f9f_a1ed_d1346d56fafe-None]",
"tests/test_uri_templates.py::test_root_path",
"tests/test_uri_templates.py::test_no_vars",
"tests/test_uri_templates.py::test_special_chars",
"tests/test_uri_templates.py::test_single[id]",
"tests/test_uri_templates.py::test_single[id123]",
"tests/test_uri_templates.py::test_single[widget_id]",
"tests/test_uri_templates.py::test_int_converter[/{id:int}]",
"tests/test_uri_templates.py::test_int_converter[/{id:int(3)}]",
"tests/test_uri_templates.py::test_int_converter[/{id:int(min=123)}]",
"tests/test_uri_templates.py::test_int_converter[/{id:int(min=123,",
"tests/test_uri_templates.py::test_int_converter_rejections[/{id:int(2)}]",
"tests/test_uri_templates.py::test_int_converter_rejections[/{id:int(min=124)}]",
"tests/test_uri_templates.py::test_int_converter_rejections[/{id:int(num_digits=3,",
"tests/test_uri_templates.py::test_datetime_converter[/{start_year:int}-to-{timestamp:dt}-/1961-to-1969-07-21T02:56:00Z-dt_expected0]",
"tests/test_uri_templates.py::test_datetime_converter[/{start_year:int}-to-{timestamp:dt(\"%Y-%m-%d\")}-/1961-to-1969-07-21-dt_expected1]",
"tests/test_uri_templates.py::test_datetime_converter[/{start_year:int}/{timestamp:dt(\"%Y-%m-%d",
"tests/test_uri_templates.py::test_datetime_converter[/{start_year:int}-to-{timestamp:dt(\"%Y-%m\")}-/1961-to-1969-07-21-None]",
"tests/test_uri_templates.py::test_uuid_converter[/widgets/{widget_id:uuid}-/widgets/53ec7369-0d5b-47de-a5c4-6172472dfeb2-expected0]",
"tests/test_uri_templates.py::test_uuid_converter[/widgets/{widget_id:uuid}/orders-/widgets/53ec73690d5b47dea5c46172472dfeb2/orders-expected1]",
"tests/test_uri_templates.py::test_uuid_converter[/versions/diff/{left:uuid()}...{right:uuid()}-/versions/diff/53ec7369-0d5b-47de-a5c4-6172472dfeb2...807f7894-16b9-4db2-8ca3-c8c28e6b6706-expected2]",
"tests/test_uri_templates.py::test_uuid_converter[/versions/diff/{left:uuid}...{right:uuid()}-/versions/diff/53ec7369-0d5b-47de-a5c4-6172472dfeb2...807f7894-16b9-4db2-8ca3-c8c28e6b6706-expected3]",
"tests/test_uri_templates.py::test_uuid_converter[/versions/diff/{left:uuid()}...{right:uuid}-/versions/diff/53ec7369-0d5b-47de-a5c4-6172472dfeb2...807f7894-16b9-4db2-8ca3-c8c28e6b6706-expected4]",
"tests/test_uri_templates.py::test_uuid_converter[/widgets/{widget_id:uuid}/orders-/widgets/53ec73690d5b47dea5c46172472dfeb/orders-None]",
"tests/test_uri_templates.py::test_uuid_converter_complex_segment",
"tests/test_uri_templates.py::test_converter_custom[/{food:spam}-/something-expected0]",
"tests/test_uri_templates.py::test_converter_custom[/{food:spam(\")\")}:{food_too:spam(\"()\")}-/bacon:eggs-expected1]",
"tests/test_uri_templates.py::test_converter_custom[/({food:spam()}){food_too:spam(\"()\")}-/(bacon)eggs-expected2]",
"tests/test_uri_templates.py::test_single_trailing_slash",
"tests/test_uri_templates.py::test_multiple",
"tests/test_uri_templates.py::test_empty_path_component[//]",
"tests/test_uri_templates.py::test_empty_path_component[//begin]",
"tests/test_uri_templates.py::test_empty_path_component[/end//]",
"tests/test_uri_templates.py::test_empty_path_component[/in//side]",
"tests/test_uri_templates.py::test_relative_path[]",
"tests/test_uri_templates.py::test_relative_path[no]",
"tests/test_uri_templates.py::test_relative_path[no/leading_slash]",
"tests/test_uri_templates.py::test_same_level_complex_var[True]",
"tests/test_uri_templates.py::test_same_level_complex_var[False]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_string_type_required[42]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_string_type_required[API]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_template_must_start_with_slash[this]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_template_must_start_with_slash[this/that]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_template_may_not_contain_double_slash[//]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_template_may_not_contain_double_slash[a//]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_template_may_not_contain_double_slash[//b]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_template_may_not_contain_double_slash[a//b]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_template_may_not_contain_double_slash[a/b//]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_template_may_not_contain_double_slash[a/b//c]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_root",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_no_fields[/hello]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_no_fields[/hello/world]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_no_fields[/hi/there/how/are/you]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_one_field",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_one_field_with_digits",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_one_field_with_prefixed_digits",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_two_fields[]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_two_fields[/]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_three_fields",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_malformed_field",
"tests/test_utils.py::TestFalconUtils::test_deprecated_decorator",
"tests/test_utils.py::TestFalconUtils::test_http_now",
"tests/test_utils.py::TestFalconUtils::test_dt_to_http",
"tests/test_utils.py::TestFalconUtils::test_http_date_to_dt",
"tests/test_utils.py::TestFalconUtils::test_pack_query_params_none",
"tests/test_utils.py::TestFalconUtils::test_pack_query_params_one",
"tests/test_utils.py::TestFalconUtils::test_pack_query_params_several",
"tests/test_utils.py::TestFalconUtils::test_uri_encode",
"tests/test_utils.py::TestFalconUtils::test_uri_encode_double",
"tests/test_utils.py::TestFalconUtils::test_uri_encode_value",
"tests/test_utils.py::TestFalconUtils::test_uri_decode",
"tests/test_utils.py::TestFalconUtils::test_prop_uri_encode_models_stdlib_quote",
"tests/test_utils.py::TestFalconUtils::test_prop_uri_encode_value_models_stdlib_quote_safe_tilde",
"tests/test_utils.py::TestFalconUtils::test_prop_uri_decode_models_stdlib_unquote_plus",
"tests/test_utils.py::TestFalconUtils::test_parse_query_string",
"tests/test_utils.py::TestFalconUtils::test_parse_host",
"tests/test_utils.py::TestFalconUtils::test_get_http_status",
"tests/test_utils.py::TestFalconTesting::test_path_escape_chars_in_create_environ",
"tests/test_utils.py::TestFalconTesting::test_no_prefix_allowed_for_query_strings_in_create_environ",
"tests/test_utils.py::TestFalconTesting::test_none_header_value_in_create_environ",
"tests/test_utils.py::TestFalconTesting::test_decode_empty_result",
"tests/test_utils.py::TestFalconTesting::test_httpnow_alias_for_backwards_compat",
"tests/test_utils.py::test_simulate_request_protocol[https-CONNECT]",
"tests/test_utils.py::test_simulate_request_protocol[https-DELETE]",
"tests/test_utils.py::test_simulate_request_protocol[https-GET]",
"tests/test_utils.py::test_simulate_request_protocol[https-HEAD]",
"tests/test_utils.py::test_simulate_request_protocol[https-OPTIONS]",
"tests/test_utils.py::test_simulate_request_protocol[https-PATCH]",
"tests/test_utils.py::test_simulate_request_protocol[https-POST]",
"tests/test_utils.py::test_simulate_request_protocol[https-PUT]",
"tests/test_utils.py::test_simulate_request_protocol[https-TRACE]",
"tests/test_utils.py::test_simulate_request_protocol[http-CONNECT]",
"tests/test_utils.py::test_simulate_request_protocol[http-DELETE]",
"tests/test_utils.py::test_simulate_request_protocol[http-GET]",
"tests/test_utils.py::test_simulate_request_protocol[http-HEAD]",
"tests/test_utils.py::test_simulate_request_protocol[http-OPTIONS]",
"tests/test_utils.py::test_simulate_request_protocol[http-PATCH]",
"tests/test_utils.py::test_simulate_request_protocol[http-POST]",
"tests/test_utils.py::test_simulate_request_protocol[http-PUT]",
"tests/test_utils.py::test_simulate_request_protocol[http-TRACE]",
"tests/test_utils.py::test_simulate_free_functions[simulate_get]",
"tests/test_utils.py::test_simulate_free_functions[simulate_head]",
"tests/test_utils.py::test_simulate_free_functions[simulate_post]",
"tests/test_utils.py::test_simulate_free_functions[simulate_put]",
"tests/test_utils.py::test_simulate_free_functions[simulate_options]",
"tests/test_utils.py::test_simulate_free_functions[simulate_patch]",
"tests/test_utils.py::test_simulate_free_functions[simulate_delete]",
"tests/test_utils.py::TestFalconTestCase::test_status",
"tests/test_utils.py::TestFalconTestCase::test_wsgi_iterable_not_closeable",
"tests/test_utils.py::TestFalconTestCase::test_path_must_start_with_slash",
"tests/test_utils.py::TestFalconTestCase::test_cached_text_in_result",
"tests/test_utils.py::TestFalconTestCase::test_simple_resource_body_json_xor",
"tests/test_utils.py::TestFalconTestCase::test_query_string",
"tests/test_utils.py::TestFalconTestCase::test_query_string_no_question",
"tests/test_utils.py::TestFalconTestCase::test_query_string_in_path",
"tests/test_utils.py::TestCaseFancyAPI::test_something",
"tests/test_utils.py::TestNoApiClass::test_something",
"tests/test_utils.py::TestSetupApi::test_something",
"tests/test_validators.py::test_jsonschema_validation_success",
"tests/test_validators.py::test_jsonschema_validation_failure",
"tests/test_wsgi.py::TestWSGIServer::test_get",
"tests/test_wsgi.py::TestWSGIServer::test_put",
"tests/test_wsgi.py::TestWSGIServer::test_head_405",
"tests/test_wsgi.py::TestWSGIServer::test_post",
"tests/test_wsgi.py::TestWSGIServer::test_post_invalid_content_length",
"tests/test_wsgi.py::TestWSGIServer::test_post_read_bounded_stream",
"tests/test_wsgi.py::TestWSGIServer::test_post_read_bounded_stream_no_body",
"tests/test_wsgi_errors.py::TestWSGIError::test_responder_logged_bytestring",
"tests/test_wsgi_interface.py::TestWSGIInterface::test_srmock",
"tests/test_wsgi_interface.py::TestWSGIInterface::test_pep3333",
"tests/test_wsgiref_inputwrapper_with_size.py::TestWsgiRefInputWrapper::test_resources_can_read_request_stream_during_tests"
]
| [
"tests/test_response_media.py::test_default_media_type[media_type1]"
]
| []
| []
| Apache License 2.0 | 1,938 | [
"falcon/routing/static.py",
"falcon/response.py",
"falcon/routing/__init__.py",
"falcon/api.py",
"falcon/response_helpers.py"
]
| [
"falcon/routing/static.py",
"falcon/response.py",
"falcon/routing/__init__.py",
"falcon/api.py",
"falcon/response_helpers.py"
]
|
keis__base58-27 | 5444801b217436757d3f9fbab129652017fca3d9 | 2017-12-07 08:22:51 | 5444801b217436757d3f9fbab129652017fca3d9 | diff --git a/base58.py b/base58.py
index 734e5d1..f03e3a4 100644
--- a/base58.py
+++ b/base58.py
@@ -14,7 +14,7 @@ from hashlib import sha256
__version__ = '0.2.5'
# 58 character alphabet used
-alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
+alphabet = b'123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
if bytes == str: # python2
@@ -31,22 +31,33 @@ else: # python3
)
+def scrub_input(v):
+ if isinstance(v, str) and not isinstance(v, bytes):
+ v = v.encode('ascii')
+
+ if not isinstance(v, bytes):
+ raise TypeError(
+ "a bytes-like object is required (also str), not '%s'" %
+ type(v).__name__)
+
+ return v
+
+
def b58encode_int(i, default_one=True):
'''Encode an integer using Base58'''
if not i and default_one:
- return alphabet[0]
- string = ""
+ return alphabet[0:1]
+ string = b""
while i:
i, idx = divmod(i, 58)
- string = alphabet[idx] + string
+ string = alphabet[idx:idx+1] + string
return string
def b58encode(v):
'''Encode a string using Base58'''
- if not isinstance(v, bytes):
- raise TypeError("a bytes-like object is required, not '%s'" %
- type(v).__name__)
+
+ v = scrub_input(v)
nPad = len(v)
v = v.lstrip(b'\0')
@@ -59,14 +70,13 @@ def b58encode(v):
result = b58encode_int(acc, default_one=False)
- return (alphabet[0] * nPad + result)
+ return (alphabet[0:1] * nPad + result)
def b58decode_int(v):
'''Decode a Base58 encoded string as an integer'''
- if not isinstance(v, str):
- v = v.decode('ascii')
+ v = scrub_input(v)
decimal = 0
for char in v:
@@ -77,16 +87,10 @@ def b58decode_int(v):
def b58decode(v):
'''Decode a Base58 encoded string'''
- if not isinstance(v, str):
- v = v.decode('ascii')
-
- if not isinstance(v, str):
- raise TypeError(
- "a string-like object is required (also bytes), not '%s'" %
- type(v).__name__)
+ v = scrub_input(v)
origlen = len(v)
- v = v.lstrip(alphabet[0])
+ v = v.lstrip(alphabet[0:1])
newlen = len(v)
acc = b58decode_int(v)
diff --git a/tox.ini b/tox.ini
index 33e915e..45d5087 100644
--- a/tox.ini
+++ b/tox.ini
@@ -3,4 +3,4 @@ envlist = py27,py35
[testenv]
deps = -rtest-requirements.txt
-commands = pytest --pep8 --flakes --cov=base58 .
+commands = pytest --pep8 --flakes --cov=base58 . []
| output as a byte array
Currently your encode function spits out a string, which is annoying because I have to call the str.encode() method on the output.
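For concreteness, a minimal sketch of the str/bytes round trip being described; the before/after behavior is inferred from the accompanying patch and updated tests, not stated in the issue itself:

```python
import base58

encoded = base58.b58encode(b'hello world')

# Before this change `encoded` is a str, so passing it to a bytes-only
# API needs an extra conversion step:
payload = encoded.encode('ascii') if isinstance(encoded, str) else encoded

# After this change b58encode returns bytes directly (the expected value
# below matches the updated tests):
assert base58.b58encode(b'hello world') == b'StV1DL6CwTryKyV'
```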
| keis/base58 | diff --git a/test_base58.py b/test_base58.py
index d76c2c1..feb020a 100644
--- a/test_base58.py
+++ b/test_base58.py
@@ -6,6 +6,16 @@ from base58 import (
b58decode_int, alphabet)
+if bytes == str:
+ bytes_from_char = (
+ lambda c: c
+ )
+else:
+ bytes_from_char = (
+ lambda c: bytes([c])
+ )
+
+
class RaisesContext(object):
pass
@@ -27,17 +37,17 @@ def assert_raises(matcher=None, message=''):
def test_simple_encode():
data = b58encode(b'hello world')
- assert_that(data, equal_to('StV1DL6CwTryKyV'))
+ assert_that(data, equal_to(b'StV1DL6CwTryKyV'))
def test_leadingz_encode():
data = b58encode(b'\0\0hello world')
- assert_that(data, equal_to('11StV1DL6CwTryKyV'))
+ assert_that(data, equal_to(b'11StV1DL6CwTryKyV'))
def test_encode_empty():
data = b58encode(b'')
- assert_that(data, equal_to(''))
+ assert_that(data, equal_to(b''))
def test_simple_decode():
@@ -91,14 +101,9 @@ def test_round_trips():
assert_that(bytes_in, equal_to(bytes_out))
-def test_input_should_be_bytes():
- data = u'3vQB7B6MrGQZaxCuFg4oH'
- with assert_raises(TypeError):
- b58encode(data)
-
-
def test_simple_integers():
for idx, char in enumerate(alphabet):
+ char = bytes_from_char(char)
assert_that(b58decode_int(char), equal_to(idx))
assert_that(b58encode_int(idx), equal_to(char))
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 2
} | 0.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-pep8",
"pytest-flakes",
"pytest-cov",
"PyHamcrest",
"mock",
"matchmock",
"coveralls"
],
"pre_install": [],
"python": "3.6",
"reqs_path": [
"test-requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
-e git+https://github.com/keis/base58.git@5444801b217436757d3f9fbab129652017fca3d9#egg=base58
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
coveralls==3.3.1
docopt==0.6.2
execnet==1.9.0
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
matchmock==2.1.1
mock==5.2.0
packaging==21.3
pep8==1.7.1
pluggy==1.0.0
py==1.11.0
pyflakes==3.0.1
PyHamcrest==2.1.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cache==1.0
pytest-cov==4.0.0
pytest-flakes==4.0.5
pytest-pep8==1.0.6
requests==2.27.1
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: base58
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- charset-normalizer==2.0.12
- coverage==6.2
- coveralls==3.3.1
- docopt==0.6.2
- execnet==1.9.0
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- matchmock==2.1.1
- mock==5.2.0
- packaging==21.3
- pep8==1.7.1
- pluggy==1.0.0
- py==1.11.0
- pyflakes==3.0.1
- pyhamcrest==2.1.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cache==1.0
- pytest-cov==4.0.0
- pytest-flakes==4.0.5
- pytest-pep8==1.0.6
- requests==2.27.1
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/base58
| [
"test_base58.py::test_simple_encode",
"test_base58.py::test_leadingz_encode",
"test_base58.py::test_encode_empty",
"test_base58.py::test_simple_integers"
]
| []
| [
"test_base58.py::test_simple_decode",
"test_base58.py::test_simple_decode_bytes",
"test_base58.py::test_leadingz_decode",
"test_base58.py::test_leadingz_decode_bytes",
"test_base58.py::test_empty_decode",
"test_base58.py::test_empty_decode_bytes",
"test_base58.py::test_check_identity",
"test_base58.py::test_check_failure",
"test_base58.py::test_round_trips",
"test_base58.py::test_large_integer"
]
| []
| MIT License | 1,939 | [
"base58.py",
"tox.ini"
]
| [
"base58.py",
"tox.ini"
]
|
|
CartoDB__cartoframes-319 | 8dcdd8361140d6e087c7301042bd5ba9dc475001 | 2017-12-07 17:46:33 | 088d019b9e95f68def26afa1efe6b9a73ff632fd | diff --git a/cartoframes/context.py b/cartoframes/context.py
index acac1b3e..f05c9f3b 100644
--- a/cartoframes/context.py
+++ b/cartoframes/context.py
@@ -1013,6 +1013,9 @@ class CartoContext(object):
query=layer.orig_query,
geoms=','.join(g['geom_type'] for g in resp['rows']),
common_geom=resp['rows'][0]['geom_type']))
+ elif len(resp['rows']) == 0:
+ raise ValueError('No geometry for layer. Check all layer tables '
+ 'and queries to ensure there are geometries.')
return resp['rows'][0]['geom_type']
def data_boundaries(self, df=None, table_name=None):
@@ -1303,8 +1306,8 @@ class CartoContext(object):
median_income = cc.data_discovery('transaction_events',
regex='.*median income.*',
time='2011 - 2015')
- df = cc.data(median_income,
- 'transaction_event')
+ df = cc.data('transaction_events',
+ median_income)
Pass in cherry-picked measures from the Data Observatory catalog.
The rest of the metadata will be filled in, but it's important to
diff --git a/cartoframes/layer.py b/cartoframes/layer.py
index a10acf78..789344c5 100644
--- a/cartoframes/layer.py
+++ b/cartoframes/layer.py
@@ -7,7 +7,7 @@ basemap layers.
import pandas as pd
import webcolors
-from cartoframes.utils import cssify, join_url
+from cartoframes.utils import cssify, join_url, minify_sql
from cartoframes.styling import BinMethod, mint, antique, get_scheme_cartocss
# colors map data layers without color specified
@@ -388,7 +388,7 @@ class QueryLayer(AbstractLayer):
duration = self.time['duration']
if (self.color in self.style_cols and
self.style_cols[self.color] in ('string', 'boolean', )):
- self.query = ' '.join([s.strip() for s in [
+ self.query = minify_sql([
'SELECT',
' orig.*, __wrap.cf_value_{col}',
'FROM ({query}) AS orig, (',
@@ -404,7 +404,7 @@ class QueryLayer(AbstractLayer):
' ) AS _wrap',
') AS __wrap',
'WHERE __wrap.{col} = orig.{col}',
- ]]).format(col=self.color, query=self.orig_query)
+ ]).format(col=self.color, query=self.orig_query)
agg_func = '\'CDB_Math_Mode(cf_value_{})\''.format(self.color)
self.scheme = {
'bins': ','.join(str(i) for i in range(1, 11)),
@@ -476,6 +476,11 @@ class QueryLayer(AbstractLayer):
'comp-op': 'source-over',
}
})
+ if self.color in self.style_cols:
+ css += cssify({
+ '#layer[{} = null]'.format(self.color): {
+ 'marker-fill': '#666'}
+ })
for t in range(1, self.time['trails'] + 1):
# Trails decay as 1/2^n, and grow 30% at each step
trail_temp = cssify({
@@ -487,33 +492,56 @@ class QueryLayer(AbstractLayer):
css += trail_temp
return css
else:
- return cssify({
- # Point CSS
- "#layer['mapnik::geometry_type'=1]": {
- 'marker-width': size_style,
- 'marker-fill': color_style,
- 'marker-fill-opacity': '1',
- 'marker-allow-overlap': 'true',
- 'marker-line-width': '0.5',
- 'marker-line-color': line_color,
- 'marker-line-opacity': '1',
- },
- # Line CSS
- "#layer['mapnik::geometry_type'=2]": {
- 'line-width': '1.5',
- 'line-color': color_style,
- },
- # Polygon CSS
- "#layer['mapnik::geometry_type'=3]": {
- 'polygon-fill': color_style,
- 'polygon-opacity': '0.9',
- 'polygon-gamma': '0.5',
- 'line-color': '#FFF',
- 'line-width': '0.5',
- 'line-opacity': '0.25',
- 'line-comp-op': 'hard-light',
- }
- })
+ if self.geom_type == 'point':
+ css = cssify({
+ # Point CSS
+ "#layer": {
+ 'marker-width': size_style,
+ 'marker-fill': color_style,
+ 'marker-fill-opacity': '1',
+ 'marker-allow-overlap': 'true',
+ 'marker-line-width': '0.5',
+ 'marker-line-color': line_color,
+ 'marker-line-opacity': '1',
+ }})
+ if self.color in self.style_cols:
+ css += cssify({
+ '#layer[{} = null]'.format(self.color): {
+ 'marker-fill': '#ccc'}
+ })
+ return css
+ elif self.geom_type == 'line':
+ css = cssify({
+ "#layer": {
+ 'line-width': '1.5',
+ 'line-color': color_style,
+ }})
+ if self.color in self.style_cols:
+ css += cssify({
+ '#layer[{} = null]'.format(self.color): {
+ 'line-color': '#ccc'}
+ })
+ return css
+ elif self.geom_type == 'polygon':
+ css = cssify({
+ "#layer": {
+ 'polygon-fill': color_style,
+ 'polygon-opacity': '0.9',
+ 'polygon-gamma': '0.5',
+ 'line-color': '#FFF',
+ 'line-width': '0.5',
+ 'line-opacity': '0.25',
+ 'line-comp-op': 'hard-light',
+ }})
+ if self.color in self.style_cols:
+ css += cssify({
+ '#layer[{} = null]'.format(self.color): {
+ 'polygon-fill': '#ccc'}
+ })
+ return css
+ else:
+ raise ValueError('Unsupported geometry type: {}'.format(
+ self.geom_type))
class Layer(QueryLayer):
diff --git a/cartoframes/utils.py b/cartoframes/utils.py
index 820398bc..36365850 100644
--- a/cartoframes/utils.py
+++ b/cartoframes/utils.py
@@ -17,7 +17,7 @@ def cssify(css_dict):
css += ' {field}: {field_value};'.format(field=field,
field_value=field_value)
css += '} '
- return css
+ return css.strip()
def normalize_colnames(columns):
| null column values in styling not handled correctly
The dataset is earthquakes, and the styling for both color and size is a ramp on magnitude (5 bins, equal interval). In CARTOframes, null values in the styling column are being colored as though they fall in the highest color bin.

Equivalent styling in CARTO Builder shows that the null values default to the lowest color in the ramp.

We should change this so that null values correspond to the lowest value of the ramp, the same as in Builder.
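For reference, a small hedged sketch (using the project's `cssify` helper and a hypothetical `magnitude` column) of the null-override rule the fix appends per geometry type; note the patch falls back to a neutral grey (`#ccc`, or `#666` for animated maps) rather than the lowest ramp color requested above:

```python
from cartoframes.utils import cssify

color_col = 'magnitude'  # hypothetical styling column containing nulls
null_rule = cssify({
    '#layer[{} = null]'.format(color_col): {
        'marker-fill': '#ccc',
    }
})
# null_rule is a CartoCSS string along the lines of
#   "#layer[magnitude = null] {  marker-fill: #ccc;}"
# and is appended after the ramped marker-fill rule, so null rows stop
# inheriting the top bin's color.
```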
| CartoDB/cartoframes | diff --git a/test/test_context.py b/test/test_context.py
index cdc1d0c6..87fb4383 100644
--- a/test/test_context.py
+++ b/test/test_context.py
@@ -566,6 +566,16 @@ class TestCartoContext(unittest.TestCase):
cc.map(layers=[Layer(self.test_read_table, time='cartodb_id'),
Layer(self.test_read_table, time='cartodb_id')])
+ # no geometry
+ with self.assertRaises(ValueError):
+ cc.map(layers=QueryLayer('''
+ SELECT
+ null::geometry as the_geom,
+ null::geometry as the_geom_webmercator,
+ row_number() OVER () as cartodb_id
+ FROM generate_series(1, 10) as m(i)
+ '''))
+
@unittest.skipIf(WILL_SKIP, 'no cartocredentials, skipping')
def test_cartocontext_map_time(self):
"""CartoContext.map time options"""
diff --git a/test/test_layer.py b/test/test_layer.py
index 8e1699e5..806afe2d 100644
--- a/test/test_layer.py
+++ b/test/test_layer.py
@@ -145,6 +145,7 @@ class TestQueryLayer(unittest.TestCase):
for idx, color in enumerate(str_colors):
qlayer = QueryLayer(self.query, color=color)
+ qlayer.geom_type = 'point'
if color == 'cookie_monster':
qlayer.style_cols[color] = 'number'
qlayer._setup([BaseMap(), qlayer], 1)
@@ -159,6 +160,7 @@ class TestQueryLayer(unittest.TestCase):
qlayer = QueryLayer(self.query, color='datetime_column')
qlayer.style_cols['datetime_column'] = 'date'
qlayer._setup([BaseMap(), qlayer], 1)
+
# Exception testing
# color column cannot be a geometry column
with self.assertRaises(ValueError,
@@ -192,10 +194,12 @@ class TestQueryLayer(unittest.TestCase):
dict(name='Antique', bin_method='',
bins=','.join(str(i) for i in range(1, 11))))
# expect category maps query
+ with open('qlayerquery.txt', 'w') as f:
+ f.write(ql.query)
self.assertRegexpMatches(ql.query,
- '^SELECT orig\.\*, '
- '__wrap.cf_value_colorcol.* '
- 'GROUP BY.*orig\.colorcol$')
+ '(?s)^SELECT\norig\.\*,\s__wrap\.'
+ 'cf_value_colorcol\n.*GROUP\sBY.*orig\.'
+ 'colorcol$')
# cartocss should have cdb math mode
self.assertRegexpMatches(ql.cartocss,
'.*CDB_Math_Mode\(cf_value_colorcol\).*')
@@ -346,8 +350,31 @@ class TestQueryLayer(unittest.TestCase):
"""layer.QueryLayer._get_cartocss"""
qlayer = QueryLayer(self.query, size=dict(column='cold_brew', min=10,
max=20))
+ qlayer.geom_type = 'point'
self.assertRegexpMatches(
qlayer._get_cartocss(BaseMap()),
('.*marker-width:\sramp\(\[cold_brew\],\srange\(10,20\),\s'
'quantiles\(5\)\).*')
)
+
+ # test line cartocss
+ qlayer = QueryLayer(self.query)
+ qlayer.geom_type = 'line'
+ self.assertRegexpMatches(qlayer._get_cartocss(BaseMap()),
+ '^\#layer.*line\-width.*$')
+ # test point, line, polygon
+ for g in ('point', 'line', 'polygon', ):
+ styles = {'point': 'marker\-fill',
+ 'line': 'line\-color',
+ 'polygon': 'polygon\-fill'}
+ qlayer = QueryLayer(self.query, color='colname')
+ qlayer.geom_type = g
+ self.assertRegexpMatches(qlayer._get_cartocss(BaseMap()),
+ '^\#layer.*{}.*\}}$'.format(styles[g]))
+
+ # geometry type should be defined
+ with self.assertRaises(ValueError,
+ msg='invalid geometry type'):
+ ql = QueryLayer(self.query, color='red')
+ ql.geom_type = 'notvalid'
+ ql._get_cartocss(BaseMap())
diff --git a/test/test_utils.py b/test/test_utils.py
index 4be5fb25..af4db384 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -85,7 +85,7 @@ class TestUtils(unittest.TestCase):
"marker-width: 6; marker-fill: yellow; "
"marker-fill-opacity: 1; marker-allow-overlap: "
"true; marker-line-width: 0.5; marker-line-color: "
- "black; marker-line-opacity: 1;} "),
+ "black; marker-line-opacity: 1;}"),
msg="point style")
# polygon style
@@ -96,7 +96,7 @@ class TestUtils(unittest.TestCase):
"#cc607d, #9e3963, #672044), quantiles); "
"polygon-opacity: 0.9; polygon-gamma: 0.5; "
"line-color: #FFF; line-width: 0.5; line-opacity: "
- "0.25; line-comp-op: hard-light;} "),
+ "0.25; line-comp-op: hard-light;}"),
msg="polygon style")
# complex style
@@ -113,7 +113,7 @@ class TestUtils(unittest.TestCase):
"polygon-fill: blue; polygon-opacity: 0.9; "
"polygon-gamma: 0.5; line-color: #FFF; line-width: "
"0.5; line-opacity: 0.25; "
- "line-comp-op: hard-light;} "),
+ "line-comp-op: hard-light;}"),
msg="multi-layer styling")
def test_norm_colname(self):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_media",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 3
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
appdirs==1.4.4
attrs==22.2.0
Babel==2.11.0
backcall==0.2.0
carto==1.11.3
-e git+https://github.com/CartoDB/cartoframes.git@8dcdd8361140d6e087c7301042bd5ba9dc475001#egg=cartoframes
certifi==2021.5.30
charset-normalizer==2.0.12
decorator==5.1.1
docutils==0.18.1
future==1.0.0
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
ipython==7.16.3
ipython-genutils==0.2.0
jedi==0.17.2
Jinja2==3.0.3
MarkupSafe==2.0.1
numpy==1.19.5
packaging==21.3
pandas==1.1.5
parso==0.7.1
pexpect==4.9.0
pickleshare==0.7.5
pluggy==1.0.0
pockets==0.9.1
prompt-toolkit==3.0.36
ptyprocess==0.7.0
py==1.11.0
Pygments==2.14.0
pyparsing==3.1.4
pyrestcli==0.6.11
pytest==7.0.1
python-dateutil==2.9.0.post0
pytz==2025.2
requests==2.27.1
Shapely==1.8.5.post1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-napoleon==0.7
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
tomli==1.2.3
tqdm==4.64.1
traitlets==4.3.3
typing_extensions==4.1.1
urllib3==1.26.20
wcwidth==0.2.13
webcolors==1.7
zipp==3.6.0
| name: cartoframes
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- appdirs==1.4.4
- attrs==22.2.0
- babel==2.11.0
- backcall==0.2.0
- carto==1.11.3
- charset-normalizer==2.0.12
- decorator==5.1.1
- docutils==0.18.1
- future==1.0.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- ipython==7.16.3
- ipython-genutils==0.2.0
- jedi==0.17.2
- jinja2==3.0.3
- markupsafe==2.0.1
- numpy==1.19.5
- packaging==21.3
- pandas==1.1.5
- parso==0.7.1
- pexpect==4.9.0
- pickleshare==0.7.5
- pluggy==1.0.0
- pockets==0.9.1
- prompt-toolkit==3.0.36
- ptyprocess==0.7.0
- py==1.11.0
- pygments==2.14.0
- pyparsing==3.1.4
- pyrestcli==0.6.11
- pytest==7.0.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- requests==2.27.1
- shapely==1.8.5.post1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-napoleon==0.7
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- tqdm==4.64.1
- traitlets==4.3.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- wcwidth==0.2.13
- webcolors==1.7
- zipp==3.6.0
prefix: /opt/conda/envs/cartoframes
| [
"test/test_layer.py::TestQueryLayer::test_querylayer_get_cartocss",
"test/test_layer.py::TestQueryLayer::test_querylayer_time_category",
"test/test_utils.py::TestUtils::test_cssify"
]
| [
"test/test_context.py::TestCartoContext::test_add_encoded_geom",
"test/test_context.py::TestCartoContext::test_cartocontext",
"test/test_context.py::TestCartoContext::test_cartocontext_check_query",
"test/test_context.py::TestCartoContext::test_cartocontext_credentials",
"test/test_context.py::TestCartoContext::test_cartocontext_delete",
"test/test_context.py::TestCartoContext::test_cartocontext_handle_import",
"test/test_context.py::TestCartoContext::test_cartocontext_isorguser",
"test/test_context.py::TestCartoContext::test_cartocontext_map",
"test/test_context.py::TestCartoContext::test_cartocontext_map_geom_type",
"test/test_context.py::TestCartoContext::test_cartocontext_map_time",
"test/test_context.py::TestCartoContext::test_cartocontext_mixed_case",
"test/test_context.py::TestCartoContext::test_cartocontext_read",
"test/test_context.py::TestCartoContext::test_cartocontext_table_exists",
"test/test_context.py::TestCartoContext::test_cartocontext_write",
"test/test_context.py::TestCartoContext::test_cartocontext_write_index",
"test/test_context.py::TestCartoContext::test_cartoframes_query",
"test/test_context.py::TestCartoContext::test_cartoframes_sync",
"test/test_context.py::TestCartoContext::test_data",
"test/test_context.py::TestCartoContext::test_data_discovery",
"test/test_context.py::TestCartoContext::test_debug_print",
"test/test_context.py::TestCartoContext::test_get_bounds",
"test/test_context.py::TestBatchJobStatus::test_batchjobstatus",
"test/test_context.py::TestBatchJobStatus::test_batchjobstatus_methods",
"test/test_context.py::TestBatchJobStatus::test_batchjobstatus_repr"
]
| [
"test/test_context.py::TestCartoContext::test_cartocontext_send_dataframe",
"test/test_context.py::TestCartoContext::test_decode_geom",
"test/test_context.py::TestCartoContext::test_df2pg_schema",
"test/test_context.py::TestCartoContext::test_dtypes2pg",
"test/test_context.py::TestCartoContext::test_encode_geom",
"test/test_context.py::TestCartoContext::test_pg2dtypes",
"test/test_layer.py::TestAbstractLayer::test_class",
"test/test_layer.py::TestLayer::test_layer_setup_dataframe",
"test/test_layer.py::TestBaseMap::test_basemap_invalid",
"test/test_layer.py::TestBaseMap::test_basemap_source",
"test/test_layer.py::TestQueryLayer::test_querylayer_colors",
"test/test_layer.py::TestQueryLayer::test_querylayer_size_and_time",
"test/test_layer.py::TestQueryLayer::test_querylayer_size_column_key",
"test/test_layer.py::TestQueryLayer::test_querylayer_size_default",
"test/test_layer.py::TestQueryLayer::test_querylayer_size_defaults",
"test/test_layer.py::TestQueryLayer::test_querylayer_time_default",
"test/test_layer.py::TestQueryLayer::test_querylayer_time_errors",
"test/test_layer.py::TestQueryLayer::test_querylayer_time_numeric",
"test/test_utils.py::TestUtils::test_dict_items",
"test/test_utils.py::TestUtils::test_importify_params",
"test/test_utils.py::TestUtils::test_norm_colname",
"test/test_utils.py::TestUtils::test_normalize_colnames"
]
| []
| BSD 3-Clause "New" or "Revised" License | 1,940 | [
"cartoframes/context.py",
"cartoframes/layer.py",
"cartoframes/utils.py"
]
| [
"cartoframes/context.py",
"cartoframes/layer.py",
"cartoframes/utils.py"
]
|
|
dpkp__kafka-python-1320 | 009290ddd5d4616d70bff93f841e773af8b22750 | 2017-12-08 02:16:09 | 618c5051493693c1305aa9f08e8a0583d5fcf0e3 | tvoinarovskyi: Looks good. | diff --git a/kafka/conn.py b/kafka/conn.py
index e20210a..2926e2f 100644
--- a/kafka/conn.py
+++ b/kafka/conn.py
@@ -251,67 +251,42 @@ class BrokerConnection(object):
self._sasl_auth_future = None
self.last_attempt = 0
self._gai = None
- self._gai_index = 0
self._sensors = None
if self.config['metrics']:
self._sensors = BrokerConnectionMetrics(self.config['metrics'],
self.config['metric_group_prefix'],
self.node_id)
+ def _next_afi_host_port(self):
+ if not self._gai:
+ self._gai = dns_lookup(self._init_host, self._init_port, self._init_afi)
+ if not self._gai:
+ log.error('DNS lookup failed for %s:%i (%s)',
+ self._init_host, self._init_port, self._init_afi)
+ return
+
+ afi, _, __, ___, sockaddr = self._gai.pop(0)
+ host, port = sockaddr[:2]
+ return (afi, host, port)
+
def connect(self):
"""Attempt to connect and return ConnectionState"""
if self.state is ConnectionStates.DISCONNECTED:
- log.debug('%s: creating new socket', self)
- # if self.afi is set to AF_UNSPEC, then we need to do a name
- # resolution and try all available address families
- if self._init_afi == socket.AF_UNSPEC:
- if self._gai is None:
- # XXX: all DNS functions in Python are blocking. If we really
- # want to be non-blocking here, we need to use a 3rd-party
- # library like python-adns, or move resolution onto its
- # own thread. This will be subject to the default libc
- # name resolution timeout (5s on most Linux boxes)
- try:
- self._gai = socket.getaddrinfo(self._init_host,
- self._init_port,
- socket.AF_UNSPEC,
- socket.SOCK_STREAM)
- except socket.gaierror as ex:
- log.warning('DNS lookup failed for %s:%d,'
- ' exception was %s. Is your'
- ' advertised.listeners (called'
- ' advertised.host.name before Kafka 9)'
- ' correct and resolvable?',
- self._init_host, self._init_port, ex)
- self._gai = []
- self._gai_index = 0
- else:
- # if self._gai already exists, then we should try the next
- # name
- self._gai_index += 1
- while True:
- if self._gai_index >= len(self._gai):
- error = 'Unable to connect to any of the names for {0}:{1}'.format(
- self._init_host, self._init_port)
- log.error(error)
- self.close(Errors.ConnectionError(error))
- return
- afi, _, __, ___, sockaddr = self._gai[self._gai_index]
- if afi not in (socket.AF_INET, socket.AF_INET6):
- self._gai_index += 1
- continue
- break
- self.host, self.port = sockaddr[:2]
- self._sock = socket.socket(afi, socket.SOCK_STREAM)
+ self.last_attempt = time.time()
+ next_lookup = self._next_afi_host_port()
+ if not next_lookup:
+ self.close(Errors.ConnectionError('DNS failure'))
+ return
else:
- self._sock = socket.socket(self._init_afi, socket.SOCK_STREAM)
+ log.debug('%s: creating new socket', self)
+ self.afi, self.host, self.port = next_lookup
+ self._sock = socket.socket(self.afi, socket.SOCK_STREAM)
for option in self.config['socket_options']:
log.debug('%s: setting socket option %s', self, option)
self._sock.setsockopt(*option)
self._sock.setblocking(False)
- self.last_attempt = time.time()
self.state = ConnectionStates.CONNECTING
if self.config['security_protocol'] in ('SSL', 'SASL_SSL'):
self._wrap_ssl()
@@ -328,11 +303,6 @@ class BrokerConnection(object):
ret = None
try:
ret = self._sock.connect_ex((self.host, self.port))
- # if we got here through a host lookup, we've found a host,port,af tuple
- # that works save it so we don't do a GAI lookup again
- if self._gai is not None:
- self.afi = self._sock.family
- self._gai = None
except socket.error as err:
ret = err.errno
@@ -645,23 +615,15 @@ class BrokerConnection(object):
will be failed with this exception.
Default: kafka.errors.ConnectionError.
"""
- if self.state is ConnectionStates.DISCONNECTED:
- if error is not None:
- if sys.version_info >= (3, 2):
- log.warning('%s: close() called on disconnected connection with error: %s', self, error, stack_info=True)
- else:
- log.warning('%s: close() called on disconnected connection with error: %s', self, error)
- return
-
log.info('%s: Closing connection. %s', self, error or '')
- self.state = ConnectionStates.DISCONNECTING
- self.config['state_change_callback'](self)
+ if self.state is not ConnectionStates.DISCONNECTED:
+ self.state = ConnectionStates.DISCONNECTING
+ self.config['state_change_callback'](self)
self._update_reconnect_backoff()
if self._sock:
self._sock.close()
self._sock = None
self.state = ConnectionStates.DISCONNECTED
- self.last_attempt = time.time()
self._sasl_auth_future = None
self._protocol = KafkaProtocol(
client_id=self.config['client_id'],
@@ -1170,3 +1132,29 @@ def collect_hosts(hosts, randomize=True):
shuffle(result)
return result
+
+
+def is_inet_4_or_6(gai):
+ """Given a getaddrinfo struct, return True iff ipv4 or ipv6"""
+ return gai[0] in (socket.AF_INET, socket.AF_INET6)
+
+
+def dns_lookup(host, port, afi=socket.AF_UNSPEC):
+ """Returns a list of getaddrinfo structs, optionally filtered to an afi (ipv4 / ipv6)"""
+ # XXX: all DNS functions in Python are blocking. If we really
+ # want to be non-blocking here, we need to use a 3rd-party
+ # library like python-adns, or move resolution onto its
+ # own thread. This will be subject to the default libc
+ # name resolution timeout (5s on most Linux boxes)
+ try:
+ return list(filter(is_inet_4_or_6,
+ socket.getaddrinfo(host, port, afi,
+ socket.SOCK_STREAM)))
+ except socket.gaierror as ex:
+ log.warning('DNS lookup failed for %s:%d,'
+ ' exception was %s. Is your'
+ ' advertised.listeners (called'
+ ' advertised.host.name before Kafka 9)'
+ ' correct and resolvable?',
+ host, port, ex)
+ return []
diff --git a/kafka/protocol/types.py b/kafka/protocol/types.py
index 22b49a4..516b957 100644
--- a/kafka/protocol/types.py
+++ b/kafka/protocol/types.py
@@ -8,16 +8,20 @@ from .abstract import AbstractType
def _pack(f, value):
try:
return pack(f, value)
- except error:
- raise ValueError(error)
+ except error as e:
+ raise ValueError("Error encountered when attempting to convert value: "
+ "{} to struct format: '{}', hit error: {}"
+ .format(value, f, e))
def _unpack(f, data):
try:
(value,) = unpack(f, data)
return value
- except error:
- raise ValueError(error)
+ except error as e:
+ raise ValueError("Error encountered when attempting to convert value: "
+ "{} to struct format: '{}', hit error: {}"
+ .format(value, f, e))
class Int8(AbstractType):
diff --git a/kafka/util.py b/kafka/util.py
index de8f228..181f67f 100644
--- a/kafka/util.py
+++ b/kafka/util.py
@@ -12,14 +12,20 @@ from kafka.vendor import six
from kafka.errors import BufferUnderflowError
-def crc32(data):
- crc = binascii.crc32(data)
- # py2 and py3 behave a little differently
- # CRC is encoded as a signed int in kafka protocol
- # so we'll convert the py3 unsigned result to signed
- if six.PY3 and crc >= 2**31:
- crc -= 2**32
- return crc
+if six.PY3:
+ MAX_INT = 2 ** 31
+ TO_SIGNED = 2 ** 32
+
+ def crc32(data):
+ crc = binascii.crc32(data)
+ # py2 and py3 behave a little differently
+ # CRC is encoded as a signed int in kafka protocol
+ # so we'll convert the py3 unsigned result to signed
+ if crc >= MAX_INT:
+ crc -= TO_SIGNED
+ return crc
+else:
+ from binascii import crc32
def write_int_string(s):
| Handling of struct errors doesn't print the specific error message
When a `struct.error` is thrown during `_pack()` or `_unpack()`, we catch and re-raise as a `ValueError`: https://github.com/dpkp/kafka-python/blob/master/kafka/protocol/types.py#L11-L12
However, we're shadowing the name `error` so we lose a handle on the specific exception and cannot print the specific error message.
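A minimal sketch of the problem and the fix (the `from struct import error, pack` import and handler shape follow `kafka/protocol/types.py`; the message text in the fixed branch mirrors this PR's patch):

```python
from struct import error, pack

def _pack_buggy(f, value):
    try:
        return pack(f, value)
    except error:
        # `error` here still refers to the struct.error *class*, not the
        # caught instance, so the re-raised ValueError carries no detail.
        raise ValueError(error)

def _pack_fixed(f, value):
    try:
        return pack(f, value)
    except error as e:
        # Binding the caught instance lets us surface the real message.
        raise ValueError("Error encountered when attempting to convert value: "
                         "{} to struct format: '{}', hit error: {}"
                         .format(value, f, e))
```
| dpkp/kafka-python | diff --git a/test/test_conn.py b/test/test_conn.py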
index 1621e60..ef7925a 100644
--- a/test/test_conn.py
+++ b/test/test_conn.py
@@ -267,3 +267,28 @@ def test_lookup_on_connect():
m.assert_called_once_with(hostname, port, 0, 1)
conn.close()
assert conn.host == ip2
+
+
+def test_relookup_on_failure():
+ hostname = 'example.org'
+ port = 9092
+ conn = BrokerConnection(hostname, port, socket.AF_UNSPEC)
+ assert conn.host == conn.hostname == hostname
+ mock_return1 = []
+ with mock.patch("socket.getaddrinfo", return_value=mock_return1) as m:
+ last_attempt = conn.last_attempt
+ conn.connect()
+ m.assert_called_once_with(hostname, port, 0, 1)
+ assert conn.disconnected()
+ assert conn.last_attempt > last_attempt
+
+ ip2 = '127.0.0.2'
+ mock_return2 = [
+ (2, 2, 17, '', (ip2, 9092)),
+ ]
+
+ with mock.patch("socket.getaddrinfo", return_value=mock_return2) as m:
+ conn.connect()
+ m.assert_called_once_with(hostname, port, 0, 1)
+ conn.close()
+ assert conn.host == ip2
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 3
} | 1.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-catchlog",
"pytest-sugar",
"pytest-mock",
"mock",
"python-snappy",
"lz4tools",
"xxhash"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc libsnappy-dev"
],
"python": "3.6",
"reqs_path": [
"docs/requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
cramjam==2.5.0
docutils==0.18.1
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
Jinja2==3.0.3
-e git+https://github.com/dpkp/kafka-python.git@009290ddd5d4616d70bff93f841e773af8b22750#egg=kafka_python
lz4tools==1.3.1.2
MarkupSafe==2.0.1
mock==5.2.0
packaging==21.3
pluggy==1.0.0
pockets==0.9.1
py==1.11.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytest-catchlog==1.2.2
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-sugar==0.9.6
python-snappy==0.7.3
pytz==2025.2
requests==2.27.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-napoleon==0.7
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
termcolor==1.1.0
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
xxhash==3.2.0
zipp==3.6.0
| name: kafka-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- coverage==6.2
- cramjam==2.5.0
- docutils==0.18.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jinja2==3.0.3
- lz4tools==1.3.1.2
- markupsafe==2.0.1
- mock==5.2.0
- packaging==21.3
- pluggy==1.0.0
- pockets==0.9.1
- py==1.11.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-catchlog==1.2.2
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-sugar==0.9.6
- python-snappy==0.7.3
- pytz==2025.2
- requests==2.27.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-napoleon==0.7
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- termcolor==1.1.0
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- xxhash==3.2.0
- zipp==3.6.0
prefix: /opt/conda/envs/kafka-python
| [
"test/test_conn.py::test_relookup_on_failure"
]
| []
| [
"test/test_conn.py::test_connect[states0]",
"test/test_conn.py::test_connect[states1]",
"test/test_conn.py::test_connect[states2]",
"test/test_conn.py::test_connect[states3]",
"test/test_conn.py::test_connect[states4]",
"test/test_conn.py::test_connect_timeout",
"test/test_conn.py::test_blacked_out",
"test/test_conn.py::test_connected",
"test/test_conn.py::test_connecting",
"test/test_conn.py::test_send_disconnected",
"test/test_conn.py::test_send_connecting",
"test/test_conn.py::test_send_max_ifr",
"test/test_conn.py::test_send_no_response",
"test/test_conn.py::test_send_response",
"test/test_conn.py::test_send_error",
"test/test_conn.py::test_can_send_more",
"test/test_conn.py::test_recv_disconnected",
"test/test_conn.py::test_recv",
"test/test_conn.py::test_close",
"test/test_conn.py::test_collect_hosts__happy_path",
"test/test_conn.py::test_collect_hosts__ipv6",
"test/test_conn.py::test_collect_hosts__string_list",
"test/test_conn.py::test_collect_hosts__with_spaces",
"test/test_conn.py::test_lookup_on_connect"
]
| []
| Apache License 2.0 | 1,941 | [
"kafka/util.py",
"kafka/protocol/types.py",
"kafka/conn.py"
]
| [
"kafka/util.py",
"kafka/protocol/types.py",
"kafka/conn.py"
]
|
marshmallow-code__marshmallow-700 | 12044877d8af3d8f07c39bf0c5df78d3d7a22df1 | 2017-12-08 16:54:20 | 54e1605604aaf647ee4b03340284b348341eff62 | diff --git a/marshmallow/fields.py b/marshmallow/fields.py
index d2ea022f..5a0835da 100755
--- a/marshmallow/fields.py
+++ b/marshmallow/fields.py
@@ -1125,12 +1125,20 @@ class TimeDelta(Field):
class Dict(Field):
- """A dict field. Supports dicts and dict-like objects.
+ """A dict field. Supports dicts and dict-like objects. Optionally composed
+ with another `Field` class or instance.
+
+ Example: ::
+
+ numbers = fields.Dict(values=fields.Float(), keys=fields.Str())
+
+ :param Field values: A field class or instance for dict values.
+ :param Field keys: A field class or instance for dict keys.
+ :param kwargs: The same keyword arguments that :class:`Field` receives.
.. note::
- This field is only appropriate when the structure of
- nested data is not known. For structured data, use
- `Nested`.
+ When the structure of nested data is not known, you may omit the
+ `values` and `keys` arguments to prevent content validation.
.. versionadded:: 2.1.0
"""
@@ -1139,11 +1147,85 @@ class Dict(Field):
'invalid': 'Not a valid mapping type.'
}
- def _deserialize(self, value, attr, data):
- if isinstance(value, collections.Mapping):
- return value
+ def __init__(self, values=None, keys=None, **kwargs):
+ super(Dict, self).__init__(**kwargs)
+ if values is None:
+ self.value_container = None
+ elif isinstance(values, type):
+ if not issubclass(values, FieldABC):
+ raise ValueError('"values" must be a subclass of '
+ 'marshmallow.base.FieldABC')
+ self.value_container = values()
+ else:
+ if not isinstance(values, FieldABC):
+ raise ValueError('"values" must be of type '
+ 'marshmallow.base.FieldABC')
+ self.value_container = values
+ if keys is None:
+ self.key_container = None
+ elif isinstance(keys, type):
+ if not issubclass(keys, FieldABC):
+ raise ValueError('"keys" must be a subclass of '
+ 'marshmallow.base.FieldABC')
+ self.key_container = keys()
else:
+ if not isinstance(keys, FieldABC):
+ raise ValueError('"keys" must be of type '
+ 'marshmallow.base.FieldABC')
+ self.key_container = keys
+
+ def _serialize(self, value, attr, obj):
+ if value is None:
+ return None
+ if not self.value_container and not self.key_container:
+ return value
+ if isinstance(value, collections.Mapping):
+ values = value.values()
+ if self.value_container:
+ values = [self.value_container._serialize(item, attr, obj) for item in values]
+ keys = value.keys()
+ if self.key_container:
+ keys = [self.key_container._serialize(key, attr, obj) for key in keys]
+ return dict(zip(keys, values))
+ self.fail('invalid')
+
+ def _deserialize(self, value, attr, data):
+ if not isinstance(value, collections.Mapping):
self.fail('invalid')
+ if not self.value_container and not self.key_container:
+ return value
+
+ errors = {}
+ values = list(value.values())
+ keys = list(value.keys())
+ if self.key_container:
+ for idx, key in enumerate(keys):
+ try:
+ keys[idx] = self.key_container.deserialize(key)
+ except ValidationError as e:
+ errors[key] = [
+ 'Invalid key: {}'.format(message)
+ for message in e.messages
+ ]
+ if self.value_container:
+ for idx, item in enumerate(values):
+ try:
+ values[idx] = self.value_container.deserialize(item)
+ except ValidationError as e:
+ values[idx] = e.data
+ key = keys[idx]
+ if key not in errors:
+ errors[key] = []
+ errors[key].extend([
+ 'Invalid value: {}'.format(message)
+ for message in e.messages
+ ])
+ result = dict(zip(keys, values))
+
+ if errors:
+ raise ValidationError(errors, data=result)
+
+ return result
class ValidatedField(Field):
| How to create a Schema containing a dict of nested Schema?
Hi. I've been digging around and couldn't find the answer to this.
Say I've got a model like this:
``` python
class AlbumSchema(Schema):
year = fields.Int()
class ArtistSchema(Schema):
name = fields.Str()
albums = ...
```
I want `albums` to be a `dict` of `AlbumSchema`, so that `ArtistSchema` serializes as
``` python
{ 'albums': { 'Hunky Dory': {'year': 1971},
'The Man Who Sold the World': {'year': 1970}},
'name': 'David Bowie'}
```
Naively, I would expect syntaxes like this to work:
``` python
fields.List(Schema)
fields.Dict(Schema)
```
or maybe
``` python
fields.List(fields.Nested(Schema))
fields.Dict(fields.Nested(Schema))
```
Serializing a `list` of `Schema` can be achieved through `Nested(Schema, many=True)`, which I find less intuitive, and I don't know of an equivalent for a `dict` of `Schema`.
Is there any way to do it? Or a good reason _not_ to do it?
(Question also [asked on SO](http://stackoverflow.com/questions/38048775/marshmallow-dict-of-nested-schema).)
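For reference, a hedged sketch of how the `keys`/`values` arguments added by this patch could express the example above; composing `Dict` with `fields.Nested` is an assumption here, since the patch's tests only exercise scalar key/value fields:

``` python
from marshmallow import Schema, fields

class AlbumSchema(Schema):
    year = fields.Int()

class ArtistSchema(Schema):
    name = fields.Str()
    # keys/values are the new arguments; a Nested value container is
    # assumed to work, not something this patch tests directly.
    albums = fields.Dict(keys=fields.Str(), values=fields.Nested(AlbumSchema))

bowie = {
    'name': 'David Bowie',
    'albums': {'Hunky Dory': {'year': 1971},
               'The Man Who Sold the World': {'year': 1970}},
}
data, errors = ArtistSchema().dump(bowie)  # marshmallow 2.x MarshalResult
# data == {'name': 'David Bowie',
#          'albums': {'Hunky Dory': {'year': 1971},
#                     'The Man Who Sold the World': {'year': 1970}}}
```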
| marshmallow-code/marshmallow | diff --git a/tests/test_deserialization.py b/tests/test_deserialization.py
index adaba296..3e2a7c37 100644
--- a/tests/test_deserialization.py
+++ b/tests/test_deserialization.py
@@ -542,6 +542,51 @@ class TestFieldDeserialization:
field.deserialize('baddict')
assert excinfo.value.args[0] == 'Not a valid mapping type.'
+ def test_structured_dict_value_deserialization(self):
+ field = fields.Dict(values=fields.Str)
+ assert field.deserialize({"foo": "bar"}) == {"foo": "bar"}
+ with pytest.raises(ValidationError) as excinfo:
+ field.deserialize({"foo": 1, "bar": "baz"})
+ assert excinfo.value.args[0] == {'foo': ['Invalid value: Not a valid string.']}
+ assert excinfo.value.data == {'bar': 'baz', 'foo': None}
+
+ def test_structured_dict_key_deserialization(self):
+ field = fields.Dict(keys=fields.Str)
+ assert field.deserialize({"foo": "bar"}) == {"foo": "bar"}
+ with pytest.raises(ValidationError) as excinfo:
+ field.deserialize({1: "bar", "foo": "baz"})
+ assert excinfo.value.args[0] == {1: ['Invalid key: Not a valid string.']}
+ assert excinfo.value.data == {'foo': 'baz', 1: "bar"}
+
+ def test_structured_dict_key_value_deserialization(self):
+ field = fields.Dict(
+ keys=fields.Str(validate=[validate.Email(), validate.Regexp(r'.*@test\.com$')]),
+ values=fields.Decimal,
+ )
+ assert field.deserialize({"[email protected]": 1}) == {"[email protected]": decimal.Decimal(1)}
+ with pytest.raises(ValidationError) as excinfo:
+ field.deserialize({1: "bar"})
+ assert excinfo.value.args[0] == {1: [
+ 'Invalid key: Not a valid string.',
+ 'Invalid value: Not a valid number.',
+ ]}
+ with pytest.raises(ValidationError) as excinfo:
+ field.deserialize({"[email protected]": "bar"})
+ assert excinfo.value.args[0] == {"[email protected]": ['Invalid value: Not a valid number.']}
+ assert excinfo.value.data == {'[email protected]': None}
+ with pytest.raises(ValidationError) as excinfo:
+ field.deserialize({1: 1})
+ assert excinfo.value.args[0] == {1: ['Invalid key: Not a valid string.']}
+ assert excinfo.value.data == {1: 1}
+ with pytest.raises(ValidationError) as excinfo:
+ field.deserialize({"foo": "bar"})
+ assert excinfo.value.args[0] == {"foo": [
+ 'Invalid key: Not a valid email address.',
+ 'Invalid key: String does not match expected pattern.',
+ 'Invalid value: Not a valid number.',
+ ]}
+ assert excinfo.value.data == {'foo': None}
+
def test_url_field_deserialization(self):
field = fields.Url()
assert field.deserialize('https://duckduckgo.com') == 'https://duckduckgo.com'
diff --git a/tests/test_serialization.py b/tests/test_serialization.py
index 3f3c290d..718078c7 100644
--- a/tests/test_serialization.py
+++ b/tests/test_serialization.py
@@ -388,6 +388,27 @@ class TestFieldSerialization:
assert field.serialize('various_data', user) == \
OrderedDict([("foo", "bar"), ("bar", "baz")])
+ def test_structured_dict_value_serialize(self, user):
+ user.various_data = {"foo": decimal.Decimal('1')}
+ field = fields.Dict(values=fields.Decimal)
+ assert field.serialize('various_data', user) == {"foo": 1}
+
+ def test_structured_dict_key_serialize(self, user):
+ user.various_data = {1: "bar"}
+ field = fields.Dict(keys=fields.Str)
+ assert field.serialize('various_data', user) == {"1": "bar"}
+
+ def test_structured_dict_key_value_serialize(self, user):
+ user.various_data = {1: decimal.Decimal('1')}
+ field = fields.Dict(keys=fields.Str, values=fields.Decimal)
+ assert field.serialize('various_data', user) == {"1": 1}
+
+ def test_structured_dict_validates(self, user):
+ user.various_data = {"foo": "bar"}
+ field = fields.Dict(values=fields.Decimal)
+ with pytest.raises(ValidationError):
+ field.serialize('various_data', user)
+
def test_url_field_serialize_none(self, user):
user.homepage = None
field = fields.Url()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 2.15 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[reco]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio",
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"dev-requirements.txt",
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | backports.tarfile==1.2.0
cachetools==5.5.2
certifi==2025.1.31
cffi==1.17.1
chardet==5.2.0
charset-normalizer==3.4.1
colorama==0.4.6
coverage==7.8.0
cryptography==44.0.2
distlib==0.3.9
docutils==0.21.2
exceptiongroup==1.2.2
execnet==2.1.1
filelock==3.18.0
flake8==2.4.1
id==1.5.0
idna==3.10
importlib_metadata==8.6.1
iniconfig==2.1.0
invoke==0.21.0
jaraco.classes==3.4.0
jaraco.context==6.0.1
jaraco.functools==4.1.0
jeepney==0.9.0
keyring==25.6.0
markdown-it-py==3.0.0
-e git+https://github.com/marshmallow-code/marshmallow.git@12044877d8af3d8f07c39bf0c5df78d3d7a22df1#egg=marshmallow
mccabe==0.3.1
mdurl==0.1.2
more-itertools==10.6.0
nh3==0.2.21
packaging==24.2
pep8==1.7.1
platformdirs==4.3.7
pluggy==1.5.0
py==1.11.0
pycparser==2.22
pyflakes==0.8.1
Pygments==2.19.1
pyproject-api==1.9.0
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
pytz==2025.2
readme_renderer==44.0
requests==2.32.3
requests-toolbelt==1.0.0
rfc3986==2.0.0
rich==14.0.0
SecretStorage==3.3.3
simplejson==3.20.1
six==1.17.0
tomli==2.2.1
tox==4.25.0
twine==6.1.0
typing_extensions==4.13.0
urllib3==2.3.0
virtualenv==20.29.3
zipp==3.21.0
| name: marshmallow
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- backports-tarfile==1.2.0
- cachetools==5.5.2
- certifi==2025.1.31
- cffi==1.17.1
- chardet==5.2.0
- charset-normalizer==3.4.1
- colorama==0.4.6
- coverage==7.8.0
- cryptography==44.0.2
- distlib==0.3.9
- docutils==0.21.2
- exceptiongroup==1.2.2
- execnet==2.1.1
- filelock==3.18.0
- flake8==2.4.1
- id==1.5.0
- idna==3.10
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- invoke==0.21.0
- jaraco-classes==3.4.0
- jaraco-context==6.0.1
- jaraco-functools==4.1.0
- jeepney==0.9.0
- keyring==25.6.0
- markdown-it-py==3.0.0
- mccabe==0.3.1
- mdurl==0.1.2
- more-itertools==10.6.0
- nh3==0.2.21
- packaging==24.2
- pep8==1.7.1
- platformdirs==4.3.7
- pluggy==1.5.0
- py==1.11.0
- pycparser==2.22
- pyflakes==0.8.1
- pygments==2.19.1
- pyproject-api==1.9.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- readme-renderer==44.0
- requests==2.32.3
- requests-toolbelt==1.0.0
- rfc3986==2.0.0
- rich==14.0.0
- secretstorage==3.3.3
- simplejson==3.20.1
- six==1.17.0
- tomli==2.2.1
- tox==4.25.0
- twine==6.1.0
- typing-extensions==4.13.0
- urllib3==2.3.0
- virtualenv==20.29.3
- zipp==3.21.0
prefix: /opt/conda/envs/marshmallow
| [
"tests/test_deserialization.py::TestFieldDeserialization::test_structured_dict_value_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_structured_dict_key_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_structured_dict_key_value_deserialization",
"tests/test_serialization.py::TestFieldSerialization::test_structured_dict_key_serialize",
"tests/test_serialization.py::TestFieldSerialization::test_structured_dict_key_value_serialize",
"tests/test_serialization.py::TestFieldSerialization::test_structured_dict_validates"
]
| []
| [
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[String]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Integer]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Boolean]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Float]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Number]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[DateTime]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[LocalDateTime]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Time]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Date]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[TimeDelta]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Dict]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Url]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Email]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[FormattedString]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[UUID]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Decimal]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[String]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Integer]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Boolean]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Float]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Number]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[DateTime]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[LocalDateTime]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Time]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Date]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[TimeDelta]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Dict]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Url]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Email]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[FormattedString]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[UUID]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Decimal]",
"tests/test_deserialization.py::TestDeserializingNone::test_allow_none_is_true_if_missing_is_true",
"tests/test_deserialization.py::TestDeserializingNone::test_list_field_deserialize_none_to_empty_list",
"tests/test_deserialization.py::TestFieldDeserialization::test_float_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_float_field_deserialization[bad]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_float_field_deserialization[]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_float_field_deserialization[in_val2]",
"tests/test_deserialization.py::TestFieldDeserialization::test_integer_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_strict_integer_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_decimal_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_decimal_field_with_places",
"tests/test_deserialization.py::TestFieldDeserialization::test_decimal_field_with_places_and_rounding",
"tests/test_deserialization.py::TestFieldDeserialization::test_decimal_field_deserialization_string",
"tests/test_deserialization.py::TestFieldDeserialization::test_decimal_field_special_values",
"tests/test_deserialization.py::TestFieldDeserialization::test_decimal_field_special_values_not_permitted",
"tests/test_deserialization.py::TestFieldDeserialization::test_string_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_boolean_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_boolean_field_deserialization_with_custom_truthy_values",
"tests/test_deserialization.py::TestFieldDeserialization::test_boolean_field_deserialization_with_custom_truthy_values_invalid[notvalid]",
"tests/test_deserialization.py::TestFieldDeserialization::test_boolean_field_deserialization_with_custom_truthy_values_invalid[123]",
"tests/test_deserialization.py::TestFieldDeserialization::test_boolean_field_deserialization_with_empty_truthy",
"tests/test_deserialization.py::TestFieldDeserialization::test_boolean_field_deserialization_with_custom_falsy_values",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_datetime_deserialization[not-a-datetime]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_datetime_deserialization[42]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_datetime_deserialization[]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_datetime_deserialization[in_value3]",
"tests/test_deserialization.py::TestFieldDeserialization::test_datetime_passed_year_is_invalid",
"tests/test_deserialization.py::TestFieldDeserialization::test_datetime_passed_date_is_invalid",
"tests/test_deserialization.py::TestFieldDeserialization::test_custom_date_format_datetime_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_rfc_datetime_field_deserialization[rfc]",
"tests/test_deserialization.py::TestFieldDeserialization::test_rfc_datetime_field_deserialization[rfc822]",
"tests/test_deserialization.py::TestFieldDeserialization::test_iso_datetime_field_deserialization[iso]",
"tests/test_deserialization.py::TestFieldDeserialization::test_iso_datetime_field_deserialization[iso8601]",
"tests/test_deserialization.py::TestFieldDeserialization::test_localdatetime_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_time_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_time_field_deserialization[badvalue]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_time_field_deserialization[]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_time_field_deserialization[in_data2]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_time_field_deserialization[42]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_timedelta_precision",
"tests/test_deserialization.py::TestFieldDeserialization::test_timedelta_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_timedelta_field_deserialization[]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_timedelta_field_deserialization[badvalue]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_timedelta_field_deserialization[in_value2]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_timedelta_field_deserialization[9999999999]",
"tests/test_deserialization.py::TestFieldDeserialization::test_date_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_date_field_deserialization[]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_date_field_deserialization[123]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_date_field_deserialization[in_value2]",
"tests/test_deserialization.py::TestFieldDeserialization::test_dict_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_url_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_relative_url_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_url_field_schemes_argument",
"tests/test_deserialization.py::TestFieldDeserialization::test_email_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_function_field_deserialization_is_noop_by_default",
"tests/test_deserialization.py::TestFieldDeserialization::test_function_field_deserialization_with_callable",
"tests/test_deserialization.py::TestFieldDeserialization::test_function_field_deserialization_with_context",
"tests/test_deserialization.py::TestFieldDeserialization::test_function_field_passed_deserialize_only_is_load_only",
"tests/test_deserialization.py::TestFieldDeserialization::test_function_field_passed_deserialize_and_serialize_is_not_load_only",
"tests/test_deserialization.py::TestFieldDeserialization::test_uuid_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_uuid_deserialization[malformed]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_uuid_deserialization[123]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_uuid_deserialization[in_value2]",
"tests/test_deserialization.py::TestFieldDeserialization::test_deserialization_function_must_be_callable",
"tests/test_deserialization.py::TestFieldDeserialization::test_method_field_deserialization_is_noop_by_default",
"tests/test_deserialization.py::TestFieldDeserialization::test_deserialization_method",
"tests/test_deserialization.py::TestFieldDeserialization::test_deserialization_method_must_be_a_method",
"tests/test_deserialization.py::TestFieldDeserialization::test_method_field_deserialize_only",
"tests/test_deserialization.py::TestFieldDeserialization::test_datetime_list_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_list_field_deserialize_invalid_item",
"tests/test_deserialization.py::TestFieldDeserialization::test_list_field_deserialize_multiple_invalid_items",
"tests/test_deserialization.py::TestFieldDeserialization::test_list_field_deserialize_value_that_is_not_a_list[notalist]",
"tests/test_deserialization.py::TestFieldDeserialization::test_list_field_deserialize_value_that_is_not_a_list[42]",
"tests/test_deserialization.py::TestFieldDeserialization::test_list_field_deserialize_value_that_is_not_a_list[value2]",
"tests/test_deserialization.py::TestFieldDeserialization::test_constant_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_constant_is_always_included_in_deserialized_data",
"tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_user_validator_function",
"tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_user_validator_class_that_returns_bool",
"tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_user_validator_that_raises_error_with_list",
"tests/test_deserialization.py::TestFieldDeserialization::test_validator_must_return_false_to_raise_error",
"tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_validator_with_nonascii_input",
"tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_user_validators",
"tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_custom_error_message",
"tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_non_utf8_value",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_to_dict",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_missing_values",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_many",
"tests/test_deserialization.py::TestSchemaDeserialization::test_exclude",
"tests/test_deserialization.py::TestSchemaDeserialization::test_nested_single_deserialization_to_dict",
"tests/test_deserialization.py::TestSchemaDeserialization::test_nested_list_deserialization_to_dict",
"tests/test_deserialization.py::TestSchemaDeserialization::test_nested_single_none_not_allowed",
"tests/test_deserialization.py::TestSchemaDeserialization::test_nested_many_non_not_allowed",
"tests/test_deserialization.py::TestSchemaDeserialization::test_nested_single_required_missing",
"tests/test_deserialization.py::TestSchemaDeserialization::test_nested_many_required_missing",
"tests/test_deserialization.py::TestSchemaDeserialization::test_nested_only_basestring",
"tests/test_deserialization.py::TestSchemaDeserialization::test_nested_only_basestring_with_list_data",
"tests/test_deserialization.py::TestSchemaDeserialization::test_none_deserialization",
"tests/test_deserialization.py::TestSchemaDeserialization::test_nested_none_deserialization",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_attribute_param",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_attribute_param_symmetry",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_attribute_param_error_returns_field_name_not_attribute_name",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_attribute_param_error_returns_load_from_not_attribute_name",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_load_from_param",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_dump_only_param",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_missing_param_value",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_missing_param_callable",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_missing_param_none",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialization_returns_errors",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialization_returns_errors_with_multiple_validators",
"tests/test_deserialization.py::TestSchemaDeserialization::test_strict_mode_deserialization",
"tests/test_deserialization.py::TestSchemaDeserialization::test_strict_mode_many",
"tests/test_deserialization.py::TestSchemaDeserialization::test_strict_mode_deserialization_with_multiple_validators",
"tests/test_deserialization.py::TestSchemaDeserialization::test_uncaught_validation_errors_are_stored",
"tests/test_deserialization.py::TestSchemaDeserialization::test_multiple_errors_can_be_stored_for_a_field",
"tests/test_deserialization.py::TestSchemaDeserialization::test_multiple_errors_can_be_stored_for_an_email_field",
"tests/test_deserialization.py::TestSchemaDeserialization::test_multiple_errors_can_be_stored_for_a_url_field",
"tests/test_deserialization.py::TestSchemaDeserialization::test_required_value_only_passed_to_validators_if_provided",
"tests/test_deserialization.py::TestSchemaDeserialization::test_partial_deserialization[True]",
"tests/test_deserialization.py::TestSchemaDeserialization::test_partial_deserialization[False]",
"tests/test_deserialization.py::TestSchemaDeserialization::test_partial_fields_deserialization",
"tests/test_deserialization.py::TestSchemaDeserialization::test_partial_fields_validation",
"tests/test_deserialization.py::TestValidation::test_integer_with_validator",
"tests/test_deserialization.py::TestValidation::test_integer_with_validators[field0]",
"tests/test_deserialization.py::TestValidation::test_integer_with_validators[field1]",
"tests/test_deserialization.py::TestValidation::test_integer_with_validators[field2]",
"tests/test_deserialization.py::TestValidation::test_float_with_validators[field0]",
"tests/test_deserialization.py::TestValidation::test_float_with_validators[field1]",
"tests/test_deserialization.py::TestValidation::test_float_with_validators[field2]",
"tests/test_deserialization.py::TestValidation::test_string_validator",
"tests/test_deserialization.py::TestValidation::test_function_validator",
"tests/test_deserialization.py::TestValidation::test_function_validators[field0]",
"tests/test_deserialization.py::TestValidation::test_function_validators[field1]",
"tests/test_deserialization.py::TestValidation::test_function_validators[field2]",
"tests/test_deserialization.py::TestValidation::test_method_validator",
"tests/test_deserialization.py::TestValidation::test_nested_data_is_stored_when_validation_fails",
"tests/test_deserialization.py::TestValidation::test_false_value_validation",
"tests/test_deserialization.py::test_required_field_failure[String]",
"tests/test_deserialization.py::test_required_field_failure[Integer]",
"tests/test_deserialization.py::test_required_field_failure[Boolean]",
"tests/test_deserialization.py::test_required_field_failure[Float]",
"tests/test_deserialization.py::test_required_field_failure[Number]",
"tests/test_deserialization.py::test_required_field_failure[DateTime]",
"tests/test_deserialization.py::test_required_field_failure[LocalDateTime]",
"tests/test_deserialization.py::test_required_field_failure[Time]",
"tests/test_deserialization.py::test_required_field_failure[Date]",
"tests/test_deserialization.py::test_required_field_failure[TimeDelta]",
"tests/test_deserialization.py::test_required_field_failure[Dict]",
"tests/test_deserialization.py::test_required_field_failure[Url]",
"tests/test_deserialization.py::test_required_field_failure[Email]",
"tests/test_deserialization.py::test_required_field_failure[UUID]",
"tests/test_deserialization.py::test_required_field_failure[Decimal]",
"tests/test_deserialization.py::test_required_message_can_be_changed[My",
"tests/test_deserialization.py::test_required_message_can_be_changed[message1]",
"tests/test_deserialization.py::test_required_message_can_be_changed[message2]",
"tests/test_deserialization.py::test_deserialize_doesnt_raise_exception_if_strict_is_false_and_input_type_is_incorrect",
"tests/test_deserialization.py::test_deserialize_raises_exception_if_strict_is_true_and_input_type_is_incorrect",
"tests/test_serialization.py::TestFieldSerialization::test_default",
"tests/test_serialization.py::TestFieldSerialization::test_number[42-42.0]",
"tests/test_serialization.py::TestFieldSerialization::test_number[0-0.0]",
"tests/test_serialization.py::TestFieldSerialization::test_number[None-None]",
"tests/test_serialization.py::TestFieldSerialization::test_number_as_string",
"tests/test_serialization.py::TestFieldSerialization::test_number_as_string_passed_none",
"tests/test_serialization.py::TestFieldSerialization::test_callable_default",
"tests/test_serialization.py::TestFieldSerialization::test_function_field_passed_func",
"tests/test_serialization.py::TestFieldSerialization::test_function_field_passed_serialize_only_is_dump_only",
"tests/test_serialization.py::TestFieldSerialization::test_function_field_passed_deserialize_and_serialize_is_not_dump_only",
"tests/test_serialization.py::TestFieldSerialization::test_function_field_passed_serialize",
"tests/test_serialization.py::TestFieldSerialization::test_function_field_does_not_swallow_attribute_error",
"tests/test_serialization.py::TestFieldSerialization::test_function_field_load_only",
"tests/test_serialization.py::TestFieldSerialization::test_function_field_passed_serialize_with_context",
"tests/test_serialization.py::TestFieldSerialization::test_function_field_passed_uncallable_object",
"tests/test_serialization.py::TestFieldSerialization::test_integer_field",
"tests/test_serialization.py::TestFieldSerialization::test_integer_as_string_field",
"tests/test_serialization.py::TestFieldSerialization::test_integer_field_default",
"tests/test_serialization.py::TestFieldSerialization::test_integer_field_default_set_to_none",
"tests/test_serialization.py::TestFieldSerialization::test_uuid_field",
"tests/test_serialization.py::TestFieldSerialization::test_decimal_field",
"tests/test_serialization.py::TestFieldSerialization::test_decimal_field_string",
"tests/test_serialization.py::TestFieldSerialization::test_decimal_field_special_values",
"tests/test_serialization.py::TestFieldSerialization::test_decimal_field_special_values_not_permitted",
"tests/test_serialization.py::TestFieldSerialization::test_decimal_field_fixed_point_representation",
"tests/test_serialization.py::TestFieldSerialization::test_boolean_field_serialization",
"tests/test_serialization.py::TestFieldSerialization::test_function_with_uncallable_param",
"tests/test_serialization.py::TestFieldSerialization::test_email_field_validates",
"tests/test_serialization.py::TestFieldSerialization::test_email_field_serialize_none",
"tests/test_serialization.py::TestFieldSerialization::test_dict_field_serialize_none",
"tests/test_serialization.py::TestFieldSerialization::test_dict_field_invalid_dict_but_okay",
"tests/test_serialization.py::TestFieldSerialization::test_dict_field_serialize",
"tests/test_serialization.py::TestFieldSerialization::test_dict_field_serialize_ordereddict",
"tests/test_serialization.py::TestFieldSerialization::test_structured_dict_value_serialize",
"tests/test_serialization.py::TestFieldSerialization::test_url_field_serialize_none",
"tests/test_serialization.py::TestFieldSerialization::test_url_field_validates",
"tests/test_serialization.py::TestFieldSerialization::test_method_field_with_method_missing",
"tests/test_serialization.py::TestFieldSerialization::test_method_field_passed_serialize_only_is_dump_only",
"tests/test_serialization.py::TestFieldSerialization::test_method_field_passed_deserialize_only_is_load_only",
"tests/test_serialization.py::TestFieldSerialization::test_method_field_with_uncallable_attribute",
"tests/test_serialization.py::TestFieldSerialization::test_method_field_does_not_swallow_attribute_error",
"tests/test_serialization.py::TestFieldSerialization::test_method_with_no_serialize_is_missing",
"tests/test_serialization.py::TestFieldSerialization::test_serialize_with_dump_to_param",
"tests/test_serialization.py::TestFieldSerialization::test_serialize_with_attribute_and_dump_to_uses_dump_to",
"tests/test_serialization.py::TestFieldSerialization::test_datetime_serializes_to_iso_by_default",
"tests/test_serialization.py::TestFieldSerialization::test_datetime_invalid_serialization[invalid]",
"tests/test_serialization.py::TestFieldSerialization::test_datetime_invalid_serialization[value1]",
"tests/test_serialization.py::TestFieldSerialization::test_datetime_invalid_serialization[24]",
"tests/test_serialization.py::TestFieldSerialization::test_datetime_field_rfc822[rfc]",
"tests/test_serialization.py::TestFieldSerialization::test_datetime_field_rfc822[rfc822]",
"tests/test_serialization.py::TestFieldSerialization::test_localdatetime_rfc_field",
"tests/test_serialization.py::TestFieldSerialization::test_datetime_iso8601[iso]",
"tests/test_serialization.py::TestFieldSerialization::test_datetime_iso8601[iso8601]",
"tests/test_serialization.py::TestFieldSerialization::test_localdatetime_iso",
"tests/test_serialization.py::TestFieldSerialization::test_datetime_format",
"tests/test_serialization.py::TestFieldSerialization::test_string_field",
"tests/test_serialization.py::TestFieldSerialization::test_formattedstring_field",
"tests/test_serialization.py::TestFieldSerialization::test_formattedstring_field_on_schema",
"tests/test_serialization.py::TestFieldSerialization::test_string_field_default_to_empty_string",
"tests/test_serialization.py::TestFieldSerialization::test_time_field",
"tests/test_serialization.py::TestFieldSerialization::test_invalid_time_field_serialization[badvalue]",
"tests/test_serialization.py::TestFieldSerialization::test_invalid_time_field_serialization[]",
"tests/test_serialization.py::TestFieldSerialization::test_invalid_time_field_serialization[in_data2]",
"tests/test_serialization.py::TestFieldSerialization::test_invalid_time_field_serialization[42]",
"tests/test_serialization.py::TestFieldSerialization::test_date_field",
"tests/test_serialization.py::TestFieldSerialization::test_invalid_date_field_serialization[badvalue]",
"tests/test_serialization.py::TestFieldSerialization::test_invalid_date_field_serialization[]",
"tests/test_serialization.py::TestFieldSerialization::test_invalid_date_field_serialization[in_data2]",
"tests/test_serialization.py::TestFieldSerialization::test_invalid_date_field_serialization[42]",
"tests/test_serialization.py::TestFieldSerialization::test_timedelta_field",
"tests/test_serialization.py::TestFieldSerialization::test_datetime_list_field",
"tests/test_serialization.py::TestFieldSerialization::test_list_field_with_error",
"tests/test_serialization.py::TestFieldSerialization::test_datetime_list_serialize_single_value",
"tests/test_serialization.py::TestFieldSerialization::test_list_field_serialize_none_returns_none",
"tests/test_serialization.py::TestFieldSerialization::test_list_field_respect_inner_attribute",
"tests/test_serialization.py::TestFieldSerialization::test_list_field_respect_inner_attribute_single_value",
"tests/test_serialization.py::TestFieldSerialization::test_list_field_work_with_generator_single_value",
"tests/test_serialization.py::TestFieldSerialization::test_list_field_work_with_generators_multiple_values",
"tests/test_serialization.py::TestFieldSerialization::test_list_field_work_with_generators_error",
"tests/test_serialization.py::TestFieldSerialization::test_list_field_work_with_generators_empty_generator_returns_none_for_every_non_returning_yield_statement",
"tests/test_serialization.py::TestFieldSerialization::test_list_field_work_with_set",
"tests/test_serialization.py::TestFieldSerialization::test_list_field_work_with_custom_class_with_iterator_protocol",
"tests/test_serialization.py::TestFieldSerialization::test_bad_list_field",
"tests/test_serialization.py::TestFieldSerialization::test_serialize_does_not_apply_validators",
"tests/test_serialization.py::TestFieldSerialization::test_constant_field_serialization",
"tests/test_serialization.py::TestFieldSerialization::test_constant_is_always_included_in_serialized_data",
"tests/test_serialization.py::TestFieldSerialization::test_constant_field_serialize_when_omitted",
"tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[String]",
"tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Integer]",
"tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Boolean]",
"tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Float]",
"tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Number]",
"tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[DateTime]",
"tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[LocalDateTime]",
"tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Time]",
"tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Date]",
"tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[TimeDelta]",
"tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Dict]",
"tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Url]",
"tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Email]",
"tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[FormattedString]",
"tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[UUID]",
"tests/test_serialization.py::TestFieldSerialization::test_all_fields_serialize_none_to_none[Decimal]",
"tests/test_serialization.py::test_serializing_named_tuple",
"tests/test_serialization.py::test_serializing_named_tuple_with_meta",
"tests/test_serialization.py::test_serializing_slice"
]
| []
| MIT License | 1,943 | [
"marshmallow/fields.py"
]
| [
"marshmallow/fields.py"
]
|
|
cevoaustralia__aws-google-auth-34 | eb991c78bf4e96c30add65f0520b281f916a31ad | 2017-12-08 18:11:53 | d473d67b0772700942f5bb0db3522af0a1005453 | diff --git a/README.rst b/README.rst
index fe68570..bac4d06 100644
--- a/README.rst
+++ b/README.rst
@@ -110,7 +110,8 @@ Usage
-d DURATION, --duration DURATION
Credential duration ($DURATION)
-p PROFILE, --profile PROFILE
- AWS profile ($AWS_PROFILE)
+ AWS profile (defaults to value of $AWS_PROFILE,
+ then falls back to 'sts')
-a ASK_ROLE, --ask-role ASK_ROLE
Set true to always pick the role
-r AWS_ROLE_ARN, --role-arn AWS_ROLE_ARN
diff --git a/aws_google_auth/__init__.py b/aws_google_auth/__init__.py
index 7b35df2..2d3bf2e 100644
--- a/aws_google_auth/__init__.py
+++ b/aws_google_auth/__init__.py
@@ -460,9 +460,8 @@ def cli(cli_args):
DurationSeconds=config.duration)
print("Credentials Expiration: " + format(token['Credentials']['Expiration'].astimezone(get_localzone())))
- if config.profile is None:
- print_exports(token)
+ print_exports(token)
_store(config, token)
@@ -482,6 +481,7 @@ def print_exports(token):
def _store(config, aws_session_token):
def store_config(profile, config_location, storer):
+ assert (profile is not None), "Can not store config/credentials if the AWS_PROFILE is None."
config_file = configparser.RawConfigParser()
config_file.read(config_location)
diff --git a/aws_google_auth/prepare.py b/aws_google_auth/prepare.py
index 5dca8df..bb86ad8 100644
--- a/aws_google_auth/prepare.py
+++ b/aws_google_auth/prepare.py
@@ -18,7 +18,8 @@ def get_prepared_config(
def default_if_none(value, default):
return value if value is not None else default
- google_config.profile = default_if_none(profile, google_config.profile)
+ # If no profile is specified, default to "sts" so we don't clobber the user's default.
+ google_config.profile = default_if_none(profile, google_config.profile) or "sts"
_create_base_aws_cli_config_files_if_needed(google_config)
if google_config.profile is not None:
| Running twice results in a malformed credentials file
On Ubuntu with version 0.0.12, two runs of aws-google-auth result in a ~/.aws/credentials file containing two "[None]" sections. The config parser of aws-cli then throws configparser.DuplicateSectionError (a minimal reproduction is sketched after the test patch below). | cevoaustralia/aws-google-auth | diff --git a/aws_google_auth/tests/test_persist_profile.py b/aws_google_auth/tests/test_persist_profile.py
index 19d3f43..feb8156 100644
--- a/aws_google_auth/tests/test_persist_profile.py
+++ b/aws_google_auth/tests/test_persist_profile.py
@@ -107,5 +107,5 @@ class TestPersistConfig(unittest.TestCase):
self.assertEquals(config.google_sp_id, None)
self.assertEquals(config.duration, 3600)
self.assertEquals(config.ask_role, False)
- self.assertEquals(config.profile, None)
+ self.assertEquals(config.profile, "sts")
self.assertEquals(config.role_arn, None)
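To make the failure mode from the issue concrete, the error can be reproduced with the standard library alone. A minimal sketch (the key values are illustrative):

```python
import configparser

# Two identical "[None]" section headers, as left behind by repeated
# runs of aws-google-auth before this fix.
CREDENTIALS = """\
[None]
aws_access_key_id = EXAMPLEKEYONE

[None]
aws_access_key_id = EXAMPLEKEYTWO
"""

parser = configparser.RawConfigParser()
try:
    # RawConfigParser is strict by default in Python 3, so a repeated
    # section name raises DuplicateSectionError while reading.
    parser.read_string(CREDENTIALS)
except configparser.DuplicateSectionError as err:
    print(err)  # e.g. "... section 'None' already exists"
```

Defaulting the profile to "sts" (as the patch does) gives the stored section a real name, so repeated runs overwrite the same section instead of appending another "[None]" header.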
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 3
} | 0.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/cevoaustralia/aws-google-auth.git@eb991c78bf4e96c30add65f0520b281f916a31ad#egg=aws_google_auth
beautifulsoup4==4.13.3
boto3==1.37.23
botocore==1.37.23
certifi==2025.1.31
charset-normalizer==3.4.1
configparser==7.2.0
exceptiongroup==1.2.2
idna==3.10
iniconfig==2.1.0
jmespath==1.0.1
lxml==5.3.1
mock==5.2.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
python-dateutil==2.9.0.post0
requests==2.32.3
s3transfer==0.11.4
six==1.17.0
soupsieve==2.6
tomli==2.2.1
typing_extensions==4.13.0
tzlocal==5.3.1
urllib3==1.26.20
| name: aws-google-auth
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- beautifulsoup4==4.13.3
- boto3==1.37.23
- botocore==1.37.23
- certifi==2025.1.31
- charset-normalizer==3.4.1
- configparser==7.2.0
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- jmespath==1.0.1
- lxml==5.3.1
- mock==5.2.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- requests==2.32.3
- s3transfer==0.11.4
- six==1.17.0
- soupsieve==2.6
- tomli==2.2.1
- typing-extensions==4.13.0
- tzlocal==5.3.1
- urllib3==1.26.20
prefix: /opt/conda/envs/aws-google-auth
| [
"aws_google_auth/tests/test_persist_profile.py::TestPersistConfig::test_when_there_is_no_profile_use_default_values"
]
| []
| [
"aws_google_auth/tests/test_persist_profile.py::TestPersistConfig::test_when_there_is_no_profile_use_supplied_values"
]
| []
| MIT License | 1,944 | [
"README.rst",
"aws_google_auth/prepare.py",
"aws_google_auth/__init__.py"
]
| [
"README.rst",
"aws_google_auth/prepare.py",
"aws_google_auth/__init__.py"
]
|
|
springload__draftjs_exporter-84 | 13e1d0e7f159abcdc2e547806fe69cac1cf15e1a | 2017-12-08 22:35:30 | 7acd6218f1a8460efd67965bb227dca16cf65bf0 | thibaudcolas: @loicteixeira up | diff --git a/README.rst b/README.rst
index b91b22c..94898d6 100644
--- a/README.rst
+++ b/README.rst
@@ -105,13 +105,13 @@ The exporter output is extensively configurable to cater for varied rich text re
'wrapper_props': {'class': 'bullet-list'},
},
# Use a custom component for more flexibility (reading block data or depth).
- BLOCK_TYPES.BLOCKQUOTE: Blockquote,
+ BLOCK_TYPES.BLOCKQUOTE: blockquote,
BLOCK_TYPES.ORDERED_LIST_ITEM: {
- 'element': ListItem,
- 'wrapper': OrderedList,
+ 'element': list_item,
+ 'wrapper': ordered_list,
},
# Provide a fallback component (advanced).
- BLOCK_TYPES.FALLBACK: BlockFallback
+ BLOCK_TYPES.FALLBACK: block_fallback
}),
# `style_map` defines the HTML representation of inline elements.
# Extend STYLE_MAP to start with sane defaults, or make your own from scratch.
@@ -123,25 +123,33 @@ The exporter output is extensively configurable to cater for varied rich text re
}),
'entity_decorators': {
# Map entities to components so they can be rendered with their data.
- ENTITY_TYPES.IMAGE: Image,
- # Components can be defined as classes to receive extra parameters.
- ENTITY_TYPES.LINK: Link(use_new_window=True),
+ ENTITY_TYPES.IMAGE: image,
+ ENTITY_TYPES.LINK: link
# Lambdas work too.
ENTITY_TYPES.HORIZONTAL_RULE: lambda props: DOM.create_element('hr'),
# Discard those entities.
ENTITY_TYPES.EMBED: None,
# Provide a fallback component (advanced).
- ENTITY_TYPES.FALLBACK: EntityFallback,
+ ENTITY_TYPES.FALLBACK: entity_fallback,
},
'composite_decorators': [
# Use composite decorators to replace text based on a regular expression.
- BR,
- Hashtag,
- Linkify,
+ {
+ 'strategy': re.compile(r'\n'),
+ 'component': br,
+ },
+ {
+ 'strategy': re.compile(r'#\w+'),
+ 'component': hashtag,
+ },
+ {
+ 'strategy': LINKIFY_RE,
+ 'component': linkify,
+ },
],
}
-See ``examples.py`` in the repository for more details.
+See `examples.py <https://github.com/springload/draftjs_exporter/blob/master/example.py>`_ for more details.
Advanced usage
--------------
@@ -149,7 +157,7 @@ Advanced usage
Custom components
~~~~~~~~~~~~~~~~~
-To produce arbitrary markup with dynamic data, draftjs_exporter comes with an API to create rendering components. This API mirrors React's `createElement <https://facebook.github.io/react/docs/top-level-api.html#react.createelement>`_ API (what JSX compiles to).
+To generate arbitrary markup with dynamic data, draftjs_exporter comes with an API to create rendering components. This API mirrors React's `createElement <https://facebook.github.io/react/docs/top-level-api.html#react.createelement>`_ API (what JSX compiles to).
.. code:: python
@@ -158,7 +166,7 @@ To produce arbitrary markup with dynamic data, draftjs_exporter comes with an AP
# Components are simple functions that take `props` as parameter and return DOM elements.
- def Image(props):
+ def image(props):
# This component creates an image element, with the relevant attributes.
return DOM.create_element('img', {
'src': props.get('src'),
@@ -168,7 +176,7 @@ To produce arbitrary markup with dynamic data, draftjs_exporter comes with an AP
})
- def Blockquote(props):
+ def blockquote(props):
# This component uses block data to render a blockquote.
block_data = props['block']['data']
@@ -178,20 +186,20 @@ To produce arbitrary markup with dynamic data, draftjs_exporter comes with an AP
}, props['children'])
- class Button:
- def render(self, props):
- href = props.get('href', '#')
- icon = props.get('icon', None)
- text = props.get('text', '')
+ def button(props):
+ href = props.get('href', '#')
+ icon_name = props.get('icon', None)
+ text = props.get('text', '')
+ return DOM.create_element('a', {
+ 'class': 'icon-text' if icon_name else None,
+ 'href': href,
+ },
# There can be as many `children` as required.
# It is also possible to reuse other components and render them instead of HTML tags.
- return DOM.create_element(
- 'a',
- {'class': 'icon-text' if icon else None, 'href': href},
- DOM.create_element(Icon, {'name': icon}) if icon else None,
- DOM.create_element('span', {'class': 'icon-text__text'}, text) if icon else text
- )
+ DOM.create_element(icon, {'name': icon_name}) if icon_name else None,
+ DOM.create_element('span', {'class': 'icon-text'}, text) if icon_name else text
+ )
Apart from ``create_element``, a ``parse_html`` method is also available. Use it to interface with other HTML generators, like template engines.
@@ -211,7 +219,7 @@ Add the following to the exporter config,
config = {
'block_map': dict(BLOCK_MAP, **{
# Provide a fallback for block types.
- BLOCK_TYPES.FALLBACK: BlockFallback
+ BLOCK_TYPES.FALLBACK: block_fallback
}),
}
@@ -219,7 +227,7 @@ This fallback component can now control the exporter behavior when normal compon
.. code:: python
- def BlockFallback(props):
+ def block_fallback(props):
type_ = props['block']['type']
if type_ == 'example-discard':
diff --git a/benchmark.py b/benchmark.py
index a9714de..40ae663 100644
--- a/benchmark.py
+++ b/benchmark.py
@@ -5,6 +5,7 @@ from __future__ import absolute_import, unicode_literals
import cProfile
import logging
import os
+import re
from pstats import Stats
from memory_profiler import profile
@@ -15,23 +16,23 @@ from draftjs_exporter.html import HTML
from markov_draftjs import get_content_sample
-from example import ListItem, OrderedList, Image, BR, EntityFallback
+from example import list_item, ordered_list, image, br, entity_fallback
-def Document(props):
+def document(props):
return DOM.create_element('a', {
'title': props.get('label'),
'href': '/documents/%s' % props.get('id'),
}, props['children'])
-def Link(props):
+def link(props):
return DOM.create_element('a', {
'href': props['url'],
}, props['children'])
-def BlockFallback(props):
+def block_fallback(props):
type_ = props['block']['type']
logging.warn('Missing config for "%s".' % type_)
@@ -48,22 +49,25 @@ config = {
'wrapper_props': {'class': 'bullet-list'},
},
BLOCK_TYPES.ORDERED_LIST_ITEM: {
- 'element': ListItem,
- 'wrapper': OrderedList,
+ 'element': list_item,
+ 'wrapper': ordered_list,
},
- BLOCK_TYPES.FALLBACK: BlockFallback
+ BLOCK_TYPES.FALLBACK: block_fallback
}),
'style_map': STYLE_MAP,
'entity_decorators': {
- ENTITY_TYPES.IMAGE: Image,
- ENTITY_TYPES.LINK: Link,
- ENTITY_TYPES.DOCUMENT: Document,
+ ENTITY_TYPES.IMAGE: image,
+ ENTITY_TYPES.LINK: link,
+ ENTITY_TYPES.DOCUMENT: document,
ENTITY_TYPES.HORIZONTAL_RULE: lambda props: DOM.create_element('hr'),
ENTITY_TYPES.EMBED: None,
- ENTITY_TYPES.FALLBACK: EntityFallback,
+ ENTITY_TYPES.FALLBACK: entity_fallback,
},
'composite_decorators': [
- BR,
+ {
+ 'strategy': re.compile(r'\n'),
+ 'component': br,
+ }
],
'engine': 'string',
}
diff --git a/docs/example.md b/docs/example.md
index 299010b..c0fedf4 100644
--- a/docs/example.md
+++ b/docs/example.md
@@ -4,7 +4,7 @@
-----
<h2>
draftjs_exporter is an HTML exporter for
- <a href="https://github.com/facebook/draft-js" rel="noreferrer noopener" target="_blank">
+ <a href="https://github.com/facebook/draft-js">
Draft.js
</a>
content
@@ -17,7 +17,7 @@
</h3>
<p>
The exporter aims to provide sensible defaults from basic block types and inline styles to HTML, that can easily be customised when required. For more advanced scenarios, an API is provided (mimicking React's
- <a href="https://facebook.github.io/react/docs/top-level-api.html#react.createelement" rel="noreferrer noopener" target="_blank">
+ <a href="https://facebook.github.io/react/docs/top-level-api.html#react.createelement">
<code>
createElement
</code>
@@ -39,7 +39,7 @@
</li>
<li>
Automatic conversion of entity data to HTML attributes (int & boolean to string,
- <a href="https://facebook.github.io/react/docs/jsx-in-depth.html" rel="noreferrer noopener" target="_blank">
+ <a href="https://facebook.github.io/react/docs/jsx-in-depth.html">
<code>
style object
</code>
@@ -120,7 +120,7 @@
#hashtag
</span>
support via
- <a href="https://github.com/springload/draftjs_exporter/pull/17" rel="noreferrer noopener" target="_blank">
+ <a href="https://github.com/springload/draftjs_exporter/pull/17">
#CompositeDecorators
</a>
.
@@ -177,7 +177,7 @@
</ol>
</li>
</ol>
- <pre><code>def Blockquote(props):
+ <pre><code>def blockquote(props):
block_data = props['block']['data']
return DOM.create_element('blockquote', {
'cite': block_data.get('cite')
diff --git a/draftjs_exporter/composite_decorators.py b/draftjs_exporter/composite_decorators.py
index e8c8053..8080fd5 100644
--- a/draftjs_exporter/composite_decorators.py
+++ b/draftjs_exporter/composite_decorators.py
@@ -9,7 +9,7 @@ def get_decorations(decorators, text):
decorations = []
for decorator in decorators:
- for match in decorator.SEARCH_RE.finditer(text):
+ for match in decorator['strategy'].finditer(text):
begin, end = match.span()
if not any(occupied.get(i) for i in range(begin, end)):
for i in range(begin, end):
@@ -29,7 +29,7 @@ def apply_decorators(decorators, text, block):
if pointer < begin:
yield text[pointer:begin]
- yield DOM.create_element(decorator, {
+ yield DOM.create_element(decorator['component'], {
'match': match,
'block': {
'type': block['type'],
diff --git a/draftjs_exporter/defaults.py b/draftjs_exporter/defaults.py
index 89514f8..045ba56 100644
--- a/draftjs_exporter/defaults.py
+++ b/draftjs_exporter/defaults.py
@@ -12,7 +12,7 @@ def render_children(props):
return props['children']
-def CodeBlock(props):
+def code_block(props):
return DOM.create_element('pre', {}, DOM.create_element('code', {}, props['children']))
@@ -29,7 +29,7 @@ BLOCK_MAP = {
BLOCK_TYPES.ORDERED_LIST_ITEM: {'element': 'li', 'wrapper': 'ol'},
BLOCK_TYPES.BLOCKQUOTE: 'blockquote',
BLOCK_TYPES.PRE: 'pre',
- BLOCK_TYPES.CODE: CodeBlock,
+ BLOCK_TYPES.CODE: code_block,
BLOCK_TYPES.ATOMIC: render_children,
}
diff --git a/draftjs_exporter/dom.py b/draftjs_exporter/dom.py
index 406667b..e3b9424 100644
--- a/draftjs_exporter/dom.py
+++ b/draftjs_exporter/dom.py
@@ -80,13 +80,7 @@ class DOM(object):
# The children prop is the first child if there is only one.
props['children'] = children[0] if len(children) == 1 else children
- if inspect.isclass(type_):
- # Class component, not instantiated.
- elt = type_().render(props)
- elif callable(getattr(type_, 'render', None)):
- # Class component, already instantiated.
- elt = type_.render(props)
- elif callable(type_):
+ if callable(type_):
# Function component, via def or lambda.
elt = type_(props)
else:
diff --git a/example.py b/example.py
index 03684a1..bd4f657 100644
--- a/example.py
+++ b/example.py
@@ -17,7 +17,7 @@ from draftjs_exporter.dom import DOM
from draftjs_exporter.html import HTML
-def Blockquote(props):
+def blockquote(props):
block_data = props['block']['data']
return DOM.create_element('blockquote', {
@@ -25,7 +25,7 @@ def Blockquote(props):
}, props['children'])
-def ListItem(props):
+def list_item(props):
depth = props['block']['depth']
return DOM.create_element('li', {
@@ -33,7 +33,7 @@ def ListItem(props):
}, props['children'])
-def OrderedList(props):
+def ordered_list(props):
depth = props['block']['depth']
return DOM.create_element('ol', {
@@ -41,7 +41,7 @@ def OrderedList(props):
}, props['children'])
-def Image(props):
+def image(props):
return DOM.create_element('img', {
'src': props.get('src'),
'width': props.get('width'),
@@ -50,78 +50,61 @@ def Image(props):
})
-class Link:
- def __init__(self, use_new_window=False):
- self.use_new_window = use_new_window
-
- def render(self, props):
- link_props = {
- 'href': props['url'],
- }
-
- if self.use_new_window:
- link_props['target'] = '_blank'
- link_props['rel'] = 'noreferrer noopener'
-
- return DOM.create_element('a', link_props, props['children'])
+def link(props):
+ return DOM.create_element('a', {
+ 'href': props['url']
+ }, props['children'])
-class BR:
+def br(props):
"""
Replace line breaks (\n) with br tags.
"""
- SEARCH_RE = re.compile(r'\n')
-
- def render(self, props):
- # Do not process matches inside code blocks.
- if props['block']['type'] == BLOCK_TYPES.CODE:
- return props['children']
+ # Do not process matches inside code blocks.
+ if props['block']['type'] == BLOCK_TYPES.CODE:
+ return props['children']
- return DOM.create_element('br')
+ return DOM.create_element('br')
-class Hashtag:
+def hashtag(props):
"""
Wrap hashtags in spans with a specific class.
"""
- SEARCH_RE = re.compile(r'#\w+')
+ # Do not process matches inside code blocks.
+ if props['block']['type'] == BLOCK_TYPES.CODE:
+ return props['children']
- def render(self, props):
- # Do not process matches inside code blocks.
- if props['block']['type'] == BLOCK_TYPES.CODE:
- return props['children']
+ return DOM.create_element('span', {'class': 'hashtag'}, props['children'])
- return DOM.create_element('span', {'class': 'hashtag'}, props['children'])
+# See http://pythex.org/?regex=(http%3A%2F%2F%7Chttps%3A%2F%2F%7Cwww%5C.)(%5Ba-zA-Z0-9%5C.%5C-%25%2F%5C%3F%26_%3D%5C%2B%23%3A~!%2C%5C%27%5C*%5C%5E%24%5D%2B)&test_string=search%20http%3A%2F%2Fa.us%20or%20https%3A%2F%2Fyahoo.com%20or%20www.google.com%20for%20%23github%20and%20%23facebook&ignorecase=0&multiline=0&dotall=0&verbose=0
+LINKIFY_RE = re.compile(r'(http://|https://|www\.)([a-zA-Z0-9\.\-%/\?&_=\+#:~!,\'\*\^$]+)')
-class Linkify:
+
+def linkify(props):
"""
Wrap plain URLs with link tags.
- See http://pythex.org/?regex=(http%3A%2F%2F%7Chttps%3A%2F%2F%7Cwww%5C.)(%5Ba-zA-Z0-9%5C.%5C-%25%2F%5C%3F%26_%3D%5C%2B%23%3A~!%2C%5C%27%5C*%5C%5E%24%5D%2B)&test_string=search%20http%3A%2F%2Fa.us%20or%20https%3A%2F%2Fyahoo.com%20or%20www.google.com%20for%20%23github%20and%20%23facebook&ignorecase=0&multiline=0&dotall=0&verbose=0
- for an example.
"""
- SEARCH_RE = re.compile(r'(http://|https://|www\.)([a-zA-Z0-9\.\-%/\?&_=\+#:~!,\'\*\^$]+)')
-
- def render(self, props):
- match = props['match']
- protocol = match.group(1)
- url = match.group(2)
- href = protocol + url
+ match = props['match']
+ protocol = match.group(1)
+ url = match.group(2)
+ href = protocol + url
- if props['block']['type'] == BLOCK_TYPES.CODE:
- return href
+ if props['block']['type'] == BLOCK_TYPES.CODE:
+ return href
- link_props = {
- 'href': href,
- }
+ link_props = {
+ 'href': href,
+ }
- if href.startswith('www'):
- link_props['href'] = 'http://' + href
+ if href.startswith('www'):
+ link_props['href'] = 'http://' + href
- return DOM.create_element('a', link_props, href)
+ return DOM.create_element('a', link_props, href)
-def BlockFallback(props):
+def block_fallback(props):
type_ = props['block']['type']
if type_ == 'example-discard':
@@ -138,7 +121,7 @@ def BlockFallback(props):
return DOM.create_element('div', {}, props['children'])
-def EntityFallback(props):
+def entity_fallback(props):
type_ = props['entity']['type']
logging.warn('Missing config for "%s".' % type_)
return DOM.create_element('span', {'class': 'missing-entity'}, props['children'])
@@ -160,13 +143,13 @@ if __name__ == '__main__':
'wrapper_props': {'class': 'bullet-list'},
},
# Use a custom component for more flexibility (reading block data or depth).
- BLOCK_TYPES.BLOCKQUOTE: Blockquote,
+ BLOCK_TYPES.BLOCKQUOTE: blockquote,
BLOCK_TYPES.ORDERED_LIST_ITEM: {
- 'element': ListItem,
- 'wrapper': OrderedList,
+ 'element': list_item,
+ 'wrapper': ordered_list,
},
# Provide a fallback component (advanced).
- BLOCK_TYPES.FALLBACK: BlockFallback
+ BLOCK_TYPES.FALLBACK: block_fallback
}),
# `style_map` defines the HTML representation of inline elements.
# Extend STYLE_MAP to start with sane defaults, or make your own from scratch.
@@ -178,21 +161,29 @@ if __name__ == '__main__':
}),
'entity_decorators': {
# Map entities to components so they can be rendered with their data.
- ENTITY_TYPES.IMAGE: Image,
- # Components can be defined as classes to receive extra parameters.
- ENTITY_TYPES.LINK: Link(use_new_window=True),
+ ENTITY_TYPES.IMAGE: image,
+ ENTITY_TYPES.LINK: link,
# Lambdas work too.
ENTITY_TYPES.HORIZONTAL_RULE: lambda props: DOM.create_element('hr'),
# Discard those entities.
ENTITY_TYPES.EMBED: None,
# Provide a fallback component (advanced).
- ENTITY_TYPES.FALLBACK: EntityFallback,
+ ENTITY_TYPES.FALLBACK: entity_fallback,
},
'composite_decorators': [
# Use composite decorators to replace text based on a regular expression.
- BR,
- Hashtag,
- Linkify,
+ {
+ 'strategy': re.compile(r'\n'),
+ 'component': br,
+ },
+ {
+ 'strategy': re.compile(r'#\w+'),
+ 'component': hashtag,
+ },
+ {
+ 'strategy': LINKIFY_RE,
+ 'component': linkify,
+ },
],
# Specify which DOM backing engine to use.
'engine': 'string',
@@ -579,7 +570,7 @@ if __name__ == '__main__':
"data": {}
}, {
"key": "ed7hu",
- "text": "def Blockquote(props):\n block_data = props['block']['data']\n return DOM.create_element('blockquote', {\n 'cite': block_data.get('cite')\n }, props['children'])\n",
+ "text": "def blockquote(props):\n block_data = props['block']['data']\n return DOM.create_element('blockquote', {\n 'cite': block_data.get('cite')\n }, props['children'])\n",
"type": "code-block",
"depth": 0,
"inlineStyleRanges": [],
| Remove support for class-based decorators
Capturing offline discussions for public discussion.
Currently a decorator can be written as a `function` (accepting a single positional argument `props`) or a `class` (with a single `render` method accepting a single positional argument `props`), which has a few issues:
- Nothing is passed to the `__init__` of the `class`; instead everything is passed to `props`, which makes the `class` quite useless. Moving the props to the `__init__` would either create boilerplate or force the user to inherit from a custom class that does it for them. In short, it's not clear what benefit there is to using a class.
- To keep naming consistent, function names are camel cased (so when you update your config file, you don't have to think about whether it's a function or a class), which makes the linter complain. The library should not encourage non-PEP8-compliant code.
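For illustration, here is the same decorator in both styles, adapted from the library's own `example.py`; the function form paired with a `strategy`/`component` dict is the shape the accompanying patch introduces:

```python
import re

from draftjs_exporter.constants import BLOCK_TYPES
from draftjs_exporter.dom import DOM


# Current class style: the regex and the rendering live on the class,
# but nothing is ever passed to __init__.
class BR:
    SEARCH_RE = re.compile(r'\n')

    def render(self, props):
        if props['block']['type'] == BLOCK_TYPES.CODE:
            return props['children']
        return DOM.create_element('br')


# Function style: the component is a plain PEP8-named function...
def br(props):
    if props['block']['type'] == BLOCK_TYPES.CODE:
        return props['children']
    return DOM.create_element('br')


# ...and the regex moves into the config entry next to it.
composite_decorators = [
    {'strategy': re.compile(r'\n'), 'component': br},
]
```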
We were therefore thinking of removing support for class-based decorators. Any thoughts? | springload/draftjs_exporter | diff --git a/tests/test_composite_decorators.py b/tests/test_composite_decorators.py
index 94a8f2f..721f4dd 100644
--- a/tests/test_composite_decorators.py
+++ b/tests/test_composite_decorators.py
@@ -6,127 +6,64 @@ import unittest
from draftjs_exporter.composite_decorators import render_decorators
from draftjs_exporter.constants import BLOCK_TYPES
from draftjs_exporter.dom import DOM
+from example import LINKIFY_RE, br, hashtag, linkify
+BR_DECORATOR = {
+ 'strategy': re.compile(r'\n'),
+ 'component': br,
+}
-class Linkify:
- """
- Wrap plain URLs with link tags.
- See http://pythex.org/?regex=(http%3A%2F%2F%7Chttps%3A%2F%2F%7Cwww%5C.)(%5Ba-zA-Z0-9%5C.%5C-%25%2F%5C%3F%26_%3D%5C%2B%23%3A~!%2C%5C%27%5C*%5C%5E%24%5D%2B)&test_string=search%20http%3A%2F%2Fa.us%20or%20https%3A%2F%2Fyahoo.com%20or%20www.google.com%20for%20%23github%20and%20%23facebook&ignorecase=0&multiline=0&dotall=0&verbose=0
- for an example.
- """
- SEARCH_RE = re.compile(r'(http://|https://|www\.)([a-zA-Z0-9\.\-%/\?&_=\+#:~!,\'\*\^$]+)')
+HASHTAG_DECORATOR = {
+ 'strategy': re.compile(r'#\w+'),
+ 'component': hashtag,
+}
- def __init__(self, use_new_window=False):
- self.use_new_window = use_new_window
-
- def render(self, props):
- match = props['match']
- protocol = match.group(1)
- url = match.group(2)
- href = protocol + url
-
- if props['block']['type'] == BLOCK_TYPES.CODE:
- return href
-
- link_props = {
- 'href': href,
- }
-
- if self.use_new_window:
- link_props['target'] = '_blank'
- link_props['rel'] = 'noreferrer noopener'
-
- if href.startswith('www'):
- link_props['href'] = 'http://' + href
-
- return DOM.create_element('a', link_props, href)
-
-
-class Hashtag:
- """
- Wrap hashtags in spans with a specific class.
- """
- SEARCH_RE = re.compile(r'#\w+')
-
- def render(self, props):
- # Do not process matches inside code blocks.
- if props['block']['type'] == BLOCK_TYPES.CODE:
- return props['children']
-
- return DOM.create_element('span', {'class': 'hashtag'}, props['children'])
-
-
-class BR:
- """
- Replace line breaks (\n) with br tags.
- """
- SEARCH_RE = re.compile(r'\n')
-
- def render(self, props):
- # Do not process matches inside code blocks.
- if props['block']['type'] == BLOCK_TYPES.CODE:
- return props['children']
-
- return DOM.create_element('br')
+LINKIFY_DECORATOR = {
+ 'strategy': LINKIFY_RE,
+ 'component': linkify,
+}
class TestLinkify(unittest.TestCase):
- def test_init(self):
- self.assertIsInstance(Linkify(), Linkify)
-
def test_render(self):
- match = next(Linkify.SEARCH_RE.finditer('test https://www.example.com'))
+ match = next(LINKIFY_DECORATOR['strategy'].finditer('test https://www.example.com'))
- self.assertEqual(DOM.render(DOM.create_element(Linkify, {
+ self.assertEqual(DOM.render(DOM.create_element(LINKIFY_DECORATOR['component'], {
'block': {'type': BLOCK_TYPES.UNSTYLED},
'match': match,
}, match.group(0))), '<a href="https://www.example.com">https://www.example.com</a>')
def test_render_www(self):
- match = next(Linkify.SEARCH_RE.finditer('test www.example.com'))
+ match = next(LINKIFY_DECORATOR['strategy'].finditer('test www.example.com'))
- self.assertEqual(DOM.render(DOM.create_element(Linkify, {
+ self.assertEqual(DOM.render(DOM.create_element(LINKIFY_DECORATOR['component'], {
'block': {'type': BLOCK_TYPES.UNSTYLED},
'match': match,
}, match.group(0))), '<a href="http://www.example.com">www.example.com</a>')
def test_render_code_block(self):
- match = next(Linkify.SEARCH_RE.finditer('test https://www.example.com'))
+ match = next(LINKIFY_DECORATOR['strategy'].finditer('test https://www.example.com'))
- self.assertEqual(DOM.create_element(Linkify, {
+ self.assertEqual(DOM.create_element(LINKIFY_DECORATOR['component'], {
'block': {'type': BLOCK_TYPES.CODE},
'match': match,
}, match.group(0)), match.group(0))
- def test_render_new_window(self):
- match = next(Linkify.SEARCH_RE.finditer('test https://www.example.com'))
-
- self.assertEqual(DOM.render(DOM.create_element(Linkify(use_new_window=True), {
- 'block': {'type': BLOCK_TYPES.UNSTYLED},
- 'match': match,
- }, match.group(0))), '<a href="https://www.example.com" rel="noreferrer noopener" target="_blank">https://www.example.com</a>')
-
class TestHashtag(unittest.TestCase):
- def test_init(self):
- self.assertIsInstance(Hashtag(), Hashtag)
-
def test_render(self):
- self.assertEqual(DOM.render(DOM.create_element(Hashtag, {'block': {'type': BLOCK_TYPES.UNSTYLED}}, '#hashtagtest')), '<span class="hashtag">#hashtagtest</span>')
+ self.assertEqual(DOM.render(DOM.create_element(HASHTAG_DECORATOR['component'], {'block': {'type': BLOCK_TYPES.UNSTYLED}}, '#hashtagtest')), '<span class="hashtag">#hashtagtest</span>')
def test_render_code_block(self):
- self.assertEqual(DOM.render(DOM.create_element(Hashtag, {'block': {'type': BLOCK_TYPES.CODE}}, '#hashtagtest')), '#hashtagtest')
+ self.assertEqual(DOM.render(DOM.create_element(HASHTAG_DECORATOR['component'], {'block': {'type': BLOCK_TYPES.CODE}}, '#hashtagtest')), '#hashtagtest')
class TestBR(unittest.TestCase):
- def test_init(self):
- self.assertIsInstance(BR(), BR)
-
def test_render(self):
- self.assertEqual(DOM.render(DOM.create_element(BR, {'block': {'type': BLOCK_TYPES.UNSTYLED}}, '\n')), '<br/>')
+ self.assertEqual(DOM.render(DOM.create_element(BR_DECORATOR['component'], {'block': {'type': BLOCK_TYPES.UNSTYLED}}, '\n')), '<br/>')
def test_render_code_block(self):
- self.assertEqual(DOM.create_element(BR, {'block': {'type': BLOCK_TYPES.CODE}}, '\n'), '\n')
+ self.assertEqual(DOM.create_element(BR_DECORATOR['component'], {'block': {'type': BLOCK_TYPES.CODE}}, '\n'), '\n')
class TestCompositeDecorators(unittest.TestCase):
@@ -134,10 +71,10 @@ class TestCompositeDecorators(unittest.TestCase):
self.assertEqual(DOM.render(render_decorators([], 'test https://www.example.com#hash #hashtagtest', {'type': BLOCK_TYPES.UNSTYLED, 'depth': 0})), 'test https://www.example.com#hash #hashtagtest')
def test_render_decorators_single(self):
- self.assertEqual(DOM.render(render_decorators([Linkify()], 'test https://www.example.com#hash #hashtagtest', {'type': BLOCK_TYPES.UNSTYLED, 'depth': 0})), 'test <a href="https://www.example.com#hash">https://www.example.com#hash</a> #hashtagtest')
+ self.assertEqual(DOM.render(render_decorators([LINKIFY_DECORATOR], 'test https://www.example.com#hash #hashtagtest', {'type': BLOCK_TYPES.UNSTYLED, 'depth': 0})), 'test <a href="https://www.example.com#hash">https://www.example.com#hash</a> #hashtagtest')
def test_render_decorators_conflicting_order_one(self):
- self.assertEqual(DOM.render(render_decorators([Linkify(), Hashtag()], 'test https://www.example.com#hash #hashtagtest', {'type': BLOCK_TYPES.UNSTYLED, 'depth': 0})), 'test <a href="https://www.example.com#hash">https://www.example.com#hash</a> <span class="hashtag">#hashtagtest</span>')
+ self.assertEqual(DOM.render(render_decorators([LINKIFY_DECORATOR, HASHTAG_DECORATOR], 'test https://www.example.com#hash #hashtagtest', {'type': BLOCK_TYPES.UNSTYLED, 'depth': 0})), 'test <a href="https://www.example.com#hash">https://www.example.com#hash</a> <span class="hashtag">#hashtagtest</span>')
def test_render_decorators_conflicting_order_two(self):
- self.assertEqual(DOM.render(render_decorators([Hashtag(), Linkify()], 'test https://www.example.com#hash #hashtagtest', {'type': BLOCK_TYPES.UNSTYLED, 'depth': 0})), 'test https://www.example.com<span class="hashtag">#hash</span> <span class="hashtag">#hashtagtest</span>')
+ self.assertEqual(DOM.render(render_decorators([HASHTAG_DECORATOR, LINKIFY_DECORATOR], 'test https://www.example.com#hash #hashtagtest', {'type': BLOCK_TYPES.UNSTYLED, 'depth': 0})), 'test https://www.example.com<span class="hashtag">#hash</span> <span class="hashtag">#hashtagtest</span>')
diff --git a/tests/test_defaults.py b/tests/test_defaults.py
index 14bdcd7..58839d7 100644
--- a/tests/test_defaults.py
+++ b/tests/test_defaults.py
@@ -2,7 +2,7 @@ from __future__ import absolute_import, unicode_literals
import unittest
-from draftjs_exporter.defaults import BLOCK_MAP, STYLE_MAP, CodeBlock, render_children
+from draftjs_exporter.defaults import BLOCK_MAP, STYLE_MAP, code_block, render_children
from draftjs_exporter.dom import DOM
@@ -16,7 +16,5 @@ class TestDefaults(unittest.TestCase):
def test_render_children(self):
self.assertEqual(render_children({'children': 'test'}), 'test')
-
-class TestCodeBlock(unittest.TestCase):
- def test_render(self):
- self.assertEqual(DOM.render_debug(CodeBlock({'children': 'test'})), '<pre><code>test</code></pre>')
+ def test_render_code_block(self):
+ self.assertEqual(DOM.render_debug(code_block({'children': 'test'})), '<pre><code>test</code></pre>')
diff --git a/tests/test_dom.py b/tests/test_dom.py
index 0695b50..e4b7617 100644
--- a/tests/test_dom.py
+++ b/tests/test_dom.py
@@ -6,7 +6,7 @@ from draftjs_exporter.dom import DOM
from draftjs_exporter.engines.html5lib import DOM_HTML5LIB
from draftjs_exporter.engines.lxml import DOM_LXML
from draftjs_exporter.error import ConfigException
-from tests.test_entities import Icon
+from tests.test_entities import icon
class DOMTestImpl(object):
@@ -54,10 +54,7 @@ class TestDOM(unittest.TestCase):
self.assertEqual(DOM.render_debug(DOM.create_element('a', {}, None, DOM.create_element('span', {}, 'Test test'))), '<a><span>Test test</span></a>')
def test_create_element_entity(self):
- self.assertEqual(DOM.render_debug(DOM.create_element(Icon, {'name': 'rocket'})), '<svg class="icon"><use xlink:href="#icon-rocket"></use></svg>')
-
- def test_create_element_entity_configured(self):
- self.assertEqual(DOM.render_debug(DOM.create_element(Icon(icon_class='i'), {'name': 'rocket'})), '<svg class="i"><use xlink:href="#icon-rocket"></use></svg>')
+ self.assertEqual(DOM.render_debug(DOM.create_element(icon, {'name': 'rocket'})), '<svg class="icon"><use xlink:href="#icon-rocket"></use></svg>')
def test_parse_html(self):
self.assertEqual(DOM.render_debug(DOM.parse_html('<p><span>Test text</span></p>')), '<p><span>Test text</span></p>')
diff --git a/tests/test_entities.py b/tests/test_entities.py
index d80a3b5..eea6c4e 100644
--- a/tests/test_entities.py
+++ b/tests/test_entities.py
@@ -5,25 +5,20 @@ import unittest
from draftjs_exporter.dom import DOM
-def Null(props):
- return DOM.create_element()
-
-
-def HR(props):
+def hr(props):
return DOM.create_element('hr')
-class Link:
- def render(self, props):
- attributes = {}
- for key in props:
- attr = key if key != 'url' else 'href'
- attributes[attr] = props[key]
+def link(props):
+ attributes = {}
+ for key in props:
+ attr = key if key != 'url' else 'href'
+ attributes[attr] = props[key]
- return DOM.create_element('a', attributes, props['children'])
+ return DOM.create_element('a', attributes, props['children'])
-def Image(props):
+def image(props):
return DOM.create_element('img', {
'src': props.get('src'),
'width': props.get('width'),
@@ -32,45 +27,32 @@ def Image(props):
})
-class Icon:
- def __init__(self, icon_class='icon'):
- self.icon_class = icon_class
+def icon(props):
+ href = '#icon-%s' % props.get('name', '')
+ return DOM.create_element('svg', {'class': 'icon'}, DOM.create_element('use', {'xlink:href': href}))
- def render(self, props):
- href = '#icon-%s' % props.get('name', '')
- return DOM.create_element('svg', {'class': self.icon_class}, DOM.create_element('use', {'xlink:href': href}))
+def button(props):
+ href = props.get('href', '#')
+ icon_name = props.get('icon', None)
+ text = props.get('text', '')
-class Button:
- def render(self, props):
- href = props.get('href', '#')
- icon = props.get('icon', None)
- text = props.get('text', '')
-
- return DOM.create_element(
- 'a',
- {'class': 'icon-text' if icon else None, 'href': href},
- DOM.create_element(Icon, {'name': icon}) if icon else None,
- DOM.create_element('span', {'class': 'icon-text__text'}, text) if icon else text
- )
-
-
-class TestNull(unittest.TestCase):
- def test_render(self):
- self.assertEqual(DOM.render(DOM.create_element(Null)), '')
+ return DOM.create_element(
+ 'a',
+ {'class': 'icon-text' if icon_name else None, 'href': href},
+ DOM.create_element(icon, {'name': icon_name}) if icon_name else None,
+ DOM.create_element('span', {'class': 'icon-text__text'}, text) if icon_name else text
+ )
class TestIcon(unittest.TestCase):
def test_render(self):
- self.assertEqual(DOM.render(DOM.create_element(Icon, {'name': 'rocket'})), '<svg class="icon"><use xlink:href="#icon-rocket"></use></svg>')
-
- def test_render_configured(self):
- self.assertEqual(DOM.render(DOM.create_element(Icon(icon_class='i'), {'name': 'rocket'})), '<svg class="i"><use xlink:href="#icon-rocket"></use></svg>')
+ self.assertEqual(DOM.render(DOM.create_element(icon, {'name': 'rocket'})), '<svg class="icon"><use xlink:href="#icon-rocket"></use></svg>')
class TestImage(unittest.TestCase):
def test_render(self):
- self.assertEqual(DOM.render(DOM.create_element(Image, {
+ self.assertEqual(DOM.render(DOM.create_element(image, {
'src': 'http://example.com/example.png',
'width': 320,
'height': 580,
@@ -79,21 +61,21 @@ class TestImage(unittest.TestCase):
class TestLink(unittest.TestCase):
def test_render(self):
- self.assertEqual(DOM.render(DOM.create_element(Link, {
+ self.assertEqual(DOM.render(DOM.create_element(link, {
'url': 'http://example.com',
}, 'wow')), '<a href="http://example.com">wow</a>')
class TestButton(unittest.TestCase):
def test_render_with_icon(self):
- self.assertEqual(DOM.render(DOM.create_element(Button, {
+ self.assertEqual(DOM.render(DOM.create_element(button, {
'href': 'http://example.com',
'icon': 'rocket',
'text': 'Launch',
})), '<a class="icon-text" href="http://example.com"><svg class="icon"><use xlink:href="#icon-rocket"></use></svg><span class="icon-text__text">Launch</span></a>')
def test_render_without_icon(self):
- self.assertEqual(DOM.render(DOM.create_element(Button, {
+ self.assertEqual(DOM.render(DOM.create_element(button, {
'href': 'http://example.com',
'text': 'Launch',
})), '<a href="http://example.com">Launch</a>')
diff --git a/tests/test_entity_state.py b/tests/test_entity_state.py
index 7c91ff0..743d76d 100644
--- a/tests/test_entity_state.py
+++ b/tests/test_entity_state.py
@@ -4,10 +4,10 @@ import unittest
from draftjs_exporter.command import Command
from draftjs_exporter.entity_state import EntityException, EntityState
-from tests.test_entities import Link
+from tests.test_entities import link
entity_decorators = {
- 'LINK': Link()
+ 'LINK': link
}
entity_map = {
diff --git a/tests/test_exports.py b/tests/test_exports.py
index f9b8650..dfd50a6 100644
--- a/tests/test_exports.py
+++ b/tests/test_exports.py
@@ -12,23 +12,23 @@ from draftjs_exporter.constants import BLOCK_TYPES, ENTITY_TYPES
from draftjs_exporter.defaults import BLOCK_MAP, STYLE_MAP
from draftjs_exporter.dom import DOM
from draftjs_exporter.html import HTML
-from tests.test_composite_decorators import BR, Hashtag, Linkify
-from tests.test_entities import HR, Image, Link
+from tests.test_composite_decorators import BR_DECORATOR, HASHTAG_DECORATOR, LINKIFY_DECORATOR
+from tests.test_entities import hr, image, link
fixtures_path = os.path.join(os.path.dirname(__file__), 'test_exports.json')
fixtures = json.loads(open(fixtures_path, 'r').read())
exporter = HTML({
'entity_decorators': {
- ENTITY_TYPES.LINK: Link,
- ENTITY_TYPES.IMAGE: Image,
- ENTITY_TYPES.HORIZONTAL_RULE: HR,
+ ENTITY_TYPES.LINK: link,
+ ENTITY_TYPES.HORIZONTAL_RULE: hr,
+ ENTITY_TYPES.IMAGE: image,
ENTITY_TYPES.EMBED: None,
},
'composite_decorators': [
- BR,
- Linkify,
- Hashtag,
+ BR_DECORATOR,
+ LINKIFY_DECORATOR,
+ HASHTAG_DECORATOR,
],
'block_map': dict(BLOCK_MAP, **{
BLOCK_TYPES.UNORDERED_LIST_ITEM: {
diff --git a/tests/test_output.py b/tests/test_output.py
index cc643f1..fac2bfb 100644
--- a/tests/test_output.py
+++ b/tests/test_output.py
@@ -7,20 +7,20 @@ from draftjs_exporter.constants import BLOCK_TYPES, ENTITY_TYPES, INLINE_STYLES
from draftjs_exporter.defaults import BLOCK_MAP
from draftjs_exporter.entity_state import EntityException
from draftjs_exporter.html import HTML
-from tests.test_composite_decorators import BR, Hashtag, Linkify
-from tests.test_entities import HR, Image, Link
-from tests.test_wrapper_state import Blockquote
+from tests.test_composite_decorators import BR_DECORATOR, HASHTAG_DECORATOR, LINKIFY_DECORATOR
+from tests.test_entities import hr, image, link
+from tests.test_wrapper_state import blockquote
config = {
'entity_decorators': {
- ENTITY_TYPES.LINK: Link,
- ENTITY_TYPES.HORIZONTAL_RULE: HR,
- ENTITY_TYPES.IMAGE: Image
+ ENTITY_TYPES.LINK: link,
+ ENTITY_TYPES.HORIZONTAL_RULE: hr,
+ ENTITY_TYPES.IMAGE: image
},
'composite_decorators': [
- Linkify,
- Hashtag,
- BR,
+ LINKIFY_DECORATOR,
+ HASHTAG_DECORATOR,
+ BR_DECORATOR,
],
'block_map': dict(BLOCK_MAP, **{
BLOCK_TYPES.UNORDERED_LIST_ITEM: {
@@ -29,7 +29,7 @@ config = {
'wrapper_props': {'class': 'steps'},
},
'blockquote': {
- 'element': Blockquote,
+ 'element': blockquote,
'wrapper': 'div',
},
}),
@@ -924,7 +924,7 @@ class TestOutput(unittest.TestCase):
def test_render_with_big_content(self):
self.assertEqual(HTML({
'entity_decorators': {
- 'LINK': Link()
+ 'LINK': link
},
'block_map': {
'header-two': {'element': 'h2'},
diff --git a/tests/test_wrapper_state.py b/tests/test_wrapper_state.py
index ca58cd5..60deb78 100644
--- a/tests/test_wrapper_state.py
+++ b/tests/test_wrapper_state.py
@@ -4,30 +4,7 @@ import unittest
from draftjs_exporter.dom import DOM
from draftjs_exporter.wrapper_state import WrapperState
-
-
-def Blockquote(props):
- block_data = props['block']['data']
-
- return DOM.create_element('blockquote', {
- 'cite': block_data.get('cite')
- }, props['children'])
-
-
-def ListItem(props):
- depth = props['block']['depth']
-
- return DOM.create_element('li', {
- 'class': 'list-item--depth-{0}'.format(depth)
- }, props['children'])
-
-
-def OrderedList(props):
- depth = props['block']['depth']
-
- return DOM.create_element('ol', {
- 'class': 'list--depth-{0}'.format(depth)
- }, props['children'])
+from example import blockquote, list_item, ordered_list
class TestWrapperState(unittest.TestCase):
@@ -39,10 +16,10 @@ class TestWrapperState(unittest.TestCase):
'unstyled': 'div',
'atomic': lambda props: props['children'],
'ignore': None,
- 'blockquote': Blockquote,
+ 'blockquote': blockquote,
'ordered-list-item': {
- 'element': ListItem,
- 'wrapper': OrderedList
+ 'element': list_item,
+ 'wrapper': ordered_list
},
})
@@ -132,7 +109,7 @@ class TestBlockquote(unittest.TestCase):
DOM.use(DOM.HTML5LIB)
def test_render_debug(self):
- self.assertEqual(DOM.render_debug(DOM.create_element(Blockquote, {
+ self.assertEqual(DOM.render_debug(DOM.create_element(blockquote, {
'block': {
'data': {
'cite': 'http://example.com/',
@@ -146,7 +123,7 @@ class TestListItem(unittest.TestCase):
DOM.use(DOM.HTML5LIB)
def test_render_debug(self):
- self.assertEqual(DOM.render_debug(DOM.create_element(ListItem, {
+ self.assertEqual(DOM.render_debug(DOM.create_element(list_item, {
'block': {
'depth': 5,
},
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 7
} | 1.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[testing,docs]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | beautifulsoup4==4.13.3
cachetools==5.5.2
chardet==5.2.0
colorama==0.4.6
coverage==7.8.0
distlib==0.3.9
-e git+https://github.com/springload/draftjs_exporter.git@13e1d0e7f159abcdc2e547806fe69cac1cf15e1a#egg=draftjs_exporter
exceptiongroup==1.2.2
execnet==2.1.1
filelock==3.18.0
flake8==7.2.0
html5lib==1.0b10
iniconfig==2.1.0
isort==4.2.5
lxml==5.3.1
markov-draftjs==0.1.1
mccabe==0.7.0
memory_profiler==0.47
packaging==24.2
platformdirs==4.3.7
pluggy==1.5.0
psutil==5.4.1
pycodestyle==2.13.0
pyflakes==3.3.1
pyproject-api==1.9.0
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
six==1.17.0
soupsieve==2.6
tomli==2.2.1
tox==4.25.0
typing_extensions==4.13.0
virtualenv==20.29.3
webencodings==0.5.1
| name: draftjs_exporter
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- beautifulsoup4==4.13.3
- cachetools==5.5.2
- chardet==5.2.0
- colorama==0.4.6
- coverage==7.8.0
- distlib==0.3.9
- exceptiongroup==1.2.2
- execnet==2.1.1
- filelock==3.18.0
- flake8==7.2.0
- html5lib==1.0b10
- iniconfig==2.1.0
- isort==4.2.5
- lxml==5.3.1
- markov-draftjs==0.1.1
- mccabe==0.7.0
- memory-profiler==0.47
- packaging==24.2
- platformdirs==4.3.7
- pluggy==1.5.0
- psutil==5.4.1
- pycodestyle==2.13.0
- pyflakes==3.3.1
- pyproject-api==1.9.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- six==1.17.0
- soupsieve==2.6
- tomli==2.2.1
- tox==4.25.0
- typing-extensions==4.13.0
- virtualenv==20.29.3
- webencodings==0.5.1
prefix: /opt/conda/envs/draftjs_exporter
| [
"tests/test_composite_decorators.py::TestLinkify::test_render",
"tests/test_composite_decorators.py::TestLinkify::test_render_code_block",
"tests/test_composite_decorators.py::TestLinkify::test_render_www",
"tests/test_composite_decorators.py::TestHashtag::test_render",
"tests/test_composite_decorators.py::TestHashtag::test_render_code_block",
"tests/test_composite_decorators.py::TestBR::test_render",
"tests/test_composite_decorators.py::TestBR::test_render_code_block",
"tests/test_composite_decorators.py::TestCompositeDecorators::test_render_decorators_conflicting_order_one",
"tests/test_composite_decorators.py::TestCompositeDecorators::test_render_decorators_conflicting_order_two",
"tests/test_composite_decorators.py::TestCompositeDecorators::test_render_decorators_empty",
"tests/test_composite_decorators.py::TestCompositeDecorators::test_render_decorators_single",
"tests/test_defaults.py::TestDefaults::test_default_block_map",
"tests/test_defaults.py::TestDefaults::test_default_style_map",
"tests/test_defaults.py::TestDefaults::test_render_children",
"tests/test_defaults.py::TestDefaults::test_render_code_block",
"tests/test_dom.py::TestDOM::test_append_child",
"tests/test_dom.py::TestDOM::test_camel_to_dash",
"tests/test_dom.py::TestDOM::test_create_element",
"tests/test_dom.py::TestDOM::test_create_element_empty",
"tests/test_dom.py::TestDOM::test_create_element_entity",
"tests/test_dom.py::TestDOM::test_create_element_nested",
"tests/test_dom.py::TestDOM::test_create_element_none",
"tests/test_dom.py::TestDOM::test_create_element_style_dict",
"tests/test_dom.py::TestDOM::test_create_element_style_str",
"tests/test_dom.py::TestDOM::test_parse_html",
"tests/test_dom.py::TestDOM::test_render_debug",
"tests/test_dom.py::TestDOM::test_use_custom",
"tests/test_dom.py::TestDOM::test_use_html5lib",
"tests/test_dom.py::TestDOM::test_use_invalid",
"tests/test_dom.py::TestDOM::test_use_lxml",
"tests/test_entities.py::TestIcon::test_render",
"tests/test_entities.py::TestImage::test_render",
"tests/test_entities.py::TestLink::test_render",
"tests/test_entities.py::TestButton::test_render_with_icon",
"tests/test_entities.py::TestButton::test_render_without_icon",
"tests/test_entity_state.py::TestEntityState::test_apply_raises",
"tests/test_entity_state.py::TestEntityState::test_apply_start_entity",
"tests/test_entity_state.py::TestEntityState::test_apply_stop_entity",
"tests/test_entity_state.py::TestEntityState::test_get_entity_details",
"tests/test_entity_state.py::TestEntityState::test_get_entity_details_raises",
"tests/test_entity_state.py::TestEntityState::test_has_no_entity_default",
"tests/test_entity_state.py::TestEntityState::test_has_no_entity_styled",
"tests/test_entity_state.py::TestEntityState::test_init",
"tests/test_exports.py::TestExportsHTML5LIB::test_export_html5lib_adjacent_inline_styles",
"tests/test_exports.py::TestExportsHTML5LIB::test_export_html5lib_all_plain_html_elements_we_need",
"tests/test_exports.py::TestExportsHTML5LIB::test_export_html5lib_big_content_export",
"tests/test_exports.py::TestExportsHTML5LIB::test_export_html5lib_entity",
"tests/test_exports.py::TestExportsHTML5LIB::test_export_html5lib_entity_with_data-*",
"tests/test_exports.py::TestExportsHTML5LIB::test_export_html5lib_entity_with_inline_style",
"tests/test_exports.py::TestExportsHTML5LIB::test_export_html5lib_from_https://github.com/icelab/draft-js-ast-exporter/blob/651c807bea12d97dad6f4965ab40481c8f2130dd/test/fixtures/content.js",
"tests/test_exports.py::TestExportsHTML5LIB::test_export_html5lib_html_entities_escaping",
"tests/test_exports.py::TestExportsHTML5LIB::test_export_html5lib_multiple_decorators",
"tests/test_exports.py::TestExportsHTML5LIB::test_export_html5lib_nested_inline_styles",
"tests/test_exports.py::TestExportsHTML5LIB::test_export_html5lib_nested_inline_styles_(inverted)",
"tests/test_exports.py::TestExportsHTML5LIB::test_export_html5lib_ordered_list",
"tests/test_exports.py::TestExportsHTML5LIB::test_export_html5lib_plain_text",
"tests/test_exports.py::TestExportsHTML5LIB::test_export_html5lib_same_content_multiple_times",
"tests/test_exports.py::TestExportsHTML5LIB::test_export_html5lib_single_inline_style",
"tests/test_exports.py::TestExportsHTML5LIB::test_export_html5lib_style_map_defaults",
"tests/test_exports.py::TestExportsHTML5LIB::test_init_html5lib",
"tests/test_exports.py::TestExportsLXML::test_export_lxml_adjacent_inline_styles",
"tests/test_exports.py::TestExportsLXML::test_export_lxml_all_plain_html_elements_we_need",
"tests/test_exports.py::TestExportsLXML::test_export_lxml_entity",
"tests/test_exports.py::TestExportsLXML::test_export_lxml_entity_with_inline_style",
"tests/test_exports.py::TestExportsLXML::test_export_lxml_from_https://github.com/icelab/draft-js-ast-exporter/blob/651c807bea12d97dad6f4965ab40481c8f2130dd/test/fixtures/content.js",
"tests/test_exports.py::TestExportsLXML::test_export_lxml_html_entities_escaping",
"tests/test_exports.py::TestExportsLXML::test_export_lxml_multiple_decorators",
"tests/test_exports.py::TestExportsLXML::test_export_lxml_nested_inline_styles",
"tests/test_exports.py::TestExportsLXML::test_export_lxml_nested_inline_styles_(inverted)",
"tests/test_exports.py::TestExportsLXML::test_export_lxml_ordered_list",
"tests/test_exports.py::TestExportsLXML::test_export_lxml_plain_text",
"tests/test_exports.py::TestExportsLXML::test_export_lxml_same_content_multiple_times",
"tests/test_exports.py::TestExportsLXML::test_export_lxml_single_inline_style",
"tests/test_exports.py::TestExportsLXML::test_export_lxml_style_map_defaults",
"tests/test_exports.py::TestExportsLXML::test_init",
"tests/test_exports.py::TestExportsSTRING::test_export_string_adjacent_inline_styles",
"tests/test_exports.py::TestExportsSTRING::test_export_string_all_plain_html_elements_we_need",
"tests/test_exports.py::TestExportsSTRING::test_export_string_big_content_export",
"tests/test_exports.py::TestExportsSTRING::test_export_string_entity",
"tests/test_exports.py::TestExportsSTRING::test_export_string_entity_with_data-*",
"tests/test_exports.py::TestExportsSTRING::test_export_string_entity_with_inline_style",
"tests/test_exports.py::TestExportsSTRING::test_export_string_from_https://github.com/icelab/draft-js-ast-exporter/blob/651c807bea12d97dad6f4965ab40481c8f2130dd/test/fixtures/content.js",
"tests/test_exports.py::TestExportsSTRING::test_export_string_html_entities_escaping",
"tests/test_exports.py::TestExportsSTRING::test_export_string_multiple_decorators",
"tests/test_exports.py::TestExportsSTRING::test_export_string_nested_inline_styles",
"tests/test_exports.py::TestExportsSTRING::test_export_string_nested_inline_styles_(inverted)",
"tests/test_exports.py::TestExportsSTRING::test_export_string_ordered_list",
"tests/test_exports.py::TestExportsSTRING::test_export_string_plain_text",
"tests/test_exports.py::TestExportsSTRING::test_export_string_same_content_multiple_times",
"tests/test_exports.py::TestExportsSTRING::test_export_string_single_inline_style",
"tests/test_exports.py::TestExportsSTRING::test_export_string_style_map_defaults",
"tests/test_exports.py::TestExportsSTRING::test_init",
"tests/test_output.py::TestOutput::test_render_empty",
"tests/test_output.py::TestOutput::test_render_with_backtracking_nested_wrapping",
"tests/test_output.py::TestOutput::test_render_with_big_content",
"tests/test_output.py::TestOutput::test_render_with_boolean_attribute_false",
"tests/test_output.py::TestOutput::test_render_with_boolean_attribute_true",
"tests/test_output.py::TestOutput::test_render_with_default_block_map",
"tests/test_output.py::TestOutput::test_render_with_default_config",
"tests/test_output.py::TestOutput::test_render_with_default_style_map",
"tests/test_output.py::TestOutput::test_render_with_different_blocks",
"tests/test_output.py::TestOutput::test_render_with_element_options",
"tests/test_output.py::TestOutput::test_render_with_entities",
"tests/test_output.py::TestOutput::test_render_with_entities_crossing_raises",
"tests/test_output.py::TestOutput::test_render_with_entity",
"tests/test_output.py::TestOutput::test_render_with_entity_and_decorators",
"tests/test_output.py::TestOutput::test_render_with_immediate_jumping",
"tests/test_output.py::TestOutput::test_render_with_inline_styles",
"tests/test_output.py::TestOutput::test_render_with_jumping_wrapping",
"tests/test_output.py::TestOutput::test_render_with_line_breaks",
"tests/test_output.py::TestOutput::test_render_with_many_line_breaks",
"tests/test_output.py::TestOutput::test_render_with_multiple_decorators",
"tests/test_output.py::TestOutput::test_render_with_multiple_inline_styles",
"tests/test_output.py::TestOutput::test_render_with_no_zero_depth",
"tests/test_output.py::TestOutput::test_render_with_none_attribute",
"tests/test_output.py::TestOutput::test_render_with_none_component",
"tests/test_output.py::TestOutput::test_render_with_none_return_value",
"tests/test_output.py::TestOutput::test_render_with_number_attribute",
"tests/test_output.py::TestOutput::test_render_with_styles_in_entities",
"tests/test_output.py::TestOutput::test_render_with_unicode",
"tests/test_output.py::TestOutput::test_render_with_unidirectional_nested_wrapping",
"tests/test_output.py::TestOutput::test_render_with_unknown_attribute",
"tests/test_output.py::TestOutput::test_render_with_wrapping",
"tests/test_output.py::TestOutput::test_render_with_wrapping_reset",
"tests/test_output.py::TestOutput::test_render_with_wrapping_reset_block_components",
"tests/test_wrapper_state.py::TestWrapperState::test_element_for_component",
"tests/test_wrapper_state.py::TestWrapperState::test_element_for_component_wrapper",
"tests/test_wrapper_state.py::TestWrapperState::test_element_for_dismiss_content",
"tests/test_wrapper_state.py::TestWrapperState::test_element_for_element_content",
"tests/test_wrapper_state.py::TestWrapperState::test_element_for_no_block",
"tests/test_wrapper_state.py::TestWrapperState::test_element_for_simple_content",
"tests/test_wrapper_state.py::TestWrapperState::test_init",
"tests/test_wrapper_state.py::TestWrapperState::test_str",
"tests/test_wrapper_state.py::TestWrapperState::test_str_elts",
"tests/test_wrapper_state.py::TestBlockquote::test_render_debug",
"tests/test_wrapper_state.py::TestListItem::test_render_debug"
]
| [
"tests/test_exports.py::TestExportsLXML::test_export_lxml_big_content_export",
"tests/test_exports.py::TestExportsLXML::test_export_lxml_entity_with_data-*"
]
| []
| []
| MIT License | 1,945 | [
"README.rst",
"draftjs_exporter/dom.py",
"benchmark.py",
"draftjs_exporter/defaults.py",
"example.py",
"draftjs_exporter/composite_decorators.py",
"docs/example.md"
]
| [
"README.rst",
"draftjs_exporter/dom.py",
"benchmark.py",
"draftjs_exporter/defaults.py",
"example.py",
"draftjs_exporter/composite_decorators.py",
"docs/example.md"
]
|
pyvisa__pyvisa-301 | 52fca2a660c706564bd59ef36f22c616b1bd42c1 | 2017-12-09 17:13:57 | 52fca2a660c706564bd59ef36f22c616b1bd42c1 | MatthieuDartiailh: The broken tests come from Python 2.6. As we are likely to drop support for it in January I will not try to fix it. This is ready for review from my end. ping @hgrecco, @skrchnavy
thliebig: @MatthieuDartiailh, I have added a missing fixup for the improved header support:
https://github.com/thliebig/pyvisa/commit/f9879997ef87f6d903be63bb90ab9efa5d74a43e
It catches the case where a termination character happens to be the last valid data byte(s). It occurs only very rarely, but when it does it crashes the read... Please consider adding the patch to this PR
MatthieuDartiailh: Done. And I set you as author. This somehow always bothered me, but since it worked in my tests I forgot to spend more time on it. If you can do a complete review it would be awesome.
MatthieuDartiailh: @thliebig Do you want to do a final review before I merge? | diff --git a/.travis.yml b/.travis.yml
index f79e28d..291df6f 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -16,8 +16,8 @@ python:
- "3.6"
install:
- - pip install coverage
- - pip install coveralls
+ - if [ $TRAVIS_PYTHON_VERSION == '2.7' ] || [ $TRAVIS_PYTHON_VERSION == '3.6' ]; then pip install numpy; fi
+ - pip install coverage coveralls
script:
- coverage run -p --source=pyvisa --omit="*test*","*compat*" setup.py test
diff --git a/docs/conf.py b/docs/conf.py
index 1a1d32d..dd66e73 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -44,10 +44,10 @@ master_doc = 'index'
# General information about the project.
project = 'PyVISA'
author = 'PyVISA Authors'
+
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
-
version = pkg_resources.get_distribution(project).version
release = version
this_year = datetime.date.today().year
diff --git a/docs/rvalues.rst b/docs/rvalues.rst
index 383585c..aecb6a6 100644
--- a/docs/rvalues.rst
+++ b/docs/rvalues.rst
@@ -77,6 +77,11 @@ If you have doubles `d` in big endian the call will be::
You can also specify the output container type, just as it was shown before.
+By default, PyVISA will assume that the data block is formatted according to
+the IEEE convention. If your instrument uses HP data blocks you can pass
+``header_fmt='hp'`` to ``read_binary_values``. If your instrument does not use
+any header for the data, simply pass ``header_fmt='empty'``.
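+
+For example, assuming a hypothetical instrument that answers a ``CURV?``
+query with 16-bit big-endian integers wrapped in an HP header, a sketch of
+the read would be::
+
+    >>> inst.write('CURV?')
+    >>> values = inst.read_binary_values(datatype='h', is_big_endian=True,
+    ...                                  header_fmt='hp')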
+
Writing ASCII values
--------------------
@@ -178,4 +183,12 @@ In those cases, you need to get the data::
and then you need to implement the logic to parse it.
+Alternatively if the `read_raw` call fails you can try to read just a few bytes
+using::
+
+ >>> inst.write('CURV?')
+ >>> data = inst.read_bytes(1)
+If this call fails it may mean that your instrument did not answer, either
+because it needs more time or because your first instruction was not
+understood.
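+
+Once that first byte arrives, a hedged sketch of draining the remainder
+(``expected_length`` here is hypothetical and instrument-specific) is::
+
+    >>> data += inst.read_bytes(expected_length - 1)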
diff --git a/pyvisa/compat/__init__.py b/pyvisa/compat/__init__.py
index 9aacf7d..abd0f78 100644
--- a/pyvisa/compat/__init__.py
+++ b/pyvisa/compat/__init__.py
@@ -9,11 +9,10 @@
:license: BSD, see LICENSE for more details.
"""
-from __future__ import division, unicode_literals, print_function, absolute_import
-import sys
-import unittest
+from __future__ import (division, unicode_literals, print_function,
+ absolute_import)
-from subprocess import check_output
+import sys
PYTHON3 = sys.version >= '3'
@@ -27,6 +26,10 @@ if PYTHON3:
integer_types = (int, )
input = input
+
+ int_to_bytes = int.to_bytes
+ int_from_bytes = int.from_bytes
+
else:
string_types = basestring
@@ -39,6 +42,86 @@ else:
input = raw_input
+ # The 2 following function implementation extracted from the python-future
+ # project
+ import collections
+
+ def int_to_bytes(integer, length, byteorder, signed=False):
+ """
+ Return an array of bytes representing an integer.
+ The integer is represented using length bytes. An OverflowError is
+ raised if the integer is not representable with the given number of
+ bytes.
+ The byteorder argument determines the byte order used to represent the
+ integer. If byteorder is 'big', the most significant byte is at the
+ beginning of the byte array. If byteorder is 'little', the most
+ significant byte is at the end of the byte array. To request the
+ native byte order of the host system, use `sys.byteorder' as the byte
+ order value.
+ The signed keyword-only argument determines whether two's complement is
+ used to represent the integer. If signed is False and a negative
+ integer is given, an OverflowError is raised.
+ """
+ if length < 0:
+ raise ValueError("length argument must be non-negative")
+ if length == 0 and integer == 0:
+ return bytes()
+ if signed and integer < 0:
+ bits = length * 8
+ num = (2**bits) + integer
+ if num <= 0:
+ raise OverflowError("int too smal to convert")
+ else:
+ if integer < 0:
+ raise OverflowError("can't convert negative int to unsigned")
+ num = integer
+ if byteorder not in ('little', 'big'):
+ raise ValueError("byteorder must be either 'little' or 'big'")
+ h = b'%x' % num
+ s = bytes((b'0'*(len(h) % 2) + h).zfill(length*2).decode('hex'))
+ if signed:
+            # ord(): on Python 2 indexing native bytes yields a 1-char str
+            high_set = ord(s[0:1]) & 0x80
+ if integer > 0 and high_set:
+ raise OverflowError("int too big to convert")
+ if integer < 0 and not high_set:
+ raise OverflowError("int too small to convert")
+ if len(s) > length:
+ raise OverflowError("int too big to convert")
+ return s if byteorder == 'big' else s[::-1]
+
+ def int_from_bytes(mybytes, byteorder='big', signed=False):
+ """
+ Return the integer represented by the given array of bytes.
+ The mybytes argument must either support the buffer protocol or be an
+ iterable object producing bytes. Bytes and bytearray are examples of
+ built-in objects that support the buffer protocol.
+ The byteorder argument determines the byte order used to represent the
+ integer. If byteorder is 'big', the most significant byte is at the
+ beginning of the byte array. If byteorder is 'little', the most
+ significant byte is at the end of the byte array. To request the
+ native byte order of the host system, use `sys.byteorder' as the byte
+ order value.
+ The signed keyword-only argument indicates whether two's complement is
+ used to represent the integer.
+ """
+ if byteorder not in ('little', 'big'):
+ raise ValueError("byteorder must be either 'little' or 'big'")
+ if isinstance(mybytes, unicode):
+ raise TypeError("cannot convert unicode objects to bytes")
+ # mybytes can also be passed as a sequence of integers on Py3.
+ # Test for this:
+ elif isinstance(mybytes, collections.Iterable):
+ mybytes = bytes(mybytes)
+ b = mybytes if byteorder == 'big' else mybytes[::-1]
+ if len(b) == 0:
+ b = b'\x00'
+ # The encode() method has been disabled by newbytes, but Py2's
+ # str has it:
+ num = int(b.encode('hex'), 16)
+        # ord(): on Python 2 indexing native bytes yields a 1-char str
+        if signed and (ord(b[0:1]) & 0x80):
+ num = num - (2 ** (len(b)*8))
+ return num
+
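+    # Sanity-check sketch for the two backports above (hypothetical values):
+    #   int_to_bytes(1024, 2, 'big')  -> b'\x04\x00'
+    #   int_from_bytes(b'\x04\x00', 'big') -> 1024
+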
try:
from collections import OrderedDict
except ImportError:
diff --git a/pyvisa/compat/struct.py b/pyvisa/compat/struct.py
index 154ddbf..2d0e429 100644
--- a/pyvisa/compat/struct.py
+++ b/pyvisa/compat/struct.py
@@ -9,7 +9,8 @@
:license: PSF License
"""
-from __future__ import division, unicode_literals, print_function, absolute_import
+from __future__ import (division, unicode_literals, print_function,
+ absolute_import)
import sys
import struct
diff --git a/pyvisa/constants.py b/pyvisa/constants.py
index 2172306..c2e907f 100644
--- a/pyvisa/constants.py
+++ b/pyvisa/constants.py
@@ -688,7 +688,7 @@ class InterfaceType(enum.IntEnum):
class AddressState(enum.IntEnum):
- unaddressed =VI_GPIB_UNADDRESSED
+ unaddressed = VI_GPIB_UNADDRESSED
talker = VI_GPIB_TALKER
listenr = VI_GPIB_LISTENER
diff --git a/pyvisa/resources/messagebased.py b/pyvisa/resources/messagebased.py
index ea98bf4..588ed26 100644
--- a/pyvisa/resources/messagebased.py
+++ b/pyvisa/resources/messagebased.py
@@ -11,7 +11,8 @@
:license: MIT, see LICENSE for more details.
"""
-from __future__ import division, unicode_literals, print_function, absolute_import
+from __future__ import (division, unicode_literals, print_function,
+ absolute_import)
import contextlib
import time
@@ -47,7 +48,8 @@ class ValuesFormat(object):
self.container = container
self.is_binary = False
- def use_binary(self, datatype, is_big_endian, container=list, header_fmt='ieee'):
+ def use_binary(self, datatype, is_big_endian, container=list,
+ header_fmt='ieee'):
self.datatype = datatype
self.is_big_endian = is_big_endian
self.container = container
@@ -56,19 +58,21 @@ class ValuesFormat(object):
class ControlRenMixin(object):
- """Common controlt_ren method of some messaged based resources.
+ """Common control_ren method of some messaged based resources.
+
"""
# It should work for GPIB, USB and some TCPIP
- # For TCPIP I found some (all?) NI's VISA library do not handle control_ren, but
- # it works for Agilent's VISA library (at least some of them)
+ # For TCPIP I found some (all?) NI's VISA library do not handle
+ # control_ren, but it works for Agilent's VISA library (at least some of
+ # them)
def control_ren(self, mode):
- """Controls the state of the GPIB Remote Enable (REN) interface line, and optionally the remote/local
- state of the device.
+ """Controls the state of the GPIB Remote Enable (REN) interface line,
+ and optionally the remote/local state of the device.
Corresponds to viGpibControlREN function of the VISA library.
- :param mode: Specifies the state of the REN line and optionally the device remote/local state.
- (Constants.GPIB_REN*)
+ :param mode: Specifies the state of the REN line and optionally the
+ device remote/local state. (Constants.GPIB_REN*)
:return: return value of the library call.
:rtype: VISAStatus
"""
@@ -95,17 +99,21 @@ class MessageBasedResource(Resource):
self._values_format = ValuesFormat()
super(MessageBasedResource, self).__init__(*args, **kwargs)
- # This is done for backwards compatibility but will be removed in 1.7
+ # This is done for backwards compatibility but will be removed in 1.10
@property
def values_format(self):
+ warnings.warn('values_format is deprecated and will be removed in '
+ '1.10', FutureWarning)
return self._values_format
@values_format.setter
def values_format(self, fmt):
+ warnings.warn('values_format is deprecated and will be removed in '
+ '1.10', FutureWarning)
self._values_format.is_binary = not (fmt & 0x01 == 0)
- if fmt & 0x03 == 1: #single
+ if fmt & 0x03 == 1: # single
self._values_format.datatype = 'f'
- elif fmt & 0x03 == 3: #double:
+ elif fmt & 0x03 == 3: # double:
self._values_format.datatype = 'd'
else:
raise ValueError("unknown data values fmt requested")
@@ -118,10 +126,14 @@ class MessageBasedResource(Resource):
def ask_delay(self):
"""An alias for query_delay kept for backwards compatibility.
"""
+ warnings.warn('ask_delay is deprecated and will be removed in '
+ '1.10, use query_delay instead', FutureWarning)
return self.query_delay
@ask_delay.setter
def ask_delay(self, value):
+ warnings.warn('ask_delay is deprecated and will be removed in '
+ '1.10, use query_delay instead', FutureWarning)
self.query_delay = value
@property
@@ -132,7 +144,8 @@ class MessageBasedResource(Resource):
@encoding.setter
def encoding(self, encoding):
- _ = 'test encoding'.encode(encoding).decode(encoding)
+ # Test that the encoding specified makes sense.
+ 'test encoding'.encode(encoding).decode(encoding)
self._encoding = encoding
@property
@@ -145,22 +158,25 @@ class MessageBasedResource(Resource):
def read_termination(self, value):
if value:
- # termination character, the rest is just used for verification after
- # each read operation.
+ # termination character, the rest is just used for verification
+ # after each read operation.
last_char = value[-1:]
- # Consequently, it's illogical to have the real termination character
- # twice in the sequence (otherwise reading would stop prematurely).
+ # Consequently, it's illogical to have the real termination
+ # character twice in the sequence (otherwise reading would stop
+ # prematurely).
if last_char in value[:-1]:
raise ValueError("ambiguous ending in termination characters")
self.set_visa_attribute(constants.VI_ATTR_TERMCHAR, ord(last_char))
- self.set_visa_attribute(constants.VI_ATTR_TERMCHAR_EN, constants.VI_TRUE)
+ self.set_visa_attribute(constants.VI_ATTR_TERMCHAR_EN,
+ constants.VI_TRUE)
else:
- # The termchar is also used in VI_ATTR_ASRL_END_IN (for serial termination)
- # so return it to its default.
+ # The termchar is also used in VI_ATTR_ASRL_END_IN (for serial
+ # termination) so return it to its default.
self.set_visa_attribute(constants.VI_ATTR_TERMCHAR, ord(self.LF))
- self.set_visa_attribute(constants.VI_ATTR_TERMCHAR_EN, constants.VI_FALSE)
+ self.set_visa_attribute(constants.VI_ATTR_TERMCHAR_EN,
+ constants.VI_FALSE)
self._read_termination = value
@@ -208,8 +224,10 @@ class MessageBasedResource(Resource):
return count
- def write_ascii_values(self, message, values, converter='f', separator=',', termination=None, encoding=None):
- """Write a string message to the device followed by values in ascii format.
+ def write_ascii_values(self, message, values, converter='f', separator=',',
+ termination=None, encoding=None):
+ """Write a string message to the device followed by values in ascii
+ format.
The write_termination is always appended to it.
@@ -220,8 +238,8 @@ class MessageBasedResource(Resource):
String formatting codes are also accepted.
Defaults to str.
:type converter: callable | str
- :param separator: a callable that split the str into individual elements.
- If a str is given, data.split(separator) is used.
+        :param separator: a callable that joins the values into a single str.
+ If a str is given, separator.join(values) is used.
:type: separator: (collections.Iterable[T]) -> str | str
:return: number of bytes written.
:rtype: int
@@ -236,7 +254,7 @@ class MessageBasedResource(Resource):
block = util.to_ascii_block(values, converter, separator)
- message = message.encode(enco) + block
+ message = message.encode(enco) + block.encode(enco)
if term:
message += term.encode(enco)
@@ -245,15 +263,19 @@ class MessageBasedResource(Resource):
return count
- def write_binary_values(self, message, values, datatype='f', is_big_endian=False, termination=None, encoding=None):
- """Write a string message to the device followed by values in binary format.
+ def write_binary_values(self, message, values, datatype='f',
+ is_big_endian=False, termination=None,
+ encoding=None):
+ """Write a string message to the device followed by values in binary
+ format.
The write_termination is always appended to it.
:param message: the message to be sent.
:type message: unicode (Py2) or str (Py3)
        :param values: data to be written to the device.
- :param datatype: the format string for a single element. See struct module.
+ :param datatype: the format string for a single element. See struct
+ module.
        :param is_big_endian: boolean indicating endianness.
:return: number of bytes written.
:rtype: int
@@ -277,28 +299,76 @@ class MessageBasedResource(Resource):
return count
def write_values(self, message, values, termination=None, encoding=None):
-
+ warnings.warn('write_values is deprecated and will be removed in '
+ '1.10, use write_ascii_values or write_binary_values '
+ 'instead.', FutureWarning)
vf = self.values_format
if vf.is_binary:
- return self.write_binary_values(message, values, vf.datatype, vf.is_big_endian, termination, encoding)
+ return self.write_binary_values(message, values, vf.datatype,
+ vf.is_big_endian, termination,
+ encoding)
- return self.write_ascii_values(message, values, vf.converter, vf.separator, termination, encoding)
+ return self.write_ascii_values(message, values, vf.converter,
+ vf.separator, termination, encoding)
+
+ def read_bytes(self, count, chunk_size=None, break_on_termchar=False):
+ """Read a certain number of bytes from the instrument.
+
+ :param count: The number of bytes to read from the instrument.
+ :type count: int
+ :param chunk_size: The chunk size to use to perform the reading.
+ :type chunk_size: int
+ :param break_on_termchar: Should the reading stop when a termination
+ character is encountered.
+        :type break_on_termchar: bool
+
+ :rtype: bytes
+
+ """
+ chunk_size = chunk_size or self.chunk_size
+ ret = bytearray()
+ left_to_read = count
+ termchar_read = constants.StatusCode.success_termination_character_read
+
+ with self.ignore_warning(constants.VI_SUCCESS_DEV_NPRESENT,
+ constants.VI_SUCCESS_MAX_CNT):
+ try:
+ status = None
+ while len(ret) < count:
+ size = min(chunk_size, left_to_read)
+ logger.debug('%s - reading %d bytes (last status %r)',
+ self._resource_name, size, status)
+ chunk, status = self.visalib.read(self.session, size)
+ ret.extend(chunk)
+ left_to_read -= len(chunk)
+ if break_on_termchar and status == termchar_read:
+ break
+ except errors.VisaIOError as e:
+ logger.debug('%s - exception while reading: %s\n'
+ 'Buffer content: %r', self._resource_name, e,
+ ret)
+ raise
+ return bytes(ret)
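+
+    # Hypothetical usage sketch: inst.read_bytes(512) reads 512 bytes in
+    # chunks; with break_on_termchar=True it may return fewer if the
+    # termination character is seen first.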
def read_raw(self, size=None):
"""Read the unmodified string sent from the instrument to the computer.
In contrast to read(), no termination characters are stripped.
+ :param size: The chunk size to use when reading the data.
+
:rtype: bytes
"""
return bytes(self._read_raw(size))
def _read_raw(self, size=None):
"""Read the unmodified string sent from the instrument to the computer.
-
+
In contrast to read(), no termination characters are stripped.
-
+
+ :param size: The chunk size to use when reading the data.
+
:rtype: bytearray
"""
size = self.chunk_size if size is None else size
@@ -306,7 +376,8 @@ class MessageBasedResource(Resource):
loop_status = constants.StatusCode.success_max_count_read
ret = bytearray()
- with self.ignore_warning(constants.VI_SUCCESS_DEV_NPRESENT, constants.VI_SUCCESS_MAX_CNT):
+ with self.ignore_warning(constants.VI_SUCCESS_DEV_NPRESENT,
+ constants.VI_SUCCESS_MAX_CNT):
try:
status = loop_status
while status == loop_status:
@@ -315,8 +386,8 @@ class MessageBasedResource(Resource):
chunk, status = self.visalib.read(self.session, size)
ret.extend(chunk)
except errors.VisaIOError as e:
- logger.debug('%s - exception while reading: %s\nBuffer content: %r',
- self._resource_name, e, ret)
+ logger.debug('%s - exception while reading: %s\nBuffer '
+ 'content: %r', self._resource_name, e, ret)
raise
return ret
@@ -353,6 +424,78 @@ class MessageBasedResource(Resource):
return message[:-len(termination)]
+ def read_ascii_values(self, converter='f', separator=',', container=list,
+ delay=None):
+ """Read values from the device in ascii format returning an iterable of
+ values.
+
+ :param delay: delay in seconds between write and read operations.
+ if None, defaults to self.query_delay
+ :param converter: function used to convert each element.
+ Defaults to float
+ :type converter: callable
+ :param separator: a callable that split the str into individual
+ elements. If a str is given, data.split(separator) is
+ used.
+ :type: separator: (str) -> collections.Iterable[int] | str
+ :param container: container type to use for the output data.
+ :returns: the answer from the device.
+ :rtype: list
+
+ """
+ # Use read rather than _read_raw because we cannot handle a bytearray
+ block = self.read()
+
+ return util.from_ascii_block(block, converter, separator, container)
+
+ def read_binary_values(self, datatype='f', is_big_endian=False,
+ container=list, header_fmt='ieee'):
+ """Read values from the device in binary format returning an iterable
+ of values.
+
+ :param datatype: the format string for a single element. See struct
+ module.
+        :param is_big_endian: boolean indicating endianness.
+ Defaults to False.
+ :param container: container type to use for the output data.
+ :param header_fmt: format of the header prefixing the data. Possible
+ values are: 'ieee', 'hp', 'empty'
+ :returns: the answer from the device.
+ :rtype: type(container)
+
+ """
+ block = self._read_raw()
+ expected_length = 0
+
+ if header_fmt == 'ieee':
+ offset, data_length = util.parse_ieee_block_header(block)
+ expected_length = offset + data_length
+ elif header_fmt == 'hp':
+ offset, data_length = util.parse_hp_block_header(block,
+ is_big_endian)
+ expected_length = offset + data_length
+
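+        # A termination character may trail the data block; include it in the
+        # expected length so the loop below keeps reading when the last data
+        # byte(s) happen to match the termination character.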
+ if self._read_termination is not None:
+ expected_length += len(self._read_termination)
+
+ while len(block) < expected_length:
+ block.extend(self._read_raw())
+
+ try:
+ if header_fmt == 'ieee':
+ return util.from_ieee_block(block, datatype, is_big_endian,
+ container)
+ elif header_fmt == 'hp':
+ return util.from_hp_block(block, datatype, is_big_endian,
+ container)
+ elif header_fmt == 'empty':
+ return util.from_binary_block(block, 0, None, datatype,
+ is_big_endian, container)
+            else:
+                raise ValueError("Invalid header format. Valid options are "
+                                 "'ieee', 'empty', 'hp'")
+ except ValueError as e:
+ raise errors.InvalidBinaryFormat(e.args)
+
def read_values(self, fmt=None, container=list):
"""Read a list of floating point values from the device.
@@ -365,13 +508,17 @@ class MessageBasedResource(Resource):
:return: the list of read values
:rtype: list
"""
+ warnings.warn('read_values is deprecated and will be removed in '
+ '1.10, use read_ascii_values or read_binary_values '
+ 'instead.', FutureWarning)
if not fmt:
vf = self.values_format
if not vf.is_binary:
return util.from_ascii_block(self.read(), container)
data = self._read_raw()
try:
- return util.parse_binary(data, vf.is_big_endian, vf.datatype=='f')
+ return util.parse_binary(data, vf.is_big_endian,
+ vf.datatype == 'f')
except ValueError as e:
try:
msg = e.args[0]
@@ -379,20 +526,20 @@ class MessageBasedResource(Resource):
msg = ''
raise errors.InvalidBinaryFormat(msg)
- if fmt & 0x01 == 0: # ascii
+ if fmt & 0x01 == 0: # ascii
return util.from_ascii_block(self.read())
data = self._read_raw()
try:
- if fmt & 0x03 == 1: #single
+ if fmt & 0x03 == 1: # single
is_single = True
- elif fmt & 0x03 == 3: #double:
+ elif fmt & 0x03 == 3: # double:
is_single = False
else:
raise ValueError("unknown data values fmt requested")
- is_big_endian = fmt & 0x04 # big endian
+ is_big_endian = fmt & 0x04 # big endian
return util.parse_binary(data, is_big_endian, is_single)
except ValueError as e:
raise errors.InvalidBinaryFormat(e.args)
@@ -416,8 +563,10 @@ class MessageBasedResource(Resource):
time.sleep(delay)
return self.read()
- # Kept for backwards compatibility.
- ask = query
+ def ask(self, message, delay=None):
+ warnings.warn('ask is deprecated and will be removed in '
+ '1.10, use query instead.', FutureWarning)
+ return self.query(message, delay)
def query_values(self, message, delay=None):
"""Query the device for values returning an iterable of values.
@@ -431,15 +580,23 @@ class MessageBasedResource(Resource):
:returns: the answer from the device.
:rtype: list
"""
+ warnings.warn('query_values is deprecated and will be removed in '
+                      '1.10, use query_ascii_values or query_binary_values '
+ 'instead.', FutureWarning)
vf = self.values_format
if vf.is_binary:
- return self.query_binary_values(message, vf.datatype, vf.is_big_endian, vf.container, delay, vf.header_fmt)
+ return self.query_binary_values(message, vf.datatype,
+ vf.is_big_endian, vf.container,
+ delay, vf.header_fmt)
- return self.query_ascii_values(message, vf.converter, vf.separator, vf.container, delay)
+ return self.query_ascii_values(message, vf.converter, vf.separator,
+ vf.container, delay)
- def query_ascii_values(self, message, converter='f', separator=',', container=list, delay=None):
- """Query the device for values in ascii format returning an iterable of values.
+ def query_ascii_values(self, message, converter='f', separator=',',
+ container=list, delay=None):
+ """Query the device for values in ascii format returning an iterable of
+ values.
:param message: the message to send.
:type message: str
@@ -448,8 +605,9 @@ class MessageBasedResource(Resource):
:param converter: function used to convert each element.
Defaults to float
:type converter: callable
- :param separator: a callable that split the str into individual elements.
- If a str is given, data.split(separator) is used.
+ :param separator: a callable that split the str into individual
+ elements. If a str is given, data.split(separator) is
+ used.
:type: separator: (str) -> collections.Iterable[int] | str
:param container: container type to use for the output data.
:returns: the answer from the device.
@@ -462,25 +620,28 @@ class MessageBasedResource(Resource):
if delay > 0.0:
time.sleep(delay)
- block = self.read()
-
- return util.from_ascii_block(block, converter, separator, container)
+ return self.read_ascii_values(converter, separator, container,
+ delay)
- def query_binary_values(self, message, datatype='f', is_big_endian=False, container=list, delay=None, header_fmt='ieee'):
- """Converts an iterable of numbers into a block of data in the ieee format.
+ def query_binary_values(self, message, datatype='f', is_big_endian=False,
+ container=list, delay=None, header_fmt='ieee'):
+ """Query the device for values in binary format returning an iterable
+ of values.
:param message: the message to send to the instrument.
- :param datatype: the format string for a single element. See struct module.
+ :param datatype: the format string for a single element. See struct
+ module.
        :param is_big_endian: boolean indicating endianness.
Defaults to False.
:param container: container type to use for the output data.
:param delay: delay in seconds between write and read operations.
if None, defaults to self.query_delay
- :rtype: bytes
+ :returns: the answer from the device.
+ :rtype: list
"""
-
if header_fmt not in ('ieee', 'empty', 'hp'):
- raise ValueError("Invalid header format. Valid options are 'ieee', 'empty', 'hp'")
+ raise ValueError("Invalid header format. Valid options are 'ieee',"
+ " 'empty', 'hp'")
self.write(message)
if delay is None:
@@ -488,24 +649,8 @@ class MessageBasedResource(Resource):
if delay > 0.0:
time.sleep(delay)
- block = self._read_raw()
-
- if header_fmt == 'ieee':
- offset, data_length = util.parse_ieee_block_header(block)
- expected_length = offset + data_length
-
- while len(block) < expected_length:
- block.extend(self._read_raw())
-
- try:
- if header_fmt == 'ieee':
- return util.from_ieee_block(block, datatype, is_big_endian, container)
- elif header_fmt == 'empty':
- return util.from_binary_block(block, 0, None, datatype, is_big_endian, container)
- elif header_fmt == 'hp':
- return util.from_binary_block(block, 4, None, datatype, is_big_endian, container)
- except ValueError as e:
- raise errors.InvalidBinaryFormat(e.args)
+ return self.read_binary_values(datatype, is_big_endian, container,
+ header_fmt)
def ask_for_values(self, message, fmt=None, delay=None):
"""A combination of write(message) and read_values()
@@ -517,7 +662,9 @@ class MessageBasedResource(Resource):
:returns: the answer from the device.
:rtype: list
"""
-
+        warnings.warn('ask_for_values is deprecated and will be removed in '
+                      '1.10, use query_ascii_values or query_binary_values '
+ 'instead.', FutureWarning)
self.write(message)
if delay is None:
delay = self.query_delay
@@ -529,7 +676,8 @@ class MessageBasedResource(Resource):
"""Sends a software trigger to the device.
"""
- self.visalib.assert_trigger(self.session, constants.VI_TRIG_PROT_DEFAULT)
+ self.visalib.assert_trigger(self.session,
+ constants.VI_TRIG_PROT_DEFAULT)
@property
def stb(self):
@@ -546,10 +694,12 @@ class MessageBasedResource(Resource):
@contextlib.contextmanager
def read_termination_context(self, new_termination):
term = self.get_visa_attribute(constants.VI_ATTR_TERMCHAR)
- self.set_visa_attribute(constants.VI_ATTR_TERMCHAR, ord(new_termination[-1]))
+ self.set_visa_attribute(constants.VI_ATTR_TERMCHAR,
+ ord(new_termination[-1]))
yield
self.set_visa_attribute(constants.VI_ATTR_TERMCHAR, term)
# Rohde and Schwarz Device via Passport. Not sure which Resource should be.
-MessageBasedResource.register(constants.InterfaceType.rsnrp, 'INSTR')(MessageBasedResource)
+MessageBasedResource.register(constants.InterfaceType.rsnrp,
+ 'INSTR')(MessageBasedResource)
diff --git a/pyvisa/util.py b/pyvisa/util.py
index 8fcbf1f..7b01435 100644
--- a/pyvisa/util.py
+++ b/pyvisa/util.py
@@ -11,7 +11,8 @@
:license: MIT, see LICENSE for more details.
"""
-from __future__ import division, unicode_literals, print_function, absolute_import
+from __future__ import (division, unicode_literals, print_function,
+ absolute_import)
import functools
import io
@@ -20,11 +21,34 @@ import platform
import sys
import subprocess
import warnings
+import inspect
+from subprocess import check_output
-from .compat import check_output, string_types, OrderedDict, struct
+from .compat import (string_types, OrderedDict, struct,
+ int_to_bytes, int_from_bytes, PYTHON3)
from . import __version__, logger
+try:
+ import numpy as np
+except ImportError:
+ np = None
+
+
+def _use_numpy_routines(container):
+ """Should optimized numpy routines be used to extract the data.
+
+ """
+ if np is None or container in (tuple, list):
+ return False
+
+ if (container is np.array or (inspect.isclass(container) and
+ issubclass(container, np.ndarray))):
+ return True
+
+ return False
+
+
def read_user_library_path():
"""Return the library path stored in one of the following configuration files:
@@ -43,13 +67,16 @@ def read_user_library_path():
"""
try:
- from ConfigParser import SafeConfigParser as ConfigParser, NoSectionError
+ from ConfigParser import (SafeConfigParser as ConfigParser,
+ NoSectionError)
except ImportError:
from configparser import ConfigParser, NoSectionError
config_parser = ConfigParser()
- files = config_parser.read([os.path.join(sys.prefix, "share", "pyvisa", ".pyvisarc"),
- os.path.join(os.path.expanduser("~"), ".pyvisarc")])
+ files = config_parser.read([os.path.join(sys.prefix, "share", "pyvisa",
+ ".pyvisarc"),
+ os.path.join(os.path.expanduser("~"),
+ ".pyvisarc")])
if not files:
logger.debug('No user defined library files')
@@ -151,6 +178,16 @@ _converters = {
'G': float,
}
+_np_converters = {
+ 'd': 'i',
+ 'e': 'f',
+ 'E': 'f',
+ 'f': 'f',
+ 'F': 'f',
+ 'g': 'f',
+ 'G': 'f',
+}
+
def from_ascii_block(ascii_data, converter='f', separator=',', container=list):
"""Parse ascii data and return an iterable of numbers.
@@ -165,12 +202,19 @@ def from_ascii_block(ascii_data, converter='f', separator=',', container=list):
:type: separator: (str) -> collections.Iterable[T] | str
:param container: container type to use for the output data.
"""
+ if (_use_numpy_routines(container) and
+ isinstance(converter, string_types) and
+ isinstance(separator, string_types) and
+ converter in _np_converters):
+ return np.fromstring(ascii_data, _np_converters[converter],
+ sep=separator)
if isinstance(converter, string_types):
try:
converter = _converters[converter]
except KeyError:
- raise ValueError('Invalid code for converter: %s not in %s' % (converter, str(tuple(_converters.keys()))))
+ raise ValueError('Invalid code for converter: %s not in %s' %
+ (converter, str(tuple(_converters.keys()))))
if isinstance(separator, string_types):
data = ascii_data.split(separator)
@@ -181,7 +225,7 @@ def from_ascii_block(ascii_data, converter='f', separator=',', container=list):
def to_ascii_block(iterable, converter='f', separator=','):
- """Parse ascii data and return an iterable of numbers.
+ """Turn an iterable of numbers in an ascii block of data.
:param iterable: data to be parsed.
:type iterable: collections.Iterable[T]
@@ -192,6 +236,8 @@ def to_ascii_block(iterable, converter='f', separator=','):
    :param separator: a callable that joins the values into a single str.
        If a str is given, separator.join(values) is used.
:type: separator: (collections.Iterable[T]) -> str | str
+
+ :rtype: str
"""
if isinstance(separator, string_types):
@@ -199,9 +245,10 @@ def to_ascii_block(iterable, converter='f', separator=','):
if isinstance(converter, string_types):
converter = '%' + converter
- return separator(converter % val for val in iterable)
+ block = separator(converter % val for val in iterable)
else:
- return separator(converter(val) for val in iterable)
+ block = separator(converter(val) for val in iterable)
+ return block
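+
+# Sketch (hypothetical values): to_ascii_block([1, 2, 3], 'd') -> '1,2,3'.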
def parse_binary(bytes_data, is_big_endian=False, is_single=False):
@@ -214,6 +261,9 @@ def parse_binary(bytes_data, is_big_endian=False, is_single=False):
:param is_single: boolean indicating the type (if not is double)
:return:
"""
+ warnings.warn('parse_binary is deprecated and will be removed in '
+ '1.10, use read_ascii_values or read_binary_values '
+ 'instead.', FutureWarning)
data = bytes_data
hash_sign_position = bytes_data.find(b"#")
@@ -269,20 +319,20 @@ def parse_ieee_block_header(block):
#0<data>
:param block: IEEE block.
- :type block: bytes
+ :type block: bytes | bytearray
:return: (offset, data_length)
:rtype: (int, int)
"""
begin = block.find(b'#')
if begin < 0:
- raise ValueError("Could not find hash sign (#) indicating the start of the block.")
+ raise ValueError("Could not find hash sign (#) indicating the start of"
+ " the block.")
try:
# int(block[begin+1]) != int(block[begin+1:begin+2]) in Python 3
header_length = int(block[begin+1:begin+2])
except ValueError:
header_length = 0
-
offset = begin + 2 + header_length
if header_length > 0:
@@ -297,6 +347,35 @@ def parse_ieee_block_header(block):
return offset, data_length
+def parse_hp_block_header(block, is_big_endian):
+ """Parse the header of a HP block.
+
+ Definite Length Arbitrary Block:
+ #A<data_length><data>
+
+    The header is always 4 bytes long.
+ The data_length field specifies the size of the data.
+
+ :param block: HP block.
+ :type block: bytes | bytearray
+ :param is_big_endian: boolean indicating endianess.
+ :return: (offset, data_length)
+ :rtype: (int, int)
+
+ """
+ begin = block.find(b'#A')
+ if begin < 0:
+ raise ValueError("Could not find the standard block header (#A) "
+ "indicating the start of the block.")
+ offset = begin + 4
+
+ data_length = int_from_bytes(block[begin+2:offset],
+ byteorder='big' if is_big_endian else 'little'
+ )
+
+ return offset, data_length
+
+
def from_ieee_block(block, datatype='f', is_big_endian=False, container=list):
"""Convert a block in the IEEE format into an iterable of numbers.
@@ -309,38 +388,68 @@ def from_ieee_block(block, datatype='f', is_big_endian=False, container=list):
Indefinite Length Arbitrary Block:
#0<data>
- :param block: IEEE block.
- :type block: bytes
+    :param block: IEEE block.
+ :type block: bytes | bytearray
:param datatype: the format string for a single element. See struct module.
:param is_big_endian: boolean indicating endianess.
:param container: container type to use for the output data.
:return: items
:rtype: type(container)
"""
-
offset, data_length = parse_ieee_block_header(block)
if len(block) < offset + data_length:
- raise ValueError("Binary data is incomplete. The header states %d data bytes, "
- "but %d where received." % (data_length, len(block) - offset))
+ raise ValueError("Binary data is incomplete. The header states %d data"
+ " bytes, but %d where received." %
+ (data_length, len(block) - offset))
+
+ return from_binary_block(block, offset, data_length, datatype,
+ is_big_endian, container)
- return from_binary_block(block, offset, data_length, datatype, is_big_endian, container)
+def from_hp_block(block, datatype='f', is_big_endian=False, container=list):
+ """Convert a block in the HP format into an iterable of numbers.
-def from_binary_block(block, offset=0, data_length=None, datatype='f', is_big_endian=False, container=list):
+ Definite Length Arbitrary Block:
+ #A<data_length><data>
+
+    The header is always 4 bytes long.
+ The data_length field specifies the size of the data.
+
+    :param block: HP block.
+ :type block: bytes | bytearray
+ :param datatype: the format string for a single element. See struct module.
+ :param is_big_endian: boolean indicating endianess.
+ :param container: container type to use for the output data.
+ :return: items
+ :rtype: type(container)
+ """
+ offset, data_length = parse_hp_block_header(block, is_big_endian)
+
+ if len(block) < offset + data_length:
+ raise ValueError("Binary data is incomplete. The header states %d data"
+ " bytes, but %d where received." %
+ (data_length, len(block) - offset))
+
+ return from_binary_block(block, offset, data_length, datatype,
+ is_big_endian, container)
+
+
+def from_binary_block(block, offset=0, data_length=None, datatype='f',
+ is_big_endian=False, container=list):
"""Convert a binary block into an iterable of numbers.
:param block: binary block.
- :type block: bytes
+ :type block: bytes | bytearray
:param offset: offset at which the data block starts (default=0)
- :param data_length: size in bytes of the data block (default=len(block) - offset)
+ :param data_length: size in bytes of the data block
+ (default=len(block) - offset)
:param datatype: the format string for a single element. See struct module.
:param is_big_endian: boolean indicating endianess.
:param container: container type to use for the output data.
:return: items
:rtype: type(container)
"""
-
if data_length is None:
data_length = len(block) - offset
@@ -349,6 +458,9 @@ def from_binary_block(block, offset=0, data_length=None, datatype='f', is_big_en
endianess = '>' if is_big_endian else '<'
+ if _use_numpy_routines(container):
+ return np.frombuffer(block, endianess+datatype, array_length, offset)
+
fullfmt = '%s%d%s' % (endianess, array_length, datatype)
try:
@@ -357,6 +469,27 @@ def from_binary_block(block, offset=0, data_length=None, datatype='f', is_big_en
raise ValueError("Binary data was malformed")
+def to_binary_block(iterable, header, datatype, is_big_endian):
+ """Convert an iterable of numbers into a block of data with a given header.
+
+ :param iterable: an iterable of numbers.
+ :param header: the header which should prefix the binary block
+ :param datatype: the format string for a single element. See struct module.
+ :param is_big_endian: boolean indicating endianess.
+    :return: binary block.
+ :rtype: bytes
+ """
+ array_length = len(iterable)
+
+ endianess = '>' if is_big_endian else '<'
+ fullfmt = '%s%d%s' % (endianess, array_length, datatype)
+
+ if isinstance(header, string_types):
+ header = bytes(header, 'ascii') if PYTHON3 else str(header)
+
+ return header + struct.pack(fullfmt, *iterable)
+
+
def to_ieee_block(iterable, datatype='f', is_big_endian=False):
"""Convert an iterable of numbers into a block of data in the IEEE format.
@@ -366,21 +499,34 @@ def to_ieee_block(iterable, datatype='f', is_big_endian=False):
:return: IEEE block.
:rtype: bytes
"""
-
array_length = len(iterable)
element_length = struct.calcsize(datatype)
data_length = array_length * element_length
header = '%d' % data_length
- header = '#%d%s'%(len(header),header)
+ header = '#%d%s' % (len(header), header)
- endianess = '>' if is_big_endian else '<'
- fullfmt = '%s%d%s' % (endianess, array_length, datatype)
+ return to_binary_block(iterable, header, datatype, is_big_endian)
- if sys.version >= '3':
- return bytes(header, 'ascii') + struct.pack(fullfmt, *iterable)
- else:
- return str(header) + struct.pack(fullfmt, *iterable)
+
+def to_hp_block(iterable, datatype='f', is_big_endian=False):
+ """Convert an iterable of numbers into a block of data in the HP format.
+
+ :param iterable: an iterable of numbers.
+ :param datatype: the format string for a single element. See struct module.
+ :param is_big_endian: boolean indicating endianess.
+    :return: HP block.
+ :rtype: bytes
+ """
+
+ array_length = len(iterable)
+ element_length = struct.calcsize(datatype)
+ data_length = array_length * element_length
+
+ header = b'#A' + (int_to_bytes(data_length, 2,
+ 'big' if is_big_endian else 'little'))
+
+ return to_binary_block(iterable, header, datatype, is_big_endian)
def get_system_details(backends=True):
@@ -530,7 +676,7 @@ machine_types = {
0x0200: 'IA64',
0x0266: 'MIPS16',
0x0284: 'ALPHA64',
- #0x0284: 'AXP64', # same
+ # 0x0284: 'AXP64', # same
0x0366: 'MIPSFPU',
0x0466: 'MIPSFPU16',
0x0520: 'TRICORE',
@@ -573,7 +719,7 @@ def get_arch(filename):
return 64,
else:
return ()
- elif not this_platform in ('linux2', 'linux3', 'linux', 'darwin'):
+ elif this_platform not in ('linux2', 'linux3', 'linux', 'darwin'):
raise OSError('')
out = check_output(["file", filename], stderr=subprocess.STDOUT)
| 'Can't concat bytes to str' error when calling write_ascii_values
It's very strange: `message.encode(enco)` is bytes while `util.to_ascii_block()` returns a string, so line 239 of messagebased.py, `message = message.encode(enco) + block`, fails with the error given in the title.
I use "d" for the converter and "," for the separator.
Why is this happening? Wasn't it tested at all, or am I doing something wrong?
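Here is a minimal standalone sketch of the mismatch (the helper is a stand-in for `util.to_ascii_block`, not pyvisa's actual implementation):
```python
message = "WAVEFORM"
enco = "ascii"

def to_ascii_block(iterable, converter="d", separator=","):
    # Stand-in mirroring util.to_ascii_block: it builds and returns str, never bytes.
    return separator.join(("%" + converter) % v for v in iterable)

block = to_ascii_block([1, 2, 3])  # '1,2,3' (str, not bytes)
message.encode(enco) + block       # TypeError: can't concat str to bytes
```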
I could fix it trivially by calling .encode on the block variable. | pyvisa/pyvisa | diff --git a/pyvisa/testsuite/__init__.py b/pyvisa/testsuite/__init__.py
index e46a262..acd8991 100644
--- a/pyvisa/testsuite/__init__.py
+++ b/pyvisa/testsuite/__init__.py
@@ -1,15 +1,18 @@
# -*- coding: utf-8 -*-
-from __future__ import division, unicode_literals, print_function, absolute_import
+from __future__ import (division, unicode_literals, print_function,
+ absolute_import)
import os
import logging
+import warnings
+import unittest
from contextlib import contextmanager
from logging.handlers import BufferingHandler
from pyvisa import logger
-from pyvisa.compat import unittest
+from pyvisa.compat import PYTHON3
class TestHandler(BufferingHandler):
@@ -62,6 +65,21 @@ class BaseTestCase(unittest.TestCase):
msg = '\n'.join(record.get('msg', str(record)) for record in buf)
self.assertEqual(l, 0, msg='%d warnings raised.\n%s' % (l, msg))
+ if not PYTHON3:
+ @contextmanager
+ def assertWarns(self, category):
+ """Backport for Python 2
+
+ """
+ with warnings.catch_warnings(record=True) as w:
+ # Cause all warnings to always be triggered.
+ warnings.simplefilter("always")
+ # Trigger a warning.
+ yield
+ # Verify some things
+ assert len(w) == 1, 'No warning raised'
+ assert issubclass(w[-1].category, category)
+
def testsuite():
"""A testsuite that has all the pyvisa tests.
@@ -85,4 +103,3 @@ def run():
"""
test_runner = unittest.TextTestRunner()
return test_runner.run(testsuite())
-
diff --git a/pyvisa/testsuite/test_rname.py b/pyvisa/testsuite/test_rname.py
index f6c694b..55193ba 100644
--- a/pyvisa/testsuite/test_rname.py
+++ b/pyvisa/testsuite/test_rname.py
@@ -1,8 +1,9 @@
# -*- coding: utf-8 -*-
-from __future__ import division, unicode_literals, print_function, absolute_import
+from __future__ import (division, unicode_literals, print_function,
+ absolute_import)
-from pyvisa.compat import unittest
+import unittest
from pyvisa.testsuite import BaseTestCase
from pyvisa import rname
diff --git a/pyvisa/testsuite/test_util.py b/pyvisa/testsuite/test_util.py
index 85ff8c7..00501eb 100644
--- a/pyvisa/testsuite/test_util.py
+++ b/pyvisa/testsuite/test_util.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
-from __future__ import division, unicode_literals, print_function, absolute_import
+from __future__ import (division, unicode_literals, print_function,
+ absolute_import)
from pyvisa.testsuite import BaseTestCase
@@ -12,49 +13,102 @@ try:
except ImportError:
np = None
+
class TestParser(BaseTestCase):
def test_parse_binary(self):
- s = b'#A@\xe2\x8b<@\xe2\x8b<@\xe2\x8b<@\xe2\x8b<@\xde\x8b<@\xde\x8b<@\xde\x8b<' \
- b'@\xde\x8b<@\xe0\x8b<@\xe0\x8b<@\xdc\x8b<@\xde\x8b<@\xe2\x8b<@\xe0\x8b<'
+ s = (b'#A@\xe2\x8b<@\xe2\x8b<@\xe2\x8b<@\xe2\x8b<@\xde\x8b<@\xde\x8b<@'
+ b'\xde\x8b<@\xde\x8b<@\xe0\x8b<@\xe0\x8b<@\xdc\x8b<@\xde\x8b<@'
+ b'\xe2\x8b<@\xe0\x8b<')
e = [0.01707566, 0.01707566, 0.01707566, 0.01707566, 0.01707375,
0.01707375, 0.01707375, 0.01707375, 0.01707470, 0.01707470,
0.01707280, 0.01707375, 0.01707566, 0.01707470]
- p = util.parse_binary(s, is_big_endian=False, is_single=True)
+ with self.assertWarns(FutureWarning):
+ p = util.parse_binary(s, is_big_endian=False, is_single=True)
for a, b in zip(p, e):
self.assertAlmostEqual(a, b)
+
+ # Test handling indefinite length block
p = util.from_ieee_block(s, datatype='f', is_big_endian=False)
for a, b in zip(p, e):
self.assertAlmostEqual(a, b)
- def test_ieee_integer(self):
+ # Test handling definite length block
+ p = util.from_ieee_block(b'#214' + s[2:], datatype='f',
+ is_big_endian=False)
+ for a, b in zip(p, e):
+ self.assertAlmostEqual(a, b)
+
+ p = util.from_hp_block(b'#A\x0e\x00' + s[2:], datatype='f',
+ is_big_endian=False)
+ for a, b in zip(p, e):
+ self.assertAlmostEqual(a, b)
+
+ def test_integer_ascii_block(self):
values = list(range(99))
- containers = (list, tuple) #+ ((np.asarray,) if np else ())
- for fmt in 'bBhHiIfd':
- for endi in (True, False):
- for cont in containers:
- conv = cont(values)
- msg = 'fmt=%s, endianness=%s, container=%s' % (fmt, endi, cont.__name__)
- try:
- block = util.to_ieee_block(conv, fmt, endi)
- parsed = util.from_ieee_block(block, fmt, endi, cont)
- except Exception as e:
- raise Exception(msg + '\n' + repr(e))
-
- self.assertEqual(conv, parsed, msg)
-
- def test_ieee_noninteger(self):
+ for fmt in 'd':
+ msg = 'block=%s, fmt=%s'
+ msg = msg % ('ascii', fmt)
+ tb = lambda values: util.to_ascii_block(values, fmt, ',')
+ fb = lambda block, cont: util.from_ascii_block(block, fmt, ',',
+ cont)
+            self.round_trip_block_conversion(values, tb, fb, msg)
+
+ def test_non_integer_ascii_block(self):
values = [val + 0.5 for val in range(99)]
- containers = (list, tuple) #+ ((np.asarray,) if np else ())
- for fmt in 'fd':
- for endi in (True, False):
- for cont in containers:
- conv = cont(values)
- msg = 'fmt=%s, endianness=%s, container=%s' % (fmt, endi, cont.__name__)
- try:
- block = util.to_ieee_block(conv, fmt, endi)
- parsed = util.from_ieee_block(block, fmt, endi, cont)
- except Exception as e:
- raise Exception(msg + '\n' + repr(e))
-
- self.assertEqual(conv, parsed, msg)
+ for fmt in 'fFeEgG':
+ msg = 'block=%s, fmt=%s'
+ msg = msg % ('ascii', fmt)
+ tb = lambda values: util.to_ascii_block(values, fmt, ',')
+ fb = lambda block, cont: util.from_ascii_block(block, fmt, ',',
+ cont)
+            self.round_trip_block_conversion(values, tb, fb, msg)
+
+ def test_integer_binary_block(self):
+ values = list(range(99))
+ for block, tb, fb in zip(('ieee', 'hp'),
+ (util.to_ieee_block, util.to_hp_block),
+ (util.from_ieee_block, util.from_hp_block)):
+ for fmt in 'bBhHiIfd':
+ for endi in (True, False):
+ msg = 'block=%s, fmt=%s, endianness=%s'
+ msg = msg % (block, fmt, endi)
+ tblock = lambda values: tb(values, fmt, endi)
+ fblock = lambda block, cont: fb(block, fmt, endi, cont)
+                    self.round_trip_block_conversion(values, tblock, fblock,
+                                                     msg)
+
+ def test_noninteger_binary_block(self):
+ values = [val + 0.5 for val in range(99)]
+ for block, tb, fb in zip(('ieee', 'hp'),
+ (util.to_ieee_block, util.to_hp_block),
+ (util.from_ieee_block, util.from_hp_block)):
+ for fmt in 'fd':
+ for endi in (True, False):
+ msg = 'block=%s, fmt=%s, endianness=%s'
+ msg = msg % (block, fmt, endi)
+ tblock = lambda values: bytearray(tb(values, fmt, endi))
+ fblock = lambda block, cont: fb(block, fmt, endi, cont)
+                    self.round_trip_block_conversion(values, tblock, fblock,
+                                                     msg)
+
+    def round_trip_block_conversion(self, values, to_block, from_block, msg):
+        """Test that a block conversion round-trips as expected.
+
+        """
+ containers = (list, tuple) + ((np.array,) if np else ())
+ for cont in containers:
+ conv = cont(values)
+ msg += ', container=%s'
+ msg = msg % cont.__name__
+ try:
+ block = to_block(conv)
+ parsed = from_block(block, cont)
+ except Exception as e:
+ raise Exception(msg + '\n' + repr(e))
+
+ if np and cont in (np.array,):
+ np.testing.assert_array_equal(conv, parsed, msg)
+ else:
+ self.assertEqual(conv, parsed, msg)
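For reference, a minimal standalone sketch of the two binary block headers these round-trip tests exercise (plain `struct`, not pyvisa calls):
```python
import struct

values = [1, 2, 3]
payload = struct.pack('<3h', *values)  # 6 bytes of little-endian int16

# IEEE 488.2 definite-length header: '#', one digit giving the number of
# length digits, then the length itself ('6' here).
ieee_block = b'#16' + payload

# HP header: '#A' followed by a fixed 2-byte length field.
hp_block = b'#A' + (6).to_bytes(2, 'little') + payload
```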
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 8
} | 1.8 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
-e git+https://github.com/pyvisa/pyvisa.git@52fca2a660c706564bd59ef36f22c616b1bd42c1#egg=PyVISA
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: pyvisa
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
prefix: /opt/conda/envs/pyvisa
| [
"pyvisa/testsuite/test_util.py::TestParser::test_integer_binary_block",
"pyvisa/testsuite/test_util.py::TestParser::test_noninteger_binary_block",
"pyvisa/testsuite/test_util.py::TestParser::test_parse_binary"
]
| []
| [
"pyvisa/testsuite/__init__.py::testsuite",
"pyvisa/testsuite/test_rname.py::TestParsers::test_asrl",
"pyvisa/testsuite/test_rname.py::TestParsers::test_gpib_instr",
"pyvisa/testsuite/test_rname.py::TestParsers::test_gpib_intf",
"pyvisa/testsuite/test_rname.py::TestParsers::test_tcpip_intr",
"pyvisa/testsuite/test_rname.py::TestParsers::test_tcpip_socket",
"pyvisa/testsuite/test_rname.py::TestParsers::test_usb_instr",
"pyvisa/testsuite/test_rname.py::TestParsers::test_usb_raw",
"pyvisa/testsuite/test_rname.py::TestFilters::test_filter",
"pyvisa/testsuite/test_util.py::TestParser::test_integer_ascii_block",
"pyvisa/testsuite/test_util.py::TestParser::test_non_integer_ascii_block"
]
| []
| MIT License | 1,947 | [
"docs/conf.py",
"docs/rvalues.rst",
"pyvisa/util.py",
"pyvisa/compat/struct.py",
".travis.yml",
"pyvisa/compat/__init__.py",
"pyvisa/constants.py",
"pyvisa/resources/messagebased.py"
]
| [
"docs/conf.py",
"docs/rvalues.rst",
"pyvisa/util.py",
"pyvisa/compat/struct.py",
".travis.yml",
"pyvisa/compat/__init__.py",
"pyvisa/constants.py",
"pyvisa/resources/messagebased.py"
]
|
springload__draftjs_exporter-85 | 7acd6218f1a8460efd67965bb227dca16cf65bf0 | 2017-12-10 01:14:40 | 7acd6218f1a8460efd67965bb227dca16cf65bf0 | loicteixeira: Dependency free package 🎉 Well done 👌 | diff --git a/README.rst b/README.rst
index 94898d6..20b58a9 100644
--- a/README.rst
+++ b/README.rst
@@ -248,33 +248,22 @@ See ``examples.py`` in the repository for more details.
Alternative backing engines
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-By default the exporter uses ``html5lib`` via BeautifulSoup to build the DOM tree. There are two alternative backing engines: ``string`` and ``lxml``.
+By default, the exporter uses a dependency-free engine called ``string`` to build the DOM tree. There are two alternative backing engines: ``html5lib`` (via BeautifulSoup) and ``lxml``.
-The ``string`` engine is the fastest, and does not have any dependencies. Its only drawback is that the ``parse_html`` method does not escape/sanitise HTML like that of other engines.
+The ``string`` engine is the fastest and does not have any dependencies. Its only drawback is that its ``parse_html`` method does not escape/sanitise HTML like the other engines do. It is also more recent, so it hasn't been as battle-tested as the other ones.
-To use it, add the following to the exporter config:
+* For ``html5lib``, do ``pip install draftjs_exporter[html5lib]``.
+* For ``lxml``, do ``pip install draftjs_exporter[lxml]``. It also requires ``libxml2`` and ``libxslt`` to be available on your system.
-.. code:: python
-
- config = {
- # Specify which DOM backing engine to use.
- 'engine': 'string',
- }
-
-``lxml`` is also supported. It requires ``libxml2`` and ``libxslt`` to be available on your system.
-
-.. code:: sh
-
- # Use the `lxml` extra to install the exporter and its lxml dependencies:
- pip install draftjs_exporter[lxml]
-
-Add the following to the exporter config:
+Then, use the ``engine`` attribute of the exporter config:
.. code:: python
config = {
# Specify which DOM backing engine to use.
- 'engine': 'lxml',
+ 'engine': DOM.HTML5LIB,
+ # Or for lxml:
+ 'engine': DOM.LXML,
}
Custom backing engines
@@ -307,7 +296,10 @@ Here is an example implementation:
return elt
- exporter = HTML({'engine': DOMListTree})
+ exporter = HTML({
+ # Use the dotted module syntax to point to the DOMEngine implementation.
+ 'engine': 'myproject.example.DOMListTree'
+ })
Development
-----------
diff --git a/draftjs_exporter/html.py b/draftjs_exporter/html.py
index 2eb5264..9c4ce2b 100644
--- a/draftjs_exporter/html.py
+++ b/draftjs_exporter/html.py
@@ -25,7 +25,7 @@ class HTML:
self.block_map = config.get('block_map', BLOCK_MAP)
self.style_map = config.get('style_map', STYLE_MAP)
- DOM.use(config.get('engine', DOM.HTML5LIB))
+ DOM.use(config.get('engine', DOM.STRING))
def render(self, content_state=None):
"""
diff --git a/setup.py b/setup.py
index c6438ed..4f0cb3b 100755
--- a/setup.py
+++ b/setup.py
@@ -11,8 +11,9 @@ try:
except ImportError:
from distutils.core import setup
+dependencies = []
-dependencies = [
+html5lib_dependencies = [
'beautifulsoup4>=4.4.1,<5',
'html5lib>=0.999,<=1.0b10',
]
@@ -34,7 +35,7 @@ testing_dependencies = [
'coverage>=4.1.0',
'flake8>=3.2.0',
'isort==4.2.5',
-] + lxml_dependencies
+] + html5lib_dependencies + lxml_dependencies
documentation_dependencies = [
@@ -78,5 +79,6 @@ setup(
'testing': testing_dependencies,
'docs': documentation_dependencies,
'lxml': lxml_dependencies,
+ 'html5lib': html5lib_dependencies,
},
zip_safe=False)
| Change default engine to the new dependency-free one introduced in #77
The exporter now has an engine that doesn't have any dependencies. It should probably be the one activated by default, to make the whole package dependency-free unless another engine is configured. It also happens to be faster and less memory-hungry.
This is a breaking change though, however small the difference from the output of the current default (html5lib + BeautifulSoup), so it should be part of the 2.0.0 release.
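A quick sketch of what the change means from the caller's side, mirroring the new `test_init_dom_engine_default` test below:
```python
from draftjs_exporter.dom import DOM
from draftjs_exporter.engines.string import DOMString
from draftjs_exporter.html import HTML

HTML()                       # no 'engine' key in the config
assert DOM.dom is DOMString  # the dependency-free engine is now the default

# html5lib stays available as an opt-in extra:
# pip install draftjs_exporter[html5lib]
HTML({'engine': DOM.HTML5LIB})
```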
As part of this change, it will also be necessary to move the html5lib / BS4 dependencies to a separate extra like for lxml (`pip install draftjs_exporter[html5lib]`), as well as update the documentation. | springload/draftjs_exporter | diff --git a/tests/test_composite_decorators.py b/tests/test_composite_decorators.py
index 721f4dd..aeb029c 100644
--- a/tests/test_composite_decorators.py
+++ b/tests/test_composite_decorators.py
@@ -55,7 +55,7 @@ class TestHashtag(unittest.TestCase):
self.assertEqual(DOM.render(DOM.create_element(HASHTAG_DECORATOR['component'], {'block': {'type': BLOCK_TYPES.UNSTYLED}}, '#hashtagtest')), '<span class="hashtag">#hashtagtest</span>')
def test_render_code_block(self):
- self.assertEqual(DOM.render(DOM.create_element(HASHTAG_DECORATOR['component'], {'block': {'type': BLOCK_TYPES.CODE}}, '#hashtagtest')), '#hashtagtest')
+ self.assertEqual(DOM.create_element(HASHTAG_DECORATOR['component'], {'block': {'type': BLOCK_TYPES.CODE}}, '#hashtagtest'), '#hashtagtest')
class TestBR(unittest.TestCase):
@@ -68,7 +68,7 @@ class TestBR(unittest.TestCase):
class TestCompositeDecorators(unittest.TestCase):
def test_render_decorators_empty(self):
- self.assertEqual(DOM.render(render_decorators([], 'test https://www.example.com#hash #hashtagtest', {'type': BLOCK_TYPES.UNSTYLED, 'depth': 0})), 'test https://www.example.com#hash #hashtagtest')
+ self.assertEqual(render_decorators([], 'test https://www.example.com#hash #hashtagtest', {'type': BLOCK_TYPES.UNSTYLED, 'depth': 0}), 'test https://www.example.com#hash #hashtagtest')
def test_render_decorators_single(self):
self.assertEqual(DOM.render(render_decorators([LINKIFY_DECORATOR], 'test https://www.example.com#hash #hashtagtest', {'type': BLOCK_TYPES.UNSTYLED, 'depth': 0})), 'test <a href="https://www.example.com#hash">https://www.example.com#hash</a> #hashtagtest')
diff --git a/tests/test_dom.py b/tests/test_dom.py
index 48eed62..e5d5172 100644
--- a/tests/test_dom.py
+++ b/tests/test_dom.py
@@ -5,6 +5,7 @@ import unittest
from draftjs_exporter.dom import DOM
from draftjs_exporter.engines.html5lib import DOM_HTML5LIB
from draftjs_exporter.engines.lxml import DOM_LXML
+from draftjs_exporter.engines.string import DOMString
from tests.test_entities import icon
@@ -29,6 +30,10 @@ class TestDOM(unittest.TestCase):
DOM.use(DOM.HTML5LIB)
self.assertEqual(DOM.dom, DOM_HTML5LIB)
+ def test_use_string(self):
+ DOM.use(DOM.STRING)
+ self.assertEqual(DOM.dom, DOMString)
+
def test_use_invalid(self):
with self.assertRaises(ImportError):
DOM.use('test')
diff --git a/tests/test_html.py b/tests/test_html.py
index 86a2ceb..069196a 100644
--- a/tests/test_html.py
+++ b/tests/test_html.py
@@ -3,6 +3,8 @@ from __future__ import absolute_import, unicode_literals
import unittest
from draftjs_exporter.command import Command
+from draftjs_exporter.dom import DOM
+from draftjs_exporter.engines.string import DOMString
from draftjs_exporter.html import HTML
config = {
@@ -29,6 +31,10 @@ class TestHTML(unittest.TestCase):
def test_init(self):
self.assertIsInstance(self.exporter, HTML)
+ def test_init_dom_engine_default(self):
+ HTML()
+ self.assertEqual(DOM.dom, DOMString)
+
def test_render_block_exists(self):
self.assertTrue('render_block' in dir(self.exporter))
diff --git a/tests/test_output.py b/tests/test_output.py
index 28726d7..3922ee4 100644
--- a/tests/test_output.py
+++ b/tests/test_output.py
@@ -43,7 +43,7 @@ config = {
'props': {'style': {'textDecoration': 'underline'}},
},
},
- 'engine': DOM.HTML5LIB
+ 'engine': DOM.STRING,
}
diff --git a/tests/test_style_state.py b/tests/test_style_state.py
index 8101933..44b3120 100644
--- a/tests/test_style_state.py
+++ b/tests/test_style_state.py
@@ -32,7 +32,7 @@ style_map = {
class TestStyleState(unittest.TestCase):
def setUp(self):
- DOM.use(DOM.HTML5LIB)
+ DOM.use(DOM.STRING)
self.style_state = StyleState(style_map)
def test_init(self):
diff --git a/tests/test_wrapper_state.py b/tests/test_wrapper_state.py
index 60deb78..8d2cebe 100644
--- a/tests/test_wrapper_state.py
+++ b/tests/test_wrapper_state.py
@@ -9,7 +9,7 @@ from example import blockquote, list_item, ordered_list
class TestWrapperState(unittest.TestCase):
def setUp(self):
- DOM.use(DOM.HTML5LIB)
+ DOM.use(DOM.STRING)
self.wrapper_state = WrapperState({
'header-one': 'h1',
@@ -106,7 +106,7 @@ class TestWrapperState(unittest.TestCase):
class TestBlockquote(unittest.TestCase):
def setUp(self):
- DOM.use(DOM.HTML5LIB)
+ DOM.use(DOM.STRING)
def test_render_debug(self):
self.assertEqual(DOM.render_debug(DOM.create_element(blockquote, {
@@ -120,7 +120,7 @@ class TestBlockquote(unittest.TestCase):
class TestListItem(unittest.TestCase):
def setUp(self):
- DOM.use(DOM.HTML5LIB)
+ DOM.use(DOM.STRING)
def test_render_debug(self):
self.assertEqual(DOM.render_debug(DOM.create_element(list_item, {
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 3
} | 1.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[testing,docs]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | beautifulsoup4==4.13.3
cachetools==5.5.2
chardet==5.2.0
colorama==0.4.6
coverage==7.8.0
distlib==0.3.9
-e git+https://github.com/springload/draftjs_exporter.git@7acd6218f1a8460efd67965bb227dca16cf65bf0#egg=draftjs_exporter
exceptiongroup==1.2.2
execnet==2.1.1
filelock==3.18.0
flake8==7.2.0
html5lib==1.0b10
iniconfig==2.1.0
isort==4.2.5
lxml==5.3.1
markov-draftjs==0.1.1
mccabe==0.7.0
memory_profiler==0.47
packaging==24.2
platformdirs==4.3.7
pluggy==1.5.0
psutil==5.4.1
pycodestyle==2.13.0
pyflakes==3.3.1
pyproject-api==1.9.0
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
six==1.17.0
soupsieve==2.6
tomli==2.2.1
tox==4.25.0
typing_extensions==4.13.0
virtualenv==20.29.3
webencodings==0.5.1
| name: draftjs_exporter
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- beautifulsoup4==4.13.3
- cachetools==5.5.2
- chardet==5.2.0
- colorama==0.4.6
- coverage==7.8.0
- distlib==0.3.9
- exceptiongroup==1.2.2
- execnet==2.1.1
- filelock==3.18.0
- flake8==7.2.0
- html5lib==1.0b10
- iniconfig==2.1.0
- isort==4.2.5
- lxml==5.3.1
- markov-draftjs==0.1.1
- mccabe==0.7.0
- memory-profiler==0.47
- packaging==24.2
- platformdirs==4.3.7
- pluggy==1.5.0
- psutil==5.4.1
- pycodestyle==2.13.0
- pyflakes==3.3.1
- pyproject-api==1.9.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- six==1.17.0
- soupsieve==2.6
- tomli==2.2.1
- tox==4.25.0
- typing-extensions==4.13.0
- virtualenv==20.29.3
- webencodings==0.5.1
prefix: /opt/conda/envs/draftjs_exporter
| [
"tests/test_html.py::TestHTML::test_init_dom_engine_default"
]
| []
| [
"tests/test_composite_decorators.py::TestLinkify::test_render",
"tests/test_composite_decorators.py::TestLinkify::test_render_code_block",
"tests/test_composite_decorators.py::TestLinkify::test_render_www",
"tests/test_composite_decorators.py::TestHashtag::test_render",
"tests/test_composite_decorators.py::TestHashtag::test_render_code_block",
"tests/test_composite_decorators.py::TestBR::test_render",
"tests/test_composite_decorators.py::TestBR::test_render_code_block",
"tests/test_composite_decorators.py::TestCompositeDecorators::test_render_decorators_conflicting_order_one",
"tests/test_composite_decorators.py::TestCompositeDecorators::test_render_decorators_conflicting_order_two",
"tests/test_composite_decorators.py::TestCompositeDecorators::test_render_decorators_empty",
"tests/test_composite_decorators.py::TestCompositeDecorators::test_render_decorators_single",
"tests/test_dom.py::TestDOM::test_append_child",
"tests/test_dom.py::TestDOM::test_camel_to_dash",
"tests/test_dom.py::TestDOM::test_create_element",
"tests/test_dom.py::TestDOM::test_create_element_empty",
"tests/test_dom.py::TestDOM::test_create_element_entity",
"tests/test_dom.py::TestDOM::test_create_element_nested",
"tests/test_dom.py::TestDOM::test_create_element_none",
"tests/test_dom.py::TestDOM::test_create_element_style_dict",
"tests/test_dom.py::TestDOM::test_create_element_style_str",
"tests/test_dom.py::TestDOM::test_parse_html",
"tests/test_dom.py::TestDOM::test_render_debug",
"tests/test_dom.py::TestDOM::test_use_custom",
"tests/test_dom.py::TestDOM::test_use_html5lib",
"tests/test_dom.py::TestDOM::test_use_invalid",
"tests/test_dom.py::TestDOM::test_use_lxml",
"tests/test_dom.py::TestDOM::test_use_string",
"tests/test_html.py::TestHTML::test_build_command_groups_empty",
"tests/test_html.py::TestHTML::test_build_command_groups_multiple",
"tests/test_html.py::TestHTML::test_build_commands_empty",
"tests/test_html.py::TestHTML::test_build_commands_multiple",
"tests/test_html.py::TestHTML::test_build_entity_commands_empty",
"tests/test_html.py::TestHTML::test_build_entity_commands_multiple",
"tests/test_html.py::TestHTML::test_build_entity_commands_single",
"tests/test_html.py::TestHTML::test_build_style_commands_empty",
"tests/test_html.py::TestHTML::test_build_style_commands_multiple",
"tests/test_html.py::TestHTML::test_build_style_commands_single",
"tests/test_html.py::TestHTML::test_init",
"tests/test_html.py::TestHTML::test_render",
"tests/test_html.py::TestHTML::test_render_block_exists",
"tests/test_html.py::TestHTML::test_render_empty",
"tests/test_html.py::TestHTML::test_render_none",
"tests/test_html.py::TestHTML::test_render_twice",
"tests/test_output.py::TestOutput::test_render_empty",
"tests/test_output.py::TestOutput::test_render_with_backtracking_nested_wrapping",
"tests/test_output.py::TestOutput::test_render_with_big_content",
"tests/test_output.py::TestOutput::test_render_with_boolean_attribute_false",
"tests/test_output.py::TestOutput::test_render_with_boolean_attribute_true",
"tests/test_output.py::TestOutput::test_render_with_default_block_map",
"tests/test_output.py::TestOutput::test_render_with_default_config",
"tests/test_output.py::TestOutput::test_render_with_default_style_map",
"tests/test_output.py::TestOutput::test_render_with_different_blocks",
"tests/test_output.py::TestOutput::test_render_with_element_options",
"tests/test_output.py::TestOutput::test_render_with_entities",
"tests/test_output.py::TestOutput::test_render_with_entities_crossing_raises",
"tests/test_output.py::TestOutput::test_render_with_entity",
"tests/test_output.py::TestOutput::test_render_with_entity_and_decorators",
"tests/test_output.py::TestOutput::test_render_with_immediate_jumping",
"tests/test_output.py::TestOutput::test_render_with_inline_styles",
"tests/test_output.py::TestOutput::test_render_with_jumping_wrapping",
"tests/test_output.py::TestOutput::test_render_with_line_breaks",
"tests/test_output.py::TestOutput::test_render_with_many_line_breaks",
"tests/test_output.py::TestOutput::test_render_with_multiple_decorators",
"tests/test_output.py::TestOutput::test_render_with_multiple_inline_styles",
"tests/test_output.py::TestOutput::test_render_with_no_zero_depth",
"tests/test_output.py::TestOutput::test_render_with_none_attribute",
"tests/test_output.py::TestOutput::test_render_with_none_component",
"tests/test_output.py::TestOutput::test_render_with_none_return_value",
"tests/test_output.py::TestOutput::test_render_with_number_attribute",
"tests/test_output.py::TestOutput::test_render_with_styles_in_entities",
"tests/test_output.py::TestOutput::test_render_with_unicode",
"tests/test_output.py::TestOutput::test_render_with_unidirectional_nested_wrapping",
"tests/test_output.py::TestOutput::test_render_with_unknown_attribute",
"tests/test_output.py::TestOutput::test_render_with_wrapping",
"tests/test_output.py::TestOutput::test_render_with_wrapping_reset",
"tests/test_output.py::TestOutput::test_render_with_wrapping_reset_block_components",
"tests/test_style_state.py::TestStyleState::test_apply_start_inline_style",
"tests/test_style_state.py::TestStyleState::test_apply_stop_inline_style",
"tests/test_style_state.py::TestStyleState::test_init",
"tests/test_style_state.py::TestStyleState::test_is_empty_default",
"tests/test_style_state.py::TestStyleState::test_is_empty_styled",
"tests/test_style_state.py::TestStyleState::test_render_styles_attributes",
"tests/test_style_state.py::TestStyleState::test_render_styles_component",
"tests/test_style_state.py::TestStyleState::test_render_styles_component_multiple",
"tests/test_style_state.py::TestStyleState::test_render_styles_component_multiple_invert",
"tests/test_style_state.py::TestStyleState::test_render_styles_styled",
"tests/test_style_state.py::TestStyleState::test_render_styles_styled_multiple",
"tests/test_style_state.py::TestStyleState::test_render_styles_unicode",
"tests/test_style_state.py::TestStyleState::test_render_styles_unstyled",
"tests/test_wrapper_state.py::TestWrapperState::test_element_for_component",
"tests/test_wrapper_state.py::TestWrapperState::test_element_for_component_wrapper",
"tests/test_wrapper_state.py::TestWrapperState::test_element_for_dismiss_content",
"tests/test_wrapper_state.py::TestWrapperState::test_element_for_element_content",
"tests/test_wrapper_state.py::TestWrapperState::test_element_for_no_block",
"tests/test_wrapper_state.py::TestWrapperState::test_element_for_simple_content",
"tests/test_wrapper_state.py::TestWrapperState::test_init",
"tests/test_wrapper_state.py::TestWrapperState::test_str",
"tests/test_wrapper_state.py::TestWrapperState::test_str_elts",
"tests/test_wrapper_state.py::TestBlockquote::test_render_debug",
"tests/test_wrapper_state.py::TestListItem::test_render_debug"
]
| []
| MIT License | 1,948 | [
"README.rst",
"draftjs_exporter/html.py",
"setup.py"
]
| [
"README.rst",
"draftjs_exporter/html.py",
"setup.py"
]
|
google__yapf-485 | c67685c0f4bf04dc2d34f8d615fa256181913788 | 2017-12-10 04:55:36 | c67685c0f4bf04dc2d34f8d615fa256181913788 | diff --git a/yapf/yapflib/reformatter.py b/yapf/yapflib/reformatter.py
index 7ed7a62..299fef8 100644
--- a/yapf/yapflib/reformatter.py
+++ b/yapf/yapflib/reformatter.py
@@ -439,7 +439,7 @@ def _IsClassOrDef(uwline):
if uwline.first.value in {'class', 'def'}:
return True
- return (t.name for t in uwline.tokens[:2]) == ('async', 'def')
+ return [t.value for t in uwline.tokens[:2]] == ['async', 'def']
def _CalculateNumberOfNewlines(first_token, indent_depth, prev_uwline,
| Regression in formatting nested 'async def' functions
My style file has `blank_line_before_nested_class_or_def=False`.
Yapf 0.19.0 formats like:
```python
async def foo():
async def bar():
pass
```
Yapf 0.20.0 changes this to:
```python
async def foo():
async def bar():
pass
```
According to `git bisect`, this was introduced by 58e36945be8978deb7ab3ad3681f5feccb0405fc, which seems like it must be a bug, since that commit wasn't intended to change python 3 formatting at all. | google/yapf | diff --git a/yapftests/reformatter_python3_test.py b/yapftests/reformatter_python3_test.py
index c6a3e99..6ed1bbb 100644
--- a/yapftests/reformatter_python3_test.py
+++ b/yapftests/reformatter_python3_test.py
@@ -209,6 +209,17 @@ class TestsForPython3Code(yapf_test_helper.YAPFTest):
uwlines = yapf_test_helper.ParseAndUnwrap(unformatted_code)
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(uwlines))
+ def testAsyncFunctionsNested(self):
+ if sys.version_info[1] < 5:
+ return
+ code = textwrap.dedent("""\
+ async def outer():
+ async def inner():
+ pass
+ """)
+ uwlines = yapf_test_helper.ParseAndUnwrap(code)
+ self.assertCodeEqual(code, reformatter.Reformat(uwlines))
+
def testKeepTypesIntact(self):
if sys.version_info[1] < 5:
return
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_git_commit_hash"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 0.20 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
-e git+https://github.com/google/yapf.git@c67685c0f4bf04dc2d34f8d615fa256181913788#egg=yapf
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: yapf
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
prefix: /opt/conda/envs/yapf
| [
"yapftests/reformatter_python3_test.py::TestsForPython3Code::testAsyncFunctionsNested"
]
| []
| [
"yapftests/reformatter_python3_test.py::TestsForPython3Code::testAnnotations",
"yapftests/reformatter_python3_test.py::TestsForPython3Code::testAsyncFunctions",
"yapftests/reformatter_python3_test.py::TestsForPython3Code::testAsyncWithPrecedingComment",
"yapftests/reformatter_python3_test.py::TestsForPython3Code::testContinuationIndentWithAsync",
"yapftests/reformatter_python3_test.py::TestsForPython3Code::testExecAsNonKeyword",
"yapftests/reformatter_python3_test.py::TestsForPython3Code::testKeepTypesIntact",
"yapftests/reformatter_python3_test.py::TestsForPython3Code::testKeywordOnlyArgSpecifier",
"yapftests/reformatter_python3_test.py::TestsForPython3Code::testMatrixMultiplication",
"yapftests/reformatter_python3_test.py::TestsForPython3Code::testNoSpacesAroundPowerOparator",
"yapftests/reformatter_python3_test.py::TestsForPython3Code::testPEP448ParameterExpansion",
"yapftests/reformatter_python3_test.py::TestsForPython3Code::testSpacesAroundDefaultOrNamedAssign",
"yapftests/reformatter_python3_test.py::TestsForPython3Code::testSplittingArguments",
"yapftests/reformatter_python3_test.py::TestsForPython3Code::testTypeHint",
"yapftests/reformatter_python3_test.py::TestsForPython3Code::testTypedNames"
]
| []
| Apache License 2.0 | 1,949 | [
"yapf/yapflib/reformatter.py"
]
| [
"yapf/yapflib/reformatter.py"
]
|
|
colour-science__colour-367 | 3e8087c925f28dd07b6468751f603b34d14760a4 | 2017-12-10 09:17:12 | 3cd6ab8d4c3483bcdeb2d7ef33967160808c0bb2 | KelSolaar: This one should be quick to review: it implements support for the 2011 parameterisation of *hdr-CIELAB* and *hdr-IPT*.
coveralls:
[](https://coveralls.io/builds/14584447)
Coverage increased (+0.01%) to 97.926% when pulling **2d6f04d257a19b30973ee93d130f6295d3c32623 on feature/hdr_cielab_ipt_2011** into **158bd4441886e49c0431e4b444298ed0426ac6d5 on develop**.
codacy-bot:
 Here is an overview of what got changed by this pull request:
```diff
Complexity increasing per file
==============================
- colour/colorimetry/luminance.py 1
- colour/models/hdr_ipt.py 1
- colour/models/hdr_cie_lab.py 1
```
See the complete overview on [Codacy](https://www.codacy.com/app/colour-science/colour/pullRequest?prid=1190092&bid=6137186)
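For reviewers, a standalone sketch of the Michaelis-Menten lightness form both parameterisations share (constants copied from the diff below; the helper names are illustrative, not the library API):
```python
import numpy as np

def michaelis_menten(S, V_max, K_m):
    # Reaction-rate form used by both methods: v = V_max * S / (K_m + S).
    return V_max * S / (K_m + S)

def lightness_fairchild(Y, year=2011):
    Y = np.asarray(Y)
    if year == 2010:
        epsilon, V_max, K_m = 1.836, 100, 0.184 ** 1.836
    else:  # 2011, hdr-CIELAB variant (hdr-IPT uses 246 instead of 247)
        epsilon, V_max, K_m = 0.710, 247, 2 ** 0.710
    return michaelis_menten(Y ** epsilon, V_max, K_m) + 0.02

print(lightness_fairchild(10.08 / 100))             # ~26.4595
print(lightness_fairchild(10.08 / 100, year=2010))  # ~24.9023
```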
| diff --git a/colour/colorimetry/__init__.py b/colour/colorimetry/__init__.py
index edeadca44..c14274bca 100644
--- a/colour/colorimetry/__init__.py
+++ b/colour/colorimetry/__init__.py
@@ -21,11 +21,13 @@ from .lefs import (mesopic_luminous_efficiency_function,
from .lightness import LIGHTNESS_METHODS
from .lightness import lightness
from .lightness import (lightness_Glasser1958, lightness_Wyszecki1963,
- lightness_CIE1976, lightness_Fairchild2010)
+ lightness_CIE1976, lightness_Fairchild2010,
+ lightness_Fairchild2011)
from .luminance import LUMINANCE_METHODS
from .luminance import luminance
from .luminance import (luminance_Newhall1943, luminance_ASTMD153508,
- luminance_CIE1976, luminance_Fairchild2010)
+ luminance_CIE1976, luminance_Fairchild2010,
+ luminance_Fairchild2011)
from .dominant import (dominant_wavelength, complementary_wavelength,
excitation_purity, colorimetric_purity)
from .photometry import (luminous_flux, luminous_efficiency, luminous_efficacy)
@@ -74,13 +76,13 @@ __all__ += ['LIGHTNESS_METHODS']
__all__ += ['lightness']
__all__ += [
'lightness_Glasser1958', 'lightness_Wyszecki1963', 'lightness_CIE1976',
- 'lightness_Fairchild2010'
+ 'lightness_Fairchild2010', 'lightness_Fairchild2011'
]
__all__ += ['LUMINANCE_METHODS']
__all__ += ['luminance']
__all__ += [
'luminance_Newhall1943', 'luminance_ASTMD153508', 'luminance_CIE1976',
- 'luminance_Fairchild2010'
+ 'luminance_Fairchild2010', 'luminance_Fairchild2011'
]
__all__ += [
'dominant_wavelength', 'complementary_wavelength', 'excitation_purity',
diff --git a/colour/colorimetry/lightness.py b/colour/colorimetry/lightness.py
index bcbaca685..19f02635f 100644
--- a/colour/colorimetry/lightness.py
+++ b/colour/colorimetry/lightness.py
@@ -17,6 +17,8 @@ The following methods are available:
*luminance* :math:`Y` as per *CIE 1976* recommendation.
- :func:`lightness_Fairchild2010`: *Lightness* :math:`L_{hdr}` computation
of given *luminance* :math:`Y` using *Fairchild and Wyble (2010)* method.
+- :func:`lightness_Fairchild2011`: *Lightness* :math:`L_{hdr}` computation
+ of given *luminance* :math:`Y` using *Fairchild and Chen (2011)* method.
See Also
--------
@@ -48,7 +50,8 @@ __status__ = 'Production'
__all__ = [
'lightness_Glasser1958', 'lightness_Wyszecki1963', 'lightness_CIE1976',
- 'lightness_Fairchild2010', 'LIGHTNESS_METHODS', 'lightness'
+ 'lightness_Fairchild2010', 'lightness_Fairchild2011', 'LIGHTNESS_METHODS',
+ 'lightness'
]
@@ -183,7 +186,7 @@ def lightness_CIE1976(Y, Y_n=100):
return Lstar
-def lightness_Fairchild2010(Y, epsilon=2):
+def lightness_Fairchild2010(Y, epsilon=1.836):
"""
Computes *Lightness* :math:`L_{hdr}` of given *luminance* :math:`Y` using
*Fairchild and Wyble (2010)* method according to *Michealis-Menten*
@@ -218,14 +221,73 @@ def lightness_Fairchild2010(Y, epsilon=2):
Examples
--------
- >>> lightness_Fairchild2010(10.08 / 100, 1.836) # doctest: +ELLIPSIS
+ >>> lightness_Fairchild2010(10.08 / 100) # doctest: +ELLIPSIS
24.9022902...
"""
+ maximum_perception = 100
+
Y = np.asarray(Y)
- L_hdr = reaction_rate_MichealisMenten(Y ** epsilon, 100, 0.184 **
- epsilon) + 0.02
+ L_hdr = reaction_rate_MichealisMenten(Y ** epsilon, maximum_perception,
+ 0.184 ** epsilon) + 0.02
+
+ return L_hdr
+
+
+def lightness_Fairchild2011(Y, epsilon=0.710, method='hdr-CIELAB'):
+ """
+ Computes *Lightness* :math:`L_{hdr}` of given *luminance* :math:`Y` using
+    *Fairchild and Chen (2011)* method according to *Michealis-Menten*
+ kinetics.
+
+ Parameters
+ ----------
+ Y : array_like
+ *luminance* :math:`Y`.
+ epsilon : numeric or array_like, optional
+ :math:`\epsilon` exponent.
+ method : unicode, optional
+ **{'hdr-CIELAB', 'hdr-IPT'}**,
+ *Lightness* :math:`L_{hdr}` computation method.
+
+ Returns
+ -------
+ array_like
+ *Lightness* :math:`L_{hdr}`.
+
+ Warning
+ -------
+ The input domain of that definition is non standard!
+
+ Notes
+ -----
+ - Input *luminance* :math:`Y` is in domain [0, :math:`\infty`].
+
+ References
+ ----------
+ .. [7] Fairchild, M. D., & Chen, P. (2011). Brightness, Lightness, and
+ Specifying Color in High-Dynamic-Range Scenes and Images.
+ doi:10.1117/12.872075
+
+ Examples
+ --------
+ >>> lightness_Fairchild2011(10.08 / 100) # doctest: +ELLIPSIS
+ 26.45950981...
+ >>> lightness_Fairchild2011(10.08 / 100, method='hdr-IPT')
+ ... # doctest: +ELLIPSIS
+ 26.3524672...
+ """
+
+ Y = np.asarray(Y)
+
+ if method.lower() == 'hdr-cielab':
+ maximum_perception = 247
+ else:
+ maximum_perception = 246
+
+ L_hdr = reaction_rate_MichealisMenten(Y ** epsilon, maximum_perception, 2
+ ** epsilon) + 0.02
return L_hdr
@@ -234,13 +296,15 @@ LIGHTNESS_METHODS = CaseInsensitiveMapping({
'Glasser 1958': lightness_Glasser1958,
'Wyszecki 1963': lightness_Wyszecki1963,
'CIE 1976': lightness_CIE1976,
- 'Fairchild 2010': lightness_Fairchild2010
+ 'Fairchild 2010': lightness_Fairchild2010,
+ 'Fairchild 2011': lightness_Fairchild2011
})
"""
Supported *Lightness* computations methods.
LIGHTNESS_METHODS : CaseInsensitiveMapping
- **{'Glasser 1958', 'Wyszecki 1963', 'CIE 1976', 'Fairchild 2010'}**
+ **{'Glasser 1958', 'Wyszecki 1963', 'CIE 1976', 'Fairchild 2010',
+ 'Fairchild 2011'}**
Aliases:
@@ -258,7 +322,8 @@ def lightness(Y, method='CIE 1976', **kwargs):
Y : numeric or array_like
*luminance* :math:`Y`.
method : unicode, optional
- **{'CIE 1976', 'Glasser 1958', 'Wyszecki 1963', 'Fairchild 2010'}**,
+ **{'CIE 1976', 'Glasser 1958', 'Wyszecki 1963', 'Fairchild 2010',
+ 'Fairchild 2011'}**,
Computation method.
Other Parameters
@@ -267,7 +332,7 @@ def lightness(Y, method='CIE 1976', **kwargs):
{:func:`lightness_CIE1976`},
White reference *luminance* :math:`Y_n`.
epsilon : numeric or array_like, optional
- {:func:`lightness_Fairchild2010`},
+ {:func:`lightness_Fairchild2010`, :func:`lightness_Fairchild2011`},
:math:`\epsilon` exponent.
Returns
diff --git a/colour/colorimetry/luminance.py b/colour/colorimetry/luminance.py
index 9af677107..8e60fe58e 100644
--- a/colour/colorimetry/luminance.py
+++ b/colour/colorimetry/luminance.py
@@ -17,6 +17,8 @@ The following methods are available:
*Lightness* :math:`L^*` as per *CIE 1976* recommendation.
- :func:`luminance_Fairchild2010`: *luminance* :math:`Y` computation of given
*Lightness* :math:`L_{hdr}` using *Fairchild and Wyble (2010)* method.
+- :func:`luminance_Fairchild2011`: *luminance* :math:`Y` computation of given
+ *Lightness* :math:`L_{hdr}` using *Fairchild and Chen (2011)* method.
See Also
--------
@@ -42,7 +44,8 @@ __status__ = 'Production'
__all__ = [
'luminance_Newhall1943', 'luminance_ASTMD153508', 'luminance_CIE1976',
- 'luminance_Fairchild2010', 'LUMINANCE_METHODS', 'luminance'
+ 'luminance_Fairchild2010', 'luminance_Fairchild2011', 'LUMINANCE_METHODS',
+ 'luminance'
]
@@ -175,7 +178,7 @@ def luminance_CIE1976(Lstar, Y_n=100):
return Y
-def luminance_Fairchild2010(L_hdr, epsilon=2):
+def luminance_Fairchild2010(L_hdr, epsilon=1.836):
"""
Computes *luminance* :math:`Y` of given *Lightness* :math:`L_{hdr}` using
*Fairchild and Wyble (2010)* method according to *Michealis-Menten*
@@ -225,11 +228,71 @@ def luminance_Fairchild2010(L_hdr, epsilon=2):
return Y
+def luminance_Fairchild2011(L_hdr, epsilon=0.710, method='hdr-CIELAB'):
+ """
+ Computes *luminance* :math:`Y` of given *Lightness* :math:`L_{hdr}` using
+    *Fairchild and Chen (2011)* method according to *Michealis-Menten*
+ kinetics.
+
+ Parameters
+ ----------
+ L_hdr : array_like
+ *Lightness* :math:`L_{hdr}`.
+ epsilon : numeric or array_like, optional
+ :math:`\epsilon` exponent.
+ method : unicode, optional
+ **{'hdr-CIELAB', 'hdr-IPT'}**,
+ *Lightness* :math:`L_{hdr}` computation method.
+
+ Returns
+ -------
+ array_like
+ *luminance* :math:`Y`.
+
+ Warning
+ -------
+ The output range of that definition is non standard!
+
+ Notes
+ -----
+    -   Output *luminance* :math:`Y` is in range [0, :math:`\infty`].
+
+ References
+ ----------
+ .. [5] Fairchild, M. D., & Chen, P. (2011). Brightness, Lightness, and
+ Specifying Color in High-Dynamic-Range Scenes and Images.
+ doi:10.1117/12.872075
+
+ Examples
+ --------
+ >>> luminance_Fairchild2011(26.459509817572265) # doctest: +ELLIPSIS
+ 0.1007999...
+ >>> luminance_Fairchild2011(26.352467267703549, method='hdr-IPT')
+ ... # doctest: +ELLIPSIS
+ 0.1007999...
+ """
+
+ L_hdr = np.asarray(L_hdr)
+
+ if method.lower() == 'hdr-cielab':
+ maximum_perception = 247
+ else:
+ maximum_perception = 246
+
+ Y = np.exp(
+ np.log(
+ substrate_concentration_MichealisMenten(
+ L_hdr - 0.02, maximum_perception, 2 ** epsilon)) / epsilon)
+
+ return Y
+
+
LUMINANCE_METHODS = CaseInsensitiveMapping({
'Newhall 1943': luminance_Newhall1943,
'ASTM D1535-08': luminance_ASTMD153508,
'CIE 1976': luminance_CIE1976,
- 'Fairchild 2010': luminance_Fairchild2010
+ 'Fairchild 2010': luminance_Fairchild2010,
+ 'Fairchild 2011': luminance_Fairchild2011
})
"""
Supported *luminance* computations methods.
@@ -256,7 +319,8 @@ def luminance(LV, method='CIE 1976', **kwargs):
LV : numeric or array_like
*Lightness* :math:`L^*` or *Munsell* value :math:`V`.
method : unicode, optional
- **{'CIE 1976', 'Newhall 1943', 'ASTM D1535-08', 'Fairchild 2010'}**,
+ **{'CIE 1976', 'Newhall 1943', 'ASTM D1535-08', 'Fairchild 2010',
+ 'Fairchild 2011'}**,
Computation method.
Other Parameters
@@ -265,7 +329,7 @@ def luminance(LV, method='CIE 1976', **kwargs):
{:func:`luminance_CIE1976`},
White reference *luminance* :math:`Y_n`.
epsilon : numeric or array_like, optional
- {:func:`lightness_Fairchild2010`},
+ {:func:`lightness_Fairchild2010`, :func:`lightness_Fairchild2011`},
:math:`\epsilon` exponent.
Returns
diff --git a/colour/models/__init__.py b/colour/models/__init__.py
index 41d5ab8c8..4b0dab467 100644
--- a/colour/models/__init__.py
+++ b/colour/models/__init__.py
@@ -10,12 +10,13 @@ from .cie_luv import (XYZ_to_Luv, Luv_to_XYZ, Luv_to_uv, Luv_uv_to_xy,
Luv_to_LCHuv, LCHuv_to_Luv)
from .cie_ucs import XYZ_to_UCS, UCS_to_XYZ, UCS_to_uv, UCS_uv_to_xy
from .cie_uvw import XYZ_to_UVW
-from .hdr_cie_lab import XYZ_to_hdr_CIELab, hdr_CIELab_to_XYZ
+from .hdr_cie_lab import (HDR_CIELAB_METHODS, XYZ_to_hdr_CIELab,
+ hdr_CIELab_to_XYZ)
from .hunter_lab import (XYZ_to_K_ab_HunterLab1966, XYZ_to_Hunter_Lab,
Hunter_Lab_to_XYZ)
from .hunter_rdab import XYZ_to_Hunter_Rdab
from .ipt import XYZ_to_IPT, IPT_to_XYZ, IPT_hue_angle
-from .hdr_ipt import XYZ_to_hdr_IPT, hdr_IPT_to_XYZ
+from .hdr_ipt import HDR_IPT_METHODS, XYZ_to_hdr_IPT, hdr_IPT_to_XYZ
from .ucs_luo2006 import (JMh_CIECAM02_to_CAM02LCD, CAM02LCD_to_JMh_CIECAM02,
JMh_CIECAM02_to_CAM02SCD, CAM02SCD_to_JMh_CIECAM02,
JMh_CIECAM02_to_CAM02UCS, CAM02UCS_to_JMh_CIECAM02)
@@ -37,14 +38,14 @@ __all__ += [
]
__all__ += ['XYZ_to_UCS', 'UCS_to_XYZ', 'UCS_to_uv', 'UCS_uv_to_xy']
__all__ += ['XYZ_to_UVW']
-__all__ += ['XYZ_to_hdr_CIELab', 'hdr_CIELab_to_XYZ']
+__all__ += ['HDR_CIELAB_METHODS', 'XYZ_to_hdr_CIELab', 'hdr_CIELab_to_XYZ']
__all__ += [
'XYZ_to_K_ab_HunterLab1966', 'XYZ_to_Hunter_Lab', 'Hunter_Lab_to_XYZ',
'XYZ_to_Hunter_Rdab'
]
__all__ += ['XYZ_to_Hunter_Rdab']
__all__ += ['XYZ_to_IPT', 'IPT_to_XYZ', 'IPT_hue_angle']
-__all__ += ['XYZ_to_hdr_IPT', 'hdr_IPT_to_XYZ']
+__all__ += ['HDR_IPT_METHODS', 'XYZ_to_hdr_IPT', 'hdr_IPT_to_XYZ']
__all__ += [
'JMh_CIECAM02_to_CAM02LCD', 'CAM02LCD_to_JMh_CIECAM02',
'JMh_CIECAM02_to_CAM02SCD', 'CAM02SCD_to_JMh_CIECAM02',
diff --git a/colour/models/hdr_cie_lab.py b/colour/models/hdr_cie_lab.py
index a943b8fe5..6476afc3b 100644
--- a/colour/models/hdr_cie_lab.py
+++ b/colour/models/hdr_cie_lab.py
@@ -21,14 +21,18 @@ References
Simple Models for Describing the Color of High-Dynamic-Range and
Wide-Color-Gamut Images. In Proc. of Color and Imaging Conference
(pp. 322–326). ISBN:9781629932156
+.. [2] Fairchild, M. D., & Chen, P. (2011). Brightness, Lightness, and
+ Specifying Color in High-Dynamic-Range Scenes and Images.
+ doi:10.1117/12.872075
"""
from __future__ import division, unicode_literals
import numpy as np
-from colour.colorimetry import (ILLUMINANTS, lightness_Fairchild2010,
- luminance_Fairchild2010)
+from colour.colorimetry import (
+ ILLUMINANTS, lightness_Fairchild2010, lightness_Fairchild2011,
+ luminance_Fairchild2010, luminance_Fairchild2011)
from colour.models import xy_to_xyY, xyY_to_XYZ
from colour.utilities import tsplit, tstack
@@ -39,14 +43,77 @@ __maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
-__all__ = ['XYZ_to_hdr_CIELab', 'hdr_CIELab_to_XYZ', 'exponent_hdr_CIELab']
+__all__ = [
+ 'HDR_CIELAB_METHODS', 'exponent_hdr_CIELab', 'XYZ_to_hdr_CIELab',
+ 'hdr_CIELab_to_XYZ'
+]
+
+HDR_CIELAB_METHODS = ('Fairchild 2010', 'Fairchild 2011')
+"""
+Supported *hdr-CIELAB* colourspace computation methods.
+
+HDR_CIELAB_METHODS : tuple
+ **{'Fairchild 2011', 'Fairchild 2010'}**
+"""
+
+
+def exponent_hdr_CIELab(Y_s, Y_abs, method='Fairchild 2011'):
+ """
+ Computes *hdr-CIELAB* colourspace *Lightness* :math:`\epsilon` exponent
+ using *Fairchild and Wyble (2010)* or *Fairchild and Chen (2011)* method.
+
+ Parameters
+ ----------
+ Y_s : numeric or array_like
+ Relative luminance :math:`Y_s` of the surround in range [0, 1].
+ Y_abs : numeric or array_like
+ Absolute luminance :math:`Y_{abs}` of the scene diffuse white in
+ :math:`cd/m^2`.
+ method : unicode, optional
+ **{'Fairchild 2011', 'Fairchild 2010'}**,
+ Computation method.
+
+ Returns
+ -------
+ array_like
+ *hdr-CIELAB* colourspace *Lightness* :math:`\epsilon` exponent.
+
+ Examples
+ --------
+ >>> exponent_hdr_CIELab(0.2, 100) # doctest: +ELLIPSIS
+ 0.7099276...
+ >>> exponent_hdr_CIELab(0.2, 100, method='Fairchild 2010')
+ ... # doctest: +ELLIPSIS
+ 1.8360198...
+ """
+
+ Y_s = np.asarray(Y_s)
+ Y_abs = np.asarray(Y_abs)
+
+ method_l = method.lower()
+ assert method.lower() in [
+ m.lower() for m in HDR_CIELAB_METHODS
+ ], ('"{0}" method is invalid, must be one of {1}!'.format(
+ method, HDR_CIELAB_METHODS))
+
+ if method_l == 'fairchild 2010':
+ epsilon = 1.50
+ else:
+ epsilon = 0.58
+
+ sf = 1.25 - 0.25 * (Y_s / 0.184)
+ lf = np.log(318) / np.log(Y_abs)
+ epsilon *= sf * lf
+
+ return epsilon
def XYZ_to_hdr_CIELab(
XYZ,
illuminant=ILLUMINANTS['CIE 1931 2 Degree Standard Observer']['D50'],
Y_s=0.2,
- Y_abs=100):
+ Y_abs=100,
+ method='Fairchild 2011'):
"""
Converts from *CIE XYZ* tristimulus values to *hdr-CIELAB* colourspace.
@@ -62,6 +129,9 @@ def XYZ_to_hdr_CIELab(
Y_abs : numeric or array_like
Absolute luminance :math:`Y_{abs}` of the scene diffuse white in
:math:`cd/m^2`.
+ method : unicode, optional
+ **{'Fairchild 2011', 'Fairchild 2010'}**,
+ Computation method.
Returns
-------
@@ -85,17 +155,30 @@ def XYZ_to_hdr_CIELab(
--------
>>> XYZ = np.array([0.07049534, 0.10080000, 0.09558313])
>>> XYZ_to_hdr_CIELab(XYZ) # doctest: +ELLIPSIS
- array([ 24.9020664..., -46.8312760..., -10.14274843])
+ array([ 26.4646106..., -24.613326 ..., -4.8479681...])
+ >>> XYZ_to_hdr_CIELab(XYZ, method='Fairchild 2010') # doctest: +ELLIPSIS
+ array([ 24.9020664..., -46.8312760..., -10.1427484...])
"""
X, Y, Z = tsplit(XYZ)
X_n, Y_n, Z_n = tsplit(xyY_to_XYZ(xy_to_xyY(illuminant)))
- e = exponent_hdr_CIELab(Y_s, Y_abs)
+ method_l = method.lower()
+ assert method.lower() in [
+ m.lower() for m in HDR_CIELAB_METHODS
+ ], ('"{0}" method is invalid, must be one of {1}!'.format(
+ method, HDR_CIELAB_METHODS))
+
+ if method_l == 'fairchild 2010':
+ lightness_callable = lightness_Fairchild2010
+ else:
+ lightness_callable = lightness_Fairchild2011
- L_hdr = lightness_Fairchild2010(Y / Y_n, e)
- a_hdr = 5 * (lightness_Fairchild2010(X / X_n, e) - L_hdr)
- b_hdr = 2 * (L_hdr - lightness_Fairchild2010(Z / Z_n, e))
+ e = exponent_hdr_CIELab(Y_s, Y_abs, method)
+
+ L_hdr = lightness_callable(Y / Y_n, e)
+ a_hdr = 5 * (lightness_callable(X / X_n, e) - L_hdr)
+ b_hdr = 2 * (L_hdr - lightness_callable(Z / Z_n, e))
Lab_hdr = tstack((L_hdr, a_hdr, b_hdr))
@@ -106,7 +189,8 @@ def hdr_CIELab_to_XYZ(
Lab_hdr,
illuminant=ILLUMINANTS['CIE 1931 2 Degree Standard Observer']['D50'],
Y_s=0.2,
- Y_abs=100):
+ Y_abs=100,
+ method='Fairchild 2011'):
"""
Converts from *hdr-CIELAB* colourspace to *CIE XYZ* tristimulus values.
@@ -122,6 +206,9 @@ def hdr_CIELab_to_XYZ(
Y_abs : numeric or array_like
Absolute luminance :math:`Y_{abs}` of the scene diffuse white in
:math:`cd/m^2`.
+ method : unicode, optional
+ **{'Fairchild 2011', 'Fairchild 2010'}**,
+ Computation method.
Returns
-------
@@ -136,53 +223,35 @@ def hdr_CIELab_to_XYZ(
Examples
--------
- >>> Lab_hdr = np.array([24.90206646, -46.83127607, -10.14274843])
+ >>> Lab_hdr = np.array([26.46461067, -24.613326, -4.84796811])
>>> hdr_CIELab_to_XYZ(Lab_hdr) # doctest: +ELLIPSIS
array([ 0.0704953..., 0.1008 , 0.0955831...])
+ >>> Lab_hdr = np.array([24.90206646, -46.83127607, -10.14274843])
+ >>> hdr_CIELab_to_XYZ(Lab_hdr, method='Fairchild 2010')
+ ... # doctest: +ELLIPSIS
+ array([ 0.0704953..., 0.1008 , 0.0955831...])
"""
L_hdr, a_hdr, b_hdr = tsplit(Lab_hdr)
X_n, Y_n, Z_n = tsplit(xyY_to_XYZ(xy_to_xyY(illuminant)))
- e = exponent_hdr_CIELab(Y_s, Y_abs)
-
- Y = luminance_Fairchild2010(L_hdr, e) * Y_n
- X = luminance_Fairchild2010((a_hdr + 5 * L_hdr) / 5, e) * X_n
- Z = luminance_Fairchild2010((-b_hdr + 2 * L_hdr) / 2, e) * Z_n
-
- XYZ = tstack((X, Y, Z))
-
- return XYZ
-
-
-def exponent_hdr_CIELab(Y_s, Y_abs):
- """
- Computes *hdr-CIELAB* colourspace *Lightness* :math:`\epsilon` exponent.
-
- Parameters
- ----------
- Y_s : numeric or array_like
- Relative luminance :math:`Y_s` of the surround in range [0, 1].
- Y_abs : numeric or array_like
- Absolute luminance :math:`Y_{abs}` of the scene diffuse white in
- :math:`cd/m^2`.
+ method_l = method.lower()
+ assert method.lower() in [
+ m.lower() for m in HDR_CIELAB_METHODS
+ ], ('"{0}" method is invalid, must be one of {1}!'.format(
+ method, HDR_CIELAB_METHODS))
- Returns
- -------
- array_like
- *hdr-CIELAB* colourspace *Lightness* :math:`\epsilon` exponent.
+ if method_l == 'fairchild 2010':
+ luminance_callable = luminance_Fairchild2010
+ else:
+ luminance_callable = luminance_Fairchild2011
- Examples
- --------
- >>> exponent_hdr_CIELab(0.2, 100) # doctest: +ELLIPSIS
- 1.8360198...
- """
+ e = exponent_hdr_CIELab(Y_s, Y_abs, method)
- Y_s = np.asarray(Y_s)
- Y_abs = np.asarray(Y_abs)
+ Y = luminance_callable(L_hdr, e) * Y_n
+ X = luminance_callable((a_hdr + 5 * L_hdr) / 5, e) * X_n
+ Z = luminance_callable((-b_hdr + 2 * L_hdr) / 2, e) * Z_n
- lf = np.log(318) / np.log(Y_abs)
- sf = 1.25 - 0.25 * (Y_s / 0.184)
- epsilon = 1.50 * sf * lf
+ XYZ = tstack((X, Y, Z))
- return epsilon
+ return XYZ
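A quick aside (not part of the patch): the exponent values in the doctests above follow directly from ``epsilon = k * sf * lf``, with ``k = 0.58`` for the *Fairchild (2011)* method and ``k = 1.50`` for *Fairchild (2010)*. A minimal sanity check, assuming only NumPy:

``` python
import numpy as np

Y_s, Y_abs = 0.2, 100
sf = 1.25 - 0.25 * (Y_s / 0.184)   # surround factor
lf = np.log(318) / np.log(Y_abs)   # luminance factor

print(0.58 * sf * lf)  # ~0.7099276, the 'Fairchild 2011' doctest value
print(1.50 * sf * lf)  # ~1.8360198, the 'Fairchild 2010' doctest value
```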
diff --git a/colour/models/hdr_ipt.py b/colour/models/hdr_ipt.py
index 8cc91cbb1..c1050bffe 100644
--- a/colour/models/hdr_ipt.py
+++ b/colour/models/hdr_ipt.py
@@ -21,13 +21,18 @@ References
Simple Models for Describing the Color of High-Dynamic-Range and
Wide-Color-Gamut Images. In Proc. of Color and Imaging Conference
(pp. 322–326). ISBN:9781629932156
+.. [2] Fairchild, M. D., & Chen, P. (2011). Brightness, Lightness, and
+ Specifying Color in High-Dynamic-Range Scenes and Images.
+ doi:10.1117/12.872075
"""
from __future__ import division, unicode_literals
import numpy as np
-from colour.colorimetry import lightness_Fairchild2010, luminance_Fairchild2010
+from colour.colorimetry import (
+ lightness_Fairchild2010, lightness_Fairchild2011, luminance_Fairchild2010,
+ luminance_Fairchild2011)
from colour.models.ipt import (IPT_XYZ_TO_LMS_MATRIX, IPT_LMS_TO_XYZ_MATRIX,
IPT_LMS_TO_IPT_MATRIX, IPT_IPT_TO_LMS_MATRIX)
from colour.utilities import dot_vector
@@ -39,10 +44,71 @@ __maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
-__all__ = ['XYZ_to_hdr_IPT', 'hdr_IPT_to_XYZ', 'exponent_hdr_IPT']
+__all__ = [
+ 'HDR_IPT_METHODS', 'exponent_hdr_IPT', 'XYZ_to_hdr_IPT', 'hdr_IPT_to_XYZ'
+]
+HDR_IPT_METHODS = ('Fairchild 2010', 'Fairchild 2011')
+"""
+Supported *hdr-IPT* colourspace computation methods.
+
+HDR_IPT_METHODS : tuple
+ **{'Fairchild 2011', 'Fairchild 2010'}**
+"""
-def XYZ_to_hdr_IPT(XYZ, Y_s=0.2, Y_abs=100):
+
+def exponent_hdr_IPT(Y_s, Y_abs, method='Fairchild 2011'):
+ """
+ Computes *hdr-IPT* colourspace *Lightness* :math:`\epsilon` exponent using
+ *Fairchild and Wyble (2010)* or *Fairchild and Chen (2011)* method.
+
+ Parameters
+ ----------
+ Y_s : numeric or array_like
+ Relative luminance :math:`Y_s` of the surround in range [0, 1].
+ Y_abs : numeric or array_like
+ Absolute luminance :math:`Y_{abs}` of the scene diffuse white in
+ :math:`cd/m^2`.
+ method : unicode, optional
+ **{'Fairchild 2011', 'Fairchild 2010'}**,
+ Computation method.
+
+ Returns
+ -------
+ array_like
+ *hdr-IPT* colourspace *Lightness* :math:`\epsilon` exponent.
+
+ Examples
+ --------
+ >>> exponent_hdr_IPT(0.2, 100) # doctest: +ELLIPSIS
+ 0.7221678...
+ >>> exponent_hdr_IPT(0.2, 100, method='Fairchild 2010')
+ ... # doctest: +ELLIPSIS
+ 1.6891383...
+ """
+
+ Y_s = np.asarray(Y_s)
+ Y_abs = np.asarray(Y_abs)
+
+ method_l = method.lower()
+ assert method.lower() in [
+ m.lower() for m in HDR_IPT_METHODS
+ ], ('"{0}" method is invalid, must be one of {1}!'.format(
+ method, HDR_IPT_METHODS))
+
+ if method_l == 'fairchild 2010':
+ epsilon = 1.38
+ else:
+ epsilon = 0.59
+
+ lf = np.log(318) / np.log(Y_abs)
+ sf = 1.25 - 0.25 * (Y_s / 0.184)
+ epsilon *= sf * lf
+
+ return epsilon
+
+
+def XYZ_to_hdr_IPT(XYZ, Y_s=0.2, Y_abs=100, method='Fairchild 2011'):
"""
Converts from *CIE XYZ* tristimulus values to *hdr-IPT* colourspace.
@@ -55,6 +121,9 @@ def XYZ_to_hdr_IPT(XYZ, Y_s=0.2, Y_abs=100):
Y_abs : numeric or array_like
Absolute luminance :math:`Y_{abs}` of the scene diffuse white in
:math:`cd/m^2`.
+ method : unicode, optional
+ **{'Fairchild 2011', 'Fairchild 2010'}**,
+ Computation method.
Returns
-------
@@ -70,19 +139,32 @@ def XYZ_to_hdr_IPT(XYZ, Y_s=0.2, Y_abs=100):
--------
>>> XYZ = np.array([0.96907232, 1.00000000, 1.12179215])
>>> XYZ_to_hdr_IPT(XYZ) # doctest: +ELLIPSIS
+ array([ 93.5317473..., 1.8564156..., -1.3292254...])
+ >>> XYZ_to_hdr_IPT(XYZ, method='Fairchild 2010') # doctest: +ELLIPSIS
array([ 94.6592917..., 0.3804177..., -0.2673118...])
"""
- e = exponent_hdr_IPT(Y_s, Y_abs)[..., np.newaxis]
+ method_l = method.lower()
+ assert method.lower() in [
+ m.lower() for m in HDR_IPT_METHODS
+ ], ('"{0}" method is invalid, must be one of {1}!'.format(
+ method, HDR_IPT_METHODS))
+
+ if method_l == 'fairchild 2010':
+ lightness_callable = lightness_Fairchild2010
+ else:
+ lightness_callable = lightness_Fairchild2011
+
+ e = exponent_hdr_IPT(Y_s, Y_abs, method)[..., np.newaxis]
LMS = dot_vector(IPT_XYZ_TO_LMS_MATRIX, XYZ)
- LMS_prime = np.sign(LMS) * np.abs(lightness_Fairchild2010(LMS, e))
+ LMS_prime = np.sign(LMS) * np.abs(lightness_callable(LMS, e))
IPT = dot_vector(IPT_LMS_TO_IPT_MATRIX, LMS_prime)
return IPT
-def hdr_IPT_to_XYZ(IPT_hdr, Y_s=0.2, Y_abs=100):
+def hdr_IPT_to_XYZ(IPT_hdr, Y_s=0.2, Y_abs=100, method='Fairchild 2011'):
"""
Converts from *hdr-IPT* colourspace to *CIE XYZ* tristimulus values.
@@ -95,6 +177,9 @@ def hdr_IPT_to_XYZ(IPT_hdr, Y_s=0.2, Y_abs=100):
Y_abs : numeric or array_like
Absolute luminance :math:`Y_{abs}` of the scene diffuse white in
:math:`cd/m^2`.
+ method : unicode, optional
+ **{'Fairchild 2011', 'Fairchild 2010'}**,
+ Computation method.
Returns
-------
@@ -103,48 +188,30 @@ def hdr_IPT_to_XYZ(IPT_hdr, Y_s=0.2, Y_abs=100):
Examples
--------
- >>> IPT_hdr = np.array([94.65929175, 0.38041773, -0.26731187])
+ >>> IPT_hdr = np.array([93.53174734, 1.85641567, -1.32922546])
>>> hdr_IPT_to_XYZ(IPT_hdr) # doctest: +ELLIPSIS
array([ 0.9690723..., 1. , 1.1217921...])
+ >>> IPT_hdr = np.array([94.65929175, 0.38041773, -0.26731187])
+ >>> hdr_IPT_to_XYZ(IPT_hdr, method='Fairchild 2010')
+ ... # doctest: +ELLIPSIS
+ array([ 0.9690723..., 1. , 1.1217921...])
"""
- e = exponent_hdr_IPT(Y_s, Y_abs)[..., np.newaxis]
+ method_l = method.lower()
+ assert method.lower() in [
+ m.lower() for m in HDR_IPT_METHODS
+ ], ('"{0}" method is invalid, must be one of {1}!'.format(
+ method, HDR_IPT_METHODS))
+
+ if method_l == 'fairchild 2010':
+ luminance_callable = luminance_Fairchild2010
+ else:
+ luminance_callable = luminance_Fairchild2011
+
+ e = exponent_hdr_IPT(Y_s, Y_abs, method)[..., np.newaxis]
LMS = dot_vector(IPT_IPT_TO_LMS_MATRIX, IPT_hdr)
- LMS_prime = np.sign(LMS) * np.abs(luminance_Fairchild2010(LMS, e))
+ LMS_prime = np.sign(LMS) * np.abs(luminance_callable(LMS, e))
XYZ = dot_vector(IPT_LMS_TO_XYZ_MATRIX, LMS_prime)
return XYZ
-
-
-def exponent_hdr_IPT(Y_s, Y_abs):
- """
- Computes *hdr-IPT* colourspace *Lightness* :math:`\epsilon` exponent.
-
- Parameters
- ----------
- Y_s : numeric or array_like
- Relative luminance :math:`Y_s` of the surround in range [0, 1].
- Y_abs : numeric or array_like
- Absolute luminance :math:`Y_{abs}` of the scene diffuse white in
- :math:`cd/m^2`.
-
- Returns
- -------
- array_like
- *hdr-IPT* colourspace *Lightness* :math:`\epsilon` exponent.
-
- Examples
- --------
- >>> exponent_hdr_IPT(0.2, 100) # doctest: +ELLIPSIS
- 1.6891383...
- """
-
- Y_s = np.asarray(Y_s)
- Y_abs = np.asarray(Y_abs)
-
- lf = np.log(318) / np.log(Y_abs)
- sf = 1.25 - 0.25 * (Y_s / 0.184)
- epsilon = 1.38 * sf * lf
-
- return epsilon
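For completeness, a usage sketch of the reworked definition above (assumes the patched library is importable; the module path and the expected values are taken from the diff's doctests):

``` python
from colour.models.hdr_ipt import exponent_hdr_IPT

print(exponent_hdr_IPT(0.2, 100))                           # ~0.7221678 (default 'Fairchild 2011')
print(exponent_hdr_IPT(0.2, 100, method='Fairchild 2010'))  # ~1.6891383
```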
| Implement support for "hdr-CIELab" and "hdr-IPT" (2011) colourspaces.
References
---
- Fairchild, M. D., & Chen, P. (2011). Brightness, Lightness, and Specifying Color in High-Dynamic-Range Scenes and Images. doi:10.1117/12.872075
- #347
- #321 | colour-science/colour | diff --git a/colour/colorimetry/tests/test_lightness.py b/colour/colorimetry/tests/test_lightness.py
index 6f8bc5dc1..808cea398 100644
--- a/colour/colorimetry/tests/test_lightness.py
+++ b/colour/colorimetry/tests/test_lightness.py
@@ -10,7 +10,8 @@ import numpy as np
import unittest
from colour.colorimetry import (lightness_Glasser1958, lightness_Wyszecki1963,
- lightness_CIE1976, lightness_Fairchild2010)
+ lightness_CIE1976, lightness_Fairchild2010,
+ lightness_Fairchild2011)
from colour.utilities import ignore_numpy_errors
__author__ = 'Colour Developers'
@@ -22,7 +23,8 @@ __status__ = 'Production'
__all__ = [
'TestLightnessGlasser1958', 'TestLightnessWyszecki1963',
- 'TestLightnessCIE1976', 'TestLightnessFairchild2010'
+ 'TestLightnessCIE1976', 'TestLightnessFairchild2010',
+ 'TestLightnessFairchild2011'
]
@@ -206,22 +208,22 @@ class TestLightnessFairchild2010(unittest.TestCase):
"""
self.assertAlmostEqual(
- lightness_Fairchild2010(10.08 / 100), 23.10363383, places=7)
+ lightness_Fairchild2010(10.08 / 100), 24.90229027, places=7)
self.assertAlmostEqual(
- lightness_Fairchild2010(56.76 / 100), 90.51057574, places=7)
+ lightness_Fairchild2010(56.76 / 100), 88.79756887, places=7)
self.assertAlmostEqual(
- lightness_Fairchild2010(98.32 / 100), 96.636221285, places=7)
+ lightness_Fairchild2010(98.32 / 100), 95.61301852, places=7)
self.assertAlmostEqual(
lightness_Fairchild2010(10.08 / 100, 2.75), 16.06420271, places=7)
self.assertAlmostEqual(
- lightness_Fairchild2010(1008), 100.01999667, places=7)
+ lightness_Fairchild2010(1008), 100.019986327374240, places=7)
self.assertAlmostEqual(
- lightness_Fairchild2010(100800), 100.01999999, places=7)
+ lightness_Fairchild2010(100800), 100.019999997090270, places=7)
def test_n_dimensional_lightness_Fairchild2010(self):
"""
@@ -230,7 +232,7 @@ class TestLightnessFairchild2010(unittest.TestCase):
"""
Y = 10.08 / 100
- L = 23.10363383
+ L = 24.90229027
np.testing.assert_almost_equal(
lightness_Fairchild2010(Y), L, decimal=7)
@@ -260,5 +262,72 @@ class TestLightnessFairchild2010(unittest.TestCase):
np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
+class TestLightnessFairchild2011(unittest.TestCase):
+ """
+ Defines :func:`colour.colorimetry.lightness.lightness_Fairchild2011`
+ definition unit tests methods.
+ """
+
+ def test_lightness_Fairchild2011(self):
+ """
+ Tests :func:`colour.colorimetry.lightness.lightness_Fairchild2011`
+ definition.
+ """
+
+ self.assertAlmostEqual(
+ lightness_Fairchild2011(10.08 / 100), 26.45950982, places=7)
+
+ self.assertAlmostEqual(
+ lightness_Fairchild2011(56.76 / 100), 71.70846602, places=7)
+
+ self.assertAlmostEqual(
+ lightness_Fairchild2011(98.32 / 100), 93.03097540, places=7)
+
+ self.assertAlmostEqual(
+ lightness_Fairchild2011(10.08 / 100, 2.75), 0.08672116, places=7)
+
+ self.assertAlmostEqual(
+ lightness_Fairchild2011(1008), 244.07716521, places=7)
+
+ self.assertAlmostEqual(
+ lightness_Fairchild2011(100800), 246.90681934, places=7)
+
+ def test_n_dimensional_lightness_Fairchild2011(self):
+ """
+ Tests :func:`colour.colorimetry.lightness.lightness_Fairchild2011`
+ definition n-dimensional arrays support.
+ """
+
+ Y = 10.08 / 100
+ L = 26.45950982
+ np.testing.assert_almost_equal(
+ lightness_Fairchild2011(Y), L, decimal=7)
+
+ Y = np.tile(Y, 6)
+ L = np.tile(L, 6)
+ np.testing.assert_almost_equal(
+ lightness_Fairchild2011(Y), L, decimal=7)
+
+ Y = np.reshape(Y, (2, 3))
+ L = np.reshape(L, (2, 3))
+ np.testing.assert_almost_equal(
+ lightness_Fairchild2011(Y), L, decimal=7)
+
+ Y = np.reshape(Y, (2, 3, 1))
+ L = np.reshape(L, (2, 3, 1))
+ np.testing.assert_almost_equal(
+ lightness_Fairchild2011(Y), L, decimal=7)
+
+ @ignore_numpy_errors
+ def test_nan_lightness_Fairchild2011(self):
+ """
+ Tests :func:`colour.colorimetry.lightness.lightness_Fairchild2011`
+ definition nan support.
+ """
+
+ lightness_Fairchild2011(
+ np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
+
+
if __name__ == '__main__':
unittest.main()
diff --git a/colour/colorimetry/tests/test_luminance.py b/colour/colorimetry/tests/test_luminance.py
index 8b8cb07d8..c73e017b9 100644
--- a/colour/colorimetry/tests/test_luminance.py
+++ b/colour/colorimetry/tests/test_luminance.py
@@ -11,7 +11,7 @@ import unittest
from colour.colorimetry.luminance import (
luminance_Newhall1943, luminance_CIE1976, luminance_ASTMD153508,
- luminance_Fairchild2010)
+ luminance_Fairchild2010, luminance_Fairchild2011)
from colour.utilities import ignore_numpy_errors
__author__ = 'Colour Developers'
@@ -23,7 +23,8 @@ __status__ = 'Production'
__all__ = [
'TestLuminanceNewhall1943', 'TestLuminanceASTMD153508',
- 'TestLuminanceCIE1976', 'TestLuminanceFairchild2010'
+ 'TestLuminanceCIE1976', 'TestLuminanceFairchild2010',
+ 'TestLuminanceFairchild2011'
]
@@ -210,13 +211,13 @@ class TestLuminanceFairchild2010(unittest.TestCase):
"""
self.assertAlmostEqual(
- luminance_Fairchild2010(23.103633825753175), 0.10079999, places=7)
+ luminance_Fairchild2010(24.902290269546651), 0.10079999, places=7)
self.assertAlmostEqual(
- luminance_Fairchild2010(90.510575738115122), 0.56759999, places=7)
+ luminance_Fairchild2010(88.797568871771162), 0.56759999, places=7)
self.assertAlmostEqual(
- luminance_Fairchild2010(96.636221285055527), 0.98319999, places=7)
+ luminance_Fairchild2010(95.613018520289828), 0.98319999, places=7)
self.assertAlmostEqual(
luminance_Fairchild2010(16.064202706248068, 2.75),
@@ -224,13 +225,13 @@ class TestLuminanceFairchild2010(unittest.TestCase):
places=7)
self.assertAlmostEqual(
- luminance_Fairchild2010(100.01999666792653),
- 1007.99999963,
+ luminance_Fairchild2010(100.019986327374240),
+ 1008.00000024,
places=7)
self.assertAlmostEqual(
- luminance_Fairchild2010(100.01999999966679),
- 100800.82383352,
+ luminance_Fairchild2010(100.019999997090270),
+ 100799.92312466,
places=7)
def test_n_dimensional_luminance_Fairchild2010(self):
@@ -239,7 +240,7 @@ class TestLuminanceFairchild2010(unittest.TestCase):
definition n-dimensional arrays support.
"""
- L_hdr = 23.103633825753175
+ L_hdr = 24.902290269546651
Y = 10.08 / 100
np.testing.assert_almost_equal(
luminance_Fairchild2010(L_hdr), Y, decimal=7)
@@ -270,5 +271,78 @@ class TestLuminanceFairchild2010(unittest.TestCase):
np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
+class TestLuminanceFairchild2011(unittest.TestCase):
+ """
+ Defines :func:`colour.colorimetry.luminance.luminance_Fairchild2011`
+ definition unit tests methods.
+ """
+
+ def test_luminance_Fairchild2011(self):
+ """
+ Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2011`
+ definition.
+ """
+
+ self.assertAlmostEqual(
+ luminance_Fairchild2011(26.459509817572265), 0.10079999, places=7)
+
+ self.assertAlmostEqual(
+ luminance_Fairchild2011(71.708466023819625), 0.56759999, places=7)
+
+ self.assertAlmostEqual(
+ luminance_Fairchild2011(93.030975393206475), 0.98319999, places=7)
+
+ self.assertAlmostEqual(
+ luminance_Fairchild2011(0.08672116154998, 2.75),
+ 0.10079999,
+ places=7)
+
+ self.assertAlmostEqual(
+ luminance_Fairchild2011(244.07716520973938),
+ 1008.00000000,
+ places=7)
+
+ self.assertAlmostEqual(
+ luminance_Fairchild2011(246.90681933957006),
+ 100800.00000000,
+ places=7)
+
+ def test_n_dimensional_luminance_Fairchild2011(self):
+ """
+ Tests :func:`colour.colorimetry.lightness.luminance_Fairchild2011`
+ definition n-dimensional arrays support.
+ """
+
+ L_hdr = 26.459509817572265
+ Y = 10.08 / 100
+ np.testing.assert_almost_equal(
+ luminance_Fairchild2011(L_hdr), Y, decimal=7)
+
+ L_hdr = np.tile(L_hdr, 6)
+ Y = np.tile(Y, 6)
+ np.testing.assert_almost_equal(
+ luminance_Fairchild2011(L_hdr), Y, decimal=7)
+
+ L_hdr = np.reshape(L_hdr, (2, 3))
+ Y = np.reshape(Y, (2, 3))
+ np.testing.assert_almost_equal(
+ luminance_Fairchild2011(L_hdr), Y, decimal=7)
+
+ L_hdr = np.reshape(L_hdr, (2, 3, 1))
+ Y = np.reshape(Y, (2, 3, 1))
+ np.testing.assert_almost_equal(
+ luminance_Fairchild2011(L_hdr), Y, decimal=7)
+
+ @ignore_numpy_errors
+ def test_nan_luminance_Fairchild2011(self):
+ """
+ Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2011`
+ definition nan support.
+ """
+
+ luminance_Fairchild2011(
+ np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
+
+
if __name__ == '__main__':
unittest.main()
diff --git a/colour/models/tests/test_hdr_cie_lab.py b/colour/models/tests/test_hdr_cie_lab.py
index 64c4db540..7b38c6645 100644
--- a/colour/models/tests/test_hdr_cie_lab.py
+++ b/colour/models/tests/test_hdr_cie_lab.py
@@ -22,10 +22,77 @@ __email__ = '[email protected]'
__status__ = 'Production'
__all__ = [
- 'TestXYZ_to_hdr_CIELab', 'TestHdr_CIELab_to_XYZ', 'TestExponent_hdr_CIELab'
+ 'TestExponent_hdr_CIELab', 'TestXYZ_to_hdr_CIELab', 'TestHdr_CIELab_to_XYZ'
]
+class TestExponent_hdr_CIELab(unittest.TestCase):
+ """
+ Defines :func:`colour.models.hdr_cie_lab.exponent_hdr_CIELab`
+ definition unit tests methods.
+ """
+
+ def test_exponent_hdr_CIELab(self):
+ """
+ Tests :func:`colour.models.hdr_cie_lab.exponent_hdr_CIELab`
+ definition.
+ """
+
+ self.assertAlmostEqual(
+ exponent_hdr_CIELab(0.2, 100), 0.709927693821670, places=7)
+
+ self.assertAlmostEqual(
+ exponent_hdr_CIELab(0.4, 100), 0.512725556648984, places=7)
+
+ self.assertAlmostEqual(
+ exponent_hdr_CIELab(0.4, 100, method='Fairchild 2010'),
+ 1.326014370643925,
+ places=7)
+
+ self.assertAlmostEqual(
+ exponent_hdr_CIELab(0.2, 1000), 0.473285129214447, places=7)
+
+ def test_n_dimensional_exponent_hdr_CIELab(self):
+ """
+ Tests :func:`colour.models.hdr_cie_lab.exponent_hdr_CIELab`
+ definition n-dimensional arrays support.
+ """
+
+ Y_s = 0.2
+ Y_abs = 100
+ e = 0.709927693821670
+ np.testing.assert_almost_equal(
+ exponent_hdr_CIELab(Y_s, Y_abs), e, decimal=7)
+
+ Y_s = np.tile(Y_s, 6)
+ Y_abs = np.tile(Y_abs, 6)
+ e = np.tile(e, 6)
+ np.testing.assert_almost_equal(
+ exponent_hdr_CIELab(Y_s, Y_abs), e, decimal=7)
+
+ Y_s = np.reshape(Y_s, (2, 3))
+ Y_abs = np.reshape(Y_abs, (2, 3))
+ e = np.reshape(e, (2, 3))
+ np.testing.assert_almost_equal(
+ exponent_hdr_CIELab(Y_s, Y_abs), e, decimal=7)
+
+ Y_s = np.reshape(Y_s, (2, 3, 1))
+ Y_abs = np.reshape(Y_abs, (2, 3, 1))
+ e = np.reshape(e, (2, 3, 1))
+ np.testing.assert_almost_equal(
+ exponent_hdr_CIELab(Y_s, Y_abs), e, decimal=7)
+
+ @ignore_numpy_errors
+ def test_nan_exponent_hdr_CIELab(self):
+ """
+ Tests :func:`colour.models.hdr_cie_lab.exponent_hdr_CIELab`
+ definition nan support.
+ """
+
+ cases = np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan])
+ exponent_hdr_CIELab(cases, cases)
+
+
class TestXYZ_to_hdr_CIELab(unittest.TestCase):
"""
Defines :func:`colour.models.hdr_cie_lab.XYZ_to_hdr_CIELab` definition unit
@@ -39,26 +106,34 @@ class TestXYZ_to_hdr_CIELab(unittest.TestCase):
np.testing.assert_almost_equal(
XYZ_to_hdr_CIELab(np.array([0.07049534, 0.10080000, 0.09558313])),
- np.array([24.90206646, -46.83127607, -10.14274843]),
+ np.array([26.46461067, -24.61332600, -4.84796811]),
decimal=7)
np.testing.assert_almost_equal(
XYZ_to_hdr_CIELab(
np.array([0.07049534, 0.10080000, 0.09558313]),
np.array([0.44757, 0.40745])),
+ np.array([26.46461067, -33.35816986, -42.86850246]),
+ decimal=7)
+
+ np.testing.assert_almost_equal(
+ XYZ_to_hdr_CIELab(
+ np.array([0.07049534, 0.10080000, 0.09558313]),
+ np.array([0.44757, 0.40745]),
+ method='Fairchild 2010'),
np.array([24.90206646, -61.24983919, -83.63902870]),
decimal=7)
np.testing.assert_almost_equal(
XYZ_to_hdr_CIELab(
np.array([0.07049534, 0.10080000, 0.09558313]), Y_s=0.5),
- np.array([34.44227938, -36.51485775, -6.87279617]),
+ np.array([55.57158803, -27.58973060, -5.03923267]),
decimal=7)
np.testing.assert_almost_equal(
XYZ_to_hdr_CIELab(
np.array([0.07049534, 0.10080000, 0.09558313]), Y_abs=1000),
- np.array([32.39463250, -39.77445283, -7.66690737]),
+ np.array([48.33166805, -28.18355309, -5.21974184]),
decimal=7)
def test_n_dimensional_XYZ_to_hdr_CIELab(self):
@@ -71,7 +146,7 @@ class TestXYZ_to_hdr_CIELab(unittest.TestCase):
illuminant = np.array([0.34570, 0.35850])
Y_s = 0.2
Y_abs = 100
- Lab_hdr = np.array([24.90206646, -46.83127607, -10.14274843])
+ Lab_hdr = np.array([26.46461067, -24.61332600, -4.84796811])
np.testing.assert_almost_equal(
XYZ_to_hdr_CIELab(XYZ, illuminant, Y_s, Y_abs), Lab_hdr, decimal=7)
@@ -124,26 +199,26 @@ class TestHdr_CIELab_to_XYZ(unittest.TestCase):
np.testing.assert_almost_equal(
hdr_CIELab_to_XYZ(
- np.array([24.90206646, -46.83127607, -10.14274843])),
+ np.array([26.46461067, -24.61332600, -4.84796811])),
np.array([0.07049534, 0.10080000, 0.09558313]),
decimal=7)
np.testing.assert_almost_equal(
hdr_CIELab_to_XYZ(
- np.array([24.90206646, -61.24983919, -83.63902870]),
+ np.array([26.46461067, -33.35816986, -42.86850246]),
np.array([0.44757, 0.40745])),
np.array([0.07049534, 0.10080000, 0.09558313]),
decimal=7)
np.testing.assert_almost_equal(
hdr_CIELab_to_XYZ(
- np.array([34.44227938, -36.51485775, -6.87279617]), Y_s=0.5),
+ np.array([55.57158803, -27.58973060, -5.03923267]), Y_s=0.5),
np.array([0.07049534, 0.10080000, 0.09558313]),
decimal=7)
np.testing.assert_almost_equal(
hdr_CIELab_to_XYZ(
- np.array([32.39463250, -39.77445283, -7.66690737]),
+ np.array([48.33166805, -28.18355309, -5.21974184]),
Y_abs=1000),
np.array([0.07049534, 0.10080000, 0.09558313]),
decimal=7)
@@ -154,7 +229,7 @@ class TestHdr_CIELab_to_XYZ(unittest.TestCase):
n-dimensions support.
"""
- Lab_hdr = np.array([24.90206646, -46.83127607, -10.14274843])
+ Lab_hdr = np.array([26.46461067, -24.61332600, -4.84796811])
illuminant = np.array([0.34570, 0.35850])
Y_s = 0.2
Y_abs = 100
@@ -198,67 +273,5 @@ class TestHdr_CIELab_to_XYZ(unittest.TestCase):
hdr_CIELab_to_XYZ(Lab_hdr, illuminant, Y_s, Y_abs)
-class TestExponent_hdr_CIELab(unittest.TestCase):
- """
- Defines :func:`colour.models.hdr_cie_lab.exponent_hdr_CIELab`
- definition unit tests methods.
- """
-
- def test_exponent_hdr_CIELab(self):
- """
- Tests :func:`colour.models.hdr_cie_lab.exponent_hdr_CIELab`
- definition.
- """
-
- self.assertAlmostEqual(
- exponent_hdr_CIELab(0.2, 100), 1.836019897814665, places=7)
-
- self.assertAlmostEqual(
- exponent_hdr_CIELab(0.4, 100), 1.326014370643925, places=7)
-
- self.assertAlmostEqual(
- exponent_hdr_CIELab(0.2, 1000), 1.224013265209777, places=7)
-
- def test_n_dimensional_exponent_hdr_CIELab(self):
- """
- Tests :func:`colour.models.hdr_cie_lab.exponent_hdr_CIELab`
- definition n-dimensional arrays support.
- """
-
- Y_s = 0.2
- Y_abs = 100
- e = 1.836019897814665
- np.testing.assert_almost_equal(
- exponent_hdr_CIELab(Y_s, Y_abs), e, decimal=7)
-
- Y_s = np.tile(Y_s, 6)
- Y_abs = np.tile(Y_abs, 6)
- e = np.tile(e, 6)
- np.testing.assert_almost_equal(
- exponent_hdr_CIELab(Y_s, Y_abs), e, decimal=7)
-
- Y_s = np.reshape(Y_s, (2, 3))
- Y_abs = np.reshape(Y_abs, (2, 3))
- e = np.reshape(e, (2, 3))
- np.testing.assert_almost_equal(
- exponent_hdr_CIELab(Y_s, Y_abs), e, decimal=7)
-
- Y_s = np.reshape(Y_s, (2, 3, 1))
- Y_abs = np.reshape(Y_abs, (2, 3, 1))
- e = np.reshape(e, (2, 3, 1))
- np.testing.assert_almost_equal(
- exponent_hdr_CIELab(Y_s, Y_abs), e, decimal=7)
-
- @ignore_numpy_errors
- def test_nan_exponent_hdr_CIELab(self):
- """
- Tests :func:`colour.models.hdr_cie_lab.exponent_hdr_CIELab`
- definition nan support.
- """
-
- cases = np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan])
- exponent_hdr_CIELab(cases, cases)
-
-
if __name__ == '__main__':
unittest.main()
diff --git a/colour/models/tests/test_hdr_ipt.py b/colour/models/tests/test_hdr_ipt.py
index 92eb4a45b..181f2b19e 100644
--- a/colour/models/tests/test_hdr_ipt.py
+++ b/colour/models/tests/test_hdr_ipt.py
@@ -21,7 +21,74 @@ __maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
-__all__ = ['TestXYZ_to_hdr_IPT', 'TestHdr_IPT_to_XYZ', 'TestExponent_hdr_IPT']
+__all__ = ['TestExponent_hdr_IPT', 'TestXYZ_to_hdr_IPT', 'TestHdr_IPT_to_XYZ']
+
+
+class TestExponent_hdr_IPT(unittest.TestCase):
+ """
+ Defines :func:`colour.models.hdr_ipt.exponent_hdr_IPT`
+ definition unit tests methods.
+ """
+
+ def test_exponent_hdr_IPT(self):
+ """
+ Tests :func:`colour.models.hdr_ipt.exponent_hdr_IPT`
+ definition.
+ """
+
+ self.assertAlmostEqual(
+ exponent_hdr_IPT(0.2, 100), 0.722167826473768, places=7)
+
+ self.assertAlmostEqual(
+ exponent_hdr_IPT(0.4, 100), 0.521565652453277, places=7)
+
+ self.assertAlmostEqual(
+ exponent_hdr_IPT(0.4, 100, method='Fairchild 2010'),
+ 1.219933220992410,
+ places=7)
+
+ self.assertAlmostEqual(
+ exponent_hdr_IPT(0.2, 1000), 0.481445217649179, places=7)
+
+ def test_n_dimensional_exponent_hdr_IPT(self):
+ """
+ Tests :func:`colour.models.hdr_ipt.exponent_hdr_IPT`
+ definition n-dimensional arrays support.
+ """
+
+ Y_s = 0.2
+ Y_abs = 100
+ e = 0.722167826473768
+ np.testing.assert_almost_equal(
+ exponent_hdr_IPT(Y_s, Y_abs), e, decimal=7)
+
+ Y_s = np.tile(Y_s, 6)
+ Y_abs = np.tile(Y_abs, 6)
+ e = np.tile(e, 6)
+ np.testing.assert_almost_equal(
+ exponent_hdr_IPT(Y_s, Y_abs), e, decimal=7)
+
+ Y_s = np.reshape(Y_s, (2, 3))
+ Y_abs = np.reshape(Y_abs, (2, 3))
+ e = np.reshape(e, (2, 3))
+ np.testing.assert_almost_equal(
+ exponent_hdr_IPT(Y_s, Y_abs), e, decimal=7)
+
+ Y_s = np.reshape(Y_s, (2, 3, 1))
+ Y_abs = np.reshape(Y_abs, (2, 3, 1))
+ e = np.reshape(e, (2, 3, 1))
+ np.testing.assert_almost_equal(
+ exponent_hdr_IPT(Y_s, Y_abs), e, decimal=7)
+
+ @ignore_numpy_errors
+ def test_nan_exponent_hdr_IPT(self):
+ """
+ Tests :func:`colour.models.hdr_ipt.exponent_hdr_IPT`
+ definition nan support.
+ """
+
+ cases = np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan])
+ exponent_hdr_IPT(cases, cases)
class TestXYZ_to_hdr_IPT(unittest.TestCase):
@@ -37,19 +104,26 @@ class TestXYZ_to_hdr_IPT(unittest.TestCase):
np.testing.assert_almost_equal(
XYZ_to_hdr_IPT(np.array([0.07049534, 0.10080000, 0.09558313])),
+ np.array([24.88927680, -11.44574144, 1.63147707]),
+ decimal=7)
+
+ np.testing.assert_almost_equal(
+ XYZ_to_hdr_IPT(
+ np.array([0.07049534, 0.10080000, 0.09558313]),
+ method='Fairchild 2010'),
np.array([25.18261761, -22.62111297, 3.18511729]),
decimal=7)
np.testing.assert_almost_equal(
XYZ_to_hdr_IPT(
np.array([0.07049534, 0.10080000, 0.09558313]), Y_s=0.5),
- np.array([34.60312115, -15.70974390, 2.26601353]),
+ np.array([53.85070486, -12.48767103, 1.80705844]),
decimal=7)
np.testing.assert_almost_equal(
XYZ_to_hdr_IPT(
np.array([0.25506814, 0.19150000, 0.08849752]), Y_abs=1000),
- np.array([47.18074546, 32.38073691, 29.13827648]),
+ np.array([57.49548734, 25.88213868, 21.85080772]),
decimal=7)
def test_n_dimensional_XYZ_to_hdr_IPT(self):
@@ -61,7 +135,7 @@ class TestXYZ_to_hdr_IPT(unittest.TestCase):
XYZ = np.array([0.07049534, 0.10080000, 0.09558313])
Y_s = 0.2
Y_abs = 100
- IPT_hdr = np.array([25.18261761, -22.62111297, 3.18511729])
+ IPT_hdr = np.array([24.88927680, -11.44574144, 1.63147707])
np.testing.assert_almost_equal(
XYZ_to_hdr_IPT(XYZ, Y_s, Y_abs), IPT_hdr, decimal=7)
@@ -110,19 +184,26 @@ class TestHdr_IPT_to_XYZ(unittest.TestCase):
"""
np.testing.assert_almost_equal(
- hdr_IPT_to_XYZ(np.array([25.18261761, -22.62111297, 3.18511729])),
+ hdr_IPT_to_XYZ(np.array([24.88927680, -11.44574144, 1.63147707])),
np.array([0.07049534, 0.10080000, 0.09558313]),
decimal=7)
np.testing.assert_almost_equal(
hdr_IPT_to_XYZ(
- np.array([34.60312115, -15.70974390, 2.26601353]), Y_s=0.5),
+ np.array([25.18261761, -22.62111297, 3.18511729]),
+ method='Fairchild 2010'),
np.array([0.07049534, 0.10080000, 0.09558313]),
decimal=7)
np.testing.assert_almost_equal(
hdr_IPT_to_XYZ(
- np.array([47.18074546, 32.38073691, 29.13827648]), Y_abs=1000),
+ np.array([53.85070486, -12.48767103, 1.80705844]), Y_s=0.5),
+ np.array([0.07049534, 0.10080000, 0.09558313]),
+ decimal=7)
+
+ np.testing.assert_almost_equal(
+ hdr_IPT_to_XYZ(
+ np.array([57.49548734, 25.88213868, 21.85080772]), Y_abs=1000),
np.array([0.25506814, 0.19150000, 0.08849752]),
decimal=7)
@@ -132,7 +213,7 @@ class TestHdr_IPT_to_XYZ(unittest.TestCase):
n-dimensions support.
"""
- IPT_hdr = np.array([25.18261761, -22.62111297, 3.18511729])
+ IPT_hdr = np.array([24.88927680, -11.44574144, 1.63147707])
Y_s = 0.2
Y_abs = 100
XYZ = np.array([0.07049534, 0.10080000, 0.09558313])
@@ -172,67 +253,5 @@ class TestHdr_IPT_to_XYZ(unittest.TestCase):
hdr_IPT_to_XYZ(IPT_hdr, Y_s, Y_abs)
-class TestExponent_hdr_IPT(unittest.TestCase):
- """
- Defines :func:`colour.models.hdr_ipt.exponent_hdr_IPT`
- definition unit tests methods.
- """
-
- def test_exponent_hdr_IPT(self):
- """
- Tests :func:`colour.models.hdr_ipt.exponent_hdr_IPT`
- definition.
- """
-
- self.assertAlmostEqual(
- exponent_hdr_IPT(0.2, 100), 1.689138305989492, places=7)
-
- self.assertAlmostEqual(
- exponent_hdr_IPT(0.4, 100), 1.219933220992410, places=7)
-
- self.assertAlmostEqual(
- exponent_hdr_IPT(0.2, 1000), 1.126092203992995, places=7)
-
- def test_n_dimensional_exponent_hdr_IPT(self):
- """
- Tests :func:`colour.models.hdr_ipt.exponent_hdr_IPT`
- definition n-dimensional arrays support.
- """
-
- Y_s = 0.2
- Y_abs = 100
- e = 1.689138305989492
- np.testing.assert_almost_equal(
- exponent_hdr_IPT(Y_s, Y_abs), e, decimal=7)
-
- Y_s = np.tile(Y_s, 6)
- Y_abs = np.tile(Y_abs, 6)
- e = np.tile(e, 6)
- np.testing.assert_almost_equal(
- exponent_hdr_IPT(Y_s, Y_abs), e, decimal=7)
-
- Y_s = np.reshape(Y_s, (2, 3))
- Y_abs = np.reshape(Y_abs, (2, 3))
- e = np.reshape(e, (2, 3))
- np.testing.assert_almost_equal(
- exponent_hdr_IPT(Y_s, Y_abs), e, decimal=7)
-
- Y_s = np.reshape(Y_s, (2, 3, 1))
- Y_abs = np.reshape(Y_abs, (2, 3, 1))
- e = np.reshape(e, (2, 3, 1))
- np.testing.assert_almost_equal(
- exponent_hdr_IPT(Y_s, Y_abs), e, decimal=7)
-
- @ignore_numpy_errors
- def test_nan_exponent_hdr_IPT(self):
- """
- Tests :func:`colour.models.hdr_ipt.exponent_hdr_IPT`
- definition nan support.
- """
-
- cases = np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan])
- exponent_hdr_IPT(cases, cases)
-
-
if __name__ == '__main__':
unittest.main()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 6
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[tests]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"flake8",
"coverage",
"pytest"
],
"pre_install": [],
"python": "3.4",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
-e git+https://github.com/colour-science/colour.git@3e8087c925f28dd07b6468751f603b34d14760a4#egg=colour_science
coverage==6.2
flake8==5.0.4
importlib-metadata==4.2.0
iniconfig==1.1.1
mccabe==0.7.0
nose==1.3.7
numpy==1.19.5
packaging==21.3
pluggy==1.0.0
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing==3.1.4
pytest==7.0.1
scipy==1.5.4
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: colour
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- flake8==5.0.4
- importlib-metadata==4.2.0
- iniconfig==1.1.1
- mccabe==0.7.0
- nose==1.3.7
- numpy==1.19.5
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- scipy==1.5.4
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/colour
| [
"colour/colorimetry/tests/test_lightness.py::TestLightnessGlasser1958::test_lightness_Glasser1958",
"colour/colorimetry/tests/test_lightness.py::TestLightnessGlasser1958::test_n_dimensional_lightness_Glasser1958",
"colour/colorimetry/tests/test_lightness.py::TestLightnessGlasser1958::test_nan_lightness_Glasser1958",
"colour/colorimetry/tests/test_lightness.py::TestLightnessWyszecki1963::test_lightness_Wyszecki1963",
"colour/colorimetry/tests/test_lightness.py::TestLightnessWyszecki1963::test_n_dimensional_lightness_Wyszecki1963",
"colour/colorimetry/tests/test_lightness.py::TestLightnessWyszecki1963::test_nan_lightness_Wyszecki1963",
"colour/colorimetry/tests/test_lightness.py::TestLightnessCIE1976::test_lightness_CIE1976",
"colour/colorimetry/tests/test_lightness.py::TestLightnessCIE1976::test_n_dimensional_lightness_CIE1976",
"colour/colorimetry/tests/test_lightness.py::TestLightnessCIE1976::test_nan_lightness_CIE1976",
"colour/colorimetry/tests/test_lightness.py::TestLightnessFairchild2010::test_lightness_Fairchild2010",
"colour/colorimetry/tests/test_lightness.py::TestLightnessFairchild2010::test_n_dimensional_lightness_Fairchild2010",
"colour/colorimetry/tests/test_lightness.py::TestLightnessFairchild2010::test_nan_lightness_Fairchild2010",
"colour/colorimetry/tests/test_lightness.py::TestLightnessFairchild2011::test_lightness_Fairchild2011",
"colour/colorimetry/tests/test_lightness.py::TestLightnessFairchild2011::test_n_dimensional_lightness_Fairchild2011",
"colour/colorimetry/tests/test_lightness.py::TestLightnessFairchild2011::test_nan_lightness_Fairchild2011",
"colour/colorimetry/tests/test_luminance.py::TestLuminanceNewhall1943::test_luminance_Newhall1943",
"colour/colorimetry/tests/test_luminance.py::TestLuminanceNewhall1943::test_n_dimensional_luminance_Newhall1943",
"colour/colorimetry/tests/test_luminance.py::TestLuminanceNewhall1943::test_nan_luminance_Newhall1943",
"colour/colorimetry/tests/test_luminance.py::TestLuminanceASTMD153508::test_luminance_ASTMD153508",
"colour/colorimetry/tests/test_luminance.py::TestLuminanceASTMD153508::test_n_dimensional_luminance_ASTMD153508",
"colour/colorimetry/tests/test_luminance.py::TestLuminanceASTMD153508::test_nan_luminance_ASTMD153508",
"colour/colorimetry/tests/test_luminance.py::TestLuminanceCIE1976::test_luminance_CIE1976",
"colour/colorimetry/tests/test_luminance.py::TestLuminanceCIE1976::test_n_dimensional_luminance_CIE1976",
"colour/colorimetry/tests/test_luminance.py::TestLuminanceCIE1976::test_nan_luminance_CIE1976",
"colour/colorimetry/tests/test_luminance.py::TestLuminanceFairchild2010::test_luminance_Fairchild2010",
"colour/colorimetry/tests/test_luminance.py::TestLuminanceFairchild2010::test_n_dimensional_luminance_Fairchild2010",
"colour/colorimetry/tests/test_luminance.py::TestLuminanceFairchild2010::test_nan_luminance_Fairchild2010",
"colour/colorimetry/tests/test_luminance.py::TestLuminanceFairchild2011::test_luminance_Fairchild2011",
"colour/colorimetry/tests/test_luminance.py::TestLuminanceFairchild2011::test_n_dimensional_luminance_Fairchild2011",
"colour/colorimetry/tests/test_luminance.py::TestLuminanceFairchild2011::test_nan_luminance_Fairchild2011",
"colour/models/tests/test_hdr_cie_lab.py::TestExponent_hdr_CIELab::test_exponent_hdr_CIELab",
"colour/models/tests/test_hdr_cie_lab.py::TestExponent_hdr_CIELab::test_n_dimensional_exponent_hdr_CIELab",
"colour/models/tests/test_hdr_cie_lab.py::TestExponent_hdr_CIELab::test_nan_exponent_hdr_CIELab",
"colour/models/tests/test_hdr_cie_lab.py::TestXYZ_to_hdr_CIELab::test_XYZ_to_hdr_CIELab",
"colour/models/tests/test_hdr_cie_lab.py::TestXYZ_to_hdr_CIELab::test_n_dimensional_XYZ_to_hdr_CIELab",
"colour/models/tests/test_hdr_cie_lab.py::TestXYZ_to_hdr_CIELab::test_nan_XYZ_to_hdr_CIELab",
"colour/models/tests/test_hdr_cie_lab.py::TestHdr_CIELab_to_XYZ::test_hdr_CIELab_to_XYZ",
"colour/models/tests/test_hdr_cie_lab.py::TestHdr_CIELab_to_XYZ::test_n_dimensional_hdr_CIELab_to_XYZ",
"colour/models/tests/test_hdr_cie_lab.py::TestHdr_CIELab_to_XYZ::test_nan_hdr_CIELab_to_XYZ",
"colour/models/tests/test_hdr_ipt.py::TestExponent_hdr_IPT::test_exponent_hdr_IPT",
"colour/models/tests/test_hdr_ipt.py::TestExponent_hdr_IPT::test_n_dimensional_exponent_hdr_IPT",
"colour/models/tests/test_hdr_ipt.py::TestExponent_hdr_IPT::test_nan_exponent_hdr_IPT",
"colour/models/tests/test_hdr_ipt.py::TestXYZ_to_hdr_IPT::test_XYZ_to_hdr_IPT",
"colour/models/tests/test_hdr_ipt.py::TestXYZ_to_hdr_IPT::test_n_dimensional_XYZ_to_hdr_IPT",
"colour/models/tests/test_hdr_ipt.py::TestXYZ_to_hdr_IPT::test_nan_XYZ_to_hdr_IPT",
"colour/models/tests/test_hdr_ipt.py::TestHdr_IPT_to_XYZ::test_hdr_IPT_to_XYZ",
"colour/models/tests/test_hdr_ipt.py::TestHdr_IPT_to_XYZ::test_n_dimensional_hdr_IPT_to_XYZ",
"colour/models/tests/test_hdr_ipt.py::TestHdr_IPT_to_XYZ::test_nan_hdr_IPT_to_XYZ"
]
| []
| []
| []
| BSD 3-Clause "New" or "Revised" License | 1,950 | [
"colour/colorimetry/__init__.py",
"colour/colorimetry/luminance.py",
"colour/colorimetry/lightness.py",
"colour/models/hdr_ipt.py",
"colour/models/hdr_cie_lab.py",
"colour/models/__init__.py"
]
| [
"colour/colorimetry/__init__.py",
"colour/colorimetry/luminance.py",
"colour/colorimetry/lightness.py",
"colour/models/hdr_ipt.py",
"colour/models/hdr_cie_lab.py",
"colour/models/__init__.py"
]
|
agronholm__apscheduler-258 | c6c5031276c3b864e127c637d2bdd48138a0b426 | 2017-12-10 16:17:28 | c6c5031276c3b864e127c637d2bdd48138a0b426 | coveralls:
https://coveralls.io/builds/14586525
Coverage decreased (-0.04%) to 93.351% when pulling **b5233b3ec0a8525eef22e90f8abddf690dd759ff on gilbsgilbs:jitter** into **c6c5031276c3b864e127c637d2bdd48138a0b426 on agronholm:master**.
agronholm: The changes I requested for the interval trigger of course apply to the cron trigger as well.
coveralls:
https://coveralls.io/builds/14587374
Coverage increased (+0.05%) to 93.442% when pulling **1e53344018d1fd5568f0a0597846ec46a65b4805 on gilbsgilbs:jitter** into **c6c5031276c3b864e127c637d2bdd48138a0b426 on agronholm:master**.
| diff --git a/apscheduler/triggers/base.py b/apscheduler/triggers/base.py
index ba98632..ce2526a 100644
--- a/apscheduler/triggers/base.py
+++ b/apscheduler/triggers/base.py
@@ -1,4 +1,6 @@
from abc import ABCMeta, abstractmethod
+from datetime import timedelta
+import random
import six
@@ -17,3 +19,30 @@ class BaseTrigger(six.with_metaclass(ABCMeta)):
:param datetime.datetime previous_fire_time: the previous time the trigger was fired
:param datetime.datetime now: current datetime
"""
+
+ def _apply_jitter(self, next_fire_time, jitter, now):
+ """
+ Randomize ``next_fire_time`` by adding or subtracting a random value (the jitter). If the
+ resulting datetime is in the past, returns the initial ``next_fire_time`` without jitter.
+
+ ``next_fire_time - jitter <= result <= next_fire_time + jitter``
+
+ :param datetime.datetime|None next_fire_time: next fire time without jitter applied. If
+ ``None``, returns ``None``.
+ :param int|None jitter: maximum number of seconds to add or subtract to
+ ``next_fire_time``. If ``None`` or ``0``, returns ``next_fire_time``
+ :param datetime.datetime now: current datetime
+ :return datetime.datetime|None: next fire time with a jitter.
+ """
+ if next_fire_time is None or not jitter:
+ return next_fire_time
+
+ next_fire_time_with_jitter = next_fire_time + timedelta(
+ seconds=random.uniform(-jitter, jitter))
+
+ if next_fire_time_with_jitter < now:
+ # Next fire time with jitter is in the past.
+ # Ignore jitter to avoid false misfire.
+ return next_fire_time
+
+ return next_fire_time_with_jitter
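A standalone sketch of the behaviour implemented above (illustrative, not imported from the package): the jittered time is only kept if it does not fall before ``now``, which would otherwise register as a false misfire.

``` python
import random
from datetime import datetime, timedelta

def apply_jitter(next_fire_time, jitter, now):
    if next_fire_time is None or not jitter:
        return next_fire_time
    candidate = next_fire_time + timedelta(seconds=random.uniform(-jitter, jitter))
    # Fall back to the unjittered time rather than firing in the past.
    return next_fire_time if candidate < now else candidate

now = datetime(2017, 12, 10, 12, 0, 0)
print(apply_jitter(datetime(2017, 12, 10, 12, 0, 30), 60, now))
```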
diff --git a/apscheduler/triggers/cron/__init__.py b/apscheduler/triggers/cron/__init__.py
index eccee0c..e936190 100644
--- a/apscheduler/triggers/cron/__init__.py
+++ b/apscheduler/triggers/cron/__init__.py
@@ -26,6 +26,7 @@ class CronTrigger(BaseTrigger):
:param datetime|str end_date: latest possible date/time to trigger on (inclusive)
:param datetime.tzinfo|str timezone: time zone to use for the date/time calculations (defaults
to scheduler timezone)
+ :param int|None jitter: advance or delay the job execution by ``jitter`` seconds at most.
.. note:: The first weekday is always **monday**.
"""
@@ -42,10 +43,11 @@ class CronTrigger(BaseTrigger):
'second': BaseField
}
- __slots__ = 'timezone', 'start_date', 'end_date', 'fields'
+ __slots__ = 'timezone', 'start_date', 'end_date', 'fields', 'jitter'
def __init__(self, year=None, month=None, day=None, week=None, day_of_week=None, hour=None,
- minute=None, second=None, start_date=None, end_date=None, timezone=None):
+ minute=None, second=None, start_date=None, end_date=None, timezone=None,
+ jitter=None):
if timezone:
self.timezone = astimezone(timezone)
elif isinstance(start_date, datetime) and start_date.tzinfo:
@@ -58,6 +60,8 @@ class CronTrigger(BaseTrigger):
self.start_date = convert_to_datetime(start_date, self.timezone, 'start_date')
self.end_date = convert_to_datetime(end_date, self.timezone, 'end_date')
+ self.jitter = jitter
+
values = dict((key, value) for (key, value) in six.iteritems(locals())
if key in self.FIELD_NAMES and value is not None)
self.fields = []
@@ -168,15 +172,18 @@ class CronTrigger(BaseTrigger):
return None
if fieldnum >= 0:
+ if self.jitter is not None:
+ next_date = self._apply_jitter(next_date, self.jitter, now)
return next_date
def __getstate__(self):
return {
- 'version': 1,
+ 'version': 2,
'timezone': self.timezone,
'start_date': self.start_date,
'end_date': self.end_date,
- 'fields': self.fields
+ 'fields': self.fields,
+ 'jitter': self.jitter,
}
def __setstate__(self, state):
@@ -184,15 +191,16 @@ class CronTrigger(BaseTrigger):
if isinstance(state, tuple):
state = state[1]
- if state.get('version', 1) > 1:
+ if state.get('version', 1) > 2:
raise ValueError(
- 'Got serialized data for version %s of %s, but only version 1 can be handled' %
- (state['version'], self.__class__.__name__))
+ 'Got serialized data for version %s of %s, but only versions up to 2 can be '
+ 'handled' % (state['version'], self.__class__.__name__))
self.timezone = state['timezone']
self.start_date = state['start_date']
self.end_date = state['end_date']
self.fields = state['fields']
+ self.jitter = state.get('jitter')
def __str__(self):
options = ["%s='%s'" % (f.name, f) for f in self.fields if not f.is_default]
@@ -202,5 +210,5 @@ class CronTrigger(BaseTrigger):
options = ["%s='%s'" % (f.name, f) for f in self.fields if not f.is_default]
if self.start_date:
options.append("start_date='%s'" % datetime_repr(self.start_date))
- return "<%s (%s, timezone='%s')>" % (
- self.__class__.__name__, ', '.join(options), self.timezone)
+ return "<%s (%s, timezone='%s', jitter='%s')>" % (
+ self.__class__.__name__, ', '.join(options), self.timezone, self.jitter)
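The version bump matters for persistent job stores: triggers pickled by this code keep their jitter, while pickles from older versions fall back to ``jitter=None`` through ``state.get('jitter')``. A minimal round-trip check, assuming the patched package is installed:

``` python
import pickle
from apscheduler.triggers.cron import CronTrigger

trigger = CronTrigger(hour='*', jitter=120)
restored = pickle.loads(pickle.dumps(trigger))
print(restored.jitter)  # 120
```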
diff --git a/apscheduler/triggers/interval.py b/apscheduler/triggers/interval.py
index fec912a..d3589a8 100644
--- a/apscheduler/triggers/interval.py
+++ b/apscheduler/triggers/interval.py
@@ -20,12 +20,13 @@ class IntervalTrigger(BaseTrigger):
:param datetime|str start_date: starting point for the interval calculation
:param datetime|str end_date: latest possible date/time to trigger on
:param datetime.tzinfo|str timezone: time zone to use for the date/time calculations
+ :param int|None jitter: advance or delay the job execution by ``jitter`` seconds at most.
"""
- __slots__ = 'timezone', 'start_date', 'end_date', 'interval', 'interval_length'
+ __slots__ = 'timezone', 'start_date', 'end_date', 'interval', 'interval_length', 'jitter'
def __init__(self, weeks=0, days=0, hours=0, minutes=0, seconds=0, start_date=None,
- end_date=None, timezone=None):
+ end_date=None, timezone=None, jitter=None):
self.interval = timedelta(weeks=weeks, days=days, hours=hours, minutes=minutes,
seconds=seconds)
self.interval_length = timedelta_seconds(self.interval)
@@ -46,6 +47,8 @@ class IntervalTrigger(BaseTrigger):
self.start_date = convert_to_datetime(start_date, self.timezone, 'start_date')
self.end_date = convert_to_datetime(end_date, self.timezone, 'end_date')
+ self.jitter = jitter
+
def get_next_fire_time(self, previous_fire_time, now):
if previous_fire_time:
next_fire_time = previous_fire_time + self.interval
@@ -56,16 +59,20 @@ class IntervalTrigger(BaseTrigger):
next_interval_num = int(ceil(timediff_seconds / self.interval_length))
next_fire_time = self.start_date + self.interval * next_interval_num
+ if self.jitter is not None:
+ next_fire_time = self._apply_jitter(next_fire_time, self.jitter, now)
+
if not self.end_date or next_fire_time <= self.end_date:
return self.timezone.normalize(next_fire_time)
def __getstate__(self):
return {
- 'version': 1,
+ 'version': 2,
'timezone': self.timezone,
'start_date': self.start_date,
'end_date': self.end_date,
- 'interval': self.interval
+ 'interval': self.interval,
+ 'jitter': self.jitter,
}
def __setstate__(self, state):
@@ -73,20 +80,22 @@ class IntervalTrigger(BaseTrigger):
if isinstance(state, tuple):
state = state[1]
- if state.get('version', 1) > 1:
+ if state.get('version', 1) > 2:
raise ValueError(
- 'Got serialized data for version %s of %s, but only version 1 can be handled' %
- (state['version'], self.__class__.__name__))
+ 'Got serialized data for version %s of %s, but only versions up to 2 can be '
+ 'handled' % (state['version'], self.__class__.__name__))
self.timezone = state['timezone']
self.start_date = state['start_date']
self.end_date = state['end_date']
self.interval = state['interval']
self.interval_length = timedelta_seconds(self.interval)
+ self.jitter = state.get('jitter')
def __str__(self):
return 'interval[%s]' % str(self.interval)
def __repr__(self):
- return "<%s (interval=%r, start_date='%s', timezone='%s')>" % (
- self.__class__.__name__, self.interval, datetime_repr(self.start_date), self.timezone)
+ return "<%s (interval=%r, start_date='%s', timezone='%s', jitter='%s')>" % (
+ self.__class__.__name__, self.interval, datetime_repr(self.start_date), self.timezone,
+ self.jitter)
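A quick interactive check of the patched trigger (assumes this patch plus ``pytz`` are installed): each call returns a time within 3 seconds of the nominal next interval boundary.

``` python
from datetime import datetime

import pytz

from apscheduler.triggers.interval import IntervalTrigger

trigger = IntervalTrigger(seconds=10, jitter=3, timezone=pytz.utc)
now = datetime.now(pytz.utc)
for _ in range(3):
    print(trigger.get_next_fire_time(None, now))
```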
diff --git a/docs/modules/triggers/cron.rst b/docs/modules/triggers/cron.rst
index 85f8f83..791156d 100644
--- a/docs/modules/triggers/cron.rst
+++ b/docs/modules/triggers/cron.rst
@@ -104,3 +104,11 @@ The :meth:`~apscheduler.schedulers.base.BaseScheduler.scheduled_job` decorator w
@sched.scheduled_job('cron', id='my_job_id', day='last sun')
def some_decorated_task():
print("I am printed at 00:00:00 on the last Sunday of every month!")
+
+
+The ``jitter`` option enables you to add a random component to the execution time. This might be useful if you have
+multiple servers and don't want them to run a job at the exact same moment or if you want to prevent jobs from running
+at sharp hours::
+
+ # Run the `job_function` every sharp hour with an extra-delay picked randomly in a [-120,+120] seconds window.
+ sched.add_job(job_function, 'cron', hour='*', jitter=120)
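Put together as a runnable sketch (``job_function`` here is just a placeholder for your own callable):

``` python
from apscheduler.schedulers.blocking import BlockingScheduler

def job_function():
    print("Hello World")

sched = BlockingScheduler()
# Fires within two minutes of every sharp hour instead of exactly on it.
sched.add_job(job_function, 'cron', hour='*', jitter=120)
sched.start()
```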
diff --git a/docs/modules/triggers/interval.rst b/docs/modules/triggers/interval.rst
index 5fec5b0..f7b8ae4 100644
--- a/docs/modules/triggers/interval.rst
+++ b/docs/modules/triggers/interval.rst
@@ -59,3 +59,11 @@ The :meth:`~apscheduler.schedulers.base.BaseScheduler.scheduled_job` decorator w
@sched.scheduled_job('interval', id='my_job_id', hours=2)
def job_function():
print("Hello World")
+
+
+The ``jitter`` option enables you to add a random component to the execution time. This might be useful if you have
+multiple servers and don't want them to run a job at the exact same moment or if you want to prevent multiple jobs
+with similar options from always running concurrently::
+
+ # Run the `job_function` every hour with an extra-delay picked randomly in a [-120,+120] seconds window.
+ sched.add_job(job_function, 'interval', hours=1, jitter=120)
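The decorator form accepts the same option; a brief sketch:

``` python
from apscheduler.schedulers.blocking import BlockingScheduler

sched = BlockingScheduler()

@sched.scheduled_job('interval', id='my_job_id', hours=1, jitter=120)
def job_function():
    print("Hello World")

sched.start()
```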
| Feature request: allow interval jobs to be offset by a random value
When jobs are configured to execute at certain times, they tend to coalesce at one particular time. For example, executing 10 jobs at the top of the hour, another 10 once at noon and once at midnight, and a third job that executes every minute will cause all of these jobs to run at the same time at noon and midnight, potentially causing a bottleneck in the available worker threads.
This could be solved by introducing a new parameter to the interval trigger that would define a value in seconds by which each job would be offset.
Assuming these new imports:
``` python
from operator import add, sub
from random import randrange, choice
```
The constructor could take a new parameter (`random_offset`) whose value would be capped at the configured interval length:
``` python
self.random_offset = min(self.interval_length, random_offset)
```
Then, the `get_next_fire_time()` function could check if an offset is defined and modify the next execution time accordingly:
``` python
if self.random_offset:
    offset = timedelta(seconds=randrange(0, self.random_offset))
    op = choice((add, sub))
    next_fire_time = max(datetime.now(self.timezone), op(next_fire_time, offset))
```
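For concreteness, here is how those pieces could fit together in a hypothetical subclass. The name `RandomOffsetIntervalTrigger` and the `int` cast on the offset bound are my additions, since `randrange` needs an integer stop:
``` python
from datetime import datetime, timedelta
from operator import add, sub
from random import randrange, choice

from apscheduler.triggers.interval import IntervalTrigger

class RandomOffsetIntervalTrigger(IntervalTrigger):
    def __init__(self, random_offset=0, **kwargs):
        super(RandomOffsetIntervalTrigger, self).__init__(**kwargs)
        # interval_length is a float number of seconds; randrange wants an int
        self.random_offset = int(min(self.interval_length, random_offset))

    def get_next_fire_time(self, previous_fire_time, now):
        next_fire_time = super(RandomOffsetIntervalTrigger,
                               self).get_next_fire_time(previous_fire_time, now)
        if self.random_offset and next_fire_time is not None:
            offset = timedelta(seconds=randrange(0, self.random_offset))
            op = choice((add, sub))
            # never schedule into the past
            next_fire_time = max(datetime.now(self.timezone), op(next_fire_time, offset))
        return next_fire_time
```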
I understand all this can be worked around by setting custom start times for each group of jobs so that they don't intersect but having an easy-to-configure parameter would be cool too. The UNIX Anacron scheduler uses a similar mechanic with the [`RANDOM_DELAY` option](http://man7.org/linux/man-pages/man5/anacrontab.5.html).
I did notice during testing of the above code that `get_next_fire_time()` is called both from `_get_run_times()` (in `job.py`) and `_process_jobs()` (in `base.py`), producing a different time for each call. I'm not sure if it's significant that these functions both receive the same next fire time.
Anyway, is this something you'd consider merging if I were to submit a PR? Thank you! | agronholm/apscheduler | diff --git a/tests/test_triggers.py b/tests/test_triggers.py
index c22136e..d854800 100644
--- a/tests/test_triggers.py
+++ b/tests/test_triggers.py
@@ -1,9 +1,11 @@
import pickle
+import random
from datetime import datetime, timedelta, date
import pytest
import pytz
+from apscheduler.triggers.base import BaseTrigger
from apscheduler.triggers.cron import CronTrigger
from apscheduler.triggers.date import DateTrigger
from apscheduler.triggers.interval import IntervalTrigger
@@ -14,11 +16,94 @@ except ImportError:
from mock import Mock
+class _DummyTriggerWithJitter(BaseTrigger):
+ def __init__(self, dt, jitter):
+ self.dt = dt
+ self.jitter = jitter
+
+ def get_next_fire_time(self, previous_fire_time, now):
+ return self._apply_jitter(self.dt, self.jitter, now)
+
+
+class TestJitter(object):
+ def test_jitter_disabled(self):
+ dt = datetime(2017, 5, 25, 14, 49, 50)
+ trigger = _DummyTriggerWithJitter(dt, None)
+
+ now = datetime(2017, 5, 25, 13, 40, 44)
+ assert trigger.get_next_fire_time(None, now) == dt
+
+ def test_jitter_with_none_next_fire_time(self):
+ trigger = _DummyTriggerWithJitter(None, 5)
+ now = datetime(2017, 5, 25, 13, 40, 44)
+ assert trigger.get_next_fire_time(None, now) is None
+
+ def test_jitter_positive(self, monkeypatch):
+ monkeypatch.setattr(random, 'uniform', lambda a, b: 30.)
+
+ now = datetime(2017, 5, 25, 13, 40, 44)
+ dt = datetime(2017, 5, 25, 14, 49, 50)
+ expected_dt = datetime(2017, 5, 25, 14, 50, 20)
+
+ trigger = _DummyTriggerWithJitter(dt, 60)
+ assert trigger.get_next_fire_time(None, now) == expected_dt
+
+ def test_jitter_in_past_but_initial_date_in_future(self, monkeypatch):
+ monkeypatch.setattr(random, 'uniform', lambda a, b: -30.)
+
+ now = datetime(2017, 5, 25, 13, 40, 44)
+ dt = datetime(2017, 5, 25, 13, 40, 47)
+ expected_dt = dt
+
+ trigger = _DummyTriggerWithJitter(dt, 60)
+ assert trigger.get_next_fire_time(None, now) == expected_dt
+
+ def test_jitter_in_future_but_initial_date_in_past(self, monkeypatch):
+ monkeypatch.setattr(random, 'uniform', lambda a, b: 30.)
+
+ now = datetime(2017, 5, 25, 13, 40, 44)
+ dt = datetime(2017, 5, 25, 13, 40, 30)
+ expected_dt = datetime(2017, 5, 25, 13, 41, 0)
+
+ trigger = _DummyTriggerWithJitter(dt, 60)
+ assert trigger.get_next_fire_time(None, now) == expected_dt
+
+ def test_jitter_misfire(self, monkeypatch):
+ monkeypatch.setattr(random, 'uniform', lambda a, b: -30.)
+
+ now = datetime(2017, 5, 25, 13, 40, 44)
+ dt = datetime(2017, 5, 25, 13, 40, 40)
+ expected_dt = dt
+
+ trigger = _DummyTriggerWithJitter(dt, 60)
+ assert trigger.get_next_fire_time(None, now) == expected_dt
+
+ def test_jitter_is_now(self, monkeypatch):
+ monkeypatch.setattr(random, 'uniform', lambda a, b: 4.)
+
+ now = datetime(2017, 5, 25, 13, 40, 44)
+ dt = datetime(2017, 5, 25, 13, 40, 40)
+ expected_dt = now
+
+ trigger = _DummyTriggerWithJitter(dt, 60)
+ assert trigger.get_next_fire_time(None, now) == expected_dt
+
+ def test_jitter(self):
+ now = datetime(2017, 5, 25, 13, 36, 44)
+ dt = datetime(2017, 5, 25, 13, 40, 45)
+ min_expected_dt = datetime(2017, 5, 25, 13, 40, 40)
+ max_expected_dt = datetime(2017, 5, 25, 13, 40, 50)
+
+ trigger = _DummyTriggerWithJitter(dt, 5)
+ for _ in range(0, 100):
+ assert min_expected_dt <= trigger.get_next_fire_time(None, now) <= max_expected_dt
+
+
class TestCronTrigger(object):
def test_cron_trigger_1(self, timezone):
trigger = CronTrigger(year='2009/2', month='1/3', day='5-13', timezone=timezone)
assert repr(trigger) == ("<CronTrigger (year='2009/2', month='1/3', day='5-13', "
- "timezone='Europe/Berlin')>")
+ "timezone='Europe/Berlin', jitter='None')>")
assert str(trigger) == "cron[year='2009/2', month='1/3', day='5-13']"
start_date = timezone.localize(datetime(2008, 12, 1))
correct_next_date = timezone.localize(datetime(2009, 1, 5))
@@ -33,7 +118,7 @@ class TestCronTrigger(object):
def test_cron_trigger_3(self, timezone):
trigger = CronTrigger(year='2009', month='2', hour='8-10', timezone=timezone)
assert repr(trigger) == ("<CronTrigger (year='2009', month='2', hour='8-10', "
- "timezone='Europe/Berlin')>")
+ "timezone='Europe/Berlin', jitter='None')>")
start_date = timezone.localize(datetime(2009, 1, 1))
correct_next_date = timezone.localize(datetime(2009, 2, 1, 8))
assert trigger.get_next_fire_time(None, start_date) == correct_next_date
@@ -41,7 +126,7 @@ class TestCronTrigger(object):
def test_cron_trigger_4(self, timezone):
trigger = CronTrigger(year='2012', month='2', day='last', timezone=timezone)
assert repr(trigger) == ("<CronTrigger (year='2012', month='2', day='last', "
- "timezone='Europe/Berlin')>")
+ "timezone='Europe/Berlin', jitter='None')>")
start_date = timezone.localize(datetime(2012, 2, 1))
correct_next_date = timezone.localize(datetime(2012, 2, 29))
assert trigger.get_next_fire_time(None, start_date) == correct_next_date
@@ -55,11 +140,12 @@ class TestCronTrigger(object):
def test_cron_zero_value(self, timezone):
trigger = CronTrigger(year=2009, month=2, hour=0, timezone=timezone)
assert repr(trigger) == ("<CronTrigger (year='2009', month='2', hour='0', "
- "timezone='Europe/Berlin')>")
+ "timezone='Europe/Berlin', jitter='None')>")
def test_cron_year_list(self, timezone):
trigger = CronTrigger(year='2009,2008', timezone=timezone)
- assert repr(trigger) == "<CronTrigger (year='2009,2008', timezone='Europe/Berlin')>"
+ assert repr(trigger) == ("<CronTrigger (year='2009,2008', timezone='Europe/Berlin', "
+ "jitter='None')>")
assert str(trigger) == "cron[year='2009,2008']"
start_date = timezone.localize(datetime(2009, 1, 1))
correct_next_date = timezone.localize(datetime(2009, 1, 1))
@@ -70,7 +156,7 @@ class TestCronTrigger(object):
start_date='2009-02-03 11:00:00', timezone=timezone)
assert repr(trigger) == ("<CronTrigger (year='2009', month='2', hour='8-10', "
"start_date='2009-02-03 11:00:00 CET', "
- "timezone='Europe/Berlin')>")
+ "timezone='Europe/Berlin', jitter='None')>")
assert str(trigger) == "cron[year='2009', month='2', hour='8-10']"
start_date = timezone.localize(datetime(2009, 1, 1))
correct_next_date = timezone.localize(datetime(2009, 2, 4, 8))
@@ -101,7 +187,7 @@ class TestCronTrigger(object):
def test_cron_weekday_overlap(self, timezone):
trigger = CronTrigger(year=2009, month=1, day='6-10', day_of_week='2-4', timezone=timezone)
assert repr(trigger) == ("<CronTrigger (year='2009', month='1', day='6-10', "
- "day_of_week='2-4', timezone='Europe/Berlin')>")
+ "day_of_week='2-4', timezone='Europe/Berlin', jitter='None')>")
assert str(trigger) == "cron[year='2009', month='1', day='6-10', day_of_week='2-4']"
start_date = timezone.localize(datetime(2009, 1, 1))
correct_next_date = timezone.localize(datetime(2009, 1, 7))
@@ -110,7 +196,7 @@ class TestCronTrigger(object):
def test_cron_weekday_nomatch(self, timezone):
trigger = CronTrigger(year=2009, month=1, day='6-10', day_of_week='0,6', timezone=timezone)
assert repr(trigger) == ("<CronTrigger (year='2009', month='1', day='6-10', "
- "day_of_week='0,6', timezone='Europe/Berlin')>")
+ "day_of_week='0,6', timezone='Europe/Berlin', jitter='None')>")
assert str(trigger) == "cron[year='2009', month='1', day='6-10', day_of_week='0,6']"
start_date = timezone.localize(datetime(2009, 1, 1))
correct_next_date = None
@@ -119,7 +205,7 @@ class TestCronTrigger(object):
def test_cron_weekday_positional(self, timezone):
trigger = CronTrigger(year=2009, month=1, day='4th wed', timezone=timezone)
assert repr(trigger) == ("<CronTrigger (year='2009', month='1', day='4th wed', "
- "timezone='Europe/Berlin')>")
+ "timezone='Europe/Berlin', jitter='None')>")
assert str(trigger) == "cron[year='2009', month='1', day='4th wed']"
start_date = timezone.localize(datetime(2009, 1, 1))
correct_next_date = timezone.localize(datetime(2009, 1, 28))
@@ -128,7 +214,7 @@ class TestCronTrigger(object):
def test_week_1(self, timezone):
trigger = CronTrigger(year=2009, month=2, week=8, timezone=timezone)
assert repr(trigger) == ("<CronTrigger (year='2009', month='2', week='8', "
- "timezone='Europe/Berlin')>")
+ "timezone='Europe/Berlin', jitter='None')>")
assert str(trigger) == "cron[year='2009', month='2', week='8']"
start_date = timezone.localize(datetime(2009, 1, 1))
correct_next_date = timezone.localize(datetime(2009, 2, 16))
@@ -137,7 +223,7 @@ class TestCronTrigger(object):
def test_week_2(self, timezone):
trigger = CronTrigger(year=2009, week=15, day_of_week=2, timezone=timezone)
assert repr(trigger) == ("<CronTrigger (year='2009', week='15', day_of_week='2', "
- "timezone='Europe/Berlin')>")
+ "timezone='Europe/Berlin', jitter='None')>")
assert str(trigger) == "cron[year='2009', week='15', day_of_week='2']"
start_date = timezone.localize(datetime(2009, 1, 1))
correct_next_date = timezone.localize(datetime(2009, 4, 8))
@@ -146,7 +232,8 @@ class TestCronTrigger(object):
def test_cron_extra_coverage(self, timezone):
# This test has no value other than patching holes in test coverage
trigger = CronTrigger(day='6,8', timezone=timezone)
- assert repr(trigger) == "<CronTrigger (day='6,8', timezone='Europe/Berlin')>"
+ assert repr(trigger) == ("<CronTrigger (day='6,8', timezone='Europe/Berlin', "
+ "jitter='None')>")
assert str(trigger) == "cron[day='6,8']"
start_date = timezone.localize(datetime(2009, 12, 31))
correct_next_date = timezone.localize(datetime(2010, 1, 6))
@@ -162,7 +249,8 @@ class TestCronTrigger(object):
"""
trigger = CronTrigger(hour='5-6', timezone=timezone)
- assert repr(trigger) == "<CronTrigger (hour='5-6', timezone='Europe/Berlin')>"
+ assert repr(trigger) == ("<CronTrigger (hour='5-6', timezone='Europe/Berlin', "
+ "jitter='None')>")
assert str(trigger) == "cron[hour='5-6']"
start_date = timezone.localize(datetime(2009, 9, 25, 7))
correct_next_date = timezone.localize(datetime(2009, 9, 26, 5))
@@ -200,7 +288,7 @@ class TestCronTrigger(object):
alter_tz = pytz.FixedOffset(-600)
trigger = CronTrigger(year=2009, week=15, day_of_week=2, timezone=timezone)
assert repr(trigger) == ("<CronTrigger (year='2009', week='15', day_of_week='2', "
- "timezone='Europe/Berlin')>")
+ "timezone='Europe/Berlin', jitter='None')>")
assert str(trigger) == "cron[year='2009', week='15', day_of_week='2']"
start_date = alter_tz.localize(datetime(2008, 12, 31, 22))
correct_next_date = timezone.localize(datetime(2009, 4, 8))
@@ -249,6 +337,43 @@ class TestCronTrigger(object):
for attr in CronTrigger.__slots__:
assert getattr(trigger2, attr) == getattr(trigger, attr)
+ def test_jitter_produces_differrent_valid_results(self, timezone):
+ trigger = CronTrigger(minute='*', jitter=5)
+ now = timezone.localize(datetime(2017, 11, 12, 6, 55, 30))
+
+ results = set()
+ for _ in range(0, 100):
+ next_fire_time = trigger.get_next_fire_time(None, now)
+ results.add(next_fire_time)
+ assert timedelta(seconds=25) <= (next_fire_time - now) <= timedelta(seconds=35)
+ assert 1 < len(results)
+
+ def test_jitter_with_timezone(self, timezone):
+ est = pytz.FixedOffset(-300)
+ cst = pytz.FixedOffset(-360)
+ trigger = CronTrigger(hour=11, minute='*/5', timezone=est, jitter=5)
+ start_date = cst.localize(datetime(2009, 9, 26, 10, 16))
+ correct_next_date = est.localize(datetime(2009, 9, 26, 11, 20))
+ for _ in range(0, 100):
+ assert abs(trigger.get_next_fire_time(None, start_date) -
+ correct_next_date) <= timedelta(seconds=5)
+
+ @pytest.mark.parametrize('trigger_args, start_date, start_date_dst, correct_next_date', [
+ ({'hour': 8}, datetime(2013, 3, 9, 12), False, datetime(2013, 3, 10, 8)),
+ ({'hour': 8}, datetime(2013, 11, 2, 12), True, datetime(2013, 11, 3, 8)),
+ ({'minute': '*/30'}, datetime(2013, 3, 10, 1, 35), False, datetime(2013, 3, 10, 3)),
+ ({'minute': '*/30'}, datetime(2013, 11, 3, 1, 35), True, datetime(2013, 11, 3, 1))
+ ], ids=['absolute_spring', 'absolute_autumn', 'interval_spring', 'interval_autumn'])
+ def test_jitter_dst_change(self, trigger_args, start_date, start_date_dst, correct_next_date):
+ timezone = pytz.timezone('US/Eastern')
+ trigger = CronTrigger(timezone=timezone, jitter=5, **trigger_args)
+ start_date = timezone.localize(start_date, is_dst=start_date_dst)
+ correct_next_date = timezone.localize(correct_next_date, is_dst=not start_date_dst)
+
+ for _ in range(0, 100):
+ next_fire_time = trigger.get_next_fire_time(None, start_date)
+ assert abs(next_fire_time - correct_next_date) <= timedelta(seconds=5)
+
class TestDateTrigger(object):
@pytest.mark.parametrize('run_date,alter_tz,previous,now,expected', [
@@ -375,7 +500,7 @@ class TestIntervalTrigger(object):
def test_repr(self, trigger):
assert repr(trigger) == ("<IntervalTrigger (interval=datetime.timedelta(0, 1), "
"start_date='2009-08-04 00:00:02 CEST', "
- "timezone='Europe/Berlin')>")
+ "timezone='Europe/Berlin', jitter='None')>")
def test_str(self, trigger):
assert str(trigger) == "interval[0:00:01]"
@@ -384,9 +509,37 @@ class TestIntervalTrigger(object):
"""Test that the trigger is pickleable."""
trigger = IntervalTrigger(weeks=2, days=6, minutes=13, seconds=2,
- start_date=date(2016, 4, 3), timezone=timezone)
+ start_date=date(2016, 4, 3), timezone=timezone,
+ jitter=12)
data = pickle.dumps(trigger, 2)
trigger2 = pickle.loads(data)
for attr in IntervalTrigger.__slots__:
assert getattr(trigger2, attr) == getattr(trigger, attr)
+
+ def test_jitter_produces_different_valid_results(self, timezone):
+ trigger = IntervalTrigger(seconds=5, timezone=timezone, jitter=3)
+ now = datetime.now(timezone)
+
+ results = set()
+ for _ in range(0, 100):
+ next_fire_time = trigger.get_next_fire_time(None, now)
+ results.add(next_fire_time)
+ assert timedelta(seconds=2) <= (next_fire_time - now) <= timedelta(seconds=8)
+ assert 1 < len(results)
+
+ @pytest.mark.parametrize('trigger_args, start_date, start_date_dst, correct_next_date', [
+ ({'hours': 1}, datetime(2013, 3, 10, 1, 35), False, datetime(2013, 3, 10, 3, 35)),
+ ({'hours': 1}, datetime(2013, 11, 3, 1, 35), True, datetime(2013, 11, 3, 1, 35))
+ ], ids=['interval_spring', 'interval_autumn'])
+ def test_jitter_dst_change(self, trigger_args, start_date, start_date_dst, correct_next_date):
+ timezone = pytz.timezone('US/Eastern')
+ epsilon = timedelta(seconds=1)
+ start_date = timezone.localize(start_date, is_dst=start_date_dst)
+ trigger = IntervalTrigger(timezone=timezone, start_date=start_date, jitter=5,
+ **trigger_args)
+ correct_next_date = timezone.localize(correct_next_date, is_dst=not start_date_dst)
+
+ for _ in range(0, 100):
+ next_fire_time = trigger.get_next_fire_time(None, start_date + epsilon)
+ assert abs(next_fire_time - correct_next_date) <= timedelta(seconds=5)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 5
} | 3.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[testing]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-tornado"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/agronholm/apscheduler.git@c6c5031276c3b864e127c637d2bdd48138a0b426#egg=APScheduler
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
backports.zoneinfo==0.2.1
certifi==2021.5.30
coverage==6.2
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
importlib-resources==5.4.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-asyncio==0.5.0
pytest-cov==4.0.0
pytest-tornado==0.8.1
pytz==2025.2
pytz-deprecation-shim==0.1.0.post0
six==1.17.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
tornado==6.1
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
tzdata==2025.2
tzlocal==4.2
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: apscheduler
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- backports-zoneinfo==0.2.1
- coverage==6.2
- importlib-resources==5.4.0
- pytest-asyncio==0.5.0
- pytest-cov==4.0.0
- pytest-tornado==0.8.1
- pytz==2025.2
- pytz-deprecation-shim==0.1.0.post0
- six==1.17.0
- tomli==1.2.3
- tornado==6.1
- tzdata==2025.2
- tzlocal==4.2
prefix: /opt/conda/envs/apscheduler
| [
"tests/test_triggers.py::TestJitter::test_jitter_disabled",
"tests/test_triggers.py::TestJitter::test_jitter_with_none_next_fire_time",
"tests/test_triggers.py::TestJitter::test_jitter_positive",
"tests/test_triggers.py::TestJitter::test_jitter_in_past_but_initial_date_in_future",
"tests/test_triggers.py::TestJitter::test_jitter_in_future_but_initial_date_in_past",
"tests/test_triggers.py::TestJitter::test_jitter_misfire",
"tests/test_triggers.py::TestJitter::test_jitter_is_now",
"tests/test_triggers.py::TestJitter::test_jitter",
"tests/test_triggers.py::TestCronTrigger::test_cron_trigger_1",
"tests/test_triggers.py::TestCronTrigger::test_cron_trigger_3",
"tests/test_triggers.py::TestCronTrigger::test_cron_trigger_4",
"tests/test_triggers.py::TestCronTrigger::test_cron_zero_value",
"tests/test_triggers.py::TestCronTrigger::test_cron_year_list",
"tests/test_triggers.py::TestCronTrigger::test_cron_start_date",
"tests/test_triggers.py::TestCronTrigger::test_cron_weekday_overlap",
"tests/test_triggers.py::TestCronTrigger::test_cron_weekday_nomatch",
"tests/test_triggers.py::TestCronTrigger::test_cron_weekday_positional",
"tests/test_triggers.py::TestCronTrigger::test_week_1",
"tests/test_triggers.py::TestCronTrigger::test_week_2",
"tests/test_triggers.py::TestCronTrigger::test_cron_extra_coverage",
"tests/test_triggers.py::TestCronTrigger::test_cron_increment_weekday",
"tests/test_triggers.py::TestCronTrigger::test_different_tz",
"tests/test_triggers.py::TestCronTrigger::test_jitter_produces_differrent_valid_results",
"tests/test_triggers.py::TestCronTrigger::test_jitter_with_timezone",
"tests/test_triggers.py::TestCronTrigger::test_jitter_dst_change[absolute_spring]",
"tests/test_triggers.py::TestCronTrigger::test_jitter_dst_change[absolute_autumn]",
"tests/test_triggers.py::TestCronTrigger::test_jitter_dst_change[interval_spring]",
"tests/test_triggers.py::TestCronTrigger::test_jitter_dst_change[interval_autumn]",
"tests/test_triggers.py::TestIntervalTrigger::test_repr",
"tests/test_triggers.py::TestIntervalTrigger::test_pickle",
"tests/test_triggers.py::TestIntervalTrigger::test_jitter_produces_different_valid_results",
"tests/test_triggers.py::TestIntervalTrigger::test_jitter_dst_change[interval_spring]",
"tests/test_triggers.py::TestIntervalTrigger::test_jitter_dst_change[interval_autumn]"
]
| []
| [
"tests/test_triggers.py::TestCronTrigger::test_cron_trigger_2",
"tests/test_triggers.py::TestCronTrigger::test_start_end_times_string",
"tests/test_triggers.py::TestCronTrigger::test_previous_fire_time_1",
"tests/test_triggers.py::TestCronTrigger::test_previous_fire_time_2",
"tests/test_triggers.py::TestCronTrigger::test_previous_fire_time_3",
"tests/test_triggers.py::TestCronTrigger::test_cron_faulty_expr",
"tests/test_triggers.py::TestCronTrigger::test_cron_bad_kwarg",
"tests/test_triggers.py::TestCronTrigger::test_month_rollover",
"tests/test_triggers.py::TestCronTrigger::test_timezone_from_start_date",
"tests/test_triggers.py::TestCronTrigger::test_end_date",
"tests/test_triggers.py::TestCronTrigger::test_dst_change[absolute_spring]",
"tests/test_triggers.py::TestCronTrigger::test_dst_change[absolute_autumn]",
"tests/test_triggers.py::TestCronTrigger::test_dst_change[interval_spring]",
"tests/test_triggers.py::TestCronTrigger::test_dst_change[interval_autumn]",
"tests/test_triggers.py::TestCronTrigger::test_timezone_change",
"tests/test_triggers.py::TestCronTrigger::test_pickle",
"tests/test_triggers.py::TestDateTrigger::test_get_next_fire_time[earlier]",
"tests/test_triggers.py::TestDateTrigger::test_get_next_fire_time[exact]",
"tests/test_triggers.py::TestDateTrigger::test_get_next_fire_time[later]",
"tests/test_triggers.py::TestDateTrigger::test_get_next_fire_time[as",
"tests/test_triggers.py::TestDateTrigger::test_get_next_fire_time[previously",
"tests/test_triggers.py::TestDateTrigger::test_get_next_fire_time[alternate",
"tests/test_triggers.py::TestDateTrigger::test_get_next_fire_time[current_time]",
"tests/test_triggers.py::TestDateTrigger::test_dst_change[daylight",
"tests/test_triggers.py::TestDateTrigger::test_dst_change[standard",
"tests/test_triggers.py::TestDateTrigger::test_repr",
"tests/test_triggers.py::TestDateTrigger::test_str",
"tests/test_triggers.py::TestDateTrigger::test_pickle",
"tests/test_triggers.py::TestIntervalTrigger::test_invalid_interval",
"tests/test_triggers.py::TestIntervalTrigger::test_start_end_times_string",
"tests/test_triggers.py::TestIntervalTrigger::test_before",
"tests/test_triggers.py::TestIntervalTrigger::test_within",
"tests/test_triggers.py::TestIntervalTrigger::test_no_start_date",
"tests/test_triggers.py::TestIntervalTrigger::test_different_tz",
"tests/test_triggers.py::TestIntervalTrigger::test_end_date",
"tests/test_triggers.py::TestIntervalTrigger::test_dst_change",
"tests/test_triggers.py::TestIntervalTrigger::test_str"
]
| []
| MIT License | 1,951 | [
"docs/modules/triggers/cron.rst",
"apscheduler/triggers/base.py",
"docs/modules/triggers/interval.rst",
"apscheduler/triggers/cron/__init__.py",
"apscheduler/triggers/interval.py"
]
| [
"docs/modules/triggers/cron.rst",
"apscheduler/triggers/base.py",
"docs/modules/triggers/interval.rst",
"apscheduler/triggers/cron/__init__.py",
"apscheduler/triggers/interval.py"
]
|
Agizin__Algorithm-Visualization-9 | c846f54fae8bf5981bde0df534781ac3db0dcc78 | 2017-12-11 16:15:03 | c846f54fae8bf5981bde0df534781ac3db0dcc78 | diff --git a/algviz/parser/json_objects.py b/algviz/parser/json_objects.py
index 10619ee..cd36f94 100644
--- a/algviz/parser/json_objects.py
+++ b/algviz/parser/json_objects.py
@@ -191,6 +191,11 @@ def decode_json(text):
snapshots.append(decode_snapshot(*raw_snapshot))
return snapshots
+def decode_snapshot_text(text):
+ raw_snapshot = parse(text)
+ validate_snapshot(raw_snapshot)
+ return decode_snapshot(*raw_snapshot)
+
def decode_snapshot(*objects):
sd = SnapshotDecoder()
for raw_obj in objects:
@@ -207,6 +212,9 @@ def validate(json_stuff):
if json_stuff.get(Tokens.TYPE) == Tokens.NULL_T:
validate_null(json_stuff)
+def validate_snapshot(snapshot):
+ pass # TODO
+
def parse(text):
return json.loads(text, object_hook=fix_aliases)
diff --git a/algviz/parser/structures.py b/algviz/parser/structures.py
index 499de37..5efe694 100644
--- a/algviz/parser/structures.py
+++ b/algviz/parser/structures.py
@@ -29,6 +29,12 @@ class ObjectTable(dict):
if hasattr(obj, 'untablify'):
obj.untablify(self)
+ def getuid(self, uid):
+ """Convenience method to return the object with the given uid (`str` type)"""
+ if not isinstance(uid, str):
+ raise TypeError("uid must be a string, not {}".format(uid))
+ return self[ObjectTableReference(uid=uid)]
+
ObjectTableReference = collections.namedtuple("ObjectTableReference", ("uid",))
Snapshot = collections.namedtuple("Snapshot", ("names", "obj_table"))
diff --git a/algviz/tools/__init__.py b/algviz/tools/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/algviz/tools/graph_drawing_mockup.py b/algviz/tools/graph_drawing_mockup.py
new file mode 100644
index 0000000..352cd89
--- /dev/null
+++ b/algviz/tools/graph_drawing_mockup.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python3
+
+"""
+This is a toy script that reads a snapshot (with objects in algviz's JSON data
+format) and directly uses PyGraphViz to create an SVG image of a graph from the
+snapshot.
+
+Run with --help for usage information.
+"""
+
+import argparse
+import sys
+import pygraphviz as pgv
+
+from algviz.parser import json_objects, structures
+
+def graph_to_pgv(graph):
+ """Make a `pygraphviz.AGraph` from the given `algviz.structures.Graph`"""
+ G = pgv.AGraph(directed=True)
+ # It's a shortcoming of pygraphviz that the nodes must be labeled with
+ # their UID and not with their contents, since adding two nodes with the
+ # same label is an error. (I.e., graphviz makes more assumptions about
+ # nodes' labels than we do. It assumes they will be unique identifiers.)
+ G.add_nodes_from(node.uid for node in graph.nodes)
+ for edge in graph.edges:
+ G.add_edge(edge.orig.uid, edge.dest.uid)
+ return G
+
+def main():
+ """Run this script with --help for documenation"""
+ parser = argparse.ArgumentParser(
+ "Read from graph in algviz JSON format and write SVG using PyGraphViz")
+ parser.add_argument("infile", type=argparse.FileType("r"),
+ help="input file. - for stdin")
+ parser.add_argument("outfile", type=argparse.FileType("wb"),
+ help="output file (to be overwritten). - for stdout")
+ parser.add_argument("--prog", "-p", type=str, default="neato", choices=[
+ 'neato', 'dot', 'twopi', 'circo', 'fdp', 'sfdp'],
+ help="A GraphViz graph-drawing algorithm to use")
+ parser.add_argument("--uid", "-u", type=str, default=None,
+ help=("uid of graph to be drawn, if there is more than"
+ " one graph in the snapshot."))
+ parser.add_argument("--var", "-r", default=None, type=str,
+ help="var name of graph. Takes precedence over UID.")
+ args = parser.parse_args()
+
+ # Even though we asked for args.outfile to be opened in binary mode, stdout
+ # will be opened in text mode...
+ if 'b' in args.outfile.mode:
+ outfile = args.outfile
+ else:
+ # ... So we use the underlying buffer to write binary data to stdout
+ outfile = args.outfile.buffer
+ # Now we can do the actual decoding and drawing
+ snapshot = json_objects.decode_snapshot_text(args.infile.read())
+ if args.var:
+ graph = snapshot.names[args.var]
+ elif args.uid:
+ graph = snapshot.obj_table.getuid(args.uid)
+ else:
+ # Just search for the first graph we find in the snapshot
+ graph = None
+ for obj in snapshot.obj_table.values():
+ if isinstance(obj, structures.Graph):
+ graph = obj
+ break
+ if graph is None:
+ raise Exception("No graph found in JSON input")
+
+ gv_graph = graph_to_pgv(graph)
+ gv_graph.layout(prog=args.prog)
+ gv_graph.draw(path=outfile, format="svg")
+
+if __name__ == "__main__":
+ main()
diff --git a/example_objects/README.md b/example_objects/README.md
new file mode 100644
index 0000000..1902d09
--- /dev/null
+++ b/example_objects/README.md
@@ -0,0 +1,1 @@
+This directory is for examples of JSON object specification files that may be useful for tests or just fun to play with.
diff --git a/example_objects/example_graph.json b/example_objects/example_graph.json
new file mode 100644
index 0000000..35bfd48
--- /dev/null
+++ b/example_objects/example_graph.json
@@ -0,0 +1,37 @@
+[
+ {"T": "graph", "uid": "mygraph", "var": "G",
+ "nodes": [
+ {"T": "node", "uid": "n0", "data": 0},
+ {"T": "node", "uid": "n1", "data": 1},
+ {"T": "node", "uid": "n2", "data": 2},
+ {"T": "node", "uid": "n3", "data": 3},
+ {"T": "node", "uid": "n4", "data": 4},
+ {"T": "node", "uid": "n5", "data": 5},
+ {"T": "node", "uid": "n6", "data": 6},
+ {"T": "node", "uid": "n7", "data": 7},
+ {"T": "node", "uid": "n8", "data": 8}
+ ],
+ "edges": [
+ {"T": "edge", "uid": "e0", "from": "n0", "to": "n1"},
+ {"T": "edge", "uid": "e1", "from": "n3", "to": "n5"},
+ {"T": "edge", "uid": "e2", "from": "n0", "to": "n2"},
+ {"T": "edge", "uid": "e3", "from": "n2", "to": "n1"},
+ {"T": "edge", "uid": "e4", "from": "n0", "to": "n5"},
+ {"T": "edge", "uid": "e5", "from": "n5", "to": "n7"},
+ {"T": "edge", "uid": "e6", "from": "n6", "to": "n7"},
+ {"T": "edge", "uid": "e7", "from": "n8", "to": "n1"},
+ {"T": "edge", "uid": "e8", "from": "n8", "to": "n7"}
+ ]
+ },
+ {"T": "graph", "uid": "othergraph", "var": "H",
+ "nodes": [
+ "n0", "n1", "n2", "n3", "n4", "n5", "n6", "n7", "n8"
+ ],
+ "edges": [
+ "e0", "e1", "e2", "e3", "e4", "e5", "e6", "e7", "e8",
+ {"T": "edge", "uid": "e9", "from": "n4", "to": "n7"},
+ {"T": "edge", "uid": "e10", "from": "n1", "to": "n4"}
+ ]
+ }
+
+]
diff --git a/setup.py b/setup.py
index 276195b..e6337ee 100644
--- a/setup.py
+++ b/setup.py
@@ -6,4 +6,10 @@ setup(name='algviz',
version=version,
author=['Anna Gorbenko', 'Jonathan Homburg', 'John McGowan', 'Doni Ivanov', 'Eyal Minsky-Fenick', 'Oliver Kisielius'], # feel free to change this, too
url=r'https://github.com/Agizin/Algorithm-Visualization',
- packages=find_packages(exclude=['ez_setup',]))
+ packages=find_packages(exclude=['ez_setup',]),
+ entry_points={
+ "console_scripts": [
+ "algviz_graph_mockup=algviz.tools.graph_drawing_mockup:main",
+ ]},
+ install_requires=['pygraphviz'],
+)
| Toy tool to make pictures of graphs
Please implement a tool to parse JSON descriptions of graphs (using the `algviz.parser.json_objects` module) and produce pictures of graphs directly with pygraphviz.
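A rough sketch of what the tool's core could look like, mirroring the helpers added in the patch above (`decode_snapshot_text`, the `Snapshot.names` lookup, and `example_objects/example_graph.json` are all taken from it; the pygraphviz calls are standard `AGraph` API):
``` python
import pygraphviz as pgv
from algviz.parser import json_objects

# parse one snapshot from a JSON object-specification file
with open("example_objects/example_graph.json") as f:
    snapshot = json_objects.decode_snapshot_text(f.read())

graph = snapshot.names["G"]  # look the graph up by its 'var' name
agraph = pgv.AGraph(directed=True)
agraph.add_nodes_from(node.uid for node in graph.nodes)
for edge in graph.edges:
    agraph.add_edge(edge.orig.uid, edge.dest.uid)
agraph.layout(prog="dot")
agraph.draw("example_graph.svg", format="svg")
```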
It's about time we had some fun. | Agizin/Algorithm-Visualization | diff --git a/algviz/parser/test_structures.py b/algviz/parser/test_structures.py
index 581f220..303f843 100644
--- a/algviz/parser/test_structures.py
+++ b/algviz/parser/test_structures.py
@@ -12,5 +12,25 @@ class DataStructuresTestCase(unittest.TestCase):
self.assertFalse(structures.Null)
self.assertEqual(hash(structures.Null), hash(structures.Null))
+class ObjectTableTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.obj_tab = structures.ObjectTable()
+
+ def test_null_always_in_table(self):
+ self.assertIn(structures.ObjectTableReference(structures.Null.uid),
+ self.obj_tab)
+
+ def test_keys_must_be_object_table_references(self):
+ obj = structures.Widget(uid="some_kinda_widget")
+ with self.assertRaises(TypeError):
+ self.obj_tab[obj.uid] = obj
+ # make sure the key didn't go in before the error got thrown
+ self.assertNotIn(obj.uid, self.obj_tab)
+
+ def test_getuid_convenience_method(self):
+ self.assertEqual(self.obj_tab.getuid(structures.Null.uid),
+ structures.Null)
+
if __name__ == "__main__":
unittest.main()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 3
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/Agizin/Algorithm-Visualization.git@c846f54fae8bf5981bde0df534781ac3db0dcc78#egg=algviz
exceptiongroup==1.2.2
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
tomli==2.2.1
| name: Algorithm-Visualization
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- tomli==2.2.1
prefix: /opt/conda/envs/Algorithm-Visualization
| [
"algviz/parser/test_structures.py::ObjectTableTestCase::test_getuid_convenience_method"
]
| []
| [
"algviz/parser/test_structures.py::DataStructuresTestCase::test_Null",
"algviz/parser/test_structures.py::ObjectTableTestCase::test_keys_must_be_object_table_references",
"algviz/parser/test_structures.py::ObjectTableTestCase::test_null_always_in_table"
]
| []
| null | 1,953 | [
"setup.py",
"algviz/parser/structures.py",
"example_objects/README.md",
"algviz/tools/__init__.py",
"algviz/tools/graph_drawing_mockup.py",
"example_objects/example_graph.json",
"algviz/parser/json_objects.py"
]
| [
"setup.py",
"algviz/parser/structures.py",
"example_objects/README.md",
"algviz/tools/__init__.py",
"algviz/tools/graph_drawing_mockup.py",
"example_objects/example_graph.json",
"algviz/parser/json_objects.py"
]
|
|
globus__globus-sdk-python-261 | 803aa674e145f5a386f6f032264656206b7e0dde | 2017-12-11 18:25:37 | 803aa674e145f5a386f6f032264656206b7e0dde | diff --git a/globus_sdk/auth/token_response.py b/globus_sdk/auth/token_response.py
index 8b50dba3..65c8688e 100644
--- a/globus_sdk/auth/token_response.py
+++ b/globus_sdk/auth/token_response.py
@@ -2,6 +2,7 @@ import logging
import json
import requests
import time
+import six
import jwt
@@ -24,10 +25,71 @@ def _convert_token_info_dict(source_dict):
'access_token': source_dict['access_token'],
'refresh_token': source_dict.get('refresh_token'),
'token_type': source_dict.get('token_type'),
- 'expires_at_seconds': int(time.time() + expires_in)
+ 'expires_at_seconds': int(time.time() + expires_in),
+ 'resource_server': source_dict['resource_server']
}
+class _ByScopesGetter(object):
+ """
+ A fancy dict-like object for looking up token data by scope name.
+ Allows usage like
+
+ >>> tokens = OAuthTokenResponse(...)
+ >>> tok = tokens.by_scopes['openid profile']['access_token']
+ """
+ def __init__(self, scope_map):
+ self.scope_map = scope_map
+
+ def __str__(self):
+ return json.dumps(self.scope_map)
+
+ def __iter__(self):
+ """iteration gets you every individual scope"""
+ return iter(self.scope_map.keys())
+
+ def __getitem__(self, scopename):
+ if not isinstance(scopename, six.string_types):
+ raise KeyError('by_scopes cannot contain non-string value "{}"'
+ .format(scopename))
+
+ # split on spaces
+ scopes = scopename.split()
+ # collect every matching token in a set to dedup
+ # but collect actual results (dicts) in a list
+ rs_names = set()
+ toks = []
+ for scope in scopes:
+ try:
+ rs_names.add(self.scope_map[scope]['resource_server'])
+ toks.append(self.scope_map[scope])
+ except KeyError:
+ raise KeyError(('Scope specifier "{}" contains scope "{}" '
+ "which was not found"
+ ).format(scopename, scope))
+ # if there isn't exactly 1 token, it's an error
+ if len(rs_names) != 1:
+ raise KeyError(
+ 'Scope specifier "{}" did not match exactly one token!'
+ .format(scopename))
+ # every entry in toks is the same token dict, so return any one of them
+ return toks.pop()
+
+ def __contains__(self, item):
+ """
+ contains is driven by checking against getitem
+ that way, the definitions are always "in sync" if we update them in
+ the future
+ """
+ try:
+ self.__getitem__(item)
+ return True
+ except KeyError:
+ pass
+
+ return False
+
+
class OAuthTokenResponse(GlobusHTTPResponse):
"""
Class for responses from the OAuth2 code for tokens exchange used in
@@ -36,11 +98,20 @@ class OAuthTokenResponse(GlobusHTTPResponse):
def __init__(self, *args, **kwargs):
GlobusHTTPResponse.__init__(self, *args, **kwargs)
self._init_rs_dict()
+ self._init_scopes_getter()
+
+ def _init_scopes_getter(self):
+ scope_map = {}
+ for rs, tok_data in self._by_resource_server.items():
+ for s in tok_data["scope"].split():
+ scope_map[s] = tok_data
+ self._by_scopes = _ByScopesGetter(scope_map)
def _init_rs_dict(self):
# call the helper at the top level
self._by_resource_server = {
- self['resource_server']: _convert_token_info_dict(self)}
+ self['resource_server']: _convert_token_info_dict(self)
+ }
# call the helper on everything in 'other_tokens'
self._by_resource_server.update(dict(
(unprocessed_item['resource_server'],
@@ -59,6 +130,29 @@ class OAuthTokenResponse(GlobusHTTPResponse):
"""
return self._by_resource_server
+ @property
+ def by_scopes(self):
+ """
+ Representation of the token response in a dict-like object indexed by
+ scope name (or even space delimited scope names, so long as they match
+ the same token).
+
+ If you request scopes `scope1 scope2 scope3`, where `scope1` and
+ `scope2` are for the same service (and therefore map to the same
+ token), but `scope3` is for a different service, the following forms of
+ access are valid:
+
+ >>> tokens = ...
+ >>> # single scope
+ >>> token_data = tokens.by_scopes['scope1']
+ >>> token_data = tokens.by_scopes['scope2']
+ >>> token_data = tokens.by_scopes['scope3']
+ >>> # matching scopes
+ >>> token_data = tokens.by_scopes['scope1 scope2']
+ >>> token_data = tokens.by_scopes['scope2 scope1']
+ """
+ return self._by_scopes
+
def decode_id_token(self, auth_client=None):
"""
A parsed ID Token (OIDC) as a dict.
| Add `by_scopes` form (or similar) to token response
This is pretty much just another presentation of the same data that we put into `OAuthTokenResponse.by_resource_server`.
Although instinctively I'd just generate this as a dict, I don't think that's right. IMO, `tokens.by_scopes['openid profile']` should do the same thing as `tokens.by_scopes['openid']`
So, `by_scopes` should really be a small class whose `__getitem__` splits the key on spaces, looks up each scope, and raises `KeyError` unless they all resolve to exactly one token.
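For example, assuming `openid` and `profile` resolve to a single token while `transfer` (an illustrative scope name here) maps to a second one, the intended semantics would be:
``` python
tok = tokens.by_scopes['openid']['access_token']  # single-scope lookup
tok = tokens.by_scopes['openid profile']          # both map to one token: fine
tok = tokens.by_scopes['profile openid']          # order doesn't matter
tok = tokens.by_scopes['openid transfer']         # KeyError: spans two tokens
```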
We _could_ do this with `tokens.get_by_scopes('openid profile')`, but I don't think this is in any way fundamentally easier than a dict-like with sophisticated access.
Is this too fancy? | globus/globus-sdk-python | diff --git a/tests/unit/responses/test_token_response.py b/tests/unit/responses/test_token_response.py
index e9d0e9e6..fd9f993a 100644
--- a/tests/unit/responses/test_token_response.py
+++ b/tests/unit/responses/test_token_response.py
@@ -35,7 +35,8 @@ class OAuthTokenResponseTests(CapturedIOTestCase):
"id_token": "invalid_id_token",
"access_token": SDKTESTER1A_ID_ACCESS_TOKEN}
self.other_token2 = { # valid id_token with invalid access_token
- "resource_server": "server3", "expires_in": 30, "scope": "scope3",
+ "resource_server": "server3", "expires_in": 30,
+ "scope": "scope3 scope4",
"refresh_token": "RT3", "other_tokens": [], "token_type": "3",
"id_token": SDKTESTER1A_NATIVE1_ID_TOKEN,
"access_token": "invalid_access_token"}
@@ -114,6 +115,35 @@ class OAuthTokenResponseTests(CapturedIOTestCase):
self.assertIn(server_data["expires_at_seconds"],
(expected - 1, expected, expected + 1))
+ def test_by_scopes(self):
+ """
+ Gets by_scopes attribute from test response,
+ Confirms expected values found for top and other tokens
+ """
+ by_scopes = self.response.by_scopes
+
+ # confirm data by server matches known token values
+ for scope, token in [("scope1", self.top_token),
+ ("scope2", self.other_token1),
+ ("scope3", self.other_token2),
+ ("scope4", self.other_token2),
+ ("scope3 scope4", self.other_token2),
+ ("scope4 scope3", self.other_token2)]:
+ scope_data = by_scopes[scope]
+ for key in ["scope", "access_token",
+ "refresh_token", "token_type"]:
+ self.assertEqual(scope_data[key], token[key])
+ # assumes test runs within 1 second range
+ expected = int(time.time()) + token["expires_in"]
+ self.assertIn(scope_data["expires_at_seconds"],
+ (expected - 1, expected, expected + 1))
+
+ self.assertIn('scope1', by_scopes)
+ self.assertIn('scope3', by_scopes)
+ self.assertNotIn('scope1 scope2', by_scopes)
+ self.assertNotIn('scope1 scope3', by_scopes)
+ self.assertIn('scope4 scope3', by_scopes)
+
@retry_errors()
def test_decode_id_token_invalid_id(self):
"""
@@ -159,7 +189,8 @@ class OAuthDependentTokenResponseTests(CapturedIOTestCase):
"resource_server": "server2", "expires_in": 20, "scope": "scope2",
"access_token": "AT2", "refresh_token": "RT2", "token_type": "2"}
self.token3 = {
- "resource_server": "server3", "expires_in": 30, "scope": "scope3",
+ "resource_server": "server3", "expires_in": 30,
+ "scope": "scope3 scope4",
"access_token": "AT3", "refresh_token": "RT3", "token_type": "3"}
# create the response
@@ -188,3 +219,32 @@ class OAuthDependentTokenResponseTests(CapturedIOTestCase):
expected = int(time.time()) + token["expires_in"]
self.assertIn(server_data["expires_at_seconds"],
(expected - 1, expected, expected + 1))
+
+ def test_by_scopes(self):
+ """
+ Gets by_scopes attribute from test response,
+ Confirms expected values found for top and other tokens
+ """
+ by_scopes = self.response.by_scopes
+
+ # confirm data by server matches known token values
+ for scope, token in [("scope1", self.token1),
+ ("scope2", self.token2),
+ ("scope3", self.token3),
+ ("scope4", self.token3),
+ ("scope3 scope4", self.token3),
+ ("scope4 scope3", self.token3)]:
+ scope_data = by_scopes[scope]
+ for key in ["scope", "access_token",
+ "refresh_token", "token_type"]:
+ self.assertEqual(scope_data[key], token[key])
+ # assumes test runs within 1 second range
+ expected = int(time.time()) + token["expires_in"]
+ self.assertIn(scope_data["expires_at_seconds"],
+ (expected - 1, expected, expected + 1))
+
+ self.assertIn('scope1', by_scopes)
+ self.assertIn('scope3', by_scopes)
+ self.assertNotIn('scope1 scope2', by_scopes)
+ self.assertNotIn('scope1 scope3', by_scopes)
+ self.assertIn('scope4 scope3', by_scopes)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": -1
},
"num_modified_files": 1
} | 1.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"flake8",
"nose2",
"mock",
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
cryptography==40.0.2
flake8==5.0.4
-e git+https://github.com/globus/globus-sdk-python.git@803aa674e145f5a386f6f032264656206b7e0dde#egg=globus_sdk
idna==3.10
importlib-metadata==4.2.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
mccabe==0.7.0
mock==5.2.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
nose2==0.13.0
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pycodestyle==2.9.1
pycparser==2.21
pyflakes==2.5.0
PyJWT==1.7.1
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
requests==2.27.1
six==1.17.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.26.20
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: globus-sdk-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- cffi==1.15.1
- charset-normalizer==2.0.12
- cryptography==40.0.2
- flake8==5.0.4
- idna==3.10
- importlib-metadata==4.2.0
- mccabe==0.7.0
- mock==5.2.0
- nose2==0.13.0
- pycodestyle==2.9.1
- pycparser==2.21
- pyflakes==2.5.0
- pyjwt==1.7.1
- requests==2.27.1
- six==1.17.0
- urllib3==1.26.20
prefix: /opt/conda/envs/globus-sdk-python
| [
"tests/unit/responses/test_token_response.py::OAuthTokenResponseTests::test_by_scopes",
"tests/unit/responses/test_token_response.py::OAuthDependentTokenResponseTests::test_by_scopes"
]
| []
| [
"tests/unit/responses/test_token_response.py::OAuthTokenResponseTests::test_by_resource_server",
"tests/unit/responses/test_token_response.py::OAuthTokenResponseTests::test_convert_token_info_dict",
"tests/unit/responses/test_token_response.py::OAuthTokenResponseTests::test_decode_id_token_expired",
"tests/unit/responses/test_token_response.py::OAuthTokenResponseTests::test_decode_id_token_invalid_id",
"tests/unit/responses/test_token_response.py::OAuthDependentTokenResponseTests::test_by_resource_server"
]
| []
| Apache License 2.0 | 1,954 | [
"globus_sdk/auth/token_response.py"
]
| [
"globus_sdk/auth/token_response.py"
]
|
|
elastic__elasticsearch-dsl-py-795 | 1cb17a0be2cf48c5e4cf5df9ef55d5fc1a202f18 | 2017-12-13 12:39:10 | 1cb17a0be2cf48c5e4cf5df9ef55d5fc1a202f18 | diff --git a/Changelog.rst b/Changelog.rst
index f29287b..f001879 100644
--- a/Changelog.rst
+++ b/Changelog.rst
@@ -3,13 +3,39 @@
Changelog
=========
-5.4.0 (dev)
+6.0.0 (dev)
-----------
+
+Release compatible with elasticsearch 6.0, changes include:
+
+ * use ``doc`` as the default ``DocType`` name; this change includes:
+ * ``DocType._doc_type.matches`` method is now used to determine which
+ ``DocType`` should be used for a hit instead of just checking ``_type``
+ * ``Nested`` and ``Object`` field refactoring using newly introduced
+ ``InnerDoc`` class. To define a ``Nested``/``Object`` field just define the
+ ``InnerDoc`` subclass and then use it when defining the field::
+
+ class Comment(InnerDoc):
+ body = Text()
+ created_at = Date()
+
+ class Blog(DocType):
+ comments = Nested(Comment)
+
+ * methods on ``connections`` singleton are now exposed on the ``connections``
+ module directly.
+ * field values are now only deserialized when coming from elasticsearch (via
+ ``from_es`` method) and not when assigning values in python (either by
+ direct assignment or in ``__init__``).
+
+5.4.0 (2017-12-06)
+------------------
* fix ``ip_range`` aggregation and rename the class to ``IPRange``.
``Iprange`` is kept for bw compatibility
* fix bug in loading an aggregation with meta data from dict
- * add support for ``normalizer`` paramter of ``Keyword`` fields
+ * add support for ``normalizer`` parameter of ``Keyword`` fields
* ``IndexTemplate`` can now be specified using the same API as ``Index``
+ * ``Boolean`` field now accepts ``"false"`` as ``False``
5.3.0 (2017-05-18)
------------------
diff --git a/docs/configuration.rst b/docs/configuration.rst
index 7266efe..4439885 100644
--- a/docs/configuration.rst
+++ b/docs/configuration.rst
@@ -51,7 +51,7 @@ To define a default connection that will be used globally, use the
.. code:: python
- from elasticsearch_dsl.connections import connections
+ from elasticsearch_dsl import connections
connections.create_connection(hosts=['localhost'], timeout=20)
@@ -68,7 +68,7 @@ time using the ``configure`` method:
.. code:: python
- from elasticsearch_dsl.connections import connections
+ from elasticsearch_dsl import connections
connections.configure(
default={'hosts': 'localhost'},
diff --git a/docs/persistence.rst b/docs/persistence.rst
index 2698c27..5fab1a8 100644
--- a/docs/persistence.rst
+++ b/docs/persistence.rst
@@ -9,7 +9,11 @@ layer for your application.
Mappings
--------
-The mapping definition follows a similar pattern to the query dsl:
+If you wish to create mappings manually, you can use the ``Mapping`` class; for
+more advanced use cases, however, we recommend you use the :ref:`doc_type`
+abstraction in combination with :ref:`index` (or ``IndexTemplate``) to define
+index-level settings and properties. The mapping definition follows a similar
+pattern to the query dsl:
.. code:: python
@@ -25,9 +29,11 @@ The mapping definition follows a similar pattern to the query dsl:
m.field('category', 'text', fields={'raw': Keyword()})
# you can also create a field manually
- comment = Nested()
- comment.field('author', Text())
- comment.field('created_at', Date())
+ comment = Nested(
+ properties={
+ 'author': Text(),
+ 'created_at': Date()
+ })
# and attach it to the mapping
m.field('comments', comment)
@@ -99,6 +105,8 @@ specify type (``nGram`` in our example).
either not exist or be closed. To create multiple ``DocType``-defined
mappings you can use the :ref:`index` object.
+.. _doc_type:
+
DocType
-------
@@ -109,7 +117,7 @@ If you want to create a model-like wrapper around your documents, use the
from datetime import datetime
from elasticsearch_dsl import DocType, Date, Nested, Boolean, \
- analyzer, InnerObjectWrapper, Completion, Keyword, Text
+ analyzer, InnerDoc, Completion, Keyword, Text
html_strip = analyzer('html_strip',
tokenizer="standard",
@@ -117,7 +125,11 @@ If you want to create a model-like wrapper around your documents, use the
char_filter=["html_strip"]
)
- class Comment(InnerObjectWrapper):
+ class Comment(InnerDoc):
+ author = Text(fields={'raw': Keyword()})
+ content = Text(analyzer='snowball')
+ created_at = Date()
+
def age(self):
return datetime.now() - self.created_at
@@ -131,21 +143,14 @@ If you want to create a model-like wrapper around your documents, use the
fields={'raw': Keyword()}
)
- comments = Nested(
- doc_class=Comment,
- properties={
- 'author': Text(fields={'raw': Keyword()}),
- 'content': Text(analyzer='snowball'),
- 'created_at': Date()
- }
- )
+ comments = Nested(Comment)
class Meta:
index = 'blog'
def add_comment(self, author, content):
self.comments.append(
- {'author': author, 'content': content})
+ Comment(author=author, content=content, created_at=datetime.now()))
def save(self, ** kwargs):
self.created_at = datetime.now()
@@ -199,7 +204,7 @@ explicitly:
first.save()
-All the metadata fields (``id``, ``parent``, ``routing``, ``index`` etc) can be
+All the metadata fields (``id``, ``routing``, ``index`` etc) can be
accessed (and set) via a ``meta`` attribute or directly using the underscored
variant:
@@ -271,16 +276,12 @@ accessed through the ``_doc_type`` attribute of the class:
.. code:: python
- # name of the type and index in elasticsearch
- Post._doc_type.name
+ # name of the index in elasticsearch
Post._doc_type.index
# the raw Mapping object
Post._doc_type.mapping
- # the optional name of the parent type (if defined)
- Post._doc_type.parent
-
The ``_doc_type`` attribute is also home to the ``refresh`` method which will
update the mapping on the ``DocType`` from elasticsearch. This is very useful
if you use dynamic mappings and want the class to be aware of those fields (for
@@ -352,8 +353,8 @@ In the ``Meta`` class inside your document definition you can define various
metadata for your document:
``doc_type``
- name of the doc_type in elasticsearch. By default it will be constructed from
- the class name (MyDocument -> my_document)
+ name of the doc_type in elasticsearch. By default it will be set to ``doc``,
+ it is not recommended to change.
``index``
default index for the document, by default it is empty and every operation
@@ -366,8 +367,15 @@ metadata for your document:
optional instance of ``Mapping`` class to use as base for the mappings
created from the fields on the document class itself.
+``matches(self, hit)``
+ method that returns ``True`` if a given raw hit (``dict`` returned from
+ elasticsearch) should be deserialized using this ``DocType`` subclass. Can be
+ overridden; by default it will just check that the values for ``_index`` (including
+ any wildcard expansions) and ``_type`` in the document match those in
+ ``_doc_type``.
+
Any attributes on the ``Meta`` class that are instance of ``MetaField`` will be
-used to control the mapping of the meta fields (``_all``, ``_parent`` etc).
+used to control the mapping of the meta fields (``_all``, ``dynamic`` etc).
Just name the parameter (without the leading underscore) as the field you wish
to map and pass any parameters to the ``MetaField`` class:
@@ -378,7 +386,6 @@ to map and pass any parameters to the ``MetaField`` class:
class Meta:
all = MetaField(enabled=False)
- parent = MetaField(type='blog')
dynamic = MetaField('strict')
.. _index:
diff --git a/elasticsearch_dsl/__init__.py b/elasticsearch_dsl/__init__.py
index aae0c1b..a8890de 100644
--- a/elasticsearch_dsl/__init__.py
+++ b/elasticsearch_dsl/__init__.py
@@ -3,12 +3,12 @@ from .aggs import A
from .function import SF
from .search import Search, MultiSearch
from .field import *
-from .document import DocType, MetaField
+from .document import DocType, MetaField, InnerDoc
from .mapping import Mapping
from .index import Index, IndexTemplate
from .analysis import analyzer, token_filter, char_filter, tokenizer
from .faceted_search import *
-VERSION = (5, 4, 0, 'dev')
+VERSION = (6, 0, 0, 'dev')
__version__ = VERSION
__versionstr__ = '.'.join(map(str, VERSION))
diff --git a/elasticsearch_dsl/connections.py b/elasticsearch_dsl/connections.py
index bdc62e0..80d33a2 100644
--- a/elasticsearch_dsl/connections.py
+++ b/elasticsearch_dsl/connections.py
@@ -94,3 +94,8 @@ class Connections(object):
raise KeyError('There is no connection with alias %r.' % alias)
connections = Connections()
+configure = connections.configure
+add_connection = connections.add_connection
+remove_connection = connections.remove_connection
+create_connection = connections.create_connection
+get_connection = connections.get_connection
diff --git a/elasticsearch_dsl/document.py b/elasticsearch_dsl/document.py
index 8b94f2f..bb8d1ef 100644
--- a/elasticsearch_dsl/document.py
+++ b/elasticsearch_dsl/document.py
@@ -1,26 +1,17 @@
import collections
-import re
+from fnmatch import fnmatch
from elasticsearch.exceptions import NotFoundError, RequestError
from six import iteritems, add_metaclass
from .field import Field
from .mapping import Mapping
-from .utils import ObjectBase, AttrDict, merge
+from .utils import ObjectBase, AttrDict, merge, DOC_META_FIELDS, META_FIELDS
from .response import HitMeta
from .search import Search
from .connections import connections
from .exceptions import ValidationException, IllegalOperation
-DOC_META_FIELDS = frozenset((
- 'id', 'routing', 'version', 'version_type'
-))
-
-META_FIELDS = frozenset((
- # Elasticsearch metadata fields, except 'type'
- 'index', 'using', 'score',
-)).union(DOC_META_FIELDS)
-
class MetaField(object):
def __init__(self, *args, **kwargs):
@@ -44,10 +35,8 @@ class DocTypeOptions(object):
# default cluster alias, can be overriden in doc.meta
self._using = getattr(meta, 'using', None)
- # get doc_type name, if not defined take the name of the class and
- # transform it to lower_case
- doc_type = getattr(meta, 'doc_type',
- re.sub(r'(.)([A-Z])', r'\1_\2', name).lower())
+ # get doc_type name, if not defined use 'doc'
+ doc_type = getattr(meta, 'doc_type', 'doc')
# create the mapping instance
self.mapping = getattr(meta, 'mapping', Mapping(doc_type))
@@ -72,6 +61,9 @@ class DocTypeOptions(object):
self._using = self._using or b._doc_type._using
self.index = self.index or b._doc_type.index
+ # custom method to determine if a hit belongs to this DocType
+ self._matches = getattr(meta, 'matches', None)
+
@property
def using(self):
return self._using or 'default'
@@ -95,6 +87,20 @@ class DocTypeOptions(object):
def refresh(self, index=None, using=None):
self.mapping.update_from_es(index or self.index, using=using or self.using)
+ def matches(self, hit):
+ if self._matches is not None:
+ return self._matches(hit)
+
+ return (
+ self.index is None or fnmatch(hit.get('_index', ''), self.index)
+ ) and self.name == hit.get('_type')
+
+@add_metaclass(DocTypeMeta)
+class InnerDoc(ObjectBase):
+ """
+ Common class for inner documents like Object or Nested
+ """
+
@add_metaclass(DocTypeMeta)
class DocType(ObjectBase):
@@ -250,27 +256,6 @@ class DocType(ObjectBase):
raise NotFoundError(404, message, {'docs': missing_docs})
return objs
- @classmethod
- def from_es(cls, hit):
- """
- Helper method to construct an instance from a dictionary returned by
- elasticsearch.
- """
- # don't modify in place
- meta = hit.copy()
- doc = meta.pop('_source', {})
-
- if 'fields' in meta:
- for k, v in iteritems(meta.pop('fields')):
- if k == '_source':
- doc.update(v)
- if k.startswith('_') and k[1:] in META_FIELDS:
- meta[k] = v
- else:
- doc[k] = v
-
- return cls(meta=meta, **doc)
-
def _get_connection(self, using=None):
return connections.get_connection(using or self._doc_type.using)
connection = property(_get_connection)
diff --git a/elasticsearch_dsl/field.py b/elasticsearch_dsl/field.py
index 418ccc5..ab6f7d8 100644
--- a/elasticsearch_dsl/field.py
+++ b/elasticsearch_dsl/field.py
@@ -2,7 +2,7 @@ import collections
from datetime import date, datetime
from dateutil import parser, tz
-from six import itervalues, string_types
+from six import itervalues, string_types, iteritems
from six.moves import map
from .utils import DslBase, ObjectBase, AttrDict, AttrList
@@ -99,70 +99,50 @@ class CustomField(Field):
d['type'] = self.builtin_type
return d
-class InnerObjectWrapper(ObjectBase):
- def __init__(self, mapping, **kwargs):
- # mimic DocType behavior with _doc_type.mapping
- super(AttrDict, self).__setattr__('_doc_type', type('Meta', (), {'mapping': mapping}))
- super(InnerObjectWrapper, self).__init__(**kwargs)
-
-
-class InnerObject(object):
- " Common functionality for nested and object fields. "
- _param_defs = {'properties': {'type': 'field', 'hash': True}}
+class Object(Field):
+ name = 'object'
_coerce = True
- def __init__(self, *args, **kwargs):
- self._doc_class = kwargs.pop('doc_class', InnerObjectWrapper)
- super(InnerObject, self).__init__(*args, **kwargs)
+ def __init__(self, doc_class=None, **kwargs):
+ self._doc_class = doc_class
+ if doc_class is None:
+ # FIXME import
+ from .document import InnerDoc
+ # no InnerDoc subclass, creating one instead...
+ self._doc_class = type('InnerDoc', (InnerDoc, ), {})
+ for name, field in iteritems(kwargs.pop('properties', {})):
+ self._doc_class._doc_type.mapping.field(name, field)
+ if 'dynamic' in kwargs:
+ self._doc_class._doc_type.mapping.meta('dynamic', kwargs.pop('dynamic'))
+
+ self._mapping = self._doc_class._doc_type.mapping
+ super(Object, self).__init__(**kwargs)
+
+ def __getitem__(self, name):
+ return self._mapping[name]
- def field(self, name, *args, **kwargs):
- self.properties[name] = construct_field(*args, **kwargs)
- return self
- # XXX: backwards compatible, will be removed
- property = field
+ def __contains__(self, name):
+ return name in self._mapping
def _empty(self):
- return self._doc_class(self.properties)
+ return self._wrap({})
def _wrap(self, data):
- return self._doc_class(self.properties, **data)
+ return self._doc_class(**data)
def empty(self):
if self._multi:
return AttrList([], self._wrap)
return self._empty()
- def __getitem__(self, name):
- return self.properties[name]
-
- def __contains__(self, name):
- return name in self.properties
+ def to_dict(self):
+ d = self._mapping.to_dict()
+ _, d = d.popitem()
+ d["type"] = self.name
+ return d
def _collect_fields(self):
- " Iterate over all Field objects within, including multi fields. "
- for f in itervalues(self.properties.to_dict()):
- yield f
- # multi fields
- if hasattr(f, 'fields'):
- for inner_f in itervalues(f.fields.to_dict()):
- yield inner_f
- # nested and inner objects
- if hasattr(f, '_collect_fields'):
- for inner_f in f._collect_fields():
- yield inner_f
-
- def update(self, other_object):
- if not hasattr(other_object, 'properties'):
- # not an inner/nested object, no merge possible
- return
-
- our, other = self.properties, other_object.properties
- for name in other:
- if name in our:
- if hasattr(our[name], 'update'):
- our[name].update(other[name])
- continue
- our[name] = other[name]
+ return self._mapping.properties._collect_fields()
def _deserialize(self, data):
if data is None:
@@ -171,10 +151,6 @@ class InnerObject(object):
if isinstance(data, self._doc_class):
return data
- if isinstance(data, (list, AttrList)):
- data[:] = list(map(self._deserialize, data))
- return data
-
if isinstance(data, AttrDict):
data = data._d_
@@ -183,10 +159,15 @@ class InnerObject(object):
def _serialize(self, data):
if data is None:
return None
+
+ # somebody assigned raw dict to the field, we should tolerate that
+ if isinstance(data, collections.Mapping):
+ return data
+
return data.to_dict()
def clean(self, data):
- data = super(InnerObject, self).clean(data)
+ data = super(Object, self).clean(data)
if data is None:
return None
if isinstance(data, (list, AttrList)):
@@ -196,15 +177,17 @@ class InnerObject(object):
data.full_clean()
return data
+ def update(self, other):
+ if not isinstance(other, Object):
+ # not an inner/nested object, no merge possible
+ return
-class Object(InnerObject, Field):
- name = 'object'
+ self._mapping.update(other._mapping)
-class Nested(InnerObject, Field):
+class Nested(Object):
name = 'nested'
def __init__(self, *args, **kwargs):
- # change the default for Nested fields
kwargs.setdefault('multi', True)
super(Nested, self).__init__(*args, **kwargs)
diff --git a/elasticsearch_dsl/mapping.py b/elasticsearch_dsl/mapping.py
index 385416d..bdfa012 100644
--- a/elasticsearch_dsl/mapping.py
+++ b/elasticsearch_dsl/mapping.py
@@ -1,10 +1,10 @@
import collections
-from six import iteritems
+from six import iteritems, itervalues
from itertools import chain
from .utils import DslBase
-from .field import InnerObject, Text
+from .field import Text, construct_field
from .connections import connections
from .exceptions import IllegalOperation
from .index import Index
@@ -14,7 +14,8 @@ META_FIELDS = frozenset((
'numeric_detection', 'dynamic_templates', 'enabled'
))
-class Properties(InnerObject, DslBase):
+class Properties(DslBase):
+ _param_defs = {'properties': {'type': 'field', 'hash': True}}
def __init__(self, name):
self._name = name
super(Properties, self).__init__()
@@ -22,10 +23,46 @@ class Properties(InnerObject, DslBase):
def __repr__(self):
return 'Properties(%r)' % self._name
+ def __getitem__(self, name):
+ return self.properties[name]
+
+ def __contains__(self, name):
+ return name in self.properties
+
@property
def name(self):
return self._name
+ def field(self, name, *args, **kwargs):
+ self.properties[name] = construct_field(*args, **kwargs)
+ return self
+
+ def _collect_fields(self):
+ " Iterate over all Field objects within, including multi fields. "
+ for f in itervalues(self.properties.to_dict()):
+ yield f
+ # multi fields
+ if hasattr(f, 'fields'):
+ for inner_f in itervalues(f.fields.to_dict()):
+ yield inner_f
+ # nested and inner objects
+ if hasattr(f, '_collect_fields'):
+ for inner_f in f._collect_fields():
+ yield inner_f
+
+ def update(self, other_object):
+ if not hasattr(other_object, 'properties'):
+ # not an inner/nested object, no merge possible
+ return
+
+ our, other = self.properties, other_object.properties
+ for name in other:
+ if name in our:
+ if hasattr(our[name], 'update'):
+ our[name].update(other[name])
+ continue
+ our[name] = other[name]
+
class Mapping(object):
def __init__(self, name):
diff --git a/elasticsearch_dsl/response/__init__.py b/elasticsearch_dsl/response/__init__.py
index 6bbefa0..0041989 100644
--- a/elasticsearch_dsl/response/__init__.py
+++ b/elasticsearch_dsl/response/__init__.py
@@ -3,8 +3,9 @@ from ..utils import AttrDict, AttrList
from .hit import Hit, HitMeta
class Response(AttrDict):
- def __init__(self, search, response):
+ def __init__(self, search, response, doc_class=None):
super(AttrDict, self).__setattr__('_search', search)
+ super(AttrDict, self).__setattr__('_doc_class', doc_class)
super(Response, self).__init__(response)
def __iter__(self):
@@ -27,30 +28,23 @@ class Response(AttrDict):
return len(self.hits)
def __getstate__(self):
- return (self._d_, self._search)
+ return (self._d_, self._search, self._doc_class)
def __setstate__(self, state):
super(AttrDict, self).__setattr__('_d_', state[0])
super(AttrDict, self).__setattr__('_search', state[1])
+ super(AttrDict, self).__setattr__('_doc_class', state[2])
def success(self):
return self._shards.total == self._shards.successful and not self.timed_out
- def _get_result(self, hit):
- dt = hit.get('_type')
- for t in hit.get('inner_hits', ()):
- hit['inner_hits'][t] = Response(self._search, hit['inner_hits'][t])
- callback = self._search._doc_type_map.get(dt, Hit)
- callback = getattr(callback, 'from_es', callback)
- return callback(hit)
-
@property
def hits(self):
if not hasattr(self, '_hits'):
h = self._d_['hits']
try:
- hits = AttrList(map(self._get_result, h['hits']))
+ hits = AttrList(map(self._search._get_result, h['hits']))
except AttributeError as e:
# avoid raising AttributeError since it will be hidden by the property
raise TypeError("Could not parse hits.", e)
diff --git a/elasticsearch_dsl/search.py b/elasticsearch_dsl/search.py
index f18a4f3..94778df 100644
--- a/elasticsearch_dsl/search.py
+++ b/elasticsearch_dsl/search.py
@@ -92,13 +92,12 @@ class Request(object):
self._doc_type = []
self._doc_type_map = {}
if isinstance(doc_type, (tuple, list)):
- for dt in doc_type:
- self._add_doc_type(dt)
+ self._doc_type.extend(doc_type)
elif isinstance(doc_type, collections.Mapping):
self._doc_type.extend(doc_type.keys())
self._doc_type_map.update(doc_type)
elif doc_type:
- self._add_doc_type(doc_type)
+ self._doc_type.append(doc_type)
self._params = {}
self._extra = extra or {}
@@ -148,7 +147,7 @@ class Request(object):
else:
indexes = []
for i in index:
- if isinstance(i, str):
+ if isinstance(i, string_types):
indexes.append(i)
elif isinstance(i, list):
indexes += i
@@ -159,11 +158,52 @@ class Request(object):
return s
- def _add_doc_type(self, doc_type):
- if hasattr(doc_type, '_doc_type'):
- self._doc_type_map[doc_type._doc_type.name] = doc_type
- doc_type = doc_type._doc_type.name
- self._doc_type.append(doc_type)
+ def _get_doc_type(self):
+ """
+ Return a list of doc_type names to be used
+ """
+ return list(set(dt._doc_type.name if hasattr(dt, '_doc_type') else dt for dt in self._doc_type))
+
+ def _resolve_nested(self, field, parent_class=None):
+ doc_class = Hit
+ if hasattr(parent_class, '_doc_type'):
+ nested_field = parent_class._doc_type.resolve_field(field)
+
+ else:
+ for dt in self._doc_type:
+ if not hasattr(dt, '_doc_type'):
+ continue
+ nested_field = dt._doc_type.resolve_field(field)
+ if nested_field is not None:
+ break
+
+ if nested_field is not None:
+ return nested_field._doc_class
+
+ return doc_class
+
+ def _get_result(self, hit, parent_class=None):
+ doc_class = Hit
+ dt = hit.get('_type')
+
+ if '_nested' in hit:
+ doc_class = self._resolve_nested(hit['_nested']['field'], parent_class)
+
+ elif dt in self._doc_type_map:
+ doc_class = self._doc_type_map[dt]
+
+ else:
+ for doc_type in self._doc_type:
+ if hasattr(doc_type, '_doc_type') and doc_type._doc_type.matches(hit):
+ doc_class = doc_type
+ break
+
+ for t in hit.get('inner_hits', ()):
+ hit['inner_hits'][t] = Response(self, hit['inner_hits'][t], doc_class=doc_class)
+
+ callback = getattr(doc_class, 'from_es', doc_class)
+ return callback(hit)
+
def doc_type(self, *doc_type, **kwargs):
"""
@@ -186,8 +226,7 @@ class Request(object):
s._doc_type = []
s._doc_type_map = {}
else:
- for dt in doc_type:
- s._add_doc_type(dt)
+ s._doc_type.extend(doc_type)
s._doc_type.extend(kwargs.keys())
s._doc_type_map.update(kwargs)
return s
@@ -528,6 +567,7 @@ class Search(Request):
Search().highlight('title', fragment_size=50).highlight('body', fragment_size=100)
which will produce::
+
{
"highlight": {
"fields": {
@@ -615,7 +655,7 @@ class Search(Request):
# TODO: failed shards detection
return es.count(
index=self._index,
- doc_type=self._doc_type,
+ doc_type=self._get_doc_type(),
body=d,
**self._params
)['count']
@@ -634,7 +674,7 @@ class Search(Request):
self,
es.search(
index=self._index,
- doc_type=self._doc_type,
+ doc_type=self._get_doc_type(),
body=self.to_dict(),
**self._params
)
@@ -657,12 +697,10 @@ class Search(Request):
es,
query=self.to_dict(),
index=self._index,
- doc_type=self._doc_type,
+ doc_type=self._get_doc_type(),
**self._params
):
- callback = self._doc_type_map.get(hit['_type'], Hit)
- callback = getattr(callback, 'from_es', callback)
- yield callback(hit)
+ yield self._get_result(hit)
def delete(self):
"""
@@ -675,7 +713,7 @@ class Search(Request):
es.delete_by_query(
index=self._index,
body=self.to_dict(),
- doc_type=self._doc_type,
+ doc_type=self._get_doc_type(),
**self._params
)
)
@@ -720,7 +758,7 @@ class MultiSearch(Request):
if s._index:
meta['index'] = s._index
if s._doc_type:
- meta['type'] = s._doc_type
+ meta['type'] = s._get_doc_type()
meta.update(s._params)
out.append(meta)
@@ -737,7 +775,7 @@ class MultiSearch(Request):
responses = es.msearch(
index=self._index,
- doc_type=self._doc_type,
+ doc_type=self._get_doc_type(),
body=self.to_dict(),
**self._params
)
diff --git a/elasticsearch_dsl/utils.py b/elasticsearch_dsl/utils.py
index 192111e..1397edb 100644
--- a/elasticsearch_dsl/utils.py
+++ b/elasticsearch_dsl/utils.py
@@ -10,6 +10,15 @@ from .exceptions import UnknownDslObject, ValidationException
SKIP_VALUES = ('', None)
EXPAND__TO_DOT=True
+DOC_META_FIELDS = frozenset((
+ 'id', 'routing', 'version', 'version_type'
+))
+
+META_FIELDS = frozenset((
+ # Elasticsearch metadata fields, except 'type'
+ 'index', 'using', 'score',
+)).union(DOC_META_FIELDS)
+
def _wrap(val, obj_wrapper=None):
if isinstance(val, collections.Mapping):
return AttrDict(val) if obj_wrapper is None else obj_wrapper(val)
@@ -326,12 +335,25 @@ class DslBase(object):
class ObjectBase(AttrDict):
def __init__(self, **kwargs):
- m = self._doc_type.mapping
- for k in m:
- if k in kwargs and m[k]._coerce:
- kwargs[k] = m[k].deserialize(kwargs[k])
super(ObjectBase, self).__init__(kwargs)
+ @classmethod
+ def from_es(cls, hit):
+ meta = hit.copy()
+ doc = meta.pop('_source', {})
+ if 'fields' in meta:
+ for k, v in iteritems(meta.pop('fields')):
+ if k.startswith('_') and k[1:] in META_FIELDS:
+ meta[k] = v
+ else:
+ doc[k] = v
+
+ m = cls._doc_type.mapping
+ for k in m:
+ if k in doc and m[k]._coerce:
+ doc[k] = m[k].deserialize(doc[k])
+ return cls(meta=meta, **doc)
+
def __getattr__(self, name):
try:
return super(ObjectBase, self).__getattr__(name)
@@ -346,11 +368,6 @@ class ObjectBase(AttrDict):
return value
raise
- def __setattr__(self, name, value):
- if name in self._doc_type.mapping:
- value = self._doc_type.mapping[name].deserialize(value)
- super(ObjectBase, self).__setattr__(name, value)
-
def to_dict(self):
out = {}
for k, v in iteritems(self._d_):
diff --git a/setup.py b/setup.py
index c1838ef..fb20ea8 100644
--- a/setup.py
+++ b/setup.py
@@ -3,7 +3,7 @@ import sys
from os.path import join, dirname
from setuptools import setup, find_packages
-VERSION = (5, 4, 0, 'dev')
+VERSION = (6, 0, 0, 'dev')
__version__ = VERSION
__versionstr__ = '.'.join(map(str, VERSION))
| Search does not support unicode indices
In elasticsearch_dsl/search.py, the `index` method checks whether its arguments are of type `str` before appending them to the list of search indices. On Python 2, this check fails for unicode strings.
Instead, we can use `six.string_types` for Python 2 and 3 compatibility.
This problem may also occur elsewhere.
To reproduce:
```
from elasticsearch_dsl.search import Search
s = Search()
s.index('twitter-2015.01.01') # works
s.index(u'twitter-2015.01.01') # does not work
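# A minimal sketch of the suggested fix (an assumption based on the issue
# text, not the shipped implementation): check against six.string_types
# instead of bare `str`, so unicode index names pass on Python 2 as well.
from six import string_types
assert isinstance(u'twitter-2015.01.01', string_types)  # True on Python 2 and 3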
``` | elastic/elasticsearch-dsl-py | diff --git a/test_elasticsearch_dsl/conftest.py b/test_elasticsearch_dsl/conftest.py
index 754e41e..0bb3af7 100644
--- a/test_elasticsearch_dsl/conftest.py
+++ b/test_elasticsearch_dsl/conftest.py
@@ -13,7 +13,7 @@ from .test_integration.test_data import DATA, FLAT_DATA, create_git_index, \
@fixture(scope='session')
-def client(request):
+def client():
try:
connection = get_test_client(nowait='WAIT_FOR_ES' not in os.environ)
connections.add_connection('default', connection)
@@ -22,13 +22,13 @@ def client(request):
skip()
@fixture
-def write_client(request, client):
+def write_client(client):
yield client
client.indices.delete('test-*', ignore=404)
client.indices.delete_template('test-template', ignore=404)
@fixture
-def mock_client(request):
+def mock_client():
client = Mock()
client.search.return_value = dummy_response()
connections.add_connection('mock', client)
@@ -37,7 +37,7 @@ def mock_client(request):
connections._kwargs = {}
@fixture(scope='session')
-def data_client(request, client):
+def data_client(client):
# create mappings
create_git_index(client, 'git')
create_flat_git_index(client, 'flat-git')
@@ -74,7 +74,7 @@ def dummy_response():
"_type": "employee",
"_id": "42",
"_score": 11.123,
- "_parent": "elasticsearch",
+ "_routing": "elasticsearch",
"_source": {
"name": {
@@ -90,7 +90,7 @@ def dummy_response():
"_type": "employee",
"_id": "47",
"_score": 1,
- "_parent": "elasticsearch",
+ "_routing": "elasticsearch",
"_source": {
"name": {
@@ -106,7 +106,7 @@ def dummy_response():
"_type": "employee",
"_id": "53",
"_score": 16.0,
- "_parent": "elasticsearch",
+ "_routing": "elasticsearch",
},
],
"max_score": 12.0,
@@ -119,7 +119,7 @@ def dummy_response():
@fixture
def aggs_search():
from elasticsearch_dsl import Search
- s = Search(index='git', doc_type='commits')
+ s = Search(index='flat-git')
s.aggs\
.bucket('popular_files', 'terms', field='files', size=2)\
.metric('line_stats', 'stats', field='stats.lines')\
@@ -156,27 +156,23 @@ def aggs_data():
'hits': [
{
'_id': '3ca6e1e73a071a705b4babd2f581c91a2a3e5037',
- '_type': 'commits',
+ '_type': 'doc',
'_source': {
'stats': {'files': 4, 'deletions': 7, 'lines': 30, 'insertions': 23},
'committed_date': '2014-05-02T13:47:19'
},
'_score': 1.0,
- '_parent': 'elasticsearch-dsl-py',
- '_routing': 'elasticsearch-dsl-py',
- '_index': 'git'
+ '_index': 'flat-git'
},
{
'_id': 'eb3e543323f189fd7b698e66295427204fff5755',
- '_type': 'commits',
+ '_type': 'doc',
'_source': {
'stats': {'files': 1, 'deletions': 0, 'lines': 18, 'insertions': 18},
'committed_date': '2014-05-01T13:32:14'
},
'_score': 1.0,
- '_parent': 'elasticsearch-dsl-py',
- '_routing': 'elasticsearch-dsl-py',
- '_index': 'git'
+ '_index': 'flat-git'
}
],
'max_score': 1.0
@@ -193,26 +189,22 @@ def aggs_data():
'hits': [
{
'_id': '3ca6e1e73a071a705b4babd2f581c91a2a3e5037',
- '_type': 'commits',
+ '_type': 'doc',
'_source': {
'stats': {'files': 4, 'deletions': 7, 'lines': 30, 'insertions': 23},
'committed_date': '2014-05-02T13:47:19'
},
'_score': 1.0,
- '_parent': 'elasticsearch-dsl-py',
- '_routing': 'elasticsearch-dsl-py',
- '_index': 'git'
+ '_index': 'flat-git'
}, {
'_id': 'dd15b6ba17dd9ba16363a51f85b31f66f1fb1157',
- '_type': 'commits',
+ '_type': 'doc',
'_source': {
'stats': {'files': 3, 'deletions': 18, 'lines': 62, 'insertions': 44},
'committed_date': '2014-05-01T13:30:44'
},
'_score': 1.0,
- '_parent': 'elasticsearch-dsl-py',
- '_routing': 'elasticsearch-dsl-py',
- '_index': 'git'
+ '_index': 'flat-git'
}
],
'max_score': 1.0
diff --git a/test_elasticsearch_dsl/test_document.py b/test_elasticsearch_dsl/test_document.py
index d65a674..1d31457 100644
--- a/test_elasticsearch_dsl/test_document.py
+++ b/test_elasticsearch_dsl/test_document.py
@@ -3,19 +3,19 @@ import codecs
from hashlib import md5
from datetime import datetime
-from elasticsearch_dsl import document, field, Mapping, utils
+from elasticsearch_dsl import document, field, Mapping, utils, InnerDoc
from elasticsearch_dsl.exceptions import ValidationException, IllegalOperation
from pytest import raises
-class MyInner(field.InnerObjectWrapper):
- pass
+class MyInner(InnerDoc):
+ old_field = field.Text()
class MyDoc(document.DocType):
title = field.Keyword()
name = field.Text()
created_at = field.Date()
- inner = field.Object(properties={'old_field': field.Text()}, doc_class=MyInner)
+ inner = field.Object(MyInner)
class MySubDoc(MyDoc):
name = field.Keyword()
@@ -30,13 +30,12 @@ class MyDoc2(document.DocType):
class MyMultiSubDoc(MyDoc2, MySubDoc):
pass
+class Comment(document.InnerDoc):
+ title = field.Text()
+ tags = field.Keyword(multi=True)
+
class DocWithNested(document.DocType):
- comments = field.Nested(
- properties={
- 'title': field.Text(),
- 'tags': field.Keyword(multi=True)
- }
- )
+ comments = field.Nested(Comment)
class SimpleCommit(document.DocType):
files = field.Text(multi=True)
@@ -61,11 +60,47 @@ class SecretDoc(document.DocType):
title = SecretField(index='no')
class NestedSecret(document.DocType):
- secrets = field.Nested(properties={'title': SecretField()})
+ secrets = field.Nested(SecretDoc)
class OptionalObjectWithRequiredField(document.DocType):
comments = field.Nested(properties={'title': field.Keyword(required=True)})
+def test_matches_uses_index_name_and_doc_type():
+ assert SimpleCommit._doc_type.matches({
+ '_type': 'doc',
+ '_index': 'test-git'
+ })
+ assert not SimpleCommit._doc_type.matches({
+ '_type': 'doc',
+ '_index': 'not-test-git'
+ })
+ assert MySubDoc._doc_type.matches({
+ '_type': 'my_custom_doc',
+ '_index': 'default-index'
+ })
+ assert not MySubDoc._doc_type.matches({
+ '_type': 'doc',
+ '_index': 'default-index'
+ })
+ assert not MySubDoc._doc_type.matches({
+ '_type': 'my_custom_doc',
+ '_index': 'test-git'
+ })
+
+def test_matches_accepts_wildcards():
+ class MyDoc(document.DocType):
+ class Meta:
+ index = 'my-*'
+
+ assert MyDoc._doc_type.matches({
+ '_type': 'doc',
+ '_index': 'my-index'
+ })
+ assert not MyDoc._doc_type.matches({
+ '_type': 'doc',
+ '_index': 'not-my-index'
+ })
+
def test_assigning_attrlist_to_field():
sc = SimpleCommit()
l = ['README', 'README.rst']
@@ -84,13 +119,13 @@ def test_custom_field():
assert {'title': 'Uryyb'} == s.to_dict()
assert s.title == 'Hello'
- s.title = 'Uryyb'
+ s = SecretDoc.from_es({'_source': {'title': 'Uryyb'}})
assert s.title == 'Hello'
assert isinstance(s.title, Secret)
def test_custom_field_mapping():
assert {
- 'secret_doc': {
+ 'doc': {
'properties': {
'title': {'index': 'no', 'type': 'text'}
}
@@ -113,7 +148,7 @@ def test_multi_works_after_doc_has_been_saved():
def test_multi_works_in_nested_after_doc_has_been_serialized():
# Issue #359
- c = DocWithNested(comments=[{'title': 'First!'}])
+ c = DocWithNested(comments=[Comment(title='First!')])
assert [] == c.comments[0].tags
assert {'comments': [{'title': 'First!'}]} == c.to_dict()
@@ -160,7 +195,7 @@ def test_attribute_can_be_removed():
assert 'title' not in d._d_
def test_doc_type_can_be_correctly_pickled():
- d = DocWithNested(title='Hello World!', comments=[{'title': 'hellp'}], meta={'id': 42})
+ d = DocWithNested(title='Hello World!', comments=[Comment(title='hellp')], meta={'id': 42})
s = pickle.dumps(d)
d2 = pickle.loads(s)
@@ -187,7 +222,7 @@ def test_meta_field_mapping():
dynamic_templates = document.MetaField([42])
assert {
- 'user': {
+ 'doc': {
'properties': {
'username': {'type': 'text'}
},
@@ -236,12 +271,14 @@ def test_docs_with_properties():
u.password
def test_nested_can_be_assigned_to():
- d1 = DocWithNested(comments=[{'title': 'First!'}])
+ d1 = DocWithNested(comments=[Comment(title='First!')])
d2 = DocWithNested()
d2.comments = d1.comments
+ assert isinstance(d1.comments[0], Comment)
assert d2.comments == [{'title': 'First!'}]
assert {'comments': [{'title': 'First!'}]} == d2.to_dict()
+ assert isinstance(d2.comments[0], Comment)
def test_nested_can_be_none():
d = DocWithNested(comments=None, title='Hello World!')
@@ -258,7 +295,7 @@ def test_nested_defaults_to_list_and_can_be_updated():
def test_to_dict_is_recursive_and_can_cope_with_multi_values():
md = MyDoc(name=['a', 'b', 'c'])
- md.inner = [{'old_field': 'of1'}, {'old_field': 'of2'}]
+ md.inner = [MyInner(old_field='of1'), MyInner(old_field='of2')]
assert isinstance(md.inner[0], MyInner)
@@ -276,9 +313,8 @@ def test_to_dict_ignores_empty_collections():
def test_declarative_mapping_definition():
assert issubclass(MyDoc, document.DocType)
assert hasattr(MyDoc, '_doc_type')
- assert 'my_doc' == MyDoc._doc_type.name
assert {
- 'my_doc': {
+ 'doc': {
'properties': {
'created_at': {'type': 'date'},
'name': {'type': 'text'},
@@ -331,8 +367,9 @@ def test_document_can_be_created_dynamically():
def test_invalid_date_will_raise_exception():
md = MyDoc()
+ md.created_at = 'not-a-date'
with raises(ValidationException):
- md.created_at = 'not-a-date'
+ md.full_clean()
def test_document_inheritance():
assert issubclass(MySubDoc, MyDoc)
@@ -367,13 +404,11 @@ def test_meta_inheritance():
assert issubclass(MyMultiSubDoc, MyDoc2)
assert issubclass(MyMultiSubDoc, document.DocType)
assert hasattr(MyMultiSubDoc, '_doc_type')
- # doc_type should not be inherited
- assert 'my_multi_sub_doc' == MyMultiSubDoc._doc_type.name
# index and using should be
assert MyMultiSubDoc._doc_type.index == MySubDoc._doc_type.index
assert MyMultiSubDoc._doc_type.using == MySubDoc._doc_type.using
assert {
- 'my_multi_sub_doc': {
+ 'doc': {
'properties': {
'created_at': {'type': 'date'},
'name': {'type': 'keyword'},
diff --git a/test_elasticsearch_dsl/test_field.py b/test_elasticsearch_dsl/test_field.py
index 61466a8..ee74297 100644
--- a/test_elasticsearch_dsl/test_field.py
+++ b/test_elasticsearch_dsl/test_field.py
@@ -64,20 +64,8 @@ def test_multi_fields_are_accepted_and_parsed():
}
} == f.to_dict()
-def test_modifying_nested():
- f = field.Nested()
- f.field('name', 'text', index='not_analyzed')
-
- assert {
- 'type': 'nested',
- 'properties': {
- 'name': {'type': 'text', 'index': 'not_analyzed'}
- },
- } == f.to_dict()
-
def test_nested_provides_direct_access_to_its_fields():
- f = field.Nested()
- f.field('name', 'text', index='not_analyzed')
+ f = field.Nested(properties={'name': {'type': 'text', 'index': 'not_analyzed'}})
assert 'name' in f
assert f['name'] == field.Text(index='not_analyzed')
diff --git a/test_elasticsearch_dsl/test_index.py b/test_elasticsearch_dsl/test_index.py
index 17aa6b7..9d0269d 100644
--- a/test_elasticsearch_dsl/test_index.py
+++ b/test_elasticsearch_dsl/test_index.py
@@ -64,7 +64,7 @@ def test_registered_doc_type_included_in_to_dict():
assert Post._doc_type.index == 'i'
assert {
'mappings': {
- 'post': {
+ 'doc': {
'properties': {
'title': {'type': 'text'},
'published_from': {'type': 'date'},
@@ -79,7 +79,7 @@ def test_registered_doc_type_included_in_search():
s = i.search()
- assert s._doc_type_map == {'post': Post}
+ assert s._doc_type == [Post]
def test_aliases_add_to_object():
diff --git a/test_elasticsearch_dsl/test_integration/test_document.py b/test_elasticsearch_dsl/test_integration/test_document.py
index f87e259..aceaa81 100644
--- a/test_elasticsearch_dsl/test_integration/test_document.py
+++ b/test_elasticsearch_dsl/test_integration/test_document.py
@@ -3,22 +3,23 @@ from pytz import timezone
from elasticsearch import ConflictError, NotFoundError, RequestError
-from elasticsearch_dsl import DocType, Date, Text, Keyword, construct_field, Mapping
+from elasticsearch_dsl import DocType, Date, Text, Keyword, Mapping, InnerDoc, \
+ Object, Nested, MetaField, Q
from elasticsearch_dsl.utils import AttrList
-from pytest import raises
+from pytest import raises, fixture
-user_field = construct_field('object')
-user_field.field('name', 'text', fields={'raw': construct_field('keyword')})
+class User(InnerDoc):
+ name = Text(fields={'raw': Keyword()})
class Wiki(DocType):
- owner = user_field
+ owner = Object(User)
class Meta:
index = 'test-wiki'
class Repository(DocType):
- owner = user_field
+ owner = Object(User)
created_at = Date()
description = Text(analyzer='snowball')
tags = Keyword()
@@ -41,9 +42,49 @@ class Commit(DocType):
doc_type = 'doc'
mapping = Mapping('doc')
+class Comment(InnerDoc):
+ content = Text()
+ author = Object(User)
+ class Meta:
+ dynamic = MetaField(False)
+
+class PullRequest(DocType):
+ comments = Nested(Comment)
+ class Meta:
+ index = 'test-prs'
+
+@fixture
+def pull_request(write_client):
+ PullRequest.init()
+ pr = PullRequest(_id=42, comments=[Comment(content='Hello World!', author=User(name='honzakral'))])
+ pr.save(refresh=True)
+ return pr
+
+def test_nested_inner_hits_are_wrapped_properly(pull_request):
+ s = PullRequest.search().query('nested', inner_hits={}, path='comments',
+ query=Q('match', comments__content='hello'))
+
+ response = s.execute()
+ pr = response.hits[0]
+ assert isinstance(pr, PullRequest)
+ assert isinstance(pr.comments[0], Comment)
+
+ comment = pr.meta.inner_hits.comments.hits[0]
+ assert isinstance(comment, Comment)
+
+def test_nested_top_hits_are_wrapped_properly(pull_request):
+ s = PullRequest.search()
+ s.aggs.bucket('comments', 'nested', path='comments').metric('hits', 'top_hits', size=1)
+
+ r = s.execute()
+
+ print(r._d_)
+ assert isinstance(r.aggregations.comments.hits.hits[0], Comment)
+
+
def test_update_object_field(write_client):
Wiki.init()
- w = Wiki(owner={'name': 'Honza Kral'}, _id='elasticsearch-py')
+ w = Wiki(owner=User(name='Honza Kral'), _id='elasticsearch-py')
w.save()
w.update(owner=[{'name': 'Honza'}, {'name': 'Nick'}])
diff --git a/test_elasticsearch_dsl/test_integration/test_index.py b/test_elasticsearch_dsl/test_integration/test_index.py
index 88c7550..2a2eb53 100644
--- a/test_elasticsearch_dsl/test_integration/test_index.py
+++ b/test_elasticsearch_dsl/test_integration/test_index.py
@@ -16,7 +16,7 @@ def test_index_template_works(write_client):
assert {
'test-blog': {
'mappings': {
- 'post': {
+ 'doc': {
'properties': {
'title': {'type': 'text', 'analyzer': 'my_analyzer'},
'published_from': {'type': 'date'},
@@ -40,7 +40,7 @@ def test_index_can_be_created_with_settings_and_mappings(write_client):
assert {
'test-blog': {
'mappings': {
- 'post': {
+ 'doc': {
'properties': {
'title': {'type': 'text', 'analyzer': 'my_analyzer'},
'published_from': {'type': 'date'}
diff --git a/test_elasticsearch_dsl/test_integration/test_search.py b/test_elasticsearch_dsl/test_integration/test_search.py
index 78435fa..f42342d 100644
--- a/test_elasticsearch_dsl/test_integration/test_search.py
+++ b/test_elasticsearch_dsl/test_integration/test_search.py
@@ -21,11 +21,9 @@ class Repository(DocType):
class Meta:
index = 'git'
- doc_type = 'doc'
class Commit(DocType):
class Meta:
- doc_type = 'doc'
index = 'flat-git'
def test_filters_aggregation_buckets_are_accessible(data_client):
diff --git a/test_elasticsearch_dsl/test_mapping.py b/test_elasticsearch_dsl/test_mapping.py
index 367005f..e1dcacc 100644
--- a/test_elasticsearch_dsl/test_mapping.py
+++ b/test_elasticsearch_dsl/test_mapping.py
@@ -20,17 +20,16 @@ def test_mapping_update_is_recursive():
m1 = mapping.Mapping('article')
m1.field('title', 'text')
m1.field('author', 'object')
- m1['author'].field('name', 'text')
+ m1.field('author', 'object', properties={'name': {'type': 'text'}})
m1.meta('_all', enabled=False)
m1.meta('dynamic', False)
m2 = mapping.Mapping('article')
m2.field('published_from', 'date')
- m2.field('author', 'object')
+ m2.field('author', 'object', properties={'email': {'type': 'text'}})
m2.field('title', 'text')
m2.field('lang', 'keyword')
m2.meta('_analyzer', path='lang')
- m2['author'].field('email', 'text')
m1.update(m2, update_only=True)
diff --git a/test_elasticsearch_dsl/test_result.py b/test_elasticsearch_dsl/test_result.py
index e7b36ed..bb11f65 100644
--- a/test_elasticsearch_dsl/test_result.py
+++ b/test_elasticsearch_dsl/test_result.py
@@ -90,7 +90,7 @@ def test_iterating_over_response_gives_you_hits(dummy_response):
assert 'elasticsearch' == h.meta.id
assert 12 == h.meta.score
- assert hits[1].meta.parent == 'elasticsearch'
+ assert hits[1].meta.routing == 'elasticsearch'
def test_hits_get_wrapped_to_contain_additional_attrs(dummy_response):
res = response.Response(Search(), dummy_response)
diff --git a/test_elasticsearch_dsl/test_search.py b/test_elasticsearch_dsl/test_search.py
index 122bc7d..f8f1caf 100644
--- a/test_elasticsearch_dsl/test_search.py
+++ b/test_elasticsearch_dsl/test_search.py
@@ -162,6 +162,8 @@ def test_search_index():
assert s._index == ['i']
s = s.index('i2')
assert s._index == ['i', 'i2']
+ s = s.index(u'i3')
+ assert s._index == ['i', 'i2', 'i3']
s = s.index()
assert s._index is None
s = search.Search(index=('i', 'i2'))
@@ -209,13 +211,14 @@ def test_doc_type_can_be_document_class():
pass
s = search.Search(doc_type=MyDocType)
- assert s._doc_type == ['my_doc_type']
- assert s._doc_type_map == {'my_doc_type': MyDocType}
+ assert s._doc_type == [MyDocType]
+ assert s._doc_type_map == {}
+ assert s._get_doc_type() == ['doc']
s = search.Search().doc_type(MyDocType)
- assert s._doc_type == ['my_doc_type']
- assert s._doc_type_map == {'my_doc_type': MyDocType}
-
+ assert s._doc_type == [MyDocType]
+ assert s._doc_type_map == {}
+ assert s._get_doc_type() == ['doc']
def test_sort():
s = search.Search()
diff --git a/test_elasticsearch_dsl/test_validation.py b/test_elasticsearch_dsl/test_validation.py
index c84d671..f30c194 100644
--- a/test_elasticsearch_dsl/test_validation.py
+++ b/test_elasticsearch_dsl/test_validation.py
@@ -1,25 +1,21 @@
from datetime import datetime
-from elasticsearch_dsl import DocType, Nested, Text, Date, Object, Boolean, Integer
-from elasticsearch_dsl.field import InnerObjectWrapper
+from elasticsearch_dsl import DocType, Nested, Text, Date, Object, Boolean, Integer, InnerDoc
from elasticsearch_dsl.exceptions import ValidationException
from pytest import raises
-class Author(InnerObjectWrapper):
+class Author(InnerDoc):
+ name = Text(required=True)
+ email = Text(required=True)
+
def clean(self):
+ print(self, type(self), self.name)
if self.name.lower() not in self.email:
raise ValidationException('Invalid email!')
class BlogPost(DocType):
- authors = Nested(
- required=True,
- doc_class=Author,
- properties={
- 'name': Text(required=True),
- 'email': Text(required=True)
- }
- )
+ authors = Nested(Author, required=True)
created = Date()
inner = Object()
@@ -57,8 +53,9 @@ def test_validation_works_for_lists_of_values():
class DT(DocType):
i = Date(required=True)
+ dt = DT(i=[datetime.now(), 'not date'])
with raises(ValidationException):
- DT(i=[datetime.now(), 'not date'])
+ dt.full_clean()
dt = DT(i=[datetime.now(), datetime.now()])
assert None is dt.full_clean()
@@ -101,7 +98,7 @@ def test_boolean_doesnt_treat_false_as_empty():
def test_custom_validation_on_nested_gets_run():
- d = BlogPost(authors=[{'name': 'Honza', 'email': '[email protected]'}], created=None)
+ d = BlogPost(authors=[Author(name='Honza', email='[email protected]')], created=None)
assert isinstance(d.authors[0], Author)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 12
} | 5.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[develop]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
docutils==0.18.1
elasticsearch==6.8.2
-e git+https://github.com/elastic/elasticsearch-dsl-py.git@1cb17a0be2cf48c5e4cf5df9ef55d5fc1a202f18#egg=elasticsearch_dsl
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
Jinja2==3.0.3
MarkupSafe==2.0.1
mock==5.2.0
packaging==21.3
pluggy==1.0.0
py==1.11.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
requests==2.27.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: elasticsearch-dsl-py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- coverage==6.2
- docutils==0.18.1
- elasticsearch==6.8.2
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jinja2==3.0.3
- markupsafe==2.0.1
- mock==5.2.0
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- requests==2.27.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/elasticsearch-dsl-py
| [
"test_elasticsearch_dsl/test_document.py::test_matches_uses_index_name_and_doc_type",
"test_elasticsearch_dsl/test_document.py::test_matches_accepts_wildcards",
"test_elasticsearch_dsl/test_document.py::test_assigning_attrlist_to_field",
"test_elasticsearch_dsl/test_document.py::test_optional_inner_objects_are_not_validated_if_missing",
"test_elasticsearch_dsl/test_document.py::test_custom_field",
"test_elasticsearch_dsl/test_document.py::test_custom_field_mapping",
"test_elasticsearch_dsl/test_document.py::test_custom_field_in_nested",
"test_elasticsearch_dsl/test_document.py::test_multi_works_after_doc_has_been_saved",
"test_elasticsearch_dsl/test_document.py::test_multi_works_in_nested_after_doc_has_been_serialized",
"test_elasticsearch_dsl/test_document.py::test_null_value_for_object",
"test_elasticsearch_dsl/test_document.py::test_inherited_doc_types_can_override_index",
"test_elasticsearch_dsl/test_document.py::test_to_dict_with_meta",
"test_elasticsearch_dsl/test_document.py::test_to_dict_with_meta_includes_custom_index",
"test_elasticsearch_dsl/test_document.py::test_attribute_can_be_removed",
"test_elasticsearch_dsl/test_document.py::test_doc_type_can_be_correctly_pickled",
"test_elasticsearch_dsl/test_document.py::test_meta_is_accessible_even_on_empty_doc",
"test_elasticsearch_dsl/test_document.py::test_meta_field_mapping",
"test_elasticsearch_dsl/test_document.py::test_multi_value_fields",
"test_elasticsearch_dsl/test_document.py::test_docs_with_properties",
"test_elasticsearch_dsl/test_document.py::test_nested_can_be_assigned_to",
"test_elasticsearch_dsl/test_document.py::test_nested_can_be_none",
"test_elasticsearch_dsl/test_document.py::test_nested_defaults_to_list_and_can_be_updated",
"test_elasticsearch_dsl/test_document.py::test_to_dict_is_recursive_and_can_cope_with_multi_values",
"test_elasticsearch_dsl/test_document.py::test_to_dict_ignores_empty_collections",
"test_elasticsearch_dsl/test_document.py::test_declarative_mapping_definition",
"test_elasticsearch_dsl/test_document.py::test_you_can_supply_own_mapping_instance",
"test_elasticsearch_dsl/test_document.py::test_document_can_be_created_dynamically",
"test_elasticsearch_dsl/test_document.py::test_invalid_date_will_raise_exception",
"test_elasticsearch_dsl/test_document.py::test_document_inheritance",
"test_elasticsearch_dsl/test_document.py::test_meta_fields_are_stored_in_meta_and_ignored_by_to_dict",
"test_elasticsearch_dsl/test_document.py::test_meta_inheritance",
"test_elasticsearch_dsl/test_document.py::test_meta_fields_can_be_accessed_directly_with_underscore",
"test_elasticsearch_dsl/test_document.py::test_update_no_fields",
"test_elasticsearch_dsl/test_document.py::test_from_es_respects_underscored_non_meta_fields",
"test_elasticsearch_dsl/test_field.py::test_boolean_deserialization",
"test_elasticsearch_dsl/test_field.py::test_date_field_can_have_default_tz",
"test_elasticsearch_dsl/test_field.py::test_custom_field_car_wrap_other_field",
"test_elasticsearch_dsl/test_field.py::test_field_from_dict",
"test_elasticsearch_dsl/test_field.py::test_multi_fields_are_accepted_and_parsed",
"test_elasticsearch_dsl/test_field.py::test_nested_provides_direct_access_to_its_fields",
"test_elasticsearch_dsl/test_field.py::test_field_supports_multiple_analyzers",
"test_elasticsearch_dsl/test_field.py::test_multifield_supports_multiple_analyzers",
"test_elasticsearch_dsl/test_field.py::test_scaled_float",
"test_elasticsearch_dsl/test_index.py::test_search_is_limited_to_index_name",
"test_elasticsearch_dsl/test_index.py::test_cloned_index_has_copied_settings_and_using",
"test_elasticsearch_dsl/test_index.py::test_cloned_index_has_analysis_attribute",
"test_elasticsearch_dsl/test_index.py::test_settings_are_saved",
"test_elasticsearch_dsl/test_index.py::test_registered_doc_type_included_in_to_dict",
"test_elasticsearch_dsl/test_index.py::test_registered_doc_type_included_in_search",
"test_elasticsearch_dsl/test_index.py::test_aliases_add_to_object",
"test_elasticsearch_dsl/test_index.py::test_aliases_returned_from_to_dict",
"test_elasticsearch_dsl/test_index.py::test_analyzers_added_to_object",
"test_elasticsearch_dsl/test_index.py::test_analyzers_returned_from_to_dict",
"test_elasticsearch_dsl/test_mapping.py::test_mapping_can_has_fields",
"test_elasticsearch_dsl/test_mapping.py::test_mapping_update_is_recursive",
"test_elasticsearch_dsl/test_mapping.py::test_properties_can_iterate_over_all_the_fields",
"test_elasticsearch_dsl/test_mapping.py::test_mapping_can_collect_all_analyzers_and_normalizers",
"test_elasticsearch_dsl/test_mapping.py::test_mapping_can_collect_multiple_analyzers",
"test_elasticsearch_dsl/test_mapping.py::test_even_non_custom_analyzers_can_have_params",
"test_elasticsearch_dsl/test_mapping.py::test_resolve_field_can_resolve_multifields",
"test_elasticsearch_dsl/test_result.py::test_agg_response_is_pickleable",
"test_elasticsearch_dsl/test_result.py::test_response_is_pickleable",
"test_elasticsearch_dsl/test_result.py::test_hit_is_pickleable",
"test_elasticsearch_dsl/test_result.py::test_response_stores_search",
"test_elasticsearch_dsl/test_result.py::test_attribute_error_in_hits_is_not_hidden",
"test_elasticsearch_dsl/test_result.py::test_interactive_helpers",
"test_elasticsearch_dsl/test_result.py::test_empty_response_is_false",
"test_elasticsearch_dsl/test_result.py::test_len_response",
"test_elasticsearch_dsl/test_result.py::test_iterating_over_response_gives_you_hits",
"test_elasticsearch_dsl/test_result.py::test_hits_get_wrapped_to_contain_additional_attrs",
"test_elasticsearch_dsl/test_result.py::test_hits_provide_dot_and_bracket_access_to_attrs",
"test_elasticsearch_dsl/test_result.py::test_slicing_on_response_slices_on_hits",
"test_elasticsearch_dsl/test_result.py::test_aggregation_base",
"test_elasticsearch_dsl/test_result.py::test_metric_agg_works",
"test_elasticsearch_dsl/test_result.py::test_aggregations_can_be_iterated_over",
"test_elasticsearch_dsl/test_result.py::test_aggregations_can_be_retrieved_by_name",
"test_elasticsearch_dsl/test_result.py::test_bucket_response_can_be_iterated_over",
"test_elasticsearch_dsl/test_result.py::test_bucket_keys_get_deserialized",
"test_elasticsearch_dsl/test_search.py::test_expand__to_dot_is_respected",
"test_elasticsearch_dsl/test_search.py::test_execute_uses_cache",
"test_elasticsearch_dsl/test_search.py::test_iter_iterates_over_hits",
"test_elasticsearch_dsl/test_search.py::test_count_uses_cache",
"test_elasticsearch_dsl/test_search.py::test_cache_isnt_cloned",
"test_elasticsearch_dsl/test_search.py::test_search_starts_with_empty_query",
"test_elasticsearch_dsl/test_search.py::test_search_query_combines_query",
"test_elasticsearch_dsl/test_search.py::test_query_can_be_assigned_to",
"test_elasticsearch_dsl/test_search.py::test_query_can_be_wrapped",
"test_elasticsearch_dsl/test_search.py::test_using",
"test_elasticsearch_dsl/test_search.py::test_methods_are_proxied_to_the_query",
"test_elasticsearch_dsl/test_search.py::test_query_always_returns_search",
"test_elasticsearch_dsl/test_search.py::test_source_copied_on_clone",
"test_elasticsearch_dsl/test_search.py::test_copy_clones",
"test_elasticsearch_dsl/test_search.py::test_aggs_get_copied_on_change",
"test_elasticsearch_dsl/test_search.py::test_search_index",
"test_elasticsearch_dsl/test_search.py::test_search_doc_type",
"test_elasticsearch_dsl/test_search.py::test_doc_type_can_be_document_class",
"test_elasticsearch_dsl/test_search.py::test_sort",
"test_elasticsearch_dsl/test_search.py::test_sort_by_score",
"test_elasticsearch_dsl/test_search.py::test_slice",
"test_elasticsearch_dsl/test_search.py::test_index",
"test_elasticsearch_dsl/test_search.py::test_search_to_dict",
"test_elasticsearch_dsl/test_search.py::test_complex_example",
"test_elasticsearch_dsl/test_search.py::test_reverse",
"test_elasticsearch_dsl/test_search.py::test_from_dict_doesnt_need_query",
"test_elasticsearch_dsl/test_search.py::test_source",
"test_elasticsearch_dsl/test_search.py::test_source_on_clone",
"test_elasticsearch_dsl/test_search.py::test_source_on_clear",
"test_elasticsearch_dsl/test_search.py::test_suggest_accepts_global_text",
"test_elasticsearch_dsl/test_search.py::test_suggest",
"test_elasticsearch_dsl/test_search.py::test_exclude",
"test_elasticsearch_dsl/test_validation.py::test_required_int_can_be_0",
"test_elasticsearch_dsl/test_validation.py::test_required_field_cannot_be_empty_list",
"test_elasticsearch_dsl/test_validation.py::test_validation_works_for_lists_of_values",
"test_elasticsearch_dsl/test_validation.py::test_field_with_custom_clean",
"test_elasticsearch_dsl/test_validation.py::test_empty_object",
"test_elasticsearch_dsl/test_validation.py::test_missing_required_field_raises_validation_exception",
"test_elasticsearch_dsl/test_validation.py::test_boolean_doesnt_treat_false_as_empty",
"test_elasticsearch_dsl/test_validation.py::test_custom_validation_on_nested_gets_run",
"test_elasticsearch_dsl/test_validation.py::test_accessing_known_fields_returns_empty_value",
"test_elasticsearch_dsl/test_validation.py::test_empty_values_are_not_serialized"
]
| []
| []
| []
| Apache License 2.0 | 1,955 | [
"elasticsearch_dsl/response/__init__.py",
"docs/persistence.rst",
"setup.py",
"elasticsearch_dsl/field.py",
"elasticsearch_dsl/connections.py",
"elasticsearch_dsl/document.py",
"elasticsearch_dsl/__init__.py",
"elasticsearch_dsl/utils.py",
"Changelog.rst",
"elasticsearch_dsl/search.py",
"elasticsearch_dsl/mapping.py",
"docs/configuration.rst"
]
| [
"elasticsearch_dsl/response/__init__.py",
"docs/persistence.rst",
"setup.py",
"elasticsearch_dsl/field.py",
"elasticsearch_dsl/connections.py",
"elasticsearch_dsl/document.py",
"elasticsearch_dsl/__init__.py",
"elasticsearch_dsl/utils.py",
"Changelog.rst",
"elasticsearch_dsl/search.py",
"elasticsearch_dsl/mapping.py",
"docs/configuration.rst"
]
|
|
softlayer__softlayer-python-905 | 1ccda8ffba02479c9903ca0dc22fe33b80e42ae5 | 2017-12-14 15:26:25 | 1ccda8ffba02479c9903ca0dc22fe33b80e42ae5 | diff --git a/SoftLayer/CLI/block/snapshot/schedule_list.py b/SoftLayer/CLI/block/snapshot/schedule_list.py
new file mode 100644
index 00000000..022427e1
--- /dev/null
+++ b/SoftLayer/CLI/block/snapshot/schedule_list.py
@@ -0,0 +1,70 @@
+"""List scheduled snapshots of a specific volume"""
+# :license: MIT, see LICENSE for more details.
+
+import click
+import SoftLayer
+from SoftLayer.CLI import environment
+from SoftLayer.CLI import formatting
+
+
[email protected]()
[email protected]('volume_id')
[email protected]_env
+def cli(env, volume_id):
+ """Lists snapshot schedules for a given volume"""
+
+ block_manager = SoftLayer.BlockStorageManager(env.client)
+
+ snapshot_schedules = block_manager.list_volume_schedules(volume_id)
+
+ table = formatting.Table(['id',
+ 'active',
+ 'type',
+ 'replication',
+ 'date_created',
+ 'minute',
+ 'hour',
+ 'day',
+ 'week',
+ 'day_of_week',
+ 'date_of_month',
+ 'month_of_year',
+ 'maximum_snapshots'])
+
+ for schedule in snapshot_schedules:
+
+ if 'REPLICATION' in schedule['type']['keyname']:
+ replication = '*'
+ else:
+ replication = formatting.blank()
+
+ block_schedule_type = schedule['type']['keyname'].replace('REPLICATION_', '')
+ block_schedule_type = block_schedule_type.replace('SNAPSHOT_', '')
+
+ property_list = ['MINUTE', 'HOUR', 'DAY', 'WEEK',
+ 'DAY_OF_WEEK', 'DAY_OF_MONTH',
+ 'MONTH_OF_YEAR', 'SNAPSHOT_LIMIT']
+
+ schedule_properties = []
+ for prop_key in property_list:
+ item = formatting.blank()
+ for schedule_property in schedule.get('properties', []):
+ if schedule_property['type']['keyname'] == prop_key:
+ if schedule_property['value'] == '-1':
+ item = '*'
+ else:
+ item = schedule_property['value']
+ break
+ schedule_properties.append(item)
+
+ table_row = [
+ schedule['id'],
+ '*' if schedule.get('active', '') else '',
+ block_schedule_type,
+ replication,
+ schedule.get('createDate', '')]
+ table_row.extend(schedule_properties)
+
+ table.add_row(table_row)
+
+ env.fout(table)
diff --git a/SoftLayer/CLI/file/snapshot/schedule_list.py b/SoftLayer/CLI/file/snapshot/schedule_list.py
new file mode 100644
index 00000000..c83c50da
--- /dev/null
+++ b/SoftLayer/CLI/file/snapshot/schedule_list.py
@@ -0,0 +1,70 @@
+"""List scheduled snapshots of a specific volume"""
+# :license: MIT, see LICENSE for more details.
+
+import click
+import SoftLayer
+from SoftLayer.CLI import environment
+from SoftLayer.CLI import formatting
+
+
[email protected]()
[email protected]('volume_id')
[email protected]_env
+def cli(env, volume_id):
+ """Lists snapshot schedules for a given volume"""
+
+ file_manager = SoftLayer.FileStorageManager(env.client)
+
+ snapshot_schedules = file_manager.list_volume_schedules(volume_id)
+
+ table = formatting.Table(['id',
+ 'active',
+ 'type',
+ 'replication',
+ 'date_created',
+ 'minute',
+ 'hour',
+ 'day',
+ 'week',
+ 'day_of_week',
+ 'date_of_month',
+ 'month_of_year',
+ 'maximum_snapshots'])
+
+ for schedule in snapshot_schedules:
+
+ if 'REPLICATION' in schedule['type']['keyname']:
+ replication = '*'
+ else:
+ replication = formatting.blank()
+
+ file_schedule_type = schedule['type']['keyname'].replace('REPLICATION_', '')
+ file_schedule_type = file_schedule_type.replace('SNAPSHOT_', '')
+
+ property_list = ['MINUTE', 'HOUR', 'DAY', 'WEEK',
+ 'DAY_OF_WEEK', 'DAY_OF_MONTH',
+ 'MONTH_OF_YEAR', 'SNAPSHOT_LIMIT']
+
+ schedule_properties = []
+ for prop_key in property_list:
+ item = formatting.blank()
+ for schedule_property in schedule.get('properties', []):
+ if schedule_property['type']['keyname'] == prop_key:
+ if schedule_property['value'] == '-1':
+ item = '*'
+ else:
+ item = schedule_property['value']
+ break
+ schedule_properties.append(item)
+
+ table_row = [
+ schedule['id'],
+ '*' if schedule.get('active', '') else '',
+ file_schedule_type,
+ replication,
+ schedule.get('createDate', '')
+ ]
+ table_row.extend(schedule_properties)
+ table.add_row(table_row)
+
+ env.fout(table)
diff --git a/SoftLayer/CLI/routes.py b/SoftLayer/CLI/routes.py
index 3b406ec2..f02795cc 100644
--- a/SoftLayer/CLI/routes.py
+++ b/SoftLayer/CLI/routes.py
@@ -72,6 +72,8 @@
('block:snapshot-delete', 'SoftLayer.CLI.block.snapshot.delete:cli'),
('block:snapshot-disable', 'SoftLayer.CLI.block.snapshot.disable:cli'),
('block:snapshot-enable', 'SoftLayer.CLI.block.snapshot.enable:cli'),
+ ('block:snapshot-schedule-list',
+ 'SoftLayer.CLI.block.snapshot.schedule_list:cli'),
('block:snapshot-list', 'SoftLayer.CLI.block.snapshot.list:cli'),
('block:snapshot-order', 'SoftLayer.CLI.block.snapshot.order:cli'),
('block:snapshot-restore', 'SoftLayer.CLI.block.snapshot.restore:cli'),
@@ -98,6 +100,8 @@
('file:snapshot-delete', 'SoftLayer.CLI.file.snapshot.delete:cli'),
('file:snapshot-disable', 'SoftLayer.CLI.file.snapshot.disable:cli'),
('file:snapshot-enable', 'SoftLayer.CLI.file.snapshot.enable:cli'),
+ ('file:snapshot-schedule-list',
+ 'SoftLayer.CLI.file.snapshot.schedule_list:cli'),
('file:snapshot-list', 'SoftLayer.CLI.file.snapshot.list:cli'),
('file:snapshot-order', 'SoftLayer.CLI.file.snapshot.order:cli'),
('file:snapshot-restore', 'SoftLayer.CLI.file.snapshot.restore:cli'),
diff --git a/SoftLayer/fixtures/SoftLayer_Network_Storage.py b/SoftLayer/fixtures/SoftLayer_Network_Storage.py
index 80996cd7..8a0c9772 100644
--- a/SoftLayer/fixtures/SoftLayer_Network_Storage.py
+++ b/SoftLayer/fixtures/SoftLayer_Network_Storage.py
@@ -132,10 +132,22 @@
'username': 'TEST_REP_2',
}],
'replicationStatus': 'Replicant Volume Provisioning has completed.',
- 'schedules': [{
- 'id': 978,
- 'type': {'keyname': 'SNAPSHOT_WEEKLY'},
- }],
+ 'schedules': [
+ {
+ 'id': 978,
+ 'type': {'keyname': 'SNAPSHOT_WEEKLY'},
+ 'properties': [
+ {'type': {'keyname': 'MINUTE'}, 'value': '30'},
+ ]
+ },
+ {
+ 'id': 988,
+ 'type': {'keyname': 'REPLICATION_INTERVAL'},
+ 'properties': [
+ {'type': {'keyname': 'MINUTE'}, 'value': '-1'},
+ ]
+ }
+ ],
'serviceProviderId': 1,
'serviceResource': {'datacenter': {'id': 449500, 'name': 'dal05'}},
'serviceResourceBackendIpAddress': '10.1.2.3',
@@ -188,6 +200,18 @@
'name': 'dal05'
}]
+listVolumeSchedules = [
+ {
+ 'id': 978,
+ 'type': {'keyname': 'SNAPSHOT_WEEKLY'},
+ 'properties': [{'type': {'keyname': 'MINUTE'}, 'value': '30'}]
+ },
+ {
+ 'id': 988,
+ 'type': {'keyname': 'REPLICATION_INTERVAL'},
+ 'properties': [{'type': {'keyname': 'MINUTE'}, 'value': '-1'}]
+ }
+]
deleteObject = True
allowAccessFromHostList = True
diff --git a/SoftLayer/managers/block.py b/SoftLayer/managers/block.py
index 0bd5b60f..ed0d3261 100644
--- a/SoftLayer/managers/block.py
+++ b/SoftLayer/managers/block.py
@@ -474,6 +474,20 @@ def disable_snapshots(self, volume_id, schedule_type):
return self.client.call('Network_Storage', 'disableSnapshots',
schedule_type, id=volume_id)
+ def list_volume_schedules(self, volume_id):
+ """Lists schedules for a given volume
+
+ :param integer volume_id: The id of the volume
+ :return: Returns list of schedules assigned to a given volume
+ """
+ volume_detail = self.client.call(
+ 'Network_Storage',
+ 'getObject',
+ id=volume_id,
+ mask='schedules[type,properties[type]]')
+
+ return utils.lookup(volume_detail, 'schedules')
+
def restore_from_snapshot(self, volume_id, snapshot_id):
"""Restores a specific volume from a snapshot
diff --git a/SoftLayer/managers/file.py b/SoftLayer/managers/file.py
index 9eccdc8a..92c060b7 100644
--- a/SoftLayer/managers/file.py
+++ b/SoftLayer/managers/file.py
@@ -394,6 +394,20 @@ def disable_snapshots(self, volume_id, schedule_type):
return self.client.call('Network_Storage', 'disableSnapshots',
schedule_type, id=volume_id)
+ def list_volume_schedules(self, volume_id):
+ """Lists schedules for a given volume
+
+ :param integer volume_id: The id of the volume
+ :return: Returns list of schedules assigned to a given volume
+ """
+ volume_detail = self.client.call(
+ 'Network_Storage',
+ 'getObject',
+ id=volume_id,
+ mask='schedules[type,properties[type]]')
+
+ return utils.lookup(volume_detail, 'schedules')
+
def order_snapshot_space(self, volume_id, capacity, tier,
upgrade, **kwargs):
"""Orders snapshot space for the given file volume.
| Add functionality to list file/block storage snapshot schedules
This functionality needs to be added to both the file and block managers; `snapshot-enable` and `snapshot-disable` exist, but there is no command to show the enabled and disabled snapshot schedules for a given volume (a usage sketch of the proposed manager call follows).
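A minimal usage sketch of the `list_volume_schedules` manager call introduced by the patch above; the environment-based client setup and the volume id `12345678` are illustrative assumptions, not real values:
```python
import SoftLayer

# Assumes API credentials are configured via the environment or ~/.softlayer;
# the volume id below is a placeholder.
client = SoftLayer.create_client_from_env()
block = SoftLayer.BlockStorageManager(client)

# Returns the snapshot/replication schedules attached to the volume.
for schedule in block.list_volume_schedules(12345678):
    print(schedule['id'], schedule['type']['keyname'])
```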
Perhaps `slcli block snapshot-schedule-list`? | softlayer/softlayer-python | diff --git a/tests/CLI/modules/block_tests.py b/tests/CLI/modules/block_tests.py
index 352871b1..08914757 100644
--- a/tests/CLI/modules/block_tests.py
+++ b/tests/CLI/modules/block_tests.py
@@ -302,6 +302,43 @@ def test_disable_snapshots(self):
'--schedule-type=HOURLY'])
self.assert_no_fail(result)
+ def test_list_volume_schedules(self):
+ result = self.run_command([
+ 'block', 'snapshot-schedule-list', '12345678'])
+ self.assert_no_fail(result)
+ self.assertEqual([
+ {
+ "week": None,
+ "maximum_snapshots": None,
+ "hour": None,
+ "day_of_week": None,
+ "day": None,
+ "replication": None,
+ "date_of_month": None,
+ "month_of_year": None,
+ "active": "",
+ "date_created": "",
+ "type": "WEEKLY",
+ "id": 978,
+ "minute": '30'
+ },
+ {
+ "week": None,
+ "maximum_snapshots": None,
+ "hour": None,
+ "day_of_week": None,
+ "day": None,
+ "replication": '*',
+ "date_of_month": None,
+ "month_of_year": None,
+ "active": "",
+ "date_created": "",
+ "type": "INTERVAL",
+ "id": 988,
+ "minute": '*'
+ }
+ ], json.loads(result.output))
+
def test_create_snapshot(self):
result = self.run_command(['block', 'snapshot-create', '12345678'])
diff --git a/tests/CLI/modules/file_tests.py b/tests/CLI/modules/file_tests.py
index 6614e115..14c522f8 100644
--- a/tests/CLI/modules/file_tests.py
+++ b/tests/CLI/modules/file_tests.py
@@ -304,6 +304,43 @@ def test_disable_snapshots(self):
'--schedule-type=HOURLY'])
self.assert_no_fail(result)
+ def test_list_volume_schedules(self):
+ result = self.run_command([
+ 'file', 'snapshot-schedule-list', '12345678'])
+ self.assert_no_fail(result)
+ self.assertEqual([
+ {
+ "week": None,
+ "maximum_snapshots": None,
+ "hour": None,
+ "day_of_week": None,
+ "day": None,
+ "replication": None,
+ "date_of_month": None,
+ "month_of_year": None,
+ "active": "",
+ "date_created": "",
+ "type": "WEEKLY",
+ "id": 978,
+ "minute": '30'
+ },
+ {
+ "week": None,
+ "maximum_snapshots": None,
+ "hour": None,
+ "day_of_week": None,
+ "day": None,
+ "replication": '*',
+ "date_of_month": None,
+ "month_of_year": None,
+ "active": "",
+ "date_created": "",
+ "type": "INTERVAL",
+ "id": 988,
+ "minute": '*'
+ }
+ ], json.loads(result.output))
+
def test_create_snapshot(self):
result = self.run_command(['file', 'snapshot-create', '12345678'])
diff --git a/tests/managers/block_tests.py b/tests/managers/block_tests.py
index 2f6cf2ab..203c021b 100644
--- a/tests/managers/block_tests.py
+++ b/tests/managers/block_tests.py
@@ -492,6 +492,22 @@ def test_disable_snapshots(self):
'disableSnapshots',
identifier=12345678)
+ def test_list_volume_schedules(self):
+ result = self.block.list_volume_schedules(12345678)
+
+ self.assertEqual(
+ fixtures.SoftLayer_Network_Storage.listVolumeSchedules,
+ result)
+
+ expected_mask = 'schedules[type,properties[type]]'
+
+ self.assert_called_with(
+ 'SoftLayer_Network_Storage',
+ 'getObject',
+ identifier=12345678,
+ mask='mask[%s]' % expected_mask
+ )
+
def test_order_block_snapshot_space_upgrade(self):
mock = self.set_mock('SoftLayer_Product_Package', 'getAllObjects')
mock.return_value = [fixtures.SoftLayer_Product_Package.SAAS_PACKAGE]
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 4
} | 5.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"tools/test-requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
click==8.0.4
coverage==6.2
distlib==0.3.9
docutils==0.18.1
filelock==3.4.1
fixtures==4.0.1
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
Jinja2==3.0.3
MarkupSafe==2.0.1
mock==5.2.0
packaging==21.3
pbr==6.1.1
platformdirs==2.4.0
pluggy==1.0.0
prettytable==2.5.0
prompt-toolkit==3.0.36
py==1.11.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
pytz==2025.2
requests==2.27.1
six==1.17.0
snowballstemmer==2.2.0
-e git+https://github.com/softlayer/softlayer-python.git@1ccda8ffba02479c9903ca0dc22fe33b80e42ae5#egg=SoftLayer
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
testtools==2.6.0
toml==0.10.2
tomli==1.2.3
tox==3.28.0
typing_extensions==4.1.1
urllib3==1.26.20
virtualenv==20.17.1
wcwidth==0.2.13
zipp==3.6.0
| name: softlayer-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- click==8.0.4
- coverage==6.2
- distlib==0.3.9
- docutils==0.18.1
- filelock==3.4.1
- fixtures==4.0.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- jinja2==3.0.3
- markupsafe==2.0.1
- mock==5.2.0
- packaging==21.3
- pbr==6.1.1
- platformdirs==2.4.0
- pluggy==1.0.0
- prettytable==2.5.0
- prompt-toolkit==3.0.36
- py==1.11.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- pytz==2025.2
- requests==2.27.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- testtools==2.6.0
- toml==0.10.2
- tomli==1.2.3
- tox==3.28.0
- typing-extensions==4.1.1
- urllib3==1.26.20
- virtualenv==20.17.1
- wcwidth==0.2.13
- zipp==3.6.0
prefix: /opt/conda/envs/softlayer-python
| [
"tests/managers/block_tests.py::BlockTests::test_list_volume_schedules"
]
| [
"tests/CLI/modules/block_tests.py::BlockTests::test_access_list",
"tests/CLI/modules/block_tests.py::BlockTests::test_authorize_host_to_volume",
"tests/CLI/modules/block_tests.py::BlockTests::test_create_snapshot",
"tests/CLI/modules/block_tests.py::BlockTests::test_deauthorize_host_to_volume",
"tests/CLI/modules/block_tests.py::BlockTests::test_disable_snapshots",
"tests/CLI/modules/block_tests.py::BlockTests::test_duplicate_order",
"tests/CLI/modules/block_tests.py::BlockTests::test_duplicate_order_hourly_billing",
"tests/CLI/modules/block_tests.py::BlockTests::test_duplicate_order_order_not_placed",
"tests/CLI/modules/block_tests.py::BlockTests::test_enable_snapshots",
"tests/CLI/modules/block_tests.py::BlockTests::test_list_volume_schedules",
"tests/CLI/modules/block_tests.py::BlockTests::test_modify_order",
"tests/CLI/modules/block_tests.py::BlockTests::test_modify_order_order_not_placed",
"tests/CLI/modules/block_tests.py::BlockTests::test_replicant_failback",
"tests/CLI/modules/block_tests.py::BlockTests::test_replicant_failover",
"tests/CLI/modules/block_tests.py::BlockTests::test_replicant_order",
"tests/CLI/modules/block_tests.py::BlockTests::test_replicant_order_order_not_placed",
"tests/CLI/modules/block_tests.py::BlockTests::test_replication_locations",
"tests/CLI/modules/block_tests.py::BlockTests::test_replication_locations_unsuccessful",
"tests/CLI/modules/block_tests.py::BlockTests::test_replication_partners",
"tests/CLI/modules/block_tests.py::BlockTests::test_set_password",
"tests/CLI/modules/block_tests.py::BlockTests::test_snapshot_cancel",
"tests/CLI/modules/block_tests.py::BlockTests::test_snapshot_list",
"tests/CLI/modules/block_tests.py::BlockTests::test_snapshot_order",
"tests/CLI/modules/block_tests.py::BlockTests::test_snapshot_order_order_not_placed",
"tests/CLI/modules/block_tests.py::BlockTests::test_snapshot_restore",
"tests/CLI/modules/block_tests.py::BlockTests::test_volume_cancel",
"tests/CLI/modules/block_tests.py::BlockTests::test_volume_count",
"tests/CLI/modules/block_tests.py::BlockTests::test_volume_detail",
"tests/CLI/modules/block_tests.py::BlockTests::test_volume_list",
"tests/CLI/modules/block_tests.py::BlockTests::test_volume_order_endurance",
"tests/CLI/modules/block_tests.py::BlockTests::test_volume_order_hourly_billing",
"tests/CLI/modules/block_tests.py::BlockTests::test_volume_order_order_not_placed",
"tests/CLI/modules/block_tests.py::BlockTests::test_volume_order_performance",
"tests/CLI/modules/block_tests.py::BlockTests::test_volume_set_lun_id_in_range",
"tests/CLI/modules/block_tests.py::BlockTests::test_volume_set_lun_id_in_range_missing_value",
"tests/CLI/modules/file_tests.py::FileTests::test_access_list",
"tests/CLI/modules/file_tests.py::FileTests::test_authorize_host_to_volume",
"tests/CLI/modules/file_tests.py::FileTests::test_create_snapshot",
"tests/CLI/modules/file_tests.py::FileTests::test_deauthorize_host_to_volume",
"tests/CLI/modules/file_tests.py::FileTests::test_delete_snapshot",
"tests/CLI/modules/file_tests.py::FileTests::test_disable_snapshots",
"tests/CLI/modules/file_tests.py::FileTests::test_duplicate_order",
"tests/CLI/modules/file_tests.py::FileTests::test_duplicate_order_hourly_billing",
"tests/CLI/modules/file_tests.py::FileTests::test_duplicate_order_order_not_placed",
"tests/CLI/modules/file_tests.py::FileTests::test_enable_snapshots",
"tests/CLI/modules/file_tests.py::FileTests::test_list_volume_schedules",
"tests/CLI/modules/file_tests.py::FileTests::test_modify_order",
"tests/CLI/modules/file_tests.py::FileTests::test_modify_order_order_not_placed",
"tests/CLI/modules/file_tests.py::FileTests::test_replicant_failback",
"tests/CLI/modules/file_tests.py::FileTests::test_replicant_failover",
"tests/CLI/modules/file_tests.py::FileTests::test_replicant_order",
"tests/CLI/modules/file_tests.py::FileTests::test_replicant_order_order_not_placed",
"tests/CLI/modules/file_tests.py::FileTests::test_replication_locations",
"tests/CLI/modules/file_tests.py::FileTests::test_replication_locations_unsuccessful",
"tests/CLI/modules/file_tests.py::FileTests::test_replication_partners",
"tests/CLI/modules/file_tests.py::FileTests::test_snapshot_cancel",
"tests/CLI/modules/file_tests.py::FileTests::test_snapshot_list",
"tests/CLI/modules/file_tests.py::FileTests::test_snapshot_order",
"tests/CLI/modules/file_tests.py::FileTests::test_snapshot_order_order_not_placed",
"tests/CLI/modules/file_tests.py::FileTests::test_snapshot_restore",
"tests/CLI/modules/file_tests.py::FileTests::test_volume_cancel",
"tests/CLI/modules/file_tests.py::FileTests::test_volume_count",
"tests/CLI/modules/file_tests.py::FileTests::test_volume_detail",
"tests/CLI/modules/file_tests.py::FileTests::test_volume_list",
"tests/CLI/modules/file_tests.py::FileTests::test_volume_order_endurance",
"tests/CLI/modules/file_tests.py::FileTests::test_volume_order_hourly_billing",
"tests/CLI/modules/file_tests.py::FileTests::test_volume_order_order_not_placed",
"tests/CLI/modules/file_tests.py::FileTests::test_volume_order_performance",
"tests/managers/block_tests.py::BlockTests::test_order_block_duplicate_endurance",
"tests/managers/block_tests.py::BlockTests::test_order_block_duplicate_endurance_no_duplicate_snapshot",
"tests/managers/block_tests.py::BlockTests::test_order_block_duplicate_origin_os_type_not_found",
"tests/managers/block_tests.py::BlockTests::test_order_block_duplicate_performance",
"tests/managers/block_tests.py::BlockTests::test_order_block_duplicate_performance_no_duplicate_snapshot",
"tests/managers/block_tests.py::BlockTests::test_order_block_modified_endurance",
"tests/managers/block_tests.py::BlockTests::test_order_block_modified_performance",
"tests/managers/block_tests.py::BlockTests::test_order_block_replicant_endurance",
"tests/managers/block_tests.py::BlockTests::test_order_block_replicant_performance_os_type_given",
"tests/managers/block_tests.py::BlockTests::test_order_block_snapshot_space",
"tests/managers/block_tests.py::BlockTests::test_order_block_snapshot_space_upgrade",
"tests/managers/block_tests.py::BlockTests::test_order_block_volume_endurance",
"tests/managers/block_tests.py::BlockTests::test_order_block_volume_performance"
]
| [
"tests/CLI/modules/block_tests.py::BlockTests::test_create_snapshot_unsuccessful",
"tests/CLI/modules/block_tests.py::BlockTests::test_duplicate_order_exception_caught",
"tests/CLI/modules/block_tests.py::BlockTests::test_modify_order_exception_caught",
"tests/CLI/modules/block_tests.py::BlockTests::test_replicant_failback_unsuccessful",
"tests/CLI/modules/block_tests.py::BlockTests::test_replicant_failover_unsuccessful",
"tests/CLI/modules/block_tests.py::BlockTests::test_replication_partners_unsuccessful",
"tests/CLI/modules/block_tests.py::BlockTests::test_snapshot_order_performance_manager_error",
"tests/CLI/modules/block_tests.py::BlockTests::test_volume_order_endurance_manager_error",
"tests/CLI/modules/block_tests.py::BlockTests::test_volume_order_endurance_tier_not_given",
"tests/CLI/modules/block_tests.py::BlockTests::test_volume_order_hourly_billing_not_available",
"tests/CLI/modules/block_tests.py::BlockTests::test_volume_order_performance_iops_not_given",
"tests/CLI/modules/block_tests.py::BlockTests::test_volume_order_performance_iops_not_multiple_of_100",
"tests/CLI/modules/block_tests.py::BlockTests::test_volume_order_performance_manager_error",
"tests/CLI/modules/block_tests.py::BlockTests::test_volume_order_performance_snapshot_error",
"tests/CLI/modules/block_tests.py::BlockTests::test_volume_set_lun_id_not_in_range",
"tests/CLI/modules/file_tests.py::FileTests::test_create_snapshot_unsuccessful",
"tests/CLI/modules/file_tests.py::FileTests::test_duplicate_order_exception_caught",
"tests/CLI/modules/file_tests.py::FileTests::test_modify_order_exception_caught",
"tests/CLI/modules/file_tests.py::FileTests::test_replicant_failback_unsuccessful",
"tests/CLI/modules/file_tests.py::FileTests::test_replicant_failover_unsuccessful",
"tests/CLI/modules/file_tests.py::FileTests::test_replication_partners_unsuccessful",
"tests/CLI/modules/file_tests.py::FileTests::test_snapshot_order_performance_manager_error",
"tests/CLI/modules/file_tests.py::FileTests::test_volume_order_endurance_manager_error",
"tests/CLI/modules/file_tests.py::FileTests::test_volume_order_endurance_tier_not_given",
"tests/CLI/modules/file_tests.py::FileTests::test_volume_order_hourly_billing_not_available",
"tests/CLI/modules/file_tests.py::FileTests::test_volume_order_performance_iops_not_given",
"tests/CLI/modules/file_tests.py::FileTests::test_volume_order_performance_iops_not_multiple_of_100",
"tests/CLI/modules/file_tests.py::FileTests::test_volume_order_performance_manager_error",
"tests/CLI/modules/file_tests.py::FileTests::test_volume_order_performance_snapshot_error",
"tests/managers/block_tests.py::BlockTests::test_authorize_host_to_volume",
"tests/managers/block_tests.py::BlockTests::test_cancel_block_volume_immediately",
"tests/managers/block_tests.py::BlockTests::test_cancel_block_volume_immediately_hourly_billing",
"tests/managers/block_tests.py::BlockTests::test_cancel_snapshot_exception_no_billing_item_active_children",
"tests/managers/block_tests.py::BlockTests::test_cancel_snapshot_exception_snapshot_billing_item_not_found",
"tests/managers/block_tests.py::BlockTests::test_cancel_snapshot_hourly_billing_immediate_false",
"tests/managers/block_tests.py::BlockTests::test_cancel_snapshot_hourly_billing_immediate_true",
"tests/managers/block_tests.py::BlockTests::test_cancel_snapshot_immediately",
"tests/managers/block_tests.py::BlockTests::test_create_snapshot",
"tests/managers/block_tests.py::BlockTests::test_deauthorize_host_to_volume",
"tests/managers/block_tests.py::BlockTests::test_delete_snapshot",
"tests/managers/block_tests.py::BlockTests::test_disable_snapshots",
"tests/managers/block_tests.py::BlockTests::test_enable_snapshots",
"tests/managers/block_tests.py::BlockTests::test_get_block_volume_access_list",
"tests/managers/block_tests.py::BlockTests::test_get_block_volume_details",
"tests/managers/block_tests.py::BlockTests::test_get_block_volume_snapshot_list",
"tests/managers/block_tests.py::BlockTests::test_get_replication_locations",
"tests/managers/block_tests.py::BlockTests::test_get_replication_partners",
"tests/managers/block_tests.py::BlockTests::test_list_block_volumes",
"tests/managers/block_tests.py::BlockTests::test_list_block_volumes_with_additional_filters",
"tests/managers/block_tests.py::BlockTests::test_order_block_replicant_os_type_not_found",
"tests/managers/block_tests.py::BlockTests::test_replicant_failback",
"tests/managers/block_tests.py::BlockTests::test_replicant_failover",
"tests/managers/block_tests.py::BlockTests::test_setCredentialPassword",
"tests/managers/block_tests.py::BlockTests::test_snapshot_restore"
]
| []
| MIT License | 1,956 | [
"SoftLayer/CLI/routes.py",
"SoftLayer/managers/file.py",
"SoftLayer/CLI/file/snapshot/schedule_list.py",
"SoftLayer/managers/block.py",
"SoftLayer/CLI/block/snapshot/schedule_list.py",
"SoftLayer/fixtures/SoftLayer_Network_Storage.py"
]
| [
"SoftLayer/CLI/routes.py",
"SoftLayer/managers/file.py",
"SoftLayer/CLI/file/snapshot/schedule_list.py",
"SoftLayer/managers/block.py",
"SoftLayer/CLI/block/snapshot/schedule_list.py",
"SoftLayer/fixtures/SoftLayer_Network_Storage.py"
]
|
|
PlasmaPy__PlasmaPy-190 | e8213d681289f0484c9950d01415ca0fc27585e0 | 2017-12-15 01:22:52 | e8213d681289f0484c9950d01415ca0fc27585e0 | diff --git a/docs/index.rst b/docs/index.rst
index e6a0a4e0..5c3727d3 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -31,7 +31,7 @@ Modules
:maxdepth: 1
atomic/index
- math/index
+ mathematics/index
physics/index
Classes
diff --git a/docs/math/index.rst b/docs/math/index.rst
deleted file mode 100644
index 6368d851..00000000
--- a/docs/math/index.rst
+++ /dev/null
@@ -1,18 +0,0 @@
-.. py:module:: math
-
-.. _plasmapy-math:
-
-****
-Math
-****
-
-.. automodule:: plasmapy.math
-
-Functions
-=========
-
-The functions contained within the math subpackage are:
-
-* .. autofunction:: plasma_dispersion_func
-
-* .. autofunction:: plasma_dispersion_func_deriv
diff --git a/docs/mathematics/index.rst b/docs/mathematics/index.rst
new file mode 100644
index 00000000..a2ad3502
--- /dev/null
+++ b/docs/mathematics/index.rst
@@ -0,0 +1,18 @@
+.. py:module:: mathematics
+
+.. _plasmapy-mathematics:
+
+****
+Mathematics
+****
+
+.. automodule:: plasmapy.mathematics
+
+Functions
+=========
+
+The functions contained within the mathematics subpackage are:
+
+* .. autofunction:: plasma_dispersion_func
+
+* .. autofunction:: plasma_dispersion_func_deriv
diff --git a/plasmapy/__init__.py b/plasmapy/__init__.py
index f45b745f..8362b316 100644
--- a/plasmapy/__init__.py
+++ b/plasmapy/__init__.py
@@ -71,7 +71,7 @@ def _check_astropy_version(): # coveralls: ignore
from . import classes
from . import constants
from . import atomic
- from . import math
+ from . import mathematics
from . import physics
from . import utils
except ImportError: # coveralls: ignore
diff --git a/plasmapy/math/__init__.py b/plasmapy/math/__init__.py
deleted file mode 100644
index 48603f3e..00000000
--- a/plasmapy/math/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from .math import (plasma_dispersion_func,
- plasma_dispersion_func_deriv)
-
-from . import tests
diff --git a/plasmapy/mathematics/__init__.py b/plasmapy/mathematics/__init__.py
new file mode 100644
index 00000000..33e80147
--- /dev/null
+++ b/plasmapy/mathematics/__init__.py
@@ -0,0 +1,4 @@
+from .mathematics import (plasma_dispersion_func,
+ plasma_dispersion_func_deriv)
+
+from . import tests
| Rename math submodule
Namespaces are one honking great idea, so let's not have a submodule that shares a name with [an existing library](https://docs.python.org/3.6/library/math.html). Will require changing the filename, folder name, and purging all instances of it being imported.
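A minimal sketch of what the import change would look like after the rename (using the replacement name proposed in the next sentence; purely illustrative):
```python
# before the rename the subpackage shared its name with the stdlib module:
#   from plasmapy import math

# after the rename (sketch)
from plasmapy import mathematics
from plasmapy.mathematics import plasma_dispersion_func
```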
Current frontrunner for replacement name is `mathematics`. | PlasmaPy/PlasmaPy | diff --git a/plasmapy/math/math.py b/plasmapy/mathematics/mathematics.py
similarity index 100%
rename from plasmapy/math/math.py
rename to plasmapy/mathematics/mathematics.py
diff --git a/plasmapy/math/tests/__init__.py b/plasmapy/mathematics/tests/__init__.py
similarity index 100%
rename from plasmapy/math/tests/__init__.py
rename to plasmapy/mathematics/tests/__init__.py
diff --git a/plasmapy/math/tests/test_dispersion.py b/plasmapy/mathematics/tests/test_dispersion.py
similarity index 97%
rename from plasmapy/math/tests/test_dispersion.py
rename to plasmapy/mathematics/tests/test_dispersion.py
index 877c8c64..98349f58 100644
--- a/plasmapy/math/tests/test_dispersion.py
+++ b/plasmapy/mathematics/tests/test_dispersion.py
@@ -3,7 +3,7 @@
import numpy as np
import pytest
from astropy import units as u
-from ..math import plasma_dispersion_func, plasma_dispersion_func_deriv
+from ..mathematics import plasma_dispersion_func, plasma_dispersion_func_deriv
def test_plasma_dispersion_func():
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_removed_files",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 2
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | astropy==6.0.1
astropy-iers-data==0.2025.3.31.0.36.18
exceptiongroup==1.2.2
iniconfig==2.1.0
numpy==1.26.4
packaging==24.2
-e git+https://github.com/PlasmaPy/PlasmaPy.git@e8213d681289f0484c9950d01415ca0fc27585e0#egg=plasmapy
pluggy==1.5.0
pyerfa==2.0.1.5
pytest==8.3.5
PyYAML==6.0.2
scipy==1.13.1
tomli==2.2.1
| name: PlasmaPy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- astropy==6.0.1
- astropy-iers-data==0.2025.3.31.0.36.18
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- numpy==1.26.4
- packaging==24.2
- pluggy==1.5.0
- pyerfa==2.0.1.5
- pytest==8.3.5
- pyyaml==6.0.2
- scipy==1.13.1
- tomli==2.2.1
prefix: /opt/conda/envs/PlasmaPy
| [
"plasmapy/mathematics/tests/__init__.py::test_plasma_dispersion_func",
"plasmapy/mathematics/tests/__init__.py::test_plasma_dispersion_func_deriv",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_deriv"
]
| [
"plasmapy/mathematics/mathematics.py::plasmapy.mathematics.mathematics.plasma_dispersion_func",
"plasmapy/mathematics/mathematics.py::plasmapy.mathematics.mathematics.plasma_dispersion_func_deriv"
]
| []
| []
| BSD 3-Clause "New" or "Revised" License | 1,957 | [
"plasmapy/math/__init__.py",
"docs/math/index.rst",
"plasmapy/mathematics/__init__.py",
"plasmapy/__init__.py",
"docs/mathematics/index.rst",
"docs/index.rst"
]
| [
"plasmapy/math/__init__.py",
"docs/math/index.rst",
"plasmapy/mathematics/__init__.py",
"plasmapy/__init__.py",
"docs/mathematics/index.rst",
"docs/index.rst"
]
|
|
mirumee__prices-26 | e56c162aa254f17ab3fa50b718d74e491a90b913 | 2017-12-15 10:37:39 | 3f1ee61f5855334b5c7d07418a4b9f50873d5973 | diff --git a/prices/price.py b/prices/price.py
index ffc15bb..549dfab 100644
--- a/prices/price.py
+++ b/prices/price.py
@@ -14,6 +14,10 @@ class Price(object):
if not isinstance(net, Amount) or not isinstance(gross, Amount):
raise TypeError('Price requires two amounts, got %r, %r' % (
net, gross))
+ if net.currency != gross.currency:
+ raise ValueError(
+ 'Amounts given in different currencies: %r and %r' % (
+ net.currency, gross.currency))
self.net = net
self.gross = gross
diff --git a/setup.py b/setup.py
index 7b0a652..02f7aac 100755
--- a/setup.py
+++ b/setup.py
@@ -18,7 +18,7 @@ setup(
author_email='[email protected]',
description='Python price handling for humans',
license='BSD',
- version='1.0.0-beta',
+ version='1.0.1-beta',
url='https://github.com/mirumee/prices',
packages=['prices'],
install_requires=['babel'],
| Price can be created with net and gross with different currencies
Right now we allow creating a Price whose net and gross Amounts are in different currencies.
`Price(net=Amount('25', 'USD'), gross=Amount('30', 'EUR'))`
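A hedged sketch of the check that should fail once this is fixed (import path assumed; it mirrors the regression test added below):
```python
import pytest
from prices import Amount, Price  # import path assumed

# constructing a Price from amounts in different currencies
# should raise once the constructor validates currencies
with pytest.raises(ValueError):
    Price(net=Amount(1, 'USD'), gross=Amount(2, 'EUR'))
```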
This should not be allowed and should raise an exception. | mirumee/prices | diff --git a/tests/test_price.py b/tests/test_price.py
index 9f2f214..2a7465c 100644
--- a/tests/test_price.py
+++ b/tests/test_price.py
@@ -10,6 +10,11 @@ def test_construction():
Price(1, 1)
+def test_construction_different_currencies():
+ with pytest.raises(ValueError):
+ Price(net=Amount(1, 'USD'), gross=Amount(2, 'EUR'))
+
+
def test_addition():
price1 = Price(Amount(10, 'USD'), Amount(15, 'USD'))
price2 = Price(Amount(20, 'USD'), Amount(30, 'USD'))
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 2
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
importlib-metadata==4.8.3
iniconfig==1.1.1
packaging==21.3
pluggy==1.0.0
-e git+https://github.com/mirumee/prices.git@e56c162aa254f17ab3fa50b718d74e491a90b913#egg=prices
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
pytz==2025.2
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: prices
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- babel==2.11.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytz==2025.2
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/prices
| [
"tests/test_price.py::test_construction_different_currencies"
]
| []
| [
"tests/test_price.py::test_construction",
"tests/test_price.py::test_addition",
"tests/test_price.py::test_subtraction",
"tests/test_price.py::test_multiplication",
"tests/test_price.py::test_division",
"tests/test_price.py::test_comparison",
"tests/test_price.py::test_quantize",
"tests/test_price.py::test_currency",
"tests/test_price.py::test_tax",
"tests/test_price.py::test_repr",
"tests/test_price.py::test_sum"
]
| []
| BSD License | 1,958 | [
"setup.py",
"prices/price.py"
]
| [
"setup.py",
"prices/price.py"
]
|
|
inhumantsar__python-ec2-reaper-8 | 9e69a1f1bf4e98c8d22ee62b5870bbb0a39b91be | 2017-12-15 20:22:55 | d4b0f08b945f95f550149482486c4301f87f3619 | diff --git a/Makefile b/Makefile
deleted file mode 100644
index 296e6d4..0000000
--- a/Makefile
+++ /dev/null
@@ -1,87 +0,0 @@
-.PHONY: clean clean-test clean-pyc clean-build docs help
-.DEFAULT_GOAL := help
-define BROWSER_PYSCRIPT
-import os, webbrowser, sys
-try:
- from urllib import pathname2url
-except:
- from urllib.request import pathname2url
-
-webbrowser.open("file://" + pathname2url(os.path.abspath(sys.argv[1])))
-endef
-export BROWSER_PYSCRIPT
-
-define PRINT_HELP_PYSCRIPT
-import re, sys
-
-for line in sys.stdin:
- match = re.match(r'^([a-zA-Z_-]+):.*?## (.*)$$', line)
- if match:
- target, help = match.groups()
- print("%-20s %s" % (target, help))
-endef
-export PRINT_HELP_PYSCRIPT
-BROWSER := python -c "$$BROWSER_PYSCRIPT"
-
-help:
- @python -c "$$PRINT_HELP_PYSCRIPT" < $(MAKEFILE_LIST)
-
-clean: clean-build clean-pyc clean-test ## remove all build, test, coverage and Python artifacts
-
-
-clean-build: ## remove build artifacts
- rm -fr build/
- rm -fr dist/
- rm -fr .eggs/
- find . -name '*.egg-info' -exec rm -fr {} +
- find . -name '*.egg' -exec rm -f {} +
-
-clean-pyc: ## remove Python file artifacts
- find . -name '*.pyc' -exec rm -f {} +
- find . -name '*.pyo' -exec rm -f {} +
- find . -name '*~' -exec rm -f {} +
- find . -name '__pycache__' -exec rm -fr {} +
-
-clean-test: ## remove test and coverage artifacts
- rm -fr .tox/
- rm -f .coverage
- rm -fr htmlcov/
-
-lint: ## check style with flake8
- flake8 ec2_reaper tests
-
-test: ## run tests quickly with the default Python
- py.test
-
-
-test-all: ## run tests on every Python version with tox
- tox
-
-coverage: ## check code coverage quickly with the default Python
- coverage run --source ec2_reaper -m pytest
- coverage report -m
- coverage html
- $(BROWSER) htmlcov/index.html
-
-docs: ## generate Sphinx HTML documentation, including API docs
- rm -f docs/ec2_reaper.rst
- rm -f docs/modules.rst
- sphinx-apidoc -o docs/ ec2_reaper
- $(MAKE) -C docs clean
- $(MAKE) -C docs html
- $(BROWSER) docs/_build/html/index.html
-
-servedocs: docs ## compile the docs watching for changes
- watchmedo shell-command -p '*.rst' -c '$(MAKE) -C docs html' -R -D .
-
-release: clean ## package and upload a release
- python setup.py sdist upload
- python setup.py bdist_wheel upload
-
-dist: clean ## builds source and wheel package
- python setup.py sdist
- python setup.py bdist_wheel
- ls -l dist
-
-install: clean ## install the package to the active Python's site-packages
- python setup.py install
diff --git a/ec2_reaper/aws_lambda.py b/ec2_reaper/aws_lambda.py
index ea519d0..c51e330 100644
--- a/ec2_reaper/aws_lambda.py
+++ b/ec2_reaper/aws_lambda.py
@@ -14,8 +14,6 @@ def _is_py3():
log = logging.getLogger(__name__)
-DEFAULT_SLACK_ENDPOINT = ''
-
MIN_AGE = os.environ.get('MIN_AGE', ec2_reaper.DEFAULT_MIN_AGE)
REGIONS = os.environ.get('REGIONS', ec2_reaper.DEFAULT_REGIONS)
REGIONS = REGIONS.split(' ') if isinstance(REGIONS, str) else REGIONS
@@ -36,7 +34,7 @@ else:
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('boto3').setLevel(logging.WARNING)
-# this is necessary because tz-aware dts aren't JSON serializable by default
+# so that we can send tz-aware datetimes through json
class DateTimeJSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, datetime):
@@ -47,7 +45,7 @@ def _respond(body, error=True, headers=None, status_code=500):
o = {'statusCode': status_code, 'body': body}
if headers:
o['headers'] = headers
- return json.dumps(o, cls=DateTimeJSONEncoder)
+ return o
def _get_expires(launch_time, min_age=MIN_AGE):
# if launch_time is naive, assume UTC
@@ -66,7 +64,8 @@ def _notify(msg, attachments=[]):
data = {'text': msg, 'attachements': attachments}
headers = {'Content-Type': 'application/json'}
- r = requests.post(SLACK_ENDPOINT, json=data, headers=headers)
+ r = requests.post(SLACK_ENDPOINT, headers=headers,
+ data=json.dumps(data, cls=DateTimeJSONEncoder))
if r.status_code != 200:
log.error('Slack notification failed: (HTTP {}) {}'.format(r.status_code, r.text))
@@ -127,4 +126,7 @@ def handler(event, context):
})
_notify(msg, attachments)
- return _respond(reaperlog, error=False, status_code=200)
+ r = {'reaped': len(reaped), 'matches_under_min_age': len(too_young),
+ 'tag_matches': len([i for i in reaperlog if i['tag_match']]),
+ 'instances': len(reaperlog), 'log': reaperlog}
+ return _respond(r, error=False, status_code=200)
| Unserializable datetimes strike again!
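For context: the traceback below comes from `requests.post(SLACK_ENDPOINT, json=data, ...)`, which serializes with the stock `json` encoder and so never goes through the module's `DateTimeJSONEncoder`. A standalone Python 3 sketch of the failure mode and the encoder-based workaround (illustrative, not the project code verbatim):
```python
import json
from datetime import datetime, timezone

payload = {'launch_time': datetime.now(timezone.utc)}

try:
    json.dumps(payload)  # stock encoder rejects datetime objects
except TypeError as exc:
    print(exc)

class DateTimeJSONEncoder(json.JSONEncoder):
    # mirrors the project's encoder: render datetimes as ISO 8601 strings
    def default(self, o):
        if isinstance(o, datetime):
            return o.isoformat()
        return super().default(o)

print(json.dumps(payload, cls=DateTimeJSONEncoder))  # works
```
The patch above applies the same idea by pre-serializing with `data=json.dumps(data, cls=DateTimeJSONEncoder)` instead of passing `json=data` to requests.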
```
$ sls invoke -f cron
{
"stackTrace": [
[
"/var/task/handler.py",
4,
"run",
"return ec2_reaper.aws_lambda.handler(event, context)"
],
[
"/var/task/ec2_reaper/aws_lambda.py",
128,
"handler",
"_notify(msg, attachments)"
],
[
"/var/task/ec2_reaper/aws_lambda.py",
69,
"_notify",
"r = requests.post(SLACK_ENDPOINT, json=data, headers=headers)"
],
[
"/var/task/requests/api.py",
112,
"post",
"return request('post', url, data=data, json=json, **kwargs)"
],
[
"/var/task/requests/api.py",
58,
"request",
"return session.request(method=method, url=url, **kwargs)"
],
[
"/var/task/requests/sessions.py",
494,
"request",
"prep = self.prepare_request(req)"
],
[
"/var/task/requests/sessions.py",
437,
"prepare_request",
"hooks=merge_hooks(request.hooks, self.hooks),"
],
[
"/var/task/requests/models.py",
308,
"prepare",
"self.prepare_body(data, files, json)"
],
[
"/var/task/requests/models.py",
458,
"prepare_body",
"body = complexjson.dumps(json)"
],
[
"/usr/lib64/python2.7/json/__init__.py",
244,
"dumps",
"return _default_encoder.encode(obj)"
],
[
"/usr/lib64/python2.7/json/encoder.py",
207,
"encode",
"chunks = self.iterencode(o, _one_shot=True)"
],
[
"/usr/lib64/python2.7/json/encoder.py",
270,
"iterencode",
"return _iterencode(o, 0)"
],
[
"/usr/lib64/python2.7/json/encoder.py",
184,
"default",
"raise TypeError(repr(o) + \" is not JSON serializable\")"
]
],
"errorType": "TypeError",
"errorMessage": "datetime.datetime(2017, 12, 15, 20, 4, 33, tzinfo=tzlocal()) is not JSON serializable"
}
Error --------------------------------------------------
Invoked function failed
For debugging logs, run again after setting the "SLS_DEBUG=*" environment variable.
Get Support --------------------------------------------
Docs: docs.serverless.com
Bugs: github.com/serverless/serverless/issues
Forums: forum.serverless.com
Chat: gitter.im/serverless/serverless
Your Environment Information -----------------------------
OS: linux
Node Version: 6.11.4
Serverless Version: 1.24.1
``` | inhumantsar/python-ec2-reaper | diff --git a/tests/test_lambda_handler.py b/tests/test_lambda_handler.py
index 08bd129..bc153b4 100644
--- a/tests/test_lambda_handler.py
+++ b/tests/test_lambda_handler.py
@@ -10,10 +10,11 @@ logging.basicConfig(level=logging.DEBUG)
logging.getLogger('botocore').setLevel(logging.INFO)
logging.getLogger('boto3').setLevel(logging.INFO)
-if sys.version_info >= (3, 0):
- from unittest.mock import patch
-else:
+if sys.version_info < (3, 0) or (sys.version_info >= (3, 5) and
+ sys.version_info < (3, 6)):
from mock import patch
+else:
+ from unittest.mock import patch
# when no results, handler should have called reap, *not* called (slack) notify,
# and should have returned a happy response json obj,
@@ -21,12 +22,13 @@ else:
@patch.object(aws_lambda, '_notify')
def test_reap_no_results(mock_notify, mock_reap):
mock_reap.return_value = []
- r = json.loads(aws_lambda.handler({}, {}))
+ r = aws_lambda.handler({}, {})
mock_notify.assert_not_called()
mock_reap.assert_called_once()
assert r['statusCode'] == 200
- assert r['body'] == []
+ assert r['body']['log'] == []
+ assert r['body']['reaped'] == 0
# with pos and neg results, handler should have called reap,
# called (slack) notify, and should have returned a happy response json obj with
@@ -46,9 +48,10 @@ def test_reap_2neg_1pos(mock_notify, mock_reap):
'launch_time': match_time, 'reaped': True, 'region': 'us-east-1'},
]
mock_reap.return_value = mock_reap_results
- r = json.loads(aws_lambda.handler({}, {}))
+ r = aws_lambda.handler({}, {})
mock_notify.assert_called()
mock_reap.assert_called_once()
assert r['statusCode'] == 200
- assert r['body'] == json.loads(json.dumps(mock_reap_results, cls=aws_lambda.DateTimeJSONEncoder))
+ assert r['body']['log'] == mock_reap_results
+ assert r['body']['reaped'] == 1
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_removed_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 1
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt",
"requirements_dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
boto3==1.23.10
botocore==1.26.10
bump2version==1.0.1
bumpversion==0.6.0
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
click==8.0.4
coverage==6.2
cryptography==40.0.2
dataclasses==0.8
distlib==0.3.9
docutils==0.17.1
-e git+https://github.com/inhumantsar/python-ec2-reaper.git@9e69a1f1bf4e98c8d22ee62b5870bbb0a39b91be#egg=ec2_reaper
filelock==3.4.1
flake8==5.0.4
idna==3.10
imagesize==1.4.1
importlib-metadata==4.2.0
importlib-resources==5.4.0
iniconfig==1.1.1
Jinja2==3.0.3
jmespath==0.10.0
MarkupSafe==2.0.1
mccabe==0.7.0
mock==5.2.0
moto==4.0.13
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
py==1.11.0
pycodestyle==2.9.1
pycparser==2.21
pyflakes==2.5.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytest-runner==5.3.2
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.1
requests==2.27.1
responses==0.17.0
s3transfer==0.5.2
six==1.17.0
slacker==0.14.0
snowballstemmer==2.2.0
Sphinx==4.3.2
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
toml==0.10.2
tomli==1.2.3
tox==3.28.0
typing_extensions==4.1.1
urllib3==1.26.20
virtualenv==20.16.2
watchdog==2.3.1
Werkzeug==2.0.3
xmltodict==0.14.2
zipp==3.6.0
| name: python-ec2-reaper
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- boto3==1.23.10
- botocore==1.26.10
- bump2version==1.0.1
- bumpversion==0.6.0
- cffi==1.15.1
- charset-normalizer==2.0.12
- click==8.0.4
- coverage==6.2
- cryptography==40.0.2
- dataclasses==0.8
- distlib==0.3.9
- docutils==0.17.1
- filelock==3.4.1
- flake8==5.0.4
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.2.0
- importlib-resources==5.4.0
- iniconfig==1.1.1
- jinja2==3.0.3
- jmespath==0.10.0
- markupsafe==2.0.1
- mccabe==0.7.0
- mock==5.2.0
- moto==4.0.13
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pycparser==2.21
- pyflakes==2.5.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-runner==5.3.2
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.1
- requests==2.27.1
- responses==0.17.0
- s3transfer==0.5.2
- six==1.17.0
- slacker==0.14.0
- snowballstemmer==2.2.0
- sphinx==4.3.2
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- toml==0.10.2
- tomli==1.2.3
- tox==3.28.0
- typing-extensions==4.1.1
- urllib3==1.26.20
- virtualenv==20.16.2
- watchdog==2.3.1
- werkzeug==2.0.3
- xmltodict==0.14.2
- zipp==3.6.0
prefix: /opt/conda/envs/python-ec2-reaper
| [
"tests/test_lambda_handler.py::test_reap_no_results",
"tests/test_lambda_handler.py::test_reap_2neg_1pos"
]
| []
| []
| []
| BSD License | 1,959 | [
"Makefile",
"ec2_reaper/aws_lambda.py"
]
| [
"Makefile",
"ec2_reaper/aws_lambda.py"
]
|
|
Instagram__MonkeyType-23 | 666b2a1c5fc09f219a9ef5811af68946cd9c1830 | 2017-12-16 06:01:23 | 1f7b42773edd42c38e78e49c2a0e585fcf109b0d | carljm: This approach looks fine to me; thanks for the report and fix!
Normally we try to automatically provide the right imports for annotations too, so `monkeytype apply` doesn't result in modules with missing imports. However, I don't think that's possible with the current runtime implementation of `NewType`; the calling module is not stored anywhere in the resulting function object, so there's no way we can introspect it. So unless we change that in `typing.py` we're stuck with the limitation of not being able to automatically add NewType imports.
It would be great to add tests for NewType in `test_stubs.py`; thanks!
carljm: Er, never mind about generating imports; `NewType` annotations can only occur as existing annotations, never as traced types. So the import should always be present already.
folz: Added a test for this and updated the changelog. I think you're right that this doesn't need to worry about imports - if the import isn't present, it wasn't valid to mypy in the first place.
carljm: Awesome, thank you! | diff --git a/.gitignore b/.gitignore
index a31079d..2f4a0c7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,3 +8,4 @@ htmlcov/
doc/_build
dist/
build/
+.idea/
\ No newline at end of file
diff --git a/CHANGES.rst b/CHANGES.rst
index 3350f81..c3d23d3 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -5,7 +5,7 @@ master
------
* Fix passing args to script run with ``monkeytype run`` (#18; merge of #21).
-
+* Fix generated annotations for NewType types (#22; merge of #23).
17.12.1
-------
diff --git a/monkeytype/stubs.py b/monkeytype/stubs.py
index 574fc35..8abc90b 100644
--- a/monkeytype/stubs.py
+++ b/monkeytype/stubs.py
@@ -319,6 +319,8 @@ def render_annotation(anno: Any) -> str:
elem_type = _get_optional_elem(anno)
rendered = render_annotation(elem_type)
return 'Optional[' + rendered + ']'
+ elif hasattr(anno, '__supertype__'):
+ return anno.__name__
elif getattr(anno, '__module__', None) == 'typing':
return repr(anno).replace('typing.', '')
elif anno is NoneType:
| `NewType` annotations generate invalid type signatures
Given
```python
UserId = NewType('UserId', int)
def get_user(self, user_id: UserId) -> 'User':
```
MonkeyType generates
```python
def get_user(
self,
user_id: <function NewType.<locals>.new_type at 0x1090a52f0>
) -> 'User': ...
```
which then causes retype to fail with this error message:
```
error: /path/to/module.py: invalid syntax (<unknown>, line 18)
```
This happens even when `--exclude-unparsable-defaults` is passed.
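For reference, a small standalone sketch of why a runtime check can recognize `NewType` values: `typing.NewType` attaches `__supertype__` and a usable `__name__` to the callable it returns.
```python
from typing import NewType

UserId = NewType('UserId', int)

print(UserId.__name__)                 # 'UserId' -- usable in a rendered annotation
print(UserId.__supertype__)            # <class 'int'> -- marks this as a NewType
print(hasattr(int, '__supertype__'))   # False -- plain types lack the marker
```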
I was able to come up with a solution to this, which I'll submit a PR for. However, it seems hacky to me and I'd appreciate feedback on it. | Instagram/MonkeyType | diff --git a/tests/test_stubs.py b/tests/test_stubs.py
index bbe5913..acfa771 100644
--- a/tests/test_stubs.py
+++ b/tests/test_stubs.py
@@ -11,6 +11,7 @@ from typing import (
Generator,
Iterator,
List,
+ NewType,
Optional,
Set,
Tuple,
@@ -42,6 +43,8 @@ from monkeytype.tracing import CallTrace
from monkeytype.typing import NoneType
from .util import Dummy
+UserId = NewType('UserId', int)
+
class TestImportMap:
def test_merge(self):
@@ -133,6 +136,10 @@ def has_length_exceeds_120_chars(
return None
+def has_newtype_param(user_id: UserId) -> None:
+ pass
+
+
class TestHasUnparsableDefaults:
@pytest.mark.parametrize(
'func, expected',
@@ -241,6 +248,11 @@ class TestFunctionStub:
expected = 'def test(x: Optional[int] = None) -> None: ...'
assert stub.render() == expected
+ def test_newtype_parameter_annotation(self):
+ stub = FunctionStub('test', inspect.signature(has_newtype_param), FunctionKind.MODULE)
+ expected = 'def test(user_id: UserId) -> None: ...'
+ assert stub.render() == expected
+
def _func_stub_from_callable(func: Callable, strip_modules: List[str] = None):
kind = FunctionKind.from_callable(func)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 1
},
"num_modified_files": 3
} | 17.12 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "Pipfile",
"pip_packages": [
"pytest",
"pytest-smartcov"
],
"pre_install": [
"pip install --user pipenv"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
click==8.0.4
coverage==6.2
distlib==0.3.9
filelock==3.4.1
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
-e git+https://github.com/Instagram/MonkeyType.git@666b2a1c5fc09f219a9ef5811af68946cd9c1830#egg=MonkeyType
packaging==21.3
pathspec==0.9.0
pipenv==2022.4.8
pipfile==0.0.2
platformdirs==2.4.0
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
pytest-smartcov==0.3
retype==21.10.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
typed-ast==1.5.5
typing_extensions==4.1.1
virtualenv==20.17.1
virtualenv-clone==0.5.7
zipp==3.6.0
| name: MonkeyType
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- pipfile=0.0.2=py_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- click==8.0.4
- coverage==6.2
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- packaging==21.3
- pathspec==0.9.0
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-smartcov==0.3
- retype==21.10.0
- tomli==1.2.3
- typed-ast==1.5.5
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/MonkeyType
| [
"tests/test_stubs.py::TestFunctionStub::test_newtype_parameter_annotation"
]
| []
| [
"tests/test_stubs.py::TestImportMap::test_merge",
"tests/test_stubs.py::TestImportBlockStub::test_single_import",
"tests/test_stubs.py::TestImportBlockStub::test_multiple_imports",
"tests/test_stubs.py::TestHasUnparsableDefaults::test_has_unparsable_defaults[simple_add-False]",
"tests/test_stubs.py::TestHasUnparsableDefaults::test_has_unparsable_defaults[has_parsable_defaults-False]",
"tests/test_stubs.py::TestHasUnparsableDefaults::test_has_unparsable_defaults[has_unparsable_default-True]",
"tests/test_stubs.py::TestFunctionStub::test_classmethod",
"tests/test_stubs.py::TestFunctionStub::test_staticmethod",
"tests/test_stubs.py::TestFunctionStub::test_property",
"tests/test_stubs.py::TestFunctionStub::test_simple",
"tests/test_stubs.py::TestFunctionStub::test_with_prefix",
"tests/test_stubs.py::TestFunctionStub::test_strip_modules",
"tests/test_stubs.py::TestFunctionStub::test_async_function",
"tests/test_stubs.py::TestFunctionStub::test_optional_parameter_annotation",
"tests/test_stubs.py::TestFunctionStub::test_optional_union_parameter_annotation",
"tests/test_stubs.py::TestFunctionStub::test_optional_return_annotation",
"tests/test_stubs.py::TestFunctionStub::test_split_parameters_across_multiple_lines",
"tests/test_stubs.py::TestFunctionStub::test_default_none_parameter_annotation",
"tests/test_stubs.py::TestClassStub::test_render",
"tests/test_stubs.py::TestModuleStub::test_render",
"tests/test_stubs.py::TestBuildModuleStubs::test_build_module_stubs",
"tests/test_stubs.py::TestStubIndexBuilder::test_ignore_non_matching_functions",
"tests/test_stubs.py::TestStubIndexBuilder::test_build_index",
"tests/test_stubs.py::TestUpdateSignatureArgs::test_update_arg",
"tests/test_stubs.py::TestUpdateSignatureArgs::test_update_arg_with_anno",
"tests/test_stubs.py::TestUpdateSignatureArgs::test_update_self",
"tests/test_stubs.py::TestUpdateSignatureArgs::test_update_class",
"tests/test_stubs.py::TestUpdateSignatureReturn::test_update_return",
"tests/test_stubs.py::TestUpdateSignatureReturn::test_update_return_with_anno",
"tests/test_stubs.py::TestUpdateSignatureReturn::test_update_yield",
"tests/test_stubs.py::TestUpdateSignatureReturn::test_update_yield_and_return",
"tests/test_stubs.py::TestUpdateSignatureReturn::test_update_yield_none_and_return",
"tests/test_stubs.py::TestUpdateSignatureReturn::test_update_yield_and_return_none",
"tests/test_stubs.py::TestFunctionKind::test_from_callable[a_static_method-FunctionKind.STATIC]",
"tests/test_stubs.py::TestFunctionKind::test_from_callable[a_class_method-FunctionKind.CLASS]",
"tests/test_stubs.py::TestFunctionKind::test_from_callable[an_instance_method-FunctionKind.INSTANCE]",
"tests/test_stubs.py::TestFunctionKind::test_from_callable[a_property-FunctionKind.PROPERTY]",
"tests/test_stubs.py::TestFunctionKind::test_from_callable[a_module_func-FunctionKind.MODULE]",
"tests/test_stubs.py::TestFunctionDefinition::test_has_self[a_static_method-False]",
"tests/test_stubs.py::TestFunctionDefinition::test_has_self[a_class_method-True]",
"tests/test_stubs.py::TestFunctionDefinition::test_has_self[an_instance_method-True]",
"tests/test_stubs.py::TestFunctionDefinition::test_has_self[a_property-True]",
"tests/test_stubs.py::TestFunctionDefinition::test_has_self[a_module_func-False]",
"tests/test_stubs.py::TestFunctionDefinition::test_from_callable[a_static_method-expected0]",
"tests/test_stubs.py::TestFunctionDefinition::test_from_callable[a_class_method-expected1]",
"tests/test_stubs.py::TestFunctionDefinition::test_from_callable[an_instance_method-expected2]",
"tests/test_stubs.py::TestFunctionDefinition::test_from_callable[a_property-expected3]",
"tests/test_stubs.py::TestFunctionDefinition::test_from_callable[a_module_func-expected4]",
"tests/test_stubs.py::TestFunctionDefinition::test_from_callable[an_async_func-expected5]",
"tests/test_stubs.py::TestShrinkTracedTypes::test_shrink_args",
"tests/test_stubs.py::TestShrinkTracedTypes::test_shrink_return",
"tests/test_stubs.py::TestShrinkTracedTypes::test_shrink_yield",
"tests/test_stubs.py::TestGetImportsForAnnotation::test_no_imports[_empty0]",
"tests/test_stubs.py::TestGetImportsForAnnotation::test_no_imports[_empty1]",
"tests/test_stubs.py::TestGetImportsForAnnotation::test_no_imports[not",
"tests/test_stubs.py::TestGetImportsForAnnotation::test_no_imports[int]",
"tests/test_stubs.py::TestGetImportsForAnnotation::test_special_case_types[anno0-expected0]",
"tests/test_stubs.py::TestGetImportsForAnnotation::test_special_case_types[anno1-expected1]",
"tests/test_stubs.py::TestGetImportsForAnnotation::test_user_defined_class",
"tests/test_stubs.py::TestGetImportsForAnnotation::test_container_types[Dict-expected0]",
"tests/test_stubs.py::TestGetImportsForAnnotation::test_container_types[List-expected1]",
"tests/test_stubs.py::TestGetImportsForAnnotation::test_container_types[Set-expected2]",
"tests/test_stubs.py::TestGetImportsForAnnotation::test_container_types[Tuple-expected3]",
"tests/test_stubs.py::TestGetImportsForAnnotation::test_container_types[Type-expected4]",
"tests/test_stubs.py::TestGetImportsForAnnotation::test_container_types[anno5-expected5]",
"tests/test_stubs.py::TestGetImportsForAnnotation::test_nested_class",
"tests/test_stubs.py::TestBuildModuleStubsFromTraces::test_remove_funcs_with_unparsable_defaults"
]
| []
| BSD License | 1,960 | [
".gitignore",
"monkeytype/stubs.py",
"CHANGES.rst"
]
| [
".gitignore",
"monkeytype/stubs.py",
"CHANGES.rst"
]
|
paris-saclay-cds__specio-28 | bf3c46fb34575bb6df7860fb354d0fc173c30f18 | 2017-12-18 10:53:51 | bf3c46fb34575bb6df7860fb354d0fc173c30f18 | diff --git a/doc/_templates/class.rst b/doc/_templates/class.rst
new file mode 100644
index 0000000..3eef974
--- /dev/null
+++ b/doc/_templates/class.rst
@@ -0,0 +1,16 @@
+:mod:`{{module}}`.{{objname}}
+{{ underline }}==============
+
+.. currentmodule:: {{ module }}
+
+.. autoclass:: {{ objname }}
+
+ {% block methods %}
+ .. automethod:: __init__
+ {% endblock %}
+
+.. include:: {{module}}.{{objname}}.examples
+
+.. raw:: html
+
+ <div style='clear:both'></div>
diff --git a/doc/_templates/function.rst b/doc/_templates/function.rst
new file mode 100644
index 0000000..4ba355d
--- /dev/null
+++ b/doc/_templates/function.rst
@@ -0,0 +1,12 @@
+:mod:`{{module}}`.{{objname}}
+{{ underline }}====================
+
+.. currentmodule:: {{ module }}
+
+.. autofunction:: {{ objname }}
+
+.. include:: {{module}}.{{objname}}.examples
+
+.. raw:: html
+
+ <div style='clear:both'></div>
diff --git a/doc/_templates/numpydoc_docstring.py b/doc/_templates/numpydoc_docstring.py
new file mode 100644
index 0000000..fd6a35f
--- /dev/null
+++ b/doc/_templates/numpydoc_docstring.py
@@ -0,0 +1,16 @@
+{{index}}
+{{summary}}
+{{extended_summary}}
+{{parameters}}
+{{returns}}
+{{yields}}
+{{other_parameters}}
+{{attributes}}
+{{raises}}
+{{warns}}
+{{warnings}}
+{{see_also}}
+{{notes}}
+{{references}}
+{{examples}}
+{{methods}}
diff --git a/doc/api.rst b/doc/api.rst
index cb37f66..932c673 100644
--- a/doc/api.rst
+++ b/doc/api.rst
@@ -2,24 +2,59 @@
specio's user API
###################
-.. automodule:: specio.core.functions
+Spectra reader functions
+========================
-----
+These functions represent specio's main interface for the user. They provide a
+common API to read spectra data for a large variety of formats. All read
+functions accept keyword arguments, which are passed on to the format that does
+the actual work. To see what keyword arguments are supported by a specific
+format, use the :func:`.help` function.
-.. autofunction:: specio.help
+Functions for reading:
-.. autofunction :: specio.show_formats
+ * :func:`.specread` - read a file with spectra from the specified uri
-----
+For a larger degree of control, specio provides a function
+:func:`.get_reader`. It returns an :class:`.Reader` object, which can be used
+to read data and meta data in a more controlled manner. This also allows
+specific scientific formats to be exposed in a way that best suits that
+file-format.
-.. autofunction:: specio.specread
+Functions
+---------
-----
+.. autosummary::
+ :toctree: generated/
+ :template: function.rst
-.. autofunction:: specio.get_reader
+ specio.help
+ specio.show_formats
+ specio.specread
+ specio.get_reader
-----
+Classes
+-------
-.. autoclass:: specio.core.format.Reader
- :inherited-members:
- :members:
+.. autosummary::
+ :toctree: generated/
+ :template: class.rst
+
+ specio.core.format.Reader
+
+
+Example datasets
+================
+
+.. automodule:: specio.datasets
+ :no-members:
+ :no-inherited-members:
+
+.. currentmodule:: specio
+
+.. autosummary::
+ :toctree: generated/
+ :template: function.rst
+
+ datasets.load_spc_path
+ datasets.load_fsm_path
diff --git a/doc/conf.py b/doc/conf.py
index 34133ea..089c4b6 100755
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -47,6 +47,7 @@ extensions = ['sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.autosummary',
+ 'sphinx.ext.intersphinx',
'sphinx_gallery.gen_gallery',
'numpydoc',
'sphinx_issues',
@@ -62,19 +63,21 @@ autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
+# intersphinx configuration
+intersphinx_mapping = {
+ 'python': ('https://docs.python.org/{.major}'.format(
+ sys.version_info), None),
+ 'numpy': ('https://docs.scipy.org/doc/numpy/', None),
+ 'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
+ 'matplotlib': ('https://matplotlib.org/', None),
+}
+
+# sphinx-gallery configuration
sphinx_gallery_conf = {
- # path to your examples scripts
- 'examples_dirs': '../examples',
- # path where to save gallery generated examples
- 'gallery_dirs': 'auto_examples',
- # to make references clickable
'doc_module': 'specio',
+ 'backreferences_dir': os.path.join('generated'),
'reference_url': {
- 'specio': None,
- 'matplotlib': 'http://matplotlib.org',
- 'numpy': 'http://docs.scipy.org/doc/numpy-1.11.0',
- 'scipy': 'http://docs.scipy.org/doc/scipy-0.18.0/reference'
- }
+ 'specio': None}
}
# Add any paths that contain templates here, relative to this directory.
diff --git a/examples/plot_read_multiple_files.py b/examples/plot_read_multiple_files.py
index e006a0e..b578887 100644
--- a/examples/plot_read_multiple_files.py
+++ b/examples/plot_read_multiple_files.py
@@ -10,13 +10,14 @@ folder.
from __future__ import print_function
import os
+import sys
import matplotlib.pyplot as plt
from specio import specread
# Get the path to the data relatively to this example
-DATA_PATH = os.path.dirname(__file__)
+DATA_PATH = os.path.dirname(sys.argv[0])
spc_filenames = os.path.join(DATA_PATH, 'data', '*.spc')
print('The SPC files will be search in: {}'.format(spc_filenames))
diff --git a/specio/plugins/fsm.py b/specio/plugins/fsm.py
index bcb257a..9e11f7e 100644
--- a/specio/plugins/fsm.py
+++ b/specio/plugins/fsm.py
@@ -6,7 +6,10 @@
from __future__ import absolute_import, print_function, division
+from os.path import basename
+
import numpy as np
+from six import string_types
from .. import formats
from ..core import Format
@@ -267,6 +270,10 @@ class FSM(Format):
wavelength = np.arange(meta['z_start'],
meta['z_end'] + meta['z_delta'],
meta['z_delta'])
+ if isinstance(fsm_file, string_types):
+ meta['filename'] = basename(fsm_file)
+ else:
+ meta['filename'] = basename(fsm_file.name)
return Spectrum(spectrum, wavelength, meta)
diff --git a/specio/plugins/spc.py b/specio/plugins/spc.py
index c61fe41..53c2510 100644
--- a/specio/plugins/spc.py
+++ b/specio/plugins/spc.py
@@ -8,8 +8,10 @@ from __future__ import absolute_import, print_function, division
import struct
import warnings
+from os.path import basename
import numpy as np
+from six import string_types
from .. import formats
from ..core import Format
@@ -91,7 +93,7 @@ class SPC(Format):
return meta
- def _spc_to_numpy(self, spc_file):
+ def _spc_to_numpy(self, spc_file, spc_filename):
"""Convert the SPC File data to spectrum data.
Parameters
@@ -99,6 +101,9 @@ class SPC(Format):
spc_file : spc.File
The SPC File to be converted.
+ spc_filename : string
+ The SPC filename to be added to the dictionary.
+
Returns
-------
spectrum : util.Spectrum
@@ -106,6 +111,7 @@ class SPC(Format):
"""
meta = self._meta_data_from_spc(spc_file)
+ meta['filename'] = basename(spc_filename)
if spc_file.dat_fmt in ('gx-y', 'x-y'):
spectrum = np.squeeze([f.y for f in spc_file.sub])
wavelength = spc_file.x
@@ -121,7 +127,7 @@ class SPC(Format):
import spc
# Open the reader
self._fp = self.request.get_local_filename()
- self._data = self._spc_to_numpy(spc.File(self._fp))
+ self._data = self._spc_to_numpy(spc.File(self._fp), self._fp)
self._length = len(self._data)
def _close(self):
| Factorize tests for all readers
It should be possible to make common tests for the different formats (a parametrized sketch follows this list):
- read one file
- read multiple files | paris-saclay-cds/specio | diff --git a/specio/plugins/tests/test_common.py b/specio/plugins/tests/test_common.py
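A minimal sketch of what such a factorized test could look like, using `pytest.mark.parametrize` over the bundled dataset helpers (`load_fsm_path`/`load_spc_path`); this is an illustrative shape only, not the implementation:

```python
# Hedged sketch of a shared, parametrized reader test: one test body
# exercised against every bundled file format.
import pytest

from specio import specread
from specio.core import Spectrum
from specio.datasets import load_fsm_path, load_spc_path


@pytest.mark.parametrize("filename", [load_fsm_path(), load_spc_path()])
def test_read_one_file(filename):
    spec = specread(filename)
    # Every plugin should hand back the common Spectrum container,
    # with a wavelength axis matching the last spectrum dimension.
    assert isinstance(spec, Spectrum)
    assert spec.spectrum.shape[-1] == spec.wavelength.shape[0]
```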
new file mode 100644
index 0000000..bab8387
--- /dev/null
+++ b/specio/plugins/tests/test_common.py
@@ -0,0 +1,26 @@
+"""Common tests using the toy data."""
+
+# Copyright (c) 2017
+# Authors: Guillaume Lemaitre <[email protected]>
+# License: BSD 3 clause
+
+from os.path import basename
+
+import pytest
+
+from specio import specread
+from specio.core import Spectrum
+from specio.datasets import load_fsm_path
+from specio.datasets import load_spc_path
+
+
[email protected](
+ "filename,spectrum_shape,wavelength_shape",
+ [(load_fsm_path(), (7998, 1641), (1641,)),
+ (load_spc_path(), (1911,), (1911,))])
+def test_toy_data(filename, spectrum_shape, wavelength_shape):
+ spec = specread(filename)
+ assert isinstance(spec, Spectrum)
+ assert spec.spectrum.shape == spectrum_shape
+ assert spec.wavelength.shape == wavelength_shape
+ assert spec.meta['filename'] == basename(filename)
diff --git a/specio/plugins/tests/test_fsm.py b/specio/plugins/tests/test_fsm.py
index 1af2d16..b5fa2d2 100644
--- a/specio/plugins/tests/test_fsm.py
+++ b/specio/plugins/tests/test_fsm.py
@@ -28,12 +28,3 @@ def test_fsm_format():
assert spec.spectrum.shape == (1641,)
assert spec.wavelength.shape == (1641,)
assert spec.spectrum[0] == pytest.approx(38.656551)
-
-
[email protected](
- "filename,spectrum_shape,wavelength_shape",
- [(load_fsm_path(), (7998, 1641), (1641,))])
-def test_fsm_file(filename, spectrum_shape, wavelength_shape):
- spec = specread(filename)
- assert spec.spectrum.shape == spectrum_shape
- assert spec.wavelength.shape == wavelength_shape
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 5
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-mock",
"sphinx",
"sphinx-gallery",
"sphinx_rtd_theme",
"numpydoc",
"matplotlib"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
cycler==0.11.0
docutils==0.18.1
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
Jinja2==3.0.3
kiwisolver==1.3.1
MarkupSafe==2.0.1
matplotlib==3.3.4
numpy==1.19.5
numpydoc==1.1.0
packaging==21.3
Pillow==8.4.0
pluggy==1.0.0
py==1.11.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
pytest-mock==3.6.1
python-dateutil==2.9.0.post0
pytz==2025.2
requests==2.27.1
six==1.17.0
snowballstemmer==2.2.0
spc @ git+https://github.com/glemaitre/spc.git@44b67d49e1e4fe9364e7cbce9a93086037703511
-e git+https://github.com/paris-saclay-cds/specio.git@bf3c46fb34575bb6df7860fb354d0fc173c30f18#egg=specio
Sphinx==5.3.0
sphinx-gallery==0.10.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: specio
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- coverage==6.2
- cycler==0.11.0
- docutils==0.18.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jinja2==3.0.3
- kiwisolver==1.3.1
- markupsafe==2.0.1
- matplotlib==3.3.4
- numpy==1.19.5
- numpydoc==1.1.0
- packaging==21.3
- pillow==8.4.0
- pluggy==1.0.0
- py==1.11.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- requests==2.27.1
- six==1.17.0
- snowballstemmer==2.2.0
- spc==0.4.0
- sphinx==5.3.0
- sphinx-gallery==0.10.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/specio
| [
"specio/plugins/tests/test_common.py::test_toy_data[/specio/specio/datasets/data/spectra.fsm-spectrum_shape0-wavelength_shape0]",
"specio/plugins/tests/test_common.py::test_toy_data[/specio/specio/datasets/data/spectra.spc-spectrum_shape1-wavelength_shape1]"
]
| []
| [
"specio/plugins/tests/test_fsm.py::test_fsm_format"
]
| []
| BSD 3-Clause "New" or "Revised" License | 1,961 | [
"doc/_templates/class.rst",
"doc/conf.py",
"specio/plugins/spc.py",
"examples/plot_read_multiple_files.py",
"specio/plugins/fsm.py",
"doc/_templates/numpydoc_docstring.py",
"doc/_templates/function.rst",
"doc/api.rst"
]
| [
"doc/_templates/class.rst",
"doc/conf.py",
"specio/plugins/spc.py",
"examples/plot_read_multiple_files.py",
"specio/plugins/fsm.py",
"doc/_templates/numpydoc_docstring.py",
"doc/_templates/function.rst",
"doc/api.rst"
]
|
|
ofek__bit-24 | 37182647309b54934fb49078d4c7a2fb21d7eb47 | 2017-12-18 20:34:04 | 37182647309b54934fb49078d4c7a2fb21d7eb47 | teran-mckinney: @bjarnemagnussen, could you also review this since it impacts your pull request?
Unfortunately, I know you'll have to rebase and undo most of this with your change.
Thank you!
teran-mckinney: @ofek, could you consider merging this if it looks good to you? Would also appreciate if you can bump the version as this can prevent accidental Bitcoin loss and the tests still pass.
I will merge it into bitcash shortly.
teran-mckinney: @ofek do you have the time to review this? Thank you!
ofek: @teran-mckinney Sorry! I recently started a new job which has decreased my OSS time. Can you please ping me again in a few days? Also, would you like write access/become a maintainer? Do you have time? I don't want this project to die :smile:
teran-mckinney: I completely understand. Congratulations on the new job!
Wow, most certainly. I'm happy to take on bit, especially for smaller patches like this. I have the time for it and I'm already using bit in a couple projects.
I'll ping you in a few days. Thanks for getting back to me.
teran-mckinney: Hey @ofek. Just pinging you. | diff --git a/bit/format.py b/bit/format.py
index 5ff8679..088d990 100644
--- a/bit/format.py
+++ b/bit/format.py
@@ -35,6 +35,8 @@ def verify_sig(signature, data, public_key):
def address_to_public_key_hash(address):
+ # Raise ValueError if we cannot identify the address.
+ get_version(address)
return b58decode_check(address)[1:]
| Pay to scripthash
I have no experience with paying to scripthashes except when it's worked automagically for me before.
In #12 it looks like the initial workings of P2SH are being added. I'm not sure how straightforward P2SH is.
But... for the time being shouldn't we throw an exception if we try to `send()` to an address that does not begin with 1? Are there any side effects to doing so?
I think I just lost a fair bit of coin by not reviewing this properly. Hopefully if we add in an exception we can keep others from doing the same. | ofek/bit | diff --git a/tests/samples.py b/tests/samples.py
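One shape such a guard could take, sketched under hedged assumptions: the version bytes are the standard Base58Check P2PKH prefixes (0x00 on mainnet for addresses starting with `1`, 0x6f on testnet for `m`/`n`), and the `bit.base58` import path is assumed rather than confirmed — the decoder mirrors the `b58decode_check` helper already used in `bit/format.py`:

```python
# Hedged sketch of a pre-send guard: refuse anything that is not P2PKH.
from bit.base58 import b58decode_check  # assumed import path

P2PKH_VERSIONS = frozenset([0x00, 0x6f])  # mainnet '1...', testnet 'm'/'n'


def assert_p2pkh(address):
    # bytearray keeps the version byte an int on both Python 2 and 3.
    version = bytearray(b58decode_check(address))[0]
    if version not in P2PKH_VERSIONS:
        raise ValueError(
            'version byte 0x{:02x} is not P2PKH; refusing to send '
            '(pay-to-script-hash is not supported yet)'.format(version)
        )
```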
index 8a9ad70..82b64ae 100644
--- a/tests/samples.py
+++ b/tests/samples.py
@@ -1,8 +1,10 @@
BINARY_ADDRESS = b'\x00\x92F\x1b\xdeb\x83\xb4a\xec\xe7\xdd\xf4\xdb\xf1\xe0\xa4\x8b\xd1\x13\xd8&E\xb4\xbf'
BITCOIN_ADDRESS = '1ELReFsTCUY2mfaDTy32qxYiT49z786eFg'
BITCOIN_ADDRESS_COMPRESSED = '1ExJJsNLQDNVVM1s1sdyt1o5P3GC5r32UG'
+BITCOIN_ADDRESS_PAY2SH = '39SrGQEfFXcTYJhBvjZeQja66Cpz82EEUn'
BITCOIN_ADDRESS_TEST = 'mtrNwJxS1VyHYn3qBY1Qfsm3K3kh1mGRMS'
BITCOIN_ADDRESS_TEST_COMPRESSED = 'muUFbvTKDEokGTVUjScMhw1QF2rtv5hxCz'
+BITCOIN_ADDRESS_TEST_PAY2SH = '2NFKbBHzzh32q5DcZJNgZE9sF7gYmtPbawk'
PRIVATE_KEY_BYTES = b'\xc2\x8a\x9f\x80s\x8fw\rRx\x03\xa5f\xcfo\xc3\xed\xf6\xce\xa5\x86\xc4\xfcJR#\xa5\xady~\x1a\xc3'
PRIVATE_KEY_DER = (b"0\x81\x84\x02\x01\x000\x10\x06\x07*\x86H\xce=\x02\x01\x06"
b"\x05+\x81\x04\x00\n\x04m0k\x02\x01\x01\x04 \xc2\x8a\x9f"
diff --git a/tests/test_format.py b/tests/test_format.py
index b338b03..86dcace 100644
--- a/tests/test_format.py
+++ b/tests/test_format.py
@@ -6,9 +6,11 @@ from bit.format import (
public_key_to_address, verify_sig, wif_checksum_check, wif_to_bytes
)
from .samples import (
- BITCOIN_ADDRESS, BITCOIN_ADDRESS_COMPRESSED, BITCOIN_ADDRESS_TEST_COMPRESSED,
- BITCOIN_ADDRESS_TEST, PRIVATE_KEY_BYTES, PUBKEY_HASH, PUBKEY_HASH_COMPRESSED,
- PUBLIC_KEY_COMPRESSED, PUBLIC_KEY_UNCOMPRESSED, PUBLIC_KEY_X, PUBLIC_KEY_Y,
+ BITCOIN_ADDRESS, BITCOIN_ADDRESS_COMPRESSED, BITCOIN_ADDRESS_PAY2SH,
+ BITCOIN_ADDRESS_TEST_COMPRESSED, BITCOIN_ADDRESS_TEST,
+ BITCOIN_ADDRESS_TEST_PAY2SH, PRIVATE_KEY_BYTES, PUBKEY_HASH,
+ PUBKEY_HASH_COMPRESSED, PUBLIC_KEY_COMPRESSED, PUBLIC_KEY_UNCOMPRESSED,
+ PUBLIC_KEY_X, PUBLIC_KEY_Y,
WALLET_FORMAT_COMPRESSED_MAIN, WALLET_FORMAT_COMPRESSED_TEST,
WALLET_FORMAT_MAIN, WALLET_FORMAT_TEST
)
@@ -41,6 +43,14 @@ class TestGetVersion:
with pytest.raises(ValueError):
get_version('dg2dNAjuezub6iJVPNML5pW5ZQvtA9ocL')
+ def test_mainnet_pay2sh(self):
+ with pytest.raises(ValueError):
+ get_version(BITCOIN_ADDRESS_PAY2SH)
+
+ def test_testnet_pay2sh(self):
+ with pytest.raises(ValueError):
+ get_version(BITCOIN_ADDRESS_TEST_PAY2SH)
+
class TestVerifySig:
def test_valid(self):
@@ -146,3 +156,7 @@ def test_point_to_public_key():
def test_address_to_public_key_hash():
assert address_to_public_key_hash(BITCOIN_ADDRESS) == PUBKEY_HASH
assert address_to_public_key_hash(BITCOIN_ADDRESS_COMPRESSED) == PUBKEY_HASH_COMPRESSED
+ with pytest.raises(ValueError):
+ address_to_public_key_hash(BITCOIN_ADDRESS_PAY2SH)
+ with pytest.raises(ValueError):
+ address_to_public_key_hash(BITCOIN_ADDRESS_TEST_PAY2SH)
diff --git a/tests/test_wallet.py b/tests/test_wallet.py
index 44d882d..6fbbfdd 100644
--- a/tests/test_wallet.py
+++ b/tests/test_wallet.py
@@ -238,6 +238,21 @@ class TestPrivateKeyTestnet:
assert current > initial
+ def test_send_pay2sh(self):
+ """
+ We don't yet support pay2sh, so we must throw an exception if we get one.
+ Otherwise, we could send coins into an unrecoverable blackhole, needlessly.
+ pay2sh addresses begin with 2 in testnet and 3 on mainnet.
+ """
+ if TRAVIS and sys.version_info[:2] != (3, 6):
+ return
+
+ private_key = PrivateKeyTestnet(WALLET_FORMAT_COMPRESSED_TEST)
+ private_key.get_unspents()
+
+ with pytest.raises(ValueError):
+ private_key.send([('2NFKbBHzzh32q5DcZJNgZE9sF7gYmtPbawk', 1, 'jpy')])
+
def test_cold_storage(self):
if TRAVIS and sys.version_info[:2] != (3, 6):
return
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[cli,cache]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"codecov",
"coverage"
],
"pre_install": [
"pip install -U setuptools pip"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | appdirs==1.4.4
argon2-cffi==21.3.0
argon2-cffi-bindings==21.2.0
asn1crypto==1.5.1
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
-e git+https://github.com/ofek/bit.git@37182647309b54934fb49078d4c7a2fb21d7eb47#egg=bit
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
click==8.0.4
codecov==2.1.13
coincurve==16.0.0
coverage==6.2
cryptography==40.0.2
dataclasses==0.8
idna==3.10
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
lmdb==1.6.2
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
privy==6.0.0
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pycparser==2.21
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
requests==2.27.1
tinydb==4.7.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.26.20
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: bit
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- appdirs==1.4.4
- argon2-cffi==21.3.0
- argon2-cffi-bindings==21.2.0
- asn1crypto==1.5.1
- cffi==1.15.1
- charset-normalizer==2.0.12
- click==8.0.4
- codecov==2.1.13
- coincurve==16.0.0
- coverage==6.2
- cryptography==40.0.2
- dataclasses==0.8
- idna==3.10
- lmdb==1.6.2
- pip==21.3.1
- privy==6.0.0
- pycparser==2.21
- requests==2.27.1
- setuptools==59.6.0
- tinydb==4.7.0
- urllib3==1.26.20
prefix: /opt/conda/envs/bit
| [
"tests/test_format.py::test_address_to_public_key_hash"
]
| [
"tests/test_wallet.py::TestPrivateKey::test_get_balance",
"tests/test_wallet.py::TestPrivateKey::test_get_unspent",
"tests/test_wallet.py::TestPrivateKey::test_get_transactions",
"tests/test_wallet.py::TestPrivateKeyTestnet::test_get_balance",
"tests/test_wallet.py::TestPrivateKeyTestnet::test_get_unspent",
"tests/test_wallet.py::TestPrivateKeyTestnet::test_get_transactions",
"tests/test_wallet.py::TestPrivateKeyTestnet::test_send",
"tests/test_wallet.py::TestPrivateKeyTestnet::test_send_pay2sh",
"tests/test_wallet.py::TestPrivateKeyTestnet::test_cold_storage"
]
| [
"tests/test_format.py::TestGetVersion::test_mainnet",
"tests/test_format.py::TestGetVersion::test_testnet",
"tests/test_format.py::TestGetVersion::test_invalid",
"tests/test_format.py::TestGetVersion::test_mainnet_pay2sh",
"tests/test_format.py::TestGetVersion::test_testnet_pay2sh",
"tests/test_format.py::TestVerifySig::test_valid",
"tests/test_format.py::TestVerifySig::test_invalid",
"tests/test_format.py::TestBytesToWIF::test_mainnet",
"tests/test_format.py::TestBytesToWIF::test_testnet",
"tests/test_format.py::TestBytesToWIF::test_compressed",
"tests/test_format.py::TestBytesToWIF::test_compressed_testnet",
"tests/test_format.py::TestWIFToBytes::test_mainnet",
"tests/test_format.py::TestWIFToBytes::test_testnet",
"tests/test_format.py::TestWIFToBytes::test_compressed",
"tests/test_format.py::TestWIFToBytes::test_invalid_network",
"tests/test_format.py::TestWIFChecksumCheck::test_wif_checksum_check_main_success",
"tests/test_format.py::TestWIFChecksumCheck::test_wif_checksum_check_test_success",
"tests/test_format.py::TestWIFChecksumCheck::test_wif_checksum_check_compressed_success",
"tests/test_format.py::TestWIFChecksumCheck::test_wif_checksum_check_decode_failure",
"tests/test_format.py::TestWIFChecksumCheck::test_wif_checksum_check_other_failure",
"tests/test_format.py::TestPublicKeyToCoords::test_public_key_to_coords_compressed",
"tests/test_format.py::TestPublicKeyToCoords::test_public_key_to_coords_uncompressed",
"tests/test_format.py::TestPublicKeyToCoords::test_public_key_to_coords_incorrect_length",
"tests/test_format.py::TestPublicKeyToAddress::test_public_key_to_address_compressed",
"tests/test_format.py::TestPublicKeyToAddress::test_public_key_to_address_uncompressed",
"tests/test_format.py::TestPublicKeyToAddress::test_public_key_to_address_incorrect_length",
"tests/test_format.py::TestPublicKeyToAddress::test_public_key_to_address_test_compressed",
"tests/test_format.py::TestPublicKeyToAddress::test_public_key_to_address_test_uncompressed",
"tests/test_format.py::TestCoordsToPublicKey::test_coords_to_public_key_compressed",
"tests/test_format.py::TestCoordsToPublicKey::test_coords_to_public_key_uncompressed",
"tests/test_format.py::test_point_to_public_key",
"tests/test_wallet.py::TestWIFToKey::test_compressed_main",
"tests/test_wallet.py::TestWIFToKey::test_uncompressed_main",
"tests/test_wallet.py::TestWIFToKey::test_compressed_test",
"tests/test_wallet.py::TestWIFToKey::test_uncompressed_test",
"tests/test_wallet.py::TestBaseKey::test_init_default",
"tests/test_wallet.py::TestBaseKey::test_init_from_key",
"tests/test_wallet.py::TestBaseKey::test_init_wif_error",
"tests/test_wallet.py::TestBaseKey::test_public_key_compressed",
"tests/test_wallet.py::TestBaseKey::test_public_key_uncompressed",
"tests/test_wallet.py::TestBaseKey::test_public_point",
"tests/test_wallet.py::TestBaseKey::test_sign",
"tests/test_wallet.py::TestBaseKey::test_verify_success",
"tests/test_wallet.py::TestBaseKey::test_verify_failure",
"tests/test_wallet.py::TestBaseKey::test_to_hex",
"tests/test_wallet.py::TestBaseKey::test_to_bytes",
"tests/test_wallet.py::TestBaseKey::test_to_der",
"tests/test_wallet.py::TestBaseKey::test_to_pem",
"tests/test_wallet.py::TestBaseKey::test_to_int",
"tests/test_wallet.py::TestBaseKey::test_is_compressed",
"tests/test_wallet.py::TestBaseKey::test_equal",
"tests/test_wallet.py::TestPrivateKey::test_alias",
"tests/test_wallet.py::TestPrivateKey::test_init_default",
"tests/test_wallet.py::TestPrivateKey::test_address",
"tests/test_wallet.py::TestPrivateKey::test_to_wif",
"tests/test_wallet.py::TestPrivateKey::test_from_hex",
"tests/test_wallet.py::TestPrivateKey::test_from_der",
"tests/test_wallet.py::TestPrivateKey::test_from_pem",
"tests/test_wallet.py::TestPrivateKey::test_from_int",
"tests/test_wallet.py::TestPrivateKey::test_repr",
"tests/test_wallet.py::TestPrivateKeyTestnet::test_init_default",
"tests/test_wallet.py::TestPrivateKeyTestnet::test_address",
"tests/test_wallet.py::TestPrivateKeyTestnet::test_to_wif",
"tests/test_wallet.py::TestPrivateKeyTestnet::test_from_hex",
"tests/test_wallet.py::TestPrivateKeyTestnet::test_from_der",
"tests/test_wallet.py::TestPrivateKeyTestnet::test_from_pem",
"tests/test_wallet.py::TestPrivateKeyTestnet::test_from_int",
"tests/test_wallet.py::TestPrivateKeyTestnet::test_repr"
]
| []
| MIT License | 1,962 | [
"bit/format.py"
]
| [
"bit/format.py"
]
|
desihub__desiutil-93 | 229031acffe2b18186e4928a8a9832131849dc6d | 2017-12-18 21:13:35 | e720a1e3b4e468af23e59ab618a3b2aaef383ae0 | diff --git a/doc/changes.rst b/doc/changes.rst
index 362be2b..6cb18fc 100644
--- a/doc/changes.rst
+++ b/doc/changes.rst
@@ -7,9 +7,13 @@ Change Log
* Enhance :mod:`desiutil.log` with a context manager (PR `#92`_), and
change the way the log level is set.
+* Avoid logging interference when :func:`desiutil.log.get_logger` is called
+ with different log levels (PR `#93`_).
* Use :mod:`unittest.mock` to increase test coverage.
.. _`#92`: https://github.com/desihub/desiutil/pull/92
+.. _`#93`: https://github.com/desihub/desiutil/pull/93
+
1.9.8 (2017-11-09)
------------------
diff --git a/py/desiutil/log.py b/py/desiutil/log.py
index 6efe7c6..6e9fedb 100644
--- a/py/desiutil/log.py
+++ b/py/desiutil/log.py
@@ -27,6 +27,11 @@ Examples
Simplest possible use:
+>>> from desiutil.log import log
+>>> log.info('This is some information.')
+
+This is exactly equivalent to:
+
>>> from desiutil.log import get_logger
>>> log = get_logger()
>>> log.info('This is some information.')
@@ -51,12 +56,31 @@ Create the logger with a different log level:
"""
from __future__ import absolute_import, division, print_function
+import os
+import sys
import logging
-from os import environ
-from sys import stdout
from warnings import warn
-desi_logger = None
+
+_desiutil_log_root = dict()
+_good_levels = {'DEBUG': logging.DEBUG,
+ 'INFO': logging.INFO,
+ 'WARNING': logging.WARNING,
+ 'ERROR': logging.ERROR,
+ 'CRITICAL': logging.CRITICAL,
+ logging.DEBUG: logging.DEBUG,
+ logging.INFO: logging.INFO,
+ logging.WARNING: logging.WARNING,
+ logging.ERROR: logging.ERROR,
+ logging.CRITICAL: logging.CRITICAL,
+ }
+_level_children = {logging.DEBUG: 'debug',
+ logging.INFO: 'info',
+ logging.WARNING: 'warning',
+ logging.ERROR: 'error',
+ logging.CRITICAL: 'critical',
+ }
+
# Just for convenience to avoid importing logging, we duplicate the logging levels
DEBUG = logging.DEBUG # Detailed information, typically of interest only when diagnosing problems.
@@ -112,17 +136,50 @@ class DesiLogContext(object):
# self.handler.close()
+def _configure_root_logger(timestamp=False, delimiter=':'):
+ """Configure a root logger.
+
+ Parameters
+ ----------
+ timestamp : :class:`bool`, optional
+ If ``True``, add a timestamp to the log message.
+ delimiter : :class:`str`, optional
+ Use `delimiter` to separate fields in the log message (default ``:``).
+
+ Returns
+ -------
+ :class:`str`
+ The name of the root logger, suitable for input to :func:`logging.getLogger`.
+ """
+ root_name = "desiutil.log.dlm" + ''.join(map(str, map(ord, delimiter)))
+ if timestamp:
+ root_name += 'timestamp'
+ if root_name not in _desiutil_log_root:
+ ch = logging.StreamHandler(sys.stdout)
+ fmtfields = ['%(levelname)s', '%(filename)s', '%(lineno)s', '%(funcName)s']
+ if timestamp:
+ fmtfields.append('%(asctime)s')
+ fmtfields.append(' %(message)s')
+ formatter = logging.Formatter(delimiter.join(fmtfields),
+ datefmt='%Y-%m-%dT%H:%M:%S')
+ ch.setFormatter(formatter)
+ _desiutil_log_root[root_name] = logging.getLogger(root_name)
+ _desiutil_log_root[root_name].addHandler(ch)
+ _desiutil_log_root[root_name].setLevel(logging.INFO)
+ return root_name
+
+
def get_logger(level=None, timestamp=False, delimiter=':'):
"""Returns a default DESI logger.
Parameters
----------
level : :class:`int` or :class:`str`, optional
- Debugging level.
+ Set the logging level (default ``INFO``).
timestamp : :class:`bool`, optional
- If set, include a time stamp in the log message.
+ If ``True``, add a timestamp to the log message.
delimiter : :class:`str`, optional
- Use this string to separate fields in the log messages, default ':'.
+ Use `delimiter` to separate fields in the log messages (default ``:``).
Returns
-------
@@ -140,59 +197,27 @@ def get_logger(level=None, timestamp=False, delimiter=':'):
* If :envvar:`DESI_LOGLEVEL` is not set and `level` is ``None``,
the default level is set to INFO.
"""
- global desi_logger
- good_levels = {"DEBUG": DEBUG,
- "INFO": INFO,
- "WARNING": WARNING,
- "ERROR": ERROR,
- "CRITICAL": CRITICAL,
- DEBUG: DEBUG,
- INFO: INFO,
- WARNING: WARNING,
- ERROR: ERROR,
- CRITICAL: CRITICAL}
+ root_name = _configure_root_logger(timestamp=timestamp, delimiter=delimiter)
if level is None:
try:
- level = environ["DESI_LOGLEVEL"].upper()
+ ul = os.environ["DESI_LOGLEVEL"].upper()
except KeyError:
- level = INFO
+ ul = logging.INFO
else:
try:
- level = level.upper()
+ ul = level.upper()
except AttributeError:
# level should be an integer in this case.
- pass
- if level not in good_levels:
- message = ("Ignore level='{0}' " +
- "(only recognize {1}).").format(str(level),
- ', '.join(map(str, good_levels.keys())))
+ ul = level
+ try:
+ gl = _good_levels[ul]
+ except KeyError:
+ message = "Invalid level='{0}' ignored. Setting INFO.".format(str(ul))
warn(message, DesiLogWarning)
- level = INFO
-
- if desi_logger is not None:
- if level is not None:
- desi_logger.setLevel(level)
- return desi_logger
-
- desi_logger = logging.getLogger("DESI")
-
- desi_logger.setLevel(level)
-
- while len(desi_logger.handlers) > 0:
- h = desi_logger.handlers[0]
- desi_logger.removeHandler(h)
-
- ch = logging.StreamHandler(stdout)
-
- fmtfields = ['%(levelname)s', '%(filename)s', '%(lineno)s', '%(funcName)s']
- if timestamp:
- fmtfields.append('%(asctime)s')
- fmt = delimiter.join(fmtfields)
-
- formatter = logging.Formatter(fmt + ': %(message)s', datefmt='%Y-%m-%dT%H:%M:%S')
-
- ch.setFormatter(formatter)
+ gl = logging.INFO
+ log = logging.getLogger(root_name + '.' + _level_children[gl])
+ log.setLevel(gl)
+ return log
- desi_logger.addHandler(ch)
- return desi_logger
+log = get_logger()
| Questions/suggestions for logging module
I have some questions/suggestions about `log.get_logger()` after adapting `desisurvey` and `surveysim` to use it.
Environment variables usually set a default that can be overridden via the API, but `$DESI_LOGLEVEL` overrides the API `level` parameter. I found this confusing and wonder if we should change this.
A command-line script should be able to set the log-level (e.g., via cmd-line options) and then have all library code respect this level. I believe the way this is supposed to work is:
```
# script.py
log = get_logger(WARNING)
# module.py
log = get_logger()
log.info('...')
```
I found two issues with how this works in practice:
1. Calling `get_logger()` always sets the log-level to `INFO`, even after a previous call where a different level was specified.
2. Nothing prevents library code from calling `get_logger(DEBUG)` and changing the log level specified by the top-level script. Library code should probably always call `get_logger()` but we could enforce this by raising an exception on attempts to change an already specified level. Even better, I think, would be to provide separate `get_logger()` (with no level arg) and `set_log_level(level)` methods. A minimal sketch of that split follows this list.
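A minimal sketch of the proposed split, assuming a module-level singleton; the names `_desi_logger` and `set_log_level` are hypothetical, not the existing API:

```python
# Hedged sketch: library code can only fetch the shared logger;
# only the entry-point script picks the level, via a separate call.
import logging

_desi_logger = logging.getLogger('DESI')  # hypothetical module singleton


def get_logger():
    """Return the shared logger without touching its level."""
    return _desi_logger


def set_log_level(level):
    """Called once by the top-level script, never by library code."""
    _desi_logger.setLevel(level)
```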
Finally, it could be useful to support a hierarchy of DESI loggers with different levels. For example, I might want to enable DEBUG messages from the arc calibration only, without being flooded with other DEBUG messages. In practice, this could be implemented by passing an optional component arg to `get_logger()` that uses dot notation to express hierarchy. The level of a child component would then default to the lowest level of any parent, and setting a parent level enforces a maximum level for all children. | desihub/desiutil | diff --git a/py/desiutil/test/test_census.py b/py/desiutil/test/test_census.py
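The standard library's `logging` module already supports this through dotted logger names, so a sketch needs very little code; the `desi.arc`/`desi.sky` component names below are invented for illustration:

```python
# Hedged sketch of per-component levels via logging's dotted-name hierarchy.
import logging

logging.basicConfig(format='%(levelname)s:%(name)s: %(message)s')

desi = logging.getLogger('desi')
desi.setLevel(logging.WARNING)       # default inherited by all children

arc = logging.getLogger('desi.arc')  # the dot makes this a child of 'desi'
arc.setLevel(logging.DEBUG)          # override for this component only

logging.getLogger('desi.sky').debug('suppressed: effective level is WARNING')
arc.debug('emitted: only the arc component runs at DEBUG')
```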
index bc5053c..26831ad 100644
--- a/py/desiutil/test/test_census.py
+++ b/py/desiutil/test/test_census.py
@@ -64,21 +64,23 @@ class TestCensus(unittest.TestCase):
"""Test error-handling function for os.walk().
"""
from ..census import walk_error
- with patch('desiutil.log.desi_logger') as mock:
+ with patch('desiutil.log.get_logger') as mock_get_logger:
+ mock = Mock()
+ mock_get_logger.return_value = mock
try:
raise OSError(2, 'File not found', 'foo.txt')
except OSError as e:
walk_error(e)
- calls = [call.setLevel(20),
- call.error("[Errno 2] File not found: 'foo.txt'")]
+ calls = [call.error("[Errno 2] File not found: 'foo.txt'")]
self.assertListEqual(mock.mock_calls, calls)
- with patch('desiutil.log.desi_logger') as mock:
+ with patch('desiutil.log.get_logger') as mock_get_logger:
+ mock = Mock()
+ mock_get_logger.return_value = mock
try:
raise OSError(2, 'File not found', 'foo.txt', None, 'bar.txt')
except OSError as e:
walk_error(e)
- calls = [call.setLevel(20),
- call.error("[Errno 2] File not found: 'foo.txt' -> " +
+ calls = [call.error("[Errno 2] File not found: 'foo.txt' -> " +
"'bar.txt'")]
self.assertListEqual(mock.mock_calls, calls)
@@ -110,12 +112,13 @@ class TestCensus(unittest.TestCase):
#
# Simulate a simple file.
#
- calls = [call.setLevel(20),
- call.debug("os.stat('{0}')".format(fd)),
+ calls = [call.debug("os.stat('{0}')".format(fd)),
call.warning("{0} does not have correct group id!".format(fd))]
- with patch('desiutil.log.desi_logger') as mock_log:
+ mock_log = Mock()
+ with patch('desiutil.log.get_logger') as mock_get_logger:
with patch.dict('sys.modules', {'os': mock_os,
'os.path': mock_os.path}):
+ mock_get_logger.return_value = mock_log
mock_os.environ = dict()
mock_os.stat.return_value = s
mock_os.path.islink.return_value = False
@@ -128,15 +131,16 @@ class TestCensus(unittest.TestCase):
#
# Simulate an internal link.
#
- calls = [call.setLevel(20),
- call.debug("os.stat('{0}')".format(fd)),
+ calls = [call.debug("os.stat('{0}')".format(fd)),
call.warning("{0} does not have correct group id!".format(fd)),
call.debug("os.lstat('{0}')".format(fd)),
call.warning("{0} does not have correct group id!".format(fd)),
call.debug("Found internal link {0} -> {0}.link.".format(fd))]
- with patch('desiutil.log.desi_logger') as mock_log:
+ mock_log = Mock()
+ with patch('desiutil.log.get_logger') as mock_get_logger:
with patch.dict('sys.modules', {'os': mock_os,
'os.path': mock_os.path}):
+ mock_get_logger.return_value = mock_log
mock_os.environ = dict()
mock_os.stat.return_value = s
mock_os.lstat.return_value = s
@@ -154,15 +158,16 @@ class TestCensus(unittest.TestCase):
#
# Simulate an external link.
#
- calls = [call.setLevel(20),
- call.debug("os.stat('{0}')".format(fd)),
+ calls = [call.debug("os.stat('{0}')".format(fd)),
call.warning("{0} does not have correct group id!".format(fd)),
call.debug("os.lstat('{0}')".format(fd)),
call.warning("{0} does not have correct group id!".format(fd)),
call.debug("Found external link {0} -> {1}.".format(fd, extlink))]
- with patch('desiutil.log.desi_logger') as mock_log:
+ mock_log = Mock()
+ with patch('desiutil.log.get_logger') as mock_get_logger:
with patch.dict('sys.modules', {'os': mock_os,
'os.path': mock_os.path}):
+ mock_get_logger.return_value = mock_log
mock_os.environ = dict()
mock_os.stat.return_value = s
mock_os.lstat.return_value = s
diff --git a/py/desiutil/test/test_depend.py b/py/desiutil/test/test_depend.py
index 41e3be0..0949643 100644
--- a/py/desiutil/test/test_depend.py
+++ b/py/desiutil/test/test_depend.py
@@ -127,6 +127,23 @@ class TestDepend(unittest.TestCase):
for name, version in iterdep(hdr):
self.assertEqual(version, getdep(hdr, name))
+ #
+ # Test dependency index starting from one.
+ #
+ hdr = dict()
+ for j in range(1, 20):
+ hdr["DEPNAM{0:02d}".format(i)] = "test{0:03d}".format(i)
+ hdr["DEPVER{0:02d}".format(i)] = "v{0:d}.0.1".format(i)
+ y = Dependencies(hdr)
+ for name in y:
+ self.assertEqual(y[name], getdep(hdr, name))
+
+ for name, version in y.items():
+ self.assertEqual(version, getdep(hdr, name))
+
+ for name, version in iterdep(hdr):
+ self.assertEqual(version, getdep(hdr, name))
+
def test_class(self):
"""Test the Dependencies object.
@@ -166,9 +183,11 @@ class TestDepend(unittest.TestCase):
self.assertFalse(hasdep(hdr, 'quatlarm'))
# no .__version__
- add_dependencies(hdr, ['os.path', 'sys'])
+ add_dependencies(hdr, ['os.path', 'unittest', 'sys'])
self.assertTrue(hasdep(hdr, 'os.path'))
self.assertTrue(getdep(hdr, 'os.path').startswith('unknown'))
+ self.assertTrue(hasdep(hdr, 'unittest'))
+ self.assertTrue(getdep(hdr, 'unittest').startswith('unknown'))
self.assertTrue(hasdep(hdr, 'sys'))
self.assertTrue(getdep(hdr, 'sys').startswith('unknown'))
diff --git a/py/desiutil/test/test_install.py b/py/desiutil/test/test_install.py
index a6307f0..2e7aabf 100644
--- a/py/desiutil/test/test_install.py
+++ b/py/desiutil/test/test_install.py
@@ -11,6 +11,7 @@ from os.path import dirname, isdir, join
from shutil import rmtree
from argparse import Namespace
from tempfile import mkdtemp
+from logging import getLogger
from pkg_resources import resource_filename
from ..log import DEBUG
from ..install import DesiInstall, DesiInstallException, dependencies
@@ -41,13 +42,14 @@ class TestInstall(unittest.TestCase):
# Create a "fresh" DesiInstall object for every test.
self.desiInstall = DesiInstall()
# Replace the log handler with something that writes to memory.
- while len(self.desiInstall.log.handlers) > 0:
- h = self.desiInstall.log.handlers[0]
+ root_logger = getLogger(self.desiInstall.log.name.rsplit('.', 1)[0])
+ while len(root_logger.handlers) > 0:
+ h = root_logger.handlers[0]
fmt = h.formatter
- self.desiInstall.log.removeHandler(h)
+ root_logger.removeHandler(h)
mh = TestHandler()
mh.setFormatter(fmt)
- self.desiInstall.log.addHandler(mh)
+ root_logger.addHandler(mh)
self.desiInstall.log.setLevel(DEBUG)
# Create a temporary directory.
self.data_dir = mkdtemp()
@@ -58,7 +60,7 @@ class TestInstall(unittest.TestCase):
def assertLog(self, order=-1, message=''):
"""Examine the log messages.
"""
- handler = self.desiInstall.log.handlers[0]
+ handler = getLogger(self.desiInstall.log.name.rsplit('.', 1)[0]).handlers[0]
record = handler.buffer[order]
self.assertEqual(record.getMessage(), message)
diff --git a/py/desiutil/test/test_log.py b/py/desiutil/test/test_log.py
index dea3b56..d2a71a1 100644
--- a/py/desiutil/test/test_log.py
+++ b/py/desiutil/test/test_log.py
@@ -6,7 +6,7 @@ from __future__ import absolute_import, print_function
import os
import re
import unittest
-from logging import NullHandler
+from logging import getLogger, NullHandler
from logging.handlers import MemoryHandler
from warnings import catch_warnings, simplefilter
@@ -35,7 +35,7 @@ class TestHandler(MemoryHandler):
class TestLog(unittest.TestCase):
- """Test desispec.log
+ """Test desiutil.log
"""
@classmethod
@@ -55,7 +55,7 @@ class TestLog(unittest.TestCase):
(:|\s--\s) # delimiter
(run_logs|test_log_context) # function
((:|\s--\s)\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}|) # optional timetamp
- :\s # start of message
+ (:|\s--\s)\s # start of message
""", re.VERBOSE)
@classmethod
@@ -76,7 +76,8 @@ class TestLog(unittest.TestCase):
def assertLog(self, logger, order=-1, message=''):
"""Examine the log messages.
"""
- handler = logger.handlers[0]
+ root_logger = getLogger(logger.name.rsplit('.', 1)[0])
+ handler = root_logger.handlers[0]
record = handler.buffer[order]
self.assertEqual(record.getMessage(), message)
formatted = handler.format(record)
@@ -88,15 +89,14 @@ class TestLog(unittest.TestCase):
"""Get the actual logging object, but swap out its default handler.
"""
logger = dul.get_logger(level, **kwargs)
- # actual_level = logger.level
- while len(logger.handlers) > 0:
- h = logger.handlers[0]
+ root_logger = getLogger(logger.name.rsplit('.', 1)[0])
+ while len(root_logger.handlers) > 0:
+ h = root_logger.handlers[0]
fmt = h.formatter
- logger.removeHandler(h)
+ root_logger.removeHandler(h)
mh = TestHandler()
mh.setFormatter(fmt)
- logger.addHandler(mh)
- # logger.setLevel(actual_level)
+ root_logger.addHandler(mh)
return logger
def run_logs(self, **kwargs):
@@ -126,7 +126,7 @@ class TestLog(unittest.TestCase):
self.assertTrue(issubclass(w[-1].category,
UserWarning))
# print(w[-1].message)
- self.assertTrue("Ignore level='FOOBAR'" in str(w[-1].message))
+ self.assertIn("Invalid level='FOOBAR' ignored.", str(w[-1].message))
else:
self.assertEqual(logger.level, dul.WARNING)
logger.debug("This is a debugging message.")
@@ -171,8 +171,7 @@ class TestLog(unittest.TestCase):
self.assertLog(logger, 0, "This is a warning message.")
self.assertLog(logger, 1, "This is an error message.")
self.assertLog(logger, 2, "This is a critical error message.")
- logger.handlers[0].flush()
- dul.desi_logger = None
+ getLogger(logger.name.rsplit('.', 1)[0]).handlers[0].flush()
def test_log(self):
"""Test basic logging functionality.
@@ -186,6 +185,12 @@ class TestLog(unittest.TestCase):
log2 = dul.get_logger()
self.assertIs(log1, log2)
+ def test_log_singleton(self):
+ """Test the default pseudo-singleton created by the module.
+ """
+ log2 = dul.get_logger()
+ self.assertIs(log2, dul.log)
+
@unittest.skipIf(skipMock, "Skipping test that requires unittest.mock.")
def test_log_with_desi_loglevel(self):
"""Test basic logging functionality with DESI_LOGLEVEL set.
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 2
} | 1.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | astropy==6.0.1
astropy-iers-data==0.2025.3.31.0.36.18
certifi==2025.1.31
charset-normalizer==3.4.1
contourpy==1.3.0
cycler==0.12.1
-e git+https://github.com/desihub/desiutil.git@229031acffe2b18186e4928a8a9832131849dc6d#egg=desiutil
exceptiongroup==1.2.2
fonttools==4.56.0
healpy==1.17.3
idna==3.10
importlib_resources==6.5.2
iniconfig==2.1.0
kiwisolver==1.4.7
matplotlib==3.9.4
numpy==1.26.4
packaging==24.2
pillow==11.1.0
pluggy==1.5.0
pyerfa==2.0.1.5
pyparsing==3.2.3
pytest==8.3.5
python-dateutil==2.9.0.post0
PyYAML==6.0.2
requests==2.32.3
six==1.17.0
tomli==2.2.1
urllib3==2.3.0
zipp==3.21.0
| name: desiutil
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- astropy==6.0.1
- astropy-iers-data==0.2025.3.31.0.36.18
- certifi==2025.1.31
- charset-normalizer==3.4.1
- contourpy==1.3.0
- cycler==0.12.1
- exceptiongroup==1.2.2
- fonttools==4.56.0
- healpy==1.17.3
- idna==3.10
- importlib-resources==6.5.2
- iniconfig==2.1.0
- kiwisolver==1.4.7
- matplotlib==3.9.4
- numpy==1.26.4
- packaging==24.2
- pillow==11.1.0
- pluggy==1.5.0
- pyerfa==2.0.1.5
- pyparsing==3.2.3
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- pyyaml==6.0.2
- requests==2.32.3
- six==1.17.0
- tomli==2.2.1
- urllib3==2.3.0
- zipp==3.21.0
prefix: /opt/conda/envs/desiutil
| [
"py/desiutil/test/test_log.py::TestLog::test_log_singleton",
"py/desiutil/test/test_log.py::TestLog::test_log_with_desi_loglevel"
]
| [
"py/desiutil/test/test_install.py::TestInstall::test_get_options",
"py/desiutil/test/test_install.py::TestInstall::test_verify_url"
]
| [
"py/desiutil/test/test_census.py::TestCensus::test_ScannedFile",
"py/desiutil/test/test_census.py::TestCensus::test_get_options",
"py/desiutil/test/test_census.py::TestCensus::test_in_path",
"py/desiutil/test/test_census.py::TestCensus::test_output_csv",
"py/desiutil/test/test_census.py::TestCensus::test_scan_file",
"py/desiutil/test/test_census.py::TestCensus::test_walk_error",
"py/desiutil/test/test_census.py::TestCensus::test_year",
"py/desiutil/test/test_census.py::test_suite",
"py/desiutil/test/test_depend.py::TestDepend::test_add_dependencies",
"py/desiutil/test/test_depend.py::TestDepend::test_class",
"py/desiutil/test/test_depend.py::TestDepend::test_fits_header",
"py/desiutil/test/test_depend.py::TestDepend::test_getdep",
"py/desiutil/test/test_depend.py::TestDepend::test_hasdep",
"py/desiutil/test/test_depend.py::TestDepend::test_iter",
"py/desiutil/test/test_depend.py::TestDepend::test_setdep",
"py/desiutil/test/test_depend.py::TestDepend::test_update",
"py/desiutil/test/test_depend.py::test_suite",
"py/desiutil/test/test_install.py::TestInstall::test_anaconda_version",
"py/desiutil/test/test_install.py::TestInstall::test_build_type",
"py/desiutil/test/test_install.py::TestInstall::test_cleanup",
"py/desiutil/test/test_install.py::TestInstall::test_default_nersc_dir",
"py/desiutil/test/test_install.py::TestInstall::test_dependencies",
"py/desiutil/test/test_install.py::TestInstall::test_get_product_version",
"py/desiutil/test/test_install.py::TestInstall::test_identify_branch",
"py/desiutil/test/test_install.py::TestInstall::test_knl",
"py/desiutil/test/test_install.py::TestInstall::test_nersc_module_dir",
"py/desiutil/test/test_install.py::TestInstall::test_sanity_check",
"py/desiutil/test/test_install.py::TestInstall::test_set_install_dir",
"py/desiutil/test/test_install.py::test_suite",
"py/desiutil/test/test_log.py::TestLog::test_log",
"py/desiutil/test/test_log.py::TestLog::test_log_context",
"py/desiutil/test/test_log.py::TestLog::test_log_multiple",
"py/desiutil/test/test_log.py::TestLog::test_log_with_delimiter",
"py/desiutil/test/test_log.py::TestLog::test_log_with_timestamp",
"py/desiutil/test/test_log.py::test_suite"
]
| []
| BSD 3-Clause "New" or "Revised" License | 1,963 | [
"doc/changes.rst",
"py/desiutil/log.py"
]
| [
"doc/changes.rst",
"py/desiutil/log.py"
]
|
|
falconry__falcon-1174 | 919fd3f5a3129d04f1c7d23f5eff440ec4598e35 | 2017-12-18 22:06:28 | 919fd3f5a3129d04f1c7d23f5eff440ec4598e35 | codecov[bot]: # [Codecov](https://codecov.io/gh/falconry/falcon/pull/1174?src=pr&el=h1) Report
> Merging [#1174](https://codecov.io/gh/falconry/falcon/pull/1174?src=pr&el=desc) into [master](https://codecov.io/gh/falconry/falcon/commit/919fd3f5a3129d04f1c7d23f5eff440ec4598e35?src=pr&el=desc) will **not change** coverage.
> The diff coverage is `100%`.
```diff
@@ Coverage Diff @@
## master #1174 +/- ##
======================================
Coverage 100% 100%
======================================
Files 37 37
Lines 2419 2437 +18
Branches 350 354 +4
======================================
+ Hits 2419 2437 +18
```
| [Impacted Files](https://codecov.io/gh/falconry/falcon/pull/1174?src=pr&el=tree) | Coverage Δ | |
|---|---|---|
| [falcon/request.py](https://codecov.io/gh/falconry/falcon/pull/1174/diff?src=pr&el=tree#diff-ZmFsY29uL3JlcXVlc3QucHk=) | `100% <100%> (ø)` | :arrow_up: |
------
[Continue to review full report at Codecov](https://codecov.io/gh/falconry/falcon/pull/1174?src=pr&el=continue).
> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta)
> `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data`
> Powered by [Codecov](https://codecov.io/gh/falconry/falcon/pull/1174?src=pr&el=footer). Last update [919fd3f...a5ceeec](https://codecov.io/gh/falconry/falcon/pull/1174?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).
| diff --git a/falcon/request.py b/falcon/request.py
index b1f92e1..5bc5d41 100644
--- a/falcon/request.py
+++ b/falcon/request.py
@@ -26,7 +26,8 @@ except AttributeError:
import io
NativeStream = io.BufferedReader
-from wsgiref.validate import InputWrapper # NOQA: I202
+from uuid import UUID # NOQA: I202
+from wsgiref.validate import InputWrapper
import mimeparse
import six
@@ -1273,6 +1274,70 @@ class Request(object):
raise errors.HTTPMissingParam(name)
+ def get_param_as_uuid(self, name, required=False, store=None):
+ """Return the value of a query string parameter as an UUID.
+
+ The value to convert must conform to the standard UUID string
+ representation per RFC 4122. For example, the following
+ strings are all valid::
+
+ # Lowercase
+ '64be949b-3433-4d36-a4a8-9f19d352fee8'
+
+ # Uppercase
+ 'BE71ECAA-F719-4D42-87FD-32613C2EEB60'
+
+ # Mixed
+ '81c8155C-D6de-443B-9495-39Fa8FB239b5'
+
+ Args:
+ name (str): Parameter name, case-sensitive (e.g., 'id').
+
+ Keyword Args:
+ required (bool): Set to ``True`` to raise
+ ``HTTPBadRequest`` instead of returning ``None`` when the
+ parameter is not found or is not a UUID (default
+ ``False``).
+ store (dict): A ``dict``-like object in which to place
+ the value of the param, but only if the param is found
+ (default ``None``).
+
+ Returns:
+ UUID: The value of the param if it is found and can be converted to
+ a ``UUID``. If the param is not found, returns ``None``, unless
+ `required` is ``True``.
+
+ Raises:
+ HTTPBadRequest: The param was not found in the request, even though
+ it was required to be there, or it was found but could not
+ be converted to a ``UUID``.
+ """
+
+ params = self._params
+
+ # PERF: Use if..in since it is a good all-around performer; we don't
+ # know how likely params are to be specified by clients.
+ if name in params:
+ val = params[name]
+ if isinstance(val, list):
+ val = val[-1]
+
+ try:
+ val = UUID(val)
+ except ValueError:
+ msg = 'The value must be a UUID string.'
+ raise errors.HTTPInvalidParam(msg, name)
+
+ if store is not None:
+ store[name] = val
+
+ return val
+
+ if not required:
+ return None
+
+ raise errors.HTTPMissingParam(name)
+
def get_param_as_bool(self, name, required=False, store=None,
blank_as_true=False):
"""Return the value of a query string parameter as a boolean
| Add a get_param_as_uuid method to `falcon.Request`
It would be great to be able to get a parameter and to cast it to a UUID, raising an HTTPBadRequest otherwise. | falconry/falcon | diff --git a/tests/test_query_params.py b/tests/test_query_params.py
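For context, a responder using the proposed method might look like this sketch; the `/orders` route and `marker` parameter are invented for illustration, and `resp.media` assumes Falcon 1.3+:

```python
# Hedged usage sketch for the proposed req.get_param_as_uuid().
import falcon


class OrdersResource(object):
    def on_get(self, req, resp):
        # Raises falcon.HTTPBadRequest when 'marker' is missing
        # (required=True) or present but not a valid UUID string.
        marker = req.get_param_as_uuid('marker', required=True)
        resp.media = {'marker': str(marker)}


app = falcon.API()
app.add_route('/orders', OrdersResource())
```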
| falconry/falcon | diff --git a/tests/test_query_params.py b/tests/test_query_params.py
index e90567d..3485773 100644
--- a/tests/test_query_params.py
+++ b/tests/test_query_params.py
@@ -1,4 +1,5 @@
from datetime import date, datetime
+from uuid import UUID
try:
import ujson as json
@@ -72,7 +73,7 @@ class TestQueryParams(object):
def test_none(self, simulate_request, client, resource):
query_string = ''
- client.app.add_route('/', resource)
+ client.app.add_route('/', resource) # TODO: DRY up this setup logic
simulate_request(client=client, path='/', query_string=query_string)
req = resource.captured_req
@@ -213,6 +214,7 @@ class TestQueryParams(object):
@pytest.mark.parametrize('method_name', [
'get_param',
'get_param_as_int',
+ 'get_param_as_uuid',
'get_param_as_bool',
'get_param_as_list',
])
@@ -307,6 +309,33 @@ class TestQueryParams(object):
with pytest.raises(falcon.HTTPBadRequest):
req.get_param_as_int('pos', min=0, max=10)
+ def test_uuid(self, simulate_request, client, resource):
+ client.app.add_route('/', resource)
+ query_string = ('marker1=8d76b7b3-d0dd-46ca-ad6e-3989dcd66959&'
+ 'marker2=64be949b-3433-4d36-a4a8-9f19d352fee8&'
+ 'marker2=8D76B7B3-d0dd-46ca-ad6e-3989DCD66959&'
+ 'short=4be949b-3433-4d36-a4a8-9f19d352fee8')
+ simulate_request(client=client, path='/', query_string=query_string)
+
+ req = resource.captured_req
+
+ expected_uuid = UUID('8d76b7b3-d0dd-46ca-ad6e-3989dcd66959')
+ assert req.get_param_as_uuid('marker1') == expected_uuid
+ assert req.get_param_as_uuid('marker2') == expected_uuid
+ assert req.get_param_as_uuid('marker3') is None
+ assert req.get_param_as_uuid('marker3', required=False) is None
+
+ with pytest.raises(falcon.HTTPBadRequest):
+ req.get_param_as_uuid('short')
+
+ store = {}
+ with pytest.raises(falcon.HTTPBadRequest):
+ req.get_param_as_uuid('marker3', required=True, store=store)
+
+ assert not store
+ assert req.get_param_as_uuid('marker1', store=store)
+ assert store['marker1'] == expected_uuid
+
def test_boolean(self, simulate_request, client, resource):
client.app.add_route('/', resource)
query_string = ('echo=true&doit=false&bogus=bar&bogus2=foo&'
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 1.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements/tests"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
execnet==1.9.0
-e git+https://github.com/falconry/falcon.git@919fd3f5a3129d04f1c7d23f5eff440ec4598e35#egg=falcon
fixtures==4.0.1
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
jsonschema==3.2.0
msgpack-python==0.5.6
packaging==21.3
pbr==6.1.1
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
pytest-asyncio==0.16.0
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-xdist==3.0.2
python-mimeparse==1.6.0
PyYAML==3.11
requests==2.27.1
six==1.17.0
testtools==2.6.0
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: falcon
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- charset-normalizer==2.0.12
- coverage==6.2
- execnet==1.9.0
- fixtures==4.0.1
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jsonschema==3.2.0
- msgpack-python==0.5.6
- packaging==21.3
- pbr==6.1.1
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pytest-asyncio==0.16.0
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-xdist==3.0.2
- python-mimeparse==1.6.0
- pyyaml==3.11
- requests==2.27.1
- six==1.17.0
- testtools==2.6.0
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/falcon
| [
"tests/test_query_params.py::TestQueryParams::test_required[get_param_as_uuid-simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_required[get_param_as_uuid-simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_uuid[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_uuid[simulate_request_post_query_params]",
"tests/test_uri_converters.py::test_uuid_converter[5e88c39b-a657-4229-8092-e5adfcdde0c3-expected0]",
"tests/test_uri_converters.py::test_uuid_converter[5e88c39ba657-4229-8092-e5adfcdde0c3-expected1]",
"tests/test_uri_converters.py::test_uuid_converter[5e88c39ba65742298092e5adfcdde0c3-expected2]",
"tests/test_uri_converters.py::test_uuid_converter[urn:uuid:5e88c39b-a657-4229-8092-e5adfcdde0c3-expected3]",
"tests/test_uri_converters.py::test_uuid_converter[urn:uuid:5e88c39ba65742298092e5adfcdde0c3-expected4]",
"tests/test_uri_converters.py::test_uuid_converter[5e88c39b-a657-4229-8092-e5adfcdde0c3",
"tests/test_uri_converters.py::test_uuid_converter[5e88c39b-a657-4229-8092-e5adfcdde0c-None]",
"tests/test_uri_converters.py::test_uuid_converter[5-None]",
"tests/test_uri_converters.py::test_uuid_converter[5e88c39b-a657-4229-8092-e5adfcdde0cg-None]",
"tests/test_uri_converters.py::test_uuid_converter[5e88c39b_a657_4229_8092_e5adfcdde0c3-None]",
"tests/test_uri_templates.py::test_uuid_converter[/widgets/{widget_id:uuid}-/widgets/13df0b92-5c92-4044-b1b1-7097d50e90bf-expected0]",
"tests/test_uri_templates.py::test_uuid_converter[/widgets/{widget_id:uuid}/orders-/widgets/13df0b925c924044b1b17097d50e90bf/orders-expected1]",
"tests/test_uri_templates.py::test_uuid_converter[/versions/diff/{left:uuid()}...{right:uuid()}-/versions/diff/13df0b92-5c92-4044-b1b1-7097d50e90bf...6407e43c-1a2e-45eb-8e13-661a80978ccf-expected2]",
"tests/test_uri_templates.py::test_uuid_converter[/versions/diff/{left:uuid}...{right:uuid()}-/versions/diff/13df0b92-5c92-4044-b1b1-7097d50e90bf...6407e43c-1a2e-45eb-8e13-661a80978ccf-expected3]",
"tests/test_uri_templates.py::test_uuid_converter[/versions/diff/{left:uuid()}...{right:uuid}-/versions/diff/13df0b92-5c92-4044-b1b1-7097d50e90bf...6407e43c-1a2e-45eb-8e13-661a80978ccf-expected4]",
"tests/test_uri_templates.py::test_uuid_converter[/widgets/{widget_id:uuid}/orders-/widgets/13df0b925c924044b1b17097d50e90b/orders-None]"
]
| []
| [
"tests/test_after_hooks.py::test_output_validator",
"tests/test_after_hooks.py::test_serializer",
"tests/test_after_hooks.py::test_hook_as_callable_class",
"tests/test_after_hooks.py::test_resource_with_uri_fields[resource0]",
"tests/test_after_hooks.py::test_resource_with_uri_fields[resource1]",
"tests/test_after_hooks.py::test_wrapped_resource[resource0]",
"tests/test_after_hooks.py::test_wrapped_resource[resource1]",
"tests/test_after_hooks.py::test_wrapped_resource_with_hooks_aware_of_resource",
"tests/test_before_hooks.py::test_multiple_resource_hooks[resource0]",
"tests/test_before_hooks.py::test_multiple_resource_hooks[resource1]",
"tests/test_before_hooks.py::test_input_validator",
"tests/test_before_hooks.py::test_input_validator_inherited",
"tests/test_before_hooks.py::test_param_validator",
"tests/test_before_hooks.py::test_field_validator[resource0]",
"tests/test_before_hooks.py::test_field_validator[resource1]",
"tests/test_before_hooks.py::test_field_validator[resource2]",
"tests/test_before_hooks.py::test_parser",
"tests/test_before_hooks.py::test_wrapped_resource",
"tests/test_before_hooks.py::test_wrapped_resource_with_hooks_aware_of_resource",
"tests/test_boundedstream.py::test_not_writeable",
"tests/test_cmd_print_api.py::test_traverse_with_verbose",
"tests/test_cmd_print_api.py::test_traverse",
"tests/test_cookies.py::test_response_base_case",
"tests/test_cookies.py::test_response_disable_secure_globally",
"tests/test_cookies.py::test_response_complex_case",
"tests/test_cookies.py::test_cookie_expires_naive",
"tests/test_cookies.py::test_cookie_expires_aware",
"tests/test_cookies.py::test_cookies_setable",
"tests/test_cookies.py::test_cookie_max_age_float_and_string[foofloat]",
"tests/test_cookies.py::test_cookie_max_age_float_and_string[foostring]",
"tests/test_cookies.py::test_response_unset_cookie",
"tests/test_cookies.py::test_cookie_timezone",
"tests/test_cookies.py::test_request_cookie_parsing",
"tests/test_cookies.py::test_invalid_cookies_are_ignored",
"tests/test_cookies.py::test_cookie_header_is_missing",
"tests/test_cookies.py::test_unicode_inside_ascii_range",
"tests/test_cookies.py::test_non_ascii_name[Unicode_\\xc3\\xa6\\xc3\\xb8]",
"tests/test_cookies.py::test_non_ascii_name[Unicode_\\xc3\\x83\\xc2\\xa6\\xc3\\x83\\xc2\\xb8]",
"tests/test_cookies.py::test_non_ascii_name[42]",
"tests/test_cookies.py::test_non_ascii_value[Unicode_\\xc3\\xa6\\xc3\\xb8]",
"tests/test_cookies.py::test_non_ascii_value[Unicode_\\xc3\\x83\\xc2\\xa6\\xc3\\x83\\xc2\\xb8]",
"tests/test_cookies.py::test_non_ascii_value[42]",
"tests/test_custom_router.py::test_custom_router_add_route_should_be_used",
"tests/test_custom_router.py::test_custom_router_find_should_be_used",
"tests/test_custom_router.py::test_can_pass_additional_params_to_add_route",
"tests/test_custom_router.py::test_custom_router_takes_req_positional_argument",
"tests/test_custom_router.py::test_custom_router_takes_req_keyword_argument",
"tests/test_default_router.py::test_user_regression_versioned_url",
"tests/test_default_router.py::test_user_regression_recipes",
"tests/test_default_router.py::test_user_regression_special_chars[/serviceRoot/People|{field}-/serviceRoot/People|susie-expected_params0]",
"tests/test_default_router.py::test_user_regression_special_chars[/serviceRoot/People[{field}]-/serviceRoot/People['calvin']-expected_params1]",
"tests/test_default_router.py::test_user_regression_special_chars[/serviceRoot/People({field})-/serviceRoot/People('hobbes')-expected_params2]",
"tests/test_default_router.py::test_user_regression_special_chars[/serviceRoot/People({field})-/serviceRoot/People('hob)bes')-expected_params3]",
"tests/test_default_router.py::test_user_regression_special_chars[/serviceRoot/People({field})(z)-/serviceRoot/People(hobbes)(z)-expected_params4]",
"tests/test_default_router.py::test_user_regression_special_chars[/serviceRoot/People('{field}')-/serviceRoot/People('rosalyn')-expected_params5]",
"tests/test_default_router.py::test_user_regression_special_chars[/^{field}-/^42-expected_params6]",
"tests/test_default_router.py::test_user_regression_special_chars[/+{field}-/+42-expected_params7]",
"tests/test_default_router.py::test_user_regression_special_chars[/foo/{first}_{second}/bar-/foo/abc_def_ghijk/bar-expected_params8]",
"tests/test_default_router.py::test_user_regression_special_chars[/items/{x}?{y}-/items/1080?768-expected_params9]",
"tests/test_default_router.py::test_user_regression_special_chars[/items/{x}|{y}-/items/1080|768-expected_params10]",
"tests/test_default_router.py::test_user_regression_special_chars[/items/{x},{y}-/items/1080,768-expected_params11]",
"tests/test_default_router.py::test_user_regression_special_chars[/items/{x}^^{y}-/items/1080^^768-expected_params12]",
"tests/test_default_router.py::test_user_regression_special_chars[/items/{x}*{y}*-/items/1080*768*-expected_params13]",
"tests/test_default_router.py::test_user_regression_special_chars[/thing-2/something+{field}+-/thing-2/something+42+-expected_params14]",
"tests/test_default_router.py::test_user_regression_special_chars[/thing-2/something*{field}/notes-/thing-2/something*42/notes-expected_params15]",
"tests/test_default_router.py::test_user_regression_special_chars[/thing-2/something+{field}|{q}/notes-/thing-2/something+else|z/notes-expected_params16]",
"tests/test_default_router.py::test_user_regression_special_chars[serviceRoot/$metadata#Airports('{field}')/Name-serviceRoot/$metadata#Airports('KSFO')/Name-expected_params17]",
"tests/test_default_router.py::test_not_str[uri_template0]",
"tests/test_default_router.py::test_not_str[uri_template1]",
"tests/test_default_router.py::test_not_str[uri_template2]",
"tests/test_default_router.py::test_root_path",
"tests/test_default_router.py::test_duplicate_field_names[/{field}{field}]",
"tests/test_default_router.py::test_duplicate_field_names[/{field}...{field}]",
"tests/test_default_router.py::test_duplicate_field_names[/{field}/{another}/{field}]",
"tests/test_default_router.py::test_duplicate_field_names[/{field}/something/something/{field}/something]",
"tests/test_default_router.py::test_match_entire_path[/items/thing-/items/t]",
"tests/test_default_router.py::test_match_entire_path[/items/{x}|{y}|-/items/1080|768]",
"tests/test_default_router.py::test_match_entire_path[/items/{x}*{y}foo-/items/1080*768foobar]",
"tests/test_default_router.py::test_match_entire_path[/items/{x}*768*-/items/1080*768***]",
"tests/test_default_router.py::test_conflict[/teams/{conflict}]",
"tests/test_default_router.py::test_conflict[/emojis/signs/{id_too}]",
"tests/test_default_router.py::test_conflict[/repos/{org}/{repo}/compare/{complex}:{vs}...{complex2}:{conflict}]",
"tests/test_default_router.py::test_conflict[/teams/{id:int}/settings]",
"tests/test_default_router.py::test_non_conflict[/repos/{org}/{repo}/compare/{simple_vs_complex}]",
"tests/test_default_router.py::test_non_conflict[/repos/{complex}.{vs}.{simple}]",
"tests/test_default_router.py::test_non_conflict[/repos/{org}/{repo}/compare/{complex}:{vs}...{complex2}/full]",
"tests/test_default_router.py::test_invalid_field_name[/{}]",
"tests/test_default_router.py::test_invalid_field_name[/repos/{org}/{repo}/compare/{}]",
"tests/test_default_router.py::test_invalid_field_name[/repos/{complex}.{}.{thing}]",
"tests/test_default_router.py::test_invalid_field_name[/{9v}]",
"tests/test_default_router.py::test_invalid_field_name[/{524hello}/world]",
"tests/test_default_router.py::test_invalid_field_name[/hello/{1world}]",
"tests/test_default_router.py::test_invalid_field_name[/repos/{complex}.{9v}.{thing}/etc]",
"tests/test_default_router.py::test_invalid_field_name[/{*kgriffs}]",
"tests/test_default_router.py::test_invalid_field_name[/{@kgriffs}]",
"tests/test_default_router.py::test_invalid_field_name[/repos/{complex}.{v}.{@thing}/etc]",
"tests/test_default_router.py::test_invalid_field_name[/{-kgriffs}]",
"tests/test_default_router.py::test_invalid_field_name[/repos/{complex}.{-v}.{thing}/etc]",
"tests/test_default_router.py::test_invalid_field_name[/repos/{simple-thing}/etc]",
"tests/test_default_router.py::test_invalid_field_name[/this",
"tests/test_default_router.py::test_invalid_field_name[/this\\tand\\tthat/this\\nand\\nthat/{thing",
"tests/test_default_router.py::test_invalid_field_name[/{thing\\t}/world]",
"tests/test_default_router.py::test_invalid_field_name[/{\\nthing}/world]",
"tests/test_default_router.py::test_invalid_field_name[/{th\\x0bing}/world]",
"tests/test_default_router.py::test_invalid_field_name[/{",
"tests/test_default_router.py::test_invalid_field_name[/{thing}/wo",
"tests/test_default_router.py::test_invalid_field_name[/{thing}",
"tests/test_default_router.py::test_invalid_field_name[/repos/{or",
"tests/test_default_router.py::test_invalid_field_name[/repos/{org}/{repo}/compare/{th\\ting}]",
"tests/test_default_router.py::test_print_src",
"tests/test_default_router.py::test_override",
"tests/test_default_router.py::test_literal_segment",
"tests/test_default_router.py::test_dead_segment[/teams]",
"tests/test_default_router.py::test_dead_segment[/emojis/signs]",
"tests/test_default_router.py::test_dead_segment[/gists]",
"tests/test_default_router.py::test_dead_segment[/gists/42]",
"tests/test_default_router.py::test_malformed_pattern[/repos/racker/falcon/compare/foo]",
"tests/test_default_router.py::test_malformed_pattern[/repos/racker/falcon/compare/foo/full]",
"tests/test_default_router.py::test_literal",
"tests/test_default_router.py::test_converters[/cvt/teams/007-expected_params0]",
"tests/test_default_router.py::test_converters[/cvt/teams/1234/members-expected_params1]",
"tests/test_default_router.py::test_converters[/cvt/teams/default/members/700-5-expected_params2]",
"tests/test_default_router.py::test_converters[/cvt/repos/org/repo/compare/xkcd:353-expected_params3]",
"tests/test_default_router.py::test_converters[/cvt/repos/org/repo/compare/gunmachan:1234...kumamon:5678/part-expected_params4]",
"tests/test_default_router.py::test_converters[/cvt/repos/xkcd/353/compare/susan:0001/full-expected_params5]",
"tests/test_default_router.py::test_converters_with_invalid_options[/foo/{bar:int(0)}]",
"tests/test_default_router.py::test_converters_with_invalid_options[/foo/{bar:int(num_digits=0)}]",
"tests/test_default_router.py::test_converters_with_invalid_options[/foo/{bar:int(-1)}/baz]",
"tests/test_default_router.py::test_converters_with_invalid_options[/foo/{bar:int(num_digits=-1)}/baz]",
"tests/test_default_router.py::test_converters_malformed_specification[/foo/{bar:}]",
"tests/test_default_router.py::test_converters_malformed_specification[/foo/{bar:unknown}/baz]",
"tests/test_default_router.py::test_variable",
"tests/test_default_router.py::test_single_character_field_name",
"tests/test_default_router.py::test_literal_vs_variable[/teams/default-19]",
"tests/test_default_router.py::test_literal_vs_variable[/teams/default/members-7]",
"tests/test_default_router.py::test_literal_vs_variable[/cvt/teams/default-31]",
"tests/test_default_router.py::test_literal_vs_variable[/cvt/teams/default/members/1234-10-32]",
"tests/test_default_router.py::test_literal_vs_variable[/teams/1234-6]",
"tests/test_default_router.py::test_literal_vs_variable[/teams/1234/members-7]",
"tests/test_default_router.py::test_literal_vs_variable[/gists/first-20]",
"tests/test_default_router.py::test_literal_vs_variable[/gists/first/raw-18]",
"tests/test_default_router.py::test_literal_vs_variable[/gists/first/pdf-21]",
"tests/test_default_router.py::test_literal_vs_variable[/gists/1776/pdf-21]",
"tests/test_default_router.py::test_literal_vs_variable[/emojis/signs/78-13]",
"tests/test_default_router.py::test_literal_vs_variable[/emojis/signs/78/small.png-24]",
"tests/test_default_router.py::test_literal_vs_variable[/emojis/signs/78/small(png)-25]",
"tests/test_default_router.py::test_literal_vs_variable[/emojis/signs/78/small_png-26]",
"tests/test_default_router.py::test_not_found[/this/does/not/exist]",
"tests/test_default_router.py::test_not_found[/user/bogus]",
"tests/test_default_router.py::test_not_found[/repos/racker/falcon/compare/johndoe:master...janedoe:dev/bogus]",
"tests/test_default_router.py::test_not_found[/teams]",
"tests/test_default_router.py::test_not_found[/teams/42/members/undefined]",
"tests/test_default_router.py::test_not_found[/teams/42/undefined]",
"tests/test_default_router.py::test_not_found[/teams/42/undefined/segments]",
"tests/test_default_router.py::test_not_found[/teams/default/members/undefined]",
"tests/test_default_router.py::test_not_found[/teams/default/members/thing/undefined]",
"tests/test_default_router.py::test_not_found[/teams/default/members/thing/undefined/segments]",
"tests/test_default_router.py::test_not_found[/teams/default/undefined]",
"tests/test_default_router.py::test_not_found[/teams/default/undefined/segments]",
"tests/test_default_router.py::test_not_found[/cvt/teams/default/members]",
"tests/test_default_router.py::test_not_found[/cvt/teams/NaN]",
"tests/test_default_router.py::test_not_found[/cvt/teams/default/members/NaN]",
"tests/test_default_router.py::test_not_found[/emojis/signs]",
"tests/test_default_router.py::test_not_found[/emojis/signs/0/small]",
"tests/test_default_router.py::test_not_found[/emojis/signs/0/undefined]",
"tests/test_default_router.py::test_not_found[/emojis/signs/0/undefined/segments]",
"tests/test_default_router.py::test_not_found[/emojis/signs/20/small]",
"tests/test_default_router.py::test_not_found[/emojis/signs/20/undefined]",
"tests/test_default_router.py::test_not_found[/emojis/signs/42/undefined]",
"tests/test_default_router.py::test_not_found[/emojis/signs/78/undefined]",
"tests/test_default_router.py::test_subsegment_not_found",
"tests/test_default_router.py::test_multivar",
"tests/test_default_router.py::test_complex[-5]",
"tests/test_default_router.py::test_complex[/full-10]",
"tests/test_default_router.py::test_complex[/part-15]",
"tests/test_default_router.py::test_complex_alt[-16-/repos/{org}/{repo}/compare/{usr0}:{branch0}]",
"tests/test_default_router.py::test_complex_alt[/full-17-/repos/{org}/{repo}/compare/{usr0}:{branch0}/full]",
"tests/test_default_router.py::test_options_converters_set",
"tests/test_default_router.py::test_options_converters_update[spam]",
"tests/test_default_router.py::test_options_converters_update[spam_2]",
"tests/test_default_router.py::test_options_converters_invalid_name[has",
"tests/test_default_router.py::test_options_converters_invalid_name[whitespace",
"tests/test_default_router.py::test_options_converters_invalid_name[",
"tests/test_default_router.py::test_options_converters_invalid_name[funky$character]",
"tests/test_default_router.py::test_options_converters_invalid_name[42istheanswer]",
"tests/test_default_router.py::test_options_converters_invalid_name[with-hyphen]",
"tests/test_default_router.py::test_options_converters_invalid_name_on_update",
"tests/test_deps.py::test_deps_mimeparse_correct_package",
"tests/test_error.py::test_with_default_title_and_desc[HTTPBadRequest-400",
"tests/test_error.py::test_with_default_title_and_desc[HTTPForbidden-403",
"tests/test_error.py::test_with_default_title_and_desc[HTTPConflict-409",
"tests/test_error.py::test_with_default_title_and_desc[HTTPLengthRequired-411",
"tests/test_error.py::test_with_default_title_and_desc[HTTPPreconditionFailed-412",
"tests/test_error.py::test_with_default_title_and_desc[HTTPRequestEntityTooLarge-413",
"tests/test_error.py::test_with_default_title_and_desc[HTTPUriTooLong-414",
"tests/test_error.py::test_with_default_title_and_desc[HTTPUnprocessableEntity-422",
"tests/test_error.py::test_with_default_title_and_desc[HTTPLocked-423",
"tests/test_error.py::test_with_default_title_and_desc[HTTPFailedDependency-424",
"tests/test_error.py::test_with_default_title_and_desc[HTTPPreconditionRequired-428",
"tests/test_error.py::test_with_default_title_and_desc[HTTPTooManyRequests-429",
"tests/test_error.py::test_with_default_title_and_desc[HTTPRequestHeaderFieldsTooLarge-431",
"tests/test_error.py::test_with_default_title_and_desc[HTTPUnavailableForLegalReasons-451",
"tests/test_error.py::test_with_default_title_and_desc[HTTPInternalServerError-500",
"tests/test_error.py::test_with_default_title_and_desc[HTTPNotImplemented-501",
"tests/test_error.py::test_with_default_title_and_desc[HTTPBadGateway-502",
"tests/test_error.py::test_with_default_title_and_desc[HTTPServiceUnavailable-503",
"tests/test_error.py::test_with_default_title_and_desc[HTTPGatewayTimeout-504",
"tests/test_error.py::test_with_default_title_and_desc[HTTPVersionNotSupported-505",
"tests/test_error.py::test_with_default_title_and_desc[HTTPInsufficientStorage-507",
"tests/test_error.py::test_with_default_title_and_desc[HTTPLoopDetected-508",
"tests/test_error.py::test_with_default_title_and_desc[HTTPNetworkAuthenticationRequired-511",
"tests/test_error.py::test_with_title_and_desc[HTTPBadRequest]",
"tests/test_error.py::test_with_title_and_desc[HTTPForbidden]",
"tests/test_error.py::test_with_title_and_desc[HTTPConflict]",
"tests/test_error.py::test_with_title_and_desc[HTTPLengthRequired]",
"tests/test_error.py::test_with_title_and_desc[HTTPPreconditionFailed]",
"tests/test_error.py::test_with_title_and_desc[HTTPPreconditionRequired]",
"tests/test_error.py::test_with_title_and_desc[HTTPUriTooLong]",
"tests/test_error.py::test_with_title_and_desc[HTTPUnprocessableEntity]",
"tests/test_error.py::test_with_title_and_desc[HTTPLocked]",
"tests/test_error.py::test_with_title_and_desc[HTTPFailedDependency]",
"tests/test_error.py::test_with_title_and_desc[HTTPRequestHeaderFieldsTooLarge]",
"tests/test_error.py::test_with_title_and_desc[HTTPUnavailableForLegalReasons]",
"tests/test_error.py::test_with_title_and_desc[HTTPInternalServerError]",
"tests/test_error.py::test_with_title_and_desc[HTTPNotImplemented]",
"tests/test_error.py::test_with_title_and_desc[HTTPBadGateway]",
"tests/test_error.py::test_with_title_and_desc[HTTPServiceUnavailable]",
"tests/test_error.py::test_with_title_and_desc[HTTPGatewayTimeout]",
"tests/test_error.py::test_with_title_and_desc[HTTPVersionNotSupported]",
"tests/test_error.py::test_with_title_and_desc[HTTPInsufficientStorage]",
"tests/test_error.py::test_with_title_and_desc[HTTPLoopDetected]",
"tests/test_error.py::test_with_title_and_desc[HTTPNetworkAuthenticationRequired]",
"tests/test_error.py::test_with_retry_after[HTTPServiceUnavailable]",
"tests/test_error.py::test_with_retry_after[HTTPTooManyRequests]",
"tests/test_error.py::test_with_retry_after[HTTPRequestEntityTooLarge]",
"tests/test_error.py::test_http_unauthorized_no_title_and_desc_and_challenges",
"tests/test_error.py::test_http_unauthorized_with_title_and_desc_and_challenges",
"tests/test_error.py::test_http_not_acceptable_no_title_and_desc_and_challenges",
"tests/test_error.py::test_http_not_acceptable_with_title_and_desc_and_challenges",
"tests/test_error.py::test_http_unsupported_media_type_no_title_and_desc_and_challenges",
"tests/test_error.py::test_http_unsupported_media_type_with_title_and_desc_and_challenges",
"tests/test_error.py::test_http_error_repr",
"tests/test_error_handlers.py::TestErrorHandler::test_caught_error",
"tests/test_error_handlers.py::TestErrorHandler::test_uncaught_error",
"tests/test_error_handlers.py::TestErrorHandler::test_uncaught_error_else",
"tests/test_error_handlers.py::TestErrorHandler::test_converted_error",
"tests/test_error_handlers.py::TestErrorHandler::test_handle_not_defined",
"tests/test_error_handlers.py::TestErrorHandler::test_subclass_error",
"tests/test_error_handlers.py::TestErrorHandler::test_error_order_duplicate",
"tests/test_error_handlers.py::TestErrorHandler::test_error_order_subclass",
"tests/test_error_handlers.py::TestErrorHandler::test_error_order_subclass_masked",
"tests/test_headers.py::TestHeaders::test_content_length",
"tests/test_headers.py::TestHeaders::test_default_value",
"tests/test_headers.py::TestHeaders::test_unset_header",
"tests/test_headers.py::TestHeaders::test_required_header",
"tests/test_headers.py::TestHeaders::test_no_content_length[204",
"tests/test_headers.py::TestHeaders::test_no_content_length[304",
"tests/test_headers.py::TestHeaders::test_content_header_missing",
"tests/test_headers.py::TestHeaders::test_passthrough_request_headers",
"tests/test_headers.py::TestHeaders::test_headers_as_list",
"tests/test_headers.py::TestHeaders::test_default_media_type",
"tests/test_headers.py::TestHeaders::test_override_default_media_type[text/plain;",
"tests/test_headers.py::TestHeaders::test_override_default_media_type[text/plain-Hello",
"tests/test_headers.py::TestHeaders::test_override_default_media_type_missing_encoding",
"tests/test_headers.py::TestHeaders::test_response_header_helpers_on_get",
"tests/test_headers.py::TestHeaders::test_unicode_location_headers",
"tests/test_headers.py::TestHeaders::test_unicode_headers_convertable",
"tests/test_headers.py::TestHeaders::test_response_set_and_get_header",
"tests/test_headers.py::TestHeaders::test_response_append_header",
"tests/test_headers.py::TestHeaders::test_vary_star",
"tests/test_headers.py::TestHeaders::test_vary_header[vary0-accept-encoding]",
"tests/test_headers.py::TestHeaders::test_vary_header[vary1-accept-encoding,",
"tests/test_headers.py::TestHeaders::test_vary_header[vary2-accept-encoding,",
"tests/test_headers.py::TestHeaders::test_content_type_no_body",
"tests/test_headers.py::TestHeaders::test_no_content_type[204",
"tests/test_headers.py::TestHeaders::test_no_content_type[304",
"tests/test_headers.py::TestHeaders::test_custom_content_type",
"tests/test_headers.py::TestHeaders::test_add_link_single",
"tests/test_headers.py::TestHeaders::test_add_link_multiple",
"tests/test_headers.py::TestHeaders::test_add_link_with_title",
"tests/test_headers.py::TestHeaders::test_add_link_with_title_star",
"tests/test_headers.py::TestHeaders::test_add_link_with_anchor",
"tests/test_headers.py::TestHeaders::test_add_link_with_hreflang",
"tests/test_headers.py::TestHeaders::test_add_link_with_hreflang_multi",
"tests/test_headers.py::TestHeaders::test_add_link_with_type_hint",
"tests/test_headers.py::TestHeaders::test_add_link_complex",
"tests/test_headers.py::TestHeaders::test_content_length_options",
"tests/test_hello.py::TestHelloWorld::test_env_headers_list_of_tuples",
"tests/test_hello.py::TestHelloWorld::test_root_route",
"tests/test_hello.py::TestHelloWorld::test_no_route",
"tests/test_hello.py::TestHelloWorld::test_body[/body-resource0-<lambda>]",
"tests/test_hello.py::TestHelloWorld::test_body[/bytes-resource1-<lambda>]",
"tests/test_hello.py::TestHelloWorld::test_body[/data-resource2-<lambda>]",
"tests/test_hello.py::TestHelloWorld::test_no_body_on_head",
"tests/test_hello.py::TestHelloWorld::test_stream_chunked",
"tests/test_hello.py::TestHelloWorld::test_stream_known_len",
"tests/test_hello.py::TestHelloWorld::test_filelike",
"tests/test_hello.py::TestHelloWorld::test_filelike_closing[ClosingBytesIO-True]",
"tests/test_hello.py::TestHelloWorld::test_filelike_closing[NonClosingBytesIO-False]",
"tests/test_hello.py::TestHelloWorld::test_filelike_using_helper",
"tests/test_hello.py::TestHelloWorld::test_status_not_set",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_get",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_put",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_post_not_allowed",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_report",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_misc",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_methods_not_allowed_simple",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_methods_not_allowed_complex",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_method_not_allowed_with_param",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_default_on_options",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_on_options",
"tests/test_http_method_routing.py::TestHttpMethodRouting::test_bogus_method",
"tests/test_httperror.py::TestHTTPError::test_base_class",
"tests/test_httperror.py::TestHTTPError::test_no_description_json",
"tests/test_httperror.py::TestHTTPError::test_no_description_xml",
"tests/test_httperror.py::TestHTTPError::test_client_does_not_accept_json_or_xml",
"tests/test_httperror.py::TestHTTPError::test_custom_old_error_serializer",
"tests/test_httperror.py::TestHTTPError::test_custom_old_error_serializer_no_body",
"tests/test_httperror.py::TestHTTPError::test_custom_new_error_serializer",
"tests/test_httperror.py::TestHTTPError::test_client_does_not_accept_anything",
"tests/test_httperror.py::TestHTTPError::test_forbidden[application/json]",
"tests/test_httperror.py::TestHTTPError::test_forbidden[application/vnd.company.system.project.resource+json;v=1.1]",
"tests/test_httperror.py::TestHTTPError::test_forbidden[application/json-patch+json]",
"tests/test_httperror.py::TestHTTPError::test_epic_fail_json",
"tests/test_httperror.py::TestHTTPError::test_epic_fail_xml[text/xml]",
"tests/test_httperror.py::TestHTTPError::test_epic_fail_xml[application/xml]",
"tests/test_httperror.py::TestHTTPError::test_epic_fail_xml[application/vnd.company.system.project.resource+xml;v=1.1]",
"tests/test_httperror.py::TestHTTPError::test_epic_fail_xml[application/atom+xml]",
"tests/test_httperror.py::TestHTTPError::test_unicode_json",
"tests/test_httperror.py::TestHTTPError::test_unicode_xml",
"tests/test_httperror.py::TestHTTPError::test_401",
"tests/test_httperror.py::TestHTTPError::test_404_without_body",
"tests/test_httperror.py::TestHTTPError::test_404_with_body",
"tests/test_httperror.py::TestHTTPError::test_405_without_body",
"tests/test_httperror.py::TestHTTPError::test_405_without_body_with_extra_headers",
"tests/test_httperror.py::TestHTTPError::test_405_without_body_with_extra_headers_double_check",
"tests/test_httperror.py::TestHTTPError::test_405_with_body",
"tests/test_httperror.py::TestHTTPError::test_410_without_body",
"tests/test_httperror.py::TestHTTPError::test_410_with_body",
"tests/test_httperror.py::TestHTTPError::test_411",
"tests/test_httperror.py::TestHTTPError::test_413",
"tests/test_httperror.py::TestHTTPError::test_temporary_413_integer_retry_after",
"tests/test_httperror.py::TestHTTPError::test_temporary_413_datetime_retry_after",
"tests/test_httperror.py::TestHTTPError::test_414",
"tests/test_httperror.py::TestHTTPError::test_414_with_title",
"tests/test_httperror.py::TestHTTPError::test_414_with_description",
"tests/test_httperror.py::TestHTTPError::test_414_with_custom_kwargs",
"tests/test_httperror.py::TestHTTPError::test_416",
"tests/test_httperror.py::TestHTTPError::test_429_no_retry_after",
"tests/test_httperror.py::TestHTTPError::test_429",
"tests/test_httperror.py::TestHTTPError::test_429_datetime",
"tests/test_httperror.py::TestHTTPError::test_503_integer_retry_after",
"tests/test_httperror.py::TestHTTPError::test_503_datetime_retry_after",
"tests/test_httperror.py::TestHTTPError::test_invalid_header",
"tests/test_httperror.py::TestHTTPError::test_missing_header",
"tests/test_httperror.py::TestHTTPError::test_invalid_param",
"tests/test_httperror.py::TestHTTPError::test_missing_param",
"tests/test_httperror.py::TestHTTPError::test_misc",
"tests/test_httperror.py::TestHTTPError::test_title_default_message_if_none",
"tests/test_httpstatus.py::TestHTTPStatus::test_raise_status_in_before_hook",
"tests/test_httpstatus.py::TestHTTPStatus::test_raise_status_in_responder",
"tests/test_httpstatus.py::TestHTTPStatus::test_raise_status_runs_after_hooks",
"tests/test_httpstatus.py::TestHTTPStatus::test_raise_status_survives_after_hooks",
"tests/test_httpstatus.py::TestHTTPStatus::test_raise_status_empty_body",
"tests/test_httpstatus.py::TestHTTPStatusWithMiddleware::test_raise_status_in_process_request",
"tests/test_httpstatus.py::TestHTTPStatusWithMiddleware::test_raise_status_in_process_resource",
"tests/test_httpstatus.py::TestHTTPStatusWithMiddleware::test_raise_status_runs_process_response",
"tests/test_media_handlers.py::test_base_handler_contract",
"tests/test_middleware.py::TestRequestTimeMiddleware::test_skip_process_resource",
"tests/test_middleware.py::TestRequestTimeMiddleware::test_add_invalid_middleware",
"tests/test_middleware.py::TestRequestTimeMiddleware::test_response_middleware_raises_exception",
"tests/test_middleware.py::TestRequestTimeMiddleware::test_log_get_request",
"tests/test_middleware.py::TestTransactionIdMiddleware::test_generate_trans_id_with_request",
"tests/test_middleware.py::TestSeveralMiddlewares::test_generate_trans_id_and_time_with_request",
"tests/test_middleware.py::TestSeveralMiddlewares::test_legacy_middleware_called_with_correct_args",
"tests/test_middleware.py::TestSeveralMiddlewares::test_middleware_execution_order",
"tests/test_middleware.py::TestSeveralMiddlewares::test_independent_middleware_execution_order",
"tests/test_middleware.py::TestSeveralMiddlewares::test_multiple_reponse_mw_throw_exception",
"tests/test_middleware.py::TestSeveralMiddlewares::test_inner_mw_throw_exception",
"tests/test_middleware.py::TestSeveralMiddlewares::test_inner_mw_with_ex_handler_throw_exception",
"tests/test_middleware.py::TestSeveralMiddlewares::test_outer_mw_with_ex_handler_throw_exception",
"tests/test_middleware.py::TestSeveralMiddlewares::test_order_mw_executed_when_exception_in_resp",
"tests/test_middleware.py::TestSeveralMiddlewares::test_order_independent_mw_executed_when_exception_in_resp",
"tests/test_middleware.py::TestSeveralMiddlewares::test_order_mw_executed_when_exception_in_req",
"tests/test_middleware.py::TestSeveralMiddlewares::test_order_independent_mw_executed_when_exception_in_req",
"tests/test_middleware.py::TestSeveralMiddlewares::test_order_mw_executed_when_exception_in_rsrc",
"tests/test_middleware.py::TestSeveralMiddlewares::test_order_independent_mw_executed_when_exception_in_rsrc",
"tests/test_middleware.py::TestRemoveBasePathMiddleware::test_base_path_is_removed_before_routing",
"tests/test_middleware.py::TestResourceMiddleware::test_can_access_resource_params",
"tests/test_middleware.py::TestErrorHandling::test_error_composed_before_resp_middleware_called",
"tests/test_middleware.py::TestErrorHandling::test_http_status_raised_from_error_handler",
"tests/test_options.py::TestRequestOptions::test_option_defaults",
"tests/test_options.py::TestRequestOptions::test_options_toggle[keep_blank_qs_values]",
"tests/test_options.py::TestRequestOptions::test_options_toggle[auto_parse_form_urlencoded]",
"tests/test_options.py::TestRequestOptions::test_options_toggle[auto_parse_qs_csv]",
"tests/test_options.py::TestRequestOptions::test_options_toggle[strip_url_path_trailing_slash]",
"tests/test_options.py::TestRequestOptions::test_incorrect_options",
"tests/test_query_params.py::TestQueryParams::test_none[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_none[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_blank[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_blank[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_simple[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_simple[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_percent_encoded[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_percent_encoded[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_option_auto_parse_qs_csv_simple_false[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_option_auto_parse_qs_csv_simple_false[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_option_auto_parse_qs_csv_simple_true[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_option_auto_parse_qs_csv_simple_true[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_option_auto_parse_qs_csv_complex_false[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_option_auto_parse_qs_csv_complex_false[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_bad_percentage[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_bad_percentage[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_allowed_names[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_allowed_names[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_required[get_param-simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_required[get_param-simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_required[get_param_as_int-simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_required[get_param_as_int-simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_required[get_param_as_bool-simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_required[get_param_as_bool-simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_required[get_param_as_list-simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_required[get_param_as_list-simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_int[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_int[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_int_neg[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_int_neg[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_boolean[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_boolean[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_boolean_blank[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_boolean_blank[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_list_type[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_list_type[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_list_type_blank[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_list_type_blank[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_list_transformer[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_list_transformer[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_param_property[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_param_property[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_multiple_form_keys[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_multiple_form_keys[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_multiple_keys_as_bool[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_multiple_keys_as_bool[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_multiple_keys_as_int[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_multiple_keys_as_int[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_multiple_form_keys_as_list[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_multiple_form_keys_as_list[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_valid[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_valid[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_missing_param[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_missing_param[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_valid_with_format[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_valid_with_format[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_store[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_store[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_invalid[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_date_invalid[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_valid[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_valid[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_missing_param[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_missing_param[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_valid_with_format[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_valid_with_format[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_store[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_store[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_invalid[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_datetime_invalid[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_dict_valid[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_dict_valid[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_dict_missing_param[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_dict_missing_param[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_dict_store[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_dict_store[simulate_request_post_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_dict_invalid[simulate_request_get_query_params]",
"tests/test_query_params.py::TestQueryParams::test_get_dict_invalid[simulate_request_post_query_params]",
"tests/test_query_params.py::TestPostQueryParams::test_http_methods_body_expected[POST]",
"tests/test_query_params.py::TestPostQueryParams::test_http_methods_body_expected[PUT]",
"tests/test_query_params.py::TestPostQueryParams::test_http_methods_body_expected[PATCH]",
"tests/test_query_params.py::TestPostQueryParams::test_http_methods_body_expected[DELETE]",
"tests/test_query_params.py::TestPostQueryParams::test_http_methods_body_expected[OPTIONS]",
"tests/test_query_params.py::TestPostQueryParams::test_http_methods_body_not_expected[GET]",
"tests/test_query_params.py::TestPostQueryParams::test_http_methods_body_not_expected[HEAD]",
"tests/test_query_params.py::TestPostQueryParams::test_non_ascii",
"tests/test_query_params.py::TestPostQueryParams::test_empty_body",
"tests/test_query_params.py::TestPostQueryParams::test_empty_body_no_content_length",
"tests/test_query_params.py::TestPostQueryParams::test_explicitly_disable_auto_parse",
"tests/test_query_params.py::TestPostQueryParamsDefaultBehavior::test_dont_auto_parse_by_default",
"tests/test_redirects.py::TestRedirects::test_redirect[GET-301",
"tests/test_redirects.py::TestRedirects::test_redirect[POST-302",
"tests/test_redirects.py::TestRedirects::test_redirect[PUT-303",
"tests/test_redirects.py::TestRedirects::test_redirect[DELETE-307",
"tests/test_redirects.py::TestRedirects::test_redirect[HEAD-308",
"tests/test_request_access_route.py::test_remote_addr_only",
"tests/test_request_access_route.py::test_rfc_forwarded",
"tests/test_request_access_route.py::test_malformed_rfc_forwarded",
"tests/test_request_access_route.py::test_x_forwarded_for",
"tests/test_request_access_route.py::test_x_real_ip",
"tests/test_request_access_route.py::test_remote_addr",
"tests/test_request_access_route.py::test_remote_addr_missing",
"tests/test_request_attrs.py::TestRequestAttributes::test_missing_qs",
"tests/test_request_attrs.py::TestRequestAttributes::test_empty",
"tests/test_request_attrs.py::TestRequestAttributes::test_host",
"tests/test_request_attrs.py::TestRequestAttributes::test_subdomain",
"tests/test_request_attrs.py::TestRequestAttributes::test_reconstruct_url",
"tests/test_request_attrs.py::TestRequestAttributes::test_nonlatin_path[/hello_\\u043f\\u0440\\u0438\\u0432\\u0435\\u0442]",
"tests/test_request_attrs.py::TestRequestAttributes::test_nonlatin_path[/test/%E5%BB%B6%E5%AE%89]",
"tests/test_request_attrs.py::TestRequestAttributes::test_nonlatin_path[/test/%C3%A4%C3%B6%C3%BC%C3%9F%E2%82%AC]",
"tests/test_request_attrs.py::TestRequestAttributes::test_uri",
"tests/test_request_attrs.py::TestRequestAttributes::test_uri_https",
"tests/test_request_attrs.py::TestRequestAttributes::test_uri_http_1_0",
"tests/test_request_attrs.py::TestRequestAttributes::test_relative_uri",
"tests/test_request_attrs.py::TestRequestAttributes::test_client_accepts",
"tests/test_request_attrs.py::TestRequestAttributes::test_client_accepts_bogus",
"tests/test_request_attrs.py::TestRequestAttributes::test_client_accepts_props",
"tests/test_request_attrs.py::TestRequestAttributes::test_client_prefers",
"tests/test_request_attrs.py::TestRequestAttributes::test_range",
"tests/test_request_attrs.py::TestRequestAttributes::test_range_unit",
"tests/test_request_attrs.py::TestRequestAttributes::test_range_invalid",
"tests/test_request_attrs.py::TestRequestAttributes::test_missing_attribute_header",
"tests/test_request_attrs.py::TestRequestAttributes::test_content_length",
"tests/test_request_attrs.py::TestRequestAttributes::test_bogus_content_length_nan",
"tests/test_request_attrs.py::TestRequestAttributes::test_bogus_content_length_neg",
"tests/test_request_attrs.py::TestRequestAttributes::test_date[Date-date]",
"tests/test_request_attrs.py::TestRequestAttributes::test_date[If-Modified-Since-if_modified_since]",
"tests/test_request_attrs.py::TestRequestAttributes::test_date[If-Unmodified-Since-if_unmodified_since]",
"tests/test_request_attrs.py::TestRequestAttributes::test_date_invalid[Date-date]",
"tests/test_request_attrs.py::TestRequestAttributes::test_date_invalid[If-Modified-Since-if_modified_since]",
"tests/test_request_attrs.py::TestRequestAttributes::test_date_invalid[If-Unmodified-Since-if_unmodified_since]",
"tests/test_request_attrs.py::TestRequestAttributes::test_date_missing[date]",
"tests/test_request_attrs.py::TestRequestAttributes::test_date_missing[if_modified_since]",
"tests/test_request_attrs.py::TestRequestAttributes::test_date_missing[if_unmodified_since]",
"tests/test_request_attrs.py::TestRequestAttributes::test_attribute_headers",
"tests/test_request_attrs.py::TestRequestAttributes::test_method",
"tests/test_request_attrs.py::TestRequestAttributes::test_empty_path",
"tests/test_request_attrs.py::TestRequestAttributes::test_content_type_method",
"tests/test_request_attrs.py::TestRequestAttributes::test_content_length_method",
"tests/test_request_attrs.py::TestRequestAttributes::test_port_explicit[HTTP/1.0]",
"tests/test_request_attrs.py::TestRequestAttributes::test_port_explicit[HTTP/1.1]",
"tests/test_request_attrs.py::TestRequestAttributes::test_scheme_https[HTTP/1.0]",
"tests/test_request_attrs.py::TestRequestAttributes::test_scheme_https[HTTP/1.1]",
"tests/test_request_attrs.py::TestRequestAttributes::test_scheme_http[HTTP/1.0-True]",
"tests/test_request_attrs.py::TestRequestAttributes::test_scheme_http[HTTP/1.0-False]",
"tests/test_request_attrs.py::TestRequestAttributes::test_scheme_http[HTTP/1.1-True]",
"tests/test_request_attrs.py::TestRequestAttributes::test_scheme_http[HTTP/1.1-False]",
"tests/test_request_attrs.py::TestRequestAttributes::test_netloc_default_port[HTTP/1.0]",
"tests/test_request_attrs.py::TestRequestAttributes::test_netloc_default_port[HTTP/1.1]",
"tests/test_request_attrs.py::TestRequestAttributes::test_netloc_nondefault_port[HTTP/1.0]",
"tests/test_request_attrs.py::TestRequestAttributes::test_netloc_nondefault_port[HTTP/1.1]",
"tests/test_request_attrs.py::TestRequestAttributes::test_netloc_from_env[HTTP/1.0]",
"tests/test_request_attrs.py::TestRequestAttributes::test_netloc_from_env[HTTP/1.1]",
"tests/test_request_attrs.py::TestRequestAttributes::test_app_present",
"tests/test_request_attrs.py::TestRequestAttributes::test_app_blank",
"tests/test_request_attrs.py::TestRequestAttributes::test_app_missing",
"tests/test_request_body.py::TestRequestBody::test_empty_body",
"tests/test_request_body.py::TestRequestBody::test_tiny_body",
"tests/test_request_body.py::TestRequestBody::test_tiny_body_overflow",
"tests/test_request_body.py::TestRequestBody::test_read_body",
"tests/test_request_body.py::TestRequestBody::test_bounded_stream_property_empty_body",
"tests/test_request_body.py::TestRequestBody::test_body_stream_wrapper",
"tests/test_request_body.py::TestRequestBody::test_request_repr",
"tests/test_request_context.py::TestRequestContext::test_default_request_context",
"tests/test_request_context.py::TestRequestContext::test_custom_request_context",
"tests/test_request_context.py::TestRequestContext::test_custom_request_context_failure",
"tests/test_request_context.py::TestRequestContext::test_custom_request_context_request_access",
"tests/test_request_forwarded.py::test_no_forwarded_headers",
"tests/test_request_forwarded.py::test_x_forwarded_host",
"tests/test_request_forwarded.py::test_x_forwarded_proto",
"tests/test_request_forwarded.py::test_forwarded_host",
"tests/test_request_forwarded.py::test_forwarded_multiple_params",
"tests/test_request_forwarded.py::test_forwarded_missing_first_hop_host",
"tests/test_request_media.py::test_json[None]",
"tests/test_request_media.py::test_json[*/*]",
"tests/test_request_media.py::test_json[application/json]",
"tests/test_request_media.py::test_json[application/json;",
"tests/test_request_media.py::test_msgpack[application/msgpack]",
"tests/test_request_media.py::test_msgpack[application/msgpack;",
"tests/test_request_media.py::test_msgpack[application/x-msgpack]",
"tests/test_request_media.py::test_unknown_media_type[nope/json]",
"tests/test_request_media.py::test_invalid_json",
"tests/test_request_media.py::test_invalid_msgpack",
"tests/test_request_media.py::test_invalid_stream_fails_gracefully",
"tests/test_request_media.py::test_use_cached_media",
"tests/test_response.py::test_response_set_content_type_set",
"tests/test_response.py::test_response_set_content_type_not_set",
"tests/test_response_body.py::TestResponseBody::test_append_body",
"tests/test_response_body.py::TestResponseBody::test_response_repr",
"tests/test_response_context.py::TestRequestContext::test_default_response_context",
"tests/test_response_context.py::TestRequestContext::test_custom_response_context",
"tests/test_response_context.py::TestRequestContext::test_custom_response_context_failure",
"tests/test_response_context.py::TestRequestContext::test_custom_response_context_factory",
"tests/test_response_media.py::test_json[*/*]",
"tests/test_response_media.py::test_json[application/json;",
"tests/test_response_media.py::test_msgpack[application/msgpack]",
"tests/test_response_media.py::test_msgpack[application/msgpack;",
"tests/test_response_media.py::test_msgpack[application/x-msgpack]",
"tests/test_response_media.py::test_unknown_media_type",
"tests/test_response_media.py::test_use_cached_media",
"tests/test_response_media.py::test_default_media_type",
"tests/test_response_media.py::test_mimeparse_edgecases",
"tests/test_sinks.py::TestDefaultRouting::test_single_default_pattern",
"tests/test_sinks.py::TestDefaultRouting::test_single_simple_pattern",
"tests/test_sinks.py::TestDefaultRouting::test_single_compiled_pattern",
"tests/test_sinks.py::TestDefaultRouting::test_named_groups",
"tests/test_sinks.py::TestDefaultRouting::test_multiple_patterns",
"tests/test_sinks.py::TestDefaultRouting::test_with_route",
"tests/test_sinks.py::TestDefaultRouting::test_route_precedence",
"tests/test_sinks.py::TestDefaultRouting::test_route_precedence_with_id",
"tests/test_sinks.py::TestDefaultRouting::test_route_precedence_with_both_id",
"tests/test_slots.py::TestSlots::test_slots_request",
"tests/test_slots.py::TestSlots::test_slots_response",
"tests/test_static.py::test_bad_path[/static]",
"tests/test_static.py::test_bad_path[/static/]",
"tests/test_static.py::test_bad_path[/static/.]",
"tests/test_static.py::test_bad_path[/static/..]",
"tests/test_static.py::test_bad_path[/static/../.]",
"tests/test_static.py::test_bad_path[/static/.././etc/passwd]",
"tests/test_static.py::test_bad_path[/static/../etc/passwd]",
"tests/test_static.py::test_bad_path[/static/css/../../secret]",
"tests/test_static.py::test_bad_path[/static/css/../../etc/passwd]",
"tests/test_static.py::test_bad_path[/static/./../etc/passwd]",
"tests/test_static.py::test_bad_path[/static/css/../.\\\\056/etc/passwd]",
"tests/test_static.py::test_bad_path[/static/./\\\\056./etc/passwd]",
"tests/test_static.py::test_bad_path[/static/\\\\056\\\\056/etc/passwd]",
"tests/test_static.py::test_bad_path[/static//test.css]",
"tests/test_static.py::test_bad_path[/static//COM10]",
"tests/test_static.py::test_bad_path[/static/path//test.css]",
"tests/test_static.py::test_bad_path[/static/path///test.css]",
"tests/test_static.py::test_bad_path[/static/path////test.css]",
"tests/test_static.py::test_bad_path[/static/path/foo//test.css]",
"tests/test_static.py::test_bad_path[/static/.\\x00ssh/authorized_keys]",
"tests/test_static.py::test_bad_path[/static/.\\x1fssh/authorized_keys]",
"tests/test_static.py::test_bad_path[/static/.\\x80ssh/authorized_keys]",
"tests/test_static.py::test_bad_path[/static/.\\x9fssh/authorized_keys]",
"tests/test_static.py::test_bad_path[/static/~/.ssh/authorized_keys]",
"tests/test_static.py::test_bad_path[/static/.ssh/authorized_key?]",
"tests/test_static.py::test_bad_path[/static/.ssh/authorized_key>foo]",
"tests/test_static.py::test_bad_path[/static/.ssh/authorized_key|foo]",
"tests/test_static.py::test_bad_path[/static/.ssh/authorized_key<foo]",
"tests/test_static.py::test_bad_path[/static/something:something]",
"tests/test_static.py::test_bad_path[/static/thing*.sql]",
"tests/test_static.py::test_bad_path[/static/'thing'.sql]",
"tests/test_static.py::test_bad_path[/static/\"thing\".sql]",
"tests/test_static.py::test_bad_path[/static/something.]",
"tests/test_static.py::test_bad_path[/static/something..]",
"tests/test_static.py::test_bad_path[/static/something",
"tests/test_static.py::test_bad_path[/static/",
"tests/test_static.py::test_bad_path[/static/something\\t]",
"tests/test_static.py::test_bad_path[/static/\\tsomething]",
"tests/test_static.py::test_bad_path[/static/ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttx]",
"tests/test_static.py::test_invalid_args[static-/var/www/statics]",
"tests/test_static.py::test_invalid_args[/static-./var/www/statics]",
"tests/test_static.py::test_invalid_args[/static-statics]",
"tests/test_static.py::test_invalid_args[/static-../statics]",
"tests/test_static.py::test_good_path[/static/-/css/test.css-/css/test.css-text/css]",
"tests/test_static.py::test_good_path[/static-/css/test.css-/css/test.css-text/css]",
"tests/test_static.py::test_good_path[/static-/tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt-/tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt-application/octet-stream]",
"tests/test_static.py::test_good_path[/static-/.test.css-/.test.css-text/css]",
"tests/test_static.py::test_good_path[/some/download/-/report.pdf-/report.pdf-application/pdf]",
"tests/test_static.py::test_good_path[/some/download/-/Fancy",
"tests/test_static.py::test_good_path[/some/download-/report.zip-/report.zip-application/zip]",
"tests/test_static.py::test_good_path[/some/download-/foo/../report.zip-/report.zip-application/zip]",
"tests/test_static.py::test_good_path[/some/download-/foo/../bar/../report.zip-/report.zip-application/zip]",
"tests/test_static.py::test_good_path[/some/download-/foo/bar/../../report.zip-/report.zip-application/zip]",
"tests/test_static.py::test_lifo",
"tests/test_static.py::test_lifo_negative",
"tests/test_static.py::test_downloadable",
"tests/test_static.py::test_downloadable_not_found",
"tests/test_uri_converters.py::test_int_converter[123-None-None-None-123]",
"tests/test_uri_converters.py::test_int_converter[01-None-None-None-1]",
"tests/test_uri_converters.py::test_int_converter[001-None-None-None-1]",
"tests/test_uri_converters.py::test_int_converter[0-None-None-None-0]",
"tests/test_uri_converters.py::test_int_converter[00-None-None-None-0]",
"tests/test_uri_converters.py::test_int_converter[1-1-None-None-1]",
"tests/test_uri_converters.py::test_int_converter[12-1-None-None-None0]",
"tests/test_uri_converters.py::test_int_converter[12-2-None-None-120]",
"tests/test_uri_converters.py::test_int_converter[1-1-1-1-1]",
"tests/test_uri_converters.py::test_int_converter[1-1-1-None-1]",
"tests/test_uri_converters.py::test_int_converter[1-1-1-2-1]",
"tests/test_uri_converters.py::test_int_converter[1-1-2-None-None]",
"tests/test_uri_converters.py::test_int_converter[1-1-2-1-None]",
"tests/test_uri_converters.py::test_int_converter[2-1-1-2-2]",
"tests/test_uri_converters.py::test_int_converter[2-1-2-2-2]",
"tests/test_uri_converters.py::test_int_converter[3-1-1-2-None]",
"tests/test_uri_converters.py::test_int_converter[12-1-None-None-None1]",
"tests/test_uri_converters.py::test_int_converter[12-1-1-12-None]",
"tests/test_uri_converters.py::test_int_converter[12-2-None-None-121]",
"tests/test_uri_converters.py::test_int_converter[12-2-1-12-12]",
"tests/test_uri_converters.py::test_int_converter[12-2-12-12-12]",
"tests/test_uri_converters.py::test_int_converter[12-2-13-12-None]",
"tests/test_uri_converters.py::test_int_converter[12-2-13-13-None]",
"tests/test_uri_converters.py::test_int_converter_malformed[0x0F]",
"tests/test_uri_converters.py::test_int_converter_malformed[something]",
"tests/test_uri_converters.py::test_int_converter_malformed[]",
"tests/test_uri_converters.py::test_int_converter_malformed[",
"tests/test_uri_converters.py::test_int_converter_malformed[123",
"tests/test_uri_converters.py::test_int_converter_malformed[123\\t]",
"tests/test_uri_converters.py::test_int_converter_malformed[123\\n]",
"tests/test_uri_converters.py::test_int_converter_malformed[123\\r]",
"tests/test_uri_converters.py::test_int_converter_malformed[123\\x0b]",
"tests/test_uri_converters.py::test_int_converter_malformed[123\\x0c]",
"tests/test_uri_converters.py::test_int_converter_malformed[\\t123]",
"tests/test_uri_converters.py::test_int_converter_malformed[\\n123]",
"tests/test_uri_converters.py::test_int_converter_malformed[\\r123]",
"tests/test_uri_converters.py::test_int_converter_malformed[\\x0b123]",
"tests/test_uri_converters.py::test_int_converter_malformed[\\x0c123]",
"tests/test_uri_converters.py::test_int_converter_invalid_config[0]",
"tests/test_uri_converters.py::test_int_converter_invalid_config[-1]",
"tests/test_uri_converters.py::test_int_converter_invalid_config[-10]",
"tests/test_uri_converters.py::test_datetime_converter[07-03-17-%m-%d-%y-expected0]",
"tests/test_uri_converters.py::test_datetime_converter[07-03-17",
"tests/test_uri_converters.py::test_datetime_converter[2017-07-03T14:30:01Z-%Y-%m-%dT%H:%M:%SZ-expected2]",
"tests/test_uri_converters.py::test_datetime_converter[2017-07-03T14:30:01-%Y-%m-%dT%H:%M:%S-expected3]",
"tests/test_uri_converters.py::test_datetime_converter[2017_19-%Y_%H-expected4]",
"tests/test_uri_converters.py::test_datetime_converter[2017-07-03T14:30:01-%Y-%m-%dT%H:%M:%SZ-None]",
"tests/test_uri_converters.py::test_datetime_converter[",
"tests/test_uri_converters.py::test_datetime_converter[07",
"tests/test_uri_converters.py::test_datetime_converter_default_format",
"tests/test_uri_converters.py::test_uuid_converter[",
"tests/test_uri_templates.py::test_root_path",
"tests/test_uri_templates.py::test_no_vars",
"tests/test_uri_templates.py::test_special_chars",
"tests/test_uri_templates.py::test_single[id]",
"tests/test_uri_templates.py::test_single[id123]",
"tests/test_uri_templates.py::test_single[widget_id]",
"tests/test_uri_templates.py::test_int_converter[/{id:int}]",
"tests/test_uri_templates.py::test_int_converter[/{id:int(3)}]",
"tests/test_uri_templates.py::test_int_converter[/{id:int(min=123)}]",
"tests/test_uri_templates.py::test_int_converter[/{id:int(min=123,",
"tests/test_uri_templates.py::test_int_converter_rejections[/{id:int(2)}]",
"tests/test_uri_templates.py::test_int_converter_rejections[/{id:int(min=124)}]",
"tests/test_uri_templates.py::test_int_converter_rejections[/{id:int(num_digits=3,",
"tests/test_uri_templates.py::test_datetime_converter[/{start_year:int}-to-{timestamp:dt}-/1961-to-1969-07-21T02:56:00Z-dt_expected0]",
"tests/test_uri_templates.py::test_datetime_converter[/{start_year:int}-to-{timestamp:dt(\"%Y-%m-%d\")}-/1961-to-1969-07-21-dt_expected1]",
"tests/test_uri_templates.py::test_datetime_converter[/{start_year:int}/{timestamp:dt(\"%Y-%m-%d",
"tests/test_uri_templates.py::test_datetime_converter[/{start_year:int}-to-{timestamp:dt(\"%Y-%m\")}-/1961-to-1969-07-21-None]",
"tests/test_uri_templates.py::test_uuid_converter_complex_segment",
"tests/test_uri_templates.py::test_converter_custom[/{food:spam}-/something-expected0]",
"tests/test_uri_templates.py::test_converter_custom[/{food:spam(\")\")}:{food_too:spam(\"()\")}-/bacon:eggs-expected1]",
"tests/test_uri_templates.py::test_converter_custom[/({food:spam()}){food_too:spam(\"()\")}-/(bacon)eggs-expected2]",
"tests/test_uri_templates.py::test_single_trailing_slash",
"tests/test_uri_templates.py::test_multiple",
"tests/test_uri_templates.py::test_empty_path_component[//]",
"tests/test_uri_templates.py::test_empty_path_component[//begin]",
"tests/test_uri_templates.py::test_empty_path_component[/end//]",
"tests/test_uri_templates.py::test_empty_path_component[/in//side]",
"tests/test_uri_templates.py::test_relative_path[]",
"tests/test_uri_templates.py::test_relative_path[no]",
"tests/test_uri_templates.py::test_relative_path[no/leading_slash]",
"tests/test_uri_templates.py::test_same_level_complex_var[True]",
"tests/test_uri_templates.py::test_same_level_complex_var[False]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_string_type_required[42]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_string_type_required[API]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_template_must_start_with_slash[this]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_template_must_start_with_slash[this/that]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_template_may_not_contain_double_slash[//]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_template_may_not_contain_double_slash[a//]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_template_may_not_contain_double_slash[//b]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_template_may_not_contain_double_slash[a//b]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_template_may_not_contain_double_slash[a/b//]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_template_may_not_contain_double_slash[a/b//c]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_root",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_no_fields[/hello]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_no_fields[/hello/world]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_no_fields[/hi/there/how/are/you]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_one_field",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_one_field_with_digits",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_one_field_with_prefixed_digits",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_two_fields[]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_two_fields[/]",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_three_fields",
"tests/test_uri_templates_legacy.py::TestUriTemplates::test_malformed_field",
"tests/test_utils.py::TestFalconUtils::test_deprecated_decorator",
"tests/test_utils.py::TestFalconUtils::test_http_now",
"tests/test_utils.py::TestFalconUtils::test_dt_to_http",
"tests/test_utils.py::TestFalconUtils::test_http_date_to_dt",
"tests/test_utils.py::TestFalconUtils::test_pack_query_params_none",
"tests/test_utils.py::TestFalconUtils::test_pack_query_params_one",
"tests/test_utils.py::TestFalconUtils::test_pack_query_params_several",
"tests/test_utils.py::TestFalconUtils::test_uri_encode",
"tests/test_utils.py::TestFalconUtils::test_uri_encode_double",
"tests/test_utils.py::TestFalconUtils::test_uri_encode_value",
"tests/test_utils.py::TestFalconUtils::test_uri_decode",
"tests/test_utils.py::TestFalconUtils::test_prop_uri_encode_models_stdlib_quote",
"tests/test_utils.py::TestFalconUtils::test_prop_uri_encode_value_models_stdlib_quote_safe_tilde",
"tests/test_utils.py::TestFalconUtils::test_prop_uri_decode_models_stdlib_unquote_plus",
"tests/test_utils.py::TestFalconUtils::test_parse_query_string",
"tests/test_utils.py::TestFalconUtils::test_parse_host",
"tests/test_utils.py::TestFalconUtils::test_get_http_status",
"tests/test_utils.py::test_simulate_request_protocol[https-CONNECT]",
"tests/test_utils.py::test_simulate_request_protocol[https-DELETE]",
"tests/test_utils.py::test_simulate_request_protocol[https-GET]",
"tests/test_utils.py::test_simulate_request_protocol[https-HEAD]",
"tests/test_utils.py::test_simulate_request_protocol[https-OPTIONS]",
"tests/test_utils.py::test_simulate_request_protocol[https-PATCH]",
"tests/test_utils.py::test_simulate_request_protocol[https-POST]",
"tests/test_utils.py::test_simulate_request_protocol[https-PUT]",
"tests/test_utils.py::test_simulate_request_protocol[https-TRACE]",
"tests/test_utils.py::test_simulate_request_protocol[http-CONNECT]",
"tests/test_utils.py::test_simulate_request_protocol[http-DELETE]",
"tests/test_utils.py::test_simulate_request_protocol[http-GET]",
"tests/test_utils.py::test_simulate_request_protocol[http-HEAD]",
"tests/test_utils.py::test_simulate_request_protocol[http-OPTIONS]",
"tests/test_utils.py::test_simulate_request_protocol[http-PATCH]",
"tests/test_utils.py::test_simulate_request_protocol[http-POST]",
"tests/test_utils.py::test_simulate_request_protocol[http-PUT]",
"tests/test_utils.py::test_simulate_request_protocol[http-TRACE]",
"tests/test_utils.py::test_simulate_free_functions[simulate_get]",
"tests/test_utils.py::test_simulate_free_functions[simulate_head]",
"tests/test_utils.py::test_simulate_free_functions[simulate_post]",
"tests/test_utils.py::test_simulate_free_functions[simulate_put]",
"tests/test_utils.py::test_simulate_free_functions[simulate_options]",
"tests/test_utils.py::test_simulate_free_functions[simulate_patch]",
"tests/test_utils.py::test_simulate_free_functions[simulate_delete]",
"tests/test_utils.py::TestFalconTestingUtils::test_path_escape_chars_in_create_environ",
"tests/test_utils.py::TestFalconTestingUtils::test_no_prefix_allowed_for_query_strings_in_create_environ",
"tests/test_utils.py::TestFalconTestingUtils::test_none_header_value_in_create_environ",
"tests/test_utils.py::TestFalconTestingUtils::test_decode_empty_result",
"tests/test_utils.py::TestFalconTestingUtils::test_httpnow_alias_for_backwards_compat",
"tests/test_utils.py::TestFalconTestingUtils::test_default_headers",
"tests/test_utils.py::TestFalconTestingUtils::test_default_headers_with_override",
"tests/test_utils.py::TestFalconTestingUtils::test_status",
"tests/test_utils.py::TestFalconTestingUtils::test_wsgi_iterable_not_closeable",
"tests/test_utils.py::TestFalconTestingUtils::test_path_must_start_with_slash",
"tests/test_utils.py::TestFalconTestingUtils::test_cached_text_in_result",
"tests/test_utils.py::TestFalconTestingUtils::test_simple_resource_body_json_xor",
"tests/test_utils.py::TestFalconTestingUtils::test_query_string",
"tests/test_utils.py::TestFalconTestingUtils::test_query_string_no_question",
"tests/test_utils.py::TestFalconTestingUtils::test_query_string_in_path",
"tests/test_utils.py::TestCaseFancyAPI::test_something",
"tests/test_utils.py::TestNoApiClass::test_something",
"tests/test_utils.py::TestSetupApi::test_something",
"tests/test_validators.py::test_jsonschema_validation_success",
"tests/test_validators.py::test_jsonschema_validation_failure",
"tests/test_wsgi.py::TestWSGIServer::test_get",
"tests/test_wsgi.py::TestWSGIServer::test_put",
"tests/test_wsgi.py::TestWSGIServer::test_head_405",
"tests/test_wsgi.py::TestWSGIServer::test_post",
"tests/test_wsgi.py::TestWSGIServer::test_post_invalid_content_length",
"tests/test_wsgi.py::TestWSGIServer::test_post_read_bounded_stream",
"tests/test_wsgi.py::TestWSGIServer::test_post_read_bounded_stream_no_body",
"tests/test_wsgi_errors.py::TestWSGIError::test_responder_logged_bytestring",
"tests/test_wsgi_interface.py::TestWSGIInterface::test_srmock",
"tests/test_wsgi_interface.py::TestWSGIInterface::test_pep3333",
"tests/test_wsgiref_inputwrapper_with_size.py::TestWsgiRefInputWrapper::test_resources_can_read_request_stream_during_tests"
]
| []
| Apache License 2.0 | 1,964 | [
"falcon/request.py"
]
| [
"falcon/request.py"
]
|
colour-science__colour-368 | 8c77aa68c2347583ad23adf678ae1eb3821f854e | 2017-12-19 09:05:12 | 3cd6ab8d4c3483bcdeb2d7ef33967160808c0bb2 | KelSolaar: @Nick-Shaw : I think the implementation is correct, as consequence of the changes is that the *Canon Log* curves will have their output changed.
coveralls:
[Coverage Status](https://coveralls.io/builds/14718843)
Coverage increased (+0.006%) to 97.97% when pulling **dc33b133866c009f9d29c7acd2b0813429e1ef67 on feature/canon_log_vlog** into **fb177953e8e0cd968ec36fc3a65a05e171c547b4 on develop**.
Nick-Shaw: The references to the input and output values of the Canon functions being `non-linear *IRE* data` should also be removed, as this may no longer be the case: 'IRE' means full range, which we now have switches for. | diff --git a/colour/colorimetry/__init__.py b/colour/colorimetry/__init__.py
index c14274bca..edeadca44 100644
--- a/colour/colorimetry/__init__.py
+++ b/colour/colorimetry/__init__.py
@@ -21,13 +21,11 @@ from .lefs import (mesopic_luminous_efficiency_function,
from .lightness import LIGHTNESS_METHODS
from .lightness import lightness
from .lightness import (lightness_Glasser1958, lightness_Wyszecki1963,
- lightness_CIE1976, lightness_Fairchild2010,
- lightness_Fairchild2011)
+ lightness_CIE1976, lightness_Fairchild2010)
from .luminance import LUMINANCE_METHODS
from .luminance import luminance
from .luminance import (luminance_Newhall1943, luminance_ASTMD153508,
- luminance_CIE1976, luminance_Fairchild2010,
- luminance_Fairchild2011)
+ luminance_CIE1976, luminance_Fairchild2010)
from .dominant import (dominant_wavelength, complementary_wavelength,
excitation_purity, colorimetric_purity)
from .photometry import (luminous_flux, luminous_efficiency, luminous_efficacy)
@@ -76,13 +74,13 @@ __all__ += ['LIGHTNESS_METHODS']
__all__ += ['lightness']
__all__ += [
'lightness_Glasser1958', 'lightness_Wyszecki1963', 'lightness_CIE1976',
- 'lightness_Fairchild2010', 'lightness_Fairchild2011'
+ 'lightness_Fairchild2010'
]
__all__ += ['LUMINANCE_METHODS']
__all__ += ['luminance']
__all__ += [
'luminance_Newhall1943', 'luminance_ASTMD153508', 'luminance_CIE1976',
- 'luminance_Fairchild2010', 'luminance_Fairchild2011'
+ 'luminance_Fairchild2010'
]
__all__ += [
'dominant_wavelength', 'complementary_wavelength', 'excitation_purity',
diff --git a/colour/colorimetry/lightness.py b/colour/colorimetry/lightness.py
index 19f02635f..bcbaca685 100644
--- a/colour/colorimetry/lightness.py
+++ b/colour/colorimetry/lightness.py
@@ -17,8 +17,6 @@ The following methods are available:
*luminance* :math:`Y` as per *CIE 1976* recommendation.
- :func:`lightness_Fairchild2010`: *Lightness* :math:`L_{hdr}` computation
of given *luminance* :math:`Y` using *Fairchild and Wyble (2010)* method.
-- :func:`lightness_Fairchild2011`: *Lightness* :math:`L_{hdr}` computation
- of given *luminance* :math:`Y` using *Fairchild and Chen (2011)* method.
See Also
--------
@@ -50,8 +48,7 @@ __status__ = 'Production'
__all__ = [
'lightness_Glasser1958', 'lightness_Wyszecki1963', 'lightness_CIE1976',
- 'lightness_Fairchild2010', 'lightness_Fairchild2011', 'LIGHTNESS_METHODS',
- 'lightness'
+ 'lightness_Fairchild2010', 'LIGHTNESS_METHODS', 'lightness'
]
@@ -186,7 +183,7 @@ def lightness_CIE1976(Y, Y_n=100):
return Lstar
-def lightness_Fairchild2010(Y, epsilon=1.836):
+def lightness_Fairchild2010(Y, epsilon=2):
"""
Computes *Lightness* :math:`L_{hdr}` of given *luminance* :math:`Y` using
*Fairchild and Wyble (2010)* method according to *Michealis-Menten*
@@ -221,73 +218,14 @@ def lightness_Fairchild2010(Y, epsilon=1.836):
Examples
--------
- >>> lightness_Fairchild2010(10.08 / 100) # doctest: +ELLIPSIS
+ >>> lightness_Fairchild2010(10.08 / 100, 1.836) # doctest: +ELLIPSIS
24.9022902...
"""
- maximum_perception = 100
-
Y = np.asarray(Y)
- L_hdr = reaction_rate_MichealisMenten(Y ** epsilon, maximum_perception,
- 0.184 ** epsilon) + 0.02
-
- return L_hdr
-
-
-def lightness_Fairchild2011(Y, epsilon=0.710, method='hdr-CIELAB'):
- """
- Computes *Lightness* :math:`L_{hdr}` of given *luminance* :math:`Y` using
- *Fairchild and Chen (2011)* method accordingly to *Michealis-Menten*
- kinetics.
-
- Parameters
- ----------
- Y : array_like
- *luminance* :math:`Y`.
- epsilon : numeric or array_like, optional
- :math:`\epsilon` exponent.
- method : unicode, optional
- **{'hdr-CIELAB', 'hdr-IPT'}**,
- *Lightness* :math:`L_{hdr}` computation method.
-
- Returns
- -------
- array_like
- *Lightness* :math:`L_{hdr}`.
-
- Warning
- -------
- The input domain of that definition is non standard!
-
- Notes
- -----
- - Input *luminance* :math:`Y` is in domain [0, :math:`\infty`].
-
- References
- ----------
- .. [7] Fairchild, M. D., & Chen, P. (2011). Brightness, Lightness, and
- Specifying Color in High-Dynamic-Range Scenes and Images.
- doi:10.1117/12.872075
-
- Examples
- --------
- >>> lightness_Fairchild2011(10.08 / 100) # doctest: +ELLIPSIS
- 26.45950981...
- >>> lightness_Fairchild2011(10.08 / 100, method='hdr-IPT')
- ... # doctest: +ELLIPSIS
- 26.3524672...
- """
-
- Y = np.asarray(Y)
-
- if method.lower() == 'hdr-cielab':
- maximum_perception = 247
- else:
- maximum_perception = 246
-
- L_hdr = reaction_rate_MichealisMenten(Y ** epsilon, maximum_perception, 2
- ** epsilon) + 0.02
+ L_hdr = reaction_rate_MichealisMenten(Y ** epsilon, 100, 0.184 **
+ epsilon) + 0.02
return L_hdr
@@ -296,15 +234,13 @@ LIGHTNESS_METHODS = CaseInsensitiveMapping({
'Glasser 1958': lightness_Glasser1958,
'Wyszecki 1963': lightness_Wyszecki1963,
'CIE 1976': lightness_CIE1976,
- 'Fairchild 2010': lightness_Fairchild2010,
- 'Fairchild 2011': lightness_Fairchild2011
+ 'Fairchild 2010': lightness_Fairchild2010
})
"""
Supported *Lightness* computations methods.
LIGHTNESS_METHODS : CaseInsensitiveMapping
- **{'Glasser 1958', 'Wyszecki 1963', 'CIE 1976', 'Fairchild 2010',
- 'Fairchild 2011'}**
+ **{'Glasser 1958', 'Wyszecki 1963', 'CIE 1976', 'Fairchild 2010'}**
Aliases:
@@ -322,8 +258,7 @@ def lightness(Y, method='CIE 1976', **kwargs):
Y : numeric or array_like
*luminance* :math:`Y`.
method : unicode, optional
- **{'CIE 1976', 'Glasser 1958', 'Wyszecki 1963', 'Fairchild 2010',
- 'Fairchild 2011'}**,
+ **{'CIE 1976', 'Glasser 1958', 'Wyszecki 1963', 'Fairchild 2010'}**,
Computation method.
Other Parameters
@@ -332,7 +267,7 @@ def lightness(Y, method='CIE 1976', **kwargs):
{:func:`lightness_CIE1976`},
White reference *luminance* :math:`Y_n`.
epsilon : numeric or array_like, optional
- {:func:`lightness_Fairchild2010`, :func:`lightness_Fairchild2011`},
+ {:func:`lightness_Fairchild2010`},
:math:`\epsilon` exponent.
Returns
diff --git a/colour/colorimetry/luminance.py b/colour/colorimetry/luminance.py
index 8e60fe58e..9af677107 100644
--- a/colour/colorimetry/luminance.py
+++ b/colour/colorimetry/luminance.py
@@ -17,8 +17,6 @@ The following methods are available:
*Lightness* :math:`L^*` as per *CIE 1976* recommendation.
- :func:`luminance_Fairchild2010`: *luminance* :math:`Y` computation of given
*Lightness* :math:`L_{hdr}` using *Fairchild and Wyble (2010)* method.
-- :func:`luminance_Fairchild2011`: *luminance* :math:`Y` computation of given
- *Lightness* :math:`L_{hdr}` using *Fairchild and Chen (2011)* method.
See Also
--------
@@ -44,8 +42,7 @@ __status__ = 'Production'
__all__ = [
'luminance_Newhall1943', 'luminance_ASTMD153508', 'luminance_CIE1976',
- 'luminance_Fairchild2010', 'luminance_Fairchild2011', 'LUMINANCE_METHODS',
- 'luminance'
+ 'luminance_Fairchild2010', 'LUMINANCE_METHODS', 'luminance'
]
@@ -178,7 +175,7 @@ def luminance_CIE1976(Lstar, Y_n=100):
return Y
-def luminance_Fairchild2010(L_hdr, epsilon=1.836):
+def luminance_Fairchild2010(L_hdr, epsilon=2):
"""
Computes *luminance* :math:`Y` of given *Lightness* :math:`L_{hdr}` using
*Fairchild and Wyble (2010)* method according to *Michealis-Menten*
@@ -228,71 +225,11 @@ def luminance_Fairchild2010(L_hdr, epsilon=1.836):
return Y
-def luminance_Fairchild2011(L_hdr, epsilon=0.710, method='hdr-CIELAB'):
- """
- Computes *luminance* :math:`Y` of given *Lightness* :math:`L_{hdr}` using
- *Fairchild and Chen (2011)* method accordingly to *Michealis-Menten*
- kinetics.
-
- Parameters
- ----------
- L_hdr : array_like
- *Lightness* :math:`L_{hdr}`.
- epsilon : numeric or array_like, optional
- :math:`\epsilon` exponent.
- method : unicode, optional
- **{'hdr-CIELAB', 'hdr-IPT'}**,
- *Lightness* :math:`L_{hdr}` computation method.
-
- Returns
- -------
- array_like
- *luminance* :math:`Y`.
-
- Warning
- -------
- The output range of that definition is non standard!
-
- Notes
- -----
- - Output *luminance* :math:`Y` is in range [0, math:`\infty`].
-
- References
- ----------
- .. [5] Fairchild, M. D., & Chen, P. (2011). Brightness, Lightness, and
- Specifying Color in High-Dynamic-Range Scenes and Images.
- doi:10.1117/12.872075
-
- Examples
- --------
- >>> luminance_Fairchild2011(26.459509817572265) # doctest: +ELLIPSIS
- 0.1007999...
- >>> luminance_Fairchild2011(26.352467267703549, method='hdr-IPT')
- ... # doctest: +ELLIPSIS
- 0.1007999...
- """
-
- L_hdr = np.asarray(L_hdr)
-
- if method.lower() == 'hdr-cielab':
- maximum_perception = 247
- else:
- maximum_perception = 246
-
- Y = np.exp(
- np.log(
- substrate_concentration_MichealisMenten(
- L_hdr - 0.02, maximum_perception, 2 ** epsilon)) / epsilon)
-
- return Y
-
-
LUMINANCE_METHODS = CaseInsensitiveMapping({
'Newhall 1943': luminance_Newhall1943,
'ASTM D1535-08': luminance_ASTMD153508,
'CIE 1976': luminance_CIE1976,
- 'Fairchild 2010': luminance_Fairchild2010,
- 'Fairchild 2011': luminance_Fairchild2011
+ 'Fairchild 2010': luminance_Fairchild2010
})
"""
Supported *luminance* computations methods.
@@ -319,8 +256,7 @@ def luminance(LV, method='CIE 1976', **kwargs):
LV : numeric or array_like
*Lightness* :math:`L^*` or *Munsell* value :math:`V`.
method : unicode, optional
- **{'CIE 1976', 'Newhall 1943', 'ASTM D1535-08', 'Fairchild 2010',
- 'Fairchild 2011'}**,
+ **{'CIE 1976', 'Newhall 1943', 'ASTM D1535-08', 'Fairchild 2010'}**,
Computation method.
Other Parameters
@@ -329,7 +265,7 @@ def luminance(LV, method='CIE 1976', **kwargs):
{:func:`luminance_CIE1976`},
White reference *luminance* :math:`Y_n`.
epsilon : numeric or array_like, optional
- {:func:`lightness_Fairchild2010`, :func:`lightness_Fairchild2011`},
+ {:func:`lightness_Fairchild2010`},
:math:`\epsilon` exponent.
Returns
diff --git a/colour/models/__init__.py b/colour/models/__init__.py
index 4b0dab467..41d5ab8c8 100644
--- a/colour/models/__init__.py
+++ b/colour/models/__init__.py
@@ -10,13 +10,12 @@ from .cie_luv import (XYZ_to_Luv, Luv_to_XYZ, Luv_to_uv, Luv_uv_to_xy,
Luv_to_LCHuv, LCHuv_to_Luv)
from .cie_ucs import XYZ_to_UCS, UCS_to_XYZ, UCS_to_uv, UCS_uv_to_xy
from .cie_uvw import XYZ_to_UVW
-from .hdr_cie_lab import (HDR_CIELAB_METHODS, XYZ_to_hdr_CIELab,
- hdr_CIELab_to_XYZ)
+from .hdr_cie_lab import XYZ_to_hdr_CIELab, hdr_CIELab_to_XYZ
from .hunter_lab import (XYZ_to_K_ab_HunterLab1966, XYZ_to_Hunter_Lab,
Hunter_Lab_to_XYZ)
from .hunter_rdab import XYZ_to_Hunter_Rdab
from .ipt import XYZ_to_IPT, IPT_to_XYZ, IPT_hue_angle
-from .hdr_ipt import HDR_IPT_METHODS, XYZ_to_hdr_IPT, hdr_IPT_to_XYZ
+from .hdr_ipt import XYZ_to_hdr_IPT, hdr_IPT_to_XYZ
from .ucs_luo2006 import (JMh_CIECAM02_to_CAM02LCD, CAM02LCD_to_JMh_CIECAM02,
JMh_CIECAM02_to_CAM02SCD, CAM02SCD_to_JMh_CIECAM02,
JMh_CIECAM02_to_CAM02UCS, CAM02UCS_to_JMh_CIECAM02)
@@ -38,14 +37,14 @@ __all__ += [
]
__all__ += ['XYZ_to_UCS', 'UCS_to_XYZ', 'UCS_to_uv', 'UCS_uv_to_xy']
__all__ += ['XYZ_to_UVW']
-__all__ += ['HDR_CIELAB_METHODS', 'XYZ_to_hdr_CIELab', 'hdr_CIELab_to_XYZ']
+__all__ += ['XYZ_to_hdr_CIELab', 'hdr_CIELab_to_XYZ']
__all__ += [
'XYZ_to_K_ab_HunterLab1966', 'XYZ_to_Hunter_Lab', 'Hunter_Lab_to_XYZ',
'XYZ_to_Hunter_Rdab'
]
__all__ += ['XYZ_to_Hunter_Rdab']
__all__ += ['XYZ_to_IPT', 'IPT_to_XYZ', 'IPT_hue_angle']
-__all__ += ['HDR_IPT_METHODS', 'XYZ_to_hdr_IPT', 'hdr_IPT_to_XYZ']
+__all__ += ['XYZ_to_hdr_IPT', 'hdr_IPT_to_XYZ']
__all__ += [
'JMh_CIECAM02_to_CAM02LCD', 'CAM02LCD_to_JMh_CIECAM02',
'JMh_CIECAM02_to_CAM02SCD', 'CAM02SCD_to_JMh_CIECAM02',
diff --git a/colour/models/hdr_cie_lab.py b/colour/models/hdr_cie_lab.py
index 6476afc3b..a943b8fe5 100644
--- a/colour/models/hdr_cie_lab.py
+++ b/colour/models/hdr_cie_lab.py
@@ -21,18 +21,14 @@ References
Simple Models for Describing the Color of High-Dynamic-Range and
Wide-Color-Gamut Images. In Proc. of Color and Imaging Conference
(pp. 322–326). ISBN:9781629932156
-.. [2] Fairchild, M. D., & Chen, P. (2011). Brightness, Lightness, and
- Specifying Color in High-Dynamic-Range Scenes and Images.
- doi:10.1117/12.872075
"""
from __future__ import division, unicode_literals
import numpy as np
-from colour.colorimetry import (
- ILLUMINANTS, lightness_Fairchild2010, lightness_Fairchild2011,
- luminance_Fairchild2010, luminance_Fairchild2011)
+from colour.colorimetry import (ILLUMINANTS, lightness_Fairchild2010,
+ luminance_Fairchild2010)
from colour.models import xy_to_xyY, xyY_to_XYZ
from colour.utilities import tsplit, tstack
@@ -43,77 +39,14 @@ __maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
-__all__ = [
- 'HDR_CIELAB_METHODS', 'exponent_hdr_CIELab', 'XYZ_to_hdr_CIELab',
- 'hdr_CIELab_to_XYZ'
-]
-
-HDR_CIELAB_METHODS = ('Fairchild 2010', 'Fairchild 2011')
-"""
-Supported *hdr-CIELAB* colourspace computation methods.
-
-HDR_CIELAB_METHODS : tuple
- **{'Fairchild 2011', 'Fairchild 2010'}**
-"""
-
-
-def exponent_hdr_CIELab(Y_s, Y_abs, method='Fairchild 2011'):
- """
- Computes *hdr-CIELAB* colourspace *Lightness* :math:`\epsilon` exponent
- using *Fairchild and Wyble (2010)* or *Fairchild and Chen (2011)* method.
-
- Parameters
- ----------
- Y_s : numeric or array_like
- Relative luminance :math:`Y_s` of the surround in range [0, 1].
- Y_abs : numeric or array_like
- Absolute luminance :math:`Y_{abs}` of the scene diffuse white in
- :math:`cd/m^2`.
- method : unicode, optional
- **{'Fairchild 2011', 'Fairchild 2010'}**,
- Computation method.
-
- Returns
- -------
- array_like
- *hdr-CIELAB* colourspace *Lightness* :math:`\epsilon` exponent.
-
- Examples
- --------
- >>> exponent_hdr_CIELab(0.2, 100) # doctest: +ELLIPSIS
- 0.7099276...
- >>> exponent_hdr_CIELab(0.2, 100, method='Fairchild 2010')
- ... # doctest: +ELLIPSIS
- 1.8360198...
- """
-
- Y_s = np.asarray(Y_s)
- Y_abs = np.asarray(Y_abs)
-
- method_l = method.lower()
- assert method.lower() in [
- m.lower() for m in HDR_CIELAB_METHODS
- ], ('"{0}" method is invalid, must be one of {1}!'.format(
- method, HDR_CIELAB_METHODS))
-
- if method_l == 'fairchild 2010':
- epsilon = 1.50
- else:
- epsilon = 0.58
-
- sf = 1.25 - 0.25 * (Y_s / 0.184)
- lf = np.log(318) / np.log(Y_abs)
- epsilon *= sf * lf
-
- return epsilon
+__all__ = ['XYZ_to_hdr_CIELab', 'hdr_CIELab_to_XYZ', 'exponent_hdr_CIELab']
def XYZ_to_hdr_CIELab(
XYZ,
illuminant=ILLUMINANTS['CIE 1931 2 Degree Standard Observer']['D50'],
Y_s=0.2,
- Y_abs=100,
- method='Fairchild 2011'):
+ Y_abs=100):
"""
Converts from *CIE XYZ* tristimulus values to *hdr-CIELAB* colourspace.
@@ -129,9 +62,6 @@ def XYZ_to_hdr_CIELab(
Y_abs : numeric or array_like
Absolute luminance :math:`Y_{abs}` of the scene diffuse white in
:math:`cd/m^2`.
- method : unicode, optional
- **{'Fairchild 2011', 'Fairchild 2010'}**,
- Computation method.
Returns
-------
@@ -155,30 +85,17 @@ def XYZ_to_hdr_CIELab(
--------
>>> XYZ = np.array([0.07049534, 0.10080000, 0.09558313])
>>> XYZ_to_hdr_CIELab(XYZ) # doctest: +ELLIPSIS
- array([ 26.4646106..., -24.613326 ..., -4.8479681...])
- >>> XYZ_to_hdr_CIELab(XYZ, method='Fairchild 2010') # doctest: +ELLIPSIS
- array([ 24.9020664..., -46.8312760..., -10.1427484...])
+ array([ 24.9020664..., -46.8312760..., -10.14274843])
"""
X, Y, Z = tsplit(XYZ)
X_n, Y_n, Z_n = tsplit(xyY_to_XYZ(xy_to_xyY(illuminant)))
- method_l = method.lower()
- assert method.lower() in [
- m.lower() for m in HDR_CIELAB_METHODS
- ], ('"{0}" method is invalid, must be one of {1}!'.format(
- method, HDR_CIELAB_METHODS))
-
- if method_l == 'fairchild 2010':
- lightness_callable = lightness_Fairchild2010
- else:
- lightness_callable = lightness_Fairchild2011
+ e = exponent_hdr_CIELab(Y_s, Y_abs)
- e = exponent_hdr_CIELab(Y_s, Y_abs, method)
-
- L_hdr = lightness_callable(Y / Y_n, e)
- a_hdr = 5 * (lightness_callable(X / X_n, e) - L_hdr)
- b_hdr = 2 * (L_hdr - lightness_callable(Z / Z_n, e))
+ L_hdr = lightness_Fairchild2010(Y / Y_n, e)
+ a_hdr = 5 * (lightness_Fairchild2010(X / X_n, e) - L_hdr)
+ b_hdr = 2 * (L_hdr - lightness_Fairchild2010(Z / Z_n, e))
Lab_hdr = tstack((L_hdr, a_hdr, b_hdr))
@@ -189,8 +106,7 @@ def hdr_CIELab_to_XYZ(
Lab_hdr,
illuminant=ILLUMINANTS['CIE 1931 2 Degree Standard Observer']['D50'],
Y_s=0.2,
- Y_abs=100,
- method='Fairchild 2011'):
+ Y_abs=100):
"""
Converts from *hdr-CIELAB* colourspace to *CIE XYZ* tristimulus values.
@@ -206,9 +122,6 @@ def hdr_CIELab_to_XYZ(
Y_abs : numeric or array_like
Absolute luminance :math:`Y_{abs}` of the scene diffuse white in
:math:`cd/m^2`.
- method : unicode, optional
- **{'Fairchild 2011', 'Fairchild 2010'}**,
- Computation method.
Returns
-------
@@ -223,35 +136,53 @@ def hdr_CIELab_to_XYZ(
Examples
--------
- >>> Lab_hdr = np.array([26.46461067, -24.613326, -4.84796811])
- >>> hdr_CIELab_to_XYZ(Lab_hdr) # doctest: +ELLIPSIS
- array([ 0.0704953..., 0.1008 , 0.0955831...])
>>> Lab_hdr = np.array([24.90206646, -46.83127607, -10.14274843])
- >>> hdr_CIELab_to_XYZ(Lab_hdr, method='Fairchild 2010')
- ... # doctest: +ELLIPSIS
+ >>> hdr_CIELab_to_XYZ(Lab_hdr) # doctest: +ELLIPSIS
array([ 0.0704953..., 0.1008 , 0.0955831...])
"""
L_hdr, a_hdr, b_hdr = tsplit(Lab_hdr)
X_n, Y_n, Z_n = tsplit(xyY_to_XYZ(xy_to_xyY(illuminant)))
- method_l = method.lower()
- assert method.lower() in [
- m.lower() for m in HDR_CIELAB_METHODS
- ], ('"{0}" method is invalid, must be one of {1}!'.format(
- method, HDR_CIELAB_METHODS))
-
- if method_l == 'fairchild 2010':
- luminance_callable = luminance_Fairchild2010
- else:
- luminance_callable = luminance_Fairchild2011
+ e = exponent_hdr_CIELab(Y_s, Y_abs)
- e = exponent_hdr_CIELab(Y_s, Y_abs, method)
-
- Y = luminance_callable(L_hdr, e) * Y_n
- X = luminance_callable((a_hdr + 5 * L_hdr) / 5, e) * X_n
- Z = luminance_callable((-b_hdr + 2 * L_hdr) / 2, e) * Z_n
+ Y = luminance_Fairchild2010(L_hdr, e) * Y_n
+ X = luminance_Fairchild2010((a_hdr + 5 * L_hdr) / 5, e) * X_n
+ Z = luminance_Fairchild2010((-b_hdr + 2 * L_hdr) / 2, e) * Z_n
XYZ = tstack((X, Y, Z))
return XYZ
+
+
+def exponent_hdr_CIELab(Y_s, Y_abs):
+ """
+ Computes *hdr-CIELAB* colourspace *Lightness* :math:`\epsilon` exponent.
+
+ Parameters
+ ----------
+ Y_s : numeric or array_like
+ Relative luminance :math:`Y_s` of the surround in range [0, 1].
+ Y_abs : numeric or array_like
+ Absolute luminance :math:`Y_{abs}` of the scene diffuse white in
+ :math:`cd/m^2`.
+
+ Returns
+ -------
+ array_like
+ *hdr-CIELAB* colourspace *Lightness* :math:`\epsilon` exponent.
+
+ Examples
+ --------
+ >>> exponent_hdr_CIELab(0.2, 100) # doctest: +ELLIPSIS
+ 1.8360198...
+ """
+
+ Y_s = np.asarray(Y_s)
+ Y_abs = np.asarray(Y_abs)
+
+ lf = np.log(318) / np.log(Y_abs)
+ sf = 1.25 - 0.25 * (Y_s / 0.184)
+ epsilon = 1.50 * sf * lf
+
+ return epsilon
diff --git a/colour/models/hdr_ipt.py b/colour/models/hdr_ipt.py
index c1050bffe..8cc91cbb1 100644
--- a/colour/models/hdr_ipt.py
+++ b/colour/models/hdr_ipt.py
@@ -21,18 +21,13 @@ References
Simple Models for Describing the Color of High-Dynamic-Range and
Wide-Color-Gamut Images. In Proc. of Color and Imaging Conference
(pp. 322–326). ISBN:9781629932156
-.. [2] Fairchild, M. D., & Chen, P. (2011). Brightness, Lightness, and
- Specifying Color in High-Dynamic-Range Scenes and Images.
- doi:10.1117/12.872075
"""
from __future__ import division, unicode_literals
import numpy as np
-from colour.colorimetry import (
- lightness_Fairchild2010, lightness_Fairchild2011, luminance_Fairchild2010,
- luminance_Fairchild2011)
+from colour.colorimetry import lightness_Fairchild2010, luminance_Fairchild2010
from colour.models.ipt import (IPT_XYZ_TO_LMS_MATRIX, IPT_LMS_TO_XYZ_MATRIX,
IPT_LMS_TO_IPT_MATRIX, IPT_IPT_TO_LMS_MATRIX)
from colour.utilities import dot_vector
@@ -44,71 +39,10 @@ __maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
-__all__ = [
- 'HDR_IPT_METHODS', 'exponent_hdr_IPT', 'XYZ_to_hdr_IPT', 'hdr_IPT_to_XYZ'
-]
+__all__ = ['XYZ_to_hdr_IPT', 'hdr_IPT_to_XYZ', 'exponent_hdr_IPT']
-HDR_IPT_METHODS = ('Fairchild 2010', 'Fairchild 2011')
-"""
-Supported *hdr-IPT* colourspace computation methods.
-
-HDR_IPT_METHODS : tuple
- **{'Fairchild 2011', 'Fairchild 2010'}**
-"""
-
-def exponent_hdr_IPT(Y_s, Y_abs, method='Fairchild 2011'):
- """
- Computes *hdr-IPT* colourspace *Lightness* :math:`\epsilon` exponent using
- *Fairchild and Wyble (2010)* or *Fairchild and Chen (2011)* method.
-
- Parameters
- ----------
- Y_s : numeric or array_like
- Relative luminance :math:`Y_s` of the surround in range [0, 1].
- Y_abs : numeric or array_like
- Absolute luminance :math:`Y_{abs}` of the scene diffuse white in
- :math:`cd/m^2`.
- method : unicode, optional
- **{'Fairchild 2011', 'Fairchild 2010'}**,
- Computation method.
-
- Returns
- -------
- array_like
- *hdr-IPT* colourspace *Lightness* :math:`\epsilon` exponent.
-
- Examples
- --------
- >>> exponent_hdr_IPT(0.2, 100) # doctest: +ELLIPSIS
- 0.7221678...
- >>> exponent_hdr_IPT(0.2, 100, method='Fairchild 2010')
- ... # doctest: +ELLIPSIS
- 1.6891383...
- """
-
- Y_s = np.asarray(Y_s)
- Y_abs = np.asarray(Y_abs)
-
- method_l = method.lower()
- assert method.lower() in [
- m.lower() for m in HDR_IPT_METHODS
- ], ('"{0}" method is invalid, must be one of {1}!'.format(
- method, HDR_IPT_METHODS))
-
- if method_l == 'fairchild 2010':
- epsilon = 1.38
- else:
- epsilon = 0.59
-
- lf = np.log(318) / np.log(Y_abs)
- sf = 1.25 - 0.25 * (Y_s / 0.184)
- epsilon *= sf * lf
-
- return epsilon
-
-
-def XYZ_to_hdr_IPT(XYZ, Y_s=0.2, Y_abs=100, method='Fairchild 2011'):
+def XYZ_to_hdr_IPT(XYZ, Y_s=0.2, Y_abs=100):
"""
Converts from *CIE XYZ* tristimulus values to *hdr-IPT* colourspace.
@@ -121,9 +55,6 @@ def XYZ_to_hdr_IPT(XYZ, Y_s=0.2, Y_abs=100, method='Fairchild 2011'):
Y_abs : numeric or array_like
Absolute luminance :math:`Y_{abs}` of the scene diffuse white in
:math:`cd/m^2`.
- method : unicode, optional
- **{'Fairchild 2011', 'Fairchild 2010'}**,
- Computation method.
Returns
-------
@@ -139,32 +70,19 @@ def XYZ_to_hdr_IPT(XYZ, Y_s=0.2, Y_abs=100, method='Fairchild 2011'):
--------
>>> XYZ = np.array([0.96907232, 1.00000000, 1.12179215])
>>> XYZ_to_hdr_IPT(XYZ) # doctest: +ELLIPSIS
- array([ 93.5317473..., 1.8564156..., -1.3292254...])
- >>> XYZ_to_hdr_IPT(XYZ, method='Fairchild 2010') # doctest: +ELLIPSIS
array([ 94.6592917..., 0.3804177..., -0.2673118...])
"""
- method_l = method.lower()
- assert method.lower() in [
- m.lower() for m in HDR_IPT_METHODS
- ], ('"{0}" method is invalid, must be one of {1}!'.format(
- method, HDR_IPT_METHODS))
-
- if method_l == 'fairchild 2010':
- lightness_callable = lightness_Fairchild2010
- else:
- lightness_callable = lightness_Fairchild2011
-
- e = exponent_hdr_IPT(Y_s, Y_abs, method)[..., np.newaxis]
+ e = exponent_hdr_IPT(Y_s, Y_abs)[..., np.newaxis]
LMS = dot_vector(IPT_XYZ_TO_LMS_MATRIX, XYZ)
- LMS_prime = np.sign(LMS) * np.abs(lightness_callable(LMS, e))
+ LMS_prime = np.sign(LMS) * np.abs(lightness_Fairchild2010(LMS, e))
IPT = dot_vector(IPT_LMS_TO_IPT_MATRIX, LMS_prime)
return IPT
-def hdr_IPT_to_XYZ(IPT_hdr, Y_s=0.2, Y_abs=100, method='Fairchild 2011'):
+def hdr_IPT_to_XYZ(IPT_hdr, Y_s=0.2, Y_abs=100):
"""
Converts from *hdr-IPT* colourspace to *CIE XYZ* tristimulus values.
@@ -177,9 +95,6 @@ def hdr_IPT_to_XYZ(IPT_hdr, Y_s=0.2, Y_abs=100, method='Fairchild 2011'):
Y_abs : numeric or array_like
Absolute luminance :math:`Y_{abs}` of the scene diffuse white in
:math:`cd/m^2`.
- method : unicode, optional
- **{'Fairchild 2011', 'Fairchild 2010'}**,
- Computation method.
Returns
-------
@@ -188,30 +103,48 @@ def hdr_IPT_to_XYZ(IPT_hdr, Y_s=0.2, Y_abs=100, method='Fairchild 2011'):
Examples
--------
- >>> IPT_hdr = np.array([93.53174734, 1.85641567, -1.32922546])
- >>> hdr_IPT_to_XYZ(IPT_hdr) # doctest: +ELLIPSIS
- array([ 0.9690723..., 1. , 1.1217921...])
>>> IPT_hdr = np.array([94.65929175, 0.38041773, -0.26731187])
- >>> hdr_IPT_to_XYZ(IPT_hdr, method='Fairchild 2010')
- ... # doctest: +ELLIPSIS
+ >>> hdr_IPT_to_XYZ(IPT_hdr) # doctest: +ELLIPSIS
array([ 0.9690723..., 1. , 1.1217921...])
"""
- method_l = method.lower()
- assert method.lower() in [
- m.lower() for m in HDR_IPT_METHODS
- ], ('"{0}" method is invalid, must be one of {1}!'.format(
- method, HDR_IPT_METHODS))
-
- if method_l == 'fairchild 2010':
- luminance_callable = luminance_Fairchild2010
- else:
- luminance_callable = luminance_Fairchild2011
-
- e = exponent_hdr_IPT(Y_s, Y_abs, method)[..., np.newaxis]
+ e = exponent_hdr_IPT(Y_s, Y_abs)[..., np.newaxis]
LMS = dot_vector(IPT_IPT_TO_LMS_MATRIX, IPT_hdr)
- LMS_prime = np.sign(LMS) * np.abs(luminance_callable(LMS, e))
+ LMS_prime = np.sign(LMS) * np.abs(luminance_Fairchild2010(LMS, e))
XYZ = dot_vector(IPT_LMS_TO_XYZ_MATRIX, LMS_prime)
return XYZ
+
+
+def exponent_hdr_IPT(Y_s, Y_abs):
+ """
+ Computes *hdr-IPT* colourspace *Lightness* :math:`\epsilon` exponent.
+
+ Parameters
+ ----------
+ Y_s : numeric or array_like
+ Relative luminance :math:`Y_s` of the surround in range [0, 1].
+ Y_abs : numeric or array_like
+ Absolute luminance :math:`Y_{abs}` of the scene diffuse white in
+ :math:`cd/m^2`.
+
+ Returns
+ -------
+ array_like
+ *hdr-IPT* colourspace *Lightness* :math:`\epsilon` exponent.
+
+ Examples
+ --------
+ >>> exponent_hdr_IPT(0.2, 100) # doctest: +ELLIPSIS
+ 1.6891383...
+ """
+
+ Y_s = np.asarray(Y_s)
+ Y_abs = np.asarray(Y_abs)
+
+ lf = np.log(318) / np.log(Y_abs)
+ sf = 1.25 - 0.25 * (Y_s / 0.184)
+ epsilon = 1.38 * sf * lf
+
+ return epsilon
diff --git a/colour/models/rgb/transfer_functions/canon_log.py b/colour/models/rgb/transfer_functions/canon_log.py
index 97c1029e0..fb97c1013 100644
--- a/colour/models/rgb/transfer_functions/canon_log.py
+++ b/colour/models/rgb/transfer_functions/canon_log.py
@@ -39,6 +39,8 @@ Notes
from __future__ import division, unicode_literals
import numpy as np
+
+from colour.models.rgb.transfer_functions import full_to_legal, legal_to_full
from colour.utilities import as_numeric
__author__ = 'Colour Developers'
@@ -55,7 +57,7 @@ __all__ = [
]
-def log_encoding_CanonLog(x):
+def log_encoding_CanonLog(x, bit_depth=10, out_legal=True, in_reflection=True):
"""
Defines the *Canon Log* log encoding curve / opto-electronic transfer
function.
@@ -64,67 +66,86 @@ def log_encoding_CanonLog(x):
----------
x : numeric or array_like
Linear data :math:`x`.
+ bit_depth : int, optional
+ Bit depth used for conversion.
+ out_legal : bool, optional
+ Whether the *Canon Log* non-linear data is encoded in legal
+ range.
+ in_reflection : bool, optional
+ Whether the light level :math`x` to a camera is reflection.
Returns
-------
numeric or ndarray
- *Canon Log* non-linear *IRE* data.
- Notes
- -----
- - Output *Canon Log* non-linear *IRE* data should be converted to code
- value *CV* as follows: `CV = IRE * (940 - 64) + 64`.
+ *Canon Log* non-linear data.
Examples
--------
- >>> log_encoding_CanonLog(0.20) * 100 # doctest: +ELLIPSIS
- 32.7953896...
+ >>> log_encoding_CanonLog(0.18) * 100 # doctest: +ELLIPSIS
+ 34.3389651...
"""
x = np.asarray(x)
- clog_ire = np.where(x < log_decoding_CanonLog(0.0730597),
- -(0.529136 * (np.log10(-x * 10.1596 + 1)) - 0.0730597),
- 0.529136 * np.log10(10.1596 * x + 1) + 0.0730597)
+ if in_reflection:
+ x = x / 0.9
+
+ clog = np.where(x < log_decoding_CanonLog(0.0730597, bit_depth, False),
+ -(0.529136 * (np.log10(-x * 10.1596 + 1)) - 0.0730597),
+ 0.529136 * np.log10(10.1596 * x + 1) + 0.0730597)
- return as_numeric(clog_ire)
+ clog = full_to_legal(clog, bit_depth) if out_legal else clog
+ return as_numeric(clog)
-def log_decoding_CanonLog(clog_ire):
+
+def log_decoding_CanonLog(clog,
+ bit_depth=10,
+ in_legal=True,
+ out_reflection=True):
"""
Defines the *Canon Log* log decoding curve / electro-optical transfer
function.
Parameters
----------
- clog_ire : numeric or array_like
- *Canon Log* non-linear *IRE* data.
+ clog : numeric or array_like
+ *Canon Log* non-linear data.
+ bit_depth : int, optional
+ Bit depth used for conversion.
+ in_legal : bool, optional
+ Whether the *Canon Log* non-linear data is encoded in legal
+ range.
+ out_reflection : bool, optional
+ Whether the light level :math`x` to a camera is reflection.
Returns
-------
numeric or ndarray
Linear data :math:`x`.
- Notes
- -----
- - Input *Canon Log* non-linear *IRE* data should be converted from code
- value *CV* to *IRE* as follows: `IRE = (CV - 64) / (940 - 64)`.
-
Examples
--------
- >>> log_decoding_CanonLog(32.795389693580908 / 100) # doctest: +ELLIPSIS
- 0.19999999...
+ >>> log_decoding_CanonLog(34.338965172606912 / 100) # doctest: +ELLIPSIS
+ 0.17999999...
"""
- clog_ire = np.asarray(clog_ire)
+ clog = np.asarray(clog)
+
+ clog = legal_to_full(clog, bit_depth) if in_legal else clog
- x = np.where(clog_ire < 0.0730597,
- -(10 ** ((0.0730597 - clog_ire) / 0.529136) - 1) / 10.1596,
- (10 ** ((clog_ire - 0.0730597) / 0.529136) - 1) / 10.1596)
+ x = np.where(clog < 0.0730597,
+ -(10 ** ((0.0730597 - clog) / 0.529136) - 1) / 10.1596,
+ (10 ** ((clog - 0.0730597) / 0.529136) - 1) / 10.1596)
+
+ if out_reflection:
+ x = x * 0.9
return as_numeric(x)
-def log_encoding_CanonLog2(x):
+def log_encoding_CanonLog2(x, bit_depth=10, out_legal=True,
+ in_reflection=True):
"""
Defines the *Canon Log 2* log encoding curve / opto-electronic transfer
function.
@@ -133,69 +154,89 @@ def log_encoding_CanonLog2(x):
----------
x : numeric or array_like
Linear data :math:`x`.
+ bit_depth : int, optional
+ Bit depth used for conversion.
+ out_legal : bool, optional
+ Whether the *Canon Log 2* non-linear data is encoded in legal
+ range.
+ in_reflection : bool, optional
+ Whether the light level :math`x` to a camera is reflection.
Returns
-------
numeric or ndarray
- *Canon Log 2* non-linear *IRE* data.
- Notes
- -----
- - Output *Canon Log 2* non-linear *IRE* data should be converted to code
- value *CV* as follows: `CV = IRE * (940 - 64) + 64`.
+ *Canon Log 2* non-linear data.
Examples
--------
- >>> log_encoding_CanonLog2(0.20) * 100 # doctest: +ELLIPSIS
- 39.2025745...
+ >>> log_encoding_CanonLog2(0.18) * 100 # doctest: +ELLIPSIS
+ 39.8254694...
"""
x = np.asarray(x)
- clog2_ire = np.where(
- x < log_decoding_CanonLog2(0.035388128),
- -(0.281863093 * (np.log10(-x * 87.09937546 + 1)) - 0.035388128),
- 0.281863093 * np.log10(x * 87.09937546 + 1) + 0.035388128)
+ if in_reflection:
+ x = x / 0.9
+
+ clog2 = np.where(x < log_decoding_CanonLog2(0.035388128, bit_depth, False),
+ -(0.281863093 *
+ (np.log10(-x * 87.09937546 + 1)) - 0.035388128),
+ 0.281863093 * np.log10(x * 87.09937546 + 1) + 0.035388128)
+
+ clog2 = full_to_legal(clog2, bit_depth) if out_legal else clog2
- return as_numeric(clog2_ire)
+ return as_numeric(clog2)
-def log_decoding_CanonLog2(clog2_ire):
+def log_decoding_CanonLog2(clog2,
+ bit_depth=10,
+ in_legal=True,
+ out_reflection=True):
"""
Defines the *Canon Log 2* log decoding curve / electro-optical transfer
function.
Parameters
----------
- clog2_ire : numeric or array_like
- *Canon Log 2* non-linear *IRE* data.
+ clog2 : numeric or array_like
+ *Canon Log 2* non-linear data.
+ bit_depth : int, optional
+ Bit depth used for conversion.
+ in_legal : bool, optional
+ Whether the *Canon Log 2* non-linear data is encoded in legal
+ range.
+ out_reflection : bool, optional
+ Whether the light level :math`x` to a camera is reflection.
Returns
-------
numeric or ndarray
Linear data :math:`x`.
- Notes
- -----
- - Input *Canon Log 2* non-linear *IRE* data should be converted from code
- value *CV* to *IRE* as follows: `IRE = (CV - 64) / (940 - 64)`.
-
Examples
--------
- >>> log_decoding_CanonLog2(39.202574539700947 / 100) # doctest: +ELLIPSIS
- 0.2000000...
+ >>> log_decoding_CanonLog2(39.825469498316735 / 100) # doctest: +ELLIPSIS
+ 0.1799999...
"""
- clog2_ire = np.asarray(clog2_ire)
+ clog2 = np.asarray(clog2)
+
+ clog2 = legal_to_full(clog2, bit_depth) if in_legal else clog2
- x = np.where(
- clog2_ire < 0.035388128,
- -(10 ** ((0.035388128 - clog2_ire) / 0.281863093) - 1) / 87.09937546,
- (10 ** ((clog2_ire - 0.035388128) / 0.281863093) - 1) / 87.09937546)
+ x = np.where(clog2 < 0.035388128,
+ -(10 **
+ ((0.035388128 - clog2) / 0.281863093) - 1) / 87.09937546,
+ (10 **
+ ((clog2 - 0.035388128) / 0.281863093) - 1) / 87.09937546)
+
+ if out_reflection:
+ x = x * 0.9
return as_numeric(x)
-def log_encoding_CanonLog3(x):
+def log_encoding_CanonLog3(x, bit_depth=10, out_legal=True,
+ in_reflection=True):
"""
Defines the *Canon Log 3* log encoding curve / opto-electronic transfer
function.
@@ -204,68 +245,98 @@ def log_encoding_CanonLog3(x):
----------
x : numeric or array_like
Linear data :math:`x`.
+ bit_depth : int, optional
+ Bit depth used for conversion.
+ out_legal : bool, optional
+ Whether the *Canon Log 3* non-linear data is encoded in legal
+ range.
+ in_reflection : bool, optional
+ Whether the light level :math`x` to a camera is reflection.
Returns
-------
numeric or ndarray
- *Canon Log 3* non-linear *IRE* data.
+ *Canon Log 3* non-linear data.
+
Notes
-----
- - Output *Canon Log 3* non-linear *IRE* data should be converted to code
- value *CV* as follows: `CV = IRE * (940 - 64) + 64`.
+ - Introspection of the grafting points by Shaw, N. (2018) shows that the
+ *Canon Log 3* IDT was likely derived from its encoding curve as the
+ later is grafted at *+/-0.014*::
+
+ >>> clog3 = 0.04076162
+ >>> (clog3 - 0.073059361) / 2.3069815
+ -0.014000000000000002
+ >>> clog3 = 0.105357102
+ >>> (clog3 - 0.073059361) / 2.3069815
+ 0.013999999999999997
Examples
--------
- >>> log_encoding_CanonLog3(0.20) * 100 # doctest: +ELLIPSIS
- 32.7953567...
+ >>> log_encoding_CanonLog3(0.18) * 100 # doctest: +ELLIPSIS
+ 34.3389369...
"""
x = np.asarray(x)
- clog3_ire = np.select(
- (x < log_decoding_CanonLog3(0.04076162),
- x <= log_decoding_CanonLog3(0.105357102),
- x > log_decoding_CanonLog3(0.105357102)),
- (-(0.42889912 * (np.log10(-x * 14.98325 + 1)) - 0.069886632),
+ if in_reflection:
+ x = x / 0.9
+
+ clog3 = np.select(
+ (x < log_decoding_CanonLog3(0.04076162, bit_depth, False, False),
+ x <= log_decoding_CanonLog3(0.105357102, bit_depth, False, False),
+ x > log_decoding_CanonLog3(0.105357102, bit_depth, False, False)),
+ (-0.42889912 * np.log10(-x * 14.98325 + 1) + 0.07623209,
2.3069815 * x + 0.073059361,
0.42889912 * np.log10(x * 14.98325 + 1) + 0.069886632))
- return as_numeric(clog3_ire)
+ clog3 = full_to_legal(clog3, bit_depth) if out_legal else clog3
+
+ return as_numeric(clog3)
-def log_decoding_CanonLog3(clog3_ire):
+def log_decoding_CanonLog3(clog3,
+ bit_depth=10,
+ in_legal=True,
+ out_reflection=True):
"""
Defines the *Canon Log 3* log decoding curve / electro-optical transfer
function.
Parameters
----------
- clog3_ire : numeric or array_like
- *Canon Log 3* non-linear *IRE* data.
+ clog3 : numeric or array_like
+ *Canon Log 3* non-linear data.
+ bit_depth : int, optional
+ Bit depth used for conversion.
+ in_legal : bool, optional
+ Whether the *Canon Log 3* non-linear data is encoded in legal
+ range.
+ out_reflection : bool, optional
+ Whether the light level :math`x` to a camera is reflection.
Returns
-------
numeric or ndarray
Linear data :math:`x`.
- Notes
- -----
- - Input *Canon Log 3* non-linear *IRE* data should be converted from code
- value *CV* to *IRE* as follows: `IRE = (CV - 64) / (940 - 64)`.
-
Examples
--------
- >>> log_decoding_CanonLog3(32.795356721989336 / 100) # doctest: +ELLIPSIS
- 0.2000000...
+ >>> log_decoding_CanonLog3(34.338936938868677 / 100) # doctest: +ELLIPSIS
+ 0.1800000...
"""
- clog3_ire = np.asarray(clog3_ire)
+ clog3 = np.asarray(clog3)
+
+ clog3 = legal_to_full(clog3, bit_depth) if in_legal else clog3
x = np.select(
- (clog3_ire < 0.04076162, clog3_ire <= 0.105357102,
- clog3_ire > 0.105357102),
- (-(10 ** ((0.069886632 - clog3_ire) / 0.42889912) - 1) / 14.98325,
- (clog3_ire - 0.073059361) / 2.3069815,
- (10 ** ((clog3_ire - 0.069886632) / 0.42889912) - 1) / 14.98325))
+ (clog3 < 0.04076162, clog3 <= 0.105357102, clog3 > 0.105357102),
+ (-(10 ** ((0.07623209 - clog3) / 0.42889912) - 1) / 14.98325,
+ (clog3 - 0.073059361) / 2.3069815,
+ (10 ** ((clog3 - 0.069886632) / 0.42889912) - 1) / 14.98325))
+
+ if out_reflection:
+ x = x * 0.9
return as_numeric(x)
diff --git a/colour/models/rgb/transfer_functions/panasonic_vlog.py b/colour/models/rgb/transfer_functions/panasonic_vlog.py
index 1b76d1dc8..76e68f4f0 100644
--- a/colour/models/rgb/transfer_functions/panasonic_vlog.py
+++ b/colour/models/rgb/transfer_functions/panasonic_vlog.py
@@ -26,6 +26,7 @@ from __future__ import division, unicode_literals
import numpy as np
+from colour.models.rgb.transfer_functions import full_to_legal, legal_to_full
from colour.utilities import Structure, as_numeric
__author__ = 'Colour Developers'
@@ -46,7 +47,7 @@ VLOG_CONSTANTS : Structure
"""
-def log_encoding_VLog(L_in):
+def log_encoding_VLog(L_in, bit_depth=10, out_legal=True, in_reflection=True):
"""
Defines the *Panasonic V-Log* log encoding curve / opto-electronic transfer
function.
@@ -55,6 +56,13 @@ def log_encoding_VLog(L_in):
----------
L_in : numeric or array_like
Linear reflection data :math`L_{in}`.
+ bit_depth : int, optional
+ Bit depth used for conversion.
+ out_legal : bool, optional
+ Whether the non-linear *Panasonic V-Log* data :math:`V_{out}` is
+ encoded in legal range.
+ in_reflection : bool, optional
+ Whether the light level :math`L_{in}` to a camera is reflection.
Returns
-------
@@ -69,18 +77,23 @@ def log_encoding_VLog(L_in):
L_in = np.asarray(L_in)
+ if not in_reflection:
+ L_in = L_in * 0.9
+
cut1 = VLOG_CONSTANTS.cut1
b = VLOG_CONSTANTS.b
c = VLOG_CONSTANTS.c
d = VLOG_CONSTANTS.d
- L_in = np.where(L_in < cut1, 5.6 * L_in + 0.125,
- c * np.log10(L_in + b) + d)
+ V_out = np.where(L_in < cut1, 5.6 * L_in + 0.125,
+ c * np.log10(L_in + b) + d)
- return as_numeric(L_in)
+ V_out = V_out if out_legal else legal_to_full(V_out, bit_depth)
+
+ return as_numeric(V_out)
-def log_decoding_VLog(V_out):
+def log_decoding_VLog(V_out, bit_depth=10, in_legal=True, out_reflection=True):
"""
Defines the *Panasonic V-Log* log decoding curve / electro-optical transfer
function.
@@ -89,6 +102,13 @@ def log_decoding_VLog(V_out):
----------
V_out : numeric or array_like
Non-linear data :math:`V_{out}`.
+ bit_depth : int, optional
+ Bit depth used for conversion.
+ in_legal : bool, optional
+ Whether the non-linear *Panasonic V-Log* data :math:`V_{out}` is
+ encoded in legal range.
+ out_reflection : bool, optional
+ Whether the light level :math`L_{in}` to a camera is reflection.
Returns
-------
@@ -103,12 +123,17 @@ def log_decoding_VLog(V_out):
V_out = np.asarray(V_out)
+ V_out = V_out if in_legal else full_to_legal(V_out, bit_depth)
+
cut2 = VLOG_CONSTANTS.cut2
b = VLOG_CONSTANTS.b
c = VLOG_CONSTANTS.c
d = VLOG_CONSTANTS.d
- V_out = np.where(V_out < cut2, (V_out - 0.125) / 5.6,
- np.power(10, ((V_out - d) / c)) - b)
+ L_in = np.where(V_out < cut2, (V_out - 0.125) / 5.6,
+ np.power(10, ((V_out - d) / c)) - b)
- return as_numeric(V_out)
+ if not out_reflection:
+ L_in = L_in / 0.9
+
+ return as_numeric(L_in)
| Improve "Canon Log" and "Panasonic V-Log" implementations and consistency
Following on from #336 and the changes implemented in PR #338 for "Sony S-Log", it would improve consistency if the same changes were made for "Canon Log" and "Panasonic V-Log".
Like Sony, both Canon and Panasonic include tables in their documentation giving the equivalents of 0%, 18% and 90% reflectance as both IRE(%) and 10-bit code values. With ALEXALogC, for example, where 18% reflectance is coded as 10-bit code value 400 (in a 10-bit decode of an ARRIRAW image of a grey card), the IRE(%) on the camera's SDI output is simply `100 * 400/1023 = 39.1`. For V-Log, the Panasonic table shows 128 as the 10-bit code value for black, and 7.3% as the IRE; to get 7.3% you need to do `100 * legal_to_full(128/1023)`.
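A short sketch of both calculations, assuming the library's existing `legal_to_full` helper (the same one the accompanying patch imports):

```python
# Minimal sketch of the IRE(%) arithmetic described above.
from colour.models.rgb.transfer_functions import legal_to_full

# ALEXA LogC: a full-range 10-bit code value maps directly to IRE(%).
print(100 * 400 / 1023)  # ~39.1

# Panasonic V-Log: 10-bit legal-range code value 128 for black.
print(100 * legal_to_full(128 / 1023, bit_depth=10))  # ~7.3
```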
Therefore I suggest that both the Canon and Panasonic log encoding functions should gain an `out_legal=True` kwarg, and the decoding functions an `in_legal=True` kwarg. Defaulting these to `True` would give results matching the manufacturer-supplied IDTs.
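Sketched below with the signatures that the accompanying patch ends up adding; the `~0.423` value is the well-known V-Log encoding of 18% reflectance:

```python
# Minimal sketch, assuming the post-change V-Log signatures from the patch.
from colour.models.rgb.transfer_functions import (log_encoding_VLog,
                                                  log_decoding_VLog)

V = log_encoding_VLog(0.18)                        # legal-range default, ~0.423
L = log_decoding_VLog(V)                           # round-trips to ~0.18
V_full = log_encoding_VLog(0.18, out_legal=False)  # full-range variant
```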
While there could be an argument for including full/legal switches for all the transfer functions, to allow for results matching the code values on the SDI outputs of the cameras, in reality I do not believe that either RED or ARRI log material is ever stored or processed in this form. RED and ARRI SDKs produce raw decodes matching full range, and (almost) all ProRes decoders incorporate a legal_to_full scale, hidden from the user, so raw and ProRes recordings match.
For Canon there is an additional issue. Like Sony, Canon refer to 18% reflectance as 20% 'linear IRE'. Therefore, for example, the `CanonLog3_to_linear` function in the IDT requires its output to be multiplied by 0.9 to return 0.18 instead of 0.2 for the Canon Log 3 coding of mid grey. I suggest that the same `out_reflection=True` kwarg as has been added to the Sony functions should also be added for Canon. Currently to match the ACES IDT linearisation, the following is required: `0.9 * log_decoding_CanonLog3( ( y * 1023 - 64 ) / 876 )`, since the current `log_decoding_CanonLog3` function implements only the `CanonLog3 IRE to Linear` subfunction from the IDT.
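For illustration, that workaround looks like the sketch below, using the pre-change single-argument `log_decoding_CanonLog3`; `y` is a hypothetical legal-range Canon Log 3 sample normalised to [0, 1]:

```python
# Minimal sketch of the ACES IDT linearisation workaround described above,
# assuming the pre-change single-argument log_decoding_CanonLog3.
from colour.models.rgb.transfer_functions import log_decoding_CanonLog3

# Hypothetical 10-bit legal-range code value for mid grey (full-range
# Canon Log 3 IRE ~0.32795 per the current docstring example).
y = (0.327953567 * 876 + 64) / 1023

linear = 0.9 * log_decoding_CanonLog3((y * 1023 - 64) / 876)
print(linear)  # ~0.18 instead of 0.20
```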
The current docstrings note that the output of the Canon Log encoding functions is 'IRE' (in the common video-levels sense, rather than the obtuse 'linear IRE' sense used confusingly in the same Canon and Sony documents) and should be converted to code values using a full-to-legal scale, but the 0.9 'reflectance factor' is not mentioned. | colour-science/colour | diff --git a/colour/colorimetry/tests/test_lightness.py b/colour/colorimetry/tests/test_lightness.py
index 808cea398..6f8bc5dc1 100644
--- a/colour/colorimetry/tests/test_lightness.py
+++ b/colour/colorimetry/tests/test_lightness.py
@@ -10,8 +10,7 @@ import numpy as np
import unittest
from colour.colorimetry import (lightness_Glasser1958, lightness_Wyszecki1963,
- lightness_CIE1976, lightness_Fairchild2010,
- lightness_Fairchild2011)
+ lightness_CIE1976, lightness_Fairchild2010)
from colour.utilities import ignore_numpy_errors
__author__ = 'Colour Developers'
@@ -23,8 +22,7 @@ __status__ = 'Production'
__all__ = [
'TestLightnessGlasser1958', 'TestLightnessWyszecki1963',
- 'TestLightnessCIE1976', 'TestLightnessFairchild2010',
- 'TestLightnessFairchild2011'
+ 'TestLightnessCIE1976', 'TestLightnessFairchild2010'
]
@@ -208,22 +206,22 @@ class TestLightnessFairchild2010(unittest.TestCase):
"""
self.assertAlmostEqual(
- lightness_Fairchild2010(10.08 / 100), 24.90229027, places=7)
+ lightness_Fairchild2010(10.08 / 100), 23.10363383, places=7)
self.assertAlmostEqual(
- lightness_Fairchild2010(56.76 / 100), 88.79756887, places=7)
+ lightness_Fairchild2010(56.76 / 100), 90.51057574, places=7)
self.assertAlmostEqual(
- lightness_Fairchild2010(98.32 / 100), 95.61301852, places=7)
+ lightness_Fairchild2010(98.32 / 100), 96.636221285, places=7)
self.assertAlmostEqual(
lightness_Fairchild2010(10.08 / 100, 2.75), 16.06420271, places=7)
self.assertAlmostEqual(
- lightness_Fairchild2010(1008), 100.019986327374240, places=7)
+ lightness_Fairchild2010(1008), 100.01999667, places=7)
self.assertAlmostEqual(
- lightness_Fairchild2010(100800), 100.019999997090270, places=7)
+ lightness_Fairchild2010(100800), 100.01999999, places=7)
def test_n_dimensional_lightness_Fairchild2010(self):
"""
@@ -232,7 +230,7 @@ class TestLightnessFairchild2010(unittest.TestCase):
"""
Y = 10.08 / 100
- L = 24.90229027
+ L = 23.10363383
np.testing.assert_almost_equal(
lightness_Fairchild2010(Y), L, decimal=7)
@@ -262,72 +260,5 @@ class TestLightnessFairchild2010(unittest.TestCase):
np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
-class TestLightnessFairchild2011(unittest.TestCase):
- """
- Defines :func:`colour.colorimetry.lightness.lightness_Fairchild2011`
- definition unit tests methods.
- """
-
- def test_lightness_Fairchild2011(self):
- """
- Tests :func:`colour.colorimetry.lightness.lightness_Fairchild2011`
- definition.
- """
-
- self.assertAlmostEqual(
- lightness_Fairchild2011(10.08 / 100), 26.45950982, places=7)
-
- self.assertAlmostEqual(
- lightness_Fairchild2011(56.76 / 100), 71.70846602, places=7)
-
- self.assertAlmostEqual(
- lightness_Fairchild2011(98.32 / 100), 93.03097540, places=7)
-
- self.assertAlmostEqual(
- lightness_Fairchild2011(10.08 / 100, 2.75), 0.08672116, places=7)
-
- self.assertAlmostEqual(
- lightness_Fairchild2011(1008), 244.07716521, places=7)
-
- self.assertAlmostEqual(
- lightness_Fairchild2011(100800), 246.90681934, places=7)
-
- def test_n_dimensional_lightness_Fairchild2011(self):
- """
- Tests :func:`colour.colorimetry.lightness.lightness_Fairchild2011`
- definition n-dimensional arrays support.
- """
-
- Y = 10.08 / 100
- L = 26.45950982
- np.testing.assert_almost_equal(
- lightness_Fairchild2011(Y), L, decimal=7)
-
- Y = np.tile(Y, 6)
- L = np.tile(L, 6)
- np.testing.assert_almost_equal(
- lightness_Fairchild2011(Y), L, decimal=7)
-
- Y = np.reshape(Y, (2, 3))
- L = np.reshape(L, (2, 3))
- np.testing.assert_almost_equal(
- lightness_Fairchild2011(Y), L, decimal=7)
-
- Y = np.reshape(Y, (2, 3, 1))
- L = np.reshape(L, (2, 3, 1))
- np.testing.assert_almost_equal(
- lightness_Fairchild2011(Y), L, decimal=7)
-
- @ignore_numpy_errors
- def test_nan_lightness_Fairchild2011(self):
- """
- Tests :func:`colour.colorimetry.lightness.lightness_Fairchild2011`
- definition nan support.
- """
-
- lightness_Fairchild2011(
- np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
-
-
if __name__ == '__main__':
unittest.main()
diff --git a/colour/colorimetry/tests/test_luminance.py b/colour/colorimetry/tests/test_luminance.py
index c73e017b9..8b8cb07d8 100644
--- a/colour/colorimetry/tests/test_luminance.py
+++ b/colour/colorimetry/tests/test_luminance.py
@@ -11,7 +11,7 @@ import unittest
from colour.colorimetry.luminance import (
luminance_Newhall1943, luminance_CIE1976, luminance_ASTMD153508,
- luminance_Fairchild2010, luminance_Fairchild2011)
+ luminance_Fairchild2010)
from colour.utilities import ignore_numpy_errors
__author__ = 'Colour Developers'
@@ -23,8 +23,7 @@ __status__ = 'Production'
__all__ = [
'TestLuminanceNewhall1943', 'TestLuminanceASTMD153508',
- 'TestLuminanceCIE1976', 'TestLuminanceFairchild2010',
- 'TestLuminanceFairchild2011'
+ 'TestLuminanceCIE1976', 'TestLuminanceFairchild2010'
]
@@ -211,13 +210,13 @@ class TestLuminanceFairchild2010(unittest.TestCase):
"""
self.assertAlmostEqual(
- luminance_Fairchild2010(24.902290269546651), 0.10079999, places=7)
+ luminance_Fairchild2010(23.103633825753175), 0.10079999, places=7)
self.assertAlmostEqual(
- luminance_Fairchild2010(88.797568871771162), 0.56759999, places=7)
+ luminance_Fairchild2010(90.510575738115122), 0.56759999, places=7)
self.assertAlmostEqual(
- luminance_Fairchild2010(95.613018520289828), 0.98319999, places=7)
+ luminance_Fairchild2010(96.636221285055527), 0.98319999, places=7)
self.assertAlmostEqual(
luminance_Fairchild2010(16.064202706248068, 2.75),
@@ -225,13 +224,13 @@ class TestLuminanceFairchild2010(unittest.TestCase):
places=7)
self.assertAlmostEqual(
- luminance_Fairchild2010(100.019986327374240),
- 1008.00000024,
+ luminance_Fairchild2010(100.01999666792653),
+ 1007.99999963,
places=7)
self.assertAlmostEqual(
- luminance_Fairchild2010(100.019999997090270),
- 100799.92312466,
+ luminance_Fairchild2010(100.01999999966679),
+ 100800.82383352,
places=7)
def test_n_dimensional_luminance_Fairchild2010(self):
@@ -240,7 +239,7 @@ class TestLuminanceFairchild2010(unittest.TestCase):
definition n-dimensional arrays support.
"""
- L_hdr = 24.902290269546651
+ L_hdr = 23.103633825753175
Y = 10.08 / 100
np.testing.assert_almost_equal(
luminance_Fairchild2010(L_hdr), Y, decimal=7)
@@ -271,78 +270,5 @@ class TestLuminanceFairchild2010(unittest.TestCase):
np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
-class TestLuminanceFairchild2011(unittest.TestCase):
- """
- Defines :func:`colour.colorimetry.luminance.luminance_Fairchild2011`
- definition unit tests methods.
- """
-
- def test_luminance_Fairchild2011(self):
- """
- Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2011`
- definition.
- """
-
- self.assertAlmostEqual(
- luminance_Fairchild2011(26.459509817572265), 0.10079999, places=7)
-
- self.assertAlmostEqual(
- luminance_Fairchild2011(71.708466023819625), 0.56759999, places=7)
-
- self.assertAlmostEqual(
- luminance_Fairchild2011(93.030975393206475), 0.98319999, places=7)
-
- self.assertAlmostEqual(
- luminance_Fairchild2011(0.08672116154998, 2.75),
- 0.10079999,
- places=7)
-
- self.assertAlmostEqual(
- luminance_Fairchild2011(244.07716520973938),
- 1008.00000000,
- places=7)
-
- self.assertAlmostEqual(
- luminance_Fairchild2011(246.90681933957006),
- 100800.00000000,
- places=7)
-
- def test_n_dimensional_luminance_Fairchild2011(self):
- """
- Tests :func:`colour.colorimetry.lightness.luminance_Fairchild2011`
- definition n-dimensional arrays support.
- """
-
- L_hdr = 26.459509817572265
- Y = 10.08 / 100
- np.testing.assert_almost_equal(
- luminance_Fairchild2011(L_hdr), Y, decimal=7)
-
- L_hdr = np.tile(L_hdr, 6)
- Y = np.tile(Y, 6)
- np.testing.assert_almost_equal(
- luminance_Fairchild2011(L_hdr), Y, decimal=7)
-
- L_hdr = np.reshape(L_hdr, (2, 3))
- Y = np.reshape(Y, (2, 3))
- np.testing.assert_almost_equal(
- luminance_Fairchild2011(L_hdr), Y, decimal=7)
-
- L_hdr = np.reshape(L_hdr, (2, 3, 1))
- Y = np.reshape(Y, (2, 3, 1))
- np.testing.assert_almost_equal(
- luminance_Fairchild2011(L_hdr), Y, decimal=7)
-
- @ignore_numpy_errors
- def test_nan_luminance_Fairchild2011(self):
- """
- Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2011`
- definition nan support.
- """
-
- luminance_Fairchild2011(
- np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
-
-
if __name__ == '__main__':
unittest.main()
diff --git a/colour/models/rgb/transfer_functions/tests/test_canon_log.py b/colour/models/rgb/transfer_functions/tests/test_canon_log.py
index e3afa11b8..268e49803 100644
--- a/colour/models/rgb/transfer_functions/tests/test_canon_log.py
+++ b/colour/models/rgb/transfer_functions/tests/test_canon_log.py
@@ -23,7 +23,11 @@ __maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
-__all__ = ['TestLogEncoding_CanonLog', 'TestLogDecoding_CanonLog']
+__all__ = [
+ 'TestLogEncoding_CanonLog', 'TestLogDecoding_CanonLog',
+ 'TestLogEncoding_CanonLog2', 'TestLogDecoding_CanonLog2',
+ 'TestLogEncoding_CanonLog3', 'TestLogDecoding_CanonLog3'
+]
class TestLogEncoding_CanonLog(unittest.TestCase):
@@ -39,16 +43,29 @@ log_encoding_CanonLog` definition.
"""
self.assertAlmostEqual(
- log_encoding_CanonLog(-0.1), -0.088052640318143, places=7)
+ log_encoding_CanonLog(-0.1), -0.023560122781997, places=7)
self.assertAlmostEqual(
- log_encoding_CanonLog(0.0), 0.073059700000000, places=7)
+ log_encoding_CanonLog(0.0), 0.125122480156403, places=7)
self.assertAlmostEqual(
- log_encoding_CanonLog(0.18), 0.312012855550395, places=7)
+ log_encoding_CanonLog(0.18), 0.343389651726069, places=7)
self.assertAlmostEqual(
- log_encoding_CanonLog(1.0), 0.627408304537653, places=7)
+ log_encoding_CanonLog(0.18, 12), 0.343138084215647, places=7)
+
+ self.assertAlmostEqual(
+ log_encoding_CanonLog(0.18, 10, False),
+ 0.327953896935809,
+ places=7)
+
+ self.assertAlmostEqual(
+ log_encoding_CanonLog(0.18, 10, False, False),
+ 0.312012855550395,
+ places=7)
+
+ self.assertAlmostEqual(
+ log_encoding_CanonLog(1.0), 0.618775485598649, places=7)
def test_n_dimensional_log_encoding_CanonLog(self):
"""
@@ -56,21 +73,25 @@ log_encoding_CanonLog` definition.
log_encoding_CanonLog` definition n-dimensional arrays support.
"""
- L = 0.18
- V = 0.312012855550395
- np.testing.assert_almost_equal(log_encoding_CanonLog(L), V, decimal=7)
+ x = 0.18
+ clog = 0.343389651726069
+ np.testing.assert_almost_equal(
+ log_encoding_CanonLog(x), clog, decimal=7)
- L = np.tile(L, 6)
- V = np.tile(V, 6)
- np.testing.assert_almost_equal(log_encoding_CanonLog(L), V, decimal=7)
+ x = np.tile(x, 6)
+ clog = np.tile(clog, 6)
+ np.testing.assert_almost_equal(
+ log_encoding_CanonLog(x), clog, decimal=7)
- L = np.reshape(L, (2, 3))
- V = np.reshape(V, (2, 3))
- np.testing.assert_almost_equal(log_encoding_CanonLog(L), V, decimal=7)
+ x = np.reshape(x, (2, 3))
+ clog = np.reshape(clog, (2, 3))
+ np.testing.assert_almost_equal(
+ log_encoding_CanonLog(x), clog, decimal=7)
- L = np.reshape(L, (2, 3, 1))
- V = np.reshape(V, (2, 3, 1))
- np.testing.assert_almost_equal(log_encoding_CanonLog(L), V, decimal=7)
+ x = np.reshape(x, (2, 3, 1))
+ clog = np.reshape(clog, (2, 3, 1))
+ np.testing.assert_almost_equal(
+ log_encoding_CanonLog(x), clog, decimal=7)
@ignore_numpy_errors
def test_nan_log_encoding_CanonLog(self):
@@ -96,16 +117,29 @@ log_decoding_CanonLog` definition.
"""
self.assertAlmostEqual(
- log_decoding_CanonLog(-0.088052640318143), -0.1, places=7)
+ log_decoding_CanonLog(-0.023560122781997), -0.1, places=7)
+
+ self.assertAlmostEqual(
+ log_decoding_CanonLog(0.125122480156403), 0.0, places=7)
+
+ self.assertAlmostEqual(
+ log_decoding_CanonLog(0.343389651726069), 0.18, places=7)
+
+ self.assertAlmostEqual(
+ log_decoding_CanonLog(0.343138084215647, 12), 0.18, places=7)
self.assertAlmostEqual(
- log_decoding_CanonLog(0.073059700000000), 0.0, places=7)
+ log_decoding_CanonLog(0.327953896935809, 10, False),
+ 0.18,
+ places=7)
self.assertAlmostEqual(
- log_decoding_CanonLog(0.312012855550395), 0.18, places=7)
+ log_decoding_CanonLog(0.312012855550395, 10, False, False),
+ 0.18,
+ places=7)
self.assertAlmostEqual(
- log_decoding_CanonLog(0.627408304537653), 1.0, places=7)
+ log_decoding_CanonLog(0.618775485598649), 1.0, places=7)
def test_n_dimensional_log_decoding_CanonLog(self):
"""
@@ -113,21 +147,25 @@ log_decoding_CanonLog` definition.
log_decoding_CanonLog` definition n-dimensional arrays support.
"""
- V = 0.312012855550395
- L = 0.18
- np.testing.assert_almost_equal(log_decoding_CanonLog(V), L, decimal=7)
+ clog = 0.343389651726069
+ x = 0.18
+ np.testing.assert_almost_equal(
+ log_decoding_CanonLog(clog), x, decimal=7)
- V = np.tile(V, 6)
- L = np.tile(L, 6)
- np.testing.assert_almost_equal(log_decoding_CanonLog(V), L, decimal=7)
+ clog = np.tile(clog, 6)
+ x = np.tile(x, 6)
+ np.testing.assert_almost_equal(
+ log_decoding_CanonLog(clog), x, decimal=7)
- V = np.reshape(V, (2, 3))
- L = np.reshape(L, (2, 3))
- np.testing.assert_almost_equal(log_decoding_CanonLog(V), L, decimal=7)
+ clog = np.reshape(clog, (2, 3))
+ x = np.reshape(x, (2, 3))
+ np.testing.assert_almost_equal(
+ log_decoding_CanonLog(clog), x, decimal=7)
- V = np.reshape(V, (2, 3, 1))
- L = np.reshape(L, (2, 3, 1))
- np.testing.assert_almost_equal(log_decoding_CanonLog(V), L, decimal=7)
+ clog = np.reshape(clog, (2, 3, 1))
+ x = np.reshape(x, (2, 3, 1))
+ np.testing.assert_almost_equal(
+ log_decoding_CanonLog(clog), x, decimal=7)
@ignore_numpy_errors
def test_nan_log_decoding_CanonLog(self):
@@ -153,16 +191,29 @@ log_encoding_CanonLog2` definition.
"""
self.assertAlmostEqual(
- log_encoding_CanonLog2(-0.1), -0.242871750266172, places=7)
+ log_encoding_CanonLog2(-0.1), -0.155370131996824, places=7)
+
+ self.assertAlmostEqual(
+ log_encoding_CanonLog2(0.0), 0.092864125247312, places=7)
+
+ self.assertAlmostEqual(
+ log_encoding_CanonLog2(0.18), 0.398254694983167, places=7)
+
+ self.assertAlmostEqual(
+ log_encoding_CanonLog2(0.18, 12), 0.397962933301861, places=7)
self.assertAlmostEqual(
- log_encoding_CanonLog2(0.0), 0.035388127999999, places=7)
+ log_encoding_CanonLog2(0.18, 10, False),
+ 0.392025745397009,
+ places=7)
self.assertAlmostEqual(
- log_encoding_CanonLog2(0.18), 0.379864582222983, places=7)
+ log_encoding_CanonLog2(0.18, 10, False, False),
+ 0.379864582222983,
+ places=7)
self.assertAlmostEqual(
- log_encoding_CanonLog2(1.0), 0.583604185577946, places=7)
+ log_encoding_CanonLog2(1.0), 0.573229282897641, places=7)
def test_n_dimensional_log_encoding_CanonLog2(self):
"""
@@ -170,21 +221,25 @@ log_encoding_CanonLog2` definition.
log_encoding_CanonLog2` definition n-dimensional arrays support.
"""
- L = 0.18
- V = 0.379864582222983
- np.testing.assert_almost_equal(log_encoding_CanonLog2(L), V, decimal=7)
+ x = 0.18
+ clog2 = 0.398254694983167
+ np.testing.assert_almost_equal(
+ log_encoding_CanonLog2(x), clog2, decimal=7)
- L = np.tile(L, 6)
- V = np.tile(V, 6)
- np.testing.assert_almost_equal(log_encoding_CanonLog2(L), V, decimal=7)
+ x = np.tile(x, 6)
+ clog2 = np.tile(clog2, 6)
+ np.testing.assert_almost_equal(
+ log_encoding_CanonLog2(x), clog2, decimal=7)
- L = np.reshape(L, (2, 3))
- V = np.reshape(V, (2, 3))
- np.testing.assert_almost_equal(log_encoding_CanonLog2(L), V, decimal=7)
+ x = np.reshape(x, (2, 3))
+ clog2 = np.reshape(clog2, (2, 3))
+ np.testing.assert_almost_equal(
+ log_encoding_CanonLog2(x), clog2, decimal=7)
- L = np.reshape(L, (2, 3, 1))
- V = np.reshape(V, (2, 3, 1))
- np.testing.assert_almost_equal(log_encoding_CanonLog2(L), V, decimal=7)
+ x = np.reshape(x, (2, 3, 1))
+ clog2 = np.reshape(clog2, (2, 3, 1))
+ np.testing.assert_almost_equal(
+ log_encoding_CanonLog2(x), clog2, decimal=7)
@ignore_numpy_errors
def test_nan_log_encoding_CanonLog2(self):
@@ -210,16 +265,29 @@ log_decoding_CanonLog2` definition.
"""
self.assertAlmostEqual(
- log_decoding_CanonLog2(-0.242871750266172), -0.1, places=7)
+ log_decoding_CanonLog2(-0.155370131996824), -0.1, places=7)
self.assertAlmostEqual(
- log_decoding_CanonLog2(0.035388127999999), 0.0, places=7)
+ log_decoding_CanonLog2(0.092864125247312), 0.0, places=7)
self.assertAlmostEqual(
- log_decoding_CanonLog2(0.379864582222983), 0.18, places=7)
+ log_decoding_CanonLog2(0.398254694983167), 0.18, places=7)
self.assertAlmostEqual(
- log_decoding_CanonLog2(0.583604185577946), 1.0, places=7)
+ log_decoding_CanonLog2(0.397962933301861, 12), 0.18, places=7)
+
+ self.assertAlmostEqual(
+ log_decoding_CanonLog2(0.392025745397009, 10, False),
+ 0.18,
+ places=7)
+
+ self.assertAlmostEqual(
+ log_decoding_CanonLog2(0.379864582222983, 10, False, False),
+ 0.18,
+ places=7)
+
+ self.assertAlmostEqual(
+ log_decoding_CanonLog2(0.573229282897641), 1.0, places=7)
def test_n_dimensional_log_decoding_CanonLog2(self):
"""
@@ -227,21 +295,25 @@ log_decoding_CanonLog2` definition.
log_decoding_CanonLog2` definition n-dimensional arrays support.
"""
- V = 0.379864582222983
- L = 0.18
- np.testing.assert_almost_equal(log_decoding_CanonLog2(V), L, decimal=7)
+ clog2 = 0.398254694983167
+ x = 0.18
+ np.testing.assert_almost_equal(
+ log_decoding_CanonLog2(clog2), x, decimal=7)
- V = np.tile(V, 6)
- L = np.tile(L, 6)
- np.testing.assert_almost_equal(log_decoding_CanonLog2(V), L, decimal=7)
+ clog2 = np.tile(clog2, 6)
+ x = np.tile(x, 6)
+ np.testing.assert_almost_equal(
+ log_decoding_CanonLog2(clog2), x, decimal=7)
- V = np.reshape(V, (2, 3))
- L = np.reshape(L, (2, 3))
- np.testing.assert_almost_equal(log_decoding_CanonLog2(V), L, decimal=7)
+ clog2 = np.reshape(clog2, (2, 3))
+ x = np.reshape(x, (2, 3))
+ np.testing.assert_almost_equal(
+ log_decoding_CanonLog2(clog2), x, decimal=7)
- V = np.reshape(V, (2, 3, 1))
- L = np.reshape(L, (2, 3, 1))
- np.testing.assert_almost_equal(log_decoding_CanonLog2(V), L, decimal=7)
+ clog2 = np.reshape(clog2, (2, 3, 1))
+ x = np.reshape(x, (2, 3, 1))
+ np.testing.assert_almost_equal(
+ log_decoding_CanonLog2(clog2), x, decimal=7)
@ignore_numpy_errors
def test_nan_log_decoding_CanonLog2(self):
@@ -267,16 +339,29 @@ log_encoding_CanonLog3` definition.
"""
self.assertAlmostEqual(
- log_encoding_CanonLog3(-0.1), -0.100664645796433, places=7)
+ log_encoding_CanonLog3(-0.1), -0.028494506076432, places=7)
+
+ self.assertAlmostEqual(
+ log_encoding_CanonLog3(0.0), 0.125122189869013, places=7)
+
+ self.assertAlmostEqual(
+ log_encoding_CanonLog3(0.18), 0.343389369388687, places=7)
+
+ self.assertAlmostEqual(
+ log_encoding_CanonLog3(0.18, 12), 0.343137802085105, places=7)
self.assertAlmostEqual(
- log_encoding_CanonLog3(0.0), 0.073059361000000, places=7)
+ log_encoding_CanonLog3(0.18, 10, False),
+ 0.327953567219893,
+ places=7)
self.assertAlmostEqual(
- log_encoding_CanonLog3(0.18), 0.313436005886328, places=7)
+ log_encoding_CanonLog3(0.18, 10, False, False),
+ 0.313436005886328,
+ places=7)
self.assertAlmostEqual(
- log_encoding_CanonLog3(1.0), 0.586137530935974, places=7)
+ log_encoding_CanonLog3(1.0), 0.580277796238604, places=7)
def test_n_dimensional_log_encoding_CanonLog3(self):
"""
@@ -284,21 +369,25 @@ log_encoding_CanonLog3` definition.
log_encoding_CanonLog3` definition n-dimensional arrays support.
"""
- L = 0.18
- V = 0.313436005886328
- np.testing.assert_almost_equal(log_encoding_CanonLog3(L), V, decimal=7)
+ x = 0.18
+ clog3 = 0.343389369388687
+ np.testing.assert_almost_equal(
+ log_encoding_CanonLog3(x), clog3, decimal=7)
- L = np.tile(L, 6)
- V = np.tile(V, 6)
- np.testing.assert_almost_equal(log_encoding_CanonLog3(L), V, decimal=7)
+ x = np.tile(x, 6)
+ clog3 = np.tile(clog3, 6)
+ np.testing.assert_almost_equal(
+ log_encoding_CanonLog3(x), clog3, decimal=7)
- L = np.reshape(L, (2, 3))
- V = np.reshape(V, (2, 3))
- np.testing.assert_almost_equal(log_encoding_CanonLog3(L), V, decimal=7)
+ x = np.reshape(x, (2, 3))
+ clog3 = np.reshape(clog3, (2, 3))
+ np.testing.assert_almost_equal(
+ log_encoding_CanonLog3(x), clog3, decimal=7)
- L = np.reshape(L, (2, 3, 1))
- V = np.reshape(V, (2, 3, 1))
- np.testing.assert_almost_equal(log_encoding_CanonLog3(L), V, decimal=7)
+ x = np.reshape(x, (2, 3, 1))
+ clog3 = np.reshape(clog3, (2, 3, 1))
+ np.testing.assert_almost_equal(
+ log_encoding_CanonLog3(x), clog3, decimal=7)
@ignore_numpy_errors
def test_nan_log_encoding_CanonLog3(self):
@@ -324,16 +413,29 @@ log_decoding_CanonLog3` definition.
"""
self.assertAlmostEqual(
- log_decoding_CanonLog3(-0.100664645796433), -0.1, places=7)
+ log_decoding_CanonLog3(-0.028494506076432), -0.1, places=7)
+
+ self.assertAlmostEqual(
+ log_decoding_CanonLog3(0.125122189869013), 0.0, places=7)
+
+ self.assertAlmostEqual(
+ log_decoding_CanonLog3(0.343389369388687), 0.18, places=7)
+
+ self.assertAlmostEqual(
+ log_decoding_CanonLog3(0.343137802085105, 12), 0.18, places=7)
self.assertAlmostEqual(
- log_decoding_CanonLog3(0.073059361000000), 0.0, places=7)
+ log_decoding_CanonLog3(0.327953567219893, 10, False),
+ 0.18,
+ places=7)
self.assertAlmostEqual(
- log_decoding_CanonLog3(0.313436005886328), 0.18, places=7)
+ log_decoding_CanonLog3(0.313436005886328, 10, False, False),
+ 0.18,
+ places=7)
self.assertAlmostEqual(
- log_decoding_CanonLog3(0.586137530935974), 1.0, places=7)
+ log_decoding_CanonLog3(0.580277796238604), 1.0, places=7)
def test_n_dimensional_log_decoding_CanonLog3(self):
"""
@@ -341,21 +443,25 @@ log_decoding_CanonLog3` definition.
log_decoding_CanonLog3` definition n-dimensional arrays support.
"""
- V = 0.313436005886328
- L = 0.18
- np.testing.assert_almost_equal(log_decoding_CanonLog3(V), L, decimal=7)
+ clog3 = 0.343389369388687
+ x = 0.18
+ np.testing.assert_almost_equal(
+ log_decoding_CanonLog3(clog3), x, decimal=7)
- V = np.tile(V, 6)
- L = np.tile(L, 6)
- np.testing.assert_almost_equal(log_decoding_CanonLog3(V), L, decimal=7)
+ clog3 = np.tile(clog3, 6)
+ x = np.tile(x, 6)
+ np.testing.assert_almost_equal(
+ log_decoding_CanonLog3(clog3), x, decimal=7)
- V = np.reshape(V, (2, 3))
- L = np.reshape(L, (2, 3))
- np.testing.assert_almost_equal(log_decoding_CanonLog3(V), L, decimal=7)
+ clog3 = np.reshape(clog3, (2, 3))
+ x = np.reshape(x, (2, 3))
+ np.testing.assert_almost_equal(
+ log_decoding_CanonLog3(clog3), x, decimal=7)
- V = np.reshape(V, (2, 3, 1))
- L = np.reshape(L, (2, 3, 1))
- np.testing.assert_almost_equal(log_decoding_CanonLog3(V), L, decimal=7)
+ clog3 = np.reshape(clog3, (2, 3, 1))
+ x = np.reshape(x, (2, 3, 1))
+ np.testing.assert_almost_equal(
+ log_decoding_CanonLog3(clog3), x, decimal=7)
@ignore_numpy_errors
def test_nan_log_decoding_CanonLog3(self):
diff --git a/colour/models/rgb/transfer_functions/tests/test_panasonic_vlog.py b/colour/models/rgb/transfer_functions/tests/test_panasonic_vlog.py
index 017fc84e1..b1d102786 100644
--- a/colour/models/rgb/transfer_functions/tests/test_panasonic_vlog.py
+++ b/colour/models/rgb/transfer_functions/tests/test_panasonic_vlog.py
@@ -41,6 +41,17 @@ log_encoding_VLog` definition.
self.assertAlmostEqual(
log_encoding_VLog(0.18), 0.423311448760136, places=7)
+ self.assertAlmostEqual(
+ log_encoding_VLog(0.18, 12), 0.423311448760136, places=7)
+
+ self.assertAlmostEqual(
+ log_encoding_VLog(0.18, 10, False), 0.421287228403675, places=7)
+
+ self.assertAlmostEqual(
+ log_encoding_VLog(0.18, 10, False, False),
+ 0.409009628526078,
+ places=7)
+
self.assertAlmostEqual(
log_encoding_VLog(1.0), 0.599117700158146, places=7)
@@ -97,6 +108,17 @@ log_decoding_VLog` definition.
self.assertAlmostEqual(
log_decoding_VLog(0.423311448760136), 0.18, places=7)
+ self.assertAlmostEqual(
+ log_decoding_VLog(0.423311448760136, 12), 0.18, places=7)
+
+ self.assertAlmostEqual(
+ log_decoding_VLog(0.421287228403675, 10, False), 0.18, places=7)
+
+ self.assertAlmostEqual(
+ log_decoding_VLog(0.409009628526078, 10, False, False),
+ 0.18,
+ places=7)
+
self.assertAlmostEqual(
log_decoding_VLog(0.599117700158146), 1.0, places=7)
diff --git a/colour/models/tests/test_hdr_cie_lab.py b/colour/models/tests/test_hdr_cie_lab.py
index 7b38c6645..64c4db540 100644
--- a/colour/models/tests/test_hdr_cie_lab.py
+++ b/colour/models/tests/test_hdr_cie_lab.py
@@ -22,77 +22,10 @@ __email__ = '[email protected]'
__status__ = 'Production'
__all__ = [
- 'TestExponent_hdr_CIELab', 'TestXYZ_to_hdr_CIELab', 'TestHdr_CIELab_to_XYZ'
+ 'TestXYZ_to_hdr_CIELab', 'TestHdr_CIELab_to_XYZ', 'TestExponent_hdr_CIELab'
]
-class TestExponent_hdr_CIELab(unittest.TestCase):
- """
- Defines :func:`colour.models.hdr_cie_lab.exponent_hdr_CIELab`
- definition unit tests methods.
- """
-
- def test_exponent_hdr_CIELab(self):
- """
- Tests :func:`colour.models.hdr_cie_lab.exponent_hdr_CIELab`
- definition.
- """
-
- self.assertAlmostEqual(
- exponent_hdr_CIELab(0.2, 100), 0.709927693821670, places=7)
-
- self.assertAlmostEqual(
- exponent_hdr_CIELab(0.4, 100), 0.512725556648984, places=7)
-
- self.assertAlmostEqual(
- exponent_hdr_CIELab(0.4, 100, method='Fairchild 2010'),
- 1.326014370643925,
- places=7)
-
- self.assertAlmostEqual(
- exponent_hdr_CIELab(0.2, 1000), 0.473285129214447, places=7)
-
- def test_n_dimensional_exponent_hdr_CIELab(self):
- """
- Tests :func:`colour.models.hdr_cie_lab.exponent_hdr_CIELab`
- definition n-dimensional arrays support.
- """
-
- Y_s = 0.2
- Y_abs = 100
- e = 0.709927693821670
- np.testing.assert_almost_equal(
- exponent_hdr_CIELab(Y_s, Y_abs), e, decimal=7)
-
- Y_s = np.tile(Y_s, 6)
- Y_abs = np.tile(Y_abs, 6)
- e = np.tile(e, 6)
- np.testing.assert_almost_equal(
- exponent_hdr_CIELab(Y_s, Y_abs), e, decimal=7)
-
- Y_s = np.reshape(Y_s, (2, 3))
- Y_abs = np.reshape(Y_abs, (2, 3))
- e = np.reshape(e, (2, 3))
- np.testing.assert_almost_equal(
- exponent_hdr_CIELab(Y_s, Y_abs), e, decimal=7)
-
- Y_s = np.reshape(Y_s, (2, 3, 1))
- Y_abs = np.reshape(Y_abs, (2, 3, 1))
- e = np.reshape(e, (2, 3, 1))
- np.testing.assert_almost_equal(
- exponent_hdr_CIELab(Y_s, Y_abs), e, decimal=7)
-
- @ignore_numpy_errors
- def test_nan_exponent_hdr_CIELab(self):
- """
- Tests :func:`colour.models.hdr_cie_lab.exponent_hdr_CIELab`
- definition nan support.
- """
-
- cases = np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan])
- exponent_hdr_CIELab(cases, cases)
-
-
class TestXYZ_to_hdr_CIELab(unittest.TestCase):
"""
Defines :func:`colour.models.hdr_cie_lab.XYZ_to_hdr_CIELab` definition unit
@@ -106,34 +39,26 @@ class TestXYZ_to_hdr_CIELab(unittest.TestCase):
np.testing.assert_almost_equal(
XYZ_to_hdr_CIELab(np.array([0.07049534, 0.10080000, 0.09558313])),
- np.array([26.46461067, -24.61332600, -4.84796811]),
+ np.array([24.90206646, -46.83127607, -10.14274843]),
decimal=7)
np.testing.assert_almost_equal(
XYZ_to_hdr_CIELab(
np.array([0.07049534, 0.10080000, 0.09558313]),
np.array([0.44757, 0.40745])),
- np.array([26.46461067, -33.35816986, -42.86850246]),
- decimal=7)
-
- np.testing.assert_almost_equal(
- XYZ_to_hdr_CIELab(
- np.array([0.07049534, 0.10080000, 0.09558313]),
- np.array([0.44757, 0.40745]),
- method='Fairchild 2010'),
np.array([24.90206646, -61.24983919, -83.63902870]),
decimal=7)
np.testing.assert_almost_equal(
XYZ_to_hdr_CIELab(
np.array([0.07049534, 0.10080000, 0.09558313]), Y_s=0.5),
- np.array([55.57158803, -27.58973060, -5.03923267]),
+ np.array([34.44227938, -36.51485775, -6.87279617]),
decimal=7)
np.testing.assert_almost_equal(
XYZ_to_hdr_CIELab(
np.array([0.07049534, 0.10080000, 0.09558313]), Y_abs=1000),
- np.array([48.33166805, -28.18355309, -5.21974184]),
+ np.array([32.39463250, -39.77445283, -7.66690737]),
decimal=7)
def test_n_dimensional_XYZ_to_hdr_CIELab(self):
@@ -146,7 +71,7 @@ class TestXYZ_to_hdr_CIELab(unittest.TestCase):
illuminant = np.array([0.34570, 0.35850])
Y_s = 0.2
Y_abs = 100
- Lab_hdr = np.array([26.46461067, -24.61332600, -4.84796811])
+ Lab_hdr = np.array([24.90206646, -46.83127607, -10.14274843])
np.testing.assert_almost_equal(
XYZ_to_hdr_CIELab(XYZ, illuminant, Y_s, Y_abs), Lab_hdr, decimal=7)
@@ -199,26 +124,26 @@ class TestHdr_CIELab_to_XYZ(unittest.TestCase):
np.testing.assert_almost_equal(
hdr_CIELab_to_XYZ(
- np.array([26.46461067, -24.61332600, -4.84796811])),
+ np.array([24.90206646, -46.83127607, -10.14274843])),
np.array([0.07049534, 0.10080000, 0.09558313]),
decimal=7)
np.testing.assert_almost_equal(
hdr_CIELab_to_XYZ(
- np.array([26.46461067, -33.35816986, -42.86850246]),
+ np.array([24.90206646, -61.24983919, -83.63902870]),
np.array([0.44757, 0.40745])),
np.array([0.07049534, 0.10080000, 0.09558313]),
decimal=7)
np.testing.assert_almost_equal(
hdr_CIELab_to_XYZ(
- np.array([55.57158803, -27.58973060, -5.03923267]), Y_s=0.5),
+ np.array([34.44227938, -36.51485775, -6.87279617]), Y_s=0.5),
np.array([0.07049534, 0.10080000, 0.09558313]),
decimal=7)
np.testing.assert_almost_equal(
hdr_CIELab_to_XYZ(
- np.array([48.33166805, -28.18355309, -5.21974184]),
+ np.array([32.39463250, -39.77445283, -7.66690737]),
Y_abs=1000),
np.array([0.07049534, 0.10080000, 0.09558313]),
decimal=7)
@@ -229,7 +154,7 @@ class TestHdr_CIELab_to_XYZ(unittest.TestCase):
n-dimensions support.
"""
- Lab_hdr = np.array([26.46461067, -24.61332600, -4.84796811])
+ Lab_hdr = np.array([24.90206646, -46.83127607, -10.14274843])
illuminant = np.array([0.34570, 0.35850])
Y_s = 0.2
Y_abs = 100
@@ -273,5 +198,67 @@ class TestHdr_CIELab_to_XYZ(unittest.TestCase):
hdr_CIELab_to_XYZ(Lab_hdr, illuminant, Y_s, Y_abs)
+class TestExponent_hdr_CIELab(unittest.TestCase):
+ """
+ Defines :func:`colour.models.hdr_cie_lab.exponent_hdr_CIELab`
+ definition unit tests methods.
+ """
+
+ def test_exponent_hdr_CIELab(self):
+ """
+ Tests :func:`colour.models.hdr_cie_lab.exponent_hdr_CIELab`
+ definition.
+ """
+
+ self.assertAlmostEqual(
+ exponent_hdr_CIELab(0.2, 100), 1.836019897814665, places=7)
+
+ self.assertAlmostEqual(
+ exponent_hdr_CIELab(0.4, 100), 1.326014370643925, places=7)
+
+ self.assertAlmostEqual(
+ exponent_hdr_CIELab(0.2, 1000), 1.224013265209777, places=7)
+
+ def test_n_dimensional_exponent_hdr_CIELab(self):
+ """
+ Tests :func:`colour.models.hdr_cie_lab.exponent_hdr_CIELab`
+ definition n-dimensional arrays support.
+ """
+
+ Y_s = 0.2
+ Y_abs = 100
+ e = 1.836019897814665
+ np.testing.assert_almost_equal(
+ exponent_hdr_CIELab(Y_s, Y_abs), e, decimal=7)
+
+ Y_s = np.tile(Y_s, 6)
+ Y_abs = np.tile(Y_abs, 6)
+ e = np.tile(e, 6)
+ np.testing.assert_almost_equal(
+ exponent_hdr_CIELab(Y_s, Y_abs), e, decimal=7)
+
+ Y_s = np.reshape(Y_s, (2, 3))
+ Y_abs = np.reshape(Y_abs, (2, 3))
+ e = np.reshape(e, (2, 3))
+ np.testing.assert_almost_equal(
+ exponent_hdr_CIELab(Y_s, Y_abs), e, decimal=7)
+
+ Y_s = np.reshape(Y_s, (2, 3, 1))
+ Y_abs = np.reshape(Y_abs, (2, 3, 1))
+ e = np.reshape(e, (2, 3, 1))
+ np.testing.assert_almost_equal(
+ exponent_hdr_CIELab(Y_s, Y_abs), e, decimal=7)
+
+ @ignore_numpy_errors
+ def test_nan_exponent_hdr_CIELab(self):
+ """
+ Tests :func:`colour.models.hdr_cie_lab.exponent_hdr_CIELab`
+ definition nan support.
+ """
+
+ cases = np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan])
+ exponent_hdr_CIELab(cases, cases)
+
+
if __name__ == '__main__':
unittest.main()
diff --git a/colour/models/tests/test_hdr_ipt.py b/colour/models/tests/test_hdr_ipt.py
index 181f2b19e..92eb4a45b 100644
--- a/colour/models/tests/test_hdr_ipt.py
+++ b/colour/models/tests/test_hdr_ipt.py
@@ -21,74 +21,7 @@ __maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
-__all__ = ['TestExponent_hdr_IPT', 'TestXYZ_to_hdr_IPT', 'TestHdr_IPT_to_XYZ']
-
-
-class TestExponent_hdr_IPT(unittest.TestCase):
- """
- Defines :func:`colour.models.hdr_ipt.exponent_hdr_IPT`
- definition unit tests methods.
- """
-
- def test_exponent_hdr_IPT(self):
- """
- Tests :func:`colour.models.hdr_ipt.exponent_hdr_IPT`
- definition.
- """
-
- self.assertAlmostEqual(
- exponent_hdr_IPT(0.2, 100), 0.722167826473768, places=7)
-
- self.assertAlmostEqual(
- exponent_hdr_IPT(0.4, 100), 0.521565652453277, places=7)
-
- self.assertAlmostEqual(
- exponent_hdr_IPT(0.4, 100, method='Fairchild 2010'),
- 1.219933220992410,
- places=7)
-
- self.assertAlmostEqual(
- exponent_hdr_IPT(0.2, 1000), 0.481445217649179, places=7)
-
- def test_n_dimensional_exponent_hdr_IPT(self):
- """
- Tests :func:`colour.models.hdr_ipt.exponent_hdr_IPT`
- definition n-dimensional arrays support.
- """
-
- Y_s = 0.2
- Y_abs = 100
- e = 0.722167826473768
- np.testing.assert_almost_equal(
- exponent_hdr_IPT(Y_s, Y_abs), e, decimal=7)
-
- Y_s = np.tile(Y_s, 6)
- Y_abs = np.tile(Y_abs, 6)
- e = np.tile(e, 6)
- np.testing.assert_almost_equal(
- exponent_hdr_IPT(Y_s, Y_abs), e, decimal=7)
-
- Y_s = np.reshape(Y_s, (2, 3))
- Y_abs = np.reshape(Y_abs, (2, 3))
- e = np.reshape(e, (2, 3))
- np.testing.assert_almost_equal(
- exponent_hdr_IPT(Y_s, Y_abs), e, decimal=7)
-
- Y_s = np.reshape(Y_s, (2, 3, 1))
- Y_abs = np.reshape(Y_abs, (2, 3, 1))
- e = np.reshape(e, (2, 3, 1))
- np.testing.assert_almost_equal(
- exponent_hdr_IPT(Y_s, Y_abs), e, decimal=7)
-
- @ignore_numpy_errors
- def test_nan_exponent_hdr_IPT(self):
- """
- Tests :func:`colour.models.hdr_ipt.exponent_hdr_IPT`
- definition nan support.
- """
-
- cases = np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan])
- exponent_hdr_IPT(cases, cases)
+__all__ = ['TestXYZ_to_hdr_IPT', 'TestHdr_IPT_to_XYZ', 'TestExponent_hdr_IPT']
class TestXYZ_to_hdr_IPT(unittest.TestCase):
@@ -104,26 +37,19 @@ class TestXYZ_to_hdr_IPT(unittest.TestCase):
np.testing.assert_almost_equal(
XYZ_to_hdr_IPT(np.array([0.07049534, 0.10080000, 0.09558313])),
- np.array([24.88927680, -11.44574144, 1.63147707]),
- decimal=7)
-
- np.testing.assert_almost_equal(
- XYZ_to_hdr_IPT(
- np.array([0.07049534, 0.10080000, 0.09558313]),
- method='Fairchild 2010'),
np.array([25.18261761, -22.62111297, 3.18511729]),
decimal=7)
np.testing.assert_almost_equal(
XYZ_to_hdr_IPT(
np.array([0.07049534, 0.10080000, 0.09558313]), Y_s=0.5),
- np.array([53.85070486, -12.48767103, 1.80705844]),
+ np.array([34.60312115, -15.70974390, 2.26601353]),
decimal=7)
np.testing.assert_almost_equal(
XYZ_to_hdr_IPT(
np.array([0.25506814, 0.19150000, 0.08849752]), Y_abs=1000),
- np.array([57.49548734, 25.88213868, 21.85080772]),
+ np.array([47.18074546, 32.38073691, 29.13827648]),
decimal=7)
def test_n_dimensional_XYZ_to_hdr_IPT(self):
@@ -135,7 +61,7 @@ class TestXYZ_to_hdr_IPT(unittest.TestCase):
XYZ = np.array([0.07049534, 0.10080000, 0.09558313])
Y_s = 0.2
Y_abs = 100
- IPT_hdr = np.array([24.88927680, -11.44574144, 1.63147707])
+ IPT_hdr = np.array([25.18261761, -22.62111297, 3.18511729])
np.testing.assert_almost_equal(
XYZ_to_hdr_IPT(XYZ, Y_s, Y_abs), IPT_hdr, decimal=7)
@@ -184,26 +110,19 @@ class TestHdr_IPT_to_XYZ(unittest.TestCase):
"""
np.testing.assert_almost_equal(
- hdr_IPT_to_XYZ(np.array([24.88927680, -11.44574144, 1.63147707])),
+ hdr_IPT_to_XYZ(np.array([25.18261761, -22.62111297, 3.18511729])),
np.array([0.07049534, 0.10080000, 0.09558313]),
decimal=7)
np.testing.assert_almost_equal(
hdr_IPT_to_XYZ(
- np.array([25.18261761, -22.62111297, 3.18511729]),
- method='Fairchild 2010'),
+ np.array([34.60312115, -15.70974390, 2.26601353]), Y_s=0.5),
np.array([0.07049534, 0.10080000, 0.09558313]),
decimal=7)
np.testing.assert_almost_equal(
hdr_IPT_to_XYZ(
- np.array([53.85070486, -12.48767103, 1.80705844]), Y_s=0.5),
- np.array([0.07049534, 0.10080000, 0.09558313]),
- decimal=7)
-
- np.testing.assert_almost_equal(
- hdr_IPT_to_XYZ(
- np.array([57.49548734, 25.88213868, 21.85080772]), Y_abs=1000),
+ np.array([47.18074546, 32.38073691, 29.13827648]), Y_abs=1000),
np.array([0.25506814, 0.19150000, 0.08849752]),
decimal=7)
@@ -213,7 +132,7 @@ class TestHdr_IPT_to_XYZ(unittest.TestCase):
n-dimensions support.
"""
- IPT_hdr = np.array([24.88927680, -11.44574144, 1.63147707])
+ IPT_hdr = np.array([25.18261761, -22.62111297, 3.18511729])
Y_s = 0.2
Y_abs = 100
XYZ = np.array([0.07049534, 0.10080000, 0.09558313])
@@ -253,5 +172,67 @@ class TestHdr_IPT_to_XYZ(unittest.TestCase):
hdr_IPT_to_XYZ(IPT_hdr, Y_s, Y_abs)
+class TestExponent_hdr_IPT(unittest.TestCase):
+ """
+ Defines :func:`colour.models.hdr_ipt.exponent_hdr_IPT`
+ definition unit tests methods.
+ """
+
+ def test_exponent_hdr_IPT(self):
+ """
+ Tests :func:`colour.models.hdr_ipt.exponent_hdr_IPT`
+ definition.
+ """
+
+ self.assertAlmostEqual(
+ exponent_hdr_IPT(0.2, 100), 1.689138305989492, places=7)
+
+ self.assertAlmostEqual(
+ exponent_hdr_IPT(0.4, 100), 1.219933220992410, places=7)
+
+ self.assertAlmostEqual(
+ exponent_hdr_IPT(0.2, 1000), 1.126092203992995, places=7)
+
+ def test_n_dimensional_exponent_hdr_IPT(self):
+ """
+ Tests :func:`colour.models.hdr_ipt.exponent_hdr_IPT`
+ definition n-dimensional arrays support.
+ """
+
+ Y_s = 0.2
+ Y_abs = 100
+ e = 1.689138305989492
+ np.testing.assert_almost_equal(
+ exponent_hdr_IPT(Y_s, Y_abs), e, decimal=7)
+
+ Y_s = np.tile(Y_s, 6)
+ Y_abs = np.tile(Y_abs, 6)
+ e = np.tile(e, 6)
+ np.testing.assert_almost_equal(
+ exponent_hdr_IPT(Y_s, Y_abs), e, decimal=7)
+
+ Y_s = np.reshape(Y_s, (2, 3))
+ Y_abs = np.reshape(Y_abs, (2, 3))
+ e = np.reshape(e, (2, 3))
+ np.testing.assert_almost_equal(
+ exponent_hdr_IPT(Y_s, Y_abs), e, decimal=7)
+
+ Y_s = np.reshape(Y_s, (2, 3, 1))
+ Y_abs = np.reshape(Y_abs, (2, 3, 1))
+ e = np.reshape(e, (2, 3, 1))
+ np.testing.assert_almost_equal(
+ exponent_hdr_IPT(Y_s, Y_abs), e, decimal=7)
+
+ @ignore_numpy_errors
+ def test_nan_exponent_hdr_IPT(self):
+ """
+ Tests :func:`colour.models.hdr_ipt.exponent_hdr_IPT`
+ definition nan support.
+ """
+
+ cases = np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan])
+ exponent_hdr_IPT(cases, cases)
+
+
if __name__ == '__main__':
unittest.main()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 8
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
-e git+https://github.com/colour-science/colour.git@8c77aa68c2347583ad23adf678ae1eb3821f854e#egg=colour_science
coverage==6.2
execnet==1.9.0
importlib-metadata==4.8.3
iniconfig==1.1.1
numpy==1.19.5
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
pytest-asyncio==0.16.0
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-xdist==3.0.2
scipy==1.5.4
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: colour
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- execnet==1.9.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- numpy==1.19.5
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-asyncio==0.16.0
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-xdist==3.0.2
- scipy==1.5.4
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/colour
| [
"colour/colorimetry/tests/test_lightness.py::TestLightnessFairchild2010::test_lightness_Fairchild2010",
"colour/colorimetry/tests/test_lightness.py::TestLightnessFairchild2010::test_n_dimensional_lightness_Fairchild2010",
"colour/colorimetry/tests/test_luminance.py::TestLuminanceFairchild2010::test_luminance_Fairchild2010",
"colour/colorimetry/tests/test_luminance.py::TestLuminanceFairchild2010::test_n_dimensional_luminance_Fairchild2010",
"colour/models/rgb/transfer_functions/tests/test_canon_log.py::TestLogEncoding_CanonLog::test_log_encoding_CanonLog",
"colour/models/rgb/transfer_functions/tests/test_canon_log.py::TestLogEncoding_CanonLog::test_n_dimensional_log_encoding_CanonLog",
"colour/models/rgb/transfer_functions/tests/test_canon_log.py::TestLogDecoding_CanonLog::test_log_decoding_CanonLog",
"colour/models/rgb/transfer_functions/tests/test_canon_log.py::TestLogDecoding_CanonLog::test_n_dimensional_log_decoding_CanonLog",
"colour/models/rgb/transfer_functions/tests/test_canon_log.py::TestLogEncoding_CanonLog2::test_log_encoding_CanonLog2",
"colour/models/rgb/transfer_functions/tests/test_canon_log.py::TestLogEncoding_CanonLog2::test_n_dimensional_log_encoding_CanonLog2",
"colour/models/rgb/transfer_functions/tests/test_canon_log.py::TestLogDecoding_CanonLog2::test_log_decoding_CanonLog2",
"colour/models/rgb/transfer_functions/tests/test_canon_log.py::TestLogDecoding_CanonLog2::test_n_dimensional_log_decoding_CanonLog2",
"colour/models/rgb/transfer_functions/tests/test_canon_log.py::TestLogEncoding_CanonLog3::test_log_encoding_CanonLog3",
"colour/models/rgb/transfer_functions/tests/test_canon_log.py::TestLogEncoding_CanonLog3::test_n_dimensional_log_encoding_CanonLog3",
"colour/models/rgb/transfer_functions/tests/test_canon_log.py::TestLogDecoding_CanonLog3::test_log_decoding_CanonLog3",
"colour/models/rgb/transfer_functions/tests/test_canon_log.py::TestLogDecoding_CanonLog3::test_n_dimensional_log_decoding_CanonLog3",
"colour/models/rgb/transfer_functions/tests/test_panasonic_vlog.py::TestLogEncoding_VLog::test_log_encoding_VLog",
"colour/models/rgb/transfer_functions/tests/test_panasonic_vlog.py::TestLogDecoding_VLog::test_log_decoding_VLog",
"colour/models/tests/test_hdr_cie_lab.py::TestXYZ_to_hdr_CIELab::test_XYZ_to_hdr_CIELab",
"colour/models/tests/test_hdr_cie_lab.py::TestXYZ_to_hdr_CIELab::test_n_dimensional_XYZ_to_hdr_CIELab",
"colour/models/tests/test_hdr_cie_lab.py::TestHdr_CIELab_to_XYZ::test_hdr_CIELab_to_XYZ",
"colour/models/tests/test_hdr_cie_lab.py::TestHdr_CIELab_to_XYZ::test_n_dimensional_hdr_CIELab_to_XYZ",
"colour/models/tests/test_hdr_cie_lab.py::TestExponent_hdr_CIELab::test_exponent_hdr_CIELab",
"colour/models/tests/test_hdr_cie_lab.py::TestExponent_hdr_CIELab::test_n_dimensional_exponent_hdr_CIELab",
"colour/models/tests/test_hdr_ipt.py::TestXYZ_to_hdr_IPT::test_XYZ_to_hdr_IPT",
"colour/models/tests/test_hdr_ipt.py::TestXYZ_to_hdr_IPT::test_n_dimensional_XYZ_to_hdr_IPT",
"colour/models/tests/test_hdr_ipt.py::TestHdr_IPT_to_XYZ::test_hdr_IPT_to_XYZ",
"colour/models/tests/test_hdr_ipt.py::TestHdr_IPT_to_XYZ::test_n_dimensional_hdr_IPT_to_XYZ",
"colour/models/tests/test_hdr_ipt.py::TestExponent_hdr_IPT::test_exponent_hdr_IPT",
"colour/models/tests/test_hdr_ipt.py::TestExponent_hdr_IPT::test_n_dimensional_exponent_hdr_IPT"
]
| []
| [
"colour/colorimetry/tests/test_lightness.py::TestLightnessGlasser1958::test_lightness_Glasser1958",
"colour/colorimetry/tests/test_lightness.py::TestLightnessGlasser1958::test_n_dimensional_lightness_Glasser1958",
"colour/colorimetry/tests/test_lightness.py::TestLightnessGlasser1958::test_nan_lightness_Glasser1958",
"colour/colorimetry/tests/test_lightness.py::TestLightnessWyszecki1963::test_lightness_Wyszecki1963",
"colour/colorimetry/tests/test_lightness.py::TestLightnessWyszecki1963::test_n_dimensional_lightness_Wyszecki1963",
"colour/colorimetry/tests/test_lightness.py::TestLightnessWyszecki1963::test_nan_lightness_Wyszecki1963",
"colour/colorimetry/tests/test_lightness.py::TestLightnessCIE1976::test_lightness_CIE1976",
"colour/colorimetry/tests/test_lightness.py::TestLightnessCIE1976::test_n_dimensional_lightness_CIE1976",
"colour/colorimetry/tests/test_lightness.py::TestLightnessCIE1976::test_nan_lightness_CIE1976",
"colour/colorimetry/tests/test_lightness.py::TestLightnessFairchild2010::test_nan_lightness_Fairchild2010",
"colour/colorimetry/tests/test_luminance.py::TestLuminanceNewhall1943::test_luminance_Newhall1943",
"colour/colorimetry/tests/test_luminance.py::TestLuminanceNewhall1943::test_n_dimensional_luminance_Newhall1943",
"colour/colorimetry/tests/test_luminance.py::TestLuminanceNewhall1943::test_nan_luminance_Newhall1943",
"colour/colorimetry/tests/test_luminance.py::TestLuminanceASTMD153508::test_luminance_ASTMD153508",
"colour/colorimetry/tests/test_luminance.py::TestLuminanceASTMD153508::test_n_dimensional_luminance_ASTMD153508",
"colour/colorimetry/tests/test_luminance.py::TestLuminanceASTMD153508::test_nan_luminance_ASTMD153508",
"colour/colorimetry/tests/test_luminance.py::TestLuminanceCIE1976::test_luminance_CIE1976",
"colour/colorimetry/tests/test_luminance.py::TestLuminanceCIE1976::test_n_dimensional_luminance_CIE1976",
"colour/colorimetry/tests/test_luminance.py::TestLuminanceCIE1976::test_nan_luminance_CIE1976",
"colour/colorimetry/tests/test_luminance.py::TestLuminanceFairchild2010::test_nan_luminance_Fairchild2010",
"colour/models/rgb/transfer_functions/tests/test_canon_log.py::TestLogEncoding_CanonLog::test_nan_log_encoding_CanonLog",
"colour/models/rgb/transfer_functions/tests/test_canon_log.py::TestLogDecoding_CanonLog::test_nan_log_decoding_CanonLog",
"colour/models/rgb/transfer_functions/tests/test_canon_log.py::TestLogEncoding_CanonLog2::test_nan_log_encoding_CanonLog2",
"colour/models/rgb/transfer_functions/tests/test_canon_log.py::TestLogDecoding_CanonLog2::test_nan_log_decoding_CanonLog2",
"colour/models/rgb/transfer_functions/tests/test_canon_log.py::TestLogEncoding_CanonLog3::test_nan_log_encoding_CanonLog3",
"colour/models/rgb/transfer_functions/tests/test_canon_log.py::TestLogDecoding_CanonLog3::test_nan_log_decoding_CanonLog3",
"colour/models/rgb/transfer_functions/tests/test_panasonic_vlog.py::TestLogEncoding_VLog::test_n_dimensional_log_encoding_VLog",
"colour/models/rgb/transfer_functions/tests/test_panasonic_vlog.py::TestLogEncoding_VLog::test_nan_log_encoding_VLog",
"colour/models/rgb/transfer_functions/tests/test_panasonic_vlog.py::TestLogDecoding_VLog::test_n_dimensional_log_decoding_VLog",
"colour/models/rgb/transfer_functions/tests/test_panasonic_vlog.py::TestLogDecoding_VLog::test_nan_log_decoding_VLog",
"colour/models/tests/test_hdr_cie_lab.py::TestXYZ_to_hdr_CIELab::test_nan_XYZ_to_hdr_CIELab",
"colour/models/tests/test_hdr_cie_lab.py::TestHdr_CIELab_to_XYZ::test_nan_hdr_CIELab_to_XYZ",
"colour/models/tests/test_hdr_cie_lab.py::TestExponent_hdr_CIELab::test_nan_exponent_hdr_CIELab",
"colour/models/tests/test_hdr_ipt.py::TestXYZ_to_hdr_IPT::test_nan_XYZ_to_hdr_IPT",
"colour/models/tests/test_hdr_ipt.py::TestHdr_IPT_to_XYZ::test_nan_hdr_IPT_to_XYZ",
"colour/models/tests/test_hdr_ipt.py::TestExponent_hdr_IPT::test_nan_exponent_hdr_IPT"
]
| []
| BSD 3-Clause "New" or "Revised" License | 1,965 | [
"colour/colorimetry/__init__.py",
"colour/colorimetry/luminance.py",
"colour/colorimetry/lightness.py",
"colour/models/rgb/transfer_functions/panasonic_vlog.py",
"colour/models/rgb/transfer_functions/canon_log.py",
"colour/models/hdr_ipt.py",
"colour/models/hdr_cie_lab.py",
"colour/models/__init__.py"
]
| [
"colour/colorimetry/__init__.py",
"colour/colorimetry/luminance.py",
"colour/colorimetry/lightness.py",
"colour/models/rgb/transfer_functions/panasonic_vlog.py",
"colour/models/rgb/transfer_functions/canon_log.py",
"colour/models/hdr_ipt.py",
"colour/models/hdr_cie_lab.py",
"colour/models/__init__.py"
]
|
mpdavis__python-jose-76 | 28cc6719eceb89129eed59c25f7bdac015665bdd | 2017-12-19 12:53:56 | 28cc6719eceb89129eed59c25f7bdac015665bdd | mpdavis: It looks like there are builds failing on Python 2.6 and 3.3.
Python 2.6 is failing because pytest dropped support. It looks like we will need to pin pytest in `tox.ini` for 2.6 builds (or possibly just all builds if easier).
I am still looking into the 3.3 failure. I can take a look later if you don't want to worry about it.
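For reference, a pin along these lines might be enough; this is only a sketch, and the exact version bound and environment factors are assumptions rather than the final `tox.ini`:
```ini
# Hypothetical tox.ini excerpt: pytest 3.3 dropped support for Python 2.6
# (and 3.3), so keep an older pytest on those factors only.
[testenv]
deps =
    py26,py33: pytest<3.3
    py27,py34,py35,py36: pytest
```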
leplatrem: I also saw this:
```
$ tox
Matching undeclared envs is deprecated. Be sure all the envs that Tox should run are declared in the tox config.
``` | diff --git a/jose/jwt.py b/jose/jwt.py
index 2da511f..3ba3250 100644
--- a/jose/jwt.py
+++ b/jose/jwt.py
@@ -408,24 +408,28 @@ def _validate_jti(claims):
def _validate_at_hash(claims, access_token, algorithm):
"""
- Validates that the 'at_hash' parameter included in the claims matches
- with the access_token returned alongside the id token as part of
- the authorization_code flow.
+ Validates that the 'at_hash' is valid.
+
+ Its value is the base64url encoding of the left-most half of the hash
+ of the octets of the ASCII representation of the access_token value,
+ where the hash algorithm used is the hash algorithm used in the alg
+ Header Parameter of the ID Token's JOSE Header. For instance, if the
+ alg is RS256, hash the access_token value with SHA-256, then take the
+ left-most 128 bits and base64url encode them. The at_hash value is a
+ case sensitive string. Use of this claim is OPTIONAL.
Args:
- claims (dict): The claims dictionary to validate.
- access_token (str): The access token returned by the OpenID Provider.
- algorithm (str): The algorithm used to sign the JWT, as specified by
- the token headers.
+ claims (dict): The claims dictionary to validate.
+ access_token (str): The access token returned by the OpenID Provider.
+ algorithm (str): The algorithm used to sign the JWT, as specified by
+ the token headers.
"""
- if 'at_hash' not in claims and not access_token:
+ if 'at_hash' not in claims:
return
- elif 'at_hash' in claims and not access_token:
+
+ if not access_token:
msg = 'No access_token provided to compare against at_hash claim.'
raise JWTClaimsError(msg)
- elif access_token and 'at_hash' not in claims:
- msg = 'at_hash claim missing from token.'
- raise JWTClaimsError(msg)
try:
expected_hash = calculate_at_hash(access_token,
@@ -433,7 +437,7 @@ def _validate_at_hash(claims, access_token, algorithm):
except (TypeError, ValueError):
msg = 'Unable to calculate at_hash to verify against token claims.'
raise JWTClaimsError(msg)
-
+
if claims['at_hash'] != expected_hash:
raise JWTClaimsError('at_hash claim does not match access_token.')
| Should at_hash claim verification fail when missing from JWT?
It looks like `at_hash` in JWT payload is optional (see http://openid.net/specs/openid-connect-core-1_0.html#CodeIDToken).
However, in python-jose, when both `id_token` and `access_token` parameters are specified, decoding a JWT that has no `at_hash` claim raises an error (*at_hash claim missing from token*)
https://github.com/mpdavis/python-jose/pull/30/files#diff-b106d01229785c64375df96ca4b3f58cR422
Shouldn't it be acceptable since the spec says it's optional?
Obviously we can disable at_hash verification with the appropriate decode option, but we find it useful to perform claims verification on JWT that have it or not with the same code. Maybe with a `allow_missing_at_hash` option or something?
Huge thanks for this lib 😻 | mpdavis/python-jose | diff --git a/tests/test_jwt.py b/tests/test_jwt.py
index 485fff5..beb6789 100644
--- a/tests/test_jwt.py
+++ b/tests/test_jwt.py
@@ -468,8 +468,8 @@ class TestJWT:
def test_at_hash_missing_claim(self, claims, key):
token = jwt.encode(claims, key)
- with pytest.raises(JWTError):
- jwt.decode(token, key, access_token='<ACCESS_TOKEN>')
+ payload = jwt.decode(token, key, access_token='<ACCESS_TOKEN>')
+ assert 'at_hash' not in payload
def test_at_hash_unable_to_calculate(self, claims, key):
token = jwt.encode(claims, key, access_token='<ACCESS_TOKEN>')
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 1
},
"num_modified_files": 1
} | 2.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
coverage==6.2
ecdsa==0.19.1
future==0.18.3
importlib-metadata==4.8.3
iniconfig==1.1.1
packaging==21.3
pluggy==1.0.0
py==1.11.0
pycrypto==2.6.1
pycryptodome==3.21.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
-e git+https://github.com/mpdavis/python-jose.git@28cc6719eceb89129eed59c25f7bdac015665bdd#egg=python_jose
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: python-jose
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- ecdsa==0.19.1
- future==0.18.3
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pycrypto==2.6.1
- pycryptodome==3.21.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/python-jose
| [
"tests/test_jwt.py::TestJWT::test_at_hash_missing_claim"
]
| []
| [
"tests/test_jwt.py::TestJWT::test_non_default_alg",
"tests/test_jwt.py::TestJWT::test_non_default_alg_positional_bwcompat",
"tests/test_jwt.py::TestJWT::test_non_default_headers",
"tests/test_jwt.py::TestJWT::test_encode",
"tests/test_jwt.py::TestJWT::test_decode",
"tests/test_jwt.py::TestJWT::test_leeway_is_int",
"tests/test_jwt.py::TestJWT::test_leeway_is_timedelta",
"tests/test_jwt.py::TestJWT::test_iat_not_int",
"tests/test_jwt.py::TestJWT::test_nbf_not_int",
"tests/test_jwt.py::TestJWT::test_nbf_datetime",
"tests/test_jwt.py::TestJWT::test_nbf_with_leeway",
"tests/test_jwt.py::TestJWT::test_nbf_in_future",
"tests/test_jwt.py::TestJWT::test_nbf_skip",
"tests/test_jwt.py::TestJWT::test_exp_not_int",
"tests/test_jwt.py::TestJWT::test_exp_datetime",
"tests/test_jwt.py::TestJWT::test_exp_with_leeway",
"tests/test_jwt.py::TestJWT::test_exp_in_past",
"tests/test_jwt.py::TestJWT::test_exp_skip",
"tests/test_jwt.py::TestJWT::test_aud_string",
"tests/test_jwt.py::TestJWT::test_aud_list",
"tests/test_jwt.py::TestJWT::test_aud_list_multiple",
"tests/test_jwt.py::TestJWT::test_aud_list_is_strings",
"tests/test_jwt.py::TestJWT::test_aud_case_sensitive",
"tests/test_jwt.py::TestJWT::test_aud_empty_claim",
"tests/test_jwt.py::TestJWT::test_aud_not_string_or_list",
"tests/test_jwt.py::TestJWT::test_aud_given_number",
"tests/test_jwt.py::TestJWT::test_iss_string",
"tests/test_jwt.py::TestJWT::test_iss_list",
"tests/test_jwt.py::TestJWT::test_iss_tuple",
"tests/test_jwt.py::TestJWT::test_iss_invalid",
"tests/test_jwt.py::TestJWT::test_sub_string",
"tests/test_jwt.py::TestJWT::test_sub_invalid",
"tests/test_jwt.py::TestJWT::test_sub_correct",
"tests/test_jwt.py::TestJWT::test_sub_incorrect",
"tests/test_jwt.py::TestJWT::test_jti_string",
"tests/test_jwt.py::TestJWT::test_jti_invalid",
"tests/test_jwt.py::TestJWT::test_at_hash",
"tests/test_jwt.py::TestJWT::test_at_hash_invalid",
"tests/test_jwt.py::TestJWT::test_at_hash_missing_access_token",
"tests/test_jwt.py::TestJWT::test_at_hash_unable_to_calculate",
"tests/test_jwt.py::TestJWT::test_unverified_claims_string",
"tests/test_jwt.py::TestJWT::test_unverified_claims_list",
"tests/test_jwt.py::TestJWT::test_unverified_claims_object"
]
| []
| MIT License | 1,967 | [
"jose/jwt.py"
]
| [
"jose/jwt.py"
]
|
dask__dask-3016 | 9a1f92801eebeefba4481fa5c9fc00badeee148a | 2017-12-19 13:49:50 | a1653463534a7dd9212f45f833aa17b7dd12e574 | diff --git a/dask/array/core.py b/dask/array/core.py
index 9346b1c04..15a63bd8b 100644
--- a/dask/array/core.py
+++ b/dask/array/core.py
@@ -2159,7 +2159,9 @@ def atop(func, out_ind, *args, **kwargs):
concatenate : bool, keyword only
If true concatenate arrays along dummy indices, else provide lists
adjust_chunks : dict
- Dictionary mapping index to function to be applied to chunk sizes
+ Dictionary mapping index to information to adjust chunk sizes. Can
+ either be a constant chunksize, a tuple of all chunksizes, or a
+ function that converts old chunksize to new chunksize
new_axes : dict, keyword only
New indexes and their dimension lengths
diff --git a/dask/bag/core.py b/dask/bag/core.py
index 4428e0ba5..62297b290 100644
--- a/dask/bag/core.py
+++ b/dask/bag/core.py
@@ -1,16 +1,15 @@
from __future__ import absolute_import, division, print_function
-from collections import Iterable, Iterator, defaultdict
-from functools import wraps, partial
import itertools
import math
-from operator import getitem
import types
import uuid
-from random import Random
-from warnings import warn
+import warnings
+from collections import Iterable, Iterator, defaultdict
from distutils.version import LooseVersion
-
+from functools import wraps, partial
+from operator import getitem
+from random import Random
from toolz import (merge, take, reduce, valmap, map, partition_all, filter,
remove, compose, curry, first, second, accumulate, peek)
@@ -1050,7 +1049,7 @@ class Bag(Base):
return type(self)(merge(self.dask, dsk), e, 1)
- def take(self, k, npartitions=1, compute=True):
+ def take(self, k, npartitions=1, compute=True, warn=True):
""" Take the first k elements.
Parameters
@@ -1064,6 +1063,9 @@ class Bag(Base):
returned. Pass -1 to use all partitions.
compute : bool, optional
Whether to compute the result, default is True.
+ warn : bool, optional
+ Whether to warn if the number of elements returned is less than
+ requested, default is True.
>>> b = from_sequence(range(10))
>>> b.take(3) # doctest: +SKIP
@@ -1087,9 +1089,9 @@ class Bag(Base):
dsk[(name_p, i)] = (list, (take, k, (self.name, i)))
concat = (toolz.concat, ([(name_p, i) for i in range(npartitions)]))
- dsk[(name, 0)] = (safe_take, k, concat)
+ dsk[(name, 0)] = (safe_take, k, concat, warn)
else:
- dsk = {(name, 0): (safe_take, k, (self.name, 0))}
+ dsk = {(name, 0): (safe_take, k, (self.name, 0), warn)}
b = Bag(merge(self.dask, dsk), name, 1)
@@ -1215,12 +1217,16 @@ class Bag(Base):
import dask.dataframe as dd
if meta is None:
if isinstance(columns, pd.DataFrame):
- warn("Passing metadata to `columns` is deprecated. Please "
- "use the `meta` keyword instead.")
+ warnings.warn("Passing metadata to `columns` is deprecated. "
+ "Please use the `meta` keyword instead.")
meta = columns
else:
- head = self.take(1)[0]
- meta = pd.DataFrame([head], columns=columns)
+ head = self.take(1, warn=False)
+ if len(head) == 0:
+ raise ValueError("`dask.bag.Bag.to_dataframe` failed to "
+ "properly infer metadata, please pass in "
+ "metadata via the `meta` keyword")
+ meta = pd.DataFrame(list(head), columns=columns)
elif columns is not None:
raise ValueError("Can't specify both `meta` and `columns`")
else:
@@ -1986,12 +1992,12 @@ def empty_safe_aggregate(func, parts, is_last):
return empty_safe_apply(func, parts2, is_last)
-def safe_take(n, b):
+def safe_take(n, b, warn=True):
r = list(take(n, b))
- if len(r) != n:
- warn("Insufficient elements for `take`. {0} elements requested, "
- "only {1} elements available. Try passing larger `npartitions` "
- "to `take`.".format(n, len(r)))
+ if len(r) != n and warn:
+ warnings.warn("Insufficient elements for `take`. {0} elements "
+ "requested, only {1} elements available. Try passing "
+ "larger `npartitions` to `take`.".format(n, len(r)))
return r
diff --git a/docs/source/inspect.rst b/docs/source/inspect.rst
index 2cd98f57a..4725af80a 100644
--- a/docs/source/inspect.rst
+++ b/docs/source/inspect.rst
@@ -19,7 +19,7 @@ The first step is to look at the ``.dask`` attribute of an array
>>> import dask.array as da
>>> x = da.ones((5, 15), chunks=(5, 5))
- >>> x.dask
+ >>> dict(x.dask)
{('wrapped_1', 0, 0): (ones, (5, 5)),
('wrapped_1', 0, 1): (ones, (5, 5)),
('wrapped_1', 0, 2): (ones, (5, 5))}
@@ -29,7 +29,7 @@ objects
.. code-block:: python
- >>> (x + 1).dask
+ >>> dict((x + 1).dask)
{('wrapped_1', 0, 0): (ones, (5, 5)),
('wrapped_1', 0, 1): (ones, (5, 5)),
('wrapped_1', 0, 2): (ones, (5, 5))
@@ -37,6 +37,9 @@ objects
('x_1', 0, 1): (add, ('wrapped_1', 0, 1), 1),
('x_1', 0, 2): (add, ('wrapped_1', 0, 2), 1)}
+.. note:: In this example we use simple names like ``x_1``, ``ones``, and
+ ``add`` for demonstration purposes. However in practice these names may be
+ more complex and include long hashed names.
Visualize graphs with DOT
-------------------------
| Dask objects inspection failing, issues with graph
Hi,
- I cannot reproduce the example for inspecting dask objects given [here](http://dask.pydata.org/en/latest/inspect.html) on the dask website.
Please see the following notebook:
[inspect dask object](https://github.com/apatlpo/lops-array/blob/master/sandbox/test_graph.ipynb)
(sorry if github is not rendering notebooks, as is the case for me right now)
- Also when I try to print graphs, long, complicated identifiers show up, which complicates the interpretation (see the sketch after this list). For example:
[graph](https://github.com/apatlpo/lops-array/blob/master/sandbox/natl60_tseries_debug.ipynb)
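To make the second point concrete, here is a small sketch of the inspection being attempted. It uses only public dask APIs (`.dask` and `.name`); the hashed task names it prints are expected, and they are what make real graphs harder to read than the short aliases in the docs example:
```python
import dask.array as da

x = da.ones((5, 15), chunks=(5, 5))

# .dask maps task keys to computations; wrapping it in dict() prints it
# as a plain mapping rather than a graph object.
for key, task in sorted(dict(x.dask).items(), key=lambda kv: str(kv[0])):
    print(key, task)

# Array names embed a hash, e.g. 'ones-<hash>', so the keys above look
# noisier than the 'wrapped_1' style aliases shown in the documentation.
print(x.name)
print((x + 1).name)
```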
Any thoughts?
Are these related issues?
| dask/dask | diff --git a/dask/bag/tests/test_bag.py b/dask/bag/tests/test_bag.py
index 46427f714..44b874d48 100644
--- a/dask/bag/tests/test_bag.py
+++ b/dask/bag/tests/test_bag.py
@@ -530,17 +530,23 @@ def test_take_npartitions():
b.take(1, npartitions=5)
[email protected](sys.version_info[:2] == (3,3),
- reason="Python3.3 uses pytest2.7.2, w/o warns method")
def test_take_npartitions_warn():
- with pytest.warns(None):
- b.take(100)
+ # Use single-threaded scheduler so warnings are properly captured in the
+ # same process
+ with dask.set_options(get=dask.get):
+ with pytest.warns(UserWarning):
+ b.take(100)
+
+ with pytest.warns(UserWarning):
+ b.take(7)
- with pytest.warns(None):
- b.take(7)
+ with pytest.warns(None) as rec:
+ b.take(7, npartitions=2)
+ assert len(rec) == 0
- with pytest.warns(None):
- b.take(7, npartitions=2)
+ with pytest.warns(None) as rec:
+ b.take(7, warn=False)
+ assert len(rec) == 0
def test_map_is_lazy():
@@ -775,6 +781,11 @@ def test_to_dataframe():
with pytest.raises(ValueError):
b.to_dataframe(columns=['a', 'b'], meta=sol)
+ # Inference fails if empty first partition
+ b2 = b.filter(lambda x: x['a'] > 200)
+ with pytest.raises(ValueError):
+ b2.to_dataframe()
+
# Single column
b = b.pluck('a')
sol = sol[['a']]
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 3,
"test_score": 2
},
"num_modified_files": 3
} | 1.20 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[complete]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
click==8.0.4
cloudpickle==2.2.1
-e git+https://github.com/dask/dask.git@9a1f92801eebeefba4481fa5c9fc00badeee148a#egg=dask
distributed==1.20.2
HeapDict==1.0.1
importlib-metadata==4.8.3
iniconfig==1.1.1
locket==1.0.0
msgpack-python==0.5.6
numpy==1.19.5
packaging==21.3
pandas==1.1.5
partd==1.2.0
pluggy==1.0.0
psutil==7.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
python-dateutil==2.9.0.post0
pytz==2025.2
six==1.17.0
sortedcontainers==2.4.0
tblib==1.7.0
tomli==1.2.3
toolz==0.12.0
tornado==6.1
typing_extensions==4.1.1
zict==2.1.0
zipp==3.6.0
| name: dask
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- click==8.0.4
- cloudpickle==2.2.1
- distributed==1.20.2
- heapdict==1.0.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- locket==1.0.0
- msgpack-python==0.5.6
- numpy==1.19.5
- packaging==21.3
- pandas==1.1.5
- partd==1.2.0
- pluggy==1.0.0
- psutil==7.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- six==1.17.0
- sortedcontainers==2.4.0
- tblib==1.7.0
- tomli==1.2.3
- toolz==0.12.0
- tornado==6.1
- typing-extensions==4.1.1
- zict==2.1.0
- zipp==3.6.0
prefix: /opt/conda/envs/dask
| [
"dask/bag/tests/test_bag.py::test_take_npartitions_warn",
"dask/bag/tests/test_bag.py::test_to_dataframe"
]
| []
| [
"dask/bag/tests/test_bag.py::test_Bag",
"dask/bag/tests/test_bag.py::test_keys",
"dask/bag/tests/test_bag.py::test_bag_map",
"dask/bag/tests/test_bag.py::test_map_method",
"dask/bag/tests/test_bag.py::test_starmap",
"dask/bag/tests/test_bag.py::test_filter",
"dask/bag/tests/test_bag.py::test_remove",
"dask/bag/tests/test_bag.py::test_iter",
"dask/bag/tests/test_bag.py::test_repr[str]",
"dask/bag/tests/test_bag.py::test_repr[repr]",
"dask/bag/tests/test_bag.py::test_pluck",
"dask/bag/tests/test_bag.py::test_pluck_with_default",
"dask/bag/tests/test_bag.py::test_unzip",
"dask/bag/tests/test_bag.py::test_fold",
"dask/bag/tests/test_bag.py::test_distinct",
"dask/bag/tests/test_bag.py::test_frequencies",
"dask/bag/tests/test_bag.py::test_topk",
"dask/bag/tests/test_bag.py::test_topk_with_non_callable_key[1]",
"dask/bag/tests/test_bag.py::test_topk_with_non_callable_key[2]",
"dask/bag/tests/test_bag.py::test_topk_with_multiarg_lambda",
"dask/bag/tests/test_bag.py::test_lambdas",
"dask/bag/tests/test_bag.py::test_reductions",
"dask/bag/tests/test_bag.py::test_reduction_names",
"dask/bag/tests/test_bag.py::test_tree_reductions",
"dask/bag/tests/test_bag.py::test_aggregation[1]",
"dask/bag/tests/test_bag.py::test_aggregation[3]",
"dask/bag/tests/test_bag.py::test_aggregation[4]",
"dask/bag/tests/test_bag.py::test_non_splittable_reductions[1]",
"dask/bag/tests/test_bag.py::test_non_splittable_reductions[10]",
"dask/bag/tests/test_bag.py::test_std",
"dask/bag/tests/test_bag.py::test_var",
"dask/bag/tests/test_bag.py::test_join",
"dask/bag/tests/test_bag.py::test_foldby",
"dask/bag/tests/test_bag.py::test_foldby_tree_reduction",
"dask/bag/tests/test_bag.py::test_map_partitions",
"dask/bag/tests/test_bag.py::test_map_partitions_args_kwargs",
"dask/bag/tests/test_bag.py::test_random_sample_size",
"dask/bag/tests/test_bag.py::test_random_sample_prob_range",
"dask/bag/tests/test_bag.py::test_random_sample_repeated_computation",
"dask/bag/tests/test_bag.py::test_random_sample_different_definitions",
"dask/bag/tests/test_bag.py::test_random_sample_random_state",
"dask/bag/tests/test_bag.py::test_lazify_task",
"dask/bag/tests/test_bag.py::test_lazify",
"dask/bag/tests/test_bag.py::test_inline_singleton_lists",
"dask/bag/tests/test_bag.py::test_take",
"dask/bag/tests/test_bag.py::test_take_npartitions",
"dask/bag/tests/test_bag.py::test_map_is_lazy",
"dask/bag/tests/test_bag.py::test_can_use_dict_to_make_concrete",
"dask/bag/tests/test_bag.py::test_read_text",
"dask/bag/tests/test_bag.py::test_read_text_large",
"dask/bag/tests/test_bag.py::test_read_text_encoding",
"dask/bag/tests/test_bag.py::test_read_text_large_gzip",
"dask/bag/tests/test_bag.py::test_from_sequence",
"dask/bag/tests/test_bag.py::test_from_long_sequence",
"dask/bag/tests/test_bag.py::test_product",
"dask/bag/tests/test_bag.py::test_partition_collect",
"dask/bag/tests/test_bag.py::test_groupby",
"dask/bag/tests/test_bag.py::test_groupby_with_indexer",
"dask/bag/tests/test_bag.py::test_groupby_with_npartitions_changed",
"dask/bag/tests/test_bag.py::test_concat",
"dask/bag/tests/test_bag.py::test_flatten",
"dask/bag/tests/test_bag.py::test_concat_after_map",
"dask/bag/tests/test_bag.py::test_args",
"dask/bag/tests/test_bag.py::test_to_textfiles[gz-GzipFile]",
"dask/bag/tests/test_bag.py::test_to_textfiles[-open]",
"dask/bag/tests/test_bag.py::test_to_textfiles[bz2-BZ2File]",
"dask/bag/tests/test_bag.py::test_to_textfiles_name_function_preserves_order",
"dask/bag/tests/test_bag.py::test_to_textfiles_name_function_warn",
"dask/bag/tests/test_bag.py::test_to_textfiles_encoding",
"dask/bag/tests/test_bag.py::test_to_textfiles_inputs",
"dask/bag/tests/test_bag.py::test_to_textfiles_endlines",
"dask/bag/tests/test_bag.py::test_string_namespace",
"dask/bag/tests/test_bag.py::test_string_namespace_with_unicode",
"dask/bag/tests/test_bag.py::test_str_empty_split",
"dask/bag/tests/test_bag.py::test_map_with_iterator_function",
"dask/bag/tests/test_bag.py::test_ensure_compute_output_is_concrete",
"dask/bag/tests/test_bag.py::test_bag_class_extend",
"dask/bag/tests/test_bag.py::test_gh715",
"dask/bag/tests/test_bag.py::test_bag_compute_forward_kwargs",
"dask/bag/tests/test_bag.py::test_to_delayed",
"dask/bag/tests/test_bag.py::test_to_delayed_optimizes",
"dask/bag/tests/test_bag.py::test_from_delayed",
"dask/bag/tests/test_bag.py::test_from_delayed_iterator",
"dask/bag/tests/test_bag.py::test_range",
"dask/bag/tests/test_bag.py::test_zip[1]",
"dask/bag/tests/test_bag.py::test_zip[7]",
"dask/bag/tests/test_bag.py::test_zip[10]",
"dask/bag/tests/test_bag.py::test_zip[28]",
"dask/bag/tests/test_bag.py::test_repartition[1-1]",
"dask/bag/tests/test_bag.py::test_repartition[1-2]",
"dask/bag/tests/test_bag.py::test_repartition[1-7]",
"dask/bag/tests/test_bag.py::test_repartition[1-11]",
"dask/bag/tests/test_bag.py::test_repartition[1-23]",
"dask/bag/tests/test_bag.py::test_repartition[2-1]",
"dask/bag/tests/test_bag.py::test_repartition[2-2]",
"dask/bag/tests/test_bag.py::test_repartition[2-7]",
"dask/bag/tests/test_bag.py::test_repartition[2-11]",
"dask/bag/tests/test_bag.py::test_repartition[2-23]",
"dask/bag/tests/test_bag.py::test_repartition[5-1]",
"dask/bag/tests/test_bag.py::test_repartition[5-2]",
"dask/bag/tests/test_bag.py::test_repartition[5-7]",
"dask/bag/tests/test_bag.py::test_repartition[5-11]",
"dask/bag/tests/test_bag.py::test_repartition[5-23]",
"dask/bag/tests/test_bag.py::test_repartition[12-1]",
"dask/bag/tests/test_bag.py::test_repartition[12-2]",
"dask/bag/tests/test_bag.py::test_repartition[12-7]",
"dask/bag/tests/test_bag.py::test_repartition[12-11]",
"dask/bag/tests/test_bag.py::test_repartition[12-23]",
"dask/bag/tests/test_bag.py::test_repartition[23-1]",
"dask/bag/tests/test_bag.py::test_repartition[23-2]",
"dask/bag/tests/test_bag.py::test_repartition[23-7]",
"dask/bag/tests/test_bag.py::test_repartition[23-11]",
"dask/bag/tests/test_bag.py::test_repartition[23-23]",
"dask/bag/tests/test_bag.py::test_repartition_names",
"dask/bag/tests/test_bag.py::test_accumulate",
"dask/bag/tests/test_bag.py::test_groupby_tasks",
"dask/bag/tests/test_bag.py::test_groupby_tasks_names",
"dask/bag/tests/test_bag.py::test_groupby_tasks_2[1000-20-100]",
"dask/bag/tests/test_bag.py::test_groupby_tasks_2[12345-234-1042]",
"dask/bag/tests/test_bag.py::test_groupby_tasks_3",
"dask/bag/tests/test_bag.py::test_to_textfiles_empty_partitions",
"dask/bag/tests/test_bag.py::test_reduction_empty",
"dask/bag/tests/test_bag.py::test_reduction_empty_aggregate[1]",
"dask/bag/tests/test_bag.py::test_reduction_empty_aggregate[2]",
"dask/bag/tests/test_bag.py::test_reduction_empty_aggregate[4]",
"dask/bag/tests/test_bag.py::test_reduction_with_non_comparable_objects",
"dask/bag/tests/test_bag.py::test_empty",
"dask/bag/tests/test_bag.py::test_bag_picklable",
"dask/bag/tests/test_bag.py::test_msgpack_unicode",
"dask/bag/tests/test_bag.py::test_bag_with_single_callable",
"dask/bag/tests/test_bag.py::test_optimize_fuse_keys",
"dask/bag/tests/test_bag.py::test_reductions_are_lazy",
"dask/bag/tests/test_bag.py::test_repeated_groupby",
"dask/bag/tests/test_bag.py::test_temporary_directory",
"dask/bag/tests/test_bag.py::test_empty_bag",
"dask/bag/tests/test_bag.py::test_bag_paths"
]
| []
| BSD 3-Clause "New" or "Revised" License | 1,968 | [
"dask/array/core.py",
"docs/source/inspect.rst",
"dask/bag/core.py"
]
| [
"dask/array/core.py",
"docs/source/inspect.rst",
"dask/bag/core.py"
]
|
|
CORE-GATECH-GROUP__serpent-tools-72 | efd13f38b728415f603bef6ab68ae3afd4694983 | 2017-12-19 15:46:17 | 0f11460f4d97775d096a6cdb8e1c2b94549c9f4e | diff --git a/README.rst b/README.rst
index 5ab1e01..c7b11fb 100644
--- a/README.rst
+++ b/README.rst
@@ -44,9 +44,6 @@ References
----------
The Annals of Nuclear Energy article should be cited for all work
-using ``SERPENT``. If you wish to cite this project, please cite as
-
-.. code:: bibtex
using ``SERPENT``. If you wish to cite this project, please cite as::
url{@serpentTools
diff --git a/docs/about.rst b/docs/about.rst
deleted file mode 100644
index 7887051..0000000
--- a/docs/about.rst
+++ /dev/null
@@ -1,31 +0,0 @@
-
-.. include:: ../README.rst
-
-Installation
-------------
-
-The ``serpentTools`` package can be downloaded either as a git repository or
-as a zipped file. Both can be obtained through the ``Clone or download`` option
-at the
-`serpent-tools GitHub <https://github.com/CORE-GATECH-GROUP/serpent-tools>`_.
-
-Once the repository has been downloaded or extracted from zip, the package
-can be installed with::
-
- cd serpentTools
- python setup.py install
- python setup.py test
-
-Installing with `setuptools <https://pypi.python.org/pypi/setuptools/38.2.4>`_
-is preferred over the standard ``distutils`` module. ``setuptools`` can be
-installed with ``pip`` as::
-
- pip install -U setuptools
-
-Installing in this manner ensures that the supporting packages,
-like ``numpy`` are installed and up to date.
-
-License
--------
-
-.. include:: ../LICENSE.rst
diff --git a/docs/api/index.rst b/docs/api/index.rst
index 2b3c86a..a1e4e1f 100644
--- a/docs/api/index.rst
+++ b/docs/api/index.rst
@@ -1,3 +1,4 @@
+.. _api:
API
===
diff --git a/docs/conf.py b/docs/conf.py
index 0fcd4ae..12a4177 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -161,4 +161,4 @@ texinfo_documents = [
]
# -- Options for auto documentation --------------------------------------
-autodoc_default_flags = ['members', 'inherited-members']
+autodoc_default_flags = ['members']
diff --git a/docs/contributing/index.rst b/docs/contributing/index.rst
index a38d11c..dd142de 100644
--- a/docs/contributing/index.rst
+++ b/docs/contributing/index.rst
@@ -1,3 +1,4 @@
+.. _contributing:
Contributing
============
diff --git a/docs/examples/Settings.rst b/docs/examples/Settings.rst
index 5e7ba9d..76ab757 100644
--- a/docs/examples/Settings.rst
+++ b/docs/examples/Settings.rst
@@ -25,9 +25,9 @@ Below are the default values for each setting available
.. code:: ipython3
>>> for setting in sorted(defaultSettings.keys()):
- >>> print(setting)
- >>> for key in defaultSettings[setting]:
- >>> print('\t', key, '-', defaultSettings[setting][key])
+ ... print(setting)
+ ... for key in defaultSettings[setting]:
+ ... print('\t', key, '-', defaultSettings[setting][key])
depletion.materialVariables
default - []
description - Names of variables to store. Empty list -> all variables.
@@ -83,14 +83,14 @@ correct type, and is an allowable option, if given.
.. code:: ipython3
>>> try:
- >>> rc['depletion.metadataKeys'] = False
- >>> except TypeError as te:
- >>> print(te)
+ ... rc['depletion.metadataKeys'] = False
+ ... except TypeError as te:
+ ... print(te)
Setting depletion.metadataKeys should be of type <class 'list'>, not <class 'bool'>
>>> try:
- >>> rc['serpentVersion'] = '1.2.3'
- >>> except KeyError as ke:
- >>> print(ke)
+ ... rc['serpentVersion'] = '1.2.3'
+ ... except KeyError as ke:
+ ... print(ke)
"Setting serpentVersion is
1.2.3
and not one of the allowed options:
@@ -102,7 +102,7 @@ changes.
.. code:: ipython3
>>> with rc:
- >>> rc['depletion.metadataKeys'] = ['ZAI', 'BU']
+ ... rc['depletion.metadataKeys'] = ['ZAI', 'BU']
>>>
>>> rc['depletion.metadataKeys']
>>> rc['verbosity'] = 'info'
@@ -168,3 +168,28 @@ to obtain all the data present in their respective files.
See the :ref:`branching-ex` example for more information on using these
settings to control scraped data.
+
+.. _conf-files:
+
+Configuration Files
+-------------------
+
+As of version 0.1.2, the ``rc`` object allows for settings to be updated
+from a yaml configuration file using the
+:py:meth:`~serpentTools.settings.UserSettingsLoader.loadYaml` method.
+The file is structured with the names of settings as keys and the
+desired setting value as the values.
+The loader also attempts to expand nested settings, like reader-specific
+settings, that may be lumped in a second level.
+
+.. code:: yaml
+
+ verbosity: warning
+ xs.getInfXS: False
+ branching:
+ areUncsPresent: False
+ floatVariables: [Fhi, Blo]
+ depletion:
+ materials: [fuel*]
+ materialVariables:
+ [ADENS, MDENS, VOLUME]
diff --git a/docs/welcome/about.rst b/docs/welcome/about.rst
index 4d2c74b..e09c9c5 100644
--- a/docs/welcome/about.rst
+++ b/docs/welcome/about.rst
@@ -1,2 +1,71 @@
+=============
+serpent-tools
+=============
-.. include:: ../../README.rst
+.. image:: https://travis-ci.org/CORE-GATECH-GROUP/serpent-tools.svg?branch=master
+ :target: https://travis-ci.org/CORE-GATECH-GROUP/serpent-tools
+
+A suite of parsers designed to make interacting with
+``SERPENT`` [1]_ output files simple and flawless.
+
+The ``SERPENT`` Monte Carlo code
+is developed by VTT Technical Research Centre of Finland, Ltd.
+More information, including distribution and licensing of ``SERPENT`` can be
+found at `<montecarlo.vtt.fi>`_
+
+Installation
+------------
+
+The ``serpentTools`` package can be downloaded either as a git repository or
+as a zipped file. Both can be obtained through the ``Clone or download`` option
+at the
+`serpent-tools GitHub <https://github.com/CORE-GATECH-GROUP/serpent-tools>`_.
+
+Once the repository has been downloaded or extracted from zip, the package
+can be installed with::
+
+ cd serpentTools
+ python setup.py install
+ python setup.py test
+
+Installing with `setuptools <https://pypi.python.org/pypi/setuptools/38.2.4>`_
+is preferred over the standard ``distutils`` module. ``setuptools`` can be
+installed with ``pip`` as::
+
+ pip install -U setuptools
+
+Installing in this manner ensures that the supporting packages,
+like ``numpy`` are installed and up to date.
+
+Issues
+------
+
+If you have issues installing the project, find a bug, or want to add a feature,
+the `GitHub issue page <https://github.com/CORE-GATECH-GROUP/serpent-tools/issues>`_
+is the best place to do that.
+
+Contributors
+------------
+
+Here are all the wonderful people that helped make this project happen
+
+* `Andrew Johnson <https://github.com/drewejohnson>`_
+* `Dr. Dan Kotlyar <https://github.com/CORE-GATECH>`_
+* `Stefano Terlizzi <https://github.com/sallustius>`_
+
+References
+----------
+
+The Annals of Nuclear Energy article should be cited for all work
+using ``SERPENT``. If you wish to cite this project, please cite as::
+
+ url{@serpentTools
+ author = {Andrew Johnson and Dan Kotlyar},
+ title = {serpentTools: A suite of parsers designed to make interacting with SERPENT outputs simple and flawless},
+ url = {https://github.com/CORE-GATECH-GROUP/serpent-tools},
+ year = {2017}
+ }
+
+.. [1] Leppanen, J. et al. (2015) "The Serpent Monte Carlo code: Status,
+ development and applications in 2013." Ann. Nucl. Energy, `82 (2015) 142-150
+ <http://www.sciencedirect.com/science/article/pii/S0306454914004095>`_
diff --git a/examples/Settings.ipynb b/examples/Settings.ipynb
index d321137..05064f6 100644
--- a/examples/Settings.ipynb
+++ b/examples/Settings.ipynb
@@ -25,9 +25,17 @@
},
{
"cell_type": "code",
- "execution_count": 10,
+ "execution_count": 1,
"metadata": {},
- "outputs": [],
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "INFO : serpentTools: Using version 1.0b0+34.gce072dd.dirty\n"
+ ]
+ }
+ ],
"source": [
"import serpentTools\n",
"from serpentTools.settings import rc, defaultSettings"
@@ -42,7 +50,7 @@
},
{
"cell_type": "code",
- "execution_count": 11,
+ "execution_count": 2,
"metadata": {},
"outputs": [
{
@@ -51,60 +59,60 @@
"text": [
"branching.areUncsPresent\n",
"\t default - False\n",
- "\t type - <class 'bool'>\n",
"\t description - True if the values in the .coe file contain uncertainties\n",
+ "\t type - <class 'bool'>\n",
"branching.floatVariables\n",
"\t default - []\n",
- "\t description - Names of state data variables to convert to floats for each branch\n",
"\t type - <class 'list'>\n",
+ "\t description - Names of state data variables to convert to floats for each branch\n",
"branching.intVariables\n",
"\t default - []\n",
- "\t description - Name of state data variables to convert to integers for each branch\n",
"\t type - <class 'list'>\n",
+ "\t description - Name of state data variables to convert to integers for each branch\n",
"depletion.materialVariables\n",
"\t default - []\n",
- "\t description - Names of variables to store. Empty list -> all variables.\n",
"\t type - <class 'list'>\n",
+ "\t description - Names of variables to store. Empty list -> all variables.\n",
"depletion.materials\n",
"\t default - []\n",
- "\t description - Names of materials to store. Empty list -> all materials.\n",
"\t type - <class 'list'>\n",
+ "\t description - Names of materials to store. Empty list -> all materials.\n",
"depletion.metadataKeys\n",
- "\t default - ['ZAI', 'NAMES', 'DAYS', 'BU']\n",
"\t options - default\n",
- "\t description - Non-material data to store, i.e. zai, isotope names, burnup schedule, etc.\n",
+ "\t default - ['ZAI', 'NAMES', 'DAYS', 'BU']\n",
"\t type - <class 'list'>\n",
+ "\t description - Non-material data to store, i.e. zai, isotope names, burnup schedule, etc.\n",
"depletion.processTotal\n",
"\t default - True\n",
- "\t description - Option to store the depletion data from the TOT block\n",
"\t type - <class 'bool'>\n",
+ "\t description - Option to store the depletion data from the TOT block\n",
"serpentVersion\n",
- "\t default - 2.1.29\n",
"\t options - ['2.1.29']\n",
- "\t description - Version of SERPENT\n",
+ "\t default - 2.1.29\n",
"\t type - <class 'str'>\n",
+ "\t description - Version of SERPENT\n",
"verbosity\n",
- "\t default - info\n",
"\t options - ['critical', 'error', 'warning', 'info', 'debug']\n",
- "\t type - <class 'str'>\n",
+ "\t default - info\n",
+ "\t updater - <function updateLevel at 0x7f32b2e87048>\n",
"\t description - Set the level of errors to be shown.\n",
- "\t updater - <function updateLevel at 0x00000251B54FD0D0>\n",
+ "\t type - <class 'str'>\n",
"xs.getB1XS\n",
"\t default - True\n",
- "\t description - If true, store the critical leakage cross sections.\n",
"\t type - <class 'bool'>\n",
+ "\t description - If true, store the critical leakage cross sections.\n",
"xs.getInfXS\n",
"\t default - True\n",
- "\t description - If true, store the infinite medium cross sections.\n",
"\t type - <class 'bool'>\n",
+ "\t description - If true, store the infinite medium cross sections.\n",
"xs.variableExtras\n",
"\t default - []\n",
- "\t description - Full SERPENT name of variables to be read\n",
"\t type - <class 'list'>\n",
+ "\t description - Full SERPENT name of variables to be read\n",
"xs.variableGroups\n",
"\t default - []\n",
- "\t description - Name of variable groups from variables.yaml to be expanded into SERPENT variable to be stored\n",
- "\t type - <class 'list'>\n"
+ "\t type - <class 'list'>\n",
+ "\t description - Name of variable groups from variables.yaml to be expanded into SERPENT variable to be stored\n"
]
}
],
@@ -124,7 +132,7 @@
},
{
"cell_type": "code",
- "execution_count": 12,
+ "execution_count": 3,
"metadata": {},
"outputs": [
{
@@ -148,7 +156,7 @@
},
{
"cell_type": "code",
- "execution_count": 13,
+ "execution_count": 4,
"metadata": {},
"outputs": [
{
@@ -168,7 +176,7 @@
},
{
"cell_type": "code",
- "execution_count": 14,
+ "execution_count": 5,
"metadata": {},
"outputs": [
{
@@ -195,7 +203,7 @@
},
{
"cell_type": "code",
- "execution_count": 15,
+ "execution_count": 6,
"metadata": {},
"outputs": [
{
@@ -236,7 +244,7 @@
},
{
"cell_type": "code",
- "execution_count": 16,
+ "execution_count": 7,
"metadata": {},
"outputs": [
{
@@ -245,7 +253,7 @@
"'2.1.29'"
]
},
- "execution_count": 16,
+ "execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
@@ -256,7 +264,7 @@
},
{
"cell_type": "code",
- "execution_count": 17,
+ "execution_count": 8,
"metadata": {},
"outputs": [
{
@@ -283,10 +291,8 @@
},
{
"cell_type": "code",
- "execution_count": 18,
- "metadata": {
- "collapsed": true
- },
+ "execution_count": 9,
+ "metadata": {},
"outputs": [],
"source": [
"assert 'INF_SCATT3' not in varSet"
@@ -305,6 +311,50 @@
"source": [
"See the `BrancingReader` example for more information on using these settings to control scraped data."
]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Configuration files\n",
+ "As of version 0.1.2, the `rc` object allows for settings to be updated from a yaml configuration file using the `loadYaml` method. The file contains setting names as keys with the desired variables as values, as\n",
+ "```\n",
+ "verbosity: warning\n",
+ "xs.getInfXS: False\n",
+ "```\n",
+ "However, the loader can also expand a nested dictionary structure, as\n",
+ "```\n",
+ "branching:\n",
+ " areUncsPresent: False\n",
+ " floatVariables: [Fhi, Blo]\n",
+ "depletion:\n",
+ " materials: [fuel*]\n",
+ " materialVariables:\n",
+ " [ADENS, MDENS, VOLUME]\n",
+ "```"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "False"
+ ]
+ },
+ "execution_count": 15,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "myConf = 'myConfig.yaml'\n",
+ "rc.loadYaml(myConf)\n",
+ "rc['branching.areUncsPresent']"
+ ]
}
],
"metadata": {
@@ -323,7 +373,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.6.1"
+ "version": "3.5.2"
}
},
"nbformat": 4,
diff --git a/examples/myConfig.yaml b/examples/myConfig.yaml
new file mode 100644
index 0000000..8953d66
--- /dev/null
+++ b/examples/myConfig.yaml
@@ -0,0 +1,12 @@
+xs.getInfXS: False
+xs.getB1XS: True
+xs.variableGroups: [gc-meta, kinetics, xs]
+branching:
+ areUncsPresent: False
+ floatVariables: [Fhi, Blo]
+depletion:
+ materials: [fuel*]
+ metadataKeys: [NAMES, BU]
+ materialVariables:
+ [ADENS, MDENS, VOLUME]
+serpentVersion: 2.1.29
diff --git a/serpentTools/settings.py b/serpentTools/settings.py
index 0f2ae8f..a8d9ae5 100644
--- a/serpentTools/settings.py
+++ b/serpentTools/settings.py
@@ -1,5 +1,6 @@
"""Settings to yield control to the user."""
import os
+import six
import yaml
@@ -151,7 +152,7 @@ class DefaultSettingsLoader(dict):
for name, value in defaultSettings.items():
if 'options' in value:
options = (value['default'] if value['options'] == 'default'
- else value['options'])
+ else value['options'])
else:
options = None
settingsOptions = {'name': name,
@@ -272,7 +273,7 @@ class UserSettingsLoader(dict):
"""
settings = {}
settingsPreffix = ([settingsPreffix] if isinstance(settingsPreffix, str)
- else settingsPreffix)
+ else settingsPreffix)
for setting, value in self.items():
settingPath = setting.split('.')
if settingPath[0] in settingsPreffix:
@@ -314,5 +315,60 @@ class UserSettingsLoader(dict):
variables.update(baseGroups[key])
return variables
+ def loadYaml(self, filePath, strict=True):
+ """
+ Update the settings based on the contents of the yaml file
+
+ .. versionadded:: 0.2.0
+
+ Parameters
+ ----------
+ filePath: str
+ Path to config file
+ strict: bool
+ Fail at the first incorrect setting. If false, failed settings
+ will not be loaded and alerts will be raised
+
+ Raises
+ ------
+ KeyError or TypeError
+ If settings found in the config file are not
+ valid
+ FileNotFound or OSError
+ If the file does not exist
+
+ """
+ messages.debug('Attempting to read from {}'.format(filePath))
+ with open(filePath) as yFile:
+ l = yaml.safe_load(yFile)
+ messages.info('Loading settings onto object with strict:{}'
+ .format(strict))
+
+ for key, value in six.iteritems(l):
+ if isinstance(value, dict):
+ self.__recursiveLoad(value, strict, key)
+ else:
+ self.__safeLoad(key, value, strict)
+ messages.info('Done')
+
+ def __recursiveLoad(self, curLevel, strict, preset):
+ for nextLevelKey, nextLevel in six.iteritems(curLevel):
+ newSettingName = preset + '.' + nextLevelKey
+ if isinstance(nextLevel, dict):
+ self.__recursiveLoad(nextLevel, strict, newSettingName)
+ else:
+ self.__safeLoad(newSettingName, nextLevel, strict)
+
+ def __safeLoad(self, key, value, strict):
+ messages.debug('Attempting to set setting {} to {}'
+ .format(key, value))
+ try:
+ self.setValue(key, value)
+ except (KeyError, TypeError) as error:
+ if strict:
+ raise error
+ else:
+ messages.error(str(error))
+
rc = UserSettingsLoader()
| Update settings from config file
Currently, the settings loader has to take settings one at a time. For the sake of consistency and ease, it would be nice to have the `rc` settings loader take a file path argument and load all settings from there.
I think a `yaml` file would be pretty straightforward, since we already use the `pyyaml` module. The loader could be set up to read in a strict, full-name manner (see the usage sketch after these snippets),
```
depletion.metadataKeys: ['ZAI', 'NAMES']
depletion.materials: ['fuel*', 'bp1']
verbosity: 'error'
...
```
or with nested levels
```
depletion:
metadataKeys: ['ZAI', 'NAMES']
materials: ['fuel*', 'bp1']
verbosity: 'error'
...
```
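A minimal usage sketch of the proposed loader — hedged against the patch above, which adds `UserSettingsLoader.loadYaml(filePath, strict=True)` to the shared `rc` object, and assuming the flat snippet above is saved as `myConfig.yaml`:

```
from serpentTools.settings import rc

# Load every setting from a config file in one call. With the
# default strict=True, the first invalid setting raises an error;
# strict=False only logs failed settings and keeps going.
rc.loadYaml('myConfig.yaml')

assert rc['verbosity'] == 'error'
```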
The two implementations should have the same result; the only difference is that, in the second case, some reconstruction has to happen behind the scenes. | CORE-GATECH-GROUP/serpent-tools | diff --git a/serpentTools/tests/test_settings.py b/serpentTools/tests/test_settings.py
index de3b43d..b6f8e50 100644
--- a/serpentTools/tests/test_settings.py
+++ b/serpentTools/tests/test_settings.py
@@ -1,133 +1,204 @@
-"""Tests for the settings loaders."""
-import warnings
-import unittest
-
-from serpentTools import settings
-from serpentTools.messages import deprecated, willChange
-
-class DefaultSettingsTester(unittest.TestCase):
- """Class to test the functionality of the master loader."""
-
- @classmethod
- def setUpClass(cls):
- cls.defaultLoader = settings.DefaultSettingsLoader()
- cls.testSetting = 'depletion.metadataKeys'
- cls.testSettingExpected = ['ZAI', 'NAMES', 'DAYS', 'BU']
- cls.testSettingMethod = cls.assertListEqual
-
- def test_getDefault(self):
- """Verify the default settings loader properly retrives defaults."""
- self.testSettingMethod(self._getLoaderSetting(self.testSetting),
- self.testSettingExpected)
-
- def test_cannotChangeDefaults(self):
- """Verify the default settings loader is locked after creation."""
- with self.assertRaises(KeyError):
- self.defaultLoader['this'] = 'should fail'
-
- def _getLoaderSetting(self, setting):
- return self.defaultLoader[setting].default
-
-
-class RCTester(unittest.TestCase):
- """Class to test the functionality of the scriptable settings manager."""
-
- @classmethod
- def setUpClass(cls):
- cls.rc = settings.UserSettingsLoader()
- cls.rc['depletion.metadataKeys'] = ['ZAI']
-
- def test_failAtNonexistentSetting(self):
- """Verify that the loader will not load a nonexistent setting."""
- with self.assertRaises(KeyError):
- self.rc['bad setting'] = False
-
- def test_failAtBadSetting_options(self):
- """Verify that the loader will raise an error for bad options."""
- with self.assertRaises(KeyError):
- self.rc['depletion.metadata'] = ['this should fail']
-
- def test_failAtBadSettings_type(self):
- """Verify that the loader will raise an error for bad type."""
- with self.assertRaises(TypeError):
- self.rc['depletion.processTotal'] = 'this should fail'
-
- def test_returnReaderSettings(self):
- """Verify the correct reader settings can be retrieved."""
- readerName = 'depletion'
- expected = {
- 'metadataKeys': ['ZAI'],
- 'materialVariables': [],
- 'materials': [],
- 'processTotal': True,
- }
- actual = self.rc.getReaderSettings(readerName)
- self.assertDictEqual(expected, actual)
-
- def test_readerWithUpdatedSettings(self):
- """Verify the settings passed to the reader reflect the update."""
- from serpentTools.parsers.depletion import DepletionReader
- with settings.rc as tempRC:
- tempRC['depletion.metadataKeys'] = ['ZAI']
- reader = DepletionReader('None')
- self.assertTrue(reader.settings['metadataKeys'] == ['ZAI'])
-
- def test_expandExtras(self):
- """Verify that a set of extras is given if that is the only argument."""
- extras = ['hello', 'testing']
- with self.rc as tempRC:
- tempRC['xs.variableExtras'] = extras
- expected = set(extras)
- actual = tempRC.expandVariables()
- self.assertSetEqual(expected, actual)
-
- def test_fullExtend(self):
- """Verify that the variable expansion return the correct variables"""
- groupNames = ['times', 'diffusion']
- extras = ['hello']
- expected = {'CMM_TRANSPXS', 'CMM_TRANSPXS_X', 'CMM_TRANSPXS_Y',
- 'CMM_TRANSPXS_Z', 'CMM_DIFFCOEF', 'CMM_DIFFCOEF_X',
- 'CMM_DIFFCOEF_Y', 'CMM_DIFFCOEF_Z', 'hello',
- 'TOT_CPU_TIME', 'RUNNING_TIME', 'INIT_TIME', 'PROCESS_TIME',
- 'TRANSPORT_CYCLE_TIME', 'BURNUP_CYCLE_TIME',
- 'BATEMAN_SOLUTION_TIME', 'MPI_OVERHEAD_TIME',
- 'ESTIMATED_RUNNING_TIME', 'CPU_USAGE',
- 'TRANSPORT_CPU_USAGE', 'OMP_PARALLEL_FRAC'}
- with self.rc as tempRC:
- tempRC['xs.variableExtras'] = extras
- tempRC['xs.variableGroups'] = groupNames
- actual = tempRC.expandVariables()
- self.assertSetEqual(expected, actual)
-
-
-class MessagingTester(unittest.TestCase):
- """Class to test the messaging framework."""
-
- def test_futureDecorator(self):
- """Verify that the future decorator doesn't break"""
-
- @willChange('This function will be updated in the future, '
- 'but will still exist')
- def demoFuture(x, val=5):
- return x + val
-
- with warnings.catch_warnings(record=True) as record:
- self.assertEqual(7, demoFuture(2))
- self.assertEqual(7, demoFuture(2, 5))
- self.assertEquals(len(record), 2, 'Did not catch two warnings::willChange')
-
- def test_depreciatedDecorator(self):
- """Verify that the depreciated decorator doesn't break things"""
-
- @deprecated('this nonexistent function')
- def demoFunction(x, val=5):
- return x + val
-
- with warnings.catch_warnings(record=True) as record:
- self.assertEqual(7, demoFunction(2))
- self.assertEqual(7, demoFunction(2, 5))
- self.assertEquals(len(record), 2, 'Did not catch two warnings::deprecation')
-
-
-if __name__ == '__main__':
- unittest.main()
+"""Tests for the settings loaders."""
+from os.path import join
+from os import remove
+import warnings
+import unittest
+
+import yaml
+import six
+
+from serpentTools import settings
+from serpentTools.messages import deprecated, willChange
+from serpentTools.tests import TEST_ROOT
+
+
+class DefaultSettingsTester(unittest.TestCase):
+ """Class to test the functionality of the master loader."""
+
+ @classmethod
+ def setUpClass(cls):
+ cls.defaultLoader = settings.DefaultSettingsLoader()
+ cls.testSetting = 'depletion.metadataKeys'
+ cls.testSettingExpected = ['ZAI', 'NAMES', 'DAYS', 'BU']
+ cls.testSettingMethod = cls.assertListEqual
+
+ def test_getDefault(self):
+ """Verify the default settings loader properly retrives defaults."""
+ self.testSettingMethod(self._getLoaderSetting(self.testSetting),
+ self.testSettingExpected)
+
+ def test_cannotChangeDefaults(self):
+ """Verify the default settings loader is locked after creation."""
+ with self.assertRaises(KeyError):
+ self.defaultLoader['this'] = 'should fail'
+
+ def _getLoaderSetting(self, setting):
+ return self.defaultLoader[setting].default
+
+
+class RCTester(unittest.TestCase):
+ """Class to test the functionality of the scriptable settings manager."""
+
+ @classmethod
+ def setUpClass(cls):
+ cls.rc = settings.UserSettingsLoader()
+ cls.rc['depletion.metadataKeys'] = ['ZAI']
+
+ def test_failAtNonexistentSetting(self):
+ """Verify that the loader will not load a nonexistent setting."""
+ with self.assertRaises(KeyError):
+ self.rc['bad setting'] = False
+
+ def test_failAtBadSetting_options(self):
+ """Verify that the loader will raise an error for bad options."""
+ with self.assertRaises(KeyError):
+ self.rc['depletion.metadata'] = ['this should fail']
+
+ def test_failAtBadSettings_type(self):
+ """Verify that the loader will raise an error for bad type."""
+ with self.assertRaises(TypeError):
+ self.rc['depletion.processTotal'] = 'this should fail'
+
+ def test_returnReaderSettings(self):
+ """Verify the correct reader settings can be retrieved."""
+ readerName = 'depletion'
+ expected = {
+ 'metadataKeys': ['ZAI'],
+ 'materialVariables': [],
+ 'materials': [],
+ 'processTotal': True,
+ }
+ actual = self.rc.getReaderSettings(readerName)
+ self.assertDictEqual(expected, actual)
+
+ def test_readerWithUpdatedSettings(self):
+ """Verify the settings passed to the reader reflect the update."""
+ from serpentTools.parsers.depletion import DepletionReader
+ with settings.rc as tempRC:
+ tempRC['depletion.metadataKeys'] = ['ZAI']
+ reader = DepletionReader('None')
+ self.assertTrue(reader.settings['metadataKeys'] == ['ZAI'])
+
+ def test_expandExtras(self):
+ """Verify that a set of extras is given if that is the only argument."""
+ extras = ['hello', 'testing']
+ with self.rc as tempRC:
+ tempRC['xs.variableExtras'] = extras
+ expected = set(extras)
+ actual = tempRC.expandVariables()
+ self.assertSetEqual(expected, actual)
+
+ def test_fullExtend(self):
+ """Verify that the variable expansion return the correct variables"""
+ groupNames = ['times', 'diffusion']
+ extras = ['hello']
+ expected = {'CMM_TRANSPXS', 'CMM_TRANSPXS_X', 'CMM_TRANSPXS_Y',
+ 'CMM_TRANSPXS_Z', 'CMM_DIFFCOEF', 'CMM_DIFFCOEF_X',
+ 'CMM_DIFFCOEF_Y', 'CMM_DIFFCOEF_Z', 'hello',
+ 'TOT_CPU_TIME', 'RUNNING_TIME', 'INIT_TIME', 'PROCESS_TIME',
+ 'TRANSPORT_CYCLE_TIME', 'BURNUP_CYCLE_TIME',
+ 'BATEMAN_SOLUTION_TIME', 'MPI_OVERHEAD_TIME',
+ 'ESTIMATED_RUNNING_TIME', 'CPU_USAGE',
+ 'TRANSPORT_CPU_USAGE', 'OMP_PARALLEL_FRAC'}
+ with self.rc as tempRC:
+ tempRC['xs.variableExtras'] = extras
+ tempRC['xs.variableGroups'] = groupNames
+ actual = tempRC.expandVariables()
+ self.assertSetEqual(expected, actual)
+
+
+class ConfigLoaderTester(unittest.TestCase):
+ """Class to test loading multiple setttings at once, i.e. config files"""
+
+ @classmethod
+ def setUpClass(cls):
+ cls.configSettings = {
+ 'branching.areUncsPresent': True,
+ 'branching.floatVariables': ['Bhi', 'Tlo'],
+ 'verbosity': 'warning',
+ 'depletion.materials': ['fuel*'],
+ 'depletion.materialVariables': ['ADENS', 'MDENS']
+ }
+ cls.nestedSettings = {
+ 'branching': {
+ 'areUncsPresent':
+ cls.configSettings['branching.areUncsPresent'],
+ 'floatVariables':
+ cls.configSettings['branching.floatVariables']
+ },
+ 'depletion': {
+ 'materials': cls.configSettings['depletion.materials'],
+ 'materialVariables':
+ cls.configSettings['depletion.materialVariables']
+ },
+ 'verbosity': cls.configSettings['verbosity']
+ }
+ cls.files = {'singleLevel': join(TEST_ROOT, 'singleLevelConf.yaml'),
+ 'nested': join(TEST_ROOT, 'nestedConf.yaml'),
+ 'badNested': join(TEST_ROOT, 'badNestedConf.yaml')}
+ cls.rc = settings.UserSettingsLoader()
+
+ def _writeTestRemoveConfFile(self, settings, filePath, expected, strict):
+ with open(filePath, 'w') as out:
+ yaml.dump(settings, out)
+ with self.rc:
+ self.rc.loadYaml(filePath, strict)
+ for key, value in six.iteritems(expected):
+ if isinstance(value, list):
+ self.assertListEqual(value, self.rc[key])
+ else:
+ self.assertEqual(value, self.rc[key])
+ remove(filePath)
+
+ def test_loadSingleLevelConfig(self):
+ """Test loading settings from a non-nested config file"""
+ self._writeTestRemoveConfFile(self.configSettings,
+ self.files['singleLevel'],
+self.configSettings, True)
+
+ def test_loadNestedConfig(self):
+ """Test loading settings from a nested config file"""
+ self._writeTestRemoveConfFile(self.nestedSettings, self.files['nested'],
+ self.configSettings, True)
+
+ def test_loadNestedNonStrict(self):
+ """Test loading settings with errors but non-strict error handling"""
+ badSettings = {'bad setting': False}
+ badSettings.update(self.nestedSettings)
+ self._writeTestRemoveConfFile(badSettings, self.files['nested'],
+ self.configSettings, False)
+
+
+class MessagingTester(unittest.TestCase):
+ """Class to test the messaging framework."""
+
+ def test_futureDecorator(self):
+ """Verify that the future decorator doesn't break"""
+
+ @willChange('This function will be updated in the future, '
+ 'but will still exist')
+ def demoFuture(x, val=5):
+ return x + val
+
+ with warnings.catch_warnings(record=True) as record:
+ self.assertEqual(7, demoFuture(2))
+ self.assertEqual(7, demoFuture(2, 5))
+ self.assertEquals(len(record), 2,
+ 'Did not catch two warnings::willChange')
+
+ def test_depreciatedDecorator(self):
+ """Verify that the depreciated decorator doesn't break things"""
+
+ @deprecated('this nonexistent function')
+ def demoFunction(x, val=5):
+ return x + val
+
+ with warnings.catch_warnings(record=True) as record:
+ self.assertEqual(7, demoFunction(2))
+ self.assertEqual(7, demoFunction(2, 5))
+ self.assertEquals(len(record), 2,
+ 'Did not catch two warnings::deprecation')
+
+
+if __name__ == '__main__':
+ unittest.main()
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_added_files",
"has_removed_files",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 8
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "numpy>=1.11.1 matplotlib>=1.5.0 pyyaml>=3.08 scipy six",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
cycler @ file:///tmp/build/80754af9/cycler_1637851556182/work
importlib-metadata==4.8.3
iniconfig==1.1.1
kiwisolver @ file:///tmp/build/80754af9/kiwisolver_1612282412546/work
matplotlib @ file:///tmp/build/80754af9/matplotlib-suite_1613407855456/work
numpy @ file:///tmp/build/80754af9/numpy_and_numpy_base_1603483703303/work
olefile @ file:///Users/ktietz/demo/mc3/conda-bld/olefile_1629805411829/work
packaging==21.3
Pillow @ file:///tmp/build/80754af9/pillow_1625670622947/work
pluggy==1.0.0
py==1.11.0
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==7.0.1
python-dateutil @ file:///tmp/build/80754af9/python-dateutil_1626374649649/work
PyYAML==5.4.1
scipy @ file:///tmp/build/80754af9/scipy_1597686635649/work
-e git+https://github.com/CORE-GATECH-GROUP/serpent-tools.git@efd13f38b728415f603bef6ab68ae3afd4694983#egg=serpentTools
six @ file:///tmp/build/80754af9/six_1644875935023/work
tomli==1.2.3
tornado @ file:///tmp/build/80754af9/tornado_1606942266872/work
typing_extensions==4.1.1
zipp==3.6.0
| name: serpent-tools
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- blas=1.0=openblas
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- cycler=0.11.0=pyhd3eb1b0_0
- dbus=1.13.18=hb2f20db_0
- expat=2.6.4=h6a678d5_0
- fontconfig=2.14.1=h52c9d5c_1
- freetype=2.12.1=h4a9f257_0
- giflib=5.2.2=h5eee18b_0
- glib=2.69.1=h4ff587b_1
- gst-plugins-base=1.14.1=h6a678d5_1
- gstreamer=1.14.1=h5eee18b_1
- icu=58.2=he6710b0_3
- jpeg=9e=h5eee18b_3
- kiwisolver=1.3.1=py36h2531618_0
- lcms2=2.16=hb9589c4_0
- ld_impl_linux-64=2.40=h12ee557_0
- lerc=4.0.0=h6a678d5_0
- libdeflate=1.22=h5eee18b_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgfortran-ng=7.5.0=ha8ba4b0_17
- libgfortran4=7.5.0=ha8ba4b0_17
- libgomp=11.2.0=h1234567_1
- libopenblas=0.3.18=hf726d26_0
- libpng=1.6.39=h5eee18b_0
- libstdcxx-ng=11.2.0=h1234567_1
- libtiff=4.5.1=hffd6297_1
- libuuid=1.41.5=h5eee18b_0
- libwebp=1.2.4=h11a3e52_1
- libwebp-base=1.2.4=h5eee18b_1
- libxcb=1.15=h7f8727e_0
- libxml2=2.9.14=h74e7548_0
- lz4-c=1.9.4=h6a678d5_1
- matplotlib=3.3.4=py36h06a4308_0
- matplotlib-base=3.3.4=py36h62a2d02_0
- ncurses=6.4=h6a678d5_0
- numpy=1.19.2=py36h6163131_0
- numpy-base=1.19.2=py36h75fe3a5_0
- olefile=0.46=pyhd3eb1b0_0
- openssl=1.1.1w=h7f8727e_0
- pcre=8.45=h295c915_0
- pillow=8.3.1=py36h5aabda8_0
- pip=21.2.2=py36h06a4308_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pyqt=5.9.2=py36h05f1152_2
- python=3.6.13=h12debd9_1
- python-dateutil=2.8.2=pyhd3eb1b0_0
- pyyaml=5.4.1=py36h27cfd23_1
- qt=5.9.7=h5867ecd_1
- readline=8.2=h5eee18b_0
- scipy=1.5.2=py36habc2bb6_0
- setuptools=58.0.4=py36h06a4308_0
- sip=4.19.8=py36hf484d3e_0
- six=1.16.0=pyhd3eb1b0_1
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tornado=6.1=py36h27cfd23_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- yaml=0.2.5=h7b6447c_0
- zlib=1.2.13=h5eee18b_1
- zstd=1.5.6=hc292b87_0
- pip:
- attrs==22.2.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pytest==7.0.1
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/serpent-tools
| [
"serpentTools/tests/test_settings.py::ConfigLoaderTester::test_loadNestedConfig",
"serpentTools/tests/test_settings.py::ConfigLoaderTester::test_loadNestedNonStrict",
"serpentTools/tests/test_settings.py::ConfigLoaderTester::test_loadSingleLevelConfig"
]
| []
| [
"serpentTools/tests/test_settings.py::DefaultSettingsTester::test_cannotChangeDefaults",
"serpentTools/tests/test_settings.py::DefaultSettingsTester::test_getDefault",
"serpentTools/tests/test_settings.py::RCTester::test_expandExtras",
"serpentTools/tests/test_settings.py::RCTester::test_failAtBadSetting_options",
"serpentTools/tests/test_settings.py::RCTester::test_failAtBadSettings_type",
"serpentTools/tests/test_settings.py::RCTester::test_failAtNonexistentSetting",
"serpentTools/tests/test_settings.py::RCTester::test_fullExtend",
"serpentTools/tests/test_settings.py::RCTester::test_readerWithUpdatedSettings",
"serpentTools/tests/test_settings.py::RCTester::test_returnReaderSettings",
"serpentTools/tests/test_settings.py::MessagingTester::test_depreciatedDecorator",
"serpentTools/tests/test_settings.py::MessagingTester::test_futureDecorator"
]
| []
| MIT License | 1,970 | [
"README.rst",
"docs/conf.py",
"docs/examples/Settings.rst",
"serpentTools/settings.py",
"docs/welcome/about.rst",
"examples/Settings.ipynb",
"docs/contributing/index.rst",
"docs/api/index.rst",
"docs/about.rst",
"examples/myConfig.yaml"
]
| [
"README.rst",
"docs/conf.py",
"docs/examples/Settings.rst",
"serpentTools/settings.py",
"docs/welcome/about.rst",
"examples/Settings.ipynb",
"docs/contributing/index.rst",
"docs/api/index.rst",
"docs/about.rst",
"examples/myConfig.yaml"
]
|
|
paris-saclay-cds__specio-34 | 0aacacf20cdd8fe979eab7279ea415d996669630 | 2017-12-19 17:09:01 | 0aacacf20cdd8fe979eab7279ea415d996669630 | diff --git a/doc/api.rst b/doc/api.rst
index 4b46ff1..57f9783 100644
--- a/doc/api.rst
+++ b/doc/api.rst
@@ -50,7 +50,7 @@ from the class :class:`specio.core.Spectrum`. This class is composed of three
attributes:
* a 1D ndarray of shape (n_wavelength,) or 2D ndarray of shape
- (n_spectra, n_wavelength) ``spectrum`` containing the counts/amplitude for
+ (n_spectra, n_wavelength) ``amplitudes`` containing the counts/amplitude for
the different wavelengths;
* a 1D ndarray of shape (n_wavelength,) ``wavelength`` containing the
wavelength for which the spectra have been acquired;
diff --git a/examples/plot_spectrum_usage.py b/examples/plot_spectrum_usage.py
index 747cc99..aaca836 100644
--- a/examples/plot_spectrum_usage.py
+++ b/examples/plot_spectrum_usage.py
@@ -26,7 +26,7 @@ print(__doc__)
# information:
#
# * a 1D ndarray of shape (n_wavelength,) or 2D ndarray of shape
-# (n_spectra, n_wavelength) ``spectrum``;
+# (n_spectra, n_wavelength) ``amplitudes``;
# * a 1D ndarray of shape (n_wavelength,) ``wavelength``;
# * a dict ``meta``.
#
diff --git a/examples/reader/plot_read_fsm.py b/examples/reader/plot_read_fsm.py
index d41aa2f..f50ccd6 100644
--- a/examples/reader/plot_read_fsm.py
+++ b/examples/reader/plot_read_fsm.py
@@ -28,5 +28,5 @@ spectra = specread(fsm_filename)
# Plot the first spectra
plt.plot(spectra.wavelength,
- spectra.spectrum[0])
+ spectra.amplitudes[0])
plt.show()
diff --git a/examples/reader/plot_read_multiple_files.py b/examples/reader/plot_read_multiple_files.py
index f144611..32bfadd 100644
--- a/examples/reader/plot_read_multiple_files.py
+++ b/examples/reader/plot_read_multiple_files.py
@@ -33,7 +33,7 @@ spectra = specread(spc_filenames)
# Plot the first spectra
plt.plot(spectra.wavelength,
- spectra.spectrum.T)
+ spectra.amplitudes.T)
# We get the axis information by using the meta data of the first file read.
plt.xlabel(spectra.meta[0]['xlabel'])
diff --git a/examples/reader/plot_read_spc.py b/examples/reader/plot_read_spc.py
index ab2ec09..6ab1fd2 100644
--- a/examples/reader/plot_read_spc.py
+++ b/examples/reader/plot_read_spc.py
@@ -28,7 +28,7 @@ spectra = specread(spc_filename)
# Plot the first spectra
plt.plot(spectra.wavelength,
- spectra.spectrum)
+ spectra.amplitudes)
plt.xlabel(spectra.meta['xlabel'])
plt.ylabel(spectra.meta['ylabel'])
plt.show()
diff --git a/specio/core/format.py b/specio/core/format.py
index 6ff8db1..91f55fc 100644
--- a/specio/core/format.py
+++ b/specio/core/format.py
@@ -396,8 +396,8 @@ class Format(object):
"""
if isinstance(self._data, Spectrum):
- if index is not None and self._data.spectrum.ndim == 2:
- return Spectrum(self._data.spectrum[index],
+ if index is not None and self._data.amplitudes.ndim == 2:
+ return Spectrum(self._data.amplitudes[index],
self._data.wavelength,
self._data.meta)
else:
diff --git a/specio/core/functions.py b/specio/core/functions.py
index a71f255..de6523b 100644
--- a/specio/core/functions.py
+++ b/specio/core/functions.py
@@ -159,7 +159,7 @@ def specread(uri, format=None, **kwargs):
:class:`specio.core.Spectrum`.
A :class:`specio.core.Spectrum` contains:
* a 1D ndarray of shape (n_wavelength,) or 2D ndarray of shape
- (n_spectra, n_wavelength) ``spectrum``;
+ (n_spectra, n_wavelength) ``amplitudes``;
* a 1D ndarray of shape (n_wavelength,) ``wavelength``;
* a dict ``meta``.
@@ -196,7 +196,7 @@ def specread(uri, format=None, **kwargs):
return spectrum
else:
- spectrum_2d, meta_2d = zip(*[(sp.spectrum, sp.meta)
+ spectrum_2d, meta_2d = zip(*[(sp.amplitudes, sp.meta)
for sp in spectrum])
return Spectrum(np.vstack(spectrum_2d),
wavelength,
diff --git a/specio/core/util.py b/specio/core/util.py
index ecaabb5..42a4ca3 100644
--- a/specio/core/util.py
+++ b/specio/core/util.py
@@ -61,8 +61,8 @@ class Spectrum(object):
Parameters
----------
- spectrum : ndarray, shape (n_wavelength) or (n_spectra, n_wavelength)
- The spectrum read.
+ amplitudes : ndarray, shape (n_wavelength) or (n_spectra, n_wavelength)
+ The amplitudes or counts.
wavelength : ndarray, shape (n_wavelength,)
The corresponding wavelength.
@@ -77,41 +77,41 @@ class Spectrum(object):
"""
@staticmethod
- def _validate_spectrum_wavelength(spectrum, wavelength):
+ def _validate_amplitudes_wavelength(amplitudes, wavelength):
msg = ("The number of frequencies in wavelength and spectra are"
- " not equal. Wavelength: {} - Spectrum: {}.")
- if wavelength.ndim == 1 and spectrum.ndim == 2:
- if wavelength.shape[0] != spectrum.shape[1]:
+ " not equal. Wavelength: {} - Amplitudes: {}.")
+ if wavelength.ndim == 1 and amplitudes.ndim == 2:
+ if wavelength.shape[0] != amplitudes.shape[1]:
raise ValueError(msg.format(wavelength.shape[0],
- spectrum.shape[1]))
- elif wavelength.ndim == 1 and spectrum.ndim == 1:
- if wavelength.size != spectrum.size:
+ amplitudes.shape[1]))
+ elif wavelength.ndim == 1 and amplitudes.ndim == 1:
+ if wavelength.size != amplitudes.size:
raise ValueError(msg.format(wavelength.size,
- spectrum.size))
+ amplitudes.size))
else:
- raise ValueError("The dimension of wavelength and spectrum are"
+ raise ValueError("The dimension of wavelength and amplitudes are"
" incorrect. They need to be 1-D or 2-D."
- " Wavelength {} - Spectrum: {}".format(
- wavelength.shape, spectrum.shape))
+ " Wavelength {} - Amplitudes: {}".format(
+ wavelength.shape, amplitudes.shape))
- return spectrum, wavelength
+ return amplitudes, wavelength
- def __init__(self, spectrum, wavelength, meta=None):
- self.spectrum, self.wavelength = self._validate_spectrum_wavelength(
- spectrum, wavelength)
+ def __init__(self, amplitudes, wavelength, meta=None):
+ self.amplitudes, self.wavelength = \
+ self._validate_amplitudes_wavelength(amplitudes, wavelength)
self.meta = meta if meta is not None else {}
def __len__(self):
- if self.spectrum.ndim == 1:
+ if self.amplitudes.ndim == 1:
return 1
else:
- return self.spectrum.shape[0]
+ return self.amplitudes.shape[0]
def __repr__(self):
msg = ("Spectrum: \n"
"wavelength:\n {} \n"
- "spectra: \n {} \n"
+ "amplitudes: \n {} \n"
"metadata: \n {} \n".format(self.wavelength,
- self.spectrum,
+ self.amplitudes,
self.meta))
return msg
diff --git a/specio/plugins/fsm.py b/specio/plugins/fsm.py
index fd041ab..4e518e3 100644
--- a/specio/plugins/fsm.py
+++ b/specio/plugins/fsm.py
@@ -195,7 +195,7 @@ class FSM(Format):
>>> spectra = specread(load_fsm_path())
>>> spectra.wavelength
array([ 4000., 3998., 3996., ..., 724., 722., 720.])
- >>> spectra.spectrum # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
+ >>> spectra.amplitudes # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
array([[ 38.65655136, 38.6666069 , 38.64698792, ..., 39.89584732,
29.76511383, 28.13317108],
[ 44.61751175, 44.51957703, 44.59909439, ..., 27.84810638,
@@ -284,7 +284,7 @@ class FSM(Format):
def _open(self):
self._fp = self.request.get_file()
self._data = self._read_fsm(self._fp)
- self._length = self._data.spectrum.shape[0]
+ self._length = self._data.amplitudes.shape[0]
def _close(self):
# Close the reader.
diff --git a/specio/plugins/spc.py b/specio/plugins/spc.py
index ce535dd..2a33e3f 100644
--- a/specio/plugins/spc.py
+++ b/specio/plugins/spc.py
@@ -46,7 +46,7 @@ class SPC(Format):
>>> spectra.wavelength
array([ 400.62109375, 402.94384766, 405.26721191, ..., 3797.08911133,
3798.45288086, 3799.81542969])
- >>> spectra.spectrum
+ >>> spectra.amplitudes
array([ 1487. , 1385. , 1441. , ..., 147.24124146,
139.16082764, 134.08041382])
| Rename specio.Spectrum.spectrum to specio.Spectrum.amplitude | paris-saclay-cds/specio | diff --git a/specio/core/tests/test_format.py b/specio/core/tests/test_format.py
index 005bad0..b7650c7 100644
--- a/specio/core/tests/test_format.py
+++ b/specio/core/tests/test_format.py
@@ -127,15 +127,15 @@ def test_reader():
specs = [spec for spec in R]
assert len(specs) == n
for i in range(3):
- assert specs[i].spectrum[0, 0] == i
+ assert specs[i].amplitudes[0, 0] == i
assert specs[i].wavelength[0] == i
assert specs[i].meta['index'] == i
for i in range(3):
assert R.get_meta_data(i)['index'] == i
# Read next
- assert R.get_data(0).spectrum[0, 0] == 0
- assert R.get_next_data().spectrum[0, 0] == 1
- assert R.get_next_data().spectrum[0, 0] == 2
+ assert R.get_data(0).amplitudes[0, 0] == 0
+ assert R.get_next_data().amplitudes[0, 0] == 1
+ assert R.get_next_data().amplitudes[0, 0] == 2
# Fail
R._failmode = 1
with pytest.raises(ValueError, message="Meta data be a dict"):
diff --git a/specio/core/tests/test_functions.py b/specio/core/tests/test_functions.py
index 90884b3..0cfd489 100644
--- a/specio/core/tests/test_functions.py
+++ b/specio/core/tests/test_functions.py
@@ -47,9 +47,9 @@ def test_specread_single_file():
filename = join(DATA_PATH, 'data', 'spectra.foobar')
spec1 = specread(filename)
spec2 = specread(filename, 'foobar')
- assert spec1.spectrum.shape == (1, 801)
+ assert spec1.amplitudes.shape == (1, 801)
assert spec1.wavelength.shape == (801,)
- assert_allclose(spec1.spectrum, spec2.spectrum)
+ assert_allclose(spec1.amplitudes, spec2.amplitudes)
assert_allclose(spec1.wavelength, spec2.wavelength)
@@ -106,7 +106,7 @@ def test_specread_consitent_wavelength(side_effect, spectra_type,
spectra = specread('')
assert isinstance(spectra, spectra_type)
if isinstance(spectra, Spectrum):
- assert spectra.spectrum.shape == spectra_shape
+ assert spectra.amplitudes.shape == spectra_shape
assert spectra.wavelength.shape == (spectra_shape[1],)
assert spectra.meta == tuple({} for _ in range(spectra_shape[0]))
elif isinstance(spectra, list):
diff --git a/specio/core/tests/test_util.py b/specio/core/tests/test_util.py
index 287cfdf..eb34b86 100644
--- a/specio/core/tests/test_util.py
+++ b/specio/core/tests/test_util.py
@@ -29,7 +29,7 @@ def test_spectrum_error(spectrum, wavelength, msg):
(np.ones((10,)), np.ones((10,)), {'kind': 'random'})])
def test_spectrum(spectrum, wavelength, metadata):
spec = Spectrum(spectrum, wavelength, metadata)
- assert_allclose(spec.spectrum, spectrum)
+ assert_allclose(spec.amplitudes, spectrum)
assert_allclose(spec.wavelength, wavelength)
assert spec.meta == {'kind': 'random'}
diff --git a/specio/plugins/tests/test_common.py b/specio/plugins/tests/test_common.py
index bab8387..1882966 100644
--- a/specio/plugins/tests/test_common.py
+++ b/specio/plugins/tests/test_common.py
@@ -21,6 +21,6 @@ from specio.datasets import load_spc_path
def test_toy_data(filename, spectrum_shape, wavelength_shape):
spec = specread(filename)
assert isinstance(spec, Spectrum)
- assert spec.spectrum.shape == spectrum_shape
+ assert spec.amplitudes.shape == spectrum_shape
assert spec.wavelength.shape == wavelength_shape
assert spec.meta['filename'] == basename(filename)
diff --git a/specio/plugins/tests/test_example.py b/specio/plugins/tests/test_example.py
index 6e5cb29..801c54f 100644
--- a/specio/plugins/tests/test_example.py
+++ b/specio/plugins/tests/test_example.py
@@ -22,5 +22,5 @@ def test_dummy_format():
assert reader.get_length() == 1
assert reader.get_meta_data() == {}
spec = reader.get_data(0)
- assert spec.spectrum.shape == (1, 801)
+ assert spec.amplitudes.shape == (1, 801)
assert spec.wavelength.shape == (801,)
diff --git a/specio/plugins/tests/test_fsm.py b/specio/plugins/tests/test_fsm.py
index b5fa2d2..13b8898 100644
--- a/specio/plugins/tests/test_fsm.py
+++ b/specio/plugins/tests/test_fsm.py
@@ -22,9 +22,9 @@ def test_fsm_format():
assert reader.get_length() == 7998
assert reader.get_meta_data()['signature'] == b'PEPE'
spec = reader.get_data()
- assert spec.spectrum.shape == (7998, 1641)
+ assert spec.amplitudes.shape == (7998, 1641)
assert spec.wavelength.shape == (1641,)
spec = reader.get_data(0)
- assert spec.spectrum.shape == (1641,)
+ assert spec.amplitudes.shape == (1641,)
assert spec.wavelength.shape == (1641,)
- assert spec.spectrum[0] == pytest.approx(38.656551)
+ assert spec.amplitudes[0] == pytest.approx(38.656551)
diff --git a/specio/plugins/tests/test_spc.py b/specio/plugins/tests/test_spc.py
index ff96bf0..f872ebc 100644
--- a/specio/plugins/tests/test_spc.py
+++ b/specio/plugins/tests/test_spc.py
@@ -27,10 +27,10 @@ def test_spc_format():
assert reader.get_length() == 1
assert reader.get_meta_data()['dat_fmt'] == 'x-y'
spec = reader.get_data()
- assert spec.spectrum.shape == (1911,)
+ assert spec.amplitudes.shape == (1911,)
assert spec.wavelength.shape == (1911,)
spec = reader.get_data(0)
- assert spec.spectrum.shape == (1911,)
+ assert spec.amplitudes.shape == (1911,)
assert spec.wavelength.shape == (1911,)
@@ -48,8 +48,8 @@ def test_spc_file(filename, spectrum_shape, wavelength_shape):
# in '-xy.spc', there is two different wavelength size: we are checking
# each of them
for wi in range(1):
- assert spec[wi].spectrum.shape == spectrum_shape[wi]
+ assert spec[wi].amplitudes.shape == spectrum_shape[wi]
assert spec[wi].wavelength.shape == wavelength_shape[wi]
else:
- assert spec.spectrum.shape == spectrum_shape
+ assert spec.amplitudes.shape == spectrum_shape
assert spec.wavelength.shape == wavelength_shape
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 10
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[tests,docs]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
cycler==0.11.0
docutils==0.18.1
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
Jinja2==3.0.3
kiwisolver==1.3.1
MarkupSafe==2.0.1
matplotlib==3.3.4
numpy==1.19.5
numpydoc==1.1.0
packaging==21.3
Pillow==8.4.0
pluggy==1.0.0
py==1.11.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
pytest-mock==3.6.1
python-dateutil==2.9.0.post0
pytz==2025.2
requests==2.27.1
six==1.17.0
snowballstemmer==2.2.0
spc @ git+https://github.com/glemaitre/spc.git@44b67d49e1e4fe9364e7cbce9a93086037703511
-e git+https://github.com/paris-saclay-cds/specio.git@0aacacf20cdd8fe979eab7279ea415d996669630#egg=specio
Sphinx==5.3.0
sphinx-gallery==0.10.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: specio
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- coverage==6.2
- cycler==0.11.0
- docutils==0.18.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jinja2==3.0.3
- kiwisolver==1.3.1
- markupsafe==2.0.1
- matplotlib==3.3.4
- numpy==1.19.5
- numpydoc==1.1.0
- packaging==21.3
- pillow==8.4.0
- pluggy==1.0.0
- py==1.11.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- requests==2.27.1
- six==1.17.0
- snowballstemmer==2.2.0
- spc==0.4.0
- sphinx==5.3.0
- sphinx-gallery==0.10.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/specio
| [
"specio/core/tests/test_functions.py::test_specread_single_file",
"specio/core/tests/test_functions.py::test_specread_consitent_wavelength[_generate_spectrum_identical_wavelength-Spectrum-spectra_shape0]",
"specio/core/tests/test_util.py::test_spectrum[spectrum0-wavelength0-metadata0]",
"specio/core/tests/test_util.py::test_spectrum[spectrum1-wavelength1-metadata1]",
"specio/plugins/tests/test_common.py::test_toy_data[/specio/specio/datasets/data/spectra.fsm-spectrum_shape0-wavelength_shape0]",
"specio/plugins/tests/test_common.py::test_toy_data[/specio/specio/datasets/data/spectra.spc-spectrum_shape1-wavelength_shape1]",
"specio/plugins/tests/test_example.py::test_dummy_format",
"specio/plugins/tests/test_fsm.py::test_fsm_format",
"specio/plugins/tests/test_spc.py::test_spc_format",
"specio/plugins/tests/test_spc.py::test_spc_file[/specio/specio/plugins/tests/data/spc/single_file/gxy.spc-spectrum_shape0-wavelength_shape0]",
"specio/plugins/tests/test_spc.py::test_spc_file[/specio/specio/plugins/tests/data/spc/single_file/x-y.spc-spectrum_shape1-wavelength_shape1]",
"specio/plugins/tests/test_spc.py::test_spc_file[/specio/specio/plugins/tests/data/spc/single_file/-xy.spc-spectrum_shape2-wavelength_shape2]"
]
| [
"specio/core/tests/test_format.py::test_format",
"specio/core/tests/test_format.py::test_format_context_manager",
"specio/core/tests/test_format.py::test_reader",
"specio/core/tests/test_format.py::test_format_manager",
"specio/core/tests/test_format.py::test_sorting_errors[TypeError-accepts",
"specio/core/tests/test_format.py::test_sorting_errors[ValueError-should",
"specio/core/tests/test_functions.py::test_get_reader_error[ValueError-Could",
"specio/core/tests/test_functions.py::test_get_reader_error[OSError-No",
"specio/core/tests/test_functions.py::test_get_reader_error[IndexError-No",
"specio/core/tests/test_util.py::test_spectrum_error[spectrum0-wavelength0-1-D",
"specio/core/tests/test_util.py::test_spectrum_error[spectrum1-wavelength1-1-D",
"specio/core/tests/test_util.py::test_spectrum_error[spectrum2-wavelength2-The",
"specio/core/tests/test_util.py::test_spectrum_error[spectrum3-wavelength3-The"
]
| [
"specio/core/tests/test_format.py::test_format_subclass",
"specio/core/tests/test_format.py::test_default_can_read",
"specio/core/tests/test_format.py::test_format_selection",
"specio/core/tests/test_functions.py::test_help",
"specio/core/tests/test_functions.py::test_get_reader",
"specio/core/tests/test_functions.py::test_specread_consitent_wavelength[_generate_spectrum_different_wavelength_size-list-10]",
"specio/core/tests/test_functions.py::test_specread_consitent_wavelength[_generate_spectrum_different_wavelength-list-10]",
"specio/core/tests/test_functions.py::test_specread_consitent_wavelength[_generate_list_spectrum-list-30]",
"specio/core/tests/test_util.py::test_util_dict"
]
| []
| BSD 3-Clause "New" or "Revised" License | 1,971 | [
"examples/reader/plot_read_spc.py",
"examples/reader/plot_read_multiple_files.py",
"specio/plugins/spc.py",
"examples/reader/plot_read_fsm.py",
"specio/plugins/fsm.py",
"specio/core/format.py",
"specio/core/functions.py",
"examples/plot_spectrum_usage.py",
"specio/core/util.py",
"doc/api.rst"
]
| [
"examples/reader/plot_read_spc.py",
"examples/reader/plot_read_multiple_files.py",
"specio/plugins/spc.py",
"examples/reader/plot_read_fsm.py",
"specio/plugins/fsm.py",
"specio/core/format.py",
"specio/core/functions.py",
"examples/plot_spectrum_usage.py",
"specio/core/util.py",
"doc/api.rst"
]
|
|
zopefoundation__zope.publisher-27 | dc4f23e4b71835bdc858c338a1ca230cf4178feb | 2017-12-21 11:51:01 | dc4f23e4b71835bdc858c338a1ca230cf4178feb | diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 0000000..f6c19d2
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,13 @@
+[run]
+source = zope.publisher
+omit =
+ */flycheck_*py
+
+[report]
+precision = 2
+exclude_lines =
+ pragma: no cover
+ if __name__ == '__main__':
+ raise NotImplementedError
+ self.fail
+ raise AssertionError
diff --git a/.gitignore b/.gitignore
index bb7ceb7..4047009 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,3 +7,5 @@ dist
*.pyc
.dir-locals.el
docs/_build
+.coverage
+htmlcov/
diff --git a/CHANGES.rst b/CHANGES.rst
index 05ed569..372f8d2 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -13,6 +13,10 @@
- Drop support for Python 3.3.
+- Fix ``XMLRPCResponse`` having a str body (instead of a bytes body)
+ which could lead to ``TypeError`` on Python 3. See `issue 26
+ <https://github.com/zopefoundation/zope.publisher/issues/26>`_.
+
4.3.2 (2017-05-23)
==================
diff --git a/MANIFEST.in b/MANIFEST.in
index a98d6ea..73e054e 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,6 +1,8 @@
include *.rst
include *.txt
include tox.ini
+include .travis.yml
+include .coveragerc
include bootstrap.py
include buildout.cfg
diff --git a/src/zope/publisher/xmlrpc.py b/src/zope/publisher/xmlrpc.py
index ed4bd9c..2f15c9b 100644
--- a/src/zope/publisher/xmlrpc.py
+++ b/src/zope/publisher/xmlrpc.py
@@ -121,6 +121,12 @@ class XMLRPCResponse(HTTPResponse):
self.handleException(sys.exc_info())
return
+ # HTTP response payloads are byte strings, and methods like
+ # consumeBody rely on that, but xmlrpc.client.dumps produces
+ # native strings, which is incorrect on Python 3.
+ if not isinstance(body, bytes):
+ body = body.encode('utf-8')
+
headers = [('content-type', 'text/xml;charset=utf-8'),
('content-length', str(len(body)))]
self._headers.update(dict((k, [v]) for (k, v) in headers))
@@ -172,7 +178,7 @@ class XMLRPCResponse(HTTPResponse):
@implementer(IXMLRPCView)
class XMLRPCView(object):
- """A base XML-RPC view that can be used as mix-in for XML-RPC views."""
+ """A base XML-RPC view that can be used as mix-in for XML-RPC views."""
def __init__(self, context, request):
self.context = context
diff --git a/tox.ini b/tox.ini
index 379bf98..850f194 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,6 +1,6 @@
[tox]
envlist =
- py27,py34,py35,py36,pypy,pypy3,docs
+ py27,py34,py35,py36,pypy,pypy3,docs,coverage
[testenv]
commands =
@@ -18,20 +18,15 @@ setenv =
LC_CTYPE=en_US.UTF-8
[testenv:coverage]
+usedevelop = true
basepython =
- python2.7
+ python3.6
commands =
-# The installed version messes up nose's test discovery / coverage reporting
-# So, we uninstall that from the environment, and then install the editable
-# version, before running nosetests.
- pip uninstall -y zope.publisher
- pip install -e .[test]
- nosetests --with-xunit --with-xcoverage
+ coverage run -m zope.testrunner --test-path=src []
+ coverage report --fail-under=92
deps =
- nose
+ {[testenv]deps}
coverage
- nosexcover
- .[test]
[testenv:docs]
| XMLRPCResponse.consumeBody produces `TypeError` on Python 3
Discovered when updating [zope.app.testing.functional](https://github.com/zopefoundation/zope.app.testing/blob/5208545dc52fb0f5699b608fd0cc17c5fb91d55e/src/zope/app/testing/functional.py#L89) for Python 3:
```python
try:
b = self._response.consumeBody()
except TypeError:
from zope.publisher.http import DirectResult
from zope.publisher.xmlrpc import XMLRPCResponse
if (isinstance(self._response, XMLRPCResponse)
and isinstance(getattr(self._response, '_result', None), DirectResult)):
# Somewhere in the publisher we're getting a DirectResult
# whose body is a sequence of strings, but we're expecting
# bytes
b = ''.join(self._response._result.body)
```
I've tracked that "somewhere" down to `XMLRPCResponse.setResult`.
[HTTPResponse.consumeBody](https://github.com/zopefoundation/zope.publisher/blob/master/src/zope/publisher/http.py#L805) simply does `b''.join(self._result)`. When `self._result` is a `DirectResult`, that will call its [`__iter__` method](https://github.com/zopefoundation/zope.publisher/blob/master/src/zope/publisher/http.py#L1072):
```python
def __iter__(self):
if isinstance(self.body, bytes):
return iter([self.body])
return iter(self.body)
```
In the case of an XMLRPCResponse, the `DirectResult` is instantiated with `(body,)`, where `body` is the result of [xmlrpc.client.dumps](https://github.com/zopefoundation/zope.publisher/blob/master/src/zope/publisher/xmlrpc.py#L111). This is incredibly [poorly documented on the web](https://docs.python.org/3/library/xmlrpc.client.html#xmlrpc.client.dumps), but [the source reveals](https://github.com/python/cpython/blob/3.6/Lib/xmlrpc/client.py#L998) that `dumps` returns a native string, i.e., unicode on Python 3.
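As a quick illustration (the XML string below is a hypothetical stand-in for the `dumps` output; only its type matters):
```python
# Minimal sketch of the failure: iterating a native str yields str items,
# and joining those with a bytes separator raises TypeError on Python 3.
body = '<methodResponse>...</methodResponse>'  # hypothetical dumps() output

try:
    b''.join(iter(body))  # effectively what consumeBody ends up doing
except TypeError as exc:
    print(exc)  # sequence item 0: expected a bytes-like object, str found

print(b''.join(iter([body.encode('utf-8')])))  # encoding first works
```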
So `consumeBody` winds up doing `b''.join(iter(u'some unicode'))` which as you can imagine fails. | zopefoundation/zope.publisher | diff --git a/src/zope/publisher/tests/test_xmlrpc.py b/src/zope/publisher/tests/test_xmlrpc.py
index 71030a1..12a54fe 100644
--- a/src/zope/publisher/tests/test_xmlrpc.py
+++ b/src/zope/publisher/tests/test_xmlrpc.py
@@ -13,18 +13,30 @@
##############################################################################
"""Testing the XML-RPC Publisher code.
"""
-import sys
import doctest
+import unittest
+
import zope.component.testing
from zope.publisher import xmlrpc
from zope.security.checker import defineChecker, Checker, CheckerPublic
-if sys.version_info[0] == 2:
+try:
import xmlrpclib
-else:
+except ImportError:
import xmlrpc.client as xmlrpclib
-def setUp(test):
+class TestXMLRPCResponse(unittest.TestCase):
+
+ def testConsumeBody(self):
+ response = xmlrpc.XMLRPCResponse()
+ response.setResult(['hi'])
+
+ body = response.consumeBody()
+ self.assertIsInstance(body, bytes)
+ self.assertIn(b'<methodResponse>', body)
+
+
+def doctest_setUp(test):
zope.component.testing.setUp(test)
zope.component.provideAdapter(xmlrpc.ListPreMarshaller)
zope.component.provideAdapter(xmlrpc.TuplePreMarshaller)
@@ -45,18 +57,13 @@ def setUp(test):
Checker({'value':CheckerPublic}, {}))
def test_suite():
- return doctest.DocFileSuite(
- "xmlrpc.txt", package="zope.publisher",
- setUp=setUp, tearDown=zope.component.testing.tearDown,
- optionflags=doctest.ELLIPSIS
- )
-
-# Proper zope.component/zope.interface support requires PyPy 2.5.1+.
-# Older versions fail to hash types correctly. This manifests itself here
-# as being unable to find the marshlers registered as adapters for types
-# like 'list' and 'dict'. As of Jun 1 2015, Travis CI is still using PyPy 2.5.0.
-# All we can do is skip the test.
-if hasattr(sys, 'pypy_version_info') and sys.pypy_version_info[:3] == (2,5,0):
- import unittest
- def test_suite():
- return unittest.TestSuite(())
+ return unittest.TestSuite((
+ unittest.defaultTestLoader.loadTestsFromName(__name__),
+ doctest.DocFileSuite(
+ "xmlrpc.txt",
+ package="zope.publisher",
+ setUp=doctest_setUp,
+ tearDown=zope.component.testing.tearDown,
+ optionflags=doctest.ELLIPSIS
+ ),
+ ))
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 5
} | 4.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"doc-requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
importlib-metadata==4.8.3
iniconfig==1.1.1
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
python-gettext==4.1
pytz==2025.2
six==1.17.0
swebench-matterhorn @ file:///swebench_matterhorn
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
zope.browser==2.4
zope.component==5.1.0
zope.configuration==4.4.1
zope.contenttype==4.6
zope.deprecation==4.4.0
zope.event==4.6
zope.exceptions==4.6
zope.hookable==5.4
zope.i18n==4.9.0
zope.i18nmessageid==5.1.1
zope.interface==5.5.2
zope.location==4.3
zope.proxy==4.6.1
-e git+https://github.com/zopefoundation/zope.publisher.git@dc4f23e4b71835bdc858c338a1ca230cf4178feb#egg=zope.publisher
zope.schema==6.2.1
zope.security==5.8
zope.testing==5.0.1
zope.testrunner==5.6
| name: zope.publisher
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- python-gettext==4.1
- pytz==2025.2
- six==1.17.0
- swebench-matterhorn==0.0.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
- zope-browser==2.4
- zope-component==5.1.0
- zope-configuration==4.4.1
- zope-contenttype==4.6
- zope-deprecation==4.4.0
- zope-event==4.6
- zope-exceptions==4.6
- zope-hookable==5.4
- zope-i18n==4.9.0
- zope-i18nmessageid==5.1.1
- zope-interface==5.5.2
- zope-location==4.3
- zope-proxy==4.6.1
- zope-schema==6.2.1
- zope-security==5.8
- zope-testing==5.0.1
- zope-testrunner==5.6
prefix: /opt/conda/envs/zope.publisher
| [
"src/zope/publisher/tests/test_xmlrpc.py::TestXMLRPCResponse::testConsumeBody"
]
| []
| [
"src/zope/publisher/tests/test_xmlrpc.py::test_suite"
]
| []
| Zope Public License 2.1 | 1,974 | [
"MANIFEST.in",
".gitignore",
"tox.ini",
".coveragerc",
"CHANGES.rst",
"src/zope/publisher/xmlrpc.py"
]
| [
"MANIFEST.in",
".gitignore",
"tox.ini",
".coveragerc",
"CHANGES.rst",
"src/zope/publisher/xmlrpc.py"
]
|
|
ipython__ipython-10959 | 38e0033a6cf59136208b998c394ac5472b9c1849 | 2017-12-21 13:42:48 | cc353b25b0fff58e4ed13899df9b3c8153df01d9 | diff --git a/IPython/lib/pretty.py b/IPython/lib/pretty.py
index cbbb72600..9181113e3 100644
--- a/IPython/lib/pretty.py
+++ b/IPython/lib/pretty.py
@@ -392,6 +392,10 @@ def pretty(self, obj):
meth = cls._repr_pretty_
if callable(meth):
return meth(obj, self, cycle)
+ if cls is not object \
+ and callable(cls.__dict__.get('__repr__')):
+ return _repr_pprint(obj, self, cycle)
+
return _default_pprint(obj, self, cycle)
finally:
self.end_group()
@@ -537,17 +541,12 @@ def _default_pprint(obj, p, cycle):
p.end_group(1, '>')
-def _seq_pprinter_factory(start, end, basetype):
+def _seq_pprinter_factory(start, end):
"""
Factory that returns a pprint function useful for sequences. Used by
the default pprint for tuples, dicts, and lists.
"""
def inner(obj, p, cycle):
- typ = type(obj)
- if basetype is not None and typ is not basetype and typ.__repr__ != basetype.__repr__:
- # If the subclass provides its own repr, use it instead.
- return p.text(typ.__repr__(obj))
-
if cycle:
return p.text(start + '...' + end)
step = len(start)
@@ -564,21 +563,16 @@ def inner(obj, p, cycle):
return inner
-def _set_pprinter_factory(start, end, basetype):
+def _set_pprinter_factory(start, end):
"""
Factory that returns a pprint function useful for sets and frozensets.
"""
def inner(obj, p, cycle):
- typ = type(obj)
- if basetype is not None and typ is not basetype and typ.__repr__ != basetype.__repr__:
- # If the subclass provides its own repr, use it instead.
- return p.text(typ.__repr__(obj))
-
if cycle:
return p.text(start + '...' + end)
if len(obj) == 0:
# Special case.
- p.text(basetype.__name__ + '()')
+ p.text(type(obj).__name__ + '()')
else:
step = len(start)
p.begin_group(step, start)
@@ -596,17 +590,12 @@ def inner(obj, p, cycle):
return inner
-def _dict_pprinter_factory(start, end, basetype=None):
+def _dict_pprinter_factory(start, end):
"""
Factory that returns a pprint function used by the default pprint of
dicts and dict proxies.
"""
def inner(obj, p, cycle):
- typ = type(obj)
- if basetype is not None and typ is not basetype and typ.__repr__ != basetype.__repr__:
- # If the subclass provides its own repr, use it instead.
- return p.text(typ.__repr__(obj))
-
if cycle:
return p.text('{...}')
step = len(start)
@@ -745,12 +734,12 @@ def _exception_pprint(obj, p, cycle):
int: _repr_pprint,
float: _repr_pprint,
str: _repr_pprint,
- tuple: _seq_pprinter_factory('(', ')', tuple),
- list: _seq_pprinter_factory('[', ']', list),
- dict: _dict_pprinter_factory('{', '}', dict),
+ tuple: _seq_pprinter_factory('(', ')'),
+ list: _seq_pprinter_factory('[', ']'),
+ dict: _dict_pprinter_factory('{', '}'),
- set: _set_pprinter_factory('{', '}', set),
- frozenset: _set_pprinter_factory('frozenset({', '})', frozenset),
+ set: _set_pprinter_factory('{', '}'),
+ frozenset: _set_pprinter_factory('frozenset({', '})'),
super: _super_pprint,
_re_pattern_type: _re_pattern_pprint,
type: _type_pprint,
| OrderedDict output differs in ipython from python (and official documentation)
[collections — Container datatypes](https://docs.python.org/3/library/collections.html#collections.OrderedDict)
An ordered dictionary can be combined with the Counter class so that the counter remembers the order elements are first encountered:
```
from collections import Counter, OrderedDict
class OrderedCounter(Counter, OrderedDict):
'Counter that remembers the order elements are first encountered'
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, OrderedDict(self))
def __reduce__(self):
return self.__class__, (OrderedDict(self),)
```
This is based off a Raymond Hettinger - Super considered super! - PyCon 2015, and his blog post [Python’s super() considered super!](https://rhettinger.wordpress.com/2011/05/26/super-considered-super/).
My confusion is IPython returns a result that differs from the Python it's built on. The ordering of the output is important and the recipe in Hettinger's blog and the official documentation preserves the insertion order but IPython 6.1.0 built on Python 3.6.3 does not. However if I execute directly in Python 3.6.3 I receive the expected desired result.
`oc = OrderedCounter('abracadabra')`
output in Python 3.6.3 (and Python 2.7.14)
`OrderedCounter(OrderedDict([('a', 5), ('b', 2), ('r', 2), ('c', 1), ('d', 1)]))`
output in IPython 6.1.0 on Python 3.6.3 Anaconda
`OrderedCounter({'a': 5, 'b': 2, 'c': 1, 'd': 1, 'r': 2})` //ordering is not preserved, seems sorted by key
Would someone kindly explain why the output is different in IPython 6.1.0 | ipython/ipython | diff --git a/IPython/core/tests/test_formatters.py b/IPython/core/tests/test_formatters.py
index 35edc75d1..cde43c94a 100644
--- a/IPython/core/tests/test_formatters.py
+++ b/IPython/core/tests/test_formatters.py
@@ -49,7 +49,7 @@ def test_pretty():
f = PlainTextFormatter()
f.for_type(A, foo_printer)
nt.assert_equal(f(A()), 'foo')
- nt.assert_equal(f(B()), 'foo')
+ nt.assert_equal(f(B()), 'B()')
nt.assert_equal(f(GoodPretty()), 'foo')
# Just don't raise an exception for the following:
f(BadPretty())
diff --git a/IPython/lib/tests/test_pretty.py b/IPython/lib/tests/test_pretty.py
index 6d6574345..68e90ecae 100644
--- a/IPython/lib/tests/test_pretty.py
+++ b/IPython/lib/tests/test_pretty.py
@@ -420,4 +420,24 @@ def meaning_of_life(question=None):
return "Don't panic"
nt.assert_in('meaning_of_life(question=None)', pretty.pretty(meaning_of_life))
-
+
+
+class OrderedCounter(Counter, OrderedDict):
+ 'Counter that remembers the order elements are first encountered'
+
+ def __repr__(self):
+ return '%s(%r)' % (self.__class__.__name__, OrderedDict(self))
+
+ def __reduce__(self):
+ return self.__class__, (OrderedDict(self),)
+
+class MySet(set): # Override repr of a basic type
+ def __repr__(self):
+ return 'mine'
+
+def test_custom_repr():
+ """A custom repr should override a pretty printer for a parent type"""
+ oc = OrderedCounter("abracadabra")
+ nt.assert_in("OrderedCounter(OrderedDict", pretty.pretty(oc))
+
+ nt.assert_equal(pretty.pretty(MySet()), 'mine')
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 6.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
backcall==0.2.0
certifi==2025.1.31
charset-normalizer==3.4.1
decorator==5.2.1
exceptiongroup==1.2.2
fastjsonschema==2.21.1
idna==3.10
importlib_metadata==8.6.1
iniconfig==2.1.0
ipykernel==5.5.6
-e git+https://github.com/ipython/ipython.git@38e0033a6cf59136208b998c394ac5472b9c1849#egg=ipython
ipython-genutils==0.2.0
jedi==0.19.2
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
jupyter_client==8.6.3
jupyter_core==5.7.2
nbformat==5.10.4
nose==1.3.7
numpy==2.0.2
packaging==24.2
parso==0.8.4
pexpect==4.9.0
pickleshare==0.7.5
platformdirs==4.3.7
pluggy==1.5.0
prompt-toolkit==1.0.18
ptyprocess==0.7.0
Pygments==2.19.1
pytest==8.3.5
python-dateutil==2.9.0.post0
pyzmq==26.3.0
referencing==0.36.2
requests==2.32.3
rpds-py==0.24.0
simplegeneric==0.8.1
six==1.17.0
testpath==0.6.0
tomli==2.2.1
tornado==6.4.2
traitlets==5.14.3
typing_extensions==4.13.0
urllib3==2.3.0
wcwidth==0.2.13
zipp==3.21.0
| name: ipython
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- backcall==0.2.0
- certifi==2025.1.31
- charset-normalizer==3.4.1
- decorator==5.2.1
- exceptiongroup==1.2.2
- fastjsonschema==2.21.1
- idna==3.10
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- ipykernel==5.5.6
- ipython-genutils==0.2.0
- jedi==0.19.2
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- jupyter-client==8.6.3
- jupyter-core==5.7.2
- nbformat==5.10.4
- nose==1.3.7
- numpy==2.0.2
- packaging==24.2
- parso==0.8.4
- pexpect==4.9.0
- pickleshare==0.7.5
- platformdirs==4.3.7
- pluggy==1.5.0
- prompt-toolkit==1.0.18
- ptyprocess==0.7.0
- pygments==2.19.1
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- pyzmq==26.3.0
- referencing==0.36.2
- requests==2.32.3
- rpds-py==0.24.0
- simplegeneric==0.8.1
- six==1.17.0
- testpath==0.6.0
- tomli==2.2.1
- tornado==6.4.2
- traitlets==5.14.3
- typing-extensions==4.13.0
- urllib3==2.3.0
- wcwidth==0.2.13
- zipp==3.21.0
prefix: /opt/conda/envs/ipython
| [
"IPython/core/tests/test_formatters.py::test_pretty",
"IPython/lib/tests/test_pretty.py::test_custom_repr"
]
| [
"IPython/core/tests/test_formatters.py::test_error_method",
"IPython/core/tests/test_formatters.py::test_warn_error_for_type",
"IPython/core/tests/test_formatters.py::test_error_pretty_method",
"IPython/core/tests/test_formatters.py::test_bad_repr_traceback",
"IPython/core/tests/test_formatters.py::test_ipython_display_formatter",
"IPython/core/tests/test_formatters.py::test_repr_mime",
"IPython/core/tests/test_formatters.py::test_pass_correct_include_exclude",
"IPython/core/tests/test_formatters.py::test_repr_mime_meta",
"IPython/core/tests/test_formatters.py::test_repr_mime_failure"
]
| [
"IPython/core/tests/test_formatters.py::test_deferred",
"IPython/core/tests/test_formatters.py::test_precision",
"IPython/core/tests/test_formatters.py::test_bad_precision",
"IPython/core/tests/test_formatters.py::test_for_type",
"IPython/core/tests/test_formatters.py::test_for_type_string",
"IPython/core/tests/test_formatters.py::test_for_type_by_name",
"IPython/core/tests/test_formatters.py::test_lookup",
"IPython/core/tests/test_formatters.py::test_lookup_string",
"IPython/core/tests/test_formatters.py::test_lookup_by_type",
"IPython/core/tests/test_formatters.py::test_lookup_by_type_string",
"IPython/core/tests/test_formatters.py::test_in_formatter",
"IPython/core/tests/test_formatters.py::test_string_in_formatter",
"IPython/core/tests/test_formatters.py::test_pop",
"IPython/core/tests/test_formatters.py::test_pop_string",
"IPython/core/tests/test_formatters.py::test_nowarn_notimplemented",
"IPython/core/tests/test_formatters.py::test_pdf_formatter",
"IPython/core/tests/test_formatters.py::test_print_method_bound",
"IPython/core/tests/test_formatters.py::test_print_method_weird",
"IPython/core/tests/test_formatters.py::test_format_config",
"IPython/core/tests/test_formatters.py::test_pretty_max_seq_length",
"IPython/core/tests/test_formatters.py::test_json_as_string_deprecated",
"IPython/lib/tests/test_pretty.py::test_indentation",
"IPython/lib/tests/test_pretty.py::test_dispatch",
"IPython/lib/tests/test_pretty.py::test_callability_checking",
"IPython/lib/tests/test_pretty.py::test_pprint_heap_allocated_type",
"IPython/lib/tests/test_pretty.py::test_pprint_nomod",
"IPython/lib/tests/test_pretty.py::test_pprint_break",
"IPython/lib/tests/test_pretty.py::test_pprint_break_repr",
"IPython/lib/tests/test_pretty.py::test_bad_repr",
"IPython/lib/tests/test_pretty.py::test_really_bad_repr",
"IPython/lib/tests/test_pretty.py::TestsPretty::test_long_dict",
"IPython/lib/tests/test_pretty.py::TestsPretty::test_long_list",
"IPython/lib/tests/test_pretty.py::TestsPretty::test_long_set",
"IPython/lib/tests/test_pretty.py::TestsPretty::test_long_tuple",
"IPython/lib/tests/test_pretty.py::TestsPretty::test_super_repr",
"IPython/lib/tests/test_pretty.py::TestsPretty::test_unbound_method",
"IPython/lib/tests/test_pretty.py::test_metaclass_repr",
"IPython/lib/tests/test_pretty.py::test_unicode_repr",
"IPython/lib/tests/test_pretty.py::test_basic_class",
"IPython/lib/tests/test_pretty.py::test_collections_defaultdict",
"IPython/lib/tests/test_pretty.py::test_collections_ordereddict",
"IPython/lib/tests/test_pretty.py::test_collections_deque",
"IPython/lib/tests/test_pretty.py::test_collections_counter",
"IPython/lib/tests/test_pretty.py::test_mappingproxy",
"IPython/lib/tests/test_pretty.py::test_function_pretty"
]
| []
| BSD 3-Clause "New" or "Revised" License | 1,975 | [
"IPython/lib/pretty.py"
]
| [
"IPython/lib/pretty.py"
]
|
|
dask__dask-3024 | 076830c8e87c5f8806c85be639b92d97008ebf03 | 2017-12-21 18:14:40 | a1653463534a7dd9212f45f833aa17b7dd12e574 | diff --git a/dask/array/core.py b/dask/array/core.py
index 9346b1c04..8801c75aa 100644
--- a/dask/array/core.py
+++ b/dask/array/core.py
@@ -2159,7 +2159,9 @@ def atop(func, out_ind, *args, **kwargs):
concatenate : bool, keyword only
If true concatenate arrays along dummy indices, else provide lists
adjust_chunks : dict
- Dictionary mapping index to function to be applied to chunk sizes
+ Dictionary mapping index to information to adjust chunk sizes. Can
+ either be a constant chunksize, a tuple of all chunksizes, or a
+ function that converts old chunksize to new chunksize
new_axes : dict, keyword only
New indexes and their dimension lengths
@@ -2568,24 +2570,73 @@ def concatenate(seq, axis=0, allow_unknown_chunksizes=False):
return Array(dsk2, name, chunks, dtype=dt)
-def insert_to_ooc(arr, out, lock=True, region=None):
- if lock is True:
- lock = Lock()
+def store_chunk(x, out, index, lock, region):
+ """
+ A function inserted in a Dask graph for storing a chunk.
- def store(x, out, index, lock, region):
- subindex = index
- if region is not None:
- subindex = fuse_slice(region, index)
+ Parameters
+ ----------
+ x: array-like
+ An array (potentially a NumPy one)
+ out: array-like
+ Where to store results too.
+ index: slice-like
+ Where to store result from ``x`` in ``out``.
+ lock: Lock-like or False
+ Lock to use before writing to ``out``.
+ region: slice-like or None
+ Where relative to ``out`` to store ``x``.
+
+ Examples
+ --------
+ >>> a = np.ones((5, 6))
+ >>> b = np.empty(a.shape)
+ >>> store_chunk(a, b, (slice(None), slice(None)), False, None)
+ """
+
+ subindex = index
+ if region is not None:
+ subindex = fuse_slice(region, index)
+
+ if lock:
+ lock.acquire()
+ try:
+ out[subindex] = np.asanyarray(x)
+ finally:
if lock:
- lock.acquire()
- try:
- out[subindex] = np.asanyarray(x)
- finally:
- if lock:
- lock.release()
+ lock.release()
+
+ return None
+
+
+def insert_to_ooc(arr, out, lock=True, region=None):
+ """
+ Creates a Dask graph for storing chunks from ``arr`` in ``out``.
+
+ Parameters
+ ----------
+ arr: da.Array
+ A dask array
+ out: array-like
+ Where to store results too.
+ lock: Lock-like or bool, optional
+ Whether to lock or with what (default is ``True``,
+ which means a ``threading.Lock`` instance).
+ region: slice-like, optional
+ Where in ``out`` to store ``arr``'s results
+ (default is ``None``, meaning all of ``out``).
+
+ Examples
+ --------
+ >>> import dask.array as da
+ >>> d = da.ones((5, 6), chunks=(2, 3))
+ >>> a = np.empty(d.shape)
+ >>> insert_to_ooc(d, a) # doctest: +SKIP
+ """
- return None
+ if lock is True:
+ lock = Lock()
slices = slices_from_chunks(arr.chunks)
@@ -2593,7 +2644,9 @@ def insert_to_ooc(arr, out, lock=True, region=None):
dsk = dict()
for t, slc in zip(core.flatten(arr.__dask_keys__()), slices):
store_key = (name,) + t[1:]
- dsk[store_key] = (store, t, out, slc, lock, region)
+ dsk[store_key] = (
+ store_chunk, t, out, slc, lock, region
+ )
return dsk
diff --git a/dask/array/percentile.py b/dask/array/percentile.py
index 6368231a5..e86894d6a 100644
--- a/dask/array/percentile.py
+++ b/dask/array/percentile.py
@@ -2,6 +2,7 @@ from __future__ import absolute_import, division, print_function
from functools import wraps
from collections import Iterator
+from numbers import Number
import numpy as np
from toolz import merge, merge_sorted
@@ -38,6 +39,8 @@ def percentile(a, q, interpolation='linear'):
if not a.ndim == 1:
raise NotImplementedError(
"Percentiles only implemented for 1-d arrays")
+ if isinstance(q, Number):
+ q = [q]
q = np.array(q)
token = tokenize(a, list(q), interpolation)
name = 'percentile_chunk-' + token
diff --git a/dask/bag/core.py b/dask/bag/core.py
index 4428e0ba5..62297b290 100644
--- a/dask/bag/core.py
+++ b/dask/bag/core.py
@@ -1,16 +1,15 @@
from __future__ import absolute_import, division, print_function
-from collections import Iterable, Iterator, defaultdict
-from functools import wraps, partial
import itertools
import math
-from operator import getitem
import types
import uuid
-from random import Random
-from warnings import warn
+import warnings
+from collections import Iterable, Iterator, defaultdict
from distutils.version import LooseVersion
-
+from functools import wraps, partial
+from operator import getitem
+from random import Random
from toolz import (merge, take, reduce, valmap, map, partition_all, filter,
remove, compose, curry, first, second, accumulate, peek)
@@ -1050,7 +1049,7 @@ class Bag(Base):
return type(self)(merge(self.dask, dsk), e, 1)
- def take(self, k, npartitions=1, compute=True):
+ def take(self, k, npartitions=1, compute=True, warn=True):
""" Take the first k elements.
Parameters
@@ -1064,6 +1063,9 @@ class Bag(Base):
returned. Pass -1 to use all partitions.
compute : bool, optional
Whether to compute the result, default is True.
+ warn : bool, optional
+ Whether to warn if the number of elements returned is less than
+ requested, default is True.
>>> b = from_sequence(range(10))
>>> b.take(3) # doctest: +SKIP
@@ -1087,9 +1089,9 @@ class Bag(Base):
dsk[(name_p, i)] = (list, (take, k, (self.name, i)))
concat = (toolz.concat, ([(name_p, i) for i in range(npartitions)]))
- dsk[(name, 0)] = (safe_take, k, concat)
+ dsk[(name, 0)] = (safe_take, k, concat, warn)
else:
- dsk = {(name, 0): (safe_take, k, (self.name, 0))}
+ dsk = {(name, 0): (safe_take, k, (self.name, 0), warn)}
b = Bag(merge(self.dask, dsk), name, 1)
@@ -1215,12 +1217,16 @@ class Bag(Base):
import dask.dataframe as dd
if meta is None:
if isinstance(columns, pd.DataFrame):
- warn("Passing metadata to `columns` is deprecated. Please "
- "use the `meta` keyword instead.")
+ warnings.warn("Passing metadata to `columns` is deprecated. "
+ "Please use the `meta` keyword instead.")
meta = columns
else:
- head = self.take(1)[0]
- meta = pd.DataFrame([head], columns=columns)
+ head = self.take(1, warn=False)
+ if len(head) == 0:
+ raise ValueError("`dask.bag.Bag.to_dataframe` failed to "
+ "properly infer metadata, please pass in "
+ "metadata via the `meta` keyword")
+ meta = pd.DataFrame(list(head), columns=columns)
elif columns is not None:
raise ValueError("Can't specify both `meta` and `columns`")
else:
@@ -1986,12 +1992,12 @@ def empty_safe_aggregate(func, parts, is_last):
return empty_safe_apply(func, parts2, is_last)
-def safe_take(n, b):
+def safe_take(n, b, warn=True):
r = list(take(n, b))
- if len(r) != n:
- warn("Insufficient elements for `take`. {0} elements requested, "
- "only {1} elements available. Try passing larger `npartitions` "
- "to `take`.".format(n, len(r)))
+ if len(r) != n and warn:
+ warnings.warn("Insufficient elements for `take`. {0} elements "
+ "requested, only {1} elements available. Try passing "
+ "larger `npartitions` to `take`.".format(n, len(r)))
return r
diff --git a/dask/base.py b/dask/base.py
index 88777b0ab..f13786387 100644
--- a/dask/base.py
+++ b/dask/base.py
@@ -584,8 +584,10 @@ def _normalize_function(func):
funcs = reversed((first,) + func.funcs) if first else func.funcs
return tuple(normalize_function(f) for f in funcs)
elif isinstance(func, partial):
- kws = tuple(sorted(func.keywords.items())) if func.keywords else ()
- return (normalize_function(func.func), func.args, kws)
+ args = tuple(normalize_token(i) for i in func.args)
+ kws = tuple((k, normalize_token(v))
+ for k, v in sorted(func.keywords.items()))
+ return (normalize_function(func.func), args, kws)
else:
try:
result = pickle.dumps(func, protocol=0)
diff --git a/dask/dataframe/__init__.py b/dask/dataframe/__init__.py
index 1dabbe714..0e954e44a 100644
--- a/dask/dataframe/__init__.py
+++ b/dask/dataframe/__init__.py
@@ -9,10 +9,7 @@ from .io import (from_array, from_pandas, from_bcolz,
demo, to_hdf, to_records, to_bag)
from .optimize import optimize
from .multi import merge, concat
-from .rolling import (rolling_count, rolling_sum, rolling_mean, rolling_median,
- rolling_min, rolling_max, rolling_std, rolling_var,
- rolling_skew, rolling_kurt, rolling_quantile, rolling_apply,
- rolling_window)
+from . import rolling
from ..base import compute
from .reshape import get_dummies, pivot_table, melt
try:
diff --git a/dask/dataframe/rolling.py b/dask/dataframe/rolling.py
index e6e606698..501ac6fa8 100644
--- a/dask/dataframe/rolling.py
+++ b/dask/dataframe/rolling.py
@@ -1,8 +1,6 @@
from __future__ import absolute_import, division, print_function
import datetime
-import warnings
-from functools import wraps
import pandas as pd
from pandas.core.window import Rolling as pd_Rolling
@@ -142,27 +140,6 @@ def map_overlap(func, df, before, after, *args, **kwargs):
return df._constructor(dsk, name, meta, df.divisions)
-def wrap_rolling(func, method_name):
- """Create a chunked version of a pandas.rolling_* function"""
- @wraps(func)
- def rolling(arg, window, *args, **kwargs):
- # pd.rolling_* functions are deprecated
- warnings.warn(("DeprecationWarning: dd.rolling_{0} is deprecated and "
- "will be removed in a future version, replace with "
- "df.rolling(...).{0}(...)").format(method_name))
-
- rolling_kwargs = {}
- method_kwargs = {}
- for k, v in kwargs.items():
- if k in {'min_periods', 'center', 'win_type', 'axis', 'freq'}:
- rolling_kwargs[k] = v
- else:
- method_kwargs[k] = v
- rolling = arg.rolling(window, **rolling_kwargs)
- return getattr(rolling, method_name)(*args, **method_kwargs)
- return rolling
-
-
def _head_timedelta(current, next_, after):
"""Return rows of ``next_`` whose index is before the last
observation in ``current`` + ``after``.
@@ -197,27 +174,6 @@ def _tail_timedelta(prev, current, before):
return prev[prev.index > (current.index.min() - before)]
-rolling_count = wrap_rolling(pd.rolling_count, 'count')
-rolling_sum = wrap_rolling(pd.rolling_sum, 'sum')
-rolling_mean = wrap_rolling(pd.rolling_mean, 'mean')
-rolling_median = wrap_rolling(pd.rolling_median, 'median')
-rolling_min = wrap_rolling(pd.rolling_min, 'min')
-rolling_max = wrap_rolling(pd.rolling_max, 'max')
-rolling_std = wrap_rolling(pd.rolling_std, 'std')
-rolling_var = wrap_rolling(pd.rolling_var, 'var')
-rolling_skew = wrap_rolling(pd.rolling_skew, 'skew')
-rolling_kurt = wrap_rolling(pd.rolling_kurt, 'kurt')
-rolling_quantile = wrap_rolling(pd.rolling_quantile, 'quantile')
-rolling_apply = wrap_rolling(pd.rolling_apply, 'apply')
-
-
-@wraps(pd.rolling_window)
-def rolling_window(arg, window, **kwargs):
- if kwargs.pop('mean', True):
- return rolling_mean(arg, window, **kwargs)
- return rolling_sum(arg, window, **kwargs)
-
-
def pandas_rolling_method(df, rolling_kwargs, name, *args, **kwargs):
rolling = df.rolling(**rolling_kwargs)
return getattr(rolling, name)(*args, **kwargs)
diff --git a/dask/dot.py b/dask/dot.py
index 33f809911..e5fedfbd9 100644
--- a/dask/dot.py
+++ b/dask/dot.py
@@ -125,23 +125,29 @@ def to_graphviz(dsk, data_attributes=None, function_attributes=None,
k_name = name(k)
if k_name not in seen:
seen.add(k_name)
- g.node(k_name, label=label(k, cache=cache), shape='box',
- **data_attributes.get(k, {}))
+ attrs = data_attributes.get(k, {})
+ attrs.setdefault('label', label(k, cache=cache))
+ attrs.setdefault('shape', 'box')
+ g.node(k_name, **attrs)
if istask(v):
func_name = name((k, 'function'))
if func_name not in seen:
seen.add(func_name)
- g.node(func_name, label=task_label(v), shape='circle',
- **function_attributes.get(k, {}))
+ attrs = function_attributes.get(k, {})
+ attrs.setdefault('label', task_label(v))
+ attrs.setdefault('shape', 'circle')
+ g.node(func_name, **attrs)
g.edge(func_name, k_name)
for dep in get_dependencies(dsk, k):
dep_name = name(dep)
if dep_name not in seen:
seen.add(dep_name)
- g.node(dep_name, label=label(dep, cache=cache), shape='box',
- **data_attributes.get(dep, {}))
+ attrs = data_attributes.get(dep, {})
+ attrs.setdefault('label', label(dep, cache=cache))
+ attrs.setdefault('shape', 'box')
+ g.node(dep_name, **attrs)
g.edge(dep_name, func_name)
elif ishashable(v) and v in dsk:
g.edge(name(v), k_name)
diff --git a/docs/source/changelog.rst b/docs/source/changelog.rst
index ada3406ce..e7bed3642 100644
--- a/docs/source/changelog.rst
+++ b/docs/source/changelog.rst
@@ -7,6 +7,7 @@ Changelog
Array
+++++
+- Fix handling of scalar percentile values in ``percentile`` (:pr:`3021`) `James Bourbeau`_
- Prevent ``bool()`` coercion from calling compute (:pr:`2958`) `Albert DeFusco`_
- Add ``matmul`` (:pr:`2904`) `John A Kirkham`_
- Support N-D arrays with ``matmul`` (:pr:`2909`) `John A Kirkham`_
@@ -32,6 +33,7 @@ DataFrame
- Correctly handle the column name (`df.columns.name`) when reading in ``dd.read_parquet`` (:pr:2973`) `Tom Augspurger`_
- Fixed ``dd.concat`` losing the index dtype when the data contained a categorical (:issue:`2932`) `Tom Augspurger`_
- ``DataFrame.merge()`` (:pr:`2960`) now supports merging on a combination of columns and the index `Jon Mease`_
+- Removed the deprecated ``dd.rolling*`` methods, in preperation for their removal in the next pandas release (:pr:`2995`) `Tom Augspurger`_
Core
diff --git a/docs/source/dataframe-api.rst b/docs/source/dataframe-api.rst
index b39ca00dc..a28df37c6 100644
--- a/docs/source/dataframe-api.rst
+++ b/docs/source/dataframe-api.rst
@@ -243,19 +243,25 @@ Rolling Operations
.. autosummary::
rolling.map_overlap
- rolling.rolling_apply
- rolling.rolling_count
- rolling.rolling_kurt
- rolling.rolling_max
- rolling.rolling_mean
- rolling.rolling_median
- rolling.rolling_min
- rolling.rolling_quantile
- rolling.rolling_skew
- rolling.rolling_std
- rolling.rolling_sum
- rolling.rolling_var
- rolling.rolling_window
+ Series.rolling
+ DataFrame.rolling
+
+.. currentmodule:: dask.dataframe.rolling
+
+.. autosummary::
+ Rolling.apply
+ Rolling.count
+ Rolling.kurt
+ Rolling.max
+ Rolling.mean
+ Rolling.median
+ Rolling.min
+ Rolling.quantile
+ Rolling.skew
+ Rolling.std
+ Rolling.sum
+ Rolling.var
+
Create DataFrames
~~~~~~~~~~~~~~~~~
@@ -346,20 +352,7 @@ Rolling
.. currentmodule:: dask.dataframe.rolling
-.. autofunction:: rolling_apply
.. autofunction:: map_overlap
-.. autofunction:: rolling_count
-.. autofunction:: rolling_kurt
-.. autofunction:: rolling_max
-.. autofunction:: rolling_mean
-.. autofunction:: rolling_median
-.. autofunction:: rolling_min
-.. autofunction:: rolling_quantile
-.. autofunction:: rolling_skew
-.. autofunction:: rolling_std
-.. autofunction:: rolling_sum
-.. autofunction:: rolling_var
-.. autofunction:: rolling_window
Other functions
diff --git a/docs/source/inspect.rst b/docs/source/inspect.rst
index 2cd98f57a..4725af80a 100644
--- a/docs/source/inspect.rst
+++ b/docs/source/inspect.rst
@@ -19,7 +19,7 @@ The first step is to look at the ``.dask`` attribute of an array
>>> import dask.array as da
>>> x = da.ones((5, 15), chunks=(5, 5))
- >>> x.dask
+ >>> dict(x.dask)
{('wrapped_1', 0, 0): (ones, (5, 5)),
('wrapped_1', 0, 1): (ones, (5, 5)),
('wrapped_1', 0, 2): (ones, (5, 5))}
@@ -29,7 +29,7 @@ objects
.. code-block:: python
- >>> (x + 1).dask
+ >>> dict((x + 1).dask)
{('wrapped_1', 0, 0): (ones, (5, 5)),
('wrapped_1', 0, 1): (ones, (5, 5)),
('wrapped_1', 0, 2): (ones, (5, 5))
@@ -37,6 +37,9 @@ objects
('x_1', 0, 1): (add, ('wrapped_1', 0, 1), 1),
('x_1', 0, 2): (add, ('wrapped_1', 0, 2), 1)}
+.. note:: In this example we use simple names like ``x_1``, ``ones``, and
+ ``add`` for demonstration purposes. However in practice these names may be
+ more complex and include long hashed names.
Visualize graphs with DOT
-------------------------
diff --git a/docs/source/setup/kubernetes.rst b/docs/source/setup/kubernetes.rst
index bb75e2d25..1b877f65e 100644
--- a/docs/source/setup/kubernetes.rst
+++ b/docs/source/setup/kubernetes.rst
@@ -136,6 +136,7 @@ pip packages to install on the both the workers and Jupyter server (these two
environments should be matched).
.. code-block:: yaml
+
# config.yaml
worker:
diff --git a/docs/source/spark.rst b/docs/source/spark.rst
index c5a693974..a0997c10d 100644
--- a/docs/source/spark.rst
+++ b/docs/source/spark.rst
@@ -1,13 +1,10 @@
-Comparison to PySpark
-=====================
+Comparison to Spark
+===================
-Spark_ is a popular distributed computing tool with a decent Python API
-PySpark_. Spark is growing to become a dominant name today in Big Data
-analysis alongside Hadoop, for which MRJob_ is possibly the dominant
-Python layer.
-
-Dask has several elements that appear to intersect this space and we are often
-asked, "How does Dask compare with Spark?"
+`Apache Spark <https://spark.apache.org/>`_ is a popular distributed computing
+tool for tabular datasets that is growing to become a dominant name in Big Data
+analysis today. Dask has several elements that appear to intersect this space
+and we are often asked, "How does Dask compare with Spark?"
Answering such comparison questions in an unbiased and informed way is hard,
particularly when the differences can be somewhat technical. This document
@@ -16,103 +13,114 @@ tries to do this; we welcome any corrections.
Summary
-------
-Apache Spark is an all-inclusive framework combining distributed computing,
-SQL queries, machine learning, and more that runs on the JVM and is commonly
-co-deployed with other Big Data frameworks like Hadoop. It was originally
-optimized for bulk data ingest and querying common in data engineering and
-business analytics but has since broadened out. Spark is typically used on
-small to medium sized cluster but also runs well on a single machine.
-
-Dask is a parallel programming library that combines with the Numeric Python
-ecosystem to provide parallel arrays, dataframes, machine learning, and custom
-algorithms. It is based on Python and the foundational C/Fortran stack. Dask
-was originally designed to complement other libraries with parallelism,
-particular for numeric computing and advanced analytics, but has since
-broadened out. Dask is typically used on a single machine, but also runs well
-on a distributed cluster.
-
Generally Dask is smaller and lighter weight than Spark. This means that it
has fewer features and instead is intended to be used in conjunction with other
-libraries, particularly those in the numeric Python ecosystem.
-
-
-User-Facing Differences
------------------------
-
-Scale
-~~~~~
-
-Spark began its life aimed at the thousand node cluster case. As
-such it thinks well about worker failures and integration with data-local
-file systems like the Hadoop FileSystem (HDFS). That being said, Spark can
-run in standalone mode on a single machine.
-
-Dask began its life building out parallel algorithms for numerical array
-computations on a single computer. As such it thinks well about low-latency
-scheduling, low memory footprints, shared memory, and efficient use of local
-disk. That being said dask can run on a distributed_ cluster, making use of
-HDFS and other Big Data technologies.
-
-.. _distributed: https://distributed.readthedocs.io/
-
-
-Java Python Performance
-~~~~~~~~~~~~~~~~~~~~~~~
-
-Spark is written in Scala, a multi-paradigm language built on top of the Java
-Virtual Machine (JVM). Since the rise of Hadoop, Java based languages have
-steadily gained traction on data warehousing tasks and are good at managing
-large amounts of heterogeneous data such as you might find in JSON blobs. The
-Spark development team is now focusing more on binary and native data formats
-with their new effort, Tungsten.
-
-Dask is written in Python, a multi-paradigm language built on top of the
-C/Fortran native stack. This stack benefits from decades of scientific research
-optimizing very fast computation on numeric data. As such, dask is already
-very good on analytic computations on data such as you might find in HDF5 files
-or analytic databases. It can also handle JSON blob type data using Python
-data structures (which are `surprisingly fast`_) using the cytoolz_ library in
-parallel.
-
-
-Java Python Disconnect
-~~~~~~~~~~~~~~~~~~~~~~
-
-Python users on Spark sometimes express frustration by how far separated they
-are from computations. Some of this is inevitable; distributed debugging is a
-hard problem. Some of it however is due to having to hop over the JVM. Spark
-workers spin up JVMs which in turn spin up Python processes. Data moving back
-and forth makes extra trips both through a distributed cluster and also through
-extra serialization layers (see py4j_) and computation layers. Limitations
-like the Java heap size and large Java stack traces come as a surprise to users
-accustomed to native code execution.
-
-Dask has an advantage for Python users because it is itself a Python library,
-so serialization and debugging when things go wrong happens more smoothly.
-
-However, Dask only benefits Python users while Spark is useful in a
-variety of JVM languages (Scala, Java, Clojure) and also has limited support in
-Python and R. New Spark projects like the DataFrame skip serialization and
-boxed execution issues by forgoing the Python process entirely and instead have
-Python code drive native Scala code. APIs for these libraries tend to lag a
-bit behind their Scala counterparts.
-
-
-Scope
-~~~~~
-
-Spark was originally built around the RDD, an unordered collection allowing
-repeats. Most spark add-ons were built on top of this construct, inheriting
-both its abilities and limitations.
-
-Dask is built on a lower-level and more general construct of a generic task
-graph with arbitrary data dependencies. This allows more general computations
-to be built by users within the dask framework. This is probably the largest
-fundamental difference between the two projects. Dask gives up high-level
-understanding to allow users to express more complex parallel algorithms. This
-ended up being essential when writing complex projects like ``dask.array``,
-datetime algorithms in ``dask.dataframe`` or non-trivial algorithms in machine
-learning.
+libraries, particularly those in the numeric Python ecosystem. It couples with
+other libraries like Pandas or Scikit-Learn to achieve high-level
+functionality.
+
+- Language
+ - Spark is written in Scala with some support for Python and R. It
+ interoperates well with other JVM code.
+ - Dask is written in Python and only really supports Python. It
+ interoperates well with C/C++/Fortran/LLVM or other natively compiled
+ code linked through Python.
+- Ecosystem
+ - Spark is an all-in-one project that has inspired its own ecosystem. It
+ integrates well with many other Apache projects.
+ - Dask is a component of the larger Python ecosystem. It couples with and
+ enhances other libraries like NumPy, Pandas, and Scikit-Learn.
+- Age and Trust
+ - Spark is older (since 2010) and has become a dominant and
+ well-trusted tool in the Big Data enterprise world.
+ - Dask is younger (since 2014) and is an extension of the
+ well trusted NumPy/Pandas/Scikit-learn/Jupyter stack.
+- Scope
+ - Spark is more focused on traditional business intelligence
+ operations like SQL and lightweight machine learning.
+ - Dask is applied more generally both to business intelligence
+ applications, as well as a number of scientific and custom business
+ situations
+- Internal Design
+ - Spark's internal model is higher level, providing good high level
+ optimizations on uniformly applied computations, but lacking flexibility
+ for more complex algorithms or ad-hoc systems. It is fundamentally an
+ extension of the Map-Shuffle-Reduce paradigm.
+ - Dask's internal model is lower level, and so lacks high level
+ optimizations, but is able to implement more sophisticated algorithms and
+ build more complex bespoke systems. It is fundamentally based on generic
+ task scheduling.
+- Scale
+ - Spark scales from a single node to thousand-node clusters
+ - Dask scales from a single node to thousand-node clusters
+- APIs
+ - Dataframes
+ - Spark dataframe has its own API and memory model. It also
+ implements a large subset of the SQL language. Spark includes a
+ high-level query optimizer for complex queries.
+ - Dask.dataframe reuses the Pandas API and memory model. It implements
+ neither SQL nor a query optimizer. It is able to do random access,
+ efficient time series operations, and other Pandas-style indexed
+ operations.
+ - Machine Learning
+ - Spark MLLib is a cohesive project with support for common operations
+ that are easy to implement with Spark's Map-Shuffle-Reduce style
+ system. People considering MLLib might also want to consider *other*
+ JVM-based machine learning libraries like H2O, which may have better
+ performance.
+ - Dask relies on and interoperates with existing libraries like
+ Scikit-Learn and XGBoost. These can be more familiar or higher
+ performance, but generally results in a less-cohesive whole. See the
+ `dask-ml`_ project for integrations.
+ - Arrays
+ - Spark does not include support for multi-dimensional arrays natively
+ (this would be challenging given their computation model) although
+ some support for two-dimensional matrices may be found in MLLib.
+ People may also want to look at the `Thunder
+ <https://github.com/thunder-project/thunder>`_ project.
+ - Dask fully supports the NumPy model for :doc:`scalable multi-dimensional
+ arrays <array>`.
+ - Streaming
+ - Spark's support for streaming data is first-class and integrates well
+ into their other APIs. It follows a mini-batch approach. This
+ provides decent performance on large uniform streaming operations
+ - Dask provides a real-time futures interface that is lower-level than
+ Spark streaming. This enables more creative and complex use-cases,
+ but requires more work than Spark streaming.
+ - Graphs / complex networks
+ - Spark provides GraphX, a library for graph processing
+ - Dask provides no such library
+ - Custom parallelism
+ - Spark generally expects users to compose computations out of their
+ high-level primitives (map, reduce, groupby, join, ...). It is also
+ possible to extend Spark through subclassing RDDs, although this is
+ rarely done.
+ - Dask allows you to specify arbitrary task graphs for more complex and
+      custom systems that are not part of the standard set of collections;
+      a minimal sketch follows this list.
+
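To make "arbitrary task graphs" concrete, here is a minimal hand-written graph run with one of Dask's schedulers (a sketch; the `add` helper and the key names are illustrative, not part of the original text):

```python
# A dask graph is just a dict mapping keys to values or task tuples.
from dask.threaded import get

def add(x, y):
    return x + y

dsk = {
    'a': 1,
    'b': 2,
    'c': (add, 'a', 'b'),  # a task: call add on the values of 'a' and 'b'
}

assert get(dsk, 'c') == 3  # the scheduler resolves dependencies, then runs add
```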
+.. _dask-ml: https://dask-ml.readthedocs.org/en/latest
+
+
+Reasons you might choose Spark
+------------------------------
+
+- You prefer Scala or the SQL language
+- You have mostly JVM infrastructure and legacy systems
+- You want an established and trusted solution for business
+- You are mostly doing business analytics with some lightweight machine learning
+- You want an all-in-one solution
+
+
+Reasons you might choose Dask
+-----------------------------
+
+- You prefer Python or native code, or have large legacy code bases that you
+ do not want to entirely rewrite
+- Your use case is complex or does not cleanly fit the Spark computing model
+- You want a lighter-weight transition from single-machine computing to
+ cluster computing
+- You want to interoperate with other technologies and don't mind installing
+ multiple packages
Developer-Facing Differences
@@ -142,10 +150,6 @@ can because Dask schedulers do not have a top-down picture of the computation
they were asked to perform. However, dask is able to easily represent far more
`complex algorithms`_ and expose the creation of these algorithms to normal users.
-Dask.bag, the equivalent of the Spark.RDD, is just one abstraction built on top
-of dask. Others exist. Alternatively power-users can forego high-level
-collections entirely and jump straight to direct low-level task scheduling.
-
Coding Styles
~~~~~~~~~~~~~
@@ -174,11 +178,6 @@ then you should forget both Spark and Dask and use Postgres_ or MongoDB_.
.. _Spark: https://spark.apache.org/
.. _PySpark: https://spark.apache.org/docs/latest/api/python/
-.. _Hadoop: https://hadoop.apache.org/
-.. _MRJob: https://mrjob.readthedocs.io
-.. _`surprisingly fast`: https://www.youtube.com/watch?v=PpBK4zIaFLE
-.. _cytoolz: https://toolz.readthedocs.io
-.. _py4j: http://py4j.sourceforge.net/
.. _Postgres: http://www.postgresql.org/
.. _MongoDB: https://www.mongodb.org/
.. _`complex algorithms`: http://matthewrocklin.com/blog/work/2015/06/26/Complex-Graphs
| `dask.array.nanmean()` generates unstable name (hash)
#### Code Sample
```python
import dask.array as da
import numpy as np
x = da.ones((5, 5), chunks=(2, 2))
x = da.nanmean(x, axis=0)
print(x.name)
```
#### Problem Description
Running the above sample three times outputs:
```
mean_agg-aggregate-8157871966f12f849b40951c495fef6f
mean_agg-aggregate-c6939e3b5eb3290f17252b895e10c7c3
mean_agg-aggregate-17e2ac663f1cdddf5b6fc02b1ad0b889
```
which shows the dask name (hash) is unstable.
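For orientation: `dask.base.tokenize` is deterministic for plain values, so the drift above points at some object in the `nanmean` graph — e.g. a `functools.partial` — that lacks a normalization rule. A minimal sketch of one possible fix, assuming `Dispatch.register` works as a decorator here and that this tuple layout is acceptable (neither detail is taken from the actual patch):

```python
import functools
from dask.base import normalize_token, tokenize

# tokenize itself is deterministic for plain inputs:
assert tokenize([1, 2], 'a') == tokenize([1, 2], 'a')

# Sketch: decompose a partial into (function, args, sorted kwargs)
# instead of hashing the opaque object, so the name is reproducible.
@normalize_token.register(functools.partial)
def normalize_partial(func):
    return (normalize_token(func.func),
            func.args,
            tuple(sorted((func.keywords or {}).items())))
```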
#### Expected Output
For processing large datasets, it's convenient to have a stable hash name in order to save intermediate results or compare them among developers.
> Workaround: use `dask.array.mean()` instead of `dask.array.nanmean()` | dask/dask | diff --git a/dask/array/tests/test_percentiles.py b/dask/array/tests/test_percentiles.py
index ec9f57bf0..1ff01aa6d 100644
--- a/dask/array/tests/test_percentiles.py
+++ b/dask/array/tests/test_percentiles.py
@@ -51,3 +51,11 @@ def test_percentile_with_categoricals():
def test_percentiles_with_empty_arrays():
x = da.ones(10, chunks=((5, 0, 5),))
assert_eq(da.percentile(x, [10, 50, 90]), np.array([1, 1, 1], dtype=x.dtype))
+
+
[email protected]('q', [5, 5.0, np.int64(5), np.float64(5)])
+def test_percentiles_with_scaler_percentile(q):
+ # Regression test to ensure da.percentile works with scalar percentiles
+ # See #3020
+ d = da.ones((16,), chunks=(4,))
+ assert_eq(da.percentile(d, q), np.array([1], dtype=d.dtype))
diff --git a/dask/bag/tests/test_bag.py b/dask/bag/tests/test_bag.py
index 46427f714..44b874d48 100644
--- a/dask/bag/tests/test_bag.py
+++ b/dask/bag/tests/test_bag.py
@@ -530,17 +530,23 @@ def test_take_npartitions():
b.take(1, npartitions=5)
[email protected](sys.version_info[:2] == (3,3),
- reason="Python3.3 uses pytest2.7.2, w/o warns method")
def test_take_npartitions_warn():
- with pytest.warns(None):
- b.take(100)
+ # Use single-threaded scheduler so warnings are properly captured in the
+ # same process
+ with dask.set_options(get=dask.get):
+ with pytest.warns(UserWarning):
+ b.take(100)
+
+ with pytest.warns(UserWarning):
+ b.take(7)
- with pytest.warns(None):
- b.take(7)
+ with pytest.warns(None) as rec:
+ b.take(7, npartitions=2)
+ assert len(rec) == 0
- with pytest.warns(None):
- b.take(7, npartitions=2)
+ with pytest.warns(None) as rec:
+ b.take(7, warn=False)
+ assert len(rec) == 0
def test_map_is_lazy():
@@ -775,6 +781,11 @@ def test_to_dataframe():
with pytest.raises(ValueError):
b.to_dataframe(columns=['a', 'b'], meta=sol)
+ # Inference fails if empty first partition
+ b2 = b.filter(lambda x: x['a'] > 200)
+ with pytest.raises(ValueError):
+ b2.to_dataframe()
+
# Single column
b = b.pluck('a')
sol = sol[['a']]
diff --git a/dask/dataframe/tests/test_rolling.py b/dask/dataframe/tests/test_rolling.py
index 5e6695811..5b34ee690 100644
--- a/dask/dataframe/tests/test_rolling.py
+++ b/dask/dataframe/tests/test_rolling.py
@@ -93,48 +93,6 @@ def mad(x):
return np.fabs(x - x.mean()).mean()
-def rolling_functions_tests(p, d):
- # Old-fashioned rolling API
- with pytest.warns(FutureWarning):
- assert_eq(pd.rolling_count(p, 3), dd.rolling_count(d, 3))
- assert_eq(pd.rolling_sum(p, 3), dd.rolling_sum(d, 3))
- assert_eq(pd.rolling_mean(p, 3), dd.rolling_mean(d, 3))
- assert_eq(pd.rolling_median(p, 3), dd.rolling_median(d, 3))
- assert_eq(pd.rolling_min(p, 3), dd.rolling_min(d, 3))
- assert_eq(pd.rolling_max(p, 3), dd.rolling_max(d, 3))
- assert_eq(pd.rolling_std(p, 3), dd.rolling_std(d, 3))
- assert_eq(pd.rolling_var(p, 3), dd.rolling_var(d, 3))
- # see note around test_rolling_dataframe for logic concerning precision
- assert_eq(pd.rolling_skew(p, 3),
- dd.rolling_skew(d, 3), check_less_precise=True)
- assert_eq(pd.rolling_kurt(p, 3),
- dd.rolling_kurt(d, 3), check_less_precise=True)
- assert_eq(pd.rolling_quantile(p, 3, 0.5), dd.rolling_quantile(d, 3, 0.5))
- assert_eq(pd.rolling_apply(p, 3, mad), dd.rolling_apply(d, 3, mad))
- # Test with edge-case window sizes
- assert_eq(pd.rolling_sum(p, 0), dd.rolling_sum(d, 0))
- assert_eq(pd.rolling_sum(p, 1), dd.rolling_sum(d, 1))
- # Test with kwargs
- assert_eq(pd.rolling_sum(p, 3, min_periods=3),
- dd.rolling_sum(d, 3, min_periods=3))
- pytest.importorskip("scipy")
- assert_eq(pd.rolling_window(p, 3, win_type='boxcar'),
- dd.rolling_window(d, 3, win_type='boxcar'))
-
-
-def test_rolling_functions_series():
- ts = pd.Series(np.random.randn(25).cumsum())
- dts = dd.from_pandas(ts, 3)
- rolling_functions_tests(ts, dts)
-
-
-def test_rolling_functions_dataframe():
- df = pd.DataFrame({'a': np.random.randn(25).cumsum(),
- 'b': np.random.randint(100, size=(25,))})
- ddf = dd.from_pandas(df, 3)
- rolling_functions_tests(df, ddf)
-
-
rolling_method_args_check_less_precise = [
('count', (), False),
('sum', (), False),
diff --git a/dask/tests/test_base.py b/dask/tests/test_base.py
index dfe9e3093..8ab1a76dc 100644
--- a/dask/tests/test_base.py
+++ b/dask/tests/test_base.py
@@ -32,16 +32,19 @@ np = import_or_none('numpy')
pd = import_or_none('pandas')
-def test_normalize_function():
+def f1(a, b, c=1):
+ pass
+
- def f1(a, b, c=1):
- pass
+def f2(a, b=1, c=2):
+ pass
- def f2(a, b=1, c=2):
- pass
- def f3(a):
- pass
+def f3(a):
+ pass
+
+
+def test_normalize_function():
assert normalize_function(f2)
@@ -168,6 +171,15 @@ def test_tokenize_numpy_ufunc_consistent():
assert tokenize(inc) == tokenize(inc)
+def test_tokenize_partial_func_args_kwargs_consistent():
+ f = tz.partial(f3, f2, c=f1)
+ res = normalize_token(f)
+ sol = (b'cdask.tests.test_base\nf3\np0\n.',
+ (b'cdask.tests.test_base\nf2\np0\n.',),
+ (('c', b'cdask.tests.test_base\nf1\np0\n.'),))
+ assert res == sol
+
+
def test_normalize_base():
for i in [1, long(1), 1.1, '1', slice(1, 2, 3)]:
assert normalize_token(i) is i
diff --git a/dask/tests/test_dot.py b/dask/tests/test_dot.py
index 1ec118669..06377a87e 100644
--- a/dask/tests/test_dot.py
+++ b/dask/tests/test_dot.py
@@ -19,7 +19,7 @@ from IPython.display import Image, SVG
# Since graphviz doesn't store a graph, we need to parse the output
-label_re = re.compile('.*\[label=(.*?) shape=.*\]')
+label_re = re.compile('.*\[label=(.*?) shape=(.*?)\]')
def get_label(line):
@@ -28,6 +28,12 @@ def get_label(line):
return m.group(1)
+def get_shape(line):
+ m = label_re.match(line)
+ if m:
+ return m.group(2)
+
+
dsk = {'a': 1,
'b': 2,
'c': (neg, 'a'),
@@ -73,6 +79,22 @@ def test_to_graphviz():
funcs = set(('add', 'sum', 'neg'))
assert set(labels).difference(dsk) == funcs
assert set(labels).difference(funcs) == set(dsk)
+ shapes = list(filter(None, map(get_shape, g.body)))
+ assert set(shapes) == set(('box', 'circle'))
+
+
+def test_to_graphviz_custom():
+ g = to_graphviz(
+ dsk,
+ data_attributes={'a': {'shape': 'square'}},
+ function_attributes={'c': {'label': 'neg_c', 'shape': 'ellipse'}},
+ )
+ labels = list(filter(None, map(get_label, g.body)))
+ funcs = set(('add', 'sum', 'neg', 'neg_c'))
+ assert set(labels).difference(dsk) == funcs
+ assert set(labels).difference(funcs) == set(dsk)
+ shapes = list(filter(None, map(get_shape, g.body)))
+ assert set(shapes) == set(('box', 'circle', 'square', 'ellipse'))
def test_to_graphviz_attributes():
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 12
} | 1.20 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[complete]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
click==8.0.4
cloudpickle==2.2.1
-e git+https://github.com/dask/dask.git@076830c8e87c5f8806c85be639b92d97008ebf03#egg=dask
distributed==1.20.2
HeapDict==1.0.1
importlib-metadata==4.8.3
iniconfig==1.1.1
locket==1.0.0
msgpack-python==0.5.6
numpy==1.19.5
packaging==21.3
pandas==1.1.5
partd==1.2.0
pluggy==1.0.0
psutil==7.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
python-dateutil==2.9.0.post0
pytz==2025.2
six==1.17.0
sortedcontainers==2.4.0
tblib==1.7.0
tomli==1.2.3
toolz==0.12.0
tornado==6.1
typing_extensions==4.1.1
zict==2.1.0
zipp==3.6.0
| name: dask
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- click==8.0.4
- cloudpickle==2.2.1
- distributed==1.20.2
- heapdict==1.0.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- locket==1.0.0
- msgpack-python==0.5.6
- numpy==1.19.5
- packaging==21.3
- pandas==1.1.5
- partd==1.2.0
- pluggy==1.0.0
- psutil==7.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- six==1.17.0
- sortedcontainers==2.4.0
- tblib==1.7.0
- tomli==1.2.3
- toolz==0.12.0
- tornado==6.1
- typing-extensions==4.1.1
- zict==2.1.0
- zipp==3.6.0
prefix: /opt/conda/envs/dask
| [
"dask/array/tests/test_percentiles.py::test_percentile",
"dask/array/tests/test_percentiles.py::test_percentiles_with_empty_arrays",
"dask/array/tests/test_percentiles.py::test_percentiles_with_scaler_percentile[5]",
"dask/array/tests/test_percentiles.py::test_percentiles_with_scaler_percentile[5.00]",
"dask/array/tests/test_percentiles.py::test_percentiles_with_scaler_percentile[q2]",
"dask/array/tests/test_percentiles.py::test_percentiles_with_scaler_percentile[5.01]",
"dask/bag/tests/test_bag.py::test_Bag",
"dask/bag/tests/test_bag.py::test_keys",
"dask/bag/tests/test_bag.py::test_bag_map",
"dask/bag/tests/test_bag.py::test_map_method",
"dask/bag/tests/test_bag.py::test_starmap",
"dask/bag/tests/test_bag.py::test_filter",
"dask/bag/tests/test_bag.py::test_remove",
"dask/bag/tests/test_bag.py::test_iter",
"dask/bag/tests/test_bag.py::test_repr[str]",
"dask/bag/tests/test_bag.py::test_repr[repr]",
"dask/bag/tests/test_bag.py::test_pluck",
"dask/bag/tests/test_bag.py::test_pluck_with_default",
"dask/bag/tests/test_bag.py::test_unzip",
"dask/bag/tests/test_bag.py::test_fold",
"dask/bag/tests/test_bag.py::test_distinct",
"dask/bag/tests/test_bag.py::test_frequencies",
"dask/bag/tests/test_bag.py::test_topk",
"dask/bag/tests/test_bag.py::test_topk_with_non_callable_key[1]",
"dask/bag/tests/test_bag.py::test_topk_with_non_callable_key[2]",
"dask/bag/tests/test_bag.py::test_topk_with_multiarg_lambda",
"dask/bag/tests/test_bag.py::test_lambdas",
"dask/bag/tests/test_bag.py::test_reductions",
"dask/bag/tests/test_bag.py::test_reduction_names",
"dask/bag/tests/test_bag.py::test_tree_reductions",
"dask/bag/tests/test_bag.py::test_aggregation[1]",
"dask/bag/tests/test_bag.py::test_aggregation[3]",
"dask/bag/tests/test_bag.py::test_aggregation[4]",
"dask/bag/tests/test_bag.py::test_non_splittable_reductions[1]",
"dask/bag/tests/test_bag.py::test_non_splittable_reductions[10]",
"dask/bag/tests/test_bag.py::test_std",
"dask/bag/tests/test_bag.py::test_var",
"dask/bag/tests/test_bag.py::test_join",
"dask/bag/tests/test_bag.py::test_foldby",
"dask/bag/tests/test_bag.py::test_foldby_tree_reduction",
"dask/bag/tests/test_bag.py::test_map_partitions",
"dask/bag/tests/test_bag.py::test_map_partitions_args_kwargs",
"dask/bag/tests/test_bag.py::test_random_sample_size",
"dask/bag/tests/test_bag.py::test_random_sample_prob_range",
"dask/bag/tests/test_bag.py::test_random_sample_repeated_computation",
"dask/bag/tests/test_bag.py::test_random_sample_different_definitions",
"dask/bag/tests/test_bag.py::test_random_sample_random_state",
"dask/bag/tests/test_bag.py::test_lazify_task",
"dask/bag/tests/test_bag.py::test_lazify",
"dask/bag/tests/test_bag.py::test_inline_singleton_lists",
"dask/bag/tests/test_bag.py::test_take",
"dask/bag/tests/test_bag.py::test_take_npartitions",
"dask/bag/tests/test_bag.py::test_take_npartitions_warn",
"dask/bag/tests/test_bag.py::test_map_is_lazy",
"dask/bag/tests/test_bag.py::test_can_use_dict_to_make_concrete",
"dask/bag/tests/test_bag.py::test_read_text",
"dask/bag/tests/test_bag.py::test_read_text_large",
"dask/bag/tests/test_bag.py::test_read_text_encoding",
"dask/bag/tests/test_bag.py::test_read_text_large_gzip",
"dask/bag/tests/test_bag.py::test_from_sequence",
"dask/bag/tests/test_bag.py::test_from_long_sequence",
"dask/bag/tests/test_bag.py::test_product",
"dask/bag/tests/test_bag.py::test_partition_collect",
"dask/bag/tests/test_bag.py::test_groupby",
"dask/bag/tests/test_bag.py::test_groupby_with_indexer",
"dask/bag/tests/test_bag.py::test_groupby_with_npartitions_changed",
"dask/bag/tests/test_bag.py::test_concat",
"dask/bag/tests/test_bag.py::test_flatten",
"dask/bag/tests/test_bag.py::test_concat_after_map",
"dask/bag/tests/test_bag.py::test_args",
"dask/bag/tests/test_bag.py::test_to_dataframe",
"dask/bag/tests/test_bag.py::test_to_textfiles[gz-GzipFile]",
"dask/bag/tests/test_bag.py::test_to_textfiles[-open]",
"dask/bag/tests/test_bag.py::test_to_textfiles[bz2-BZ2File]",
"dask/bag/tests/test_bag.py::test_to_textfiles_name_function_preserves_order",
"dask/bag/tests/test_bag.py::test_to_textfiles_name_function_warn",
"dask/bag/tests/test_bag.py::test_to_textfiles_encoding",
"dask/bag/tests/test_bag.py::test_to_textfiles_inputs",
"dask/bag/tests/test_bag.py::test_to_textfiles_endlines",
"dask/bag/tests/test_bag.py::test_string_namespace",
"dask/bag/tests/test_bag.py::test_string_namespace_with_unicode",
"dask/bag/tests/test_bag.py::test_str_empty_split",
"dask/bag/tests/test_bag.py::test_map_with_iterator_function",
"dask/bag/tests/test_bag.py::test_ensure_compute_output_is_concrete",
"dask/bag/tests/test_bag.py::test_bag_class_extend",
"dask/bag/tests/test_bag.py::test_gh715",
"dask/bag/tests/test_bag.py::test_bag_compute_forward_kwargs",
"dask/bag/tests/test_bag.py::test_to_delayed",
"dask/bag/tests/test_bag.py::test_to_delayed_optimizes",
"dask/bag/tests/test_bag.py::test_from_delayed",
"dask/bag/tests/test_bag.py::test_from_delayed_iterator",
"dask/bag/tests/test_bag.py::test_range",
"dask/bag/tests/test_bag.py::test_zip[1]",
"dask/bag/tests/test_bag.py::test_zip[7]",
"dask/bag/tests/test_bag.py::test_zip[10]",
"dask/bag/tests/test_bag.py::test_zip[28]",
"dask/bag/tests/test_bag.py::test_repartition[1-1]",
"dask/bag/tests/test_bag.py::test_repartition[1-2]",
"dask/bag/tests/test_bag.py::test_repartition[1-7]",
"dask/bag/tests/test_bag.py::test_repartition[1-11]",
"dask/bag/tests/test_bag.py::test_repartition[1-23]",
"dask/bag/tests/test_bag.py::test_repartition[2-1]",
"dask/bag/tests/test_bag.py::test_repartition[2-2]",
"dask/bag/tests/test_bag.py::test_repartition[2-7]",
"dask/bag/tests/test_bag.py::test_repartition[2-11]",
"dask/bag/tests/test_bag.py::test_repartition[2-23]",
"dask/bag/tests/test_bag.py::test_repartition[5-1]",
"dask/bag/tests/test_bag.py::test_repartition[5-2]",
"dask/bag/tests/test_bag.py::test_repartition[5-7]",
"dask/bag/tests/test_bag.py::test_repartition[5-11]",
"dask/bag/tests/test_bag.py::test_repartition[5-23]",
"dask/bag/tests/test_bag.py::test_repartition[12-1]",
"dask/bag/tests/test_bag.py::test_repartition[12-2]",
"dask/bag/tests/test_bag.py::test_repartition[12-7]",
"dask/bag/tests/test_bag.py::test_repartition[12-11]",
"dask/bag/tests/test_bag.py::test_repartition[12-23]",
"dask/bag/tests/test_bag.py::test_repartition[23-1]",
"dask/bag/tests/test_bag.py::test_repartition[23-2]",
"dask/bag/tests/test_bag.py::test_repartition[23-7]",
"dask/bag/tests/test_bag.py::test_repartition[23-11]",
"dask/bag/tests/test_bag.py::test_repartition[23-23]",
"dask/bag/tests/test_bag.py::test_repartition_names",
"dask/bag/tests/test_bag.py::test_accumulate",
"dask/bag/tests/test_bag.py::test_groupby_tasks",
"dask/bag/tests/test_bag.py::test_groupby_tasks_names",
"dask/bag/tests/test_bag.py::test_groupby_tasks_2[1000-20-100]",
"dask/bag/tests/test_bag.py::test_groupby_tasks_2[12345-234-1042]",
"dask/bag/tests/test_bag.py::test_groupby_tasks_3",
"dask/bag/tests/test_bag.py::test_to_textfiles_empty_partitions",
"dask/bag/tests/test_bag.py::test_reduction_empty",
"dask/bag/tests/test_bag.py::test_reduction_empty_aggregate[1]",
"dask/bag/tests/test_bag.py::test_reduction_empty_aggregate[2]",
"dask/bag/tests/test_bag.py::test_reduction_empty_aggregate[4]",
"dask/bag/tests/test_bag.py::test_reduction_with_non_comparable_objects",
"dask/bag/tests/test_bag.py::test_empty",
"dask/bag/tests/test_bag.py::test_bag_picklable",
"dask/bag/tests/test_bag.py::test_msgpack_unicode",
"dask/bag/tests/test_bag.py::test_bag_with_single_callable",
"dask/bag/tests/test_bag.py::test_optimize_fuse_keys",
"dask/bag/tests/test_bag.py::test_reductions_are_lazy",
"dask/bag/tests/test_bag.py::test_repeated_groupby",
"dask/bag/tests/test_bag.py::test_temporary_directory",
"dask/bag/tests/test_bag.py::test_empty_bag",
"dask/bag/tests/test_bag.py::test_bag_paths",
"dask/dataframe/tests/test_rolling.py::test_map_overlap[1]",
"dask/dataframe/tests/test_rolling.py::test_map_overlap[4]",
"dask/dataframe/tests/test_rolling.py::test_map_partitions_names",
"dask/dataframe/tests/test_rolling.py::test_map_partitions_errors",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-1-count-args0-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-1-sum-args1-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-1-mean-args2-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-1-median-args3-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-1-min-args4-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-1-max-args5-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-1-std-args6-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-1-var-args7-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-1-skew-args8-True]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-1-kurt-args9-True]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-1-quantile-args10-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-1-apply-args11-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-2-count-args0-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-2-sum-args1-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-2-mean-args2-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-2-median-args3-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-2-min-args4-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-2-max-args5-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-2-std-args6-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-2-var-args7-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-2-skew-args8-True]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-2-kurt-args9-True]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-2-quantile-args10-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-2-apply-args11-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-4-count-args0-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-4-sum-args1-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-4-mean-args2-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-4-median-args3-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-4-min-args4-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-4-max-args5-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-4-std-args6-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-4-var-args7-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-4-skew-args8-True]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-4-kurt-args9-True]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-4-quantile-args10-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-4-apply-args11-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-5-count-args0-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-5-sum-args1-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-5-mean-args2-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-5-median-args3-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-5-min-args4-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-5-max-args5-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-5-std-args6-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-5-var-args7-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-5-skew-args8-True]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-5-kurt-args9-True]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-5-quantile-args10-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[True-5-apply-args11-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-1-count-args0-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-1-sum-args1-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-1-mean-args2-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-1-median-args3-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-1-min-args4-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-1-max-args5-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-1-std-args6-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-1-var-args7-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-1-skew-args8-True]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-1-kurt-args9-True]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-1-quantile-args10-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-1-apply-args11-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-2-count-args0-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-2-sum-args1-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-2-mean-args2-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-2-median-args3-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-2-min-args4-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-2-max-args5-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-2-std-args6-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-2-var-args7-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-2-skew-args8-True]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-2-kurt-args9-True]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-2-quantile-args10-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-2-apply-args11-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-4-count-args0-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-4-sum-args1-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-4-mean-args2-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-4-median-args3-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-4-min-args4-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-4-max-args5-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-4-std-args6-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-4-var-args7-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-4-skew-args8-True]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-4-kurt-args9-True]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-4-quantile-args10-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-4-apply-args11-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-5-count-args0-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-5-sum-args1-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-5-mean-args2-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-5-median-args3-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-5-min-args4-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-5-max-args5-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-5-std-args6-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-5-var-args7-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-5-skew-args8-True]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-5-kurt-args9-True]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-5-quantile-args10-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_methods[False-5-apply-args11-False]",
"dask/dataframe/tests/test_rolling.py::test_rolling_raises",
"dask/dataframe/tests/test_rolling.py::test_rolling_names",
"dask/dataframe/tests/test_rolling.py::test_rolling_axis",
"dask/dataframe/tests/test_rolling.py::test_rolling_partition_size",
"dask/dataframe/tests/test_rolling.py::test_rolling_repr",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_repr",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_constructor",
"dask/tests/test_base.py::test_normalize_function",
"dask/tests/test_base.py::test_tokenize",
"dask/tests/test_base.py::test_tokenize_numpy_array_consistent_on_values",
"dask/tests/test_base.py::test_tokenize_numpy_array_supports_uneven_sizes",
"dask/tests/test_base.py::test_tokenize_discontiguous_numpy_array",
"dask/tests/test_base.py::test_tokenize_numpy_datetime",
"dask/tests/test_base.py::test_tokenize_numpy_scalar",
"dask/tests/test_base.py::test_tokenize_numpy_array_on_object_dtype",
"dask/tests/test_base.py::test_tokenize_numpy_memmap",
"dask/tests/test_base.py::test_tokenize_numpy_memmap_no_filename",
"dask/tests/test_base.py::test_tokenize_numpy_ufunc_consistent",
"dask/tests/test_base.py::test_tokenize_partial_func_args_kwargs_consistent",
"dask/tests/test_base.py::test_normalize_base",
"dask/tests/test_base.py::test_tokenize_pandas",
"dask/tests/test_base.py::test_tokenize_kwargs",
"dask/tests/test_base.py::test_tokenize_same_repr",
"dask/tests/test_base.py::test_tokenize_method",
"dask/tests/test_base.py::test_tokenize_sequences",
"dask/tests/test_base.py::test_tokenize_dict",
"dask/tests/test_base.py::test_tokenize_set",
"dask/tests/test_base.py::test_tokenize_ordered_dict",
"dask/tests/test_base.py::test_tokenize_object_array_with_nans",
"dask/tests/test_base.py::test_tokenize_base_types[1]",
"dask/tests/test_base.py::test_tokenize_base_types[True]",
"dask/tests/test_base.py::test_tokenize_base_types[a0]",
"dask/tests/test_base.py::test_tokenize_base_types[a1]",
"dask/tests/test_base.py::test_tokenize_base_types[1.0]",
"dask/tests/test_base.py::test_tokenize_base_types[1j0]",
"dask/tests/test_base.py::test_tokenize_base_types[1j1]",
"dask/tests/test_base.py::test_tokenize_base_types[x7]",
"dask/tests/test_base.py::test_tokenize_base_types[x8]",
"dask/tests/test_base.py::test_tokenize_base_types[x9]",
"dask/tests/test_base.py::test_tokenize_base_types[None]",
"dask/tests/test_base.py::test_tokenize_base_types[str]",
"dask/tests/test_base.py::test_tokenize_base_types[int]",
"dask/tests/test_base.py::test_is_dask_collection",
"dask/tests/test_base.py::test_custom_collection",
"dask/tests/test_base.py::test_compute_no_opt",
"dask/tests/test_base.py::test_compute_array",
"dask/tests/test_base.py::test_persist_array",
"dask/tests/test_base.py::test_compute_dataframe",
"dask/tests/test_base.py::test_compute_array_dataframe",
"dask/tests/test_base.py::test_compute_array_bag",
"dask/tests/test_base.py::test_compute_with_literal",
"dask/tests/test_base.py::test_compute_nested",
"dask/tests/test_base.py::test_use_cloudpickle_to_tokenize_functions_in__main__",
"dask/tests/test_base.py::test_optimizations_keyword",
"dask/tests/test_base.py::test_default_imports",
"dask/tests/test_base.py::test_persist_literals",
"dask/tests/test_base.py::test_persist_delayed",
"dask/tests/test_base.py::test_persist_array_bag",
"dask/tests/test_base.py::test_normalize_function_limited_size",
"dask/tests/test_base.py::test_optimize_globals",
"dask/tests/test_base.py::test_optimize_None"
]
| [
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[1S-count-args0-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[1S-sum-args1-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[1S-mean-args2-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[1S-median-args3-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[1S-min-args4-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[1S-max-args5-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[1S-std-args6-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[1S-var-args7-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[1S-skew-args8-True]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[1S-kurt-args9-True]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[1S-quantile-args10-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[1S-apply-args11-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[2S-count-args0-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[2S-sum-args1-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[2S-mean-args2-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[2S-median-args3-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[2S-min-args4-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[2S-max-args5-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[2S-std-args6-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[2S-var-args7-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[2S-skew-args8-True]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[2S-kurt-args9-True]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[2S-quantile-args10-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[2S-apply-args11-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[3S-count-args0-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[3S-sum-args1-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[3S-mean-args2-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[3S-median-args3-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[3S-min-args4-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[3S-max-args5-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[3S-std-args6-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[3S-var-args7-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[3S-skew-args8-True]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[3S-kurt-args9-True]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[3S-quantile-args10-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[3S-apply-args11-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[window3-count-args0-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[window3-sum-args1-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[window3-mean-args2-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[window3-median-args3-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[window3-min-args4-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[window3-max-args5-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[window3-std-args6-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[window3-var-args7-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[window3-skew-args8-True]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[window3-kurt-args9-True]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[window3-quantile-args10-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_methods[window3-apply-args11-False]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_window_too_large[window0]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling_window_too_large[window1]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling[6s-6s]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling[2s-2s]",
"dask/dataframe/tests/test_rolling.py::test_time_rolling[6s-2s]"
]
| []
| []
| BSD 3-Clause "New" or "Revised" License | 1,976 | [
"docs/source/dataframe-api.rst",
"dask/bag/core.py",
"dask/array/percentile.py",
"dask/dot.py",
"dask/dataframe/__init__.py",
"dask/dataframe/rolling.py",
"docs/source/setup/kubernetes.rst",
"dask/array/core.py",
"docs/source/spark.rst",
"dask/base.py",
"docs/source/changelog.rst",
"docs/source/inspect.rst"
]
| [
"docs/source/dataframe-api.rst",
"dask/bag/core.py",
"dask/array/percentile.py",
"dask/dot.py",
"dask/dataframe/__init__.py",
"dask/dataframe/rolling.py",
"docs/source/setup/kubernetes.rst",
"dask/array/core.py",
"docs/source/spark.rst",
"dask/base.py",
"docs/source/changelog.rst",
"docs/source/inspect.rst"
]
|
|
dask__dask-3028 | 8149c4b67291bd91859f3c7ef7286f58aa09e646 | 2017-12-21 21:41:19 | a1653463534a7dd9212f45f833aa17b7dd12e574 | TomAugspurger: Just a small comment, if you could raise for `others=str`.
Sorry about the extra commits. I fixed the merge conflict in the changelog, but the CI services didn't like something about it. | diff --git a/dask/array/core.py b/dask/array/core.py
index bfecfc992..206ac4c2d 100644
--- a/dask/array/core.py
+++ b/dask/array/core.py
@@ -18,12 +18,12 @@ import uuid
import warnings
try:
- from cytoolz import (partition, concat, join, first,
+ from cytoolz import (partition, concat, concatv, join, first,
groupby, valmap, accumulate, assoc)
from cytoolz.curried import filter, pluck
except ImportError:
- from toolz import (partition, concat, join, first,
+ from toolz import (partition, concat, concatv, join, first,
groupby, valmap, accumulate, assoc)
from toolz.curried import filter, pluck
from toolz import pipe, map, reduce
@@ -32,14 +32,15 @@ import numpy as np
from . import chunk
from .numpy_compat import _make_sliced_dtype
from .slicing import slice_array, replace_ellipsis
-from ..base import Base, tokenize, dont_optimize, compute_as_if_collection
+from ..base import (Base, tokenize, dont_optimize, compute_as_if_collection,
+ persist)
from ..context import _globals, globalmethod
from ..utils import (homogeneous_deepmap, ndeepmap, ignoring, concrete,
is_integer, IndexCallable, funcname, derived_from,
SerializableLock, ensure_dict, Dispatch)
from ..compatibility import unicode, long, getargspec, zip_longest, apply
from ..core import quote
-from ..delayed import to_task_dask
+from ..delayed import Delayed, to_task_dask
from .. import threaded, core
from .. import sharedict
from ..sharedict import ShareDict
@@ -815,7 +816,8 @@ def broadcast_chunks(*chunkss):
return tuple(result)
-def store(sources, targets, lock=True, regions=None, compute=True, **kwargs):
+def store(sources, targets, lock=True, regions=None, compute=True,
+ return_stored=False, **kwargs):
""" Store dask arrays in array-like objects, overwrite data in target
This stores dask arrays into object that supports numpy-style setitem
@@ -842,6 +844,8 @@ def store(sources, targets, lock=True, regions=None, compute=True, **kwargs):
for the corresponding source and target in sources and targets, respectively.
compute: boolean, optional
If true compute immediately, return ``dask.delayed.Delayed`` otherwise
+ return_stored: boolean, optional
+ Optionally return the stored result (default False).
Examples
--------
@@ -859,6 +863,7 @@ def store(sources, targets, lock=True, regions=None, compute=True, **kwargs):
>>> store([x, y, z], [dset1, dset2, dset3]) # doctest: +SKIP
"""
+
if isinstance(sources, Array):
sources = [sources]
targets = [targets]
@@ -880,31 +885,73 @@ def store(sources, targets, lock=True, regions=None, compute=True, **kwargs):
raise ValueError("Different number of sources [%d] and targets [%d] than regions [%d]"
% (len(sources), len(targets), len(regions)))
- updates = {}
- keys = []
+ # Optimize all sources together
+ sources_dsk = sharedict.merge(*[e.__dask_graph__() for e in sources])
+ sources_dsk = Array.__dask_optimize__(
+ sources_dsk,
+ [e.__dask_keys__() for e in sources]
+ )
+
+ tgt_dsks = []
+ store_keys = []
+ store_dsks = []
+ if return_stored:
+ load_names = []
+ load_dsks = []
for tgt, src, reg in zip(targets, sources, regions):
# if out is a delayed object update dictionary accordingly
try:
- dsk = {}
- dsk.update(tgt.dask)
+ each_tgt_dsk = {}
+ each_tgt_dsk.update(tgt.dask)
tgt = tgt.key
except AttributeError:
- dsk = {}
+ each_tgt_dsk = {}
+
+ src = Array(sources_dsk, src.name, src.chunks, src.dtype)
+
+ each_store_dsk = insert_to_ooc(
+ src, tgt, lock=lock, region=reg, return_stored=return_stored
+ )
+
+ if return_stored:
+ load_names.append('load-store-%s' % src.name)
+ load_dsks.append(retrieve_from_ooc(
+ each_store_dsk.keys(),
+ each_store_dsk
+ ))
+
+ tgt_dsks.append(each_tgt_dsk)
+
+ store_keys.extend(each_store_dsk.keys())
+ store_dsks.append(each_store_dsk)
- update = insert_to_ooc(src, tgt, lock=lock, region=reg)
- keys.extend(update)
+ store_dsks_mrg = sharedict.merge(*concatv(
+ store_dsks, tgt_dsks, [sources_dsk]
+ ))
- update.update(dsk)
- updates.update(update)
+ if return_stored:
+ if compute:
+ store_dlyds = [Delayed(k, store_dsks_mrg) for k in store_keys]
+ store_dlyds = persist(*store_dlyds)
+ store_dsks_mrg = sharedict.merge(*[e.dask for e in store_dlyds])
- name = 'store-' + tokenize(*keys)
- dsk = sharedict.merge((name, updates), *[src.dask for src in sources])
- if compute:
- compute_as_if_collection(Array, dsk, keys, **kwargs)
+ load_dsks_mrg = sharedict.merge(store_dsks_mrg, *load_dsks)
+
+ result = tuple(
+ Array(load_dsks_mrg, n, src.chunks, src.dtype) for n in load_names
+ )
+
+ return result
else:
- from ..delayed import Delayed
- dsk.update({name: keys})
- return Delayed(name, dsk)
+ name = 'store-' + tokenize(*store_keys)
+ dsk = sharedict.merge({name: store_keys}, store_dsks_mrg)
+ result = Delayed(name, dsk)
+
+ if compute:
+ result.compute()
+ return None
+ else:
+ return result
def blockdims_from_blockshape(shape, chunks):
@@ -2157,9 +2204,7 @@ def atop(func, out_ind, *args, **kwargs):
concatenate : bool, keyword only
If true concatenate arrays along dummy indices, else provide lists
adjust_chunks : dict
- Dictionary mapping index to information to adjust chunk sizes. Can
- either be a constant chunksize, a tuple of all chunksizes, or a
- function that converts old chunksize to new chunksize
+ Dictionary mapping index to function to be applied to chunk sizes
new_axes : dict, keyword only
New indexes and their dimension lengths
@@ -2568,7 +2613,7 @@ def concatenate(seq, axis=0, allow_unknown_chunksizes=False):
return Array(dsk2, name, chunks, dtype=dt)
-def store_chunk(x, out, index, lock, region):
+def store_chunk(x, out, index, lock, region, return_stored):
"""
A function inserted in a Dask graph for storing a chunk.
@@ -2584,15 +2629,21 @@ def store_chunk(x, out, index, lock, region):
Lock to use before writing to ``out``.
region: slice-like or None
Where relative to ``out`` to store ``x``.
+ return_stored: bool
+ Whether to return ``out``.
Examples
--------
>>> a = np.ones((5, 6))
>>> b = np.empty(a.shape)
- >>> store_chunk(a, b, (slice(None), slice(None)), False, None)
+ >>> store_chunk(a, b, (slice(None), slice(None)), False, None, False)
"""
+ result = None
+ if return_stored:
+ result = out
+
subindex = index
if region is not None:
subindex = fuse_slice(region, index)
@@ -2605,10 +2656,10 @@ def store_chunk(x, out, index, lock, region):
if lock:
lock.release()
- return None
+ return result
-def insert_to_ooc(arr, out, lock=True, region=None):
+def insert_to_ooc(arr, out, lock=True, region=None, return_stored=False):
"""
Creates a Dask graph for storing chunks from ``arr`` in ``out``.
@@ -2624,6 +2675,9 @@ def insert_to_ooc(arr, out, lock=True, region=None):
region: slice-like, optional
Where in ``out`` to store ``arr``'s results
(default is ``None``, meaning all of ``out``).
+ return_stored: bool, optional
+ Whether to return ``out``
+ (default is ``False``, meaning ``None`` is returned).
Examples
--------
@@ -2642,13 +2696,79 @@ def insert_to_ooc(arr, out, lock=True, region=None):
dsk = dict()
for t, slc in zip(core.flatten(arr.__dask_keys__()), slices):
store_key = (name,) + t[1:]
- dsk[store_key] = (
- store_chunk, t, out, slc, lock, region
- )
+ dsk[store_key] = (store_chunk, t, out, slc, lock, region, return_stored)
return dsk
+def load_chunk(x, index, lock, region):
+ """
+ A function inserted in a Dask graph for loading a chunk.
+
+ Parameters
+ ----------
+ x: array-like
+ An array (potentially a NumPy one)
+ index: slice-like
+ Where to store result from ``x`` in ``out``.
+ lock: Lock-like or False
+ Lock to use before writing to ``out``.
+ region: slice-like or None
+ Where relative to ``out`` to store ``x``.
+
+ Examples
+ --------
+
+ >>> a = np.ones((5, 6))
+ >>> load_chunk(a, (slice(None), slice(None)), False, None) # doctest: +SKIP
+ """
+
+ result = None
+
+ subindex = index
+ if region is not None:
+ subindex = fuse_slice(region, index)
+
+ if lock:
+ lock.acquire()
+ try:
+ result = x[subindex]
+ finally:
+ if lock:
+ lock.release()
+
+ return result
+
+
+def retrieve_from_ooc(keys, dsk):
+ """
+ Creates a Dask graph for loading stored ``keys`` from ``dsk``.
+
+ Parameters
+ ----------
+ keys: Sequence
+ A sequence containing Dask graph keys to load
+ dsk: Mapping
+ A Dask graph corresponding to a Dask Array
+
+ Examples
+ --------
+ >>> import dask.array as da
+ >>> d = da.ones((5, 6), chunks=(2, 3))
+ >>> a = np.empty(d.shape)
+ >>> g = insert_to_ooc(d, a)
+ >>> retrieve_from_ooc(g.keys(), g) # doctest: +SKIP
+ """
+
+ load_dsk = dict()
+ for each_key in keys:
+ load_key = ('load-%s' % each_key[0],) + each_key[1:]
+ # Reuse the result and arguments from `store_chunk` in `load_chunk`.
+ load_dsk[load_key] = (load_chunk, each_key,) + dsk[each_key][3:-1]
+
+ return load_dsk
+
+
def asarray(a):
"""Convert the input to a dask array.
diff --git a/dask/dataframe/accessor.py b/dask/dataframe/accessor.py
index 10ab0f3bc..5462f9a6e 100644
--- a/dask/dataframe/accessor.py
+++ b/dask/dataframe/accessor.py
@@ -119,6 +119,21 @@ class StringAccessor(Accessor):
def split(self, pat=None, n=-1):
return self._function_map('split', pat=pat, n=n)
+ @derived_from(pd.core.strings.StringMethods)
+ def cat(self, others=None, sep=None, na_rep=None):
+ from .core import Series, Index
+ if others is None:
+ raise NotImplementedError("x.str.cat() with `others == None`")
+
+ valid_types = (Series, Index, pd.Series, pd.Index)
+ if isinstance(others, valid_types):
+ others = [others]
+ elif not all(isinstance(a, valid_types) for a in others):
+ raise TypeError("others must be Series/Index")
+
+ return self._series.map_partitions(str_cat, *others, sep=sep,
+ na_rep=na_rep, meta=self._series._meta)
+
def __getitem__(self, index):
return self._series.map_partitions(str_get, index,
meta=self._series._meta)
@@ -127,3 +142,7 @@ class StringAccessor(Accessor):
def str_get(series, index):
""" Implements series.str[index] """
return series.str[index]
+
+
+def str_cat(self, *others, **kwargs):
+ return self.str.cat(others=others, **kwargs)
diff --git a/docs/source/changelog.rst b/docs/source/changelog.rst
index 247d55663..349aabf7f 100644
--- a/docs/source/changelog.rst
+++ b/docs/source/changelog.rst
@@ -35,7 +35,8 @@ DataFrame
- Fixed ``dd.concat`` losing the index dtype when the data contained a categorical (:issue:`2932`) `Tom Augspurger`_
- ``DataFrame.merge()`` (:pr:`2960`) now supports merging on a combination of columns and the index `Jon Mease`_
- Removed the deprecated ``dd.rolling*`` methods, in preparation for their removal in the next pandas release (:pr:`2995`) `Tom Augspurger`_
-- Fix metadata inference bug in which single-partition series were mistakenly special cased (:pr:`3035`) `Jim Crist`
+- Fix metadata inference bug in which single-partition series were mistakenly special cased (:pr:`3035`) `Jim Crist`_
+- Add support for ``Series.str.cat`` (:pr:`3028`) `Jim Crist`_
Core
| Implement .str.cat
This should be doable:
```python
a = dd.from_pandas(pd.Series(["a"] * 100), 2)
b = dd.from_pandas(pd.Series(['b'] * 100), 2)
a.str.cat(b, sep=":") # NotImplementedError
```
Some issues around alignment probably. | dask/dask | diff --git a/dask/array/tests/test_array_core.py b/dask/array/tests/test_array_core.py
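Until `.str.cat` support lands, roughly the same result can be sketched per partition — this mirrors the `map_partitions` approach taken in the patch above and assumes `a` and `b` share divisions:

```python
import pandas as pd
import dask.dataframe as dd

a = dd.from_pandas(pd.Series(['a'] * 100), 2)
b = dd.from_pandas(pd.Series(['b'] * 100), 2)

# map_partitions aligns the partitions of both dask series, then defers
# to pandas' Series.str.cat within each partition.
out = a.map_partitions(lambda s, o: s.str.cat(o, sep=':'), b, meta=a._meta)
print(out.head())  # each element is 'a:b'
```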
index 928206559..0eececc3c 100644
--- a/dask/array/tests/test_array_core.py
+++ b/dask/array/tests/test_array_core.py
@@ -19,7 +19,7 @@ from toolz.curried import identity
import dask
import dask.array as da
from dask.base import tokenize, compute_as_if_collection
-from dask.delayed import delayed
+from dask.delayed import Delayed, delayed
from dask.local import get_sync
from dask.utils import ignoring, tmpfile, tmpdir
from dask.utils_test import inc
@@ -1213,14 +1213,31 @@ def test_store_delayed_target():
atd = delayed(make_target)('at')
btd = delayed(make_target)('bt')
- store([a, b], [atd, btd])
+ # test not keeping result
+ st = store([a, b], [atd, btd])
at = targs['at']
bt = targs['bt']
+ assert st is None
assert_eq(at, a)
assert_eq(bt, b)
+ # test keeping result
+ st = store([a, b], [atd, btd], return_stored=True, compute=False)
+ st = dask.compute(*st)
+
+ at = targs['at']
+ bt = targs['bt']
+
+ assert st is not None
+ assert isinstance(st, tuple)
+ assert all([isinstance(v, np.ndarray) for v in st])
+ assert_eq(at, a)
+ assert_eq(bt, b)
+ assert_eq(st[0], a)
+ assert_eq(st[1], b)
+
pytest.raises(ValueError, lambda: store([a], [at, bt]))
pytest.raises(ValueError, lambda: store(at, at))
pytest.raises(ValueError, lambda: store([at, bt], [at, bt]))
@@ -1233,7 +1250,8 @@ def test_store():
at = np.empty(shape=(4, 4))
bt = np.empty(shape=(4, 4))
- store([a, b], [at, bt])
+ st = store([a, b], [at, bt])
+ assert st is None
assert (at == 2).all()
assert (bt == 3).all()
@@ -1252,22 +1270,77 @@ def test_store_regions():
at = np.zeros(shape=(8, 4, 6))
bt = np.zeros(shape=(8, 4, 6))
v = store([a, b], [at, bt], regions=region, compute=False)
+ assert isinstance(v, Delayed)
+ assert (at == 0).all() and (bt[region] == 0).all()
+ assert all([ev is None for ev in v.compute()])
+ assert (at[region] == 2).all() and (bt[region] == 3).all()
+ assert not (bt == 3).all() and not ( bt == 0 ).all()
+ assert not (at == 2).all() and not ( at == 0 ).all()
+
+ # Single region (keep result):
+ at = np.zeros(shape=(8, 4, 6))
+ bt = np.zeros(shape=(8, 4, 6))
+ v = store(
+ [a, b], [at, bt], regions=region, compute=False, return_stored=True
+ )
+ assert isinstance(v, tuple)
+ assert all([isinstance(e, da.Array) for e in v])
assert (at == 0).all() and (bt[region] == 0).all()
- v.compute()
+
+ ar, br = v
+ assert ar.dtype == a.dtype
+ assert br.dtype == b.dtype
+ assert ar.shape == a.shape
+ assert br.shape == b.shape
+ assert ar.chunks == a.chunks
+ assert br.chunks == b.chunks
+
+ ar, br = da.compute(ar, br)
assert (at[region] == 2).all() and (bt[region] == 3).all()
assert not (bt == 3).all() and not ( bt == 0 ).all()
assert not (at == 2).all() and not ( at == 0 ).all()
+ assert (br == 3).all()
+ assert (ar == 2).all()
# Multiple regions:
at = np.zeros(shape=(8, 4, 6))
bt = np.zeros(shape=(8, 4, 6))
v = store([a, b], [at, bt], regions=[region, region], compute=False)
+ assert isinstance(v, Delayed)
assert (at == 0).all() and (bt[region] == 0).all()
- v.compute()
+ assert all([ev is None for ev in v.compute()])
assert (at[region] == 2).all() and (bt[region] == 3).all()
assert not (bt == 3).all() and not ( bt == 0 ).all()
assert not (at == 2).all() and not ( at == 0 ).all()
+ # Multiple regions (keep result):
+ at = np.zeros(shape=(8, 4, 6))
+ bt = np.zeros(shape=(8, 4, 6))
+ v = store(
+ [a, b], [at, bt],
+ regions=[region, region],
+ compute=False,
+ return_stored=True
+ )
+ assert isinstance(v, tuple)
+ assert all([isinstance(e, da.Array) for e in v])
+ assert (at == 0).all() and (bt[region] == 0).all()
+
+ ar, br = v
+ assert ar.dtype == a.dtype
+ assert br.dtype == b.dtype
+ assert ar.shape == a.shape
+ assert br.shape == b.shape
+ assert ar.chunks == a.chunks
+ assert br.chunks == b.chunks
+
+ ar, br = da.compute(ar, br)
+ assert (at[region] == 2).all() and (bt[region] == 3).all()
+ assert not (bt == 3).all() and not ( bt == 0 ).all()
+ assert not (at == 2).all() and not ( at == 0 ).all()
+ assert (br == 3).all()
+ assert (ar == 2).all()
+
def test_store_compute_false():
d = da.ones((4, 4), chunks=(2, 2))
@@ -1277,8 +1350,18 @@ def test_store_compute_false():
bt = np.zeros(shape=(4, 4))
v = store([a, b], [at, bt], compute=False)
+ assert isinstance(v, Delayed)
+ assert (at == 0).all() and (bt == 0).all()
+ assert all([ev is None for ev in v.compute()])
+ assert (at == 2).all() and (bt == 3).all()
+
+ at = np.zeros(shape=(4, 4))
+ bt = np.zeros(shape=(4, 4))
+
+ dat, dbt = store([a, b], [at, bt], compute=False, return_stored=True)
+ assert isinstance(dat, Array) and isinstance(dbt, Array)
assert (at == 0).all() and (bt == 0).all()
- v.compute()
+ assert (dat.compute() == at).all() and (dbt.compute() == bt).all()
assert (at == 2).all() and (bt == 3).all()
@@ -1320,6 +1403,7 @@ def test_store_locks():
lock = Lock()
v = store([a, b], [at, bt], compute=False, lock=lock)
+ assert isinstance(v, Delayed)
dsk = v.dask
locks = set(vv for v in dsk.values() for vv in v if isinstance(vv, _Lock))
assert locks == set([lock])
@@ -1328,16 +1412,18 @@ def test_store_locks():
at = NonthreadSafeStore()
v = store([a, b], [at, at], lock=lock,
get=dask.threaded.get, num_workers=10)
+ assert v is None
# Don't assume thread safety by default
at = NonthreadSafeStore()
- store(a, at, get=dask.threaded.get, num_workers=10)
- a.store(at, get=dask.threaded.get, num_workers=10)
+ assert store(a, at, get=dask.threaded.get, num_workers=10) is None
+ assert a.store(at, get=dask.threaded.get, num_workers=10) is None
# Ensure locks can be removed
at = ThreadSafeStore()
for i in range(10):
- a.store(at, lock=False, get=dask.threaded.get, num_workers=10)
+ st = a.store(at, lock=False, get=dask.threaded.get, num_workers=10)
+ assert st is None
if at.max_concurrent_uses > 1:
break
if i == 9:
@@ -1350,7 +1436,8 @@ def test_store_multiprocessing_lock():
a = d + 1
at = np.zeros(shape=(10, 10))
- a.store(at, get=dask.multiprocessing.get, num_workers=10)
+ st = a.store(at, get=dask.multiprocessing.get, num_workers=10)
+ assert st is None
def test_to_hdf5():
diff --git a/dask/dataframe/tests/test_dataframe.py b/dask/dataframe/tests/test_dataframe.py
index 1472dd9fc..f01784f16 100644
--- a/dask/dataframe/tests/test_dataframe.py
+++ b/dask/dataframe/tests/test_dataframe.py
@@ -1378,44 +1378,61 @@ def test_datetime_accessor():
def test_str_accessor():
df = pd.DataFrame({'x': ['abc', 'bcd', 'cdef', 'DEFG'], 'y': [1, 2, 3, 4]},
- index=['e', 'f', 'g', 'H'])
+ index=['E', 'f', 'g', 'h'])
- a = dd.from_pandas(df, 2, sort=False)
+ ddf = dd.from_pandas(df, 2)
# Check that str not in dir/hasattr for non-object columns
- assert 'str' not in dir(a.y)
- assert not hasattr(a.y, 'str')
+ assert 'str' not in dir(ddf.y)
+ assert not hasattr(ddf.y, 'str')
# not implemented methods don't show up
- assert 'get_dummies' not in dir(a.x.str)
- assert not hasattr(a.x.str, 'get_dummies')
+ assert 'get_dummies' not in dir(ddf.x.str)
+ assert not hasattr(ddf.x.str, 'get_dummies')
- assert 'upper' in dir(a.x.str)
- assert_eq(a.x.str.upper(), df.x.str.upper())
- assert set(a.x.str.upper().dask) == set(a.x.str.upper().dask)
+ assert 'upper' in dir(ddf.x.str)
+ assert_eq(ddf.x.str.upper(), df.x.str.upper())
+ assert set(ddf.x.str.upper().dask) == set(ddf.x.str.upper().dask)
- assert 'upper' in dir(a.index.str)
- assert_eq(a.index.str.upper(), df.index.str.upper())
- assert set(a.index.str.upper().dask) == set(a.index.str.upper().dask)
+ assert 'upper' in dir(ddf.index.str)
+ assert_eq(ddf.index.str.upper(), df.index.str.upper())
+ assert set(ddf.index.str.upper().dask) == set(ddf.index.str.upper().dask)
# make sure to pass thru args & kwargs
- assert 'contains' in dir(a.x.str)
- assert_eq(a.x.str.contains('a'), df.x.str.contains('a'))
- assert set(a.x.str.contains('a').dask) == set(a.x.str.contains('a').dask)
+ assert 'contains' in dir(ddf.x.str)
+ assert_eq(ddf.x.str.contains('a'), df.x.str.contains('a'))
+ assert set(ddf.x.str.contains('a').dask) == set(ddf.x.str.contains('a').dask)
- assert_eq(a.x.str.contains('d', case=False), df.x.str.contains('d', case=False))
- assert set(a.x.str.contains('d', case=False).dask) == set(a.x.str.contains('d', case=False).dask)
+ assert_eq(ddf.x.str.contains('d', case=False), df.x.str.contains('d', case=False))
+ assert (set(ddf.x.str.contains('d', case=False).dask) ==
+ set(ddf.x.str.contains('d', case=False).dask))
for na in [True, False]:
- assert_eq(a.x.str.contains('a', na=na), df.x.str.contains('a', na=na))
- assert set(a.x.str.contains('a', na=na).dask) == set(a.x.str.contains('a', na=na).dask)
+ assert_eq(ddf.x.str.contains('a', na=na), df.x.str.contains('a', na=na))
+ assert (set(ddf.x.str.contains('a', na=na).dask) ==
+ set(ddf.x.str.contains('a', na=na).dask))
for regex in [True, False]:
- assert_eq(a.x.str.contains('a', regex=regex), df.x.str.contains('a', regex=regex))
- assert set(a.x.str.contains('a', regex=regex).dask) == set(a.x.str.contains('a', regex=regex).dask)
+ assert_eq(ddf.x.str.contains('a', regex=regex), df.x.str.contains('a', regex=regex))
+ assert (set(ddf.x.str.contains('a', regex=regex).dask) ==
+ set(ddf.x.str.contains('a', regex=regex).dask))
+
+ assert_eq(ddf.x.str[:2], df.x.str[:2])
+ assert_eq(ddf.x.str[1], df.x.str[1])
+
+ # str.cat
+ sol = df.x.str.cat(df.x.str.upper(), sep=':')
+ assert_eq(ddf.x.str.cat(ddf.x.str.upper(), sep=':'), sol)
+ assert_eq(ddf.x.str.cat(df.x.str.upper(), sep=':'), sol)
+ assert_eq(ddf.x.str.cat([ddf.x.str.upper(), df.x.str.lower()], sep=':'),
+ df.x.str.cat([df.x.str.upper(), df.x.str.lower()], sep=':'))
- assert_eq(df.x.str[:2], df.x.str[:2])
- assert_eq(a.x.str[1], a.x.str[1])
+ for o in ['foo', ['foo']]:
+ with pytest.raises(TypeError):
+ ddf.x.str.cat(o)
+
+ with pytest.raises(NotImplementedError):
+ ddf.x.str.cat(sep=':')
def test_empty_max():
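# A minimal sketch of the .str accessor behaviour the dataframe hunks
# above assert, assuming pandas and a dask version whose str.cat matches
# these tests (the error cases are shown as comments, not executed):
import pandas as pd
import dask.dataframe as dd

s = pd.Series(['abc', 'bcd', 'cdef', 'DEFG'])
ds = dd.from_pandas(s, npartitions=2)

print(ds.str.upper().compute())                       # element-wise, mirrors pandas
print(ds.str[:2].compute())                           # slicing through the accessor
print(ds.str.cat(ds.str.upper(), sep=':').compute())  # other series are fine
# ds.str.cat('foo')    raises TypeError           (plain strings are rejected)
# ds.str.cat(sep=':')  raises NotImplementedError (reduction form unsupported)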
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 3
} | 1.20 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[complete]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
click==8.0.4
cloudpickle==2.2.1
coverage==6.2
-e git+https://github.com/dask/dask.git@8149c4b67291bd91859f3c7ef7286f58aa09e646#egg=dask
distributed==1.20.2
execnet==1.9.0
HeapDict==1.0.1
importlib-metadata==4.8.3
iniconfig==1.1.1
locket==1.0.0
msgpack-python==0.5.6
numpy==1.19.5
packaging==21.3
pandas==1.1.5
partd==1.2.0
pluggy==1.0.0
psutil==7.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
pytest-asyncio==0.16.0
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-xdist==3.0.2
python-dateutil==2.9.0.post0
pytz==2025.2
six==1.17.0
sortedcontainers==2.4.0
tblib==1.7.0
tomli==1.2.3
toolz==0.12.0
tornado==6.1
typing_extensions==4.1.1
zict==2.1.0
zipp==3.6.0
| name: dask
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- click==8.0.4
- cloudpickle==2.2.1
- coverage==6.2
- distributed==1.20.2
- execnet==1.9.0
- heapdict==1.0.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- locket==1.0.0
- msgpack-python==0.5.6
- numpy==1.19.5
- packaging==21.3
- pandas==1.1.5
- partd==1.2.0
- pluggy==1.0.0
- psutil==7.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-asyncio==0.16.0
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-xdist==3.0.2
- python-dateutil==2.9.0.post0
- pytz==2025.2
- six==1.17.0
- sortedcontainers==2.4.0
- tblib==1.7.0
- tomli==1.2.3
- toolz==0.12.0
- tornado==6.1
- typing-extensions==4.1.1
- zict==2.1.0
- zipp==3.6.0
prefix: /opt/conda/envs/dask
| [
"dask/array/tests/test_array_core.py::test_store_delayed_target",
"dask/array/tests/test_array_core.py::test_store_regions",
"dask/array/tests/test_array_core.py::test_store_compute_false",
"dask/dataframe/tests/test_dataframe.py::test_str_accessor"
]
| [
"dask/array/tests/test_array_core.py::test_matmul",
"dask/dataframe/tests/test_dataframe.py::test_Dataframe",
"dask/dataframe/tests/test_dataframe.py::test_attributes",
"dask/dataframe/tests/test_dataframe.py::test_timezone_freq[npartitions1]",
"dask/dataframe/tests/test_dataframe.py::test_clip[2-5]",
"dask/dataframe/tests/test_dataframe.py::test_clip[2.5-3.5]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_picklable",
"dask/dataframe/tests/test_dataframe.py::test_repartition_freq_divisions",
"dask/dataframe/tests/test_dataframe.py::test_select_dtypes[include0-None]",
"dask/dataframe/tests/test_dataframe.py::test_select_dtypes[None-exclude1]",
"dask/dataframe/tests/test_dataframe.py::test_select_dtypes[include2-exclude2]",
"dask/dataframe/tests/test_dataframe.py::test_select_dtypes[include3-None]",
"dask/dataframe/tests/test_dataframe.py::test_to_timestamp",
"dask/dataframe/tests/test_dataframe.py::test_apply",
"dask/dataframe/tests/test_dataframe.py::test_cov_corr_mixed",
"dask/dataframe/tests/test_dataframe.py::test_apply_infer_columns",
"dask/dataframe/tests/test_dataframe.py::test_info",
"dask/dataframe/tests/test_dataframe.py::test_groupby_multilevel_info",
"dask/dataframe/tests/test_dataframe.py::test_categorize_info",
"dask/dataframe/tests/test_dataframe.py::test_idxmaxmin[idx2-True]",
"dask/dataframe/tests/test_dataframe.py::test_idxmaxmin[idx2-False]",
"dask/dataframe/tests/test_dataframe.py::test_shift",
"dask/dataframe/tests/test_dataframe.py::test_shift_with_freq",
"dask/dataframe/tests/test_dataframe.py::test_first_and_last[first]",
"dask/dataframe/tests/test_dataframe.py::test_first_and_last[last]",
"dask/dataframe/tests/test_dataframe.py::test_datetime_loc_open_slicing"
]
| [
"dask/array/tests/test_array_core.py::test_getem",
"dask/array/tests/test_array_core.py::test_top",
"dask/array/tests/test_array_core.py::test_top_supports_broadcasting_rules",
"dask/array/tests/test_array_core.py::test_top_literals",
"dask/array/tests/test_array_core.py::test_atop_literals",
"dask/array/tests/test_array_core.py::test_concatenate3_on_scalars",
"dask/array/tests/test_array_core.py::test_chunked_dot_product",
"dask/array/tests/test_array_core.py::test_chunked_transpose_plus_one",
"dask/array/tests/test_array_core.py::test_broadcast_dimensions_works_with_singleton_dimensions",
"dask/array/tests/test_array_core.py::test_broadcast_dimensions",
"dask/array/tests/test_array_core.py::test_Array",
"dask/array/tests/test_array_core.py::test_uneven_chunks",
"dask/array/tests/test_array_core.py::test_numblocks_suppoorts_singleton_block_dims",
"dask/array/tests/test_array_core.py::test_keys",
"dask/array/tests/test_array_core.py::test_Array_computation",
"dask/array/tests/test_array_core.py::test_stack",
"dask/array/tests/test_array_core.py::test_short_stack",
"dask/array/tests/test_array_core.py::test_stack_scalars",
"dask/array/tests/test_array_core.py::test_stack_promote_type",
"dask/array/tests/test_array_core.py::test_stack_rechunk",
"dask/array/tests/test_array_core.py::test_concatenate",
"dask/array/tests/test_array_core.py::test_concatenate_unknown_axes",
"dask/array/tests/test_array_core.py::test_concatenate_rechunk",
"dask/array/tests/test_array_core.py::test_concatenate_fixlen_strings",
"dask/array/tests/test_array_core.py::test_block_simple_row_wise",
"dask/array/tests/test_array_core.py::test_block_simple_column_wise",
"dask/array/tests/test_array_core.py::test_block_with_1d_arrays_row_wise",
"dask/array/tests/test_array_core.py::test_block_with_1d_arrays_multiple_rows",
"dask/array/tests/test_array_core.py::test_block_with_1d_arrays_column_wise",
"dask/array/tests/test_array_core.py::test_block_mixed_1d_and_2d",
"dask/array/tests/test_array_core.py::test_block_complicated",
"dask/array/tests/test_array_core.py::test_block_nested",
"dask/array/tests/test_array_core.py::test_block_3d",
"dask/array/tests/test_array_core.py::test_block_with_mismatched_shape",
"dask/array/tests/test_array_core.py::test_block_no_lists",
"dask/array/tests/test_array_core.py::test_block_invalid_nesting",
"dask/array/tests/test_array_core.py::test_block_empty_lists",
"dask/array/tests/test_array_core.py::test_block_tuple",
"dask/array/tests/test_array_core.py::test_binops",
"dask/array/tests/test_array_core.py::test_broadcast_shapes",
"dask/array/tests/test_array_core.py::test_elemwise_on_scalars",
"dask/array/tests/test_array_core.py::test_elemwise_with_ndarrays",
"dask/array/tests/test_array_core.py::test_elemwise_differently_chunked",
"dask/array/tests/test_array_core.py::test_elemwise_dtype",
"dask/array/tests/test_array_core.py::test_operators",
"dask/array/tests/test_array_core.py::test_operator_dtype_promotion",
"dask/array/tests/test_array_core.py::test_field_access",
"dask/array/tests/test_array_core.py::test_field_access_with_shape",
"dask/array/tests/test_array_core.py::test_T",
"dask/array/tests/test_array_core.py::test_norm",
"dask/array/tests/test_array_core.py::test_broadcast_to",
"dask/array/tests/test_array_core.py::test_broadcast_to_array",
"dask/array/tests/test_array_core.py::test_broadcast_to_scalar",
"dask/array/tests/test_array_core.py::test_broadcast_to_chunks",
"dask/array/tests/test_array_core.py::test_broadcast_operator[u_shape0-v_shape0]",
"dask/array/tests/test_array_core.py::test_broadcast_operator[u_shape1-v_shape1]",
"dask/array/tests/test_array_core.py::test_broadcast_operator[u_shape2-v_shape2]",
"dask/array/tests/test_array_core.py::test_broadcast_operator[u_shape3-v_shape3]",
"dask/array/tests/test_array_core.py::test_broadcast_operator[u_shape4-v_shape4]",
"dask/array/tests/test_array_core.py::test_broadcast_operator[u_shape5-v_shape5]",
"dask/array/tests/test_array_core.py::test_broadcast_operator[u_shape6-v_shape6]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape0-new_shape0-chunks0]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape1-new_shape1-5]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape2-new_shape2-5]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape3-new_shape3-12]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape4-new_shape4-12]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape5-new_shape5-chunks5]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape6-new_shape6-4]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape7-new_shape7-4]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape8-new_shape8-4]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape9-new_shape9-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape10-new_shape10-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape11-new_shape11-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape12-new_shape12-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape13-new_shape13-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape14-new_shape14-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape15-new_shape15-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape16-new_shape16-chunks16]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape17-new_shape17-3]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape18-new_shape18-4]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape19-new_shape19-chunks19]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape20-new_shape20-1]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape21-new_shape21-1]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape22-new_shape22-24]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape23-new_shape23-6]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape24-new_shape24-6]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape25-new_shape25-6]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape26-new_shape26-chunks26]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape27-new_shape27-chunks27]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape28-new_shape28-chunks28]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape29-new_shape29-chunks29]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape30-new_shape30-chunks30]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape31-new_shape31-chunks31]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape32-new_shape32-chunks32]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape33-new_shape33-chunks33]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape34-new_shape34-chunks34]",
"dask/array/tests/test_array_core.py::test_reshape_exceptions",
"dask/array/tests/test_array_core.py::test_reshape_splat",
"dask/array/tests/test_array_core.py::test_reshape_fails_for_dask_only",
"dask/array/tests/test_array_core.py::test_reshape_unknown_dimensions",
"dask/array/tests/test_array_core.py::test_full",
"dask/array/tests/test_array_core.py::test_map_blocks",
"dask/array/tests/test_array_core.py::test_map_blocks2",
"dask/array/tests/test_array_core.py::test_map_blocks_with_constants",
"dask/array/tests/test_array_core.py::test_map_blocks_with_kwargs",
"dask/array/tests/test_array_core.py::test_map_blocks_with_chunks",
"dask/array/tests/test_array_core.py::test_map_blocks_dtype_inference",
"dask/array/tests/test_array_core.py::test_from_function_requires_block_args",
"dask/array/tests/test_array_core.py::test_repr",
"dask/array/tests/test_array_core.py::test_slicing_with_ellipsis",
"dask/array/tests/test_array_core.py::test_slicing_with_ndarray",
"dask/array/tests/test_array_core.py::test_dtype",
"dask/array/tests/test_array_core.py::test_blockdims_from_blockshape",
"dask/array/tests/test_array_core.py::test_coerce",
"dask/array/tests/test_array_core.py::test_bool",
"dask/array/tests/test_array_core.py::test_store",
"dask/array/tests/test_array_core.py::test_store_locks",
"dask/array/tests/test_array_core.py::test_to_dask_dataframe",
"dask/array/tests/test_array_core.py::test_np_array_with_zero_dimensions",
"dask/array/tests/test_array_core.py::test_dtype_complex",
"dask/array/tests/test_array_core.py::test_astype",
"dask/array/tests/test_array_core.py::test_arithmetic",
"dask/array/tests/test_array_core.py::test_elemwise_consistent_names",
"dask/array/tests/test_array_core.py::test_optimize",
"dask/array/tests/test_array_core.py::test_slicing_with_non_ndarrays",
"dask/array/tests/test_array_core.py::test_getter",
"dask/array/tests/test_array_core.py::test_size",
"dask/array/tests/test_array_core.py::test_nbytes",
"dask/array/tests/test_array_core.py::test_itemsize",
"dask/array/tests/test_array_core.py::test_Array_normalizes_dtype",
"dask/array/tests/test_array_core.py::test_from_array_with_lock",
"dask/array/tests/test_array_core.py::test_from_array_tasks_always_call_getter",
"dask/array/tests/test_array_core.py::test_from_array_no_asarray",
"dask/array/tests/test_array_core.py::test_from_array_getitem",
"dask/array/tests/test_array_core.py::test_from_array_minus_one",
"dask/array/tests/test_array_core.py::test_asarray",
"dask/array/tests/test_array_core.py::test_asanyarray",
"dask/array/tests/test_array_core.py::test_from_func",
"dask/array/tests/test_array_core.py::test_concatenate3_2",
"dask/array/tests/test_array_core.py::test_map_blocks3",
"dask/array/tests/test_array_core.py::test_from_array_with_missing_chunks",
"dask/array/tests/test_array_core.py::test_normalize_chunks",
"dask/array/tests/test_array_core.py::test_raise_on_no_chunks",
"dask/array/tests/test_array_core.py::test_chunks_is_immutable",
"dask/array/tests/test_array_core.py::test_raise_on_bad_kwargs",
"dask/array/tests/test_array_core.py::test_long_slice",
"dask/array/tests/test_array_core.py::test_ellipsis_slicing",
"dask/array/tests/test_array_core.py::test_point_slicing",
"dask/array/tests/test_array_core.py::test_point_slicing_with_full_slice",
"dask/array/tests/test_array_core.py::test_slice_with_floats",
"dask/array/tests/test_array_core.py::test_slice_with_integer_types",
"dask/array/tests/test_array_core.py::test_index_with_integer_types",
"dask/array/tests/test_array_core.py::test_vindex_basic",
"dask/array/tests/test_array_core.py::test_vindex_nd",
"dask/array/tests/test_array_core.py::test_vindex_negative",
"dask/array/tests/test_array_core.py::test_vindex_errors",
"dask/array/tests/test_array_core.py::test_vindex_merge",
"dask/array/tests/test_array_core.py::test_empty_array",
"dask/array/tests/test_array_core.py::test_memmap",
"dask/array/tests/test_array_core.py::test_to_npy_stack",
"dask/array/tests/test_array_core.py::test_view",
"dask/array/tests/test_array_core.py::test_view_fortran",
"dask/array/tests/test_array_core.py::test_map_blocks_with_changed_dimension",
"dask/array/tests/test_array_core.py::test_broadcast_chunks",
"dask/array/tests/test_array_core.py::test_chunks_error",
"dask/array/tests/test_array_core.py::test_array_compute_forward_kwargs",
"dask/array/tests/test_array_core.py::test_dont_fuse_outputs",
"dask/array/tests/test_array_core.py::test_dont_dealias_outputs",
"dask/array/tests/test_array_core.py::test_timedelta_op",
"dask/array/tests/test_array_core.py::test_to_delayed",
"dask/array/tests/test_array_core.py::test_to_delayed_optimizes",
"dask/array/tests/test_array_core.py::test_cumulative",
"dask/array/tests/test_array_core.py::test_atop_names",
"dask/array/tests/test_array_core.py::test_atop_new_axes",
"dask/array/tests/test_array_core.py::test_atop_kwargs",
"dask/array/tests/test_array_core.py::test_atop_chunks",
"dask/array/tests/test_array_core.py::test_from_delayed",
"dask/array/tests/test_array_core.py::test_A_property",
"dask/array/tests/test_array_core.py::test_copy_mutate",
"dask/array/tests/test_array_core.py::test_npartitions",
"dask/array/tests/test_array_core.py::test_astype_gh1151",
"dask/array/tests/test_array_core.py::test_elemwise_name",
"dask/array/tests/test_array_core.py::test_map_blocks_name",
"dask/array/tests/test_array_core.py::test_array_picklable",
"dask/array/tests/test_array_core.py::test_from_array_raises_on_bad_chunks",
"dask/array/tests/test_array_core.py::test_concatenate_axes",
"dask/array/tests/test_array_core.py::test_atop_concatenate",
"dask/array/tests/test_array_core.py::test_common_blockdim",
"dask/array/tests/test_array_core.py::test_uneven_chunks_that_fit_neatly",
"dask/array/tests/test_array_core.py::test_elemwise_uneven_chunks",
"dask/array/tests/test_array_core.py::test_uneven_chunks_atop",
"dask/array/tests/test_array_core.py::test_warn_bad_rechunking",
"dask/array/tests/test_array_core.py::test_optimize_fuse_keys",
"dask/array/tests/test_array_core.py::test_concatenate_stack_dont_warn",
"dask/array/tests/test_array_core.py::test_map_blocks_delayed",
"dask/array/tests/test_array_core.py::test_no_chunks",
"dask/array/tests/test_array_core.py::test_no_chunks_2d",
"dask/array/tests/test_array_core.py::test_no_chunks_yes_chunks",
"dask/array/tests/test_array_core.py::test_raise_informative_errors_no_chunks",
"dask/array/tests/test_array_core.py::test_no_chunks_slicing_2d",
"dask/array/tests/test_array_core.py::test_index_array_with_array_1d",
"dask/array/tests/test_array_core.py::test_index_array_with_array_2d",
"dask/array/tests/test_array_core.py::test_setitem_1d",
"dask/array/tests/test_array_core.py::test_setitem_2d",
"dask/array/tests/test_array_core.py::test_setitem_errs",
"dask/array/tests/test_array_core.py::test_zero_slice_dtypes",
"dask/array/tests/test_array_core.py::test_zero_sized_array_rechunk",
"dask/array/tests/test_array_core.py::test_atop_zero_shape",
"dask/array/tests/test_array_core.py::test_atop_zero_shape_new_axes",
"dask/array/tests/test_array_core.py::test_broadcast_against_zero_shape",
"dask/array/tests/test_array_core.py::test_from_array_name",
"dask/array/tests/test_array_core.py::test_concatenate_errs",
"dask/array/tests/test_array_core.py::test_stack_errs",
"dask/array/tests/test_array_core.py::test_atop_with_numpy_arrays",
"dask/array/tests/test_array_core.py::test_elemwise_with_lists[other0-100]",
"dask/array/tests/test_array_core.py::test_elemwise_with_lists[other0-6]",
"dask/array/tests/test_array_core.py::test_elemwise_with_lists[other1-100]",
"dask/array/tests/test_array_core.py::test_elemwise_with_lists[other1-6]",
"dask/array/tests/test_array_core.py::test_elemwise_with_lists[other2-100]",
"dask/array/tests/test_array_core.py::test_elemwise_with_lists[other2-6]",
"dask/array/tests/test_array_core.py::test_constructor_plugin",
"dask/array/tests/test_array_core.py::test_no_warnings_on_metadata",
"dask/array/tests/test_array_core.py::test_delayed_array_key_hygeine",
"dask/dataframe/tests/test_dataframe.py::test_head_tail",
"dask/dataframe/tests/test_dataframe.py::test_head_npartitions",
"dask/dataframe/tests/test_dataframe.py::test_head_npartitions_warn",
"dask/dataframe/tests/test_dataframe.py::test_index_head",
"dask/dataframe/tests/test_dataframe.py::test_Series",
"dask/dataframe/tests/test_dataframe.py::test_Index",
"dask/dataframe/tests/test_dataframe.py::test_Scalar",
"dask/dataframe/tests/test_dataframe.py::test_column_names",
"dask/dataframe/tests/test_dataframe.py::test_index_names",
"dask/dataframe/tests/test_dataframe.py::test_timezone_freq[1]",
"dask/dataframe/tests/test_dataframe.py::test_rename_columns",
"dask/dataframe/tests/test_dataframe.py::test_rename_series",
"dask/dataframe/tests/test_dataframe.py::test_describe",
"dask/dataframe/tests/test_dataframe.py::test_describe_empty",
"dask/dataframe/tests/test_dataframe.py::test_cumulative",
"dask/dataframe/tests/test_dataframe.py::test_dropna",
"dask/dataframe/tests/test_dataframe.py::test_where_mask",
"dask/dataframe/tests/test_dataframe.py::test_map_partitions_multi_argument",
"dask/dataframe/tests/test_dataframe.py::test_map_partitions",
"dask/dataframe/tests/test_dataframe.py::test_map_partitions_names",
"dask/dataframe/tests/test_dataframe.py::test_map_partitions_column_info",
"dask/dataframe/tests/test_dataframe.py::test_map_partitions_method_names",
"dask/dataframe/tests/test_dataframe.py::test_map_partitions_keeps_kwargs_in_dict",
"dask/dataframe/tests/test_dataframe.py::test_metadata_inference_single_partition_aligned_args",
"dask/dataframe/tests/test_dataframe.py::test_drop_duplicates",
"dask/dataframe/tests/test_dataframe.py::test_drop_duplicates_subset",
"dask/dataframe/tests/test_dataframe.py::test_get_partition",
"dask/dataframe/tests/test_dataframe.py::test_ndim",
"dask/dataframe/tests/test_dataframe.py::test_dtype",
"dask/dataframe/tests/test_dataframe.py::test_value_counts",
"dask/dataframe/tests/test_dataframe.py::test_unique",
"dask/dataframe/tests/test_dataframe.py::test_isin",
"dask/dataframe/tests/test_dataframe.py::test_len",
"dask/dataframe/tests/test_dataframe.py::test_size",
"dask/dataframe/tests/test_dataframe.py::test_nbytes",
"dask/dataframe/tests/test_dataframe.py::test_quantile",
"dask/dataframe/tests/test_dataframe.py::test_quantile_missing",
"dask/dataframe/tests/test_dataframe.py::test_empty_quantile",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_quantile",
"dask/dataframe/tests/test_dataframe.py::test_index",
"dask/dataframe/tests/test_dataframe.py::test_assign",
"dask/dataframe/tests/test_dataframe.py::test_map",
"dask/dataframe/tests/test_dataframe.py::test_concat",
"dask/dataframe/tests/test_dataframe.py::test_args",
"dask/dataframe/tests/test_dataframe.py::test_known_divisions",
"dask/dataframe/tests/test_dataframe.py::test_unknown_divisions",
"dask/dataframe/tests/test_dataframe.py::test_align[inner]",
"dask/dataframe/tests/test_dataframe.py::test_align[outer]",
"dask/dataframe/tests/test_dataframe.py::test_align[left]",
"dask/dataframe/tests/test_dataframe.py::test_align[right]",
"dask/dataframe/tests/test_dataframe.py::test_align_axis[inner]",
"dask/dataframe/tests/test_dataframe.py::test_align_axis[outer]",
"dask/dataframe/tests/test_dataframe.py::test_align_axis[left]",
"dask/dataframe/tests/test_dataframe.py::test_align_axis[right]",
"dask/dataframe/tests/test_dataframe.py::test_combine",
"dask/dataframe/tests/test_dataframe.py::test_combine_first",
"dask/dataframe/tests/test_dataframe.py::test_random_partitions",
"dask/dataframe/tests/test_dataframe.py::test_series_round",
"dask/dataframe/tests/test_dataframe.py::test_repartition_divisions",
"dask/dataframe/tests/test_dataframe.py::test_repartition_on_pandas_dataframe",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-1-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-1-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-1-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-1-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-1-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-1-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-1-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-1-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-2-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-2-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-2-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-2-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-2-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-2-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-2-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-2-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-4-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-4-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-4-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-4-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-4-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-4-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-4-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-4-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-5-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-5-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-5-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-5-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-5-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-5-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-5-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-5-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-1-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-1-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-1-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-1-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-1-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-1-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-1-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-1-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-2-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-2-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-2-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-2-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-2-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-2-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-2-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-2-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-4-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-4-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-4-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-4-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-4-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-4-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-4-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-4-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-5-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-5-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-5-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-5-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-5-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-5-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-5-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-5-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-1-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-1-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-1-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-1-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-1-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-1-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-1-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-1-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-2-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-2-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-2-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-2-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-2-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-2-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-2-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-2-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-4-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-4-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-4-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-4-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-4-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-4-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-4-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-4-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-5-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-5-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-5-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-5-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-5-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-5-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-5-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-5-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-1-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-1-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-1-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-1-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-1-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-1-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-1-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-1-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-2-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-2-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-2-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-2-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-2-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-2-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-2-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-2-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-4-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-4-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-4-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-4-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-4-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-4-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-4-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-4-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-5-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-5-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-5-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-5-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-5-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-5-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-5-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-5-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-1-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-1-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-1-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-1-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-1-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-1-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-1-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-1-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-2-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-2-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-2-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-2-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-2-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-2-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-2-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-2-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-4-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-4-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-4-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-4-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-4-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-4-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-4-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-4-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-5-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-5-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-5-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-5-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-5-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-5-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-5-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-5-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-1-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-1-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-1-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-1-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-1-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-1-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-1-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-1-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-2-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-2-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-2-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-2-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-2-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-2-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-2-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-2-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-4-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-4-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-4-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-4-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-4-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-4-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-4-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-4-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-5-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-5-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-5-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-5-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-5-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-5-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-5-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-5-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions_same_limits",
"dask/dataframe/tests/test_dataframe.py::test_repartition_object_index",
"dask/dataframe/tests/test_dataframe.py::test_repartition_freq_errors",
"dask/dataframe/tests/test_dataframe.py::test_embarrassingly_parallel_operations",
"dask/dataframe/tests/test_dataframe.py::test_fillna",
"dask/dataframe/tests/test_dataframe.py::test_fillna_multi_dataframe",
"dask/dataframe/tests/test_dataframe.py::test_ffill_bfill",
"dask/dataframe/tests/test_dataframe.py::test_fillna_series_types",
"dask/dataframe/tests/test_dataframe.py::test_sample",
"dask/dataframe/tests/test_dataframe.py::test_sample_without_replacement",
"dask/dataframe/tests/test_dataframe.py::test_datetime_accessor",
"dask/dataframe/tests/test_dataframe.py::test_empty_max",
"dask/dataframe/tests/test_dataframe.py::test_query",
"dask/dataframe/tests/test_dataframe.py::test_eval",
"dask/dataframe/tests/test_dataframe.py::test_deterministic_apply_concat_apply_names",
"dask/dataframe/tests/test_dataframe.py::test_aca_meta_infer",
"dask/dataframe/tests/test_dataframe.py::test_aca_split_every",
"dask/dataframe/tests/test_dataframe.py::test_reduction_method",
"dask/dataframe/tests/test_dataframe.py::test_reduction_method_split_every",
"dask/dataframe/tests/test_dataframe.py::test_pipe",
"dask/dataframe/tests/test_dataframe.py::test_gh_517",
"dask/dataframe/tests/test_dataframe.py::test_drop_axis_1",
"dask/dataframe/tests/test_dataframe.py::test_gh580",
"dask/dataframe/tests/test_dataframe.py::test_rename_dict",
"dask/dataframe/tests/test_dataframe.py::test_rename_function",
"dask/dataframe/tests/test_dataframe.py::test_rename_index",
"dask/dataframe/tests/test_dataframe.py::test_to_frame",
"dask/dataframe/tests/test_dataframe.py::test_apply_warns",
"dask/dataframe/tests/test_dataframe.py::test_applymap",
"dask/dataframe/tests/test_dataframe.py::test_abs",
"dask/dataframe/tests/test_dataframe.py::test_round",
"dask/dataframe/tests/test_dataframe.py::test_cov",
"dask/dataframe/tests/test_dataframe.py::test_corr",
"dask/dataframe/tests/test_dataframe.py::test_cov_corr_meta",
"dask/dataframe/tests/test_dataframe.py::test_autocorr",
"dask/dataframe/tests/test_dataframe.py::test_index_time_properties",
"dask/dataframe/tests/test_dataframe.py::test_nlargest_nsmallest",
"dask/dataframe/tests/test_dataframe.py::test_reset_index",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_compute_forward_kwargs",
"dask/dataframe/tests/test_dataframe.py::test_series_iteritems",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_iterrows",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_itertuples",
"dask/dataframe/tests/test_dataframe.py::test_astype",
"dask/dataframe/tests/test_dataframe.py::test_astype_categoricals",
"dask/dataframe/tests/test_dataframe.py::test_astype_categoricals_known",
"dask/dataframe/tests/test_dataframe.py::test_groupby_callable",
"dask/dataframe/tests/test_dataframe.py::test_methods_tokenize_differently",
"dask/dataframe/tests/test_dataframe.py::test_gh_1301",
"dask/dataframe/tests/test_dataframe.py::test_timeseries_sorted",
"dask/dataframe/tests/test_dataframe.py::test_column_assignment",
"dask/dataframe/tests/test_dataframe.py::test_columns_assignment",
"dask/dataframe/tests/test_dataframe.py::test_attribute_assignment",
"dask/dataframe/tests/test_dataframe.py::test_setitem_triggering_realign",
"dask/dataframe/tests/test_dataframe.py::test_inplace_operators",
"dask/dataframe/tests/test_dataframe.py::test_idxmaxmin[idx0-True]",
"dask/dataframe/tests/test_dataframe.py::test_idxmaxmin[idx0-False]",
"dask/dataframe/tests/test_dataframe.py::test_idxmaxmin[idx1-True]",
"dask/dataframe/tests/test_dataframe.py::test_idxmaxmin[idx1-False]",
"dask/dataframe/tests/test_dataframe.py::test_idxmaxmin_empty_partitions",
"dask/dataframe/tests/test_dataframe.py::test_getitem_meta",
"dask/dataframe/tests/test_dataframe.py::test_getitem_multilevel",
"dask/dataframe/tests/test_dataframe.py::test_diff",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[None-2-1]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[None-2-4]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[None-2-20]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[None-5-1]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[None-5-4]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[None-5-20]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[1-2-1]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[1-2-4]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[1-2-20]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[1-5-1]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[1-5-4]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[1-5-20]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[5-2-1]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[5-2-4]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[5-2-20]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[5-5-1]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[5-5-4]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[5-5-20]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[20-2-1]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[20-2-4]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[20-2-20]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[20-5-1]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[20-5-4]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[20-5-20]",
"dask/dataframe/tests/test_dataframe.py::test_split_out_drop_duplicates[None]",
"dask/dataframe/tests/test_dataframe.py::test_split_out_drop_duplicates[2]",
"dask/dataframe/tests/test_dataframe.py::test_split_out_value_counts[None]",
"dask/dataframe/tests/test_dataframe.py::test_split_out_value_counts[2]",
"dask/dataframe/tests/test_dataframe.py::test_values",
"dask/dataframe/tests/test_dataframe.py::test_copy",
"dask/dataframe/tests/test_dataframe.py::test_del",
"dask/dataframe/tests/test_dataframe.py::test_memory_usage[True-True]",
"dask/dataframe/tests/test_dataframe.py::test_memory_usage[True-False]",
"dask/dataframe/tests/test_dataframe.py::test_memory_usage[False-True]",
"dask/dataframe/tests/test_dataframe.py::test_memory_usage[False-False]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[sum]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[mean]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[std]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[var]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[count]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[min]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[max]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[idxmin]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[idxmax]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[prod]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[all]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[sem]",
"dask/dataframe/tests/test_dataframe.py::test_to_datetime",
"dask/dataframe/tests/test_dataframe.py::test_to_timedelta",
"dask/dataframe/tests/test_dataframe.py::test_slice_on_filtered_boundary[0]",
"dask/dataframe/tests/test_dataframe.py::test_slice_on_filtered_boundary[9]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_nonmonotonic",
"dask/dataframe/tests/test_dataframe.py::test_with_boundary[-1-None-False-False-drop0]",
"dask/dataframe/tests/test_dataframe.py::test_with_boundary[-1-None-False-True-drop1]",
"dask/dataframe/tests/test_dataframe.py::test_with_boundary[None-3-False-False-drop2]",
"dask/dataframe/tests/test_dataframe.py::test_with_boundary[None-3-True-False-drop3]",
"dask/dataframe/tests/test_dataframe.py::test_with_boundary[-0.5-None-False-False-drop4]",
"dask/dataframe/tests/test_dataframe.py::test_with_boundary[-0.5-None-False-True-drop5]",
"dask/dataframe/tests/test_dataframe.py::test_with_boundary[-1.5-None-False-True-drop6]",
"dask/dataframe/tests/test_dataframe.py::test_with_boundary[None-3.5-False-False-drop7]",
"dask/dataframe/tests/test_dataframe.py::test_with_boundary[None-3.5-True-False-drop8]",
"dask/dataframe/tests/test_dataframe.py::test_with_boundary[None-2.5-False-False-drop9]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index0-0-9]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index1--1-None]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index2-None-10]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index3-None-None]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index4--1-None]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index5-None-2]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index6--2-3]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index7-None-None]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index8-left8-None]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index9-None-right9]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index10-left10-None]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index11-None-right11]",
"dask/dataframe/tests/test_dataframe.py::test_better_errors_object_reductions",
"dask/dataframe/tests/test_dataframe.py::test_sample_empty_partitions",
"dask/dataframe/tests/test_dataframe.py::test_coerce",
"dask/dataframe/tests/test_dataframe.py::test_bool"
]
| []
| BSD 3-Clause "New" or "Revised" License | 1,977 | [
"dask/dataframe/accessor.py",
"docs/source/changelog.rst",
"dask/array/core.py"
]
| [
"dask/dataframe/accessor.py",
"docs/source/changelog.rst",
"dask/array/core.py"
]
|
cdent__gabbi-238 | d91d8ec9a5516615289d77a30529f9ea496b98c4 | 2017-12-22 13:29:05 | 6801cba8eabc3fc521bc1df488009bfad19c8012 | diff --git a/gabbi/case.py b/gabbi/case.py
index a7a0f49..2403859 100644
--- a/gabbi/case.py
+++ b/gabbi/case.py
@@ -481,15 +481,15 @@ class HTTPTestCase(testtools.TestCase):
def _replace_headers_template(self, test_name, headers):
replaced_headers = {}
- for name in headers:
- try:
+ try:
+ for name in headers:
replaced_name = self.replace_template(name)
replaced_headers[replaced_name] = self.replace_template(
headers[name]
)
- except TypeError as exc:
- raise exception.GabbiFormatError(
- 'malformed headers in test %s: %s' % (test_name, exc))
+ except TypeError as exc:
+ raise exception.GabbiFormatError(
+ 'malformed headers in test %s: %s' % (test_name, exc))
return replaced_headers
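In effect, the patch hoists the try/except out of the loop so that the iteration itself is covered: a None headers value now surfaces as a GabbiFormatError rather than a bare TypeError. A minimal standalone sketch of that control flow — `replace` and the exception class here are stand-ins, not gabbi's real API:

class GabbiFormatError(ValueError):
    """Stand-in for gabbi.exception.GabbiFormatError."""

def replace_headers(test_name, headers, replace=str.upper):
    replaced = {}
    try:
        for name in headers:          # headers=None raises TypeError right here
            replaced[replace(name)] = replace(headers[name])
    except TypeError as exc:
        raise GabbiFormatError(
            'malformed headers in test %s: %s' % (test_name, exc))
    return replaced

try:
    replace_headers('foo', None)
except GabbiFormatError as exc:
    print(exc)                        # malformed headers in test foo: ...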
| Regression with 1.39.0 and empty response_headers field
One of the tests in Gnocchi has an empty `response_headers` field:
https://github.com/gnocchixyz/gnocchi/blob/master/gnocchi/tests/functional/gabbits/resource.yaml#L503
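For context on why an empty field bites: a bare mapping key in YAML loads as None, so the headers value handed to gabbi is None rather than an empty dict. A quick check, assuming only PyYAML:

import yaml
assert yaml.safe_load("response_headers:") == {'response_headers': None}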
This broke the test suite, as some behaviour changed between Gabbi 1.38.0 and 1.39.0. I've bisected, and the culprit is 8de923ac825bfeb34cd8c9384634c68ebda345bd.
This is not critical, as the fix is easy on Gnocchi's side: remove the empty and useless `response_headers` field from the YAML file (https://github.com/gnocchixyz/gnocchi/pull/599). Still, I thought it might be useful to report it, just in case there's something bigger behind it that we would have missed. | cdent/gabbi
index 65fdc3f..9505e60 100644
--- a/gabbi/tests/test_replacers.py
+++ b/gabbi/tests/test_replacers.py
@@ -18,6 +18,7 @@ import os
import unittest
from gabbi import case
+from gabbi import exception
class EnvironReplaceTest(unittest.TestCase):
@@ -56,3 +57,13 @@ class EnvironReplaceTest(unittest.TestCase):
os.environ['moo'] = "True"
self.assertEqual(True, http_case._environ_replace(message))
+
+
+class TestReplaceHeaders(unittest.TestCase):
+
+ def test_empty_headers(self):
+ """A None value in headers should cause a GabbiFormatError."""
+ http_case = case.HTTPTestCase('test_request')
+ self.assertRaises(
+ exception.GabbiFormatError,
+ http_case._replace_headers_template, 'foo', None)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_git_commit_hash",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 3,
"test_score": 2
},
"num_modified_files": 1
} | 1.39 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest-cov",
"hacking",
"sphinx",
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements.txt",
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
colorama==0.4.5
coverage==6.2
decorator==5.1.1
distlib==0.3.9
docutils==0.18.1
filelock==3.4.1
fixtures==4.0.1
flake8==3.8.4
-e git+https://github.com/cdent/gabbi.git@d91d8ec9a5516615289d77a30529f9ea496b98c4#egg=gabbi
hacking==4.1.0
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
Jinja2==3.0.3
jsonpath-rw==1.4.0
jsonpath-rw-ext==1.2.2
MarkupSafe==2.0.1
mccabe==0.6.1
packaging==21.3
pbr==6.1.1
platformdirs==2.4.0
pluggy==1.0.0
ply==3.11
py==1.11.0
pycodestyle==2.6.0
pyflakes==2.2.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
pytz==2025.2
PyYAML==6.0.1
requests==2.27.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
testtools==2.6.0
toml==0.10.2
tomli==1.2.3
tox==3.28.0
typing_extensions==4.1.1
urllib3==1.26.20
virtualenv==20.17.1
wsgi_intercept==1.13.1
zipp==3.6.0
| name: gabbi
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- colorama==0.4.5
- coverage==6.2
- decorator==5.1.1
- distlib==0.3.9
- docutils==0.18.1
- filelock==3.4.1
- fixtures==4.0.1
- flake8==3.8.4
- hacking==4.1.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- jinja2==3.0.3
- jsonpath-rw==1.4.0
- jsonpath-rw-ext==1.2.2
- markupsafe==2.0.1
- mccabe==0.6.1
- packaging==21.3
- pbr==6.1.1
- platformdirs==2.4.0
- pluggy==1.0.0
- ply==3.11
- py==1.11.0
- pycodestyle==2.6.0
- pyflakes==2.2.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- pytz==2025.2
- pyyaml==6.0.1
- requests==2.27.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- testtools==2.6.0
- toml==0.10.2
- tomli==1.2.3
- tox==3.28.0
- typing-extensions==4.1.1
- urllib3==1.26.20
- virtualenv==20.17.1
- wsgi-intercept==1.13.1
- zipp==3.6.0
prefix: /opt/conda/envs/gabbi
| [
"gabbi/tests/test_replacers.py::TestReplaceHeaders::test_empty_headers"
]
| []
| [
"gabbi/tests/test_replacers.py::EnvironReplaceTest::test_environ_boolean"
]
| []
| Apache License 2.0 | 1,978 | [
"gabbi/case.py"
]
| [
"gabbi/case.py"
]
|
|
palantir__python-language-server-211 | 897980b7e2bd71811311cb49b18cf89ed3aa9cbe | 2017-12-26 01:15:40 | 12b93fe83b9c01a8cdf5a6fe902af60c59742b99 | evandrocoan: @lgeiger, This should have been fixed by https://github.com/palantir/python-language-server/pull/234
lgeiger: > This should have been fixed by #234
👍 I rebased. Let's see what CI thinks.
lgeiger: Thanks @evandrocoan for pointing this out.
I had to revert #220 to fix #239. Now the tests should pass. | diff --git a/pyls/plugins/pycodestyle_lint.py b/pyls/plugins/pycodestyle_lint.py
index 8441281..96efafd 100644
--- a/pyls/plugins/pycodestyle_lint.py
+++ b/pyls/plugins/pycodestyle_lint.py
@@ -12,12 +12,12 @@ def pyls_lint(config, document):
log.debug("Got pycodestyle settings: %s", settings)
opts = {
- 'exclude': ','.join(settings.get('exclude') or []),
- 'filename': ','.join(settings.get('filename') or []),
+ 'exclude': settings.get('exclude'),
+ 'filename': settings.get('filename'),
'hang_closing': settings.get('hangClosing'),
- 'ignore': ','.join(settings.get('ignore') or []),
+ 'ignore': settings.get('ignore'),
'max_line_length': settings.get('maxLineLength'),
- 'select': ','.join(settings.get('select') or []),
+ 'select': settings.get('select'),
}
kwargs = {k: v for k, v in opts.items() if v}
styleguide = pycodestyle.StyleGuide(kwargs)
| Fix `ignore` and `select` settings interface with pycodestyle
In https://github.com/PyCQA/pycodestyle/pull/722 the pycodestyle maintainers declined to change their interface. When passing the `ignore` and `select` settings to `pycodestyle`, a Python list such as `["E201", "E501"]` is required rather than a comma-joined string like `"E201,E501"`; otherwise they trigger the issue described at: https://github.com/tomv564/LSP/issues/244#issuecomment-358753274
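A minimal sketch of the difference, using the same positional-dict call style as the plugin (the codes shown are illustrative):

```python
import pycodestyle

# pycodestyle iterates the `ignore` value, so a comma-joined string ends
# up handled character by character rather than as a list of error codes
# (the behaviour the linked PR declined to change).
good = pycodestyle.StyleGuide({"ignore": ["E201", "E501"]})
bad = pycodestyle.StyleGuide({"ignore": "E201,E501"})
```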
| palantir/python-language-server | diff --git a/test/plugins/test_pycodestyle_lint.py b/test/plugins/test_pycodestyle_lint.py
index 028997f..583da79 100644
--- a/test/plugins/test_pycodestyle_lint.py
+++ b/test/plugins/test_pycodestyle_lint.py
@@ -8,7 +8,7 @@ from pyls.plugins import pycodestyle_lint
DOC_URI = uris.from_fs_path(__file__)
DOC = """import sys
-def hello():
+def hello( ):
\tpass
import json
@@ -40,6 +40,14 @@ def test_pycodestyle(config):
assert mod_import['range']['start'] == {'line': 7, 'character': 0}
assert mod_import['range']['end'] == {'line': 7, 'character': 1}
+ msg = "E201 whitespace after '('"
+ mod_import = [d for d in diags if d['message'] == msg][0]
+
+ assert mod_import['code'] == 'E201'
+ assert mod_import['severity'] == lsp.DiagnosticSeverity.Warning
+ assert mod_import['range']['start'] == {'line': 2, 'character': 10}
+ assert mod_import['range']['end'] == {'line': 2, 'character': 14}
+
def test_pycodestyle_config(workspace):
""" Test that we load config files properly.
@@ -66,7 +74,7 @@ def test_pycodestyle_config(workspace):
assert [d for d in diags if d['code'] == 'W191']
content = {
- 'setup.cfg': ('[pycodestyle]\nignore = W191', True),
+ 'setup.cfg': ('[pycodestyle]\nignore = W191, E201', True),
'tox.ini': ('', False)
}
@@ -77,18 +85,16 @@ def test_pycodestyle_config(workspace):
# And make sure we don't get any warnings
diags = pycodestyle_lint.pyls_lint(config, doc)
- assert len([d for d in diags if d['code'] == 'W191']) == 0 if working else 1
+ assert len([d for d in diags if d['code'] == 'W191']) == (0 if working else 1)
+ assert len([d for d in diags if d['code'] == 'E201']) == (0 if working else 1)
+ assert [d for d in diags if d['code'] == 'W391']
os.unlink(os.path.join(workspace.root_path, conf_file))
# Make sure we can ignore via the PYLS config as well
- config.update({'plugins': {'pycodestyle': {'ignore': ['W191']}}})
+ config.update({'plugins': {'pycodestyle': {'ignore': ['W191', 'E201']}}})
# And make sure we only get one warning
diags = pycodestyle_lint.pyls_lint(config, doc)
assert not [d for d in diags if d['code'] == 'W191']
-
- # Ignore both warnings
- config.update({'plugins': {'pycodestyle': {'ignore': ['W191', 'W391']}}})
- # And make sure we get neither
- assert not [d for d in diags if d['code'] == 'W191']
- assert not [d for d in diags if d['code'] == 'W391']
+ assert not [d for d in diags if d['code'] == 'E201']
+ assert [d for d in diags if d['code'] == 'W391']
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 0.13 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"coverage"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cachetools==5.5.2
chardet==5.2.0
colorama==0.4.6
configparser==7.2.0
coverage==7.8.0
distlib==0.3.9
exceptiongroup==1.2.2
filelock==3.18.0
future==1.0.0
iniconfig==2.1.0
jedi==0.19.2
json-rpc==1.15.0
mccabe==0.7.0
packaging==24.2
parso==0.8.4
platformdirs==4.3.7
pluggy==1.5.0
pycodestyle==2.13.0
pydocstyle==6.3.0
pyflakes==3.3.1
pyproject-api==1.9.0
pytest==8.3.5
pytest-cov==6.0.0
-e git+https://github.com/palantir/python-language-server.git@897980b7e2bd71811311cb49b18cf89ed3aa9cbe#egg=python_language_server
pytoolconfig==1.3.1
rope==1.13.0
snowballstemmer==2.2.0
tomli==2.2.1
tox==4.25.0
typing_extensions==4.13.0
versioneer==0.29
virtualenv==20.29.3
yapf==0.43.0
| name: python-language-server
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cachetools==5.5.2
- chardet==5.2.0
- colorama==0.4.6
- configparser==7.2.0
- coverage==7.8.0
- distlib==0.3.9
- exceptiongroup==1.2.2
- filelock==3.18.0
- future==1.0.0
- iniconfig==2.1.0
- jedi==0.19.2
- json-rpc==1.15.0
- mccabe==0.7.0
- packaging==24.2
- parso==0.8.4
- platformdirs==4.3.7
- pluggy==1.5.0
- pycodestyle==2.13.0
- pydocstyle==6.3.0
- pyflakes==3.3.1
- pyproject-api==1.9.0
- pytest==8.3.5
- pytest-cov==6.0.0
- pytoolconfig==1.3.1
- rope==1.13.0
- snowballstemmer==2.2.0
- tomli==2.2.1
- tox==4.25.0
- typing-extensions==4.13.0
- versioneer==0.29
- virtualenv==20.29.3
- yapf==0.43.0
prefix: /opt/conda/envs/python-language-server
| [
"test/plugins/test_pycodestyle_lint.py::test_pycodestyle_config"
]
| []
| [
"test/plugins/test_pycodestyle_lint.py::test_pycodestyle"
]
| []
| MIT License | 1,979 | [
"pyls/plugins/pycodestyle_lint.py"
]
| [
"pyls/plugins/pycodestyle_lint.py"
]
|
Agizin__Algorithm-Visualization-13 | f0641b860b384bf2760819b4f1bd5548261718c3 | 2017-12-26 15:26:58 | f0641b860b384bf2760819b4f1bd5548261718c3 | diff --git a/algviz/interface/__init__.py b/algviz/interface/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/algviz/interface/high_level.py b/algviz/interface/high_level.py
new file mode 100644
index 0000000..41d78f6
--- /dev/null
+++ b/algviz/interface/high_level.py
@@ -0,0 +1,19 @@
+import sys
+
+from . import output, visitors
+
+__output_manager = None
+
+def do_setup():
+ global __output_manager
+ if __output_manager is None:
+ __output_manager = output.OutputManager(sys.stdout)
+
+def show(obj, var=None, api=None, metadata=None):
+ global __output_manager
+ do_setup()
+ if api is None:
+ api = visitors.DispatchVisitor
+ visitor = api(__output_manager)
+ with __output_manager.start_snapshot():
+ visitor.traverse(obj, var=var, metadata=metadata)
diff --git a/algviz/interface/output.py b/algviz/interface/output.py
new file mode 100644
index 0000000..eb6a05e
--- /dev/null
+++ b/algviz/interface/output.py
@@ -0,0 +1,201 @@
+import contextlib
+import json
+import sys
+from algviz.parser import json_objects
+
+class OutputStateError(Exception):
+ """For output operations that don't make sense given the state of the output"""
+
+class _OutputContext:
+ """Don't work with this class directly. Prefer to use OutputManager."""
+ def __init__(self, parent=None, outfile=sys.stdout):
+ self.parent = parent
+ self.indent = 2 if parent is None else parent.indent + 2
+ self.comma_needed = False
+ self.outfile = outfile
+ self.closed = False
+ self.cur_child = None
+
+ def write(self, text):
+ # if '\n' in text:
+ # print("newline in next line: {!r}".format(text), file=sys.stderr)
+ print(text, end="", file=self.outfile)
+
+ def begin(self):
+ self.write(self.open_char)
+
+ def end(self):
+ if self.parent is not None:
+ assert not self.parent.closed, "parent block ended before this one did"
+ self.end_child()
+ if self.comma_needed: # we're closing a non-empty empty dict/list, so put a newline
+ if self.parent is not None:
+ self.parent.do_indent()
+ else:
+ self.write("\n")
+ self.write(self.close_char)
+ self.closed = True
+
+ def end_child(self):
+ """If there is a child block (a list or dict), make sure it is
+ closed before printing anything else at this level.
+ """
+ if self.cur_child is not None and not self.cur_child.closed:
+ self.cur_child.end()
+ self.cur_child = None
+
+ def do_indent(self):
+ self.write("\n" + (" " * self.indent))
+
+ def comma_newline(self):
+ self.end_child()
+ if self.comma_needed:
+ # First item in list/dict doesn't need a comma before it
+ self.write(",")
+ else:
+ self.comma_needed = True
+ # Indentation is nice
+ self.do_indent()
+
+ def write_literal(self, lit):
+ """Write a str or int. Could also do list or dict, I suppose."""
+ self.write(json.dumps(lit).strip("\n"))
+
+ def push_child(self, child_cls):
+ if self.cur_child is not None:
+ assert self.cur_child.closed, "began child block without ending previous child"
+ self.cur_child = child_cls(parent=self, outfile=self.outfile)
+ self.cur_child.begin()
+
+
+class _DictOutputContext(_OutputContext):
+ open_char = "{"
+ close_char = "}"
+ def __init__(self, *args, **kwargs):
+ self.keys_used = set()
+ super().__init__(*args, **kwargs)
+
+ def key_val(self, key, val):
+ if key in self.keys_used:
+ raise OutputStateError("Key {!r} is a duplicate in this mapping"
+ .format(key))
+ self.keys_used.add(key)
+ self.comma_newline()
+ self.write_literal(key)
+ self.write(": ")
+ self.write_literal(val)
+
+ def key_push(self, key, *args, **kwargs):
+ self.comma_newline()
+ self.write_literal(key)
+ self.write(": ")
+ self.push_child(*args, **kwargs)
+
+class _ListOutputContext(_OutputContext):
+ open_char = "["
+ close_char = "]"
+ def item(self, val):
+ """Add a literal to the list"""
+ self.comma_newline()
+ self.write_literal(val)
+
+ def item_push(self, *args, **kwargs):
+ """Open a dict or list within this list"""
+ self.comma_newline()
+ self.push_child(*args, **kwargs)
+
+
+class OutputManager:
+ """Useful for outputting valid JSON without maintaining too much state."""
+
+ def __init__(self, outfile=sys.stdout):
+ # self._in_dict = False
+ # self._in_list = True
+ self.outfile = outfile
+ self.snapshot_ctx = _ListOutputContext(parent=None, outfile=outfile)
+ self.context = self.snapshot_ctx
+ self.context.begin()
+ self.uids = set()
+ self._next_key = None
+ # The idea is that the user calls next_item repeatedly if in an array context,
+ # or alternates calls to next_key and next_item if in a dict context.
+ def next_key(self, key):
+ if not isinstance(key, str):
+ raise TypeError("JSON keys must be strings, not {}".format(key))
+ if self._next_key is not None:
+ raise OutputStateError("previous key ({}) not used when new key ({}) added"
+ .format(self._next_key, key))
+ elif not isinstance(self.context, _DictOutputContext):
+ raise OutputStateError("cannot set a key ({}) in non-mapping context {}"
+ .format(key, self.context))
+ else:
+ self._next_key = key
+
+ def _use_key(self):
+ if self._next_key is None:
+ raise OutputStateError("Must set a key with `next_key` before adding a key-value pair")
+ result = self._next_key
+ self._next_key = None
+ return result
+
+ def next_val(self, val):
+ """Use this to append a literal (or JSON-encodable) value as the next
+ item in the current context.
+ """
+ if isinstance(self.context, _DictOutputContext):
+ # sneakily keep track of uids
+ if (self._next_key == json_objects.Tokens.UID
+ or json_objects.aliases.get(self._next_key) == json_objects.Tokens.UID):
+ if val in self.uids:
+ raise OutputStateError("uid {} is already defined in this snapshot"
+ .format(val))
+ else:
+ self.uids.add(val)
+ self.context.key_val(self._use_key(), val)
+ else:
+ self.context.item(val)
+
+ def _push(self, *args, **kwargs):
+ if isinstance(self.context, _DictOutputContext):
+ self.context.key_push(self._use_key(), *args, **kwargs)
+ else:
+ self.context.item_push(*args, **kwargs)
+ self.context = self.context.cur_child
+
+ @contextlib.contextmanager
+ def push(self, mapping=True):
+ """Use this to append a sub-context as the next item in the current
+ context. (The sub-context is for a dictionary by default.)
+
+ After calling `push`, the `OutputManager.context` field will hold the
+ new child context.
+
+ When you're done with that context, you should call `pop` to cleanly
+ end the context that you pushed and to restore the
+ `OutputManager.context` field to its original value.
+ """
+ if mapping:
+ self._push(_DictOutputContext)
+ else:
+ self._push(_ListOutputContext)
+ try:
+ yield
+ finally:
+ self.context.end()
+ self.context = self.context.parent
+
+ def start_snapshot(self):
+ """Write a snapshot. Use as a context manager"""
+ # if self.context is not self.snapshot_ctx:
+ # self.snapshot_ctx.cur_child.
+ self.context = self.snapshot_ctx
+ return self.push(mapping=False)
+
+ def end(self):
+ self.snapshot_ctx.end()
+ self.outfile.flush()
+ # print("", file=self.outfile)
+
+ def current_snapshot(self):
+ return self.snapshot_ctx.cur_child
+
diff --git a/algviz/interface/visitors.py b/algviz/interface/visitors.py
new file mode 100644
index 0000000..2b8dca9
--- /dev/null
+++ b/algviz/interface/visitors.py
@@ -0,0 +1,192 @@
+import abc
+
+from algviz.parser.json_objects import Tokens
+
+class Visitor(metaclass=abc.ABCMeta):
+
+ def __init__(self, output_mngr, data_visitor=None):
+ """Positional parameters:
+ * An `algviz.interface.output.OutputManager` to use for output
+
+ Keyword parameters:
+ * data_visitor -- an instance of visitor to use on any data by default.
+ If this is None, DispatchVisitor will be used.
+ """
+ self.output_mngr = output_mngr
+ # assert hasattr(self, "type_"), "Visitor subclasses need a 'type_' attribute"
+ self.data_visitor = data_visitor
+ if data_visitor is None:
+ self.data_visitor = DispatchVisitor(self.output_mngr)
+
+ def uid(self, obj):
+ """Return a unique identifier for this object.
+
+ The identifier is guaranteed unique until the state of the objects
+ under inspection is altered, i.e. until the objects are mutated,
+ overwritten, or recounted.
+ """
+ return str(id(obj))
+
+ def traverse(self, obj, **kwargs):
+ # To traverse most simple objects, we can just visit them.
+ # More complicated objects like graphs will require actual traversal
+ if self.uid(obj) in self.output_mngr.uids:
+ self.output_mngr.next_val(self.uid(obj))
+ else:
+ with self.output_mngr.push():
+ self.visit(obj, **kwargs)
+
+ @abc.abstractmethod
+ def visit(self, obj, metadata=None, var=None):
+ """
+ Emit the inside of the JSON dictionary representing the given object.
+ I.e. print out all the key-value pairs that represent the object.
+
+ `visit()` shouldn't be called directly but may be called from within
+ `traverse`. Therefore when you implement `visit()` in subclasses, call
+ `traverse` to visit your attributes. (Or just use @algviz macros so
+ you don't have to think about it.)
+ """
+ self.output_mngr.next_key(Tokens.TYPE)
+ self.output_mngr.next_val(self.type_)
+ self.output_mngr.next_key(Tokens.UID)
+ self.output_mngr.next_val(self.uid(obj))
+ if metadata is not None:
+ self.output_mngr.next_key(Tokens.METADATA)
+ self.output_mngr.next_val(metadata)
+ if var is not None:
+ self.output_mngr.next_key(Tokens.VARNAME)
+ self.output_mngr.next_val(var)
+
+
+class DispatchVisitor(Visitor):
+ """Handle objects with a default handler. Useful when data stored is of mixed types.
+
+ Methods are dispatched to instances of an appropriate visitor based on the
+ class of the given object. The MRO is checked so that, e.g., a subclass of
+ Foo will be handled by the Foo handler unless it has its own handler.
+
+ By default, the handlers are given in `_dispatch_visit_dict`. The
+ `updates` keyword argument to `__init__` is used to modify the instance's
+ copy of that dictionary for more customized behavior.
+ """
+
+ def __init__(self, output_mngr, updates=None, **kwargs):
+ # If data_visitor is unspecified, a new instance of this class is
+ # created. So we must use `self` instead to prevent a crash.
+ kwargs.setdefault("data_visitor", self)
+ super().__init__(output_mngr, **kwargs)
+ self.dispatch_dict = _dispatch_visit_dict.copy()
+ if updates is not None:
+ # This lets us do interesting things like choose non-default handlers for some data structure. E.g. assume a `list` instance represents a heap
+ self.dispatch_dict.update(updates)
+
+ def _dispatch_method(self, methodname, obj, *args, **kwargs):
+ # Call the named method on the appropriate visitor subclass
+ for superclass in type(obj).mro():
+ if superclass in self.dispatch_dict:
+ # Get an appropriate visitor
+ visitor = self.dispatch_dict[superclass](self.output_mngr, data_visitor=self)
+ # Call the desired method on that visitor
+ return getattr(visitor, methodname)(obj, *args, **kwargs)
+
+ def uid(self, obj, **kwargs):
+ return self._dispatch_method("uid", obj, **kwargs)
+
+ def traverse(self, obj, *args, **kwargs):
+ return self._dispatch_method("traverse", obj, *args, **kwargs)
+
+ def visit(self, obj, *args, **kwargs):
+ return self._dispatch_method("visit", obj, *args, **kwargs)
+
+_dispatch_visit_dict = {
+ # list: ArrayVisitor,
+ # int: NumberVisitor,
+}
+def default_for_type(*types):
+ """Decorated class will become the default visitor for the given type(s).
+ See DispatchVisitor.
+
+ Returns a decorator
+ """
+ def _decorator(cls):
+ for type_ in types:
+ assert type_ not in _dispatch_visit_dict, (
+ "Multiple handlers for type {}: {} and {}".format(
+ type_, cls, _dispatch_visit_dict[type_]))
+ _dispatch_visit_dict[type_] = cls
+ return cls
+ return _decorator
+
+@default_for_type(list)
+class ArrayVisitor(Visitor):
+ """Visit an array, letting `self.data_visitor` traverse each item in the array.
+
+ This visitor handles the array with `self.length` and `self.get_item`. If
+ your object implements __len__ and __getitem__, then you won't need to
+ change those methods. (On the other hand, you could override these methods
+ to do something cool, e.g. treat an int as an array of True and False
+ values.)
+ """
+ type_ = Tokens.ARRAY_T
+ # We don't assume the object being treated as an Array is reasonable.
+ # E.g. you could easily have an Array of bits represented by an int.
+
+ def length(self, array):
+ return len(array)
+
+ def get_item(self, array, i):
+ return array[i]
+
+ def visit(self, array, *args, **kwargs):
+ """
+ context is guaranteed to be a dictionary context where the array body should go, or else
+ If we make it here, somebody already checked that the uid hasn't been included in this snapshot yet.
+ """
+ super().visit(array, *args, **kwargs) # UID and TYPE
+ self.output_mngr.next_key(Tokens.DATA)
+ with self.output_mngr.push(mapping=False):
+ for i in range(self.length(array)):
+ self.data_visitor.traverse(self.get_item(array, i))
+
+# class TreeVisitor
+
+@default_for_type(int, float)
+class NumberVisitor(Visitor):
+
+ def traverse(self, i, **kwargs):
+ # A float or int can be handed straight to the output manager
+ # This is a rare case where it's appropriate to reimplement `traverse`
+ assert isinstance(i, (float, int))
+ self.output_mngr.next_val(i)
+
+ def visit(self, i, *args, **kwargs):
+ raise NotImplementedError("Something has gone wrong if we're visiting an int (since visiting it implies making a JSON dictionary for it)")
+
+
+@default_for_type(str)
+class StringVisitor(Visitor):
+
+ type_ = Tokens.STRING_T
+
+ def to_str(self, obj):
+ """Override this if you have some non-string object that you want to
+ display as a string, and if calling `__str__` on it isn't good enough.
+ (E.g. if you need to do `bytes.to_string(encoding="UTF-8")` instead.)
+
+ `to_str` should return a string.
+ """
+ return str(obj)
+
+ def visit(self, str_, *args, **kwargs):
+ super().visit(str_, *args, **kwargs)
+ self.output_mngr.next_key(Tokens.DATA)
+ self.output_mngr.next_val(self.to_str(str_))
+
+@default_for_type(object)
+class WidgetVisitor(Visitor):
+ """A Widget is a "don't care" object, quite like a `void*`"""
+ type_ = Tokens.WIDGET_T
+
+ def visit(self, *args, **kwargs):
+ return super().visit(*args, **kwargs)
diff --git a/algviz/interface/weird_visitors.py b/algviz/interface/weird_visitors.py
new file mode 100644
index 0000000..45ed59c
--- /dev/null
+++ b/algviz/interface/weird_visitors.py
@@ -0,0 +1,17 @@
+from . import visitors
+import math
+
+class BitmapArrayVisitor(visitors.ArrayVisitor):
+ """Interpret an `int` as an array of 0s and 1s"""
+
+ def __init__(self, output_mngr, *args, data_visitor=None, **kwargs):
+ if data_visitor is None:
+ data_visitor = visitors.NumberVisitor(output_mngr)
+ super().__init__(output_mngr, *args, data_visitor=data_visitor, **kwargs)
+
+ def length(self, x):
+ return math.ceil(math.log(x, 2))
+
+ def get_item(self, x, i):
+ # Return the i'th bit of x
+ return int(bool(x & (2**i)))
diff --git a/algviz/parser/json_objects.py b/algviz/parser/json_objects.py
index cd36f94..ace9048 100644
--- a/algviz/parser/json_objects.py
+++ b/algviz/parser/json_objects.py
@@ -1,5 +1,7 @@
import json
from . import structures
+import logging
+logger = logging.getLogger(__name__)
class Tokens:
"""Tokens we expect to see in the JSON"""
@@ -204,6 +206,18 @@ def decode_snapshot(*objects):
skip=json_keys_to_skip)
return sd.finalize()
+def reads(text):
+ """This smoothly handles the case where we never printed the closing "]",
+ since that's hard to do."""
+ try:
+ return decode_json(text)
+ except json.JSONDecodeError:
+ logger.info("decoding again with extra ']' added on the end")
+ return decode_json(text + "]")
+
+def read(file_obj):
+ return reads(file_obj.read())
+
def validate(json_stuff):
# We will want to check stuff here, but obviously we don't yet.
# TODO open an issue for this.
diff --git a/algviz/tools/quicksort_tree.py b/algviz/tools/quicksort_tree.py
new file mode 100644
index 0000000..18efc23
--- /dev/null
+++ b/algviz/tools/quicksort_tree.py
@@ -0,0 +1,59 @@
+"""
+Demonstrates some low-level hacking on our own APIs, by printing a call tree
+for a recursive quicksort function.
+
+This whole thing could be done very differently by using our tree-visitor API,
+once we have one. (The two branches of "quicksort" would be performed by the
+`get_child()` method of the tree API.)
+"""
+
+import sys
+import argparse
+
+from algviz.interface import output
+
+def quicksort(items, uid_str, do_a_thing):
+ do_a_thing(uid_str, items)
+ if len(items) <= 1:
+ return items
+ [pivot, *rest] = items
+ return (quicksort([x for x in rest if x <= pivot], uid_str + "L", do_a_thing)
+ + [pivot]
+ + quicksort([x for x in rest if x > pivot], uid_str + "R", do_a_thing))
+
+def mk_qs_node_visitor(output_manager):
+ def visit_qs_node(uid_str, items):
+ nonlocal output_manager
+ result = {"uid": uid_str,
+ "type": "btnode",
+ "data": {"type": "array",
+ "data": items}}
+
+ if len(items) > 1:
+ # This node will have children
+ result["children"] = [uid_str + "L", uid_str + "R"]
+ # `next_val` can print anything that `json.dumps()` accepts:
+ output_manager.next_val(result)
+ return visit_qs_node
+
+def read_numbers(infile):
+ return [float(num) for num in infile.read().split()]
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="""
+ Quick-sort some numbers and print the tree of calls made
+ Example usage: `echo 1 8 4 5 6 2 9 | %(prog)s -`
+ """)
+ parser.add_argument("infile", type=argparse.FileType("r"),
+ help="File with whitespace-separated numbers to sort")
+ args = parser.parse_args()
+ numbers = read_numbers(args.infile)
+ out = output.OutputManager()
+ visitor = mk_qs_node_visitor(out)
+ with out.start_snapshot():
+ quicksort(numbers, "mycalltree", visitor)
+ out.end()
+
+if __name__ == "__main__":
+ main()
diff --git a/setup.py b/setup.py
index e6337ee..ad6c5e8 100644
--- a/setup.py
+++ b/setup.py
@@ -10,6 +10,7 @@ setup(name='algviz',
entry_points={
"console_scripts": [
"algviz_graph_mockup=algviz.tools.graph_drawing_mockup:main",
+ "algviz_quicksort_example=algviz.tools.quicksort_tree:main",
]},
install_requires=['pygraphviz'],
)
| APIs to print JSON from Python
This is the "API" part of #4 . | Agizin/Algorithm-Visualization | diff --git a/algviz/interface/test_high_level.py b/algviz/interface/test_high_level.py
new file mode 100644
index 0000000..4da7401
--- /dev/null
+++ b/algviz/interface/test_high_level.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python3
+
+import unittest
+from unittest import mock
+
+from . import visitors
+from .testutil import TempFileTestMixin
+from algviz.parser import json_objects
+
+class HighLevelTestCase(TempFileTestMixin, unittest.TestCase):
+
+ def setUp(self):
+ self.setup_tempfile()
+
+ def tearDown(self):
+ self.teardown_tempfile()
+
+ def test_functional_show_interface(self):
+ mylist = [1, 2, 3, 4, 5]
+ with self.patch_stdout(): # replace stdout with self.tempfile
+ from . import high_level as hl
+ hl.show(mylist, "myvarname", visitors.ArrayVisitor)
+ hl.show("mystring", "stringname")
+ self.assertEqual(mylist, [1, 2, 3, 4, 5], msg="We broke the list while printing it")
+ text = self.read_tempfile()
+ [list_snapshot, str_snapshot] = json_objects.reads(text)
+ self.assertEqual(list(list_snapshot.names["myvarname"]),
+ [1, 2, 3, 4, 5])
+ self.assertEqual(str(str_snapshot.names["stringname"]),
+ "mystring")
diff --git a/algviz/interface/test_output.py b/algviz/interface/test_output.py
new file mode 100644
index 0000000..8121be7
--- /dev/null
+++ b/algviz/interface/test_output.py
@@ -0,0 +1,107 @@
+import unittest
+import tempfile
+import contextlib
+
+from . import output
+
+class OutputManagerTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.tmpfile = tempfile.TemporaryFile("r+")
+ self.outman = output.OutputManager(outfile=self.tmpfile)
+
+ def tearDown(self):
+ self.tmpfile.close()
+
+ def test_basic_usage(self):
+ with self.outman.start_snapshot(): # start a snapshot
+ with self.outman.push(): # start an object
+ self.outman.next_key("mykey")
+ with self.outman.push(mapping=False):
+ for i in range(5):
+ self.outman.next_val(i)
+ self.outman.next_key("other")
+ self.outman.next_val("thingy")
+ self.outman.end() # close the list of snapshots
+ result = self._get_text()
+ self.assertEqual(result.strip(),
+ """
+[
+ [
+ {
+ "mykey": [
+ 0,
+ 1,
+ 2,
+ 3,
+ 4
+ ],
+ "other": "thingy"
+ }
+ ]
+]
+ """.strip())
+
+ def test_error_for_duplicate_key(self):
+ with self.assertRaisesRegex(output.OutputStateError,
+ "Key .data. is a duplicate.*"):
+ with self.outman.start_snapshot():
+ with self.outman.push(): # start an object
+ self.outman.next_key("data")
+ self.outman.next_val(1)
+ self.outman.next_key("data")
+ self.outman.next_val(2)
+
+ def test_error_for_invalid_key(self):
+ with self.assertRaisesRegex(TypeError,
+ "JSON keys must be string.*"):
+ with self.outman.start_snapshot():
+ with self.outman.push():
+ self.outman.next_key(12)
+
+ def test_error_for_setting_next_key_without_using_prev_key(self):
+ with self.assertRaisesRegex(output.OutputStateError,
+ "previous key .*foo.* not used .*bar.*"):
+ with self.outman.start_snapshot():
+ with self.outman.push():
+ self.outman.next_key("foo")
+ self.outman.next_key("bar")
+
+ def test_error_for_key_value_pair_in_a_list(self):
+ with self.assertRaisesRegex(output.OutputStateError,
+ "cannot set a key .* in non-mapping context .*"):
+ with self.outman.start_snapshot():
+ self.outman.next_key("asdf")
+
+ def test_error_for_adding_value_with_no_key_in_mapping(self):
+ with self.assertRaisesRegex(output.OutputStateError,
+ "Must set a key .*"):
+ with self.outman.start_snapshot():
+ with self.outman.push():
+ # Do the first key normally
+ self.outman.next_key("llama")
+ self.outman.next_val("elephant")
+ # Now mess up
+ self.outman.next_val("aardvark")
+
+
+ def test_error_for_defining_same_uid_twice_in_snapshot(self):
+ with self.assertRaisesRegex(output.OutputStateError,
+ "uid .*asdf.* already defined in this snapshot"):
+ with self.outman.start_snapshot():
+ with self.outman.push():
+ # First widget
+ self.outman.next_key("uid")
+ self.outman.next_val("asdf")
+ self.outman.next_key("type")
+ self.outman.next_val("widget")
+ with self.outman.push():
+ self.outman.next_key("uid")
+ self.outman.next_val("asdf")
+
+ def _get_text(self):
+ self.tmpfile.seek(0)
+ return self.tmpfile.read()
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/algviz/interface/test_visitors.py b/algviz/interface/test_visitors.py
new file mode 100644
index 0000000..164c96f
--- /dev/null
+++ b/algviz/interface/test_visitors.py
@@ -0,0 +1,95 @@
+import unittest
+
+from algviz.parser import json_objects, structures
+from . import output
+from . import visitors
+from .testutil import TempFileMixin
+
+class VisitorTestCaseMixin(TempFileMixin):
+
+ def setUp(self):
+ self.setup_tempfile()
+ self.output_mngr = output.OutputManager(outfile=self.tempfile)
+ self.visitor = self.visitor_cls(self.output_mngr)
+
+ def tearDown(self):
+ self.teardown_tempfile()
+
+ def read_result(self):
+ self.output_mngr.end()
+ text = self.read_tempfile()
+ return text, json_objects.decode_json(text)
+
+ def to_hell_and_back_full_result(self, instance, **kwargs):
+ """Convenience for test cases where you only need to encode and decode
+ one instance. Returns (json_text, decoded_object)
+ """
+ with self.output_mngr.start_snapshot():
+ self.visitor.traverse(instance, **kwargs)
+ return self.read_result()
+
+ def to_hell_and_back(self, instance, **kwargs):
+ """Visit the object, print it out, decode it, and return the resulting object"""
+ _, snapshots = self.to_hell_and_back_full_result(instance, **kwargs)
+ return snapshots[-1].obj_table.getuid(self.visitor.uid(instance))
+
+ def test_metadata(self):
+ """Make sure metadata makes it through the process the way it should"""
+ def mk_metadata():
+ return {"I": {"AM": ["metadataaaaaaaaaaa", 1]},
+ "the number eight": 8, "note": "keys must be strings"}
+ self.assertIsNot(mk_metadata(), mk_metadata(),
+ msg="""This test doesn't work. We want different
+ instances of identical dictionaries, or else the test
+ can be passed by calling `metadata.clear()`.""")
+ result = self.to_hell_and_back(self.sample_instance(),
+ metadata=mk_metadata())
+ self.assertEqual(mk_metadata(), result.metadata)
+
+ def test_varnames(self):
+ """Ensure the correct object has the correct variable name"""
+ inst1 = self.sample_instance()
+ inst2 = self.sample_instance()
+ with self.output_mngr.start_snapshot():
+ self.visitor.traverse(inst1, var="inst1")
+ self.visitor.traverse(inst2, var="inst2")
+ _, [snapshot] = self.read_result()
+ for inst, name in [(inst1, "inst1"), (inst2, "inst2")]:
+ self.assertEqual(snapshot.names[name],
+ snapshot.obj_table.getuid(self.visitor.uid(inst)))
+
+ def sample_instance(self):
+ """Should return an object suitable for `self.visitor` to traverse.
+
+ Successive calls should return distinct objects.
+ """
+ raise NotImplementedError("Implement in each subclass. See docstring")
+
+
+class WidgetVisitorTestCase(VisitorTestCaseMixin, unittest.TestCase):
+ visitor_cls = visitors.WidgetVisitor
+
+ def test_widget_export_and_import(self):
+ with self.output_mngr.start_snapshot():
+ self.visitor.traverse("Some string", var="first")
+ self.visitor.traverse(7, var="second", metadata={"hello": "world"})
+ _, snapshots = self.read_result()
+ first = snapshots[0].names["first"]
+ self.assertIsInstance(first, structures.Widget)
+ snd = snapshots[0].names["second"]
+ self.assertIsInstance(snd, structures.Widget)
+ self.assertEqual(snd.metadata, {"hello": "world"})
+
+ def sample_instance(self):
+ return object()
+
+class ArrayVisitorTestCase(VisitorTestCaseMixin, unittest.TestCase):
+ visitor_cls = visitors.ArrayVisitor
+
+ def sample_instance(self):
+ return [1, 2, 3]
+
+ def test_array_export_and_import(self):
+ arr = self.to_hell_and_back([1, 2, 3])
+ self.assertIsInstance(arr, structures.Array)
+ self.assertEqual(list(arr), [1, 2, 3])
diff --git a/algviz/interface/test_weird_visitors.py b/algviz/interface/test_weird_visitors.py
new file mode 100644
index 0000000..11e9f57
--- /dev/null
+++ b/algviz/interface/test_weird_visitors.py
@@ -0,0 +1,28 @@
+import unittest
+import tempfile
+
+from algviz.parser import json_objects
+from . import weird_visitors
+from . import output
+
+from .test_visitors import VisitorTestCaseMixin
+
+class BitmapVisitorTestCase(VisitorTestCaseMixin, unittest.TestCase):
+ visitor_cls = weird_visitors.BitmapArrayVisitor
+ def setUp(self):
+ super().setUp()
+ self._next_sample_bool = True
+
+ def test_bitmap_visit(self):
+ with self.output_mngr.start_snapshot():
+ self.visitor.traverse(123, var="mybits") # traverse 123 == 0b1111011 as a bitmap
+ _, snapshots = self.read_result()
+ array = snapshots[0].names["mybits"]
+ self.assertEqual(list(array), [1, 1, 0, 1, 1, 1, 1])
+
+ def sample_instance(self):
+ # This is a hack to make the number returned always be the same
+ # but the UID of two consecutive instances be different.
+ # (True == 1 but id(True) != id(1))
+ self._next_sample_bool ^= True
+ return True if self._next_sample_bool else 1
diff --git a/algviz/interface/testutil.py b/algviz/interface/testutil.py
new file mode 100644
index 0000000..b024db2
--- /dev/null
+++ b/algviz/interface/testutil.py
@@ -0,0 +1,64 @@
+import contextlib
+import tempfile
+import unittest
+from unittest import mock
+
+class TempFileMixin(object):
+ def setup_tempfile(self):
+ self.tempfile = tempfile.TemporaryFile("r+")
+
+ def read_tempfile(self):
+ self.tempfile.seek(0)
+ return self.tempfile.read()
+
+ def teardown_tempfile(self):
+ self.tempfile.close()
+
+ @contextlib.contextmanager
+ def patch_stdout(self):
+ try:
+ with mock.patch("sys.stdout", new=self.tempfile):
+ yield
+ finally:
+ pass
+
+class TempFileTestMixin(TempFileMixin):
+ """Must precede `unittest.TestCase` in the method resolution order (`mro`).
+
+ (This means it must be listed before `unittest.TestCase` in the subclass
+ definition.)
+ """
+ def setUp(self):
+ self.setup_tempfile()
+ super().setUp()
+
+ def tearDown(self):
+ self.teardown_tempfile()
+ super().tearDown()
+
+class TempFileMixinTestCase(TempFileTestMixin, unittest.TestCase):
+
+ def test_patching_stdout(self):
+
+ with self.patch_stdout():
+ print("I am a potato")
+ self.assertEqual(self.read_tempfile(),
+ "I am a potato\n")
+
+ def test_patching_stdout_error_condition(self):
+ class MySillyException(Exception):
+ pass
+ try:
+ with self.patch_stdout():
+ print("foo")
+ raise MySillyException()
+ except MySillyException:
+ pass
+ # Now make sure stdout is normal again...
+ # This means our test may have to print stuff to stdout
+ print("excuse me")
+ self.assertEqual(self.read_tempfile(),
+ "foo\n")
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/algviz/parser/test_json_objects.py b/algviz/parser/test_json_objects.py
index 04c14fc..1f0eac0 100644
--- a/algviz/parser/test_json_objects.py
+++ b/algviz/parser/test_json_objects.py
@@ -46,6 +46,14 @@ class JSONObjectsTestCase(unittest.TestCase):
structures.Array([1, 2, 3, snapshot.names["my_widget"]],
uid="testuid"))
+ def test_can_handle_missing_outermost_close_bracket(self):
+ """Sometimes it's more trouble than it's worth to print the last
+ closing brace, since that amounts to saying "I'm confident there will
+ be no more printing after this!"
+ """
+ self.assertEqual(json_objects.reads('[[{"T": "widget"}]]'),
+ json_objects.reads('[[{"T": "widget"}]'))
+
class GenericDecodingTestCase(unittest.TestCase):
"""Make a subclass of this to test decoding of a specific type of object.
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_issue_reference",
"has_added_files",
"has_many_modified_files",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 3,
"test_score": 2
},
"num_modified_files": 2
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pygraphviz",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/Agizin/Algorithm-Visualization.git@f0641b860b384bf2760819b4f1bd5548261718c3#egg=algviz
exceptiongroup==1.2.2
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pygraphviz @ file:///croot/pygraphviz_1671045577740/work
pytest==8.3.5
tomli==2.2.1
| name: Algorithm-Visualization
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- atk-1.0=2.36.0=ha1a6a79_0
- boost-cpp=1.82.0=hdb19cb5_2
- bzip2=1.0.8=h5eee18b_6
- c-ares=1.19.1=h5eee18b_0
- ca-certificates=2025.2.25=h06a4308_0
- cairo=1.16.0=hb05425b_5
- expat=2.6.4=h6a678d5_0
- font-ttf-dejavu-sans-mono=2.37=hd3eb1b0_0
- font-ttf-inconsolata=2.001=hcb22688_0
- font-ttf-source-code-pro=2.030=hd3eb1b0_0
- font-ttf-ubuntu=0.83=h8b1ccd4_0
- fontconfig=2.14.1=h55d465d_3
- fonts-anaconda=1=h8fa9717_0
- fonts-conda-ecosystem=1=hd3eb1b0_0
- freetype=2.12.1=h4a9f257_0
- fribidi=1.0.10=h7b6447c_0
- gdk-pixbuf=2.42.10=h5eee18b_1
- giflib=5.2.2=h5eee18b_0
- glib=2.78.4=h6a678d5_0
- glib-tools=2.78.4=h6a678d5_0
- gobject-introspection=1.78.1=py39h42194e9_2
- graphite2=1.3.14=h295c915_1
- graphviz=2.50.0=h78213b7_2
- gtk2=2.24.33=h27e1c3a_3
- gts=0.7.6=hb67d8dd_3
- harfbuzz=10.2.0=hf296adc_0
- icu=73.1=h6a678d5_0
- jpeg=9e=h5eee18b_3
- krb5=1.20.1=h143b758_1
- lcms2=2.16=hb9589c4_0
- ld_impl_linux-64=2.40=h12ee557_0
- lerc=4.0.0=h6a678d5_0
- libboost=1.82.0=h109eef0_2
- libcurl=8.12.1=hc9e6f67_0
- libdeflate=1.22=h5eee18b_0
- libedit=3.1.20230828=h5eee18b_0
- libev=4.33=h7f8727e_1
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgd=2.3.3=h6a678d5_3
- libglib=2.78.4=hdc74915_0
- libgomp=11.2.0=h1234567_1
- libiconv=1.16=h5eee18b_3
- libnghttp2=1.57.0=h2d74bed_0
- libpng=1.6.39=h5eee18b_0
- librsvg=2.56.3=hf6914bd_1
- libssh2=1.11.1=h251f7ec_0
- libstdcxx-ng=11.2.0=h1234567_1
- libtiff=4.5.1=hffd6297_1
- libtool=2.4.7=h6a678d5_0
- libuuid=1.41.5=h5eee18b_0
- libwebp=1.2.4=h11a3e52_1
- libwebp-base=1.2.4=h5eee18b_1
- libxcb=1.15=h7f8727e_0
- libxml2=2.13.5=hfdd30dd_0
- lz4-c=1.9.4=h6a678d5_1
- ncurses=6.4=h6a678d5_0
- ninja=1.12.1=h06a4308_0
- ninja-base=1.12.1=hdb19cb5_0
- nspr=4.35=h6a678d5_0
- nss=3.89.1=h6a678d5_0
- openjpeg=2.5.2=he7f1fd0_0
- openssl=3.0.16=h5eee18b_0
- pango=1.50.7=h0fee60c_1
- pcre2=10.42=hebb0a14_1
- pip=25.0=py39h06a4308_0
- pixman=0.40.0=h7f8727e_1
- poppler=24.09.0=hcf11d46_1
- poppler-data=0.4.11=h06a4308_1
- pygraphviz=1.9=py39h5eee18b_1
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- zstd=1.5.6=hc292b87_0
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- tomli==2.2.1
prefix: /opt/conda/envs/Algorithm-Visualization
| [
"algviz/interface/test_high_level.py::HighLevelTestCase::test_functional_show_interface",
"algviz/interface/test_output.py::OutputManagerTestCase::test_basic_usage",
"algviz/interface/test_output.py::OutputManagerTestCase::test_error_for_adding_value_with_no_key_in_mapping",
"algviz/interface/test_output.py::OutputManagerTestCase::test_error_for_defining_same_uid_twice_in_snapshot",
"algviz/interface/test_output.py::OutputManagerTestCase::test_error_for_duplicate_key",
"algviz/interface/test_output.py::OutputManagerTestCase::test_error_for_invalid_key",
"algviz/interface/test_output.py::OutputManagerTestCase::test_error_for_key_value_pair_in_a_list",
"algviz/interface/test_output.py::OutputManagerTestCase::test_error_for_setting_next_key_without_using_prev_key",
"algviz/interface/test_visitors.py::WidgetVisitorTestCase::test_metadata",
"algviz/interface/test_visitors.py::WidgetVisitorTestCase::test_varnames",
"algviz/interface/test_visitors.py::WidgetVisitorTestCase::test_widget_export_and_import",
"algviz/interface/test_visitors.py::ArrayVisitorTestCase::test_array_export_and_import",
"algviz/interface/test_visitors.py::ArrayVisitorTestCase::test_metadata",
"algviz/interface/test_visitors.py::ArrayVisitorTestCase::test_varnames",
"algviz/interface/test_weird_visitors.py::BitmapVisitorTestCase::test_bitmap_visit",
"algviz/interface/test_weird_visitors.py::BitmapVisitorTestCase::test_metadata",
"algviz/interface/test_weird_visitors.py::BitmapVisitorTestCase::test_varnames",
"algviz/interface/testutil.py::TempFileMixinTestCase::test_patching_stdout",
"algviz/interface/testutil.py::TempFileMixinTestCase::test_patching_stdout_error_condition",
"algviz/parser/test_json_objects.py::JSONObjectsTestCase::test_aliases_are_not_already_tokens",
"algviz/parser/test_json_objects.py::JSONObjectsTestCase::test_can_handle_missing_outermost_close_bracket",
"algviz/parser/test_json_objects.py::JSONObjectsTestCase::test_fix_aliases",
"algviz/parser/test_json_objects.py::JSONObjectsTestCase::test_literal_decoding",
"algviz/parser/test_json_objects.py::JSONObjectsTestCase::test_var_key_shows_up_in_namespace",
"algviz/parser/test_json_objects.py::GenericDecodingTestCase::test_equality_depends_on_uid",
"algviz/parser/test_json_objects.py::GenericDecodingTestCase::test_has_proper_metadata",
"algviz/parser/test_json_objects.py::GenericDecodingTestCase::test_has_proper_type",
"algviz/parser/test_json_objects.py::GenericDecodingTestCase::test_has_proper_uid",
"algviz/parser/test_json_objects.py::GenericDecodingTestCase::test_hash_matches_hash_of_placeholder",
"algviz/parser/test_json_objects.py::GenericDecodingTestCase::test_matches_expected_object",
"algviz/parser/test_json_objects.py::GenericDecodingTestCase::test_same_object_method_works_based_on_uid",
"algviz/parser/test_json_objects.py::ArrayDecodingTestCase::test_equality_depends_on_uid",
"algviz/parser/test_json_objects.py::ArrayDecodingTestCase::test_has_proper_metadata",
"algviz/parser/test_json_objects.py::ArrayDecodingTestCase::test_has_proper_type",
"algviz/parser/test_json_objects.py::ArrayDecodingTestCase::test_has_proper_uid",
"algviz/parser/test_json_objects.py::ArrayDecodingTestCase::test_hash_matches_hash_of_placeholder",
"algviz/parser/test_json_objects.py::ArrayDecodingTestCase::test_matches_expected_object",
"algviz/parser/test_json_objects.py::ArrayDecodingTestCase::test_same_object_method_works_based_on_uid",
"algviz/parser/test_json_objects.py::TreeNodeDecodingTestCase::test_equality_depends_on_uid",
"algviz/parser/test_json_objects.py::TreeNodeDecodingTestCase::test_has_proper_metadata",
"algviz/parser/test_json_objects.py::TreeNodeDecodingTestCase::test_has_proper_type",
"algviz/parser/test_json_objects.py::TreeNodeDecodingTestCase::test_has_proper_uid",
"algviz/parser/test_json_objects.py::TreeNodeDecodingTestCase::test_hash_matches_hash_of_placeholder",
"algviz/parser/test_json_objects.py::TreeNodeDecodingTestCase::test_matches_expected_object",
"algviz/parser/test_json_objects.py::TreeNodeDecodingTestCase::test_same_object_method_works_based_on_uid",
"algviz/parser/test_json_objects.py::NullDecodingTestCase::test_equality_depends_on_uid",
"algviz/parser/test_json_objects.py::NullDecodingTestCase::test_has_proper_metadata",
"algviz/parser/test_json_objects.py::NullDecodingTestCase::test_has_proper_type",
"algviz/parser/test_json_objects.py::NullDecodingTestCase::test_has_proper_uid",
"algviz/parser/test_json_objects.py::NullDecodingTestCase::test_hash_matches_hash_of_placeholder",
"algviz/parser/test_json_objects.py::NullDecodingTestCase::test_matches_expected_object",
"algviz/parser/test_json_objects.py::NullDecodingTestCase::test_same_object_method_works_based_on_uid",
"algviz/parser/test_json_objects.py::OtherNullDecodingTestCase::test_equality_depends_on_uid",
"algviz/parser/test_json_objects.py::OtherNullDecodingTestCase::test_has_proper_metadata",
"algviz/parser/test_json_objects.py::OtherNullDecodingTestCase::test_has_proper_type",
"algviz/parser/test_json_objects.py::OtherNullDecodingTestCase::test_has_proper_uid",
"algviz/parser/test_json_objects.py::OtherNullDecodingTestCase::test_hash_matches_hash_of_placeholder",
"algviz/parser/test_json_objects.py::OtherNullDecodingTestCase::test_matches_expected_object",
"algviz/parser/test_json_objects.py::OtherNullDecodingTestCase::test_same_object_method_works_based_on_uid",
"algviz/parser/test_json_objects.py::StringDecodingTestCase::test_equality_depends_on_uid",
"algviz/parser/test_json_objects.py::StringDecodingTestCase::test_has_proper_metadata",
"algviz/parser/test_json_objects.py::StringDecodingTestCase::test_has_proper_type",
"algviz/parser/test_json_objects.py::StringDecodingTestCase::test_has_proper_uid",
"algviz/parser/test_json_objects.py::StringDecodingTestCase::test_hash_matches_hash_of_placeholder",
"algviz/parser/test_json_objects.py::StringDecodingTestCase::test_matches_expected_object",
"algviz/parser/test_json_objects.py::StringDecodingTestCase::test_same_object_method_works_based_on_uid",
"algviz/parser/test_json_objects.py::PointerDecodingTestCase::test_equality_depends_on_uid",
"algviz/parser/test_json_objects.py::PointerDecodingTestCase::test_has_proper_metadata",
"algviz/parser/test_json_objects.py::PointerDecodingTestCase::test_has_proper_type",
"algviz/parser/test_json_objects.py::PointerDecodingTestCase::test_has_proper_uid",
"algviz/parser/test_json_objects.py::PointerDecodingTestCase::test_hash_matches_hash_of_placeholder",
"algviz/parser/test_json_objects.py::PointerDecodingTestCase::test_matches_expected_object",
"algviz/parser/test_json_objects.py::PointerDecodingTestCase::test_same_object_method_works_based_on_uid",
"algviz/parser/test_json_objects.py::GraphDecodingTestCase::test_equality_depends_on_uid",
"algviz/parser/test_json_objects.py::GraphDecodingTestCase::test_has_proper_metadata",
"algviz/parser/test_json_objects.py::GraphDecodingTestCase::test_has_proper_type",
"algviz/parser/test_json_objects.py::GraphDecodingTestCase::test_has_proper_uid",
"algviz/parser/test_json_objects.py::GraphDecodingTestCase::test_hash_matches_hash_of_placeholder",
"algviz/parser/test_json_objects.py::GraphDecodingTestCase::test_matches_expected_object",
"algviz/parser/test_json_objects.py::GraphDecodingTestCase::test_same_object_method_works_based_on_uid",
"algviz/parser/test_json_objects.py::NodeDecodingTestCase::test_equality_depends_on_uid",
"algviz/parser/test_json_objects.py::NodeDecodingTestCase::test_has_proper_metadata",
"algviz/parser/test_json_objects.py::NodeDecodingTestCase::test_has_proper_type",
"algviz/parser/test_json_objects.py::NodeDecodingTestCase::test_has_proper_uid",
"algviz/parser/test_json_objects.py::NodeDecodingTestCase::test_hash_matches_hash_of_placeholder",
"algviz/parser/test_json_objects.py::NodeDecodingTestCase::test_matches_expected_object",
"algviz/parser/test_json_objects.py::NodeDecodingTestCase::test_same_object_method_works_based_on_uid",
"algviz/parser/test_json_objects.py::EdgeDecodingTestCase::test_equality_depends_on_uid",
"algviz/parser/test_json_objects.py::EdgeDecodingTestCase::test_has_proper_metadata",
"algviz/parser/test_json_objects.py::EdgeDecodingTestCase::test_has_proper_type",
"algviz/parser/test_json_objects.py::EdgeDecodingTestCase::test_has_proper_uid",
"algviz/parser/test_json_objects.py::EdgeDecodingTestCase::test_hash_matches_hash_of_placeholder",
"algviz/parser/test_json_objects.py::EdgeDecodingTestCase::test_matches_expected_object",
"algviz/parser/test_json_objects.py::EdgeDecodingTestCase::test_same_object_method_works_based_on_uid"
]
| []
| []
| []
| null | 1,980 | [
"setup.py",
"algviz/interface/visitors.py",
"algviz/interface/weird_visitors.py",
"algviz/interface/output.py",
"algviz/interface/high_level.py",
"algviz/interface/__init__.py",
"algviz/tools/quicksort_tree.py",
"algviz/parser/json_objects.py"
]
| [
"setup.py",
"algviz/interface/visitors.py",
"algviz/interface/weird_visitors.py",
"algviz/interface/output.py",
"algviz/interface/high_level.py",
"algviz/interface/__init__.py",
"algviz/tools/quicksort_tree.py",
"algviz/parser/json_objects.py"
]
|
|
missionpinball__mpf-1062 | 6ba39ef0491365f87da0a9bc28ab541a600bc7f4 | 2017-12-26 19:49:15 | 2c1bb3aa1e25674916bc4e0d17ccb6c3c87bd01b | diff --git a/mpf/_version.py b/mpf/_version.py
index 11d3ee171..d79f94544 100644
--- a/mpf/_version.py
+++ b/mpf/_version.py
@@ -10,7 +10,7 @@ PyPI.
"""
-__version__ = '0.50.0-dev.44'
+__version__ = '0.50.0-dev.43'
'''The full version of MPF.'''
__short_version__ = '0.50'
diff --git a/mpf/core/config_processor.py b/mpf/core/config_processor.py
index 95868ca8b..56b8ad0fc 100755
--- a/mpf/core/config_processor.py
+++ b/mpf/core/config_processor.py
@@ -26,6 +26,9 @@ class ConfigProcessor(object):
if not ConfigValidator.config_spec:
ConfigValidator.load_config_spec()
+ if not config:
+ return dict()
+
for k in config.keys():
try:
if config_type not in ConfigValidator.config_spec[k][
diff --git a/mpf/core/data_manager.py b/mpf/core/data_manager.py
index b06683d07..42f5a4f39 100644
--- a/mpf/core/data_manager.py
+++ b/mpf/core/data_manager.py
@@ -73,6 +73,9 @@ class DataManager(MpfController):
self.debug_log("Didn't find the %s file. No prob. We'll create "
"it when we save.", self.name)
+ if not self.data:
+ self.data = {}
+
def get_data(self, section=None):
"""Return the value of this DataManager's data.
diff --git a/mpf/core/machine.py b/mpf/core/machine.py
index ea4067514..b6e542940 100644
--- a/mpf/core/machine.py
+++ b/mpf/core/machine.py
@@ -698,8 +698,7 @@ class MachineController(LogMixin):
self.info_log("Starting the main run loop.")
try:
init = Util.ensure_future(self.initialise(), loop=self.clock.loop)
- self.clock.loop.run_until_complete(Util.first([init, self.stop_future], cancel_others=False,
- loop=self.clock.loop))
+ self.clock.loop.run_until_complete(Util.first([init, self.stop_future], loop=self.clock.loop))
except RuntimeError:
# do not show a runtime useless runtime error
self.error_log("Failed to initialise MPF")
diff --git a/mpf/file_interfaces/yaml_interface.py b/mpf/file_interfaces/yaml_interface.py
index 4be0ba59c..e68a4b31d 100644
--- a/mpf/file_interfaces/yaml_interface.py
+++ b/mpf/file_interfaces/yaml_interface.py
@@ -275,7 +275,7 @@ class YamlInterface(FileInterface):
@staticmethod
def process(data_string: Iterable[str]) -> dict:
"""Parse yaml from a string."""
- return Util.keys_to_lower(yaml.load(data_string, Loader=MpfLoader))
+ return yaml.load(data_string, Loader=MpfLoader)
def save(self, filename: str, data: dict) -> None: # pragma: no cover
"""Save config to yaml file."""
| String placeholders are lowercased. Support uppercase strings.
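A minimal sketch of the symptom (hypothetical snippet; the mixed-case key mirrors the `line1Num` show token that the test diff below renames to match the now case-preserving loader):

```python
from mpf.file_interfaces.yaml_interface import YamlInterface

cfg = YamlInterface.process("show_tokens:\n  line1Num: tag1")
# expected:        {'show_tokens': {'line1Num': 'tag1'}}
# before this fix: {'show_tokens': {'line1num': 'tag1'}}  (Util.keys_to_lower)
```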
Fix and test. | missionpinball/mpf | diff --git a/mpf/tests/machine_files/config_interface/config/test_config_interface.yaml b/mpf/tests/machine_files/config_interface/config/test_config_interface.yaml
index f5a729241..20966a09a 100644
--- a/mpf/tests/machine_files/config_interface/config/test_config_interface.yaml
+++ b/mpf/tests/machine_files/config_interface/config/test_config_interface.yaml
@@ -26,6 +26,3 @@ test_section:
case_sensitive_1: test
Case_sensitive_2: test
case_sensitive_3: Test
-
-Test_section_1:
- test: test
diff --git a/mpf/tests/machine_files/p_roc/config/config.yaml b/mpf/tests/machine_files/p_roc/config/config.yaml
index 3e718b0f0..c35357873 100644
--- a/mpf/tests/machine_files/p_roc/config/config.yaml
+++ b/mpf/tests/machine_files/p_roc/config/config.yaml
@@ -4,7 +4,7 @@ hardware:
driverboards: pdb
platform: p_roc
-P_ROC:
+p_roc:
dmd_timing_cycles: 1, 2, 3, 4
switches:
diff --git a/mpf/tests/machine_files/shows/config/test_shows.yaml b/mpf/tests/machine_files/shows/config/test_shows.yaml
index f6235c992..c17e8c680 100644
--- a/mpf/tests/machine_files/shows/config/test_shows.yaml
+++ b/mpf/tests/machine_files/shows/config/test_shows.yaml
@@ -171,8 +171,8 @@ show_player:
show_assoc_tokens:
speed: 1
show_tokens:
- line1Num: tag1
- line1Color: red
+ line1num: tag1
+ line1color: red
stop_show_assoc_tokens:
show_assoc_tokens:
action: stop
diff --git a/mpf/tests/test_Config.py b/mpf/tests/test_Config.py
index 97014c7b1..9246105be 100644
--- a/mpf/tests/test_Config.py
+++ b/mpf/tests/test_Config.py
@@ -14,8 +14,6 @@ class TestConfig(MpfTestCase):
self.add_to_config_validator('test_section',
dict(__valid_in__='machine'))
- self.add_to_config_validator('test_section_1',
- dict(__valid_in__='machine'))
super().setUp()
@@ -49,19 +47,16 @@ class TestConfig(MpfTestCase):
self.assertEqual('+5', self.machine.config['test_section']['str_plus5'])
self.assertEqual('+0.5', self.machine.config['test_section']['str_plus0point5'])
- # keys should be all lowercase
+ # keys should keep case
self.assertIn('case_sensitive_1', self.machine.config['test_section'])
- self.assertIn('case_sensitive_2', self.machine.config['test_section'])
+ self.assertIn('Case_sensitive_2', self.machine.config['test_section'])
self.assertIn('case_sensitive_3', self.machine.config['test_section'])
# values should be case sensitive
self.assertEqual(self.machine.config['test_section']['case_sensitive_1'], 'test')
- self.assertEqual(self.machine.config['test_section']['case_sensitive_2'], 'test')
+ self.assertEqual(self.machine.config['test_section']['Case_sensitive_2'], 'test')
self.assertEqual(self.machine.config['test_section']['case_sensitive_3'], 'Test')
- # key should be lowercase even though it's uppercase in the config
- self.assertIn('test_section_1', self.machine.config)
-
def test_config_validator(self):
validation_failure_info = (("key", "entry"), "subkey")
# test config spec syntax error
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 5
} | 0.33 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc",
"pip install -U pip setuptools",
"pip install Cython==0.24.1"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | asciimatics==1.14.0
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
Cython==0.24.1
future==1.0.0
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
-e git+https://github.com/missionpinball/mpf.git@6ba39ef0491365f87da0a9bc28ab541a600bc7f4#egg=mpf
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
Pillow==8.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
psutil==7.0.0
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyfiglet==0.8.post1
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pyserial==3.5
pyserial-asyncio==0.6
pytest==6.2.4
ruamel.base==1.0.0
ruamel.yaml==0.10.23
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing==3.7.4.3
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
wcwidth==0.2.13
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: mpf
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- asciimatics==1.14.0
- cython==0.24.1
- future==1.0.0
- pillow==8.4.0
- pip==21.3.1
- psutil==7.0.0
- pyfiglet==0.8.post1
- pyserial==3.5
- pyserial-asyncio==0.6
- ruamel-base==1.0.0
- ruamel-yaml==0.10.23
- setuptools==59.6.0
- typing==3.7.4.3
- wcwidth==0.2.13
prefix: /opt/conda/envs/mpf
| [
"mpf/tests/test_Config.py::TestConfig::test_config_file"
]
| []
| [
"mpf/tests/test_Config.py::TestConfig::test_config_merge",
"mpf/tests/test_Config.py::TestConfig::test_config_validator"
]
| []
| MIT License | 1,981 | [
"mpf/file_interfaces/yaml_interface.py",
"mpf/core/config_processor.py",
"mpf/_version.py",
"mpf/core/data_manager.py",
"mpf/core/machine.py"
]
| [
"mpf/file_interfaces/yaml_interface.py",
"mpf/core/config_processor.py",
"mpf/_version.py",
"mpf/core/data_manager.py",
"mpf/core/machine.py"
]
|
|
inhumantsar__python-ec2-reaper-15 | d4b0f08b945f95f550149482486c4301f87f3619 | 2017-12-27 16:55:28 | d4b0f08b945f95f550149482486c4301f87f3619 | diff --git a/ec2_reaper/aws_lambda.py b/ec2_reaper/aws_lambda.py
index e9fcdd2..e90645d 100644
--- a/ec2_reaper/aws_lambda.py
+++ b/ec2_reaper/aws_lambda.py
@@ -25,10 +25,15 @@ TAG_MATCHER = json.loads(TAG_MATCHER) if isinstance(TAG_MATCHER, strclasses) els
SLACK_ENDPOINT = os.environ.get('SLACK_ENDPOINT', None)
DEBUG = os.environ.get('DEBUG', True)
+log.debug('startup: got value for DEBUG: {} ({})'.format(DEBUG, type(DEBUG)))
+if isinstance(DEBUG, str):
+ DEBUG = False if DEBUG.lower() == 'false' else True
+
if DEBUG:
log.setLevel(logging.DEBUG)
logging.getLogger('botocore').setLevel(logging.INFO)
logging.getLogger('boto3').setLevel(logging.INFO)
+ log.debug('startup: debug logging on')
else:
log.setLevel(logging.INFO)
logging.getLogger('botocore').setLevel(logging.WARNING)
| lambda function doesn't honor the debug option properly
passed `DEBUG: false` as an env var but it still does NO-OPs and debug logging. the env var is probably being read in by python as a string. | inhumantsar/python-ec2-reaper | diff --git a/tests/test_lambda_handler.py b/tests/test_lambda_handler.py
index bc153b4..7ad0968 100644
--- a/tests/test_lambda_handler.py
+++ b/tests/test_lambda_handler.py
@@ -2,6 +2,7 @@ import logging
import json
import sys
from datetime import datetime, timedelta
+import os
from ec2_reaper import aws_lambda
from ec2_reaper import LOCAL_TZ
@@ -10,12 +11,19 @@ logging.basicConfig(level=logging.DEBUG)
logging.getLogger('botocore').setLevel(logging.INFO)
logging.getLogger('boto3').setLevel(logging.INFO)
+# mock has some weirdness in python 3.3, 3.5, and 3.6
if sys.version_info < (3, 0) or (sys.version_info >= (3, 5) and
sys.version_info < (3, 6)):
from mock import patch
else:
from unittest.mock import patch
+# py 2.7 has reload built in but it's moved around a bit in py3+
+if sys.version_info >= (3, 0) and sys.version_info < (3, 4):
+ from imp import reload
+elif sys.version_info >= (3, 4):
+ from importlib import reload
+
# when no results, handler should have called reap, *not* called (slack) notify,
# and should have returned a happy response json obj,
@patch.object(aws_lambda, 'reap')
@@ -55,3 +63,27 @@ def test_reap_2neg_1pos(mock_notify, mock_reap):
assert r['statusCode'] == 200
assert r['body']['log'] == mock_reap_results
assert r['body']['reaped'] == 1
+
+# env vars come in as strings, so bools like DEBUG need testing
+def test_debug_envvar():
+ from ec2_reaper import aws_lambda as al
+ # true
+ os.environ['DEBUG'] = 'true'
+ reload(al)
+ assert al.DEBUG == True
+ os.environ['DEBUG'] = 'True'
+ reload(al)
+ assert al.DEBUG == True
+
+ # default to safety
+ os.environ['DEBUG'] = 'mooooooooo'
+ reload(al)
+ assert al.DEBUG == True
+
+ # false
+ os.environ['DEBUG'] = 'False'
+ reload(al)
+ assert al.DEBUG == False
+ os.environ['DEBUG'] = 'false'
+ reload(al)
+ assert al.DEBUG == False
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"tox",
"flake8"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
boto3==1.5.6
botocore==1.8.50
certifi==2021.5.30
charset-normalizer==2.0.12
click==6.7
distlib==0.3.9
docutils==0.18.1
-e git+https://github.com/inhumantsar/python-ec2-reaper.git@d4b0f08b945f95f550149482486c4301f87f3619#egg=ec2_reaper
filelock==3.4.1
flake8==5.0.4
idna==3.10
importlib-metadata==4.2.0
importlib-resources==5.4.0
iniconfig==1.1.1
jmespath==0.10.0
mccabe==0.7.0
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing==3.1.4
pytest==7.0.1
python-dateutil==2.9.0.post0
pytz==2017.3
requests==2.27.1
s3transfer==0.1.13
six==1.17.0
slacker==0.9.60
toml==0.10.2
tomli==1.2.3
tox==3.28.0
typing_extensions==4.1.1
urllib3==1.26.20
virtualenv==20.16.2
zipp==3.6.0
| name: python-ec2-reaper
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- boto3==1.5.6
- botocore==1.8.50
- charset-normalizer==2.0.12
- click==6.7
- distlib==0.3.9
- docutils==0.18.1
- filelock==3.4.1
- flake8==5.0.4
- idna==3.10
- importlib-metadata==4.2.0
- importlib-resources==5.4.0
- iniconfig==1.1.1
- jmespath==0.10.0
- mccabe==0.7.0
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- python-dateutil==2.9.0.post0
- pytz==2017.3
- requests==2.27.1
- s3transfer==0.1.13
- six==1.17.0
- slacker==0.9.60
- toml==0.10.2
- tomli==1.2.3
- tox==3.28.0
- typing-extensions==4.1.1
- urllib3==1.26.20
- virtualenv==20.16.2
- zipp==3.6.0
prefix: /opt/conda/envs/python-ec2-reaper
| [
"tests/test_lambda_handler.py::test_debug_envvar"
]
| []
| [
"tests/test_lambda_handler.py::test_reap_no_results",
"tests/test_lambda_handler.py::test_reap_2neg_1pos"
]
| []
| BSD License | 1,982 | [
"ec2_reaper/aws_lambda.py"
]
| [
"ec2_reaper/aws_lambda.py"
]
|
|
inhumantsar__python-ec2-reaper-16 | d4b0f08b945f95f550149482486c4301f87f3619 | 2017-12-27 19:35:06 | d4b0f08b945f95f550149482486c4301f87f3619 | diff --git a/ec2_reaper/aws_lambda.py b/ec2_reaper/aws_lambda.py
index e9fcdd2..ff4f30b 100644
--- a/ec2_reaper/aws_lambda.py
+++ b/ec2_reaper/aws_lambda.py
@@ -25,10 +25,15 @@ TAG_MATCHER = json.loads(TAG_MATCHER) if isinstance(TAG_MATCHER, strclasses) els
SLACK_ENDPOINT = os.environ.get('SLACK_ENDPOINT', None)
DEBUG = os.environ.get('DEBUG', True)
+log.debug('startup: got value for DEBUG: {} ({})'.format(DEBUG, type(DEBUG)))
+if isinstance(DEBUG, str):
+ DEBUG = False if DEBUG.lower() == 'false' else True
+
if DEBUG:
log.setLevel(logging.DEBUG)
logging.getLogger('botocore').setLevel(logging.INFO)
logging.getLogger('boto3').setLevel(logging.INFO)
+ log.debug('startup: debug logging on')
else:
log.setLevel(logging.INFO)
logging.getLogger('botocore').setLevel(logging.WARNING)
@@ -42,9 +47,12 @@ class DateTimeJSONEncoder(json.JSONEncoder):
return json.JSONEncoder.default(self, o)
def _respond(body, error=True, headers=None, status_code=500):
- o = {'statusCode': status_code, 'body': body}
+ o = {'statusCode': status_code}
if headers:
o['headers'] = headers
+
+ # just in case body contains untranslatable datetimes
+ o['body'] = json.loads(json.dumps(body, cls=DateTimeJSONEncoder))
return o
def _get_expires(launch_time, min_age=MIN_AGE):
| lambda return val isn't json serializable
Datetimes cause the lambda's return value to fail JSON serialization. Everything else still happens, but the return code is always an error. | inhumantsar/python-ec2-reaper | diff --git a/tests/test_lambda_handler.py b/tests/test_lambda_handler.py
index bc153b4..298f793 100644
--- a/tests/test_lambda_handler.py
+++ b/tests/test_lambda_handler.py
@@ -2,6 +2,7 @@ import logging
import json
import sys
from datetime import datetime, timedelta
+import os
from ec2_reaper import aws_lambda
from ec2_reaper import LOCAL_TZ
@@ -10,12 +11,19 @@ logging.basicConfig(level=logging.DEBUG)
logging.getLogger('botocore').setLevel(logging.INFO)
logging.getLogger('boto3').setLevel(logging.INFO)
+# mock has some weirdness in python 3.3, 3.5, and 3.6
if sys.version_info < (3, 0) or (sys.version_info >= (3, 5) and
sys.version_info < (3, 6)):
from mock import patch
else:
from unittest.mock import patch
+# py 2.7 has reload built in but it's moved around a bit in py3+
+if sys.version_info >= (3, 0) and sys.version_info < (3, 4):
+ from imp import reload
+elif sys.version_info >= (3, 4):
+ from importlib import reload
+
# when no results, handler should have called reap, *not* called (slack) notify,
# and should have returned a happy response json obj,
@patch.object(aws_lambda, 'reap')
@@ -53,5 +61,29 @@ def test_reap_2neg_1pos(mock_notify, mock_reap):
mock_notify.assert_called()
mock_reap.assert_called_once()
assert r['statusCode'] == 200
- assert r['body']['log'] == mock_reap_results
+ assert r['body']['log'] == json.loads(json.dumps(mock_reap_results, cls=aws_lambda.DateTimeJSONEncoder))
assert r['body']['reaped'] == 1
+
+# env vars come in as strings, so bools like DEBUG need testing
+def test_debug_envvar():
+ from ec2_reaper import aws_lambda as al
+ # true
+ os.environ['DEBUG'] = 'true'
+ reload(al)
+ assert al.DEBUG == True
+ os.environ['DEBUG'] = 'True'
+ reload(al)
+ assert al.DEBUG == True
+
+ # default to safety
+ os.environ['DEBUG'] = 'mooooooooo'
+ reload(al)
+ assert al.DEBUG == True
+
+ # false
+ os.environ['DEBUG'] = 'False'
+ reload(al)
+ assert al.DEBUG == False
+ os.environ['DEBUG'] = 'false'
+ reload(al)
+ assert al.DEBUG == False
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"flake8",
"tox"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
boto3==1.5.6
botocore==1.8.50
certifi==2021.5.30
charset-normalizer==2.0.12
click==6.7
distlib==0.3.9
docutils==0.18.1
-e git+https://github.com/inhumantsar/python-ec2-reaper.git@d4b0f08b945f95f550149482486c4301f87f3619#egg=ec2_reaper
filelock==3.4.1
flake8==5.0.4
idna==3.10
importlib-metadata==4.2.0
importlib-resources==5.4.0
iniconfig==1.1.1
jmespath==0.10.0
mccabe==0.7.0
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing==3.1.4
pytest==7.0.1
python-dateutil==2.9.0.post0
pytz==2017.3
requests==2.27.1
s3transfer==0.1.13
six==1.17.0
slacker==0.9.60
toml==0.10.2
tomli==1.2.3
tox==3.28.0
typing_extensions==4.1.1
urllib3==1.26.20
virtualenv==20.16.2
zipp==3.6.0
| name: python-ec2-reaper
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- boto3==1.5.6
- botocore==1.8.50
- charset-normalizer==2.0.12
- click==6.7
- distlib==0.3.9
- docutils==0.18.1
- filelock==3.4.1
- flake8==5.0.4
- idna==3.10
- importlib-metadata==4.2.0
- importlib-resources==5.4.0
- iniconfig==1.1.1
- jmespath==0.10.0
- mccabe==0.7.0
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- python-dateutil==2.9.0.post0
- pytz==2017.3
- requests==2.27.1
- s3transfer==0.1.13
- six==1.17.0
- slacker==0.9.60
- toml==0.10.2
- tomli==1.2.3
- tox==3.28.0
- typing-extensions==4.1.1
- urllib3==1.26.20
- virtualenv==20.16.2
- zipp==3.6.0
prefix: /opt/conda/envs/python-ec2-reaper
| [
"tests/test_lambda_handler.py::test_reap_2neg_1pos",
"tests/test_lambda_handler.py::test_debug_envvar"
]
| []
| [
"tests/test_lambda_handler.py::test_reap_no_results"
]
| []
| BSD License | 1,983 | [
"ec2_reaper/aws_lambda.py"
]
| [
"ec2_reaper/aws_lambda.py"
]
|
|
quantling__pyndl-127 | 8a6f338b23c8f15c212dffa83ed729aacebe3fa3 | 2017-12-27 21:06:43 | 8a6f338b23c8f15c212dffa83ed729aacebe3fa3 | diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
index ddcc33e..f678cfa 100644
--- a/.github/ISSUE_TEMPLATE.md
+++ b/.github/ISSUE_TEMPLATE.md
@@ -14,8 +14,4 @@
* Step 3
### System details
-* Operating System:
-* Python Version:
-<!-- run `python --version` for version details -->
-* Installed python packages
-<!-- run `pip freeze` for a list of installed packages -->
+<!-- run `python -c "import pyndl; pyndl.sysinfo()"` and copy the output -->
diff --git a/pyndl/__init__.py b/pyndl/__init__.py
index b1632c8..97671e8 100644
--- a/pyndl/__init__.py
+++ b/pyndl/__init__.py
@@ -1,7 +1,13 @@
+import os
+import sys
+import multiprocessing as mp
+from pip._vendor import pkg_resources
+
+
__author__ = ('David-Elias Künstle, Lennard Schneider, '
'Konstantin Sering, Marc Weitz')
__author_email__ = '[email protected]'
-__version__ = '0.3.3'
+__version__ = '0.3.4'
__license__ = 'MIT'
__description__ = ('Naive discriminative learning implements learning and '
'classification models based on the Rescorla-Wagner '
@@ -28,6 +34,46 @@ __doc__ = """
:version: %s
:author: %s
:contact: %s
-:date: 2017-04-18
+:date: 2017-12-27
:copyright: %s
""" % (__description__, __version__, __author__, __author_email__, __license__)
+
+
+def sysinfo():
+ """
+ Prints system the dependency information
+ """
+ pyndl = pkg_resources.working_set.by_key["pyndl"]
+ dependencies = [str(r) for r in pyndl.requires()]
+
+ header = ("Pyndl Information\n"
+ "=================\n\n")
+
+ general = ("General Information\n"
+ "-------------------\n"
+ "Python version: {}\n"
+ "Pyndl version: {}\n\n").format(sys.version.split()[0], __version__)
+
+ uname = os.uname()
+ osinfo = ("Operating System\n"
+ "----------------\n"
+ "OS: {s.sysname} {s.machine}\n"
+ "Kernel: {s.release}\n"
+ "CPU: {cpu_count}\n").format(s=uname, cpu_count=mp.cpu_count())
+
+ if uname.sysname == "Linux":
+ names, *lines = os.popen("free -m").readlines()
+ for identifier in ["Mem:", "Swap:"]:
+ memory = [line for line in lines if identifier in line][0]
+ ix, total, used, *rest = memory.split()
+ osinfo += "{} {}MiB/{}MiB\n".format(identifier, used, total)
+
+ osinfo += "\n"
+
+ deps = ("Dependencies\n"
+ "------------\n")
+
+ deps += "\n".join("{pkg.__name__}: {pkg.__version__}".format(pkg=__import__(dep))
+ for dep in dependencies)
+
+ print(header + general + osinfo + deps)
diff --git a/requirements.txt b/requirements.txt
index 0cabe05..a89ce96 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,3 +6,4 @@ xarray
netCDF4
numpydoc
docopt
+pip
| get version information via function
We should provide a function to show information about the system, the installed packages, etc. like in [pandas](https://github.com/pandas-dev/pandas/blob/master/pandas/_version.py).
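For illustration, a minimal usage sketch of such a helper (the patch below names it `pyndl.sysinfo()`; the report layout described in the comment follows that patch):
```python
import pyndl

# prints a plain-text report: Python and pyndl versions, OS details,
# and the installed version of each dependency
pyndl.sysinfo()
```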
This may be especially useful for issue reporting as it is much more precise than `pip freeze` | quantling/pyndl | diff --git a/tests/test_pyndl.py b/tests/test_pyndl.py
new file mode 100644
index 0000000..fc511d2
--- /dev/null
+++ b/tests/test_pyndl.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python3
+
+# pylint: disable=C0111
+
+import sys
+import re
+from io import StringIO
+from contextlib import redirect_stdout
+
+import pyndl
+
+
+def test_sysinfo():
+ out = StringIO()
+ with redirect_stdout(out):
+ pyndl.sysinfo()
+ out = out.getvalue()
+
+ pattern = re.compile("[a-zA-Z0-9_\. ]*\n[\=]*\n+([a-zA-Z0-9_ ]*\n[\-]*\n"
+ "([a-zA-Z0-9_ ]*: [a-zA-Z0-9_\.\-/ ]*\n+)+)+")
+ assert pattern.match(out)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 3
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-runner",
"coveralls",
"sphinx"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
cftime==1.6.0
charset-normalizer==2.0.12
coverage==6.2
coveralls==3.3.1
cycler==0.11.0
Cython==3.0.12
docopt==0.6.2
docutils==0.18.1
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
Jinja2==3.0.3
kiwisolver==1.3.1
MarkupSafe==2.0.1
matplotlib==3.3.4
netCDF4==1.6.1
numpy==1.19.5
numpydoc==1.1.0
packaging==21.3
pandas==1.1.5
Pillow==8.4.0
pluggy==1.0.0
py==1.11.0
Pygments==2.14.0
-e git+https://github.com/quantling/pyndl.git@8a6f338b23c8f15c212dffa83ed729aacebe3fa3#egg=pyndl
pyparsing==3.1.4
pytest==7.0.1
pytest-runner==5.3.2
python-dateutil==2.9.0.post0
pytz==2025.2
requests==2.27.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
xarray==0.16.2
zipp==3.6.0
| name: pyndl
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- cftime==1.6.0
- charset-normalizer==2.0.12
- coverage==6.2
- coveralls==3.3.1
- cycler==0.11.0
- cython==3.0.12
- docopt==0.6.2
- docutils==0.18.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jinja2==3.0.3
- kiwisolver==1.3.1
- markupsafe==2.0.1
- matplotlib==3.3.4
- netcdf4==1.6.1
- numpy==1.19.5
- numpydoc==1.1.0
- packaging==21.3
- pandas==1.1.5
- pillow==8.4.0
- pluggy==1.0.0
- py==1.11.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-runner==5.3.2
- python-dateutil==2.9.0.post0
- pytz==2025.2
- requests==2.27.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- xarray==0.16.2
- zipp==3.6.0
prefix: /opt/conda/envs/pyndl
| [
"tests/test_pyndl.py::test_sysinfo"
]
| []
| []
| []
| MIT License | 1,984 | [
"pyndl/__init__.py",
".github/ISSUE_TEMPLATE.md",
"requirements.txt"
]
| [
"pyndl/__init__.py",
".github/ISSUE_TEMPLATE.md",
"requirements.txt"
]
|
|
JonathonReinhart__scuba-107 | b323aff30dc09cb16bb19eca41a0c2d15d9a22b6 | 2017-12-28 20:58:49 | b323aff30dc09cb16bb19eca41a0c2d15d9a22b6 | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7943d52..346ce8c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,6 +3,10 @@ All notable changes to this project will be documented in this file.
This project adheres to [Semantic Versioning](http://semver.org/).
## [Unreleased]
+### Changed
+- Allow `script` to be a single string value in the "common script schema"
+ which applies to hooks and aliases (#102)
+
### Fixed
- Display nicer error message if no command is given and image doesn't specify a `Cmd` (#104)
- Don't mangle && in scripts (#100)
diff --git a/doc/yaml-reference.md b/doc/yaml-reference.md
index 6adece5..c8021ed 100644
--- a/doc/yaml-reference.md
+++ b/doc/yaml-reference.md
@@ -54,7 +54,7 @@ hooks:
## Common script schema
Several parts of `.scuba.yml` which define "scripts" use a common schema.
-The *common script schema* can define a "script" in one of two forms:
+The *common script schema* can define a "script" in one of several forms:
The *simple* form is simply a single string value:
```yaml
@@ -63,7 +63,14 @@ hooks:
```
The *complex* form is a mapping, which must contain a `script` subkey, whose
-value is a list of strings making up the script:
+value is either single string value
+```yaml
+hooks:
+ root:
+ script: echo hello
+```
+
+... or a list of strings making up the script:
```yaml
hooks:
root:
diff --git a/example/per_alias_image/.scuba.yml b/example/per_alias_image/.scuba.yml
index 9b9bae0..df73881 100644
--- a/example/per_alias_image/.scuba.yml
+++ b/example/per_alias_image/.scuba.yml
@@ -2,11 +2,12 @@ image: !from_yaml ../common.yml image
aliases:
# This one inherits the default, top-level 'image'
+ # and specifies "script" as a string
default:
- script:
- - cat /etc/os-release
+ script: cat /etc/os-release
# This one specifies a different image to use
+ # and specifies "script" as a list
different:
image: alpine
script:
diff --git a/scuba/config.py b/scuba/config.py
index d1d33eb..b8e0417 100644
--- a/scuba/config.py
+++ b/scuba/config.py
@@ -118,10 +118,13 @@ def _process_script_node(node, name):
if not script:
raise ConfigError("{0}: must have a 'script' subkey".format(name))
- if not isinstance(script, list):
- raise ConfigError("{0}.script: must be a list".format(name))
+ if isinstance(script, list):
+ return script
- return script
+ if isinstance(script, basestring):
+ return [script]
+
+ raise ConfigError("{0}.script: must be a string or list".format(name))
raise ConfigError("{0}: must be string or dict".format(name))
| Allow `script` to be a single string value
Currently, this is invalid:
```yaml
aliases:
foo:
image: xyz
script: echo Hello
``` | JonathonReinhart/scuba | diff --git a/tests/test_config.py b/tests/test_config.py
index 8e4d7c9..118379e 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -8,26 +8,52 @@ from unittest import TestCase
import logging
import os
from os.path import join
-from tempfile import mkdtemp
from shutil import rmtree
from scuba.utils import shlex_split
import scuba.config
-class TestConfig(TestCase):
- def setUp(self):
- self.orig_path = os.getcwd()
- self.path = mkdtemp('scubatest')
- os.chdir(self.path)
- logging.info('Temp path: ' + self.path)
-
- def tearDown(self):
- rmtree(self.path)
- self.path = None
-
- os.chdir(self.orig_path)
- self.orig_path = None
+class TestCommonScriptSchema(TmpDirTestCase):
+ def test_simple(self):
+ '''Simple form: value is a string'''
+ node = 'foo'
+ result = scuba.config._process_script_node(node, 'dontcare')
+ assert_equals(result, ['foo'])
+
+ def test_script_key_string(self):
+ '''Value is a mapping: script is a string'''
+ node = dict(
+ script = 'foo',
+ otherkey = 'other',
+ )
+ result = scuba.config._process_script_node(node, 'dontcare')
+ assert_equals(result, ['foo'])
+
+ def test_script_key_list(self):
+ '''Value is a mapping: script is a list'''
+ node = dict(
+ script = [
+ 'foo',
+ 'bar',
+ ],
+ otherkey = 'other',
+ )
+ result = scuba.config._process_script_node(node, 'dontcare')
+ assert_equals(result, ['foo', 'bar'])
+
+ def test_script_key_mapping_invalid(self):
+ '''Value is a mapping: script is a mapping (invalid)'''
+ node = dict(
+ script = dict(
+ whatisthis = 'idontknow',
+ ),
+ )
+ assert_raises(scuba.config.ConfigError,
+ scuba.config._process_script_node, node, 'dontcare')
+
+
+class TestConfig(TmpDirTestCase):
######################################################################
# Find config
@@ -335,19 +361,6 @@ class TestConfig(TestCase):
assert_raises(scuba.config.ConfigError, scuba.config.load_config, '.scuba.yml')
- # TODO: Any reason this shouldn't be valid?
- def test_hooks_invalid_script_type(self):
- '''hooks with string "script" are invalid'''
- with open('.scuba.yml', 'w') as f:
- f.write('''
- image: na
- hooks:
- user:
- script: this should be in a list under script
- ''')
-
- assert_raises(scuba.config.ConfigError, scuba.config.load_config, '.scuba.yml')
-
def test_hooks_missing_script(self):
'''hooks with dict, but missing "script" are invalid'''
with open('.scuba.yml', 'w') as f:
diff --git a/tests/test_main.py b/tests/test_main.py
index 00f8d4b..588d0e9 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -11,8 +11,7 @@ except ImportError:
import logging
import os
import sys
-from tempfile import mkdtemp, TemporaryFile, NamedTemporaryFile
-from shutil import rmtree
+from tempfile import TemporaryFile, NamedTemporaryFile
import subprocess
import scuba.__main__ as main
@@ -22,22 +21,7 @@ import scuba
DOCKER_IMAGE = 'debian:8.2'
-class TestMain(TestCase):
- def setUp(self):
- # Run each test in its own temp directory
- self.orig_path = os.getcwd()
- self.path = mkdtemp('scubatest')
- os.chdir(self.path)
- logging.info('Temp path: ' + self.path)
-
-
- def tearDown(self):
- # Restore the working dir and cleanup the temp one
- rmtree(self.path)
- self.path = None
- os.chdir(self.orig_path)
- self.orig_path = None
-
+class TestMain(TmpDirTestCase):
def run_scuba(self, args, exp_retval=0, mock_isatty=False, stdin=None):
'''Run scuba, checking its return value
diff --git a/tests/utils.py b/tests/utils.py
index 1dfaa74..1444b3d 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -4,6 +4,8 @@ from nose.tools import *
from os.path import normpath
import tempfile
import shutil
+import unittest
+import logging
def assert_set_equal(a, b):
assert_equal(set(a), set(b))
@@ -100,3 +102,20 @@ class RedirStd(object):
if self.orig_stderr:
sys.stderr = self.orig_stderr
+
+
+class TmpDirTestCase(unittest.TestCase):
+ def setUp(self):
+ # Run each test in its own temp directory
+ self.orig_path = os.getcwd()
+ self.path = tempfile.mkdtemp('scubatest')
+ os.chdir(self.path)
+ logging.info('Temp path: ' + self.path)
+
+
+ def tearDown(self):
+ # Restore the working dir and cleanup the temp one
+ shutil.rmtree(self.path)
+ self.path = None
+ os.chdir(self.orig_path)
+ self.orig_path = None
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 4
} | 2.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc g++ make"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==3.7.1
exceptiongroup==1.2.2
iniconfig==2.1.0
nose==1.3.7
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
PyYAML==6.0.2
-e git+https://github.com/JonathonReinhart/scuba.git@b323aff30dc09cb16bb19eca41a0c2d15d9a22b6#egg=scuba
tomli==2.2.1
| name: scuba
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==3.7.1
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- nose==1.3.7
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pyyaml==6.0.2
- tomli==2.2.1
prefix: /opt/conda/envs/scuba
| [
"tests/test_config.py::TestCommonScriptSchema::test_script_key_string"
]
| [
"tests/test_main.py::TestMain::test_arbitrary_docker_args",
"tests/test_main.py::TestMain::test_args",
"tests/test_main.py::TestMain::test_basic",
"tests/test_main.py::TestMain::test_complex_commands_in_alias",
"tests/test_main.py::TestMain::test_created_file_ownership",
"tests/test_main.py::TestMain::test_dry_run",
"tests/test_main.py::TestMain::test_home_writable_root",
"tests/test_main.py::TestMain::test_home_writable_scubauser",
"tests/test_main.py::TestMain::test_image_entrypoint",
"tests/test_main.py::TestMain::test_image_override",
"tests/test_main.py::TestMain::test_image_override_with_alias",
"tests/test_main.py::TestMain::test_list_aliases",
"tests/test_main.py::TestMain::test_no_cmd",
"tests/test_main.py::TestMain::test_no_docker",
"tests/test_main.py::TestMain::test_redirect_stdin",
"tests/test_main.py::TestMain::test_root_hook",
"tests/test_main.py::TestMain::test_user_hook",
"tests/test_main.py::TestMain::test_user_root",
"tests/test_main.py::TestMain::test_user_scubauser",
"tests/test_main.py::TestMain::test_with_tty",
"tests/test_main.py::TestMain::test_without_tty",
"tests/test_main.py::TestMain::test_yml_not_needed_with_image_override"
]
| [
"tests/test_config.py::TestCommonScriptSchema::test_script_key_list",
"tests/test_config.py::TestCommonScriptSchema::test_script_key_mapping_invalid",
"tests/test_config.py::TestCommonScriptSchema::test_simple",
"tests/test_config.py::TestConfig::test_find_config_cur_dir",
"tests/test_config.py::TestConfig::test_find_config_nonexist",
"tests/test_config.py::TestConfig::test_find_config_parent_dir",
"tests/test_config.py::TestConfig::test_find_config_way_up",
"tests/test_config.py::TestConfig::test_hooks_invalid_list",
"tests/test_config.py::TestConfig::test_hooks_missing_script",
"tests/test_config.py::TestConfig::test_hooks_mixed",
"tests/test_config.py::TestConfig::test_load_config__no_spaces_in_aliases",
"tests/test_config.py::TestConfig::test_load_config_empty",
"tests/test_config.py::TestConfig::test_load_config_image_from_yaml",
"tests/test_config.py::TestConfig::test_load_config_image_from_yaml_missing_arg",
"tests/test_config.py::TestConfig::test_load_config_image_from_yaml_missing_file",
"tests/test_config.py::TestConfig::test_load_config_image_from_yaml_nested_key_missing",
"tests/test_config.py::TestConfig::test_load_config_image_from_yaml_nested_keys",
"tests/test_config.py::TestConfig::test_load_config_image_from_yaml_unicode_args",
"tests/test_config.py::TestConfig::test_load_config_minimal",
"tests/test_config.py::TestConfig::test_load_config_with_aliases",
"tests/test_config.py::TestConfig::test_load_unexpected_node",
"tests/test_config.py::TestConfig::test_process_command_alias_overrides_image",
"tests/test_config.py::TestConfig::test_process_command_aliases_unused",
"tests/test_config.py::TestConfig::test_process_command_aliases_used_noargs",
"tests/test_config.py::TestConfig::test_process_command_aliases_used_withargs",
"tests/test_config.py::TestConfig::test_process_command_empty",
"tests/test_config.py::TestConfig::test_process_command_multiline_aliases_forbid_user_args",
"tests/test_config.py::TestConfig::test_process_command_multiline_aliases_used",
"tests/test_config.py::TestConfig::test_process_command_no_aliases",
"tests/test_main.py::TestMain::test_config_error",
"tests/test_main.py::TestMain::test_handle_get_image_command_error",
"tests/test_main.py::TestMain::test_multiline_alias_no_args_error",
"tests/test_main.py::TestMain::test_no_image_cmd",
"tests/test_main.py::TestMain::test_version"
]
| []
| MIT License | 1,986 | [
"example/per_alias_image/.scuba.yml",
"doc/yaml-reference.md",
"scuba/config.py",
"CHANGELOG.md"
]
| [
"example/per_alias_image/.scuba.yml",
"doc/yaml-reference.md",
"scuba/config.py",
"CHANGELOG.md"
]
|
|
nipy__nipype-2349 | 045b28ef9056fac1107bc4f0707859d043f3bfd1 | 2017-12-29 08:57:36 | 1c174dfcda622fe6cebd71069dadc8cacc87dd6f | diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py
index fc3617036..6bb9a943f 100644
--- a/nipype/interfaces/io.py
+++ b/nipype/interfaces/io.py
@@ -1555,16 +1555,12 @@ class DataFinder(IOBase):
class FSSourceInputSpec(BaseInterfaceInputSpec):
- subjects_dir = Directory(
- mandatory=True, desc='Freesurfer subjects directory.')
- subject_id = Str(
- mandatory=True, desc='Subject name for whom to retrieve data')
- hemi = traits.Enum(
- 'both',
- 'lh',
- 'rh',
- usedefault=True,
- desc='Selects hemisphere specific outputs')
+ subjects_dir = Directory(exists=True, mandatory=True,
+ desc='Freesurfer subjects directory.')
+ subject_id = Str(mandatory=True,
+ desc='Subject name for whom to retrieve data')
+ hemi = traits.Enum('both', 'lh', 'rh', usedefault=True,
+ desc='Selects hemisphere specific outputs')
class FSSourceOutputSpec(TraitedSpec):
| FreeSurferSource doesn't check subjects_dir
### Summary
`FreeSurferSource` doesn't check `subjects_dir` is an existing path.
### How to replicate the behavior
```
from nipype.interfaces.io import FreeSurferSource
fs = FreeSurferSource()
fs.inputs.subjects_dir = 'path/to/no/existing/directory'
fs.inputs.subject_id = 'sub-01'
res = fs.run()
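# completes without raising, even though subjects_dir does not exist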
```
### Actual behavior
Doesn't give an error.
### Expected behavior
Should check if the directory exists (and possibly whether there are files for `subject_id`?).
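For illustration, a sketch of the expected fail-fast behaviour (this mirrors the regression test added below, which marks `subjects_dir` with `exists=True` so assignment raises a `TraitError`):
```python
import nipype.interfaces.io as nio
from nipype.interfaces.base import TraitError

fss = nio.FreeSurferSource()
try:
    fss.inputs.subjects_dir = 'path/to/no/existing/directory'
except TraitError as err:
    # the nonexistent directory is rejected at assignment time
    print('rejected:', err)
```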
| nipy/nipype | diff --git a/nipype/interfaces/tests/test_io.py b/nipype/interfaces/tests/test_io.py
index a2103eadf..76fc9e257 100644
--- a/nipype/interfaces/tests/test_io.py
+++ b/nipype/interfaces/tests/test_io.py
@@ -16,7 +16,7 @@ from collections import namedtuple
import pytest
import nipype
import nipype.interfaces.io as nio
-from nipype.interfaces.base import Undefined
+from nipype.interfaces.base import Undefined, TraitError
# Check for boto
noboto = False
@@ -498,6 +498,12 @@ def test_freesurfersource():
assert fss.inputs.subjects_dir == Undefined
+def test_freesurfersource_incorrectdir():
+ fss = nio.FreeSurferSource()
+ with pytest.raises(TraitError) as err:
+ fss.inputs.subjects_dir = 'path/to/no/existing/directory'
+
+
def test_jsonsink_input():
ds = nio.JSONFileSink()
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 0.14 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
click==8.0.4
configparser==5.2.0
decorator==4.4.2
funcsigs==1.0.2
future==1.0.0
importlib-metadata==4.8.3
iniconfig==1.1.1
isodate==0.6.1
lxml==5.3.1
mock==5.2.0
networkx==2.5.1
nibabel==3.2.2
-e git+https://github.com/nipy/nipype.git@045b28ef9056fac1107bc4f0707859d043f3bfd1#egg=nipype
numpy==1.19.5
packaging==21.3
pluggy==1.0.0
prov==1.5.0
py==1.11.0
pydot==1.4.2
pydotplus==2.0.2
pyparsing==3.1.4
pytest==7.0.1
python-dateutil==2.9.0.post0
rdflib==5.0.0
scipy==1.5.4
simplejson==3.20.1
six==1.17.0
tomli==1.2.3
traits==6.4.1
typing_extensions==4.1.1
zipp==3.6.0
| name: nipype
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- click==8.0.4
- configparser==5.2.0
- decorator==4.4.2
- funcsigs==1.0.2
- future==1.0.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- isodate==0.6.1
- lxml==5.3.1
- mock==5.2.0
- networkx==2.5.1
- nibabel==3.2.2
- numpy==1.19.5
- packaging==21.3
- pluggy==1.0.0
- prov==1.5.0
- py==1.11.0
- pydot==1.4.2
- pydotplus==2.0.2
- pyparsing==3.1.4
- pytest==7.0.1
- python-dateutil==2.9.0.post0
- rdflib==5.0.0
- scipy==1.5.4
- simplejson==3.20.1
- six==1.17.0
- tomli==1.2.3
- traits==6.4.1
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/nipype
| [
"nipype/interfaces/tests/test_io.py::test_freesurfersource_incorrectdir"
]
| []
| [
"nipype/interfaces/tests/test_io.py::test_datagrabber",
"nipype/interfaces/tests/test_io.py::test_selectfiles[SF_args0-inputs_att0-expected0]",
"nipype/interfaces/tests/test_io.py::test_selectfiles[SF_args1-inputs_att1-expected1]",
"nipype/interfaces/tests/test_io.py::test_selectfiles[SF_args2-inputs_att2-expected2]",
"nipype/interfaces/tests/test_io.py::test_selectfiles[SF_args3-inputs_att3-expected3]",
"nipype/interfaces/tests/test_io.py::test_selectfiles[SF_args4-inputs_att4-expected4]",
"nipype/interfaces/tests/test_io.py::test_selectfiles_valueerror",
"nipype/interfaces/tests/test_io.py::test_datagrabber_order",
"nipype/interfaces/tests/test_io.py::test_datasink",
"nipype/interfaces/tests/test_io.py::test_datasink_localcopy",
"nipype/interfaces/tests/test_io.py::test_datasink_substitutions",
"nipype/interfaces/tests/test_io.py::test_datasink_copydir_1",
"nipype/interfaces/tests/test_io.py::test_datasink_copydir_2",
"nipype/interfaces/tests/test_io.py::test_datafinder_depth",
"nipype/interfaces/tests/test_io.py::test_datafinder_unpack",
"nipype/interfaces/tests/test_io.py::test_freesurfersource",
"nipype/interfaces/tests/test_io.py::test_jsonsink_input",
"nipype/interfaces/tests/test_io.py::test_jsonsink[inputs_attributes0]",
"nipype/interfaces/tests/test_io.py::test_jsonsink[inputs_attributes1]"
]
| []
| Apache License 2.0 | 1,989 | [
"nipype/interfaces/io.py"
]
| [
"nipype/interfaces/io.py"
]
|
|
ethereum__pyrlp-46 | 691d1e107dd920882300372dc3d20151942b85af | 2017-12-29 15:02:18 | 691d1e107dd920882300372dc3d20151942b85af | diff --git a/.travis.yml b/.travis.yml
index b6580be..e936b90 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -2,12 +2,20 @@ language: python
python:
- "3.5"
sudo: false
-env:
- - TOXENV=py27
- - TOXENV=py34
- - TOXENV=py35
- - TOXENV=pypy
- #- TOXENV=pypy3
+matrix:
+ include:
+ - python: "2.7"
+ env: TOXENV=py27
+ - python: "3.4"
+ env: TOXENV=py34
+ - python: "3.5"
+ env: TOXENV=py35
+ - python: "3.6"
+ env: TOXENV=py36
+ - python: "pypy"
+ env: TOXENV=pypy
+ - python: "pypy3"
+ env: TOXENV=pypy3
install:
- pip install tox
diff --git a/rlp/sedes/lists.py b/rlp/sedes/lists.py
index ee7b3b4..434ecb0 100644
--- a/rlp/sedes/lists.py
+++ b/rlp/sedes/lists.py
@@ -157,7 +157,7 @@ class Serializable(object):
"""
fields = tuple()
- _sedes = None
+ _cached_sedes = {}
_mutable = True
_cached_rlp = None
@@ -222,9 +222,9 @@ class Serializable(object):
@classmethod
def get_sedes(cls):
- if not cls._sedes:
- cls._sedes = List(sedes for _, sedes in cls.fields)
- return cls._sedes
+ if cls not in cls._cached_sedes:
+ cls._cached_sedes[cls] = List(sedes for _, sedes in cls.fields)
+ return cls._cached_sedes[cls]
@classmethod
def serialize(cls, obj):
diff --git a/tox.ini b/tox.ini
index 23f937a..71f7ae6 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,8 +1,11 @@
[tox]
-envlist = py{27,34,35,py,py3}
+envlist = py{27,34,35,36,py,py3}
[testenv]
-passenv = TRAVIS TRAVIS_JOB_ID TRAVIS_BRANCH
+passenv =
+ TRAVIS
+ TRAVIS_JOB_ID
+ TRAVIS_BRANCH
setenv =
PYTHONPATH = {toxinidir}:{toxinidir}/pyrlp
commands =
@@ -17,5 +20,6 @@ basepython =
py27: python2.7
py34: python3.4
py35: python3.5
+ py36: python3.6
pypy: pypy
pypy3: pypy3
| Serializable.get_sedes() breaks inheritance when child defines different fields
That method [caches the sedes in the _sedes class variable](https://github.com/ethereum/pyrlp/blob/develop/rlp/sedes/lists.py#L226), so if you call get_sedes() on a parent class, any children of that class on which get_sedes() has not been called yet will end up with the parent's sedes, no matter what is defined in their fields attribute.
```
class CustomSerializable(rlp.Serializable):
pass
class A(rlp.Serializable):
fields = [('foo', rlp.Serializable)]
class B(A):
fields = [('foo', CustomSerializable)]
print(A.get_sedes())
print(B.get_sedes())
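# output below: both classes report the parent's sedes because of the shared _sedes cache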
[<class 'rlp.sedes.lists.Serializable'>]
[<class 'rlp.sedes.lists.Serializable'>]
``` | ethereum/pyrlp | diff --git a/tests/test_serializable.py b/tests/test_serializable.py
index 92394c1..371c8b3 100644
--- a/tests/test_serializable.py
+++ b/tests/test_serializable.py
@@ -192,3 +192,59 @@ def test_make_mutable():
assert test2.is_mutable()
assert test1a.is_mutable()
assert test1b.is_mutable()
+
+
+def test_inheritance():
+ class Parent(Serializable):
+ fields = (
+ ('field1', big_endian_int),
+ ('field2', big_endian_int),
+ )
+ class Child1(Parent):
+ fields = (
+ ('field1', big_endian_int),
+ ('field2', big_endian_int),
+ ('field3', big_endian_int),
+ )
+ class Child2(Parent):
+ fields = (
+ ('field1', big_endian_int),
+ )
+ class Child3(Parent):
+ fields = (
+ ('new_field1', big_endian_int),
+ ('field2', big_endian_int),
+ )
+ class Child4(Parent):
+ fields = (
+ ('field1', binary),
+ ('field2', binary),
+ ('field3', big_endian_int),
+ )
+
+ p = Parent(1, 2)
+ c1 = Child1(1, 2, 3)
+ c2 = Child2(1)
+ c3 = Child3(1, 2)
+ c4 = Child4(b'a', b'b', 5)
+
+ assert Parent.serialize(p) == [b'\x01', b'\x02']
+ assert Child1.serialize(c1) == [b'\x01', b'\x02', b'\x03']
+ assert Child2.serialize(c2) == [b'\x01']
+ assert Child3.serialize(c3) == [b'\x01', b'\x02']
+ assert Child4.serialize(c4) == [b'a', b'b', b'\x05']
+
+ with pytest.raises(AttributeError):
+ p.field3
+ with pytest.raises(AttributeError):
+ p.new_field1
+
+ with pytest.raises(AttributeError):
+ c2.field2
+ with pytest.raises(AttributeError):
+ c2.new_field1
+
+ with pytest.raises(AttributeError):
+ c3.field1
+ with pytest.raises(AttributeError):
+ c3.field3
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 3
} | 0.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
-e git+https://github.com/ethereum/pyrlp.git@691d1e107dd920882300372dc3d20151942b85af#egg=rlp
tomli==2.2.1
| name: pyrlp
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- tomli==2.2.1
- wheel==0.23.0
prefix: /opt/conda/envs/pyrlp
| [
"tests/test_serializable.py::test_inheritance"
]
| []
| [
"tests/test_serializable.py::test_serializable",
"tests/test_serializable.py::test_make_immutable",
"tests/test_serializable.py::test_make_mutable"
]
| []
| MIT License | 1,990 | [
".travis.yml",
"rlp/sedes/lists.py",
"tox.ini"
]
| [
".travis.yml",
"rlp/sedes/lists.py",
"tox.ini"
]
|
|
beetbox__beets-2775 | 721fedd7b9050ff0b6f17c33bdf00ea638484a03 | 2017-12-30 20:15:22 | 0f9ffeec3eb95f2612e78bee6380af984f639b78 | nguillaumin: This is the first time I do something in Python, so let me know if I did anything wrong, thanks! | diff --git a/beets/autotag/mb.py b/beets/autotag/mb.py
index 5c484d2b3..b5ed4f358 100644
--- a/beets/autotag/mb.py
+++ b/beets/autotag/mb.py
@@ -36,6 +36,9 @@ if util.SNI_SUPPORTED:
else:
BASE_URL = 'http://musicbrainz.org/'
+NON_AUDIO_FORMATS = ['Data CD', 'DVD', 'DVD-Video', 'Blu-ray', 'HD-DVD', 'VCD',
+ 'SVCD', 'UMD', 'VHS']
+
musicbrainzngs.set_useragent('beets', beets.__version__,
'http://beets.io/')
@@ -275,6 +278,9 @@ def album_info(release):
disctitle = medium.get('title')
format = medium.get('format')
+ if format in NON_AUDIO_FORMATS:
+ continue
+
all_tracks = medium['track-list']
if 'pregap' in medium:
all_tracks.insert(0, medium['pregap'])
diff --git a/docs/changelog.rst b/docs/changelog.rst
index bd084a890..2a7349d65 100644
--- a/docs/changelog.rst
+++ b/docs/changelog.rst
@@ -6,6 +6,9 @@ Changelog
Changelog goes here!
+Fixes:
+* Non-audio media (DVD-Video, etc.) are now skipped by the autotagger. :bug:`2688`
+
1.4.6 (December 21, 2017)
-------------------------
| Don't try to match non-audio media fetched from MusicBrainz
Matching a folder containing all the audio files of an MB release made of CD+DVD fails, because beets also takes the video tracks into account.
For instance (1 CD + 1 DVD):
https://musicbrainz.org/release/2775743e-2d9c-4b7f-9baa-1d24dc44c769
https://musicbrainz.org/release/f457c3e1-0d98-4904-9d9e-a085bc5b232b
I imagine the same happens for enhanced CDs with some additional video track.
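For illustration, a rough sketch of the medium-level skip this asks for (the merged fix keeps a fuller `NON_AUDIO_FORMATS` list in `beets/autotag/mb.py`; the helper name here is made up for the example):
```python
# illustrative subset; the actual fix also lists e.g. 'Data CD', 'HD-DVD' and 'VHS'
NON_AUDIO_FORMATS = {'DVD', 'DVD-Video', 'Blu-ray'}

def audio_mediums(release):
    # keep only mediums whose MusicBrainz format is not a video/data medium
    return [m for m in release['medium-list']
            if m.get('format') not in NON_AUDIO_FORMATS]
```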
In general I think the right thing to do is that all media marked as video in MB should not be considered for matching. After all, beets currently only deals with audio files. | beetbox/beets | diff --git a/test/test_mb.py b/test/test_mb.py
index ca1bf2a1a..9c51d0fe3 100644
--- a/test/test_mb.py
+++ b/test/test_mb.py
@@ -27,7 +27,7 @@ import mock
class MBAlbumInfoTest(_common.TestCase):
def _make_release(self, date_str='2009', tracks=None, track_length=None,
- track_artist=False):
+ track_artist=False, medium_format='FORMAT'):
release = {
'title': 'ALBUM TITLE',
'id': 'ALBUM ID',
@@ -90,7 +90,7 @@ class MBAlbumInfoTest(_common.TestCase):
release['medium-list'].append({
'position': '1',
'track-list': track_list,
- 'format': 'FORMAT',
+ 'format': medium_format,
'title': 'MEDIUM TITLE',
})
return release
@@ -324,6 +324,27 @@ class MBAlbumInfoTest(_common.TestCase):
d = mb.album_info(release)
self.assertEqual(d.data_source, 'MusicBrainz')
+ def test_skip_non_audio_dvd(self):
+ tracks = [self._make_track('TITLE ONE', 'ID ONE', 100.0 * 1000.0),
+ self._make_track('TITLE TWO', 'ID TWO', 200.0 * 1000.0)]
+ release = self._make_release(tracks=tracks, medium_format="DVD")
+ d = mb.album_info(release)
+ self.assertEqual(len(d.tracks), 0)
+
+ def test_skip_non_audio_dvd_video(self):
+ tracks = [self._make_track('TITLE ONE', 'ID ONE', 100.0 * 1000.0),
+ self._make_track('TITLE TWO', 'ID TWO', 200.0 * 1000.0)]
+ release = self._make_release(tracks=tracks, medium_format="DVD-Video")
+ d = mb.album_info(release)
+ self.assertEqual(len(d.tracks), 0)
+
+ def test_no_skip_dvd_audio(self):
+ tracks = [self._make_track('TITLE ONE', 'ID ONE', 100.0 * 1000.0),
+ self._make_track('TITLE TWO', 'ID TWO', 200.0 * 1000.0)]
+ release = self._make_release(tracks=tracks, medium_format="DVD-Audio")
+ d = mb.album_info(release)
+ self.assertEqual(len(d.tracks), 2)
+
class ParseIDTest(_common.TestCase):
def test_parse_id_correct(self):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 2
} | 1.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"responses",
"mock",
"pylast",
"rarfile",
"pyxdg",
"python-mpd2",
"discogs-client"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | anyio==4.9.0
-e git+https://github.com/beetbox/beets.git@721fedd7b9050ff0b6f17c33bdf00ea638484a03#egg=beets
certifi==2025.1.31
charset-normalizer==3.4.1
discogs-client==2.3.0
exceptiongroup==1.2.2
h11==0.14.0
httpcore==1.0.7
httpx==0.28.1
idna==3.10
iniconfig==2.1.0
jellyfish==1.2.0
mock==5.2.0
munkres==1.1.4
musicbrainzngs==0.7.1
mutagen==1.47.0
oauthlib==3.2.2
packaging==24.2
pluggy==1.5.0
pylast==5.5.0
pytest==8.3.5
python-mpd2==3.1.1
pyxdg==0.28
PyYAML==6.0.2
rarfile==4.2
requests==2.32.3
responses==0.25.7
six==1.17.0
sniffio==1.3.1
tomli==2.2.1
typing_extensions==4.13.0
Unidecode==1.3.8
urllib3==2.3.0
| name: beets
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- anyio==4.9.0
- certifi==2025.1.31
- charset-normalizer==3.4.1
- discogs-client==2.3.0
- exceptiongroup==1.2.2
- h11==0.14.0
- httpcore==1.0.7
- httpx==0.28.1
- idna==3.10
- iniconfig==2.1.0
- jellyfish==1.2.0
- mock==5.2.0
- munkres==1.1.4
- musicbrainzngs==0.7.1
- mutagen==1.47.0
- oauthlib==3.2.2
- packaging==24.2
- pluggy==1.5.0
- pylast==5.5.0
- pytest==8.3.5
- python-mpd2==3.1.1
- pyxdg==0.28
- pyyaml==6.0.2
- rarfile==4.2
- requests==2.32.3
- responses==0.25.7
- six==1.17.0
- sniffio==1.3.1
- tomli==2.2.1
- typing-extensions==4.13.0
- unidecode==1.3.8
- urllib3==2.3.0
prefix: /opt/conda/envs/beets
| [
"test/test_mb.py::MBAlbumInfoTest::test_skip_non_audio_dvd",
"test/test_mb.py::MBAlbumInfoTest::test_skip_non_audio_dvd_video"
]
| []
| [
"test/test_mb.py::MBAlbumInfoTest::test_data_source",
"test/test_mb.py::MBAlbumInfoTest::test_detect_various_artists",
"test/test_mb.py::MBAlbumInfoTest::test_missing_language",
"test/test_mb.py::MBAlbumInfoTest::test_no_durations",
"test/test_mb.py::MBAlbumInfoTest::test_no_release_date",
"test/test_mb.py::MBAlbumInfoTest::test_no_skip_dvd_audio",
"test/test_mb.py::MBAlbumInfoTest::test_parse_artist_sort_name",
"test/test_mb.py::MBAlbumInfoTest::test_parse_asin",
"test/test_mb.py::MBAlbumInfoTest::test_parse_catalognum",
"test/test_mb.py::MBAlbumInfoTest::test_parse_country",
"test/test_mb.py::MBAlbumInfoTest::test_parse_disambig",
"test/test_mb.py::MBAlbumInfoTest::test_parse_disctitle",
"test/test_mb.py::MBAlbumInfoTest::test_parse_media",
"test/test_mb.py::MBAlbumInfoTest::test_parse_medium_numbers_single_medium",
"test/test_mb.py::MBAlbumInfoTest::test_parse_medium_numbers_two_mediums",
"test/test_mb.py::MBAlbumInfoTest::test_parse_recording_artist",
"test/test_mb.py::MBAlbumInfoTest::test_parse_release_full_date",
"test/test_mb.py::MBAlbumInfoTest::test_parse_release_type",
"test/test_mb.py::MBAlbumInfoTest::test_parse_release_with_year",
"test/test_mb.py::MBAlbumInfoTest::test_parse_release_year_month_only",
"test/test_mb.py::MBAlbumInfoTest::test_parse_releasegroupid",
"test/test_mb.py::MBAlbumInfoTest::test_parse_status",
"test/test_mb.py::MBAlbumInfoTest::test_parse_textrepr",
"test/test_mb.py::MBAlbumInfoTest::test_parse_track_indices",
"test/test_mb.py::MBAlbumInfoTest::test_parse_tracks",
"test/test_mb.py::MBAlbumInfoTest::test_track_artist_overrides_recording_artist",
"test/test_mb.py::MBAlbumInfoTest::test_track_length_overrides_recording_length",
"test/test_mb.py::MBAlbumInfoTest::test_various_artists_defaults_false",
"test/test_mb.py::ParseIDTest::test_parse_id_correct",
"test/test_mb.py::ParseIDTest::test_parse_id_non_id_returns_none",
"test/test_mb.py::ParseIDTest::test_parse_id_url_finds_id",
"test/test_mb.py::ArtistFlatteningTest::test_alias",
"test/test_mb.py::ArtistFlatteningTest::test_single_artist",
"test/test_mb.py::ArtistFlatteningTest::test_two_artists",
"test/test_mb.py::MBLibraryTest::test_match_album",
"test/test_mb.py::MBLibraryTest::test_match_album_empty",
"test/test_mb.py::MBLibraryTest::test_match_track",
"test/test_mb.py::MBLibraryTest::test_match_track_empty"
]
| []
| MIT License | 1,991 | [
"docs/changelog.rst",
"beets/autotag/mb.py"
]
| [
"docs/changelog.rst",
"beets/autotag/mb.py"
]
|
pypa__wheel-213 | d120bf93a6ef38a8eae9580f18878ae8a29b4787 | 2017-12-30 21:43:50 | d120bf93a6ef38a8eae9580f18878ae8a29b4787 | codecov[bot]: # [Codecov](https://codecov.io/gh/pypa/wheel/pull/213?src=pr&el=h1) Report
> Merging [#213](https://codecov.io/gh/pypa/wheel/pull/213?src=pr&el=desc) into [master](https://codecov.io/gh/pypa/wheel/commit/d120bf93a6ef38a8eae9580f18878ae8a29b4787?src=pr&el=desc) will **decrease** coverage by `0.04%`.
> The diff coverage is `100%`.
```diff
@@ Coverage Diff @@
## master #213 +/- ##
==========================================
- Coverage 69.84% 69.79% -0.05%
==========================================
Files 17 17
Lines 1817 1814 -3
==========================================
- Hits 1269 1266 -3
Misses 548 548
```
| [Impacted Files](https://codecov.io/gh/pypa/wheel/pull/213?src=pr&el=tree) | Coverage Δ | |
|---|---|---|
| [wheel/metadata.py](https://codecov.io/gh/pypa/wheel/pull/213/diff?src=pr&el=tree#diff-d2hlZWwvbWV0YWRhdGEucHk=) | `91.38% <100%> (+2.39%)` | :arrow_up: |
| [wheel/pep425tags.py](https://codecov.io/gh/pypa/wheel/pull/213/diff?src=pr&el=tree#diff-d2hlZWwvcGVwNDI1dGFncy5weQ==) | `83.01% <0%> (-4.72%)` | :arrow_down: |
| [wheel/install.py](https://codecov.io/gh/pypa/wheel/pull/213/diff?src=pr&el=tree#diff-d2hlZWwvaW5zdGFsbC5weQ==) | `80.14% <0%> (-0.44%)` | :arrow_down: |
| [wheel/tool/\_\_init\_\_.py](https://codecov.io/gh/pypa/wheel/pull/213/diff?src=pr&el=tree#diff-d2hlZWwvdG9vbC9fX2luaXRfXy5weQ==) | `35.47% <0%> (-0.43%)` | :arrow_down: |
| [wheel/wininst2wheel.py](https://codecov.io/gh/pypa/wheel/pull/213/diff?src=pr&el=tree#diff-d2hlZWwvd2luaW5zdDJ3aGVlbC5weQ==) | `16.66% <0%> (+0.13%)` | :arrow_up: |
| [wheel/egg2wheel.py](https://codecov.io/gh/pypa/wheel/pull/213/diff?src=pr&el=tree#diff-d2hlZWwvZWdnMndoZWVsLnB5) | `65.62% <0%> (+1%)` | :arrow_up: |
------
[Continue to review full report at Codecov](https://codecov.io/gh/pypa/wheel/pull/213?src=pr&el=continue).
> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta)
> `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data`
> Powered by [Codecov](https://codecov.io/gh/pypa/wheel/pull/213?src=pr&el=footer). Last update [d120bf9...61b1df5](https://codecov.io/gh/pypa/wheel/pull/213?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).
| diff --git a/CHANGES.txt b/CHANGES.txt
index 5c3ceee..a182ed7 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,6 +1,7 @@
Unreleased
==========
- Fixed displaying of errors on Python 3
+- Fixed single digit versions in wheel files not being properly recognized
0.30.0
======
diff --git a/wheel/install.py b/wheel/install.py
index 5c3e3d9..3d71d3d 100644
--- a/wheel/install.py
+++ b/wheel/install.py
@@ -34,9 +34,8 @@ VERSION_TOO_HIGH = (1, 0)
# Non-greedy matching of an optional build number may be too clever (more
# invalid wheel filenames will match). Separate regex for .dist-info?
WHEEL_INFO_RE = re.compile(
- r"""^(?P<namever>(?P<name>.+?)(-(?P<ver>\d.+?))?)
- ((-(?P<build>\d.*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)
- \.whl|\.dist-info)$""",
+ r"""^(?P<namever>(?P<name>.+?)-(?P<ver>\d.*?))(-(?P<build>\d.*?))?
+ -(?P<pyver>[a-z].+?)-(?P<abi>.+?)-(?P<plat>.+?)(\.whl|\.dist-info)$""",
re.VERBOSE).match
diff --git a/wheel/metadata.py b/wheel/metadata.py
index 29638e7..75dce67 100644
--- a/wheel/metadata.py
+++ b/wheel/metadata.py
@@ -255,7 +255,7 @@ def generate_requirements(extras_require):
if extra:
yield ('Provides-Extra', extra)
if condition:
- condition += " and "
+ condition = "(" + condition + ") and "
condition += "extra == '%s'" % extra
if condition:
condition = '; ' + condition
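The parentheses added above matter because `and` binds tighter than `or` in PEP 508 environment markers. A minimal sketch of the difference, using the third-party `packaging` library (illustrative only, not part of the patch):

```python
from packaging.markers import Marker

# Marker produced by the old code: the "or" escapes the extra clause.
old = Marker('python_version >= "3.4" or python_version == "2.7" '
             'and extra == "test"')
# Marker produced by the fixed code.
new = Marker('(python_version >= "3.4" or python_version == "2.7") '
             'and extra == "test"')

env = {'python_version': '3.6', 'extra': ''}
print(old.evaluate(env))  # True -- dependency leaks in without the extra
print(new.evaluate(env))  # False -- only pulled in for extra == "test"
```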
| "and extra" breaks environment marker logic
In pypa/setuptools#1242, we discovered that wheel is allegedly simply appending ` and extra=="..."`, which will break the logic when the existing environment marker contains `or` expressions. | pypa/wheel | diff --git a/tests/test_install.py b/tests/test_install.py
index 60b04e8..42db4f0 100644
--- a/tests/test_install.py
+++ b/tests/test_install.py
@@ -100,3 +100,9 @@ def test_install():
def test_install_tool():
"""Slightly improve coverage of wheel.install"""
wheel.tool.install([TESTWHEEL], force=True, dry_run=True)
+
+
+def test_wheelfile_re():
+ # Regression test for #208
+ wf = WheelFile('foo-2-py3-none-any.whl')
+ assert wf.distinfo_name == 'foo-2.dist-info'
diff --git a/tests/test_metadata.py b/tests/test_metadata.py
new file mode 100644
index 0000000..842d1d3
--- /dev/null
+++ b/tests/test_metadata.py
@@ -0,0 +1,25 @@
+from wheel.metadata import generate_requirements
+
+
+def test_generate_requirements():
+ extras_require = {
+ 'test': ['ipykernel', 'ipython', 'mock'],
+ 'test:python_version == "3.3"': ['pytest<3.3.0'],
+ 'test:python_version >= "3.4" or python_version == "2.7"': ['pytest'],
+ }
+ expected_metadata = [
+ ('Provides-Extra',
+ 'test'),
+ ('Requires-Dist',
+ "ipykernel; extra == 'test'"),
+ ('Requires-Dist',
+ "ipython; extra == 'test'"),
+ ('Requires-Dist',
+ "mock; extra == 'test'"),
+ ('Requires-Dist',
+ 'pytest (<3.3.0); (python_version == "3.3") and extra == \'test\''),
+ ('Requires-Dist',
+ 'pytest; (python_version >= "3.4" or python_version == "2.7") and extra == \'test\''),
+ ]
+ generated_metadata = sorted(set(generate_requirements(extras_require)))
+ assert generated_metadata == expected_metadata
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 3
} | 0.30 | {
"env_vars": null,
"env_yml_path": [],
"install": "pip install -e .[tool]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [],
"python": "3.6",
"reqs_path": [],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
coverage==6.2
importlib-metadata==4.8.3
iniconfig==1.1.1
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: wheel
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/wheel
| [
"tests/test_install.py::test_wheelfile_re",
"tests/test_metadata.py::test_generate_requirements"
]
| []
| [
"tests/test_install.py::test_compatibility_tags",
"tests/test_install.py::test_pick_best",
"tests/test_install.py::test_install",
"tests/test_install.py::test_install_tool"
]
| []
| MIT License | 1,992 | [
"CHANGES.txt",
"wheel/metadata.py",
"wheel/install.py"
]
| [
"CHANGES.txt",
"wheel/metadata.py",
"wheel/install.py"
]
|
beetbox__beets-2776 | a5fc80ec50363c642b64b5621c705dfd1157c31c | 2017-12-31 03:21:37 | 0f9ffeec3eb95f2612e78bee6380af984f639b78 | diff --git a/beets/autotag/mb.py b/beets/autotag/mb.py
index b5ed4f358..eb7c30808 100644
--- a/beets/autotag/mb.py
+++ b/beets/autotag/mb.py
@@ -39,6 +39,8 @@ else:
NON_AUDIO_FORMATS = ['Data CD', 'DVD', 'DVD-Video', 'Blu-ray', 'HD-DVD', 'VCD',
'SVCD', 'UMD', 'VHS']
+SKIPPED_TRACKS = ['[data track]']
+
musicbrainzngs.set_useragent('beets', beets.__version__,
'http://beets.io/')
@@ -286,6 +288,16 @@ def album_info(release):
all_tracks.insert(0, medium['pregap'])
for track in all_tracks:
+
+ if ('title' in track['recording'] and
+ track['recording']['title'] in SKIPPED_TRACKS):
+ continue
+
+ if ('video' in track['recording'] and
+ track['recording']['video'] == 'true' and
+ config['match']['ignore_video_tracks']):
+ continue
+
# Basic information from the recording.
index += 1
ti = track_info(
diff --git a/beets/config_default.yaml b/beets/config_default.yaml
index 942459738..01c1e0f65 100644
--- a/beets/config_default.yaml
+++ b/beets/config_default.yaml
@@ -126,5 +126,6 @@ match:
original_year: no
ignored: []
required: []
+ ignore_video_tracks: yes
track_length_grace: 10
track_length_max: 30
diff --git a/docs/changelog.rst b/docs/changelog.rst
index 2a7349d65..4ff54b222 100644
--- a/docs/changelog.rst
+++ b/docs/changelog.rst
@@ -7,7 +7,10 @@ Changelog
Changelog goes here!
Fixes:
+
* Non-audio media (DVD-Video, etc.) are now skipped by the autotagger. :bug:`2688`
+* Non-audio tracks (data tracks, video tracks, etc.) are now skipped by the
+ autotagger. :bug:`1210`
1.4.6 (December 21, 2017)
diff --git a/docs/reference/config.rst b/docs/reference/config.rst
index 4015a4fc2..84f41a385 100644
--- a/docs/reference/config.rst
+++ b/docs/reference/config.rst
@@ -774,6 +774,17 @@ want to enforce to the ``required`` setting::
No tags are required by default.
+.. _ignore_video_tracks:
+
+ignore_video_tracks
+~~~~~~~~~~~~~~~~~~~
+
+By default, video tracks within a release will be ignored. If you want them to
+be included (for example if you would like to track the audio-only versions of
+the video tracks), set it to ``no``.
+
+Default: ``yes``.
+
.. _path-format-config:
Path Format Configuration
| Ignore missing video tracks identified by MusicBrainz during import and in missing plugin
Musicbrainz albums sometimes have video tracks in them (example: http://musicbrainz.org/release/49da37ee-065a-4d7f-a204-9dda8047aad4 ) -- is it possible to ignore video tracks we importing or using `beet missing`?
| beetbox/beets | diff --git a/test/test_mb.py b/test/test_mb.py
index 9c51d0fe3..35f7c3aa2 100644
--- a/test/test_mb.py
+++ b/test/test_mb.py
@@ -95,7 +95,7 @@ class MBAlbumInfoTest(_common.TestCase):
})
return release
- def _make_track(self, title, tr_id, duration, artist=False):
+ def _make_track(self, title, tr_id, duration, artist=False, video=False):
track = {
'title': title,
'id': tr_id,
@@ -113,6 +113,8 @@ class MBAlbumInfoTest(_common.TestCase):
'name': 'RECORDING ARTIST CREDIT',
}
]
+ if video:
+ track['video'] = 'true'
return track
def test_parse_release_with_year(self):
@@ -345,6 +347,41 @@ class MBAlbumInfoTest(_common.TestCase):
d = mb.album_info(release)
self.assertEqual(len(d.tracks), 2)
+ def test_skip_data_track(self):
+ tracks = [self._make_track('TITLE ONE', 'ID ONE', 100.0 * 1000.0),
+ self._make_track('[data track]', 'ID DATA TRACK',
+ 100.0 * 1000.0),
+ self._make_track('TITLE TWO', 'ID TWO', 200.0 * 1000.0)]
+ release = self._make_release(tracks=tracks)
+ d = mb.album_info(release)
+ self.assertEqual(len(d.tracks), 2)
+ self.assertEqual(d.tracks[0].title, 'TITLE ONE')
+ self.assertEqual(d.tracks[1].title, 'TITLE TWO')
+
+ def test_skip_video_tracks_by_default(self):
+ tracks = [self._make_track('TITLE ONE', 'ID ONE', 100.0 * 1000.0),
+ self._make_track('TITLE VIDEO', 'ID VIDEO', 100.0 * 1000.0,
+ False, True),
+ self._make_track('TITLE TWO', 'ID TWO', 200.0 * 1000.0)]
+ release = self._make_release(tracks=tracks)
+ d = mb.album_info(release)
+ self.assertEqual(len(d.tracks), 2)
+ self.assertEqual(d.tracks[0].title, 'TITLE ONE')
+ self.assertEqual(d.tracks[1].title, 'TITLE TWO')
+
+ def test_no_skip_video_tracks_if_configured(self):
+ config['match']['ignore_video_tracks'] = False
+ tracks = [self._make_track('TITLE ONE', 'ID ONE', 100.0 * 1000.0),
+ self._make_track('TITLE VIDEO', 'ID VIDEO', 100.0 * 1000.0,
+ False, True),
+ self._make_track('TITLE TWO', 'ID TWO', 200.0 * 1000.0)]
+ release = self._make_release(tracks=tracks)
+ d = mb.album_info(release)
+ self.assertEqual(len(d.tracks), 3)
+ self.assertEqual(d.tracks[0].title, 'TITLE ONE')
+ self.assertEqual(d.tracks[1].title, 'TITLE VIDEO')
+ self.assertEqual(d.tracks[2].title, 'TITLE TWO')
+
class ParseIDTest(_common.TestCase):
def test_parse_id_correct(self):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 2,
"test_score": 1
},
"num_modified_files": 4
} | 1.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"responses",
"mock",
"pylast",
"rarfile",
"pyxdg",
"python-mpd2",
"discogs-client"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | anyio==4.9.0
-e git+https://github.com/beetbox/beets.git@a5fc80ec50363c642b64b5621c705dfd1157c31c#egg=beets
certifi==2025.1.31
charset-normalizer==3.4.1
discogs-client==2.3.0
exceptiongroup==1.2.2
h11==0.14.0
httpcore==1.0.7
httpx==0.28.1
idna==3.10
iniconfig==2.1.0
jellyfish==1.2.0
mock==5.2.0
munkres==1.1.4
musicbrainzngs==0.7.1
mutagen==1.47.0
oauthlib==3.2.2
packaging==24.2
pluggy==1.5.0
pylast==5.5.0
pytest==8.3.5
python-mpd2==3.1.1
pyxdg==0.28
PyYAML==6.0.2
rarfile==4.2
requests==2.32.3
responses==0.25.7
six==1.17.0
sniffio==1.3.1
tomli==2.2.1
typing_extensions==4.13.0
Unidecode==1.3.8
urllib3==2.3.0
| name: beets
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- anyio==4.9.0
- certifi==2025.1.31
- charset-normalizer==3.4.1
- discogs-client==2.3.0
- exceptiongroup==1.2.2
- h11==0.14.0
- httpcore==1.0.7
- httpx==0.28.1
- idna==3.10
- iniconfig==2.1.0
- jellyfish==1.2.0
- mock==5.2.0
- munkres==1.1.4
- musicbrainzngs==0.7.1
- mutagen==1.47.0
- oauthlib==3.2.2
- packaging==24.2
- pluggy==1.5.0
- pylast==5.5.0
- pytest==8.3.5
- python-mpd2==3.1.1
- pyxdg==0.28
- pyyaml==6.0.2
- rarfile==4.2
- requests==2.32.3
- responses==0.25.7
- six==1.17.0
- sniffio==1.3.1
- tomli==2.2.1
- typing-extensions==4.13.0
- unidecode==1.3.8
- urllib3==2.3.0
prefix: /opt/conda/envs/beets
| [
"test/test_mb.py::MBAlbumInfoTest::test_skip_data_track",
"test/test_mb.py::MBAlbumInfoTest::test_skip_video_tracks_by_default"
]
| []
| [
"test/test_mb.py::MBAlbumInfoTest::test_data_source",
"test/test_mb.py::MBAlbumInfoTest::test_detect_various_artists",
"test/test_mb.py::MBAlbumInfoTest::test_missing_language",
"test/test_mb.py::MBAlbumInfoTest::test_no_durations",
"test/test_mb.py::MBAlbumInfoTest::test_no_release_date",
"test/test_mb.py::MBAlbumInfoTest::test_no_skip_dvd_audio",
"test/test_mb.py::MBAlbumInfoTest::test_no_skip_video_tracks_if_configured",
"test/test_mb.py::MBAlbumInfoTest::test_parse_artist_sort_name",
"test/test_mb.py::MBAlbumInfoTest::test_parse_asin",
"test/test_mb.py::MBAlbumInfoTest::test_parse_catalognum",
"test/test_mb.py::MBAlbumInfoTest::test_parse_country",
"test/test_mb.py::MBAlbumInfoTest::test_parse_disambig",
"test/test_mb.py::MBAlbumInfoTest::test_parse_disctitle",
"test/test_mb.py::MBAlbumInfoTest::test_parse_media",
"test/test_mb.py::MBAlbumInfoTest::test_parse_medium_numbers_single_medium",
"test/test_mb.py::MBAlbumInfoTest::test_parse_medium_numbers_two_mediums",
"test/test_mb.py::MBAlbumInfoTest::test_parse_recording_artist",
"test/test_mb.py::MBAlbumInfoTest::test_parse_release_full_date",
"test/test_mb.py::MBAlbumInfoTest::test_parse_release_type",
"test/test_mb.py::MBAlbumInfoTest::test_parse_release_with_year",
"test/test_mb.py::MBAlbumInfoTest::test_parse_release_year_month_only",
"test/test_mb.py::MBAlbumInfoTest::test_parse_releasegroupid",
"test/test_mb.py::MBAlbumInfoTest::test_parse_status",
"test/test_mb.py::MBAlbumInfoTest::test_parse_textrepr",
"test/test_mb.py::MBAlbumInfoTest::test_parse_track_indices",
"test/test_mb.py::MBAlbumInfoTest::test_parse_tracks",
"test/test_mb.py::MBAlbumInfoTest::test_skip_non_audio_dvd",
"test/test_mb.py::MBAlbumInfoTest::test_skip_non_audio_dvd_video",
"test/test_mb.py::MBAlbumInfoTest::test_track_artist_overrides_recording_artist",
"test/test_mb.py::MBAlbumInfoTest::test_track_length_overrides_recording_length",
"test/test_mb.py::MBAlbumInfoTest::test_various_artists_defaults_false",
"test/test_mb.py::ParseIDTest::test_parse_id_correct",
"test/test_mb.py::ParseIDTest::test_parse_id_non_id_returns_none",
"test/test_mb.py::ParseIDTest::test_parse_id_url_finds_id",
"test/test_mb.py::ArtistFlatteningTest::test_alias",
"test/test_mb.py::ArtistFlatteningTest::test_single_artist",
"test/test_mb.py::ArtistFlatteningTest::test_two_artists",
"test/test_mb.py::MBLibraryTest::test_match_album",
"test/test_mb.py::MBLibraryTest::test_match_album_empty",
"test/test_mb.py::MBLibraryTest::test_match_track",
"test/test_mb.py::MBLibraryTest::test_match_track_empty"
]
| []
| MIT License | 1,993 | [
"beets/config_default.yaml",
"docs/changelog.rst",
"beets/autotag/mb.py",
"docs/reference/config.rst"
]
| [
"beets/config_default.yaml",
"docs/changelog.rst",
"beets/autotag/mb.py",
"docs/reference/config.rst"
]
|
|
scrapy__w3lib-100 | ac1b7212b5f9badf2b35aaf6e094d839fb3ba858 | 2017-12-31 15:12:25 | ac1b7212b5f9badf2b35aaf6e094d839fb3ba858 | codecov[bot]: # [Codecov](https://codecov.io/gh/scrapy/w3lib/pull/100?src=pr&el=h1) Report
> Merging [#100](https://codecov.io/gh/scrapy/w3lib/pull/100?src=pr&el=desc) into [master](https://codecov.io/gh/scrapy/w3lib/commit/0d365519ecac8da99fd051ea1a522486b13a7e8e?src=pr&el=desc) will **not change** coverage.
> The diff coverage is `100%`.
```diff
@@ Coverage Diff @@
## master #100 +/- ##
=======================================
Coverage 94.88% 94.88%
=======================================
Files 7 7
Lines 469 469
Branches 95 95
=======================================
Hits 445 445
Misses 16 16
Partials 8 8
```
| [Impacted Files](https://codecov.io/gh/scrapy/w3lib/pull/100?src=pr&el=tree) | Coverage Δ | |
|---|---|---|
| [w3lib/http.py](https://codecov.io/gh/scrapy/w3lib/pull/100/diff?src=pr&el=tree#diff-dzNsaWIvaHR0cC5weQ==) | `100% <100%> (ø)` | :arrow_up: |
kmike: Could you please rebase it, to fix CI? Code looks good to me. | diff --git a/w3lib/http.py b/w3lib/http.py
index accfb5d..c7b94a2 100644
--- a/w3lib/http.py
+++ b/w3lib/http.py
@@ -78,7 +78,7 @@ def headers_dict_to_raw(headers_dict):
return b'\r\n'.join(raw_lines)
-def basic_auth_header(username, password):
+def basic_auth_header(username, password, encoding='ISO-8859-1'):
"""
Return an `Authorization` header field value for `HTTP Basic Access Authentication (RFC 2617)`_
@@ -95,5 +95,5 @@ def basic_auth_header(username, password):
# XXX: RFC 2617 doesn't define encoding, but ISO-8859-1
# seems to be the most widely used encoding here. See also:
# http://greenbytes.de/tech/webdav/draft-ietf-httpauth-basicauth-enc-latest.html
- auth = auth.encode('ISO-8859-1')
+ auth = auth.encode(encoding)
return b'Basic ' + urlsafe_b64encode(auth)
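Illustrative usage of the new keyword argument (the non-ASCII values mirror the tests below):

```python
from w3lib.http import basic_auth_header

# The default stays ISO-8859-1, so existing callers are unaffected.
print(basic_auth_header('someuser', 'somepass'))
# Callers can now pick the codec explicitly, e.g. UTF-8.
print(basic_auth_header(u'somæusèr', u'sømepäss', encoding='utf8'))
```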
| Allow customizing the encoding in w3lib.http.basic_auth_header
See https://github.com/scrapy/scrapy/pull/2530 | scrapy/w3lib | diff --git a/tests/test_http.py b/tests/test_http.py
index 453624f..01f903e 100644
--- a/tests/test_http.py
+++ b/tests/test_http.py
@@ -1,3 +1,5 @@
+# -*- coding: utf-8 -*-
+
import unittest
from collections import OrderedDict
from w3lib.http import (basic_auth_header,
@@ -14,6 +16,13 @@ class HttpTests(unittest.TestCase):
self.assertEqual(b'Basic c29tZXVzZXI6QDx5dTk-Jm8_UQ==',
basic_auth_header('someuser', '@<yu9>&o?Q'))
+ def test_basic_auth_header_encoding(self):
+ self.assertEqual(b'Basic c29tw6Z1c8Oocjpzw7htZXDDpHNz',
+ basic_auth_header(u'somæusèr', u'sømepäss', encoding='utf8'))
+ # default encoding (ISO-8859-1)
+ self.assertEqual(b'Basic c29t5nVz6HI6c_htZXDkc3M=',
+ basic_auth_header(u'somæusèr', u'sømepäss'))
+
def test_headers_raw_dict_none(self):
self.assertIsNone(headers_raw_to_dict(None))
self.assertIsNone(headers_dict_to_raw(None))
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 3,
"test_score": 0
},
"num_modified_files": 1
} | 1.18 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
pytest-cov==6.0.0
six==1.17.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
-e git+https://github.com/scrapy/w3lib.git@ac1b7212b5f9badf2b35aaf6e094d839fb3ba858#egg=w3lib
| name: w3lib
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- pytest-cov==6.0.0
- six==1.17.0
prefix: /opt/conda/envs/w3lib
| [
"tests/test_http.py::HttpTests::test_basic_auth_header_encoding"
]
| []
| [
"tests/test_http.py::HttpTests::test_basic_auth_header",
"tests/test_http.py::HttpTests::test_headers_dict_to_raw",
"tests/test_http.py::HttpTests::test_headers_dict_to_raw_listtuple",
"tests/test_http.py::HttpTests::test_headers_dict_to_raw_wrong_values",
"tests/test_http.py::HttpTests::test_headers_raw_dict_none",
"tests/test_http.py::HttpTests::test_headers_raw_to_dict"
]
| []
| BSD 3-Clause "New" or "Revised" License | 1,994 | [
"w3lib/http.py"
]
| [
"w3lib/http.py"
]
|
asottile__yesqa-4 | 52da14636029e7e8cc70c6a61912c14fa27ca50e | 2017-12-31 16:32:27 | ad85b55968036d30088048335194733ecaf06c13 | diff --git a/yesqa.py b/yesqa.py
index 008dc57..40cf153 100644
--- a/yesqa.py
+++ b/yesqa.py
@@ -43,14 +43,28 @@ def _remove_comments(tokens):
def _rewrite_noqa_comment(tokens, i, flake8_results):
+ # find logical lines that this noqa comment may affect
+ lines = set()
+ j = i
+ while j >= 0 and tokens[j].name not in {'NL', 'NEWLINE'}:
+ t = tokens[j]
+ if t.line is not None:
+ lines.update(range(t.line, t.line + t.src.count('\n') + 1))
+ j -= 1
+
+ lints = set()
+ for line in lines:
+ lints.update(flake8_results[line])
+
token = tokens[i]
match = NOQA_RE.match(token.src)
+
# exclude all lints on the line but no lints
- if token.line not in flake8_results:
+ if not lints:
_remove_comment(tokens, i)
elif match.group().lower() != '# noqa':
codes = set(SEP_RE.split(match.group(1)[2:]))
- expected_codes = codes & flake8_results[token.line]
+ expected_codes = codes & lints
if expected_codes != codes:
comment = '# noqa: {}'.format(','.join(sorted(expected_codes)))
tokens[i] = token._replace(src=NOQA_RE.sub(comment, token.src))
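Why the old per-line lookup failed: the `# noqa` comment token sits on the *last* physical line of a multi-line string, while flake8 reports against an earlier one. A standalone stdlib illustration (separate from the patch):

```python
import io
import tokenize

src = '"""\n' + 'a' * 40 + ' ' + 'b' * 60 + '\n"""  # noqa\n'
for tok in tokenize.generate_tokens(io.StringIO(src).readline):
    if tok.type in (tokenize.STRING, tokenize.COMMENT):
        print(tokenize.tok_name[tok.type], tok.start[0], '->', tok.end[0])
# STRING 1 -> 3: the long line (E501 on line 2) is inside the string,
# COMMENT 3 -> 3: but the noqa comment only "sees" line 3.
```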
| False positive: removes `noqa` on multi-line string
```python
"""
aaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
""" # noqa
```
```console
$ flake8 test.py
$ yesqa test.py
Rewriting test.py
$ flake8 test.py
test.py:2:80: E501 line too long (82 > 79 characters)
``` | asottile/yesqa | diff --git a/tests/yesqa_test.py b/tests/yesqa_test.py
index e0225c8..9043a86 100644
--- a/tests/yesqa_test.py
+++ b/tests/yesqa_test.py
@@ -32,9 +32,11 @@ def test_non_utf8_bytes(tmpdir, capsys):
(
'', # noop
'# hello\n', # comment at beginning of file
- 'import os # noqa\n', # still needed
- 'import os # NOQA\n', # still needed
- 'import os # noqa: F401\n', # still needed
+ # still needed
+ 'import os # noqa\n',
+ 'import os # NOQA\n',
+ 'import os # noqa: F401\n',
+ '"""\n' + 'a' * 40 + ' ' + 'b' * 60 + '\n""" # noqa\n',
),
)
def test_ok(assert_rewrite, src):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 0.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
flake8==7.2.0
iniconfig==2.1.0
mccabe==0.7.0
packaging==24.2
pluggy==1.5.0
pycodestyle==2.13.0
pyflakes==3.3.2
pytest==8.3.5
tokenize_rt==6.1.0
tomli==2.2.1
-e git+https://github.com/asottile/yesqa.git@52da14636029e7e8cc70c6a61912c14fa27ca50e#egg=yesqa
| name: yesqa
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- flake8==7.2.0
- iniconfig==2.1.0
- mccabe==0.7.0
- packaging==24.2
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyflakes==3.3.2
- pytest==8.3.5
- tokenize-rt==6.1.0
- tomli==2.2.1
prefix: /opt/conda/envs/yesqa
| [
"tests/yesqa_test.py::test_ok[\"\"\"\\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
]
| []
| [
"tests/yesqa_test.py::test_non_utf8_bytes",
"tests/yesqa_test.py::test_ok[]",
"tests/yesqa_test.py::test_ok[#",
"tests/yesqa_test.py::test_ok[import",
"tests/yesqa_test.py::test_rewrite[x",
"tests/yesqa_test.py::test_rewrite[import",
"tests/yesqa_test.py::test_rewrite[#",
"tests/yesqa_test.py::test_main"
]
| []
| MIT License | 1,995 | [
"yesqa.py"
]
| [
"yesqa.py"
]
|
|
MechanicalSoup__MechanicalSoup-181 | 736083562d4cc03efdf08f7be16ced8fb1460ee5 | 2018-01-01 02:07:10 | 7171e2b79ba0fbe02a7245066cb5536ddb2fe94e | diff --git a/mechanicalsoup/form.py b/mechanicalsoup/form.py
index 7819535..285fac6 100644
--- a/mechanicalsoup/form.py
+++ b/mechanicalsoup/form.py
@@ -307,7 +307,8 @@ class Form(object):
found = False
inps = self.form.select('input[type="submit"], button[type="submit"]')
for inp in inps:
- if inp == submit or inp['name'] == submit:
+ if inp == submit or (inp.has_attr('name') and
+ inp['name'] == submit):
if found:
raise LinkNotFoundError(
"Multiple submit elements match: {0}".format(submit)
| Crash on input without name in form
I am currently dealing with a form containing a submit button without a name. When submitting this form (with multiple submit buttons) via
```
browser.submit_selected(btnName='name')
```
this causes a crash in `Form.choose_submit()` since the attribute `name` is not present, [here](https://github.com/MechanicalSoup/MechanicalSoup/blob/master/mechanicalsoup/form.py#L310).
I know that inputs without a name do not make any sense, but it would be nice to harden MechanicalSoup against websites out there in the wild... :) | MechanicalSoup/MechanicalSoup | diff --git a/tests/test_form.py b/tests/test_form.py
index 4f89422..a97a5b4 100644
--- a/tests/test_form.py
+++ b/tests/test_form.py
@@ -350,5 +350,21 @@ def test_form_print_summary(capsys):
assert err == ""
+def test_issue180():
+ """Test that a KeyError is not raised when Form.choose_submit is called
+ on a form where a submit element is missing its name-attribute."""
+ browser = mechanicalsoup.StatefulBrowser()
+ html = '''
+<form>
+ <input type="submit" value="Invalid" />
+ <input type="submit" name="valid" value="Valid" />
+</form>
+'''
+ browser.open_fake_page(html)
+ form = browser.select_form()
+ with pytest.raises(mechanicalsoup.utils.LinkNotFoundError):
+ form.choose_submit('not_found')
+
+
if __name__ == '__main__':
pytest.main(sys.argv)
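For reference, a standalone illustration of the BeautifulSoup behaviour the `has_attr` guard in the patch works around (markup borrowed from the test above):

```python
import bs4

soup = bs4.BeautifulSoup('<input type="submit" value="Invalid" />',
                         'html.parser')
inp = soup.input
print(inp.has_attr('name'))  # False -- safe to check first
try:
    inp['name']
except KeyError:
    print('KeyError: the crash the guard prevents')
```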
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 0.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-flake8",
"pytest-mock",
"requests_mock"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
beautifulsoup4==4.12.3
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
flake8==5.0.4
idna==3.10
importlib-metadata==4.2.0
iniconfig==1.1.1
lxml==5.3.1
mccabe==0.7.0
-e git+https://github.com/MechanicalSoup/MechanicalSoup.git@736083562d4cc03efdf08f7be16ced8fb1460ee5#egg=MechanicalSoup
packaging==21.3
pluggy==1.0.0
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
pytest-flake8==1.1.1
pytest-mock==3.6.1
requests==2.27.1
requests-mock==1.12.1
six==1.17.0
soupsieve==2.3.2.post1
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: MechanicalSoup
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- beautifulsoup4==4.12.3
- charset-normalizer==2.0.12
- coverage==6.2
- flake8==5.0.4
- idna==3.10
- importlib-metadata==4.2.0
- iniconfig==1.1.1
- lxml==5.3.1
- mccabe==0.7.0
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-flake8==1.1.1
- pytest-mock==3.6.1
- requests==2.27.1
- requests-mock==1.12.1
- six==1.17.0
- soupsieve==2.3.2.post1
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/MechanicalSoup
| [
"tests/test_form.py::test_issue180"
]
| [
"tests/test_form.py::flake-8::FLAKE8"
]
| [
"tests/test_form.py::test_submit_online",
"tests/test_form.py::test_submit_set",
"tests/test_form.py::test_choose_submit[preview]",
"tests/test_form.py::test_choose_submit[save]",
"tests/test_form.py::test_choose_submit[cancel]",
"tests/test_form.py::test_choose_submit_fail[not",
"tests/test_form.py::test_choose_submit_fail[found]",
"tests/test_form.py::test_choose_submit_multiple_match",
"tests/test_form.py::test_form_noaction",
"tests/test_form.py::test_form_action",
"tests/test_form.py::test_set_select[default]",
"tests/test_form.py::test_set_select[selected]",
"tests/test_form.py::test_set_select_multiple[select",
"tests/test_form.py::test_form_not_found",
"tests/test_form.py::test_form_check_uncheck",
"tests/test_form.py::test_form_print_summary"
]
| []
| MIT License | 1,996 | [
"mechanicalsoup/form.py"
]
| [
"mechanicalsoup/form.py"
]
|
|
NeuralEnsemble__python-neo-446 | ab720e75e2c947385a2eb977ee2002770d16dde2 | 2018-01-02 16:46:59 | f0285a7ab15ff6535d3e6736e0163c4fa6aea091 | diff --git a/neo/core/analogsignal.py b/neo/core/analogsignal.py
index a202fffe..86dcfcae 100644
--- a/neo/core/analogsignal.py
+++ b/neo/core/analogsignal.py
@@ -250,7 +250,7 @@ class AnalogSignal(BaseSignal):
for k, v in self.__dict__.items():
try:
setattr(new_signal, k, deepcopy(v, memo))
- except:
+ except TypeError:
setattr(new_signal, k, v)
return new_signal
diff --git a/neo/core/epoch.py b/neo/core/epoch.py
index 85cf8117..a20a2f19 100644
--- a/neo/core/epoch.py
+++ b/neo/core/epoch.py
@@ -10,6 +10,7 @@ This module defines :class:`Epoch`, an array of epochs.
from __future__ import absolute_import, division, print_function
import sys
+from copy import deepcopy
import numpy as np
import quantities as pq
@@ -189,6 +190,21 @@ class Epoch(BaseNeo, pq.Quantity):
"description", "annotations"):
setattr(self, attr, getattr(other, attr, None))
+ def __deepcopy__(self, memo):
+ cls = self.__class__
+ new_ep = cls(times=self.times, durations=self.durations,
+ labels=self.labels, units=self.units,
+ name=self.name, description=self.description,
+ file_origin=self.file_origin)
+ new_ep.__dict__.update(self.__dict__)
+ memo[id(self)] = new_ep
+ for k, v in self.__dict__.items():
+ try:
+ setattr(new_ep, k, deepcopy(v, memo))
+ except TypeError:
+ setattr(new_ep, k, v)
+ return new_ep
+
def duplicate_with_new_data(self, signal):
'''
Create a new :class:`Epoch` with the same metadata
diff --git a/neo/core/event.py b/neo/core/event.py
index e296357e..d9e5b250 100644
--- a/neo/core/event.py
+++ b/neo/core/event.py
@@ -10,6 +10,7 @@ This module defines :class:`Event`, an array of events.
from __future__ import absolute_import, division, print_function
import sys
+from copy import deepcopy
import numpy as np
import quantities as pq
@@ -30,6 +31,7 @@ def _new_event(cls, signal, times = None, labels=None, units=None, name=None,
e.segment = segment
return e
+
class Event(BaseNeo, pq.Quantity):
'''
Array of events.
@@ -177,6 +179,21 @@ class Event(BaseNeo, pq.Quantity):
"annotations"):
setattr(self, attr, getattr(other, attr, None))
+ def __deepcopy__(self, memo):
+ cls = self.__class__
+ new_ev = cls(times=self.times,
+ labels=self.labels, units=self.units,
+ name=self.name, description=self.description,
+ file_origin=self.file_origin)
+ new_ev.__dict__.update(self.__dict__)
+ memo[id(self)] = new_ev
+ for k, v in self.__dict__.items():
+ try:
+ setattr(new_ev, k, deepcopy(v, memo))
+ except TypeError:
+ setattr(new_ev, k, v)
+ return new_ev
+
def duplicate_with_new_data(self, signal):
'''
Create a new :class:`Event` with the same metadata
diff --git a/neo/core/irregularlysampledsignal.py b/neo/core/irregularlysampledsignal.py
index 8a0df4c0..4a6b9463 100644
--- a/neo/core/irregularlysampledsignal.py
+++ b/neo/core/irregularlysampledsignal.py
@@ -23,6 +23,7 @@ the old object.
# needed for Python 3 compatibility
from __future__ import absolute_import, division, print_function
+from copy import deepcopy
import numpy as np
import quantities as pq
@@ -194,6 +195,21 @@ class IrregularlySampledSignal(BaseSignal):
self.times = getattr(obj, 'times', None)
return obj
+ def __deepcopy__(self, memo):
+ cls = self.__class__
+ new_signal = cls(self.times, np.array(self), units=self.units,
+ time_units=self.times.units, dtype=self.dtype,
+ t_start=self.t_start, name=self.name,
+ file_origin=self.file_origin, description=self.description)
+ new_signal.__dict__.update(self.__dict__)
+ memo[id(self)] = new_signal
+ for k, v in self.__dict__.items():
+ try:
+ setattr(new_signal, k, deepcopy(v, memo))
+ except TypeError:
+ setattr(new_signal, k, v)
+ return new_signal
+
def __repr__(self):
'''
Returns a string representing the :class:`IrregularlySampledSignal`.
diff --git a/neo/core/spiketrain.py b/neo/core/spiketrain.py
index 15ad2124..f3cab166 100644
--- a/neo/core/spiketrain.py
+++ b/neo/core/spiketrain.py
@@ -402,6 +402,22 @@ class SpikeTrain(BaseNeo, pq.Quantity):
if hasattr(obj, 'lazy_shape'):
self.lazy_shape = obj.lazy_shape
+ def __deepcopy__(self, memo):
+ cls = self.__class__
+ new_st = cls(np.array(self), self.t_stop, units=self.units,
+ dtype=self.dtype, copy=True, sampling_rate=self.sampling_rate,
+ t_start=self.t_start, waveforms=self.waveforms,
+ left_sweep=self.left_sweep, name=self.name,
+ file_origin=self.file_origin, description=self.description)
+ new_st.__dict__.update(self.__dict__)
+ memo[id(self)] = new_st
+ for k, v in self.__dict__.items():
+ try:
+ setattr(new_st, k, copy.deepcopy(v, memo))
+ except TypeError:
+ setattr(new_st, k, v)
+ return new_st
+
def __repr__(self):
'''
Returns a string representing the :class:`SpikeTrain`.
diff --git a/neo/io/igorproio.py b/neo/io/igorproio.py
index 1f8c75f7..7a008a5d 100644
--- a/neo/io/igorproio.py
+++ b/neo/io/igorproio.py
@@ -28,11 +28,10 @@ except ImportError:
class IgorIO(BaseIO):
"""
- Class for reading Igor Binary Waves (.ibw) written by WaveMetrics’
+ Class for reading Igor Binary Waves (.ibw)
+ or Packed Experiment (.pxp) files written by WaveMetrics’
IGOR Pro software.
- Support for Packed Experiment (.pxp) files is planned.
-
It requires the `igor` Python package by W. Trevor King.
Usage:
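Each class in the patch repeats one pattern; a condensed, runnable sketch with a stand-in class (the real constructors take many data arguments):

```python
from copy import deepcopy

class Thing(object):
    def __init__(self, data):
        self.data = data
        self.annotations = {}

    def __deepcopy__(self, memo):
        cls = self.__class__
        new_obj = cls(self.data)        # rebuild from required arguments
        new_obj.__dict__.update(self.__dict__)
        memo[id(self)] = new_obj        # register before recursing (cycles)
        for k, v in self.__dict__.items():
            try:
                setattr(new_obj, k, deepcopy(v, memo))
            except TypeError:
                setattr(new_obj, k, v)  # keep values deepcopy chokes on
        return new_obj

t = Thing([1, 2, 3])
t.annotations['self'] = t               # a reference cycle, handled via memo
u = deepcopy(t)
assert u.annotations['self'] is u and u.data == t.data
```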
| Implement deepcopy for all classes
#399 implements `__deepcopy__` for `AnalogSignal` and so fixes #220.
This should also be done for the other Neo classes. | NeuralEnsemble/python-neo | diff --git a/neo/test/coretest/test_block.py b/neo/test/coretest/test_block.py
index 4f93f79b..7e9f0c94 100644
--- a/neo/test/coretest/test_block.py
+++ b/neo/test/coretest/test_block.py
@@ -7,6 +7,7 @@ Tests of the neo.core.block.Block class
from __future__ import absolute_import, division, print_function
from datetime import datetime
+from copy import deepcopy
import unittest
@@ -744,6 +745,10 @@ class TestBlock(unittest.TestCase):
assert_same_sub_schema(self.units2,
self.blk2.list_children_by_class('Unit'))
+ def test__deepcopy(self):
+ blk1_copy = deepcopy(self.blk1)
+ assert_same_sub_schema(blk1_copy, self.blk1)
+
if __name__ == "__main__":
unittest.main()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 3,
"test_score": 3
},
"num_modified_files": 6
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
importlib-metadata==4.8.3
iniconfig==1.1.1
-e git+https://github.com/NeuralEnsemble/python-neo.git@ab720e75e2c947385a2eb977ee2002770d16dde2#egg=neo
nose==1.3.7
numpy==1.19.5
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
quantities==0.13.0
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: python-neo
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- nose==1.3.7
- numpy==1.19.5
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- quantities==0.13.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/python-neo
| [
"neo/test/coretest/test_block.py::TestBlock::test__deepcopy"
]
| []
| [
"neo/test/coretest/test_block.py::Test__generate_datasets::test__fake_neo__cascade",
"neo/test/coretest/test_block.py::Test__generate_datasets::test__fake_neo__nocascade",
"neo/test/coretest/test_block.py::Test__generate_datasets::test__get_fake_values",
"neo/test/coretest/test_block.py::TestBlock::test__children",
"neo/test/coretest/test_block.py::TestBlock::test__creation",
"neo/test/coretest/test_block.py::TestBlock::test__filter_annotation_single",
"neo/test/coretest/test_block.py::TestBlock::test__filter_attribute_single",
"neo/test/coretest/test_block.py::TestBlock::test__filter_attribute_single_nores",
"neo/test/coretest/test_block.py::TestBlock::test__filter_multi",
"neo/test/coretest/test_block.py::TestBlock::test__filter_multi_nores",
"neo/test/coretest/test_block.py::TestBlock::test__filter_multi_partres_annotation_annotation",
"neo/test/coretest/test_block.py::TestBlock::test__filter_multi_partres_annotation_attribute",
"neo/test/coretest/test_block.py::TestBlock::test__filter_none",
"neo/test/coretest/test_block.py::TestBlock::test__filter_single_annotation_container",
"neo/test/coretest/test_block.py::TestBlock::test__filter_single_annotation_container_norecur",
"neo/test/coretest/test_block.py::TestBlock::test__filter_single_annotation_nodata",
"neo/test/coretest/test_block.py::TestBlock::test__filter_single_annotation_nodata_container",
"neo/test/coretest/test_block.py::TestBlock::test__filter_single_annotation_nodata_container_norecur",
"neo/test/coretest/test_block.py::TestBlock::test__filter_single_annotation_nodata_norecur",
"neo/test/coretest/test_block.py::TestBlock::test__filter_single_annotation_norecur",
"neo/test/coretest/test_block.py::TestBlock::test__filter_single_annotation_nores",
"neo/test/coretest/test_block.py::TestBlock::test__filter_single_annotation_obj_single",
"neo/test/coretest/test_block.py::TestBlock::test__filter_single_attribute_container_data",
"neo/test/coretest/test_block.py::TestBlock::test__filter_single_attribute_container_norecur",
"neo/test/coretest/test_block.py::TestBlock::test__filter_single_attribute_container_norecur_nores",
"neo/test/coretest/test_block.py::TestBlock::test__filter_single_attribute_nodata",
"neo/test/coretest/test_block.py::TestBlock::test__filter_single_attribute_nodata_container_norecur",
"neo/test/coretest/test_block.py::TestBlock::test__filter_single_attribute_nodata_container_norecur_nores",
"neo/test/coretest/test_block.py::TestBlock::test__filter_single_attribute_nodata_container_nores",
"neo/test/coretest/test_block.py::TestBlock::test__filter_single_attribute_nodata_norecur",
"neo/test/coretest/test_block.py::TestBlock::test__filter_single_attribute_norecur",
"neo/test/coretest/test_block.py::TestBlock::test__filterdata_multi",
"neo/test/coretest/test_block.py::TestBlock::test__filterdata_multi_nores",
"neo/test/coretest/test_block.py::TestBlock::test__filterdata_multi_partres_annotation_annotation",
"neo/test/coretest/test_block.py::TestBlock::test__filterdata_multi_partres_annotation_attribute",
"neo/test/coretest/test_block.py::TestBlock::test__merge",
"neo/test/coretest/test_block.py::TestBlock::test__size",
"neo/test/coretest/test_block.py::TestBlock::test_block_init",
"neo/test/coretest/test_block.py::TestBlock::test_block_list_units"
]
| []
| BSD 3-Clause "New" or "Revised" License | 1,998 | [
"neo/core/event.py",
"neo/core/analogsignal.py",
"neo/io/igorproio.py",
"neo/core/irregularlysampledsignal.py",
"neo/core/epoch.py",
"neo/core/spiketrain.py"
]
| [
"neo/core/event.py",
"neo/core/analogsignal.py",
"neo/io/igorproio.py",
"neo/core/irregularlysampledsignal.py",
"neo/core/epoch.py",
"neo/core/spiketrain.py"
]
|
|
dask__dask-3042 | a1653463534a7dd9212f45f833aa17b7dd12e574 | 2018-01-02 16:55:37 | a1653463534a7dd9212f45f833aa17b7dd12e574 | mrocklin: This is great. Thank you for the bug report and the excellent fix.
I suspect that this bug escaped notice for a while because of the ambiguous use of the name `token`, which we often use to mean "unique identifier" but which in the case of this method seems to mean "name of operation". We might consider changing the name in this method as well from `token=` to `name=` or something similar.
That's just a small thought though. I'm +1 on this change after tests pass.
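Hypothetical usage of the `return_stored` option the diff below introduces (target and shapes are made up):

```python
import numpy as np
import dask.array as da

x = da.ones((10, 10), chunks=(5, 5))
target = np.empty(x.shape)

# store() used to return None (or a Delayed); with return_stored=True it
# hands back dask arrays whose chunks are read back out of `target`.
(stored,) = da.store([x], [target], return_stored=True)
print(stored.sum().compute())  # 100.0
```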
mrocklin: The last failing test is due to https://github.com/dask/dask/issues/3043 and is unrelated to this PR. | diff --git a/dask/array/core.py b/dask/array/core.py
index bfecfc992..206ac4c2d 100644
--- a/dask/array/core.py
+++ b/dask/array/core.py
@@ -18,12 +18,12 @@ import uuid
import warnings
try:
- from cytoolz import (partition, concat, join, first,
+ from cytoolz import (partition, concat, concatv, join, first,
groupby, valmap, accumulate, assoc)
from cytoolz.curried import filter, pluck
except ImportError:
- from toolz import (partition, concat, join, first,
+ from toolz import (partition, concat, concatv, join, first,
groupby, valmap, accumulate, assoc)
from toolz.curried import filter, pluck
from toolz import pipe, map, reduce
@@ -32,14 +32,15 @@ import numpy as np
from . import chunk
from .numpy_compat import _make_sliced_dtype
from .slicing import slice_array, replace_ellipsis
-from ..base import Base, tokenize, dont_optimize, compute_as_if_collection
+from ..base import (Base, tokenize, dont_optimize, compute_as_if_collection,
+ persist)
from ..context import _globals, globalmethod
from ..utils import (homogeneous_deepmap, ndeepmap, ignoring, concrete,
is_integer, IndexCallable, funcname, derived_from,
SerializableLock, ensure_dict, Dispatch)
from ..compatibility import unicode, long, getargspec, zip_longest, apply
from ..core import quote
-from ..delayed import to_task_dask
+from ..delayed import Delayed, to_task_dask
from .. import threaded, core
from .. import sharedict
from ..sharedict import ShareDict
@@ -815,7 +816,8 @@ def broadcast_chunks(*chunkss):
return tuple(result)
-def store(sources, targets, lock=True, regions=None, compute=True, **kwargs):
+def store(sources, targets, lock=True, regions=None, compute=True,
+ return_stored=False, **kwargs):
""" Store dask arrays in array-like objects, overwrite data in target
This stores dask arrays into object that supports numpy-style setitem
@@ -842,6 +844,8 @@ def store(sources, targets, lock=True, regions=None, compute=True, **kwargs):
for the corresponding source and target in sources and targets, respectively.
compute: boolean, optional
If true compute immediately, return ``dask.delayed.Delayed`` otherwise
+ return_stored: boolean, optional
+ Optionally return the stored result (default False).
Examples
--------
@@ -859,6 +863,7 @@ def store(sources, targets, lock=True, regions=None, compute=True, **kwargs):
>>> store([x, y, z], [dset1, dset2, dset3]) # doctest: +SKIP
"""
+
if isinstance(sources, Array):
sources = [sources]
targets = [targets]
@@ -880,31 +885,73 @@ def store(sources, targets, lock=True, regions=None, compute=True, **kwargs):
raise ValueError("Different number of sources [%d] and targets [%d] than regions [%d]"
% (len(sources), len(targets), len(regions)))
- updates = {}
- keys = []
+ # Optimize all sources together
+ sources_dsk = sharedict.merge(*[e.__dask_graph__() for e in sources])
+ sources_dsk = Array.__dask_optimize__(
+ sources_dsk,
+ [e.__dask_keys__() for e in sources]
+ )
+
+ tgt_dsks = []
+ store_keys = []
+ store_dsks = []
+ if return_stored:
+ load_names = []
+ load_dsks = []
for tgt, src, reg in zip(targets, sources, regions):
# if out is a delayed object update dictionary accordingly
try:
- dsk = {}
- dsk.update(tgt.dask)
+ each_tgt_dsk = {}
+ each_tgt_dsk.update(tgt.dask)
tgt = tgt.key
except AttributeError:
- dsk = {}
+ each_tgt_dsk = {}
+
+ src = Array(sources_dsk, src.name, src.chunks, src.dtype)
+
+ each_store_dsk = insert_to_ooc(
+ src, tgt, lock=lock, region=reg, return_stored=return_stored
+ )
+
+ if return_stored:
+ load_names.append('load-store-%s' % src.name)
+ load_dsks.append(retrieve_from_ooc(
+ each_store_dsk.keys(),
+ each_store_dsk
+ ))
+
+ tgt_dsks.append(each_tgt_dsk)
+
+ store_keys.extend(each_store_dsk.keys())
+ store_dsks.append(each_store_dsk)
- update = insert_to_ooc(src, tgt, lock=lock, region=reg)
- keys.extend(update)
+ store_dsks_mrg = sharedict.merge(*concatv(
+ store_dsks, tgt_dsks, [sources_dsk]
+ ))
- update.update(dsk)
- updates.update(update)
+ if return_stored:
+ if compute:
+ store_dlyds = [Delayed(k, store_dsks_mrg) for k in store_keys]
+ store_dlyds = persist(*store_dlyds)
+ store_dsks_mrg = sharedict.merge(*[e.dask for e in store_dlyds])
- name = 'store-' + tokenize(*keys)
- dsk = sharedict.merge((name, updates), *[src.dask for src in sources])
- if compute:
- compute_as_if_collection(Array, dsk, keys, **kwargs)
+ load_dsks_mrg = sharedict.merge(store_dsks_mrg, *load_dsks)
+
+ result = tuple(
+ Array(load_dsks_mrg, n, src.chunks, src.dtype) for n in load_names
+ )
+
+ return result
else:
- from ..delayed import Delayed
- dsk.update({name: keys})
- return Delayed(name, dsk)
+ name = 'store-' + tokenize(*store_keys)
+ dsk = sharedict.merge({name: store_keys}, store_dsks_mrg)
+ result = Delayed(name, dsk)
+
+ if compute:
+ result.compute()
+ return None
+ else:
+ return result
def blockdims_from_blockshape(shape, chunks):
@@ -2157,9 +2204,7 @@ def atop(func, out_ind, *args, **kwargs):
concatenate : bool, keyword only
If true concatenate arrays along dummy indices, else provide lists
adjust_chunks : dict
- Dictionary mapping index to information to adjust chunk sizes. Can
- either be a constant chunksize, a tuple of all chunksizes, or a
- function that converts old chunksize to new chunksize
+ Dictionary mapping index to function to be applied to chunk sizes
new_axes : dict, keyword only
New indexes and their dimension lengths
@@ -2568,7 +2613,7 @@ def concatenate(seq, axis=0, allow_unknown_chunksizes=False):
return Array(dsk2, name, chunks, dtype=dt)
-def store_chunk(x, out, index, lock, region):
+def store_chunk(x, out, index, lock, region, return_stored):
"""
A function inserted in a Dask graph for storing a chunk.
@@ -2584,15 +2629,21 @@ def store_chunk(x, out, index, lock, region):
Lock to use before writing to ``out``.
region: slice-like or None
Where relative to ``out`` to store ``x``.
+ return_stored: bool
+ Whether to return ``out``.
Examples
--------
>>> a = np.ones((5, 6))
>>> b = np.empty(a.shape)
- >>> store_chunk(a, b, (slice(None), slice(None)), False, None)
+ >>> store_chunk(a, b, (slice(None), slice(None)), False, None, False)
"""
+ result = None
+ if return_stored:
+ result = out
+
subindex = index
if region is not None:
subindex = fuse_slice(region, index)
@@ -2605,10 +2656,10 @@ def store_chunk(x, out, index, lock, region):
if lock:
lock.release()
- return None
+ return result
-def insert_to_ooc(arr, out, lock=True, region=None):
+def insert_to_ooc(arr, out, lock=True, region=None, return_stored=False):
"""
Creates a Dask graph for storing chunks from ``arr`` in ``out``.
@@ -2624,6 +2675,9 @@ def insert_to_ooc(arr, out, lock=True, region=None):
region: slice-like, optional
Where in ``out`` to store ``arr``'s results
(default is ``None``, meaning all of ``out``).
+ return_stored: bool, optional
+ Whether to return ``out``
+ (default is ``False``, meaning ``None`` is returned).
Examples
--------
@@ -2642,13 +2696,79 @@ def insert_to_ooc(arr, out, lock=True, region=None):
dsk = dict()
for t, slc in zip(core.flatten(arr.__dask_keys__()), slices):
store_key = (name,) + t[1:]
- dsk[store_key] = (
- store_chunk, t, out, slc, lock, region
- )
+ dsk[store_key] = (store_chunk, t, out, slc, lock, region, return_stored)
return dsk
+def load_chunk(x, index, lock, region):
+ """
+ A function inserted in a Dask graph for loading a chunk.
+
+ Parameters
+ ----------
+ x: array-like
+ An array (potentially a NumPy one)
+ index: slice-like
+ Where to store result from ``x`` in ``out``.
+ lock: Lock-like or False
+ Lock to use before writing to ``out``.
+ region: slice-like or None
+ Where relative to ``out`` to store ``x``.
+
+ Examples
+ --------
+
+ >>> a = np.ones((5, 6))
+ >>> load_chunk(a, (slice(None), slice(None)), False, None) # doctest: +SKIP
+ """
+
+ result = None
+
+ subindex = index
+ if region is not None:
+ subindex = fuse_slice(region, index)
+
+ if lock:
+ lock.acquire()
+ try:
+ result = x[subindex]
+ finally:
+ if lock:
+ lock.release()
+
+ return result
+
+
+def retrieve_from_ooc(keys, dsk):
+ """
+ Creates a Dask graph for loading stored ``keys`` from ``dsk``.
+
+ Parameters
+ ----------
+ keys: Sequence
+ A sequence containing Dask graph keys to load
+ dsk: Mapping
+ A Dask graph corresponding to a Dask Array
+
+ Examples
+ --------
+ >>> import dask.array as da
+ >>> d = da.ones((5, 6), chunks=(2, 3))
+ >>> a = np.empty(d.shape)
+ >>> g = insert_to_ooc(d, a)
+ >>> retrieve_from_ooc(g.keys(), g) # doctest: +SKIP
+ """
+
+ load_dsk = dict()
+ for each_key in keys:
+ load_key = ('load-%s' % each_key[0],) + each_key[1:]
+ # Reuse the result and arguments from `store_chunk` in `load_chunk`.
+ load_dsk[load_key] = (load_chunk, each_key,) + dsk[each_key][3:-1]
+
+ return load_dsk
+
+
def asarray(a):
"""Convert the input to a dask array.
diff --git a/dask/compatibility.py b/dask/compatibility.py
index 95a63efe2..fdadfa70e 100644
--- a/dask/compatibility.py
+++ b/dask/compatibility.py
@@ -55,6 +55,12 @@ if PY3:
def _getargspec(func):
return inspect.getfullargspec(func)
+ def get_named_args(func):
+ """Get all non ``*args/**kwargs`` arguments for a function"""
+ s = inspect.signature(func)
+ return [n for n, p in s.parameters.items()
+ if p.kind == p.POSITIONAL_OR_KEYWORD]
+
def reraise(exc, tb=None):
if exc.__traceback__ is not tb:
raise exc.with_traceback(tb)
@@ -93,6 +99,14 @@ else:
def _getargspec(func):
return inspect.getargspec(func)
+ def get_named_args(func):
+ """Get all non ``*args/**kwargs`` arguments for a function"""
+ try:
+ return getargspec(func).args
+ except TypeError as e:
+ # Be consistent with py3
+ raise ValueError(*e.args)
+
def gzip_decompress(b):
f = gzip.GzipFile(fileobj=BytesIO(b))
result = f.read()
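
`get_named_args` returns only plain (positional-or-keyword) parameters; a sketch replicating the Python 3 branch above:

```python
import inspect

def get_named_args(func):
    # Same logic as the PY3 branch in the patch.
    s = inspect.signature(func)
    return [n for n, p in s.parameters.items()
            if p.kind == p.POSITIONAL_OR_KEYWORD]

def f(a, b, *args, c=1, **kwargs):
    pass

# *args, **kwargs and the keyword-only `c` are excluded.
assert get_named_args(f) == ['a', 'b']
```
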
diff --git a/dask/dataframe/accessor.py b/dask/dataframe/accessor.py
index 10ab0f3bc..5462f9a6e 100644
--- a/dask/dataframe/accessor.py
+++ b/dask/dataframe/accessor.py
@@ -119,6 +119,21 @@ class StringAccessor(Accessor):
def split(self, pat=None, n=-1):
return self._function_map('split', pat=pat, n=n)
+ @derived_from(pd.core.strings.StringMethods)
+ def cat(self, others=None, sep=None, na_rep=None):
+ from .core import Series, Index
+ if others is None:
+ raise NotImplementedError("x.str.cat() with `others == None`")
+
+ valid_types = (Series, Index, pd.Series, pd.Index)
+ if isinstance(others, valid_types):
+ others = [others]
+ elif not all(isinstance(a, valid_types) for a in others):
+ raise TypeError("others must be Series/Index")
+
+ return self._series.map_partitions(str_cat, *others, sep=sep,
+ na_rep=na_rep, meta=self._series._meta)
+
def __getitem__(self, index):
return self._series.map_partitions(str_get, index,
meta=self._series._meta)
@@ -127,3 +142,7 @@ class StringAccessor(Accessor):
def str_get(series, index):
""" Implements series.str[index] """
return series.str[index]
+
+
+def str_cat(self, *others, **kwargs):
+ return self.str.cat(others=others, **kwargs)
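
A usage sketch of the new `Series.str.cat` support (assuming the patch is applied):

```python
import pandas as pd
import dask.dataframe as dd

s = pd.Series(['a', 'b', 'c'])
ds = dd.from_pandas(s, npartitions=2)

# Concatenate element-wise with another aligned Series.
res = ds.str.cat(ds.str.upper(), sep=':')
assert res.compute().tolist() == ['a:A', 'b:B', 'c:C']

# Joining a single Series into one string (others=None) is not supported.
try:
    ds.str.cat(sep=':')
except NotImplementedError:
    pass
```
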
diff --git a/dask/dataframe/core.py b/dask/dataframe/core.py
index f31475ebc..e0e022a33 100644
--- a/dask/dataframe/core.py
+++ b/dask/dataframe/core.py
@@ -1360,27 +1360,29 @@ Dask Name: {name}, {task} tasks""".format(klass=self.__class__.__name__,
return new_dd_object(dsk, name, num._meta, divisions=[None, None])
- def _cum_agg(self, token, chunk, aggregate, axis, skipna=True,
+ def _cum_agg(self, op_name, chunk, aggregate, axis, skipna=True,
chunk_kwargs=None):
""" Wrapper for cumulative operation """
axis = self._validate_axis(axis)
if axis == 1:
- name = '{0}{1}(axis=1)'.format(self._token_prefix, token)
+ name = '{0}{1}(axis=1)'.format(self._token_prefix, op_name)
return self.map_partitions(chunk, token=name, **chunk_kwargs)
else:
# cumulate each partitions
- name1 = '{0}{1}-map'.format(self._token_prefix, token)
+ name1 = '{0}{1}-map'.format(self._token_prefix, op_name)
cumpart = map_partitions(chunk, self, token=name1, meta=self,
**chunk_kwargs)
- name2 = '{0}{1}-take-last'.format(self._token_prefix, token)
+ name2 = '{0}{1}-take-last'.format(self._token_prefix, op_name)
cumlast = map_partitions(_take_last, cumpart, skipna,
meta=pd.Series([]), token=name2)
- name = '{0}{1}'.format(self._token_prefix, token)
- cname = '{0}{1}-cum-last'.format(self._token_prefix, token)
+ suffix = tokenize(self)
+ name = '{0}{1}-{2}'.format(self._token_prefix, op_name, suffix)
+ cname = '{0}{1}-cum-last-{2}'.format(self._token_prefix, op_name,
+ suffix)
# aggregate cumulated partisions and its previous last element
dask = {}
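
The key change above: cumulative task names now carry a `tokenize(self)` suffix, so two different cumulative computations no longer share graph keys. An illustrative sketch (the `series-` prefix is hypothetical):

```python
from dask.base import tokenize

def cum_name(op_name, data, token_prefix='series-'):
    # With the suffix, equal op names on different inputs yield distinct keys.
    return '{0}{1}-{2}'.format(token_prefix, op_name, tokenize(data))

assert cum_name('cumsum', [1, 1, 1]) != cum_name('cumsum', [2, 2, 2])
```
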
@@ -2966,7 +2968,11 @@ def elemwise(op, *args, **kwargs):
# should not occur in current funcs
msg = 'elemwise with 2 or more DataFrames and Scalar is not supported'
raise NotImplementedError(msg)
- meta = _emulate(op, *args, **kwargs)
+ # For broadcastable series, use no rows.
+ parts = [d._meta if _is_broadcastable(d)
+ else d._meta_nonempty for d in dasks]
+ with raise_on_meta_error(funcname(op)):
+ meta = partial_by_order(*parts, function=op, other=other)
return new_dd_object(dsk, _name, meta, divisions)
@@ -3152,9 +3158,9 @@ def apply_concat_apply(args, chunk=None, aggregate=None, combine=None,
dsk[(b, j)] = (aggregate, conc)
if meta is no_default:
- meta_chunk = _emulate(apply, chunk, args, chunk_kwargs)
- meta = _emulate(apply, aggregate, [_concat([meta_chunk])],
- aggregate_kwargs)
+ meta_chunk = _emulate(chunk, *args, **chunk_kwargs)
+ meta = _emulate(aggregate, _concat([meta_chunk]),
+ **aggregate_kwargs)
meta = make_meta(meta)
for arg in args:
@@ -3173,15 +3179,8 @@ def _extract_meta(x, nonempty=False):
"""
Extract internal cache data (``_meta``) from dd.DataFrame / dd.Series
"""
- if isinstance(x, Scalar):
+ if isinstance(x, (Scalar, _Frame)):
return x._meta_nonempty if nonempty else x._meta
- elif isinstance(x, _Frame):
- if (isinstance(x, Series) and
- x.npartitions == 1 and
- x.known_divisions): # may be broadcastable
- return x._meta
- else:
- return x._meta_nonempty if nonempty else x._meta
elif isinstance(x, list):
return [_extract_meta(_x, nonempty) for _x in x]
elif isinstance(x, tuple):
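
The metadata change above fixes aligned single-partition arguments (issue 3034): meta inference now uses non-empty meta unless an argument is genuinely broadcastable. A sketch mirroring that behavior:

```python
import pandas as pd
import dask.dataframe as dd

df = pd.DataFrame({'x': [1, 2, 3]})
ddf = dd.from_pandas(df, npartitions=1)

# Length checks inside the mapped function hold during meta inference,
# because both aligned arguments are emulated with non-empty meta.
def check(frame, col):
    assert len(frame) == len(col) > 0
    return frame

assert dd.map_partitions(check, ddf, ddf.x).compute().equals(df)
```
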
diff --git a/dask/dataframe/groupby.py b/dask/dataframe/groupby.py
index ed7a1d8d5..a6c7752d3 100644
--- a/dask/dataframe/groupby.py
+++ b/dask/dataframe/groupby.py
@@ -289,8 +289,17 @@ def _nunique_df_chunk(df, *index, **kwargs):
return grouped
+def _drop_duplicates_rename(df):
+ # Avoid duplicate index labels in a groupby().apply() context
+ # https://github.com/dask/dask/issues/3039
+ # https://github.com/pandas-dev/pandas/pull/18882
+ names = [None] * df.index.nlevels
+ return df.drop_duplicates().rename_axis(names, copy=False)
+
+
def _nunique_df_combine(df, levels):
- result = df.groupby(level=levels, sort=False).apply(pd.DataFrame.drop_duplicates)
+ result = df.groupby(level=levels,
+ sort=False).apply(_drop_duplicates_rename)
if isinstance(levels, list):
result.index = pd.MultiIndex.from_arrays([
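
A sketch of the helper above: clearing index level names before `groupby(level=...).apply()` avoids pandas' duplicate-index-label complaint (issue 3039):

```python
import pandas as pd

def _drop_duplicates_rename(df):
    # Clear (possibly duplicated) index level names so that
    # groupby(level=...).apply() does not trip over them.
    names = [None] * df.index.nlevels
    return df.drop_duplicates().rename_axis(names, copy=False)

df = pd.DataFrame({'g': [0, 0, 1], 'v': [1, 1, 2]}).set_index('g')
out = df.groupby(level=0, sort=False).apply(_drop_duplicates_rename)
assert out.index.names[-1] is None  # inner level name was cleared
```
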
diff --git a/dask/utils.py b/dask/utils.py
index 89d3ac50b..10f23125a 100644
--- a/dask/utils.py
+++ b/dask/utils.py
@@ -16,7 +16,7 @@ import multiprocessing as mp
import uuid
from weakref import WeakValueDictionary
-from .compatibility import getargspec, PY3, unicode, bind_method
+from .compatibility import get_named_args, getargspec, PY3, unicode, bind_method
from .core import get_deps
from .context import _globals
from .optimize import key_split # noqa: F401
@@ -485,10 +485,10 @@ def derived_from(original_klass, version=None, ua_args=[]):
doc = ''
try:
- method_args = getargspec(method).args
- original_args = getargspec(original_method).args
+ method_args = get_named_args(method)
+ original_args = get_named_args(original_method)
not_supported = [m for m in original_args if m not in method_args]
- except TypeError:
+ except ValueError:
not_supported = []
if len(ua_args) > 0:
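
With this change, `derived_from` compares signatures via `get_named_args` on both Python versions. A sketch of the comparison it performs (the method pair is hypothetical, and the import assumes this patch is applied):

```python
from dask.compatibility import get_named_args

def pandas_method(self, a, b, c=None):
    pass

def dask_method(self, a, b):
    pass

# Parameters the dask version does not support yet end up in the docstring.
not_supported = [m for m in get_named_args(pandas_method)
                 if m not in get_named_args(dask_method)]
assert not_supported == ['c']
```
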
diff --git a/docs/source/changelog.rst b/docs/source/changelog.rst
index 1cbd01691..b6b532f86 100644
--- a/docs/source/changelog.rst
+++ b/docs/source/changelog.rst
@@ -26,6 +26,7 @@ bounds indexes (:pr:`2967`) `Stephan Hoyer`_
DataFrame
+++++++++
+- Fixed naming bug in cumulative aggregations (:issue:`3037`) `Martijn Arts`_
- Fixed ``dd.read_csv`` when ``names`` is given but ``header`` is not set to ``None`` (:issue:`2976`) `Martijn Arts`_
- Fixed ``dd.read_csv`` so that passing instances of ``CategoricalDtype`` in ``dtype`` will result in known categoricals (:pr:`2997`) `Tom Augspurger`_
- Prevent ``bool()`` coercion from calling compute (:pr:`2958`) `Albert DeFusco`_
@@ -35,6 +36,8 @@ DataFrame
- Fixed ``dd.concat`` losing the index dtype when the data contained a categorical (:issue:`2932`) `Tom Augspurger`_
- ``DataFrame.merge()`` (:pr:`2960`) now supports merging on a combination of columns and the index `Jon Mease`_
- Removed the deprecated ``dd.rolling*`` methods, in preperation for their removal in the next pandas release (:pr:`2995`) `Tom Augspurger`_
+- Fix metadata inference bug in which single-partition series were mistakenly special cased (:pr:`3035`) `Jim Crist`_
+- Add support for ``Series.str.cat`` (:pr:`3028`) `Jim Crist`_
Core
| repeated cumsum on dataframe returns the results of the first cumsum
Reproduce with:
```python
import pandas as pd
import dask.dataframe as ddf
df = pd.DataFrame(dict(a=list('aabbcc')),
index=pd.date_range(start='20100101', periods=6))
df['ones']=1
df['twos']=2
dadf = ddf.from_pandas(df, npartitions=3)
dadf['onescs']=dadf.ones.cumsum()
dadf['twoscs']=dadf.twos.cumsum()
print(dadf.compute())
```
which yields
```
a ones twos onescs twoscs
2010-01-01 a 1 2 1 1
2010-01-02 a 1 2 2 2
2010-01-03 b 1 2 3 3
2010-01-04 b 1 2 4 4
2010-01-05 c 1 2 5 5
2010-01-06 c 1 2 6 6
```
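
Editor's note, not part of the original report: the patch above points at the root cause. Before the fix, every cumulative op of the same kind used the same graph key, so merging the two graphs silently collapsed them into one (key names here are illustrative):

```python
# Duplicate keys collapse to a single entry when graphs are merged,
# so dask could not distinguish the two cumulative computations.
g1 = {('series-cumsum', 0): 'tasks for df.ones'}
g2 = {('series-cumsum', 0): 'tasks for df.twos'}
merged = dict(g1)
merged.update(g2)
assert len(merged) == 1  # one of the two computations is lost
```
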
Pandas itself works correctly:
```python
df['onescs']=df.ones.cumsum()
df['twoscs']=df.twos.cumsum()
print(df)
```
which yields
```
a ones twos onescs twoscs
2010-01-01 a 1 2 1 2
2010-01-02 a 1 2 2 4
2010-01-03 b 1 2 3 6
2010-01-04 b 1 2 4 8
2010-01-05 c 1 2 5 10
2010-01-06 c 1 2 6 12
``` | dask/dask | diff --git a/dask/array/tests/test_array_core.py b/dask/array/tests/test_array_core.py
index 928206559..0eececc3c 100644
--- a/dask/array/tests/test_array_core.py
+++ b/dask/array/tests/test_array_core.py
@@ -19,7 +19,7 @@ from toolz.curried import identity
import dask
import dask.array as da
from dask.base import tokenize, compute_as_if_collection
-from dask.delayed import delayed
+from dask.delayed import Delayed, delayed
from dask.local import get_sync
from dask.utils import ignoring, tmpfile, tmpdir
from dask.utils_test import inc
@@ -1213,14 +1213,31 @@ def test_store_delayed_target():
atd = delayed(make_target)('at')
btd = delayed(make_target)('bt')
- store([a, b], [atd, btd])
+ # test not keeping result
+ st = store([a, b], [atd, btd])
at = targs['at']
bt = targs['bt']
+ assert st is None
assert_eq(at, a)
assert_eq(bt, b)
+ # test keeping result
+ st = store([a, b], [atd, btd], return_stored=True, compute=False)
+ st = dask.compute(*st)
+
+ at = targs['at']
+ bt = targs['bt']
+
+ assert st is not None
+ assert isinstance(st, tuple)
+ assert all([isinstance(v, np.ndarray) for v in st])
+ assert_eq(at, a)
+ assert_eq(bt, b)
+ assert_eq(st[0], a)
+ assert_eq(st[1], b)
+
pytest.raises(ValueError, lambda: store([a], [at, bt]))
pytest.raises(ValueError, lambda: store(at, at))
pytest.raises(ValueError, lambda: store([at, bt], [at, bt]))
@@ -1233,7 +1250,8 @@ def test_store():
at = np.empty(shape=(4, 4))
bt = np.empty(shape=(4, 4))
- store([a, b], [at, bt])
+ st = store([a, b], [at, bt])
+ assert st is None
assert (at == 2).all()
assert (bt == 3).all()
@@ -1252,22 +1270,77 @@ def test_store_regions():
at = np.zeros(shape=(8, 4, 6))
bt = np.zeros(shape=(8, 4, 6))
v = store([a, b], [at, bt], regions=region, compute=False)
+ assert isinstance(v, Delayed)
+ assert (at == 0).all() and (bt[region] == 0).all()
+ assert all([ev is None for ev in v.compute()])
+ assert (at[region] == 2).all() and (bt[region] == 3).all()
+ assert not (bt == 3).all() and not ( bt == 0 ).all()
+ assert not (at == 2).all() and not ( at == 0 ).all()
+
+ # Single region (keep result):
+ at = np.zeros(shape=(8, 4, 6))
+ bt = np.zeros(shape=(8, 4, 6))
+ v = store(
+ [a, b], [at, bt], regions=region, compute=False, return_stored=True
+ )
+ assert isinstance(v, tuple)
+ assert all([isinstance(e, da.Array) for e in v])
assert (at == 0).all() and (bt[region] == 0).all()
- v.compute()
+
+ ar, br = v
+ assert ar.dtype == a.dtype
+ assert br.dtype == b.dtype
+ assert ar.shape == a.shape
+ assert br.shape == b.shape
+ assert ar.chunks == a.chunks
+ assert br.chunks == b.chunks
+
+ ar, br = da.compute(ar, br)
assert (at[region] == 2).all() and (bt[region] == 3).all()
assert not (bt == 3).all() and not ( bt == 0 ).all()
assert not (at == 2).all() and not ( at == 0 ).all()
+ assert (br == 3).all()
+ assert (ar == 2).all()
# Multiple regions:
at = np.zeros(shape=(8, 4, 6))
bt = np.zeros(shape=(8, 4, 6))
v = store([a, b], [at, bt], regions=[region, region], compute=False)
+ assert isinstance(v, Delayed)
assert (at == 0).all() and (bt[region] == 0).all()
- v.compute()
+ assert all([ev is None for ev in v.compute()])
assert (at[region] == 2).all() and (bt[region] == 3).all()
assert not (bt == 3).all() and not ( bt == 0 ).all()
assert not (at == 2).all() and not ( at == 0 ).all()
+ # Multiple regions (keep result):
+ at = np.zeros(shape=(8, 4, 6))
+ bt = np.zeros(shape=(8, 4, 6))
+ v = store(
+ [a, b], [at, bt],
+ regions=[region, region],
+ compute=False,
+ return_stored=True
+ )
+ assert isinstance(v, tuple)
+ assert all([isinstance(e, da.Array) for e in v])
+ assert (at == 0).all() and (bt[region] == 0).all()
+
+ ar, br = v
+ assert ar.dtype == a.dtype
+ assert br.dtype == b.dtype
+ assert ar.shape == a.shape
+ assert br.shape == b.shape
+ assert ar.chunks == a.chunks
+ assert br.chunks == b.chunks
+
+ ar, br = da.compute(ar, br)
+ assert (at[region] == 2).all() and (bt[region] == 3).all()
+ assert not (bt == 3).all() and not ( bt == 0 ).all()
+ assert not (at == 2).all() and not ( at == 0 ).all()
+ assert (br == 3).all()
+ assert (ar == 2).all()
+
def test_store_compute_false():
d = da.ones((4, 4), chunks=(2, 2))
@@ -1277,8 +1350,18 @@ def test_store_compute_false():
bt = np.zeros(shape=(4, 4))
v = store([a, b], [at, bt], compute=False)
+ assert isinstance(v, Delayed)
+ assert (at == 0).all() and (bt == 0).all()
+ assert all([ev is None for ev in v.compute()])
+ assert (at == 2).all() and (bt == 3).all()
+
+ at = np.zeros(shape=(4, 4))
+ bt = np.zeros(shape=(4, 4))
+
+ dat, dbt = store([a, b], [at, bt], compute=False, return_stored=True)
+ assert isinstance(dat, Array) and isinstance(dbt, Array)
assert (at == 0).all() and (bt == 0).all()
- v.compute()
+ assert (dat.compute() == at).all() and (dbt.compute() == bt).all()
assert (at == 2).all() and (bt == 3).all()
@@ -1320,6 +1403,7 @@ def test_store_locks():
lock = Lock()
v = store([a, b], [at, bt], compute=False, lock=lock)
+ assert isinstance(v, Delayed)
dsk = v.dask
locks = set(vv for v in dsk.values() for vv in v if isinstance(vv, _Lock))
assert locks == set([lock])
@@ -1328,16 +1412,18 @@ def test_store_locks():
at = NonthreadSafeStore()
v = store([a, b], [at, at], lock=lock,
get=dask.threaded.get, num_workers=10)
+ assert v is None
# Don't assume thread safety by default
at = NonthreadSafeStore()
- store(a, at, get=dask.threaded.get, num_workers=10)
- a.store(at, get=dask.threaded.get, num_workers=10)
+ assert store(a, at, get=dask.threaded.get, num_workers=10) is None
+ assert a.store(at, get=dask.threaded.get, num_workers=10) is None
# Ensure locks can be removed
at = ThreadSafeStore()
for i in range(10):
- a.store(at, lock=False, get=dask.threaded.get, num_workers=10)
+ st = a.store(at, lock=False, get=dask.threaded.get, num_workers=10)
+ assert st is None
if at.max_concurrent_uses > 1:
break
if i == 9:
@@ -1350,7 +1436,8 @@ def test_store_multiprocessing_lock():
a = d + 1
at = np.zeros(shape=(10, 10))
- a.store(at, get=dask.multiprocessing.get, num_workers=10)
+ st = a.store(at, get=dask.multiprocessing.get, num_workers=10)
+ assert st is None
def test_to_hdf5():
diff --git a/dask/array/tests/test_chunk.py b/dask/array/tests/test_chunk.py
index c951fb2eb..4d9059932 100644
--- a/dask/array/tests/test_chunk.py
+++ b/dask/array/tests/test_chunk.py
@@ -16,7 +16,6 @@ def test_keepdims_wrapper_no_axis():
summer_wrapped = keepdims_wrapper(summer)
assert summer_wrapped != summer
- assert summer_wrapped == keepdims_wrapper(summer_wrapped)
a = np.arange(24).reshape(1, 2, 3, 4)
@@ -44,7 +43,6 @@ def test_keepdims_wrapper_one_axis():
summer_wrapped = keepdims_wrapper(summer)
assert summer_wrapped != summer
- assert summer_wrapped == keepdims_wrapper(summer_wrapped)
a = np.arange(24).reshape(1, 2, 3, 4)
@@ -72,7 +70,6 @@ def test_keepdims_wrapper_two_axes():
summer_wrapped = keepdims_wrapper(summer)
assert summer_wrapped != summer
- assert summer_wrapped == keepdims_wrapper(summer_wrapped)
a = np.arange(24).reshape(1, 2, 3, 4)
diff --git a/dask/dataframe/tests/test_dataframe.py b/dask/dataframe/tests/test_dataframe.py
index 9c9aa323b..f4230fd3d 100644
--- a/dask/dataframe/tests/test_dataframe.py
+++ b/dask/dataframe/tests/test_dataframe.py
@@ -519,6 +519,22 @@ def test_map_partitions_keeps_kwargs_in_dict():
assert a.x.map_partitions(f, x=5)._name != a.x.map_partitions(f, x=6)._name
+def test_metadata_inference_single_partition_aligned_args():
+ # https://github.com/dask/dask/issues/3034
+ # Previously broadcastable series functionality broke this
+
+ df = pd.DataFrame({'x': [1, 2, 3, 4, 5]})
+ ddf = dd.from_pandas(df, npartitions=1)
+
+ def check(df, df_x):
+ assert len(df) == len(df_x)
+ assert len(df) > 0
+ return df
+
+ res = dd.map_partitions(check, ddf, ddf.x)
+ assert_eq(res, ddf)
+
+
def test_drop_duplicates():
res = d.drop_duplicates()
res2 = d.drop_duplicates(split_every=2)
@@ -1362,44 +1378,61 @@ def test_datetime_accessor():
def test_str_accessor():
df = pd.DataFrame({'x': ['abc', 'bcd', 'cdef', 'DEFG'], 'y': [1, 2, 3, 4]},
- index=['e', 'f', 'g', 'H'])
+ index=['E', 'f', 'g', 'h'])
- a = dd.from_pandas(df, 2, sort=False)
+ ddf = dd.from_pandas(df, 2)
# Check that str not in dir/hasattr for non-object columns
- assert 'str' not in dir(a.y)
- assert not hasattr(a.y, 'str')
+ assert 'str' not in dir(ddf.y)
+ assert not hasattr(ddf.y, 'str')
# not implemented methods don't show up
- assert 'get_dummies' not in dir(a.x.str)
- assert not hasattr(a.x.str, 'get_dummies')
+ assert 'get_dummies' not in dir(ddf.x.str)
+ assert not hasattr(ddf.x.str, 'get_dummies')
- assert 'upper' in dir(a.x.str)
- assert_eq(a.x.str.upper(), df.x.str.upper())
- assert set(a.x.str.upper().dask) == set(a.x.str.upper().dask)
+ assert 'upper' in dir(ddf.x.str)
+ assert_eq(ddf.x.str.upper(), df.x.str.upper())
+ assert set(ddf.x.str.upper().dask) == set(ddf.x.str.upper().dask)
- assert 'upper' in dir(a.index.str)
- assert_eq(a.index.str.upper(), df.index.str.upper())
- assert set(a.index.str.upper().dask) == set(a.index.str.upper().dask)
+ assert 'upper' in dir(ddf.index.str)
+ assert_eq(ddf.index.str.upper(), df.index.str.upper())
+ assert set(ddf.index.str.upper().dask) == set(ddf.index.str.upper().dask)
# make sure to pass thru args & kwargs
- assert 'contains' in dir(a.x.str)
- assert_eq(a.x.str.contains('a'), df.x.str.contains('a'))
- assert set(a.x.str.contains('a').dask) == set(a.x.str.contains('a').dask)
+ assert 'contains' in dir(ddf.x.str)
+ assert_eq(ddf.x.str.contains('a'), df.x.str.contains('a'))
+ assert set(ddf.x.str.contains('a').dask) == set(ddf.x.str.contains('a').dask)
- assert_eq(a.x.str.contains('d', case=False), df.x.str.contains('d', case=False))
- assert set(a.x.str.contains('d', case=False).dask) == set(a.x.str.contains('d', case=False).dask)
+ assert_eq(ddf.x.str.contains('d', case=False), df.x.str.contains('d', case=False))
+ assert (set(ddf.x.str.contains('d', case=False).dask) ==
+ set(ddf.x.str.contains('d', case=False).dask))
for na in [True, False]:
- assert_eq(a.x.str.contains('a', na=na), df.x.str.contains('a', na=na))
- assert set(a.x.str.contains('a', na=na).dask) == set(a.x.str.contains('a', na=na).dask)
+ assert_eq(ddf.x.str.contains('a', na=na), df.x.str.contains('a', na=na))
+ assert (set(ddf.x.str.contains('a', na=na).dask) ==
+ set(ddf.x.str.contains('a', na=na).dask))
for regex in [True, False]:
- assert_eq(a.x.str.contains('a', regex=regex), df.x.str.contains('a', regex=regex))
- assert set(a.x.str.contains('a', regex=regex).dask) == set(a.x.str.contains('a', regex=regex).dask)
+ assert_eq(ddf.x.str.contains('a', regex=regex), df.x.str.contains('a', regex=regex))
+ assert (set(ddf.x.str.contains('a', regex=regex).dask) ==
+ set(ddf.x.str.contains('a', regex=regex).dask))
+
+ assert_eq(ddf.x.str[:2], df.x.str[:2])
+ assert_eq(ddf.x.str[1], df.x.str[1])
+
+ # str.cat
+ sol = df.x.str.cat(df.x.str.upper(), sep=':')
+ assert_eq(ddf.x.str.cat(ddf.x.str.upper(), sep=':'), sol)
+ assert_eq(ddf.x.str.cat(df.x.str.upper(), sep=':'), sol)
+ assert_eq(ddf.x.str.cat([ddf.x.str.upper(), df.x.str.lower()], sep=':'),
+ df.x.str.cat([df.x.str.upper(), df.x.str.lower()], sep=':'))
- assert_eq(df.x.str[:2], df.x.str[:2])
- assert_eq(a.x.str[1], a.x.str[1])
+ for o in ['foo', ['foo']]:
+ with pytest.raises(TypeError):
+ ddf.x.str.cat(o)
+
+ with pytest.raises(NotImplementedError):
+ ddf.x.str.cat(sep=':')
def test_empty_max():
@@ -2823,3 +2856,18 @@ def test_bool():
for cond in conditions:
with pytest.raises(ValueError):
bool(cond)
+
+
+def test_cumulative_multiple_columns():
+ # GH 3037
+ df = pd.DataFrame(np.random.randn(100, 5), columns=list('abcde'))
+ ddf = dd.from_pandas(df, 5)
+
+ for d in [ddf, df]:
+ for c in df.columns:
+ d[c + 'cs'] = d[c].cumsum()
+ d[c + 'cmin'] = d[c].cummin()
+ d[c + 'cmax'] = d[c].cummax()
+ d[c + 'cp'] = d[c].cumprod()
+
+ assert_eq(ddf, df)
diff --git a/dask/dataframe/tests/test_groupby.py b/dask/dataframe/tests/test_groupby.py
index 2c6a675df..de61dcd4d 100644
--- a/dask/dataframe/tests/test_groupby.py
+++ b/dask/dataframe/tests/test_groupby.py
@@ -160,6 +160,9 @@ def test_groupby_on_index(get):
def func(df):
return df.assign(b=df.b - df.b.mean())
+ def func2(df):
+ return df[['b']] - df[['b']].mean()
+
with dask.set_options(get=get):
with pytest.warns(None):
assert_eq(ddf.groupby('a').apply(func),
@@ -168,8 +171,8 @@ def test_groupby_on_index(get):
assert_eq(ddf.groupby('a').apply(func).set_index('a'),
pdf.groupby('a').apply(func).set_index('a'))
- assert_eq(pdf2.groupby(pdf2.index).apply(func),
- ddf2.groupby(ddf2.index).apply(func))
+ assert_eq(pdf2.groupby(pdf2.index).apply(func2),
+ ddf2.groupby(ddf2.index).apply(func2))
def test_groupby_multilevel_getitem():
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 7
} | 1.20 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[complete]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
click==8.0.4
cloudpickle==2.2.1
coverage==6.2
-e git+https://github.com/dask/dask.git@a1653463534a7dd9212f45f833aa17b7dd12e574#egg=dask
distributed==1.20.2
execnet==1.9.0
HeapDict==1.0.1
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
locket==1.0.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
msgpack-python==0.5.6
numpy==1.19.5
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pandas==1.1.5
partd==1.2.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
psutil==7.0.0
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-asyncio==0.16.0
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-xdist==3.0.2
python-dateutil==2.9.0.post0
pytz==2025.2
six==1.17.0
sortedcontainers==2.4.0
tblib==1.7.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
toolz==0.12.0
tornado==6.1
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zict==2.1.0
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: dask
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- click==8.0.4
- cloudpickle==2.2.1
- coverage==6.2
- distributed==1.20.2
- execnet==1.9.0
- heapdict==1.0.1
- locket==1.0.0
- msgpack-python==0.5.6
- numpy==1.19.5
- pandas==1.1.5
- partd==1.2.0
- psutil==7.0.0
- pytest-asyncio==0.16.0
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-xdist==3.0.2
- python-dateutil==2.9.0.post0
- pytz==2025.2
- six==1.17.0
- sortedcontainers==2.4.0
- tblib==1.7.0
- tomli==1.2.3
- toolz==0.12.0
- tornado==6.1
- zict==2.1.0
prefix: /opt/conda/envs/dask
| [
"dask/array/tests/test_array_core.py::test_store_delayed_target",
"dask/array/tests/test_array_core.py::test_store_regions",
"dask/array/tests/test_array_core.py::test_store_compute_false",
"dask/dataframe/tests/test_dataframe.py::test_metadata_inference_single_partition_aligned_args",
"dask/dataframe/tests/test_dataframe.py::test_str_accessor",
"dask/dataframe/tests/test_dataframe.py::test_cumulative_multiple_columns"
]
| [
"dask/array/tests/test_array_core.py::test_matmul",
"dask/dataframe/tests/test_dataframe.py::test_Dataframe",
"dask/dataframe/tests/test_dataframe.py::test_attributes",
"dask/dataframe/tests/test_dataframe.py::test_timezone_freq[npartitions1]",
"dask/dataframe/tests/test_dataframe.py::test_clip[2-5]",
"dask/dataframe/tests/test_dataframe.py::test_clip[2.5-3.5]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_picklable",
"dask/dataframe/tests/test_dataframe.py::test_repartition_freq_divisions",
"dask/dataframe/tests/test_dataframe.py::test_select_dtypes[include0-None]",
"dask/dataframe/tests/test_dataframe.py::test_select_dtypes[None-exclude1]",
"dask/dataframe/tests/test_dataframe.py::test_select_dtypes[include2-exclude2]",
"dask/dataframe/tests/test_dataframe.py::test_select_dtypes[include3-None]",
"dask/dataframe/tests/test_dataframe.py::test_to_timestamp",
"dask/dataframe/tests/test_dataframe.py::test_apply",
"dask/dataframe/tests/test_dataframe.py::test_cov_corr_mixed",
"dask/dataframe/tests/test_dataframe.py::test_apply_infer_columns",
"dask/dataframe/tests/test_dataframe.py::test_info",
"dask/dataframe/tests/test_dataframe.py::test_groupby_multilevel_info",
"dask/dataframe/tests/test_dataframe.py::test_categorize_info",
"dask/dataframe/tests/test_dataframe.py::test_idxmaxmin[idx2-True]",
"dask/dataframe/tests/test_dataframe.py::test_idxmaxmin[idx2-False]",
"dask/dataframe/tests/test_dataframe.py::test_shift",
"dask/dataframe/tests/test_dataframe.py::test_shift_with_freq",
"dask/dataframe/tests/test_dataframe.py::test_first_and_last[first]",
"dask/dataframe/tests/test_dataframe.py::test_first_and_last[last]",
"dask/dataframe/tests/test_dataframe.py::test_datetime_loc_open_slicing",
"dask/dataframe/tests/test_groupby.py::test_full_groupby_multilevel[grouper4]",
"dask/dataframe/tests/test_groupby.py::test_groupby_dir",
"dask/dataframe/tests/test_groupby.py::test_groupby_on_index[get_sync]",
"dask/dataframe/tests/test_groupby.py::test_groupby_on_index[get]",
"dask/dataframe/tests/test_groupby.py::test_groupby_multilevel_getitem",
"dask/dataframe/tests/test_groupby.py::test_groupby_multilevel_agg",
"dask/dataframe/tests/test_groupby.py::test_groupby_index_array",
"dask/dataframe/tests/test_groupby.py::test_apply_shuffle_multilevel[grouper5]",
"dask/dataframe/tests/test_groupby.py::test_groupby_apply_tasks",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>0-False-spec0]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>0-None-spec0]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>1-False-spec0]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>1-None-spec0]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>2-False-spec0]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>2-None-spec0]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>3-False-spec0]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>3-None-spec0]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>4-False-spec0]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>4-None-spec0]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregate__examples[<lambda>0-False-spec0]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregate__examples[<lambda>0-None-spec0]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregate__examples[<lambda>1-False-spec0]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregate__examples[<lambda>1-None-spec0]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregate__examples[<lambda>2-False-spec0]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregate__examples[<lambda>2-None-spec0]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>1-mean]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>3-mean]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>4-mean]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregations_multilevel[<lambda>1-mean]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregations_multilevel[<lambda>2-mean]",
"dask/dataframe/tests/test_groupby.py::test_groupby_meta_content[<lambda>0-grouper4]",
"dask/dataframe/tests/test_groupby.py::test_groupby_meta_content[<lambda>1-grouper4]",
"dask/dataframe/tests/test_groupby.py::test_groupby_meta_content[<lambda>2-grouper4]",
"dask/dataframe/tests/test_groupby.py::test_split_out_multi_column_groupby",
"dask/dataframe/tests/test_groupby.py::test_groupby_unaligned_index",
"dask/dataframe/tests/test_groupby.py::test_groupby_column_and_index_agg_funcs[mean]",
"dask/dataframe/tests/test_groupby.py::test_groupby_agg_custom__mode"
]
| [
"dask/array/tests/test_array_core.py::test_getem",
"dask/array/tests/test_array_core.py::test_top",
"dask/array/tests/test_array_core.py::test_top_supports_broadcasting_rules",
"dask/array/tests/test_array_core.py::test_top_literals",
"dask/array/tests/test_array_core.py::test_atop_literals",
"dask/array/tests/test_array_core.py::test_concatenate3_on_scalars",
"dask/array/tests/test_array_core.py::test_chunked_dot_product",
"dask/array/tests/test_array_core.py::test_chunked_transpose_plus_one",
"dask/array/tests/test_array_core.py::test_broadcast_dimensions_works_with_singleton_dimensions",
"dask/array/tests/test_array_core.py::test_broadcast_dimensions",
"dask/array/tests/test_array_core.py::test_Array",
"dask/array/tests/test_array_core.py::test_uneven_chunks",
"dask/array/tests/test_array_core.py::test_numblocks_suppoorts_singleton_block_dims",
"dask/array/tests/test_array_core.py::test_keys",
"dask/array/tests/test_array_core.py::test_Array_computation",
"dask/array/tests/test_array_core.py::test_stack",
"dask/array/tests/test_array_core.py::test_short_stack",
"dask/array/tests/test_array_core.py::test_stack_scalars",
"dask/array/tests/test_array_core.py::test_stack_promote_type",
"dask/array/tests/test_array_core.py::test_stack_rechunk",
"dask/array/tests/test_array_core.py::test_concatenate",
"dask/array/tests/test_array_core.py::test_concatenate_unknown_axes",
"dask/array/tests/test_array_core.py::test_concatenate_rechunk",
"dask/array/tests/test_array_core.py::test_concatenate_fixlen_strings",
"dask/array/tests/test_array_core.py::test_block_simple_row_wise",
"dask/array/tests/test_array_core.py::test_block_simple_column_wise",
"dask/array/tests/test_array_core.py::test_block_with_1d_arrays_row_wise",
"dask/array/tests/test_array_core.py::test_block_with_1d_arrays_multiple_rows",
"dask/array/tests/test_array_core.py::test_block_with_1d_arrays_column_wise",
"dask/array/tests/test_array_core.py::test_block_mixed_1d_and_2d",
"dask/array/tests/test_array_core.py::test_block_complicated",
"dask/array/tests/test_array_core.py::test_block_nested",
"dask/array/tests/test_array_core.py::test_block_3d",
"dask/array/tests/test_array_core.py::test_block_with_mismatched_shape",
"dask/array/tests/test_array_core.py::test_block_no_lists",
"dask/array/tests/test_array_core.py::test_block_invalid_nesting",
"dask/array/tests/test_array_core.py::test_block_empty_lists",
"dask/array/tests/test_array_core.py::test_block_tuple",
"dask/array/tests/test_array_core.py::test_binops",
"dask/array/tests/test_array_core.py::test_broadcast_shapes",
"dask/array/tests/test_array_core.py::test_elemwise_on_scalars",
"dask/array/tests/test_array_core.py::test_elemwise_with_ndarrays",
"dask/array/tests/test_array_core.py::test_elemwise_differently_chunked",
"dask/array/tests/test_array_core.py::test_elemwise_dtype",
"dask/array/tests/test_array_core.py::test_operators",
"dask/array/tests/test_array_core.py::test_operator_dtype_promotion",
"dask/array/tests/test_array_core.py::test_field_access",
"dask/array/tests/test_array_core.py::test_field_access_with_shape",
"dask/array/tests/test_array_core.py::test_T",
"dask/array/tests/test_array_core.py::test_norm",
"dask/array/tests/test_array_core.py::test_broadcast_to",
"dask/array/tests/test_array_core.py::test_broadcast_to_array",
"dask/array/tests/test_array_core.py::test_broadcast_to_scalar",
"dask/array/tests/test_array_core.py::test_broadcast_to_chunks",
"dask/array/tests/test_array_core.py::test_broadcast_operator[u_shape0-v_shape0]",
"dask/array/tests/test_array_core.py::test_broadcast_operator[u_shape1-v_shape1]",
"dask/array/tests/test_array_core.py::test_broadcast_operator[u_shape2-v_shape2]",
"dask/array/tests/test_array_core.py::test_broadcast_operator[u_shape3-v_shape3]",
"dask/array/tests/test_array_core.py::test_broadcast_operator[u_shape4-v_shape4]",
"dask/array/tests/test_array_core.py::test_broadcast_operator[u_shape5-v_shape5]",
"dask/array/tests/test_array_core.py::test_broadcast_operator[u_shape6-v_shape6]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape0-new_shape0-chunks0]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape1-new_shape1-5]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape2-new_shape2-5]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape3-new_shape3-12]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape4-new_shape4-12]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape5-new_shape5-chunks5]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape6-new_shape6-4]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape7-new_shape7-4]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape8-new_shape8-4]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape9-new_shape9-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape10-new_shape10-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape11-new_shape11-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape12-new_shape12-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape13-new_shape13-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape14-new_shape14-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape15-new_shape15-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape16-new_shape16-chunks16]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape17-new_shape17-3]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape18-new_shape18-4]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape19-new_shape19-chunks19]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape20-new_shape20-1]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape21-new_shape21-1]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape22-new_shape22-24]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape23-new_shape23-6]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape24-new_shape24-6]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape25-new_shape25-6]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape26-new_shape26-chunks26]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape27-new_shape27-chunks27]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape28-new_shape28-chunks28]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape29-new_shape29-chunks29]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape30-new_shape30-chunks30]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape31-new_shape31-chunks31]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape32-new_shape32-chunks32]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape33-new_shape33-chunks33]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape34-new_shape34-chunks34]",
"dask/array/tests/test_array_core.py::test_reshape_exceptions",
"dask/array/tests/test_array_core.py::test_reshape_splat",
"dask/array/tests/test_array_core.py::test_reshape_fails_for_dask_only",
"dask/array/tests/test_array_core.py::test_reshape_unknown_dimensions",
"dask/array/tests/test_array_core.py::test_full",
"dask/array/tests/test_array_core.py::test_map_blocks",
"dask/array/tests/test_array_core.py::test_map_blocks2",
"dask/array/tests/test_array_core.py::test_map_blocks_with_constants",
"dask/array/tests/test_array_core.py::test_map_blocks_with_kwargs",
"dask/array/tests/test_array_core.py::test_map_blocks_with_chunks",
"dask/array/tests/test_array_core.py::test_map_blocks_dtype_inference",
"dask/array/tests/test_array_core.py::test_from_function_requires_block_args",
"dask/array/tests/test_array_core.py::test_repr",
"dask/array/tests/test_array_core.py::test_slicing_with_ellipsis",
"dask/array/tests/test_array_core.py::test_slicing_with_ndarray",
"dask/array/tests/test_array_core.py::test_dtype",
"dask/array/tests/test_array_core.py::test_blockdims_from_blockshape",
"dask/array/tests/test_array_core.py::test_coerce",
"dask/array/tests/test_array_core.py::test_bool",
"dask/array/tests/test_array_core.py::test_store",
"dask/array/tests/test_array_core.py::test_store_locks",
"dask/array/tests/test_array_core.py::test_to_dask_dataframe",
"dask/array/tests/test_array_core.py::test_np_array_with_zero_dimensions",
"dask/array/tests/test_array_core.py::test_dtype_complex",
"dask/array/tests/test_array_core.py::test_astype",
"dask/array/tests/test_array_core.py::test_arithmetic",
"dask/array/tests/test_array_core.py::test_elemwise_consistent_names",
"dask/array/tests/test_array_core.py::test_optimize",
"dask/array/tests/test_array_core.py::test_slicing_with_non_ndarrays",
"dask/array/tests/test_array_core.py::test_getter",
"dask/array/tests/test_array_core.py::test_size",
"dask/array/tests/test_array_core.py::test_nbytes",
"dask/array/tests/test_array_core.py::test_itemsize",
"dask/array/tests/test_array_core.py::test_Array_normalizes_dtype",
"dask/array/tests/test_array_core.py::test_from_array_with_lock",
"dask/array/tests/test_array_core.py::test_from_array_tasks_always_call_getter",
"dask/array/tests/test_array_core.py::test_from_array_no_asarray",
"dask/array/tests/test_array_core.py::test_from_array_getitem",
"dask/array/tests/test_array_core.py::test_from_array_minus_one",
"dask/array/tests/test_array_core.py::test_asarray",
"dask/array/tests/test_array_core.py::test_asanyarray",
"dask/array/tests/test_array_core.py::test_from_func",
"dask/array/tests/test_array_core.py::test_concatenate3_2",
"dask/array/tests/test_array_core.py::test_map_blocks3",
"dask/array/tests/test_array_core.py::test_from_array_with_missing_chunks",
"dask/array/tests/test_array_core.py::test_normalize_chunks",
"dask/array/tests/test_array_core.py::test_raise_on_no_chunks",
"dask/array/tests/test_array_core.py::test_chunks_is_immutable",
"dask/array/tests/test_array_core.py::test_raise_on_bad_kwargs",
"dask/array/tests/test_array_core.py::test_long_slice",
"dask/array/tests/test_array_core.py::test_ellipsis_slicing",
"dask/array/tests/test_array_core.py::test_point_slicing",
"dask/array/tests/test_array_core.py::test_point_slicing_with_full_slice",
"dask/array/tests/test_array_core.py::test_slice_with_floats",
"dask/array/tests/test_array_core.py::test_slice_with_integer_types",
"dask/array/tests/test_array_core.py::test_index_with_integer_types",
"dask/array/tests/test_array_core.py::test_vindex_basic",
"dask/array/tests/test_array_core.py::test_vindex_nd",
"dask/array/tests/test_array_core.py::test_vindex_negative",
"dask/array/tests/test_array_core.py::test_vindex_errors",
"dask/array/tests/test_array_core.py::test_vindex_merge",
"dask/array/tests/test_array_core.py::test_empty_array",
"dask/array/tests/test_array_core.py::test_memmap",
"dask/array/tests/test_array_core.py::test_to_npy_stack",
"dask/array/tests/test_array_core.py::test_view",
"dask/array/tests/test_array_core.py::test_view_fortran",
"dask/array/tests/test_array_core.py::test_map_blocks_with_changed_dimension",
"dask/array/tests/test_array_core.py::test_broadcast_chunks",
"dask/array/tests/test_array_core.py::test_chunks_error",
"dask/array/tests/test_array_core.py::test_array_compute_forward_kwargs",
"dask/array/tests/test_array_core.py::test_dont_fuse_outputs",
"dask/array/tests/test_array_core.py::test_dont_dealias_outputs",
"dask/array/tests/test_array_core.py::test_timedelta_op",
"dask/array/tests/test_array_core.py::test_to_delayed",
"dask/array/tests/test_array_core.py::test_to_delayed_optimizes",
"dask/array/tests/test_array_core.py::test_cumulative",
"dask/array/tests/test_array_core.py::test_atop_names",
"dask/array/tests/test_array_core.py::test_atop_new_axes",
"dask/array/tests/test_array_core.py::test_atop_kwargs",
"dask/array/tests/test_array_core.py::test_atop_chunks",
"dask/array/tests/test_array_core.py::test_from_delayed",
"dask/array/tests/test_array_core.py::test_A_property",
"dask/array/tests/test_array_core.py::test_copy_mutate",
"dask/array/tests/test_array_core.py::test_npartitions",
"dask/array/tests/test_array_core.py::test_astype_gh1151",
"dask/array/tests/test_array_core.py::test_elemwise_name",
"dask/array/tests/test_array_core.py::test_map_blocks_name",
"dask/array/tests/test_array_core.py::test_array_picklable",
"dask/array/tests/test_array_core.py::test_from_array_raises_on_bad_chunks",
"dask/array/tests/test_array_core.py::test_concatenate_axes",
"dask/array/tests/test_array_core.py::test_atop_concatenate",
"dask/array/tests/test_array_core.py::test_common_blockdim",
"dask/array/tests/test_array_core.py::test_uneven_chunks_that_fit_neatly",
"dask/array/tests/test_array_core.py::test_elemwise_uneven_chunks",
"dask/array/tests/test_array_core.py::test_uneven_chunks_atop",
"dask/array/tests/test_array_core.py::test_warn_bad_rechunking",
"dask/array/tests/test_array_core.py::test_optimize_fuse_keys",
"dask/array/tests/test_array_core.py::test_concatenate_stack_dont_warn",
"dask/array/tests/test_array_core.py::test_map_blocks_delayed",
"dask/array/tests/test_array_core.py::test_no_chunks",
"dask/array/tests/test_array_core.py::test_no_chunks_2d",
"dask/array/tests/test_array_core.py::test_no_chunks_yes_chunks",
"dask/array/tests/test_array_core.py::test_raise_informative_errors_no_chunks",
"dask/array/tests/test_array_core.py::test_no_chunks_slicing_2d",
"dask/array/tests/test_array_core.py::test_index_array_with_array_1d",
"dask/array/tests/test_array_core.py::test_index_array_with_array_2d",
"dask/array/tests/test_array_core.py::test_setitem_1d",
"dask/array/tests/test_array_core.py::test_setitem_2d",
"dask/array/tests/test_array_core.py::test_setitem_errs",
"dask/array/tests/test_array_core.py::test_zero_slice_dtypes",
"dask/array/tests/test_array_core.py::test_zero_sized_array_rechunk",
"dask/array/tests/test_array_core.py::test_atop_zero_shape",
"dask/array/tests/test_array_core.py::test_atop_zero_shape_new_axes",
"dask/array/tests/test_array_core.py::test_broadcast_against_zero_shape",
"dask/array/tests/test_array_core.py::test_from_array_name",
"dask/array/tests/test_array_core.py::test_concatenate_errs",
"dask/array/tests/test_array_core.py::test_stack_errs",
"dask/array/tests/test_array_core.py::test_atop_with_numpy_arrays",
"dask/array/tests/test_array_core.py::test_elemwise_with_lists[other0-100]",
"dask/array/tests/test_array_core.py::test_elemwise_with_lists[other0-6]",
"dask/array/tests/test_array_core.py::test_elemwise_with_lists[other1-100]",
"dask/array/tests/test_array_core.py::test_elemwise_with_lists[other1-6]",
"dask/array/tests/test_array_core.py::test_elemwise_with_lists[other2-100]",
"dask/array/tests/test_array_core.py::test_elemwise_with_lists[other2-6]",
"dask/array/tests/test_array_core.py::test_constructor_plugin",
"dask/array/tests/test_array_core.py::test_no_warnings_on_metadata",
"dask/array/tests/test_array_core.py::test_delayed_array_key_hygeine",
"dask/array/tests/test_chunk.py::test_keepdims_wrapper_no_axis",
"dask/array/tests/test_chunk.py::test_keepdims_wrapper_one_axis",
"dask/array/tests/test_chunk.py::test_keepdims_wrapper_two_axes",
"dask/array/tests/test_chunk.py::test_coarsen",
"dask/array/tests/test_chunk.py::test_integer_input",
"dask/dataframe/tests/test_dataframe.py::test_head_tail",
"dask/dataframe/tests/test_dataframe.py::test_head_npartitions",
"dask/dataframe/tests/test_dataframe.py::test_head_npartitions_warn",
"dask/dataframe/tests/test_dataframe.py::test_index_head",
"dask/dataframe/tests/test_dataframe.py::test_Series",
"dask/dataframe/tests/test_dataframe.py::test_Index",
"dask/dataframe/tests/test_dataframe.py::test_Scalar",
"dask/dataframe/tests/test_dataframe.py::test_column_names",
"dask/dataframe/tests/test_dataframe.py::test_index_names",
"dask/dataframe/tests/test_dataframe.py::test_timezone_freq[1]",
"dask/dataframe/tests/test_dataframe.py::test_rename_columns",
"dask/dataframe/tests/test_dataframe.py::test_rename_series",
"dask/dataframe/tests/test_dataframe.py::test_describe",
"dask/dataframe/tests/test_dataframe.py::test_describe_empty",
"dask/dataframe/tests/test_dataframe.py::test_cumulative",
"dask/dataframe/tests/test_dataframe.py::test_dropna",
"dask/dataframe/tests/test_dataframe.py::test_where_mask",
"dask/dataframe/tests/test_dataframe.py::test_map_partitions_multi_argument",
"dask/dataframe/tests/test_dataframe.py::test_map_partitions",
"dask/dataframe/tests/test_dataframe.py::test_map_partitions_names",
"dask/dataframe/tests/test_dataframe.py::test_map_partitions_column_info",
"dask/dataframe/tests/test_dataframe.py::test_map_partitions_method_names",
"dask/dataframe/tests/test_dataframe.py::test_map_partitions_keeps_kwargs_in_dict",
"dask/dataframe/tests/test_dataframe.py::test_drop_duplicates",
"dask/dataframe/tests/test_dataframe.py::test_drop_duplicates_subset",
"dask/dataframe/tests/test_dataframe.py::test_get_partition",
"dask/dataframe/tests/test_dataframe.py::test_ndim",
"dask/dataframe/tests/test_dataframe.py::test_dtype",
"dask/dataframe/tests/test_dataframe.py::test_value_counts",
"dask/dataframe/tests/test_dataframe.py::test_unique",
"dask/dataframe/tests/test_dataframe.py::test_isin",
"dask/dataframe/tests/test_dataframe.py::test_len",
"dask/dataframe/tests/test_dataframe.py::test_size",
"dask/dataframe/tests/test_dataframe.py::test_nbytes",
"dask/dataframe/tests/test_dataframe.py::test_quantile",
"dask/dataframe/tests/test_dataframe.py::test_quantile_missing",
"dask/dataframe/tests/test_dataframe.py::test_empty_quantile",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_quantile",
"dask/dataframe/tests/test_dataframe.py::test_index",
"dask/dataframe/tests/test_dataframe.py::test_assign",
"dask/dataframe/tests/test_dataframe.py::test_map",
"dask/dataframe/tests/test_dataframe.py::test_concat",
"dask/dataframe/tests/test_dataframe.py::test_args",
"dask/dataframe/tests/test_dataframe.py::test_known_divisions",
"dask/dataframe/tests/test_dataframe.py::test_unknown_divisions",
"dask/dataframe/tests/test_dataframe.py::test_align[inner]",
"dask/dataframe/tests/test_dataframe.py::test_align[outer]",
"dask/dataframe/tests/test_dataframe.py::test_align[left]",
"dask/dataframe/tests/test_dataframe.py::test_align[right]",
"dask/dataframe/tests/test_dataframe.py::test_align_axis[inner]",
"dask/dataframe/tests/test_dataframe.py::test_align_axis[outer]",
"dask/dataframe/tests/test_dataframe.py::test_align_axis[left]",
"dask/dataframe/tests/test_dataframe.py::test_align_axis[right]",
"dask/dataframe/tests/test_dataframe.py::test_combine",
"dask/dataframe/tests/test_dataframe.py::test_combine_first",
"dask/dataframe/tests/test_dataframe.py::test_random_partitions",
"dask/dataframe/tests/test_dataframe.py::test_series_round",
"dask/dataframe/tests/test_dataframe.py::test_repartition_divisions",
"dask/dataframe/tests/test_dataframe.py::test_repartition_on_pandas_dataframe",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-1-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-1-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-1-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-1-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-1-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-1-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-1-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-1-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-2-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-2-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-2-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-2-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-2-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-2-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-2-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-2-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-4-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-4-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-4-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-4-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-4-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-4-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-4-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-4-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-5-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-5-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-5-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-5-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-5-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-5-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-5-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-int-5-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-1-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-1-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-1-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-1-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-1-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-1-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-1-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-1-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-2-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-2-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-2-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-2-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-2-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-2-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-2-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-2-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-4-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-4-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-4-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-4-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-4-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-4-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-4-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-4-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-5-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-5-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-5-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-5-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-5-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-5-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-5-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-float-5-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-1-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-1-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-1-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-1-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-1-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-1-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-1-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-1-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-2-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-2-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-2-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-2-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-2-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-2-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-2-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-2-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-4-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-4-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-4-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-4-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-4-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-4-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-4-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-4-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-5-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-5-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-5-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-5-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-5-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-5-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-5-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>0-M8[ns]-5-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-1-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-1-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-1-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-1-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-1-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-1-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-1-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-1-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-2-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-2-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-2-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-2-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-2-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-2-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-2-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-2-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-4-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-4-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-4-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-4-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-4-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-4-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-4-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-4-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-5-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-5-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-5-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-5-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-5-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-5-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-5-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-int-5-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-1-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-1-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-1-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-1-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-1-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-1-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-1-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-1-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-2-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-2-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-2-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-2-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-2-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-2-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-2-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-2-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-4-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-4-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-4-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-4-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-4-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-4-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-4-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-4-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-5-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-5-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-5-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-5-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-5-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-5-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-5-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-float-5-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-1-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-1-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-1-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-1-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-1-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-1-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-1-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-1-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-2-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-2-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-2-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-2-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-2-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-2-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-2-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-2-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-4-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-4-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-4-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-4-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-4-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-4-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-4-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-4-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-5-1-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-5-1-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-5-2-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-5-2-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-5-4-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-5-4-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-5-5-True]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions[<lambda>1-M8[ns]-5-5-False]",
"dask/dataframe/tests/test_dataframe.py::test_repartition_npartitions_same_limits",
"dask/dataframe/tests/test_dataframe.py::test_repartition_object_index",
"dask/dataframe/tests/test_dataframe.py::test_repartition_freq_errors",
"dask/dataframe/tests/test_dataframe.py::test_embarrassingly_parallel_operations",
"dask/dataframe/tests/test_dataframe.py::test_fillna",
"dask/dataframe/tests/test_dataframe.py::test_fillna_multi_dataframe",
"dask/dataframe/tests/test_dataframe.py::test_ffill_bfill",
"dask/dataframe/tests/test_dataframe.py::test_fillna_series_types",
"dask/dataframe/tests/test_dataframe.py::test_sample",
"dask/dataframe/tests/test_dataframe.py::test_sample_without_replacement",
"dask/dataframe/tests/test_dataframe.py::test_datetime_accessor",
"dask/dataframe/tests/test_dataframe.py::test_empty_max",
"dask/dataframe/tests/test_dataframe.py::test_query",
"dask/dataframe/tests/test_dataframe.py::test_eval",
"dask/dataframe/tests/test_dataframe.py::test_deterministic_apply_concat_apply_names",
"dask/dataframe/tests/test_dataframe.py::test_aca_meta_infer",
"dask/dataframe/tests/test_dataframe.py::test_aca_split_every",
"dask/dataframe/tests/test_dataframe.py::test_reduction_method",
"dask/dataframe/tests/test_dataframe.py::test_reduction_method_split_every",
"dask/dataframe/tests/test_dataframe.py::test_pipe",
"dask/dataframe/tests/test_dataframe.py::test_gh_517",
"dask/dataframe/tests/test_dataframe.py::test_drop_axis_1",
"dask/dataframe/tests/test_dataframe.py::test_gh580",
"dask/dataframe/tests/test_dataframe.py::test_rename_dict",
"dask/dataframe/tests/test_dataframe.py::test_rename_function",
"dask/dataframe/tests/test_dataframe.py::test_rename_index",
"dask/dataframe/tests/test_dataframe.py::test_to_frame",
"dask/dataframe/tests/test_dataframe.py::test_apply_warns",
"dask/dataframe/tests/test_dataframe.py::test_applymap",
"dask/dataframe/tests/test_dataframe.py::test_abs",
"dask/dataframe/tests/test_dataframe.py::test_round",
"dask/dataframe/tests/test_dataframe.py::test_cov",
"dask/dataframe/tests/test_dataframe.py::test_corr",
"dask/dataframe/tests/test_dataframe.py::test_cov_corr_meta",
"dask/dataframe/tests/test_dataframe.py::test_autocorr",
"dask/dataframe/tests/test_dataframe.py::test_index_time_properties",
"dask/dataframe/tests/test_dataframe.py::test_nlargest_nsmallest",
"dask/dataframe/tests/test_dataframe.py::test_reset_index",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_compute_forward_kwargs",
"dask/dataframe/tests/test_dataframe.py::test_series_iteritems",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_iterrows",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_itertuples",
"dask/dataframe/tests/test_dataframe.py::test_astype",
"dask/dataframe/tests/test_dataframe.py::test_astype_categoricals",
"dask/dataframe/tests/test_dataframe.py::test_astype_categoricals_known",
"dask/dataframe/tests/test_dataframe.py::test_groupby_callable",
"dask/dataframe/tests/test_dataframe.py::test_methods_tokenize_differently",
"dask/dataframe/tests/test_dataframe.py::test_gh_1301",
"dask/dataframe/tests/test_dataframe.py::test_timeseries_sorted",
"dask/dataframe/tests/test_dataframe.py::test_column_assignment",
"dask/dataframe/tests/test_dataframe.py::test_columns_assignment",
"dask/dataframe/tests/test_dataframe.py::test_attribute_assignment",
"dask/dataframe/tests/test_dataframe.py::test_setitem_triggering_realign",
"dask/dataframe/tests/test_dataframe.py::test_inplace_operators",
"dask/dataframe/tests/test_dataframe.py::test_idxmaxmin[idx0-True]",
"dask/dataframe/tests/test_dataframe.py::test_idxmaxmin[idx0-False]",
"dask/dataframe/tests/test_dataframe.py::test_idxmaxmin[idx1-True]",
"dask/dataframe/tests/test_dataframe.py::test_idxmaxmin[idx1-False]",
"dask/dataframe/tests/test_dataframe.py::test_idxmaxmin_empty_partitions",
"dask/dataframe/tests/test_dataframe.py::test_getitem_meta",
"dask/dataframe/tests/test_dataframe.py::test_getitem_multilevel",
"dask/dataframe/tests/test_dataframe.py::test_diff",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[None-2-1]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[None-2-4]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[None-2-20]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[None-5-1]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[None-5-4]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[None-5-20]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[1-2-1]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[1-2-4]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[1-2-20]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[1-5-1]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[1-5-4]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[1-5-20]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[5-2-1]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[5-2-4]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[5-2-20]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[5-5-1]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[5-5-4]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[5-5-20]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[20-2-1]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[20-2-4]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[20-2-20]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[20-5-1]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[20-5-4]",
"dask/dataframe/tests/test_dataframe.py::test_hash_split_unique[20-5-20]",
"dask/dataframe/tests/test_dataframe.py::test_split_out_drop_duplicates[None]",
"dask/dataframe/tests/test_dataframe.py::test_split_out_drop_duplicates[2]",
"dask/dataframe/tests/test_dataframe.py::test_split_out_value_counts[None]",
"dask/dataframe/tests/test_dataframe.py::test_split_out_value_counts[2]",
"dask/dataframe/tests/test_dataframe.py::test_values",
"dask/dataframe/tests/test_dataframe.py::test_copy",
"dask/dataframe/tests/test_dataframe.py::test_del",
"dask/dataframe/tests/test_dataframe.py::test_memory_usage[True-True]",
"dask/dataframe/tests/test_dataframe.py::test_memory_usage[True-False]",
"dask/dataframe/tests/test_dataframe.py::test_memory_usage[False-True]",
"dask/dataframe/tests/test_dataframe.py::test_memory_usage[False-False]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[sum]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[mean]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[std]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[var]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[count]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[min]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[max]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[idxmin]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[idxmax]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[prod]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[all]",
"dask/dataframe/tests/test_dataframe.py::test_dataframe_reductions_arithmetic[sem]",
"dask/dataframe/tests/test_dataframe.py::test_to_datetime",
"dask/dataframe/tests/test_dataframe.py::test_to_timedelta",
"dask/dataframe/tests/test_dataframe.py::test_slice_on_filtered_boundary[0]",
"dask/dataframe/tests/test_dataframe.py::test_slice_on_filtered_boundary[9]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_nonmonotonic",
"dask/dataframe/tests/test_dataframe.py::test_with_boundary[-1-None-False-False-drop0]",
"dask/dataframe/tests/test_dataframe.py::test_with_boundary[-1-None-False-True-drop1]",
"dask/dataframe/tests/test_dataframe.py::test_with_boundary[None-3-False-False-drop2]",
"dask/dataframe/tests/test_dataframe.py::test_with_boundary[None-3-True-False-drop3]",
"dask/dataframe/tests/test_dataframe.py::test_with_boundary[-0.5-None-False-False-drop4]",
"dask/dataframe/tests/test_dataframe.py::test_with_boundary[-0.5-None-False-True-drop5]",
"dask/dataframe/tests/test_dataframe.py::test_with_boundary[-1.5-None-False-True-drop6]",
"dask/dataframe/tests/test_dataframe.py::test_with_boundary[None-3.5-False-False-drop7]",
"dask/dataframe/tests/test_dataframe.py::test_with_boundary[None-3.5-True-False-drop8]",
"dask/dataframe/tests/test_dataframe.py::test_with_boundary[None-2.5-False-False-drop9]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index0-0-9]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index1--1-None]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index2-None-10]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index3-None-None]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index4--1-None]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index5-None-2]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index6--2-3]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index7-None-None]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index8-left8-None]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index9-None-right9]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index10-left10-None]",
"dask/dataframe/tests/test_dataframe.py::test_boundary_slice_same[index11-None-right11]",
"dask/dataframe/tests/test_dataframe.py::test_better_errors_object_reductions",
"dask/dataframe/tests/test_dataframe.py::test_sample_empty_partitions",
"dask/dataframe/tests/test_dataframe.py::test_coerce",
"dask/dataframe/tests/test_dataframe.py::test_bool",
"dask/dataframe/tests/test_groupby.py::test_full_groupby",
"dask/dataframe/tests/test_groupby.py::test_full_groupby_multilevel[<lambda>0]",
"dask/dataframe/tests/test_groupby.py::test_full_groupby_multilevel[<lambda>1]",
"dask/dataframe/tests/test_groupby.py::test_full_groupby_multilevel[<lambda>2]",
"dask/dataframe/tests/test_groupby.py::test_full_groupby_multilevel[<lambda>3]",
"dask/dataframe/tests/test_groupby.py::test_groupby_get_group",
"dask/dataframe/tests/test_groupby.py::test_dataframe_groupby_nunique",
"dask/dataframe/tests/test_groupby.py::test_dataframe_groupby_nunique_across_group_same_value",
"dask/dataframe/tests/test_groupby.py::test_series_groupby_propagates_names",
"dask/dataframe/tests/test_groupby.py::test_series_groupby",
"dask/dataframe/tests/test_groupby.py::test_series_groupby_errors",
"dask/dataframe/tests/test_groupby.py::test_groupby_set_index",
"dask/dataframe/tests/test_groupby.py::test_split_apply_combine_on_series",
"dask/dataframe/tests/test_groupby.py::test_groupby_reduction_split[split_every]",
"dask/dataframe/tests/test_groupby.py::test_groupby_reduction_split[split_out]",
"dask/dataframe/tests/test_groupby.py::test_apply_shuffle",
"dask/dataframe/tests/test_groupby.py::test_apply_shuffle_multilevel[<lambda>0]",
"dask/dataframe/tests/test_groupby.py::test_apply_shuffle_multilevel[<lambda>1]",
"dask/dataframe/tests/test_groupby.py::test_apply_shuffle_multilevel[<lambda>2]",
"dask/dataframe/tests/test_groupby.py::test_apply_shuffle_multilevel[<lambda>3]",
"dask/dataframe/tests/test_groupby.py::test_apply_shuffle_multilevel[<lambda>4]",
"dask/dataframe/tests/test_groupby.py::test_numeric_column_names",
"dask/dataframe/tests/test_groupby.py::test_groupby_multiprocessing",
"dask/dataframe/tests/test_groupby.py::test_groupby_normalize_index",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>0-False-spec1]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>0-False-spec2]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>0-False-spec3]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>0-False-var]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>0-None-spec1]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>0-None-spec2]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>0-None-spec3]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>0-None-var]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>1-False-spec1]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>1-False-spec2]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>1-False-spec3]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>1-False-var]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>1-None-spec1]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>1-None-spec2]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>1-None-spec3]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>1-None-var]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>2-False-spec1]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>2-False-spec2]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>2-False-spec3]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>2-False-var]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>2-None-spec1]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>2-None-spec2]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>2-None-spec3]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>2-None-var]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>3-False-spec1]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>3-False-spec2]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>3-False-spec3]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>3-False-var]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>3-None-spec1]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>3-None-spec2]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>3-None-spec3]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>3-None-var]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>4-False-spec1]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>4-False-spec2]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>4-False-spec3]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>4-False-var]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>4-None-spec1]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>4-None-spec2]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>4-None-spec3]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__examples[<lambda>4-None-var]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregate__examples[<lambda>0-False-spec1]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregate__examples[<lambda>0-False-spec2]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregate__examples[<lambda>0-False-sum]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregate__examples[<lambda>0-False-size]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregate__examples[<lambda>0-None-spec1]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregate__examples[<lambda>0-None-spec2]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregate__examples[<lambda>0-None-sum]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregate__examples[<lambda>0-None-size]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregate__examples[<lambda>1-False-spec1]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregate__examples[<lambda>1-False-spec2]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregate__examples[<lambda>1-False-sum]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregate__examples[<lambda>1-False-size]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregate__examples[<lambda>1-None-spec1]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregate__examples[<lambda>1-None-spec2]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregate__examples[<lambda>1-None-sum]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregate__examples[<lambda>1-None-size]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregate__examples[<lambda>2-False-spec1]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregate__examples[<lambda>2-False-spec2]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregate__examples[<lambda>2-False-sum]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregate__examples[<lambda>2-False-size]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregate__examples[<lambda>2-None-spec1]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregate__examples[<lambda>2-None-spec2]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregate__examples[<lambda>2-None-sum]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregate__examples[<lambda>2-None-size]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__single_element_groups[sum]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__single_element_groups[min]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__single_element_groups[max]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__single_element_groups[count]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__single_element_groups[size]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__single_element_groups[std]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__single_element_groups[var]",
"dask/dataframe/tests/test_groupby.py::test_aggregate__single_element_groups[mean]",
"dask/dataframe/tests/test_groupby.py::test_aggregate_build_agg_args__reuse_of_intermediates",
"dask/dataframe/tests/test_groupby.py::test_aggregate__dask",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>0-sum]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>0-var]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>0-mean]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>0-count]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>0-size]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>0-std]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>0-nunique]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>0-min]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>0-max]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>1-sum]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>1-var]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>1-count]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>1-size]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>1-std]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>1-nunique]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>1-min]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>1-max]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>2-sum]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>2-var]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>2-mean]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>2-count]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>2-size]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>2-std]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>2-nunique]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>2-min]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>2-max]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>3-sum]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>3-var]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>3-count]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>3-size]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>3-std]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>3-nunique]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>3-min]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>3-max]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>4-sum]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>4-var]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>4-count]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>4-size]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>4-std]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>4-nunique]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>4-min]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_aggregations_multilevel[<lambda>4-max]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregations_multilevel[<lambda>0-sum]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregations_multilevel[<lambda>0-var]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregations_multilevel[<lambda>0-mean]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregations_multilevel[<lambda>0-count]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregations_multilevel[<lambda>0-size]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregations_multilevel[<lambda>0-std]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregations_multilevel[<lambda>0-min]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregations_multilevel[<lambda>0-max]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregations_multilevel[<lambda>0-nunique]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregations_multilevel[<lambda>1-sum]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregations_multilevel[<lambda>1-var]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregations_multilevel[<lambda>1-count]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregations_multilevel[<lambda>1-size]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregations_multilevel[<lambda>1-std]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregations_multilevel[<lambda>1-min]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregations_multilevel[<lambda>1-max]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregations_multilevel[<lambda>1-nunique]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregations_multilevel[<lambda>2-sum]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregations_multilevel[<lambda>2-var]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregations_multilevel[<lambda>2-count]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregations_multilevel[<lambda>2-size]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregations_multilevel[<lambda>2-std]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregations_multilevel[<lambda>2-min]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregations_multilevel[<lambda>2-max]",
"dask/dataframe/tests/test_groupby.py::test_series_aggregations_multilevel[<lambda>2-nunique]",
"dask/dataframe/tests/test_groupby.py::test_groupby_meta_content[<lambda>0-<lambda>0]",
"dask/dataframe/tests/test_groupby.py::test_groupby_meta_content[<lambda>0-<lambda>1]",
"dask/dataframe/tests/test_groupby.py::test_groupby_meta_content[<lambda>0-<lambda>2]",
"dask/dataframe/tests/test_groupby.py::test_groupby_meta_content[<lambda>0-<lambda>3]",
"dask/dataframe/tests/test_groupby.py::test_groupby_meta_content[<lambda>1-<lambda>0]",
"dask/dataframe/tests/test_groupby.py::test_groupby_meta_content[<lambda>1-<lambda>1]",
"dask/dataframe/tests/test_groupby.py::test_groupby_meta_content[<lambda>1-<lambda>2]",
"dask/dataframe/tests/test_groupby.py::test_groupby_meta_content[<lambda>1-<lambda>3]",
"dask/dataframe/tests/test_groupby.py::test_groupby_meta_content[<lambda>2-<lambda>0]",
"dask/dataframe/tests/test_groupby.py::test_groupby_meta_content[<lambda>2-<lambda>1]",
"dask/dataframe/tests/test_groupby.py::test_groupby_meta_content[<lambda>2-<lambda>2]",
"dask/dataframe/tests/test_groupby.py::test_groupby_meta_content[<lambda>2-<lambda>3]",
"dask/dataframe/tests/test_groupby.py::test_groupy_non_aligned_index",
"dask/dataframe/tests/test_groupby.py::test_groupy_series_wrong_grouper",
"dask/dataframe/tests/test_groupby.py::test_hash_groupby_aggregate[None-2-1]",
"dask/dataframe/tests/test_groupby.py::test_hash_groupby_aggregate[None-2-4]",
"dask/dataframe/tests/test_groupby.py::test_hash_groupby_aggregate[None-2-20]",
"dask/dataframe/tests/test_groupby.py::test_hash_groupby_aggregate[None-5-1]",
"dask/dataframe/tests/test_groupby.py::test_hash_groupby_aggregate[None-5-4]",
"dask/dataframe/tests/test_groupby.py::test_hash_groupby_aggregate[None-5-20]",
"dask/dataframe/tests/test_groupby.py::test_hash_groupby_aggregate[1-2-1]",
"dask/dataframe/tests/test_groupby.py::test_hash_groupby_aggregate[1-2-4]",
"dask/dataframe/tests/test_groupby.py::test_hash_groupby_aggregate[1-2-20]",
"dask/dataframe/tests/test_groupby.py::test_hash_groupby_aggregate[1-5-1]",
"dask/dataframe/tests/test_groupby.py::test_hash_groupby_aggregate[1-5-4]",
"dask/dataframe/tests/test_groupby.py::test_hash_groupby_aggregate[1-5-20]",
"dask/dataframe/tests/test_groupby.py::test_hash_groupby_aggregate[5-2-1]",
"dask/dataframe/tests/test_groupby.py::test_hash_groupby_aggregate[5-2-4]",
"dask/dataframe/tests/test_groupby.py::test_hash_groupby_aggregate[5-2-20]",
"dask/dataframe/tests/test_groupby.py::test_hash_groupby_aggregate[5-5-1]",
"dask/dataframe/tests/test_groupby.py::test_hash_groupby_aggregate[5-5-4]",
"dask/dataframe/tests/test_groupby.py::test_hash_groupby_aggregate[5-5-20]",
"dask/dataframe/tests/test_groupby.py::test_hash_groupby_aggregate[20-2-1]",
"dask/dataframe/tests/test_groupby.py::test_hash_groupby_aggregate[20-2-4]",
"dask/dataframe/tests/test_groupby.py::test_hash_groupby_aggregate[20-2-20]",
"dask/dataframe/tests/test_groupby.py::test_hash_groupby_aggregate[20-5-1]",
"dask/dataframe/tests/test_groupby.py::test_hash_groupby_aggregate[20-5-4]",
"dask/dataframe/tests/test_groupby.py::test_hash_groupby_aggregate[20-5-20]",
"dask/dataframe/tests/test_groupby.py::test_groupby_split_out_num",
"dask/dataframe/tests/test_groupby.py::test_groupby_not_supported",
"dask/dataframe/tests/test_groupby.py::test_groupby_numeric_column",
"dask/dataframe/tests/test_groupby.py::test_cumulative[cumsum-a-c]",
"dask/dataframe/tests/test_groupby.py::test_cumulative[cumsum-a-d]",
"dask/dataframe/tests/test_groupby.py::test_cumulative[cumsum-a-sel2]",
"dask/dataframe/tests/test_groupby.py::test_cumulative[cumsum-key1-c]",
"dask/dataframe/tests/test_groupby.py::test_cumulative[cumsum-key1-d]",
"dask/dataframe/tests/test_groupby.py::test_cumulative[cumsum-key1-sel2]",
"dask/dataframe/tests/test_groupby.py::test_cumulative[cumprod-a-c]",
"dask/dataframe/tests/test_groupby.py::test_cumulative[cumprod-a-d]",
"dask/dataframe/tests/test_groupby.py::test_cumulative[cumprod-a-sel2]",
"dask/dataframe/tests/test_groupby.py::test_cumulative[cumprod-key1-c]",
"dask/dataframe/tests/test_groupby.py::test_cumulative[cumprod-key1-d]",
"dask/dataframe/tests/test_groupby.py::test_cumulative[cumprod-key1-sel2]",
"dask/dataframe/tests/test_groupby.py::test_cumulative[cumcount-a-c]",
"dask/dataframe/tests/test_groupby.py::test_cumulative[cumcount-a-d]",
"dask/dataframe/tests/test_groupby.py::test_cumulative[cumcount-a-sel2]",
"dask/dataframe/tests/test_groupby.py::test_cumulative[cumcount-key1-c]",
"dask/dataframe/tests/test_groupby.py::test_cumulative[cumcount-key1-d]",
"dask/dataframe/tests/test_groupby.py::test_cumulative[cumcount-key1-sel2]",
"dask/dataframe/tests/test_groupby.py::test_cumulative_axis1[cumsum]",
"dask/dataframe/tests/test_groupby.py::test_cumulative_axis1[cumprod]",
"dask/dataframe/tests/test_groupby.py::test_groupby_slice_agg_reduces",
"dask/dataframe/tests/test_groupby.py::test_groupby_agg_grouper_single",
"dask/dataframe/tests/test_groupby.py::test_groupby_agg_grouper_multiple[a]",
"dask/dataframe/tests/test_groupby.py::test_groupby_agg_grouper_multiple[slice_1]",
"dask/dataframe/tests/test_groupby.py::test_groupby_agg_grouper_multiple[slice_2]",
"dask/dataframe/tests/test_groupby.py::test_groupby_agg_grouper_multiple[slice_3]",
"dask/dataframe/tests/test_groupby.py::test_groupby_column_and_index_agg_funcs[cumprod]",
"dask/dataframe/tests/test_groupby.py::test_groupby_column_and_index_agg_funcs[cumcount]",
"dask/dataframe/tests/test_groupby.py::test_groupby_column_and_index_agg_funcs[cumsum]",
"dask/dataframe/tests/test_groupby.py::test_groupby_column_and_index_agg_funcs[var]",
"dask/dataframe/tests/test_groupby.py::test_groupby_column_and_index_agg_funcs[sum]",
"dask/dataframe/tests/test_groupby.py::test_groupby_column_and_index_agg_funcs[count]",
"dask/dataframe/tests/test_groupby.py::test_groupby_column_and_index_agg_funcs[size]",
"dask/dataframe/tests/test_groupby.py::test_groupby_column_and_index_agg_funcs[std]",
"dask/dataframe/tests/test_groupby.py::test_groupby_column_and_index_agg_funcs[min]",
"dask/dataframe/tests/test_groupby.py::test_groupby_column_and_index_agg_funcs[max]",
"dask/dataframe/tests/test_groupby.py::test_groupby_column_and_index_apply[amin-group_args0]",
"dask/dataframe/tests/test_groupby.py::test_groupby_column_and_index_apply[amin-group_args1]",
"dask/dataframe/tests/test_groupby.py::test_groupby_column_and_index_apply[amin-group_args2]",
"dask/dataframe/tests/test_groupby.py::test_groupby_column_and_index_apply[amin-idx]",
"dask/dataframe/tests/test_groupby.py::test_groupby_column_and_index_apply[mean-group_args0]",
"dask/dataframe/tests/test_groupby.py::test_groupby_column_and_index_apply[mean-group_args1]",
"dask/dataframe/tests/test_groupby.py::test_groupby_column_and_index_apply[mean-group_args2]",
"dask/dataframe/tests/test_groupby.py::test_groupby_column_and_index_apply[mean-idx]",
"dask/dataframe/tests/test_groupby.py::test_groupby_column_and_index_apply[<lambda>-group_args0]",
"dask/dataframe/tests/test_groupby.py::test_groupby_column_and_index_apply[<lambda>-group_args1]",
"dask/dataframe/tests/test_groupby.py::test_groupby_column_and_index_apply[<lambda>-group_args2]",
"dask/dataframe/tests/test_groupby.py::test_groupby_column_and_index_apply[<lambda>-idx]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_groupby_agg_custom_sum[pandas_spec0-dask_spec0-False]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_groupby_agg_custom_sum[pandas_spec1-dask_spec1-True]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_groupby_agg_custom_sum[pandas_spec2-dask_spec2-False]",
"dask/dataframe/tests/test_groupby.py::test_dataframe_groupby_agg_custom_sum[pandas_spec3-dask_spec3-False]",
"dask/dataframe/tests/test_groupby.py::test_series_groupby_agg_custom_mean[mean-mean]",
"dask/dataframe/tests/test_groupby.py::test_series_groupby_agg_custom_mean[pandas_spec1-dask_spec1]",
"dask/dataframe/tests/test_groupby.py::test_series_groupby_agg_custom_mean[pandas_spec2-dask_spec2]",
"dask/dataframe/tests/test_groupby.py::test_groupby_agg_custom__name_clash_with_internal_same_column",
"dask/dataframe/tests/test_groupby.py::test_groupby_agg_custom__name_clash_with_internal_different_column"
]
| []
| BSD 3-Clause "New" or "Revised" License | 1,999 | [
"dask/dataframe/groupby.py",
"dask/dataframe/accessor.py",
"dask/dataframe/core.py",
"dask/array/core.py",
"dask/compatibility.py",
"docs/source/changelog.rst",
"dask/utils.py"
]
| [
"dask/dataframe/groupby.py",
"dask/dataframe/accessor.py",
"dask/dataframe/core.py",
"dask/array/core.py",
"dask/compatibility.py",
"docs/source/changelog.rst",
"dask/utils.py"
]
|
OpenNMT__OpenNMT-tf-42 | 009a5922e29e343587d3b574087c536a347a4d04 | 2018-01-03 13:02:46 | 009a5922e29e343587d3b574087c536a347a4d04 | diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 00000000..27e3f78c
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "third_party/OpenNMTTokenizer"]
+ path = third_party/OpenNMTTokenizer
+ url = https://github.com/OpenNMT/Tokenizer.git
diff --git a/.travis.yml b/.travis.yml
index ec1257e4..25be5856 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -2,7 +2,18 @@ language: python
python:
- "2.7"
- "3.5"
-install:
+addons:
+ apt:
+ sources:
+ - george-edison55-precise-backports
+ - ubuntu-toolchain-r-test
+ packages:
+ - gcc-4.8
+ - g++-4.8
+ - cmake
+ - cmake-data
+ - libboost-python-dev
+before_install:
- pip install tensorflow==1.4.0
- pip install pyyaml
- pip install nose2
@@ -14,6 +25,13 @@ install:
pip install sphinx_rtd_theme
pip install recommonmark
fi
+install:
+ - export CXX="g++-4.8" CC="gcc-4.8"
+ - mkdir build && cd build
+ - cmake ..
+ - make
+ - export PYTHONPATH="$PYTHONPATH:$PWD/third_party/OpenNMTTokenizer/bindings/python/"
+ - cd ..
script:
- nose2
- if [ "$TRAVIS_PYTHON_VERSION" == "3.5" ]; then pylint opennmt/ bin/; fi
diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 00000000..6b165d40
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,8 @@
+cmake_minimum_required(VERSION 3.1)
+
+set(CMAKE_CXX_STANDARD 11)
+set(CMAKE_BUILD_TYPE Release)
+set(LIB_ONLY ON)
+set(WITH_PYTHON_BINDINGS ON)
+
+add_subdirectory(third_party/OpenNMTTokenizer)
diff --git a/README.md b/README.md
index 0847d567..7b45728d 100644
--- a/README.md
+++ b/README.md
@@ -18,6 +18,7 @@ OpenNMT-tf focuses on modularity and extensibility using standard TensorFlow mod
* **hybrid encoder-decoder models**<br/>e.g. self-attention encoder and RNN decoder or vice versa.
* **multi-source training**<br/>e.g. source text and Moses translation as inputs for machine translation.
* **multiple input formats**<br/>text with support of mixed word/character embeddings or real vectors serialized in *TFRecord* files.
+* **on-the-fly tokenization**<br/>apply advanced tokenization dynamically during training and detokenize the predictions during inference or evaluation.
and all of the above can be used simultaneously to train novel and complex architectures. See the [predefined models](config/models) to discover how they are defined.
@@ -76,6 +77,8 @@ python -m bin.main infer --config config/opennmt-defaults.yml config/data/toy-en
**Note:** do not expect any good translation results with this toy example. Consider training on [larger parallel datasets](http://www.statmt.org/wmt16/translation-task.html) instead.
+*For more advanced usages, see the [documentation](http://opennmt.net/OpenNMT-tf).*
+
## Compatibility with {Lua,Py}Torch implementations
OpenNMT-tf has been designed from scratch and compatibility with the {Lua,Py}Torch implementations in terms of usage, design, and features is not a priority. Please submit a feature request for any missing feature or behavior that you found useful in the {Lua,Py}Torch implementations.
diff --git a/bin/build_vocab.py b/bin/build_vocab.py
index e0d4e414..c92050fd 100644
--- a/bin/build_vocab.py
+++ b/bin/build_vocab.py
@@ -6,8 +6,6 @@ from opennmt import constants
from opennmt import tokenizers
from opennmt import utils
-from opennmt.utils.misc import get_classnames_in_module
-
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
@@ -17,9 +15,6 @@ def main():
parser.add_argument(
"--save_vocab", required=True,
help="Output vocabulary file.")
- parser.add_argument(
- "--tokenizer", default="SpaceTokenizer", choices=get_classnames_in_module(tokenizers),
- help="Tokenizer class name.")
parser.add_argument(
"--min_frequency", type=int, default=1,
help="Minimum word frequency.")
@@ -29,9 +24,10 @@ def main():
parser.add_argument(
"--without_sequence_tokens", default=False, action="store_true",
help="If set, do not add special sequence tokens (start, end) in the vocabulary.")
+ tokenizers.add_command_line_arguments(parser)
args = parser.parse_args()
- tokenizer = getattr(tokenizers, args.tokenizer)()
+ tokenizer = tokenizers.build_tokenizer(args)
special_tokens = [constants.PADDING_TOKEN]
if not args.without_sequence_tokens:
diff --git a/bin/detokenize_text.py b/bin/detokenize_text.py
new file mode 100644
index 00000000..9e768adf
--- /dev/null
+++ b/bin/detokenize_text.py
@@ -0,0 +1,22 @@
+"""Standalone script to detokenize a corpus."""
+
+from __future__ import print_function
+
+import argparse
+
+from opennmt import tokenizers
+
+
+def main():
+ parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+ parser.add_argument(
+ "--delimiter", default=" ",
+ help="Token delimiter used in text serialization.")
+ tokenizers.add_command_line_arguments(parser)
+ args = parser.parse_args()
+
+ tokenizer = tokenizers.build_tokenizer(args)
+ tokenizer.detokenize_stream(delimiter=args.delimiter)
+
+if __name__ == "__main__":
+ main()
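
The new `bin/detokenize_text.py` is the mirror of `bin/tokenize_text.py`: it builds the selected tokenizer from the command line and pipes stdin through `detokenize_stream`. As a minimal round-trip sketch (assuming, as in the `sequence_to_sequence.py` change later in this patch, that `detokenize()` accepts the byte tokens produced by the model):

```python
from opennmt.tokenizers import SpaceTokenizer

# Sketch only: for the space tokenizer, detokenize() should simply
# re-join tokens, undoing tokenize().
tokenizer = SpaceTokenizer()
print(tokenizer.detokenize([b"Hello", b"world", b"!"]))  # -> Hello world !
```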
diff --git a/bin/tokenize_text.py b/bin/tokenize_text.py
index 4192c255..24dea304 100644
--- a/bin/tokenize_text.py
+++ b/bin/tokenize_text.py
@@ -3,29 +3,20 @@
from __future__ import print_function
import argparse
-import sys
from opennmt import tokenizers
-from opennmt.utils.misc import get_classnames_in_module
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
- parser.add_argument(
- "--tokenizer", default="SpaceTokenizer", choices=get_classnames_in_module(tokenizers),
- help="Tokenizer class name.")
parser.add_argument(
"--delimiter", default=" ",
help="Token delimiter for text serialization.")
+ tokenizers.add_command_line_arguments(parser)
args = parser.parse_args()
- tokenizer = getattr(tokenizers, args.tokenizer)()
-
- for line in sys.stdin:
- line = line.strip()
- tokens = tokenizer(line)
- merged_tokens = args.delimiter.join(tokens)
- print(merged_tokens)
+ tokenizer = tokenizers.build_tokenizer(args)
+ tokenizer.tokenize_stream(delimiter=args.delimiter)
if __name__ == "__main__":
main()
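
The stdin loop deleted above is exactly what `tokenize_stream` now encapsulates on the tokenizer side. A sketch of an equivalent implementation, assuming the same semantics as the removed code (only the call changes from `tokenizer(line)` to `tokenizer.tokenize(line)`):

```python
import sys

def tokenize_stream(tokenizer, delimiter=" "):
    # Reads one sentence per line, tokenizes it, and prints the tokens
    # joined by the delimiter -- the behavior removed from this script.
    for line in sys.stdin:
        tokens = tokenizer.tokenize(line.strip())
        print(delimiter.join(tokens))
```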
diff --git a/config/sample.yml b/config/sample.yml
index 63926bd6..69eaec5e 100644
--- a/config/sample.yml
+++ b/config/sample.yml
@@ -77,7 +77,7 @@ train:
# (optional) Save evaluation predictions in model_dir/eval/.
save_eval_predictions: false
# (optional) Evalutator or list of evaluators that are called on the saved evaluation predictions.
- # Available evaluators: BLEU
+ # Available evaluators: BLEU, BLEU-detok
external_evaluators: BLEU
# (optional) The maximum length of feature sequences during training (default: None).
maximum_features_length: 70
diff --git a/config/tokenization/aggressive.yml b/config/tokenization/aggressive.yml
new file mode 100644
index 00000000..bb149a87
--- /dev/null
+++ b/config/tokenization/aggressive.yml
@@ -0,0 +1,2 @@
+mode: aggressive
+joiner_annotate: true
diff --git a/config/tokenization/sample.yml b/config/tokenization/sample.yml
new file mode 100644
index 00000000..384d985d
--- /dev/null
+++ b/config/tokenization/sample.yml
@@ -0,0 +1,12 @@
+# This is a sample tokenization configuration with all values set to their default.
+
+mode: conservative
+bpe_model_path: ""
+joiner: ■
+joiner_annotate: false
+joiner_new: false
+case_feature: false
+segment_case: false
+segment_numbers: false
+segment_alphabet_change: false
+segment_alphabet: []
diff --git a/docs/data.md b/docs/data.md
index 8ddebe0f..dbbaf83b 100644
--- a/docs/data.md
+++ b/docs/data.md
@@ -9,7 +9,7 @@ The format of the data files is defined by the `opennmt.inputters.Inputter` used
All `opennmt.inputters.TextInputter`s expect a text file as input where:
* sentences are separated by a **newline**
-* tokens are separated by a **space** (unless a custom tokenizer is set)
+* tokens are separated by a **space** (unless a custom tokenizer is set, see [Tokenization](tokenization.html))
For example:
diff --git a/docs/index.rst b/docs/index.rst
index 7d28237a..bcf86678 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -7,6 +7,7 @@ Overview
:maxdepth: 1
data.md
+ tokenization.md
configuration.md
training.md
serving.md
diff --git a/docs/package/opennmt.tokenizers.opennmt_tokenizer.rst b/docs/package/opennmt.tokenizers.opennmt_tokenizer.rst
new file mode 100644
index 00000000..45450fd8
--- /dev/null
+++ b/docs/package/opennmt.tokenizers.opennmt_tokenizer.rst
@@ -0,0 +1,7 @@
+opennmt\.tokenizers\.opennmt\_tokenizer module
+==============================================
+
+.. automodule:: opennmt.tokenizers.opennmt_tokenizer
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/package/opennmt.tokenizers.rst b/docs/package/opennmt.tokenizers.rst
index 4c5aa243..1cdea2d3 100644
--- a/docs/package/opennmt.tokenizers.rst
+++ b/docs/package/opennmt.tokenizers.rst
@@ -11,5 +11,6 @@ Submodules
.. toctree::
+ opennmt.tokenizers.opennmt_tokenizer
opennmt.tokenizers.tokenizer
diff --git a/docs/tokenization.md b/docs/tokenization.md
new file mode 100644
index 00000000..37b3da4a
--- /dev/null
+++ b/docs/tokenization.md
@@ -0,0 +1,89 @@
+# Tokenization
+
+OpenNMT-tf can use the OpenNMT [Tokenizer](https://github.com/OpenNMT/Tokenizer) as a plugin to provide advanced tokenization behaviors.
+
+## Installation
+
+The following tools and packages are required:
+
+* C++11 compiler
+* CMake
+* Boost.Python
+
+On Ubuntu, these packages can be installed with `apt-get`:
+
+```bash
+sudo apt-get install build-essential gcc cmake libboost-python-dev
+```
+
+1\. Fetch the Tokenizer plugin inside the OpenNMT-tf repository:
+
+```bash
+git submodule update --init
+```
+
+2\. Compile the tokenizer plugin:
+
+```bash
+mkdir build && cd build
+cmake .. && make
+cd ..
+```
+
+3\. Configure your environment for Python to find the newly generated package:
+
+```bash
+export PYTHONPATH="$PYTHONPATH:$HOME/OpenNMT-tf/build/third_party/OpenNMTTokenizer/bindings/python/"
+```
+
+4\. Test the plugin:
+
+```bash
+$ echo "Hello world!" | python -m bin.tokenize_text --tokenizer OpenNMTTokenizer
+Hello world !
+```
+
+## Usage
+
+YAML files are used to set the tokenizer options to ensure consistency during data preparation and training. See the sample file `config/tokenization/sample.yml`.
+
+Here is an example workflow:
+
+1\. Build the vocabularies with the custom tokenizer, e.g.:
+
+```bash
+python -m bin.build_vocab --tokenizer OpenNMTTokenizer --tokenizer_config config/tokenization/aggressive.yml --size 50000 --save_vocab data/enfr/en-vocab.txt data/enfr/en-train.txt
+python -m bin.build_vocab --tokenizer OpenNMTTokenizer --tokenizer_config config/tokenization/aggressive.yml --size 50000 --save_vocab data/enfr/fr-vocab.txt data/enfr/fr-train.txt
+```
+
+*The text files are only given as examples and are not part of the repository.*
+
+2\. Update your model's `TextInputter`s to use the custom tokenizer, e.g.:
+
+```python
+return onmt.models.SequenceToSequence(
+ source_inputter=onmt.inputters.WordEmbedder(
+ vocabulary_file_key="source_words_vocabulary",
+ embedding_size=512,
+ tokenizer=onmt.tokenizers.OpenNMTTokenizer(
+ configuration_file_or_key="source_tokenizer_config")),
+ target_inputter=onmt.inputters.WordEmbedder(
+ vocabulary_file_key="target_words_vocabulary",
+ embedding_size=512,
+ tokenizer=onmt.tokenizers.OpenNMTTokenizer(
+ configuration_file_or_key="target_tokenizer_config")),
+ ...)
+```
+
+3\. Reference the tokenizer configurations in the data configuration, e.g.:
+
+```yaml
+data:
+ source_tokenizer_config: config/tokenization/aggressive.yml
+ target_tokenizer_config: config/tokenization/aggressive.yml
+```
+
+## Notes
+
+* As of now, tokenizers are not part of the exported graph.
+* Predictions saved during inference or evaluation are detokenized. Consider using the "BLEU-detok" external evaluator, which applies a simple word-level tokenization before computing the BLEU score; a code sketch follows.
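A hedged sketch of wiring that evaluator up programmatically via the `external_evaluation_fn` added in this patch (the labels path is illustrative, and run-configuration keys vary by OpenNMT-tf version):

```python
from opennmt.utils.evaluator import external_evaluation_fn

# "BLEU-detok" resolves to BLEUDetokEvaluator, per the evaluator diff below.
eval_fn = external_evaluation_fn(["BLEU-detok"], labels_file="data/enfr/fr-eval.txt")
```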
diff --git a/opennmt/inputters/text_inputter.py b/opennmt/inputters/text_inputter.py
index 5e2e78bd..a40b4c42 100644
--- a/opennmt/inputters/text_inputter.py
+++ b/opennmt/inputters/text_inputter.py
@@ -224,7 +224,7 @@ class TextInputter(Inputter):
if "tokens" not in data:
text = data["raw"]
- tokens = self.tokenizer(text)
+ tokens = self.tokenizer.tokenize(text)
length = tf.shape(tokens)[0]
data = self.set_data_field(data, "tokens", tokens, padded_shape=[None], volatile=True)
diff --git a/opennmt/models/sequence_to_sequence.py b/opennmt/models/sequence_to_sequence.py
index 368d01b5..a7f51fcb 100644
--- a/opennmt/models/sequence_to_sequence.py
+++ b/opennmt/models/sequence_to_sequence.py
@@ -231,5 +231,5 @@ class SequenceToSequence(Model):
for i in range(n_best):
tokens = prediction["tokens"][i][:prediction["length"][i] - 1] # Ignore </s>.
- sentence = b" ".join(tokens)
- print_bytes(sentence, stream=stream)
+ sentence = self.target_inputter.tokenizer.detokenize(tokens)
+ print_bytes(tf.compat.as_bytes(sentence), stream=stream)
diff --git a/opennmt/tokenizers/__init__.py b/opennmt/tokenizers/__init__.py
index 45d0f25d..ab98e921 100644
--- a/opennmt/tokenizers/__init__.py
+++ b/opennmt/tokenizers/__init__.py
@@ -3,4 +3,33 @@
Tokenizers can work on string ``tf.Tensor`` as in-graph transformation.
"""
+import sys
+import inspect
+
+try:
+ import pyonmttok
+ from opennmt.tokenizers.opennmt_tokenizer import OpenNMTTokenizer
+except ImportError:
+ pass
+
from opennmt.tokenizers.tokenizer import SpaceTokenizer, CharacterTokenizer
+
+def add_command_line_arguments(parser):
+ """Adds command line arguments to select the tokenizer."""
+ choices = []
+ module = sys.modules[__name__]
+ for symbol in dir(module):
+ if inspect.isclass(getattr(module, symbol)):
+ choices.append(symbol)
+
+ parser.add_argument(
+ "--tokenizer", default="SpaceTokenizer", choices=choices,
+ help="Tokenizer class name.")
+ parser.add_argument(
+ "--tokenizer_config", default=None,
+ help="Tokenization configuration file.")
+
+def build_tokenizer(args):
+ """Returns a new tokenizer based on command line arguments."""
+ module = sys.modules[__name__]
+ return getattr(module, args.tokenizer)(configuration_file_or_key=args.tokenizer_config)
diff --git a/opennmt/tokenizers/opennmt_tokenizer.py b/opennmt/tokenizers/opennmt_tokenizer.py
new file mode 100644
index 00000000..5aced32d
--- /dev/null
+++ b/opennmt/tokenizers/opennmt_tokenizer.py
@@ -0,0 +1,60 @@
+"""Define the OpenNMT tokenizer."""
+
+import six
+
+import pyonmttok
+
+import tensorflow as tf
+
+from opennmt.tokenizers.tokenizer import Tokenizer
+
+
+def create_tokenizer(config):
+ """Creates a new OpenNMT tokenizer.
+
+ Args:
+ config: A dictionary of tokenization options.
+
+ Returns:
+ A ``pyonmttok.Tokenizer``.
+ """
+ def _set(kwargs, key):
+ if key in config:
+ value = config[key]
+ if isinstance(value, six.string_types):
+ value = tf.compat.as_bytes(value)
+ kwargs[key] = value
+
+ kwargs = {}
+ _set(kwargs, "bpe_model_path")
+ _set(kwargs, "joiner")
+ _set(kwargs, "joiner_annotate")
+ _set(kwargs, "joiner_new")
+ _set(kwargs, "case_feature")
+ _set(kwargs, "segment_case")
+ _set(kwargs, "segment_numbers")
+ _set(kwargs, "segment_alphabet_change")
+ _set(kwargs, "segment_alphabet")
+
+ return pyonmttok.Tokenizer(config.get("mode", "conservative"), **kwargs)
+
+
+class OpenNMTTokenizer(Tokenizer):
+ """Uses the OpenNMT tokenizer."""
+
+ def __init__(self, configuration_file_or_key=None):
+ super(OpenNMTTokenizer, self).__init__(configuration_file_or_key=configuration_file_or_key)
+ self._tokenizer = None
+
+ def _tokenize_string(self, text):
+ if self._tokenizer is None:
+ self._tokenizer = create_tokenizer(self._config)
+ text = tf.compat.as_bytes(text)
+ tokens, _ = self._tokenizer.tokenize(text)
+ return tokens
+
+ def _detokenize_string(self, tokens):
+ if self._tokenizer is None:
+ self._tokenizer = create_tokenizer(self._config)
+ tokens = [tf.compat.as_bytes(token) for token in tokens]
+ return self._tokenizer.detokenize(tokens)
diff --git a/opennmt/tokenizers/tokenizer.py b/opennmt/tokenizers/tokenizer.py
index ebc571be..cb96c15b 100644
--- a/opennmt/tokenizers/tokenizer.py
+++ b/opennmt/tokenizers/tokenizer.py
@@ -1,16 +1,84 @@
+# -*- coding: utf-8 -*-
+
"""Define base tokenizers."""
+import sys
+import os
import abc
import six
+import yaml
import tensorflow as tf
+from opennmt.utils.misc import print_bytes
+
@six.add_metaclass(abc.ABCMeta)
class Tokenizer(object):
"""Base class for tokenizers."""
- def __call__(self, text):
+ def __init__(self, configuration_file_or_key=None):
+ """Initializes the tokenizer.
+
+ Args:
+ configuration_file_or_key: The YAML configuration file or the key to
+ the YAML configuration file.
+ """
+ self._config = {}
+ if configuration_file_or_key is not None and os.path.isfile(configuration_file_or_key):
+ configuration_file = configuration_file_or_key
+ with open(configuration_file) as conf_file:
+ self._config = yaml.load(conf_file)
+ self._configuration_file_key = None
+ else:
+ self._configuration_file_key = configuration_file_or_key
+
+ def initialize(self, metadata):
+ """Initializes the tokenizer (e.g. load BPE models).
+
+ Any external assets should be registered in the standard assets collection:
+
+ .. code-block:: python
+
+ tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, filename)
+
+ Args:
+ metadata: A dictionary containing additional metadata set
+ by the user.
+ """
+ if self._configuration_file_key is not None:
+ configuration_file = metadata[self._configuration_file_key]
+ with open(configuration_file) as conf_file:
+ self._config = yaml.load(conf_file)
+
+ def tokenize_stream(self, input_stream=sys.stdin, output_stream=sys.stdout, delimiter=" "):
+ """Tokenizes a stream of sentences.
+
+ Args:
+ input_stream: The input stream.
+ output_stream: The output stream.
+ delimiter: The token delimiter to use for text serialization.
+ """
+ for line in input_stream:
+ line = line.strip()
+ tokens = self.tokenize(line)
+ merged_tokens = delimiter.join(tokens)
+ print_bytes(tf.compat.as_bytes(merged_tokens), stream=output_stream)
+
+ def detokenize_stream(self, input_stream=sys.stdin, output_stream=sys.stdout, delimiter=" "):
+ """Detokenizes a stream of sentences.
+
+ Args:
+ input_stream: The input stream.
+ output_stream: The output stream.
+ delimiter: The token delimiter used for text serialization.
+ """
+ for line in input_stream:
+ tokens = line.strip().split(delimiter)
+ string = self.detokenize(tokens)
+ print_bytes(tf.compat.as_bytes(string), stream=output_stream)
+
+ def tokenize(self, text):
"""Tokenizes text.
Args:
@@ -19,27 +87,61 @@ class Tokenizer(object):
Returns:
A 1-D string ``tf.Tensor`` if :obj:`text` is a ``tf.Tensor`` or a list of
Python unicode strings otherwise.
+
+ Raises:
+ ValueError: if the rank of :obj:`text` is greater than 0.
"""
if tf.contrib.framework.is_tensor(text):
- return self._tokenize_tensor(text)
+ rank = len(text.get_shape().as_list())
+ if rank == 0:
+ return self._tokenize_tensor(text)
+ else:
+ raise ValueError("Unsupported tensor rank for tokenization: {}".format(rank))
else:
text = tf.compat.as_text(text)
return self._tokenize_string(text)
- def initialize(self, metadata):
- """Initializes the tokenizer (e.g. load BPE models).
+ def detokenize(self, tokens, sequence_length=None):
+ """Detokenizes tokens.
- Any external assets should be registered in the standard assets collection:
+ The Tensor version supports batches of tokens.
- .. code-block:: python
+ Args:
+ tokens: The tokens as a 1-D or 2-D ``tf.Tensor`` or list of Python
+ strings.
+ sequence_length: The length of each sequence. Required if :obj:`tokens`
+ is a ``tf.Tensor``.
- tf.add_to_collection(ops.GraphKeys.ASSET_FILEPATHS, filename)
+ Returns:
+ A 0-D or 1-D string ``tf.Tensor`` if :obj:`tokens` is a ``tf.Tensor`` or a
+ Python unicode string otherwise.
- Args:
- metadata: A dictionary containing additional metadata set
- by the user.
+ Raises:
+ ValueError: if the rank of :obj:`tokens` is greater than 2.
+ ValueError: if :obj:`tokens` is a 2-D ``tf.Tensor`` and
+ :obj:`sequence_length` is not set.
"""
- pass
+ if tf.contrib.framework.is_tensor(tokens):
+ rank = len(tokens.get_shape().as_list())
+ if rank == 1:
+ return self._detokenize_tensor(tokens)
+ elif rank == 2:
+ if sequence_length is None:
+ raise ValueError("sequence_length is required for Tensor detokenization")
+ batch_size = tf.shape(tokens)[0]
+ array = tf.TensorArray(tf.string, size=batch_size, dynamic_size=False)
+ _, array = tf.while_loop(
+ lambda i, _: i < batch_size,
+ lambda i, a: (
+ i + 1, a.write(i, self._detokenize_tensor(tokens[i, :sequence_length[i]]))),
+ (tf.constant(0), array),
+ back_prop=False)
+ return array.stack()
+ else:
+ raise ValueError("Unsupported tensor rank for detokenization: {}".format(rank))
+ else:
+ tokens = [tf.compat.as_text(token) for token in tokens]
+ return self._detokenize_string(tokens)
def _tokenize_tensor(self, text):
"""Tokenizes a tensor.
@@ -54,10 +156,24 @@ class Tokenizer(object):
A 1-D string ``tf.Tensor``.
"""
text = tf.py_func(
- lambda x: tf.compat.as_bytes("\0".join(self(x))), [text], tf.string)
+ lambda x: tf.compat.as_bytes("\0".join(self.tokenize(x))), [text], tf.string)
tokens = tf.string_split([text], delimiter="\0").values
return tokens
+ def _detokenize_tensor(self, tokens):
+ """Detokenizes tokens.
+
+ When not overridden, this default implementation uses a ``tf.py_func``
+ operation to call the string-based detokenization.
+
+ Args:
+ tokens: A 1-D ``tf.Tensor``.
+
+ Returns:
+ A 0-D string ``tf.Tensor``.
+ """
+ return tf.py_func(self.detokenize, [tokens], tf.string)
+
@abc.abstractmethod
def _tokenize_string(self, text):
"""Tokenizes a Python unicode string.
@@ -72,19 +188,40 @@ class Tokenizer(object):
"""
raise NotImplementedError()
+ @abc.abstractmethod
+ def _detokenize_string(self, tokens):
+ """Detokenizes tokens.
+
+ Args:
+ tokens: A list of Python unicode strings.
+
+ Returns:
+ A unicode Python string.
+ """
+ raise NotImplementedError()
+
class SpaceTokenizer(Tokenizer):
"""A tokenizer that splits on spaces."""
def _tokenize_tensor(self, text):
- return tf.string_split([text]).values
+ return tf.string_split([text], delimiter=" ").values
+
+ def _detokenize_tensor(self, tokens):
+ return tf.foldl(lambda a, x: a + " " + x, tokens, back_prop=False)
def _tokenize_string(self, text):
return text.split()
+ def _detokenize_string(self, tokens):
+ return " ".join(tokens)
+
class CharacterTokenizer(Tokenizer):
"""A tokenizer that splits unicode characters."""
def _tokenize_string(self, text):
- return list(text)
+ return list(text.replace(" ", u"▁"))
+
+ def _detokenize_string(self, tokens):
+ return "".join(tokens).replace(u"▁", " ")
diff --git a/opennmt/utils/evaluator.py b/opennmt/utils/evaluator.py
index 183be623..1d5c1f9c 100644
--- a/opennmt/utils/evaluator.py
+++ b/opennmt/utils/evaluator.py
@@ -9,6 +9,13 @@ import six
import tensorflow as tf
from tensorflow.python.summary.writer.writer_cache import FileWriterCache as SummaryWriterCache
+from opennmt import tokenizers
+
+
+def _word_level_tokenization(input_filename, output_filename):
+ tokenizer = tokenizers.OpenNMTTokenizer()
+ with open(input_filename, "rb") as input_file, open(output_filename, "wb") as output_file:
+ tokenizer.tokenize_stream(input_stream=input_file, output_stream=output_file)
@six.add_metaclass(abc.ABCMeta)
@@ -82,6 +89,26 @@ class BLEUEvaluator(ExternalEvaluator):
return None
+class BLEUDetokEvaluator(BLEUEvaluator):
+ """Evaluator applying a simple tokenization before calling multi-bleu.perl."""
+
+ def __init__(self, labels_file=None, output_dir=None):
+ if not hasattr(tokenizers, "OpenNMTTokenizer"):
+ raise RuntimeError("The BLEU-detok evaluator only works when the OpenNMT tokenizer "
+ "is available. Please re-check its installation.")
+ super(BLEUDetokEvaluator, self).__init__(labels_file=labels_file, output_dir=output_dir)
+
+ def name(self):
+ return "BLEU-detok"
+
+ def score(self, labels_file, predictions_path):
+ tok_labels_file = labels_file + ".light_tok"
+ tok_predictions_path = predictions_path + ".light_tok"
+ _word_level_tokenization(labels_file, tok_labels_file)
+ _word_level_tokenization(predictions_path, tok_predictions_path)
+ return super(BLEUDetokEvaluator, self).score(tok_labels_file, tok_predictions_path)
+
+
def external_evaluation_fn(evaluators_name, labels_file, output_dir=None):
"""Returns a callable to be used in
:class:`opennmt.utils.hooks.SaveEvaluationPredictionHook` that calls one or
@@ -110,6 +137,8 @@ def external_evaluation_fn(evaluators_name, labels_file, output_dir=None):
name = name.lower()
if name == "bleu":
evaluator = BLEUEvaluator(labels_file=labels_file, output_dir=output_dir)
+ elif name == "bleu-detok":
+ evaluator = BLEUDetokEvaluator(labels_file=labels_file, output_dir=output_dir)
else:
raise ValueError("No evaluator associated with the name: {}".format(name))
evaluators.append(evaluator)
diff --git a/opennmt/utils/misc.py b/opennmt/utils/misc.py
index 66c2db19..ca9e49b6 100644
--- a/opennmt/utils/misc.py
+++ b/opennmt/utils/misc.py
@@ -3,7 +3,6 @@
from __future__ import print_function
import sys
-import inspect
import tensorflow as tf
@@ -43,14 +42,6 @@ def count_lines(filename):
pass
return i + 1
-def get_classnames_in_module(module):
- """Returns a list of classnames exposed by a module."""
- names = []
- for symbol in dir(module):
- if inspect.isclass(getattr(module, symbol)):
- names.append(symbol)
- return names
-
def count_parameters():
"""Returns the total number of trainable parameters."""
total = 0
diff --git a/opennmt/utils/vocab.py b/opennmt/utils/vocab.py
index a32c4ea6..e9f20593 100644
--- a/opennmt/utils/vocab.py
+++ b/opennmt/utils/vocab.py
@@ -44,7 +44,7 @@ class Vocab(object):
for line in text:
line = tf.compat.as_text(line.strip())
if tokenizer:
- tokens = tokenizer(line)
+ tokens = tokenizer.tokenize(line)
else:
tokens = line.split()
for token in tokens:
diff --git a/third_party/OpenNMTTokenizer b/third_party/OpenNMTTokenizer
new file mode 160000
index 00000000..60ac5efd
--- /dev/null
+++ b/third_party/OpenNMTTokenizer
@@ -0,0 +1,1 @@
+Subproject commit 60ac5efd0db175ad2d736f1086e434680dc068bc
diff --git a/third_party/learn_bpe.py b/third_party/learn_bpe.py
new file mode 100644
index 00000000..2d1da384
--- /dev/null
+++ b/third_party/learn_bpe.py
@@ -0,0 +1,272 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Author: Rico Sennrich
+
+# The MIT License (MIT)
+
+# Copyright (c) 2015 University of Edinburgh
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+"""Use byte pair encoding (BPE) to learn a variable-length encoding of the vocabulary in a text.
+Unlike the original BPE, it does not compress the plain text, but can be used to reduce the vocabulary
+of a text to a configurable number of symbols, with only a small increase in the number of tokens.
+
+Reference:
+Rico Sennrich, Barry Haddow and Alexandra Birch (2016). Neural Machine Translation of Rare Words with Subword Units.
+Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (ACL 2016). Berlin, Germany.
+"""
+
+from __future__ import unicode_literals
+
+import sys
+import codecs
+import re
+import copy
+import argparse
+from collections import defaultdict, Counter
+
+# hack for python2/3 compatibility
+from io import open
+argparse.open = open
+
+def create_parser():
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description="learn BPE-based word segmentation")
+
+ parser.add_argument(
+ '--input', '-i', type=argparse.FileType('r'), default=sys.stdin,
+ metavar='PATH',
+ help="Input text (default: standard input).")
+
+ parser.add_argument(
+ '--output', '-o', type=argparse.FileType('w'), default=sys.stdout,
+ metavar='PATH',
+ help="Output file for BPE codes (default: standard output)")
+ parser.add_argument(
+ '--symbols', '-s', type=int, default=10000,
+ help="Create this many new symbols (each representing a character n-gram) (default: %(default)s))")
+ parser.add_argument(
+ '--min-frequency', type=int, default=2, metavar='FREQ',
+ help='Stop if no symbol pair has frequency >= FREQ (default: %(default)s)')
+ parser.add_argument('--dict-input', action="store_true",
+ help="If set, input file is interpreted as a dictionary where each line contains a word-count pair")
+ parser.add_argument(
+ '--verbose', '-v', action="store_true",
+ help="verbose mode.")
+
+ return parser
+
+def get_vocabulary(fobj, is_dict=False):
+ """Read text and return dictionary that encodes vocabulary
+ """
+ vocab = Counter()
+ for line in fobj:
+ if is_dict:
+ word, count = line.strip().split()
+ vocab[word] = int(count)
+ else:
+ for word in line.split():
+ vocab[word] += 1
+ return vocab
+
+def update_pair_statistics(pair, changed, stats, indices):
+ """Minimally update the indices and frequency of symbol pairs
+
+ if we merge a pair of symbols, only pairs that overlap with occurrences
+ of this pair are affected, and need to be updated.
+ """
+ stats[pair] = 0
+ indices[pair] = defaultdict(int)
+ first, second = pair
+ new_pair = first+second
+ for j, word, old_word, freq in changed:
+
+ # find all instances of pair, and update frequency/indices around it
+ i = 0
+ while True:
+ # find first symbol
+ try:
+ i = old_word.index(first, i)
+ except ValueError:
+ break
+ # if first symbol is followed by second symbol, we've found an occurrence of pair (old_word[i:i+2])
+ if i < len(old_word)-1 and old_word[i+1] == second:
+ # assuming a symbol sequence "A B C", if "B C" is merged, reduce the frequency of "A B"
+ if i:
+ prev = old_word[i-1:i+1]
+ stats[prev] -= freq
+ indices[prev][j] -= 1
+ if i < len(old_word)-2:
+ # assuming a symbol sequence "A B C B", if "B C" is merged, reduce the frequency of "C B".
+ # however, skip this if the sequence is A B C B C, because the frequency of "C B" will be reduced by the previous code block
+ if old_word[i+2] != first or i >= len(old_word)-3 or old_word[i+3] != second:
+ nex = old_word[i+1:i+3]
+ stats[nex] -= freq
+ indices[nex][j] -= 1
+ i += 2
+ else:
+ i += 1
+
+ i = 0
+ while True:
+ try:
+ # find new pair
+ i = word.index(new_pair, i)
+ except ValueError:
+ break
+ # assuming a symbol sequence "A BC D", if "B C" is merged, increase the frequency of "A BC"
+ if i:
+ prev = word[i-1:i+1]
+ stats[prev] += freq
+ indices[prev][j] += 1
+ # assuming a symbol sequence "A BC B", if "B C" is merged, increase the frequency of "BC B"
+ # however, if the sequence is A BC BC, skip this step because the count of "BC BC" will be incremented by the previous code block
+ if i < len(word)-1 and word[i+1] != new_pair:
+ nex = word[i:i+2]
+ stats[nex] += freq
+ indices[nex][j] += 1
+ i += 1
+
+
+def get_pair_statistics(vocab):
+ """Count frequency of all symbol pairs, and create index"""
+
+ # data structure of pair frequencies
+ stats = defaultdict(int)
+
+ #index from pairs to words
+ indices = defaultdict(lambda: defaultdict(int))
+
+ for i, (word, freq) in enumerate(vocab):
+ prev_char = word[0]
+ for char in word[1:]:
+ stats[prev_char, char] += freq
+ indices[prev_char, char][i] += 1
+ prev_char = char
+
+ return stats, indices
+
+
+def replace_pair(pair, vocab, indices):
+ """Replace all occurrences of a symbol pair ('A', 'B') with a new symbol 'AB'"""
+ first, second = pair
+ pair_str = ''.join(pair)
+ pair_str = pair_str.replace('\\','\\\\')
+ changes = []
+ pattern = re.compile(r'(?<!\S)' + re.escape(first + ' ' + second) + r'(?!\S)')
+ if sys.version_info < (3, 0):
+ iterator = indices[pair].iteritems()
+ else:
+ iterator = indices[pair].items()
+ for j, freq in iterator:
+ if freq < 1:
+ continue
+ word, freq = vocab[j]
+ new_word = ' '.join(word)
+ new_word = pattern.sub(pair_str, new_word)
+ new_word = tuple(new_word.split())
+
+ vocab[j] = (new_word, freq)
+ changes.append((j, new_word, word, freq))
+
+ return changes
+
+def prune_stats(stats, big_stats, threshold):
+ """Prune statistics dict for efficiency of max()
+
+ The frequency of a symbol pair never increases, so pruning is generally safe
+ (until the most frequent pair is less frequent than a pair we previously pruned)
+ big_stats keeps full statistics for when we need to access pruned items
+ """
+ for item,freq in list(stats.items()):
+ if freq < threshold:
+ del stats[item]
+ if freq < 0:
+ big_stats[item] += freq
+ else:
+ big_stats[item] = freq
+
+
+def main(infile, outfile, num_symbols, min_frequency=2, verbose=False, is_dict=False):
+ """Learn num_symbols BPE operations from vocabulary, and write to outfile.
+ """
+
+ # version 0.2 changes the handling of the end-of-word token ('</w>');
+ # version numbering allows backward compatibility
+ outfile.write('#version: 0.2\n')
+
+ vocab = get_vocabulary(infile, is_dict)
+ vocab = dict([(tuple(x[:-1])+(x[-1]+'</w>',) ,y) for (x,y) in vocab.items()])
+ sorted_vocab = sorted(vocab.items(), key=lambda x: x[1], reverse=True)
+
+ stats, indices = get_pair_statistics(sorted_vocab)
+ big_stats = copy.deepcopy(stats)
+ # threshold is inspired by Zipfian assumption, but should only affect speed
+ threshold = max(stats.values()) / 10
+ for i in range(num_symbols):
+ if stats:
+ most_frequent = max(stats, key=lambda x: (stats[x], x))
+
+ # we probably missed the best pair because of pruning; go back to full statistics
+ if not stats or (i and stats[most_frequent] < threshold):
+ prune_stats(stats, big_stats, threshold)
+ stats = copy.deepcopy(big_stats)
+ most_frequent = max(stats, key=lambda x: (stats[x], x))
+ # threshold is inspired by Zipfian assumption, but should only affect speed
+ threshold = stats[most_frequent] * i/(i+10000.0)
+ prune_stats(stats, big_stats, threshold)
+
+ if stats[most_frequent] < min_frequency:
+ sys.stderr.write('no pair has frequency >= {0}. Stopping\n'.format(min_frequency))
+ break
+
+ if verbose:
+ sys.stderr.write('pair {0}: {1} {2} -> {1}{2} (frequency {3})\n'.format(i, most_frequent[0], most_frequent[1], stats[most_frequent]))
+ outfile.write('{0} {1}\n'.format(*most_frequent))
+ changes = replace_pair(most_frequent, sorted_vocab, indices)
+ update_pair_statistics(most_frequent, changes, stats, indices)
+ stats[most_frequent] = 0
+ if not i % 100:
+ prune_stats(stats, big_stats, threshold)
+
+
+if __name__ == '__main__':
+
+ # python 2/3 compatibility
+ if sys.version_info < (3, 0):
+ sys.stderr = codecs.getwriter('UTF-8')(sys.stderr)
+ sys.stdout = codecs.getwriter('UTF-8')(sys.stdout)
+ sys.stdin = codecs.getreader('UTF-8')(sys.stdin)
+ else:
+ sys.stderr = codecs.getwriter('UTF-8')(sys.stderr.buffer)
+ sys.stdout = codecs.getwriter('UTF-8')(sys.stdout.buffer)
+ sys.stdin = codecs.getreader('UTF-8')(sys.stdin.buffer)
+
+ parser = create_parser()
+ args = parser.parse_args()
+
+ # read/write files as UTF-8
+ if args.input.name != '<stdin>':
+ args.input = codecs.open(args.input.name, encoding='utf-8')
+ if args.output.name != '<stdout>':
+ args.output = codecs.open(args.output.name, 'w', encoding='utf-8')
+
+ main(args.input, args.output, args.symbols, args.min_frequency, args.verbose, is_dict=args.dict_input)
| Add a proper corpus tokenizer
While working on big-corpus MT training I noticed that the current implementation lacks a proper tokenization script; the currently available one only splits on whitespace. So I wrote one for myself based on the spaCy library. I'm not sure what your thoughts are on 3rd-party tools, but if you're OK with it, I can make a pull request.
```python
"""Standalone script to tokenize a corpus based on Spacy NLP library."""
from __future__ import print_function
import argparse
import sys
import spacy
reload(sys)
sys.setdefaultencoding('utf-8')
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
"--lang", default="en",
help="Language of your text.")
parser.add_argument(
"--delimiter", default=" ",
help="Token delimiter for text serialization.")
args = parser.parse_args()
nlp = spacy.load(args.lang, disable=['parser', 'tagger', 'ner'])
lines = []
for line in sys.stdin:
line = line.strip().decode("utf-8")
tokens = nlp(line, disable=['parser', 'tagger', 'ner'])
merged_tokens = args.delimiter.join([str(token) for token in tokens])
print(merged_tokens)
if __name__ == "__main__":
main()
```
The usage:
```bash
python -m bin.tokenize_text_spacy < data/PathTo/giga-fren.release2.fixed.en > data/PathTo/giga-fren.release2.token.en
```
Performance:
~1.1 GB of text per hour, or roughly 6-8 million sentences per hour
The 22.5 million sentences for En<->Fr were processed in ~3 hrs for English and ~4.5 hrs for French
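For reference, a hedged sketch of how this spaCy approach could be folded into the `Tokenizer` base class that the patch above introduces. `SpacyTokenizer` is hypothetical, and only the string-level hooks are implemented:

```python
import spacy

from opennmt.tokenizers.tokenizer import Tokenizer


class SpacyTokenizer(Tokenizer):
    """Hypothetical tokenizer backed by spaCy (not part of the patch)."""

    def __init__(self, lang="en", configuration_file_or_key=None):
        super(SpacyTokenizer, self).__init__(
            configuration_file_or_key=configuration_file_or_key)
        # Only tokenization is needed, so skip the heavier pipeline components.
        self._nlp = spacy.load(lang, disable=["parser", "tagger", "ner"])

    def _tokenize_string(self, text):
        return [token.text for token in self._nlp(text)]

    def _detokenize_string(self, tokens):
        # spaCy ships no detokenizer; joining on spaces is a crude stand-in.
        return " ".join(tokens)
```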
| OpenNMT/OpenNMT-tf | diff --git a/opennmt/tests/tokenizer_test.py b/opennmt/tests/tokenizer_test.py
index da3875ee..59c74e48 100644
--- a/opennmt/tests/tokenizer_test.py
+++ b/opennmt/tests/tokenizer_test.py
@@ -10,25 +10,62 @@ class TokenizerTest(tf.test.TestCase):
def _testTokenizerOnTensor(self, tokenizer, text, ref_tokens):
ref_tokens = [tf.compat.as_bytes(token) for token in ref_tokens]
text = tf.constant(text)
- tokens = tokenizer(text)
+ tokens = tokenizer.tokenize(text)
with self.test_session() as sess:
tokens = sess.run(tokens)
self.assertAllEqual(ref_tokens, tokens)
def _testTokenizerOnString(self, tokenizer, text, ref_tokens):
ref_tokens = [tf.compat.as_text(token) for token in ref_tokens]
- tokens = tokenizer(text)
+ tokens = tokenizer.tokenize(text)
self.assertAllEqual(ref_tokens, tokens)
def _testTokenizer(self, tokenizer, text, ref_tokens):
self._testTokenizerOnTensor(tokenizer, text, ref_tokens)
self._testTokenizerOnString(tokenizer, text, ref_tokens)
+ def _testDetokenizerOnTensor(self, tokenizer, tokens, ref_text):
+ ref_text = tf.compat.as_bytes(ref_text)
+ tokens = tf.constant(tokens)
+ text = tokenizer.detokenize(tokens)
+ with self.test_session() as sess:
+ text = sess.run(text)
+ self.assertEqual(ref_text, text)
+
+ def _testDetokenizerOnBatchTensor(self, tokenizer, tokens, ref_text):
+ ref_text = [tf.compat.as_bytes(t) for t in ref_text]
+ sequence_length = [len(x) for x in tokens]
+ max_length = max(sequence_length)
+ tokens = [tok + [""] * (max_length - len(tok)) for tok in tokens]
+ tokens = tf.constant(tokens)
+ sequence_length = tf.constant(sequence_length)
+ text = tokenizer.detokenize(tokens, sequence_length=sequence_length)
+ with self.test_session() as sess:
+ text = sess.run(text)
+ self.assertAllEqual(ref_text, text)
+
+ def _testDetokenizerOnString(self, tokenizer, tokens, ref_text):
+ tokens = [tf.compat.as_text(token) for token in tokens]
+ ref_text = tf.compat.as_text(ref_text)
+ text = tokenizer.detokenize(tokens)
+ self.assertAllEqual(ref_text, text)
+
+ def _testDetokenizer(self, tokenizer, tokens, ref_text):
+ self._testDetokenizerOnBatchTensor(tokenizer, tokens, ref_text)
+ for tok, ref in zip(tokens, ref_text):
+ self._testDetokenizerOnTensor(tokenizer, tok, ref)
+ self._testDetokenizerOnString(tokenizer, tok, ref)
+
def testSpaceTokenizer(self):
self._testTokenizer(SpaceTokenizer(), "Hello world !", ["Hello", "world", "!"])
+ self._testDetokenizer(
+ SpaceTokenizer(),
+ [["Hello", "world", "!"], ["Test"], ["My", "name"]],
+ ["Hello world !", "Test", "My name"])
def testCharacterTokenizer(self):
- self._testTokenizer(CharacterTokenizer(), "a b", ["a", " ", "b"])
+ self._testTokenizer(CharacterTokenizer(), "a b", ["a", "▁", "b"])
+ self._testDetokenizer(CharacterTokenizer(), [["a", "▁", "b"]], ["a b"])
self._testTokenizer(CharacterTokenizer(), "你好,世界!", ["你", "好", ",", "世", "界", "!"])
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 15
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[TensorFlow]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"nose2"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
bleach==1.5.0
certifi==2021.5.30
dataclasses==0.8
enum34==1.1.10
html5lib==0.9999999
importlib-metadata==4.8.3
iniconfig==1.1.1
Markdown==3.3.7
nose2==0.13.0
numpy==1.19.5
-e git+https://github.com/OpenNMT/OpenNMT-tf.git@009a5922e29e343587d3b574087c536a347a4d04#egg=OpenNMT_tf
packaging==21.3
pluggy==1.0.0
protobuf==3.19.6
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
PyYAML==6.0.1
six==1.17.0
tensorflow==1.4.0
tensorflow-tensorboard==0.4.0
tomli==1.2.3
typing_extensions==4.1.1
Werkzeug==2.0.3
zipp==3.6.0
| name: OpenNMT-tf
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- bleach==1.5.0
- dataclasses==0.8
- enum34==1.1.10
- html5lib==0.9999999
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- markdown==3.3.7
- nose2==0.13.0
- numpy==1.19.5
- packaging==21.3
- pluggy==1.0.0
- protobuf==3.19.6
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pyyaml==6.0.1
- six==1.17.0
- tensorflow==1.4.0
- tensorflow-tensorboard==0.4.0
- tomli==1.2.3
- typing-extensions==4.1.1
- werkzeug==2.0.3
- zipp==3.6.0
prefix: /opt/conda/envs/OpenNMT-tf
| [
"opennmt/tests/tokenizer_test.py::TokenizerTest::testCharacterTokenizer",
"opennmt/tests/tokenizer_test.py::TokenizerTest::testSpaceTokenizer"
]
| []
| [
"opennmt/tests/tokenizer_test.py::TokenizerTest::test_session"
]
| []
| MIT License | 2,000 | [
"opennmt/tokenizers/__init__.py",
"docs/data.md",
"docs/index.rst",
"opennmt/utils/vocab.py",
"docs/tokenization.md",
"bin/detokenize_text.py",
"config/tokenization/sample.yml",
"third_party/learn_bpe.py",
".gitmodules",
"config/sample.yml",
"opennmt/utils/misc.py",
"opennmt/utils/evaluator.py",
"opennmt/models/sequence_to_sequence.py",
"config/tokenization/aggressive.yml",
"README.md",
"opennmt/inputters/text_inputter.py",
"opennmt/tokenizers/tokenizer.py",
"opennmt/tokenizers/opennmt_tokenizer.py",
"docs/package/opennmt.tokenizers.rst",
"docs/package/opennmt.tokenizers.opennmt_tokenizer.rst",
"third_party/OpenNMTTokenizer",
".travis.yml",
"CMakeLists.txt",
"bin/tokenize_text.py",
"bin/build_vocab.py"
]
| [
"opennmt/tokenizers/__init__.py",
"docs/data.md",
"docs/index.rst",
"opennmt/utils/vocab.py",
"docs/tokenization.md",
"bin/detokenize_text.py",
"config/tokenization/sample.yml",
"third_party/learn_bpe.py",
".gitmodules",
"config/sample.yml",
"opennmt/utils/misc.py",
"opennmt/utils/evaluator.py",
"opennmt/models/sequence_to_sequence.py",
"config/tokenization/aggressive.yml",
"README.md",
"opennmt/inputters/text_inputter.py",
"opennmt/tokenizers/tokenizer.py",
"opennmt/tokenizers/opennmt_tokenizer.py",
"docs/package/opennmt.tokenizers.rst",
"docs/package/opennmt.tokenizers.opennmt_tokenizer.rst",
"third_party/OpenNMTTokenizer",
".travis.yml",
"CMakeLists.txt",
"bin/tokenize_text.py",
"bin/build_vocab.py"
]
|
|
ucfopen__canvasapi-125 | 80a16b1773e1d07254b381c04add92861b47bb8d | 2018-01-03 18:02:14 | db3c377b68f2953e1618f4e4588cc2db8603841e | diff --git a/README.md b/README.md
index a9e5ece..29d9dee 100644
--- a/README.md
+++ b/README.md
@@ -92,8 +92,8 @@ Individual users can be pulled from the API as well:
# Retrieve a list of courses the user is enrolled in
>>> courses = user.get_courses()
-# Grab a different user by their SIS ID
->>> sis_user = canvas.get_user('some_user', 'sis_login_id')
+# Grab a different user by their login ID
+>>> login_id_user = canvas.get_user('some_user', 'login_id')
```
#### Paginated Lists
diff --git a/canvasapi/account.py b/canvasapi/account.py
index eb52ec0..f2dff76 100644
--- a/canvasapi/account.py
+++ b/canvasapi/account.py
@@ -4,7 +4,7 @@ from six import python_2_unicode_compatible
from canvasapi.canvas_object import CanvasObject
from canvasapi.grading_standard import GradingStandard
-from canvasapi.exceptions import RequiredFieldMissing
+from canvasapi.exceptions import CanvasException, RequiredFieldMissing
from canvasapi.paginated_list import PaginatedList
from canvasapi.rubric import Rubric
from canvasapi.util import combine_kwargs, obj_or_id
@@ -154,6 +154,29 @@ class Account(CanvasObject):
)
return AccountNotification(self._requester, response.json())
+ def delete(self):
+ """
+ Delete the current account
+
+ Note: Cannot delete an account with active courses or active
+ sub accounts. Cannot delete a root account.
+
+ :calls: `DELETE /api/v1/accounts/:account_id/sub_accounts/:id \
+ <https://canvas.beta.instructure.com/doc/api/accounts.html#method.sub_accounts.destroy>`_
+
+ :returns: True if successfully deleted; False otherwise.
+ :rtype: bool
+ """
+ if not hasattr(self, 'parent_account_id') or not self.parent_account_id:
+ raise CanvasException("Cannot delete a root account.")
+
+ response = self._requester.request(
+ 'DELETE',
+ 'accounts/{}/sub_accounts/{}'.format(self.parent_account_id, self.id)
+ )
+
+ return response.json().get('workflow_state') == 'deleted'
+
def delete_user(self, user):
"""
Delete a user record from a Canvas root account.
diff --git a/docs/getting-started.rst b/docs/getting-started.rst
index eebb59e..9159afe 100644
--- a/docs/getting-started.rst
+++ b/docs/getting-started.rst
@@ -67,8 +67,8 @@ User objects
# Retrieve a list of courses the user is enrolled in
>>> courses = user.get_courses()
- # Grab a different user by their SIS ID
- >>> sis_user = canvas.get_user('some_user', 'sis_login_id')
+ # Grab a different user by their login ID
+ >>> login_id_user = canvas.get_user('some_user', 'login_id')
Paginated Lists
~~~~~~~~~~~~~~~
| Canvas Release 2017-11-18
https://community.canvaslms.com/docs/DOC-13339#jive_content_id_API
The November 18th release of Canvas includes some changes to the API that we should keep an eye on. Of particular interest:
- subaccounts can now be deleted
- `sis_login_id` is deprecated; `login_id` should be used instead.
This gives us two action items (a usage sketch follows the list):
- [x] Add support for subaccount deletion (https://canvas.beta.instructure.com/doc/api/accounts.html#method.sub_accounts.destroy)
- [x] Change references of `sis_login_id` in our documentation to `login_id`
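A minimal usage sketch covering both items, based on the patch above (the URL, key, and account IDs are placeholders):

```python
from canvasapi import Canvas

canvas = Canvas("https://example.instructure.com", "API_KEY")

# Item 1: subaccount deletion; delete() raises CanvasException on a root account
# and returns True once workflow_state comes back as "deleted".
account = canvas.get_account(1)
subaccount = account.create_subaccount({'name': 'Temp Subaccount'})
assert subaccount.delete()

# Item 2: prefer "login_id" over the deprecated "sis_login_id".
user = canvas.get_user('some_user', 'login_id')
```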
| ucfopen/canvasapi | diff --git a/tests/fixtures/account.json b/tests/fixtures/account.json
index 0044b61..8ee4012 100644
--- a/tests/fixtures/account.json
+++ b/tests/fixtures/account.json
@@ -83,7 +83,8 @@
"default_storage_quota_mb": 500,
"default_user_storage_quota_mb": 50,
"default_group_storage_quota_mb": 50,
- "default_time_zone": "America/New_York"
+ "default_time_zone": "America/New_York",
+ "workflow_state": "active"
},
"status_code": 200
},
@@ -97,6 +98,22 @@
},
"status_code": 200
},
+ "delete_subaccount": {
+ "method": "DELETE",
+ "endpoint": "accounts/1/sub_accounts/101",
+ "data": {
+ "id": 101,
+ "name": "New Subaccount",
+ "parent_account_id": 1,
+ "root_account_id": 1,
+ "default_storage_quota_mb": 500,
+ "default_user_storage_quota_mb": 50,
+ "default_group_storage_quota_mb": 50,
+ "default_time_zone": "America/New_York",
+ "workflow_state": "deleted"
+ },
+ "status_code": 200
+ },
"delete_user": {
"method": "DELETE",
"endpoint": "accounts/1/users/1",
diff --git a/tests/fixtures/course.json b/tests/fixtures/course.json
index 16cf256..1aa9eb8 100644
--- a/tests/fixtures/course.json
+++ b/tests/fixtures/course.json
@@ -357,11 +357,11 @@
},
"get_user_id_type": {
"method": "GET",
- "endpoint": "courses/1/users/sis_login_id:SISLOGIN",
+ "endpoint": "courses/1/users/login_id:LOGINID",
"data": {
"id": 123456,
"name": "Abby Smith",
- "sis_login_id": "SISLOGIN"
+ "login_id": "LOGINID"
},
"status_code": 200
},
diff --git a/tests/test_account.py b/tests/test_account.py
index dac1e25..cb82092 100644
--- a/tests/test_account.py
+++ b/tests/test_account.py
@@ -12,7 +12,7 @@ from canvasapi.course import Course
from canvasapi.enrollment import Enrollment
from canvasapi.enrollment_term import EnrollmentTerm
from canvasapi.external_tool import ExternalTool
-from canvasapi.exceptions import RequiredFieldMissing
+from canvasapi.exceptions import CanvasException, RequiredFieldMissing
from canvasapi.grading_standard import GradingStandard
from canvasapi.group import Group, GroupCategory
from canvasapi.login import Login
@@ -133,6 +133,18 @@ class TestAccount(unittest.TestCase):
with self.assertRaises(RequiredFieldMissing):
self.account.create_notification({})
+ # delete()
+ def test_delete(self, m):
+ register_uris({'account': ['create_subaccount', 'delete_subaccount']}, m)
+
+ subaccount = self.account.create_subaccount({'name': 'New Subaccount'})
+
+ self.assertTrue(subaccount.delete())
+
+ def test_delete_root_account(self, m):
+ with self.assertRaises(CanvasException):
+ self.account.delete()
+
# delete_user()
def test_delete_user_id(self, m):
register_uris({'account': ['delete_user']}, m)
diff --git a/tests/test_course.py b/tests/test_course.py
index 11dcc13..888ca59 100644
--- a/tests/test_course.py
+++ b/tests/test_course.py
@@ -95,7 +95,7 @@ class TestCourse(unittest.TestCase):
def test_get_user_id_type(self, m):
register_uris({'course': ['get_user_id_type']}, m)
- user = self.course.get_user("SISLOGIN", "sis_login_id")
+ user = self.course.get_user("LOGINID", "login_id")
self.assertIsInstance(user, User)
self.assertTrue(hasattr(user, 'name'))
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 3
} | 0.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"requests_mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/ucfopen/canvasapi.git@80a16b1773e1d07254b381c04add92861b47bb8d#egg=canvasapi
certifi==2025.1.31
charset-normalizer==3.4.1
coverage==7.8.0
exceptiongroup==1.2.2
execnet==2.1.1
idna==3.10
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytest-cov==6.0.0
pytest-xdist==3.6.1
pytz==2025.2
requests==2.32.3
requests-mock==1.12.1
six==1.17.0
tomli==2.2.1
urllib3==2.3.0
| name: canvasapi
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- coverage==7.8.0
- exceptiongroup==1.2.2
- execnet==2.1.1
- idna==3.10
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-cov==6.0.0
- pytest-xdist==3.6.1
- pytz==2025.2
- requests==2.32.3
- requests-mock==1.12.1
- six==1.17.0
- tomli==2.2.1
- urllib3==2.3.0
prefix: /opt/conda/envs/canvasapi
| [
"tests/test_account.py::TestAccount::test_delete",
"tests/test_account.py::TestAccount::test_delete_root_account"
]
| []
| [
"tests/test_account.py::TestAccount::test__str__",
"tests/test_account.py::TestAccount::test_activate_role",
"tests/test_account.py::TestAccount::test_add_authentication_providers",
"tests/test_account.py::TestAccount::test_add_grading_standards",
"tests/test_account.py::TestAccount::test_add_grading_standards_empty_list",
"tests/test_account.py::TestAccount::test_add_grading_standards_missing_name_key",
"tests/test_account.py::TestAccount::test_add_grading_standards_missing_value_key",
"tests/test_account.py::TestAccount::test_add_grading_standards_non_dict_list",
"tests/test_account.py::TestAccount::test_close_notification_for_user_id",
"tests/test_account.py::TestAccount::test_close_notification_for_user_obj",
"tests/test_account.py::TestAccount::test_create_account",
"tests/test_account.py::TestAccount::test_create_course",
"tests/test_account.py::TestAccount::test_create_course_missing_field",
"tests/test_account.py::TestAccount::test_create_enrollment_term",
"tests/test_account.py::TestAccount::test_create_external_tool",
"tests/test_account.py::TestAccount::test_create_group_category",
"tests/test_account.py::TestAccount::test_create_notification",
"tests/test_account.py::TestAccount::test_create_notification_missing_field",
"tests/test_account.py::TestAccount::test_create_role",
"tests/test_account.py::TestAccount::test_create_subaccount",
"tests/test_account.py::TestAccount::test_create_user",
"tests/test_account.py::TestAccount::test_create_user_login",
"tests/test_account.py::TestAccount::test_create_user_login_fail_on_login_unique_id",
"tests/test_account.py::TestAccount::test_create_user_login_fail_on_user_id",
"tests/test_account.py::TestAccount::test_create_user_missing_field",
"tests/test_account.py::TestAccount::test_deactivate_role",
"tests/test_account.py::TestAccount::test_delete_user_id",
"tests/test_account.py::TestAccount::test_delete_user_obj",
"tests/test_account.py::TestAccount::test_get_authentication_provider",
"tests/test_account.py::TestAccount::test_get_courses",
"tests/test_account.py::TestAccount::test_get_department_level_grade_data_completed",
"tests/test_account.py::TestAccount::test_get_department_level_grade_data_current",
"tests/test_account.py::TestAccount::test_get_department_level_grade_data_with_given_term",
"tests/test_account.py::TestAccount::test_get_department_level_participation_data_completed",
"tests/test_account.py::TestAccount::test_get_department_level_participation_data_current",
"tests/test_account.py::TestAccount::test_get_department_level_participation_data_with_given_term",
"tests/test_account.py::TestAccount::test_get_department_level_statistics_completed",
"tests/test_account.py::TestAccount::test_get_department_level_statistics_current",
"tests/test_account.py::TestAccount::test_get_department_level_statistics_with_given_term",
"tests/test_account.py::TestAccount::test_get_enrollment",
"tests/test_account.py::TestAccount::test_get_external_tool",
"tests/test_account.py::TestAccount::test_get_external_tools",
"tests/test_account.py::TestAccount::test_get_grading_standards",
"tests/test_account.py::TestAccount::test_get_index_of_reports",
"tests/test_account.py::TestAccount::test_get_outcome_group",
"tests/test_account.py::TestAccount::test_get_outcome_groups_in_context",
"tests/test_account.py::TestAccount::test_get_outcome_links_in_context",
"tests/test_account.py::TestAccount::test_get_reports",
"tests/test_account.py::TestAccount::test_get_role",
"tests/test_account.py::TestAccount::test_get_root_outcome_group",
"tests/test_account.py::TestAccount::test_get_rubric",
"tests/test_account.py::TestAccount::test_get_single_grading_standard",
"tests/test_account.py::TestAccount::test_get_subaccounts",
"tests/test_account.py::TestAccount::test_get_user_notifications_id",
"tests/test_account.py::TestAccount::test_get_user_notifications_obj",
"tests/test_account.py::TestAccount::test_get_users",
"tests/test_account.py::TestAccount::test_list_authentication_providers",
"tests/test_account.py::TestAccount::test_list_enrollment_terms",
"tests/test_account.py::TestAccount::test_list_group_categories",
"tests/test_account.py::TestAccount::test_list_groups",
"tests/test_account.py::TestAccount::test_list_roles",
"tests/test_account.py::TestAccount::test_list_rubrics",
"tests/test_account.py::TestAccount::test_list_user_logins",
"tests/test_account.py::TestAccount::test_show_account_auth_settings",
"tests/test_account.py::TestAccount::test_update",
"tests/test_account.py::TestAccount::test_update_account_auth_settings",
"tests/test_account.py::TestAccount::test_update_fail",
"tests/test_account.py::TestAccount::test_update_role",
"tests/test_course.py::TestCourse::test__str__",
"tests/test_course.py::TestCourse::test_add_grading_standards",
"tests/test_course.py::TestCourse::test_add_grading_standards_empty_list",
"tests/test_course.py::TestCourse::test_add_grading_standards_missing_name_key",
"tests/test_course.py::TestCourse::test_add_grading_standards_missing_value_key",
"tests/test_course.py::TestCourse::test_add_grading_standards_non_dict_list",
"tests/test_course.py::TestCourse::test_conclude",
"tests/test_course.py::TestCourse::test_course_files",
"tests/test_course.py::TestCourse::test_create_assignment",
"tests/test_course.py::TestCourse::test_create_assignment_fail",
"tests/test_course.py::TestCourse::test_create_assignment_group",
"tests/test_course.py::TestCourse::test_create_course_section",
"tests/test_course.py::TestCourse::test_create_discussion_topic",
"tests/test_course.py::TestCourse::test_create_external_feed",
"tests/test_course.py::TestCourse::test_create_external_tool",
"tests/test_course.py::TestCourse::test_create_folder",
"tests/test_course.py::TestCourse::test_create_group_category",
"tests/test_course.py::TestCourse::test_create_module",
"tests/test_course.py::TestCourse::test_create_module_fail",
"tests/test_course.py::TestCourse::test_create_page",
"tests/test_course.py::TestCourse::test_create_page_fail",
"tests/test_course.py::TestCourse::test_create_quiz",
"tests/test_course.py::TestCourse::test_create_quiz_fail",
"tests/test_course.py::TestCourse::test_delete",
"tests/test_course.py::TestCourse::test_delete_external_feed",
"tests/test_course.py::TestCourse::test_edit_front_page",
"tests/test_course.py::TestCourse::test_enroll_user",
"tests/test_course.py::TestCourse::test_get_assignment",
"tests/test_course.py::TestCourse::test_get_assignment_group",
"tests/test_course.py::TestCourse::test_get_assignments",
"tests/test_course.py::TestCourse::test_get_course_level_assignment_data",
"tests/test_course.py::TestCourse::test_get_course_level_participation_data",
"tests/test_course.py::TestCourse::test_get_course_level_student_summary_data",
"tests/test_course.py::TestCourse::test_get_discussion_topic",
"tests/test_course.py::TestCourse::test_get_discussion_topics",
"tests/test_course.py::TestCourse::test_get_enrollments",
"tests/test_course.py::TestCourse::test_get_external_tool",
"tests/test_course.py::TestCourse::test_get_external_tools",
"tests/test_course.py::TestCourse::test_get_file",
"tests/test_course.py::TestCourse::test_get_folder",
"tests/test_course.py::TestCourse::test_get_full_discussion_topic",
"tests/test_course.py::TestCourse::test_get_grading_standards",
"tests/test_course.py::TestCourse::test_get_module",
"tests/test_course.py::TestCourse::test_get_modules",
"tests/test_course.py::TestCourse::test_get_outcome_group",
"tests/test_course.py::TestCourse::test_get_outcome_groups_in_context",
"tests/test_course.py::TestCourse::test_get_outcome_links_in_context",
"tests/test_course.py::TestCourse::test_get_outcome_result_rollups",
"tests/test_course.py::TestCourse::test_get_outcome_results",
"tests/test_course.py::TestCourse::test_get_page",
"tests/test_course.py::TestCourse::test_get_pages",
"tests/test_course.py::TestCourse::test_get_quiz",
"tests/test_course.py::TestCourse::test_get_quiz_fail",
"tests/test_course.py::TestCourse::test_get_quizzes",
"tests/test_course.py::TestCourse::test_get_recent_students",
"tests/test_course.py::TestCourse::test_get_root_outcome_group",
"tests/test_course.py::TestCourse::test_get_rubric",
"tests/test_course.py::TestCourse::test_get_section",
"tests/test_course.py::TestCourse::test_get_settings",
"tests/test_course.py::TestCourse::test_get_single_grading_standard",
"tests/test_course.py::TestCourse::test_get_submission",
"tests/test_course.py::TestCourse::test_get_user",
"tests/test_course.py::TestCourse::test_get_user_id_type",
"tests/test_course.py::TestCourse::test_get_user_in_a_course_level_assignment_data",
"tests/test_course.py::TestCourse::test_get_user_in_a_course_level_messaging_data",
"tests/test_course.py::TestCourse::test_get_user_in_a_course_level_participation_data",
"tests/test_course.py::TestCourse::test_get_users",
"tests/test_course.py::TestCourse::test_list_assignment_groups",
"tests/test_course.py::TestCourse::test_list_external_feeds",
"tests/test_course.py::TestCourse::test_list_folders",
"tests/test_course.py::TestCourse::test_list_gradeable_students",
"tests/test_course.py::TestCourse::test_list_group_categories",
"tests/test_course.py::TestCourse::test_list_groups",
"tests/test_course.py::TestCourse::test_list_multiple_submissions",
"tests/test_course.py::TestCourse::test_list_multiple_submissions_grouped_param",
"tests/test_course.py::TestCourse::test_list_rubrics",
"tests/test_course.py::TestCourse::test_list_sections",
"tests/test_course.py::TestCourse::test_list_submissions",
"tests/test_course.py::TestCourse::test_list_tabs",
"tests/test_course.py::TestCourse::test_mark_submission_as_read",
"tests/test_course.py::TestCourse::test_mark_submission_as_unread",
"tests/test_course.py::TestCourse::test_preview_html",
"tests/test_course.py::TestCourse::test_reorder_pinned_topics",
"tests/test_course.py::TestCourse::test_reorder_pinned_topics_comma_separated_string",
"tests/test_course.py::TestCourse::test_reorder_pinned_topics_invalid_input",
"tests/test_course.py::TestCourse::test_reorder_pinned_topics_tuple",
"tests/test_course.py::TestCourse::test_reset",
"tests/test_course.py::TestCourse::test_show_front_page",
"tests/test_course.py::TestCourse::test_subit_assignment_fail",
"tests/test_course.py::TestCourse::test_submit_assignment",
"tests/test_course.py::TestCourse::test_update",
"tests/test_course.py::TestCourse::test_update_settings",
"tests/test_course.py::TestCourse::test_update_submission",
"tests/test_course.py::TestCourse::test_update_tab",
"tests/test_course.py::TestCourse::test_upload",
"tests/test_course.py::TestCourseNickname::test__str__",
"tests/test_course.py::TestCourseNickname::test_remove"
]
| []
| MIT License | 2,001 | [
"canvasapi/account.py",
"README.md",
"docs/getting-started.rst"
]
| [
"canvasapi/account.py",
"README.md",
"docs/getting-started.rst"
]
|
|
pytorch__ignite-47 | 7e6a36ed94cb87b3ee26d46a563141a2dfc1bd43 | 2018-01-03 19:50:11 | 7e6a36ed94cb87b3ee26d46a563141a2dfc1bd43 | diff --git a/README.rst b/README.rst
index ee56c8de..99f6efd0 100644
--- a/README.rst
+++ b/README.rst
@@ -14,6 +14,7 @@ Ignite is a high-level library to help with training neural networks in PyTorch.
- `Training & Validation History`_
- `Events & Event Handlers`_
- `Logging`_
+ - `Metrics`_
- `Examples`_
- `How does this compare to Torchnet?`_
- `Contributing`_
@@ -128,6 +129,16 @@ Ignite uses `python's standard library logging module <https://docs.python.org/2
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.INFO)
+Metrics
++++++++
+Ignite supports several metrics that can be used to assess the performance of a given model; a usage sketch follows this hunk. The metrics currently available in :code:`ignite` are:
+
+- :code:`binary_accuracy` : This takes a :code:`History` object (either :code:`training_history` or :code:`validation_history`) and an optional callable transform and computes the binary accuracy which is 1 if the values are equal or 0 otherwise. This is generally used for binary classification tasks
+- :code:`categorical_accuracy` : This is the :code:`binary_accuracy` equivalent for multi-class classification where number of classes are greater than 2.
+- :code:`top_k_categorical_accuracy` : This computes the Top K classification accuracy, which is a popular mode of evaluating models on larger datasets with higher number of classes. The semantics are similar to :code:`categorical_accuracy` except there is an additional argument for the value of :code:`k`
+- :code:`mean_squared_error` : Generally used in regression tasks, this computes the sum of squared deviations between the predicted value and the actual value for a given input datapoint. This function takes a :code:`History` object and an optional callable transform and computes the mean squared error. The square root of this gives the root mean squared error (RMSE).
+- :code:`mean_absolute_error` : This is similar to the :code:`mean_squared_error` function, but instead computes the sum of absolute deviations between the predicted value and the actual value for a given input datapoint.
+
Examples
++++++++
At present, there is an example of how to use ignite to train a digit classifier on MNIST in `examples/
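A hedged sketch of the metrics listed in the README hunk above. The import path is an assumption (the README only names the functions); each call takes a `History` plus an optional transform, per the text:

```python
# Import path assumed -- adjust to wherever these functions live in your ignite version.
from ignite.metrics import binary_accuracy, mean_squared_error, top_k_categorical_accuracy

# Each history entry is assumed to hold a (y_pred, y) pair; the optional
# transform argument exists to adapt other layouts.
acc = binary_accuracy(trainer.validation_history)
mse = mean_squared_error(trainer.validation_history)
top5 = top_k_categorical_accuracy(trainer.validation_history, k=5)
```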
diff --git a/ignite/trainer/trainer.py b/ignite/trainer/trainer.py
index 6e21dd11..b1a93246 100644
--- a/ignite/trainer/trainer.py
+++ b/ignite/trainer/trainer.py
@@ -103,6 +103,29 @@ class Trainer(object):
self._event_handlers[event_name].append((handler, args, kwargs))
self._logger.debug("added handler for event % ", event_name)
+ def on(self, event_name, *args, **kwargs):
+ """
+ Decorator shortcut for add_event_handler
+
+ Parameters
+ ----------
+ event_name: enum
+ event from ignite.trainer.TrainingEvents to attach the
+ handler to
+ args:
+ optional args to be passed to `handler`
+ kwargs:
+ optional keyword args to be passed to `handler`
+
+ Returns
+ -------
+ None
+ """
+ def decorator(f):
+ self.add_event_handler(event_name, f, *args, **kwargs)
+ return f
+ return decorator
+
def _fire_event(self, event_name):
if event_name in self._event_handlers.keys():
self._logger.debug("firing handlers for event %s ", event_name)
| Add decorators to attach event handlers
See #37 for details | pytorch/ignite | diff --git a/tests/ignite/trainer/test_trainer.py b/tests/ignite/trainer/test_trainer.py
index 1bb8cda0..e784a897 100644
--- a/tests/ignite/trainer/test_trainer.py
+++ b/tests/ignite/trainer/test_trainer.py
@@ -410,3 +410,32 @@ def test_create_supervised():
assert loss == approx(17.0)
assert model.weight.data[0, 0] == approx(1.3)
assert model.bias.data[0] == approx(0.8)
+
+
+def test_on_decorator():
+ max_epochs = 5
+ num_batches = 3
+ training_data = _create_mock_data_loader(max_epochs, num_batches)
+
+ trainer = Trainer(MagicMock(return_value=1), MagicMock())
+
+ class Counter(object):
+ def __init__(self, count=0):
+ self.count = count
+
+ started_counter = Counter()
+
+ @trainer.on(TrainingEvents.TRAINING_ITERATION_STARTED, started_counter)
+ def handle_training_iteration_started(trainer, started_counter):
+ started_counter.count += 1
+
+ completed_counter = Counter()
+
+ @trainer.on(TrainingEvents.TRAINING_ITERATION_COMPLETED, completed_counter)
+ def handle_training_iteration_completed(trainer, completed_counter):
+ completed_counter.count += 1
+
+ trainer.run(training_data, max_epochs=max_epochs)
+
+ assert started_counter.count == 15
+ assert completed_counter.count == 15
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_issue_reference",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 3,
"test_score": 2
},
"num_modified_files": 2
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"numpy",
"mock",
"pytest",
"codecov",
"pytest-cov",
"tqdm",
"scikit-learn",
"visdom",
"torchvision",
"tensorboardX",
"gym"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
charset-normalizer==2.0.12
cloudpickle==2.2.1
codecov==2.1.13
coverage==6.2
dataclasses==0.8
decorator==4.4.2
enum34==1.1.10
gym==0.26.2
gym-notices==0.0.8
idna==3.10
-e git+https://github.com/pytorch/ignite.git@7e6a36ed94cb87b3ee26d46a563141a2dfc1bd43#egg=ignite
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
importlib-resources==5.4.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
joblib==1.1.1
jsonpatch==1.32
jsonpointer==2.3
mock==5.2.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
networkx==2.5.1
numpy==1.19.5
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
Pillow==8.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
protobuf==4.21.0
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-cov==4.0.0
requests==2.27.1
scikit-learn==0.24.2
scipy==1.5.4
six==1.17.0
tensorboardX==2.6.2.2
threadpoolctl==3.1.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
torch==1.10.1
torchvision==0.11.2
tornado==6.1
tqdm==4.64.1
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.26.20
visdom==0.2.4
websocket-client==1.3.1
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: ignite
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- charset-normalizer==2.0.12
- cloudpickle==2.2.1
- codecov==2.1.13
- coverage==6.2
- dataclasses==0.8
- decorator==4.4.2
- enum34==1.1.10
- gym==0.26.2
- gym-notices==0.0.8
- idna==3.10
- importlib-resources==5.4.0
- joblib==1.1.1
- jsonpatch==1.32
- jsonpointer==2.3
- mock==5.2.0
- networkx==2.5.1
- numpy==1.19.5
- pillow==8.4.0
- protobuf==4.21.0
- pytest-cov==4.0.0
- requests==2.27.1
- scikit-learn==0.24.2
- scipy==1.5.4
- six==1.17.0
- tensorboardx==2.6.2.2
- threadpoolctl==3.1.0
- tomli==1.2.3
- torch==1.10.1
- torchvision==0.11.2
- tornado==6.1
- tqdm==4.64.1
- urllib3==1.26.20
- visdom==0.2.4
- websocket-client==1.3.1
prefix: /opt/conda/envs/ignite
| [
"tests/ignite/trainer/test_trainer.py::test_on_decorator"
]
| [
"tests/ignite/trainer/test_trainer.py::test_create_supervised"
]
| [
"tests/ignite/trainer/test_trainer.py::test_adding_handler_for_non_existent_event_throws_error",
"tests/ignite/trainer/test_trainer.py::test_exception_handler_called_on_error",
"tests/ignite/trainer/test_trainer.py::test_adding_multiple_event_handlers",
"tests/ignite/trainer/test_trainer.py::test_args_and_kwargs_are_passed_to_event",
"tests/ignite/trainer/test_trainer.py::test_current_epoch_counter_increases_every_epoch",
"tests/ignite/trainer/test_trainer.py::test_current_iteration_counter_increases_every_iteration",
"tests/ignite/trainer/test_trainer.py::test_current_validation_iteration_counter_increases_every_iteration",
"tests/ignite/trainer/test_trainer.py::test_validate_is_not_called_by_default",
"tests/ignite/trainer/test_trainer.py::test_stopping_criterion_is_max_epochs",
"tests/ignite/trainer/test_trainer.py::test_terminate_at_end_of_epoch_stops_training",
"tests/ignite/trainer/test_trainer.py::test_terminate_at_start_of_epoch_stops_training_after_completing_iteration",
"tests/ignite/trainer/test_trainer.py::test_terminate_stops_training_mid_epoch",
"tests/ignite/trainer/test_trainer.py::test_terminate_stops_trainer_when_called_during_validation",
"tests/ignite/trainer/test_trainer.py::test_terminate_after_training_iteration_skips_validation_run",
"tests/ignite/trainer/test_trainer.py::test_training_iteration_events_are_fired",
"tests/ignite/trainer/test_trainer.py::test_validation_iteration_events_are_fired",
"tests/ignite/trainer/test_trainer.py::test_validation_iteration_events_are_fired_when_validate_is_called_explicitly"
]
| []
| BSD 3-Clause "New" or "Revised" License | 2,002 | [
"README.rst",
"ignite/trainer/trainer.py"
]
| [
"README.rst",
"ignite/trainer/trainer.py"
]
|
|
Azure__WALinuxAgent-986 | 30e638ddab04bd4ec473fe8369a86f64e717776e | 2018-01-03 23:14:09 | 6e9b985c1d7d564253a1c344bab01b45093103cd | diff --git a/azurelinuxagent/ga/env.py b/azurelinuxagent/ga/env.py
index 0456cb06..26487818 100644
--- a/azurelinuxagent/ga/env.py
+++ b/azurelinuxagent/ga/env.py
@@ -86,7 +86,7 @@ class EnvHandler(object):
version=CURRENT_VERSION,
op=WALAEventOperation.Firewall,
is_success=success,
- log_event=True)
+ log_event=False)
timeout = conf.get_root_device_scsi_timeout()
if timeout is not None:
diff --git a/azurelinuxagent/ga/monitor.py b/azurelinuxagent/ga/monitor.py
index e3ef292f..9b90aa7b 100644
--- a/azurelinuxagent/ga/monitor.py
+++ b/azurelinuxagent/ga/monitor.py
@@ -203,7 +203,8 @@ class MonitorHandler(object):
version=CURRENT_VERSION,
op=WALAEventOperation.HeartBeat,
is_success=True,
- message=msg)
+ message=msg,
+ log_event=False)
counter += 1
@@ -222,7 +223,7 @@ class MonitorHandler(object):
version=CURRENT_VERSION,
op=WALAEventOperation.HttpErrors,
is_success=False,
- msg=msg)
+ message=msg)
try:
self.collect_and_send_events()
diff --git a/azurelinuxagent/ga/update.py b/azurelinuxagent/ga/update.py
index 26009299..1b6f913e 100644
--- a/azurelinuxagent/ga/update.py
+++ b/azurelinuxagent/ga/update.py
@@ -295,7 +295,7 @@ class UpdateHandler(object):
duration=elapsed_milliseconds(utc_start),
message="Incarnation {0}".format(
exthandlers_handler.last_etag),
- log_event=True)
+ log_event=False)
time.sleep(GOAL_STATE_INTERVAL)
diff --git a/azurelinuxagent/pa/provision/cloudinit.py b/azurelinuxagent/pa/provision/cloudinit.py
index 22c3f9ca..fa47799a 100644
--- a/azurelinuxagent/pa/provision/cloudinit.py
+++ b/azurelinuxagent/pa/provision/cloudinit.py
@@ -64,7 +64,7 @@ class CloudInitProvisionHandler(ProvisionHandler):
logger.info("Finished provisioning")
self.report_ready(thumbprint)
- self.report_event("Provision succeed",
+ self.report_event("Provisioning with cloud-init succeeded",
is_success=True,
duration=elapsed_milliseconds(utc_start))
self.report_event(self.create_guest_state_telemetry_messsage(),
diff --git a/azurelinuxagent/pa/provision/default.py b/azurelinuxagent/pa/provision/default.py
index 5d6f1565..44e171b4 100644
--- a/azurelinuxagent/pa/provision/default.py
+++ b/azurelinuxagent/pa/provision/default.py
@@ -89,7 +89,7 @@ class ProvisionHandler(object):
self.write_provisioned()
- self.report_event("Provision succeeded",
+ self.report_event("Provisioning succeeded",
is_success=True,
duration=elapsed_milliseconds(utc_start))
diff --git a/azurelinuxagent/pa/provision/factory.py b/azurelinuxagent/pa/provision/factory.py
index d87765f3..9e88618f 100644
--- a/azurelinuxagent/pa/provision/factory.py
+++ b/azurelinuxagent/pa/provision/factory.py
@@ -16,9 +16,7 @@
#
import azurelinuxagent.common.conf as conf
-import azurelinuxagent.common.logger as logger
-from azurelinuxagent.common.utils.textutil import Version
from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, \
DISTRO_FULL_NAME
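The two hunks above make the success messages differ per provisioning path; a small sketch of how a telemetry consumer could now tell them apart (the helper is illustrative, the strings come straight from the patched report_event calls):
```
CLOUD_INIT_SUCCESS = "Provisioning with cloud-init succeeded"
AGENT_SUCCESS = "Provisioning succeeded"

def provisioning_path(message):
    # Map the event message back to the provisioning path that emitted it.
    if message == CLOUD_INIT_SUCCESS:
        return "cloud-init"
    if message == AGENT_SUCCESS:
        return "agent"
    return "unknown"

print(provisioning_path("Provisioning succeeded"))  # -> agent
```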
| Update provisioning signal
We use the same signal today to indicate provisioning success/failure for both cloud-init and agent-based provisioning. We should clarify how to distinguish the two. | Azure/WALinuxAgent | diff --git a/tests/pa/test_provision.py b/tests/pa/test_provision.py
index ab0a9102..2c2d2c9b 100644
--- a/tests/pa/test_provision.py
+++ b/tests/pa/test_provision.py
@@ -151,7 +151,7 @@ class TestProvision(AgentTestCase):
ph.run()
- call1 = call("Provision succeeded", duration=ANY, is_success=True)
+ call1 = call("Provisioning succeeded", duration=ANY, is_success=True)
call2 = call(ANY, is_success=True, operation=WALAEventOperation.GuestState)
ph.report_event.assert_has_calls([call1, call2])
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 6
} | 2.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pyasn1",
"nose",
"nose-cov",
"pytest"
],
"pre_install": null,
"python": "3.4",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
cov-core==1.15.0
coverage==6.2
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
nose==1.3.7
nose-cov==1.6
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyasn1==0.5.1
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
-e git+https://github.com/Azure/WALinuxAgent.git@30e638ddab04bd4ec473fe8369a86f64e717776e#egg=WALinuxAgent
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: WALinuxAgent
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- cov-core==1.15.0
- coverage==6.2
- nose==1.3.7
- nose-cov==1.6
- pyasn1==0.5.1
prefix: /opt/conda/envs/WALinuxAgent
| [
"tests/pa/test_provision.py::TestProvision::test_provision_telemetry_success"
]
| []
| [
"tests/pa/test_provision.py::TestProvision::test_customdata",
"tests/pa/test_provision.py::TestProvision::test_is_provisioned_is_provisioned",
"tests/pa/test_provision.py::TestProvision::test_is_provisioned_not_deprovisioned",
"tests/pa/test_provision.py::TestProvision::test_is_provisioned_not_provisioned",
"tests/pa/test_provision.py::TestProvision::test_provision",
"tests/pa/test_provision.py::TestProvision::test_provision_telemetry_fail",
"tests/pa/test_provision.py::TestProvision::test_provisioning_is_skipped_when_not_enabled"
]
| []
| Apache License 2.0 | 2,003 | [
"azurelinuxagent/pa/provision/cloudinit.py",
"azurelinuxagent/ga/update.py",
"azurelinuxagent/ga/env.py",
"azurelinuxagent/pa/provision/default.py",
"azurelinuxagent/ga/monitor.py",
"azurelinuxagent/pa/provision/factory.py"
]
| [
"azurelinuxagent/pa/provision/cloudinit.py",
"azurelinuxagent/ga/update.py",
"azurelinuxagent/ga/env.py",
"azurelinuxagent/pa/provision/default.py",
"azurelinuxagent/ga/monitor.py",
"azurelinuxagent/pa/provision/factory.py"
]
|
|
Azure__WALinuxAgent-987 | 30e638ddab04bd4ec473fe8369a86f64e717776e | 2018-01-03 23:27:34 | 6e9b985c1d7d564253a1c344bab01b45093103cd | diff --git a/azurelinuxagent/common/protocol/restapi.py b/azurelinuxagent/common/protocol/restapi.py
index 275cedb0..540ec5d9 100644
--- a/azurelinuxagent/common/protocol/restapi.py
+++ b/azurelinuxagent/common/protocol/restapi.py
@@ -321,9 +321,9 @@ class Protocol(DataContract):
def get_artifacts_profile(self):
raise NotImplementedError()
- def download_ext_handler_pkg(self, uri, headers=None):
+ def download_ext_handler_pkg(self, uri, headers=None, use_proxy=True):
try:
- resp = restutil.http_get(uri, use_proxy=True, headers=headers)
+ resp = restutil.http_get(uri, headers=headers, use_proxy=use_proxy)
if restutil.request_succeeded(resp):
return resp.read()
except Exception as e:
diff --git a/azurelinuxagent/common/protocol/wire.py b/azurelinuxagent/common/protocol/wire.py
index a92e0b89..2dc5297c 100644
--- a/azurelinuxagent/common/protocol/wire.py
+++ b/azurelinuxagent/common/protocol/wire.py
@@ -172,7 +172,7 @@ class WireProtocol(Protocol):
logger.warn("Download did not succeed, falling back to host plugin")
host = self.client.get_host_plugin()
uri, headers = host.get_artifact_request(uri, host.manifest_uri)
- package = super(WireProtocol, self).download_ext_handler_pkg(uri, headers=headers)
+ package = super(WireProtocol, self).download_ext_handler_pkg(uri, headers=headers, use_proxy=False)
return package
def report_provision_status(self, provision_status):
diff --git a/azurelinuxagent/ga/env.py b/azurelinuxagent/ga/env.py
index 0456cb06..26487818 100644
--- a/azurelinuxagent/ga/env.py
+++ b/azurelinuxagent/ga/env.py
@@ -86,7 +86,7 @@ class EnvHandler(object):
version=CURRENT_VERSION,
op=WALAEventOperation.Firewall,
is_success=success,
- log_event=True)
+ log_event=False)
timeout = conf.get_root_device_scsi_timeout()
if timeout is not None:
diff --git a/azurelinuxagent/ga/monitor.py b/azurelinuxagent/ga/monitor.py
index e3ef292f..9b90aa7b 100644
--- a/azurelinuxagent/ga/monitor.py
+++ b/azurelinuxagent/ga/monitor.py
@@ -203,7 +203,8 @@ class MonitorHandler(object):
version=CURRENT_VERSION,
op=WALAEventOperation.HeartBeat,
is_success=True,
- message=msg)
+ message=msg,
+ log_event=False)
counter += 1
@@ -222,7 +223,7 @@ class MonitorHandler(object):
version=CURRENT_VERSION,
op=WALAEventOperation.HttpErrors,
is_success=False,
- msg=msg)
+ message=msg)
try:
self.collect_and_send_events()
diff --git a/azurelinuxagent/ga/update.py b/azurelinuxagent/ga/update.py
index 26009299..1b6f913e 100644
--- a/azurelinuxagent/ga/update.py
+++ b/azurelinuxagent/ga/update.py
@@ -295,7 +295,7 @@ class UpdateHandler(object):
duration=elapsed_milliseconds(utc_start),
message="Incarnation {0}".format(
exthandlers_handler.last_etag),
- log_event=True)
+ log_event=False)
time.sleep(GOAL_STATE_INTERVAL)
diff --git a/azurelinuxagent/pa/provision/cloudinit.py b/azurelinuxagent/pa/provision/cloudinit.py
index 22c3f9ca..fa47799a 100644
--- a/azurelinuxagent/pa/provision/cloudinit.py
+++ b/azurelinuxagent/pa/provision/cloudinit.py
@@ -64,7 +64,7 @@ class CloudInitProvisionHandler(ProvisionHandler):
logger.info("Finished provisioning")
self.report_ready(thumbprint)
- self.report_event("Provision succeed",
+ self.report_event("Provisioning with cloud-init succeeded",
is_success=True,
duration=elapsed_milliseconds(utc_start))
self.report_event(self.create_guest_state_telemetry_messsage(),
diff --git a/azurelinuxagent/pa/provision/default.py b/azurelinuxagent/pa/provision/default.py
index 5d6f1565..44e171b4 100644
--- a/azurelinuxagent/pa/provision/default.py
+++ b/azurelinuxagent/pa/provision/default.py
@@ -89,7 +89,7 @@ class ProvisionHandler(object):
self.write_provisioned()
- self.report_event("Provision succeeded",
+ self.report_event("Provisioning succeeded",
is_success=True,
duration=elapsed_milliseconds(utc_start))
diff --git a/azurelinuxagent/pa/provision/factory.py b/azurelinuxagent/pa/provision/factory.py
index d87765f3..9e88618f 100644
--- a/azurelinuxagent/pa/provision/factory.py
+++ b/azurelinuxagent/pa/provision/factory.py
@@ -16,9 +16,7 @@
#
import azurelinuxagent.common.conf as conf
-import azurelinuxagent.common.logger as logger
-from azurelinuxagent.common.utils.textutil import Version
from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, \
DISTRO_FULL_NAME
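A runnable sketch of the routing rule the patch enforces (host-plugin traffic never uses the configured proxy), with the HTTP layer stubbed and URIs shortened from the log in the report below:
```
def http_get(uri, use_proxy=True):
    # Stub standing in for restutil.http_get; it only reports the channel.
    return "GET {0} via {1}".format(uri, "proxy" if use_proxy else "direct")

# Direct blob downloads may still honor HttpProxy.Host/HttpProxy.Port ...
print(http_get("https://rdfepirv2sg1prdstr03.blob.core.windows.net/pkg"))
# ... but the host-plugin fallback now passes use_proxy=False and goes direct.
print(http_get("http://100.107.240.13:32526/extensionArtifact", use_proxy=False))
```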
| [2.2.14] HostGAPlugin still uses the proxy while auto-updating
Hi there,
I saw there was an issue #769 "HostGAPlugin requests should never go through proxy". But currently it seems that only the blob status upload traffic can bypass a misconfigured proxy. For the auto-update feature, only the manifest file can be downloaded without the proxy; the WALA update packages still go through the proxy and fail to download. The extension download process is the same as auto-update. Is this by design? Thanks!
My steps:
1. Configure /etc/waagent.conf to set a wrong proxy, enable auto-update, and enable verbose log:
HttpProxy.Host=172.16.0.1
HttpProxy.Port=3128
Logs.Verbose=y
AutoUpdate.Enabled=y
Service waagent restart
2. Check /var/log/waagent.log
```
2017/07/12 06:29:39.476351 VERBOSE HTTP proxy: [172.16.0.1:3128]
2017/07/12 06:29:39.483259 VERBOSE HTTP connection [GET] [http://rdfepirv2sg1prdstr03.blob.core.windows.net:80/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Prod_asiasoutheast_manifest.xml] [None] [None]
2017/07/12 06:29:49.513148 VERBOSE HTTP connection [GET] [http://rdfepirv2sg1prdstr03.blob.core.windows.net:80/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Prod_asiasoutheast_manifest.xml] [None] [None]
2017/07/12 06:29:59.547066 VERBOSE HTTP connection [GET] [http://rdfepirv2sg1prdstr03.blob.core.windows.net:80/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Prod_asiasoutheast_manifest.xml] [None] [None]
2017/07/12 06:30:09.572601 VERBOSE Fetch failed from [https://rdfepirv2sg1prdstr03.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Prod_asiasoutheast_manifest.xml]: [000009] HTTP GET failed
2017/07/12 06:30:09.591745 VERBOSE Manifest could not be downloaded, falling back to host plugin
2017/07/12 06:30:09.602838 VERBOSE HostGAPlugin: Getting API versions at [http://100.107.240.13:32526/versions]
2017/07/12 06:30:09.614843 VERBOSE HTTP connection [GET] [/versions] [None] [{'x-ms-containerid': u'd4384bb2-4f5c-4680-8c3d-9fa51841ba7d'}]
2017/07/12 06:30:09.632135 VERBOSE HTTP response status: [200]
2017/07/12 06:30:09.639179 INFO Event: name=WALinuxAgent, op=InitializeHostPlugin, message=
2017/07/12 06:30:09.650182 VERBOSE Fetch [http://100.107.240.13:32526/extensionArtifact] with headers [{'x-ms-containerid': u'd4384bb2-4f5c-4680-8c3d-9fa51841ba7d', 'x-ms-version': '2015-09-01', 'x-ms-host-config-name': u'77bbebc9f3994cc48fbef61834a1822e.0.77bbebc9f3994cc48fbef61834a1822e.0.wala692214ui4.1.xml', 'x-ms-artifact-location': u'https://rdfepirv2sg1prdstr03.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Prod_asiasoutheast_manifest.xml'}]
2017/07/12 06:30:09.689246 VERBOSE HTTP connection [GET] [/extensionArtifact] [None] [{'x-ms-containerid': u'd4384bb2-4f5c-4680-8c3d-9fa51841ba7d', 'x-ms-version': '2015-09-01', 'x-ms-host-config-name': u'77bbebc9f3994cc48fbef61834a1822e.0.77bbebc9f3994cc48fbef61834a1822e.0.wala692214ui4.1.xml', 'x-ms-artifact-location': u'https://rdfepirv2sg1prdstr03.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Prod_asiasoutheast_manifest.xml'}]
2017/07/12 06:30:09.763930 VERBOSE HTTP response status: [200]
2017/07/12 06:30:09.770335 VERBOSE Manifest downloaded successfully from host plugin
2017/07/12 06:30:09.778017 INFO Setting host plugin as default channel
2017/07/12 06:30:09.785914 VERBOSE Load ExtensionManifest.xml
2017/07/12 06:30:09.794175 VERBOSE Loading Agent WALinuxAgent-2.2.10 from package package
2017/07/12 06:30:09.803674 VERBOSE Agent WALinuxAgent-2.2.10 error state: Last Failure: 0.0, Total Failures: 0, Fatal: False
2017/07/12 06:30:09.816401 VERBOSE Ensuring Agent WALinuxAgent-2.2.10 is downloaded
2017/07/12 06:30:09.825242 VERBOSE Using host plugin as default channel
2017/07/12 06:30:09.833194 VERBOSE HTTP proxy: [172.16.0.1:3128]
2017/07/12 06:30:09.839940 VERBOSE HTTP connection [GET] [http://100.107.240.13:32526/extensionArtifact] [None] [{'x-ms-containerid': u'd4384bb2-4f5c-4680-8c3d-9fa51841ba7d', 'x-ms-artifact-manifest-location': u'https://rdfepirv2sg1prdstr03.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Prod_asiasoutheast_manifest.xml', 'x-ms-version': '2015-09-01', 'x-ms-host-config-name': u'77bbebc9f3994cc48fbef61834a1822e.0.77bbebc9f3994cc48fbef61834a1822e.0.wala692214ui4.1.xml', 'x-ms-artifact-location': u'https://rdfepirv2sg1prdstr03.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent__Prod__2.2.10'}]
2017/07/12 06:30:19.911177 VERBOSE HTTP connection [GET] [http://100.107.240.13:32526/extensionArtifact] [None] [{'x-ms-containerid': u'd4384bb2-4f5c-4680-8c3d-9fa51841ba7d', 'x-ms-artifact-manifest-location': u'https://rdfepirv2sg1prdstr03.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Prod_asiasoutheast_manifest.xml', 'x-ms-version': '2015-09-01', 'x-ms-host-config-name': u'77bbebc9f3994cc48fbef61834a1822e.0.77bbebc9f3994cc48fbef61834a1822e.0.wala692214ui4.1.xml', 'x-ms-artifact-location': u'https://rdfepirv2sg1prdstr03.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent__Prod__2.2.10'}]
2017/07/12 06:30:29.992187 VERBOSE HTTP connection [GET] [http://100.107.240.13:32526/extensionArtifact] [None] [{'x-ms-containerid': u'd4384bb2-4f5c-4680-8c3d-9fa51841ba7d', 'x-ms-artifact-manifest-location': u'https://rdfepirv2sg1prdstr03.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Prod_asiasoutheast_manifest.xml', 'x-ms-version': '2015-09-01', 'x-ms-host-config-name': u'77bbebc9f3994cc48fbef61834a1822e.0.77bbebc9f3994cc48fbef61834a1822e.0.wala692214ui4.1.xml', 'x-ms-artifact-location': u'https://rdfepirv2sg1prdstr03.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent__Prod__2.2.10'}]
2017/07/12 06:30:39.543400 VERBOSE Found event file: /var/lib/waagent/events/1499855409649965.tld
2017/07/12 06:30:39.554873 VERBOSE Processed event file: /var/lib/waagent/events/1499855409649965.tld
2017/07/12 06:30:39.566265 VERBOSE HTTP connection [POST] [/machine?comp=telemetrydata] [<?xml version="1.0"?><TelemetryData version="1.0"><Provider id="69B669B9-4AF8-4C50-BDC4-6006FA76E975"><Event id="1"><![CDATA[<Param Name="Name" Value="WALinuxAgent" T="mt:wstr" /><Param Name="Version" Value="2.2.14" T="mt:wstr" /><Param Name="IsInternal" Value="False" T="mt:bool" /><Param Name="Operation" Value="InitializeHostPlugin" T="mt:wstr" /><Param Name="OperationSuccess" Value="True" T="mt:bool" /><Param Name="Message" Value="" T="mt:wstr" /><Param Name="Duration" Value="0" T="mt:uint64" /><Param Name="ExtensionType" Value="" T="mt:wstr" /><Param Name="OSVersion" Value="Linux:redhat-6.9-Santiago:2.6.32-696.3.2.el6.x86_64" T="mt:wstr" /><Param Name="GAVersion" Value="WALinuxAgent-2.2.14" T="mt:wstr" /><Param Name="RAM" Value="1679" T="mt:uint64" /><Param Name="Processors" Value="1" T="mt:uint64" /><Param Name="VMName" Value="wala692214ui4" T="mt:wstr" /><Param Name="TenantName" Value="77bbebc9f3994cc48fbef61834a1822e" T="mt:wstr" /><Param Name="RoleName" Value="wala692214ui4" T="mt:wstr" /><Param Name="RoleInstanceName" Value="77bbebc9f3994cc48fbef61834a1822e.wala692214ui4" T="mt:wstr" /><Param Name="ContainerId" Value="d4384bb2-4f5c-4680-8c3d-9fa51841ba7d" T="mt:wstr" />]]></Event></Provider></TelemetryData>] [{'Content-Type': 'text/xml;charset=utf-8', 'x-ms-version': '2012-11-30', 'x-ms-agent-name': 'WALinuxAgent'}]
2017/07/12 06:30:39.768153 VERBOSE HTTP response status: [200]
2017/07/12 06:30:40.064471 VERBOSE Agent WALinuxAgent-2.2.10 download from http://100.107.240.13:32526/extensionArtifact failed [[000009] HTTP GET failed]
2017/07/12 06:30:40.079068 WARNING Host plugin download unsuccessful
2017/07/12 06:30:40.089613 VERBOSE Using host plugin as default channel
2017/07/12 06:30:40.097071 VERBOSE HTTP proxy: [172.16.0.1:3128]
2017/07/12 06:30:40.103549 VERBOSE HTTP connection [GET] [http://100.107.240.13:32526/extensionArtifact] [None] [{'x-ms-containerid': u'd4384bb2-4f5c-4680-8c3d-9fa51841ba7d', 'x-ms-artifact-manifest-location': u'https://rdfepirv2sg1prdstr03.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Prod_asiasoutheast_manifest.xml', 'x-ms-version': '2015-09-01', 'x-ms-host-config-name': u'77bbebc9f3994cc48fbef61834a1822e.0.77bbebc9f3994cc48fbef61834a1822e.0.wala692214ui4.1.xml', 'x-ms-artifact-location': u'https://rdfepirv2sg1prdstr04.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent__Prod__2.2.10'}]
2017/07/12 06:30:50.176833 VERBOSE HTTP connection [GET] [http://100.107.240.13:32526/extensionArtifact] [None] [{'x-ms-containerid': u'd4384bb2-4f5c-4680-8c3d-9fa51841ba7d', 'x-ms-artifact-manifest-location': u'https://rdfepirv2sg1prdstr03.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Prod_asiasoutheast_manifest.xml', 'x-ms-version': '2015-09-01', 'x-ms-host-config-name': u'77bbebc9f3994cc48fbef61834a1822e.0.77bbebc9f3994cc48fbef61834a1822e.0.wala692214ui4.1.xml', 'x-ms-artifact-location': u'https://rdfepirv2sg1prdstr04.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent__Prod__2.2.10'}]
2017/07/12 06:31:00.249870 VERBOSE HTTP connection [GET] [http://100.107.240.13:32526/extensionArtifact] [None] [{'x-ms-containerid': u'd4384bb2-4f5c-4680-8c3d-9fa51841ba7d', 'x-ms-artifact-manifest-location': u'https://rdfepirv2sg1prdstr03.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent_Prod_asiasoutheast_manifest.xml', 'x-ms-version': '2015-09-01', 'x-ms-host-config-name': u'77bbebc9f3994cc48fbef61834a1822e.0.77bbebc9f3994cc48fbef61834a1822e.0.wala692214ui4.1.xml', 'x-ms-artifact-location': u'https://rdfepirv2sg1prdstr04.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent__Prod__2.2.10'}]
2017/07/12 06:31:10.331413 VERBOSE Agent WALinuxAgent-2.2.10 download from http://100.107.240.13:32526/extensionArtifact failed [[000009] HTTP GET failed]
2017/07/12 06:31:10.347366 WARNING Host plugin download unsuccessful
2017/07/12 06:31:10.355276 VERBOSE Using host plugin as default channel
2017/07/12 06:31:10.363528 VERBOSE HTTP proxy: [172.16.0.1:3128]
...
``` | Azure/WALinuxAgent | diff --git a/tests/pa/test_provision.py b/tests/pa/test_provision.py
index ab0a9102..2c2d2c9b 100644
--- a/tests/pa/test_provision.py
+++ b/tests/pa/test_provision.py
@@ -151,7 +151,7 @@ class TestProvision(AgentTestCase):
ph.run()
- call1 = call("Provision succeeded", duration=ANY, is_success=True)
+ call1 = call("Provisioning succeeded", duration=ANY, is_success=True)
call2 = call(ANY, is_success=True, operation=WALAEventOperation.GuestState)
ph.report_event.assert_has_calls([call1, call2])
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 8
} | 2.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "",
"pip_packages": [
"pyasn1",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.4",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
importlib-metadata==4.8.3
iniconfig==1.1.1
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyasn1==0.5.1
pyparsing==3.1.4
pytest==7.0.1
tomli==1.2.3
typing_extensions==4.1.1
-e git+https://github.com/Azure/WALinuxAgent.git@30e638ddab04bd4ec473fe8369a86f64e717776e#egg=WALinuxAgent
zipp==3.6.0
| name: WALinuxAgent
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyasn1==0.5.1
- pyparsing==3.1.4
- pytest==7.0.1
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/WALinuxAgent
| [
"tests/pa/test_provision.py::TestProvision::test_provision_telemetry_success"
]
| []
| [
"tests/pa/test_provision.py::TestProvision::test_customdata",
"tests/pa/test_provision.py::TestProvision::test_is_provisioned_is_provisioned",
"tests/pa/test_provision.py::TestProvision::test_is_provisioned_not_deprovisioned",
"tests/pa/test_provision.py::TestProvision::test_is_provisioned_not_provisioned",
"tests/pa/test_provision.py::TestProvision::test_provision",
"tests/pa/test_provision.py::TestProvision::test_provision_telemetry_fail",
"tests/pa/test_provision.py::TestProvision::test_provisioning_is_skipped_when_not_enabled"
]
| []
| Apache License 2.0 | 2,004 | [
"azurelinuxagent/pa/provision/cloudinit.py",
"azurelinuxagent/ga/update.py",
"azurelinuxagent/ga/env.py",
"azurelinuxagent/common/protocol/restapi.py",
"azurelinuxagent/common/protocol/wire.py",
"azurelinuxagent/pa/provision/default.py",
"azurelinuxagent/ga/monitor.py",
"azurelinuxagent/pa/provision/factory.py"
]
| [
"azurelinuxagent/pa/provision/cloudinit.py",
"azurelinuxagent/ga/update.py",
"azurelinuxagent/ga/env.py",
"azurelinuxagent/common/protocol/restapi.py",
"azurelinuxagent/common/protocol/wire.py",
"azurelinuxagent/pa/provision/default.py",
"azurelinuxagent/ga/monitor.py",
"azurelinuxagent/pa/provision/factory.py"
]
|
|
spulec__freezegun-221 | 7ad16a5579b28fc939a69cc04f0e99ba5e87b206 | 2018-01-04 11:05:22 | 181f7ac7f909e561e26f5b293d2d40e82eb99f7a | diff --git a/freezegun/_async.py b/freezegun/_async.py
new file mode 100644
index 0000000..a4f2e03
--- /dev/null
+++ b/freezegun/_async.py
@@ -0,0 +1,17 @@
+import functools
+
+import asyncio
+
+
+def wrap_coroutine(api, coroutine):
+ @functools.wraps(coroutine)
+ @asyncio.coroutine
+ def wrapper(*args, **kwargs):
+ with api as time_factory:
+ if api.as_arg:
+ result = yield from coroutine(time_factory, *args, **kwargs)
+ else:
+ result = yield from coroutine(*args, **kwargs)
+ return result
+
+ return wrapper
diff --git a/freezegun/api.py b/freezegun/api.py
index c642635..7c3585e 100644
--- a/freezegun/api.py
+++ b/freezegun/api.py
@@ -36,6 +36,15 @@ try:
except ImportError:
import copyreg
+try:
+ iscoroutinefunction = inspect.iscoroutinefunction
+ from freezegun._async import wrap_coroutine
+except AttributeError:
+ iscoroutinefunction = lambda x: False
+
+ def wrap_coroutine(*args):
+ raise NotImplementedError()
+
# Stolen from six
def with_metaclass(meta, *bases):
@@ -318,6 +327,8 @@ class _freeze_time(object):
def __call__(self, func):
if inspect.isclass(func):
return self.decorate_class(func)
+ elif iscoroutinefunction(func):
+ return self.decorate_coroutine(func)
return self.decorate_callable(func)
def decorate_class(self, klass):
@@ -495,6 +506,9 @@ class _freeze_time(object):
uuid._uuid_generate_time = real_uuid_generate_time
uuid._UuidCreate = real_uuid_create
+ def decorate_coroutine(self, coroutine):
+ return wrap_coroutine(self, coroutine)
+
def decorate_callable(self, func):
def wrapper(*args, **kwargs):
with self as time_factory:
| Mark notation does not work when using the pytest-asyncio library
A simple test case fails when using both libraries together with the mark notation:
```
from freezegun import freeze_time
import datetime
import asyncio
import pytest
import pytest_asyncio
@freeze_time("2012-01-14")
async def test():
assert datetime.datetime.now() == datetime.datetime(2012, 1, 14) # passes
@pytest.mark.asyncio
@freeze_time("2012-01-14")
async def test_async():
assert datetime.datetime.now() == datetime.datetime(2012, 1, 14) # fails
```
If we use the context-manager form instead, it works as expected:
```
from freezegun import freeze_time
import datetime
import asyncio
import pytest
import pytest_asyncio
@freeze_time("2012-01-14")
async def test():
assert datetime.datetime.now() == datetime.datetime(2012, 1, 14) # passes
@pytest.mark.asyncio
async def test_async():
with freeze_time("2012-01-14"):
assert datetime.datetime.now() == datetime.datetime(2012, 1, 14) # passes
```
Dependencies using Python 3:
```
freezegun==0.3.8
pytest==3.0.7
pytest-asyncio==0.5.0
```
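With the `wrap_coroutine` shim in place, the decorator form from the failing snippet should work unchanged; a minimal sketch mirroring the new test (assumes Python >= 3.5 with pytest-asyncio installed):
```
import datetime

import pytest
from freezegun import freeze_time

@pytest.mark.asyncio
@freeze_time("2012-01-14")
async def test_async():
    # freeze_time now wraps the coroutine itself, so the freeze holds
    # inside the event loop as well.
    assert datetime.datetime.now() == datetime.datetime(2012, 1, 14)
```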
| spulec/freezegun | diff --git a/tests/test_asyncio.py b/tests/test_asyncio.py
new file mode 100644
index 0000000..3be1ebf
--- /dev/null
+++ b/tests/test_asyncio.py
@@ -0,0 +1,36 @@
+import datetime
+from textwrap import dedent
+
+from nose.plugins import skip
+
+from freezegun import freeze_time
+
+try:
+ import asyncio
+except ImportError:
+ asyncio = False
+
+
+def test_time_freeze_coroutine():
+ if not asyncio:
+ raise skip.SkipTest('asyncio required')
+ @asyncio.coroutine
+ @freeze_time('1970-01-01')
+ def frozen_coroutine():
+ assert datetime.date.today() == datetime.date(1970, 1, 1)
+
+ asyncio.get_event_loop().run_until_complete(frozen_coroutine())
+
+
+def test_time_freeze_async_def():
+ try:
+ exec('async def foo(): pass')
+ except SyntaxError:
+ raise skip.SkipTest('async def not supported')
+ else:
+ exec(dedent('''
+ @freeze_time('1970-01-01')
+ async def frozen_coroutine():
+ assert datetime.date.today() == datetime.date(1970, 1, 1)
+ asyncio.get_event_loop().run_until_complete(frozen_coroutine())
+ '''))
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_added_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==3.7.1
coveralls==1.11.1
docopt==0.6.2
-e git+https://github.com/spulec/freezegun.git@7ad16a5579b28fc939a69cc04f0e99ba5e87b206#egg=freezegun
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
mock==5.2.0
nose==1.3.7
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
python-dateutil==2.9.0.post0
requests==2.27.1
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: freezegun
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- charset-normalizer==2.0.12
- coverage==3.7.1
- coveralls==1.11.1
- docopt==0.6.2
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- mock==5.2.0
- nose==1.3.7
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- python-dateutil==2.9.0.post0
- requests==2.27.1
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/freezegun
| [
"tests/test_asyncio.py::test_time_freeze_async_def"
]
| []
| [
"tests/test_asyncio.py::test_time_freeze_coroutine"
]
| []
| Apache License 2.0 | 2,006 | [
"freezegun/_async.py",
"freezegun/api.py"
]
| [
"freezegun/_async.py",
"freezegun/api.py"
]
|
|
ucfopen__canvasapi-127 | 5644a6c89cc851216ec8114443d3af857ac6f70e | 2018-01-04 17:53:13 | db3c377b68f2953e1618f4e4588cc2db8603841e | diff --git a/canvasapi/canvas.py b/canvasapi/canvas.py
index b6dfd7d..2f98dcf 100644
--- a/canvasapi/canvas.py
+++ b/canvasapi/canvas.py
@@ -472,20 +472,19 @@ class Canvas(object):
:type recipients: `list` of `str`
:param body: The body of the message being added.
:type body: `str`
- :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
- :class:`canvasapi.conversation.Conversation`
+ :rtype: list of :class:`canvasapi.conversation.Conversation`
"""
from canvasapi.conversation import Conversation
- return PaginatedList(
- Conversation,
- self.__requester,
+ kwargs['recipients'] = recipients
+ kwargs['body'] = body
+
+ response = self.__requester.request(
'POST',
'conversations',
- recipients=recipients,
- body=body,
_kwargs=combine_kwargs(**kwargs)
)
+ return [Conversation(self.__requester, convo) for convo in response.json()]
def get_conversation(self, conversation, **kwargs):
"""
| Create Conversation method not creating
`Canvas.create_conversation()` doesn't appear to be working properly.
If I try to create a conversation using the code below, nothing happens.
```
convo = canvas.create_conversation([USER_ID], 'Hello!')
```
The method returns a `PaginatedList`. However, if I iterate over the `PaginatedList`, the conversations are then created in Canvas. I suspect this is due to an odd interaction between PaginatedList and `POST` requests.
Additionally, in my testing, I was unable to successfully add a subject as a keyword argument:
```
convo = canvas.create_conversation([USER_ID], 'Hello!', subject='Test Subject')
```
The subject remained "(No Subject)" | ucfopen/canvasapi | diff --git a/tests/fixtures/conversation.json b/tests/fixtures/conversation.json
index 00b3e68..3c1bc71 100644
--- a/tests/fixtures/conversation.json
+++ b/tests/fixtures/conversation.json
@@ -51,16 +51,163 @@
"status_code": 200
},
"create_conversation": {
+ "method": "POST",
+ "endpoint": "conversations",
+ "data": [{
+ "id": 1,
+ "subject": "A Conversation",
+ "workflow_state": "unread",
+ "last_message": "Hello, World!",
+ "last_message_at": "2018-01-01T00:00:00Z",
+ "last_authored_message": "Hello, World!",
+ "last_authored_message_at": "2018-01-01T00:00:00Z",
+ "message_count": 1,
+ "subscribed": true,
+ "private": true,
+ "starred": false,
+ "properties": ["last_author"],
+ "messages": [{
+ "id": 1,
+ "author_id": 1,
+ "created_at": "2018-01-01T00:00:00Z",
+ "generated": false,
+ "body": "Hello, World!",
+ "forwarded_messages": [],
+ "attachments": [],
+ "media_comment": null,
+ "participating_user_ids": [1, 2]
+ }],
+ "audience": [2],
+ "audience_contexts": {
+ "courses": {
+ "1": ["StudentEnrollment"]
+ },
+ "groups": {}
+ },
+ "participants": [
+ {
+ "id": 1,
+ "name": "John",
+ "common_courses": {},
+ "common_groups": {}
+ },
+ {
+ "id": 2,
+ "name": "Joe",
+ "common_courses": {
+ "1": ["StudentEnrollment"]
+ },
+ "common_groups": {}
+ }
+ ],
+ "visible": true,
+ "context_code": null
+ }],
+ "status_code": 200
+ },
+ "create_conversation_multiple": {
"method": "POST",
"endpoint": "conversations",
"data": [
{
- "recipients": ["1", "2"],
- "body": "Test Conversation Body"
+ "id": 1,
+ "subject": "A Conversation",
+ "workflow_state": "unread",
+ "last_message": "Hey guys!",
+ "last_message_at": "2018-01-01T00:00:00Z",
+ "last_authored_message": "Hey guys!",
+ "last_authored_message_at": "2018-01-01T00:00:00Z",
+ "message_count": 1,
+ "subscribed": true,
+ "private": true,
+ "starred": false,
+ "properties": ["last_author"],
+ "messages": [{
+ "id": 1,
+ "author_id": 1,
+ "created_at": "2018-01-01T00:00:00Z",
+ "generated": false,
+ "body": "Hey guys!",
+ "forwarded_messages": [],
+ "attachments": [],
+ "media_comment": null,
+ "participating_user_ids": [1, 2]
+ }],
+ "audience": [2],
+ "audience_contexts": {
+ "courses": {
+ "1": ["StudentEnrollment"]
+ },
+ "groups": {}
+ },
+ "participants": [
+ {
+ "id": 1,
+ "name": "John",
+ "common_courses": {},
+ "common_groups": {}
+ },
+ {
+ "id": 2,
+ "name": "Joe",
+ "common_courses": {
+ "1": ["StudentEnrollment"]
+ },
+ "common_groups": {}
+ }
+ ],
+ "visible": true,
+ "context_code": null
},
{
- "recipients": ["3", "4"],
- "body": "Test Conversation Body 2"
+ "id": 2,
+ "subject": "A Conversation",
+ "workflow_state": "unread",
+ "last_message": "Hey guys!",
+ "last_message_at": "2018-01-01T00:00:00Z",
+ "last_authored_message": "Hey guys!",
+ "last_authored_message_at": "2018-01-01T00:00:00Z",
+ "message_count": 1,
+ "subscribed": true,
+ "private": true,
+ "starred": false,
+ "properties": ["last_author"],
+ "messages": [{
+ "id": 2,
+ "author_id": 1,
+ "created_at": "2018-01-01T00:00:00Z",
+ "generated": false,
+ "body": "Hey guys!",
+ "forwarded_messages": [],
+ "attachments": [],
+ "media_comment": null,
+ "participating_user_ids": [1, 3]
+ }],
+ "audience": [3],
+ "audience_contexts": {
+ "courses": {
+ "1": ["StudentEnrollment"]
+ },
+ "groups": {}
+ },
+ "participants": [
+ {
+ "id": 1,
+ "name": "John",
+ "common_courses": {},
+ "common_groups": {}
+ },
+ {
+ "id": 3,
+ "name": "Jack",
+ "common_courses": {
+ "1": ["StudentEnrollment"]
+ },
+ "common_groups": {}
+ }
+ ],
+ "visible": true,
+ "context_code": null
}
],
"status_code": 200
diff --git a/tests/test_canvas.py b/tests/test_canvas.py
index 3d75228..c3c59c1 100644
--- a/tests/test_canvas.py
+++ b/tests/test_canvas.py
@@ -331,14 +331,39 @@ class TestCanvas(unittest.TestCase):
def test_create_conversation(self, m):
register_uris({'conversation': ['create_conversation']}, m)
- recipients = ['1', '2']
- body = 'Test Conversation Body'
+ recipients = ['2']
+ body = 'Hello, World!'
- conversations = self.canvas.create_conversation(recipients=recipients, body=body)
- conversation_list = [conversation for conversation in conversations]
+ conversations = self.canvas.create_conversation(
+ recipients=recipients,
+ body=body
+ )
+ self.assertIsInstance(conversations, list)
+ self.assertEqual(len(conversations), 1)
+ self.assertIsInstance(conversations[0], Conversation)
+ self.assertTrue(hasattr(conversations[0], 'last_message'))
+ self.assertEqual(conversations[0].last_message, body)
- self.assertIsInstance(conversation_list[0], Conversation)
- self.assertEqual(len(conversation_list), 2)
+ def test_create_conversation_multiple_people(self, m):
+ register_uris({'conversation': ['create_conversation_multiple']}, m)
+
+ recipients = ['2', '3']
+ body = 'Hey guys!'
+
+ conversations = self.canvas.create_conversation(
+ recipients=recipients,
+ body=body
+ )
+ self.assertIsInstance(conversations, list)
+ self.assertEqual(len(conversations), 2)
+
+ self.assertIsInstance(conversations[0], Conversation)
+ self.assertTrue(hasattr(conversations[0], 'last_message'))
+ self.assertEqual(conversations[0].last_message, body)
+
+ self.assertIsInstance(conversations[1], Conversation)
+ self.assertTrue(hasattr(conversations[1], 'last_message'))
+ self.assertEqual(conversations[1].last_message, body)
# get_conversation()
def test_get_conversation(self, m):
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 1
},
"num_modified_files": 1
} | 0.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"requests_mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/ucfopen/canvasapi.git@5644a6c89cc851216ec8114443d3af857ac6f70e#egg=canvasapi
certifi==2025.1.31
charset-normalizer==3.4.1
coverage==7.8.0
exceptiongroup==1.2.2
execnet==2.1.1
idna==3.10
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytest-cov==6.0.0
pytest-xdist==3.6.1
pytz==2025.2
requests==2.32.3
requests-mock==1.12.1
six==1.17.0
tomli==2.2.1
urllib3==2.3.0
| name: canvasapi
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- coverage==7.8.0
- exceptiongroup==1.2.2
- execnet==2.1.1
- idna==3.10
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-cov==6.0.0
- pytest-xdist==3.6.1
- pytz==2025.2
- requests==2.32.3
- requests-mock==1.12.1
- six==1.17.0
- tomli==2.2.1
- urllib3==2.3.0
prefix: /opt/conda/envs/canvasapi
| [
"tests/test_canvas.py::TestCanvas::test_create_conversation",
"tests/test_canvas.py::TestCanvas::test_create_conversation_multiple_people"
]
| [
"tests/test_canvas.py::TestCanvas::test_init_deprecate_url_contains_version"
]
| [
"tests/test_canvas.py::TestCanvas::test_clear_course_nicknames",
"tests/test_canvas.py::TestCanvas::test_conversations_batch_update",
"tests/test_canvas.py::TestCanvas::test_conversations_batch_updated_fail_on_event",
"tests/test_canvas.py::TestCanvas::test_conversations_batch_updated_fail_on_ids",
"tests/test_canvas.py::TestCanvas::test_conversations_get_running_batches",
"tests/test_canvas.py::TestCanvas::test_conversations_mark_all_as_read",
"tests/test_canvas.py::TestCanvas::test_conversations_unread_count",
"tests/test_canvas.py::TestCanvas::test_create_account",
"tests/test_canvas.py::TestCanvas::test_create_appointment_group",
"tests/test_canvas.py::TestCanvas::test_create_appointment_group_fail_on_context_codes",
"tests/test_canvas.py::TestCanvas::test_create_appointment_group_fail_on_title",
"tests/test_canvas.py::TestCanvas::test_create_calendar_event",
"tests/test_canvas.py::TestCanvas::test_create_calendar_event_fail",
"tests/test_canvas.py::TestCanvas::test_create_group",
"tests/test_canvas.py::TestCanvas::test_get_account",
"tests/test_canvas.py::TestCanvas::test_get_account_fail",
"tests/test_canvas.py::TestCanvas::test_get_account_sis_id",
"tests/test_canvas.py::TestCanvas::test_get_accounts",
"tests/test_canvas.py::TestCanvas::test_get_activity_stream_summary",
"tests/test_canvas.py::TestCanvas::test_get_appointment_group",
"tests/test_canvas.py::TestCanvas::test_get_calendar_event",
"tests/test_canvas.py::TestCanvas::test_get_conversation",
"tests/test_canvas.py::TestCanvas::test_get_conversations",
"tests/test_canvas.py::TestCanvas::test_get_course",
"tests/test_canvas.py::TestCanvas::test_get_course_accounts",
"tests/test_canvas.py::TestCanvas::test_get_course_fail",
"tests/test_canvas.py::TestCanvas::test_get_course_nickname",
"tests/test_canvas.py::TestCanvas::test_get_course_nickname_fail",
"tests/test_canvas.py::TestCanvas::test_get_course_nicknames",
"tests/test_canvas.py::TestCanvas::test_get_course_non_unicode_char",
"tests/test_canvas.py::TestCanvas::test_get_course_sis_id",
"tests/test_canvas.py::TestCanvas::test_get_course_with_start_date",
"tests/test_canvas.py::TestCanvas::test_get_courses",
"tests/test_canvas.py::TestCanvas::test_get_file",
"tests/test_canvas.py::TestCanvas::test_get_group",
"tests/test_canvas.py::TestCanvas::test_get_group_category",
"tests/test_canvas.py::TestCanvas::test_get_group_sis_id",
"tests/test_canvas.py::TestCanvas::test_get_outcome",
"tests/test_canvas.py::TestCanvas::test_get_outcome_group",
"tests/test_canvas.py::TestCanvas::test_get_root_outcome_group",
"tests/test_canvas.py::TestCanvas::test_get_section",
"tests/test_canvas.py::TestCanvas::test_get_section_sis_id",
"tests/test_canvas.py::TestCanvas::test_get_todo_items",
"tests/test_canvas.py::TestCanvas::test_get_upcoming_events",
"tests/test_canvas.py::TestCanvas::test_get_user",
"tests/test_canvas.py::TestCanvas::test_get_user_by_id_type",
"tests/test_canvas.py::TestCanvas::test_get_user_fail",
"tests/test_canvas.py::TestCanvas::test_get_user_self",
"tests/test_canvas.py::TestCanvas::test_list_appointment_groups",
"tests/test_canvas.py::TestCanvas::test_list_calendar_events",
"tests/test_canvas.py::TestCanvas::test_list_group_participants",
"tests/test_canvas.py::TestCanvas::test_list_user_participants",
"tests/test_canvas.py::TestCanvas::test_reserve_time_slot",
"tests/test_canvas.py::TestCanvas::test_reserve_time_slot_by_participant_id",
"tests/test_canvas.py::TestCanvas::test_search_accounts",
"tests/test_canvas.py::TestCanvas::test_search_all_courses",
"tests/test_canvas.py::TestCanvas::test_search_recipients",
"tests/test_canvas.py::TestCanvas::test_set_course_nickname"
]
| []
| MIT License | 2,007 | [
"canvasapi/canvas.py"
]
| [
"canvasapi/canvas.py"
]
|
|
Azure__WALinuxAgent-988 | 30e638ddab04bd4ec473fe8369a86f64e717776e | 2018-01-04 19:04:14 | 6e9b985c1d7d564253a1c344bab01b45093103cd | diff --git a/azurelinuxagent/common/protocol/restapi.py b/azurelinuxagent/common/protocol/restapi.py
index 275cedb0..540ec5d9 100644
--- a/azurelinuxagent/common/protocol/restapi.py
+++ b/azurelinuxagent/common/protocol/restapi.py
@@ -321,9 +321,9 @@ class Protocol(DataContract):
def get_artifacts_profile(self):
raise NotImplementedError()
- def download_ext_handler_pkg(self, uri, headers=None):
+ def download_ext_handler_pkg(self, uri, headers=None, use_proxy=True):
try:
- resp = restutil.http_get(uri, use_proxy=True, headers=headers)
+ resp = restutil.http_get(uri, headers=headers, use_proxy=use_proxy)
if restutil.request_succeeded(resp):
return resp.read()
except Exception as e:
diff --git a/azurelinuxagent/common/protocol/wire.py b/azurelinuxagent/common/protocol/wire.py
index a92e0b89..2dc5297c 100644
--- a/azurelinuxagent/common/protocol/wire.py
+++ b/azurelinuxagent/common/protocol/wire.py
@@ -172,7 +172,7 @@ class WireProtocol(Protocol):
logger.warn("Download did not succeed, falling back to host plugin")
host = self.client.get_host_plugin()
uri, headers = host.get_artifact_request(uri, host.manifest_uri)
- package = super(WireProtocol, self).download_ext_handler_pkg(uri, headers=headers)
+ package = super(WireProtocol, self).download_ext_handler_pkg(uri, headers=headers, use_proxy=False)
return package
def report_provision_status(self, provision_status):
diff --git a/azurelinuxagent/ga/env.py b/azurelinuxagent/ga/env.py
index 0456cb06..26487818 100644
--- a/azurelinuxagent/ga/env.py
+++ b/azurelinuxagent/ga/env.py
@@ -86,7 +86,7 @@ class EnvHandler(object):
version=CURRENT_VERSION,
op=WALAEventOperation.Firewall,
is_success=success,
- log_event=True)
+ log_event=False)
timeout = conf.get_root_device_scsi_timeout()
if timeout is not None:
diff --git a/azurelinuxagent/ga/monitor.py b/azurelinuxagent/ga/monitor.py
index e3ef292f..9b90aa7b 100644
--- a/azurelinuxagent/ga/monitor.py
+++ b/azurelinuxagent/ga/monitor.py
@@ -203,7 +203,8 @@ class MonitorHandler(object):
version=CURRENT_VERSION,
op=WALAEventOperation.HeartBeat,
is_success=True,
- message=msg)
+ message=msg,
+ log_event=False)
counter += 1
@@ -222,7 +223,7 @@ class MonitorHandler(object):
version=CURRENT_VERSION,
op=WALAEventOperation.HttpErrors,
is_success=False,
- msg=msg)
+ message=msg)
try:
self.collect_and_send_events()
diff --git a/azurelinuxagent/ga/update.py b/azurelinuxagent/ga/update.py
index 26009299..80870cae 100644
--- a/azurelinuxagent/ga/update.py
+++ b/azurelinuxagent/ga/update.py
@@ -295,7 +295,7 @@ class UpdateHandler(object):
duration=elapsed_milliseconds(utc_start),
message="Incarnation {0}".format(
exthandlers_handler.last_etag),
- log_event=True)
+ log_event=False)
time.sleep(GOAL_STATE_INTERVAL)
@@ -704,13 +704,13 @@ class GuestAgent(object):
version = None
if path is not None:
m = AGENT_DIR_PATTERN.match(path)
- if m == None:
+ if m is None:
raise UpdateError(u"Illegal agent directory: {0}".format(path))
version = m.group(1)
elif self.pkg is not None:
version = pkg.version
- if version == None:
+ if version is None:
raise UpdateError(u"Illegal agent version: {0}".format(version))
self.version = FlexibleVersion(version)
@@ -727,6 +727,13 @@ class GuestAgent(object):
if isinstance(e, ResourceGoneError):
raise
+ # The agent was improperly blacklisting versions due to a timeout
+ # encountered while downloading a later version. Errors of type
+ # socket.error are IOError, so this should provide sufficient
+ # protection against a large class of I/O operation failures.
+ if isinstance(e, IOError):
+ raise
+
# Note the failure, blacklist the agent if the package downloaded
# - An exception with a downloaded package indicates the package
# is corrupt (e.g., missing the HandlerManifest.json file)
diff --git a/azurelinuxagent/pa/provision/cloudinit.py b/azurelinuxagent/pa/provision/cloudinit.py
index 22c3f9ca..fa47799a 100644
--- a/azurelinuxagent/pa/provision/cloudinit.py
+++ b/azurelinuxagent/pa/provision/cloudinit.py
@@ -64,7 +64,7 @@ class CloudInitProvisionHandler(ProvisionHandler):
logger.info("Finished provisioning")
self.report_ready(thumbprint)
- self.report_event("Provision succeed",
+ self.report_event("Provisioning with cloud-init succeeded",
is_success=True,
duration=elapsed_milliseconds(utc_start))
self.report_event(self.create_guest_state_telemetry_messsage(),
diff --git a/azurelinuxagent/pa/provision/default.py b/azurelinuxagent/pa/provision/default.py
index 5d6f1565..44e171b4 100644
--- a/azurelinuxagent/pa/provision/default.py
+++ b/azurelinuxagent/pa/provision/default.py
@@ -89,7 +89,7 @@ class ProvisionHandler(object):
self.write_provisioned()
- self.report_event("Provision succeeded",
+ self.report_event("Provisioning succeeded",
is_success=True,
duration=elapsed_milliseconds(utc_start))
diff --git a/azurelinuxagent/pa/provision/factory.py b/azurelinuxagent/pa/provision/factory.py
index d87765f3..9e88618f 100644
--- a/azurelinuxagent/pa/provision/factory.py
+++ b/azurelinuxagent/pa/provision/factory.py
@@ -16,9 +16,7 @@
#
import azurelinuxagent.common.conf as conf
-import azurelinuxagent.common.logger as logger
-from azurelinuxagent.common.utils.textutil import Version
from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, \
DISTRO_FULL_NAME
| Socket timeout causes blacklist
Found in the logs; we need to handle this case more gracefully:
```
2017/11/17 11:47:16.626326 WARNING Agent WALinuxAgent-2.2.18 failed with exception: The read operation timed out
2017/11/17 11:47:16.705491 WARNING Traceback (most recent call last):
File "bin/WALinuxAgent-2.2.18-py2.7.egg/azurelinuxagent/ga/update.py", line 271, in run
exthandlers_handler.run()
File "bin/WALinuxAgent-2.2.18-py2.7.egg/azurelinuxagent/ga/exthandlers.py", line 198, in run
self.handle_ext_handlers(etag)
File "bin/WALinuxAgent-2.2.18-py2.7.egg/azurelinuxagent/ga/exthandlers.py", line 285, in handle_ext_handlers
self.handle_ext_handler(ext_handler, etag)
File "bin/WALinuxAgent-2.2.18-py2.7.egg/azurelinuxagent/ga/exthandlers.py", line 291, in handle_ext_handler
ext_handler_i.decide_version()
File "bin/WALinuxAgent-2.2.18-py2.7.egg/azurelinuxagent/ga/exthandlers.py", line 432, in decide_version
pkg_list = self.protocol.get_ext_handler_pkgs(self.ext_handler)
File "bin/WALinuxAgent-2.2.18-py2.7.egg/azurelinuxagent/common/protocol/wire.py", line 148, in get_ext_handler_pkgs
man = self.client.get_ext_manifest(ext_handler, goal_state)
File "bin/WALinuxAgent-2.2.18-py2.7.egg/azurelinuxagent/common/protocol/wire.py", line 809, in get_ext_manifest
xml_text = self.fetch_manifest(ext_handler.versionUris)
File "bin/WALinuxAgent-2.2.18-py2.7.egg/azurelinuxagent/common/protocol/wire.py", line 606, in fetch_manifest
response = self.fetch(version.uri)
File "bin/WALinuxAgent-2.2.18-py2.7.egg/azurelinuxagent/common/protocol/wire.py", line 653, in fetch
return self.decode_config(resp.read())
File "/usr/lib/python3.4/http/client.py", line 541, in read
s = self._safe_read(self.length)
File "/usr/lib/python3.4/http/client.py", line 691, in _safe_read
chunk = self.fp.read(min(amt, MAXAMOUNT))
File "/usr/lib/python3.4/socket.py", line 374, in readinto
return self._sock.recv_into(b)
File "/usr/lib/python3.4/ssl.py", line 769, in recv_into
return self.read(nbytes, buffer)
File "/usr/lib/python3.4/ssl.py", line 641, in read
v = self._sslobj.read(len, buffer)
socket.timeout: The read operation timed out
2017/11/17 11:47:16.784583 WARNING Agent WALinuxAgent-2.2.18 launched with command 'python3 -u bin/WALinuxAgent-2.2.18-py2.7.egg -run-exthandlers' returned code: 1
2017/11/17 11:47:16.785549 WARNING Agent WALinuxAgent-2.2.18 is permanently blacklisted
```
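For context, a minimal sketch of the guard that would avoid this (hedged: `download` and `blacklist` below are hypothetical stand-ins for the agent's real download and blacklisting paths, which the traceback only hints at):

```python
def ensure_downloaded_safely(download, blacklist):
    """Sketch: never blacklist an agent version over a transient I/O error."""
    try:
        download()
    except IOError:
        # socket.timeout subclasses OSError/IOError on Python 3 (and
        # socket.error is an IOError on Python 2), so re-raising here keeps
        # "The read operation timed out" from marking the package as bad.
        raise
    except Exception:
        # Anything else suggests a genuinely corrupt or unusable package.
        blacklist()
        raise
```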
This type of timeout should not be causing a blacklist. | Azure/WALinuxAgent | diff --git a/tests/ga/test_update.py b/tests/ga/test_update.py
index 88535f5b..a40278c7 100644
--- a/tests/ga/test_update.py
+++ b/tests/ga/test_update.py
@@ -387,6 +387,30 @@ class TestGuestAgent(UpdateTestCase):
self.assertTrue(agent.is_blacklisted)
self.assertEqual(agent.is_blacklisted, agent.error.is_blacklisted)
+ @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded")
+ @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded")
+ def test_resource_gone_error_not_blacklisted(self, mock_loaded, mock_downloaded):
+ try:
+ mock_downloaded.side_effect = ResourceGoneError()
+ agent = GuestAgent(path=self.agent_path)
+ self.assertFalse(agent.is_blacklisted)
+ except ResourceGoneError:
+ pass
+ except:
+ self.fail("Exception was not expected!")
+
+ @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded")
+ @patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded")
+ def test_ioerror_not_blacklisted(self, mock_loaded, mock_downloaded):
+ try:
+ mock_downloaded.side_effect = IOError()
+ agent = GuestAgent(path=self.agent_path)
+ self.assertFalse(agent.is_blacklisted)
+ except IOError:
+ pass
+ except:
+ self.fail("Exception was not expected!")
+
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded")
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded")
def test_is_downloaded(self, mock_loaded, mock_downloaded):
@@ -1550,6 +1574,7 @@ class ProtocolMock(object):
self.call_counts["update_goal_state"] += 1
self.goal_state_forced = self.goal_state_forced or forced
+
class ResponseMock(Mock):
def __init__(self, status=restutil.httpclient.OK, response=None, reason=None):
Mock.__init__(self)
@@ -1579,5 +1604,6 @@ class TimeMock(Mock):
self.next_time += self.time_increment
return current_time
+
if __name__ == '__main__':
unittest.main()
diff --git a/tests/pa/test_provision.py b/tests/pa/test_provision.py
index ab0a9102..2c2d2c9b 100644
--- a/tests/pa/test_provision.py
+++ b/tests/pa/test_provision.py
@@ -151,7 +151,7 @@ class TestProvision(AgentTestCase):
ph.run()
- call1 = call("Provision succeeded", duration=ANY, is_success=True)
+ call1 = call("Provisioning succeeded", duration=ANY, is_success=True)
call2 = call(ANY, is_success=True, operation=WALAEventOperation.GuestState)
ph.report_event.assert_has_calls([call1, call2])
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 8
} | 2.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pyasn1",
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.4",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
nose==1.3.7
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyasn1==0.5.1
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
-e git+https://github.com/Azure/WALinuxAgent.git@30e638ddab04bd4ec473fe8369a86f64e717776e#egg=WALinuxAgent
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: WALinuxAgent
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- nose==1.3.7
- pyasn1==0.5.1
prefix: /opt/conda/envs/WALinuxAgent
| [
"tests/ga/test_update.py::TestGuestAgent::test_ioerror_not_blacklisted",
"tests/pa/test_provision.py::TestProvision::test_provision_telemetry_success"
]
| []
| [
"tests/ga/test_update.py::TestGuestAgentError::test_clear",
"tests/ga/test_update.py::TestGuestAgentError::test_creation",
"tests/ga/test_update.py::TestGuestAgentError::test_mark_failure",
"tests/ga/test_update.py::TestGuestAgentError::test_mark_failure_permanent",
"tests/ga/test_update.py::TestGuestAgentError::test_save",
"tests/ga/test_update.py::TestGuestAgentError::test_str",
"tests/ga/test_update.py::TestGuestAgent::test_clear_error",
"tests/ga/test_update.py::TestGuestAgent::test_creation",
"tests/ga/test_update.py::TestGuestAgent::test_download",
"tests/ga/test_update.py::TestGuestAgent::test_download_fail",
"tests/ga/test_update.py::TestGuestAgent::test_download_fallback",
"tests/ga/test_update.py::TestGuestAgent::test_ensure_download_skips_blacklisted",
"tests/ga/test_update.py::TestGuestAgent::test_ensure_downloaded",
"tests/ga/test_update.py::TestGuestAgent::test_ensure_downloaded_download_fails",
"tests/ga/test_update.py::TestGuestAgent::test_ensure_downloaded_load_manifest_fails",
"tests/ga/test_update.py::TestGuestAgent::test_ensure_downloaded_unpack_fails",
"tests/ga/test_update.py::TestGuestAgent::test_is_available",
"tests/ga/test_update.py::TestGuestAgent::test_is_blacklisted",
"tests/ga/test_update.py::TestGuestAgent::test_is_downloaded",
"tests/ga/test_update.py::TestGuestAgent::test_load_error",
"tests/ga/test_update.py::TestGuestAgent::test_load_manifest",
"tests/ga/test_update.py::TestGuestAgent::test_load_manifest_is_empty",
"tests/ga/test_update.py::TestGuestAgent::test_load_manifest_is_malformed",
"tests/ga/test_update.py::TestGuestAgent::test_load_manifest_missing",
"tests/ga/test_update.py::TestGuestAgent::test_mark_failure",
"tests/ga/test_update.py::TestGuestAgent::test_resource_gone_error_not_blacklisted",
"tests/ga/test_update.py::TestGuestAgent::test_unpack",
"tests/ga/test_update.py::TestGuestAgent::test_unpack_fail",
"tests/ga/test_update.py::TestUpdate::test_creation",
"tests/ga/test_update.py::TestUpdate::test_emit_restart_event_emits_event_if_not_clean_start",
"tests/ga/test_update.py::TestUpdate::test_emit_restart_event_writes_sentinal_file",
"tests/ga/test_update.py::TestUpdate::test_ensure_no_orphans",
"tests/ga/test_update.py::TestUpdate::test_ensure_no_orphans_ignores_exceptions",
"tests/ga/test_update.py::TestUpdate::test_ensure_no_orphans_kills_after_interval",
"tests/ga/test_update.py::TestUpdate::test_ensure_no_orphans_skips_if_no_orphans",
"tests/ga/test_update.py::TestUpdate::test_ensure_partition_assigned",
"tests/ga/test_update.py::TestUpdate::test_ensure_readonly_leaves_unmodified",
"tests/ga/test_update.py::TestUpdate::test_ensure_readonly_sets_readonly",
"tests/ga/test_update.py::TestUpdate::test_evaluate_agent_health_ignores_installed_agent",
"tests/ga/test_update.py::TestUpdate::test_evaluate_agent_health_raises_exception_for_restarting_agent",
"tests/ga/test_update.py::TestUpdate::test_evaluate_agent_health_resets_with_new_agent",
"tests/ga/test_update.py::TestUpdate::test_evaluate_agent_health_will_not_raise_exception_for_long_restarts",
"tests/ga/test_update.py::TestUpdate::test_evaluate_agent_health_will_not_raise_exception_too_few_restarts",
"tests/ga/test_update.py::TestUpdate::test_filter_blacklisted_agents",
"tests/ga/test_update.py::TestUpdate::test_find_agents",
"tests/ga/test_update.py::TestUpdate::test_find_agents_does_reload",
"tests/ga/test_update.py::TestUpdate::test_find_agents_sorts",
"tests/ga/test_update.py::TestUpdate::test_get_host_plugin_returns_host_for_wireserver",
"tests/ga/test_update.py::TestUpdate::test_get_host_plugin_returns_none_otherwise",
"tests/ga/test_update.py::TestUpdate::test_get_latest_agent",
"tests/ga/test_update.py::TestUpdate::test_get_latest_agent_excluded",
"tests/ga/test_update.py::TestUpdate::test_get_latest_agent_no_updates",
"tests/ga/test_update.py::TestUpdate::test_get_latest_agent_skip_updates",
"tests/ga/test_update.py::TestUpdate::test_get_latest_agent_skips_unavailable",
"tests/ga/test_update.py::TestUpdate::test_get_pid_files",
"tests/ga/test_update.py::TestUpdate::test_get_pid_files_returns_previous",
"tests/ga/test_update.py::TestUpdate::test_is_clean_start_returns_false_for_exceptions",
"tests/ga/test_update.py::TestUpdate::test_is_clean_start_returns_false_when_sentinal_exists",
"tests/ga/test_update.py::TestUpdate::test_is_clean_start_returns_true_when_no_sentinal",
"tests/ga/test_update.py::TestUpdate::test_is_orphaned_returns_false_if_parent_exists",
"tests/ga/test_update.py::TestUpdate::test_is_orphaned_returns_true_if_parent_does_not_exist",
"tests/ga/test_update.py::TestUpdate::test_is_orphaned_returns_true_if_parent_is_init",
"tests/ga/test_update.py::TestUpdate::test_is_version_available",
"tests/ga/test_update.py::TestUpdate::test_is_version_available_accepts_current",
"tests/ga/test_update.py::TestUpdate::test_is_version_available_rejects",
"tests/ga/test_update.py::TestUpdate::test_is_version_available_rejects_by_default",
"tests/ga/test_update.py::TestUpdate::test_package_filter_for_agent_manifest",
"tests/ga/test_update.py::TestUpdate::test_purge_agents",
"tests/ga/test_update.py::TestUpdate::test_run",
"tests/ga/test_update.py::TestUpdate::test_run_clears_sentinal_on_successful_exit",
"tests/ga/test_update.py::TestUpdate::test_run_emits_restart_event",
"tests/ga/test_update.py::TestUpdate::test_run_keeps_running",
"tests/ga/test_update.py::TestUpdate::test_run_latest",
"tests/ga/test_update.py::TestUpdate::test_run_latest_captures_signals",
"tests/ga/test_update.py::TestUpdate::test_run_latest_creates_only_one_signal_handler",
"tests/ga/test_update.py::TestUpdate::test_run_latest_defaults_to_current",
"tests/ga/test_update.py::TestUpdate::test_run_latest_exception_blacklists",
"tests/ga/test_update.py::TestUpdate::test_run_latest_exception_does_not_blacklist_if_terminating",
"tests/ga/test_update.py::TestUpdate::test_run_latest_forwards_output",
"tests/ga/test_update.py::TestUpdate::test_run_latest_nonzero_code_marks_failures",
"tests/ga/test_update.py::TestUpdate::test_run_latest_passes_child_args",
"tests/ga/test_update.py::TestUpdate::test_run_latest_polling_stops_at_failure",
"tests/ga/test_update.py::TestUpdate::test_run_latest_polling_stops_at_success",
"tests/ga/test_update.py::TestUpdate::test_run_latest_polls_and_waits_for_success",
"tests/ga/test_update.py::TestUpdate::test_run_latest_polls_frequently_if_installed_is_latest",
"tests/ga/test_update.py::TestUpdate::test_run_latest_polls_moderately_if_installed_not_latest",
"tests/ga/test_update.py::TestUpdate::test_run_leaves_sentinal_on_unsuccessful_exit",
"tests/ga/test_update.py::TestUpdate::test_run_stops_if_orphaned",
"tests/ga/test_update.py::TestUpdate::test_run_stops_if_update_available",
"tests/ga/test_update.py::TestUpdate::test_set_agents_sets_agents",
"tests/ga/test_update.py::TestUpdate::test_set_agents_sorts_agents",
"tests/ga/test_update.py::TestUpdate::test_set_sentinal",
"tests/ga/test_update.py::TestUpdate::test_set_sentinal_writes_current_agent",
"tests/ga/test_update.py::TestUpdate::test_shutdown",
"tests/ga/test_update.py::TestUpdate::test_shutdown_ignores_exceptions",
"tests/ga/test_update.py::TestUpdate::test_shutdown_ignores_missing_sentinal_file",
"tests/ga/test_update.py::TestUpdate::test_update_available_returns_true_if_current_gets_blacklisted",
"tests/ga/test_update.py::TestUpdate::test_upgrade_available_handles_missing_family",
"tests/ga/test_update.py::TestUpdate::test_upgrade_available_includes_old_agents",
"tests/ga/test_update.py::TestUpdate::test_upgrade_available_purges_old_agents",
"tests/ga/test_update.py::TestUpdate::test_upgrade_available_returns_true_on_first_use",
"tests/ga/test_update.py::TestUpdate::test_upgrade_available_skips_if_too_frequent",
"tests/ga/test_update.py::TestUpdate::test_upgrade_available_skips_if_when_no_new_versions",
"tests/ga/test_update.py::TestUpdate::test_upgrade_available_skips_when_no_versions",
"tests/ga/test_update.py::TestUpdate::test_upgrade_available_skips_when_updates_are_disabled",
"tests/ga/test_update.py::TestUpdate::test_upgrade_available_sorts",
"tests/ga/test_update.py::TestUpdate::test_upgrade_available_will_refresh_goal_state",
"tests/ga/test_update.py::TestUpdate::test_write_pid_file",
"tests/ga/test_update.py::TestUpdate::test_write_pid_file_ignores_exceptions",
"tests/pa/test_provision.py::TestProvision::test_customdata",
"tests/pa/test_provision.py::TestProvision::test_is_provisioned_is_provisioned",
"tests/pa/test_provision.py::TestProvision::test_is_provisioned_not_deprovisioned",
"tests/pa/test_provision.py::TestProvision::test_is_provisioned_not_provisioned",
"tests/pa/test_provision.py::TestProvision::test_provision",
"tests/pa/test_provision.py::TestProvision::test_provision_telemetry_fail",
"tests/pa/test_provision.py::TestProvision::test_provisioning_is_skipped_when_not_enabled"
]
| []
| Apache License 2.0 | 2,008 | [
"azurelinuxagent/pa/provision/cloudinit.py",
"azurelinuxagent/ga/update.py",
"azurelinuxagent/ga/env.py",
"azurelinuxagent/common/protocol/restapi.py",
"azurelinuxagent/common/protocol/wire.py",
"azurelinuxagent/pa/provision/default.py",
"azurelinuxagent/ga/monitor.py",
"azurelinuxagent/pa/provision/factory.py"
]
| [
"azurelinuxagent/pa/provision/cloudinit.py",
"azurelinuxagent/ga/update.py",
"azurelinuxagent/ga/env.py",
"azurelinuxagent/common/protocol/restapi.py",
"azurelinuxagent/common/protocol/wire.py",
"azurelinuxagent/pa/provision/default.py",
"azurelinuxagent/ga/monitor.py",
"azurelinuxagent/pa/provision/factory.py"
]
|
|
google__mobly-380 | 7e5e62af4ab4537bf619f0ee403c05f004c5baf0 | 2018-01-04 19:45:31 | 7e5e62af4ab4537bf619f0ee403c05f004c5baf0 | dthkao: :lgtm:
| diff --git a/mobly/controllers/android_device.py b/mobly/controllers/android_device.py
index e4f05de..4eee55b 100644
--- a/mobly/controllers/android_device.py
+++ b/mobly/controllers/android_device.py
@@ -421,7 +421,7 @@ class AndroidDevice(object):
"""
def __init__(self, serial=''):
- self._serial = serial
+ self._serial = str(serial)
# logging.log_path only exists when this is used in an Mobly test run.
self._log_path_base = getattr(logging, 'log_path', '/tmp/logs')
self._log_path = os.path.join(self._log_path_base,
@@ -570,6 +570,7 @@ class AndroidDevice(object):
Raises:
DeviceError: tries to update serial when any service is running.
"""
+ new_serial = str(new_serial)
if self.has_active_service:
raise DeviceError(
self,
| adb call fails if numeric serial numbers are used
If users use devices whose serials are entirely numeric and do not wrap the serial with quotation mark, we get:
```
Traceback (most recent call last):
File ".../mobly/test_runner.py", line 420, in _register_controller
objects = create(controller_config)
File ".../mobly/controllers/android_device.py", line 87, in create
ads = get_instances_with_configs(configs)
File ".../mobly/controllers/android_device.py", line 257, in get_instances_with_configs
ad = AndroidDevice(serial)
File ".../mobly/controllers/android_device.py", line 402, in __init__
if not self.is_bootloader and self.is_rootable:
File ".../mobly/controllers/android_device.py", line 637, in is_rootable
build_type_output = self.adb.getprop('ro.build.type').lower()
File ".../mobly/controllers/android_device_lib/adb.py", line 175, in getprop
return self.shell('getprop %s' % prop_name).decode('utf-8').strip()
File ".../mobly/controllers/android_device_lib/adb.py", line 199, in adb_call
clean_name, args, shell=shell, timeout=timeout)
File ".../mobly/controllers/android_device_lib/adb.py", line 161, in _exec_adb_cmd
return self._exec_cmd(adb_cmd, shell=shell, timeout=timeout)
File ".../mobly/controllers/android_device_lib/adb.py", line 122, in _exec_cmd
args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell)
File "/usr/lib/python2.7/subprocess.py", line 711, in __init__
errread, errwrite)
File "/usr/lib/python2.7/subprocess.py", line 1343, in _execute_child
raise child_exception
TypeError: execv() arg 2 must contain only strings
```
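The root cause is that an unquoted numeric serial reaches the controller as an `int`, while `subprocess` requires every argv entry to be a string. A minimal sketch of the failure and the obvious coercion (the standalone function below is illustrative, not mobly's actual API):

```python
import subprocess

def adb_getprop(serial, prop):
    # Passing an int in argv (e.g. ['adb', '-s', 123456, ...]) triggers the
    # "execv() arg 2 must contain only strings" TypeError seen above, so
    # coerce the serial to str before building the command.
    serial = str(serial)
    return subprocess.check_output(['adb', '-s', serial, 'getprop', prop])
```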
| google/mobly | diff --git a/tests/mobly/controllers/android_device_test.py b/tests/mobly/controllers/android_device_test.py
index e57db60..f9c6cf3 100755
--- a/tests/mobly/controllers/android_device_test.py
+++ b/tests/mobly/controllers/android_device_test.py
@@ -233,17 +233,17 @@ class AndroidDeviceTest(unittest.TestCase):
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
def test_AndroidDevice_instantiation(self, MockFastboot, MockAdbProxy):
"""Verifies the AndroidDevice object's basic attributes are correctly
set after instantiation.
"""
mock_serial = 1
ad = android_device.AndroidDevice(serial=mock_serial)
- self.assertEqual(ad.serial, 1)
+ self.assertEqual(ad.serial, '1')
self.assertEqual(ad.model, 'fakemodel')
self.assertIsNone(ad._adb_logcat_process)
self.assertIsNone(ad.adb_logcat_file_path)
@@ -253,29 +253,29 @@ class AndroidDeviceTest(unittest.TestCase):
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
def test_AndroidDevice_build_info(self, MockFastboot, MockAdbProxy):
"""Verifies the AndroidDevice object's basic attributes are correctly
set after instantiation.
"""
- ad = android_device.AndroidDevice(serial=1)
+ ad = android_device.AndroidDevice(serial='1')
build_info = ad.build_info
self.assertEqual(build_info['build_id'], 'AB42')
self.assertEqual(build_info['build_type'], 'userdebug')
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
def test_AndroidDevice_device_info(self, MockFastboot, MockAdbProxy):
ad = android_device.AndroidDevice(serial=1)
device_info = ad.device_info
- self.assertEqual(device_info['serial'], 1)
+ self.assertEqual(device_info['serial'], '1')
self.assertEqual(device_info['model'], 'fakemodel')
self.assertEqual(device_info['build_info']['build_id'], 'AB42')
self.assertEqual(device_info['build_info']['build_type'], 'userdebug')
@@ -297,7 +297,7 @@ class AndroidDeviceTest(unittest.TestCase):
"""Verifies AndroidDevice.take_bug_report calls the correct adb command
and writes the bugreport file to the correct path.
"""
- mock_serial = 1
+ mock_serial = '1'
ad = android_device.AndroidDevice(serial=mock_serial)
ad.take_bug_report('test_something', 'sometime')
expected_path = os.path.join(
@@ -306,17 +306,17 @@ class AndroidDeviceTest(unittest.TestCase):
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1, fail_br=True))
+ return_value=mock_android_device.MockAdbProxy('1', fail_br=True))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch('mobly.utils.create_dir')
def test_AndroidDevice_take_bug_report_fail(self, create_dir_mock,
FastbootProxy, MockAdbProxy):
"""Verifies AndroidDevice.take_bug_report writes out the correct message
when taking bugreport fails.
"""
- mock_serial = 1
+ mock_serial = '1'
ad = android_device.AndroidDevice(serial=mock_serial)
expected_msg = '.* Failed to take bugreport.'
with self.assertRaisesRegex(android_device.Error, expected_msg):
@@ -324,14 +324,14 @@ class AndroidDeviceTest(unittest.TestCase):
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch('mobly.utils.create_dir')
def test_AndroidDevice_take_bug_report_with_destination(
self, create_dir_mock, FastbootProxy, MockAdbProxy):
- mock_serial = 1
+ mock_serial = '1'
ad = android_device.AndroidDevice(serial=mock_serial)
dest = tempfile.gettempdir()
ad.take_bug_report("test_something", "sometime", destination=dest)
@@ -341,17 +341,17 @@ class AndroidDeviceTest(unittest.TestCase):
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
return_value=mock_android_device.MockAdbProxy(
- 1, fail_br_before_N=True))
+ '1', fail_br_before_N=True))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch('mobly.utils.create_dir')
def test_AndroidDevice_take_bug_report_fallback(
self, create_dir_mock, FastbootProxy, MockAdbProxy):
"""Verifies AndroidDevice.take_bug_report falls back to traditional
bugreport on builds that do not have bugreportz.
"""
- mock_serial = 1
+ mock_serial = '1'
ad = android_device.AndroidDevice(serial=mock_serial)
ad.take_bug_report('test_something', 'sometime')
expected_path = os.path.join(
@@ -360,10 +360,10 @@ class AndroidDeviceTest(unittest.TestCase):
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch('mobly.utils.create_dir')
@mock.patch(
'mobly.utils.start_standing_subprocess', return_value='process')
@@ -375,7 +375,7 @@ class AndroidDeviceTest(unittest.TestCase):
object, including various function calls and the expected behaviors of
the calls.
"""
- mock_serial = 1
+ mock_serial = '1'
ad = android_device.AndroidDevice(serial=mock_serial)
expected_msg = '.* No ongoing adb logcat collection found.'
# Expect error if stop is called before start.
@@ -406,10 +406,10 @@ class AndroidDeviceTest(unittest.TestCase):
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch('mobly.utils.create_dir')
@mock.patch(
'mobly.utils.start_standing_subprocess', return_value='process')
@@ -421,7 +421,7 @@ class AndroidDeviceTest(unittest.TestCase):
object, including various function calls and the expected behaviors of
the calls.
"""
- mock_serial = 1
+ mock_serial = '1'
ad = android_device.AndroidDevice(serial=mock_serial)
ad.adb_logcat_param = '-b radio'
expected_msg = '.* No ongoing adb logcat collection found.'
@@ -442,17 +442,17 @@ class AndroidDeviceTest(unittest.TestCase):
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch(
'mobly.utils.start_standing_subprocess', return_value='process')
@mock.patch('mobly.utils.stop_standing_subprocess')
def test_AndroidDevice_change_log_path(self, stop_proc_mock,
start_proc_mock, FastbootProxy,
MockAdbProxy):
- ad = android_device.AndroidDevice(serial=1)
+ ad = android_device.AndroidDevice(serial='1')
ad.start_adb_logcat()
ad.stop_adb_logcat()
old_path = ad.log_path
@@ -463,17 +463,17 @@ class AndroidDeviceTest(unittest.TestCase):
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch(
'mobly.utils.start_standing_subprocess', return_value='process')
@mock.patch('mobly.utils.stop_standing_subprocess')
def test_AndroidDevice_change_log_path_no_log_exists(
self, stop_proc_mock, start_proc_mock, FastbootProxy,
MockAdbProxy):
- ad = android_device.AndroidDevice(serial=1)
+ ad = android_device.AndroidDevice(serial='1')
old_path = ad.log_path
new_log_path = tempfile.mkdtemp()
ad.log_path = new_log_path
@@ -482,10 +482,10 @@ class AndroidDeviceTest(unittest.TestCase):
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch('mobly.utils.create_dir')
@mock.patch(
'mobly.utils.start_standing_subprocess', return_value='process')
@@ -493,7 +493,7 @@ class AndroidDeviceTest(unittest.TestCase):
def test_AndroidDevice_change_log_path_with_service(
self, stop_proc_mock, start_proc_mock, creat_dir_mock,
FastbootProxy, MockAdbProxy):
- ad = android_device.AndroidDevice(serial=1)
+ ad = android_device.AndroidDevice(serial='1')
ad.start_adb_logcat()
new_log_path = tempfile.mkdtemp()
expected_msg = '.* Cannot change `log_path` when there is service running.'
@@ -502,10 +502,10 @@ class AndroidDeviceTest(unittest.TestCase):
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch('mobly.utils.create_dir')
@mock.patch(
'mobly.utils.start_standing_subprocess', return_value='process')
@@ -513,7 +513,7 @@ class AndroidDeviceTest(unittest.TestCase):
def test_AndroidDevice_change_log_path_with_existing_file(
self, stop_proc_mock, start_proc_mock, creat_dir_mock,
FastbootProxy, MockAdbProxy):
- ad = android_device.AndroidDevice(serial=1)
+ ad = android_device.AndroidDevice(serial='1')
new_log_path = tempfile.mkdtemp()
with open(os.path.join(new_log_path, 'file.txt'), 'w') as f:
f.write('hahah.')
@@ -523,10 +523,10 @@ class AndroidDeviceTest(unittest.TestCase):
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch('mobly.utils.create_dir')
@mock.patch(
'mobly.utils.start_standing_subprocess', return_value='process')
@@ -534,19 +534,19 @@ class AndroidDeviceTest(unittest.TestCase):
def test_AndroidDevice_update_serial(self, stop_proc_mock, start_proc_mock,
creat_dir_mock, FastbootProxy,
MockAdbProxy):
- ad = android_device.AndroidDevice(serial=1)
- ad.update_serial(2)
- self.assertEqual(ad.serial, 2)
+ ad = android_device.AndroidDevice(serial='1')
+ ad.update_serial('2')
+ self.assertEqual(ad.serial, '2')
self.assertEqual(ad.debug_tag, ad.serial)
self.assertEqual(ad.adb.serial, ad.serial)
self.assertEqual(ad.fastboot.serial, ad.serial)
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch('mobly.utils.create_dir')
@mock.patch(
'mobly.utils.start_standing_subprocess', return_value='process')
@@ -554,18 +554,18 @@ class AndroidDeviceTest(unittest.TestCase):
def test_AndroidDevice_update_serial_with_service_running(
self, stop_proc_mock, start_proc_mock, creat_dir_mock,
FastbootProxy, MockAdbProxy):
- ad = android_device.AndroidDevice(serial=1)
+ ad = android_device.AndroidDevice(serial='1')
ad.start_adb_logcat()
expected_msg = '.* Cannot change device serial number when there is service running.'
with self.assertRaisesRegex(android_device.Error, expected_msg):
- ad.update_serial(2)
+ ad.update_serial('2')
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch(
'mobly.utils.start_standing_subprocess', return_value='process')
@mock.patch('mobly.utils.stop_standing_subprocess')
@@ -579,7 +579,7 @@ class AndroidDeviceTest(unittest.TestCase):
file, locates the correct adb log lines within the given time range,
and writes the lines to the correct output file.
"""
- mock_serial = 1
+ mock_serial = '1'
ad = android_device.AndroidDevice(serial=mock_serial)
# Direct the log path of the ad to a temp dir to avoid racing.
ad._log_path_base = self.tmp_dir
@@ -606,32 +606,32 @@ class AndroidDeviceTest(unittest.TestCase):
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.snippet_client.SnippetClient')
@mock.patch('mobly.utils.get_available_host_port')
def test_AndroidDevice_load_snippet(self, MockGetPort, MockSnippetClient,
MockFastboot, MockAdbProxy):
- ad = android_device.AndroidDevice(serial=1)
+ ad = android_device.AndroidDevice(serial='1')
ad.load_snippet('snippet', MOCK_SNIPPET_PACKAGE_NAME)
self.assertTrue(hasattr(ad, 'snippet'))
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.snippet_client.SnippetClient',
return_value=MockSnippetClient)
@mock.patch('mobly.utils.get_available_host_port')
def test_AndroidDevice_load_snippet_dup_package(
self, MockGetPort, MockSnippetClient, MockFastboot, MockAdbProxy):
- ad = android_device.AndroidDevice(serial=1)
+ ad = android_device.AndroidDevice(serial='1')
ad.load_snippet('snippet', MOCK_SNIPPET_PACKAGE_NAME)
expected_msg = ('Snippet package "%s" has already been loaded under '
'name "snippet".') % MOCK_SNIPPET_PACKAGE_NAME
@@ -640,17 +640,17 @@ class AndroidDeviceTest(unittest.TestCase):
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.snippet_client.SnippetClient',
return_value=MockSnippetClient)
@mock.patch('mobly.utils.get_available_host_port')
def test_AndroidDevice_load_snippet_dup_snippet_name(
self, MockGetPort, MockSnippetClient, MockFastboot, MockAdbProxy):
- ad = android_device.AndroidDevice(serial=1)
+ ad = android_device.AndroidDevice(serial='1')
ad.load_snippet('snippet', MOCK_SNIPPET_PACKAGE_NAME)
expected_msg = ('Attribute "%s" is already registered with package '
'"%s", it cannot be used again.') % (
@@ -660,16 +660,16 @@ class AndroidDeviceTest(unittest.TestCase):
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.snippet_client.SnippetClient')
@mock.patch('mobly.utils.get_available_host_port')
def test_AndroidDevice_load_snippet_dup_attribute_name(
self, MockGetPort, MockSnippetClient, MockFastboot, MockAdbProxy):
- ad = android_device.AndroidDevice(serial=1)
+ ad = android_device.AndroidDevice(serial='1')
expected_msg = ('Attribute "%s" already exists, please use a different'
' name') % 'adb'
with self.assertRaisesRegex(android_device.Error, expected_msg):
@@ -677,10 +677,10 @@ class AndroidDeviceTest(unittest.TestCase):
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.snippet_client.SnippetClient')
@mock.patch('mobly.utils.get_available_host_port')
@@ -697,7 +697,7 @@ class AndroidDeviceTest(unittest.TestCase):
side_effect=expected_e)
MockSnippetClient.stop_app = mock.Mock(
side_effect=Exception('stop failed.'))
- ad = android_device.AndroidDevice(serial=1)
+ ad = android_device.AndroidDevice(serial='1')
try:
ad.load_snippet('snippet', MOCK_SNIPPET_PACKAGE_NAME)
except Exception as e:
@@ -705,30 +705,30 @@ class AndroidDeviceTest(unittest.TestCase):
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.snippet_client.SnippetClient')
@mock.patch('mobly.utils.get_available_host_port')
def test_AndroidDevice_snippet_cleanup(
self, MockGetPort, MockSnippetClient, MockFastboot, MockAdbProxy):
- ad = android_device.AndroidDevice(serial=1)
+ ad = android_device.AndroidDevice(serial='1')
ad.load_snippet('snippet', MOCK_SNIPPET_PACKAGE_NAME)
ad.stop_services()
self.assertFalse(hasattr(ad, 'snippet'))
@mock.patch(
'mobly.controllers.android_device_lib.adb.AdbProxy',
- return_value=mock_android_device.MockAdbProxy(1))
+ return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch(
'mobly.controllers.android_device_lib.fastboot.FastbootProxy',
- return_value=mock_android_device.MockFastbootProxy(1))
+ return_value=mock_android_device.MockFastbootProxy('1'))
def test_AndroidDevice_debug_tag(self, MockFastboot, MockAdbProxy):
- mock_serial = 1
+ mock_serial = '1'
ad = android_device.AndroidDevice(serial=mock_serial)
- self.assertEqual(ad.debug_tag, 1)
+ self.assertEqual(ad.debug_tag, '1')
try:
raise android_device.DeviceError(ad, 'Something')
except android_device.DeviceError as e:
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 1.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
future==1.0.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
-e git+https://github.com/google/mobly.git@7e5e62af4ab4537bf619f0ee403c05f004c5baf0#egg=mobly
mock==1.0.1
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
portpicker==1.6.0
psutil==7.0.0
pytest @ file:///croot/pytest_1738938843180/work
pytz==2025.2
PyYAML==6.0.2
timeout-decorator==0.5.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: mobly
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- future==1.0.0
- mock==1.0.1
- portpicker==1.6.0
- psutil==7.0.0
- pytz==2025.2
- pyyaml==6.0.2
- timeout-decorator==0.5.0
prefix: /opt/conda/envs/mobly
| [
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_device_info",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_instantiation"
]
| []
| [
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_build_info",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_cat_adb_log",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_change_log_path",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_change_log_path_no_log_exists",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_change_log_path_with_existing_file",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_change_log_path_with_service",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_debug_tag",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet_dup_attribute_name",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet_dup_package",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet_dup_snippet_name",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_load_snippet_start_app_fails",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_snippet_cleanup",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_bug_report",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_bug_report_fail",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_bug_report_fallback",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_bug_report_with_destination",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_logcat",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_take_logcat_with_user_param",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_update_serial",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_AndroidDevice_update_serial_with_service_running",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_dict_list",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_empty_config",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_no_valid_config",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_not_list_config",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_pickup_all",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_string_list",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_create_with_usb_id",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_device_no_match",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_device_success_with_serial",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_device_success_with_serial_and_extra_field",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_device_too_many_matches",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_devices_no_match",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_get_devices_success_with_extra_field",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_start_services_on_ads",
"tests/mobly/controllers/android_device_test.py::AndroidDeviceTest::test_start_services_on_ads_skip_logcat"
]
| []
| Apache License 2.0 | 2,009 | [
"mobly/controllers/android_device.py"
]
| [
"mobly/controllers/android_device.py"
]
|
nylas__nylas-python-98 | 068a626706188eea00b606608ab06c9b9ea307fb | 2018-01-04 22:28:49 | 068a626706188eea00b606608ab06c9b9ea307fb | diff --git a/nylas/client/restful_models.py b/nylas/client/restful_models.py
index d8d1c98..7f31653 100644
--- a/nylas/client/restful_models.py
+++ b/nylas/client/restful_models.py
@@ -387,18 +387,27 @@ class File(NylasAPIObject):
collection_name = 'files'
def save(self): # pylint: disable=arguments-differ
- if hasattr(self, 'stream') and self.stream is not None:
- data = {self.filename: self.stream}
- elif hasattr(self, 'data') and self.data is not None:
- data = {self.filename: StringIO(self.data)}
- else:
+ stream = getattr(self, "stream", None)
+ if not stream:
+ data = getattr(self, "data", None)
+ if data:
+ stream = StringIO(data)
+
+ if not stream:
message = (
"File object not properly formatted, "
"must provide either a stream or data."
)
raise FileUploadError(message=message)
- new_obj = self.api._create_resources(File, data)
+ file_info = (
+ self.filename,
+ stream,
+ self.content_type,
+ {}, # upload headers
+ )
+
+ new_obj = self.api._create_resources(File, {"file": file_info})
new_obj = new_obj[0]
for attr in self.attrs:
if hasattr(new_obj, attr):
| File.filename doesn't apply when type is stream
Setting a filename on an attachment only works when using `.data`. When `.stream` is used, it falls back to the name of the file on the uploading system.
```
myfile = self.nylas_client.files.create()
myfile.content_type = 'application/pdf'
myfile.filename = attachment_name
with open(attachment_path, 'rb') as f:
myfile.stream = f
myfile.save()
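# Editor's note (a hedged reading, not stated in the original report): save()
# builds a multipart upload from `myfile.stream`, and the HTTP layer derives
# that part's filename from the stream's `.name` attribute, so the filename
# set above is ignored for stream uploads; the server keeps the basename of
# `attachment_path`. The re-set below only changes the local object.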
myfile.filename = attachment_name
# Create a new draft
draft = self.nylas_client.drafts.create()
if isinstance(recipients, str):
recipients = [recipients]
draft.to = [{'email': recipient} for recipient in recipients]
draft.subject = subject
draft.body = message
draft.attach(myfile)
``` | nylas/nylas-python | diff --git a/tests/conftest.py b/tests/conftest.py
index f8fb9ad..4e8eb72 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,6 +1,7 @@
import re
import json
import copy
+import cgi
import random
import string
import pytest
@@ -674,24 +675,59 @@ def mock_draft_sent_response(mocked_responses, api_url):
@pytest.fixture
-def mock_files(mocked_responses, api_url):
- body = [{
- "content_type": "text/plain",
- "filename": "a.txt",
- "id": "3qfe4k3siosfjtjpfdnon8zbn",
- "account_id": "6aakaxzi4j5gn6f7kbb9e0fxs",
- "object": "file",
- "size": 762878
- }]
+def mock_files(mocked_responses, api_url, account_id):
+ files_content = {
+ "3qfe4k3siosfjtjpfdnon8zbn": b"Hello, World!",
+ }
+ files_metadata = {
+ "3qfe4k3siosfjtjpfdnon8zbn": {
+ "id": "3qfe4k3siosfjtjpfdnon8zbn",
+ "content_type": "text/plain",
+ "filename": "hello.txt",
+ "account_id": account_id,
+ "object": "file",
+ "size": len(files_content["3qfe4k3siosfjtjpfdnon8zbn"])
+ }
+ }
mocked_responses.add(
+ responses.GET,
+ api_url + '/files',
+ body=json.dumps(list(files_metadata.values())),
+ )
+ for file_id in files_content:
+ mocked_responses.add(
+ responses.POST,
+ "{base}/files/{file_id}".format(base=api_url, file_id=file_id),
+ body=json.dumps(files_metadata[file_id]),
+ )
+ mocked_responses.add(
+ responses.GET,
+ "{base}/files/{file_id}/download".format(base=api_url, file_id=file_id),
+ body=files_content[file_id],
+ )
+
+ def create_callback(request):
+ uploaded_lines = request.body.decode('utf8').splitlines()
+ content_disposition = uploaded_lines[1]
+ _, params = cgi.parse_header(content_disposition)
+ filename = params.get("filename", None)
+ content = "".join(uploaded_lines[3:-1])
+ size = len(content.encode('utf8'))
+
+ body = [{
+ "id": generate_id(),
+ "content_type": "text/plain",
+ "filename": filename,
+ "account_id": account_id,
+ "object": "file",
+ "size": size,
+ }]
+ return (200, {}, json.dumps(body))
+
+ mocked_responses.add_callback(
responses.POST,
api_url + '/files/',
- body=json.dumps(body),
- )
- mocked_responses.add(
- responses.GET,
- api_url + '/files/3qfe4k3siosfjtjpfdnon8zbn/download',
- body='test body',
+ callback=create_callback,
)
diff --git a/tests/test_files.py b/tests/test_files.py
index 046c274..8da27fa 100644
--- a/tests/test_files.py
+++ b/tests/test_files.py
@@ -1,19 +1,65 @@
+import cgi
+from io import BytesIO
import pytest
from nylas.client.errors import FileUploadError
@pytest.mark.usefixtures("mock_files")
-def test_file_upload(api_client):
+def test_file_upload_data(api_client, mocked_responses):
+ data = "Hello, World!"
+
myfile = api_client.files.create()
- myfile.filename = 'test.txt'
- myfile.data = "Hello World."
+ myfile.filename = 'hello.txt'
+ myfile.data = data
+
+ assert not mocked_responses.calls
myfile.save()
+ assert len(mocked_responses.calls) == 1
+
+ assert myfile.filename == 'hello.txt'
+ assert myfile.size == 13
- assert myfile.filename == 'a.txt'
- assert myfile.size == 762878
+ upload_body = mocked_responses.calls[0].request.body
+ upload_lines = upload_body.decode("utf8").splitlines()
+
+ content_disposition = upload_lines[1]
+ _, params = cgi.parse_header(content_disposition)
+ assert params["filename"] == "hello.txt"
+ assert "Hello, World!" in upload_lines
+
+
[email protected]("mock_files")
+def test_file_upload_stream(api_client, mocked_responses):
+ stream = BytesIO(b"Hello, World!")
+ stream.name = "wacky.txt"
+ myfile = api_client.files.create()
+ myfile.filename = 'hello.txt'
+ myfile.stream = stream
+ assert not mocked_responses.calls
+ myfile.save()
+ assert len(mocked_responses.calls) == 1
+
+ assert myfile.filename == 'hello.txt'
+ assert myfile.size == 13
+
+ upload_body = mocked_responses.calls[0].request.body
+ upload_lines = upload_body.decode("utf8").splitlines()
+
+ content_disposition = upload_lines[1]
+ _, params = cgi.parse_header(content_disposition)
+ assert params["filename"] == "hello.txt"
+ assert "Hello, World!" in upload_lines
+
+
[email protected]("mock_files")
+def test_file_download(api_client, mocked_responses):
+ assert not mocked_responses.calls
+ myfile = api_client.files.first()
+ assert len(mocked_responses.calls) == 1
data = myfile.download().decode()
- assert data == 'test body'
+ assert len(mocked_responses.calls) == 2
+ assert data == "Hello, World!"
def test_file_invalid_upload(api_client):
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 3.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-pylint",
"responses"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"examples/requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | astroid==2.11.7
attrs==22.2.0
bump2version==1.0.1
bumpversion==0.6.0
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
cookies==2.2.1
coverage==6.2
cryptography==40.0.2
dill==0.3.4
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
isort==5.10.1
lazy-object-proxy==1.7.1
mccabe==0.7.0
ndg-httpsclient==0.5.1
-e git+https://github.com/nylas/nylas-python.git@068a626706188eea00b606608ab06c9b9ea307fb#egg=nylas
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
py==1.11.0
pyasn1==0.5.1
pycparser==2.21
pylint==2.13.9
pyOpenSSL==23.2.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
pytest-pylint==0.18.0
requests==2.27.1
responses==0.6.1
six==1.17.0
toml==0.10.2
tomli==1.2.3
typed-ast==1.5.5
typing_extensions==4.1.1
urllib3==1.26.20
URLObject==2.4.3
wrapt==1.16.0
zipp==3.6.0
| name: nylas-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- astroid==2.11.7
- attrs==22.2.0
- bump2version==1.0.1
- bumpversion==0.6.0
- cffi==1.15.1
- charset-normalizer==2.0.12
- cookies==2.2.1
- coverage==6.2
- cryptography==40.0.2
- dill==0.3.4
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- isort==5.10.1
- lazy-object-proxy==1.7.1
- mccabe==0.7.0
- ndg-httpsclient==0.5.1
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- py==1.11.0
- pyasn1==0.5.1
- pycparser==2.21
- pylint==2.13.9
- pyopenssl==23.2.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-pylint==0.18.0
- requests==2.27.1
- responses==0.6.1
- six==1.17.0
- toml==0.10.2
- tomli==1.2.3
- typed-ast==1.5.5
- typing-extensions==4.1.1
- urllib3==1.26.20
- urlobject==2.4.3
- wrapt==1.16.0
- zipp==3.6.0
prefix: /opt/conda/envs/nylas-python
| [
"tests/test_files.py::test_file_upload_stream"
]
| []
| [
"tests/test_files.py::test_file_upload_data",
"tests/test_files.py::test_file_download",
"tests/test_files.py::test_file_invalid_upload",
"tests/test_files.py::test_file_upload_errors"
]
| []
| MIT License | 2,010 | [
"nylas/client/restful_models.py"
]
| [
"nylas/client/restful_models.py"
]
|
|
tornadoweb__tornado-2242 | 810c341d2488c11813995dd2beadab7ba00ff44d | 2018-01-05 03:45:47 | 03f13800e854a6fc9e6efa2168e694d9599348bd | diff --git a/.travis.yml b/.travis.yml
index 4b2ac2bc..e3b33c04 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,7 +1,6 @@
# https://travis-ci.org/tornadoweb/tornado
language: python
python:
- - 2.7.8
- 2.7
- pypy
- 3.3
@@ -9,7 +8,7 @@ python:
- 3.5
- 3.6
- nightly
- - pypy3
+ - pypy3.5-5.8.0
install:
- if [[ $TRAVIS_PYTHON_VERSION == 2* ]]; then travis_retry pip install futures mock monotonic trollius; fi
diff --git a/docs/releases.rst b/docs/releases.rst
index 3a9ef777..128c7603 100644
--- a/docs/releases.rst
+++ b/docs/releases.rst
@@ -4,6 +4,7 @@ Release notes
.. toctree::
:maxdepth: 2
+ releases/v4.5.3
releases/v4.5.2
releases/v4.5.1
releases/v4.5.0
diff --git a/docs/releases/v4.5.3.rst b/docs/releases/v4.5.3.rst
new file mode 100644
index 00000000..b1102459
--- /dev/null
+++ b/docs/releases/v4.5.3.rst
@@ -0,0 +1,49 @@
+What's new in Tornado 4.5.3
+===========================
+
+Jan 6, 2018
+-----------
+
+`tornado.curl_httpclient`
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Improved debug logging on Python 3.
+
+`tornado.httpserver`
+~~~~~~~~~~~~~~~~~~~~
+
+- ``Content-Length`` and ``Transfer-Encoding`` headers are no longer
+ sent with 1xx or 204 responses (this was already true of 304
+ responses).
+- Reading chunked requests no longer leaves the connection in a broken
+ state.
+
+`tornado.iostream`
+~~~~~~~~~~~~~~~~~~
+
+- Writing a `memoryview` can no longer result in "BufferError:
+ Existing exports of data: object cannot be re-sized".
+
+`tornado.options`
+~~~~~~~~~~~~~~~~~
+
+- Duplicate option names are now detected properly whether they use
+ hyphens or underscores.
+
+`tornado.testing`
+~~~~~~~~~~~~~~~~~
+
+- `.AsyncHTTPTestCase.fetch` now uses ``127.0.0.1`` instead of
+ ``localhost``, improving compatibility with systems that have
+ partially-working ipv6 stacks.
+
+`tornado.web`
+~~~~~~~~~~~~~
+
+- It is no longer allowed to send a body with 1xx or 204 responses.
+
+`tornado.websocket`
+~~~~~~~~~~~~~~~~~~~
+
+- Requests with invalid websocket headers now get a response with
+ status code 400 instead of a closed connection.
diff --git a/setup.py b/setup.py
index 66d846be..a1feea67 100644
--- a/setup.py
+++ b/setup.py
@@ -103,7 +103,7 @@ http://api.mongodb.org/python/current/installation.html#osx
kwargs = {}
-version = "4.5.2"
+version = "4.5.3"
with open('README.rst') as f:
kwargs['long_description'] = f.read()
diff --git a/tornado/__init__.py b/tornado/__init__.py
index 3eaa57b8..fa71bf61 100644
--- a/tornado/__init__.py
+++ b/tornado/__init__.py
@@ -25,5 +25,5 @@ from __future__ import absolute_import, division, print_function
# is zero for an official release, positive for a development branch,
# or negative for a release candidate or beta (after the base version
# number has been incremented)
-version = "4.5.2"
-version_info = (4, 5, 2, 0)
+version = "4.5.3"
+version_info = (4, 5, 3, 0)
diff --git a/tornado/curl_httpclient.py b/tornado/curl_httpclient.py
index eef4a17a..8558d65c 100644
--- a/tornado/curl_httpclient.py
+++ b/tornado/curl_httpclient.py
@@ -493,6 +493,7 @@ class CurlAsyncHTTPClient(AsyncHTTPClient):
def _curl_debug(self, debug_type, debug_msg):
debug_types = ('I', '<', '>', '<', '>')
+ debug_msg = native_str(debug_msg)
if debug_type == 0:
curl_log.debug('%s', debug_msg.strip())
elif debug_type in (1, 2):
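
The hunk above wraps pycurl's debug payload in native_str because on Python 3 the
DEBUGFUNCTION callback receives bytes, which would otherwise be logged as b'...'
reprs. A minimal sketch of the conversion, using a hypothetical standalone helper
rather than Tornado's own tornado.escape.native_str:

def to_native_str(msg, encoding='utf-8'):
    # pycurl hands debug messages to the callback as bytes on Python 3;
    # decode them so .strip() and %-formatting yield readable text.
    if isinstance(msg, bytes):
        return msg.decode(encoding, 'replace')
    return msg

print(to_native_str(b'* Connected to example.com').strip())
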
diff --git a/tornado/http1connection.py b/tornado/http1connection.py
index 53744ece..32bed6c9 100644
--- a/tornado/http1connection.py
+++ b/tornado/http1connection.py
@@ -349,10 +349,11 @@ class HTTP1Connection(httputil.HTTPConnection):
# self._request_start_line.version or
# start_line.version?
self._request_start_line.version == 'HTTP/1.1' and
- # 304 responses have no body (not even a zero-length body), and so
- # should not have either Content-Length or Transfer-Encoding.
- # headers.
+ # 1xx, 204 and 304 responses have no body (not even a zero-length
+ # body), and so should not have either Content-Length or
+ # Transfer-Encoding headers.
start_line.code not in (204, 304) and
+ (start_line.code < 100 or start_line.code >= 200) and
# No need to chunk the output if a Content-Length is specified.
'Content-Length' not in headers and
# Applications are discouraged from touching Transfer-Encoding,
@@ -592,6 +593,9 @@ class HTTP1Connection(httputil.HTTPConnection):
chunk_len = yield self.stream.read_until(b"\r\n", max_bytes=64)
chunk_len = int(chunk_len.strip(), 16)
if chunk_len == 0:
+ crlf = yield self.stream.read_bytes(2)
+ if crlf != b'\r\n':
+ raise httputil.HTTPInputError("improperly terminated chunked request")
return
total_size += chunk_len
if total_size > self._max_body_size:
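
The new lines above consume the CRLF that terminates a chunked body after the
final zero-length chunk. Previously those two bytes were left unread, so on a
keep-alive connection they were misparsed as the start of the next request. A
minimal sketch of the framing, using a plain file-like stream rather than
Tornado's IOStream (trailer headers are ignored here, as in the code above):

import io

def read_chunked(stream):
    # Chunked transfer coding: each chunk is '<hex-size>\r\n<data>\r\n';
    # the body ends with a zero-size chunk followed by one more CRLF.
    body = b''
    while True:
        size = int(stream.readline().strip(), 16)
        if size == 0:
            # The terminating CRLF that the patch now reads and validates.
            if stream.read(2) != b'\r\n':
                raise ValueError('improperly terminated chunked request')
            return body
        body += stream.read(size)
        stream.read(2)  # CRLF trailing each data chunk

assert read_chunked(io.BytesIO(b'4\r\nqwer\r\n0\r\n\r\n')) == b'qwer'
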
diff --git a/tornado/iostream.py b/tornado/iostream.py
index a1619c49..639ed508 100644
--- a/tornado/iostream.py
+++ b/tornado/iostream.py
@@ -1061,7 +1061,12 @@ class IOStream(BaseIOStream):
return chunk
def write_to_fd(self, data):
- return self.socket.send(data)
+ try:
+ return self.socket.send(data)
+ finally:
+            # Avoid keeping a reference to data, which can be a memoryview.
+ # See https://github.com/tornadoweb/tornado/pull/2008
+ del data
def connect(self, address, callback=None, server_hostname=None):
"""Connects the socket to a remote address without blocking.
@@ -1471,6 +1476,10 @@ class SSLIOStream(IOStream):
# simply return 0 bytes written.
return 0
raise
+ finally:
+            # Avoid keeping a reference to data, which can be a memoryview.
+ # See https://github.com/tornadoweb/tornado/pull/2008
+ del data
def read_from_fd(self):
if self._ssl_accepting:
@@ -1528,7 +1537,12 @@ class PipeIOStream(BaseIOStream):
os.close(self.fd)
def write_to_fd(self, data):
- return os.write(self.fd, data)
+ try:
+ return os.write(self.fd, data)
+ finally:
+            # Avoid keeping a reference to data, which can be a memoryview.
+ # See https://github.com/tornadoweb/tornado/pull/2008
+ del data
def read_from_fd(self):
try:
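
The del data added to each write_to_fd matters because callers can pass a
memoryview sliced from the pending write buffer; while any view is exported,
the underlying bytearray cannot be resized, which produced the "BufferError:
Existing exports of data: object cannot be re-sized" mentioned in the release
notes. A self-contained demonstration of that failure mode:

buf = bytearray(b'pending-output')
view = memoryview(buf)[:7]   # e.g. a slice handed to socket.send()
try:
    del buf[:7]              # resizing while the view is still alive
except BufferError as exc:
    print(exc)               # the error the patch avoids
del view                     # release the buffer export...
del buf[:7]                  # ...after which resizing succeeds
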
diff --git a/tornado/options.py b/tornado/options.py
index 0a72cc65..707fbd35 100644
--- a/tornado/options.py
+++ b/tornado/options.py
@@ -223,9 +223,10 @@ class OptionParser(object):
override options set earlier on the command line, but can be overridden
by later flags.
"""
- if name in self._options:
+ normalized = self._normalize_name(name)
+ if normalized in self._options:
raise Error("Option %r already defined in %s" %
- (name, self._options[name].file_name))
+ (normalized, self._options[normalized].file_name))
frame = sys._getframe(0)
options_file = frame.f_code.co_filename
@@ -247,7 +248,6 @@ class OptionParser(object):
group_name = group
else:
group_name = file_name
- normalized = self._normalize_name(name)
option = _Option(name, file_name=file_name,
default=default, type=type, help=help,
metavar=metavar, multiple=multiple,
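
The options.py change normalizes an option's name before the duplicate check,
so defining foo_bar after foo-bar (or vice versa) is rejected instead of
silently creating two options. A standalone sketch of the idea; normalize_name
is a hypothetical stand-in for the parser's private _normalize_name, which maps
underscores to dashes:

def normalize_name(name):
    # 'foo_bar' and 'foo-bar' identify the same option.
    return name.replace('_', '-')

defined = {}
for raw in ('foo-bar', 'foo_bar'):
    key = normalize_name(raw)
    if key in defined:
        print('Option %r already defined' % key)  # raised as Error in Tornado
    else:
        defined[key] = raw

The new test_error_redefine_underscore case in the test patch below exercises
exactly these dash/underscore combinations.
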
diff --git a/tornado/web.py b/tornado/web.py
index d79889fa..e8d102b5 100644
--- a/tornado/web.py
+++ b/tornado/web.py
@@ -974,7 +974,8 @@ class RequestHandler(object):
if self.check_etag_header():
self._write_buffer = []
self.set_status(304)
- if self._status_code in (204, 304):
+ if (self._status_code in (204, 304) or
+ (self._status_code >= 100 and self._status_code < 200)):
assert not self._write_buffer, "Cannot send body with %s" % self._status_code
self._clear_headers_for_304()
elif "Content-Length" not in self._headers:
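
Together with the http1connection.py change earlier, this enforces the RFC 7230
rule that 1xx, 204 and 304 responses carry no message body, and therefore no
Content-Length or Transfer-Encoding headers. The rule reduces to a one-line
predicate (hypothetical helper, not part of Tornado's API):

def body_allowed(status_code):
    # RFC 7230 section 3.3: informational (1xx), 204 No Content and
    # 304 Not Modified responses must not include a message body.
    return not (100 <= status_code < 200 or status_code in (204, 304))

assert body_allowed(200) and body_allowed(404)
assert not any(body_allowed(c) for c in (100, 101, 204, 304))
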
diff --git a/tornado/websocket.py b/tornado/websocket.py
index 69437ee4..0e9d339f 100644
--- a/tornado/websocket.py
+++ b/tornado/websocket.py
@@ -616,6 +616,14 @@ class WebSocketProtocol13(WebSocketProtocol):
def accept_connection(self):
try:
self._handle_websocket_headers()
+ except ValueError:
+ self.handler.set_status(400)
+ log_msg = "Missing/Invalid WebSocket headers"
+ self.handler.finish(log_msg)
+ gen_log.debug(log_msg)
+ return
+
+ try:
self._accept_connection()
except ValueError:
gen_log.debug("Malformed WebSocket request received",
| release: 4.5.3
The following PRs have been nominated for inclusion in a 4.5.3 release:
- #2225
- #2020
- #2236 (fixes issue introduced by #2225)
- #1949 (fixes tests for tornado_http2) | tornadoweb/tornado | diff --git a/tornado/test/httpserver_test.py b/tornado/test/httpserver_test.py
index 11cb7231..59eb6fd1 100644
--- a/tornado/test/httpserver_test.py
+++ b/tornado/test/httpserver_test.py
@@ -786,9 +786,12 @@ class KeepAliveTest(AsyncHTTPTestCase):
def test_keepalive_chunked(self):
self.http_version = b'HTTP/1.0'
self.connect()
- self.stream.write(b'POST / HTTP/1.0\r\nConnection: keep-alive\r\n'
+ self.stream.write(b'POST / HTTP/1.0\r\n'
+ b'Connection: keep-alive\r\n'
b'Transfer-Encoding: chunked\r\n'
- b'\r\n0\r\n')
+ b'\r\n'
+ b'0\r\n'
+ b'\r\n')
self.read_response()
self.assertEqual(self.headers['Connection'], 'Keep-Alive')
self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n')
diff --git a/tornado/test/iostream_test.py b/tornado/test/iostream_test.py
index 91bc7bf6..56fffe60 100644
--- a/tornado/test/iostream_test.py
+++ b/tornado/test/iostream_test.py
@@ -9,7 +9,7 @@ from tornado.netutil import ssl_wrap_socket
from tornado.stack_context import NullContext
from tornado.tcpserver import TCPServer
from tornado.testing import AsyncHTTPTestCase, AsyncHTTPSTestCase, AsyncTestCase, bind_unused_port, ExpectLog, gen_test
-from tornado.test.util import unittest, skipIfNonUnix, refusing_port
+from tornado.test.util import unittest, skipIfNonUnix, refusing_port, skipPypy3V58
from tornado.web import RequestHandler, Application
import errno
import logging
@@ -539,6 +539,7 @@ class TestIOStreamMixin(object):
client.close()
@skipIfNonUnix
+ @skipPypy3V58
def test_inline_read_error(self):
# An error on an inline read is raised without logging (on the
# assumption that it will eventually be noticed or logged further
@@ -557,6 +558,7 @@ class TestIOStreamMixin(object):
server.close()
client.close()
+ @skipPypy3V58
def test_async_read_error_logging(self):
# Socket errors on asynchronous reads should be logged (but only
# once).
@@ -993,7 +995,7 @@ class TestIOStreamStartTLS(AsyncTestCase):
server_future = self.server_start_tls(_server_ssl_options())
client_future = self.client_start_tls(
ssl.create_default_context(),
- server_hostname=b'127.0.0.1')
+ server_hostname='127.0.0.1')
with ExpectLog(gen_log, "SSL Error"):
with self.assertRaises(ssl.SSLError):
# The client fails to connect with an SSL error.
diff --git a/tornado/test/options_test.py b/tornado/test/options_test.py
index bafeea6f..1a0ac8fb 100644
--- a/tornado/test/options_test.py
+++ b/tornado/test/options_test.py
@@ -7,7 +7,7 @@ import sys
from tornado.options import OptionParser, Error
from tornado.util import basestring_type, PY3
-from tornado.test.util import unittest
+from tornado.test.util import unittest, subTest
if PY3:
from io import StringIO
@@ -232,6 +232,24 @@ class OptionsTest(unittest.TestCase):
self.assertRegexpMatches(str(cm.exception),
'Option.*foo.*already defined')
+ def test_error_redefine_underscore(self):
+ # Ensure that the dash/underscore normalization doesn't
+ # interfere with the redefinition error.
+ tests = [
+ ('foo-bar', 'foo-bar'),
+ ('foo_bar', 'foo_bar'),
+ ('foo-bar', 'foo_bar'),
+ ('foo_bar', 'foo-bar'),
+ ]
+ for a, b in tests:
+ with subTest(self, a=a, b=b):
+ options = OptionParser()
+ options.define(a)
+ with self.assertRaises(Error) as cm:
+ options.define(b)
+ self.assertRegexpMatches(str(cm.exception),
+ 'Option.*foo.bar.*already defined')
+
def test_dash_underscore_cli(self):
# Dashes and underscores should be interchangeable.
for defined_name in ['foo-bar', 'foo_bar']:
diff --git a/tornado/test/simple_httpclient_test.py b/tornado/test/simple_httpclient_test.py
index 02d57c5f..0e75e530 100644
--- a/tornado/test/simple_httpclient_test.py
+++ b/tornado/test/simple_httpclient_test.py
@@ -272,16 +272,9 @@ class SimpleHTTPClientTestMixin(object):
@skipIfNoIPv6
def test_ipv6(self):
- try:
- [sock] = bind_sockets(None, '::1', family=socket.AF_INET6)
- port = sock.getsockname()[1]
- self.http_server.add_socket(sock)
- except socket.gaierror as e:
- if e.args[0] == socket.EAI_ADDRFAMILY:
- # python supports ipv6, but it's not configured on the network
- # interface, so skip this test.
- return
- raise
+ [sock] = bind_sockets(None, '::1', family=socket.AF_INET6)
+ port = sock.getsockname()[1]
+ self.http_server.add_socket(sock)
url = '%s://[::1]:%d/hello' % (self.get_protocol(), port)
# ipv6 is currently enabled by default but can be disabled
@@ -327,7 +320,7 @@ class SimpleHTTPClientTestMixin(object):
self.assertNotIn("Content-Length", response.headers)
def test_host_header(self):
- host_re = re.compile(b"^localhost:[0-9]+$")
+ host_re = re.compile(b"^127.0.0.1:[0-9]+$")
response = self.fetch("/host_echo")
self.assertTrue(host_re.match(response.body))
diff --git a/tornado/test/util.py b/tornado/test/util.py
index 6c032da6..90a9c7b8 100644
--- a/tornado/test/util.py
+++ b/tornado/test/util.py
@@ -1,5 +1,6 @@
from __future__ import absolute_import, division, print_function
+import contextlib
import os
import platform
import socket
@@ -34,14 +35,39 @@ skipOnAppEngine = unittest.skipIf('APPENGINE_RUNTIME' in os.environ,
skipIfNoNetwork = unittest.skipIf('NO_NETWORK' in os.environ,
'network access disabled')
-skipIfNoIPv6 = unittest.skipIf(not socket.has_ipv6, 'ipv6 support not present')
-
-
skipBefore33 = unittest.skipIf(sys.version_info < (3, 3), 'PEP 380 (yield from) not available')
skipBefore35 = unittest.skipIf(sys.version_info < (3, 5), 'PEP 492 (async/await) not available')
skipNotCPython = unittest.skipIf(platform.python_implementation() != 'CPython',
'Not CPython implementation')
+# Used for tests affected by
+# https://bitbucket.org/pypy/pypy/issues/2616/incomplete-error-handling-in
+# TODO: remove this after pypy3 5.8 is obsolete.
+skipPypy3V58 = unittest.skipIf(platform.python_implementation() == 'PyPy' and
+ sys.version_info > (3,) and
+ sys.pypy_version_info < (5, 9),
+ 'pypy3 5.8 has buggy ssl module')
+
+
+def _detect_ipv6():
+ if not socket.has_ipv6:
+ # socket.has_ipv6 check reports whether ipv6 was present at compile
+ # time. It's usually true even when ipv6 doesn't work for other reasons.
+ return False
+ sock = None
+ try:
+ sock = socket.socket(socket.AF_INET6)
+ sock.bind(('::1', 0))
+ except socket.error:
+ return False
+ finally:
+ if sock is not None:
+ sock.close()
+ return True
+
+
+skipIfNoIPv6 = unittest.skipIf(not _detect_ipv6(), 'ipv6 support not present')
+
def refusing_port():
"""Returns a local port number that will refuse all connections.
@@ -94,3 +120,15 @@ def is_coverage_running():
except AttributeError:
return False
return mod.startswith('coverage')
+
+
+def subTest(test, *args, **kwargs):
+ """Compatibility shim for unittest.TestCase.subTest.
+
+ Usage: ``with tornado.test.util.subTest(self, x=x):``
+ """
+ try:
+ subTest = test.subTest # py34+
+ except AttributeError:
+ subTest = contextlib.contextmanager(lambda *a, **kw: (yield))
+ return subTest(*args, **kwargs)
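
The shim above falls back to a do-nothing context manager when test.subTest is
unavailable (Python < 3.4), keeping "with subTest(self, ...):" syntactically
valid everywhere. The fallback trick in isolation, under a hypothetical name:

import contextlib

# A generator-lambda that yields once becomes a no-op context manager;
# arguments are accepted and ignored, matching subTest's signature.
noop_subtest = contextlib.contextmanager(lambda *a, **kw: (yield))

with noop_subtest(a='foo-bar', b='foo_bar'):
    print('body runs once; parameters only matter for reporting on py34+')
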
diff --git a/tornado/test/web_test.py b/tornado/test/web_test.py
index d79ea52c..013c2ac2 100644
--- a/tornado/test/web_test.py
+++ b/tornado/test/web_test.py
@@ -356,7 +356,7 @@ class AuthRedirectTest(WebTestCase):
response = self.wait()
self.assertEqual(response.code, 302)
self.assertTrue(re.match(
- 'http://example.com/login\?next=http%3A%2F%2Flocalhost%3A[0-9]+%2Fabsolute',
+ 'http://example.com/login\?next=http%3A%2F%2F127.0.0.1%3A[0-9]+%2Fabsolute',
response.headers['Location']), response.headers['Location'])
@@ -2134,7 +2134,7 @@ class StreamingRequestBodyTest(WebTestCase):
stream.write(b"4\r\nqwer\r\n")
data = yield self.data
self.assertEquals(data, b"qwer")
- stream.write(b"0\r\n")
+ stream.write(b"0\r\n\r\n")
yield self.finished
data = yield gen.Task(stream.read_until_close)
# This would ideally use an HTTP1Connection to read the response.
diff --git a/tornado/test/websocket_test.py b/tornado/test/websocket_test.py
index d47a74e6..95a5ecd4 100644
--- a/tornado/test/websocket_test.py
+++ b/tornado/test/websocket_test.py
@@ -189,6 +189,13 @@ class WebSocketTest(WebSocketBaseTestCase):
response = self.fetch('/echo')
self.assertEqual(response.code, 400)
+ def test_missing_websocket_key(self):
+ response = self.fetch('/echo',
+ headers={'Connection': 'Upgrade',
+ 'Upgrade': 'WebSocket',
+ 'Sec-WebSocket-Version': '13'})
+ self.assertEqual(response.code, 400)
+
def test_bad_websocket_version(self):
response = self.fetch('/echo',
headers={'Connection': 'Upgrade',
diff --git a/tornado/testing.py b/tornado/testing.py
index 74d04b60..82a3b937 100644
--- a/tornado/testing.py
+++ b/tornado/testing.py
@@ -423,7 +423,7 @@ class AsyncHTTPTestCase(AsyncTestCase):
def get_url(self, path):
"""Returns an absolute url for the given path on the test server."""
- return '%s://localhost:%s%s' % (self.get_protocol(),
+ return '%s://127.0.0.1:%s%s' % (self.get_protocol(),
self.get_http_port(), path)
def tearDown(self):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_issue_reference",
"has_added_files",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 3,
"test_score": 3
},
"num_modified_files": 10
} | 4.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc python3-dev"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
coverage==6.2
execnet==1.9.0
importlib-metadata==4.8.3
iniconfig==1.1.1
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
pytest-asyncio==0.16.0
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-xdist==3.0.2
tomli==1.2.3
-e git+https://github.com/tornadoweb/tornado.git@810c341d2488c11813995dd2beadab7ba00ff44d#egg=tornado
typing_extensions==4.1.1
zipp==3.6.0
| name: tornado
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- execnet==1.9.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-asyncio==0.16.0
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-xdist==3.0.2
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/tornado
| [
"tornado/test/options_test.py::OptionsTest::test_error_redefine_underscore",
"tornado/test/websocket_test.py::WebSocketTest::test_missing_websocket_key"
]
| [
"tornado/test/httpserver_test.py::HTTPServerRawTest::test_invalid_content_length",
"tornado/test/httpserver_test.py::HTTPServerRawTest::test_malformed_first_line",
"tornado/test/httpserver_test.py::HTTPServerRawTest::test_malformed_headers",
"tornado/test/httpserver_test.py::UnixSocketTest::test_unix_socket_bad_request",
"tornado/test/httpserver_test.py::BodyLimitsTest::test_body_size_override_reset",
"tornado/test/httpserver_test.py::BodyLimitsTest::test_large_body_buffered",
"tornado/test/httpserver_test.py::BodyLimitsTest::test_large_body_buffered_chunked",
"tornado/test/httpserver_test.py::BodyLimitsTest::test_large_body_streaming",
"tornado/test/httpserver_test.py::BodyLimitsTest::test_large_body_streaming_chunked",
"tornado/test/httpserver_test.py::BodyLimitsTest::test_timeout",
"tornado/test/iostream_test.py::TestIOStreamWebMixin::test_connection_closed",
"tornado/test/iostream_test.py::TestIOStreamWebMixin::test_read_until_close",
"tornado/test/iostream_test.py::TestIOStreamWebMixin::test_read_zero_bytes",
"tornado/test/iostream_test.py::TestIOStreamWebMixin::test_write_while_connecting",
"tornado/test/iostream_test.py::TestIOStreamWebMixin::test_future_interface",
"tornado/test/iostream_test.py::TestIOStreamWebMixin::test_future_close_while_reading",
"tornado/test/iostream_test.py::TestIOStreamWebMixin::test_future_read_until_close",
"tornado/test/iostream_test.py::TestIOStreamMixin::test_streaming_callback_with_data_in_buffer",
"tornado/test/iostream_test.py::TestIOStreamMixin::test_write_zero_bytes",
"tornado/test/iostream_test.py::TestIOStreamMixin::test_connection_refused",
"tornado/test/iostream_test.py::TestIOStreamMixin::test_gaierror",
"tornado/test/iostream_test.py::TestIOStreamMixin::test_read_callback_error",
"tornado/test/iostream_test.py::TestIOStreamMixin::test_streaming_callback",
"tornado/test/iostream_test.py::TestIOStreamMixin::test_streaming_until_close",
"tornado/test/iostream_test.py::TestIOStreamMixin::test_streaming_until_close_future",
"tornado/test/iostream_test.py::TestIOStreamMixin::test_delayed_close_callback",
"tornado/test/iostream_test.py::TestIOStreamMixin::test_future_delayed_close_callback",
"tornado/test/iostream_test.py::TestIOStreamMixin::test_close_buffered_data",
"tornado/test/iostream_test.py::TestIOStreamMixin::test_read_until_close_after_close",
"tornado/test/iostream_test.py::TestIOStreamMixin::test_read_until_close_with_error",
"tornado/test/iostream_test.py::TestIOStreamMixin::test_streaming_read_until_close_after_close",
"tornado/test/iostream_test.py::TestIOStreamMixin::test_large_read_until",
"tornado/test/iostream_test.py::TestIOStreamMixin::test_close_callback_with_pending_read",
"tornado/test/iostream_test.py::TestIOStreamMixin::test_inline_read_error",
"tornado/test/iostream_test.py::TestIOStreamMixin::test_async_read_error_logging",
"tornado/test/iostream_test.py::TestIOStreamMixin::test_future_close_callback",
"tornado/test/iostream_test.py::TestIOStreamMixin::test_write_memoryview",
"tornado/test/iostream_test.py::TestIOStreamMixin::test_read_bytes_partial",
"tornado/test/iostream_test.py::TestIOStreamMixin::test_read_until_max_bytes",
"tornado/test/iostream_test.py::TestIOStreamMixin::test_read_until_max_bytes_inline",
"tornado/test/iostream_test.py::TestIOStreamMixin::test_read_until_max_bytes_ignores_extra",
"tornado/test/iostream_test.py::TestIOStreamMixin::test_read_until_regex_max_bytes",
"tornado/test/iostream_test.py::TestIOStreamMixin::test_read_until_regex_max_bytes_inline",
"tornado/test/iostream_test.py::TestIOStreamMixin::test_read_until_regex_max_bytes_ignores_extra",
"tornado/test/iostream_test.py::TestIOStreamMixin::test_small_reads_from_large_buffer",
"tornado/test/iostream_test.py::TestIOStreamMixin::test_small_read_untils_from_large_buffer",
"tornado/test/iostream_test.py::TestIOStreamMixin::test_flow_control",
"tornado/test/iostream_test.py::TestIOStreamMixin::test_future_write",
"tornado/test/iostream_test.py::TestIOStream::test_read_until_max_bytes",
"tornado/test/iostream_test.py::TestIOStream::test_read_until_max_bytes_ignores_extra",
"tornado/test/iostream_test.py::TestIOStream::test_read_until_max_bytes_inline",
"tornado/test/iostream_test.py::TestIOStream::test_read_until_regex_max_bytes",
"tornado/test/iostream_test.py::TestIOStream::test_read_until_regex_max_bytes_ignores_extra",
"tornado/test/iostream_test.py::TestIOStream::test_read_until_regex_max_bytes_inline",
"tornado/test/iostream_test.py::TestIOStreamSSL::test_inline_read_error",
"tornado/test/iostream_test.py::TestIOStreamSSL::test_read_until_close_after_close",
"tornado/test/iostream_test.py::TestIOStreamSSL::test_read_until_max_bytes",
"tornado/test/iostream_test.py::TestIOStreamSSL::test_read_until_max_bytes_ignores_extra",
"tornado/test/iostream_test.py::TestIOStreamSSL::test_read_until_max_bytes_inline",
"tornado/test/iostream_test.py::TestIOStreamSSL::test_read_until_regex_max_bytes",
"tornado/test/iostream_test.py::TestIOStreamSSL::test_read_until_regex_max_bytes_ignores_extra",
"tornado/test/iostream_test.py::TestIOStreamSSL::test_read_until_regex_max_bytes_inline",
"tornado/test/iostream_test.py::TestIOStreamSSL::test_streaming_read_until_close_after_close",
"tornado/test/iostream_test.py::TestIOStreamSSL::test_write_zero_bytes",
"tornado/test/iostream_test.py::TestIOStreamSSLContext::test_inline_read_error",
"tornado/test/iostream_test.py::TestIOStreamSSLContext::test_read_until_close_after_close",
"tornado/test/iostream_test.py::TestIOStreamSSLContext::test_read_until_max_bytes",
"tornado/test/iostream_test.py::TestIOStreamSSLContext::test_read_until_max_bytes_ignores_extra",
"tornado/test/iostream_test.py::TestIOStreamSSLContext::test_read_until_max_bytes_inline",
"tornado/test/iostream_test.py::TestIOStreamSSLContext::test_read_until_regex_max_bytes",
"tornado/test/iostream_test.py::TestIOStreamSSLContext::test_read_until_regex_max_bytes_ignores_extra",
"tornado/test/iostream_test.py::TestIOStreamSSLContext::test_read_until_regex_max_bytes_inline",
"tornado/test/iostream_test.py::TestIOStreamSSLContext::test_streaming_read_until_close_after_close",
"tornado/test/iostream_test.py::TestIOStreamSSLContext::test_write_zero_bytes",
"tornado/test/simple_httpclient_test.py::MaxHeaderSizeTest::test_large_headers",
"tornado/test/simple_httpclient_test.py::MaxBodySizeTest::test_large_body",
"tornado/test/simple_httpclient_test.py::ChunkedWithContentLengthTest::test_chunked_with_content_length"
]
| [
"tornado/test/httpserver_test.py::SSLv23Test::test_error_logging",
"tornado/test/httpserver_test.py::SSLv23Test::test_large_post",
"tornado/test/httpserver_test.py::SSLv23Test::test_non_ssl_request",
"tornado/test/httpserver_test.py::SSLv23Test::test_ssl",
"tornado/test/httpserver_test.py::SSLv3Test::test_error_logging",
"tornado/test/httpserver_test.py::SSLv3Test::test_large_post",
"tornado/test/httpserver_test.py::SSLv3Test::test_non_ssl_request",
"tornado/test/httpserver_test.py::SSLv3Test::test_ssl",
"tornado/test/httpserver_test.py::TLSv1Test::test_error_logging",
"tornado/test/httpserver_test.py::TLSv1Test::test_large_post",
"tornado/test/httpserver_test.py::TLSv1Test::test_non_ssl_request",
"tornado/test/httpserver_test.py::TLSv1Test::test_ssl",
"tornado/test/httpserver_test.py::SSLContextTest::test_error_logging",
"tornado/test/httpserver_test.py::SSLContextTest::test_large_post",
"tornado/test/httpserver_test.py::SSLContextTest::test_non_ssl_request",
"tornado/test/httpserver_test.py::SSLContextTest::test_ssl",
"tornado/test/httpserver_test.py::BadSSLOptionsTest::test_missing_arguments",
"tornado/test/httpserver_test.py::BadSSLOptionsTest::test_missing_key",
"tornado/test/httpserver_test.py::HTTPConnectionTest::test_100_continue",
"tornado/test/httpserver_test.py::HTTPConnectionTest::test_multipart_form",
"tornado/test/httpserver_test.py::HTTPConnectionTest::test_newlines",
"tornado/test/httpserver_test.py::HTTPServerTest::test_double_slash",
"tornado/test/httpserver_test.py::HTTPServerTest::test_empty_post_parameters",
"tornado/test/httpserver_test.py::HTTPServerTest::test_empty_query_string",
"tornado/test/httpserver_test.py::HTTPServerTest::test_malformed_body",
"tornado/test/httpserver_test.py::HTTPServerTest::test_query_string_encoding",
"tornado/test/httpserver_test.py::HTTPServerTest::test_types",
"tornado/test/httpserver_test.py::HTTPServerRawTest::test_chunked_request_body",
"tornado/test/httpserver_test.py::HTTPServerRawTest::test_chunked_request_uppercase",
"tornado/test/httpserver_test.py::HTTPServerRawTest::test_empty_request",
"tornado/test/httpserver_test.py::XHeaderTest::test_ip_headers",
"tornado/test/httpserver_test.py::XHeaderTest::test_scheme_headers",
"tornado/test/httpserver_test.py::XHeaderTest::test_trusted_downstream",
"tornado/test/httpserver_test.py::SSLXHeaderTest::test_request_without_xprotocol",
"tornado/test/httpserver_test.py::ManualProtocolTest::test_manual_protocol",
"tornado/test/httpserver_test.py::UnixSocketTest::test_unix_socket",
"tornado/test/httpserver_test.py::KeepAliveTest::test_cancel_during_download",
"tornado/test/httpserver_test.py::KeepAliveTest::test_finish_while_closed",
"tornado/test/httpserver_test.py::KeepAliveTest::test_http10",
"tornado/test/httpserver_test.py::KeepAliveTest::test_http10_keepalive",
"tornado/test/httpserver_test.py::KeepAliveTest::test_http10_keepalive_extra_crlf",
"tornado/test/httpserver_test.py::KeepAliveTest::test_keepalive_chunked",
"tornado/test/httpserver_test.py::KeepAliveTest::test_pipelined_cancel",
"tornado/test/httpserver_test.py::KeepAliveTest::test_pipelined_requests",
"tornado/test/httpserver_test.py::KeepAliveTest::test_request_close",
"tornado/test/httpserver_test.py::KeepAliveTest::test_two_requests",
"tornado/test/httpserver_test.py::GzipTest::test_gzip",
"tornado/test/httpserver_test.py::GzipTest::test_uncompressed",
"tornado/test/httpserver_test.py::GzipUnsupportedTest::test_gzip_unsupported",
"tornado/test/httpserver_test.py::GzipUnsupportedTest::test_uncompressed",
"tornado/test/httpserver_test.py::StreamingChunkSizeTest::test_chunked_body",
"tornado/test/httpserver_test.py::StreamingChunkSizeTest::test_chunked_compressed",
"tornado/test/httpserver_test.py::StreamingChunkSizeTest::test_compressed_body",
"tornado/test/httpserver_test.py::StreamingChunkSizeTest::test_regular_body",
"tornado/test/httpserver_test.py::MaxHeaderSizeTest::test_large_headers",
"tornado/test/httpserver_test.py::MaxHeaderSizeTest::test_small_headers",
"tornado/test/httpserver_test.py::IdleTimeoutTest::test_idle_after_use",
"tornado/test/httpserver_test.py::IdleTimeoutTest::test_unused_connection",
"tornado/test/httpserver_test.py::BodyLimitsTest::test_large_body_streaming_chunked_override",
"tornado/test/httpserver_test.py::BodyLimitsTest::test_large_body_streaming_override",
"tornado/test/httpserver_test.py::BodyLimitsTest::test_small_body",
"tornado/test/httpserver_test.py::LegacyInterfaceTest::test_legacy_interface",
"tornado/test/iostream_test.py::TestIOStreamWebHTTP::test_connection_closed",
"tornado/test/iostream_test.py::TestIOStreamWebHTTP::test_future_close_while_reading",
"tornado/test/iostream_test.py::TestIOStreamWebHTTP::test_future_interface",
"tornado/test/iostream_test.py::TestIOStreamWebHTTP::test_future_read_until_close",
"tornado/test/iostream_test.py::TestIOStreamWebHTTP::test_read_until_close",
"tornado/test/iostream_test.py::TestIOStreamWebHTTP::test_read_zero_bytes",
"tornado/test/iostream_test.py::TestIOStreamWebHTTP::test_write_while_connecting",
"tornado/test/iostream_test.py::TestIOStreamWebHTTPS::test_connection_closed",
"tornado/test/iostream_test.py::TestIOStreamWebHTTPS::test_future_close_while_reading",
"tornado/test/iostream_test.py::TestIOStreamWebHTTPS::test_future_interface",
"tornado/test/iostream_test.py::TestIOStreamWebHTTPS::test_future_read_until_close",
"tornado/test/iostream_test.py::TestIOStreamWebHTTPS::test_read_until_close",
"tornado/test/iostream_test.py::TestIOStreamWebHTTPS::test_read_zero_bytes",
"tornado/test/iostream_test.py::TestIOStreamWebHTTPS::test_write_while_connecting",
"tornado/test/iostream_test.py::TestIOStream::test_async_read_error_logging",
"tornado/test/iostream_test.py::TestIOStream::test_close_buffered_data",
"tornado/test/iostream_test.py::TestIOStream::test_close_callback_with_pending_read",
"tornado/test/iostream_test.py::TestIOStream::test_connection_refused",
"tornado/test/iostream_test.py::TestIOStream::test_delayed_close_callback",
"tornado/test/iostream_test.py::TestIOStream::test_flow_control",
"tornado/test/iostream_test.py::TestIOStream::test_future_close_callback",
"tornado/test/iostream_test.py::TestIOStream::test_future_delayed_close_callback",
"tornado/test/iostream_test.py::TestIOStream::test_future_write",
"tornado/test/iostream_test.py::TestIOStream::test_gaierror",
"tornado/test/iostream_test.py::TestIOStream::test_inline_read_error",
"tornado/test/iostream_test.py::TestIOStream::test_large_read_until",
"tornado/test/iostream_test.py::TestIOStream::test_read_bytes_partial",
"tornado/test/iostream_test.py::TestIOStream::test_read_callback_error",
"tornado/test/iostream_test.py::TestIOStream::test_read_until_close_after_close",
"tornado/test/iostream_test.py::TestIOStream::test_read_until_close_with_error",
"tornado/test/iostream_test.py::TestIOStream::test_small_read_untils_from_large_buffer",
"tornado/test/iostream_test.py::TestIOStream::test_small_reads_from_large_buffer",
"tornado/test/iostream_test.py::TestIOStream::test_streaming_callback",
"tornado/test/iostream_test.py::TestIOStream::test_streaming_callback_with_data_in_buffer",
"tornado/test/iostream_test.py::TestIOStream::test_streaming_read_until_close_after_close",
"tornado/test/iostream_test.py::TestIOStream::test_streaming_until_close",
"tornado/test/iostream_test.py::TestIOStream::test_streaming_until_close_future",
"tornado/test/iostream_test.py::TestIOStream::test_write_memoryview",
"tornado/test/iostream_test.py::TestIOStream::test_write_zero_bytes",
"tornado/test/iostream_test.py::TestIOStreamSSL::test_async_read_error_logging",
"tornado/test/iostream_test.py::TestIOStreamSSL::test_close_buffered_data",
"tornado/test/iostream_test.py::TestIOStreamSSL::test_close_callback_with_pending_read",
"tornado/test/iostream_test.py::TestIOStreamSSL::test_connection_refused",
"tornado/test/iostream_test.py::TestIOStreamSSL::test_delayed_close_callback",
"tornado/test/iostream_test.py::TestIOStreamSSL::test_flow_control",
"tornado/test/iostream_test.py::TestIOStreamSSL::test_future_close_callback",
"tornado/test/iostream_test.py::TestIOStreamSSL::test_future_delayed_close_callback",
"tornado/test/iostream_test.py::TestIOStreamSSL::test_future_write",
"tornado/test/iostream_test.py::TestIOStreamSSL::test_gaierror",
"tornado/test/iostream_test.py::TestIOStreamSSL::test_large_read_until",
"tornado/test/iostream_test.py::TestIOStreamSSL::test_read_bytes_partial",
"tornado/test/iostream_test.py::TestIOStreamSSL::test_read_callback_error",
"tornado/test/iostream_test.py::TestIOStreamSSL::test_read_until_close_with_error",
"tornado/test/iostream_test.py::TestIOStreamSSL::test_small_read_untils_from_large_buffer",
"tornado/test/iostream_test.py::TestIOStreamSSL::test_small_reads_from_large_buffer",
"tornado/test/iostream_test.py::TestIOStreamSSL::test_streaming_callback",
"tornado/test/iostream_test.py::TestIOStreamSSL::test_streaming_callback_with_data_in_buffer",
"tornado/test/iostream_test.py::TestIOStreamSSL::test_streaming_until_close",
"tornado/test/iostream_test.py::TestIOStreamSSL::test_streaming_until_close_future",
"tornado/test/iostream_test.py::TestIOStreamSSL::test_write_memoryview",
"tornado/test/iostream_test.py::TestIOStreamSSLContext::test_async_read_error_logging",
"tornado/test/iostream_test.py::TestIOStreamSSLContext::test_close_buffered_data",
"tornado/test/iostream_test.py::TestIOStreamSSLContext::test_close_callback_with_pending_read",
"tornado/test/iostream_test.py::TestIOStreamSSLContext::test_connection_refused",
"tornado/test/iostream_test.py::TestIOStreamSSLContext::test_delayed_close_callback",
"tornado/test/iostream_test.py::TestIOStreamSSLContext::test_flow_control",
"tornado/test/iostream_test.py::TestIOStreamSSLContext::test_future_close_callback",
"tornado/test/iostream_test.py::TestIOStreamSSLContext::test_future_delayed_close_callback",
"tornado/test/iostream_test.py::TestIOStreamSSLContext::test_future_write",
"tornado/test/iostream_test.py::TestIOStreamSSLContext::test_gaierror",
"tornado/test/iostream_test.py::TestIOStreamSSLContext::test_large_read_until",
"tornado/test/iostream_test.py::TestIOStreamSSLContext::test_read_bytes_partial",
"tornado/test/iostream_test.py::TestIOStreamSSLContext::test_read_callback_error",
"tornado/test/iostream_test.py::TestIOStreamSSLContext::test_read_until_close_with_error",
"tornado/test/iostream_test.py::TestIOStreamSSLContext::test_small_read_untils_from_large_buffer",
"tornado/test/iostream_test.py::TestIOStreamSSLContext::test_small_reads_from_large_buffer",
"tornado/test/iostream_test.py::TestIOStreamSSLContext::test_streaming_callback",
"tornado/test/iostream_test.py::TestIOStreamSSLContext::test_streaming_callback_with_data_in_buffer",
"tornado/test/iostream_test.py::TestIOStreamSSLContext::test_streaming_until_close",
"tornado/test/iostream_test.py::TestIOStreamSSLContext::test_streaming_until_close_future",
"tornado/test/iostream_test.py::TestIOStreamSSLContext::test_write_memoryview",
"tornado/test/iostream_test.py::TestIOStreamStartTLS::test_check_hostname",
"tornado/test/iostream_test.py::TestIOStreamStartTLS::test_handshake_fail",
"tornado/test/iostream_test.py::TestIOStreamStartTLS::test_start_tls_smtp",
"tornado/test/iostream_test.py::WaitForHandshakeTest::test_wait_for_handshake_already_connected",
"tornado/test/iostream_test.py::WaitForHandshakeTest::test_wait_for_handshake_already_waiting_error",
"tornado/test/iostream_test.py::WaitForHandshakeTest::test_wait_for_handshake_callback",
"tornado/test/iostream_test.py::WaitForHandshakeTest::test_wait_for_handshake_future",
"tornado/test/iostream_test.py::TestPipeIOStream::test_pipe_iostream",
"tornado/test/iostream_test.py::TestPipeIOStream::test_pipe_iostream_big_write",
"tornado/test/options_test.py::OptionsTest::test_as_dict",
"tornado/test/options_test.py::OptionsTest::test_dash_underscore_cli",
"tornado/test/options_test.py::OptionsTest::test_dash_underscore_file",
"tornado/test/options_test.py::OptionsTest::test_dash_underscore_introspection",
"tornado/test/options_test.py::OptionsTest::test_error_redefine",
"tornado/test/options_test.py::OptionsTest::test_getitem",
"tornado/test/options_test.py::OptionsTest::test_group_dict",
"tornado/test/options_test.py::OptionsTest::test_help",
"tornado/test/options_test.py::OptionsTest::test_items",
"tornado/test/options_test.py::OptionsTest::test_iter",
"tornado/test/options_test.py::OptionsTest::test_mock_patch",
"tornado/test/options_test.py::OptionsTest::test_multiple_int",
"tornado/test/options_test.py::OptionsTest::test_multiple_string",
"tornado/test/options_test.py::OptionsTest::test_parse_callbacks",
"tornado/test/options_test.py::OptionsTest::test_parse_command_line",
"tornado/test/options_test.py::OptionsTest::test_parse_config_file",
"tornado/test/options_test.py::OptionsTest::test_setattr",
"tornado/test/options_test.py::OptionsTest::test_setattr_type_check",
"tornado/test/options_test.py::OptionsTest::test_setattr_with_callback",
"tornado/test/options_test.py::OptionsTest::test_setitem",
"tornado/test/options_test.py::OptionsTest::test_subcommand",
"tornado/test/options_test.py::OptionsTest::test_types",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_304_with_content_length",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_all_methods",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_basic_auth",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_basic_auth_explicit_mode",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_body_encoding",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_body_sanity_checks",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_chunked",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_chunked_close",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_configure_defaults",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_credentials_in_url",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_final_callback_stack_context",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_follow_redirect",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_future_http_error",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_future_http_error_no_raise",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_future_interface",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_header_callback",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_header_callback_stack_context",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_header_types",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_hello_world",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_multi_line_headers",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_non_ascii_header",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_patch_receives_payload",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_post",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_put_307",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_reuse_request_from_response",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_streaming_callback",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_streaming_stack_context",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_types",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_unsupported_auth_mode",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_100_continue",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_100_continue_early_response",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_async_body_producer_chunked",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_async_body_producer_content_length",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_connect_timeout",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_connection_limit",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_connection_refused",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_gzip",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_head_request",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_header_reuse",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_host_header",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_ipv6",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_max_redirects",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_native_body_producer_chunked",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_native_body_producer_content_length",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_no_content",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_no_content_length",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_options_request",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_queue_timeout",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_redirect_connection_limit",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_request_timeout",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_see_other_redirect",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_singleton",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_streaming_follow_redirects",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_sync_body_producer_chunked",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_sync_body_producer_content_length",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_100_continue",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_100_continue_early_response",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_async_body_producer_chunked",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_async_body_producer_content_length",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_connect_timeout",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_connection_limit",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_connection_refused",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_error_logging",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_gzip",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_head_request",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_header_reuse",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_host_header",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_ipv6",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_max_redirects",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_native_body_producer_chunked",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_native_body_producer_content_length",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_no_content",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_no_content_length",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_options_request",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_queue_timeout",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_redirect_connection_limit",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_request_timeout",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_see_other_redirect",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_singleton",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_ssl_context",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_ssl_context_handshake_fail",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_ssl_options",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_ssl_options_handshake_fail",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_streaming_follow_redirects",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_sync_body_producer_chunked",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_sync_body_producer_content_length",
"tornado/test/simple_httpclient_test.py::CreateAsyncHTTPClientTestCase::test_max_clients",
"tornado/test/simple_httpclient_test.py::HTTP100ContinueTestCase::test_100_continue",
"tornado/test/simple_httpclient_test.py::HTTP204NoContentTestCase::test_204_no_content",
"tornado/test/simple_httpclient_test.py::HostnameMappingTestCase::test_hostname_mapping",
"tornado/test/simple_httpclient_test.py::HostnameMappingTestCase::test_port_mapping",
"tornado/test/simple_httpclient_test.py::ResolveTimeoutTestCase::test_resolve_timeout",
"tornado/test/simple_httpclient_test.py::MaxHeaderSizeTest::test_small_headers",
"tornado/test/simple_httpclient_test.py::MaxBodySizeTest::test_small_body",
"tornado/test/simple_httpclient_test.py::MaxBufferSizeTest::test_large_body",
"tornado/test/web_test.py::SecureCookieV1Test::test_arbitrary_bytes",
"tornado/test/web_test.py::SecureCookieV1Test::test_cookie_tampering_future_timestamp",
"tornado/test/web_test.py::SecureCookieV1Test::test_round_trip",
"tornado/test/web_test.py::SecureCookieV2Test::test_key_version_increment_version",
"tornado/test/web_test.py::SecureCookieV2Test::test_key_version_invalidate_version",
"tornado/test/web_test.py::SecureCookieV2Test::test_key_version_roundtrip",
"tornado/test/web_test.py::SecureCookieV2Test::test_key_version_roundtrip_differing_version",
"tornado/test/web_test.py::SecureCookieV2Test::test_round_trip",
"tornado/test/web_test.py::CookieTest::test_cookie_special_char",
"tornado/test/web_test.py::CookieTest::test_get_cookie",
"tornado/test/web_test.py::CookieTest::test_set_cookie",
"tornado/test/web_test.py::CookieTest::test_set_cookie_domain",
"tornado/test/web_test.py::CookieTest::test_set_cookie_expires_days",
"tornado/test/web_test.py::CookieTest::test_set_cookie_false_flags",
"tornado/test/web_test.py::CookieTest::test_set_cookie_max_age",
"tornado/test/web_test.py::CookieTest::test_set_cookie_overwrite",
"tornado/test/web_test.py::AuthRedirectTest::test_absolute_auth_redirect",
"tornado/test/web_test.py::AuthRedirectTest::test_relative_auth_redirect",
"tornado/test/web_test.py::ConnectionCloseTest::test_connection_close",
"tornado/test/web_test.py::RequestEncodingTest::test_error",
"tornado/test/web_test.py::RequestEncodingTest::test_group_encoding",
"tornado/test/web_test.py::RequestEncodingTest::test_group_question_mark",
"tornado/test/web_test.py::RequestEncodingTest::test_slashes",
"tornado/test/web_test.py::WSGISafeWebTest::test_decode_argument",
"tornado/test/web_test.py::WSGISafeWebTest::test_decode_argument_invalid_unicode",
"tornado/test/web_test.py::WSGISafeWebTest::test_decode_argument_plus",
"tornado/test/web_test.py::WSGISafeWebTest::test_get_argument",
"tornado/test/web_test.py::WSGISafeWebTest::test_get_body_arguments",
"tornado/test/web_test.py::WSGISafeWebTest::test_get_query_arguments",
"tornado/test/web_test.py::WSGISafeWebTest::test_header_injection",
"tornado/test/web_test.py::WSGISafeWebTest::test_multi_header",
"tornado/test/web_test.py::WSGISafeWebTest::test_no_gzip",
"tornado/test/web_test.py::WSGISafeWebTest::test_optional_path",
"tornado/test/web_test.py::WSGISafeWebTest::test_redirect",
"tornado/test/web_test.py::WSGISafeWebTest::test_reverse_url",
"tornado/test/web_test.py::WSGISafeWebTest::test_types",
"tornado/test/web_test.py::WSGISafeWebTest::test_uimodule_resources",
"tornado/test/web_test.py::WSGISafeWebTest::test_uimodule_unescaped",
"tornado/test/web_test.py::WSGISafeWebTest::test_web_redirect",
"tornado/test/web_test.py::WSGISafeWebTest::test_web_redirect_double_slash",
"tornado/test/web_test.py::NonWSGIWebTests::test_empty_flush",
"tornado/test/web_test.py::NonWSGIWebTests::test_flow_control",
"tornado/test/web_test.py::ErrorResponseTest::test_default",
"tornado/test/web_test.py::ErrorResponseTest::test_failed_write_error",
"tornado/test/web_test.py::ErrorResponseTest::test_write_error",
"tornado/test/web_test.py::StaticFileTest::test_absolute_static_url",
"tornado/test/web_test.py::StaticFileTest::test_absolute_version_exclusion",
"tornado/test/web_test.py::StaticFileTest::test_include_host_override",
"tornado/test/web_test.py::StaticFileTest::test_path_traversal_protection",
"tornado/test/web_test.py::StaticFileTest::test_relative_version_exclusion",
"tornado/test/web_test.py::StaticFileTest::test_root_static_path",
"tornado/test/web_test.py::StaticFileTest::test_static_304_if_modified_since",
"tornado/test/web_test.py::StaticFileTest::test_static_304_if_none_match",
"tornado/test/web_test.py::StaticFileTest::test_static_404",
"tornado/test/web_test.py::StaticFileTest::test_static_compressed_files",
"tornado/test/web_test.py::StaticFileTest::test_static_etag",
"tornado/test/web_test.py::StaticFileTest::test_static_files",
"tornado/test/web_test.py::StaticFileTest::test_static_head",
"tornado/test/web_test.py::StaticFileTest::test_static_head_range",
"tornado/test/web_test.py::StaticFileTest::test_static_if_modified_since_pre_epoch",
"tornado/test/web_test.py::StaticFileTest::test_static_if_modified_since_time_zone",
"tornado/test/web_test.py::StaticFileTest::test_static_invalid_range",
"tornado/test/web_test.py::StaticFileTest::test_static_range_if_none_match",
"tornado/test/web_test.py::StaticFileTest::test_static_unsatisfiable_range_invalid_start",
"tornado/test/web_test.py::StaticFileTest::test_static_unsatisfiable_range_zero_suffix",
"tornado/test/web_test.py::StaticFileTest::test_static_url",
"tornado/test/web_test.py::StaticFileTest::test_static_with_range",
"tornado/test/web_test.py::StaticFileTest::test_static_with_range_end_edge",
"tornado/test/web_test.py::StaticFileTest::test_static_with_range_full_file",
"tornado/test/web_test.py::StaticFileTest::test_static_with_range_full_past_end",
"tornado/test/web_test.py::StaticFileTest::test_static_with_range_neg_end",
"tornado/test/web_test.py::StaticFileTest::test_static_with_range_partial_past_end",
"tornado/test/web_test.py::StaticDefaultFilenameTest::test_static_default_filename",
"tornado/test/web_test.py::StaticDefaultFilenameTest::test_static_default_redirect",
"tornado/test/web_test.py::StaticFileWithPathTest::test_serve",
"tornado/test/web_test.py::CustomStaticFileTest::test_serve",
"tornado/test/web_test.py::CustomStaticFileTest::test_static_url",
"tornado/test/web_test.py::HostMatchingTest::test_host_matching",
"tornado/test/web_test.py::DefaultHostMatchingTest::test_default_host_matching",
"tornado/test/web_test.py::NamedURLSpecGroupsTest::test_named_urlspec_groups",
"tornado/test/web_test.py::ClearHeaderTest::test_clear_header",
"tornado/test/web_test.py::Header204Test::test_204_headers",
"tornado/test/web_test.py::Header304Test::test_304_headers",
"tornado/test/web_test.py::StatusReasonTest::test_status",
"tornado/test/web_test.py::DateHeaderTest::test_date_header",
"tornado/test/web_test.py::RaiseWithReasonTest::test_httperror_str",
"tornado/test/web_test.py::RaiseWithReasonTest::test_httperror_str_from_httputil",
"tornado/test/web_test.py::RaiseWithReasonTest::test_raise_with_reason",
"tornado/test/web_test.py::ErrorHandlerXSRFTest::test_404_xsrf",
"tornado/test/web_test.py::ErrorHandlerXSRFTest::test_error_xsrf",
"tornado/test/web_test.py::GzipTestCase::test_gzip",
"tornado/test/web_test.py::GzipTestCase::test_gzip_not_requested",
"tornado/test/web_test.py::GzipTestCase::test_gzip_static",
"tornado/test/web_test.py::GzipTestCase::test_vary_already_present",
"tornado/test/web_test.py::GzipTestCase::test_vary_already_present_multiple",
"tornado/test/web_test.py::PathArgsInPrepareTest::test_kw",
"tornado/test/web_test.py::PathArgsInPrepareTest::test_pos",
"tornado/test/web_test.py::ClearAllCookiesTest::test_clear_all_cookies",
"tornado/test/web_test.py::ExceptionHandlerTest::test_http_error",
"tornado/test/web_test.py::ExceptionHandlerTest::test_known_error",
"tornado/test/web_test.py::ExceptionHandlerTest::test_unknown_error",
"tornado/test/web_test.py::BuggyLoggingTest::test_buggy_log_exception",
"tornado/test/web_test.py::UIMethodUIModuleTest::test_ui_method",
"tornado/test/web_test.py::GetArgumentErrorTest::test_catch_error",
"tornado/test/web_test.py::MultipleExceptionTest::test_multi_exception",
"tornado/test/web_test.py::SetLazyPropertiesTest::test_set_properties",
"tornado/test/web_test.py::GetCurrentUserTest::test_get_current_user_from_ui_module_is_lazy",
"tornado/test/web_test.py::GetCurrentUserTest::test_get_current_user_from_ui_module_works",
"tornado/test/web_test.py::GetCurrentUserTest::test_get_current_user_works",
"tornado/test/web_test.py::UnimplementedHTTPMethodsTest::test_unimplemented_standard_methods",
"tornado/test/web_test.py::UnimplementedNonStandardMethodsTest::test_unimplemented_other",
"tornado/test/web_test.py::UnimplementedNonStandardMethodsTest::test_unimplemented_patch",
"tornado/test/web_test.py::AllHTTPMethodsTest::test_standard_methods",
"tornado/test/web_test.py::PatchMethodTest::test_other",
"tornado/test/web_test.py::PatchMethodTest::test_patch",
"tornado/test/web_test.py::FinishInPrepareTest::test_finish_in_prepare",
"tornado/test/web_test.py::Default404Test::test_404",
"tornado/test/web_test.py::Custom404Test::test_404",
"tornado/test/web_test.py::DefaultHandlerArgumentsTest::test_403",
"tornado/test/web_test.py::HandlerByNameTest::test_handler_by_name",
"tornado/test/web_test.py::StreamingRequestBodyTest::test_close_during_upload",
"tornado/test/web_test.py::StreamingRequestBodyTest::test_early_return",
"tornado/test/web_test.py::StreamingRequestBodyTest::test_early_return_with_data",
"tornado/test/web_test.py::StreamingRequestBodyTest::test_streaming_body",
"tornado/test/web_test.py::DecoratedStreamingRequestFlowControlTest::test_flow_control_chunked_body",
"tornado/test/web_test.py::DecoratedStreamingRequestFlowControlTest::test_flow_control_compressed_body",
"tornado/test/web_test.py::DecoratedStreamingRequestFlowControlTest::test_flow_control_fixed_body",
"tornado/test/web_test.py::NativeStreamingRequestFlowControlTest::test_flow_control_chunked_body",
"tornado/test/web_test.py::NativeStreamingRequestFlowControlTest::test_flow_control_compressed_body",
"tornado/test/web_test.py::NativeStreamingRequestFlowControlTest::test_flow_control_fixed_body",
"tornado/test/web_test.py::IncorrectContentLengthTest::test_content_length_too_high",
"tornado/test/web_test.py::IncorrectContentLengthTest::test_content_length_too_low",
"tornado/test/web_test.py::ClientCloseTest::test_client_close",
"tornado/test/web_test.py::SignedValueTest::test_expired",
"tornado/test/web_test.py::SignedValueTest::test_key_version_retrieval",
"tornado/test/web_test.py::SignedValueTest::test_key_versioning_invalid_key",
"tornado/test/web_test.py::SignedValueTest::test_key_versioning_read_write_default_key",
"tornado/test/web_test.py::SignedValueTest::test_key_versioning_read_write_non_default_key",
"tornado/test/web_test.py::SignedValueTest::test_known_values",
"tornado/test/web_test.py::SignedValueTest::test_name_swap",
"tornado/test/web_test.py::SignedValueTest::test_non_ascii",
"tornado/test/web_test.py::SignedValueTest::test_payload_tampering",
"tornado/test/web_test.py::SignedValueTest::test_signature_tampering",
"tornado/test/web_test.py::XSRFTest::test_cross_user",
"tornado/test/web_test.py::XSRFTest::test_distinct_tokens",
"tornado/test/web_test.py::XSRFTest::test_refresh_token",
"tornado/test/web_test.py::XSRFTest::test_versioning",
"tornado/test/web_test.py::XSRFTest::test_xsrf_fail_argument_invalid_format",
"tornado/test/web_test.py::XSRFTest::test_xsrf_fail_body_no_cookie",
"tornado/test/web_test.py::XSRFTest::test_xsrf_fail_cookie_invalid_format",
"tornado/test/web_test.py::XSRFTest::test_xsrf_fail_cookie_no_body",
"tornado/test/web_test.py::XSRFTest::test_xsrf_fail_no_token",
"tornado/test/web_test.py::XSRFTest::test_xsrf_success_header",
"tornado/test/web_test.py::XSRFTest::test_xsrf_success_non_hex_token",
"tornado/test/web_test.py::XSRFTest::test_xsrf_success_post_body",
"tornado/test/web_test.py::XSRFTest::test_xsrf_success_query_string",
"tornado/test/web_test.py::XSRFTest::test_xsrf_success_short_token",
"tornado/test/web_test.py::XSRFCookieKwargsTest::test_xsrf_httponly",
"tornado/test/web_test.py::FinishExceptionTest::test_finish_exception",
"tornado/test/web_test.py::DecoratorTest::test_addslash",
"tornado/test/web_test.py::DecoratorTest::test_removeslash",
"tornado/test/web_test.py::CacheTest::test_multiple_strong_etag_match",
"tornado/test/web_test.py::CacheTest::test_multiple_strong_etag_not_match",
"tornado/test/web_test.py::CacheTest::test_multiple_weak_etag_match",
"tornado/test/web_test.py::CacheTest::test_multiple_weak_etag_not_match",
"tornado/test/web_test.py::CacheTest::test_strong_etag_match",
"tornado/test/web_test.py::CacheTest::test_strong_etag_not_match",
"tornado/test/web_test.py::CacheTest::test_weak_etag_match",
"tornado/test/web_test.py::CacheTest::test_weak_etag_not_match",
"tornado/test/web_test.py::CacheTest::test_wildcard_etag",
"tornado/test/web_test.py::RequestSummaryTest::test_missing_remote_ip",
"tornado/test/web_test.py::HTTPErrorTest::test_copy",
"tornado/test/web_test.py::ApplicationTest::test_listen",
"tornado/test/web_test.py::URLSpecReverseTest::test_non_reversible",
"tornado/test/web_test.py::URLSpecReverseTest::test_reverse",
"tornado/test/web_test.py::URLSpecReverseTest::test_reverse_arguments",
"tornado/test/web_test.py::RedirectHandlerTest::test_basic_redirect",
"tornado/test/web_test.py::RedirectHandlerTest::test_redirect_pattern",
"tornado/test/websocket_test.py::WebSocketTest::test_async_prepare",
"tornado/test/websocket_test.py::WebSocketTest::test_bad_websocket_version",
"tornado/test/websocket_test.py::WebSocketTest::test_binary_message",
"tornado/test/websocket_test.py::WebSocketTest::test_check_origin_invalid",
"tornado/test/websocket_test.py::WebSocketTest::test_check_origin_invalid_partial_url",
"tornado/test/websocket_test.py::WebSocketTest::test_check_origin_invalid_subdomains",
"tornado/test/websocket_test.py::WebSocketTest::test_check_origin_valid_no_path",
"tornado/test/websocket_test.py::WebSocketTest::test_check_origin_valid_with_path",
"tornado/test/websocket_test.py::WebSocketTest::test_client_close_reason",
"tornado/test/websocket_test.py::WebSocketTest::test_coroutine",
"tornado/test/websocket_test.py::WebSocketTest::test_error_in_on_message",
"tornado/test/websocket_test.py::WebSocketTest::test_http_request",
"tornado/test/websocket_test.py::WebSocketTest::test_path_args",
"tornado/test/websocket_test.py::WebSocketTest::test_render_message",
"tornado/test/websocket_test.py::WebSocketTest::test_server_close_reason",
"tornado/test/websocket_test.py::WebSocketTest::test_unicode_message",
"tornado/test/websocket_test.py::WebSocketTest::test_websocket_callbacks",
"tornado/test/websocket_test.py::WebSocketTest::test_websocket_close_buffered_data",
"tornado/test/websocket_test.py::WebSocketTest::test_websocket_gen",
"tornado/test/websocket_test.py::WebSocketTest::test_websocket_header_echo",
"tornado/test/websocket_test.py::WebSocketTest::test_websocket_headers",
"tornado/test/websocket_test.py::WebSocketTest::test_websocket_http_fail",
"tornado/test/websocket_test.py::WebSocketTest::test_websocket_http_success",
"tornado/test/websocket_test.py::WebSocketTest::test_websocket_network_fail",
"tornado/test/websocket_test.py::WebSocketNativeCoroutineTest::test_native_coroutine",
"tornado/test/websocket_test.py::NoCompressionTest::test_message_sizes",
"tornado/test/websocket_test.py::ServerOnlyCompressionTest::test_message_sizes",
"tornado/test/websocket_test.py::ClientOnlyCompressionTest::test_message_sizes",
"tornado/test/websocket_test.py::DefaultCompressionTest::test_message_sizes",
"tornado/test/websocket_test.py::PythonMaskFunctionTest::test_mask",
"tornado/test/websocket_test.py::CythonMaskFunctionTest::test_mask",
"tornado/test/websocket_test.py::ServerPeriodicPingTest::test_server_ping",
"tornado/test/websocket_test.py::ClientPeriodicPingTest::test_client_ping",
"tornado/test/websocket_test.py::MaxMessageSizeTest::test_large_message"
]
| []
| Apache License 2.0 | 2,011 | [
"tornado/curl_httpclient.py",
"docs/releases.rst",
"tornado/web.py",
"tornado/__init__.py",
"setup.py",
"tornado/iostream.py",
"tornado/websocket.py",
"docs/releases/v4.5.3.rst",
".travis.yml",
"tornado/http1connection.py",
"tornado/options.py"
]
| [
"tornado/curl_httpclient.py",
"docs/releases.rst",
"tornado/web.py",
"tornado/__init__.py",
"setup.py",
"tornado/iostream.py",
"tornado/websocket.py",
"docs/releases/v4.5.3.rst",
".travis.yml",
"tornado/http1connection.py",
"tornado/options.py"
]
|
|
PyCQA__pyflakes-322 | 8a1feac08dae2478e3f67ab4018af86ff4ec56f0 | 2018-01-05 10:39:36 | 8a1feac08dae2478e3f67ab4018af86ff4ec56f0 | bitglue: Also, we should probably merge this and #303 together and release a new major version since it breaks backwards compatibility.
hugovk: AppVeyor looks like it needs an updated pip to recognise the new python_requires. I had a go at updating it in another branch, but there's a PyPy bug preventing that. They've just fixed it, and it'll be in the next PyPy release, 5.10.1.
See https://bitbucket.org/pypy/pypy/issues/2720/ensurepip-on-pypy-c-jit-93579-a4194a67868f and https://stackoverflow.com/q/47999518/724176.
I do not know when that will be out. Options are to wait until it's sorted before merging, or to comment out that PyPy build until then. (Merging with the failure doesn't really help.)
bitglue: Let's just comment that build with an explanation for now.
hugovk: Commented out! | diff --git a/.appveyor.yml b/.appveyor.yml
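For reference, the `python_requires` metadata the thread is talking about is declared in `setup.py` — the patch below adds exactly that line. Newer pip releases (9.0+) read this Requires-Python metadata at install time and refuse to install on excluded interpreters, while older pips silently ignore it, which is why bootstrapping an up-to-date pip on PyPy matters here. A minimal sketch (package name is a placeholder):

```python
# Minimal sketch: declaring supported interpreters via python_requires.
# pip >= 9.0 honours this metadata at install time; older pip versions
# ignore it and install anyway.
from setuptools import setup

setup(
    name="example",  # placeholder package name
    version="1.0",
    python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*",
)
```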
index 41b1dbb..8a62cf8 100644
--- a/.appveyor.yml
+++ b/.appveyor.yml
@@ -13,18 +13,21 @@ install:
- ps: 7z x pypy2-v5.3.1-win32.zip | Out-Null
- move pypy2-v5.3.1-win32 C:\
- - ps: (New-Object Net.WebClient).DownloadFile('https://bitbucket.org/pypy/pypy/downloads/pypy3-2.4.0-win32.zip', "$env:appveyor_build_folder\pypy3-2.4.0-win32.zip")
- - ps: 7z x pypy3-2.4.0-win32.zip | Out-Null
- - move pypy3-2.4.0-win32 C:\
+ # Note: pypy3-v5.10.0 is commented out as it needs an updated pip to
+ # recognise the new python_requires, but there's a pypy bug preventing that.
+ # It's been fixed and will be in the next 5.10.1 release.
+ # See https://bitbucket.org/pypy/pypy/issues/2720/ensurepip-on-pypy-c-jit-93579-a4194a67868f
+ # and https://stackoverflow.com/q/47999518/724176.
- # pypy3 installer provides 'pypy.exe', not pypy3.exe.
- - copy C:\pypy3-2.4.0-win32\pypy.exe C:\pypy3-2.4.0-win32\pypy3.exe
+# - ps: (New-Object Net.WebClient).DownloadFile('https://bitbucket.org/pypy/pypy/downloads/pypy3-v5.10.0-win32.zip', "$env:appveyor_build_folder\pypy3-v5.10.0-win32.zip")
+# - ps: 7z x pypy3-v5.10.0-win32.zip | Out-Null
+# - move pypy3-v5.10.0-win32 C:\
# workaround https://github.com/pypa/virtualenv/issues/93
- mkdir C:\python33\tcl\tcl8.6
- mkdir C:\python33\tcl\tk8.6
- - mkdir C:\pypy3-2.4.0-win32\tcl\tcl8.6
- - mkdir C:\pypy3-2.4.0-win32\tcl\tk8.6
+# - mkdir C:\pypy3-v5.10.0-win32\tcl\tcl8.6
+# - mkdir C:\pypy3-v5.10.0-win32\tcl\tk8.6
# Only pypy2-5.3.1 is integrated into tox, as pypy3-2.4.0 fails and
# a Windows distribution of pypy3-5.2 isnt available yet.
@@ -33,14 +36,13 @@ install:
# pypy3-2.4.0 and pypy-2.6.1 are manually bootstrapped and tested
- ps: (New-Object Net.WebClient).DownloadFile('https://bootstrap.pypa.io/get-pip.py', "$env:appveyor_build_folder\get-pip.py")
# errors are ignored due to https://github.com/pypa/pip/issues/2669#issuecomment-136405390
- # NOTE: If and when a new version of PyPy3 is released for Windows that
- # supports anything newer than Python 3.2, remove the setuptools pin.
- - ps: C:\pypy3-2.4.0-win32\pypy3 "$env:appveyor_build_folder\get-pip.py"; C:\pypy3-2.4.0-win32\pypy3 -m pip install -U --force-reinstall pip==8.1.2 "setuptools<30"; echo "ignore error"
+# - ps: C:\pypy3-v5.10.0-win32\pypy3 "$env:appveyor_build_folder\get-pip.py"; echo "ignore error"
+# - ps: C:\pypy3-v5.10.0-win32\pypy3 -m pip install -U --force-reinstall pip setuptools; echo "ignore error"
- ps: C:\pypy-2.6.1-win32\pypy "$env:appveyor_build_folder\get-pip.py"
build: off
test_script:
- python -m tox
- - C:\pypy3-2.4.0-win32\pypy3 setup.py test -q
+# - C:\pypy3-v5.10.0-win32\pypy3 setup.py test -q
- C:\pypy-2.6.1-win32\pypy setup.py test -q
diff --git a/.travis.yml b/.travis.yml
index 780b117..29f1d44 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,9 +1,7 @@
language: python
cache: pip
python:
- - 2.6
- 2.7
- - 3.3
- 3.4
- 3.5
- 3.6
@@ -11,13 +9,10 @@ python:
- pypy
- pypy-5.3
- pypy3
- - pypy3.3-5.2-alpha1
install:
- - pip install flake8==2.1.0 pep8==1.5.6
- - python setup.py install
+ - pip install --upgrade .
- pip list
- - if [ "$TRAVIS_PYTHON_VERSION" != "nightly" ]; then flake8 --version; fi
script:
- python setup.py test -q
- - if [ "$TRAVIS_PYTHON_VERSION" != "nightly" ]; then flake8 pyflakes setup.py; fi
+ - if [ "$TRAVIS_PYTHON_VERSION" != "nightly" ]; then pip install flake8==2.1.0 pep8==1.5.6 && flake8 --version && flake8 pyflakes setup.py; fi
sudo: false
diff --git a/README.rst b/README.rst
index 45f0ab8..6699b4c 100644
--- a/README.rst
+++ b/README.rst
@@ -9,7 +9,7 @@ parsing the source file, not importing it, so it is safe to use on
modules with side effects. It's also much faster.
It is `available on PyPI <https://pypi.python.org/pypi/pyflakes>`_
-and it supports all active versions of Python from 2.5 to 3.6.
+and it supports all active versions of Python: 2.7 and 3.4 to 3.6.
diff --git a/pyflakes/__main__.py b/pyflakes/__main__.py
index a69e689..68cd9ef 100644
--- a/pyflakes/__main__.py
+++ b/pyflakes/__main__.py
@@ -1,5 +1,5 @@
from pyflakes.api import main
-# python -m pyflakes (with Python >= 2.7)
+# python -m pyflakes
if __name__ == '__main__':
main(prog='pyflakes')
diff --git a/pyflakes/api.py b/pyflakes/api.py
index 49ee38d..805a886 100644
--- a/pyflakes/api.py
+++ b/pyflakes/api.py
@@ -89,19 +89,8 @@ def checkPath(filename, reporter=None):
if reporter is None:
reporter = modReporter._makeDefaultReporter()
try:
- # in Python 2.6, compile() will choke on \r\n line endings. In later
- # versions of python it's smarter, and we want binary mode to give
- # compile() the best opportunity to do the right thing WRT text
- # encodings.
- if sys.version_info < (2, 7):
- mode = 'rU'
- else:
- mode = 'rb'
-
- with open(filename, mode) as f:
+ with open(filename, 'rb') as f:
codestr = f.read()
- if sys.version_info < (2, 7):
- codestr += '\n' # Work around for Python <= 2.6
except UnicodeError:
reporter.unexpectedError(filename, 'problem decoding source')
return 1
@@ -117,6 +106,10 @@ def isPythonFile(filename):
if filename.endswith('.py'):
return True
+ # Avoid obvious Emacs backup files
+ if filename.endswith("~"):
+ return False
+
max_bytes = 128
try:
diff --git a/pyflakes/checker.py b/pyflakes/checker.py
index baef833..bd5eba5 100644
--- a/pyflakes/checker.py
+++ b/pyflakes/checker.py
@@ -5,14 +5,13 @@ Implement the central Checker class.
Also, it models the Bindings and Scopes.
"""
import __future__
+import ast
import doctest
import os
import sys
PY2 = sys.version_info < (3, 0)
-PY32 = sys.version_info < (3, 3) # Python 2.5 to 3.2
-PY33 = sys.version_info < (3, 4) # Python 2.5 to 3.3
-PY34 = sys.version_info < (3, 5) # Python 2.5 to 3.4
+PY34 = sys.version_info < (3, 5) # Python 2.7 to 3.4
try:
sys.pypy_version_info
PYPY = True
@@ -21,16 +20,6 @@ except AttributeError:
builtin_vars = dir(__import__('__builtin__' if PY2 else 'builtins'))
-try:
- import ast
-except ImportError: # Python 2.5
- import _ast as ast
-
- if 'decorator_list' not in ast.ClassDef._fields:
- # Patch the missing attribute 'decorator_list'
- ast.ClassDef.decorator_list = ()
- ast.FunctionDef.decorator_list = property(lambda s: s.decorators)
-
from pyflakes import messages
@@ -38,15 +27,22 @@ if PY2:
def getNodeType(node_class):
# workaround str.upper() which is locale-dependent
return str(unicode(node_class.__name__).upper())
+
+ def get_raise_argument(node):
+ return node.type
+
else:
def getNodeType(node_class):
return node_class.__name__.upper()
+ def get_raise_argument(node):
+ return node.exc
+
# Silence `pyflakes` from reporting `undefined name 'unicode'` in Python 3.
unicode = str
# Python >= 3.3 uses ast.Try instead of (ast.TryExcept + ast.TryFinally)
-if PY32:
+if PY2:
def getAlternatives(n):
if isinstance(n, (ast.If, ast.TryFinally)):
return [n.body]
@@ -131,13 +127,17 @@ def convert_to_value(item):
result.name,
result,
)
- elif (not PY33) and isinstance(item, ast.NameConstant):
+ elif (not PY2) and isinstance(item, ast.NameConstant):
# None, True, False are nameconstants in python3, but names in 2
return item.value
else:
return UnhandledKeyType()
+def is_notimplemented_name_node(node):
+ return isinstance(node, ast.Name) and getNodeName(node) == 'NotImplemented'
+
+
class Binding(object):
"""
Represents the binding of a value to a name.
@@ -410,8 +410,8 @@ class FunctionScope(Scope):
@ivar globals: Names declared 'global' in this function.
"""
usesLocals = False
- alwaysUsed = set(['__tracebackhide__',
- '__traceback_info__', '__traceback_supplement__'])
+ alwaysUsed = {'__tracebackhide__', '__traceback_info__',
+ '__traceback_supplement__'}
def __init__(self):
super(FunctionScope, self).__init__()
@@ -710,10 +710,14 @@ class Checker(object):
# try enclosing function scopes and global scope
for scope in self.scopeStack[-1::-1]:
- # only generators used in a class scope can access the names
- # of the class. this is skipped during the first iteration
- if in_generators is False and isinstance(scope, ClassScope):
- continue
+ if isinstance(scope, ClassScope):
+ if not PY2 and name == '__class__':
+ return
+ elif in_generators is False:
+ # only generators used in a class scope can access the
+ # names of the class. this is skipped during the first
+ # iteration
+ continue
try:
scope[name].used = (self.scope, node)
@@ -961,7 +965,7 @@ class Checker(object):
# "stmt" type nodes
DELETE = PRINT = FOR = ASYNCFOR = WHILE = IF = WITH = WITHITEM = \
- ASYNCWITH = ASYNCWITHITEM = RAISE = TRYFINALLY = EXEC = \
+ ASYNCWITH = ASYNCWITHITEM = TRYFINALLY = EXEC = \
EXPR = ASSIGN = handleChildren
PASS = ignore
@@ -985,6 +989,19 @@ class Checker(object):
EQ = NOTEQ = LT = LTE = GT = GTE = IS = ISNOT = IN = NOTIN = \
MATMULT = ignore
+ def RAISE(self, node):
+ self.handleChildren(node)
+
+ arg = get_raise_argument(node)
+
+ if isinstance(arg, ast.Call):
+ if is_notimplemented_name_node(arg.func):
+ # Handle "raise NotImplemented(...)"
+ self.report(messages.RaiseNotImplemented, node)
+ elif is_notimplemented_name_node(arg):
+ # Handle "raise NotImplemented"
+ self.report(messages.RaiseNotImplemented, node)
+
# additional node types
COMPREHENSION = KEYWORD = FORMATTEDVALUE = JOINEDSTR = handleChildren
@@ -1178,9 +1195,9 @@ class Checker(object):
wildcard = getattr(node.args, arg_name)
if not wildcard:
continue
- args.append(wildcard if PY33 else wildcard.arg)
+ args.append(wildcard if PY2 else wildcard.arg)
if is_py3_func:
- if PY33: # Python 2.5 to 3.3
+ if PY2: # Python 2.7
argannotation = arg_name + 'annotation'
annotations.append(getattr(node.args, argannotation))
else: # Python >= 3.4
@@ -1221,7 +1238,7 @@ class Checker(object):
self.report(messages.UnusedVariable, binding.source, name)
self.deferAssignment(checkUnusedAssignments)
- if PY32:
+ if PY2:
def checkReturnWithArgumentInsideGenerator():
"""
Check to see if there is any return statement with
diff --git a/pyflakes/messages.py b/pyflakes/messages.py
index 670f95f..eb87a72 100644
--- a/pyflakes/messages.py
+++ b/pyflakes/messages.py
@@ -239,3 +239,7 @@ class ForwardAnnotationSyntaxError(Message):
def __init__(self, filename, loc, annotation):
Message.__init__(self, filename, loc)
self.message_args = (annotation,)
+
+
+class RaiseNotImplemented(Message):
+ message = "'raise NotImplemented' should be 'raise NotImplementedError'"
diff --git a/setup.py b/setup.py
index 387b7bc..6567b53 100755
--- a/setup.py
+++ b/setup.py
@@ -44,6 +44,7 @@ setup(
author_email="[email protected]",
url="https://github.com/PyCQA/pyflakes",
packages=["pyflakes", "pyflakes.scripts", "pyflakes.test"],
+ python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
classifiers=[
"Development Status :: 6 - Mature",
"Environment :: Console",
@@ -51,7 +52,13 @@ setup(
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
+ "Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.4",
+ "Programming Language :: Python :: 3.5",
+ "Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: Implementation :: CPython",
+ "Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development",
"Topic :: Utilities",
],
diff --git a/tox.ini b/tox.ini
index 737e334..fabad9d 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,7 +1,7 @@
[tox]
skip_missing_interpreters = True
envlist =
- py26,py27,py33,py34,py35,py36,pypy,pypy3
+ py27,py34,py35,py36,pypy,pypy3
[testenv]
deps =
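Besides dropping EOL interpreters, the patch above adds a `RaiseNotImplemented` check. A short sketch of the code it flags — `NotImplemented` is a sentinel value meant to be *returned* from binary-operator methods, not an exception, so raising it fails with a `TypeError` at runtime (the function name here is purely illustrative):

```python
# Both forms are reported by the new RaiseNotImplemented message.
def frobnicate():
    raise NotImplemented                # flagged: raising the sentinel value
    # raise NotImplemented("later")     # the call form would be flagged too

def frobnicate_fixed():
    raise NotImplementedError("later")  # the intended exception class
```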
| Drop support for EOL Python versions?
I'd like to suggest pyflakes only supports non-EOL Python versions.
Python 2.5, 2.6, 3.2 and 3.3 are EOL and no longer receive security updates.

https://github.com/PyCQA/pyflakes#pyflakes says:
> It is available on PyPI and it supports all active versions of Python from 2.5 to 3.6.
It can be argued that 2.5-2.6 and 3.0-3.3 are no longer active and no longer need support.
Here are the pip installs for pyflakes from PyPI over the last month (via `pypinfo --percent --pip --markdown pyflakes pyversion`), showing low or non-existent numbers for the EOL versions.
| python_version | percent | download_count |
| -------------- | ------: | -------------: |
| 2.7 | 54.1% | 582,452 |
| 3.6 | 20.5% | 221,023 |
| 3.5 | 17.2% | 185,122 |
| 3.4 | 6.8% | 72,930 |
| 3.3 | 0.6% | 6,432 |
| 2.6 | 0.4% | 4,789 |
| 3.7 | 0.4% | 4,226 |
| 3.2 | 0.0% | 414 |
| None | 0.0% | 4 |
Some other PyCQA projects are planning on ending all Python 2 support around mid-2018 when Python 3.7 is due out (https://github.com/PyCQA/meta/issues/19). | PyCQA/pyflakes | diff --git a/pyflakes/test/harness.py b/pyflakes/test/harness.py
index 009923f..3f592db 100644
--- a/pyflakes/test/harness.py
+++ b/pyflakes/test/harness.py
@@ -1,5 +1,4 @@
-import sys
import textwrap
import unittest
@@ -7,12 +6,8 @@ from pyflakes import checker
__all__ = ['TestCase', 'skip', 'skipIf']
-if sys.version_info < (2, 7):
- skip = lambda why: (lambda func: 'skip') # not callable
- skipIf = lambda cond, why: (skip(why) if cond else lambda func: func)
-else:
- skip = unittest.skip
- skipIf = unittest.skipIf
+skip = unittest.skip
+skipIf = unittest.skipIf
PyCF_ONLY_AST = 1024
diff --git a/pyflakes/test/test_api.py b/pyflakes/test/test_api.py
index 8aa6df9..56ca269 100644
--- a/pyflakes/test/test_api.py
+++ b/pyflakes/test/test_api.py
@@ -180,6 +180,7 @@ class TestIterSourceCode(TestCase):
"""
os.mkdir(os.path.join(self.tempdir, 'foo'))
apath = self.makeEmptyFile('foo', 'a.py')
+ self.makeEmptyFile('foo', 'a.py~')
os.mkdir(os.path.join(self.tempdir, 'bar'))
bpath = self.makeEmptyFile('bar', 'b.py')
cpath = self.makeEmptyFile('c.py')
@@ -514,7 +515,7 @@ foo(bar=baz, bax)
foo(bar=baz, bax)
%s""" % (sourcePath, column, message, last_line)])
- @skipIf(PYPY, 'Output in PyPy varies highly, dependending on version')
+ @skipIf(PYPY, 'Output in PyPy varies highly, depending on version')
def test_invalidEscape(self):
"""
The invalid escape syntax raises ValueError in Python 2
diff --git a/pyflakes/test/test_dict.py b/pyflakes/test/test_dict.py
index 628ec0c..b9059c2 100644
--- a/pyflakes/test/test_dict.py
+++ b/pyflakes/test/test_dict.py
@@ -19,15 +19,11 @@ class Test(TestCase):
@skipIf(version_info < (3,),
"bytes and strings with same 'value' are not equal in python3")
- @skipIf(version_info[0:2] == (3, 2),
- "python3.2 does not allow u"" literal string definition")
def test_duplicate_keys_bytes_vs_unicode_py3(self):
self.flakes("{b'a': 1, u'a': 2}")
@skipIf(version_info < (3,),
"bytes and strings with same 'value' are not equal in python3")
- @skipIf(version_info[0:2] == (3, 2),
- "python3.2 does not allow u"" literal string definition")
def test_duplicate_values_bytes_vs_unicode_py3(self):
self.flakes(
"{1: b'a', 1: u'a'}",
diff --git a/pyflakes/test/test_imports.py b/pyflakes/test/test_imports.py
index 4723954..586a0a5 100644
--- a/pyflakes/test/test_imports.py
+++ b/pyflakes/test/test_imports.py
@@ -569,7 +569,6 @@ class Test(TestCase):
''')
def test_redefinedByExcept(self):
- as_exc = ', ' if version_info < (2, 6) else ' as '
expected = [m.RedefinedWhileUnused]
if version_info >= (3,):
# The exc variable is unused inside the exception handler.
@@ -577,8 +576,8 @@ class Test(TestCase):
self.flakes('''
import fu
try: pass
- except Exception%sfu: pass
- ''' % as_exc, *expected)
+ except Exception as fu: pass
+ ''', *expected)
def test_usedInRaise(self):
self.flakes('''
@@ -1149,13 +1148,6 @@ class TestSpecialAll(TestCase):
return "hello"
''', m.UndefinedName)
-
-class Python26Tests(TestCase):
- """
- Tests for checking of syntax which is valid in Python 2.6 and newer.
- """
-
- @skipIf(version_info < (2, 6), "Python >= 2.6 only")
def test_usedAsClassDecorator(self):
"""
Using an imported name as a class decorator results in no warnings,
diff --git a/pyflakes/test/test_other.py b/pyflakes/test/test_other.py
index 5bae7a0..364b375 100644
--- a/pyflakes/test/test_other.py
+++ b/pyflakes/test/test_other.py
@@ -81,7 +81,6 @@ class Test(TestCase):
(1 for a, b in [(1, 2)])
''')
- @skipIf(version_info < (2, 7), "Python >= 2.7 only")
def test_redefinedInSetComprehension(self):
"""
Test that reusing a variable in a set comprehension does not raise
@@ -111,7 +110,6 @@ class Test(TestCase):
{1 for a, b in [(1, 2)]}
''')
- @skipIf(version_info < (2, 7), "Python >= 2.7 only")
def test_redefinedInDictComprehension(self):
"""
Test that reusing a variable in a dict comprehension does not raise
@@ -279,7 +277,6 @@ class Test(TestCase):
a = classmethod(a)
''')
- @skipIf(version_info < (2, 6), "Python >= 2.6 only")
def test_modernProperty(self):
self.flakes("""
class A:
@@ -1584,7 +1581,6 @@ class TestUnusedAssignment(TestCase):
pass
''', m.UndefinedName)
- @skipIf(version_info < (2, 7), "Python >= 2.7 only")
def test_dictComprehension(self):
"""
Dict comprehensions are properly handled.
@@ -1593,7 +1589,6 @@ class TestUnusedAssignment(TestCase):
a = {1: x for x in range(10)}
''')
- @skipIf(version_info < (2, 7), "Python >= 2.7 only")
def test_setComprehensionAndLiteral(self):
"""
Set comprehensions are properly handled.
@@ -1604,17 +1599,16 @@ class TestUnusedAssignment(TestCase):
''')
def test_exceptionUsedInExcept(self):
- as_exc = ', ' if version_info < (2, 6) else ' as '
self.flakes('''
try: pass
- except Exception%se: e
- ''' % as_exc)
+ except Exception as e: e
+ ''')
self.flakes('''
def download_review():
try: pass
- except Exception%se: e
- ''' % as_exc)
+ except Exception as e: e
+ ''')
@skipIf(version_info < (3,),
"In Python 2 exception names stay bound after the exception handler")
@@ -1625,12 +1619,11 @@ class TestUnusedAssignment(TestCase):
''', m.UnusedVariable)
def test_exceptionUnusedInExceptInFunction(self):
- as_exc = ', ' if version_info < (2, 6) else ' as '
self.flakes('''
def download_review():
try: pass
- except Exception%se: pass
- ''' % as_exc, m.UnusedVariable)
+ except Exception as e: pass
+ ''', m.UnusedVariable)
def test_exceptWithoutNameInFunction(self):
"""
@@ -1983,3 +1976,20 @@ class TestAsyncStatements(TestCase):
self.flakes('''
a: 'a: "A"'
''', m.ForwardAnnotationSyntaxError)
+
+ def test_raise_notimplemented(self):
+ self.flakes('''
+ raise NotImplementedError("This is fine")
+ ''')
+
+ self.flakes('''
+ raise NotImplementedError
+ ''')
+
+ self.flakes('''
+ raise NotImplemented("This isn't gonna work")
+ ''', m.RaiseNotImplemented)
+
+ self.flakes('''
+ raise NotImplemented
+ ''', m.RaiseNotImplemented)
diff --git a/pyflakes/test/test_undefined_names.py b/pyflakes/test/test_undefined_names.py
index 3d19210..8434cd8 100644
--- a/pyflakes/test/test_undefined_names.py
+++ b/pyflakes/test/test_undefined_names.py
@@ -728,14 +728,13 @@ class Test(TestCase):
B = dict((i, str(i)) for i in T)
''')
- if version_info >= (2, 7):
- self.flakes('''
- class A:
- T = range(10)
+ self.flakes('''
+ class A:
+ T = range(10)
- X = {x for x in T}
- Y = {x:x for x in T}
- ''')
+ X = {x for x in T}
+ Y = {x:x for x in T}
+ ''')
def test_definedInClassNested(self):
"""Defined name for nested generator expressions in a class."""
@@ -761,7 +760,6 @@ class Test(TestCase):
(42 for i in range(i))
''', m.UndefinedName)
- @skipIf(version_info < (2, 7), 'Dictionary comprehensions do not exist')
def test_definedFromLambdaInDictionaryComprehension(self):
"""
Defined name referenced from a lambda function within a dict/set
@@ -780,7 +778,6 @@ class Test(TestCase):
any(lambda: id(x) for x in range(10))
''')
- @skipIf(version_info < (2, 7), 'Dictionary comprehensions do not exist')
def test_undefinedFromLambdaInDictionaryComprehension(self):
"""
Undefined name referenced from a lambda function within a dict/set
@@ -799,6 +796,24 @@ class Test(TestCase):
any(lambda: id(y) for x in range(10))
''', m.UndefinedName)
+ def test_dunderClass(self):
+ """
+ `__class__` is defined in class scope under Python 3, but is not
+ in Python 2.
+ """
+ code = '''
+ class Test(object):
+ def __init__(self):
+ print(__class__.__name__)
+ self.x = 1
+
+ t = Test()
+ '''
+ if version_info < (3,):
+ self.flakes(code, m.UndefinedName)
+ else:
+ self.flakes(code)
+
class NameTests(TestCase):
"""
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_media",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 9
} | 1.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"flake8",
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
flake8==5.0.4
importlib-metadata==4.2.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
mccabe==0.7.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pycodestyle==2.9.1
-e git+https://github.com/PyCQA/pyflakes.git@8a1feac08dae2478e3f67ab4018af86ff4ec56f0#egg=pyflakes
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: pyflakes
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- flake8==5.0.4
- importlib-metadata==4.2.0
- mccabe==0.7.0
- pycodestyle==2.9.1
prefix: /opt/conda/envs/pyflakes
| [
"pyflakes/test/test_other.py::TestAsyncStatements::test_raise_notimplemented",
"pyflakes/test/test_undefined_names.py::Test::test_dunderClass"
]
| []
| [
"pyflakes/test/test_api.py::TestIterSourceCode::test_emptyDirectory",
"pyflakes/test/test_api.py::TestIterSourceCode::test_explicitFiles",
"pyflakes/test/test_api.py::TestIterSourceCode::test_multipleDirectories",
"pyflakes/test/test_api.py::TestIterSourceCode::test_onlyPythonSource",
"pyflakes/test/test_api.py::TestIterSourceCode::test_recurses",
"pyflakes/test/test_api.py::TestIterSourceCode::test_shebang",
"pyflakes/test/test_api.py::TestIterSourceCode::test_singleFile",
"pyflakes/test/test_api.py::TestReporter::test_flake",
"pyflakes/test/test_api.py::TestReporter::test_multiLineSyntaxError",
"pyflakes/test/test_api.py::TestReporter::test_syntaxError",
"pyflakes/test/test_api.py::TestReporter::test_syntaxErrorNoOffset",
"pyflakes/test/test_api.py::TestReporter::test_unexpectedError",
"pyflakes/test/test_api.py::CheckTests::test_CRLFLineEndings",
"pyflakes/test/test_api.py::CheckTests::test_checkPathNonExisting",
"pyflakes/test/test_api.py::CheckTests::test_checkRecursive",
"pyflakes/test/test_api.py::CheckTests::test_encodedFileUTF8",
"pyflakes/test/test_api.py::CheckTests::test_eofSyntaxError",
"pyflakes/test/test_api.py::CheckTests::test_eofSyntaxErrorWithTab",
"pyflakes/test/test_api.py::CheckTests::test_invalidEscape",
"pyflakes/test/test_api.py::CheckTests::test_legacyScript",
"pyflakes/test/test_api.py::CheckTests::test_misencodedFileUTF16",
"pyflakes/test/test_api.py::CheckTests::test_misencodedFileUTF8",
"pyflakes/test/test_api.py::CheckTests::test_missingTrailingNewline",
"pyflakes/test/test_api.py::CheckTests::test_multilineSyntaxError",
"pyflakes/test/test_api.py::CheckTests::test_nonDefaultFollowsDefaultSyntaxError",
"pyflakes/test/test_api.py::CheckTests::test_nonKeywordAfterKeywordSyntaxError",
"pyflakes/test/test_api.py::CheckTests::test_pyflakesWarning",
"pyflakes/test/test_api.py::IntegrationTests::test_errors_io",
"pyflakes/test/test_api.py::IntegrationTests::test_errors_syntax",
"pyflakes/test/test_api.py::IntegrationTests::test_fileWithFlakes",
"pyflakes/test/test_api.py::IntegrationTests::test_goodFile",
"pyflakes/test/test_api.py::IntegrationTests::test_readFromStdin",
"pyflakes/test/test_api.py::TestMain::test_errors_io",
"pyflakes/test/test_api.py::TestMain::test_errors_syntax",
"pyflakes/test/test_api.py::TestMain::test_fileWithFlakes",
"pyflakes/test/test_api.py::TestMain::test_goodFile",
"pyflakes/test/test_api.py::TestMain::test_readFromStdin",
"pyflakes/test/test_dict.py::Test::test_duplicate_key_float_and_int",
"pyflakes/test/test_dict.py::Test::test_duplicate_keys",
"pyflakes/test/test_dict.py::Test::test_duplicate_keys_bools",
"pyflakes/test/test_dict.py::Test::test_duplicate_keys_bools_false",
"pyflakes/test/test_dict.py::Test::test_duplicate_keys_bytes_vs_unicode_py3",
"pyflakes/test/test_dict.py::Test::test_duplicate_keys_in_function",
"pyflakes/test/test_dict.py::Test::test_duplicate_keys_in_lambda",
"pyflakes/test/test_dict.py::Test::test_duplicate_keys_ints",
"pyflakes/test/test_dict.py::Test::test_duplicate_keys_none",
"pyflakes/test/test_dict.py::Test::test_duplicate_keys_tuples",
"pyflakes/test/test_dict.py::Test::test_duplicate_keys_tuples_int_and_float",
"pyflakes/test/test_dict.py::Test::test_duplicate_values_bytes_vs_unicode_py3",
"pyflakes/test/test_dict.py::Test::test_duplicate_variable_keys",
"pyflakes/test/test_dict.py::Test::test_duplicate_variable_values",
"pyflakes/test/test_dict.py::Test::test_duplicate_variable_values_same_value",
"pyflakes/test/test_dict.py::Test::test_multiple_duplicate_keys",
"pyflakes/test/test_dict.py::Test::test_no_duplicate_key_error_same_value",
"pyflakes/test/test_dict.py::Test::test_no_duplicate_key_errors",
"pyflakes/test/test_dict.py::Test::test_no_duplicate_key_errors_bool_or_none",
"pyflakes/test/test_dict.py::Test::test_no_duplicate_key_errors_func_call",
"pyflakes/test/test_dict.py::Test::test_no_duplicate_key_errors_instance_attributes",
"pyflakes/test/test_dict.py::Test::test_no_duplicate_key_errors_ints",
"pyflakes/test/test_dict.py::Test::test_no_duplicate_key_errors_tuples",
"pyflakes/test/test_dict.py::Test::test_no_duplicate_key_errors_vars",
"pyflakes/test/test_dict.py::Test::test_no_duplicate_keys_tuples_same_first_element",
"pyflakes/test/test_imports.py::TestImportationObject::test_import_as",
"pyflakes/test/test_imports.py::TestImportationObject::test_import_basic",
"pyflakes/test/test_imports.py::TestImportationObject::test_import_submodule",
"pyflakes/test/test_imports.py::TestImportationObject::test_import_submodule_as",
"pyflakes/test/test_imports.py::TestImportationObject::test_import_submodule_as_source_name",
"pyflakes/test/test_imports.py::TestImportationObject::test_importfrom_future",
"pyflakes/test/test_imports.py::TestImportationObject::test_importfrom_member",
"pyflakes/test/test_imports.py::TestImportationObject::test_importfrom_member_as",
"pyflakes/test/test_imports.py::TestImportationObject::test_importfrom_relative",
"pyflakes/test/test_imports.py::TestImportationObject::test_importfrom_relative_parent",
"pyflakes/test/test_imports.py::TestImportationObject::test_importfrom_relative_with_module",
"pyflakes/test/test_imports.py::TestImportationObject::test_importfrom_relative_with_module_as",
"pyflakes/test/test_imports.py::TestImportationObject::test_importfrom_star",
"pyflakes/test/test_imports.py::TestImportationObject::test_importfrom_star_relative",
"pyflakes/test/test_imports.py::TestImportationObject::test_importfrom_submodule_member",
"pyflakes/test/test_imports.py::TestImportationObject::test_importfrom_submodule_member_as",
"pyflakes/test/test_imports.py::Test::test_aliasedImport",
"pyflakes/test/test_imports.py::Test::test_aliasedImportShadowModule",
"pyflakes/test/test_imports.py::Test::test_assignRHSFirst",
"pyflakes/test/test_imports.py::Test::test_assignedToGlobal",
"pyflakes/test/test_imports.py::Test::test_differentSubmoduleImport",
"pyflakes/test/test_imports.py::Test::test_duplicateSubmoduleImport",
"pyflakes/test/test_imports.py::Test::test_functionNamesAreBoundNow",
"pyflakes/test/test_imports.py::Test::test_functionsRunLater",
"pyflakes/test/test_imports.py::Test::test_futureImport",
"pyflakes/test/test_imports.py::Test::test_futureImportFirst",
"pyflakes/test/test_imports.py::Test::test_futureImportStar",
"pyflakes/test/test_imports.py::Test::test_futureImportUndefined",
"pyflakes/test/test_imports.py::Test::test_futureImportUsed",
"pyflakes/test/test_imports.py::Test::test_ignoreNonImportRedefinitions",
"pyflakes/test/test_imports.py::Test::test_importInClass",
"pyflakes/test/test_imports.py::Test::test_importStar",
"pyflakes/test/test_imports.py::Test::test_importStar_relative",
"pyflakes/test/test_imports.py::Test::test_importUsedInMethodDefinition",
"pyflakes/test/test_imports.py::Test::test_importedInClass",
"pyflakes/test/test_imports.py::Test::test_localImportStar",
"pyflakes/test/test_imports.py::Test::test_methodsDontUseClassScope",
"pyflakes/test/test_imports.py::Test::test_nestedClassAndFunctionScope",
"pyflakes/test/test_imports.py::Test::test_nestedFunctionsNestScope",
"pyflakes/test/test_imports.py::Test::test_newAssignment",
"pyflakes/test/test_imports.py::Test::test_nonGlobalDoesNotRedefine",
"pyflakes/test/test_imports.py::Test::test_notUsedInNestedScope",
"pyflakes/test/test_imports.py::Test::test_packageImport",
"pyflakes/test/test_imports.py::Test::test_redefinedButUsedLater",
"pyflakes/test/test_imports.py::Test::test_redefinedByClass",
"pyflakes/test/test_imports.py::Test::test_redefinedByExcept",
"pyflakes/test/test_imports.py::Test::test_redefinedByFor",
"pyflakes/test/test_imports.py::Test::test_redefinedByFunction",
"pyflakes/test/test_imports.py::Test::test_redefinedBySubclass",
"pyflakes/test/test_imports.py::Test::test_redefinedIf",
"pyflakes/test/test_imports.py::Test::test_redefinedIfElse",
"pyflakes/test/test_imports.py::Test::test_redefinedInClass",
"pyflakes/test/test_imports.py::Test::test_redefinedInNestedFunction",
"pyflakes/test/test_imports.py::Test::test_redefinedInNestedFunctionTwice",
"pyflakes/test/test_imports.py::Test::test_redefinedTry",
"pyflakes/test/test_imports.py::Test::test_redefinedTryElse",
"pyflakes/test/test_imports.py::Test::test_redefinedTryExcept",
"pyflakes/test/test_imports.py::Test::test_redefinedTryExceptElse",
"pyflakes/test/test_imports.py::Test::test_redefinedTryExceptElseFinally",
"pyflakes/test/test_imports.py::Test::test_redefinedTryExceptFinally",
"pyflakes/test/test_imports.py::Test::test_redefinedTryExceptMulti",
"pyflakes/test/test_imports.py::Test::test_redefinedTryNested",
"pyflakes/test/test_imports.py::Test::test_redefinedWhileUnused",
"pyflakes/test/test_imports.py::Test::test_shadowedByFor",
"pyflakes/test/test_imports.py::Test::test_shadowedByForDeep",
"pyflakes/test/test_imports.py::Test::test_shadowedByLambda",
"pyflakes/test/test_imports.py::Test::test_shadowedByParameter",
"pyflakes/test/test_imports.py::Test::test_tryingMultipleImports",
"pyflakes/test/test_imports.py::Test::test_unusedImport",
"pyflakes/test/test_imports.py::Test::test_unusedImport_relative",
"pyflakes/test/test_imports.py::Test::test_unusedInNestedScope",
"pyflakes/test/test_imports.py::Test::test_unusedPackageImport",
"pyflakes/test/test_imports.py::Test::test_unused_package_with_submodule_import",
"pyflakes/test/test_imports.py::Test::test_usedAndGlobal",
"pyflakes/test/test_imports.py::Test::test_usedImport",
"pyflakes/test/test_imports.py::Test::test_usedImport_relative",
"pyflakes/test/test_imports.py::Test::test_usedInAssert",
"pyflakes/test/test_imports.py::Test::test_usedInAssignment",
"pyflakes/test/test_imports.py::Test::test_usedInAttributeAssign",
"pyflakes/test/test_imports.py::Test::test_usedInCall",
"pyflakes/test/test_imports.py::Test::test_usedInClass",
"pyflakes/test/test_imports.py::Test::test_usedInClassBase",
"pyflakes/test/test_imports.py::Test::test_usedInDict",
"pyflakes/test/test_imports.py::Test::test_usedInElifConditional",
"pyflakes/test/test_imports.py::Test::test_usedInElse",
"pyflakes/test/test_imports.py::Test::test_usedInExcept",
"pyflakes/test/test_imports.py::Test::test_usedInExec",
"pyflakes/test/test_imports.py::Test::test_usedInFor",
"pyflakes/test/test_imports.py::Test::test_usedInForElse",
"pyflakes/test/test_imports.py::Test::test_usedInFunction",
"pyflakes/test/test_imports.py::Test::test_usedInGetattr",
"pyflakes/test/test_imports.py::Test::test_usedInGlobal",
"pyflakes/test/test_imports.py::Test::test_usedInIfBody",
"pyflakes/test/test_imports.py::Test::test_usedInIfConditional",
"pyflakes/test/test_imports.py::Test::test_usedInKeywordArg",
"pyflakes/test/test_imports.py::Test::test_usedInLambda",
"pyflakes/test/test_imports.py::Test::test_usedInList",
"pyflakes/test/test_imports.py::Test::test_usedInListComp",
"pyflakes/test/test_imports.py::Test::test_usedInLogic",
"pyflakes/test/test_imports.py::Test::test_usedInOperators",
"pyflakes/test/test_imports.py::Test::test_usedInParameterDefault",
"pyflakes/test/test_imports.py::Test::test_usedInRaise",
"pyflakes/test/test_imports.py::Test::test_usedInReturn",
"pyflakes/test/test_imports.py::Test::test_usedInSlice",
"pyflakes/test/test_imports.py::Test::test_usedInSliceObj",
"pyflakes/test/test_imports.py::Test::test_usedInSubscript",
"pyflakes/test/test_imports.py::Test::test_usedInTry",
"pyflakes/test/test_imports.py::Test::test_usedInTryFinally",
"pyflakes/test/test_imports.py::Test::test_usedInTuple",
"pyflakes/test/test_imports.py::Test::test_usedInWhile",
"pyflakes/test/test_imports.py::Test::test_usedInYield",
"pyflakes/test/test_imports.py::Test::test_used_package_with_submodule_import",
"pyflakes/test/test_imports.py::TestSpecialAll::test_augmentedAssignment",
"pyflakes/test/test_imports.py::TestSpecialAll::test_ignoredInClass",
"pyflakes/test/test_imports.py::TestSpecialAll::test_ignoredInFunction",
"pyflakes/test/test_imports.py::TestSpecialAll::test_importStarExported",
"pyflakes/test/test_imports.py::TestSpecialAll::test_importStarNotExported",
"pyflakes/test/test_imports.py::TestSpecialAll::test_redefinedByGenExp",
"pyflakes/test/test_imports.py::TestSpecialAll::test_unboundExported",
"pyflakes/test/test_imports.py::TestSpecialAll::test_unrecognizable",
"pyflakes/test/test_imports.py::TestSpecialAll::test_usedAsClassDecorator",
"pyflakes/test/test_imports.py::TestSpecialAll::test_usedAsDecorator",
"pyflakes/test/test_imports.py::TestSpecialAll::test_usedInGenExp",
"pyflakes/test/test_imports.py::TestSpecialAll::test_warningSuppressed",
"pyflakes/test/test_other.py::Test::test_attrAugmentedAssignment",
"pyflakes/test/test_other.py::Test::test_breakInsideLoop",
"pyflakes/test/test_other.py::Test::test_breakOutsideLoop",
"pyflakes/test/test_other.py::Test::test_classFunctionDecorator",
"pyflakes/test/test_other.py::Test::test_classNameDefinedPreviously",
"pyflakes/test/test_other.py::Test::test_classNameUndefinedInClassBody",
"pyflakes/test/test_other.py::Test::test_classRedefinedAsFunction",
"pyflakes/test/test_other.py::Test::test_classRedefinition",
"pyflakes/test/test_other.py::Test::test_classWithReturn",
"pyflakes/test/test_other.py::Test::test_classWithYield",
"pyflakes/test/test_other.py::Test::test_classWithYieldFrom",
"pyflakes/test/test_other.py::Test::test_comparison",
"pyflakes/test/test_other.py::Test::test_containment",
"pyflakes/test/test_other.py::Test::test_continueInFinally",
"pyflakes/test/test_other.py::Test::test_continueInsideLoop",
"pyflakes/test/test_other.py::Test::test_continueOutsideLoop",
"pyflakes/test/test_other.py::Test::test_defaultExceptLast",
"pyflakes/test/test_other.py::Test::test_defaultExceptNotLast",
"pyflakes/test/test_other.py::Test::test_doubleAssignmentConditionally",
"pyflakes/test/test_other.py::Test::test_doubleAssignmentWithUse",
"pyflakes/test/test_other.py::Test::test_duplicateArgs",
"pyflakes/test/test_other.py::Test::test_ellipsis",
"pyflakes/test/test_other.py::Test::test_extendedSlice",
"pyflakes/test/test_other.py::Test::test_functionDecorator",
"pyflakes/test/test_other.py::Test::test_functionRedefinedAsClass",
"pyflakes/test/test_other.py::Test::test_globalDeclaredInDifferentScope",
"pyflakes/test/test_other.py::Test::test_identity",
"pyflakes/test/test_other.py::Test::test_localReferencedBeforeAssignment",
"pyflakes/test/test_other.py::Test::test_loopControl",
"pyflakes/test/test_other.py::Test::test_modernProperty",
"pyflakes/test/test_other.py::Test::test_moduleWithReturn",
"pyflakes/test/test_other.py::Test::test_moduleWithYield",
"pyflakes/test/test_other.py::Test::test_moduleWithYieldFrom",
"pyflakes/test/test_other.py::Test::test_redefinedClassFunction",
"pyflakes/test/test_other.py::Test::test_redefinedFunction",
"pyflakes/test/test_other.py::Test::test_redefinedIfElseFunction",
"pyflakes/test/test_other.py::Test::test_redefinedIfElseInListComp",
"pyflakes/test/test_other.py::Test::test_redefinedIfFunction",
"pyflakes/test/test_other.py::Test::test_redefinedInDictComprehension",
"pyflakes/test/test_other.py::Test::test_redefinedInGenerator",
"pyflakes/test/test_other.py::Test::test_redefinedInSetComprehension",
"pyflakes/test/test_other.py::Test::test_redefinedTryExceptFunction",
"pyflakes/test/test_other.py::Test::test_redefinedTryFunction",
"pyflakes/test/test_other.py::Test::test_redefinedUnderscoreFunction",
"pyflakes/test/test_other.py::Test::test_redefinedUnderscoreImportation",
"pyflakes/test/test_other.py::Test::test_starredAssignmentErrors",
"pyflakes/test/test_other.py::Test::test_starredAssignmentNoError",
"pyflakes/test/test_other.py::Test::test_unaryPlus",
"pyflakes/test/test_other.py::Test::test_undefinedBaseClass",
"pyflakes/test/test_other.py::Test::test_varAugmentedAssignment",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_assert_static",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_assert_tuple",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_assert_tuple_empty",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_assert_with_message",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_assert_without_message",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_assignInForLoop",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_assignInListComprehension",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_assignToGlobal",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_assignToMember",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_assignToNonlocal",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_assignmentInsideLoop",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_augmentedAssignmentImportedFunctionCall",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_closedOver",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_dictComprehension",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_doubleClosedOver",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_exceptWithoutNameInFunction",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_exceptWithoutNameInFunctionTuple",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_exceptionUnusedInExcept",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_exceptionUnusedInExceptInFunction",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_exceptionUsedInExcept",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_f_string",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_generatorExpression",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_ifexp",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_listUnpacking",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_setComprehensionAndLiteral",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_tracebackhideSpecialVariable",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_tupleUnpacking",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_unusedVariable",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_unusedVariableAsLocals",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_unusedVariableNoLocals",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_variableUsedInLoop",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_withStatementAttributeName",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_withStatementComplicatedTarget",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_withStatementListNames",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_withStatementNameDefinedInBody",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_withStatementNoNames",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_withStatementSingleName",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_withStatementSingleNameRedefined",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_withStatementSingleNameUndefined",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_withStatementSubscript",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_withStatementSubscriptUndefined",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_withStatementTupleNames",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_withStatementTupleNamesRedefined",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_withStatementTupleNamesUndefined",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_withStatementUndefinedInExpression",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_withStatementUndefinedInside",
"pyflakes/test/test_other.py::TestUnusedAssignment::test_yieldFromUndefined",
"pyflakes/test/test_other.py::TestAsyncStatements::test_asyncDef",
"pyflakes/test/test_other.py::TestAsyncStatements::test_asyncDefAwait",
"pyflakes/test/test_other.py::TestAsyncStatements::test_asyncDefUndefined",
"pyflakes/test/test_other.py::TestAsyncStatements::test_asyncFor",
"pyflakes/test/test_other.py::TestAsyncStatements::test_asyncWith",
"pyflakes/test/test_other.py::TestAsyncStatements::test_asyncWithItem",
"pyflakes/test/test_other.py::TestAsyncStatements::test_continueInAsyncForFinally",
"pyflakes/test/test_other.py::TestAsyncStatements::test_formatstring",
"pyflakes/test/test_other.py::TestAsyncStatements::test_loopControlInAsyncFor",
"pyflakes/test/test_other.py::TestAsyncStatements::test_loopControlInAsyncForElse",
"pyflakes/test/test_other.py::TestAsyncStatements::test_matmul",
"pyflakes/test/test_other.py::TestAsyncStatements::test_variable_annotations",
"pyflakes/test/test_undefined_names.py::Test::test_annotationUndefined",
"pyflakes/test/test_undefined_names.py::Test::test_badNestedClass",
"pyflakes/test/test_undefined_names.py::Test::test_builtinWindowsError",
"pyflakes/test/test_undefined_names.py::Test::test_builtins",
"pyflakes/test/test_undefined_names.py::Test::test_definedAsStarArgs",
"pyflakes/test/test_undefined_names.py::Test::test_definedAsStarUnpack",
"pyflakes/test/test_undefined_names.py::Test::test_definedByGlobal",
"pyflakes/test/test_undefined_names.py::Test::test_definedByGlobalMultipleNames",
"pyflakes/test/test_undefined_names.py::Test::test_definedFromLambdaInDictionaryComprehension",
"pyflakes/test/test_undefined_names.py::Test::test_definedFromLambdaInGenerator",
"pyflakes/test/test_undefined_names.py::Test::test_definedInClass",
"pyflakes/test/test_undefined_names.py::Test::test_definedInClassNested",
"pyflakes/test/test_undefined_names.py::Test::test_definedInGenExp",
"pyflakes/test/test_undefined_names.py::Test::test_definedInListComp",
"pyflakes/test/test_undefined_names.py::Test::test_del",
"pyflakes/test/test_undefined_names.py::Test::test_delConditional",
"pyflakes/test/test_undefined_names.py::Test::test_delConditionalNested",
"pyflakes/test/test_undefined_names.py::Test::test_delExceptionInExcept",
"pyflakes/test/test_undefined_names.py::Test::test_delGlobal",
"pyflakes/test/test_undefined_names.py::Test::test_delUndefined",
"pyflakes/test/test_undefined_names.py::Test::test_delWhile",
"pyflakes/test/test_undefined_names.py::Test::test_delWhileNested",
"pyflakes/test/test_undefined_names.py::Test::test_delWhileTestUsage",
"pyflakes/test/test_undefined_names.py::Test::test_doubleNestingReportsClosestName",
"pyflakes/test/test_undefined_names.py::Test::test_functionsNeedGlobalScope",
"pyflakes/test/test_undefined_names.py::Test::test_globalFromNestedScope",
"pyflakes/test/test_undefined_names.py::Test::test_globalImportStar",
"pyflakes/test/test_undefined_names.py::Test::test_globalInGlobalScope",
"pyflakes/test/test_undefined_names.py::Test::test_global_reset_name_only",
"pyflakes/test/test_undefined_names.py::Test::test_intermediateClassScopeIgnored",
"pyflakes/test/test_undefined_names.py::Test::test_keywordOnlyArgs",
"pyflakes/test/test_undefined_names.py::Test::test_keywordOnlyArgsUndefined",
"pyflakes/test/test_undefined_names.py::Test::test_laterRedefinedGlobalFromNestedScope",
"pyflakes/test/test_undefined_names.py::Test::test_laterRedefinedGlobalFromNestedScope2",
"pyflakes/test/test_undefined_names.py::Test::test_laterRedefinedGlobalFromNestedScope3",
"pyflakes/test/test_undefined_names.py::Test::test_magicGlobalsBuiltins",
"pyflakes/test/test_undefined_names.py::Test::test_magicGlobalsFile",
"pyflakes/test/test_undefined_names.py::Test::test_magicGlobalsName",
"pyflakes/test/test_undefined_names.py::Test::test_magicGlobalsPath",
"pyflakes/test/test_undefined_names.py::Test::test_metaClassUndefined",
"pyflakes/test/test_undefined_names.py::Test::test_namesDeclaredInExceptBlocks",
"pyflakes/test/test_undefined_names.py::Test::test_nestedClass",
"pyflakes/test/test_undefined_names.py::Test::test_undefined",
"pyflakes/test/test_undefined_names.py::Test::test_undefinedAugmentedAssignment",
"pyflakes/test/test_undefined_names.py::Test::test_undefinedExceptionName",
"pyflakes/test/test_undefined_names.py::Test::test_undefinedExceptionNameObscuringGlobalVariableFalsePositive1",
"pyflakes/test/test_undefined_names.py::Test::test_undefinedExceptionNameObscuringGlobalVariableFalsePositive2",
"pyflakes/test/test_undefined_names.py::Test::test_undefinedExceptionNameObscuringLocalVariable2",
"pyflakes/test/test_undefined_names.py::Test::test_undefinedExceptionNameObscuringLocalVariableFalsePositive1",
"pyflakes/test/test_undefined_names.py::Test::test_undefinedExceptionNameObscuringLocalVariableFalsePositive2",
"pyflakes/test/test_undefined_names.py::Test::test_undefinedFromLambdaInComprehension",
"pyflakes/test/test_undefined_names.py::Test::test_undefinedFromLambdaInDictionaryComprehension",
"pyflakes/test/test_undefined_names.py::Test::test_undefinedInGenExpNested",
"pyflakes/test/test_undefined_names.py::Test::test_undefinedInListComp",
"pyflakes/test/test_undefined_names.py::Test::test_undefinedInLoop",
"pyflakes/test/test_undefined_names.py::Test::test_undefinedWithErrorHandler",
"pyflakes/test/test_undefined_names.py::Test::test_unusedAsStarUnpack",
"pyflakes/test/test_undefined_names.py::Test::test_usedAsStarUnpack",
"pyflakes/test/test_undefined_names.py::NameTests::test_impossibleContext"
]
| []
| MIT License | 2,012 | [
"README.rst",
"pyflakes/messages.py",
"setup.py",
".travis.yml",
"tox.ini",
"pyflakes/api.py",
"pyflakes/__main__.py",
"pyflakes/checker.py",
".appveyor.yml"
]
| [
"README.rst",
"pyflakes/messages.py",
"setup.py",
".travis.yml",
"tox.ini",
"pyflakes/api.py",
"pyflakes/__main__.py",
"pyflakes/checker.py",
".appveyor.yml"
]
|
pyrates__roll-45 | 9309c131e681926e83d454c0087bbffcf4669d77 | 2018-01-05 10:58:25 | 9309c131e681926e83d454c0087bbffcf4669d77 | diff --git a/docs/changelog.md b/docs/changelog.md
index a88f171..856e2c0 100644
--- a/docs/changelog.md
+++ b/docs/changelog.md
@@ -10,6 +10,12 @@ A changelog:
## In progress
+* **Breaking changes**:
+ * `Request.route` is now always set, but `Request.route.payload` is `None`
+ when path is not found. This allows to catch a not found request in the
+ `request` hook. Note: if the `request` hook does not return a response,
+ a 404 is still automatically raised.
+ ([#45](https://github.com/pyrates/roll/pull/45))
* Added `request.host` shortcut ([#43](https://github.com/pyrates/roll/pull/43))
diff --git a/docs/reference.md b/docs/reference.md
index cf743ce..b253f06 100644
--- a/docs/reference.md
+++ b/docs/reference.md
@@ -191,7 +191,8 @@ please refers to that documentation for available patterns.
A namedtuple to collect matched route data with attributes:
* **payload** (`dict`): the data received by the `@app.route` decorator,
- contains all handlers plus optionnal custom data.
+ contains all handlers plus optionnal custom data. Value is `None` when request
+ path is not found.
* **vars** (`dict`): URL placeholders resolved for the current route.
diff --git a/roll/__init__.py b/roll/__init__.py
index 3ec2b20..1ff018f 100644
--- a/roll/__init__.py
+++ b/roll/__init__.py
@@ -15,7 +15,7 @@ from io import BytesIO
from typing import TypeVar
from urllib.parse import parse_qs, unquote
-from autoroutes import Routes as BaseRoutes
+from autoroutes import Routes
from biscuits import Cookie, parse
from httptools import HttpParserError, HttpRequestParser, parse_url
from multifruits import Parser, extract_filename, parse_content_disposition
@@ -381,16 +381,6 @@ class Protocol(asyncio.Protocol):
self.writer.close()
-class Routes(BaseRoutes):
- """Customized to raise our own `HttpError` in case of 404."""
-
- def match(self, url: str):
- payload, params = super().match(url)
- if not payload:
- raise HttpError(HTTPStatus.NOT_FOUND, url)
- return payload, params
-
-
Route = namedtuple('Route', ['payload', 'vars'])
@@ -423,6 +413,8 @@ class Roll:
try:
request.route = Route(*self.routes.match(request.path))
if not await self.hook('request', request, response):
+ if not request.route.payload:
+ raise HttpError(HTTPStatus.NOT_FOUND, request.path)
# Uppercased in order to only consider HTTP verbs.
if request.method.upper() not in request.route.payload:
raise HttpError(HTTPStatus.METHOD_NOT_ALLOWED)
| 404 missing from logger
I think it's from https://github.com/pyrates/roll/commit/7655d0ec3fc4009d6f150d8dda4715388abe6c11
I'm not sure why we moved the `request` hook to after the dispatching.
Was it to have `request.route` set before?
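For context, here is a sketch of what the change enables: a `request` hook can now see a not-found request and handle it itself (this mirrors the test added in this PR):
```python
@app.listen('request')
async def listener(request, response):
    if not request.route.payload:  # path was not matched
        response.status = 400
        response.body = b'Really this is a bad request'
        return True  # shortcuts the response process
```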
| pyrates/roll | diff --git a/tests/test_hooks.py b/tests/test_hooks.py
index a602f20..81b5172 100644
--- a/tests/test_hooks.py
+++ b/tests/test_hooks.py
@@ -67,3 +67,17 @@ async def test_third_parties_can_call_hook_their_way(client, app):
assert await app.hook('custom', myarg='kwarg') == 'kwarg'
assert await app.hook('custom', 'arg') == 'arg'
+
+
+async def test_request_hook_is_called_even_if_path_is_not_found(client, app):
+
+ @app.listen('request')
+ async def listener(request, response):
+ if not request.route.payload:
+ response.status = 400
+ response.body = b'Really this is a bad request'
+ return True # Shortcuts the response process.
+
+ resp = await client.get('/not-found')
+ assert resp.status == HTTPStatus.BAD_REQUEST
+ assert resp.body == b'Really this is a bad request'
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 3
} | 0.8 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
autoroutes==0.2.0
biscuits==0.1.1
certifi==2021.5.30
httptools==0.0.10
importlib-metadata==4.8.3
iniconfig==1.1.1
multifruits==0.1.0
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
pytest-asyncio==0.16.0
-e git+https://github.com/pyrates/roll.git@9309c131e681926e83d454c0087bbffcf4669d77#egg=roll
tomli==1.2.3
typing_extensions==4.1.1
uvloop==0.9.1
zipp==3.6.0
| name: roll
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- autoroutes==0.2.0
- biscuits==0.1.1
- httptools==0.0.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- multifruits==0.1.0
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-asyncio==0.16.0
- tomli==1.2.3
- typing-extensions==4.1.1
- uvloop==0.9.1
- zipp==3.6.0
prefix: /opt/conda/envs/roll
| [
"tests/test_hooks.py::test_request_hook_is_called_even_if_path_is_not_found"
]
| []
| [
"tests/test_hooks.py::test_request_hook_can_alter_response",
"tests/test_hooks.py::test_response_hook_can_alter_response",
"tests/test_hooks.py::test_error_with_json_format",
"tests/test_hooks.py::test_third_parties_can_call_hook_their_way"
]
| []
| null | 2,013 | [
"docs/changelog.md",
"docs/reference.md",
"roll/__init__.py"
]
| [
"docs/changelog.md",
"docs/reference.md",
"roll/__init__.py"
]
|
|
tox-dev__tox-726 | 8a8d1a71c7957582c5f4ed4c4b367b7a76c7cf92 | 2018-01-05 12:52:40 | 36ff71d18d10e3c0d4275179d8912abc385b20f0 | matthiasha: This will make Devpi report those tests as "passed", right?
r2dan: @matthiasha: Yes, setup of those test environments will be "passed" on devpi. | diff --git a/.gitignore b/.gitignore
index f232c593..517aca48 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,6 +15,7 @@ doc/_build/
tox.egg-info
.tox
.cache
+.python-version
coverage.xml
htmlcov
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index f51af1fa..1217c0b5 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -101,7 +101,7 @@ Short version
The test environments above are usually enough to cover most cases locally.
#. Consider the
- `checklist <https://github.com/Avira/tox/blob/master/.github/PULL_REQUEST_TEMPLATE.md>`_
+ `checklist <https://github.com/tox-dev/tox/blob/master/.github/PULL_REQUEST_TEMPLATE.md>`_
in the pull request form
Long version
@@ -178,7 +178,7 @@ Here is a simple overview, with tox-specific bits:
$ git push -u
-#. submit a pull request through the GitHub website and and consider the `checklist <https://github.com/Avira/tox/blob/master/.github/PULL_REQUEST_TEMPLATE.md>`_ in the pull request form::
+#. submit a pull request through the GitHub website and and consider the `checklist <https://github.com/tox-dev/tox/blob/master/.github/PULL_REQUEST_TEMPLATE.md>`_ in the pull request form::
head-fork: YOUR_GITHUB_USERNAME/tox
compare: your-branch-name
diff --git a/CONTRIBUTORS b/CONTRIBUTORS
index edb19b1b..36e57397 100644
--- a/CONTRIBUTORS
+++ b/CONTRIBUTORS
@@ -5,6 +5,7 @@ Alexandre Conrad
Allan Feldman
Andrii Soldatenko
Anthon van der Neuth
+Anthony Sottile
Asmund Grammeltwedt
Barry Warsaw
Bartolome Sanchez Salado
@@ -18,12 +19,14 @@ Cyril Roelandt
Eli Collins
Eugene Yunak
Fernando L. Pereira
+Florian Schulze
Hazal Ozturk
Henk-Jaap Wagenaar
Ian Stapleton Cordasco
Igor Duarte Cardoso
Ionel Maries Cristian
Itxaka Serrano
+Jake Windle
Jannis Leidel
Johannes Christ
Josh Smeaton
@@ -36,6 +39,7 @@ Lukasz Rogalski
Manuel Jacob
Marc Abramowitz
Marc Schlaich
+Mariusz Rusiniak
Mark Hirota
Matt Good
Matt Jeffery
diff --git a/changelog/426.bugfix.rst b/changelog/426.bugfix.rst
new file mode 100644
index 00000000..caf55dd2
--- /dev/null
+++ b/changelog/426.bugfix.rst
@@ -0,0 +1,1 @@
+Write directly to stdout buffer if possible to prevent str vs bytes issues - by @asottile
diff --git a/changelog/672.bugfix.rst b/changelog/672.bugfix.rst
new file mode 100644
index 00000000..742a1ce7
--- /dev/null
+++ b/changelog/672.bugfix.rst
@@ -0,0 +1,1 @@
+fix #672 reporting to json file when skip-missing-interpreters option is used - by @r2dan
diff --git a/changelog/723.bugfix.rst b/changelog/723.bugfix.rst
index 8ec40c8e..b6757a19 100644
--- a/changelog/723.bugfix.rst
+++ b/changelog/723.bugfix.rst
@@ -1,5 +1,5 @@
Fixed an issue where invocation of Tox from the Python package, where
-invocation errors (failed actions) occur results in a change in the
+invocation errors (failed actions) occur results in a change in the
sys.stdout stream encoding in Python 3.x.
New behaviour is that sys.stdout is reset back to its original encoding
after invocation errors - by @tonybaloney
diff --git a/changelog/727.bugfix.rst b/changelog/727.bugfix.rst
new file mode 100644
index 00000000..f8db4df0
--- /dev/null
+++ b/changelog/727.bugfix.rst
@@ -0,0 +1,1 @@
+The reading of command output sometimes failed with ``IOError: [Errno 0] Error`` on Windows, this was fixed by using a simpler method to update the read buffers. - by @fschulze
diff --git a/doc/example/platform.rst b/doc/example/platform.rst
index 5661fc65..647dc3b3 100644
--- a/doc/example/platform.rst
+++ b/doc/example/platform.rst
@@ -26,7 +26,7 @@ and the following ``tox.ini`` content:
mymacos: darwin
mywindows: win32
- # you can specify dependencies and their versions based on platform filtered envirements
+ # you can specify dependencies and their versions based on platform filtered environments
deps = mylinux,mymacos: py==1.4.32
mywindows: py==1.4.30
diff --git a/setup.py b/setup.py
index 8b5a63f7..4d91d9f6 100644
--- a/setup.py
+++ b/setup.py
@@ -59,7 +59,7 @@ def main():
'towncrier >= 17.8.0'],
'lint': ['flake8 == 3.4.1',
'flake8-bugbear == 17.4.0',
- 'pre-commit == 1.4.3'],
+ 'pre-commit == 1.4.4'],
'publish': ['devpi',
'twine']},
classifiers=['Development Status :: 5 - Production/Stable',
diff --git a/tox/session.py b/tox/session.py
index 27547872..f532f212 100644
--- a/tox/session.py
+++ b/tox/session.py
@@ -135,7 +135,7 @@ class Action(object):
fout.write("actionid: %s\nmsg: %s\ncmdargs: %r\n\n" % (self.id, self.msg, args))
fout.flush()
outpath = py.path.local(fout.name)
- fin = outpath.open()
+ fin = outpath.open('rb')
fin.read() # read the header, so it won't be written to stdout
stdout = fout
elif returnout:
@@ -159,21 +159,25 @@ class Action(object):
self.report.logpopen(popen, env=env)
try:
if resultjson and not redirect:
- assert popen.stderr is None # prevent deadlock
+ if popen.stderr is not None:
+ # prevent deadlock
+ raise ValueError("stderr must not be piped here")
+ # we read binary from the process and must write using a
+ # binary stream
+ buf = getattr(sys.stdout, 'buffer', sys.stdout)
out = None
last_time = time.time()
while 1:
- fin_pos = fin.tell()
# we have to read one byte at a time, otherwise there
# might be no output for a long time with slow tests
data = fin.read(1)
if data:
- sys.stdout.write(data)
- if '\n' in data or (time.time() - last_time) > 1:
+ buf.write(data)
+ if b'\n' in data or (time.time() - last_time) > 1:
# we flush on newlines or after 1 second to
# provide quick enough feedback to the user
# when printing a dot per test
- sys.stdout.flush()
+ buf.flush()
last_time = time.time()
elif popen.poll() is not None:
if popen.stdout is not None:
@@ -181,7 +185,8 @@ class Action(object):
break
else:
time.sleep(0.1)
- fin.seek(fin_pos)
+ # the seek updates internal read buffers
+ fin.seek(0, 1)
fin.close()
else:
out, err = popen.communicate()
@@ -478,6 +483,7 @@ class Session:
action = self.newaction(venv, "getenv", venv.envconfig.envdir)
with action:
venv.status = 0
+ default_ret_code = 1
envlog = self.resultlog.get_envlog(venv.name)
try:
status = venv.update(action=action)
@@ -492,11 +498,18 @@ class Session:
"Error creating virtualenv. Note that some special "
"characters (e.g. ':' and unicode symbols) in paths are "
"not supported by virtualenv. Error details: %r" % e)
+ except tox.exception.InterpreterNotFound as e:
+ status = e
+ if self.config.option.skip_missing_interpreters:
+ default_ret_code = 0
if status:
commandlog = envlog.get_commandlog("setup")
- commandlog.add_command(["setup virtualenv"], str(status), 1)
+ commandlog.add_command(["setup virtualenv"], str(status), default_ret_code)
venv.status = status
- self.report.error(str(status))
+ if default_ret_code == 0:
+ self.report.skip(str(status))
+ else:
+ self.report.error(str(status))
return False
commandpath = venv.getcommandpath("python")
envlog.set_python_info(commandpath)
diff --git a/tox/venv.py b/tox/venv.py
index 3b3ecb32..69571637 100755
--- a/tox/venv.py
+++ b/tox/venv.py
@@ -149,9 +149,6 @@ class VirtualEnv(object):
return True
return False
- def _ispython3(self):
- return "python3" in str(self.envconfig.basepython)
-
def update(self, action):
""" return status string for updating actual venv to match configuration.
if status string is empty, all is ok.
@@ -170,8 +167,6 @@ class VirtualEnv(object):
self.just_created = True
except tox.exception.UnsupportedInterpreter:
return sys.exc_info()[1]
- except tox.exception.InterpreterNotFound:
- return sys.exc_info()[1]
try:
self.hook.tox_testenv_install_deps(action=action, venv=self)
except tox.exception.InvocationError:
| --result-json does not respect the --skip-missing-interpreters option
When executing tox with --result-json and --skip-missing-interpreters, the latter does not seem to be reflected in the generated JSON file. Please find the sample project attached below:
[missing_interpreters.tar.gz](https://github.com/tox-dev/tox/files/1474831/missing_interpreters.tar.gz)
To reproduce the issue simply invoke:
`tox --skip-missing-interpreters --result-json toxreport.json`
The tox main process exits with return code 0, but in the toxreport.json file the user can see the following:
```
"python": {
"setup": [
{
"command": [
"setup virtualenv"
],
"output": "InterpreterNotFound: xyz_unknown_interpreter",
"retcode": "1"
}
]
}
```
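For reference, the fix in the patch above boils down to recording a return code that honors the flag. Condensed from the patched `tox/session.py` (a sketch, not a drop-in snippet):
```python
default_ret_code = 1
try:
    status = venv.update(action=action)
except tox.exception.InterpreterNotFound as e:
    status = e
    if self.config.option.skip_missing_interpreters:
        default_ret_code = 0  # record the skipped env as a success in the JSON report
if status:
    commandlog = envlog.get_commandlog("setup")
    commandlog.add_command(["setup virtualenv"], str(status), default_ret_code)
```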
In my opinion, the retcode here should be changed to 0 if the --skip-missing-interpreters option is explicitly used. | tox-dev/tox | diff --git a/tests/test_venv.py b/tests/test_venv.py
index c2ca3e46..073c96c3 100644
--- a/tests/test_venv.py
+++ b/tests/test_venv.py
@@ -385,11 +385,11 @@ def test_install_command_not_installed_bash(newmocksession):
def test_install_python3(tmpdir, newmocksession):
- if not py.path.local.sysfind('python3.5'):
- pytest.skip("needs python3.5")
+ if not py.path.local.sysfind('python3'):
+ pytest.skip("needs python3")
mocksession = newmocksession([], """
[testenv:py123]
- basepython=python3.5
+ basepython=python3
deps=
dep1
dep2
diff --git a/tests/test_z_cmdline.py b/tests/test_z_cmdline.py
index 970a7b64..6ef66d8f 100644
--- a/tests/test_z_cmdline.py
+++ b/tests/test_z_cmdline.py
@@ -312,6 +312,27 @@ def test_skip_unknown_interpreter(cmd, initproj):
assert any(msg == l for l in result.outlines)
+def test_skip_unknown_interpreter_result_json(cmd, initproj, tmpdir):
+ report_path = tmpdir.join("toxresult.json")
+ initproj("interp123-0.5", filedefs={
+ 'tests': {'test_hello.py': "def test_hello(): pass"},
+ 'tox.ini': '''
+ [testenv:python]
+ basepython=xyz_unknown_interpreter
+ [testenv]
+ changedir=tests
+ '''
+ })
+ result = cmd("--skip-missing-interpreters", "--result-json", report_path)
+ assert not result.ret
+ msg = 'SKIPPED: python: InterpreterNotFound: xyz_unknown_interpreter'
+ assert any(msg == l for l in result.outlines)
+ setup_result_from_json = json.load(report_path)["testenvs"]["python"]["setup"]
+ for setup_step in setup_result_from_json:
+ assert "InterpreterNotFound" in setup_step["output"]
+ assert setup_step["retcode"] == "0"
+
+
def test_unknown_dep(cmd, initproj):
initproj("dep123-0.7", filedefs={
'tests': {'test_hello.py': "def test_hello(): pass"},
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 8
} | 2.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[testing,docs,lint]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-timeout",
"pytest-xdist"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
aspy.yaml==1.3.0
attrs==22.2.0
Babel==2.11.0
cached-property==1.5.2
certifi==2021.5.30
charset-normalizer==2.0.12
click==8.0.4
click-default-group==1.2.4
coverage==6.2
distlib==0.3.9
docutils==0.17.1
execnet==1.9.0
filelock==3.4.1
flake8==3.4.1
flake8-bugbear==17.4.0
identify==2.4.4
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
importlib-resources==5.4.0
incremental==22.10.0
iniconfig==1.1.1
Jinja2==3.0.3
MarkupSafe==2.0.1
mccabe==0.6.1
nodeenv==1.6.0
packaging==21.3
platformdirs==2.4.0
pluggy==0.13.1
pre-commit==1.4.3
py==1.11.0
pycodestyle==2.3.1
pyflakes==1.5.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
pytest-timeout==2.1.0
pytest-xdist==3.0.2
pytz==2025.2
PyYAML==6.0.1
requests==2.27.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==1.8.6
sphinxcontrib-serializinghtml==1.1.5
sphinxcontrib-websupport==1.2.4
tomli==1.2.3
towncrier==21.9.0
-e git+https://github.com/tox-dev/tox.git@8a8d1a71c7957582c5f4ed4c4b367b7a76c7cf92#egg=tox
typing_extensions==4.1.1
urllib3==1.26.20
virtualenv==20.17.1
zipp==3.6.0
| name: tox
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- aspy-yaml==1.3.0
- attrs==22.2.0
- babel==2.11.0
- cached-property==1.5.2
- charset-normalizer==2.0.12
- click==8.0.4
- click-default-group==1.2.4
- coverage==6.2
- distlib==0.3.9
- docutils==0.17.1
- execnet==1.9.0
- filelock==3.4.1
- flake8==3.4.1
- flake8-bugbear==17.4.0
- identify==2.4.4
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- incremental==22.10.0
- iniconfig==1.1.1
- jinja2==3.0.3
- markupsafe==2.0.1
- mccabe==0.6.1
- nodeenv==1.6.0
- packaging==21.3
- platformdirs==2.4.0
- pluggy==0.13.1
- pre-commit==1.4.3
- py==1.11.0
- pycodestyle==2.3.1
- pyflakes==1.5.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-timeout==2.1.0
- pytest-xdist==3.0.2
- pytz==2025.2
- pyyaml==6.0.1
- requests==2.27.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==1.8.6
- sphinxcontrib-serializinghtml==1.1.5
- sphinxcontrib-websupport==1.2.4
- tomli==1.2.3
- towncrier==21.9.0
- typing-extensions==4.1.1
- urllib3==1.26.20
- virtualenv==20.17.1
- zipp==3.6.0
prefix: /opt/conda/envs/tox
| [
"tests/test_z_cmdline.py::test_skip_unknown_interpreter_result_json"
]
| [
"tests/test_venv.py::test_create",
"tests/test_venv.py::test_install_deps_wildcard",
"tests/test_venv.py::test_install_deps_indexserver",
"tests/test_venv.py::test_install_deps_pre",
"tests/test_venv.py::test_installpkg_indexserver",
"tests/test_venv.py::test_install_recreate",
"tests/test_venv.py::test_install_sdist_extras",
"tests/test_venv.py::test_develop_extras",
"tests/test_venv.py::test_install_python3",
"tests/test_venv.py::TestCreationConfig::test_python_recreation",
"tests/test_venv.py::TestVenvTest::test_pythonpath_usage",
"tests/test_venv.py::test_env_variables_added_to_pcall",
"tests/test_venv.py::test_installpkg_no_upgrade",
"tests/test_venv.py::test_installpkg_upgrade",
"tests/test_venv.py::test_run_install_command",
"tests/test_venv.py::test_run_custom_install_command",
"tests/test_z_cmdline.py::test_report_protocol",
"tests/test_z_cmdline.py::test__resolve_pkg",
"tests/test_z_cmdline.py::test_minversion",
"tests/test_z_cmdline.py::test_notoxini_help_still_works",
"tests/test_z_cmdline.py::test_envdir_equals_toxini_errors_out",
"tests/test_z_cmdline.py::test_venv_special_chars_issue252",
"tests/test_z_cmdline.py::test_unknown_environment"
]
| [
"tests/test_venv.py::test_getdigest",
"tests/test_venv.py::test_getsupportedinterpreter",
"tests/test_venv.py::test_commandpath_venv_precedence",
"tests/test_venv.py::test_create_sitepackages",
"tests/test_venv.py::test_env_variables_added_to_needs_reinstall",
"tests/test_venv.py::test_test_hashseed_is_in_output",
"tests/test_venv.py::test_test_runtests_action_command_is_in_output",
"tests/test_venv.py::test_install_error",
"tests/test_venv.py::test_install_command_not_installed",
"tests/test_venv.py::test_install_command_whitelisted",
"tests/test_venv.py::test_install_command_not_installed_bash",
"tests/test_venv.py::TestCreationConfig::test_basic",
"tests/test_venv.py::TestCreationConfig::test_matchingdependencies",
"tests/test_venv.py::TestCreationConfig::test_matchingdependencies_file",
"tests/test_venv.py::TestCreationConfig::test_matchingdependencies_latest",
"tests/test_venv.py::TestCreationConfig::test_dep_recreation",
"tests/test_venv.py::TestCreationConfig::test_develop_recreation",
"tests/test_venv.py::TestVenvTest::test_envbindir_path",
"tests/test_venv.py::test_command_relative_issue36",
"tests/test_venv.py::test_ignore_outcome_failing_cmd",
"tests/test_venv.py::test_tox_testenv_create",
"tests/test_venv.py::test_tox_testenv_pre_post",
"tests/test_z_cmdline.py::test__resolve_pkg_doubledash",
"tests/test_z_cmdline.py::TestSession::test_make_sdist",
"tests/test_z_cmdline.py::TestSession::test_make_sdist_distshare",
"tests/test_z_cmdline.py::TestSession::test_log_pcall",
"tests/test_z_cmdline.py::TestSession::test_summary_status",
"tests/test_z_cmdline.py::TestSession::test_getvenv",
"tests/test_z_cmdline.py::test_notoxini_help_ini_still_works",
"tests/test_z_cmdline.py::test_run_custom_install_command_error",
"tests/test_z_cmdline.py::test_unknown_interpreter_and_env",
"tests/test_z_cmdline.py::test_unknown_interpreter",
"tests/test_z_cmdline.py::test_skip_platform_mismatch",
"tests/test_z_cmdline.py::test_skip_unknown_interpreter",
"tests/test_z_cmdline.py::test_unknown_dep",
"tests/test_z_cmdline.py::test_skip_sdist",
"tests/test_z_cmdline.py::test_minimal_setup_py_empty",
"tests/test_z_cmdline.py::test_minimal_setup_py_comment_only",
"tests/test_z_cmdline.py::test_minimal_setup_py_non_functional",
"tests/test_z_cmdline.py::test_sdist_fails",
"tests/test_z_cmdline.py::test_no_setup_py_exits",
"tests/test_z_cmdline.py::test_package_install_fails",
"tests/test_z_cmdline.py::test_toxuone_env",
"tests/test_z_cmdline.py::test_different_config_cwd",
"tests/test_z_cmdline.py::test_json",
"tests/test_z_cmdline.py::test_developz",
"tests/test_z_cmdline.py::test_usedevelop",
"tests/test_z_cmdline.py::test_usedevelop_mixed",
"tests/test_z_cmdline.py::test_test_usedevelop[.]",
"tests/test_z_cmdline.py::test_test_usedevelop[src]",
"tests/test_z_cmdline.py::test_alwayscopy",
"tests/test_z_cmdline.py::test_alwayscopy_default",
"tests/test_z_cmdline.py::test_empty_activity_ignored",
"tests/test_z_cmdline.py::test_empty_activity_shown_verbose",
"tests/test_z_cmdline.py::test_test_piphelp",
"tests/test_z_cmdline.py::test_notest",
"tests/test_z_cmdline.py::test_PYC",
"tests/test_z_cmdline.py::test_env_VIRTUALENV_PYTHON",
"tests/test_z_cmdline.py::test_sdistonly",
"tests/test_z_cmdline.py::test_separate_sdist_no_sdistfile",
"tests/test_z_cmdline.py::test_separate_sdist",
"tests/test_z_cmdline.py::test_sdist_latest",
"tests/test_z_cmdline.py::test_installpkg",
"tests/test_z_cmdline.py::test_envsitepackagesdir",
"tests/test_z_cmdline.py::test_envsitepackagesdir_skip_missing_issue280",
"tests/test_z_cmdline.py::test_verbosity[]",
"tests/test_z_cmdline.py::test_verbosity[-v]",
"tests/test_z_cmdline.py::test_verbosity[-vv]",
"tests/test_z_cmdline.py::test_envtmpdir",
"tests/test_z_cmdline.py::test_missing_env_fails",
"tests/test_z_cmdline.py::test_tox_console_script",
"tests/test_z_cmdline.py::test_tox_quickstart_script",
"tests/test_z_cmdline.py::test_tox_cmdline"
]
| []
| MIT License | 2,014 | [
"changelog/727.bugfix.rst",
"tox/session.py",
"changelog/723.bugfix.rst",
"tox/venv.py",
"setup.py",
"CONTRIBUTORS",
"CONTRIBUTING.rst",
"changelog/426.bugfix.rst",
".gitignore",
"doc/example/platform.rst",
"changelog/672.bugfix.rst"
]
| [
"changelog/727.bugfix.rst",
"tox/session.py",
"changelog/723.bugfix.rst",
"tox/venv.py",
"setup.py",
"CONTRIBUTORS",
"CONTRIBUTING.rst",
"changelog/426.bugfix.rst",
".gitignore",
"doc/example/platform.rst",
"changelog/672.bugfix.rst"
]
|
joke2k__faker-671 | fc7bbc43efc9adf923b58b43db6c979a3c17ac86 | 2018-01-05 18:03:46 | 29dff0a0f2a31edac21a18cfa50b5bc9206304b2 | diff --git a/faker/providers/date_time/__init__.py b/faker/providers/date_time/__init__.py
index 8d1d8ff6..5df3c699 100644
--- a/faker/providers/date_time/__init__.py
+++ b/faker/providers/date_time/__init__.py
@@ -1342,7 +1342,10 @@ class Provider(BaseProvider):
"""
start_date = self._parse_date_time(start_date, tzinfo=tzinfo)
end_date = self._parse_date_time(end_date, tzinfo=tzinfo)
- ts = self.generator.random.randint(start_date, end_date)
+ if end_date - start_date <= 1:
+ ts = start_date + self.generator.random.random()
+ else:
+ ts = self.generator.random.randint(start_date, end_date)
return datetime(1970, 1, 1, tzinfo=tzinfo) + timedelta(seconds=ts)
def date_between(self, start_date='-30y', end_date='today'):
| past_datetime() crashes when start_date is within one second of now
I ran into a problem while generating a bunch of fake dates where I was calling `fake.past_datetime()` with the start date set to the result of a previous `fake.past_datetime()` call (simulating a bunch of files being created as part of a collection). The first call could randomly return a value within one second of the current time, causing the second call to trigger a `ValueError` from `random.randint()` because the truncated start timestamp could end up past the end timestamp.
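Here's a condensed sketch of the reproduction, using only the public Faker API; the crash comes from the `random.randint(start_date, end_date)` call shown in the patch above:
```python
from faker import Faker

fake = Faker()
start = fake.past_datetime()           # may land less than a second before "now"
fake.past_datetime(start_date=start)   # can raise ValueError from random.randint
```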
This was pretty easy to solve simply by using `fake.date_time_between(start_date=previous_created_date)` but it might be useful if `past_datetime()` was safe to call like this. | joke2k/faker | diff --git a/tests/providers/test_date_time.py b/tests/providers/test_date_time.py
index f34250e6..1c81f7c3 100644
--- a/tests/providers/test_date_time.py
+++ b/tests/providers/test_date_time.py
@@ -191,6 +191,10 @@ class TestDateTime(unittest.TestCase):
self.assertTrue(datetime_start <= random_date)
self.assertTrue(datetime_end >= random_date)
+ def test_past_datetime_within_second(self):
+ # Should not raise a ``ValueError``
+ self.factory.past_datetime(start_date='+1s')
+
def test_date_between_dates(self):
date_end = date.today()
date_start = date_end - timedelta(days=10)
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 0.8 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"tests/requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
dnspython==2.2.1
email-validator==1.0.3
-e git+https://github.com/joke2k/faker.git@fc7bbc43efc9adf923b58b43db6c979a3c17ac86#egg=Faker
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
mock==2.0.0
packaging==21.3
pbr==6.1.1
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
python-dateutil==2.9.0.post0
six==1.17.0
text-unidecode==1.3
tomli==1.2.3
typing_extensions==4.1.1
UkPostcodeParser==1.1.2
zipp==3.6.0
| name: faker
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- dnspython==2.2.1
- email-validator==1.0.3
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- mock==2.0.0
- packaging==21.3
- pbr==6.1.1
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- python-dateutil==2.9.0.post0
- six==1.17.0
- text-unidecode==1.3
- tomli==1.2.3
- typing-extensions==4.1.1
- ukpostcodeparser==1.1.2
- zipp==3.6.0
prefix: /opt/conda/envs/faker
| [
"tests/providers/test_date_time.py::TestDateTime::test_past_datetime_within_second"
]
| []
| [
"tests/providers/test_date_time.py::TestKoKR::test_day",
"tests/providers/test_date_time.py::TestKoKR::test_month",
"tests/providers/test_date_time.py::TestDateTime::test_date_between",
"tests/providers/test_date_time.py::TestDateTime::test_date_between_dates",
"tests/providers/test_date_time.py::TestDateTime::test_date_object",
"tests/providers/test_date_time.py::TestDateTime::test_date_this_period",
"tests/providers/test_date_time.py::TestDateTime::test_date_time_between",
"tests/providers/test_date_time.py::TestDateTime::test_date_time_between_dates",
"tests/providers/test_date_time.py::TestDateTime::test_date_time_between_dates_with_tzinfo",
"tests/providers/test_date_time.py::TestDateTime::test_date_time_this_period",
"tests/providers/test_date_time.py::TestDateTime::test_date_time_this_period_with_tzinfo",
"tests/providers/test_date_time.py::TestDateTime::test_datetime_safe",
"tests/providers/test_date_time.py::TestDateTime::test_datetime_safe_new_date",
"tests/providers/test_date_time.py::TestDateTime::test_datetimes_with_and_without_tzinfo",
"tests/providers/test_date_time.py::TestDateTime::test_day",
"tests/providers/test_date_time.py::TestDateTime::test_future_date",
"tests/providers/test_date_time.py::TestDateTime::test_future_datetime",
"tests/providers/test_date_time.py::TestDateTime::test_month",
"tests/providers/test_date_time.py::TestDateTime::test_parse_date",
"tests/providers/test_date_time.py::TestDateTime::test_parse_date_time",
"tests/providers/test_date_time.py::TestDateTime::test_parse_timedelta",
"tests/providers/test_date_time.py::TestDateTime::test_past_date",
"tests/providers/test_date_time.py::TestDateTime::test_past_datetime",
"tests/providers/test_date_time.py::TestDateTime::test_time_object",
"tests/providers/test_date_time.py::TestDateTime::test_time_series",
"tests/providers/test_date_time.py::TestDateTime::test_timezone_conversion",
"tests/providers/test_date_time.py::TestPlPL::test_day",
"tests/providers/test_date_time.py::TestPlPL::test_month",
"tests/providers/test_date_time.py::TestAr::test_ar_aa",
"tests/providers/test_date_time.py::TestAr::test_ar_eg"
]
| []
| MIT License | 2,015 | [
"faker/providers/date_time/__init__.py"
]
| [
"faker/providers/date_time/__init__.py"
]
|
|
nteract__papermill-81 | f3eb72e0db1cdf4ddf2d99ddfa38615b30c97e7e | 2018-01-05 18:45:52 | 575acfaf5e36c415690be31cfbd02228cde59c17 | rgbkrk: We'll want to get a patch release out for this pretty quick, records are generally broken right now.
rgbkrk: Weird that the push is failing, though it looks like we should be mocking the kernelspec lookup.
rgbkrk: I've re-kicked the one failing job since everything else passes.
codecov[bot]: # [Codecov](https://codecov.io/gh/nteract/papermill/pull/81?src=pr&el=h1) Report
> Merging [#81](https://codecov.io/gh/nteract/papermill/pull/81?src=pr&el=desc) into [master](https://codecov.io/gh/nteract/papermill/commit/f3eb72e0db1cdf4ddf2d99ddfa38615b30c97e7e?src=pr&el=desc) will **not change** coverage.
> The diff coverage is `16.66%`.
```diff
@@ Coverage Diff @@
## master #81 +/- ##
=======================================
Coverage 54.97% 54.97%
=======================================
Files 8 8
Lines 864 864
=======================================
Hits 475 475
Misses 389 389
```
rgbkrk: Added a basic test that really just verifies that the papermill record payload is passed through correctly.
rgbkrk: /cc @willingc | diff --git a/papermill/api.py b/papermill/api.py
index 15b2745..8f68c88 100644
--- a/papermill/api.py
+++ b/papermill/api.py
@@ -32,7 +32,7 @@ def record(name, value):
"""
# IPython.display.display takes a tuple of objects as first parameter
# `http://ipython.readthedocs.io/en/stable/api/generated/IPython.display.html#IPython.display.display`
- ip_display(({RECORD_OUTPUT_TYPE: {name: value}},), raw=True)
+ ip_display(data={RECORD_OUTPUT_TYPE: {name: value}}, raw=True)
def display(name, obj):
@@ -46,7 +46,7 @@ def display(name, obj):
"""
data, metadata = IPython.core.formatters.format_display_data(obj)
metadata['papermill'] = dict(name=name)
- ip_display(data, metadata=metadata, raw=True)
+ ip_display(data=data, metadata=metadata, raw=True)
def read_notebook(path):
@@ -171,7 +171,7 @@ class Notebook(object):
raise PapermillException(
"Output Name '%s' is not available in this notebook.")
output = outputs[name]
- ip_display(output.data, metadata=output.metadata, raw=True)
+ ip_display(data=output.data, metadata=output.metadata, raw=True)
def _get_papermill_metadata(nb, name, default=None):
@@ -245,11 +245,11 @@ class NotebookCollection(object):
def display_output(self, key, output_name):
"""Display markdown of output"""
if isinstance(key, string_types):
- ip_display((Markdown("### %s" % str(key)),))
+ ip_display(Markdown("### %s" % str(key)))
self[key].display_output(output_name)
else:
for i, k in enumerate(key):
if i > 0:
- ip_display((Markdown("<hr>"),)) # tag between outputs
- ip_display((Markdown("### %s" % str(k)),))
+ ip_display(Markdown("<hr>")) # tag between outputs
+ ip_display(Markdown("### %s" % str(k)))
self[k].display_output(output_name)
| Error on record() with new versions
Versions above 0.11 seems to present an error when running the record() function.
As the example on the readme file:
```python
"""notebook.ipynb"""
import papermill as pm

pm.record("hello", "world")
pm.record("number", 123)
pm.record("some_list", [1, 3, 5])
pm.record("some_dict", {"a": 1, "b": 2})
```

gives:

```python-traceback
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-20-447d49aec58c> in <module>()
2 import papermill as pm
3
----> 4 pm.record("hello", "world")
5 pm.record("number", 123)
6 pm.record("some_list", [1, 3, 5])
~\Anaconda3\envs\mestrado\lib\site-packages\papermill\api.py in record(name, value)
33 # IPython.display.display takes a tuple of objects as first parameter
34 # `http://ipython.readthedocs.io/en/stable/api/generated/IPython.display.html#IPython.display.display`
---> 35 ip_display(({RECORD_OUTPUT_TYPE: {name: value}},), raw=True)
36
37
~\Anaconda3\envs\mestrado\lib\site-packages\IPython\core\display.py in display(include, exclude, metadata, transient, display_id, *objs, **kwargs)
293 for obj in objs:
294 if raw:
--> 295 publish_display_data(data=obj, metadata=metadata, **kwargs)
296 else:
297 format_dict, md_dict = format(obj, include=include, exclude=exclude)
~\Anaconda3\envs\mestrado\lib\site-packages\IPython\core\display.py in publish_display_data(data, metadata, source, transient, **kwargs)
118 data=data,
119 metadata=metadata,
--> 120 **kwargs
121 )
122
~\Anaconda3\envs\mestrado\lib\site-packages\ipykernel\zmqshell.py in publish(self, data, metadata, source, transient, update)
115 if transient is None:
116 transient = {}
--> 117 self._validate_data(data, metadata)
118 content = {}
119 content['data'] = encode_images(data)
~\Anaconda3\envs\mestrado\lib\site-packages\IPython\core\displaypub.py in _validate_data(self, data, metadata)
48
49 if not isinstance(data, dict):
---> 50 raise TypeError('data must be a dict, got: %r' % data)
51 if metadata is not None:
52 if not isinstance(metadata, dict):
TypeError: data must be a dict, got: {'application/papermill.record+json': {'hello': 'world'}}
``` | nteract/papermill | diff --git a/papermill/tests/test_api.py b/papermill/tests/test_api.py
index a03710a..4fc1883 100644
--- a/papermill/tests/test_api.py
+++ b/papermill/tests/test_api.py
@@ -9,7 +9,7 @@ else:
import pandas as pd
from pandas.util.testing import assert_frame_equal
-from .. import display, read_notebook, read_notebooks, PapermillException
+from .. import display, read_notebook, read_notebooks, PapermillException, record
from ..api import Notebook
from . import get_notebook_path, get_notebook_dir
@@ -91,4 +91,11 @@ def test_display(format_display_data_mock, ip_display_mock):
display('display_name', {'display_obj': 'hello'})
format_display_data_mock.assert_called_once_with({'display_obj': 'hello'})
- ip_display_mock.assert_called_once_with({'foo': 'bar'}, metadata={'metadata': 'baz', 'papermill': {'name': 'display_name'}}, raw=True)
+ ip_display_mock.assert_called_once_with(data={'foo': 'bar'}, metadata={'metadata': 'baz', 'papermill': {'name': 'display_name'}}, raw=True)
+
+
+
+@patch('papermill.api.ip_display')
+def test_record(ip_display_mock):
+ record('a', 3)
+ ip_display_mock.assert_called_once_with(data={'application/papermill.record+json': {'a': 3}}, raw=True)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 0.11 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements.txt",
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | ansiwrap==0.8.4
async-generator==1.10
attrs==22.2.0
backcall==0.2.0
bleach==4.1.0
boto3==1.23.10
botocore==1.26.10
certifi==2021.5.30
charset-normalizer==2.0.12
click==8.0.4
codecov==2.1.13
coverage==6.2
decorator==5.1.1
defusedxml==0.7.1
entrypoints==0.4
idna==3.10
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
ipython==7.16.3
ipython-genutils==0.2.0
jedi==0.17.2
Jinja2==3.0.3
jmespath==0.10.0
jsonschema==3.2.0
jupyter-client==7.1.2
jupyter-core==4.9.2
jupyterlab-pygments==0.1.2
MarkupSafe==2.0.1
mistune==0.8.4
nbclient==0.5.9
nbconvert==6.0.7
nbformat==5.1.3
nest-asyncio==1.6.0
numpy==1.19.5
packaging==21.3
pandas==1.1.5
pandocfilters==1.5.1
-e git+https://github.com/nteract/papermill.git@f3eb72e0db1cdf4ddf2d99ddfa38615b30c97e7e#egg=papermill
parso==0.7.1
pexpect==4.9.0
pickleshare==0.7.5
pluggy==1.0.0
prompt-toolkit==3.0.36
ptyprocess==0.7.0
py==1.11.0
Pygments==2.14.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.1
pyzmq==25.1.2
requests==2.27.1
s3transfer==0.5.2
six==1.17.0
testpath==0.6.0
textwrap3==0.9.2
tomli==1.2.3
tornado==6.1
tqdm==4.64.1
traitlets==4.3.3
typing_extensions==4.1.1
urllib3==1.26.20
wcwidth==0.2.13
webencodings==0.5.1
zipp==3.6.0
| name: papermill
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- ansiwrap==0.8.4
- async-generator==1.10
- attrs==22.2.0
- backcall==0.2.0
- bleach==4.1.0
- boto3==1.23.10
- botocore==1.26.10
- charset-normalizer==2.0.12
- click==8.0.4
- codecov==2.1.13
- coverage==6.2
- decorator==5.1.1
- defusedxml==0.7.1
- entrypoints==0.4
- idna==3.10
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- ipython==7.16.3
- ipython-genutils==0.2.0
- jedi==0.17.2
- jinja2==3.0.3
- jmespath==0.10.0
- jsonschema==3.2.0
- jupyter-client==7.1.2
- jupyter-core==4.9.2
- jupyterlab-pygments==0.1.2
- markupsafe==2.0.1
- mistune==0.8.4
- nbclient==0.5.9
- nbconvert==6.0.7
- nbformat==5.1.3
- nest-asyncio==1.6.0
- numpy==1.19.5
- packaging==21.3
- pandas==1.1.5
- pandocfilters==1.5.1
- parso==0.7.1
- pexpect==4.9.0
- pickleshare==0.7.5
- pluggy==1.0.0
- prompt-toolkit==3.0.36
- ptyprocess==0.7.0
- py==1.11.0
- pygments==2.14.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.1
- pyzmq==25.1.2
- requests==2.27.1
- s3transfer==0.5.2
- six==1.17.0
- testpath==0.6.0
- textwrap3==0.9.2
- tomli==1.2.3
- tornado==6.1
- tqdm==4.64.1
- traitlets==4.3.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- wcwidth==0.2.13
- webencodings==0.5.1
- zipp==3.6.0
prefix: /opt/conda/envs/papermill
| [
"papermill/tests/test_api.py::test_display",
"papermill/tests/test_api.py::test_record"
]
| []
| [
"papermill/tests/test_api.py::TestNotebookClass::test",
"papermill/tests/test_api.py::TestNotebookClass::test_bad_file_ext",
"papermill/tests/test_api.py::TestNotebookClass::test_path_without_node",
"papermill/tests/test_api.py::TestNotebookCollection::test"
]
| []
| BSD 3-Clause "New" or "Revised" License | 2,016 | [
"papermill/api.py"
]
| [
"papermill/api.py"
]
|
mapbox__mapbox-sdk-py-217 | 9593552604be25b1f4db96804ffadc729995f1a7 | 2018-01-06 00:16:56 | d29f0c4bf475f3a126b61e89bcd1336fec5a7675 | diff --git a/README.rst b/README.rst
index fc69108..635c48b 100644
--- a/README.rst
+++ b/README.rst
@@ -26,11 +26,7 @@ Services
- GeoJSON & Polyline formatting
- Instructions as text or HTML
-- **Distance V1** `examples <./docs/distance.md#distance>`__, `website <https://www.mapbox.com/api-documentation/?language=Python#directions-matrix>`__
-
- - Travel-time tables between up to 100 points
- - Profiles for driving, walking and cycling
-
+- **Distance V1** **DEPRECATED**
- **Geocoding V5** `examples <./docs/geocoding.md#geocoding>`__, `website <https://www.mapbox.com/api-documentation/?language=Python#geocoding>`__
- Forward (place names ⇢ longitude, latitude)
diff --git a/docs/distance.md b/docs/distance.md
index a46588e..5b1b31f 100644
--- a/docs/distance.md
+++ b/docs/distance.md
@@ -1,5 +1,10 @@
# Distance
+**DEPRECATED**
+
+The `mapbox.services.distance` module will be removed in 1.0. Please switch
+to the new `mapbox.services.matrix` module.
+
The `Distance` class from the `mapbox.services.distance` module provides
access to the Mapbox Distance API. You can also import it directly from the
`mapbox` module.
diff --git a/mapbox/services/distance.py b/mapbox/services/distance.py
index 9362532..7f9e1eb 100644
--- a/mapbox/services/distance.py
+++ b/mapbox/services/distance.py
@@ -1,34 +1,23 @@
-from uritemplate import URITemplate
+"""Distance API V1 **DEPRECATED**"""
-from mapbox.encoding import encode_coordinates_json
-from mapbox.errors import InvalidProfileError
-from mapbox.services.base import Service
+import warnings
+from mapbox.errors import MapboxDeprecationWarning
+from mapbox.services.matrix import DirectionsMatrix
-class Distance(Service):
- """Access to the Distance API V1"""
- api_name = 'distances'
- api_version = 'v1'
+class Distance(object):
+ """Access to the Distance API V1 ***DEPRECATED**"""
- valid_profiles = ['driving', 'cycling', 'walking']
-
- @property
- def baseuri(self):
- return 'https://{0}/{1}/{2}/mapbox'.format(
- self.host, self.api_name, self.api_version)
-
- def _validate_profile(self, profile):
- if profile not in self.valid_profiles:
- raise InvalidProfileError(
- "{0} is not a valid profile".format(profile))
- return profile
+ def __init__(self, access_token=None, host=None, cache=None):
+ warnings.warn(
+ "The distance module will be removed in the next version. "
+ "Use the matrix module instead.", MapboxDeprecationWarning)
+ self.access_token = access_token
+ self.host = host
+ self.cache = cache
def distances(self, features, profile='driving'):
- profile = self._validate_profile(profile)
- coords = encode_coordinates_json(features)
- uri = URITemplate(self.baseuri + '/{profile}').expand(profile=profile)
- res = self.session.post(uri, data=coords,
- headers={'Content-Type': 'application/json'})
- self.handle_http_error(res)
- return res
+ service = DirectionsMatrix(
+ access_token=self.access_token, host=self.host, cache=self.cache)
+ return service.matrix(features, profile=profile)
diff --git a/mapbox/services/matrix.py b/mapbox/services/matrix.py
index 9ca07cb..394d8bd 100644
--- a/mapbox/services/matrix.py
+++ b/mapbox/services/matrix.py
@@ -102,4 +102,4 @@ class DirectionsMatrix(Service):
uri = '{0}/{1}/{2}'.format(self.baseuri, profile, coords)
res = self.session.get(uri, params=query)
self.handle_http_error(res)
- return res
+ return res
\ No newline at end of file
| Deprecate distance module
As of version 0.15, users should get their travel times from the Matrix API, not from the Distance API. We'll mark the methods of the distance module deprecated and call those of the matrix module instead of hitting the Distance API endpoint. | mapbox/mapbox-sdk-py | diff --git a/tests/test_distances.py b/tests/test_distances.py
index 4c4be8b..d264600 100644
--- a/tests/test_distances.py
+++ b/tests/test_distances.py
@@ -2,6 +2,7 @@ import pytest
import responses
import mapbox
+from mapbox.errors import MapboxDeprecationWarning
points = [{
@@ -18,8 +19,7 @@ points = [{
"type": "Point",
"coordinates": [
-86.577791,
- 36.722137
- ]}}, {
+ 36.722137]}}, {
"type": "Feature",
"properties": {},
"geometry": {
@@ -29,38 +29,19 @@ points = [{
36.922175]}}]
-def test_class_attrs():
- """Get expected class attr values"""
- serv = mapbox.Distance()
- assert serv.api_name == 'distances'
- assert serv.api_version == 'v1'
-
-
-def test_profile_invalid():
- """'jetpack' is not a valid profile."""
- with pytest.raises(ValueError):
- mapbox.Distance(access_token='pk.test')._validate_profile('jetpack')
-
-
[email protected]('profile', ['driving', 'cycling', 'walking'])
-def test_profile_valid(profile):
- """Profiles are valid"""
- assert profile == mapbox.Distance(
- access_token='pk.test')._validate_profile(profile)
-
-
@responses.activate
def test_distance():
responses.add(
- responses.POST,
- 'https://api.mapbox.com/distances/v1/mapbox/driving?access_token=pk.test',
+ responses.GET,
+ 'https://api.mapbox.com/directions-matrix/v1/mapbox/driving/-87.337875,36.539156;-86.577791,36.722137;-88.247685,36.922175?access_token=pk.test',
match_querystring=True,
body='{"durations":[[0,4977,5951],[4963,0,9349],[5881,9317,0]]}',
status=200,
content_type='application/json')
- res = mapbox.Distance(access_token='pk.test').distances(points)
+ with pytest.warns(MapboxDeprecationWarning):
+ res = mapbox.Distance(access_token='pk.test').distances(points)
assert res.status_code == 200
assert list(res.json().keys()) == ["durations", ]
@@ -69,14 +50,15 @@ def test_distance():
def test_distances_matrix():
responses.add(
- responses.POST,
- 'https://api.mapbox.com/distances/v1/mapbox/driving?access_token=pk.test',
+ responses.GET,
+ 'https://api.mapbox.com/directions-matrix/v1/mapbox/driving/-87.337875,36.539156;-86.577791,36.722137;-88.247685,36.922175?access_token=pk.test',
match_querystring=True,
body='{"durations":[[0,4977,5951],[4963,0,9349],[5881,9317,0]]}',
status=200,
content_type='application/json')
- res = mapbox.Distance(access_token='pk.test').distances(points)
+ with pytest.warns(MapboxDeprecationWarning):
+ res = mapbox.Distance(access_token='pk.test').distances(points)
matrix = res.json()['durations']
# 3x3 list
assert len(matrix) == 3
diff --git a/tests/test_matrix.py b/tests/test_matrix.py
index 5d8dbf7..f637709 100644
--- a/tests/test_matrix.py
+++ b/tests/test_matrix.py
@@ -91,4 +91,4 @@ def test_matrix(waypoints):
matrix = res.json()['durations']
# 3x3 list
assert len(matrix) == 3
- assert len(matrix[0]) == 3
+ assert len(matrix[0]) == 3
\ No newline at end of file
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 4
} | 0.14 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"pip install -U pip"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
boto3==1.23.10
botocore==1.26.10
CacheControl==0.12.14
certifi==2021.5.30
charset-normalizer==2.0.12
click==8.0.4
click-plugins==1.1.1
cligj==0.7.2
coverage==6.2
coveralls==3.3.1
distlib==0.3.9
docopt==0.6.2
filelock==3.4.1
idna==3.10
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
iso3166==2.1.1
jmespath==0.10.0
-e git+https://github.com/mapbox/mapbox-sdk-py.git@9593552604be25b1f4db96804ffadc729995f1a7#egg=mapbox
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
msgpack==1.0.5
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
platformdirs==2.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
polyline==1.4.0
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
requests==2.27.1
responses==0.17.0
s3transfer==0.5.2
six==1.17.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
tox==3.28.0
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
uritemplate==4.1.1
urllib3==1.26.20
virtualenv==20.17.1
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: mapbox-sdk-py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- boto3==1.23.10
- botocore==1.26.10
- cachecontrol==0.12.14
- charset-normalizer==2.0.12
- click==8.0.4
- click-plugins==1.1.1
- cligj==0.7.2
- coverage==6.2
- coveralls==3.3.1
- distlib==0.3.9
- docopt==0.6.2
- filelock==3.4.1
- idna==3.10
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iso3166==2.1.1
- jmespath==0.10.0
- msgpack==1.0.5
- pip==21.3.1
- platformdirs==2.4.0
- polyline==1.4.0
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- requests==2.27.1
- responses==0.17.0
- s3transfer==0.5.2
- six==1.17.0
- tomli==1.2.3
- tox==3.28.0
- uritemplate==4.1.1
- urllib3==1.26.20
- virtualenv==20.17.1
prefix: /opt/conda/envs/mapbox-sdk-py
| [
"tests/test_distances.py::test_distance",
"tests/test_distances.py::test_distances_matrix"
]
| []
| [
"tests/test_matrix.py::test_class_attrs",
"tests/test_matrix.py::test_profile_invalid",
"tests/test_matrix.py::test_profile_valid[mapbox/driving]",
"tests/test_matrix.py::test_profile_valid[mapbox/cycling]",
"tests/test_matrix.py::test_profile_valid[mapbox/walking]",
"tests/test_matrix.py::test_deprecated_profile[mapbox.driving]",
"tests/test_matrix.py::test_deprecated_profile[mapbox.cycling]",
"tests/test_matrix.py::test_deprecated_profile[mapbox.walking]",
"tests/test_matrix.py::test_null_query",
"tests/test_matrix.py::test_query",
"tests/test_matrix.py::test_matrix[waypoints0]",
"tests/test_matrix.py::test_matrix[waypoints1]",
"tests/test_matrix.py::test_matrix[waypoints2]"
]
| []
| MIT License | 2,017 | [
"README.rst",
"docs/distance.md",
"mapbox/services/matrix.py",
"mapbox/services/distance.py"
]
| [
"README.rst",
"docs/distance.md",
"mapbox/services/matrix.py",
"mapbox/services/distance.py"
]
|
|
nginxinc__crossplane-21 | 641124a79676e71b979950b88d2109daf2a67881 | 2018-01-06 02:07:17 | 8709d938119f967ce938dd5163b233ce5439d30d | diff --git a/crossplane/__main__.py b/crossplane/__main__.py
index c92d7f3..16fe34a 100644
--- a/crossplane/__main__.py
+++ b/crossplane/__main__.py
@@ -5,6 +5,7 @@ import sys
from argparse import ArgumentParser, FileType, RawDescriptionHelpFormatter
from traceback import format_exception
+from . import __version__
from .lexer import lex as lex_file
from .parser import parse as parse_file
from .errors import NgxParserBaseException
@@ -70,12 +71,14 @@ def _dump_payload(obj, fp, indent):
fp.write(json.dumps(obj, **kwargs) + '\n')
-def parse(filename, out, indent=None, catch=None, tb_onerror=None):
+def parse(filename, out, indent=None, catch=None, tb_onerror=None, ignore='', single=False):
+ ignore = ignore.split(',') if ignore else []
+
def callback(e):
exc = sys.exc_info() + (10,)
return ''.join(format_exception(*exc)).rstrip()
- kwargs = {'catch_errors': catch}
+ kwargs = {'catch_errors': catch, 'ignore': ignore, 'single': single}
if tb_onerror:
kwargs['onerror'] = callback
@@ -157,6 +160,7 @@ def parse_args(args=None):
description='various operations for nginx config files',
usage='%(prog)s <command> [options]'
)
+ parser.add_argument('-V', '--version', action='version', version='%(prog)s ' + __version__)
subparsers = parser.add_subparsers(title='commands')
def create_subparser(function, help):
@@ -170,8 +174,10 @@ def parse_args(args=None):
p.add_argument('filename', help='the nginx config file')
p.add_argument('-o', '--out', type=FileType('w'), default='-', help='write output to a file')
p.add_argument('-i', '--indent', type=int, metavar='NUM', help='number of spaces to indent output')
+ p.add_argument('--ignore', metavar='DIRECTIVES', default='', help='ignore directives (comma-separated)')
p.add_argument('--no-catch', action='store_false', dest='catch', help='only collect first error in file')
p.add_argument('--tb-onerror', action='store_true', help='include tracebacks in config errors')
+ p.add_argument('--single-file', action='store_true', dest='single', help='do not include other config files')
p = create_subparser(lex, 'lexes tokens from an nginx config file')
p.add_argument('filename', help='the nginx config file')
@@ -201,7 +207,7 @@ def parse_args(args=None):
parsed = parser.parse_args(args=args)
- # this addresses a bug that was added to argparse in Python 3.3
+ # this addresses a bug that was added to argparse in Python 3.3
if not parsed.__dict__:
parser.error('too few arguments')
diff --git a/crossplane/parser.py b/crossplane/parser.py
index 8f0e0f0..d91d835 100644
--- a/crossplane/parser.py
+++ b/crossplane/parser.py
@@ -19,8 +19,17 @@ def _prepare_if_args(stmt):
args[:] = args[start:end]
-def parse(filename, onerror=None, catch_errors=True):
- """Parses an nginx config file and returns a nested dict payload"""
+def parse(filename, onerror=None, catch_errors=True, ignore=(), single=False):
+ """
+ Parses an nginx config file and returns a nested dict payload
+
+ :param filename: string containing the name of the config file to parse
+ :param onerror: function that determines what's saved in "callback"
+ :param catch_errors: bool; if False, parse stops after first error
+ :param ignore: list or tuple of directives to exclude from the payload
+ :param single: bool; if True, including from other files doesn't happen
+ :returns: a payload that describes the parsed nginx config
+ """
config_dir = os.path.dirname(filename)
payload = {
@@ -82,6 +91,13 @@ def parse(filename, onerror=None, catch_errors=True):
stmt['args'].append(token)
token, __ = next(tokens)
+ # consume the directive if it is ignored and move on
+ if stmt['directive'] in ignore:
+ # if this directive was a block consume it too
+ if token == '{':
+ _parse(parsing, tokens, consume=True)
+ continue
+
# prepare arguments
if stmt['directive'] == 'if':
_prepare_if_args(stmt)
@@ -103,7 +119,7 @@ def parse(filename, onerror=None, catch_errors=True):
raise e
# add "includes" to the payload if this is an include statement
- if stmt['directive'] == 'include':
+ if not single and stmt['directive'] == 'include':
pattern = args[0]
if not os.path.isabs(args[0]):
pattern = os.path.join(config_dir, args[0])
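For illustration, a minimal sketch of the two new keyword arguments added above, mirroring the new tests; the config path here is a hypothetical placeholder:

```python
import crossplane

# 'ignore' drops the listed directives (and their blocks) from the payload;
# 'single' stops crossplane from reading the files referenced by any
# remaining 'include' directives.
payload = crossplane.parse(
    '/etc/nginx/nginx.conf',          # hypothetical path
    ignore=['listen', 'server_name'],
    single=True,
)

if payload['status'] == 'ok':
    # with single=True, only the top-level file appears in the payload
    print(payload['config'][0]['file'])
```

The same behavior is exposed on the command line through the new `--ignore` and `--single-file` flags, alongside the added `-V`/`--version` flag.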
| Option to ignore includes
This is a nice-to-have option, at least in my use case. Right now I am 'text'-validating the config generated by the controller. Instead I plan to run it through crossplane and validate the args and the directives. The controller has some default 'include' directives but does not generate the included files, so when I run the config through crossplane it complains about those missing includes. | nginxinc/crossplane | diff --git a/tests/test_parse.py b/tests/test_parse.py
| nginxinc/crossplane | diff --git a/tests/test_parse.py b/tests/test_parse.py
index da5fe2b..c407ee9 100644
--- a/tests/test_parse.py
+++ b/tests/test_parse.py
@@ -247,3 +247,123 @@ def test_includes_globbed():
}
]
}
+
+
+def test_includes_single():
+ dirname = os.path.join(here, 'configs', 'includes-regular')
+ config = os.path.join(dirname, 'nginx.conf')
+ payload = crossplane.parse(config, single=True)
+ assert payload == {
+ 'status': 'ok',
+ 'errors': [],
+ 'config': [
+ {
+ 'file': os.path.join(dirname, 'nginx.conf'),
+ 'status': 'ok',
+ 'errors': [],
+ 'parsed': [
+ {
+ 'directive': 'events',
+ 'line': 1,
+ 'args': [],
+ 'block': []
+ },
+ {
+ 'directive': 'http',
+ 'line': 2,
+ 'args': [],
+ 'block': [
+ {
+ 'directive': 'include',
+ 'line': 3,
+ 'args': ['conf.d/server.conf']
+ # no 'includes' key
+ }
+ ]
+ }
+ ]
+ }
+ # single config parsed
+ ]
+ }
+
+
+def test_ignore_directives():
+ dirname = os.path.join(here, 'configs', 'simple')
+ config = os.path.join(dirname, 'nginx.conf')
+
+ # check that you can ignore multiple directives
+ payload = crossplane.parse(config, ignore=['listen', 'server_name'])
+ assert payload == {
+ "status": "ok",
+ "errors": [],
+ "config": [
+ {
+ "file": os.path.join(dirname, 'nginx.conf'),
+ "status": "ok",
+ "errors": [],
+ "parsed": [
+ {
+ "directive": "events",
+ "line": 1,
+ "args": [],
+ "block": [
+ {
+ "directive": "worker_connections",
+ "line": 2,
+ "args": ["1024"]
+ }
+ ]
+ },
+ {
+ "directive": "http",
+ "line": 5,
+ "args": [],
+ "block": [
+ {
+ "directive": "server",
+ "line": 6,
+ "args": [],
+ "block": [
+ {
+ "directive": "location",
+ "line": 9,
+ "args": ["/"],
+ "block": [
+ {
+ "directive": "return",
+ "line": 10,
+ "args": ["200", "foo bar baz"]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+
+ # check that you can also ignore block directives
+ payload = crossplane.parse(config, ignore=['events', 'server'])
+ assert payload == {
+ "status": "ok",
+ "errors": [],
+ "config": [
+ {
+ "file": os.path.join(dirname, 'nginx.conf'),
+ "status": "ok",
+ "errors": [],
+ "parsed": [
+ {
+ "directive": "http",
+ "line": 5,
+ "args": [],
+ "block": []
+ }
+ ]
+ }
+ ]
+ }
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 3,
"test_score": 0
},
"num_modified_files": 2
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
-e git+https://github.com/nginxinc/crossplane.git@641124a79676e71b979950b88d2109daf2a67881#egg=crossplane
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: crossplane
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
prefix: /opt/conda/envs/crossplane
| [
"tests/test_parse.py::test_includes_single",
"tests/test_parse.py::test_ignore_directives"
]
| [
"tests/test_parse.py::test_includes_globbed"
]
| [
"tests/test_parse.py::test_includes_regular"
]
| []
| Apache License 2.0 | 2,018 | [
"crossplane/__main__.py",
"crossplane/parser.py"
]
| [
"crossplane/__main__.py",
"crossplane/parser.py"
]
|
|
mapbox__mapbox-sdk-py-218 | d29f0c4bf475f3a126b61e89bcd1336fec5a7675 | 2018-01-06 04:56:02 | d29f0c4bf475f3a126b61e89bcd1336fec5a7675 | diff --git a/CHANGES b/CHANGES
index fe6cdbb..571cacb 100644
--- a/CHANGES
+++ b/CHANGES
@@ -5,6 +5,12 @@
- Add support for sequences of geometries and coordinate pairs to read_points
(#212).
+Deprecations:
+
+- 'account' keyword argument of `Uploader` class's `list()`, `delete()`,
+ and `status()` methods will be removed at 1.0. It is replaced by the
+ 'username' keyword argument.
+
0.14.0 (2017-07-10)
-------------------
- Permit images of size 1 and 1280 (#196)
diff --git a/mapbox/services/uploads.py b/mapbox/services/uploads.py
index fb6726c..2b7353e 100644
--- a/mapbox/services/uploads.py
+++ b/mapbox/services/uploads.py
@@ -1,7 +1,8 @@
+"""Mapbox Uploads API
"""
-Mapbox Uploads API
-"""
+
import re
+import warnings
from boto3.session import Session as boto3_session
from uritemplate import URITemplate
@@ -64,6 +65,15 @@ class Uploader(Service):
return tileset
+ # TODO: remove this method at 1.0.
+ def _resolve_username(self, account, username):
+ """Resolve username and handle deprecation of account kwarg"""
+ if account is not None:
+ warnings.warn(
+ "Use keyword argument 'username' instead of 'account'",
+ DeprecationWarning)
+ return username or account or self.username
+
def stage(self, fileobj, creds=None, callback=None):
"""Stages data in a Mapbox-owned S3 bucket
@@ -140,7 +150,7 @@ class Uploader(Service):
requests.Response
"""
tileset = self._validate_tileset(tileset)
- account, _name = tileset.split(".")
+ username, _name = tileset.split(".")
msg = {'tileset': tileset,
'url': stage_url}
@@ -150,15 +160,15 @@ class Uploader(Service):
msg['name'] = name if name else _name
- uri = URITemplate(self.baseuri + '/{account}').expand(
- account=account)
+ uri = URITemplate(self.baseuri + '/{username}').expand(
+ username=username)
resp = self.session.post(uri, json=msg)
self.handle_http_error(resp)
return resp
- def list(self, account=None):
+ def list(self, account=None, username=None):
"""List of all uploads
Returns a Response object, the json() method of which returns
@@ -166,81 +176,74 @@ class Uploader(Service):
Parameters
----------
- account: str
- Account name, defaults to the service's username.
+ username : str
+ Account username, defaults to the service's username.
+ account : str, **deprecated**
+ Alias for username. Will be removed in version 1.0.
Returns
-------
requests.Response
"""
-
- if account is None:
- account = self.username
- uri = URITemplate(self.baseuri + '/{account}').expand(
- account=account)
+ username = self._resolve_username(account, username)
+ uri = URITemplate(self.baseuri + '/{username}').expand(
+ username=username)
resp = self.session.get(uri)
self.handle_http_error(resp)
-
return resp
- def delete(self, upload, account=None):
+ def delete(self, upload, account=None, username=None):
"""Delete the specified upload
Parameters
----------
upload: str
The id of the upload or a dict with key 'id'.
- account: str
- Account name, defaults to the service's username.
+ username : str
+ Account username, defaults to the service's username.
+ account : str, **deprecated**
+ Alias for username. Will be removed in version 1.0.
Returns
-------
requests.Response
"""
-
- if account is None:
- account = self.username
-
+ username = self._resolve_username(account, username)
if isinstance(upload, dict):
upload_id = upload['id']
else:
upload_id = upload
-
- uri = URITemplate(self.baseuri + '/{account}/{upload_id}').expand(
- account=account, upload_id=upload_id)
+ uri = URITemplate(self.baseuri + '/{username}/{upload_id}').expand(
+ username=username, upload_id=upload_id)
resp = self.session.delete(uri)
self.handle_http_error(resp)
-
return resp
- def status(self, upload, account=None):
+ def status(self, upload, account=None, username=None):
"""Check status of upload
Parameters
----------
upload: str
The id of the upload or a dict with key 'id'.
- account: str
- Account name, defaults to the service's username.
+ username : str
+ Account username, defaults to the service's username.
+ account : str, **deprecated**
+ Alias for username. Will be removed in version 1.0.
Returns
-------
requests.Response
"""
-
- if account is None:
- account = self.username
-
+ username = self._resolve_username(account, username)
if isinstance(upload, dict):
upload_id = upload['id']
else:
upload_id = upload
-
- uri = URITemplate(self.baseuri + '/{account}/{upload_id}').expand(
- account=account, upload_id=upload_id)
+ uri = URITemplate(self.baseuri + '/{username}/{upload_id}').expand(
+ username=username, upload_id=upload_id)
resp = self.session.get(uri)
self.handle_http_error(resp)
-
return resp
def upload(self, fileobj, tileset, name=None, patch=False, callback=None):
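As a quick sketch of the deprecation path introduced above, exercising the private `_resolve_username` helper the same way the new unit test does (the usernames here are arbitrary):

```python
import warnings
import mapbox

service = mapbox.Uploader()

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    # 'username' wins over the deprecated 'account' when both are passed
    assert service._resolve_username('foo', 'bar') == 'bar'
    # passing only the old kwarg still resolves correctly...
    assert service._resolve_username('foo', None) == 'foo'

# ...but each call that used 'account' emitted a DeprecationWarning
assert len(caught) == 2
assert all(issubclass(w.category, DeprecationWarning) for w in caught)
```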
| Uploads API parameters should be username, not account
For consistency with the HTTP API documentation. I'd like to keep the `account` kwarg as a deprecated alias until 1.0. | mapbox/mapbox-sdk-py | diff --git a/tests/test_upload.py b/tests/test_upload.py
index 4bb31d9..b802c38 100644
--- a/tests/test_upload.py
+++ b/tests/test_upload.py
@@ -32,6 +32,14 @@ def test_class_attrs():
assert serv.api_version == 'v1'
+# TODO: remove at 1.0.
+def test_resolve_username():
+ """Username is resolved and deprecation warning raised"""
+ serv = mapbox.Uploader()
+ with pytest.warns(DeprecationWarning):
+ assert serv._resolve_username('foo', 'bar') == 'bar'
+
+
@responses.activate
def test_get_credentials():
query_body = """
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 2
} | 0.14 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"responses",
"flake8"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
boto3==1.23.10
botocore==1.26.10
CacheControl==0.12.14
certifi==2021.5.30
charset-normalizer==2.0.12
click==8.0.4
click-plugins==1.1.1
cligj==0.7.2
coverage==6.2
coveralls==3.3.1
distlib==0.3.9
docopt==0.6.2
filelock==3.4.1
flake8==5.0.4
idna==3.10
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
iso3166==2.1.1
jmespath==0.10.0
-e git+https://github.com/mapbox/mapbox-sdk-py.git@d29f0c4bf475f3a126b61e89bcd1336fec5a7675#egg=mapbox
mccabe==0.7.0
msgpack==1.0.5
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
polyline==1.4.0
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
requests==2.27.1
responses==0.17.0
s3transfer==0.5.2
six==1.17.0
toml==0.10.2
tomli==1.2.3
tox==3.28.0
typing_extensions==4.1.1
uritemplate==4.1.1
urllib3==1.26.20
virtualenv==20.17.1
zipp==3.6.0
| name: mapbox-sdk-py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- boto3==1.23.10
- botocore==1.26.10
- cachecontrol==0.12.14
- charset-normalizer==2.0.12
- click==8.0.4
- click-plugins==1.1.1
- cligj==0.7.2
- coverage==6.2
- coveralls==3.3.1
- distlib==0.3.9
- docopt==0.6.2
- filelock==3.4.1
- flake8==5.0.4
- idna==3.10
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- iso3166==2.1.1
- jmespath==0.10.0
- mccabe==0.7.0
- msgpack==1.0.5
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- polyline==1.4.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- requests==2.27.1
- responses==0.17.0
- s3transfer==0.5.2
- six==1.17.0
- toml==0.10.2
- tomli==1.2.3
- tox==3.28.0
- typing-extensions==4.1.1
- uritemplate==4.1.1
- urllib3==1.26.20
- virtualenv==20.17.1
- zipp==3.6.0
prefix: /opt/conda/envs/mapbox-sdk-py
| [
"tests/test_upload.py::test_resolve_username"
]
| []
| [
"tests/test_upload.py::test_class_attrs",
"tests/test_upload.py::test_get_credentials",
"tests/test_upload.py::test_create",
"tests/test_upload.py::test_create_name",
"tests/test_upload.py::test_list",
"tests/test_upload.py::test_status",
"tests/test_upload.py::test_delete",
"tests/test_upload.py::test_stage",
"tests/test_upload.py::test_stage_filename",
"tests/test_upload.py::test_big_stage",
"tests/test_upload.py::test_upload",
"tests/test_upload.py::test_upload_error",
"tests/test_upload.py::test_upload_patch",
"tests/test_upload.py::test_upload_tileset_validation",
"tests/test_upload.py::test_upload_tileset_validation_specialchar",
"tests/test_upload.py::test_upload_tileset_validation_username",
"tests/test_upload.py::test_create_tileset_validation"
]
| []
| MIT License | 2,019 | [
"CHANGES",
"mapbox/services/uploads.py"
]
| [
"CHANGES",
"mapbox/services/uploads.py"
]
|
|
mrocklin__sparse-67 | 9be8058708127553d543a61001843e7064f885b9 | 2018-01-06 05:55:11 | bbb0869c882b914124c44e789214d945ff785aa4 | hameerabbasi: Hello, the correct way to fix this would be to modify `__rmul__` to return `scalar * COO`. Fixing it this way causes the broadcasting path to be taken which will be much, much slower and will fail for scalar `COO` objects, returning 1-D instead of scalar.
fujiisoup: Hi, @hameerabbasi
Thanks for the quick response.
> the correct way to fix this would be to modify __rmul__ to return scalar * COO
`np.number` has `__mul__` method and thus `COO.__rmul__` will not be called.
Instead, `np.number.__mul__` calls `COO.__array_ufunc__`.
Another option would be to have a special treatment for the `func is operator.mul` case in `__array_ufunc__`, but the code would be more complicated.
hameerabbasi: Hey! I came up with a long-term solution to this; I hope it will be of use.
```
@staticmethod
def _elemwise(func, *args, **kwargs):
assert len(args) >= 1
self = args[0]
if isinstance(self, scipy.sparse.spmatrix):
self = COO.from_numpy(self)
elif np.isscalar(self):
func = partial(func, self)
other = args[1]
if isinstance(other, scipy.sparse.spmatrix):
other = COO.from_scipy_sparse(other)
return other._elemwise_unary(func, *args[2:], **kwargs)
if len(args) == 1:
return self._elemwise_unary(func, *args[1:], **kwargs)
else:
other = args[1]
if isinstance(other, scipy.sparse.spmatrix):
other = COO.from_scipy_sparse(other)
if isinstance(other, COO):
return self._elemwise_binary(func, *args[1:], **kwargs)
else:
return self._elemwise_unary(func, *args[1:], **kwargs)
```
If you check this in, maybe @mrocklin can review it.
hameerabbasi: @fujiisoup I've edited the elemwise to account for the unit test failure.
fujiisoup: Cool!!
I added more tests for left-side operations.
Additionally, I added support for 0-dimensional `np.ndarray` as well.
hameerabbasi: Would you be kind enough to rebase this on `master`? Your other PR caused conflicts with this one.
hameerabbasi: Just waiting on either of @nils-werner or @mrocklin to review these changes... Since I was involved in adding code, I shouldn't be the one to review it. :-) | diff --git a/sparse/core.py b/sparse/core.py
index e455a9d..9a28145 100644
--- a/sparse/core.py
+++ b/sparse/core.py
@@ -1,7 +1,7 @@
from __future__ import absolute_import, division, print_function
from collections import Iterable, defaultdict, deque
-from functools import reduce
+from functools import reduce, partial
import numbers
import operator
@@ -728,6 +728,13 @@ class COO(object):
self = args[0]
if isinstance(self, scipy.sparse.spmatrix):
self = COO.from_numpy(self)
+ elif np.isscalar(self) or (isinstance(self, np.ndarray)
+ and self.ndim == 0):
+ func = partial(func, self)
+ other = args[1]
+ if isinstance(other, scipy.sparse.spmatrix):
+ other = COO.from_numpy(other)
+ return other._elemwise_unary(func, *args[2:], **kwargs)
if len(args) == 1:
return self._elemwise_unary(func, *args[1:], **kwargs)
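A minimal sketch of what the branch added above enables; both expressions used to raise the AttributeError shown in the issue report below:

```python
import numpy as np
import sparse

x = sparse.random((2, 3, 4), density=0.5)

# Left-side NumPy scalars now dispatch through COO.__array_ufunc__ ...
y = np.float32(2.0) * x
# ... and so do 0-dimensional ndarrays, which hit the same new branch.
z = np.array(2.0) * x

assert isinstance(y, sparse.COO) and isinstance(z, sparse.COO)
assert y.nnz == x.nnz  # multiplying by a nonzero scalar keeps the pattern
```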
| left-side np.scalar multiplication
Hi,
I noticed that multiplication with a NumPy scalar such as `np.float32` fails:
```python
In [1]: import numpy as np
...: import sparse
...: x = sparse.random((2, 3, 4), density=0.5)
...: x * np.float32(2.0) # This succeeds
...:
Out[1]: <COO: shape=(2, 3, 4), dtype=float64, nnz=12, sorted=False, duplicates=True>
In [2]: np.float32(2.0) * x # fails
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-2-e1d24f7d85b3> in <module>()
----> 1 np.float32(2.0) * x # fails
~/Dropbox/projects/sparse/sparse/core.py in __array_ufunc__(self, ufunc, method, *inputs, **kwargs)
491 def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
492 if method == '__call__':
--> 493 return COO._elemwise(ufunc, *inputs, **kwargs)
494 elif method == 'reduce':
495 return COO._reduce(ufunc, *inputs, **kwargs)
~/Dropbox/projects/sparse/sparse/core.py in _elemwise(func, *args, **kwargs)
735 other = args[1]
736 if isinstance(other, COO):
--> 737 return self._elemwise_binary(func, *args[1:], **kwargs)
738 elif isinstance(other, scipy.sparse.spmatrix):
739 other = COO.from_scipy_sparse(other)
AttributeError: 'numpy.float32' object has no attribute '_elemwise_binary'
``` | mrocklin/sparse | diff --git a/sparse/tests/test_core.py b/sparse/tests/test_core.py
index 394abe7..2858e02 100644
--- a/sparse/tests/test_core.py
+++ b/sparse/tests/test_core.py
@@ -265,8 +265,11 @@ def test_op_scipy_sparse():
(operator.le, -3),
(operator.eq, 1)
])
-def test_elemwise_scalar(func, scalar):
[email protected]('convert_to_np_number', [True, False])
+def test_elemwise_scalar(func, scalar, convert_to_np_number):
xs = sparse.random((2, 3, 4), density=0.5)
+ if convert_to_np_number:
+ scalar = np.float32(scalar)
y = scalar
x = xs.todense()
@@ -278,6 +281,33 @@ def test_elemwise_scalar(func, scalar):
assert_eq(fs, func(x, y))
[email protected]('func, scalar', [
+ (operator.mul, 5),
+ (operator.add, 0),
+ (operator.sub, 0),
+ (operator.gt, -5),
+ (operator.lt, 5),
+ (operator.ne, 0),
+ (operator.ge, -5),
+ (operator.le, 3),
+ (operator.eq, 1)
+])
[email protected]('convert_to_np_number', [True, False])
+def test_leftside_elemwise_scalar(func, scalar, convert_to_np_number):
+ xs = sparse.random((2, 3, 4), density=0.5)
+ if convert_to_np_number:
+ scalar = np.float32(scalar)
+ y = scalar
+
+ x = xs.todense()
+ fs = func(y, xs)
+
+ assert isinstance(fs, COO)
+ assert xs.nnz >= fs.nnz
+
+ assert_eq(fs, func(y, x))
+
+
@pytest.mark.parametrize('func, scalar', [
(operator.add, 5),
(operator.sub, -5),
@@ -601,15 +631,19 @@ def test_broadcast_to(shape1, shape2):
assert_eq(np.broadcast_to(x, shape2), a.broadcast_to(shape2))
-def test_scalar_multiplication():
[email protected]('scalar', [2, 2.5, np.float32(2.0), np.int8(3)])
+def test_scalar_multiplication(scalar):
a = sparse.random((2, 3, 4), density=0.5)
x = a.todense()
- assert_eq(x * 2, a * 2)
- assert_eq(2 * x, 2 * a)
- assert_eq(x / 2, a / 2)
- assert_eq(x / 2.5, a / 2.5)
- assert_eq(x // 2.5, a // 2.5)
+ assert_eq(x * scalar, a * scalar)
+ assert (a * scalar).nnz == a.nnz
+ assert_eq(scalar * x, scalar * a)
+ assert (scalar * a).nnz == a.nnz
+ assert_eq(x / scalar, a / scalar)
+ assert (a / scalar).nnz == a.nnz
+ assert_eq(x // scalar, a // scalar)
+ # division may reduce nnz.
@pytest.mark.filterwarnings('ignore:divide by zero')
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[tests]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-flake8"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
coverage==6.2
flake8==5.0.4
importlib-metadata==4.2.0
iniconfig==1.1.1
mccabe==0.7.0
numpy==1.19.5
packaging==21.3
pluggy==1.0.0
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
pytest-flake8==1.1.1
scipy==1.5.4
-e git+https://github.com/mrocklin/sparse.git@9be8058708127553d543a61001843e7064f885b9#egg=sparse
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: sparse
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- flake8==5.0.4
- importlib-metadata==4.2.0
- iniconfig==1.1.1
- mccabe==0.7.0
- numpy==1.19.5
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-flake8==1.1.1
- scipy==1.5.4
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/sparse
| [
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[True-mul-5]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[True-add-0]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[True-sub-0]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[True-gt--5]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[True-lt-5]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[True-ge--5]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[True-le-3]",
"sparse/tests/test_core.py::test_scalar_multiplication[scalar2]",
"sparse/tests/test_core.py::test_scalar_multiplication[scalar3]"
]
| [
"sparse/__init__.py::flake-8::FLAKE8",
"sparse/core.py::flake-8::FLAKE8",
"sparse/core.py::sparse.core.COO",
"sparse/slicing.py::flake-8::FLAKE8",
"sparse/utils.py::flake-8::FLAKE8",
"sparse/tests/test_core.py::flake-8::FLAKE8"
]
| [
"sparse/core.py::sparse.core.random",
"sparse/slicing.py::sparse.slicing.check_index",
"sparse/slicing.py::sparse.slicing.normalize_index",
"sparse/slicing.py::sparse.slicing.normalize_slice",
"sparse/slicing.py::sparse.slicing.posify_index",
"sparse/slicing.py::sparse.slicing.replace_ellipsis",
"sparse/slicing.py::sparse.slicing.sanitize_index",
"sparse/tests/test_core.py::test_reductions[True-None-max-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_reductions[True-None-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_reductions[True-None-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_reductions[True-None-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_reductions[True-None-min-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_reductions[True-0-max-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_reductions[True-0-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_reductions[True-0-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_reductions[True-0-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_reductions[True-0-min-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_reductions[True-1-max-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_reductions[True-1-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_reductions[True-1-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_reductions[True-1-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_reductions[True-1-min-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_reductions[True-2-max-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_reductions[True-2-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_reductions[True-2-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_reductions[True-2-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_reductions[True-2-min-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_reductions[True-axis4-max-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_reductions[True-axis4-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_reductions[True-axis4-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_reductions[True-axis4-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_reductions[True-axis4-min-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_reductions[False-None-max-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_reductions[False-None-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_reductions[False-None-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_reductions[False-None-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_reductions[False-None-min-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_reductions[False-0-max-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_reductions[False-0-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_reductions[False-0-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_reductions[False-0-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_reductions[False-0-min-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_reductions[False-1-max-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_reductions[False-1-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_reductions[False-1-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_reductions[False-1-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_reductions[False-1-min-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_reductions[False-2-max-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_reductions[False-2-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_reductions[False-2-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_reductions[False-2-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_reductions[False-2-min-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_reductions[False-axis4-max-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_reductions[False-axis4-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_reductions[False-axis4-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_reductions[False-axis4-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_reductions[False-axis4-min-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-None-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-None-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-None-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-None-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-None-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-0-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-0-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-0-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-0-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-0-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-1-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-1-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-1-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-1-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-1-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-2-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-2-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-2-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-2-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-2-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-axis4-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-axis4-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-axis4-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-axis4-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-axis4-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-None-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-None-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-None-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-None-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-None-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-0-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-0-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-0-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-0-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-0-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-1-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-1-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-1-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-1-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-1-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-2-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-2-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-2-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-2-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-2-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-axis4-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-axis4-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-axis4-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-axis4-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-axis4-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_transpose[None]",
"sparse/tests/test_core.py::test_transpose[axis1]",
"sparse/tests/test_core.py::test_transpose[axis2]",
"sparse/tests/test_core.py::test_transpose[axis3]",
"sparse/tests/test_core.py::test_transpose[axis4]",
"sparse/tests/test_core.py::test_transpose[axis5]",
"sparse/tests/test_core.py::test_transpose[axis6]",
"sparse/tests/test_core.py::test_transpose_error[axis0]",
"sparse/tests/test_core.py::test_transpose_error[axis1]",
"sparse/tests/test_core.py::test_transpose_error[axis2]",
"sparse/tests/test_core.py::test_transpose_error[axis3]",
"sparse/tests/test_core.py::test_transpose_error[axis4]",
"sparse/tests/test_core.py::test_transpose_error[axis5]",
"sparse/tests/test_core.py::test_reshape[a0-b0]",
"sparse/tests/test_core.py::test_reshape[a1-b1]",
"sparse/tests/test_core.py::test_reshape[a2-b2]",
"sparse/tests/test_core.py::test_reshape[a3-b3]",
"sparse/tests/test_core.py::test_reshape[a4-b4]",
"sparse/tests/test_core.py::test_reshape[a5-b5]",
"sparse/tests/test_core.py::test_reshape[a6-b6]",
"sparse/tests/test_core.py::test_reshape[a7-b7]",
"sparse/tests/test_core.py::test_reshape[a8-b8]",
"sparse/tests/test_core.py::test_reshape[a9-b9]",
"sparse/tests/test_core.py::test_large_reshape",
"sparse/tests/test_core.py::test_reshape_same",
"sparse/tests/test_core.py::test_to_scipy_sparse",
"sparse/tests/test_core.py::test_tensordot[a_shape0-b_shape0-axes0]",
"sparse/tests/test_core.py::test_tensordot[a_shape1-b_shape1-axes1]",
"sparse/tests/test_core.py::test_tensordot[a_shape2-b_shape2-axes2]",
"sparse/tests/test_core.py::test_tensordot[a_shape3-b_shape3-axes3]",
"sparse/tests/test_core.py::test_tensordot[a_shape4-b_shape4-axes4]",
"sparse/tests/test_core.py::test_tensordot[a_shape5-b_shape5-axes5]",
"sparse/tests/test_core.py::test_tensordot[a_shape6-b_shape6-axes6]",
"sparse/tests/test_core.py::test_tensordot[a_shape7-b_shape7-axes7]",
"sparse/tests/test_core.py::test_tensordot[a_shape8-b_shape8-axes8]",
"sparse/tests/test_core.py::test_tensordot[a_shape9-b_shape9-0]",
"sparse/tests/test_core.py::test_dot",
"sparse/tests/test_core.py::test_elemwise[expm1]",
"sparse/tests/test_core.py::test_elemwise[log1p]",
"sparse/tests/test_core.py::test_elemwise[sin]",
"sparse/tests/test_core.py::test_elemwise[tan]",
"sparse/tests/test_core.py::test_elemwise[sinh]",
"sparse/tests/test_core.py::test_elemwise[tanh]",
"sparse/tests/test_core.py::test_elemwise[floor]",
"sparse/tests/test_core.py::test_elemwise[ceil]",
"sparse/tests/test_core.py::test_elemwise[sqrt]",
"sparse/tests/test_core.py::test_elemwise[conjugate0]",
"sparse/tests/test_core.py::test_elemwise[round_]",
"sparse/tests/test_core.py::test_elemwise[rint]",
"sparse/tests/test_core.py::test_elemwise[<lambda>0]",
"sparse/tests/test_core.py::test_elemwise[conjugate1]",
"sparse/tests/test_core.py::test_elemwise[conjugate2]",
"sparse/tests/test_core.py::test_elemwise[<lambda>1]",
"sparse/tests/test_core.py::test_elemwise[abs]",
"sparse/tests/test_core.py::test_elemwise_binary[shape0-mul]",
"sparse/tests/test_core.py::test_elemwise_binary[shape0-add]",
"sparse/tests/test_core.py::test_elemwise_binary[shape0-sub]",
"sparse/tests/test_core.py::test_elemwise_binary[shape0-gt]",
"sparse/tests/test_core.py::test_elemwise_binary[shape0-lt]",
"sparse/tests/test_core.py::test_elemwise_binary[shape0-ne]",
"sparse/tests/test_core.py::test_elemwise_binary[shape1-mul]",
"sparse/tests/test_core.py::test_elemwise_binary[shape1-add]",
"sparse/tests/test_core.py::test_elemwise_binary[shape1-sub]",
"sparse/tests/test_core.py::test_elemwise_binary[shape1-gt]",
"sparse/tests/test_core.py::test_elemwise_binary[shape1-lt]",
"sparse/tests/test_core.py::test_elemwise_binary[shape1-ne]",
"sparse/tests/test_core.py::test_elemwise_binary[shape2-mul]",
"sparse/tests/test_core.py::test_elemwise_binary[shape2-add]",
"sparse/tests/test_core.py::test_elemwise_binary[shape2-sub]",
"sparse/tests/test_core.py::test_elemwise_binary[shape2-gt]",
"sparse/tests/test_core.py::test_elemwise_binary[shape2-lt]",
"sparse/tests/test_core.py::test_elemwise_binary[shape2-ne]",
"sparse/tests/test_core.py::test_elemwise_binary[shape3-mul]",
"sparse/tests/test_core.py::test_elemwise_binary[shape3-add]",
"sparse/tests/test_core.py::test_elemwise_binary[shape3-sub]",
"sparse/tests/test_core.py::test_elemwise_binary[shape3-gt]",
"sparse/tests/test_core.py::test_elemwise_binary[shape3-lt]",
"sparse/tests/test_core.py::test_elemwise_binary[shape3-ne]",
"sparse/tests/test_core.py::test_auto_densification_fails[pow]",
"sparse/tests/test_core.py::test_auto_densification_fails[truediv]",
"sparse/tests/test_core.py::test_auto_densification_fails[floordiv]",
"sparse/tests/test_core.py::test_auto_densification_fails[ge]",
"sparse/tests/test_core.py::test_auto_densification_fails[le]",
"sparse/tests/test_core.py::test_auto_densification_fails[eq]",
"sparse/tests/test_core.py::test_op_scipy_sparse",
"sparse/tests/test_core.py::test_elemwise_scalar[True-mul-5]",
"sparse/tests/test_core.py::test_elemwise_scalar[True-add-0]",
"sparse/tests/test_core.py::test_elemwise_scalar[True-sub-0]",
"sparse/tests/test_core.py::test_elemwise_scalar[True-pow-5]",
"sparse/tests/test_core.py::test_elemwise_scalar[True-truediv-3]",
"sparse/tests/test_core.py::test_elemwise_scalar[True-floordiv-4]",
"sparse/tests/test_core.py::test_elemwise_scalar[True-gt-5]",
"sparse/tests/test_core.py::test_elemwise_scalar[True-lt--5]",
"sparse/tests/test_core.py::test_elemwise_scalar[True-ne-0]",
"sparse/tests/test_core.py::test_elemwise_scalar[True-ge-5]",
"sparse/tests/test_core.py::test_elemwise_scalar[True-le--3]",
"sparse/tests/test_core.py::test_elemwise_scalar[True-eq-1]",
"sparse/tests/test_core.py::test_elemwise_scalar[False-mul-5]",
"sparse/tests/test_core.py::test_elemwise_scalar[False-add-0]",
"sparse/tests/test_core.py::test_elemwise_scalar[False-sub-0]",
"sparse/tests/test_core.py::test_elemwise_scalar[False-pow-5]",
"sparse/tests/test_core.py::test_elemwise_scalar[False-truediv-3]",
"sparse/tests/test_core.py::test_elemwise_scalar[False-floordiv-4]",
"sparse/tests/test_core.py::test_elemwise_scalar[False-gt-5]",
"sparse/tests/test_core.py::test_elemwise_scalar[False-lt--5]",
"sparse/tests/test_core.py::test_elemwise_scalar[False-ne-0]",
"sparse/tests/test_core.py::test_elemwise_scalar[False-ge-5]",
"sparse/tests/test_core.py::test_elemwise_scalar[False-le--3]",
"sparse/tests/test_core.py::test_elemwise_scalar[False-eq-1]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[True-ne-0]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[True-eq-1]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[False-mul-5]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[False-add-0]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[False-sub-0]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[False-gt--5]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[False-lt-5]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[False-ne-0]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[False-ge--5]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[False-le-3]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[False-eq-1]",
"sparse/tests/test_core.py::test_scalar_densification_fails[add-5]",
"sparse/tests/test_core.py::test_scalar_densification_fails[sub--5]",
"sparse/tests/test_core.py::test_scalar_densification_fails[pow--3]",
"sparse/tests/test_core.py::test_scalar_densification_fails[truediv-0]",
"sparse/tests/test_core.py::test_scalar_densification_fails[floordiv-0]",
"sparse/tests/test_core.py::test_scalar_densification_fails[gt--5]",
"sparse/tests/test_core.py::test_scalar_densification_fails[lt-5]",
"sparse/tests/test_core.py::test_scalar_densification_fails[ne-1]",
"sparse/tests/test_core.py::test_scalar_densification_fails[ge--3]",
"sparse/tests/test_core.py::test_scalar_densification_fails[le-3]",
"sparse/tests/test_core.py::test_scalar_densification_fails[eq-0]",
"sparse/tests/test_core.py::test_bitwise_binary[shape0-and_]",
"sparse/tests/test_core.py::test_bitwise_binary[shape0-or_]",
"sparse/tests/test_core.py::test_bitwise_binary[shape0-xor]",
"sparse/tests/test_core.py::test_bitwise_binary[shape1-and_]",
"sparse/tests/test_core.py::test_bitwise_binary[shape1-or_]",
"sparse/tests/test_core.py::test_bitwise_binary[shape1-xor]",
"sparse/tests/test_core.py::test_bitwise_binary[shape2-and_]",
"sparse/tests/test_core.py::test_bitwise_binary[shape2-or_]",
"sparse/tests/test_core.py::test_bitwise_binary[shape2-xor]",
"sparse/tests/test_core.py::test_bitwise_binary[shape3-and_]",
"sparse/tests/test_core.py::test_bitwise_binary[shape3-or_]",
"sparse/tests/test_core.py::test_bitwise_binary[shape3-xor]",
"sparse/tests/test_core.py::test_bitwise_binary_bool[shape0-and_]",
"sparse/tests/test_core.py::test_bitwise_binary_bool[shape0-or_]",
"sparse/tests/test_core.py::test_bitwise_binary_bool[shape0-xor]",
"sparse/tests/test_core.py::test_bitwise_binary_bool[shape1-and_]",
"sparse/tests/test_core.py::test_bitwise_binary_bool[shape1-or_]",
"sparse/tests/test_core.py::test_bitwise_binary_bool[shape1-xor]",
"sparse/tests/test_core.py::test_bitwise_binary_bool[shape2-and_]",
"sparse/tests/test_core.py::test_bitwise_binary_bool[shape2-or_]",
"sparse/tests/test_core.py::test_bitwise_binary_bool[shape2-xor]",
"sparse/tests/test_core.py::test_bitwise_binary_bool[shape3-and_]",
"sparse/tests/test_core.py::test_bitwise_binary_bool[shape3-or_]",
"sparse/tests/test_core.py::test_bitwise_binary_bool[shape3-xor]",
"sparse/tests/test_core.py::test_elemwise_binary_empty",
"sparse/tests/test_core.py::test_gt",
"sparse/tests/test_core.py::test_slicing[0]",
"sparse/tests/test_core.py::test_slicing[1]",
"sparse/tests/test_core.py::test_slicing[-1]",
"sparse/tests/test_core.py::test_slicing[index3]",
"sparse/tests/test_core.py::test_slicing[index4]",
"sparse/tests/test_core.py::test_slicing[index5]",
"sparse/tests/test_core.py::test_slicing[index6]",
"sparse/tests/test_core.py::test_slicing[index7]",
"sparse/tests/test_core.py::test_slicing[index8]",
"sparse/tests/test_core.py::test_slicing[index9]",
"sparse/tests/test_core.py::test_slicing[index10]",
"sparse/tests/test_core.py::test_slicing[index11]",
"sparse/tests/test_core.py::test_slicing[index12]",
"sparse/tests/test_core.py::test_slicing[index13]",
"sparse/tests/test_core.py::test_slicing[index14]",
"sparse/tests/test_core.py::test_slicing[index15]",
"sparse/tests/test_core.py::test_slicing[index16]",
"sparse/tests/test_core.py::test_slicing[index17]",
"sparse/tests/test_core.py::test_slicing[index18]",
"sparse/tests/test_core.py::test_slicing[index19]",
"sparse/tests/test_core.py::test_slicing[index20]",
"sparse/tests/test_core.py::test_slicing[index21]",
"sparse/tests/test_core.py::test_slicing[index22]",
"sparse/tests/test_core.py::test_slicing[index23]",
"sparse/tests/test_core.py::test_slicing[index24]",
"sparse/tests/test_core.py::test_slicing[index25]",
"sparse/tests/test_core.py::test_slicing[index26]",
"sparse/tests/test_core.py::test_slicing[index27]",
"sparse/tests/test_core.py::test_slicing[index28]",
"sparse/tests/test_core.py::test_slicing[index29]",
"sparse/tests/test_core.py::test_slicing[index30]",
"sparse/tests/test_core.py::test_slicing[index31]",
"sparse/tests/test_core.py::test_slicing[index32]",
"sparse/tests/test_core.py::test_slicing[index33]",
"sparse/tests/test_core.py::test_slicing[index34]",
"sparse/tests/test_core.py::test_slicing[index35]",
"sparse/tests/test_core.py::test_slicing[index36]",
"sparse/tests/test_core.py::test_slicing[index37]",
"sparse/tests/test_core.py::test_slicing[index38]",
"sparse/tests/test_core.py::test_slicing[index39]",
"sparse/tests/test_core.py::test_slicing[index40]",
"sparse/tests/test_core.py::test_slicing[index41]",
"sparse/tests/test_core.py::test_slicing[index42]",
"sparse/tests/test_core.py::test_slicing[index43]",
"sparse/tests/test_core.py::test_slicing[index44]",
"sparse/tests/test_core.py::test_custom_dtype_slicing",
"sparse/tests/test_core.py::test_slicing_errors[index0]",
"sparse/tests/test_core.py::test_slicing_errors[index1]",
"sparse/tests/test_core.py::test_slicing_errors[index2]",
"sparse/tests/test_core.py::test_slicing_errors[5]",
"sparse/tests/test_core.py::test_slicing_errors[-5]",
"sparse/tests/test_core.py::test_slicing_errors[foo]",
"sparse/tests/test_core.py::test_slicing_errors[index6]",
"sparse/tests/test_core.py::test_canonical",
"sparse/tests/test_core.py::test_concatenate",
"sparse/tests/test_core.py::test_concatenate_mixed[stack-0]",
"sparse/tests/test_core.py::test_concatenate_mixed[stack-1]",
"sparse/tests/test_core.py::test_concatenate_mixed[concatenate-0]",
"sparse/tests/test_core.py::test_concatenate_mixed[concatenate-1]",
"sparse/tests/test_core.py::test_stack[0-shape0]",
"sparse/tests/test_core.py::test_stack[0-shape1]",
"sparse/tests/test_core.py::test_stack[0-shape2]",
"sparse/tests/test_core.py::test_stack[1-shape0]",
"sparse/tests/test_core.py::test_stack[1-shape1]",
"sparse/tests/test_core.py::test_stack[1-shape2]",
"sparse/tests/test_core.py::test_stack[-1-shape0]",
"sparse/tests/test_core.py::test_stack[-1-shape1]",
"sparse/tests/test_core.py::test_stack[-1-shape2]",
"sparse/tests/test_core.py::test_large_concat_stack",
"sparse/tests/test_core.py::test_coord_dtype",
"sparse/tests/test_core.py::test_addition",
"sparse/tests/test_core.py::test_addition_not_ok_when_large_and_sparse",
"sparse/tests/test_core.py::test_broadcasting[shape10-shape20-add]",
"sparse/tests/test_core.py::test_broadcasting[shape10-shape20-mul]",
"sparse/tests/test_core.py::test_broadcasting[shape11-shape21-add]",
"sparse/tests/test_core.py::test_broadcasting[shape11-shape21-mul]",
"sparse/tests/test_core.py::test_broadcasting[shape12-shape22-add]",
"sparse/tests/test_core.py::test_broadcasting[shape12-shape22-mul]",
"sparse/tests/test_core.py::test_broadcasting[shape13-shape23-add]",
"sparse/tests/test_core.py::test_broadcasting[shape13-shape23-mul]",
"sparse/tests/test_core.py::test_broadcasting[shape14-shape24-add]",
"sparse/tests/test_core.py::test_broadcasting[shape14-shape24-mul]",
"sparse/tests/test_core.py::test_broadcasting[shape15-shape25-add]",
"sparse/tests/test_core.py::test_broadcasting[shape15-shape25-mul]",
"sparse/tests/test_core.py::test_broadcast_to[shape10-shape20]",
"sparse/tests/test_core.py::test_broadcast_to[shape11-shape21]",
"sparse/tests/test_core.py::test_broadcast_to[shape12-shape22]",
"sparse/tests/test_core.py::test_scalar_multiplication[2]",
"sparse/tests/test_core.py::test_scalar_multiplication[2.5]",
"sparse/tests/test_core.py::test_scalar_exponentiation",
"sparse/tests/test_core.py::test_create_with_lists_of_tuples",
"sparse/tests/test_core.py::test_sizeof",
"sparse/tests/test_core.py::test_scipy_sparse_interface",
"sparse/tests/test_core.py::test_cache_csr",
"sparse/tests/test_core.py::test_empty_shape",
"sparse/tests/test_core.py::test_single_dimension",
"sparse/tests/test_core.py::test_raise_dense",
"sparse/tests/test_core.py::test_large_sum",
"sparse/tests/test_core.py::test_add_many_sparse_arrays",
"sparse/tests/test_core.py::test_caching",
"sparse/tests/test_core.py::test_scalar_slicing",
"sparse/tests/test_core.py::test_triul[shape0-0]",
"sparse/tests/test_core.py::test_triul[shape1-1]",
"sparse/tests/test_core.py::test_triul[shape2--1]",
"sparse/tests/test_core.py::test_triul[shape3--2]",
"sparse/tests/test_core.py::test_triul[shape4-1000]",
"sparse/tests/test_core.py::test_empty_reduction",
"sparse/tests/test_core.py::test_random_shape[0.1-shape0]",
"sparse/tests/test_core.py::test_random_shape[0.1-shape1]",
"sparse/tests/test_core.py::test_random_shape[0.1-shape2]",
"sparse/tests/test_core.py::test_random_shape[0.3-shape0]",
"sparse/tests/test_core.py::test_random_shape[0.3-shape1]",
"sparse/tests/test_core.py::test_random_shape[0.3-shape2]",
"sparse/tests/test_core.py::test_random_shape[0.5-shape0]",
"sparse/tests/test_core.py::test_random_shape[0.5-shape1]",
"sparse/tests/test_core.py::test_random_shape[0.5-shape2]",
"sparse/tests/test_core.py::test_random_shape[0.7-shape0]",
"sparse/tests/test_core.py::test_random_shape[0.7-shape1]",
"sparse/tests/test_core.py::test_random_shape[0.7-shape2]",
"sparse/tests/test_core.py::test_two_random_unequal",
"sparse/tests/test_core.py::test_two_random_same_seed",
"sparse/tests/test_core.py::test_random_sorted",
"sparse/tests/test_core.py::test_random_rvs[0.0-shape0-None-float64]",
"sparse/tests/test_core.py::test_random_rvs[0.0-shape0-rvs-int]",
"sparse/tests/test_core.py::test_random_rvs[0.0-shape0-<lambda>-bool]",
"sparse/tests/test_core.py::test_random_rvs[0.0-shape1-None-float64]",
"sparse/tests/test_core.py::test_random_rvs[0.0-shape1-rvs-int]",
"sparse/tests/test_core.py::test_random_rvs[0.0-shape1-<lambda>-bool]",
"sparse/tests/test_core.py::test_random_rvs[0.01-shape0-None-float64]",
"sparse/tests/test_core.py::test_random_rvs[0.01-shape0-rvs-int]",
"sparse/tests/test_core.py::test_random_rvs[0.01-shape0-<lambda>-bool]",
"sparse/tests/test_core.py::test_random_rvs[0.01-shape1-None-float64]",
"sparse/tests/test_core.py::test_random_rvs[0.01-shape1-rvs-int]",
"sparse/tests/test_core.py::test_random_rvs[0.01-shape1-<lambda>-bool]",
"sparse/tests/test_core.py::test_random_rvs[0.1-shape0-None-float64]",
"sparse/tests/test_core.py::test_random_rvs[0.1-shape0-rvs-int]",
"sparse/tests/test_core.py::test_random_rvs[0.1-shape0-<lambda>-bool]",
"sparse/tests/test_core.py::test_random_rvs[0.1-shape1-None-float64]",
"sparse/tests/test_core.py::test_random_rvs[0.1-shape1-rvs-int]",
"sparse/tests/test_core.py::test_random_rvs[0.1-shape1-<lambda>-bool]",
"sparse/tests/test_core.py::test_random_rvs[0.2-shape0-None-float64]",
"sparse/tests/test_core.py::test_random_rvs[0.2-shape0-rvs-int]",
"sparse/tests/test_core.py::test_random_rvs[0.2-shape0-<lambda>-bool]",
"sparse/tests/test_core.py::test_random_rvs[0.2-shape1-None-float64]",
"sparse/tests/test_core.py::test_random_rvs[0.2-shape1-rvs-int]",
"sparse/tests/test_core.py::test_random_rvs[0.2-shape1-<lambda>-bool]",
"sparse/tests/test_core.py::test_scalar_shape_construction"
]
| []
| Modified BSD License | 2,020 | [
"sparse/core.py"
]
| [
"sparse/core.py"
]
|
networkx__networkx-2816 | b271d45e1329ef65d888366c595c010070abe035 | 2018-01-07 03:23:01 | 93b4b9227aa8a7ac4cbd946cf3dae3b168e17b45 | diff --git a/networkx/classes/reportviews.py b/networkx/classes/reportviews.py
index ac5255f2f..6fe61f28f 100644
--- a/networkx/classes/reportviews.py
+++ b/networkx/classes/reportviews.py
@@ -791,8 +791,7 @@ class OutMultiEdgeDataView(OutEdgeDataView):
if data in dd else (n, nbr, default)
def __len__(self):
- return sum(len(kd) for n, nbrs in self._nodes_nbrs()
- for nbr, kd in nbrs.items())
+ return sum(1 for e in self)
def __iter__(self):
return (self._report(n, nbr, k, dd) for n, nbrs in self._nodes_nbrs()
@@ -821,10 +820,6 @@ class MultiEdgeDataView(OutMultiEdgeDataView):
"""An EdgeDataView class for edges of MultiGraph; See EdgeDataView"""
__slots__ = ()
- def __len__(self):
- # nbunch makes it hard to count edges between nodes in nbunch
- return sum(1 for e in self)
-
def __iter__(self):
seen = {}
for n, nbrs in self._nodes_nbrs():
@@ -1016,7 +1011,7 @@ class EdgeView(OutEdgeView):
dataview = EdgeDataView
def __len__(self):
- return sum(len(nbrs) for n, nbrs in self._nodes_nbrs()) // 2
+ return sum(len(nbrs) + (n in nbrs) for n, nbrs in self._nodes_nbrs()) // 2
def __iter__(self):
seen = {}
@@ -1120,8 +1115,7 @@ class MultiEdgeView(OutMultiEdgeView):
dataview = MultiEdgeDataView
def __len__(self):
- return sum(len(kdict) for n, nbrs in self._nodes_nbrs()
- for nbr, kdict in nbrs.items()) // 2
+ return sum(1 for e in self)
def __iter__(self):
seen = {}
| len(G.edges) unexpected values
I'm not sure if this is a bug or expected behavior but it's at least confusing. This is using 2.0 nx.Graph() - I would provide the data to recreate, but it's private and I'm not sure why this is occurring, which might just be my lack of knowledge
```
>>> len(G.edges())
300
>>> G.number_of_edges()
312
>>> count = 0
>>> s = set()
>>> for edge in G.edges():
... seen = edge in s or (edge[1], edge[0]) in s
... if not seen:
... count += 1
... s.add(edge)
>>> count
312
```
What are likely reasons that len() would give a different answer than number_of_edges()? I thought it was because of reversed edges, but that doesn't seem to be the case either. | networkx/networkx | diff --git a/networkx/classes/tests/test_reportviews.py b/networkx/classes/tests/test_reportviews.py
index a4a5c2c1a..7fd96d4dd 100644
--- a/networkx/classes/tests/test_reportviews.py
+++ b/networkx/classes/tests/test_reportviews.py
@@ -326,6 +326,12 @@ class TestEdgeDataView(object):
assert_equal(len(self.G.edges()), 8)
assert_equal(len(self.G.edges), 8)
+ H = self.G.copy()
+ H.add_edge(1, 1)
+ assert_equal(len(H.edges(1)), 3)
+ assert_equal(len(H.edges()), 9)
+ assert_equal(len(H.edges), 9)
+
class TestOutEdgeDataView(TestEdgeDataView):
def setUp(self):
@@ -351,6 +357,12 @@ class TestOutEdgeDataView(TestEdgeDataView):
assert_equal(len(self.G.edges()), 8)
assert_equal(len(self.G.edges), 8)
+ H = self.G.copy()
+ H.add_edge(1, 1)
+ assert_equal(len(H.edges(1)), 2)
+ assert_equal(len(H.edges()), 9)
+ assert_equal(len(H.edges), 9)
+
class TestInEdgeDataView(TestOutEdgeDataView):
def setUp(self):
@@ -486,6 +498,12 @@ class TestEdgeView(object):
num_ed = 9 if self.G.is_multigraph() else 8
assert_equal(len(ev), num_ed)
+ H = self.G.copy()
+ H.add_edge(1, 1)
+ assert_equal(len(H.edges(1)), 3 + H.is_multigraph() - H.is_directed())
+ assert_equal(len(H.edges()), num_ed + 1)
+ assert_equal(len(H.edges), num_ed + 1)
+
def test_and(self):
# print("G & H edges:", gnv & hnv)
ev = self.eview(self.G)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 2.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y libgdal-dev graphviz"
],
"python": "3.6",
"reqs_path": [
"requirements/default.txt",
"requirements/test.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
charset-normalizer==2.0.12
codecov==2.1.13
coverage==6.2
decorator==5.1.1
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
-e git+https://github.com/networkx/networkx.git@b271d45e1329ef65d888366c595c010070abe035#egg=networkx
nose==1.3.7
nose-ignore-docstring==0.2
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
requests==2.27.1
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: networkx
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- charset-normalizer==2.0.12
- codecov==2.1.13
- coverage==6.2
- decorator==5.1.1
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- nose==1.3.7
- nose-ignore-docstring==0.2
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- requests==2.27.1
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/networkx
| [
"networkx/classes/tests/test_reportviews.py::TestEdgeView::test_len",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeView::test_len"
]
| [
"networkx/classes/tests/test_reportviews.py::TestNodeViewSetOps::test_len",
"networkx/classes/tests/test_reportviews.py::TestNodeViewSetOps::test_and",
"networkx/classes/tests/test_reportviews.py::TestNodeViewSetOps::test_or",
"networkx/classes/tests/test_reportviews.py::TestNodeViewSetOps::test_xor",
"networkx/classes/tests/test_reportviews.py::TestNodeViewSetOps::test_sub",
"networkx/classes/tests/test_reportviews.py::TestNodeDataViewSetOps::test_len",
"networkx/classes/tests/test_reportviews.py::TestNodeDataViewSetOps::test_and",
"networkx/classes/tests/test_reportviews.py::TestNodeDataViewSetOps::test_or",
"networkx/classes/tests/test_reportviews.py::TestNodeDataViewSetOps::test_xor",
"networkx/classes/tests/test_reportviews.py::TestNodeDataViewSetOps::test_sub",
"networkx/classes/tests/test_reportviews.py::TestNodeDataViewDefaultSetOps::test_len",
"networkx/classes/tests/test_reportviews.py::TestNodeDataViewDefaultSetOps::test_and",
"networkx/classes/tests/test_reportviews.py::TestNodeDataViewDefaultSetOps::test_or",
"networkx/classes/tests/test_reportviews.py::TestNodeDataViewDefaultSetOps::test_xor",
"networkx/classes/tests/test_reportviews.py::TestNodeDataViewDefaultSetOps::test_sub",
"networkx/classes/tests/test_reportviews.py::TestEdgeDataView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestEdgeDataView::test_str",
"networkx/classes/tests/test_reportviews.py::TestEdgeDataView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestEdgeDataView::test_iterdata",
"networkx/classes/tests/test_reportviews.py::TestEdgeDataView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestEdgeDataView::test_contains",
"networkx/classes/tests/test_reportviews.py::TestEdgeDataView::test_len",
"networkx/classes/tests/test_reportviews.py::TestOutEdgeDataView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestOutEdgeDataView::test_str",
"networkx/classes/tests/test_reportviews.py::TestOutEdgeDataView::test_iterdata",
"networkx/classes/tests/test_reportviews.py::TestOutEdgeDataView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestOutEdgeDataView::test_contains",
"networkx/classes/tests/test_reportviews.py::TestOutEdgeDataView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestOutEdgeDataView::test_len",
"networkx/classes/tests/test_reportviews.py::TestInEdgeDataView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestInEdgeDataView::test_str",
"networkx/classes/tests/test_reportviews.py::TestInEdgeDataView::test_iterdata",
"networkx/classes/tests/test_reportviews.py::TestInEdgeDataView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestInEdgeDataView::test_contains",
"networkx/classes/tests/test_reportviews.py::TestInEdgeDataView::test_len",
"networkx/classes/tests/test_reportviews.py::TestInEdgeDataView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeDataView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeDataView::test_str",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeDataView::test_iterdata",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeDataView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeDataView::test_contains",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeDataView::test_len",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeDataView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeDataView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeDataView::test_str",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeDataView::test_iterdata",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeDataView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeDataView::test_contains",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeDataView::test_len",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeDataView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeDataView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeDataView::test_str",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeDataView::test_iterdata",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeDataView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeDataView::test_contains",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeDataView::test_len",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeDataView::test_repr"
]
| [
"networkx/classes/tests/test_reportviews.py::TestNodeView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestNodeView::test_str",
"networkx/classes/tests/test_reportviews.py::TestNodeView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestNodeView::test_contains",
"networkx/classes/tests/test_reportviews.py::TestNodeView::test_getitem",
"networkx/classes/tests/test_reportviews.py::TestNodeView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestNodeView::test_call",
"networkx/classes/tests/test_reportviews.py::TestNodeDataView::test_viewtype",
"networkx/classes/tests/test_reportviews.py::TestNodeDataView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestNodeDataView::test_str",
"networkx/classes/tests/test_reportviews.py::TestNodeDataView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestNodeDataView::test_contains",
"networkx/classes/tests/test_reportviews.py::TestNodeDataView::test_getitem",
"networkx/classes/tests/test_reportviews.py::TestNodeDataView::test_iter",
"networkx/classes/tests/test_reportviews.py::test_nodedataview_unhashable",
"networkx/classes/tests/test_reportviews.py::TestEdgeView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestEdgeView::test_str",
"networkx/classes/tests/test_reportviews.py::TestEdgeView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestEdgeView::test_call",
"networkx/classes/tests/test_reportviews.py::TestEdgeView::test_data",
"networkx/classes/tests/test_reportviews.py::TestEdgeView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestEdgeView::test_contains",
"networkx/classes/tests/test_reportviews.py::TestEdgeView::test_and",
"networkx/classes/tests/test_reportviews.py::TestEdgeView::test_or",
"networkx/classes/tests/test_reportviews.py::TestEdgeView::test_xor",
"networkx/classes/tests/test_reportviews.py::TestEdgeView::test_sub",
"networkx/classes/tests/test_reportviews.py::TestOutEdgeView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestOutEdgeView::test_str",
"networkx/classes/tests/test_reportviews.py::TestOutEdgeView::test_call",
"networkx/classes/tests/test_reportviews.py::TestOutEdgeView::test_data",
"networkx/classes/tests/test_reportviews.py::TestOutEdgeView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestOutEdgeView::test_contains",
"networkx/classes/tests/test_reportviews.py::TestOutEdgeView::test_len",
"networkx/classes/tests/test_reportviews.py::TestOutEdgeView::test_and",
"networkx/classes/tests/test_reportviews.py::TestOutEdgeView::test_or",
"networkx/classes/tests/test_reportviews.py::TestOutEdgeView::test_xor",
"networkx/classes/tests/test_reportviews.py::TestOutEdgeView::test_sub",
"networkx/classes/tests/test_reportviews.py::TestOutEdgeView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestInEdgeView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestInEdgeView::test_str",
"networkx/classes/tests/test_reportviews.py::TestInEdgeView::test_call",
"networkx/classes/tests/test_reportviews.py::TestInEdgeView::test_data",
"networkx/classes/tests/test_reportviews.py::TestInEdgeView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestInEdgeView::test_contains",
"networkx/classes/tests/test_reportviews.py::TestInEdgeView::test_len",
"networkx/classes/tests/test_reportviews.py::TestInEdgeView::test_and",
"networkx/classes/tests/test_reportviews.py::TestInEdgeView::test_or",
"networkx/classes/tests/test_reportviews.py::TestInEdgeView::test_xor",
"networkx/classes/tests/test_reportviews.py::TestInEdgeView::test_sub",
"networkx/classes/tests/test_reportviews.py::TestInEdgeView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeView::test_contains",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeView::test_str",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeView::test_call",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeView::test_data",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeView::test_iterkeys",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeView::test_or",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeView::test_sub",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeView::test_xor",
"networkx/classes/tests/test_reportviews.py::TestMultiEdgeView::test_and",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeView::test_contains",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeView::test_len",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeView::test_str",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeView::test_call",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeView::test_data",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeView::test_iterkeys",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeView::test_or",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeView::test_sub",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeView::test_xor",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeView::test_and",
"networkx/classes/tests/test_reportviews.py::TestOutMultiEdgeView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeView::test_contains",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeView::test_len",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeView::test_str",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeView::test_call",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeView::test_data",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeView::test_iterkeys",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeView::test_or",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeView::test_sub",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeView::test_xor",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeView::test_and",
"networkx/classes/tests/test_reportviews.py::TestInMultiEdgeView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestDegreeView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestDegreeView::test_str",
"networkx/classes/tests/test_reportviews.py::TestDegreeView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestDegreeView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestDegreeView::test_nbunch",
"networkx/classes/tests/test_reportviews.py::TestDegreeView::test_getitem",
"networkx/classes/tests/test_reportviews.py::TestDegreeView::test_weight",
"networkx/classes/tests/test_reportviews.py::TestDegreeView::test_len",
"networkx/classes/tests/test_reportviews.py::TestDiDegreeView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestDiDegreeView::test_str",
"networkx/classes/tests/test_reportviews.py::TestDiDegreeView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestDiDegreeView::test_nbunch",
"networkx/classes/tests/test_reportviews.py::TestDiDegreeView::test_getitem",
"networkx/classes/tests/test_reportviews.py::TestDiDegreeView::test_weight",
"networkx/classes/tests/test_reportviews.py::TestDiDegreeView::test_len",
"networkx/classes/tests/test_reportviews.py::TestDiDegreeView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestOutDegreeView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestOutDegreeView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestOutDegreeView::test_len",
"networkx/classes/tests/test_reportviews.py::TestOutDegreeView::test_str",
"networkx/classes/tests/test_reportviews.py::TestOutDegreeView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestOutDegreeView::test_nbunch",
"networkx/classes/tests/test_reportviews.py::TestOutDegreeView::test_getitem",
"networkx/classes/tests/test_reportviews.py::TestOutDegreeView::test_weight",
"networkx/classes/tests/test_reportviews.py::TestInDegreeView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestInDegreeView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestInDegreeView::test_len",
"networkx/classes/tests/test_reportviews.py::TestInDegreeView::test_str",
"networkx/classes/tests/test_reportviews.py::TestInDegreeView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestInDegreeView::test_nbunch",
"networkx/classes/tests/test_reportviews.py::TestInDegreeView::test_getitem",
"networkx/classes/tests/test_reportviews.py::TestInDegreeView::test_weight",
"networkx/classes/tests/test_reportviews.py::TestMultiDegreeView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestMultiDegreeView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestMultiDegreeView::test_len",
"networkx/classes/tests/test_reportviews.py::TestMultiDegreeView::test_str",
"networkx/classes/tests/test_reportviews.py::TestMultiDegreeView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestMultiDegreeView::test_nbunch",
"networkx/classes/tests/test_reportviews.py::TestMultiDegreeView::test_getitem",
"networkx/classes/tests/test_reportviews.py::TestMultiDegreeView::test_weight",
"networkx/classes/tests/test_reportviews.py::TestDiMultiDegreeView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestDiMultiDegreeView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestDiMultiDegreeView::test_len",
"networkx/classes/tests/test_reportviews.py::TestDiMultiDegreeView::test_str",
"networkx/classes/tests/test_reportviews.py::TestDiMultiDegreeView::test_nbunch",
"networkx/classes/tests/test_reportviews.py::TestDiMultiDegreeView::test_getitem",
"networkx/classes/tests/test_reportviews.py::TestDiMultiDegreeView::test_weight",
"networkx/classes/tests/test_reportviews.py::TestDiMultiDegreeView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestOutMultiDegreeView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestOutMultiDegreeView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestOutMultiDegreeView::test_len",
"networkx/classes/tests/test_reportviews.py::TestOutMultiDegreeView::test_str",
"networkx/classes/tests/test_reportviews.py::TestOutMultiDegreeView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestOutMultiDegreeView::test_nbunch",
"networkx/classes/tests/test_reportviews.py::TestOutMultiDegreeView::test_getitem",
"networkx/classes/tests/test_reportviews.py::TestOutMultiDegreeView::test_weight",
"networkx/classes/tests/test_reportviews.py::TestInMultiDegreeView::test_pickle",
"networkx/classes/tests/test_reportviews.py::TestInMultiDegreeView::test_iter",
"networkx/classes/tests/test_reportviews.py::TestInMultiDegreeView::test_len",
"networkx/classes/tests/test_reportviews.py::TestInMultiDegreeView::test_str",
"networkx/classes/tests/test_reportviews.py::TestInMultiDegreeView::test_repr",
"networkx/classes/tests/test_reportviews.py::TestInMultiDegreeView::test_nbunch",
"networkx/classes/tests/test_reportviews.py::TestInMultiDegreeView::test_getitem",
"networkx/classes/tests/test_reportviews.py::TestInMultiDegreeView::test_weight"
]
| []
| BSD 3-Clause | 2,021 | [
"networkx/classes/reportviews.py"
]
| [
"networkx/classes/reportviews.py"
]
|
|
PlasmaPy__PlasmaPy-228 | 82eece6d5648641af1878f6846240dbf2a37a190 | 2018-01-07 15:28:48 | 82eece6d5648641af1878f6846240dbf2a37a190 | pep8speaks: Hello @siddharth185! Thanks for submitting the PR.
- In the file [`plasmapy/mathematics/mathematics.py`](https://github.com/PlasmaPy/PlasmaPy/blob/a19f915b468467cd9564f5417d5d38077ec4698a/plasmapy/mathematics/mathematics.py), following are the PEP8 issues :
> [Line 41:80](https://github.com/PlasmaPy/PlasmaPy/blob/a19f915b468467cd9564f5417d5d38077ec4698a/plasmapy/mathematics/mathematics.py#L41): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (82 > 79 characters)
> [Line 119:80](https://github.com/PlasmaPy/PlasmaPy/blob/a19f915b468467cd9564f5417d5d38077ec4698a/plasmapy/mathematics/mathematics.py#L119): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (87 > 79 characters)
StanczakDominik: As for the test failures... suddenly all the quantity docstrings seem to be failing, but @siddharth185 didn't change anything related to that. I checked on master at my own fork and well hello there, they're failing on master. This is looking like an @astropy change... I'm looking into it, because I think a potential change on their end may have fixed #135.
StanczakDominik: The newest changes on master should solve the docstring problem.
StanczakDominik: I'm pretty sure this can be merged after it's rebased on top of master! I'll make sure and merge it tonight.
StanczakDominik: Merging it. Thanks, @siddharth185 ! | diff --git a/plasmapy/mathematics/mathematics.py b/plasmapy/mathematics/mathematics.py
index b20ed5e2..58c10e5f 100644
--- a/plasmapy/mathematics/mathematics.py
+++ b/plasmapy/mathematics/mathematics.py
@@ -3,6 +3,7 @@
import numpy as np
from scipy import special
from astropy import units as u
+from scipy.special import wofz as Faddeeva_function
def plasma_dispersion_func(zeta):
@@ -57,9 +58,9 @@ def plasma_dispersion_func(zeta):
>>> plasma_dispersion_func(0)
1.7724538509055159j
>>> plasma_dispersion_func(1j)
- 0.7578721561413119j
+ 0.757872156141312j
>>> plasma_dispersion_func(-1.52+0.47j)
- (0.6088888957234255+0.3349458388287403j)
+ (0.6088888957234254+0.33494583882874024j)
"""
@@ -79,7 +80,7 @@ def plasma_dispersion_func(zeta):
raise ValueError("The argument to plasma_dispersion_function is "
"not finite.")
- Z = 1j * np.sqrt(np.pi) * np.exp(-zeta**2) * (1.0 + special.erf(1j * zeta))
+ Z = 1j * np.sqrt(np.pi) * Faddeeva_function(zeta)
return Z
@@ -124,9 +125,9 @@ def plasma_dispersion_func_deriv(zeta):
>>> plasma_dispersion_func_deriv(0)
(-2+0j)
>>> plasma_dispersion_func_deriv(1j)
- (-0.48425568771737626+0j)
+ (-0.48425568771737604+0j)
>>> plasma_dispersion_func_deriv(-1.52+0.47j)
- (0.1658713314982294+0.4458797880593507j)
+ (0.16587133149822897+0.44587978805935047j)
"""
| Use Dawson function for dispersion
`plasma_dispersion_func` under `mathematics.py` currently uses `erf()` along with some other terms. This can be simplified to the Dawson function, [dawsn](https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.dawsn.html), and may even offer some minor speedups if scipy implements it in C code. | PlasmaPy/PlasmaPy | diff --git a/plasmapy/mathematics/tests/test_dispersion.py b/plasmapy/mathematics/tests/test_dispersion.py
index 9dabb8e6..1c7eb3ca 100644
--- a/plasmapy/mathematics/tests/test_dispersion.py
+++ b/plasmapy/mathematics/tests/test_dispersion.py
@@ -15,7 +15,7 @@
(0, 1j * np.sqrt(π)),
(1, -1.076_159_013_825_536_8 + 0.652_049_332_173_292_2j),
(1j, 0.757_872_156_141_311_87j),
- (1.2 + 4.4j, -0.054_246_146_372_377_471 + 0.207_960_589_336_958_13j),
+ (1.2 + 4.4j, -0.054_246_157_069_223_27+0.207_960_584_359_855_62j),
(9.2j, plasma_dispersion_func(9.2j * units.dimensionless_unscaled)),
(5.4 - 3.1j, -0.139_224_873_051_713_11 - 0.082_067_822_640_155_802j),
(9.9 - 10j, 2.013_835_257_947_027_6 - 25.901_274_737_989_727j),
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/automated-code-tests.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | astropy==6.0.1
astropy-iers-data==0.2025.3.31.0.36.18
certifi==2025.1.31
charset-normalizer==3.4.1
coverage==7.8.0
coveralls==4.0.1
docopt==0.6.2
exceptiongroup==1.2.2
flake8==7.2.0
idna==3.10
iniconfig==2.1.0
mccabe==0.7.0
numpy==1.26.4
packaging==24.2
-e git+https://github.com/PlasmaPy/PlasmaPy.git@82eece6d5648641af1878f6846240dbf2a37a190#egg=plasmapy
pluggy==1.5.0
pycodestyle==2.13.0
pyerfa==2.0.1.5
pyflakes==3.3.2
pytest==8.3.5
PyYAML==6.0.2
requests==2.32.3
scipy==1.13.1
tomli==2.2.1
urllib3==2.3.0
| name: PlasmaPy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- astropy==6.0.1
- astropy-iers-data==0.2025.3.31.0.36.18
- certifi==2025.1.31
- charset-normalizer==3.4.1
- coverage==7.8.0
- coveralls==4.0.1
- docopt==0.6.2
- exceptiongroup==1.2.2
- flake8==7.2.0
- idna==3.10
- iniconfig==2.1.0
- mccabe==0.7.0
- numpy==1.26.4
- packaging==24.2
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyerfa==2.0.1.5
- pyflakes==3.3.2
- pytest==8.3.5
- pyyaml==6.0.2
- requests==2.32.3
- scipy==1.13.1
- tomli==2.2.1
- urllib3==2.3.0
prefix: /opt/conda/envs/PlasmaPy
| [
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func[(1.2+4.4j)-(-0.05424615706922327+0.20796058435985562j)]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func[9.2j-0.10806460304119532j]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_deriv[9j-(-0.012123822585585753+0j)]"
]
| [
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_errors[-TypeError]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_errors[w1-UnitsError]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_errors[inf-ValueError]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_errors[nan-ValueError]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_deriv_errors[-TypeError]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_deriv_errors[w1-UnitsError]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_deriv_errors[inf-ValueError]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_deriv_errors[nan-ValueError]"
]
| [
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func[0-1.7724538509055159j]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func[1-(-1.0761590138255368+0.6520493321732922j)]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func[1j-0.7578721561413119j]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func[(5.4-3.1j)-(-0.1392248730517131-0.0820678226401558j)]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func[(9.9-10j)-(2.0138352579470276-25.901274737989727j)]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func[(4.5-10j)-(-1.3674950463400947e+35-6.853923234842271e+34j)]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_power_series_expansion",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_roots",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_deriv[0--2]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_deriv[1-(0.152318-1.3041j)]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_deriv[1j--0.484257]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_deriv[(1.2+4.4j)-(-0.0397561-0.0217392j)]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_deriv[(5.4-3.1j)-(0.0124491+0.0231383j)]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_deriv[(9.9-10j)-(476.153+553.121j)]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_deriv[(5+7j)-(-0.0045912-0.0126104j)]",
"plasmapy/mathematics/tests/test_dispersion.py::test_plasma_dispersion_func_deriv[(4.5-10j)-(2.60153e+36-2.11814e+36j)]"
]
| []
| BSD 3-Clause "New" or "Revised" License | 2,022 | [
"plasmapy/mathematics/mathematics.py"
]
| [
"plasmapy/mathematics/mathematics.py"
]
|
graphql-python__graphene-641 | 38db32e4f2d57f54a77879f9277ad4408792c881 | 2018-01-08 17:42:57 | 8c7ca74c6f083007a7c83c843f494357aec69371 | diff --git a/graphene/relay/connection.py b/graphene/relay/connection.py
index afe6ffb..3e2e9ad 100644
--- a/graphene/relay/connection.py
+++ b/graphene/relay/connection.py
@@ -73,7 +73,7 @@ class Connection(ObjectType):
edge = type(edge_name, edge_bases, {})
cls.Edge = edge
- _meta.name = name
+ options['name'] = name
_meta.node = node
_meta.fields = OrderedDict([
('page_info', Field(PageInfo, name='pageInfo', required=True)),
| Bug: Name of a connection is always the name of the class
```
class BranchesOnClient(relay.Connection):
pass
```
Based on the implementation of `relay.Connection`, I expect the default name to be `BranchesOnClientConnection`.
```
class BranchesOnClient(relay.Connection, name="Foo"):
pass
```
Now I expect the type name to be `Foo`.
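Neither expectation holds. Here is a stripped-down sketch of the mechanism (hypothetical classes, with the hook invoked by hand instead of by graphene's metaclass machinery):
```
class Meta:
    name = None

class BaseType:
    @classmethod
    def __init_subclass_with_meta__(cls, name=None, _meta=None, **options):
        _meta.name = name or cls.__name__  # falls back to the bare class name
        cls._meta = _meta

class Connection(BaseType):
    @classmethod
    def __init_subclass_with_meta__(cls, name=None, **options):
        _meta = Meta()
        if not name:
            name = cls.__name__ + "Connection"
        _meta.name = name  # `name` is consumed here and never forwarded...
        super().__init_subclass_with_meta__(_meta=_meta, **options)
        # ...so BaseType sees name=None and overwrites it with cls.__name__

class BranchesOnClient(Connection):
    pass

BranchesOnClient.__init_subclass_with_meta__()
print(BranchesOnClient._meta.name)  # BranchesOnClient
```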
As the sketch shows, in both cases the name will be `BranchesOnClient`. This is because `BaseType` sets `_meta.name` to the class name when no `name` argument is passed, and no `name` argument ever reaches it: `relay.Connection` already caught it, assigned it to `_meta`, and removed it from the `**kwargs` dict that is passed on through the ``__init_subclass_with_meta__`` chain. | graphql-python/graphene | diff --git a/graphene/relay/tests/test_connection.py b/graphene/relay/tests/test_connection.py
index b6a26df..3697888 100644
--- a/graphene/relay/tests/test_connection.py
+++ b/graphene/relay/tests/test_connection.py
@@ -52,6 +52,21 @@ def test_connection_inherit_abstracttype():
assert list(fields.keys()) == ['page_info', 'edges', 'extra']
+def test_connection_name():
+ custom_name = "MyObjectCustomNameConnection"
+
+ class BaseConnection(object):
+ extra = String()
+
+ class MyObjectConnection(BaseConnection, Connection):
+
+ class Meta:
+ node = MyObject
+ name = custom_name
+
+ assert MyObjectConnection._meta.name == custom_name
+
+
def test_edge():
class MyObjectConnection(Connection):
@@ -122,9 +137,10 @@ def test_connectionfield_node_deprecated():
field = ConnectionField(MyObject)
with pytest.raises(Exception) as exc_info:
field.type
-
+
assert "ConnectionField's now need a explicit ConnectionType for Nodes." in str(exc_info.value)
+
def test_connectionfield_custom_args():
class MyObjectConnection(Connection):
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 2.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
coveralls==3.3.1
docopt==0.6.2
fastdiff==0.3.0
-e git+https://github.com/graphql-python/graphene.git@38db32e4f2d57f54a77879f9277ad4408792c881#egg=graphene
graphql-core==2.3.2
graphql-relay==0.4.5
idna==3.10
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
iso8601==1.1.0
mock==5.2.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
promise==2.3
py @ file:///opt/conda/conda-bld/py_1644396412707/work
py-cpuinfo==9.0.0
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-benchmark==3.4.1
pytest-cov==4.0.0
pytest-mock==3.6.1
pytz==2025.2
requests==2.27.1
Rx==1.6.3
six==1.17.0
snapshottest==0.6.0
termcolor==1.1.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.26.20
wasmer==1.1.0
wasmer-compiler-cranelift==1.1.0
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: graphene
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- charset-normalizer==2.0.12
- coverage==6.2
- coveralls==3.3.1
- docopt==0.6.2
- fastdiff==0.3.0
- graphql-core==2.3.2
- graphql-relay==0.4.5
- idna==3.10
- iso8601==1.1.0
- mock==5.2.0
- promise==2.3
- py-cpuinfo==9.0.0
- pytest-benchmark==3.4.1
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytz==2025.2
- requests==2.27.1
- rx==1.6.3
- six==1.17.0
- snapshottest==0.6.0
- termcolor==1.1.0
- tomli==1.2.3
- urllib3==1.26.20
- wasmer==1.1.0
- wasmer-compiler-cranelift==1.1.0
prefix: /opt/conda/envs/graphene
| [
"graphene/relay/tests/test_connection.py::test_connection_name"
]
| []
| [
"graphene/relay/tests/test_connection.py::test_connection",
"graphene/relay/tests/test_connection.py::test_connection_inherit_abstracttype",
"graphene/relay/tests/test_connection.py::test_edge",
"graphene/relay/tests/test_connection.py::test_edge_with_bases",
"graphene/relay/tests/test_connection.py::test_pageinfo",
"graphene/relay/tests/test_connection.py::test_connectionfield",
"graphene/relay/tests/test_connection.py::test_connectionfield_node_deprecated",
"graphene/relay/tests/test_connection.py::test_connectionfield_custom_args"
]
| []
| MIT License | 2,024 | [
"graphene/relay/connection.py"
]
| [
"graphene/relay/connection.py"
]
|
|
networkx__networkx-2819 | 51aebc8fb7faf1f644bac72513352941a6e7f895 | 2018-01-08 20:45:26 | 93b4b9227aa8a7ac4cbd946cf3dae3b168e17b45 | diff --git a/networkx/algorithms/components/attracting.py b/networkx/algorithms/components/attracting.py
index c1abc4822..e0861f63c 100644
--- a/networkx/algorithms/components/attracting.py
+++ b/networkx/algorithms/components/attracting.py
@@ -8,6 +8,7 @@
#
# Authors: Christopher Ellison
"""Attracting components."""
+import warnings as _warnings
import networkx as nx
from networkx.utils.decorators import not_implemented_for
@@ -20,7 +21,7 @@ __all__ = ['number_attracting_components',
@not_implemented_for('undirected')
def attracting_components(G):
- """Generates a list of attracting components in `G`.
+ """Generates the attracting components in `G`.
An attracting component in a directed graph `G` is a strongly connected
component with the property that a random walker on the graph will never
@@ -49,7 +50,6 @@ def attracting_components(G):
--------
number_attracting_components
is_attracting_component
- attracting_component_subgraphs
"""
scc = list(nx.strongly_connected_components(G))
@@ -82,11 +82,9 @@ def number_attracting_components(G):
--------
attracting_components
is_attracting_component
- attracting_component_subgraphs
"""
- n = len(list(attracting_components(G)))
- return n
+ return sum(1 for ac in attracting_components(G))
@not_implemented_for('undirected')
@@ -112,49 +110,25 @@ def is_attracting_component(G):
--------
attracting_components
number_attracting_components
- attracting_component_subgraphs
"""
ac = list(attracting_components(G))
- if len(ac[0]) == len(G):
- attracting = True
- else:
- attracting = False
- return attracting
+ if len(ac) == 1:
+ return len(ac[0]) == len(G)
+ return False
@not_implemented_for('undirected')
def attracting_component_subgraphs(G, copy=True):
- """Generates a list of attracting component subgraphs from `G`.
-
- Parameters
- ----------
- G : DiGraph, MultiDiGraph
- The graph to be analyzed.
-
- Returns
- -------
- subgraphs : list
- A list of node-induced subgraphs of the attracting components of `G`.
-
- copy : bool
- If copy is True, graph, node, and edge attributes are copied to the
- subgraphs.
-
- Raises
- ------
- NetworkXNotImplemented :
- If the input graph is undirected.
-
- See Also
- --------
- attracting_components
- number_attracting_components
- is_attracting_component
+ """DEPRECATED: Use ``(G.subgraph(c) for c in attracting_components(G))``
+ Or ``(G.subgraph(c).copy() for c in attracting_components(G))``
"""
- for ac in attracting_components(G):
+ msg = "attracting_component_subgraphs is deprecated and will be removed" \
+ "in 2.2. Use (G.subgraph(c).copy() for c in attracting_components(G))"
+ _warnings.warn(msg, DeprecationWarning)
+ for c in attracting_components(G):
if copy:
- yield G.subgraph(ac).copy()
+ yield G.subgraph(c).copy()
else:
- yield G.subgraph(ac)
+ yield G.subgraph(c)
diff --git a/networkx/algorithms/components/biconnected.py b/networkx/algorithms/components/biconnected.py
index 255cd25e5..587249145 100644
--- a/networkx/algorithms/components/biconnected.py
+++ b/networkx/algorithms/components/biconnected.py
@@ -10,6 +10,7 @@
# Dan Schult ([email protected])
# Aric Hagberg ([email protected])
"""Biconnected components and articulation points."""
+import warnings as _warnings
from itertools import chain
import networkx as nx
from networkx.utils.decorators import not_implemented_for
@@ -62,7 +63,6 @@ def is_biconnected(G):
biconnected_components
articulation_points
biconnected_component_edges
- biconnected_component_subgraphs
is_strongly_connected
is_weakly_connected
is_connected
@@ -89,9 +89,12 @@ def is_biconnected(G):
"""
bcc = list(biconnected_components(G))
- if not bcc: # No bicomponents (it could be an empty graph)
- return False
- return len(bcc[0]) == len(G)
+ if len(bcc) == 1:
+ return len(bcc[0]) == len(G)
+ return False # Multiple bicomponents or No bicomponents (empty graph?)
+# if len(bcc) == 0: # No bicomponents (it could be an empty graph)
+# return False
+# return len(bcc[0]) == len(G)
@not_implemented_for('directed')
@@ -142,7 +145,6 @@ def biconnected_component_edges(G):
is_biconnected,
biconnected_components,
articulation_points,
- biconnected_component_subgraphs
Notes
-----
@@ -236,7 +238,6 @@ def biconnected_components(G):
is_biconnected
articulation_points
biconnected_component_edges
- biconnected_component_subgraphs
Notes
-----
@@ -264,96 +265,18 @@ def biconnected_components(G):
@not_implemented_for('directed')
def biconnected_component_subgraphs(G, copy=True):
- """Return a generator of graphs, one graph for each biconnected component
- of the input graph.
-
- Biconnected components are maximal subgraphs such that the removal of a
- node (and all edges incident on that node) will not disconnect the
- subgraph. Note that nodes may be part of more than one biconnected
- component. Those nodes are articulation points, or cut vertices. The
- removal of articulation points will increase the number of connected
- components of the graph.
-
- Notice that by convention a dyad is considered a biconnected component.
-
- Parameters
- ----------
- G : NetworkX Graph
- An undirected graph.
-
- Returns
- -------
- graphs : generator
- Generator of graphs, one graph for each biconnected component.
-
- Raises
- ------
- NetworkXNotImplemented :
- If the input graph is not undirected.
-
- Examples
- --------
-
- >>> G = nx.lollipop_graph(5, 1)
- >>> print(nx.is_biconnected(G))
- False
- >>> bicomponents = list(nx.biconnected_component_subgraphs(G))
- >>> len(bicomponents)
- 2
- >>> G.add_edge(0, 5)
- >>> print(nx.is_biconnected(G))
- True
- >>> bicomponents = list(nx.biconnected_component_subgraphs(G))
- >>> len(bicomponents)
- 1
-
- You can generate a sorted list of biconnected components, largest
- first, using sort.
-
- >>> G.remove_edge(0, 5)
- >>> [len(c) for c in sorted(nx.biconnected_component_subgraphs(G),
- ... key=len, reverse=True)]
- [5, 2]
-
- If you only want the largest connected component, it's more
- efficient to use max instead of sort.
-
- >>> Gc = max(nx.biconnected_component_subgraphs(G), key=len)
-
- See Also
- --------
- is_biconnected
- articulation_points
- biconnected_component_edges
- biconnected_components
-
- Notes
- -----
- The algorithm to find articulation points and biconnected
- components is implemented using a non-recursive depth-first-search
- (DFS) that keeps track of the highest level that back edges reach
- in the DFS tree. A node `n` is an articulation point if, and only
- if, there exists a subtree rooted at `n` such that there is no
- back edge from any successor of `n` that links to a predecessor of
- `n` in the DFS tree. By keeping track of all the edges traversed
- by the DFS we can obtain the biconnected components because all
- edges of a bicomponent will be traversed consecutively between
- articulation points.
-
- Graph, node, and edge attributes are copied to the subgraphs.
-
- References
- ----------
- .. [1] Hopcroft, J.; Tarjan, R. (1973).
- "Efficient algorithms for graph manipulation".
- Communications of the ACM 16: 372–378. doi:10.1145/362248.362272
+ """DEPRECATED: Use ``(G.subgraph(c) for c in biconnected_components(G))``
+ Or ``(G.subgraph(c).copy() for c in biconnected_components(G))``
"""
- for comp_nodes in biconnected_components(G):
+ msg = "connected_component_subgraphs is deprecated and will be removed" \
+ "in 2.2. Use (G.subgraph(c).copy() for c in biconnected_components(G))"
+ _warnings.warn(msg, DeprecationWarning)
+ for c in biconnected_components(G):
if copy:
- yield G.subgraph(comp_nodes).copy()
+ yield G.subgraph(c).copy()
else:
- yield G.subgraph(comp_nodes)
+ yield G.subgraph(c)
@not_implemented_for('directed')
@@ -402,7 +325,6 @@ def articulation_points(G):
is_biconnected
biconnected_components
biconnected_component_edges
- biconnected_component_subgraphs
Notes
-----
diff --git a/networkx/algorithms/components/connected.py b/networkx/algorithms/components/connected.py
index 2d811ad28..f7ea55587 100644
--- a/networkx/algorithms/components/connected.py
+++ b/networkx/algorithms/components/connected.py
@@ -10,6 +10,7 @@
# Aric Hagberg ([email protected])
# Christopher Ellison
"""Connected components."""
+import warnings as _warnings
import networkx as nx
from networkx.utils.decorators import not_implemented_for
from ...utils import arbitrary_element
@@ -76,49 +77,13 @@ def connected_components(G):
@not_implemented_for('directed')
def connected_component_subgraphs(G, copy=True):
- """Generate connected components as subgraphs.
-
- Parameters
- ----------
- G : NetworkX graph
- An undirected graph.
-
- copy: bool (default=True)
- If True make a copy of the graph attributes
-
- Returns
- -------
- comp : generator
- A generator of graphs, one for each connected component of G.
-
- Raises
- ------
- NetworkXNotImplemented:
- If G is directed.
-
- Examples
- --------
- >>> G = nx.path_graph(4)
- >>> G.add_edge(5,6)
- >>> graphs = list(nx.connected_component_subgraphs(G))
-
- If you only want the largest connected component, it's more
- efficient to use max instead of sort:
-
- >>> Gc = max(nx.connected_component_subgraphs(G), key=len)
-
- See Also
- --------
- connected_components
- strongly_connected_component_subgraphs
- weakly_connected_component_subgraphs
-
- Notes
- -----
- For undirected graphs only.
- Graph, node, and edge attributes are copied to the subgraphs by default.
+ """DEPRECATED: Use ``(G.subgraph(c) for c in connected_components(G))``
+ Or ``(G.subgraph(c).copy() for c in connected_components(G))``
"""
+ msg = "connected_component_subgraphs is deprecated and will be removed" \
+ "in 2.2. Use (G.subgraph(c).copy() for c in connected_components(G))"
+ _warnings.warn(msg, DeprecationWarning)
for c in connected_components(G):
if copy:
yield G.subgraph(c).copy()
@@ -150,12 +115,12 @@ def number_connected_components(G):
For undirected graphs only.
"""
- return len(list(connected_components(G)))
+ return sum(1 for cc in connected_components(G))
@not_implemented_for('directed')
def is_connected(G):
- """Return True if the graph is connected, false otherwise.
+ """Return True if the graph is connected, False otherwise.
Parameters
----------
@@ -194,12 +159,12 @@ def is_connected(G):
if len(G) == 0:
raise nx.NetworkXPointlessConcept('Connectivity is undefined ',
'for the null graph.')
- return len(set(_plain_bfs(G, arbitrary_element(G)))) == len(G)
+ return sum(1 for node in _plain_bfs(G, arbitrary_element(G))) == len(G)
@not_implemented_for('directed')
def node_connected_component(G, n):
- """Return the nodes in the component of graph containing node n.
+ """Return the set of nodes in the component of graph containing node n.
Parameters
----------
diff --git a/networkx/algorithms/components/strongly_connected.py b/networkx/algorithms/components/strongly_connected.py
index 9b26d3a84..4c2e37dd9 100644
--- a/networkx/algorithms/components/strongly_connected.py
+++ b/networkx/algorithms/components/strongly_connected.py
@@ -11,6 +11,7 @@
# Christopher Ellison
# Ben Edwards ([email protected])
"""Strongly connected components."""
+import warnings as _warnings
import networkx as nx
from networkx.utils.decorators import not_implemented_for
@@ -267,54 +268,18 @@ def strongly_connected_components_recursive(G):
@not_implemented_for('undirected')
def strongly_connected_component_subgraphs(G, copy=True):
- """Generate strongly connected components as subgraphs.
-
- Parameters
- ----------
- G : NetworkX Graph
- A directed graph.
-
- copy : boolean, optional
- if copy is True, Graph, node, and edge attributes are copied to
- the subgraphs.
-
- Returns
- -------
- comp : generator of graphs
- A generator of graphs, one for each strongly connected component of G.
-
- Raises
- ------
- NetworkXNotImplemented:
- If G is undirected.
-
- Examples
- --------
- Generate a sorted list of strongly connected components, largest first.
-
- >>> G = nx.cycle_graph(4, create_using=nx.DiGraph())
- >>> nx.add_cycle(G, [10, 11, 12])
- >>> [len(Gc) for Gc in sorted(nx.strongly_connected_component_subgraphs(G),
- ... key=len, reverse=True)]
- [4, 3]
-
- If you only want the largest component, it's more efficient to
- use max instead of sort.
-
- >>> Gc = max(nx.strongly_connected_component_subgraphs(G), key=len)
-
- See Also
- --------
- strongly_connected_components
- connected_component_subgraphs
- weakly_connected_component_subgraphs
+ """DEPRECATED: Use ``(G.subgraph(c) for c in strongly_connected_components(G))``
+ Or ``(G.subgraph(c).copy() for c in strongly_connected_components(G))``
"""
- for comp in strongly_connected_components(G):
+ msg = "strongly_connected_component_subgraphs is deprecated and will be removed in 2.2" \
+ "use (G.subgraph(c).copy() for c in strongly_connected_components(G))"
+ _warnings.warn(msg, DeprecationWarning)
+ for c in strongly_connected_components(G):
if copy:
- yield G.subgraph(comp).copy()
+ yield G.subgraph(c).copy()
else:
- yield G.subgraph(comp)
+ yield G.subgraph(c)
@not_implemented_for('undirected')
@@ -346,7 +311,7 @@ def number_strongly_connected_components(G):
-----
For directed graphs only.
"""
- return len(list(strongly_connected_components(G)))
+ return sum(1 for scc in strongly_connected_components(G))
@not_implemented_for('undirected')
@@ -434,7 +399,10 @@ def condensation(G, scc=None):
mapping = {}
members = {}
C = nx.DiGraph()
- i = 0 # required if G is empty
+ # Add mapping dict as graph attribute
+ C.graph['mapping'] = mapping
+ if len(G) == 0:
+ return C
for i, component in enumerate(scc):
members[i] = component
mapping.update((n, i) for n in component)
@@ -444,6 +412,4 @@ def condensation(G, scc=None):
if mapping[u] != mapping[v])
# Add a list of members (ie original nodes) to each node (ie scc) in C.
nx.set_node_attributes(C, members, 'members')
- # Add mapping dict as graph attribute
- C.graph['mapping'] = mapping
return C
diff --git a/networkx/algorithms/components/weakly_connected.py b/networkx/algorithms/components/weakly_connected.py
index eea3cfd69..012e67c48 100644
--- a/networkx/algorithms/components/weakly_connected.py
+++ b/networkx/algorithms/components/weakly_connected.py
@@ -9,6 +9,7 @@
# Authors: Aric Hagberg ([email protected])
# Christopher Ellison
"""Weakly connected components."""
+import warnings as _warnings
import networkx as nx
from networkx.utils.decorators import not_implemented_for
@@ -103,63 +104,23 @@ def number_weakly_connected_components(G):
For directed graphs only.
"""
- return len(list(weakly_connected_components(G)))
+ return sum(1 for wcc in weakly_connected_components(G))
@not_implemented_for('undirected')
def weakly_connected_component_subgraphs(G, copy=True):
- """Generate weakly connected components as subgraphs.
-
- Parameters
- ----------
- G : NetworkX graph
- A directed graph.
-
- copy: bool (default=True)
- If True make a copy of the graph attributes
-
- Returns
- -------
- comp : generator
- A generator of graphs, one for each weakly connected component of G.
-
- Raises
- ------
- NetworkXNotImplemented:
- If G is undirected.
-
- Examples
- --------
- Generate a sorted list of weakly connected components, largest first.
-
- >>> G = nx.path_graph(4, create_using=nx.DiGraph())
- >>> nx.add_path(G, [10, 11, 12])
- >>> [len(c) for c in sorted(nx.weakly_connected_component_subgraphs(G),
- ... key=len, reverse=True)]
- [4, 3]
-
- If you only want the largest component, it's more efficient to
- use max instead of sort:
-
- >>> Gc = max(nx.weakly_connected_component_subgraphs(G), key=len)
-
- See Also
- --------
- weakly_connected_components
- strongly_connected_component_subgraphs
- connected_component_subgraphs
-
- Notes
- -----
- For directed graphs only.
- Graph, node, and edge attributes are copied to the subgraphs by default.
+ """DEPRECATED: Use ``(G.subgraph(c) for c in weakly_connected_components(G))``
+ Or ``(G.subgraph(c).copy() for c in weakly_connected_components(G))``
"""
- for comp in weakly_connected_components(G):
+ msg = "weakly_connected_component_subgraphs is deprecated and will be removed in 2.2" \
+ "use (G.subgraph(c).copy() for c in weakly_connected_components(G))"
+ _warnings.warn(msg, DeprecationWarning)
+ for c in weakly_connected_components(G):
if copy:
- yield G.subgraph(comp).copy()
+ yield G.subgraph(c).copy()
else:
- yield G.subgraph(comp)
+ yield G.subgraph(c)
@not_implemented_for('undirected')
diff --git a/networkx/classes/digraph.py b/networkx/classes/digraph.py
index 0fdc8569e..3ffc8da71 100644
--- a/networkx/classes/digraph.py
+++ b/networkx/classes/digraph.py
@@ -136,8 +136,10 @@ class DiGraph(Graph):
>>> G.edges[1, 2]['weight'] = 4
Warning: we protect the graph data structure by making `G.edges[1, 2]` a
- read-only dict-like structure. Use 2 sets of brackets to add/change
- data attributes. (For multigraphs: `MG.edges[u, v, key][name] = value`).
+ read-only dict-like structure. However, you can assign to attributes
+ in e.g. `G.edges[1, 2]`. Thus, use 2 sets of brackets to add/change
+ data attributes: `G.edges[1, 2]['weight'] = 4`
+ (For multigraphs: `MG.edges[u, v, key][name] = value`).
**Shortcuts:**
diff --git a/networkx/classes/graph.py b/networkx/classes/graph.py
index b5cf78528..12e9b6aa8 100644
--- a/networkx/classes/graph.py
+++ b/networkx/classes/graph.py
@@ -145,9 +145,11 @@ class Graph(object):
>>> G[1][2]['weight'] = 4.7
>>> G.edges[1, 2]['weight'] = 4
- Warning: we protect the graph data structure by making `G.edges[1, 2]` a
- read-only dict-like structure. Use 2 sets of brackets to add/change
- data attributes. (For multigraphs: `MG.edges[u, v, key][name] = value`).
+ Warning: we protect the graph data structure by making `G.edges` a
+ read-only dict-like structure. However, you can assign to attributes
+ in e.g. `G.edges[1, 2]`. Thus, use 2 sets of brackets to add/change
+ data attributes: `G.edges[1, 2]['weight'] = 4`
+ (For multigraphs: `MG.edges[u, v, key][name] = value`).
**Shortcuts:**
diff --git a/networkx/classes/multidigraph.py b/networkx/classes/multidigraph.py
index 68dbb4333..b9849d217 100644
--- a/networkx/classes/multidigraph.py
+++ b/networkx/classes/multidigraph.py
@@ -141,8 +141,10 @@ class MultiDiGraph(MultiGraph, DiGraph):
>>> G.edges[1, 2, 0]['weight'] = 4
Warning: we protect the graph data structure by making `G.edges[1, 2]` a
- read-only dict-like structure. Use 2 sets of brackets to add/change
- data attributes. (For multigraphs: `MG.edges[u, v, key][name] = value`).
+ read-only dict-like structure. However, you can assign to attributes
+ in e.g. `G.edges[1, 2]`. Thus, use 2 sets of brackets to add/change
+ data attributes: `G.edges[1, 2]['weight'] = 4`
+ (For multigraphs: `MG.edges[u, v, key][name] = value`).
**Shortcuts:**
diff --git a/networkx/classes/multigraph.py b/networkx/classes/multigraph.py
index 9e5e9c1bd..ba705c5c7 100644
--- a/networkx/classes/multigraph.py
+++ b/networkx/classes/multigraph.py
@@ -140,8 +140,10 @@ class MultiGraph(Graph):
>>> G.edges[1, 2, 0]['weight'] = 4
Warning: we protect the graph data structure by making `G.edges[1, 2]` a
- read-only dict-like structure. Use 2 sets of brackets to add/change
- data attributes. (For multigraphs: `MG.edges[u, v, key][name] = value`).
+ read-only dict-like structure. However, you can assign to attributes
+ in e.g. `G.edges[1, 2]`. Thus, use 2 sets of brackets to add/change
+ data attributes: `G.edges[1, 2]['weight'] = 4`
+ (For multigraphs: `MG.edges[u, v, key][name] = value`).
**Shortcuts:**
@@ -762,9 +764,11 @@ class MultiGraph(Graph):
{'weight': 7}
Warning: we protect the graph data structure by making
- `G.edges[1, 2, key]` and `G[1][2][key]` read-only dict-like
- structures. You need to specify all edge info to assign to
- the edge data associated with that edge.
+ `G.edges` and `G[1][2]` read-only dict-like structures.
+ However, you can assign values to attributes in e.g.
+ `G.edges[1, 2, 'a']` or `G[1][2]['a']` using an additional
+ bracket as shown next. You need to specify all edge info
+ to assign to the edge data associated with an edge.
>>> G[0][1]['a']['weight'] = 10
>>> G.edges[0, 1, 'a']['weight'] = 10
diff --git a/networkx/convert_matrix.py b/networkx/convert_matrix.py
index b1c7f6193..67bca77f6 100644
--- a/networkx/convert_matrix.py
+++ b/networkx/convert_matrix.py
@@ -27,7 +27,6 @@ See Also
nx_agraph, nx_pydot
"""
-import warnings as _warnings
import itertools
import networkx as nx
from networkx.convert import _prep_create_using
diff --git a/tools/travis/build_docs.sh b/tools/travis/build_docs.sh
index 294a348d5..eb4899037 100755
--- a/tools/travis/build_docs.sh
+++ b/tools/travis/build_docs.sh
@@ -6,7 +6,6 @@ pip install --retries 3 -q -r requirements/doc.txt
export SPHINXCACHE=$HOME/.cache/sphinx
cd doc
make html
-make doctest
make latexpdf
cd ..
| Refactor components API
Following from the discussion on #1404, it would be good to remove the `[bi]connected_component_subgraphs` functions and make `[bi]connected_components` yield subgraphs (now they yield sets of nodes).
We decided not to make this change right now: on the one hand, to avoid overwhelming users with API changes (the 2.0 release will already come with many), and on the other, because generating subgraphs (with `copy=True`, which is the default) is currently much slower than generating sets of nodes.
We expect the speed of generating subgraphs to improve in the short term if we extend the #1164 approach to subgraph generation. Once we do that, we can revisit this and see whether the speed penalty for subgraph generation is a showstopper for this API change.
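For reference, a sketch of the replacement idiom that the deprecation messages in the patch above settle on, shown on a small throwaway graph:
```
import networkx as nx

G = nx.path_graph(4)
G.add_edge(5, 6)

# instead of nx.connected_component_subgraphs(G):
subgraphs = [G.subgraph(c).copy() for c in nx.connected_components(G)]

# when a copy isn't needed, the plain view form works too:
largest = max((G.subgraph(c) for c in nx.connected_components(G)), key=len)
print(len(subgraphs), len(largest))  # 2 4
```
The same pattern applies to the strongly/weakly connected, biconnected, and attracting variants.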
| networkx/networkx | diff --git a/networkx/algorithms/components/tests/test_attracting.py b/networkx/algorithms/components/tests/test_attracting.py
index 92b4f9e2f..953fd97fc 100644
--- a/networkx/algorithms/components/tests/test_attracting.py
+++ b/networkx/algorithms/components/tests/test_attracting.py
@@ -15,6 +15,8 @@ class TestAttractingComponents(object):
self.G3 = nx.DiGraph()
self.G3.add_edges_from([(0, 1), (1, 2), (2, 1), (0, 3), (3, 4), (4, 3)])
+ self.G4 = nx.DiGraph()
+
def test_attracting_components(self):
ac = list(nx.attracting_components(self.G1))
assert_true({2} in ac)
@@ -31,10 +33,14 @@ class TestAttractingComponents(object):
assert_true((3, 4) in ac)
assert_equal(len(ac), 2)
+ ac = list(nx.attracting_components(self.G4))
+ assert_equal(ac, [])
+
def test_number_attacting_components(self):
assert_equal(nx.number_attracting_components(self.G1), 3)
assert_equal(nx.number_attracting_components(self.G2), 1)
assert_equal(nx.number_attracting_components(self.G3), 2)
+ assert_equal(nx.number_attracting_components(self.G4), 0)
def test_is_attracting_component(self):
assert_false(nx.is_attracting_component(self.G1))
@@ -42,10 +48,12 @@ class TestAttractingComponents(object):
assert_false(nx.is_attracting_component(self.G3))
g2 = self.G3.subgraph([1, 2])
assert_true(nx.is_attracting_component(g2))
+ assert_false(nx.is_attracting_component(self.G4))
def test_connected_raise(self):
G=nx.Graph()
assert_raises(NetworkXNotImplemented, nx.attracting_components, G)
assert_raises(NetworkXNotImplemented, nx.number_attracting_components, G)
assert_raises(NetworkXNotImplemented, nx.is_attracting_component, G)
+ # deprecated
assert_raises(NetworkXNotImplemented, nx.attracting_component_subgraphs, G)
diff --git a/networkx/algorithms/components/tests/test_biconnected.py b/networkx/algorithms/components/tests/test_biconnected.py
index faf216f4c..54612bc48 100644
--- a/networkx/algorithms/components/tests/test_biconnected.py
+++ b/networkx/algorithms/components/tests/test_biconnected.py
@@ -71,6 +71,7 @@ def test_biconnected_components_cycle():
answer = [{0, 1, 2}, {1, 3, 4}]
assert_components_equal(list(nx.biconnected_components(G)), answer)
+# deprecated
def test_biconnected_component_subgraphs_cycle():
G=nx.cycle_graph(3)
nx.add_cycle(G, [1, 3, 4, 5])
@@ -169,10 +170,18 @@ def test_biconnected_eppstein():
bcc = list(nx.biconnected_components(G2))
assert_components_equal(bcc, answer_G2)
+def test_null_graph():
+ G = nx.Graph()
+ assert_false(nx.is_biconnected(G))
+ assert_equal(list(nx.biconnected_components(G)), [])
+ assert_equal(list(nx.biconnected_component_edges(G)), [])
+ assert_equal(list(nx.articulation_points(G)), [])
+
def test_connected_raise():
DG = nx.DiGraph()
assert_raises(NetworkXNotImplemented, nx.biconnected_components, DG)
- assert_raises(NetworkXNotImplemented, nx.biconnected_component_subgraphs, DG)
assert_raises(NetworkXNotImplemented, nx.biconnected_component_edges, DG)
assert_raises(NetworkXNotImplemented, nx.articulation_points, DG)
assert_raises(NetworkXNotImplemented, nx.is_biconnected, DG)
+ # deprecated
+ assert_raises(NetworkXNotImplemented, nx.biconnected_component_subgraphs, DG)
diff --git a/networkx/algorithms/components/tests/test_connected.py b/networkx/algorithms/components/tests/test_connected.py
index 168f561a7..99f58386e 100644
--- a/networkx/algorithms/components/tests/test_connected.py
+++ b/networkx/algorithms/components/tests/test_connected.py
@@ -41,6 +41,10 @@ class TestConnected:
C = [[0, 1, 2], [3, 4]]
self.gc.append((G, C))
+ G = nx.DiGraph()
+ C = []
+ self.gc.append((G, C))
+
def test_connected_components(self):
cc = nx.connected_components
@@ -72,6 +76,7 @@ class TestConnected:
C = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
assert_equal(ncc(G, 1), C)
+ # deprecated
def test_connected_component_subgraphs(self):
wcc = nx.weakly_connected_component_subgraphs
cc = nx.connected_component_subgraphs
@@ -90,7 +95,8 @@ class TestConnected:
def test_connected_raise(self):
assert_raises(NetworkXNotImplemented, nx.connected_components, self.DG)
assert_raises(NetworkXNotImplemented, nx.number_connected_components, self.DG)
- assert_raises(NetworkXNotImplemented, nx.connected_component_subgraphs, self.DG)
assert_raises(NetworkXNotImplemented, nx.node_connected_component, self.DG,1)
assert_raises(NetworkXNotImplemented, nx.is_connected, self.DG)
assert_raises(nx.NetworkXPointlessConcept, nx.is_connected, nx.Graph())
+ # deprecated
+ assert_raises(NetworkXNotImplemented, nx.connected_component_subgraphs, self.DG)
diff --git a/networkx/algorithms/components/tests/test_strongly_connected.py b/networkx/algorithms/components/tests/test_strongly_connected.py
index 0886cb91b..5468aec72 100644
--- a/networkx/algorithms/components/tests/test_strongly_connected.py
+++ b/networkx/algorithms/components/tests/test_strongly_connected.py
@@ -67,6 +67,7 @@ class TestStronglyConnected:
else:
assert_false(nx.is_strongly_connected(G))
+ # deprecated
def test_strongly_connected_component_subgraphs(self):
scc = nx.strongly_connected_component_subgraphs
for G, C in self.gc:
@@ -134,12 +135,21 @@ class TestStronglyConnected:
for n, d in cG.nodes(data=True):
assert_equal(set(C[n]), cG.nodes[n]['members'])
+ def test_null_graph(self):
+ G=nx.DiGraph()
+ assert_equal(list(nx.strongly_connected_components(G)), [])
+ assert_equal(list(nx.kosaraju_strongly_connected_components(G)), [])
+ assert_equal(list(nx.strongly_connected_components_recursive(G)), [])
+ assert_equal(len(nx.condensation(G)), 0)
+ assert_raises(nx.NetworkXPointlessConcept, nx.is_strongly_connected, nx.DiGraph())
+
def test_connected_raise(self):
G=nx.Graph()
assert_raises(NetworkXNotImplemented, nx.strongly_connected_components, G)
assert_raises(NetworkXNotImplemented, nx.kosaraju_strongly_connected_components, G)
assert_raises(NetworkXNotImplemented, nx.strongly_connected_components_recursive, G)
- assert_raises(NetworkXNotImplemented, nx.strongly_connected_component_subgraphs, G)
assert_raises(NetworkXNotImplemented, nx.is_strongly_connected, G)
assert_raises(nx.NetworkXPointlessConcept, nx.is_strongly_connected, nx.DiGraph())
assert_raises(NetworkXNotImplemented, nx.condensation, G)
+ # deprecated
+ assert_raises(NetworkXNotImplemented, nx.strongly_connected_component_subgraphs, G)
diff --git a/networkx/algorithms/components/tests/test_subgraph_copies.py b/networkx/algorithms/components/tests/test_subgraph_copies.py
index b6c8dc611..9acedba2d 100644
--- a/networkx/algorithms/components/tests/test_subgraph_copies.py
+++ b/networkx/algorithms/components/tests/test_subgraph_copies.py
@@ -4,6 +4,9 @@ from copy import deepcopy
from nose.tools import assert_equal
import networkx as nx
+# deprecated in 2.1 for removal in 2.2
+
+
class TestSubgraphAttributesDicts:
def setUp(self):
diff --git a/networkx/algorithms/components/tests/test_weakly_connected.py b/networkx/algorithms/components/tests/test_weakly_connected.py
index ead698971..8c993debc 100644
--- a/networkx/algorithms/components/tests/test_weakly_connected.py
+++ b/networkx/algorithms/components/tests/test_weakly_connected.py
@@ -47,6 +47,7 @@ class TestWeaklyConnected:
c = nx.number_connected_components(U)
assert_equal(w, c)
+ # deprecated
def test_weakly_connected_component_subgraphs(self):
wcc = nx.weakly_connected_component_subgraphs
cc = nx.connected_component_subgraphs
@@ -61,9 +62,16 @@ class TestWeaklyConnected:
U = G.to_undirected()
assert_equal(nx.is_weakly_connected(G), nx.is_connected(U))
+ def test_null_graph(self):
+ G=nx.DiGraph()
+ assert_equal(list(nx.weakly_connected_components(G)), [])
+ assert_equal(nx.number_weakly_connected_components(G), 0)
+ assert_raises(nx.NetworkXPointlessConcept, nx.is_weakly_connected, G)
+
def test_connected_raise(self):
G=nx.Graph()
assert_raises(NetworkXNotImplemented,nx.weakly_connected_components, G)
assert_raises(NetworkXNotImplemented,nx.number_weakly_connected_components, G)
- assert_raises(NetworkXNotImplemented,nx.weakly_connected_component_subgraphs, G)
assert_raises(NetworkXNotImplemented,nx.is_weakly_connected, G)
+ # deprecated
+ assert_raises(NetworkXNotImplemented,nx.weakly_connected_component_subgraphs, G)
diff --git a/networkx/algorithms/shortest_paths/weighted.py b/networkx/algorithms/shortest_paths/weighted.py
index 3403393ba..16099b451 100644
--- a/networkx/algorithms/shortest_paths/weighted.py
+++ b/networkx/algorithms/shortest_paths/weighted.py
@@ -19,7 +19,6 @@ from heapq import heappush, heappop
from itertools import count
import networkx as nx
from networkx.utils import generate_unique_node
-import warnings as _warnings
__all__ = ['dijkstra_path',
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 11
} | 2.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y libgdal-dev graphviz"
],
"python": "3.6",
"reqs_path": [
"requirements/default.txt",
"requirements/test.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
charset-normalizer==2.0.12
codecov==2.1.13
coverage==6.2
decorator==5.1.1
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
-e git+https://github.com/networkx/networkx.git@51aebc8fb7faf1f644bac72513352941a6e7f895#egg=networkx
nose==1.3.7
nose-ignore-docstring==0.2
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
requests==2.27.1
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: networkx
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- charset-normalizer==2.0.12
- codecov==2.1.13
- coverage==6.2
- decorator==5.1.1
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- nose==1.3.7
- nose-ignore-docstring==0.2
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- requests==2.27.1
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/networkx
| [
"networkx/algorithms/components/tests/test_strongly_connected.py::TestStronglyConnected::test_null_graph"
]
| [
"networkx/algorithms/components/tests/test_attracting.py::TestAttractingComponents::test_attracting_components",
"networkx/algorithms/components/tests/test_attracting.py::TestAttractingComponents::test_number_attacting_components",
"networkx/algorithms/components/tests/test_attracting.py::TestAttractingComponents::test_is_attracting_component",
"networkx/algorithms/components/tests/test_connected.py::TestConnected::test_connected_components",
"networkx/algorithms/components/tests/test_connected.py::TestConnected::test_number_connected_components",
"networkx/algorithms/components/tests/test_connected.py::TestConnected::test_number_connected_components2",
"networkx/algorithms/components/tests/test_connected.py::TestConnected::test_connected_components2",
"networkx/algorithms/components/tests/test_connected.py::TestConnected::test_node_connected_components",
"networkx/algorithms/components/tests/test_connected.py::TestConnected::test_connected_component_subgraphs",
"networkx/algorithms/components/tests/test_connected.py::TestConnected::test_is_connected",
"networkx/algorithms/components/tests/test_connected.py::TestConnected::test_connected_raise",
"networkx/algorithms/components/tests/test_strongly_connected.py::TestStronglyConnected::test_tarjan",
"networkx/algorithms/components/tests/test_strongly_connected.py::TestStronglyConnected::test_tarjan_recursive",
"networkx/algorithms/components/tests/test_strongly_connected.py::TestStronglyConnected::test_kosaraju",
"networkx/algorithms/components/tests/test_strongly_connected.py::TestStronglyConnected::test_number_strongly_connected_components",
"networkx/algorithms/components/tests/test_strongly_connected.py::TestStronglyConnected::test_is_strongly_connected",
"networkx/algorithms/components/tests/test_strongly_connected.py::TestStronglyConnected::test_strongly_connected_component_subgraphs",
"networkx/algorithms/components/tests/test_strongly_connected.py::TestStronglyConnected::test_condensation_mapping_and_members",
"networkx/algorithms/components/tests/test_subgraph_copies.py::TestSubgraphAttributesDicts::test_subgraphs_default_copy_behavior",
"networkx/algorithms/components/tests/test_subgraph_copies.py::TestSubgraphAttributesDicts::test_subgraphs_copy",
"networkx/algorithms/components/tests/test_subgraph_copies.py::TestSubgraphAttributesDicts::test_subgraphs_no_copy",
"networkx/algorithms/components/tests/test_weakly_connected.py::TestWeaklyConnected::test_weakly_connected_components",
"networkx/algorithms/components/tests/test_weakly_connected.py::TestWeaklyConnected::test_number_weakly_connected_components",
"networkx/algorithms/components/tests/test_weakly_connected.py::TestWeaklyConnected::test_weakly_connected_component_subgraphs",
"networkx/algorithms/components/tests/test_weakly_connected.py::TestWeaklyConnected::test_is_weakly_connected"
]
| [
"networkx/algorithms/components/tests/test_attracting.py::TestAttractingComponents::test_connected_raise",
"networkx/algorithms/components/tests/test_biconnected.py::test_barbell",
"networkx/algorithms/components/tests/test_biconnected.py::test_articulation_points_repetitions",
"networkx/algorithms/components/tests/test_biconnected.py::test_articulation_points_cycle",
"networkx/algorithms/components/tests/test_biconnected.py::test_is_biconnected",
"networkx/algorithms/components/tests/test_biconnected.py::test_empty_is_biconnected",
"networkx/algorithms/components/tests/test_biconnected.py::test_biconnected_components_cycle",
"networkx/algorithms/components/tests/test_biconnected.py::test_biconnected_component_subgraphs_cycle",
"networkx/algorithms/components/tests/test_biconnected.py::test_biconnected_components1",
"networkx/algorithms/components/tests/test_biconnected.py::test_biconnected_components2",
"networkx/algorithms/components/tests/test_biconnected.py::test_biconnected_davis",
"networkx/algorithms/components/tests/test_biconnected.py::test_biconnected_karate",
"networkx/algorithms/components/tests/test_biconnected.py::test_biconnected_eppstein",
"networkx/algorithms/components/tests/test_biconnected.py::test_null_graph",
"networkx/algorithms/components/tests/test_biconnected.py::test_connected_raise",
"networkx/algorithms/components/tests/test_strongly_connected.py::TestStronglyConnected::test_contract_scc1",
"networkx/algorithms/components/tests/test_strongly_connected.py::TestStronglyConnected::test_contract_scc_isolate",
"networkx/algorithms/components/tests/test_strongly_connected.py::TestStronglyConnected::test_contract_scc_edge",
"networkx/algorithms/components/tests/test_strongly_connected.py::TestStronglyConnected::test_connected_raise",
"networkx/algorithms/components/tests/test_weakly_connected.py::TestWeaklyConnected::test_null_graph",
"networkx/algorithms/components/tests/test_weakly_connected.py::TestWeaklyConnected::test_connected_raise"
]
| []
| BSD 3-Clause | 2,025 | [
"networkx/classes/graph.py",
"networkx/classes/multigraph.py",
"networkx/classes/digraph.py",
"networkx/convert_matrix.py",
"networkx/algorithms/components/connected.py",
"tools/travis/build_docs.sh",
"networkx/algorithms/components/weakly_connected.py",
"networkx/algorithms/components/attracting.py",
"networkx/classes/multidigraph.py",
"networkx/algorithms/components/biconnected.py",
"networkx/algorithms/components/strongly_connected.py"
]
| [
"networkx/classes/graph.py",
"networkx/classes/multigraph.py",
"networkx/classes/digraph.py",
"networkx/convert_matrix.py",
"networkx/algorithms/components/connected.py",
"tools/travis/build_docs.sh",
"networkx/algorithms/components/weakly_connected.py",
"networkx/algorithms/components/attracting.py",
"networkx/classes/multidigraph.py",
"networkx/algorithms/components/biconnected.py",
"networkx/algorithms/components/strongly_connected.py"
]
|
|
pre-commit__pre-commit-680 | 0628df535b47ee503efbda2b777254f1d7b7f4bc | 2018-01-09 17:46:44 | 65f60e25930a4979a4571e41f320b81f622b2556 | diff --git a/pre_commit/commands/run.py b/pre_commit/commands/run.py
index c70eff0..3a08c8d 100644
--- a/pre_commit/commands/run.py
+++ b/pre_commit/commands/run.py
@@ -223,8 +223,8 @@ def run(runner, args, environ=os.environ):
return 1
if _has_unstaged_config(runner) and not no_stash:
logger.error(
- 'Your pre-commit configuration is unstaged.\n'
- '`git add {}` to fix this.'.format(runner.config_file),
+ 'Your .pre-commit-config.yaml is unstaged.\n'
+ '`git add .pre-commit-config.yaml` to fix this.',
)
return 1
diff --git a/pre_commit/main.py b/pre_commit/main.py
index 865571a..4c9202a 100644
--- a/pre_commit/main.py
+++ b/pre_commit/main.py
@@ -42,7 +42,7 @@ def _add_color_option(parser):
def _add_config_option(parser):
parser.add_argument(
- '-c', '--config', default=C.CONFIG_FILE,
+ '-c', '--config', default='.pre-commit-config.yaml',
help='Path to alternate config file',
)
diff --git a/pre_commit/repository.py b/pre_commit/repository.py
index 5c11921..bc0ecad 100644
--- a/pre_commit/repository.py
+++ b/pre_commit/repository.py
@@ -269,14 +269,14 @@ class MetaRepository(LocalRepository):
{
'id': 'check-hooks-apply',
'name': 'Check hooks apply to the repository',
- 'files': C.CONFIG_FILE,
+ 'files': '.pre-commit-config.yaml',
'language': 'system',
'entry': _make_entry(check_hooks_apply),
},
{
'id': 'check-useless-excludes',
'name': 'Check for useless excludes',
- 'files': C.CONFIG_FILE,
+ 'files': '.pre-commit-config.yaml',
'language': 'system',
'entry': _make_entry(check_useless_excludes),
},
diff --git a/pre_commit/store.py b/pre_commit/store.py
index 3262bda..9c67345 100644
--- a/pre_commit/store.py
+++ b/pre_commit/store.py
@@ -156,6 +156,21 @@ class Store(object):
def make_local(self, deps):
def make_local_strategy(directory):
copy_tree_to_path(resource_filename('empty_template'), directory)
+
+ env = no_git_env()
+ name, email = 'pre-commit', '[email protected]'
+ env['GIT_AUTHOR_NAME'] = env['GIT_COMMITTER_NAME'] = name
+ env['GIT_AUTHOR_EMAIL'] = env['GIT_COMMITTER_EMAIL'] = email
+
+ # initialize the git repository so it looks more like cloned repos
+ def _git_cmd(*args):
+ cmd_output('git', '-C', directory, *args, env=env)
+
+ _git_cmd('init', '.')
+ _git_cmd('config', 'remote.origin.url', '<<unknown>>')
+ _git_cmd('add', '.')
+ _git_cmd('commit', '--no-edit', '--no-gpg-sign', '-n', '-minit')
+
return self._new_repo(
'local:{}'.format(','.join(sorted(deps))), C.LOCAL_REPO_VERSION,
make_local_strategy,
| Crash on `local`-only `golang` repositories
While investigating: https://github.com/pre-commit/pre-commit-hooks/issues/255
Using this configuration:
```yaml
repos:
- repo: local
hooks:
- id: talisman
name: talisman
entry: talisman -githook pre-commit
pass_filenames: false
types: [text]
language: golang
additional_dependencies: [github.com/thoughtworks/talisman]
```
```
$ pre-commit run --all-files
[INFO] Initializing environment for local:github.com/thoughtworks/talisman.
[INFO] Installing environment for local.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
An unexpected error has occurred: CalledProcessError: Command: ('/usr/bin/git', 'config', 'remote.origin.url')
Return code: 1
Expected return code: 0
Output: (none)
Errors: (none)
Check the log at /home/asottile/.cache/pre-commit/pre-commit.log
```
```
An unexpected error has occurred: CalledProcessError: Command: ('/usr/bin/git', 'config', 'remote.origin.url')
Return code: 1
Expected return code: 0
Output: (none)
Errors: (none)
Traceback (most recent call last):
File "/tmp/wat/venv/local/lib/python2.7/site-packages/pre_commit/error_handler.py", line 47, in error_handler
yield
File "/tmp/wat/venv/local/lib/python2.7/site-packages/pre_commit/main.py", line 259, in main
return run(runner, args)
File "/tmp/wat/venv/local/lib/python2.7/site-packages/pre_commit/commands/run.py", line 256, in run
repo.require_installed()
File "/tmp/wat/venv/local/lib/python2.7/site-packages/pre_commit/repository.py", line 202, in require_installed
_install_all(self._venvs, self.repo_config['repo'], self.store)
File "/tmp/wat/venv/local/lib/python2.7/site-packages/pre_commit/repository.py", line 102, in _install_all
language.install_environment(cmd_runner, version, deps)
File "/tmp/wat/venv/local/lib/python2.7/site-packages/pre_commit/languages/golang.py", line 60, in install_environment
remote = git.get_remote_url(repo_cmd_runner.path())
File "/tmp/wat/venv/local/lib/python2.7/site-packages/pre_commit/git.py", line 41, in get_remote_url
ret = cmd_output('git', 'config', 'remote.origin.url', cwd=git_root)[1]
File "/tmp/wat/venv/local/lib/python2.7/site-packages/pre_commit/util.py", line 188, in cmd_output
returncode, cmd, retcode, output=(stdout, stderr),
CalledProcessError: Command: ('/usr/bin/git', 'config', 'remote.origin.url')
Return code: 1
Expected return code: 0
Output: (none)
Errors: (none)
``` | pre-commit/pre-commit | diff --git a/testing/fixtures.py b/testing/fixtures.py
index edb1bcd..b1c7a89 100644
--- a/testing/fixtures.py
+++ b/testing/fixtures.py
@@ -47,7 +47,7 @@ def modify_manifest(path):
with io.open(manifest_path, 'w') as manifest_file:
manifest_file.write(ordered_dump(manifest, **C.YAML_DUMP_KWARGS))
cmd_output(
- 'git', 'commit', '-am', 'update {}'.format(C.MANIFEST_FILE), cwd=path,
+ 'git', 'commit', '-am', 'update .pre-commit-hooks.yaml', cwd=path,
)
diff --git a/tests/commands/run_test.py b/tests/commands/run_test.py
index 97c82c2..336222d 100644
--- a/tests/commands/run_test.py
+++ b/tests/commands/run_test.py
@@ -683,7 +683,7 @@ def test_error_with_unstaged_config(
):
args = run_opts()
ret, printed = _do_run(cap_out, modified_config_repo, args)
- assert b'Your pre-commit configuration is unstaged.' in printed
+ assert b'Your .pre-commit-config.yaml is unstaged.' in printed
assert ret == 1
@@ -695,7 +695,7 @@ def test_no_unstaged_error_with_all_files_or_files(
):
args = run_opts(**opts)
ret, printed = _do_run(cap_out, modified_config_repo, args)
- assert b'Your pre-commit configuration is unstaged.' not in printed
+ assert b'Your .pre-commit-config.yaml is unstaged.' not in printed
def test_files_running_subdir(
diff --git a/tests/repository_test.py b/tests/repository_test.py
index 2b9ab6e..1d38d24 100644
--- a/tests/repository_test.py
+++ b/tests/repository_test.py
@@ -541,6 +541,24 @@ def test_additional_golang_dependencies_installed(
assert 'hello' in binaries
+def test_local_golang_additional_dependencies(store):
+ config = {
+ 'repo': 'local',
+ 'hooks': [{
+ 'id': 'hello',
+ 'name': 'hello',
+ 'entry': 'hello',
+ 'language': 'golang',
+ 'additional_dependencies': ['github.com/golang/example/hello'],
+ }],
+ }
+ repo = Repository.create(config, store)
+ (_, hook), = repo.hooks
+ ret = repo.run_hook(hook, ('filename',))
+ assert ret[0] == 0
+ assert _norm_out(ret[1]) == b"Hello, Go examples!\n"
+
+
def test_reinstall(tempdir_factory, store, log_info_mock):
path = make_repo(tempdir_factory, 'python_hooks_repo')
config = make_config_from_repo(path)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 4
} | 1.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aspy.yaml==1.3.0
attrs==22.2.0
cached-property==1.5.2
certifi==2021.5.30
coverage==6.2
distlib==0.3.9
filelock==3.4.1
flake8==5.0.4
identify==2.4.4
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
mccabe==0.7.0
mock==5.2.0
nodeenv==1.6.0
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
-e git+https://github.com/pre-commit/pre-commit.git@0628df535b47ee503efbda2b777254f1d7b7f4bc#egg=pre_commit
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing==3.1.4
pytest==7.0.1
pytest-env==0.6.2
PyYAML==6.0.1
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
virtualenv==20.17.1
zipp==3.6.0
| name: pre-commit
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- aspy-yaml==1.3.0
- attrs==22.2.0
- cached-property==1.5.2
- coverage==6.2
- distlib==0.3.9
- filelock==3.4.1
- flake8==5.0.4
- identify==2.4.4
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- mccabe==0.7.0
- mock==5.2.0
- nodeenv==1.6.0
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-env==0.6.2
- pyyaml==6.0.1
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- virtualenv==20.17.1
- zipp==3.6.0
prefix: /opt/conda/envs/pre-commit
| [
"tests/commands/run_test.py::test_error_with_unstaged_config"
]
| [
"tests/commands/run_test.py::test_hook_install_failure",
"tests/repository_test.py::test_switch_language_versions_doesnt_clobber",
"tests/repository_test.py::test_run_a_ruby_hook",
"tests/repository_test.py::test_run_versioned_ruby_hook",
"tests/repository_test.py::test_run_ruby_hook_with_disable_shared_gems",
"tests/repository_test.py::test_golang_hook",
"tests/repository_test.py::test_additional_ruby_dependencies_installed",
"tests/repository_test.py::test_additional_golang_dependencies_installed",
"tests/repository_test.py::test_local_golang_additional_dependencies"
]
| [
"tests/commands/run_test.py::test_run_all_hooks_failing",
"tests/commands/run_test.py::test_arbitrary_bytes_hook",
"tests/commands/run_test.py::test_hook_that_modifies_but_returns_zero",
"tests/commands/run_test.py::test_types_hook_repository",
"tests/commands/run_test.py::test_exclude_types_hook_repository",
"tests/commands/run_test.py::test_global_exclude",
"tests/commands/run_test.py::test_show_diff_on_failure",
"tests/commands/run_test.py::test_run[options0-outputs0-0-True]",
"tests/commands/run_test.py::test_run[options1-outputs1-0-True]",
"tests/commands/run_test.py::test_run[options2-outputs2-0-True]",
"tests/commands/run_test.py::test_run[options3-outputs3-1-True]",
"tests/commands/run_test.py::test_run[options4-outputs4-0-True]",
"tests/commands/run_test.py::test_run[options5-outputs5-0-True]",
"tests/commands/run_test.py::test_run[options6-outputs6-0-False]",
"tests/commands/run_test.py::test_run_output_logfile",
"tests/commands/run_test.py::test_always_run",
"tests/commands/run_test.py::test_always_run_alt_config",
"tests/commands/run_test.py::test_origin_source_error_msg[master-master-False]",
"tests/commands/run_test.py::test_origin_source_error_msg[master--True]",
"tests/commands/run_test.py::test_origin_source_error_msg[-master-True]",
"tests/commands/run_test.py::test_has_unmerged_paths",
"tests/commands/run_test.py::test_merge_conflict",
"tests/commands/run_test.py::test_merge_conflict_modified",
"tests/commands/run_test.py::test_merge_conflict_resolved",
"tests/commands/run_test.py::test_compute_cols[hooks0-True-80]",
"tests/commands/run_test.py::test_compute_cols[hooks1-False-81]",
"tests/commands/run_test.py::test_compute_cols[hooks2-True-85]",
"tests/commands/run_test.py::test_compute_cols[hooks3-False-82]",
"tests/commands/run_test.py::test_get_skips[environ0-expected_output0]",
"tests/commands/run_test.py::test_get_skips[environ1-expected_output1]",
"tests/commands/run_test.py::test_get_skips[environ2-expected_output2]",
"tests/commands/run_test.py::test_get_skips[environ3-expected_output3]",
"tests/commands/run_test.py::test_get_skips[environ4-expected_output4]",
"tests/commands/run_test.py::test_get_skips[environ5-expected_output5]",
"tests/commands/run_test.py::test_get_skips[environ6-expected_output6]",
"tests/commands/run_test.py::test_skip_hook",
"tests/commands/run_test.py::test_hook_id_not_in_non_verbose_output",
"tests/commands/run_test.py::test_hook_id_in_verbose_output",
"tests/commands/run_test.py::test_multiple_hooks_same_id",
"tests/commands/run_test.py::test_non_ascii_hook_id",
"tests/commands/run_test.py::test_stdout_write_bug_py26",
"tests/commands/run_test.py::test_lots_of_files",
"tests/commands/run_test.py::test_push_hook",
"tests/commands/run_test.py::test_commit_msg_hook",
"tests/commands/run_test.py::test_local_hook_passes",
"tests/commands/run_test.py::test_local_hook_fails",
"tests/commands/run_test.py::test_meta_hook_passes",
"tests/commands/run_test.py::test_no_unstaged_error_with_all_files_or_files[opts0]",
"tests/commands/run_test.py::test_no_unstaged_error_with_all_files_or_files[opts1]",
"tests/commands/run_test.py::test_files_running_subdir",
"tests/commands/run_test.py::test_pass_filenames[True-hook_args0-foo.py]",
"tests/commands/run_test.py::test_pass_filenames[False-hook_args1-]",
"tests/commands/run_test.py::test_pass_filenames[True-hook_args2-some",
"tests/commands/run_test.py::test_pass_filenames[False-hook_args3-some",
"tests/commands/run_test.py::test_fail_fast",
"tests/commands/run_test.py::test_include_exclude_base_case",
"tests/commands/run_test.py::test_matches_broken_symlink",
"tests/commands/run_test.py::test_include_exclude_total_match",
"tests/commands/run_test.py::test_include_exclude_does_search_instead_of_match",
"tests/commands/run_test.py::test_include_exclude_exclude_removes_files",
"tests/repository_test.py::test_python_hook",
"tests/repository_test.py::test_python_hook_default_version",
"tests/repository_test.py::test_python_hook_args_with_spaces",
"tests/repository_test.py::test_python_hook_weird_setup_cfg",
"tests/repository_test.py::test_versioned_python_hook",
"tests/repository_test.py::test_run_a_node_hook",
"tests/repository_test.py::test_run_versioned_node_hook",
"tests/repository_test.py::test_system_hook_with_spaces",
"tests/repository_test.py::test_missing_executable",
"tests/repository_test.py::test_run_a_script_hook",
"tests/repository_test.py::test_run_hook_with_spaced_args",
"tests/repository_test.py::test_run_hook_with_curly_braced_arguments",
"tests/repository_test.py::TestPygrep::test_grep_hook_matching",
"tests/repository_test.py::TestPygrep::test_grep_hook_case_insensitive",
"tests/repository_test.py::TestPygrep::test_grep_hook_not_matching[nope]",
"tests/repository_test.py::TestPygrep::test_grep_hook_not_matching[foo'bar]",
"tests/repository_test.py::TestPygrep::test_grep_hook_not_matching[^\\\\[INFO\\\\]]",
"tests/repository_test.py::TestPCRE::test_grep_hook_matching",
"tests/repository_test.py::TestPCRE::test_grep_hook_case_insensitive",
"tests/repository_test.py::TestPCRE::test_grep_hook_not_matching[nope]",
"tests/repository_test.py::TestPCRE::test_grep_hook_not_matching[foo'bar]",
"tests/repository_test.py::TestPCRE::test_grep_hook_not_matching[^\\\\[INFO\\\\]]",
"tests/repository_test.py::TestPCRE::test_pcre_hook_many_files",
"tests/repository_test.py::TestPCRE::test_missing_pcre_support",
"tests/repository_test.py::test_cwd_of_hook",
"tests/repository_test.py::test_lots_of_files",
"tests/repository_test.py::test_venvs",
"tests/repository_test.py::test_additional_dependencies",
"tests/repository_test.py::test_additional_dependencies_duplicated",
"tests/repository_test.py::test_additional_python_dependencies_installed",
"tests/repository_test.py::test_additional_dependencies_roll_forward",
"tests/repository_test.py::test_additional_node_dependencies_installed",
"tests/repository_test.py::test_reinstall",
"tests/repository_test.py::test_control_c_control_c_on_install",
"tests/repository_test.py::test_invalidated_virtualenv",
"tests/repository_test.py::test_really_long_file_paths",
"tests/repository_test.py::test_config_overrides_repo_specifics",
"tests/repository_test.py::test_tags_on_repositories",
"tests/repository_test.py::test_local_repository",
"tests/repository_test.py::test_local_python_repo",
"tests/repository_test.py::test_hook_id_not_present",
"tests/repository_test.py::test_meta_hook_not_present",
"tests/repository_test.py::test_too_new_version",
"tests/repository_test.py::test_versions_ok[0.1.0]",
"tests/repository_test.py::test_versions_ok[1.4.4]",
"tests/repository_test.py::test_manifest_hooks"
]
| []
| MIT License | 2,026 | [
"pre_commit/store.py",
"pre_commit/main.py",
"pre_commit/repository.py",
"pre_commit/commands/run.py"
]
| [
"pre_commit/store.py",
"pre_commit/main.py",
"pre_commit/repository.py",
"pre_commit/commands/run.py"
]
|
|
pyslackers__slack-sansio-27 | 28b1c6928d086f8a4d2322a207ac99faf19a6839 | 2018-01-09 18:19:07 | 28b1c6928d086f8a4d2322a207ac99faf19a6839 | coveralls:
[Coverage Status](https://coveralls.io/builds/14970855)
Coverage increased (+0.1%) to 90.765% when pulling **1087375ff50f2a8db607c880a465723b8f58d458 on rate-limit-wait** into **28b1c6928d086f8a4d2322a207ac99faf19a6839 on master**.
| diff --git a/README.rst b/README.rst
index 7578d2f..fa28ba4 100644
--- a/README.rst
+++ b/README.rst
@@ -73,6 +73,8 @@ Changelog
dev
```
+* Add ``minimum_time`` argument to ``SlackAPI.iter`` in order to force a minimum elapsed time between two call to the API
+
0.3.2
`````
diff --git a/slack/io/abc.py b/slack/io/abc.py
index 4a5ea84..7332345 100644
--- a/slack/io/abc.py
+++ b/slack/io/abc.py
@@ -80,7 +80,7 @@ class SlackAPI(abc.ABC):
global_headers=self._headers, token=self._token)
return await self._make_query(url, body, headers)
- async def iter(self, url, data=None, headers=None, *, limit=200, iterkey=None, itermode=None):
+ async def iter(self, url, data=None, headers=None, *, limit=200, iterkey=None, itermode=None, minimum_time=None):
"""
Iterate over a slack API method supporting pagination
@@ -91,6 +91,8 @@ class SlackAPI(abc.ABC):
limit: Maximum number of results to return per call.
iterkey: Key in response data to iterate over (required for url string).
itermode: Iteration mode (required for url string) (one of `cursor`, `page` or `timeline`)
+ minimum_time: Minimum elapsed time (in seconds) between two calls to the Slack API (default to 0).
+ If not reached the client will sleep for the remaining time.
Returns:
Async iterator over `response_data[key]`
@@ -101,9 +103,15 @@ class SlackAPI(abc.ABC):
if not data:
data = {}
+ last_request_time = None
while True:
+ current_time = time.time()
+ if minimum_time and last_request_time and last_request_time + minimum_time > current_time:
+ await self.sleep(last_request_time + minimum_time - current_time)
+
data, iterkey, itermode = sansio.prepare_iter_request(url, data, iterkey=iterkey, itermode=itermode,
limit=limit, itervalue=itervalue)
+ last_request_time = time.time()
response_data = await self.query(url, data, headers)
itervalue = sansio.decode_iter_request(response_data)
for item in response_data[iterkey]:
diff --git a/slack/io/requests.py b/slack/io/requests.py
index 5e0a87a..c384053 100644
--- a/slack/io/requests.py
+++ b/slack/io/requests.py
@@ -76,7 +76,7 @@ class SlackAPI(abc.SlackAPI):
global_headers=self._headers, token=self._token)
return self._make_query(url, body, headers)
- def iter(self, url, data=None, headers=None, *, limit=200, iterkey=None, itermode=None):
+ def iter(self, url, data=None, headers=None, *, limit=200, iterkey=None, itermode=None, minimum_time=None):
"""
Iterate over a slack API method supporting pagination
@@ -87,6 +87,8 @@ class SlackAPI(abc.SlackAPI):
limit: Maximum number of results to return per call.
iterkey: Key in response data to iterate over (required for url string).
itermode: Iteration mode (required for url string) (one of `cursor`, `page` or `timeline`)
+ minimum_time: Minimum elapsed time (in seconds) between two calls to the Slack API (default to 0).
+ If not reached the client will sleep for the remaining time.
Returns:
Async iterator over `response_data[key]`
@@ -97,9 +99,15 @@ class SlackAPI(abc.SlackAPI):
if not data:
data = {}
+ last_request_time = None
while True:
+ current_time = time.time()
+ if minimum_time and last_request_time and last_request_time + minimum_time > current_time:
+ self.sleep(last_request_time + minimum_time - current_time)
+
data, iterkey, itermode = sansio.prepare_iter_request(url, data, iterkey=iterkey, itermode=itermode,
limit=limit, itervalue=itervalue)
+ last_request_time = time.time()
response_data = self.query(url, data, headers)
itervalue = sansio.decode_iter_request(response_data)
for item in response_data[iterkey]:
| Rate limit
Slack has a rate limit of 1 query per second. When using the iteration mode for paginated requests, it is easy to get rate limited.
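For illustration, a minimal sketch of how this bites (the `SlackAPI` constructor arguments shown are assumed from the aiohttp backend; the pattern is the same for the other backends):
```python
import aiohttp
from slack import methods
from slack.io.aiohttp import SlackAPI

async def list_channels(token):
    async with aiohttp.ClientSession() as session:
        client = SlackAPI(token=token, session=session)
        # Each page of results triggers its own HTTP request, back-to-back,
        # so a workspace with many channels can exceed ~1 request/second
        # and start receiving HTTP 429 (rate limited) responses.
        async for channel in client.iter(methods.CHANNELS_LIST, limit=200):
            print(channel['name'])
```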
We should add a way to ensure x seconds have passed between successive API calls when using the iteration mode. | pyslackers/slack-sansio | diff --git a/tests/test_io.py b/tests/test_io.py
index 6ff909d..fc01c8c 100644
--- a/tests/test_io.py
+++ b/tests/test_io.py
@@ -1,9 +1,11 @@
import json
import asks
import trio
+import time
import slack
import curio
import pytest
+import asyncio
import aiohttp
import requests
import datetime
@@ -166,6 +168,40 @@ class TestABC:
async for _ in client.iter('https://slack.com/api/channels.list'): # noQa: F841
pass
+ @pytest.mark.parametrize('client', ({'retry_when_rate_limit': False,
+ '_request': [
+ {'body': 'channels_iter'},
+ {'body': 'channels'}
+ ]}, ), indirect=True)
+ async def test_iter_wait(self, client):
+ client.sleep = asynctest.CoroutineMock()
+
+ channels = 0
+ async for _ in client.iter(methods.CHANNELS_LIST, minimum_time=2): # noQa: F841
+ channels += 1
+
+ assert channels == 4
+ assert client._request.call_count == 2
+ assert client.sleep.call_count == 1
+ assert 2 > client.sleep.call_args[0][0] > 1.9
+
+ @pytest.mark.parametrize('client', ({'retry_when_rate_limit': False,
+ '_request': [
+ {'body': 'channels_iter'},
+ {'body': 'channels'}
+ ]}, ), indirect=True)
+ async def test_iter_no_wait(self, client):
+ client.sleep = asynctest.CoroutineMock()
+
+ channels = 0
+ async for _ in client.iter(methods.CHANNELS_LIST, minimum_time=1): # noQa: F841
+ channels += 1
+ await asyncio.sleep(0.5)
+
+ assert channels == 4
+ assert client._request.call_count == 2
+ assert client.sleep.call_count == 0
+
@pytest.mark.parametrize('client', ({'_request': [{'body': 'auth.test'}, {'body': 'users.info'}]}, ), indirect=True)
async def test_find_bot_id(self, client):
bot_id = await client._find_bot_id()
@@ -314,6 +350,40 @@ class TestNoAsync:
'POST', 'https://slack.com/api/channels.list', {}, {'limit': 200, 'token': token, 'cursor': itercursor}
)
+ @pytest.mark.parametrize('client', ({'retry_when_rate_limit': False,
+ '_request': [
+ {'body': 'channels_iter'},
+ {'body': 'channels'}
+ ]}, ), indirect=True)
+ def test_iter_wait(self, client):
+ client.sleep = asynctest.CoroutineMock()
+
+ channels = 0
+ for _ in client.iter(methods.CHANNELS_LIST, minimum_time=2): # noQa: F841
+ channels += 1
+
+ assert channels == 4
+ assert client._request.call_count == 2
+ assert client.sleep.call_count == 1
+ assert 2 > client.sleep.call_args[0][0] > 1.9
+
+ @pytest.mark.parametrize('client', ({'retry_when_rate_limit': False,
+ '_request': [
+ {'body': 'channels_iter'},
+ {'body': 'channels'}
+ ]}, ), indirect=True)
+ def test_iter_no_wait(self, client):
+ client.sleep = asynctest.CoroutineMock()
+
+ channels = 0
+ for _ in client.iter(methods.CHANNELS_LIST, minimum_time=1): # noQa: F841
+ channels += 1
+ time.sleep(0.5)
+
+ assert channels == 4
+ assert client._request.call_count == 2
+ assert client.sleep.call_count == 0
+
@pytest.mark.parametrize('client', ({'_request': [{'body': 'auth.test'}, {'body': 'users.info'}]}, ), indirect=True)
def test_find_bot_id(self, client):
bot_id = client._find_bot_id()
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 3
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aiohappyeyeballs==2.6.1
aiohttp==3.11.14
aiosignal==1.3.2
alabaster==0.7.16
anyio==3.7.1
asks==3.0.0
async-generator==1.10
async-timeout==5.0.1
asynctest==0.13.0
attrs==25.3.0
Automat==24.8.1
babel==2.17.0
cachetools==5.5.2
certifi==2025.1.31
cffi==1.17.1
chardet==5.2.0
charset-normalizer==3.4.1
colorama==0.4.6
constantly==23.10.4
coverage==7.8.0
cryptography==44.0.2
curio==1.6
distlib==0.3.9
docutils==0.21.2
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
filelock==3.18.0
flake8==7.2.0
frozenlist==1.5.0
h11==0.14.0
hyperlink==21.0.0
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
incremental==24.7.2
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==3.1.6
MarkupSafe==3.0.2
mccabe==0.7.0
multidict==6.2.0
outcome==1.3.0.post0
packaging @ file:///croot/packaging_1734472117206/work
platformdirs==4.3.7
pluggy @ file:///croot/pluggy_1733169602837/work
pockets==0.9.1
propcache==0.3.1
pyasn1==0.6.1
pyasn1_modules==0.4.2
pycodestyle==2.13.0
pycparser==2.22
pyflakes==3.3.1
Pygments==2.19.1
pyOpenSSL==25.0.0
pyproject-api==1.9.0
pytest @ file:///croot/pytest_1738938843180/work
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-cover==3.0.0
pytest-coverage==0.0
pytest-runner==6.0.1
requests==2.32.3
service-identity==24.2.0
six==1.17.0
-e git+https://github.com/pyslackers/slack-sansio.git@28b1c6928d086f8a4d2322a207ac99faf19a6839#egg=slack_sansio
sniffio==1.3.1
snowballstemmer==2.2.0
sortedcontainers==2.4.0
Sphinx==7.4.7
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-asyncio==0.3.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-napoleon==0.7
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
tomli==2.2.1
tox==4.25.0
treq==24.9.1
trio==0.29.0
Twisted==24.11.0
typing_extensions==4.13.0
urllib3==2.3.0
virtualenv==20.29.3
websocket-client==1.8.0
yarl==1.18.3
zipp==3.21.0
zope.interface==7.2
| name: slack-sansio
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- aiohappyeyeballs==2.6.1
- aiohttp==3.11.14
- aiosignal==1.3.2
- alabaster==0.7.16
- anyio==3.7.1
- asks==3.0.0
- async-generator==1.10
- async-timeout==5.0.1
- asynctest==0.13.0
- attrs==25.3.0
- automat==24.8.1
- babel==2.17.0
- cachetools==5.5.2
- certifi==2025.1.31
- cffi==1.17.1
- chardet==5.2.0
- charset-normalizer==3.4.1
- colorama==0.4.6
- constantly==23.10.4
- coverage==7.8.0
- cryptography==44.0.2
- curio==1.6
- distlib==0.3.9
- docutils==0.21.2
- filelock==3.18.0
- flake8==7.2.0
- frozenlist==1.5.0
- h11==0.14.0
- hyperlink==21.0.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- incremental==24.7.2
- jinja2==3.1.6
- markupsafe==3.0.2
- mccabe==0.7.0
- multidict==6.2.0
- outcome==1.3.0.post0
- platformdirs==4.3.7
- pockets==0.9.1
- propcache==0.3.1
- pyasn1==0.6.1
- pyasn1-modules==0.4.2
- pycodestyle==2.13.0
- pycparser==2.22
- pyflakes==3.3.1
- pygments==2.19.1
- pyopenssl==25.0.0
- pyproject-api==1.9.0
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-cover==3.0.0
- pytest-coverage==0.0
- pytest-runner==6.0.1
- requests==2.32.3
- service-identity==24.2.0
- six==1.17.0
- sniffio==1.3.1
- snowballstemmer==2.2.0
- sortedcontainers==2.4.0
- sphinx==7.4.7
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-asyncio==0.3.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-napoleon==0.7
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- tomli==2.2.1
- tox==4.25.0
- treq==24.9.1
- trio==0.29.0
- twisted==24.11.0
- typing-extensions==4.13.0
- urllib3==2.3.0
- virtualenv==20.29.3
- websocket-client==1.8.0
- yarl==1.18.3
- zipp==3.21.0
- zope-interface==7.2
prefix: /opt/conda/envs/slack-sansio
| [
"tests/test_io.py::TestABC::test_iter_wait[FakeIO-client0]",
"tests/test_io.py::TestABC::test_iter_no_wait[FakeIO-client0]",
"tests/test_io.py::TestNoAsync::test_iter_wait[client0-SlackAPI]",
"tests/test_io.py::TestNoAsync::test_iter_no_wait[client0-SlackAPI]"
]
| [
"tests/test_io.py::TestTrio::test__request",
"tests/test_io.py::TestCurio::test__request"
]
| [
"tests/test_io.py::TestABC::test_query[client0-FakeIO]",
"tests/test_io.py::TestABC::test_query[client1-FakeIO]",
"tests/test_io.py::TestABC::test_query_url[client0-FakeIO]",
"tests/test_io.py::TestABC::test_query_url[client1-FakeIO]",
"tests/test_io.py::TestABC::test_query_long_url[client0-FakeIO]",
"tests/test_io.py::TestABC::test_query_long_url[client1-FakeIO]",
"tests/test_io.py::TestABC::test_query_webhook_url[client0-FakeIO]",
"tests/test_io.py::TestABC::test_query_webhook_url[client1-FakeIO]",
"tests/test_io.py::TestABC::test_query_data[client0-FakeIO]",
"tests/test_io.py::TestABC::test_query_data[client1-FakeIO]",
"tests/test_io.py::TestABC::test_query_data_webhook[client0-FakeIO]",
"tests/test_io.py::TestABC::test_query_data_webhook[client1-FakeIO]",
"tests/test_io.py::TestABC::test_query_headers[client0-FakeIO]",
"tests/test_io.py::TestABC::test_query_headers[client1-FakeIO]",
"tests/test_io.py::TestABC::test_retry_rate_limited[FakeIO-client0]",
"tests/test_io.py::TestABC::test_retry_rate_limited_with_body[FakeIO-client0]",
"tests/test_io.py::TestABC::test_retry_rate_limited_with_headers[FakeIO-client0]",
"tests/test_io.py::TestABC::test_raise_rate_limited[FakeIO-client0]",
"tests/test_io.py::TestABC::test_iter[FakeIO-client0]",
"tests/test_io.py::TestABC::test_iter_itermode_iterkey[FakeIO-client0]",
"tests/test_io.py::TestABC::test_iter_not_supported[client0-FakeIO]",
"tests/test_io.py::TestABC::test_iter_not_supported[client1-FakeIO]",
"tests/test_io.py::TestABC::test_find_bot_id[FakeIO-client0]",
"tests/test_io.py::TestABC::test_find_rtm_url[FakeIO-client0]",
"tests/test_io.py::TestABC::test_incoming_rtm[client0-rtm_iterator0-FakeIO]",
"tests/test_io.py::TestABC::test_incoming_rtm[client1-rtm_iterator0-FakeIO]",
"tests/test_io.py::TestABC::test_incoming_rtm_reconnect[client0-FakeIO-rtm_iterator0]",
"tests/test_io.py::TestABC::test_incoming_rtm_reconnect[client1-FakeIO-rtm_iterator0]",
"tests/test_io.py::TestABC::test_incoming_rtm_discard_bot_id[client0-FakeIO-rtm_iterator0]",
"tests/test_io.py::TestABC::test_incoming_rtm_discard_bot_id[client1-FakeIO-rtm_iterator0]",
"tests/test_io.py::TestABC::test_incoming_rtm_skip[client0-FakeIO-rtm_iterator0]",
"tests/test_io.py::TestABC::test_incoming_rtm_skip[client1-FakeIO-rtm_iterator0]",
"tests/test_io.py::TestNoAsync::test_query[client0-SlackAPI]",
"tests/test_io.py::TestNoAsync::test_query[client1-SlackAPI]",
"tests/test_io.py::TestNoAsync::test_query_data[client0-SlackAPI]",
"tests/test_io.py::TestNoAsync::test_query_data[client1-SlackAPI]",
"tests/test_io.py::TestNoAsync::test_query_headers[client0-SlackAPI]",
"tests/test_io.py::TestNoAsync::test_query_headers[client1-SlackAPI]",
"tests/test_io.py::TestNoAsync::test_retry_rate_limited[client0-SlackAPI]",
"tests/test_io.py::TestNoAsync::test_retry_rate_limited_with_body[client0-SlackAPI]",
"tests/test_io.py::TestNoAsync::test_retry_rate_limited_with_headers[client0-SlackAPI]",
"tests/test_io.py::TestNoAsync::test_raise_rate_limited[client0-SlackAPI]",
"tests/test_io.py::TestNoAsync::test_iter[client0-SlackAPI]",
"tests/test_io.py::TestNoAsync::test_find_bot_id[client0-SlackAPI]",
"tests/test_io.py::TestNoAsync::test_find_rtm_url[client0-SlackAPI]",
"tests/test_io.py::TestNoAsync::test_incoming_rtm[client0-rtm_iterator_non_async0-SlackAPI]",
"tests/test_io.py::TestNoAsync::test_incoming_rtm[client1-rtm_iterator_non_async0-SlackAPI]",
"tests/test_io.py::TestNoAsync::test_incoming_rtm_reconnect[client0-rtm_iterator_non_async0-SlackAPI]",
"tests/test_io.py::TestNoAsync::test_incoming_rtm_reconnect[client1-rtm_iterator_non_async0-SlackAPI]",
"tests/test_io.py::TestNoAsync::test_incoming_rtm_discard_bot_id[client0-rtm_iterator_non_async0-SlackAPI]",
"tests/test_io.py::TestNoAsync::test_incoming_rtm_discard_bot_id[client1-rtm_iterator_non_async0-SlackAPI]",
"tests/test_io.py::TestNoAsync::test_incoming_rtm_skip[client0-rtm_iterator_non_async0-SlackAPI]",
"tests/test_io.py::TestNoAsync::test_incoming_rtm_skip[client1-rtm_iterator_non_async0-SlackAPI]",
"tests/test_io.py::TestRequest::test_sleep",
"tests/test_io.py::TestRequest::test__request",
"tests/test_io.py::TestAiohttp::test_sleep",
"tests/test_io.py::TestAiohttp::test__request",
"tests/test_io.py::TestTrio::test_sleep",
"tests/test_io.py::TestCurio::test_sleep"
]
| []
| MIT License | 2,027 | [
"README.rst",
"slack/io/abc.py",
"slack/io/requests.py"
]
| [
"README.rst",
"slack/io/abc.py",
"slack/io/requests.py"
]
|
nipy__nipype-2363 | f3b09125eceec7b90ad801176892aab06e012f8c | 2018-01-09 22:24:45 | 1c174dfcda622fe6cebd71069dadc8cacc87dd6f | diff --git a/CHANGES b/CHANGES
index fa1716688..8dcca2ba6 100644
--- a/CHANGES
+++ b/CHANGES
@@ -1,6 +1,7 @@
Upcoming release (0.14.1)
=========================
+* FIX: Errors parsing ``$DISPLAY`` (https://github.com/nipy/nipype/pull/2363)
* FIX: MultiProc starting workers at dubious wd (https://github.com/nipy/nipype/pull/2368)
* REF+FIX: Move BIDSDataGrabber to `interfaces.io` + fix correct default behavior (https://github.com/nipy/nipype/pull/2336)
* ENH: Add AFNI interface for 3dConvertDset (https://github.com/nipy/nipype/pull/2337)
diff --git a/nipype/utils/config.py b/nipype/utils/config.py
index 15264b9ed..30b826d23 100644
--- a/nipype/utils/config.py
+++ b/nipype/utils/config.py
@@ -312,11 +312,11 @@ class NipypeConfig(object):
def _mock():
pass
- # Store a fake Xvfb object
- ndisp = int(sysdisplay.split(':')[-1])
+ # Store a fake Xvfb object. Format - <host>:<display>[.<screen>]
+ ndisp = sysdisplay.split(':')[-1].split('.')[0]
Xvfb = namedtuple('Xvfb', ['new_display', 'stop'])
- self._display = Xvfb(ndisp, _mock)
- return sysdisplay
+ self._display = Xvfb(int(ndisp), _mock)
+ return self.get_display()
else:
if 'darwin' in sys.platform:
raise RuntimeError(
@@ -343,8 +343,7 @@ class NipypeConfig(object):
if not hasattr(self._display, 'new_display'):
setattr(self._display, 'new_display',
self._display.vdisplay_num)
-
- return ':%d' % self._display.new_display
+ return self.get_display()
def stop_display(self):
"""Closes the display if started"""
| issue with MIPAV module
Dear all,
I've tried to run the example on MP2RAGE-skullstripping http://nipype.readthedocs.io/en/latest/users/examples/smri_cbs_skullstripping.html with nipype 0.14.0 and CBStools version 3.0. I use a conda environment based on python 2.7.
Here is the error message:
Traceback:
Traceback (most recent call last):
File "/home/raid1/fbeyer/local/miniconda2/envs/nip14/lib/python2.7/site-packages/nipype/pipeline/plugins/linear.py", line 43, in run
node.run(updatehash=updatehash)
File "/home/raid1/fbeyer/local/miniconda2/envs/nip14/lib/python2.7/site-packages/nipype/pipeline/engine/nodes.py", line 407, in run
self._run_interface()
File "/home/raid1/fbeyer/local/miniconda2/envs/nip14/lib/python2.7/site-packages/nipype/pipeline/engine/nodes.py", line 517, in _run_interface
self._result = self._run_command(execute)
File "/home/raid1/fbeyer/local/miniconda2/envs/nip14/lib/python2.7/site-packages/nipype/pipeline/engine/nodes.py", line 650, in _run_command
result = self._interface.run()
File "/home/raid1/fbeyer/local/miniconda2/envs/nip14/lib/python2.7/site-packages/nipype/interfaces/base.py", line 1063, in run
env['DISPLAY'] = config.get_display()
File "/home/raid1/fbeyer/local/miniconda2/envs/nip14/lib/python2.7/site-packages/nipype/utils/config.py", line 286, in get_display
ndisp = int(sysdisplay.split(':')[-1])
ValueError: invalid literal for int() with base 10: '0.0'
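The failure is easy to reproduce in isolation — X11 `$DISPLAY` values have the form `<host>:<display>[.<screen>]`, and the optional screen suffix breaks the bare `int()` conversion (illustrative value below):
```python
sysdisplay = ':0.0'  # typical local X session: <host>:<display>[.<screen>]

try:
    int(sysdisplay.split(':')[-1])        # what nipype 0.14.0 does
except ValueError as err:
    print(err)  # invalid literal for int() with base 10: '0.0'

# Dropping the screen suffix first, as the patch above does, parses cleanly:
print(int(sysdisplay.split(':')[-1].split('.')[0]))  # 0
```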
When running the MIPAV command separately, it works fine.
Do you have an idea what could be the reason for this?
Best
Frauke | nipy/nipype | diff --git a/nipype/utils/tests/test_config.py b/nipype/utils/tests/test_config.py
index 9c322128e..65ada4c64 100644
--- a/nipype/utils/tests/test_config.py
+++ b/nipype/utils/tests/test_config.py
@@ -28,6 +28,17 @@ xvfbpatch_old.Xvfb.return_value = MagicMock(
spec=['vdisplay_num', 'start', 'stop'], vdisplay_num=2010)
[email protected]('dispvar', [':12', 'localhost:12', 'localhost:12.1'])
+def test_display_parse(monkeypatch, dispvar):
+ """Check that when $DISPLAY is defined, the display is correctly parsed"""
+ config._display = None
+ config._config.remove_option('execution', 'display_variable')
+ monkeypatch.setenv('DISPLAY', dispvar)
+ assert config.get_display() == ':12'
+ # Test that it was correctly cached
+ assert config.get_display() == ':12'
+
+
@pytest.mark.parametrize('dispnum', range(5))
def test_display_config(monkeypatch, dispnum):
"""Check that the display_variable option is used ($DISPLAY not set)"""
@@ -46,7 +57,7 @@ def test_display_system(monkeypatch, dispnum):
config._display = None
config._config.remove_option('execution', 'display_variable')
dispstr = ':%d' % dispnum
- monkeypatch.setitem(os.environ, 'DISPLAY', dispstr)
+ monkeypatch.setenv('DISPLAY', dispstr)
assert config.get_display() == dispstr
# Test that it was correctly cached
assert config.get_display() == dispstr
@@ -58,7 +69,7 @@ def test_display_config_and_system(monkeypatch):
config._display = None
dispstr = ':10'
config.set('execution', 'display_variable', dispstr)
- monkeypatch.setitem(os.environ, 'DISPLAY', ':0')
+ monkeypatch.setenv('DISPLAY', ':0')
assert config.get_display() == dispstr
# Test that it was correctly cached
assert config.get_display() == dispstr
@@ -72,10 +83,17 @@ def test_display_noconfig_nosystem_patched(monkeypatch):
config._config.remove_option('execution', 'display_variable')
monkeypatch.delitem(os.environ, 'DISPLAY', raising=False)
monkeypatch.setitem(sys.modules, 'xvfbwrapper', xvfbpatch)
+ monkeypatch.setattr(sys, 'platform', value='linux')
assert config.get_display() == ":2010"
# Test that it was correctly cached
assert config.get_display() == ':2010'
+ # Check that raises in Mac
+ config._display = None
+ monkeypatch.setattr(sys, 'platform', value='darwin')
+ with pytest.raises(RuntimeError):
+ config.get_display()
+
def test_display_empty_patched(monkeypatch):
"""
@@ -85,12 +103,18 @@ def test_display_empty_patched(monkeypatch):
config._display = None
if config.has_option('execution', 'display_variable'):
config._config.remove_option('execution', 'display_variable')
- monkeypatch.setitem(os.environ, 'DISPLAY', '')
+ monkeypatch.setenv('DISPLAY', '')
monkeypatch.setitem(sys.modules, 'xvfbwrapper', xvfbpatch)
+ monkeypatch.setattr(sys, 'platform', value='linux')
assert config.get_display() == ':2010'
# Test that it was correctly cached
assert config.get_display() == ':2010'
+ # Check that raises in Mac
+ config._display = None
+ monkeypatch.setattr(sys, 'platform', value='darwin')
+ with pytest.raises(RuntimeError):
+ config.get_display()
def test_display_noconfig_nosystem_patched_oldxvfbwrapper(monkeypatch):
"""
@@ -102,10 +126,16 @@ def test_display_noconfig_nosystem_patched_oldxvfbwrapper(monkeypatch):
config._config.remove_option('execution', 'display_variable')
monkeypatch.delitem(os.environ, 'DISPLAY', raising=False)
monkeypatch.setitem(sys.modules, 'xvfbwrapper', xvfbpatch_old)
+ monkeypatch.setattr(sys, 'platform', value='linux')
assert config.get_display() == ":2010"
# Test that it was correctly cached
assert config.get_display() == ':2010'
+ # Check that raises in Mac
+ config._display = None
+ monkeypatch.setattr(sys, 'platform', value='darwin')
+ with pytest.raises(RuntimeError):
+ config.get_display()
def test_display_empty_patched_oldxvfbwrapper(monkeypatch):
"""
@@ -115,12 +145,18 @@ def test_display_empty_patched_oldxvfbwrapper(monkeypatch):
config._display = None
if config.has_option('execution', 'display_variable'):
config._config.remove_option('execution', 'display_variable')
- monkeypatch.setitem(os.environ, 'DISPLAY', '')
+ monkeypatch.setenv('DISPLAY', '')
monkeypatch.setitem(sys.modules, 'xvfbwrapper', xvfbpatch_old)
+ monkeypatch.setattr(sys, 'platform', value='linux')
assert config.get_display() == ':2010'
# Test that it was correctly cached
assert config.get_display() == ':2010'
+ # Check that raises in Mac
+ config._display = None
+ monkeypatch.setattr(sys, 'platform', value='darwin')
+ with pytest.raises(RuntimeError):
+ config.get_display()
def test_display_noconfig_nosystem_notinstalled(monkeypatch):
"""
@@ -130,7 +166,7 @@ def test_display_noconfig_nosystem_notinstalled(monkeypatch):
config._display = None
if config.has_option('execution', 'display_variable'):
config._config.remove_option('execution', 'display_variable')
- monkeypatch.delitem(os.environ, 'DISPLAY', raising=False)
+ monkeypatch.delenv('DISPLAY', raising=False)
monkeypatch.setitem(sys.modules, 'xvfbwrapper', None)
with pytest.raises(RuntimeError):
config.get_display()
@@ -144,13 +180,14 @@ def test_display_empty_notinstalled(monkeypatch):
config._display = None
if config.has_option('execution', 'display_variable'):
config._config.remove_option('execution', 'display_variable')
- monkeypatch.setitem(os.environ, 'DISPLAY', '')
+ monkeypatch.setenv('DISPLAY', '')
monkeypatch.setitem(sys.modules, 'xvfbwrapper', None)
with pytest.raises(RuntimeError):
config.get_display()
@pytest.mark.skipif(not has_Xvfb, reason='xvfbwrapper not installed')
+@pytest.mark.skipif('darwin' in sys.platform, reason='macosx requires root for Xvfb')
def test_display_noconfig_nosystem_installed(monkeypatch):
"""
Check that actually uses xvfbwrapper when installed (not mocked)
@@ -159,7 +196,7 @@ def test_display_noconfig_nosystem_installed(monkeypatch):
config._display = None
if config.has_option('execution', 'display_variable'):
config._config.remove_option('execution', 'display_variable')
- monkeypatch.delitem(os.environ, 'DISPLAY', raising=False)
+ monkeypatch.delenv('DISPLAY', raising=False)
newdisp = config.get_display()
assert int(newdisp.split(':')[-1]) > 1000
# Test that it was correctly cached
@@ -167,6 +204,7 @@ def test_display_noconfig_nosystem_installed(monkeypatch):
@pytest.mark.skipif(not has_Xvfb, reason='xvfbwrapper not installed')
+@pytest.mark.skipif('darwin' in sys.platform, reason='macosx requires root for Xvfb')
def test_display_empty_installed(monkeypatch):
"""
Check that actually uses xvfbwrapper when installed (not mocked)
@@ -175,7 +213,7 @@ def test_display_empty_installed(monkeypatch):
config._display = None
if config.has_option('execution', 'display_variable'):
config._config.remove_option('execution', 'display_variable')
- monkeypatch.setitem(os.environ, 'DISPLAY', '')
+ monkeypatch.setenv('DISPLAY', '')
newdisp = config.get_display()
assert int(newdisp.split(':')[-1]) > 1000
# Test that it was correctly cached
@@ -191,7 +229,7 @@ def test_display_empty_macosx(monkeypatch):
config._display = None
if config.has_option('execution', 'display_variable'):
config._config.remove_option('execution', 'display_variable')
- monkeypatch.delitem(os.environ, 'DISPLAY', '')
+ monkeypatch.delenv('DISPLAY', '')
monkeypatch.setattr(sys, 'platform', 'darwin')
with pytest.raises(RuntimeError):
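As a side note on the `monkeypatch` calls the test diff above swaps in, here is a minimal, self-contained sketch (illustrative only, not part of the nipype suite) of the two equivalent idioms; `setenv`/`delenv` are pytest's environment-specific helpers:

```python
import os

def test_display_env(monkeypatch):
    monkeypatch.setitem(os.environ, 'DISPLAY', ':1')  # generic dict patching, the old style
    monkeypatch.setenv('DISPLAY', ':1')               # env-specific helper, the new style
    monkeypatch.delenv('DISPLAY', raising=False)      # remove; raising=False tolerates absence
    assert 'DISPLAY' not in os.environ
```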
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 1
},
"num_modified_files": 2
} | 0.14 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
click==8.0.4
configparser==5.2.0
decorator==4.4.2
funcsigs==1.0.2
future==1.0.0
importlib-metadata==4.8.3
iniconfig==1.1.1
isodate==0.6.1
lxml==5.3.1
mock==5.2.0
networkx==2.5.1
nibabel==3.2.2
-e git+https://github.com/nipy/nipype.git@f3b09125eceec7b90ad801176892aab06e012f8c#egg=nipype
numpy==1.19.5
packaging==21.3
pluggy==1.0.0
prov==1.5.0
py==1.11.0
pydot==1.4.2
pydotplus==2.0.2
pyparsing==3.1.4
pytest==7.0.1
python-dateutil==2.9.0.post0
rdflib==5.0.0
scipy==1.5.4
simplejson==3.20.1
six==1.17.0
tomli==1.2.3
traits==6.4.1
typing_extensions==4.1.1
zipp==3.6.0
| name: nipype
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- click==8.0.4
- configparser==5.2.0
- decorator==4.4.2
- funcsigs==1.0.2
- future==1.0.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- isodate==0.6.1
- lxml==5.3.1
- mock==5.2.0
- networkx==2.5.1
- nibabel==3.2.2
- numpy==1.19.5
- packaging==21.3
- pluggy==1.0.0
- prov==1.5.0
- py==1.11.0
- pydot==1.4.2
- pydotplus==2.0.2
- pyparsing==3.1.4
- pytest==7.0.1
- python-dateutil==2.9.0.post0
- rdflib==5.0.0
- scipy==1.5.4
- simplejson==3.20.1
- six==1.17.0
- tomli==1.2.3
- traits==6.4.1
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/nipype
| [
"nipype/utils/tests/test_config.py::test_display_parse[localhost:12]",
"nipype/utils/tests/test_config.py::test_display_parse[localhost:12.1]"
]
| []
| [
"nipype/utils/tests/test_config.py::test_display_parse[:12]",
"nipype/utils/tests/test_config.py::test_display_config[0]",
"nipype/utils/tests/test_config.py::test_display_config[1]",
"nipype/utils/tests/test_config.py::test_display_config[2]",
"nipype/utils/tests/test_config.py::test_display_config[3]",
"nipype/utils/tests/test_config.py::test_display_config[4]",
"nipype/utils/tests/test_config.py::test_display_system[0]",
"nipype/utils/tests/test_config.py::test_display_system[1]",
"nipype/utils/tests/test_config.py::test_display_system[2]",
"nipype/utils/tests/test_config.py::test_display_system[3]",
"nipype/utils/tests/test_config.py::test_display_system[4]",
"nipype/utils/tests/test_config.py::test_display_config_and_system",
"nipype/utils/tests/test_config.py::test_display_noconfig_nosystem_patched",
"nipype/utils/tests/test_config.py::test_display_empty_patched",
"nipype/utils/tests/test_config.py::test_display_noconfig_nosystem_patched_oldxvfbwrapper",
"nipype/utils/tests/test_config.py::test_display_empty_patched_oldxvfbwrapper",
"nipype/utils/tests/test_config.py::test_display_noconfig_nosystem_notinstalled",
"nipype/utils/tests/test_config.py::test_display_empty_notinstalled",
"nipype/utils/tests/test_config.py::test_display_empty_macosx",
"nipype/utils/tests/test_config.py::test_cwd_cached"
]
| []
| Apache License 2.0 | 2,028 | [
"nipype/utils/config.py",
"CHANGES"
]
| [
"nipype/utils/config.py",
"CHANGES"
]
|
|
palantir__python-language-server-220 | 85907d6f7af5aaf1d6fd13c543327319b0c02282 | 2018-01-09 23:13:07 | db16c239dad05eaaa9ff3250e51fdd3c80ecf172 | gatesn: @lgeiger hopefully last iteration on this stuff!
lgeiger: Thanks for taking a look and maintaining this language server!
Unfortunately it looks like this doesn't fully fix the problem. I rebased #211 onto this branch and the bug is still partially present. I hope I'm missing something obvious here 😄
gatesn: Will merge this as it seems to resolve #210, then will look again at your rebased branch. Thanks for testing. | diff --git a/pyls/language_server.py b/pyls/language_server.py
index b898ae1..f310182 100644
--- a/pyls/language_server.py
+++ b/pyls/language_server.py
@@ -65,7 +65,7 @@ class LanguageServer(dispatcher.JSONRPCMethodDispatcher, JSONRPCServer):
pass
def m_initialize(self, **kwargs):
- log.debug("Language server intialized with %s", kwargs)
+ log.debug("Language server initialized with %s", kwargs)
if 'rootUri' in kwargs:
self.root_uri = kwargs['rootUri']
elif 'rootPath' in kwargs:
diff --git a/pyls/plugins/pycodestyle_lint.py b/pyls/plugins/pycodestyle_lint.py
index d157a38..2940166 100644
--- a/pyls/plugins/pycodestyle_lint.py
+++ b/pyls/plugins/pycodestyle_lint.py
@@ -12,12 +12,12 @@ def pyls_lint(config, document):
log.debug("Got pycodestyle settings: %s", settings)
opts = {
- 'exclude': settings.get('exclude'),
- 'filename': settings.get('filename'),
+ 'exclude': ','.join(settings.get('exclude') or []),
+ 'filename': ','.join(settings.get('filename') or []),
'hang_closing': settings.get('hangClosing'),
- 'ignore': settings.get('ignore'),
+ 'ignore': ','.join(settings.get('ignore') or []),
'max_line_length': settings.get('maxLineLength'),
- 'select': settings.get('select'),
+ 'select': ','.join(settings.get('select') or []),
}
styleguide = pycodestyle.StyleGuide({k: v for k, v in opts.items() if v is not None})
@@ -27,6 +27,7 @@ def pyls_lint(config, document):
)
c.check_all()
diagnostics = c.report.diagnostics
+
return diagnostics
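To make the fix above concrete, here is a small standalone sketch (hypothetical settings values, not pyls's real config plumbing) of why list-valued settings must be joined: pycodestyle expects options like `ignore` and `select` as comma-separated strings, not Python lists.

```python
# Settings as they arrive from the editor: lists (or None), not strings.
settings = {'ignore': ['W191', 'W391'], 'select': None}

opts = {
    'ignore': ','.join(settings.get('ignore') or []),
    'select': ','.join(settings.get('select') or []),
}
print(opts['ignore'])  # 'W191,W391' -- the form pycodestyle understands
print(opts['select'])  # ''          -- falsy, filtered out before reaching StyleGuide
```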
| pycodestyle config not being read, seeing runtime errors since 0.12.0
Since upgrading to 0.12.1 (from 0.11.1) I noticed that I was getting D100 and D103 docstring warnings in my code, even though I ignored them via `$HOME/.config/pycodestyle`. I tried adding a `setup.cfg` to the project directory but I still got the warnings.
When running `pyls` (as `pyls --tcp --port 9001 -v`) in TCP mode I saw the following output after loading a Python file. Could the issue I'm seeing be related to the pydocstyle error?
```
[...]
2017-12-25 11:09:59,221 UTC - INFO - pyls.config.config - Loaded pyls plugin pycodestyle from <module 'pyls.plugins.pycodestyle_lint' from '/home/terr/.local/lib/python3.5/site-packages/pyls/plugins/pycodestyle_lint.py'>
[...]
2017-12-25 11:04:15,188 UTC - INFO - pyls.config.config - Loaded pyls plugin mccabe from <module 'pyls.plugins.mccabe_lint' from '/home/terr/.local/lib/python3.5/site-packages/pyls/plugins/mccabe_lint.py'>
2017-12-25 11:04:15,188 UTC - INFO - pyls.config.config - Loaded pyls plugin jedi_signature_help from <module 'pyls.plugins.signature' from '/home/terr/.local/lib/python3.5/site-packages/pyls/plugins/signature.py'>
Usage: pydocstyle [options] [<file|dir>...]
pyls: error: no such option: --tcp
```
I'm assuming the "no such option: --tcp" error doesn't occur in IO mode. I've got the most recent version of `pydocstyle` installed, 2.1.1.
I also tried 0.12.0, where I got the same output and results (warnings about D100/D103). Version 0.11.1 still works fine, both in IO and TCP mode. | palantir/python-language-server | diff --git a/test/plugins/test_pycodestyle_lint.py b/test/plugins/test_pycodestyle_lint.py
index a93b513..028997f 100644
--- a/test/plugins/test_pycodestyle_lint.py
+++ b/test/plugins/test_pycodestyle_lint.py
@@ -86,4 +86,9 @@ def test_pycodestyle_config(workspace):
# And make sure we only get one warning
diags = pycodestyle_lint.pyls_lint(config, doc)
assert not [d for d in diags if d['code'] == 'W191']
- assert [d for d in diags if d['code'] == 'W391']
+
+ # Ignore both warnings
+ config.update({'plugins': {'pycodestyle': {'ignore': ['W191', 'W391']}}})
+ # And make sure we get neither
+ assert not [d for d in diags if d['code'] == 'W191']
+ assert not [d for d in diags if d['code'] == 'W391']
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 2
} | 0.12 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cachetools==5.5.2
chardet==5.2.0
colorama==0.4.6
configparser==7.2.0
coverage==7.8.0
distlib==0.3.9
exceptiongroup==1.2.2
filelock==3.18.0
future==1.0.0
iniconfig==2.1.0
jedi==0.19.2
json-rpc==1.15.0
mccabe==0.7.0
packaging==24.2
parso==0.8.4
platformdirs==4.3.7
pluggy==1.5.0
pycodestyle==2.13.0
pydocstyle==6.3.0
pyflakes==3.3.1
pyproject-api==1.9.0
pytest==8.3.5
pytest-cov==6.0.0
-e git+https://github.com/palantir/python-language-server.git@85907d6f7af5aaf1d6fd13c543327319b0c02282#egg=python_language_server
pytoolconfig==1.3.1
rope==1.13.0
snowballstemmer==2.2.0
tomli==2.2.1
tox==4.25.0
typing_extensions==4.13.0
versioneer==0.29
virtualenv==20.29.3
yapf==0.43.0
| name: python-language-server
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cachetools==5.5.2
- chardet==5.2.0
- colorama==0.4.6
- configparser==7.2.0
- coverage==7.8.0
- distlib==0.3.9
- exceptiongroup==1.2.2
- filelock==3.18.0
- future==1.0.0
- iniconfig==2.1.0
- jedi==0.19.2
- json-rpc==1.15.0
- mccabe==0.7.0
- packaging==24.2
- parso==0.8.4
- platformdirs==4.3.7
- pluggy==1.5.0
- pycodestyle==2.13.0
- pydocstyle==6.3.0
- pyflakes==3.3.1
- pyproject-api==1.9.0
- pytest==8.3.5
- pytest-cov==6.0.0
- pytoolconfig==1.3.1
- rope==1.13.0
- snowballstemmer==2.2.0
- tomli==2.2.1
- tox==4.25.0
- typing-extensions==4.13.0
- versioneer==0.29
- virtualenv==20.29.3
- yapf==0.43.0
prefix: /opt/conda/envs/python-language-server
| [
"test/plugins/test_pycodestyle_lint.py::test_pycodestyle_config"
]
| []
| [
"test/plugins/test_pycodestyle_lint.py::test_pycodestyle"
]
| []
| MIT License | 2,029 | [
"pyls/language_server.py",
"pyls/plugins/pycodestyle_lint.py"
]
| [
"pyls/language_server.py",
"pyls/plugins/pycodestyle_lint.py"
]
|
dpkp__kafka-python-1338 | a69320b8e3199fa9d7cfa3947a242e699a045c3b | 2018-01-10 00:58:02 | 618c5051493693c1305aa9f08e8a0583d5fcf0e3 | diff --git a/kafka/coordinator/base.py b/kafka/coordinator/base.py
index 30b9c40..24412c9 100644
--- a/kafka/coordinator/base.py
+++ b/kafka/coordinator/base.py
@@ -377,19 +377,23 @@ class BaseCoordinator(object):
# before the pending rebalance has completed.
if self.join_future is None:
self.state = MemberState.REBALANCING
- self.join_future = self._send_join_group_request()
+ future = self._send_join_group_request()
+
+ self.join_future = future # this should happen before adding callbacks
# handle join completion in the callback so that the
# callback will be invoked even if the consumer is woken up
# before finishing the rebalance
- self.join_future.add_callback(self._handle_join_success)
+ future.add_callback(self._handle_join_success)
# we handle failures below after the request finishes.
# If the join completes after having been woken up, the
# exception is ignored and we will rejoin
- self.join_future.add_errback(self._handle_join_failure)
+ future.add_errback(self._handle_join_failure)
+
+ else:
+ future = self.join_future
- future = self.join_future
self._client.poll(future=future)
if future.failed():
| AttributeError: 'NoneType' object has no attribute 'failed'
Via #1315 comments:
```
Traceback (most recent call last):
File "./client_staging.py", line 53, in <module>
results = consumer.poll(timeout_ms=10000, max_records=1)
File "/usr/local/lib/python2.7/dist-packages/kafka/consumer/group.py", line 601, in poll
records = self._poll_once(remaining, max_records)
File "/usr/local/lib/python2.7/dist-packages/kafka/consumer/group.py", line 621, in _poll_once
self._coordinator.poll()
File "/usr/local/lib/python2.7/dist-packages/kafka/coordinator/consumer.py", line 271, in poll
self.ensure_active_group()
File "/usr/local/lib/python2.7/dist-packages/kafka/coordinator/base.py", line 401, in ensure_active_group
if future.failed():
AttributeError: 'NoneType' object has no attribute 'failed'
``` | dpkp/kafka-python | diff --git a/test/test_coordinator.py b/test/test_coordinator.py
index 7dc0e04..f567369 100644
--- a/test/test_coordinator.py
+++ b/test/test_coordinator.py
@@ -620,3 +620,16 @@ def test_lookup_coordinator_failure(mocker, coordinator):
return_value=Future().failure(Exception('foobar')))
future = coordinator.lookup_coordinator()
assert future.failed()
+
+
+def test_ensure_active_group(mocker, coordinator):
+ coordinator._subscription.subscribe(topics=['foobar'])
+ mocker.patch.object(coordinator, 'coordinator_unknown', return_value=False)
+ mocker.patch.object(coordinator, '_send_join_group_request', return_value=Future().success(True))
+ mocker.patch.object(coordinator, 'need_rejoin', side_effect=[True, True, False])
+ mocker.patch.object(coordinator, '_on_join_complete')
+ mocker.patch.object(coordinator, '_heartbeat_thread')
+
+ coordinator.ensure_active_group()
+
+ coordinator._send_join_group_request.assert_called_once_with()
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_issue_reference"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 1.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-catchlog",
"pytest-sugar",
"pytest-mock",
"mock",
"python-snappy",
"lz4",
"xxhash"
],
"pre_install": [
"apt-get update",
"apt-get install -y libsnappy-dev"
],
"python": "3.6",
"reqs_path": [
"docs/requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
cramjam==2.5.0
docutils==0.18.1
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
Jinja2==3.0.3
-e git+https://github.com/dpkp/kafka-python.git@a69320b8e3199fa9d7cfa3947a242e699a045c3b#egg=kafka_python
lz4==3.1.10
MarkupSafe==2.0.1
mock==5.2.0
packaging==21.3
pluggy==1.0.0
pockets==0.9.1
py==1.11.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytest-catchlog==1.2.2
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-sugar==0.9.6
python-snappy==0.7.3
pytz==2025.2
requests==2.27.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-napoleon==0.7
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
termcolor==1.1.0
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
xxhash==3.2.0
zipp==3.6.0
| name: kafka-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- coverage==6.2
- cramjam==2.5.0
- docutils==0.18.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jinja2==3.0.3
- lz4==3.1.10
- markupsafe==2.0.1
- mock==5.2.0
- packaging==21.3
- pluggy==1.0.0
- pockets==0.9.1
- py==1.11.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-catchlog==1.2.2
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-sugar==0.9.6
- python-snappy==0.7.3
- pytz==2025.2
- requests==2.27.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-napoleon==0.7
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- termcolor==1.1.0
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- xxhash==3.2.0
- zipp==3.6.0
prefix: /opt/conda/envs/kafka-python
| [
"test/test_coordinator.py::test_ensure_active_group"
]
| []
| [
"test/test_coordinator.py::test_init",
"test/test_coordinator.py::test_autocommit_enable_api_version[api_version0]",
"test/test_coordinator.py::test_autocommit_enable_api_version[api_version1]",
"test/test_coordinator.py::test_autocommit_enable_api_version[api_version2]",
"test/test_coordinator.py::test_autocommit_enable_api_version[api_version3]",
"test/test_coordinator.py::test_protocol_type",
"test/test_coordinator.py::test_group_protocols",
"test/test_coordinator.py::test_pattern_subscription[api_version0]",
"test/test_coordinator.py::test_pattern_subscription[api_version1]",
"test/test_coordinator.py::test_pattern_subscription[api_version2]",
"test/test_coordinator.py::test_pattern_subscription[api_version3]",
"test/test_coordinator.py::test_lookup_assignor",
"test/test_coordinator.py::test_join_complete",
"test/test_coordinator.py::test_subscription_listener",
"test/test_coordinator.py::test_subscription_listener_failure",
"test/test_coordinator.py::test_perform_assignment",
"test/test_coordinator.py::test_on_join_prepare",
"test/test_coordinator.py::test_need_rejoin",
"test/test_coordinator.py::test_refresh_committed_offsets_if_needed",
"test/test_coordinator.py::test_fetch_committed_offsets",
"test/test_coordinator.py::test_close",
"test/test_coordinator.py::test_commit_offsets_async",
"test/test_coordinator.py::test_commit_offsets_sync",
"test/test_coordinator.py::test_maybe_auto_commit_offsets_sync[api_version0-foobar-True-None-False-False-True-False]",
"test/test_coordinator.py::test_maybe_auto_commit_offsets_sync[api_version1-foobar-True-None-True-True-False-False]",
"test/test_coordinator.py::test_maybe_auto_commit_offsets_sync[api_version2-foobar-True-None-True-True-False-False]",
"test/test_coordinator.py::test_maybe_auto_commit_offsets_sync[api_version3-foobar-False-None-False-False-False-False]",
"test/test_coordinator.py::test_maybe_auto_commit_offsets_sync[api_version4-foobar-True-error4-True-True-True-False]",
"test/test_coordinator.py::test_maybe_auto_commit_offsets_sync[api_version5-foobar-True-error5-True-True-True-False]",
"test/test_coordinator.py::test_maybe_auto_commit_offsets_sync[api_version6-foobar-True-error6-True-True-True-False]",
"test/test_coordinator.py::test_maybe_auto_commit_offsets_sync[api_version7-foobar-True-error7-True-True-False-True]",
"test/test_coordinator.py::test_maybe_auto_commit_offsets_sync[api_version8-foobar-True-None-True-True-False-False]",
"test/test_coordinator.py::test_maybe_auto_commit_offsets_sync[api_version9-None-True-None-False-False-True-False]",
"test/test_coordinator.py::test_send_offset_commit_request_fail",
"test/test_coordinator.py::test_send_offset_commit_request_versions[api_version0-OffsetCommitRequest_v0]",
"test/test_coordinator.py::test_send_offset_commit_request_versions[api_version1-OffsetCommitRequest_v1]",
"test/test_coordinator.py::test_send_offset_commit_request_versions[api_version2-OffsetCommitRequest_v2]",
"test/test_coordinator.py::test_send_offset_commit_request_failure",
"test/test_coordinator.py::test_send_offset_commit_request_success",
"test/test_coordinator.py::test_handle_offset_commit_response[response0-GroupAuthorizationFailedError-False-False]",
"test/test_coordinator.py::test_handle_offset_commit_response[response1-OffsetMetadataTooLargeError-False-False]",
"test/test_coordinator.py::test_handle_offset_commit_response[response2-InvalidCommitOffsetSizeError-False-False]",
"test/test_coordinator.py::test_handle_offset_commit_response[response3-GroupLoadInProgressError-False-False]",
"test/test_coordinator.py::test_handle_offset_commit_response[response4-GroupCoordinatorNotAvailableError-True-False]",
"test/test_coordinator.py::test_handle_offset_commit_response[response5-NotCoordinatorForGroupError-True-False]",
"test/test_coordinator.py::test_handle_offset_commit_response[response6-RequestTimedOutError-True-False]",
"test/test_coordinator.py::test_handle_offset_commit_response[response7-CommitFailedError-False-True]",
"test/test_coordinator.py::test_handle_offset_commit_response[response8-CommitFailedError-False-True]",
"test/test_coordinator.py::test_handle_offset_commit_response[response9-CommitFailedError-False-True]",
"test/test_coordinator.py::test_handle_offset_commit_response[response10-InvalidTopicError-False-False]",
"test/test_coordinator.py::test_handle_offset_commit_response[response11-TopicAuthorizationFailedError-False-False]",
"test/test_coordinator.py::test_send_offset_fetch_request_fail",
"test/test_coordinator.py::test_send_offset_fetch_request_versions[api_version0-OffsetFetchRequest_v0]",
"test/test_coordinator.py::test_send_offset_fetch_request_versions[api_version1-OffsetFetchRequest_v1]",
"test/test_coordinator.py::test_send_offset_fetch_request_versions[api_version2-OffsetFetchRequest_v1]",
"test/test_coordinator.py::test_send_offset_fetch_request_failure",
"test/test_coordinator.py::test_send_offset_fetch_request_success",
"test/test_coordinator.py::test_handle_offset_fetch_response[response0-GroupLoadInProgressError-False]",
"test/test_coordinator.py::test_handle_offset_fetch_response[response1-NotCoordinatorForGroupError-True]",
"test/test_coordinator.py::test_handle_offset_fetch_response[response2-TopicAuthorizationFailedError-False]",
"test/test_coordinator.py::test_handle_offset_fetch_response[response3-None-False]",
"test/test_coordinator.py::test_heartbeat",
"test/test_coordinator.py::test_lookup_coordinator_failure"
]
| []
| Apache License 2.0 | 2,030 | [
"kafka/coordinator/base.py"
]
| [
"kafka/coordinator/base.py"
]
|
|
ultrabug__py3status-1221 | dcd3dda64b82e536cfd0233691d374a22e96aeac | 2018-01-10 08:09:08 | dcd3dda64b82e536cfd0233691d374a22e96aeac | tobes: IMHO this breaks the formatter, because the original tests fail.
With #1095 I do not actually understand what the issue is that you are describing. Maybe if I understood that, this would make more sense.
Now maybe the current behaviour of the formatter is incorrect. Then we should have a clear discussion about that and how to improve it.
lasers: Brief explanation. Apply the git patch. I included some examples. Test each as you go.
```
+ # format = 'A, B, C // {true}' # IS OK | A, B, C // True
+ # format = 'A, B, C // {false}' # IS NOT OK |
+ # format = 'A, B, C // {none}' # IS NOT OK |
+ # format = 'A, B, C // {false} ' # IS OK | A, B, C // False
+ # format = 'A, B, C // {none} ' # IS OK | A, B, C // None
```
Different examples.
* `'Repeat: {repeat}'` -- Expected -- `'Repeat: False'` -- Got `''`.
* `'Hi. Long string. {none}'` -- Expected -- `Hi. Long string. None` -- Got `''`.
First, I never knew about this `{false}` --> `''` and `{none}` --> `''` until I made a pull request... and failed the tests here. Color me surprised. I think a few tests may be wrong (fixed now), because it prints `True` for `{true}` but just disappears on `{false}` and `{none}`.
tobes: > First, I never knew about this {false} --> '' and {none} --> '' until I made a pull request
That is intentional. Now whether it is a good idea is a different issue.
The main issue is that the formatter still lacks documentation.
lasers: >That is intentional. Now whether it is a good idea is a different issue.
I amended two tests in this commit. Check them out and decide.
And we can have as many `{false}`, `{none}` as we want... and still get nothing.
```
diff --git a/py3status/modules/static_string.py b/py3status/modules/static_string.py
index dbcec8c6..e15b30a4 100644
--- a/py3status/modules/static_string.py
+++ b/py3status/modules/static_string.py
@@ -16,12 +16,15 @@ class Py3status:
"""
"""
# available configuration parameters
- format = 'Hello, world!'
+ format = ' '.join(['{false} {none}' for x in range(5)])
+ # format += ' '
+ # format += ' {true}'
def static_string(self):
return {
'cached_until': self.py3.CACHE_FOREVER,
- 'full_text': self.py3.safe_format(self.format),
+ 'full_text': self.py3.safe_format(
+ self.format, {'true': True, 'false': False, 'none': None}),
}
```
```
$ python3 static_string.py
[]
```
```
$ python3 static_string.py # add space
{'full_text': 'False None False None False None False None False None ', 'cached_until': -1}
```
```
$ python3 static_string.py # add {true}
{'cached_until': -1, 'full_text': 'False None False None False None False None False None True'}
```
It's been said many times that the truth will set you free. I guess it was really true.
ultrabug: @tobes bump plz? | diff --git a/py3status/formatter.py b/py3status/formatter.py
index e265819e..db2875de 100644
--- a/py3status/formatter.py
+++ b/py3status/formatter.py
@@ -268,7 +268,9 @@ class Placeholder:
output = u'{%s%s}' % (self.key, self.format)
value = value_ = output.format(**{self.key: value})
- if block.commands.not_zero:
+ if block.parent is None:
+ valid = True
+ elif block.commands.not_zero:
valid = value_ not in ['', 'None', None, False, '0', '0.0', 0, 0.0]
else:
# '', None, and False are ignored
| Formatting returns empty when closing with a False or None placeholder
Formatting returns empty when closing with a `False` or `None` placeholder.
```diff
diff --git a/py3status/modules/static_string.py b/py3status/modules/static_string.py
index dbcec8c6..593b3740 100644
--- a/py3status/modules/static_string.py
+++ b/py3status/modules/static_string.py
@@ -18,10 +18,17 @@ class Py3status:
# available configuration parameters
format = 'Hello, world!'
+ # format = 'A, B, C // {true}' # IS OK | A, B, C // True
+ # format = 'A, B, C // {false}' # IS NOT OK |
+ # format = 'A, B, C // {none}' # IS NOT OK |
+ # format = 'A, B, C // {false} ' # IS OK | A, B, C // False
+ # format = 'A, B, C // {none} ' # IS OK | A, B, C // None
+
def static_string(self):
+ new_dict = {'true': True, 'false': False, 'none': None}
return {
'cached_until': self.py3.CACHE_FOREVER,
- 'full_text': self.py3.safe_format(self.format),
+ 'full_text': self.py3.safe_format(self.format, new_dict),
}
``` | ultrabug/py3status | diff --git a/tests/test_formatter.py b/tests/test_formatter.py
index 0d1bf9b5..76febab4 100644
--- a/tests/test_formatter.py
+++ b/tests/test_formatter.py
@@ -296,10 +296,18 @@ def test_26():
def test_27():
- run_formatter({'format': '{None}', 'expected': '', })
+ run_formatter({'format': '{None}', 'expected': 'None', })
def test_27a():
+ run_formatter({'format': '{None} {no}', 'expected': 'None False', })
+
+
+def test_27b():
+ run_formatter({'format': '[Hello {None}] {no}', 'expected': ' False', })
+
+
+def test_27c():
run_formatter({'format': '[Hi, my name is {None_str}]', 'expected': '', })
@@ -312,7 +320,7 @@ def test_29():
def test_30():
- run_formatter({'format': '{no}', 'expected': '', })
+ run_formatter({'format': '{no}', 'expected': 'False', })
def test_31():
@@ -1134,7 +1142,7 @@ def test_module_true_value():
def test_module_false_value():
- run_formatter({'format': '{module_false}', 'expected': ''})
+ run_formatter({'format': '{module_false}', 'expected': 'False'})
def test_zero_format_1():
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 1
},
"num_modified_files": 1
} | 3.12 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-flake8"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
flake8==5.0.4
importlib-metadata==4.2.0
iniconfig==1.1.1
mccabe==0.7.0
packaging==21.3
pluggy==1.0.0
py==1.11.0
-e git+https://github.com/ultrabug/py3status.git@dcd3dda64b82e536cfd0233691d374a22e96aeac#egg=py3status
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing==3.1.4
pytest==7.0.1
pytest-flake8==1.1.1
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: py3status
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- flake8==5.0.4
- importlib-metadata==4.2.0
- iniconfig==1.1.1
- mccabe==0.7.0
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-flake8==1.1.1
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/py3status
| [
"tests/test_formatter.py::test_27",
"tests/test_formatter.py::test_27a",
"tests/test_formatter.py::test_27b",
"tests/test_formatter.py::test_30",
"tests/test_formatter.py::test_module_false_value"
]
| []
| [
"tests/test_formatter.py::test_1",
"tests/test_formatter.py::test_2",
"tests/test_formatter.py::test_3",
"tests/test_formatter.py::test_4",
"tests/test_formatter.py::test_5",
"tests/test_formatter.py::test_6",
"tests/test_formatter.py::test_7",
"tests/test_formatter.py::test_8",
"tests/test_formatter.py::test_9",
"tests/test_formatter.py::test_10",
"tests/test_formatter.py::test_11",
"tests/test_formatter.py::test_12",
"tests/test_formatter.py::test_13",
"tests/test_formatter.py::test_14",
"tests/test_formatter.py::test_15",
"tests/test_formatter.py::test_16",
"tests/test_formatter.py::test_16a",
"tests/test_formatter.py::test_16b",
"tests/test_formatter.py::test_17",
"tests/test_formatter.py::test_18",
"tests/test_formatter.py::test_19",
"tests/test_formatter.py::test_20",
"tests/test_formatter.py::test_21",
"tests/test_formatter.py::test_22",
"tests/test_formatter.py::test_23",
"tests/test_formatter.py::test_24",
"tests/test_formatter.py::test_24a",
"tests/test_formatter.py::test_24b",
"tests/test_formatter.py::test_25",
"tests/test_formatter.py::test_26",
"tests/test_formatter.py::test_27c",
"tests/test_formatter.py::test_28",
"tests/test_formatter.py::test_29",
"tests/test_formatter.py::test_31",
"tests/test_formatter.py::test_32",
"tests/test_formatter.py::test_33",
"tests/test_formatter.py::test_34",
"tests/test_formatter.py::test_35",
"tests/test_formatter.py::test_36",
"tests/test_formatter.py::test_37",
"tests/test_formatter.py::test_38",
"tests/test_formatter.py::test_39",
"tests/test_formatter.py::test_40",
"tests/test_formatter.py::test_41",
"tests/test_formatter.py::test_42",
"tests/test_formatter.py::test_43",
"tests/test_formatter.py::test_44",
"tests/test_formatter.py::test_45",
"tests/test_formatter.py::test_46",
"tests/test_formatter.py::test_47",
"tests/test_formatter.py::test_48",
"tests/test_formatter.py::test_49",
"tests/test_formatter.py::test_50",
"tests/test_formatter.py::test_51",
"tests/test_formatter.py::test_52",
"tests/test_formatter.py::test_53",
"tests/test_formatter.py::test_54",
"tests/test_formatter.py::test_55",
"tests/test_formatter.py::test_56",
"tests/test_formatter.py::test_57",
"tests/test_formatter.py::test_58",
"tests/test_formatter.py::test_58a",
"tests/test_formatter.py::test_59",
"tests/test_formatter.py::test_59a",
"tests/test_formatter.py::test_60",
"tests/test_formatter.py::test_61",
"tests/test_formatter.py::test_62",
"tests/test_formatter.py::test_63",
"tests/test_formatter.py::test_64",
"tests/test_formatter.py::test_65",
"tests/test_formatter.py::test_66",
"tests/test_formatter.py::test_67",
"tests/test_formatter.py::test_68",
"tests/test_formatter.py::test_69",
"tests/test_formatter.py::test_70",
"tests/test_formatter.py::test_70a",
"tests/test_formatter.py::test_71",
"tests/test_formatter.py::test_72",
"tests/test_formatter.py::test_73",
"tests/test_formatter.py::test_74",
"tests/test_formatter.py::test_75",
"tests/test_formatter.py::test_76",
"tests/test_formatter.py::test_77",
"tests/test_formatter.py::test_78",
"tests/test_formatter.py::test_else_true",
"tests/test_formatter.py::test_else_false",
"tests/test_formatter.py::test_color_name_1",
"tests/test_formatter.py::test_color_name_2",
"tests/test_formatter.py::test_color_name_3",
"tests/test_formatter.py::test_color_name_4",
"tests/test_formatter.py::test_color_name_4a",
"tests/test_formatter.py::test_color_name_5",
"tests/test_formatter.py::test_color_name_5a",
"tests/test_formatter.py::test_color_name_6",
"tests/test_formatter.py::test_color_name_7",
"tests/test_formatter.py::test_color_name_7a",
"tests/test_formatter.py::test_color_1",
"tests/test_formatter.py::test_color_1a",
"tests/test_formatter.py::test_color_2",
"tests/test_formatter.py::test_color_3",
"tests/test_formatter.py::test_color_4",
"tests/test_formatter.py::test_color_5",
"tests/test_formatter.py::test_color_6",
"tests/test_formatter.py::test_color_7",
"tests/test_formatter.py::test_color_7a",
"tests/test_formatter.py::test_color_8",
"tests/test_formatter.py::test_color_8a",
"tests/test_formatter.py::test_color_9",
"tests/test_formatter.py::test_color_9a",
"tests/test_formatter.py::test_composite_1",
"tests/test_formatter.py::test_composite_2",
"tests/test_formatter.py::test_composite_3",
"tests/test_formatter.py::test_composite_4",
"tests/test_formatter.py::test_composite_5",
"tests/test_formatter.py::test_composite_6",
"tests/test_formatter.py::test_attr_getter",
"tests/test_formatter.py::test_min_length_1",
"tests/test_formatter.py::test_min_length_2",
"tests/test_formatter.py::test_min_length_3",
"tests/test_formatter.py::test_min_length_4",
"tests/test_formatter.py::test_min_length_5",
"tests/test_formatter.py::test_min_length_6",
"tests/test_formatter.py::test_numeric_strings_1",
"tests/test_formatter.py::test_numeric_strings_2",
"tests/test_formatter.py::test_numeric_strings_3",
"tests/test_formatter.py::test_numeric_strings_4",
"tests/test_formatter.py::test_numeric_strings_5",
"tests/test_formatter.py::test_numeric_strings_6",
"tests/test_formatter.py::test_not_zero_1",
"tests/test_formatter.py::test_not_zero_2",
"tests/test_formatter.py::test_not_zero_3",
"tests/test_formatter.py::test_not_zero_4",
"tests/test_formatter.py::test_not_zero_5",
"tests/test_formatter.py::test_not_zero_6",
"tests/test_formatter.py::test_not_zero_7",
"tests/test_formatter.py::test_not_zero_8",
"tests/test_formatter.py::test_not_zero_9",
"tests/test_formatter.py::test_not_zero_10",
"tests/test_formatter.py::test_not_zero_11",
"tests/test_formatter.py::test_bad_composite_color",
"tests/test_formatter.py::test_soft_1",
"tests/test_formatter.py::test_soft_2",
"tests/test_formatter.py::test_soft_3",
"tests/test_formatter.py::test_soft_4",
"tests/test_formatter.py::test_soft_5",
"tests/test_formatter.py::test_soft_6",
"tests/test_formatter.py::test_soft_7",
"tests/test_formatter.py::test_module_true",
"tests/test_formatter.py::test_module_false",
"tests/test_formatter.py::test_module_true_value",
"tests/test_formatter.py::test_zero_format_1",
"tests/test_formatter.py::test_zero_format_2",
"tests/test_formatter.py::test_zero_format_3",
"tests/test_formatter.py::test_zero_format_4",
"tests/test_formatter.py::test_inherit_not_zero_1",
"tests/test_formatter.py::test_inherit_not_zero_2",
"tests/test_formatter.py::test_inherit_not_zero_3",
"tests/test_formatter.py::test_inherit_show_1",
"tests/test_formatter.py::test_inherit_color_1",
"tests/test_formatter.py::test_inherit_color_2",
"tests/test_formatter.py::test_conditions_1",
"tests/test_formatter.py::test_conditions_2",
"tests/test_formatter.py::test_conditions_3",
"tests/test_formatter.py::test_conditions_4",
"tests/test_formatter.py::test_conditions_5",
"tests/test_formatter.py::test_conditions_6",
"tests/test_formatter.py::test_conditions_7",
"tests/test_formatter.py::test_conditions_8",
"tests/test_formatter.py::test_conditions_9",
"tests/test_formatter.py::test_conditions_10",
"tests/test_formatter.py::test_conditions_11",
"tests/test_formatter.py::test_conditions_12",
"tests/test_formatter.py::test_conditions_13",
"tests/test_formatter.py::test_conditions_14",
"tests/test_formatter.py::test_conditions_15",
"tests/test_formatter.py::test_conditions_16",
"tests/test_formatter.py::test_conditions_17",
"tests/test_formatter.py::test_conditions_18",
"tests/test_formatter.py::test_conditions_19",
"tests/test_formatter.py::test_conditions_20",
"tests/test_formatter.py::test_conditions_21",
"tests/test_formatter.py::test_conditions_22",
"tests/test_formatter.py::test_conditions_23",
"tests/test_formatter.py::test_trailing_zeroes_1",
"tests/test_formatter.py::test_trailing_zeroes_2",
"tests/test_formatter.py::test_ceiling_numbers_1",
"tests/test_formatter.py::test_ceiling_numbers_2"
]
| []
| BSD 3-Clause "New" or "Revised" License | 2,031 | [
"py3status/formatter.py"
]
| [
"py3status/formatter.py"
]
|
NeuralEnsemble__python-neo-454 | 6b6c7ef2d148de5431cbd8f254430251c3d34dde | 2018-01-10 10:48:54 | f0285a7ab15ff6535d3e6736e0163c4fa6aea091 | pep8speaks: Hello @apdavison! Thanks for submitting the PR.
- In the file [`neo/core/epoch.py`](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/core/epoch.py), following are the PEP8 issues:
> [Line 22:1](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/core/epoch.py#L22): [E302](https://duckduckgo.com/?q=pep8%20E302) expected 2 blank lines, found 1
> [Line 23:17](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/core/epoch.py#L23): [E127](https://duckduckgo.com/?q=pep8%20E127) continuation line over-indented for visual indent
> [Line 23:75](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/core/epoch.py#L23): [E251](https://duckduckgo.com/?q=pep8%20E251) unexpected spaces around keyword / parameter equals
> [Line 23:77](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/core/epoch.py#L23): [E251](https://duckduckgo.com/?q=pep8%20E251) unexpected spaces around keyword / parameter equals
> [Line 26:70](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/core/epoch.py#L26): [W291](https://duckduckgo.com/?q=pep8%20W291) trailing whitespace
> [Line 28:15](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/core/epoch.py#L28): [E201](https://duckduckgo.com/?q=pep8%20E201) whitespace after '('
> [Line 28:100](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/core/epoch.py#L28): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (112 > 99 characters)
> [Line 29:18](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/core/epoch.py#L29): [E127](https://duckduckgo.com/?q=pep8%20E127) continuation line over-indented for visual indent
> [Line 33:1](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/core/epoch.py#L33): [E302](https://duckduckgo.com/?q=pep8%20E302) expected 2 blank lines, found 1
> [Line 118:5](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/core/epoch.py#L118): [E301](https://duckduckgo.com/?q=pep8%20E301) expected 1 blank line, found 0
> [Line 125:60](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/core/epoch.py#L125): [W291](https://duckduckgo.com/?q=pep8%20W291) trailing whitespace
> [Line 207:67](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/core/epoch.py#L207): [W291](https://duckduckgo.com/?q=pep8%20W291) trailing whitespace
> [Line 244:9](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/core/epoch.py#L244): [E265](https://duckduckgo.com/?q=pep8%20E265) block comment should start with '# '
> [Line 245:9](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/core/epoch.py#L245): [E265](https://duckduckgo.com/?q=pep8%20E265) block comment should start with '# '
- In the file [`neo/test/coretest/test_epoch.py`](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py), following are the PEP8 issues:
> [Line 240:58](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L240): [E231](https://duckduckgo.com/?q=pep8%20E231) missing whitespace after ','
> [Line 256:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L256): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 257:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L257): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 258:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L258): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 259:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L259): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 262:1](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L262): [W293](https://duckduckgo.com/?q=pep8%20W293) blank line contains whitespace
> [Line 290:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L290): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 293:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L293): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 294:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L294): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 295:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L295): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 298:1](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L298): [W293](https://duckduckgo.com/?q=pep8%20W293) blank line contains whitespace
> [Line 314:5](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L314): [E303](https://duckduckgo.com/?q=pep8%20E303) too many blank lines (2)
> [Line 325:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L325): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 326:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L326): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 327:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L327): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 328:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L328): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 331:1](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L331): [W293](https://duckduckgo.com/?q=pep8%20W293) blank line contains whitespace
> [Line 359:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L359): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 361:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L361): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 362:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L362): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 363:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L363): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 366:1](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L366): [W293](https://duckduckgo.com/?q=pep8%20W293) blank line contains whitespace
> [Line 380:1](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L380): [W293](https://duckduckgo.com/?q=pep8%20W293) blank line contains whitespace
> [Line 394:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L394): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 395:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L395): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 396:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L396): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 397:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L397): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 400:1](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L400): [W293](https://duckduckgo.com/?q=pep8%20W293) blank line contains whitespace
> [Line 414:1](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L414): [W293](https://duckduckgo.com/?q=pep8%20W293) blank line contains whitespace
> [Line 428:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L428): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 431:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L431): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 432:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L432): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 433:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L433): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 436:1](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L436): [W293](https://duckduckgo.com/?q=pep8%20W293) blank line contains whitespace
> [Line 450:1](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L450): [W293](https://duckduckgo.com/?q=pep8%20W293) blank line contains whitespace
> [Line 464:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L464): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 465:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L465): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 466:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L466): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 467:21](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L467): [E128](https://duckduckgo.com/?q=pep8%20E128) continuation line under-indented for visual indent
> [Line 470:1](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L470): [W293](https://duckduckgo.com/?q=pep8%20W293) blank line contains whitespace
> [Line 471:25](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L471): [E221](https://duckduckgo.com/?q=pep8%20E221) multiple spaces before operator
> [Line 484:1](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L484): [W293](https://duckduckgo.com/?q=pep8%20W293) blank line contains whitespace
> [Line 511:1](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L511): [W293](https://duckduckgo.com/?q=pep8%20W293) blank line contains whitespace
> [Line 541:1](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L541): [E302](https://duckduckgo.com/?q=pep8%20E302) expected 2 blank lines, found 1
> [Line 561:1](https://github.com/NeuralEnsemble/python-neo/blob/a51ab8b16efe9d342375b688c269567647e76f1e/neo/test/coretest/test_epoch.py#L561): [E305](https://duckduckgo.com/?q=pep8%20E305) expected 2 blank lines after class or function definition, found 1
coveralls:
[Coverage Status](https://coveralls.io/builds/14981826)
Coverage increased (+0.05%) to 49.221% when pulling **a51ab8b16efe9d342375b688c269567647e76f1e on apdavison:issue413** into **6fd9f49c8b0c497adfa777549cc97dc301f2dba2 on NeuralEnsemble:master**.
coveralls:
[Coverage Status](https://coveralls.io/builds/14981916)
Coverage increased (+0.05%) to 49.221% when pulling **cbcf404cdd0ca665b2da22360ef78d4a1c705f75 on apdavison:issue413** into **6fd9f49c8b0c497adfa777549cc97dc301f2dba2 on NeuralEnsemble:master**.
samuelgarcia: Hi Andrew. Could you fix the conflict in the file, so that a new test run is triggered on CircleCI?
Then we could merge.
bjoern1001001: I have a question about this, because I have been working on `SpikeTrain.__getitem__` for the last few days. It's rather general, but I noticed it here.
I noticed that what you wrote works for Epoch in cases like epoch[1], while it would raise an error for SpikeTrain, because when you create the new Epoch in `__getitem__`, times is a scalar quantity, NOT an array or Epoch. Creating a SpikeTrain the same way in `__getitem__` via `SpikeTrain(times=super(...).__getitem__(i), ...)` would raise an error about taking the length of an unsized object. Is it intended that Epochs can be created with a scalar quantity instead of a quantity array? And if so, shouldn't the scalar be converted to an array to ensure consistency?
A little example of what happens:
`epoch = neo.Epoch([1,2,3]*pq.s)`
`ep1 = epoch[1] # ep1 is now an Epoch with times=2*pq.s, not [2]*pq.s or array([2]*pq.s)`
This is because the following happens in `__getitem__`:
`times = super(...).__getitem__(1)`
times now becomes a scalar quantity: numpy's `__getitem__` returns a scalar instead of an array when called with an int index, and `pq.Quantity.__getitem__` then wraps that scalar in a Quantity.
`obj = Epoch(times=times, ...)`
This is not a problem, because there is no test about whether times is an array or not
`...`
`return obj # This returns obj, with times=2*pq.s`
Should it be like this or should a test for this be introduced?
Apart from that, I'd like to know whether any of you has deeper knowledge of `__getitem__`, because I noticed together with @JuliaSprenger that it returns a scalar quantity instead of a SpikeTrain object. I've been trying to change this, but it fails. Trying to copy what you did, I noticed that it works for Epoch, but not for SpikeTrain.
Again some code to illustrate what I mean:
What currently happens:
`st1 = SpikeTrain(times=[1,2,3]*pq.s, ...)`
`testobj = st1[1]`
testobj is now a (scalar) quantity object, not a SpikeTrain
`print(testobj) # This prints 2 s`
`new_st = st1[0:1]`
new_st is a SpikeTrain, because numpy returns a SpikeTrain array
`print(new_st) # This prints [1] s`
If one would do the same as you did in Epoch for SpikeTrain, it would raise an error
`st2 = st1[1] # This would raise an error`
What happens SpikeTrain `__getitem__`:
`times = super(...).__getitem__(1)`
times is now a scalar quantity just like in Epoch
`obj = SpikeTrain(times=times, ...)`
This will raise an error in `SpikeTrain.__new__` (line 220), because times is a scalar, and thus an unsized object: TypeError: len() of unsized object
On the other hand, calling things like `st1[0:1]` will work just fine, because numpy then returns an array (or a subclass, which is SpikeTrain in this case)
I then tried creating a new SpikeTrain if I get a quantity by creating a list with a single entry:
`if not isinstance(obj, SpikeTrain):`
`obj = SpikeTrain(times=obj.magnitude, t_stop=self.t_stop, units=self.units) # Error happens here`
This works for most cases, but it fails whenever `sorted(st1, st2)` is called, because apparently some kind of 'intermediate' SpikeTrains are generated that don't carry any attributes.
The error I then get is that the SpikeTrain object [referring to `self` here] has no attribute `t_stop`.
If anybody knows more about this stuff, I'd really appreciate your help.
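The scalar-vs-slice asymmetry described above is plain numpy behaviour and can be seen without any neo objects at all. A minimal sketch, assuming numpy and python-quantities are installed:
```
import numpy as np
import quantities as pq

q = np.array([1, 2, 3]) * pq.s
item = q[1]      # integer index -> numpy hands back a scalar
chunk = q[0:1]   # slice index   -> numpy preserves the ndarray subclass

print(type(item), item)    # scalar Quantity, e.g. "2 s" -- unsized
print(type(chunk), chunk)  # Quantity array, e.g. "[1] s"
print(len(chunk))          # 1
# len(item) raises TypeError: len() of unsized object -- the same error
# SpikeTrain.__new__ hits when it is handed a scalar `times`.
```
Any consistency fix in Epoch or SpikeTrain therefore has to decide explicitly whether an int index should re-wrap the scalar into a length-1 array or simply return it.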
bjoern1001001: I kept working on this problem and noticed that returning a SpikeTrain when `st[int]` is called would create more problems than it solves. So it seems to be fine that a scalar Quantity object is returned.
But in my opinion this should be consistent across all neo objects, which means that here in `Epoch.__getitem__`, too, an Epoch should be returned only when ep[slice] was called, not when ep[int] was called. Do you agree with this @apdavison @samuelgarcia?
This would also simplify the code a bit here: no new Epoch would have to be created; instead you would only have to check whether an Epoch was returned from `super(...).__getitem__()` and, if so, slice the durations accordingly, as is done in `SpikeTrain.__getitem__`.
On a more general level, as I wrote in the previous comment, I want to ask whether it makes sense to allow Epochs to be created with `times` being a scalar instead of an array. In the SpikeTrain class there are checks that fail if times is not a list or an array, whereas in Epoch nothing similar happens. If times is a scalar, then ep.times is simply a scalar quantity. Is there a reason for this, or should checks be implemented to ensure consistency? And if it is to be allowed, wouldn't it make sense to wrap the scalar in an array?
Apart from that, it is also not ensured that ep.labels and ep.durations have the same length as ep.times. I think this should also be checked in order to ensure consistency. | diff --git a/neo/core/epoch.py b/neo/core/epoch.py
index a5cd367c..359947f9 100644
--- a/neo/core/epoch.py
+++ b/neo/core/epoch.py
@@ -24,10 +24,10 @@ def _new_epoch(cls, times=None, durations=None, labels=None, units=None,
name=None, description=None, file_origin=None, annotations=None, segment=None):
'''
A function to map epoch.__new__ to function that
- does not do the unit checking. This is needed for pickle to work.
+ does not do the unit checking. This is needed for pickle to work.
'''
- e = Epoch(times=times, durations=durations, labels=labels, units=units, name=name, file_origin=file_origin,
- description=description, **annotations)
+ e = Epoch(times=times, durations=durations, labels=labels, units=units, name=name,
+ file_origin=file_origin, description=description, **annotations)
e.segment = segment
return e
@@ -151,6 +151,16 @@ class Epoch(BaseNeo, pq.Quantity):
label, time, dur in zip(labels, self.times, self.durations)]
return '<Epoch: %s>' % ', '.join(objs)
+ def __getitem__(self, i):
+ '''
+ Get the item or slice :attr:`i`.
+ '''
+ obj = Epoch(times=super(Epoch, self).__getitem__(i))
+ obj._copy_data_complement(self)
+ obj.durations = self.durations[i]
+ obj.labels = self.labels[i]
+ return obj
+
@property
def times(self):
return pq.Quantity(self)
@@ -232,10 +242,7 @@ class Epoch(BaseNeo, pq.Quantity):
_t_stop = np.inf
indices = (self >= _t_start) & (self <= _t_stop)
-
new_epc = self[indices]
- new_epc.durations = self.durations[indices]
- new_epc.labels = self.labels[indices]
return new_epc
def as_array(self, units=None):
| Slicing epochs only slices times, not durations
When slicing an epoch, only the times array is sliced; the durations array is not modified. This results in an incompatible number of time stamps and durations. | NeuralEnsemble/python-neo | diff --git a/neo/test/coretest/test_epoch.py b/neo/test/coretest/test_epoch.py
index 93f3acac..82449951 100644
--- a/neo/test/coretest/test_epoch.py
+++ b/neo/test/coretest/test_epoch.py
@@ -497,6 +497,28 @@ class TestEpoch(unittest.TestCase):
self.assertIsInstance(epc_as_q, pq.Quantity)
assert_array_equal(times * pq.ms, epc_as_q)
+ def test_getitem(self):
+ times = [2, 3, 4, 5]
+ durations = [0.1, 0.2, 0.3, 0.4]
+ labels = ["A", "B", "C", "D"]
+ epc = Epoch(times * pq.ms, durations=durations * pq.ms, labels=labels)
+ single_epoch = epc[2]
+ self.assertIsInstance(single_epoch, Epoch)
+ assert_array_equal(single_epoch.times, np.array([4.0]))
+ assert_array_equal(single_epoch.durations, np.array([0.3]))
+ assert_array_equal(single_epoch.labels, np.array(["C"]))
+
+ def test_slice(self):
+ times = [2, 3, 4, 5]
+ durations = [0.1, 0.2, 0.3, 0.4]
+ labels = ["A", "B", "C", "D"]
+ epc = Epoch(times * pq.ms, durations=durations * pq.ms, labels=labels)
+ single_epoch = epc[1:3]
+ self.assertIsInstance(single_epoch, Epoch)
+ assert_array_equal(single_epoch.times, np.array([3.0, 4.0]))
+ assert_array_equal(single_epoch.durations, np.array([0.2, 0.3]))
+ assert_array_equal(single_epoch.labels, np.array(["B", "C"]))
+
class TestDuplicateWithNewData(unittest.TestCase):
def setUp(self):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
importlib-metadata==4.8.3
iniconfig==1.1.1
-e git+https://github.com/NeuralEnsemble/python-neo.git@6b6c7ef2d148de5431cbd8f254430251c3d34dde#egg=neo
nose==1.3.7
numpy==1.19.5
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
quantities==0.13.0
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: python-neo
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- nose==1.3.7
- numpy==1.19.5
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- quantities==0.13.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/python-neo
| [
"neo/test/coretest/test_epoch.py::TestEpoch::test_getitem",
"neo/test/coretest/test_epoch.py::TestEpoch::test_slice"
]
| []
| [
"neo/test/coretest/test_epoch.py::Test__generate_datasets::test__fake_neo__cascade",
"neo/test/coretest/test_epoch.py::Test__generate_datasets::test__fake_neo__nocascade",
"neo/test/coretest/test_epoch.py::Test__generate_datasets::test__get_fake_values",
"neo/test/coretest/test_epoch.py::TestEpoch::test_Epoch_creation",
"neo/test/coretest/test_epoch.py::TestEpoch::test_Epoch_merge",
"neo/test/coretest/test_epoch.py::TestEpoch::test_Epoch_repr",
"neo/test/coretest/test_epoch.py::TestEpoch::test__children",
"neo/test/coretest/test_epoch.py::TestEpoch::test__time_slice",
"neo/test/coretest/test_epoch.py::TestEpoch::test_as_array",
"neo/test/coretest/test_epoch.py::TestEpoch::test_as_quantity",
"neo/test/coretest/test_epoch.py::TestEpoch::test_time_slice2",
"neo/test/coretest/test_epoch.py::TestEpoch::test_time_slice_differnt_units",
"neo/test/coretest/test_epoch.py::TestEpoch::test_time_slice_empty",
"neo/test/coretest/test_epoch.py::TestEpoch::test_time_slice_none_both",
"neo/test/coretest/test_epoch.py::TestEpoch::test_time_slice_none_start",
"neo/test/coretest/test_epoch.py::TestEpoch::test_time_slice_none_stop",
"neo/test/coretest/test_epoch.py::TestEpoch::test_time_slice_out_of_boundries",
"neo/test/coretest/test_epoch.py::TestDuplicateWithNewData::test_duplicate_with_new_data",
"neo/test/coretest/test_epoch.py::TestEventFunctions::test__pickle"
]
| []
| BSD 3-Clause "New" or "Revised" License | 2,032 | [
"neo/core/epoch.py"
]
| [
"neo/core/epoch.py"
]
|
graphql-python__graphene-644 | 38db32e4f2d57f54a77879f9277ad4408792c881 | 2018-01-10 14:24:14 | 8c7ca74c6f083007a7c83c843f494357aec69371 | diff --git a/graphene/utils/str_converters.py b/graphene/utils/str_converters.py
index ae8ceff..6fcdfb7 100644
--- a/graphene/utils/str_converters.py
+++ b/graphene/utils/str_converters.py
@@ -1,13 +1,13 @@
import re
-# From this response in Stackoverflow
+# Adapted from this response in Stackoverflow
# http://stackoverflow.com/a/19053800/1072990
def to_camel_case(snake_str):
components = snake_str.split('_')
# We capitalize the first letter of each component except the first one
- # with the 'title' method and join them together.
- return components[0] + "".join(x.title() if x else '_' for x in components[1:])
+ # with the 'capitalize' method and join them together.
+ return components[0] + ''.join(x.capitalize() if x else '_' for x in components[1:])
# From this response in Stackoverflow
| Fields with numbers in names do not capitalize correctly
I noticed that fields whose names contain numbers (e.g. `field_i18n`) are not capitalized correctly.
For example:
`correct_field` becomes `correctField`, but `field_i18n` becomes `field_I18N` and `t1e2s3t` becomes `t1E2S3T`, which is obviously incorrect.
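The root cause is visible in plain CPython, with no graphene involved: `str.title` re-capitalizes after every non-letter, while `str.capitalize` only touches the first character. A minimal sketch:
```
word = 'i18n'
print(word.title())       # 'I18N' -- re-capitalizes after the digits
print(word.capitalize())  # 'I18n' -- only the first character is upper-cased

# Applied to the converter's join logic:
components = 'field_i18n'.split('_')
print(components[0] + ''.join(x.title() for x in components[1:]))       # fieldI18N
print(components[0] + ''.join(x.capitalize() for x in components[1:]))  # fieldI18n
```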
This is caused by the use of the `.title` method in `str_converters.py` instead of `.capitalize`: the title method is suited to multi-word strings, but each name component is a single word. | graphql-python/graphene
index 2ee7d7a..11f7e15 100644
--- a/graphene/utils/tests/test_str_converters.py
+++ b/graphene/utils/tests/test_str_converters.py
@@ -16,6 +16,7 @@ def test_camel_case():
assert to_camel_case('snakes_on_a_plane') == 'snakesOnAPlane'
assert to_camel_case('snakes_on_a__plane') == 'snakesOnA_Plane'
assert to_camel_case('i_phone_hysteria') == 'iPhoneHysteria'
+ assert to_camel_case('field_i18n') == 'fieldI18n'
def test_to_const():
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 2.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": null,
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
coveralls==3.3.1
docopt==0.6.2
fastdiff==0.3.0
-e git+https://github.com/graphql-python/graphene.git@38db32e4f2d57f54a77879f9277ad4408792c881#egg=graphene
graphql-core==2.3.2
graphql-relay==0.4.5
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
iso8601==1.1.0
mock==5.2.0
packaging==21.3
pluggy==1.0.0
promise==2.3
py==1.11.0
py-cpuinfo==9.0.0
pyparsing==3.1.4
pytest==7.0.1
pytest-benchmark==3.4.1
pytest-cov==4.0.0
pytest-mock==3.6.1
pytz==2025.2
requests==2.27.1
Rx==1.6.3
six==1.17.0
snapshottest==0.6.0
termcolor==1.1.0
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
wasmer==1.1.0
wasmer-compiler-cranelift==1.1.0
zipp==3.6.0
| name: graphene
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- charset-normalizer==2.0.12
- coverage==6.2
- coveralls==3.3.1
- docopt==0.6.2
- fastdiff==0.3.0
- graphql-core==2.3.2
- graphql-relay==0.4.5
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- iso8601==1.1.0
- mock==5.2.0
- packaging==21.3
- pluggy==1.0.0
- promise==2.3
- py==1.11.0
- py-cpuinfo==9.0.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-benchmark==3.4.1
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytz==2025.2
- requests==2.27.1
- rx==1.6.3
- six==1.17.0
- snapshottest==0.6.0
- termcolor==1.1.0
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- wasmer==1.1.0
- wasmer-compiler-cranelift==1.1.0
- zipp==3.6.0
prefix: /opt/conda/envs/graphene
| [
"graphene/utils/tests/test_str_converters.py::test_camel_case"
]
| []
| [
"graphene/utils/tests/test_str_converters.py::test_snake_case",
"graphene/utils/tests/test_str_converters.py::test_to_const"
]
| []
| MIT License | 2,033 | [
"graphene/utils/str_converters.py"
]
| [
"graphene/utils/str_converters.py"
]
|
|
mrocklin__sparse-70 | 49f64793c75f37cf137a3f86077135a22097ac0e | 2018-01-10 15:53:28 | bbb0869c882b914124c44e789214d945ff785aa4 | diff --git a/requirements.txt b/requirements.txt
index 6bad103..6e5129d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,2 +1,2 @@
numpy
-scipy
+scipy >= 0.19
diff --git a/sparse/core.py b/sparse/core.py
index 9a28145..0c6dfa3 100644
--- a/sparse/core.py
+++ b/sparse/core.py
@@ -253,6 +253,29 @@ class COO(object):
def nbytes(self):
return self.data.nbytes + self.coords.nbytes
+ def __len__(self):
+ """
+ Get "length" of array, which is by definition the size of the first
+ dimension.
+
+ Returns
+ -------
+ int
+ The size of the first dimension.
+
+ See Also
+ --------
+ numpy.ndarray.__len__ : Numpy equivalent property.
+
+ Examples
+ --------
+ >>> x = np.zeros((10, 10))
+ >>> s = COO.from_numpy(x)
+ >>> len(s)
+ 10
+ """
+ return self.shape[0]
+
def __sizeof__(self):
return self.nbytes
| Minimum version of scipy
I noticed that we need at least scipy 0.19.
Can we update `requirements.txt`, or work to support lower versions of scipy?
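Until a pin lands, an explicit import-time guard is one way to turn a confusing failure into a clear message. A minimal sketch, assuming the 0.19 floor reported above (which scipy API actually sets the floor is not verified here):
```
# Hypothetical guard for sparse/__init__.py; LooseVersion handles
# dotted version strings such as '0.19.1'.
from distutils.version import LooseVersion

import scipy

if LooseVersion(scipy.__version__) < LooseVersion('0.19'):
    raise ImportError(
        'sparse requires scipy >= 0.19, found %s' % scipy.__version__)
```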
| mrocklin/sparse | diff --git a/sparse/tests/test_core.py b/sparse/tests/test_core.py
index 2858e02..7500765 100644
--- a/sparse/tests/test_core.py
+++ b/sparse/tests/test_core.py
@@ -875,3 +875,8 @@ def test_scalar_shape_construction():
s = COO(coords, x, shape=5)
assert_eq(x, s)
+
+
+def test_len():
+ s = sparse.random((20, 30, 40))
+ assert len(s) == 20
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 2
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[tests]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-flake8"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
coverage==6.2
flake8==5.0.4
importlib-metadata==4.2.0
iniconfig==1.1.1
mccabe==0.7.0
numpy==1.19.5
packaging==21.3
pluggy==1.0.0
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
pytest-flake8==1.1.1
scipy==1.5.4
-e git+https://github.com/mrocklin/sparse.git@49f64793c75f37cf137a3f86077135a22097ac0e#egg=sparse
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: sparse
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- flake8==5.0.4
- importlib-metadata==4.2.0
- iniconfig==1.1.1
- mccabe==0.7.0
- numpy==1.19.5
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-flake8==1.1.1
- scipy==1.5.4
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/sparse
| [
"sparse/core.py::sparse.core.COO.__len__",
"sparse/tests/test_core.py::test_len"
]
| [
"sparse/__init__.py::flake-8::FLAKE8",
"sparse/core.py::flake-8::FLAKE8",
"sparse/core.py::sparse.core.COO",
"sparse/slicing.py::flake-8::FLAKE8",
"sparse/utils.py::flake-8::FLAKE8",
"sparse/tests/test_core.py::flake-8::FLAKE8"
]
| [
"sparse/core.py::sparse.core.random",
"sparse/slicing.py::sparse.slicing.check_index",
"sparse/slicing.py::sparse.slicing.normalize_index",
"sparse/slicing.py::sparse.slicing.normalize_slice",
"sparse/slicing.py::sparse.slicing.posify_index",
"sparse/slicing.py::sparse.slicing.replace_ellipsis",
"sparse/slicing.py::sparse.slicing.sanitize_index",
"sparse/tests/test_core.py::test_reductions[True-None-max-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_reductions[True-None-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_reductions[True-None-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_reductions[True-None-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_reductions[True-None-min-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_reductions[True-0-max-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_reductions[True-0-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_reductions[True-0-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_reductions[True-0-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_reductions[True-0-min-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_reductions[True-1-max-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_reductions[True-1-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_reductions[True-1-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_reductions[True-1-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_reductions[True-1-min-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_reductions[True-2-max-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_reductions[True-2-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_reductions[True-2-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_reductions[True-2-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_reductions[True-2-min-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_reductions[True-axis4-max-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_reductions[True-axis4-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_reductions[True-axis4-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_reductions[True-axis4-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_reductions[True-axis4-min-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_reductions[False-None-max-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_reductions[False-None-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_reductions[False-None-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_reductions[False-None-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_reductions[False-None-min-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_reductions[False-0-max-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_reductions[False-0-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_reductions[False-0-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_reductions[False-0-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_reductions[False-0-min-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_reductions[False-1-max-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_reductions[False-1-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_reductions[False-1-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_reductions[False-1-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_reductions[False-1-min-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_reductions[False-2-max-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_reductions[False-2-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_reductions[False-2-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_reductions[False-2-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_reductions[False-2-min-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_reductions[False-axis4-max-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_reductions[False-axis4-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_reductions[False-axis4-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_reductions[False-axis4-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_reductions[False-axis4-min-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-None-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-None-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-None-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-None-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-None-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-0-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-0-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-0-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-0-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-0-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-1-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-1-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-1-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-1-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-1-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-2-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-2-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-2-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-2-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-2-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-axis4-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-axis4-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-axis4-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-axis4-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_ufunc_reductions[True-axis4-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-None-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-None-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-None-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-None-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-None-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-0-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-0-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-0-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-0-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-0-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-1-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-1-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-1-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-1-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-1-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-2-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-2-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-2-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-2-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-2-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-axis4-amax-kwargs0-eqkwargs0]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-axis4-sum-kwargs1-eqkwargs1]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-axis4-sum-kwargs2-eqkwargs2]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-axis4-prod-kwargs3-eqkwargs3]",
"sparse/tests/test_core.py::test_ufunc_reductions[False-axis4-amin-kwargs4-eqkwargs4]",
"sparse/tests/test_core.py::test_transpose[None]",
"sparse/tests/test_core.py::test_transpose[axis1]",
"sparse/tests/test_core.py::test_transpose[axis2]",
"sparse/tests/test_core.py::test_transpose[axis3]",
"sparse/tests/test_core.py::test_transpose[axis4]",
"sparse/tests/test_core.py::test_transpose[axis5]",
"sparse/tests/test_core.py::test_transpose[axis6]",
"sparse/tests/test_core.py::test_transpose_error[axis0]",
"sparse/tests/test_core.py::test_transpose_error[axis1]",
"sparse/tests/test_core.py::test_transpose_error[axis2]",
"sparse/tests/test_core.py::test_transpose_error[axis3]",
"sparse/tests/test_core.py::test_transpose_error[axis4]",
"sparse/tests/test_core.py::test_transpose_error[axis5]",
"sparse/tests/test_core.py::test_reshape[a0-b0]",
"sparse/tests/test_core.py::test_reshape[a1-b1]",
"sparse/tests/test_core.py::test_reshape[a2-b2]",
"sparse/tests/test_core.py::test_reshape[a3-b3]",
"sparse/tests/test_core.py::test_reshape[a4-b4]",
"sparse/tests/test_core.py::test_reshape[a5-b5]",
"sparse/tests/test_core.py::test_reshape[a6-b6]",
"sparse/tests/test_core.py::test_reshape[a7-b7]",
"sparse/tests/test_core.py::test_reshape[a8-b8]",
"sparse/tests/test_core.py::test_reshape[a9-b9]",
"sparse/tests/test_core.py::test_large_reshape",
"sparse/tests/test_core.py::test_reshape_same",
"sparse/tests/test_core.py::test_to_scipy_sparse",
"sparse/tests/test_core.py::test_tensordot[a_shape0-b_shape0-axes0]",
"sparse/tests/test_core.py::test_tensordot[a_shape1-b_shape1-axes1]",
"sparse/tests/test_core.py::test_tensordot[a_shape2-b_shape2-axes2]",
"sparse/tests/test_core.py::test_tensordot[a_shape3-b_shape3-axes3]",
"sparse/tests/test_core.py::test_tensordot[a_shape4-b_shape4-axes4]",
"sparse/tests/test_core.py::test_tensordot[a_shape5-b_shape5-axes5]",
"sparse/tests/test_core.py::test_tensordot[a_shape6-b_shape6-axes6]",
"sparse/tests/test_core.py::test_tensordot[a_shape7-b_shape7-axes7]",
"sparse/tests/test_core.py::test_tensordot[a_shape8-b_shape8-axes8]",
"sparse/tests/test_core.py::test_tensordot[a_shape9-b_shape9-0]",
"sparse/tests/test_core.py::test_dot",
"sparse/tests/test_core.py::test_elemwise[expm1]",
"sparse/tests/test_core.py::test_elemwise[log1p]",
"sparse/tests/test_core.py::test_elemwise[sin]",
"sparse/tests/test_core.py::test_elemwise[tan]",
"sparse/tests/test_core.py::test_elemwise[sinh]",
"sparse/tests/test_core.py::test_elemwise[tanh]",
"sparse/tests/test_core.py::test_elemwise[floor]",
"sparse/tests/test_core.py::test_elemwise[ceil]",
"sparse/tests/test_core.py::test_elemwise[sqrt]",
"sparse/tests/test_core.py::test_elemwise[conjugate0]",
"sparse/tests/test_core.py::test_elemwise[round_]",
"sparse/tests/test_core.py::test_elemwise[rint]",
"sparse/tests/test_core.py::test_elemwise[<lambda>0]",
"sparse/tests/test_core.py::test_elemwise[conjugate1]",
"sparse/tests/test_core.py::test_elemwise[conjugate2]",
"sparse/tests/test_core.py::test_elemwise[<lambda>1]",
"sparse/tests/test_core.py::test_elemwise[abs]",
"sparse/tests/test_core.py::test_elemwise_binary[shape0-mul]",
"sparse/tests/test_core.py::test_elemwise_binary[shape0-add]",
"sparse/tests/test_core.py::test_elemwise_binary[shape0-sub]",
"sparse/tests/test_core.py::test_elemwise_binary[shape0-gt]",
"sparse/tests/test_core.py::test_elemwise_binary[shape0-lt]",
"sparse/tests/test_core.py::test_elemwise_binary[shape0-ne]",
"sparse/tests/test_core.py::test_elemwise_binary[shape1-mul]",
"sparse/tests/test_core.py::test_elemwise_binary[shape1-add]",
"sparse/tests/test_core.py::test_elemwise_binary[shape1-sub]",
"sparse/tests/test_core.py::test_elemwise_binary[shape1-gt]",
"sparse/tests/test_core.py::test_elemwise_binary[shape1-lt]",
"sparse/tests/test_core.py::test_elemwise_binary[shape1-ne]",
"sparse/tests/test_core.py::test_elemwise_binary[shape2-mul]",
"sparse/tests/test_core.py::test_elemwise_binary[shape2-add]",
"sparse/tests/test_core.py::test_elemwise_binary[shape2-sub]",
"sparse/tests/test_core.py::test_elemwise_binary[shape2-gt]",
"sparse/tests/test_core.py::test_elemwise_binary[shape2-lt]",
"sparse/tests/test_core.py::test_elemwise_binary[shape2-ne]",
"sparse/tests/test_core.py::test_elemwise_binary[shape3-mul]",
"sparse/tests/test_core.py::test_elemwise_binary[shape3-add]",
"sparse/tests/test_core.py::test_elemwise_binary[shape3-sub]",
"sparse/tests/test_core.py::test_elemwise_binary[shape3-gt]",
"sparse/tests/test_core.py::test_elemwise_binary[shape3-lt]",
"sparse/tests/test_core.py::test_elemwise_binary[shape3-ne]",
"sparse/tests/test_core.py::test_auto_densification_fails[pow]",
"sparse/tests/test_core.py::test_auto_densification_fails[truediv]",
"sparse/tests/test_core.py::test_auto_densification_fails[floordiv]",
"sparse/tests/test_core.py::test_auto_densification_fails[ge]",
"sparse/tests/test_core.py::test_auto_densification_fails[le]",
"sparse/tests/test_core.py::test_auto_densification_fails[eq]",
"sparse/tests/test_core.py::test_op_scipy_sparse",
"sparse/tests/test_core.py::test_elemwise_scalar[True-mul-5]",
"sparse/tests/test_core.py::test_elemwise_scalar[True-add-0]",
"sparse/tests/test_core.py::test_elemwise_scalar[True-sub-0]",
"sparse/tests/test_core.py::test_elemwise_scalar[True-pow-5]",
"sparse/tests/test_core.py::test_elemwise_scalar[True-truediv-3]",
"sparse/tests/test_core.py::test_elemwise_scalar[True-floordiv-4]",
"sparse/tests/test_core.py::test_elemwise_scalar[True-gt-5]",
"sparse/tests/test_core.py::test_elemwise_scalar[True-lt--5]",
"sparse/tests/test_core.py::test_elemwise_scalar[True-ne-0]",
"sparse/tests/test_core.py::test_elemwise_scalar[True-ge-5]",
"sparse/tests/test_core.py::test_elemwise_scalar[True-le--3]",
"sparse/tests/test_core.py::test_elemwise_scalar[True-eq-1]",
"sparse/tests/test_core.py::test_elemwise_scalar[False-mul-5]",
"sparse/tests/test_core.py::test_elemwise_scalar[False-add-0]",
"sparse/tests/test_core.py::test_elemwise_scalar[False-sub-0]",
"sparse/tests/test_core.py::test_elemwise_scalar[False-pow-5]",
"sparse/tests/test_core.py::test_elemwise_scalar[False-truediv-3]",
"sparse/tests/test_core.py::test_elemwise_scalar[False-floordiv-4]",
"sparse/tests/test_core.py::test_elemwise_scalar[False-gt-5]",
"sparse/tests/test_core.py::test_elemwise_scalar[False-lt--5]",
"sparse/tests/test_core.py::test_elemwise_scalar[False-ne-0]",
"sparse/tests/test_core.py::test_elemwise_scalar[False-ge-5]",
"sparse/tests/test_core.py::test_elemwise_scalar[False-le--3]",
"sparse/tests/test_core.py::test_elemwise_scalar[False-eq-1]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[True-mul-5]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[True-add-0]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[True-sub-0]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[True-gt--5]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[True-lt-5]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[True-ne-0]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[True-ge--5]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[True-le-3]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[True-eq-1]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[False-mul-5]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[False-add-0]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[False-sub-0]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[False-gt--5]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[False-lt-5]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[False-ne-0]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[False-ge--5]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[False-le-3]",
"sparse/tests/test_core.py::test_leftside_elemwise_scalar[False-eq-1]",
"sparse/tests/test_core.py::test_scalar_densification_fails[add-5]",
"sparse/tests/test_core.py::test_scalar_densification_fails[sub--5]",
"sparse/tests/test_core.py::test_scalar_densification_fails[pow--3]",
"sparse/tests/test_core.py::test_scalar_densification_fails[truediv-0]",
"sparse/tests/test_core.py::test_scalar_densification_fails[floordiv-0]",
"sparse/tests/test_core.py::test_scalar_densification_fails[gt--5]",
"sparse/tests/test_core.py::test_scalar_densification_fails[lt-5]",
"sparse/tests/test_core.py::test_scalar_densification_fails[ne-1]",
"sparse/tests/test_core.py::test_scalar_densification_fails[ge--3]",
"sparse/tests/test_core.py::test_scalar_densification_fails[le-3]",
"sparse/tests/test_core.py::test_scalar_densification_fails[eq-0]",
"sparse/tests/test_core.py::test_bitwise_binary[shape0-and_]",
"sparse/tests/test_core.py::test_bitwise_binary[shape0-or_]",
"sparse/tests/test_core.py::test_bitwise_binary[shape0-xor]",
"sparse/tests/test_core.py::test_bitwise_binary[shape1-and_]",
"sparse/tests/test_core.py::test_bitwise_binary[shape1-or_]",
"sparse/tests/test_core.py::test_bitwise_binary[shape1-xor]",
"sparse/tests/test_core.py::test_bitwise_binary[shape2-and_]",
"sparse/tests/test_core.py::test_bitwise_binary[shape2-or_]",
"sparse/tests/test_core.py::test_bitwise_binary[shape2-xor]",
"sparse/tests/test_core.py::test_bitwise_binary[shape3-and_]",
"sparse/tests/test_core.py::test_bitwise_binary[shape3-or_]",
"sparse/tests/test_core.py::test_bitwise_binary[shape3-xor]",
"sparse/tests/test_core.py::test_bitwise_binary_bool[shape0-and_]",
"sparse/tests/test_core.py::test_bitwise_binary_bool[shape0-or_]",
"sparse/tests/test_core.py::test_bitwise_binary_bool[shape0-xor]",
"sparse/tests/test_core.py::test_bitwise_binary_bool[shape1-and_]",
"sparse/tests/test_core.py::test_bitwise_binary_bool[shape1-or_]",
"sparse/tests/test_core.py::test_bitwise_binary_bool[shape1-xor]",
"sparse/tests/test_core.py::test_bitwise_binary_bool[shape2-and_]",
"sparse/tests/test_core.py::test_bitwise_binary_bool[shape2-or_]",
"sparse/tests/test_core.py::test_bitwise_binary_bool[shape2-xor]",
"sparse/tests/test_core.py::test_bitwise_binary_bool[shape3-and_]",
"sparse/tests/test_core.py::test_bitwise_binary_bool[shape3-or_]",
"sparse/tests/test_core.py::test_bitwise_binary_bool[shape3-xor]",
"sparse/tests/test_core.py::test_elemwise_binary_empty",
"sparse/tests/test_core.py::test_gt",
"sparse/tests/test_core.py::test_slicing[0]",
"sparse/tests/test_core.py::test_slicing[1]",
"sparse/tests/test_core.py::test_slicing[-1]",
"sparse/tests/test_core.py::test_slicing[index3]",
"sparse/tests/test_core.py::test_slicing[index4]",
"sparse/tests/test_core.py::test_slicing[index5]",
"sparse/tests/test_core.py::test_slicing[index6]",
"sparse/tests/test_core.py::test_slicing[index7]",
"sparse/tests/test_core.py::test_slicing[index8]",
"sparse/tests/test_core.py::test_slicing[index9]",
"sparse/tests/test_core.py::test_slicing[index10]",
"sparse/tests/test_core.py::test_slicing[index11]",
"sparse/tests/test_core.py::test_slicing[index12]",
"sparse/tests/test_core.py::test_slicing[index13]",
"sparse/tests/test_core.py::test_slicing[index14]",
"sparse/tests/test_core.py::test_slicing[index15]",
"sparse/tests/test_core.py::test_slicing[index16]",
"sparse/tests/test_core.py::test_slicing[index17]",
"sparse/tests/test_core.py::test_slicing[index18]",
"sparse/tests/test_core.py::test_slicing[index19]",
"sparse/tests/test_core.py::test_slicing[index20]",
"sparse/tests/test_core.py::test_slicing[index21]",
"sparse/tests/test_core.py::test_slicing[index22]",
"sparse/tests/test_core.py::test_slicing[index23]",
"sparse/tests/test_core.py::test_slicing[index24]",
"sparse/tests/test_core.py::test_slicing[index25]",
"sparse/tests/test_core.py::test_slicing[index26]",
"sparse/tests/test_core.py::test_slicing[index27]",
"sparse/tests/test_core.py::test_slicing[index28]",
"sparse/tests/test_core.py::test_slicing[index29]",
"sparse/tests/test_core.py::test_slicing[index30]",
"sparse/tests/test_core.py::test_slicing[index31]",
"sparse/tests/test_core.py::test_slicing[index32]",
"sparse/tests/test_core.py::test_slicing[index33]",
"sparse/tests/test_core.py::test_slicing[index34]",
"sparse/tests/test_core.py::test_slicing[index35]",
"sparse/tests/test_core.py::test_slicing[index36]",
"sparse/tests/test_core.py::test_slicing[index37]",
"sparse/tests/test_core.py::test_slicing[index38]",
"sparse/tests/test_core.py::test_slicing[index39]",
"sparse/tests/test_core.py::test_slicing[index40]",
"sparse/tests/test_core.py::test_slicing[index41]",
"sparse/tests/test_core.py::test_slicing[index42]",
"sparse/tests/test_core.py::test_slicing[index43]",
"sparse/tests/test_core.py::test_slicing[index44]",
"sparse/tests/test_core.py::test_custom_dtype_slicing",
"sparse/tests/test_core.py::test_slicing_errors[index0]",
"sparse/tests/test_core.py::test_slicing_errors[index1]",
"sparse/tests/test_core.py::test_slicing_errors[index2]",
"sparse/tests/test_core.py::test_slicing_errors[5]",
"sparse/tests/test_core.py::test_slicing_errors[-5]",
"sparse/tests/test_core.py::test_slicing_errors[foo]",
"sparse/tests/test_core.py::test_slicing_errors[index6]",
"sparse/tests/test_core.py::test_canonical",
"sparse/tests/test_core.py::test_concatenate",
"sparse/tests/test_core.py::test_concatenate_mixed[stack-0]",
"sparse/tests/test_core.py::test_concatenate_mixed[stack-1]",
"sparse/tests/test_core.py::test_concatenate_mixed[concatenate-0]",
"sparse/tests/test_core.py::test_concatenate_mixed[concatenate-1]",
"sparse/tests/test_core.py::test_stack[0-shape0]",
"sparse/tests/test_core.py::test_stack[0-shape1]",
"sparse/tests/test_core.py::test_stack[0-shape2]",
"sparse/tests/test_core.py::test_stack[1-shape0]",
"sparse/tests/test_core.py::test_stack[1-shape1]",
"sparse/tests/test_core.py::test_stack[1-shape2]",
"sparse/tests/test_core.py::test_stack[-1-shape0]",
"sparse/tests/test_core.py::test_stack[-1-shape1]",
"sparse/tests/test_core.py::test_stack[-1-shape2]",
"sparse/tests/test_core.py::test_large_concat_stack",
"sparse/tests/test_core.py::test_coord_dtype",
"sparse/tests/test_core.py::test_addition",
"sparse/tests/test_core.py::test_addition_not_ok_when_large_and_sparse",
"sparse/tests/test_core.py::test_broadcasting[shape10-shape20-add]",
"sparse/tests/test_core.py::test_broadcasting[shape10-shape20-mul]",
"sparse/tests/test_core.py::test_broadcasting[shape11-shape21-add]",
"sparse/tests/test_core.py::test_broadcasting[shape11-shape21-mul]",
"sparse/tests/test_core.py::test_broadcasting[shape12-shape22-add]",
"sparse/tests/test_core.py::test_broadcasting[shape12-shape22-mul]",
"sparse/tests/test_core.py::test_broadcasting[shape13-shape23-add]",
"sparse/tests/test_core.py::test_broadcasting[shape13-shape23-mul]",
"sparse/tests/test_core.py::test_broadcasting[shape14-shape24-add]",
"sparse/tests/test_core.py::test_broadcasting[shape14-shape24-mul]",
"sparse/tests/test_core.py::test_broadcasting[shape15-shape25-add]",
"sparse/tests/test_core.py::test_broadcasting[shape15-shape25-mul]",
"sparse/tests/test_core.py::test_broadcast_to[shape10-shape20]",
"sparse/tests/test_core.py::test_broadcast_to[shape11-shape21]",
"sparse/tests/test_core.py::test_broadcast_to[shape12-shape22]",
"sparse/tests/test_core.py::test_scalar_multiplication[2]",
"sparse/tests/test_core.py::test_scalar_multiplication[2.5]",
"sparse/tests/test_core.py::test_scalar_multiplication[scalar2]",
"sparse/tests/test_core.py::test_scalar_multiplication[scalar3]",
"sparse/tests/test_core.py::test_scalar_exponentiation",
"sparse/tests/test_core.py::test_create_with_lists_of_tuples",
"sparse/tests/test_core.py::test_sizeof",
"sparse/tests/test_core.py::test_scipy_sparse_interface",
"sparse/tests/test_core.py::test_cache_csr",
"sparse/tests/test_core.py::test_empty_shape",
"sparse/tests/test_core.py::test_single_dimension",
"sparse/tests/test_core.py::test_raise_dense",
"sparse/tests/test_core.py::test_large_sum",
"sparse/tests/test_core.py::test_add_many_sparse_arrays",
"sparse/tests/test_core.py::test_caching",
"sparse/tests/test_core.py::test_scalar_slicing",
"sparse/tests/test_core.py::test_triul[shape0-0]",
"sparse/tests/test_core.py::test_triul[shape1-1]",
"sparse/tests/test_core.py::test_triul[shape2--1]",
"sparse/tests/test_core.py::test_triul[shape3--2]",
"sparse/tests/test_core.py::test_triul[shape4-1000]",
"sparse/tests/test_core.py::test_empty_reduction",
"sparse/tests/test_core.py::test_random_shape[0.1-shape0]",
"sparse/tests/test_core.py::test_random_shape[0.1-shape1]",
"sparse/tests/test_core.py::test_random_shape[0.1-shape2]",
"sparse/tests/test_core.py::test_random_shape[0.3-shape0]",
"sparse/tests/test_core.py::test_random_shape[0.3-shape1]",
"sparse/tests/test_core.py::test_random_shape[0.3-shape2]",
"sparse/tests/test_core.py::test_random_shape[0.5-shape0]",
"sparse/tests/test_core.py::test_random_shape[0.5-shape1]",
"sparse/tests/test_core.py::test_random_shape[0.5-shape2]",
"sparse/tests/test_core.py::test_random_shape[0.7-shape0]",
"sparse/tests/test_core.py::test_random_shape[0.7-shape1]",
"sparse/tests/test_core.py::test_random_shape[0.7-shape2]",
"sparse/tests/test_core.py::test_two_random_unequal",
"sparse/tests/test_core.py::test_two_random_same_seed",
"sparse/tests/test_core.py::test_random_sorted",
"sparse/tests/test_core.py::test_random_rvs[0.0-shape0-None-float64]",
"sparse/tests/test_core.py::test_random_rvs[0.0-shape0-rvs-int]",
"sparse/tests/test_core.py::test_random_rvs[0.0-shape0-<lambda>-bool]",
"sparse/tests/test_core.py::test_random_rvs[0.0-shape1-None-float64]",
"sparse/tests/test_core.py::test_random_rvs[0.0-shape1-rvs-int]",
"sparse/tests/test_core.py::test_random_rvs[0.0-shape1-<lambda>-bool]",
"sparse/tests/test_core.py::test_random_rvs[0.01-shape0-None-float64]",
"sparse/tests/test_core.py::test_random_rvs[0.01-shape0-rvs-int]",
"sparse/tests/test_core.py::test_random_rvs[0.01-shape0-<lambda>-bool]",
"sparse/tests/test_core.py::test_random_rvs[0.01-shape1-None-float64]",
"sparse/tests/test_core.py::test_random_rvs[0.01-shape1-rvs-int]",
"sparse/tests/test_core.py::test_random_rvs[0.01-shape1-<lambda>-bool]",
"sparse/tests/test_core.py::test_random_rvs[0.1-shape0-None-float64]",
"sparse/tests/test_core.py::test_random_rvs[0.1-shape0-rvs-int]",
"sparse/tests/test_core.py::test_random_rvs[0.1-shape0-<lambda>-bool]",
"sparse/tests/test_core.py::test_random_rvs[0.1-shape1-None-float64]",
"sparse/tests/test_core.py::test_random_rvs[0.1-shape1-rvs-int]",
"sparse/tests/test_core.py::test_random_rvs[0.1-shape1-<lambda>-bool]",
"sparse/tests/test_core.py::test_random_rvs[0.2-shape0-None-float64]",
"sparse/tests/test_core.py::test_random_rvs[0.2-shape0-rvs-int]",
"sparse/tests/test_core.py::test_random_rvs[0.2-shape0-<lambda>-bool]",
"sparse/tests/test_core.py::test_random_rvs[0.2-shape1-None-float64]",
"sparse/tests/test_core.py::test_random_rvs[0.2-shape1-rvs-int]",
"sparse/tests/test_core.py::test_random_rvs[0.2-shape1-<lambda>-bool]",
"sparse/tests/test_core.py::test_scalar_shape_construction"
]
| []
| Modified BSD License | 2,034 | [
"requirements.txt",
"sparse/core.py"
]
| [
"requirements.txt",
"sparse/core.py"
]
|
|
automl__SMAC3-368 | dc8ebc9763d15516d4f96c32e75c431783487845 | 2018-01-11 16:33:51 | f710fa60dbf2c64e42ce14aa0eb529f92378560a | diff --git a/smac/facade/func_facade.py b/smac/facade/func_facade.py
index c747fd01b..5a5aefc74 100644
--- a/smac/facade/func_facade.py
+++ b/smac/facade/func_facade.py
@@ -52,8 +52,12 @@ def fmin_smac(func: callable,
"""
# create configuration space
cs = ConfigurationSpace()
+
+ # Adjust zero padding
+ tmplt = 'x{0:0' + str(len(str(len(bounds)))) + 'd}'
+
for idx, (lower_bound, upper_bound) in enumerate(bounds):
- parameter = UniformFloatHyperparameter(name="x%d" % (idx + 1),
+ parameter = UniformFloatHyperparameter(name=tmplt.format(idx + 1),
lower=lower_bound,
upper=upper_bound,
default_value=x0[idx])
@@ -77,11 +81,10 @@ def fmin_smac(func: callable,
smac = SMAC(scenario=scenario, tae_runner=ta, rng=rng)
smac.logger = logging.getLogger(smac.__module__ + "." + smac.__class__.__name__)
incumbent = smac.optimize()
-
config_id = smac.solver.runhistory.config_ids[incumbent]
run_key = RunKey(config_id, None, 0)
incumbent_performance = smac.solver.runhistory.data[run_key]
- incumbent = np.array([incumbent['x%d' % (idx + 1)]
+ incumbent = np.array([incumbent[tmplt.format(idx + 1)]
for idx in range(len(bounds))], dtype=np.float)
return incumbent, incumbent_performance.cost, \
smac
| Wrong parameter order
Hi all,
we have realized that when using fmin_smac, the input parameters given to the function are processed in the wrong order if there are more than 9 of them.
For instance, when executing this command:
```
x, cost, _ = fmin_smac(func=to_minimize,
                       x0=[0.5, 20, 0.7, 0.15, 10, 0.5, 1.0, 0.1, 0.25, 1.0, 0.5],
                       bounds=[(0, 1), (5, 25), (0.1, 2.0), (0.0, 1.5), (2, 20), (0, 1), (1, 1.3), (0.001, 1), (0.001, 2), (0.05, 9), (0.2, 0.8)],
                       maxfun=2000,
                       rng=3)
```
the input parameters are swapped in the following way, as reported in the configspace.pcs file:
x1 real [0.0, 1.0] [0.5]
x10 real [0.05, 9.0] [1.0]
x11 real [0.2, 0.8] [0.5]
x2 real [5.0, 25.0] [20.0]
x3 real [0.1, 2.0] [0.7]
x4 real [0.0, 1.5] [0.15]
x5 real [2.0, 20.0] [10.0]
x6 real [0.0, 1.0] [0.5]
x7 real [1.0, 1.3] [1.0]
x8 real [0.001, 1.0] [0.1]
x9 real [0.001, 2.0] [0.25]
Any idea what's happening there? Thanks for your help!
Best,
L.
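The listing above is consistent with the hyperparameter names being sorted as plain strings, so 'x10' lands before 'x2'. A minimal sketch of the effect, and of the zero-padding workaround used in the patch above (that ConfigurationSpace orders names this way internally is an assumption inferred from the listing):
```
names = ['x%d' % (i + 1) for i in range(11)]
print(sorted(names))
# ['x1', 'x10', 'x11', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7', 'x8', 'x9']

# Zero-padding every index to the width of the largest one restores
# the intended numeric order under a string sort:
width = len(str(len(names)))          # 2 digits for 11 parameters
tmplt = 'x{0:0' + str(width) + 'd}'   # same template the patch builds
print(sorted(tmplt.format(i + 1) for i in range(11)))
# ['x01', 'x02', ..., 'x09', 'x10', 'x11']
```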
| automl/SMAC3 | diff --git a/test/test_facade/test_func_facade.py b/test/test_facade/test_func_facade.py
index 11ab7c88d..43748ed1c 100644
--- a/test/test_facade/test_func_facade.py
+++ b/test/test_facade/test_func_facade.py
@@ -33,3 +33,20 @@ class TestSMACFacade(unittest.TestCase):
self.assertEqual(type(f), type(f_s))
self.output_dirs.append(smac.scenario.output_dir)
+
+ def test_parameter_order(self):
+ def func(x):
+ for i in range(len(x)):
+ self.assertLess(i - 1, x[i])
+ self.assertGreater(i, x[i])
+ return 1
+
+ default = [i - 0.5 for i in range(10)]
+ bounds = [(i - 1, i) for i in range(10)]
+ print(default, bounds)
+ _, _, smac = fmin_smac(func=func, x0=default,
+ bounds=bounds,
+ maxfun=1)
+
+ self.output_dirs.append(smac.scenario.output_dir)
+
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 0.8 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y build-essential swig"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
ConfigSpace==0.4.19
Cython==3.0.12
docutils==0.18.1
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
Jinja2==3.0.3
joblib==1.1.1
MarkupSafe==2.0.1
nose==1.3.7
numpy==1.19.5
packaging==21.3
pluggy==1.0.0
psutil==7.0.0
py==1.11.0
Pygments==2.14.0
pynisher==0.6.4
pyparsing==3.1.4
pyrfr==0.8.2
pytest==7.0.1
pytz==2025.2
requests==2.27.1
scikit-learn==0.24.2
scipy==1.5.4
six==1.17.0
-e git+https://github.com/automl/SMAC3.git@dc8ebc9763d15516d4f96c32e75c431783487845#egg=smac
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
threadpoolctl==3.1.0
tomli==1.2.3
typing==3.7.4.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: SMAC3
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- configspace==0.4.19
- cython==3.0.12
- docutils==0.18.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jinja2==3.0.3
- joblib==1.1.1
- markupsafe==2.0.1
- nose==1.3.7
- numpy==1.19.5
- packaging==21.3
- pluggy==1.0.0
- psutil==7.0.0
- py==1.11.0
- pygments==2.14.0
- pynisher==0.6.4
- pyparsing==3.1.4
- pyrfr==0.8.2
- pytest==7.0.1
- pytz==2025.2
- requests==2.27.1
- scikit-learn==0.24.2
- scipy==1.5.4
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- threadpoolctl==3.1.0
- tomli==1.2.3
- typing==3.7.4.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/SMAC3
| [
"test/test_facade/test_func_facade.py::TestSMACFacade::test_parameter_order"
]
| []
| [
"test/test_facade/test_func_facade.py::TestSMACFacade::test_func_smac"
]
| []
| BSD 3-Clause License | 2,035 | [
"smac/facade/func_facade.py"
]
| [
"smac/facade/func_facade.py"
]
|
|
joke2k__faker-678 | 891e000f06fbc2f13b16331a5577a8d9952281c6 | 2018-01-12 08:11:04 | 29dff0a0f2a31edac21a18cfa50b5bc9206304b2 | diff --git a/faker/providers/internet/__init__.py b/faker/providers/internet/__init__.py
index acdda088..f357f2c7 100644
--- a/faker/providers/internet/__init__.py
+++ b/faker/providers/internet/__init__.py
@@ -75,9 +75,13 @@ class Provider(BaseProvider):
return string
@lowercase
- def email(self):
- pattern = self.random_element(self.email_formats)
- return "".join(self.generator.parse(pattern).split(" "))
+ def email(self, domain=None):
+ if domain:
+ email = '{0}@{1}'.format(self.user_name(), domain)
+ else:
+ pattern = self.random_element(self.email_formats)
+ email = "".join(self.generator.parse(pattern).split(" "))
+ return email
@lowercase
def safe_email(self):
| NameError: name 'args' is not defined
I'm getting an error when calling the method with the literal `*args` form from the docs:
```
fake.email(*args, **kwargs)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'args' is not defined
```
Also, can I pass something through the `**kwargs`, like `@gmail`, to get [email protected]?
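Two notes on the report: the `fake.email(*args, **kwargs)` line is a generic signature placeholder, not code to paste literally (hence the NameError), and with the `domain` argument added in the patch above the second question gets a direct answer. A minimal usage sketch, assuming the patched provider:
```
from faker import Faker

fake = Faker()
print(fake.email())                    # pattern-based address, provider-chosen domain
print(fake.email(domain='gmail.com'))  # random user name joined to the fixed domain
```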
| joke2k/faker | diff --git a/tests/providers/test_internet.py b/tests/providers/test_internet.py
index 555e7975..fe444b5b 100644
--- a/tests/providers/test_internet.py
+++ b/tests/providers/test_internet.py
@@ -14,6 +14,17 @@ from faker.utils import text
from tests import string_types
+class TestInternetProvider(unittest.TestCase):
+ """ Tests internet """
+
+ def setUp(self):
+ self.factory = Faker()
+
+ def test_email(self):
+ email = self.factory.email(domain='example.com')
+ self.assertEqual(email.split('@')[1], 'example.com')
+
+
class TestJaJP(unittest.TestCase):
""" Tests internet in the ja_JP locale """
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 0.8 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [],
"python": "3.6",
"reqs_path": [
"tests/requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
coverage==6.2
dnspython==2.2.1
email-validator==1.0.3
execnet==1.9.0
-e git+https://github.com/joke2k/faker.git@891e000f06fbc2f13b16331a5577a8d9952281c6#egg=Faker
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
mock==2.0.0
packaging==21.3
pbr==6.1.1
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
pytest-asyncio==0.16.0
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-xdist==3.0.2
python-dateutil==2.9.0.post0
six==1.17.0
text-unidecode==1.3
tomli==1.2.3
typing_extensions==4.1.1
UkPostcodeParser==1.1.2
zipp==3.6.0
| name: faker
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- dnspython==2.2.1
- email-validator==1.0.3
- execnet==1.9.0
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- mock==2.0.0
- packaging==21.3
- pbr==6.1.1
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-asyncio==0.16.0
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-xdist==3.0.2
- python-dateutil==2.9.0.post0
- six==1.17.0
- text-unidecode==1.3
- tomli==1.2.3
- typing-extensions==4.1.1
- ukpostcodeparser==1.1.2
- zipp==3.6.0
prefix: /opt/conda/envs/faker
| [
"tests/providers/test_internet.py::TestInternetProvider::test_email"
]
| []
| [
"tests/providers/test_internet.py::TestJaJP::test_internet",
"tests/providers/test_internet.py::TestZhCN::test_email",
"tests/providers/test_internet.py::TestZhTW::test_email",
"tests/providers/test_internet.py::TestHuHU::test_internet",
"tests/providers/test_internet.py::TestPlPL::test_free_email_domain",
"tests/providers/test_internet.py::TestPlPL::test_tld",
"tests/providers/test_internet.py::TestNlNl::test_ascii_company_email",
"tests/providers/test_internet.py::TestNlNl::test_ascii_free_email",
"tests/providers/test_internet.py::TestNlNl::test_ascii_safe_email",
"tests/providers/test_internet.py::TestArAa::test_ascii_company_email",
"tests/providers/test_internet.py::TestArAa::test_ascii_free_email",
"tests/providers/test_internet.py::TestArAa::test_ascii_safe_email"
]
| []
| MIT License | 2,036 | [
"faker/providers/internet/__init__.py"
]
| [
"faker/providers/internet/__init__.py"
]
|
|
msgpack__msgpack-python-276 | d9ec8fc905fc9ed37c86700f794adeb883b4f5ea | 2018-01-12 08:45:18 | 3c9c6edbc88908fceb3c69ff3d6455be8b5914c8 | diff --git a/Makefile b/Makefile
index 6a9906c..4030080 100644
--- a/Makefile
+++ b/Makefile
@@ -18,7 +18,8 @@ serve-doc: all
.PHONY: clean
clean:
rm -rf build
- rm msgpack/*.so
+ rm -f msgpack/_packer.cpp
+ rm -f msgpack/_unpacker.cpp
rm -rf msgpack/__pycache__
rm -rf test/__pycache__
diff --git a/README.rst b/README.rst
index a5038db..8925a65 100644
--- a/README.rst
+++ b/README.rst
@@ -47,9 +47,9 @@ In case of packer, use UTF-8 always. Storing other than UTF-8 is not recommende
For backward compatibility, you can use ``use_bin_type=False`` and pack ``bytes``
object into msgpack raw type.
-In case of unpacker, there is new ``raw_as_bytes`` option. It is ``True`` by default
+In case of unpacker, there is new ``raw`` option. It is ``True`` by default
for backward compatibility, but it is changed to ``False`` in near future.
-You can use ``raw_as_bytes=False`` instead of ``encoding='utf-8'``.
+You can use ``raw=False`` instead of ``encoding='utf-8'``.
Planned backward incompatible changes
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -58,14 +58,14 @@ When msgpack 1.0, I planning these breaking changes:
* packer and unpacker: Remove ``encoding`` and ``unicode_errors`` option.
* packer: Change default of ``use_bin_type`` option from False to True.
-* unpacker: Change default of ``raw_as_bytes`` option from True to False.
+* unpacker: Change default of ``raw`` option from True to False.
* unpacker: Reduce all ``max_xxx_len`` options for typical usage.
* unpacker: Remove ``write_bytes`` option from all methods.
To avoid these breaking changes breaks your application, please:
* Don't use deprecated options.
-* Pass ``use_bin_type`` and ``raw_as_bytes`` options explicitly.
+* Pass ``use_bin_type`` and ``raw`` options explicitly.
* If your application handle large (>1MB) data, specify ``max_xxx_len`` options too.
@@ -113,14 +113,14 @@ msgpack provides ``dumps`` and ``loads`` as an alias for compatibility with
>>> import msgpack
>>> msgpack.packb([1, 2, 3], use_bin_type=True)
'\x93\x01\x02\x03'
- >>> msgpack.unpackb(_, raw_as_bytes=False)
+ >>> msgpack.unpackb(_, raw=False)
[1, 2, 3]
``unpack`` unpacks msgpack's array to Python's list, but can also unpack to tuple:
.. code-block:: pycon
- >>> msgpack.unpackb(b'\x93\x01\x02\x03', use_list=False, raw_as_bytes=False)
+ >>> msgpack.unpackb(b'\x93\x01\x02\x03', use_list=False, raw=False)
(1, 2, 3)
You should always specify the ``use_list`` keyword argument for backward compatibility.
@@ -146,7 +146,7 @@ stream (or from bytes provided through its ``feed`` method).
buf.seek(0)
- unpacker = msgpack.Unpacker(buf, raw_as_bytes=False)
+ unpacker = msgpack.Unpacker(buf, raw=False)
for unpacked in unpacker:
print(unpacked)
@@ -179,7 +179,7 @@ It is also possible to pack/unpack custom data types. Here is an example for
packed_dict = msgpack.packb(useful_dict, default=encode_datetime, use_bin_type=True)
- this_dict_again = msgpack.unpackb(packed_dict, object_hook=decode_datetime, raw_as_bytes=False)
+ this_dict_again = msgpack.unpackb(packed_dict, object_hook=decode_datetime, raw=False)
``Unpacker``'s ``object_hook`` callback receives a dict; the
``object_pairs_hook`` callback may instead be used to receive a list of
@@ -209,7 +209,7 @@ It is also possible to pack/unpack custom data types using the **ext** type.
...
>>> data = array.array('d', [1.2, 3.4])
>>> packed = msgpack.packb(data, default=default, use_bin_type=True)
- >>> unpacked = msgpack.unpackb(packed, ext_hook=ext_hook, raw_as_bytes=False)
+ >>> unpacked = msgpack.unpackb(packed, ext_hook=ext_hook, raw=False)
>>> data == unpacked
True
@@ -257,7 +257,7 @@ For backward compatibility reasons, msgpack-python will still default all
strings to byte strings, unless you specify the ``use_bin_type=True`` option in
the packer. If you do so, it will use a non-standard type called **bin** to
serialize byte arrays, and **raw** becomes to mean **str**. If you want to
-distinguish **bin** and **raw** in the unpacker, specify ``raw_as_bytes=False``.
+distinguish **bin** and **raw** in the unpacker, specify ``raw=False``.
Note that Python 2 defaults to byte-arrays over Unicode strings:
@@ -267,7 +267,7 @@ Note that Python 2 defaults to byte-arrays over Unicode strings:
>>> msgpack.unpackb(msgpack.packb([b'spam', u'eggs']))
['spam', 'eggs']
>>> msgpack.unpackb(msgpack.packb([b'spam', u'eggs'], use_bin_type=True),
- raw_as_bytes=False)
+ raw=False)
['spam', u'eggs']
This is the same code in Python 3 (same behaviour, but Python 3 has a
@@ -279,7 +279,7 @@ different default):
>>> msgpack.unpackb(msgpack.packb([b'spam', u'eggs']))
[b'spam', b'eggs']
>>> msgpack.unpackb(msgpack.packb([b'spam', u'eggs'], use_bin_type=True),
- raw_as_bytes=False)
+ raw=False)
[b'spam', 'eggs']
diff --git a/msgpack/_unpacker.pyx b/msgpack/_unpacker.pyx
index b796d04..806be4f 100644
--- a/msgpack/_unpacker.pyx
+++ b/msgpack/_unpacker.pyx
@@ -43,7 +43,7 @@ from msgpack import ExtType
cdef extern from "unpack.h":
ctypedef struct msgpack_user:
bint use_list
- bint raw_as_bytes
+ bint raw
bint has_pairs_hook # call object_hook with k-v pairs
PyObject* object_hook
PyObject* list_hook
@@ -74,14 +74,14 @@ cdef extern from "unpack.h":
cdef inline init_ctx(unpack_context *ctx,
object object_hook, object object_pairs_hook,
object list_hook, object ext_hook,
- bint use_list, bint raw_as_bytes,
+ bint use_list, bint raw,
char* encoding, char* unicode_errors,
Py_ssize_t max_str_len, Py_ssize_t max_bin_len,
Py_ssize_t max_array_len, Py_ssize_t max_map_len,
Py_ssize_t max_ext_len):
unpack_init(ctx)
ctx.user.use_list = use_list
- ctx.user.raw_as_bytes = raw_as_bytes
+ ctx.user.raw = raw
ctx.user.object_hook = ctx.user.list_hook = <PyObject*>NULL
ctx.user.max_str_len = max_str_len
ctx.user.max_bin_len = max_bin_len
@@ -158,7 +158,7 @@ cdef inline int get_data_from_buffer(object obj,
return 1
def unpackb(object packed, object object_hook=None, object list_hook=None,
- bint use_list=True, bint raw_as_bytes=True,
+ bint use_list=True, bint raw=True,
encoding=None, unicode_errors="strict",
object_pairs_hook=None, ext_hook=ExtType,
Py_ssize_t max_str_len=2147483647, # 2**32-1
@@ -185,7 +185,7 @@ def unpackb(object packed, object object_hook=None, object list_hook=None,
cdef int new_protocol = 0
if encoding is not None:
- PyErr_WarnEx(PendingDeprecationWarning, "encoding is deprecated, Use raw_as_bytes=False instead.", 1)
+ PyErr_WarnEx(PendingDeprecationWarning, "encoding is deprecated, Use raw=False instead.", 1)
if isinstance(encoding, unicode):
encoding = encoding.encode('ascii')
elif not isinstance(encoding, bytes):
@@ -203,7 +203,7 @@ def unpackb(object packed, object object_hook=None, object list_hook=None,
get_data_from_buffer(packed, &view, &buf, &buf_len, &new_protocol)
try:
init_ctx(&ctx, object_hook, object_pairs_hook, list_hook, ext_hook,
- use_list, raw_as_bytes, cenc, cerr,
+ use_list, raw, cenc, cerr,
max_str_len, max_bin_len, max_array_len, max_map_len, max_ext_len)
ret = unpack_construct(&ctx, buf, buf_len, &off)
finally:
@@ -261,7 +261,7 @@ cdef class Unpacker(object):
If true, unpack msgpack array to Python list.
Otherwise, unpack to Python tuple. (default: True)
- :param bool raw_as_bytes:
+ :param bool raw:
If true, unpack msgpack raw to Python bytes (default).
Otherwise, unpack to Python str (or unicode on Python 2) by decoding
with UTF-8 encoding (recommended).
@@ -299,7 +299,7 @@ cdef class Unpacker(object):
Limits max length of map. (default: 2**31-1)
:param str encoding:
- Deprecated, use raw_as_bytes instead.
+ Deprecated, use raw instead.
Encoding used for decoding msgpack raw.
If it is None (default), msgpack raw is deserialized to Python bytes.
@@ -310,13 +310,13 @@ cdef class Unpacker(object):
Example of streaming deserialize from file-like object::
- unpacker = Unpacker(file_like, raw_as_bytes=False)
+ unpacker = Unpacker(file_like, raw=False)
for o in unpacker:
process(o)
Example of streaming deserialize from socket::
- unpacker = Unpacker(raw_as_bytes=False)
+ unpacker = Unpacker(raw=False)
while True:
buf = sock.recv(1024**2)
if not buf:
@@ -345,7 +345,7 @@ cdef class Unpacker(object):
self.buf = NULL
def __init__(self, file_like=None, Py_ssize_t read_size=0,
- bint use_list=True, bint raw_as_bytes=True,
+ bint use_list=True, bint raw=True,
object object_hook=None, object object_pairs_hook=None, object list_hook=None,
encoding=None, unicode_errors='strict', int max_buffer_size=0,
object ext_hook=ExtType,
@@ -384,7 +384,7 @@ cdef class Unpacker(object):
self.stream_offset = 0
if encoding is not None:
- PyErr_WarnEx(PendingDeprecationWarning, "encoding is deprecated, Use raw_as_bytes=False instead.", 1)
+ PyErr_WarnEx(PendingDeprecationWarning, "encoding is deprecated, Use raw=False instead.", 1)
if isinstance(encoding, unicode):
self.encoding = encoding.encode('ascii')
elif isinstance(encoding, bytes):
@@ -404,7 +404,7 @@ cdef class Unpacker(object):
cerr = PyBytes_AsString(self.unicode_errors)
init_ctx(&self.ctx, object_hook, object_pairs_hook, list_hook,
- ext_hook, use_list, raw_as_bytes, cenc, cerr,
+ ext_hook, use_list, raw, cenc, cerr,
max_str_len, max_bin_len, max_array_len,
max_map_len, max_ext_len)
diff --git a/msgpack/fallback.py b/msgpack/fallback.py
index 675ee8a..bf6c9a6 100644
--- a/msgpack/fallback.py
+++ b/msgpack/fallback.py
@@ -145,7 +145,7 @@ class Unpacker(object):
If true, unpack msgpack array to Python list.
Otherwise, unpack to Python tuple. (default: True)
- :param bool raw_as_bytes:
+ :param bool raw:
If true, unpack msgpack raw to Python bytes (default).
Otherwise, unpack to Python str (or unicode on Python 2) by decoding
with UTF-8 encoding (recommended).
@@ -193,13 +193,13 @@ class Unpacker(object):
example of streaming deserialize from file-like object::
- unpacker = Unpacker(file_like, raw_as_bytes=False)
+ unpacker = Unpacker(file_like, raw=False)
for o in unpacker:
process(o)
example of streaming deserialize from socket::
- unpacker = Unpacker(raw_as_bytes=False)
+ unpacker = Unpacker(raw=False)
while True:
buf = sock.recv(1024**2)
if not buf:
@@ -209,7 +209,7 @@ class Unpacker(object):
process(o)
"""
- def __init__(self, file_like=None, read_size=0, use_list=True, raw_as_bytes=True,
+ def __init__(self, file_like=None, read_size=0, use_list=True, raw=True,
object_hook=None, object_pairs_hook=None, list_hook=None,
encoding=None, unicode_errors=None, max_buffer_size=0,
ext_hook=ExtType,
@@ -221,7 +221,7 @@ class Unpacker(object):
if encoding is not None:
warnings.warn(
- "encoding is deprecated, Use raw_as_bytes=False instead.",
+ "encoding is deprecated, Use raw=False instead.",
PendingDeprecationWarning)
if unicode_errors is not None:
@@ -257,7 +257,7 @@ class Unpacker(object):
if read_size > self._max_buffer_size:
raise ValueError("read_size must be smaller than max_buffer_size")
self._read_size = read_size or min(self._max_buffer_size, 16*1024)
- self._raw_as_bytes = bool(raw_as_bytes)
+ self._raw = bool(raw)
self._encoding = encoding
self._unicode_errors = unicode_errors
self._use_list = use_list
@@ -606,7 +606,7 @@ class Unpacker(object):
if typ == TYPE_RAW:
if self._encoding is not None:
obj = obj.decode(self._encoding, self._unicode_errors)
- elif self._raw_as_bytes:
+ elif self._raw:
obj = bytes(obj)
else:
obj = obj.decode('utf_8')
@@ -715,7 +715,7 @@ class Packer(object):
encoding = 'utf_8'
else:
warnings.warn(
- "encoding is deprecated, Use raw_as_bytes=False instead.",
+ "encoding is deprecated, Use raw=False instead.",
PendingDeprecationWarning)
if unicode_errors is None:
diff --git a/msgpack/unpack.h b/msgpack/unpack.h
index 8c2fc46..d7b5e00 100644
--- a/msgpack/unpack.h
+++ b/msgpack/unpack.h
@@ -21,7 +21,7 @@
typedef struct unpack_user {
bool use_list;
- bool raw_as_bytes;
+ bool raw;
bool has_pairs_hook;
PyObject *object_hook;
PyObject *list_hook;
@@ -229,7 +229,7 @@ static inline int unpack_callback_raw(unpack_user* u, const char* b, const char*
if (u->encoding) {
py = PyUnicode_Decode(p, l, u->encoding, u->unicode_errors);
- } else if (u->raw_as_bytes) {
+ } else if (u->raw) {
py = PyBytes_FromStringAndSize(p, l);
} else {
py = PyUnicode_DecodeUTF8(p, l, NULL);
| [RFC] "raw_as_bytes" is a bad name because "raw" is a legacy name
Since I had been inactive for months, I forgot that the new spec renamed the "raw" type to "str".
Naming the new option "raw_as_bytes" is therefore a bad idea.
Luckily, I haven't released it yet.
Let's rename the option, but to what?
It should be short and clear, and based on Python 3 and the new spec.
* decode
* decode_str
* str
* raw (`raw=True` returns bytes, and `raw=False` returns str/unicode)
For now, I prefer `raw`. Any opinion?
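For concreteness, a minimal sketch of the proposed `raw` semantics (matching the README changes in the patch above):
```python
import msgpack

packed = msgpack.packb(['spam', 'eggs'], use_bin_type=True)

# raw=True (backward-compatible default): msgpack str comes back as bytes.
print(msgpack.unpackb(packed, raw=True))   # [b'spam', b'eggs']

# raw=False (recommended): msgpack str is decoded as UTF-8 into str.
print(msgpack.unpackb(packed, raw=False))  # ['spam', 'eggs']
```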
Or should I stop removing the `encoding` option?
I want to support only UTF-8, and I don't want to accept an encoding name because
I would have to canonicalize it (utf_8, utf8, utf-8, UTF-8, etc.). | msgpack/msgpack-python | diff --git a/test/test_limits.py b/test/test_limits.py
index 3febc30..74e48c1 100644
--- a/test/test_limits.py
+++ b/test/test_limits.py
@@ -39,11 +39,11 @@ def test_max_str_len():
d = 'x' * 3
packed = packb(d)
- unpacker = Unpacker(max_str_len=3, raw_as_bytes=False)
+ unpacker = Unpacker(max_str_len=3, raw=False)
unpacker.feed(packed)
assert unpacker.unpack() == d
- unpacker = Unpacker(max_str_len=2, raw_as_bytes=False)
+ unpacker = Unpacker(max_str_len=2, raw=False)
with pytest.raises(UnpackValueError):
unpacker.feed(packed)
unpacker.unpack()
diff --git a/test/test_pack.py b/test/test_pack.py
index 29f5887..b447f9c 100644
--- a/test/test_pack.py
+++ b/test/test_pack.py
@@ -31,11 +31,11 @@ def testPack():
def testPackUnicode():
test_data = ["", "abcd", ["defgh"], "Русский текст"]
for td in test_data:
- re = unpackb(packb(td), use_list=1, raw_as_bytes=False)
+ re = unpackb(packb(td), use_list=1, raw=False)
assert re == td
packer = Packer()
data = packer.pack(td)
- re = Unpacker(BytesIO(data), raw_as_bytes=False, use_list=1).unpack()
+ re = Unpacker(BytesIO(data), raw=False, use_list=1).unpack()
assert re == td
def testPackUTF32(): # deprecated
@@ -72,14 +72,14 @@ def testIgnoreUnicodeErrors(): # deprecated
def testStrictUnicodeUnpack():
with raises(UnicodeDecodeError):
- unpackb(packb(b'abc\xeddef'), raw_as_bytes=False, use_list=1)
+ unpackb(packb(b'abc\xeddef'), raw=False, use_list=1)
def testStrictUnicodePack(): # deprecated
with raises(UnicodeEncodeError):
packb("abc\xeddef", encoding='ascii', unicode_errors='strict')
def testIgnoreErrorsPack(): # deprecated
- re = unpackb(packb("abcФФФdef", encoding='ascii', unicode_errors='ignore'), raw_as_bytes=False, use_list=1)
+ re = unpackb(packb("abcФФФdef", encoding='ascii', unicode_errors='ignore'), raw=False, use_list=1)
assert re == "abcdef"
def testDecodeBinary():
diff --git a/test/test_stricttype.py b/test/test_stricttype.py
index 13239f1..87e7c1c 100644
--- a/test/test_stricttype.py
+++ b/test/test_stricttype.py
@@ -11,7 +11,7 @@ def test_namedtuple():
return dict(o._asdict())
raise TypeError('Unsupported type %s' % (type(o),))
packed = packb(T(1, 42), strict_types=True, use_bin_type=True, default=default)
- unpacked = unpackb(packed, raw_as_bytes=False)
+ unpacked = unpackb(packed, raw=False)
assert unpacked == {'foo': 1, 'bar': 42}
@@ -32,7 +32,7 @@ def test_tuple():
return o
data = packb(t, strict_types=True, use_bin_type=True, default=default)
- expected = unpackb(data, raw_as_bytes=False, object_hook=convert)
+ expected = unpackb(data, raw=False, object_hook=convert)
assert expected == t
@@ -53,10 +53,10 @@ def test_tuple_ext():
def convert(code, payload):
if code == MSGPACK_EXT_TYPE_TUPLE:
# Unpack and convert to tuple
- return tuple(unpackb(payload, raw_as_bytes=False, ext_hook=convert))
+ return tuple(unpackb(payload, raw=False, ext_hook=convert))
raise ValueError('Unknown Ext code {}'.format(code))
data = packb(t, strict_types=True, use_bin_type=True, default=default)
- expected = unpackb(data, raw_as_bytes=False, ext_hook=convert)
+ expected = unpackb(data, raw=False, ext_hook=convert)
assert expected == t
diff --git a/test/test_unpack.py b/test/test_unpack.py
index 143f999..00a1061 100644
--- a/test/test_unpack.py
+++ b/test/test_unpack.py
@@ -48,7 +48,7 @@ def test_unpacker_ext_hook():
def __init__(self):
super(MyUnpacker, self).__init__(
- ext_hook=self._hook, raw_as_bytes=False)
+ ext_hook=self._hook, raw=False)
def _hook(self, code, data):
if code == 1:
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 5
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
Cython==0.27.3
importlib-metadata==4.8.3
iniconfig==1.1.1
-e git+https://github.com/msgpack/msgpack-python.git@d9ec8fc905fc9ed37c86700f794adeb883b4f5ea#egg=msgpack
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: msgpack-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- cython==0.27.3
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/msgpack-python
| [
"test/test_limits.py::test_max_str_len",
"test/test_pack.py::testPackUnicode",
"test/test_pack.py::testStrictUnicodeUnpack",
"test/test_pack.py::testIgnoreErrorsPack",
"test/test_stricttype.py::test_namedtuple",
"test/test_stricttype.py::test_tuple",
"test/test_stricttype.py::test_tuple_ext",
"test/test_unpack.py::test_unpacker_ext_hook"
]
| []
| [
"test/test_limits.py::test_integer",
"test/test_limits.py::test_array_header",
"test/test_limits.py::test_map_header",
"test/test_limits.py::test_max_bin_len",
"test/test_limits.py::test_max_array_len",
"test/test_limits.py::test_max_map_len",
"test/test_limits.py::test_max_ext_len",
"test/test_pack.py::testPack",
"test/test_pack.py::testPackUTF32",
"test/test_pack.py::testPackBytes",
"test/test_pack.py::testPackByteArrays",
"test/test_pack.py::testIgnoreUnicodeErrors",
"test/test_pack.py::testStrictUnicodePack",
"test/test_pack.py::testDecodeBinary",
"test/test_pack.py::testPackFloat",
"test/test_pack.py::testArraySize",
"test/test_pack.py::test_manualreset",
"test/test_pack.py::testMapSize",
"test/test_pack.py::test_odict",
"test/test_pack.py::test_pairlist",
"test/test_unpack.py::test_unpack_array_header_from_file",
"test/test_unpack.py::test_unpacker_hook_refcnt"
]
| []
| Apache License 2.0 | 2,037 | [
"README.rst",
"Makefile",
"msgpack/unpack.h",
"msgpack/_unpacker.pyx",
"msgpack/fallback.py"
]
| [
"README.rst",
"Makefile",
"msgpack/unpack.h",
"msgpack/_unpacker.pyx",
"msgpack/fallback.py"
]
|
|
civisanalytics__civisml-extensions-22 | 8f4b19e074bace3172269bdce3c16bd3dd242256 | 2018-01-12 15:51:27 | a88dae1468b02f8a19ecfbcfe87e62805afc67e9 | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6344c7c..c527285 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,6 +6,11 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
## Unreleased
+### Fixed
+- Now caching CV indices. When CV generators are passed with `shuffle=True` and
+ no `random_state` is set, they produce different CV folds on each call to
+ `split` (#22).
+
### Fixed
- Updated `scipy` dependency in `requirements.txt` file to `scipy>=0.14,<2.0`
- ``DataFrameETL`` now correctly handles all ``Categorial``-type columns
diff --git a/civismlext/stacking.py b/civismlext/stacking.py
index c06a986..8c04a47 100644
--- a/civismlext/stacking.py
+++ b/civismlext/stacking.py
@@ -233,20 +233,25 @@ class BaseStackedModel(BaseEstimator):
out-of-sample predictions on the test folds as features for the
meta-estimator. Also return the fit_params for the meta-estimator.
"""
- # TO DO:
- # -use joblib to split base estimator training up
-
y = y.squeeze()
# Construct CV iterator
cv = self._check_cv(y=y)
+ # Extract CV indices since we need them twice, and un-seeded CV
+ # generators with `shuffle=True` split differently each time.
+ train_inds = []
+ test_inds = []
+ for train, test in cv.split(X, y):
+ train_inds.append(train)
+ test_inds.append(test)
fit_params_ests = self._extract_fit_params(**fit_params)
_fit_predict = self._get_fit_predict_function()
+ _jobs = []
+
# Loop over CV folds to get out-of-sample predictions, which become the
# features for the meta-estimator.
- _jobs = []
- for train, test in cv.split(X, y):
+ for train, test in zip(train_inds, test_inds):
for name, est in self.estimator_list[:-1]:
# adapted from sklearn.model_selection._fit_and_predict
# Adjust length of sample weights
@@ -269,7 +274,7 @@ class BaseStackedModel(BaseEstimator):
# Extract the results from joblib
Xmeta, ymeta = None, None
- for train, test in cv.split(X, y):
+ for test in test_inds:
ybase = np.empty((y[test].shape[0], 0))
for name, est in self.estimator_list[:-1]:
# Build design matrix out of out-of-sample predictions
| `_base_est_fit_predict` looks wrong
I believe `cv.split(X, y)` can give different results each time it is called; there may be randomness involved, such as shuffling.
This is problematic because `ybase` is built from the old `train` & `test` indices, so it does not coincide with the new `test` indices and therefore with `y[test]`.
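A minimal sketch (using scikit-learn's `KFold` with a toy six-element array) showing that an unseeded shuffling splitter yields different folds on each call to `split`:
```python
import numpy as np
from sklearn.model_selection import KFold

X = np.arange(6).reshape(-1, 1)
y = np.arange(6)
cv = KFold(n_splits=2, shuffle=True)  # shuffle without a random_state

first = [test.tolist() for _, test in cv.split(X, y)]
second = [test.tolist() for _, test in cv.split(X, y)]

# The two calls generally disagree, so the indices from the first pass
# cannot be recovered by simply calling cv.split(X, y) a second time.
print(first)
print(second)
```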
old `train` & `test`:
```
for train, test in cv.split(X, y):
for name, est in self.estimator_list[:-1]:
# adapted from sklearn.model_selection._fit_and_predict
# Adjust length of sample weights
fit_params_est_adjusted = dict([
(k, _index_param_value(X, v, train))
for k, v in fit_params_ests[name].items()])
# Fit estimator on training set and score out-of-sample
_jobs.append(delayed(_fit_predict)(
clone(est),
X[train],
y[train],
X[test],
**fit_params_est_adjusted))
```
new `train` & `test`:
```
# Extract the results from joblib
Xmeta, ymeta = None, None
for train, test in cv.split(X, y):
ybase = np.empty((y[test].shape[0], 0))
for name, est in self.estimator_list[:-1]:
# Build design matrix out of out-of-sample predictions
ybase = np.hstack((ybase, _out.pop(0)))
# Append the test outputs to what will eventually be the features
# for the meta-estimator.
if Xmeta is not None:
ymeta = np.concatenate((ymeta, y[test]))
Xmeta = np.vstack((Xmeta, ybase))
else:
Xmeta = ybase
ymeta = y[test]
``` | civisanalytics/civisml-extensions | diff --git a/civismlext/test/test_stacking.py b/civismlext/test/test_stacking.py
index eefd2dc..39741c6 100644
--- a/civismlext/test/test_stacking.py
+++ b/civismlext/test/test_stacking.py
@@ -8,7 +8,7 @@ from ..nonnegative import NonNegativeLinearRegression
import pytest
import numpy as np
from sklearn.tree import DecisionTreeClassifier
-from sklearn.model_selection import train_test_split
+from sklearn.model_selection import KFold, train_test_split
from sklearn.metrics import roc_curve, auc, mean_squared_error
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
@@ -729,6 +729,32 @@ def test_fit_regression():
assert sr.meta_estimator.fit_params == {'bar': 'b'}
+def test_cv_shuffle_indices():
+ """Make sure xmeta and ymeta retain the correct order, even when the CV
+ generator is shuffling. This is checking for the bug reported in issue #16.
+ """
+ estlist = [('be1', PassThruReg()),
+ ('be2', PassThruReg()),
+ ('meta', PassThruReg())]
+ sr = StackedRegressor(estlist, cv=KFold(n_splits=2, shuffle=True))
+ x = np.arange(6)
+ y = np.arange(6)
+
+ # Suppose the train indices of a 2-fold CV are:
+ # [a, b, c] and [d, e, f]
+ # Then the test indices are:
+ # [d, e, f] and [a, b, c]
+ # Since xmeta is just a pass-through of x[train] (horizontally stacked
+ # twice, due to the two base estimators) and ymeta is a pass-through of
+ # y[test], and since x = y, we should expect that:
+ # xmeta[inds, 0] == ymeta
+ #
+ inds = np.array([3, 4, 5, 0, 1, 2])
+
+ xmeta, ymeta, _ = sr._base_est_fit_predict(x, y)
+ np.testing.assert_equal(xmeta[inds, 0], ymeta)
+
+
@mock.patch('civismlext.stacking.clone', lambda x: x)
def test_base_est_fit_predict_multiout_regression():
estlist = [('be1', PassThruReg()),
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 2
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
-e git+https://github.com/civisanalytics/civisml-extensions.git@8f4b19e074bace3172269bdce3c16bd3dd242256#egg=civisml_extensions
importlib-metadata==4.8.3
iniconfig==1.1.1
numpy==1.19.5
packaging==21.3
pandas==0.25.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
python-dateutil==2.9.0.post0
pytz==2025.2
scikit-learn==0.19.2
scipy==1.5.4
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: civisml-extensions
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- numpy==1.19.5
- packaging==21.3
- pandas==0.25.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- scikit-learn==0.19.2
- scipy==1.5.4
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/civisml-extensions
| [
"civismlext/test/test_stacking.py::test_cv_shuffle_indices"
]
| []
| [
"civismlext/test/test_stacking.py::test_init[StackedClassifier]",
"civismlext/test/test_stacking.py::test_init[StackedRegressor]",
"civismlext/test/test_stacking.py::test_get_params[StackedClassifier]",
"civismlext/test/test_stacking.py::test_get_params[StackedRegressor]",
"civismlext/test/test_stacking.py::test_get_params_estimator_list_empty[StackedClassifier]",
"civismlext/test/test_stacking.py::test_get_params_estimator_list_empty[StackedRegressor]",
"civismlext/test/test_stacking.py::test_set_params[StackedClassifier]",
"civismlext/test/test_stacking.py::test_set_params[StackedRegressor]",
"civismlext/test/test_stacking.py::test_properties_clf",
"civismlext/test/test_stacking.py::test_properties_regression",
"civismlext/test/test_stacking.py::test_smoke_clf_methods[1]",
"civismlext/test/test_stacking.py::test_smoke_clf_methods[3]",
"civismlext/test/test_stacking.py::test_smoke_multiclass_clf_methods[1]",
"civismlext/test/test_stacking.py::test_smoke_multiclass_clf_methods[3]",
"civismlext/test/test_stacking.py::test_smoke_regression_methods[1]",
"civismlext/test/test_stacking.py::test_smoke_regression_methods[3]",
"civismlext/test/test_stacking.py::test_smoke_multiout_regression_methods[1]",
"civismlext/test/test_stacking.py::test_smoke_multiout_regression_methods[3]",
"civismlext/test/test_stacking.py::test_validate_estimators[StackedClassifier]",
"civismlext/test/test_stacking.py::test_validate_estimators[StackedRegressor]",
"civismlext/test/test_stacking.py::test_validate_clf_estimators",
"civismlext/test/test_stacking.py::test_check_clf_methods",
"civismlext/test/test_stacking.py::test_validate_estimators_in_fit[StackedClassifier]",
"civismlext/test/test_stacking.py::test_validate_estimators_in_fit[StackedRegressor]",
"civismlext/test/test_stacking.py::test_validate_names[StackedClassifier]",
"civismlext/test/test_stacking.py::test_validate_names[StackedRegressor]",
"civismlext/test/test_stacking.py::test_validate_names_in_fit[StackedClassifier]",
"civismlext/test/test_stacking.py::test_validate_names_in_fit[StackedRegressor]",
"civismlext/test/test_stacking.py::test_validate_at_least_2_estimators[StackedClassifier]",
"civismlext/test/test_stacking.py::test_validate_at_least_2_estimators[StackedRegressor]",
"civismlext/test/test_stacking.py::test_extract_fit_params[StackedClassifier]",
"civismlext/test/test_stacking.py::test_extract_fit_params[StackedRegressor]",
"civismlext/test/test_stacking.py::test_base_est_fit[StackedClassifier]",
"civismlext/test/test_stacking.py::test_base_est_fit[StackedRegressor]",
"civismlext/test/test_stacking.py::test_base_est_predict_clf",
"civismlext/test/test_stacking.py::test_base_est_predict_reg",
"civismlext/test/test_stacking.py::test_base_est_fit_predict_clf",
"civismlext/test/test_stacking.py::test_base_est_fit_predict_multiout_clf",
"civismlext/test/test_stacking.py::test_fit_clf",
"civismlext/test/test_stacking.py::test_fit_multiclass_clf",
"civismlext/test/test_stacking.py::test_fit_multiout_clf",
"civismlext/test/test_stacking.py::test_base_est_fit_predict_regression",
"civismlext/test/test_stacking.py::test_fit_regression",
"civismlext/test/test_stacking.py::test_base_est_fit_predict_multiout_regression",
"civismlext/test/test_stacking.py::test_fit_multiout_regression",
"civismlext/test/test_stacking.py::test_fit_pred_simple_regression",
"civismlext/test/test_stacking.py::test_fit_pred_simple_clf",
"civismlext/test/test_stacking.py::test_fit_params_regression",
"civismlext/test/test_stacking.py::test_fit_params_clf",
"civismlext/test/test_stacking.py::test_integration_regression[1]",
"civismlext/test/test_stacking.py::test_integration_regression[3]",
"civismlext/test/test_stacking.py::test_integration_clf[1]",
"civismlext/test/test_stacking.py::test_integration_clf[3]"
]
| []
| BSD 3-Clause "New" or "Revised" License | 2,038 | [
"civismlext/stacking.py",
"CHANGELOG.md"
]
| [
"civismlext/stacking.py",
"CHANGELOG.md"
]
|
|
ucfopen__canvasapi-133 | 4234acba45be780265a19f4aeb52df5ee5bf924f | 2018-01-12 15:59:21 | c69f6a9801ac275fdad46d97fa95c77c25d6f953 | diff --git a/canvasapi/util.py b/canvasapi/util.py
index 86d6ee4..88741ab 100644
--- a/canvasapi/util.py
+++ b/canvasapi/util.py
@@ -126,12 +126,13 @@ def obj_or_id(parameter, param_name, object_types):
def get_institution_url(base_url):
"""
- Trim '/api/v1' from a given root URL.
+ Clean up a given base URL.
:param base_url: The base URL of the API.
:type base_url: str
:rtype: str
"""
+ base_url = base_url.rstrip('/')
index = base_url.find('/api/v1')
if index != -1:
| Properly strip out trailing slash from BASE_URL
A user may include a trailing slash at the end of their `BASE_URL`. When we then tack on `/api/v1`, the user's trailing slash meets ours, forming strange URLs like `some.site//api/v1`. While this works for most endpoints, our PaginatedList implementation chokes when trying to parse the `next` link header, because Canvas returns a URL with multiple slashes.
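For reference, a minimal sketch of the cleaned-up helper from the patch above (the tail of the function is inferred from the new tests, so treat it as a sketch):
```python
def get_institution_url(base_url):
    # Drop any trailing slash first, then trim a trailing /api/v1 if present.
    base_url = base_url.rstrip('/')
    index = base_url.find('/api/v1')
    if index != -1:
        return base_url[0:index]
    return base_url

assert get_institution_url('https://my.canvas.edu/') == 'https://my.canvas.edu'
assert get_institution_url('https://my.canvas.edu/api/v1/') == 'https://my.canvas.edu'
```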
| ucfopen/canvasapi | diff --git a/tests/test_util.py b/tests/test_util.py
index 472b4f1..15fe7ad 100644
--- a/tests/test_util.py
+++ b/tests/test_util.py
@@ -413,5 +413,26 @@ class TestUtil(unittest.TestCase):
# get_institution_url()
def test_get_institution_url(self, m):
- base_url = 'https://my.canvas.edu/api/v1'
- self.assertEqual(get_institution_url(base_url), 'https://my.canvas.edu')
+ correct_url = 'https://my.canvas.edu'
+
+ self.assertEqual(
+ get_institution_url('https://my.canvas.edu/'), correct_url
+ )
+ self.assertEqual(
+ get_institution_url('https://my.canvas.edu/api/v1'), correct_url
+ )
+ self.assertEqual(
+ get_institution_url('https://my.canvas.edu/api/v1/'), correct_url
+ )
+ self.assertEqual(
+ get_institution_url('https://my.canvas.edu/test/2/'),
+ correct_url + '/test/2'
+ )
+ self.assertEqual(
+ get_institution_url('https://my.canvas.edu/test/2/api/v1'),
+ correct_url + '/test/2'
+ )
+ self.assertEqual(
+ get_institution_url('https://my.canvas.edu/test/2/api/v1/'),
+ correct_url + '/test/2'
+ )
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 1
} | 0.8 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"flake8",
"pycodestyle",
"coverage",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt",
"dev_requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
-e git+https://github.com/ucfopen/canvasapi.git@4234acba45be780265a19f4aeb52df5ee5bf924f#egg=canvasapi
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
docutils==0.17.1
flake8==5.0.4
idna==3.10
imagesize==1.4.1
importlib-metadata==4.2.0
iniconfig==1.1.1
Jinja2==3.0.3
MarkupSafe==2.0.1
mccabe==0.7.0
packaging==21.3
pluggy==1.0.0
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytz==2025.2
requests==2.27.1
requests-mock==1.12.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==4.3.2
sphinx-rtd-theme==1.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: canvasapi
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- coverage==6.2
- docutils==0.17.1
- flake8==5.0.4
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.2.0
- iniconfig==1.1.1
- jinja2==3.0.3
- markupsafe==2.0.1
- mccabe==0.7.0
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytz==2025.2
- requests==2.27.1
- requests-mock==1.12.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==4.3.2
- sphinx-rtd-theme==1.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/canvasapi
| [
"tests/test_util.py::TestUtil::test_get_institution_url"
]
| []
| [
"tests/test_util.py::TestUtil::test_combine_kwargs_empty",
"tests/test_util.py::TestUtil::test_combine_kwargs_multiple_dicts",
"tests/test_util.py::TestUtil::test_combine_kwargs_multiple_mixed",
"tests/test_util.py::TestUtil::test_combine_kwargs_multiple_nested_dicts",
"tests/test_util.py::TestUtil::test_combine_kwargs_nested_dict",
"tests/test_util.py::TestUtil::test_combine_kwargs_single",
"tests/test_util.py::TestUtil::test_combine_kwargs_single_dict",
"tests/test_util.py::TestUtil::test_combine_kwargs_single_generator_empty",
"tests/test_util.py::TestUtil::test_combine_kwargs_single_generator_multiple_items",
"tests/test_util.py::TestUtil::test_combine_kwargs_single_generator_single_item",
"tests/test_util.py::TestUtil::test_combine_kwargs_single_list_empty",
"tests/test_util.py::TestUtil::test_combine_kwargs_single_list_multiple_items",
"tests/test_util.py::TestUtil::test_combine_kwargs_single_list_single_item",
"tests/test_util.py::TestUtil::test_combine_kwargs_super_nested_dict",
"tests/test_util.py::TestUtil::test_combine_kwargs_the_gauntlet",
"tests/test_util.py::TestUtil::test_is_multivalued_bool",
"tests/test_util.py::TestUtil::test_is_multivalued_bytes",
"tests/test_util.py::TestUtil::test_is_multivalued_chain",
"tests/test_util.py::TestUtil::test_is_multivalued_dict",
"tests/test_util.py::TestUtil::test_is_multivalued_dict_items",
"tests/test_util.py::TestUtil::test_is_multivalued_dict_iter",
"tests/test_util.py::TestUtil::test_is_multivalued_dict_keys",
"tests/test_util.py::TestUtil::test_is_multivalued_dict_values",
"tests/test_util.py::TestUtil::test_is_multivalued_generator_call",
"tests/test_util.py::TestUtil::test_is_multivalued_generator_expr",
"tests/test_util.py::TestUtil::test_is_multivalued_integer_types",
"tests/test_util.py::TestUtil::test_is_multivalued_list",
"tests/test_util.py::TestUtil::test_is_multivalued_list_iter",
"tests/test_util.py::TestUtil::test_is_multivalued_set",
"tests/test_util.py::TestUtil::test_is_multivalued_set_iter",
"tests/test_util.py::TestUtil::test_is_multivalued_str",
"tests/test_util.py::TestUtil::test_is_multivalued_tuple",
"tests/test_util.py::TestUtil::test_is_multivalued_tuple_iter",
"tests/test_util.py::TestUtil::test_is_multivalued_unicode",
"tests/test_util.py::TestUtil::test_is_multivalued_zip",
"tests/test_util.py::TestUtil::test_obj_or_id_int",
"tests/test_util.py::TestUtil::test_obj_or_id_obj",
"tests/test_util.py::TestUtil::test_obj_or_id_obj_no_id",
"tests/test_util.py::TestUtil::test_obj_or_id_str_invalid",
"tests/test_util.py::TestUtil::test_obj_or_id_str_valid"
]
| []
| MIT License | 2,039 | [
"canvasapi/util.py"
]
| [
"canvasapi/util.py"
]
|
|
diyan__pywinrm-202 | eb6a408e3941260e753b6596eab1f099cddc6515 | 2018-01-12 19:14:21 | 3eb4005044b323459fed55c797adbaccdd6bcf4a | diff --git a/winrm/transport.py b/winrm/transport.py
index 81e8826..430c761 100644
--- a/winrm/transport.py
+++ b/winrm/transport.py
@@ -147,19 +147,21 @@ class Transport(object):
def build_session(self):
session = requests.Session()
- session.verify = self.server_cert_validation == 'validate'
- if session.verify and self.ca_trust_path:
- session.verify = self.ca_trust_path
-
- # configure proxies from HTTP/HTTPS_PROXY envvars
+ # allow some settings to be merged from env
session.trust_env = True
settings = session.merge_environment_settings(url=self.endpoint, proxies={}, stream=None,
verify=None, cert=None)
- # we're only applying proxies and/or verify from env, other settings are ignored
+ # get proxy settings from env
+ # FUTURE: allow proxy to be passed in directly to supersede this value
session.proxies = settings['proxies']
- if settings['verify'] is not None or self.ca_trust_path is not None:
+ # specified validation mode takes precedence
+ session.verify = self.server_cert_validation == 'validate'
+
+ # patch in CA path override if one was specified in init or env
+ if session.verify and (self.ca_trust_path is not None or settings['verify'] is not None):
+ # session.verify can be either a bool or path to a CA store; prefer passed-in value over env if both are present
session.verify = self.ca_trust_path or settings['verify']
encryption_available = False
| server_cert_validation 'ignore' no longer works in 0.3.0
The following no longer works against a self-signed certificate; has the behavior of `server_cert_validation` changed?
```
from winrm.protocol import Protocol
p = Protocol(
endpoint='https://somewindows:5986/wsman',
transport='ntlm',
username=r'\Admin',
password='ThePassword1',
server_cert_validation='ignore')
shell_id = p.open_shell()
command_id = p.run_command(shell_id, 'ipconfig', ['/all'])
std_out, std_err, status_code = p.get_command_output(shell_id, command_id)
print std_out
p.cleanup_command(shell_id, command_id)
p.close_shell(shell_id)
```
> Traceback (most recent call last):
> File "winrmtest.py", line 9, in <module>
> shell_id = p.open_shell()
> File "/usr/lib/python2.7/site-packages/winrm/protocol.py", line 157, in open_shell
> res = self.send_message(xmltodict.unparse(req))
> File "/usr/lib/python2.7/site-packages/winrm/protocol.py", line 234, in send_message
> resp = self.transport.send_message(message)
> File "/usr/lib/python2.7/site-packages/winrm/transport.py", line 256, in send_message
> response = self._send_message_request(prepared_request, message)
> File "/usr/lib/python2.7/site-packages/winrm/transport.py", line 261, in _send_message_request
> response = self.session.send(prepared_request, timeout=self.read_timeout_sec)
> File "/usr/lib/python2.7/site-packages/requests/sessions.py", line 618, in send
> r = adapter.send(request, **kwargs)
> File "/usr/lib/python2.7/site-packages/requests/adapters.py", line 506, in send
> raise SSLError(e, request=request)
> requests.exceptions.SSLError: HTTPSConnectionPool(host='123.45.67.89', port=5986): Max retries exceeded with url: /wsman (Caused by SSLError(SSLError("bad handshake: Error([('SSL routines', 'tls_process_server_certificate', 'certificate verify failed')],)",),))
With:
```
pip show pywinrm
---
Metadata-Version: 2.0
Name: pywinrm
Version: 0.3.0
Summary: Python library for Windows Remote Management
Home-page: http://github.com/diyan/pywinrm/
Author: Alexey Diyan
Author-email: [email protected]
Installer: pip
License: MIT license
Location: /usr/lib/python2.7/site-packages
Requires: six, requests-ntlm, requests, xmltodict
Classifiers:
Development Status :: 4 - Beta
Environment :: Console
Intended Audience :: Developers
Intended Audience :: System Administrators
Natural Language :: English
License :: OSI Approved :: MIT License
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 2.6
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.3
Programming Language :: Python :: 3.4
Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
Programming Language :: Python :: Implementation :: PyPy
Topic :: Software Development :: Libraries :: Python Modules
Topic :: System :: Clustering
Topic :: System :: Distributed Computing
Topic :: System :: Systems Administration
```
However, this previous release does work:
```
pip show pywinrm
---
Metadata-Version: 2.0
Name: pywinrm
Version: 0.2.2
Summary: Python library for Windows Remote Management
Home-page: http://github.com/diyan/pywinrm/
Author: Alexey Diyan
Author-email: [email protected]
Installer: pip
License: MIT license
Location: /usr/lib/python2.7/site-packages
Requires: six, requests-ntlm, requests, xmltodict
Classifiers:
Development Status :: 4 - Beta
Environment :: Console
Intended Audience :: Developers
Intended Audience :: System Administrators
Natural Language :: English
License :: OSI Approved :: MIT License
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 2.6
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.3
Programming Language :: Python :: 3.4
Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
Programming Language :: Python :: Implementation :: PyPy
Topic :: Software Development :: Libraries :: Python Modules
Topic :: System :: Clustering
Topic :: System :: Distributed Computing
Topic :: System :: Systems Administration
```
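A distilled sketch of the mechanism (the values are hypothetical stand-ins for requests' merged environment settings): in 0.3.0 the env-derived CA bundle was applied after the `server_cert_validation` check, silently turning validation back on; the patch above only consults the override when validation is requested.
```python
# Hypothetical stand-ins for session.merge_environment_settings(...) output.
env_verify = 'path_to_REQUESTS_CA_CERT'   # e.g. from REQUESTS_CA_BUNDLE
ca_trust_path = None
server_cert_validation = 'ignore'

# 0.3.0 (broken): the env setting unconditionally overwrote verify=False.
verify = server_cert_validation == 'validate'            # False
if env_verify is not None or ca_trust_path is not None:
    verify = ca_trust_path or env_verify                  # a CA path; 'ignore' is lost

# Fixed: the CA-path override only applies when validation is on.
verify = server_cert_validation == 'validate'             # False
if verify and (ca_trust_path is not None or env_verify is not None):
    verify = ca_trust_path or env_verify
assert verify is False
```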
| diyan/pywinrm | diff --git a/winrm/tests/test_transport.py b/winrm/tests/test_transport.py
index 94ebe32..14e00cf 100644
--- a/winrm/tests/test_transport.py
+++ b/winrm/tests/test_transport.py
@@ -1,21 +1,63 @@
# coding=utf-8
import os
+import tempfile
from winrm.transport import Transport
-def test_build_session():
- transport = Transport(endpoint="Endpoint",
+def test_build_session_cert_validate():
+ t_default = Transport(endpoint="Endpoint",
server_cert_validation='validate',
username='test',
password='test',
auth_method='basic',
)
- os.environ['REQUESTS_CA_BUNDLE'] = 'path_to_REQUESTS_CA_CERT'
- transport.build_session()
- assert(transport.session.verify == 'path_to_REQUESTS_CA_CERT')
- del os.environ['REQUESTS_CA_BUNDLE']
+ t_ca_override = Transport(endpoint="Endpoint",
+ server_cert_validation='validate',
+ username='test',
+ password='test',
+ auth_method='basic',
+ ca_trust_path='overridepath',
+ )
+ try:
+ os.environ['REQUESTS_CA_BUNDLE'] = 'path_to_REQUESTS_CA_CERT'
+ t_default.build_session()
+ t_ca_override.build_session()
+ assert(t_default.session.verify == 'path_to_REQUESTS_CA_CERT')
+ assert(t_ca_override.session.verify == 'overridepath')
+ finally:
+ del os.environ['REQUESTS_CA_BUNDLE']
- os.environ['CURL_CA_BUNDLE'] = 'path_to_CURL_CA_CERT'
- transport.build_session()
- assert(transport.session.verify == 'path_to_CURL_CA_CERT')
- del os.environ['CURL_CA_BUNDLE']
+ try:
+ os.environ['CURL_CA_BUNDLE'] = 'path_to_CURL_CA_CERT'
+ t_default.build_session()
+ t_ca_override.build_session()
+ assert(t_default.session.verify == 'path_to_CURL_CA_CERT')
+ assert (t_ca_override.session.verify == 'overridepath')
+ finally:
+ del os.environ['CURL_CA_BUNDLE']
+
+
+def test_build_session_cert_ignore():
+ t_default = Transport(endpoint="Endpoint",
+ server_cert_validation='ignore',
+ username='test',
+ password='test',
+ auth_method='basic',
+ )
+ t_ca_override = Transport(endpoint="Endpoint",
+ server_cert_validation='ignore',
+ username='test',
+ password='test',
+ auth_method='basic',
+ ca_trust_path='boguspath'
+ )
+ try:
+ os.environ['REQUESTS_CA_BUNDLE'] = 'path_to_REQUESTS_CA_CERT'
+ os.environ['CURL_CA_BUNDLE'] = 'path_to_CURL_CA_CERT'
+ t_default.build_session()
+ t_ca_override.build_session()
+ assert(isinstance(t_default.session.verify, bool) and not t_default.session.verify)
+ assert (isinstance(t_ca_override.session.verify, bool) and not t_ca_override.session.verify)
+ finally:
+ del os.environ['REQUESTS_CA_BUNDLE']
+ del os.environ['CURL_CA_BUNDLE']
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[kerberos,credssp]",
"log_parser": "parse_log_pytest_v2",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-pep8",
"mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements-test.txt"
],
"test_cmd": "py.test -v --pep8 --cov=winrm --cov-report=term-missing winrm/tests/"
} | certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
coverage==6.2
cryptography==40.0.2
dataclasses==0.8
decorator==5.1.1
execnet==1.9.0
gssapi==1.7.3
idna==3.10
krb5==0.3.0
mock==5.2.0
ntlm-auth==1.5.0
pep8==1.7.1
py==1.11.0
pycparser==2.21
pyspnego==0.5.4
pytest==3.2.5
pytest-cache==1.0
pytest-cov==2.6.0
pytest-pep8==1.0.6
-e git+https://github.com/diyan/pywinrm.git@eb6a408e3941260e753b6596eab1f099cddc6515#egg=pywinrm
requests==2.27.1
requests-credssp==2.0.0
requests-kerberos==0.15.0
requests-ntlm==1.1.0
six==1.17.0
urllib3==1.26.20
xmltodict==0.14.2
| name: pywinrm
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cffi==1.15.1
- charset-normalizer==2.0.12
- coverage==6.2
- cryptography==40.0.2
- dataclasses==0.8
- decorator==5.1.1
- execnet==1.9.0
- gssapi==1.7.3
- idna==3.10
- krb5==0.3.0
- mock==5.2.0
- ntlm-auth==1.5.0
- pep8==1.7.1
- py==1.11.0
- pycparser==2.21
- pyspnego==0.5.4
- pytest==3.2.5
- pytest-cache==1.0
- pytest-cov==2.6.0
- pytest-pep8==1.0.6
- requests==2.27.1
- requests-credssp==2.0.0
- requests-kerberos==0.15.0
- requests-ntlm==1.1.0
- six==1.17.0
- urllib3==1.26.20
- xmltodict==0.14.2
prefix: /opt/conda/envs/pywinrm
| [
"winrm/tests/test_transport.py::test_build_session_cert_ignore"
]
| []
| [
"winrm/tests/__init__.py",
"winrm/tests/conftest.py",
"winrm/tests/test_cmd.py",
"winrm/tests/test_encryption.py",
"winrm/tests/test_encryption.py::test_init_with_invalid_protocol",
"winrm/tests/test_encryption.py::test_encrypt_message",
"winrm/tests/test_encryption.py::test_encrypt_large_credssp_message",
"winrm/tests/test_encryption.py::test_decrypt_message",
"winrm/tests/test_encryption.py::test_decrypt_message_boundary_with_end_hyphens",
"winrm/tests/test_encryption.py::test_decrypt_message_length_mismatch",
"winrm/tests/test_encryption.py::test_decrypt_large_credssp_message",
"winrm/tests/test_encryption.py::test_decrypt_message_decryption_not_needed",
"winrm/tests/test_encryption.py::test_get_credssp_trailer_length_gcm",
"winrm/tests/test_encryption.py::test_get_credssp_trailer_length_md5_rc4",
"winrm/tests/test_encryption.py::test_get_credssp_trailer_length_sha256_3des",
"winrm/tests/test_encryption.py::test_get_credssp_trailer_length_sha384_aes",
"winrm/tests/test_encryption.py::test_get_credssp_trailer_length_no_hash",
"winrm/tests/test_integration_protocol.py",
"winrm/tests/test_integration_session.py",
"winrm/tests/test_nori_type_casting.py",
"winrm/tests/test_powershell.py",
"winrm/tests/test_protocol.py",
"winrm/tests/test_protocol.py::test_open_shell_and_close_shell",
"winrm/tests/test_protocol.py::test_run_command_with_arguments_and_cleanup_command",
"winrm/tests/test_protocol.py::test_run_command_without_arguments_and_cleanup_command",
"winrm/tests/test_protocol.py::test_get_command_output",
"winrm/tests/test_protocol.py::test_set_timeout_as_sec",
"winrm/tests/test_protocol.py::test_fail_set_read_timeout_as_sec",
"winrm/tests/test_protocol.py::test_fail_set_operation_timeout_as_sec",
"winrm/tests/test_session.py",
"winrm/tests/test_session.py::test_run_cmd",
"winrm/tests/test_session.py::test_target_as_hostname",
"winrm/tests/test_session.py::test_target_as_hostname_then_port",
"winrm/tests/test_session.py::test_target_as_schema_then_hostname",
"winrm/tests/test_session.py::test_target_as_schema_then_hostname_then_port",
"winrm/tests/test_session.py::test_target_as_full_url",
"winrm/tests/test_session.py::test_target_with_dots",
"winrm/tests/test_transport.py::test_build_session_cert_validate",
"winrm/tests/test_wql.py"
]
| []
| MIT License | 2,040 | [
"winrm/transport.py"
]
| [
"winrm/transport.py"
]
|
|
wright-group__WrightTools-448 | 49cb335e4b0dd3556304ec72daa65cd812493f3b | 2018-01-12 21:09:47 | 592649ce55c9fa7847325c9e9b15b320a38f1389 | pep8speaks: Hello @untzag! Thanks for submitting the PR.
- In the file [`WrightTools/_dataset.py`](https://github.com/wright-group/WrightTools/blob/b2240c89d941647b42212c46b3463c1bc55b9d71/WrightTools/_dataset.py), the following are the PEP8 issues:
> [Line 217:9](https://github.com/wright-group/WrightTools/blob/b2240c89d941647b42212c46b3463c1bc55b9d71/WrightTools/_dataset.py#L217): [E306](https://duckduckgo.com/?q=pep8%20E306) expected 1 blank line before a nested definition, found 0
| diff --git a/WrightTools/_dataset.py b/WrightTools/_dataset.py
index 0331efe..9f46ef3 100644
--- a/WrightTools/_dataset.py
+++ b/WrightTools/_dataset.py
@@ -6,13 +6,14 @@
import posixpath
import collections
-from concurrent.futures import ThreadPoolExecutor
import numpy as np
import h5py
+from . import exceptions as wt_exceptions
from . import kit as wt_kit
+from . import units as wt_units
# --- class ---------------------------------------------------------------------------------------
@@ -196,6 +197,27 @@ class Dataset(h5py.Dataset):
self.chunkwise(f, min=min, max=max, replace=replace)
+ def convert(self, destination_units):
+ """Convert units.
+
+ Parameters
+ ----------
+ destination_units : string (optional)
+ Units to convert into.
+ """
+ if not wt_units.is_valid_conversion(self.units, destination_units):
+ kind = wt_units.kind(self.units)
+ valid = list(wt_units.dicts[kind].keys())
+ raise wt_exceptions.UnitsError(valid, destination_units)
+ if self.units is None:
+ return
+
+ def f(dataset, s, destination_units):
+ dataset[s] = wt_units.converter(dataset[s], dataset.units, destination_units)
+
+ self.chunkwise(f, destination_units=destination_units)
+ self.units = destination_units
+
def log(self, base=np.e, floor=None):
"""Take the log of the entire dataset.
diff --git a/WrightTools/data/_axis.py b/WrightTools/data/_axis.py
index 11132b0..1485046 100644
--- a/WrightTools/data/_axis.py
+++ b/WrightTools/data/_axis.py
@@ -79,20 +79,10 @@ class Axis(object):
@property
def label(self):
- label_seed = [v.label for v in self.variables]
- symbol_type = wt_units.get_default_symbol_type(self.units)
- label = r'$\mathsf{'
- for part in label_seed:
- if self.units_kind is not None:
- units_dictionary = getattr(wt_units, self.units_kind)
- label += getattr(wt_units, symbol_type)[self.units]
- if part is not '':
- label += r'_{' + str(part) + r'}'
- else:
- label += self.name.replace('_', '\,\,')
- # TODO: handle all operators
- label += r'='
- label = label[:-1] # remove the last equals sign
+ symbol = wt_units.get_symbol(self.units)
+ label = r'$\mathsf{' + self.expression
+ for v in self.variables:
+ label = label.replace(v.natural_name, '%s_{%s}' % (symbol, v.label))
if self.units_kind:
units_dictionary = getattr(wt_units, self.units_kind)
label += r'\,'
@@ -139,9 +129,7 @@ class Axis(object):
@property
def units_kind(self):
"""Units kind."""
- for dic in wt_units.unit_dicts:
- if self.units in dic.keys():
- return dic['kind']
+ return wt_units.kind(self.units)
@property
def variables(self):
@@ -156,21 +144,23 @@ class Axis(object):
finally:
return self._variables
- def convert(self, destination_units):
+ def convert(self, destination_units, *, convert_variables=False):
"""Convert axis to destination_units.
Parameters
----------
destination_units : string
Destination units.
+ convert_variables : boolean (optional)
+ Toggle conversion of stored arrays. Default is False.
"""
- destination_units_kind = None
- for dic in wt_units.unit_dicts:
- if destination_units in dic.keys():
- destination_units_kind = dic['kind']
- break
- if not self.units_kind == destination_units_kind:
- raise wt_exceptions.UnitsError(self.units_kind, destination_units_kind)
+ if not wt_units.is_valid_conversion(self.units, destination_units):
+ kind = wt_units.kind(self.units)
+ valid = list(wt_units.dicts[kind].keys())
+ raise wt_exceptions.UnitsError(valid, destination_units)
+ if convert_variables:
+ for v in self.variables:
+ v.convert(destination_units)
self.units = destination_units
def max(self):
diff --git a/WrightTools/data/_data.py b/WrightTools/data/_data.py
index fba69ab..09e060d 100644
--- a/WrightTools/data/_data.py
+++ b/WrightTools/data/_data.py
@@ -313,9 +313,19 @@ class Data(Group):
idx = tuple(idx)
data = out.create_data(name='chop%03i' % i)
for v in self.variables:
- data.create_variable(name=v.natural_name, values=v[idx], units=v.units)
+ kwargs = {}
+ kwargs['name'] = v.natural_name
+ kwargs['values'] = v[idx]
+ kwargs['units'] = v.units
+ kwargs['label'] = v.label
+ data.create_variable(**kwargs)
for c in self.channels:
- data.create_channel(name=c.natural_name, values=c[idx], units=c.units)
+ kwargs = {}
+ kwargs['name'] = c.natural_name
+ kwargs['values'] = c[idx]
+ kwargs['units'] = c.units
+ kwargs['label'] = c.label
+ data.create_channel(**kwargs)
data.transform([a.expression for a in kept_axes if a.expression not in at.keys()])
i += 1
out.flush()
@@ -380,13 +390,15 @@ class Data(Group):
self._axes.pop(axis_index)
self._update_natural_namespace()
- def convert(self, destination_units, verbose=True):
- """Convert all compatable constants and axes to given units.
+ def convert(self, destination_units, *, convert_variables=False, verbose=True):
+ """Convert all compatable axes to given units.
Parameters
----------
destination_units : str
Destination units.
+ convert_variables : boolean (optional)
+ Toggle conversion of stored arrays. Default is False
verbose : bool (optional)
Toggle talkback. Default is True.
@@ -397,13 +409,11 @@ class Data(Group):
axis object in data.axes or data.constants.
"""
# get kind of units
- for dic in wt_units.unit_dicts:
- if destination_units in dic.keys():
- units_kind = dic['kind']
+ units_kind = wt_units.kind(destination_units)
# apply to all compatible axes
for axis in self.axes + self.constants:
if axis.units_kind == units_kind:
- axis.convert(destination_units)
+ axis.convert(destination_units, convert_variables=convert_variables)
if verbose:
print('axis', axis.expression, 'converted')
diff --git a/WrightTools/kit/_array.py b/WrightTools/kit/_array.py
index c74c4b9..b7cd2d1 100644
--- a/WrightTools/kit/_array.py
+++ b/WrightTools/kit/_array.py
@@ -270,12 +270,18 @@ def valid_index(index, shape):
tuple
Valid index.
"""
+ # append slices to index
+ index = list(index)
+ while len(index) < len(shape):
+ index.append(slice(None))
+ # fill out, in reverse
out = []
for i, s in zip(index[::-1], shape[::-1]):
if s == 1:
- out.append(0)
- elif isinstance(i, slice):
- out.append(i)
+ if isinstance(i, slice):
+ out.append(slice(None))
+ else:
+ out.append(0)
else:
- out.append(min(s - 1, i))
+ out.append(i)
return tuple(out[::-1])
diff --git a/WrightTools/units.py b/WrightTools/units.py
index 131f667..76383da 100644
--- a/WrightTools/units.py
+++ b/WrightTools/units.py
@@ -4,34 +4,31 @@
# --- import --------------------------------------------------------------------------------------
-from __future__ import absolute_import, division, print_function, unicode_literals
+import collections
import numpy as np
import warnings
-# --- units ---------------------------------------------------------------------------------------
+# --- define --------------------------------------------------------------------------------------
# units are stored in dictionaries of like kind. format:
# unit : to native, from native, units_symbol, units_label
# angle units (native: rad)
-angle = {'kind': 'angle',
- 'rad': ['x', 'x', r'rad'],
+angle = {'rad': ['x', 'x', r'rad'],
'deg': ['x/57.2958', '57.2958*x', r'deg']}
# delay units (native: fs)
fs_per_mm = 3336.
-delay = {'kind': 'delay',
- 'fs': ['x', 'x', r'fs'],
+delay = {'fs': ['x', 'x', r'fs'],
'ps': ['x*1e3', 'x/1e3', r'ps'],
'ns': ['x*1e6', 'x/1e6', r'ns'],
'mm_delay': ['x*2*fs_per_mm', 'x/(2*fs_per_mm)', r'mm']}
# energy units (native: nm)
-energy = {'kind': 'energy',
- 'nm': ['x', 'x', r'nm'],
+energy = {'nm': ['x', 'x', r'nm'],
'wn': ['1e7/x', '1e7/x', r'cm^{-1}'],
'eV': ['1240./x', '1240./x', r'eV'],
'meV': ['1240000./x', '1240000./x', r'meV'],
@@ -40,30 +37,24 @@ energy = {'kind': 'energy',
'GHz': ['2.99792458e8/x', '2.99792458e8/x', r'GHz']}
# fluence units (native: uJ per sq. cm)
-fluence = {'kind': 'fluence',
- 'uJ per sq. cm': ['x', 'x', r'\frac{\mu J}{cm^{2}}']}
+fluence = {'uJ per sq. cm': ['x', 'x', r'\frac{\mu J}{cm^{2}}']}
# optical density units (native: od)
-od = {'kind': 'od',
- 'mOD': ['1e3*x', 'x/1e3', r'mOD'],
+od = {'mOD': ['1e3*x', 'x/1e3', r'mOD'],
'OD': ['x', 'x', r'OD']}
# position units (native: mm)
-position = {'kind': 'position',
- # can't have same name as energy nm
- 'nm_p': ['x/1e6', '1e6/x', r'nm'],
+position = {'nm_p': ['x/1e6', '1e6/x', r'nm'],
'um': ['x/1000.', '1000/x.', r'um'],
'mm': ['x', 'x', r'mm'],
'cm': ['10.*x', 'x/10.', r'cm'],
'in': ['x*0.039370', '0.039370*x', r'in']}
# pulse width units (native: FWHM)
-pulse_width = {'kind': 'pulse_width',
- 'FWHM': ['x', 'x', r'FWHM']}
+pulse_width = {'FWHM': ['x', 'x', r'FWHM']}
# time units (native: s)
-time = {'kind': 'time',
- 'fs_t': ['x/1e15', 'x*1e15', r'fs'],
+time = {'fs_t': ['x/1e15', 'x*1e15', r'fs'],
'ps_t': ['x/1e12', 'x*1e12', r'ps'],
'ns_t': ['x/1e9', 'x*1e9', r'ns'],
'us_t': ['x/1e6', 'x*1e6', r'us'],
@@ -73,7 +64,18 @@ time = {'kind': 'time',
'h_t': ['x*3600.', 'x/3600.', r'h'],
'd_t': ['x*86400.', 'x/86400.', r'd']}
-unit_dicts = [angle, delay, energy, time, position, pulse_width, fluence, od]
+dicts = collections.OrderedDict()
+dicts['angle'] = angle
+dicts['delay'] = delay
+dicts['energy'] = energy
+dicts['time'] = time
+dicts['position'] = position
+dicts['pulse_width'] = pulse_width
+dicts['fluence'] = fluence
+dicts['od'] = od
+
+
+# --- functions -----------------------------------------------------------------------------------
def converter(val, current_unit, destination_unit):
@@ -94,7 +96,7 @@ def converter(val, current_unit, destination_unit):
Converted value.
"""
x = val
- for dic in unit_dicts:
+ for dic in dicts.values():
if current_unit in dic.keys() and destination_unit in dic.keys():
try:
native = eval(dic[current_unit][0])
@@ -111,86 +113,71 @@ def converter(val, current_unit, destination_unit):
pass
else:
warnings.warn('conversion {0} to {1} not valid: returning input'.format(
- current_unit, destination_unit))
+ current_unit, destination_unit))
return val
-def kind(units):
- """Find the kind of given units.
+def get_symbol(units):
+ """Get default symbol type.
Parameters
----------
- units : string
- The units of interest
+ units_str : string
+ Units.
Returns
-------
string
- The kind of the given units. If no match is found, returns None.
+ LaTeX formatted symbol.
"""
- for d in unit_dicts:
- if units in d.keys():
- return str(d['kind'])
-
-
-# --- symbol --------------------------------------------------------------------------------------
-
-
-class SymbolDict(dict):
- """Subclass dictionary to get at __missing__ method."""
-
- def __missing__(self, key):
- """Define what happens when key is missing."""
- return self['default']
-
-
-# color
-color_symbols = SymbolDict()
-color_symbols['default'] = r'E'
-color_symbols['nm'] = r'\lambda'
-color_symbols['wn'] = r'\bar\nu'
-color_symbols['eV'] = r'\hslash\omega'
-color_symbols['Hz'] = r'f'
-color_symbols['THz'] = r'f'
-color_symbols['GHz'] = r'f'
+ if kind(units) == 'energy':
+ d = {}
+ d['nm'] = r'\lambda'
+ d['wn'] = r'\bar\nu'
+ d['eV'] = r'\hslash\omega'
+ d['Hz'] = r'f'
+ d['THz'] = r'f'
+ d['GHz'] = r'f'
+ return d.get(units, 'E')
+ elif kind(units) == 'delay':
+ return r'\tau'
+ elif kind(units) == 'fluence':
+ return r'\mathcal{F}'
+ elif kind(units) == 'pulse_width':
+ return r'\sigma'
+ else:
+ return kind(units)
-# delay
-delay_symbols = SymbolDict()
-delay_symbols['default'] = r'\tau'
-# fluence
-fluence_symbols = SymbolDict()
-fluence_symbols['default'] = r'\mathcal{F}'
+def get_valid_conversions(units):
+ valid = list(dicts[kind(units)])
+ valid.remove(units)
+ return tuple(valid)
-# pulse width
-pulse_width_symbols = SymbolDict()
-pulse_width_symbols['default'] = r'\sigma'
-# catch all
-none_symbols = SymbolDict()
-none_symbols['default'] = ''
+def is_valid_conversion(a, b):
+ for dic in dicts.values():
+ if a in dic.keys() and b in dic.keys():
+ return True
+ if a is None and b is None:
+ return True
+ else:
+ return False
-def get_default_symbol_type(units_str):
- """Get default symbol type.
+def kind(units):
+ """Find the kind of given units.
Parameters
----------
- units_str : string
- Units.
+ units : string
+ The units of interest
Returns
-------
string
- Symbol dictionary name.
+ The kind of the given units. If no match is found, returns None.
"""
- if units_str in ['nm', 'wn', 'eV']:
- return 'color_symbols'
- elif units_str in ['fs', 'ps', 'ns']:
- return 'delay_symbols'
- elif units_str in ['uJ per sq. cm']:
- return 'fluence_symbols'
- elif units_str in ['FWHM']:
- return 'pulse_width_symbols'
- else:
- return 'none_symbols'
+ for k, v in dicts.items():
+ if units in v.keys():
+ return k
| API for "forcing" unit conversion of underlying variables
Currently, `data.convert` and `axis.convert` simply change the units that are returned when slicing into the axis object; the variables themselves remain untouched. I like this behavior and want to keep it.
In addition to the above, users should be able to convert the variables directly.
Step one: add a `convert` method to the `Variable` class.
Step two: add a `convert_variables=False` kwarg to `data.convert` and `axis.convert` which toggles propagation to the dependent variables.
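A minimal usage sketch of the proposed API. The file name and `wn` units are illustrative, and it assumes `wt.open` (or an equivalent loader) returns a `Data` object with a `w1` axis:

```python
import WrightTools as wt

data = wt.open('example.wt5')  # hypothetical data file

# default behavior: only the values returned by the axis change units;
# the stored variable arrays remain untouched
data.w1.convert('wn')

# proposed opt-in: also rewrite the underlying variable arrays
data.convert('wn', convert_variables=True)
```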
We should think carefully, because variables can appear in multiple axes. | wright-group/WrightTools | diff --git a/tests/data/axis/convert_axis.py b/tests/data/axis/convert_axis.py
new file mode 100644
index 0000000..4e1408b
--- /dev/null
+++ b/tests/data/axis/convert_axis.py
@@ -0,0 +1,68 @@
+"""Test axis unit conversion."""
+
+
+# --- import --------------------------------------------------------------------------------------
+
+
+import numpy as np
+
+import WrightTools as wt
+from WrightTools import datasets
+
+
+# --- define --------------------------------------------------------------------------------------
+
+
+def test_convert_variables():
+ p = datasets.KENT.LDS821_TRSF
+ ignore = ['d1', 'd2', 'wm']
+ data = wt.data.from_KENT(p, ignore=ignore)
+ data.w2.convert('meV', convert_variables=True)
+ assert data.w2.units == 'meV'
+ assert data['w2'].units == 'meV'
+ data.close()
+
+
+def test_exception():
+ p = datasets.PyCMDS.w1_000
+ data = wt.data.from_PyCMDS(p)
+ try:
+ data.w1.convert('fs')
+ except wt.exceptions.UnitsError:
+ assert True
+ else:
+ assert False
+ assert data.w1.units == 'nm'
+ assert data['w1'].units == 'nm'
+ data.close()
+
+
+def test_w1_wa():
+ p = datasets.PyCMDS.w1_wa_000
+ data = wt.data.from_PyCMDS(p)
+ assert data.wa.units == 'nm'
+ data.wa.convert('eV')
+ assert data.wa.units == 'eV'
+ assert np.isclose(data.wa.max(), 1.5802564757220569)
+ assert np.isclose(data.wa.min(), 0.6726385958618104)
+ assert data['wa'].units == 'nm'
+ data.close()
+
+
+def test_wigner():
+ p = datasets.COLORS.v2p2_WL_wigner
+ data = wt.data.from_COLORS(p)
+ data.d1.convert('ns')
+ assert data.d1.units == 'ns'
+ assert data['d1'].units == 'fs'
+ data.close()
+
+
+# --- run -----------------------------------------------------------------------------------------
+
+
+if __name__ == '__main__':
+ test_convert_variables()
+ test_exception()
+ test_w1_wa()
+ test_wigner()
diff --git a/tests/data/chop.py b/tests/data/chop.py
index 1b59ed1..0f2f37e 100755
--- a/tests/data/chop.py
+++ b/tests/data/chop.py
@@ -20,6 +20,8 @@ def test_2D_to_1D():
for d in chop.values():
assert d.w2.size == 81
assert d.axis_expressions == ('w2',)
+ for k in data.variable_names:
+ assert d[k].label == data[k].label
data.close()
chop.close()
@@ -85,4 +87,5 @@ def test_parent():
if __name__ == "__main__":
+ test_2D_to_1D()
test_3D_to_1D()
diff --git a/tests/data/convert_data.py b/tests/data/convert_data.py
new file mode 100644
index 0000000..e232390
--- /dev/null
+++ b/tests/data/convert_data.py
@@ -0,0 +1,57 @@
+"""Test data unit conversion."""
+
+
+# --- import --------------------------------------------------------------------------------------
+
+
+import numpy as np
+
+import WrightTools as wt
+from WrightTools import datasets
+
+
+# --- define --------------------------------------------------------------------------------------
+
+
+def test_convert_variables():
+ p = datasets.KENT.LDS821_TRSF
+ ignore = ['d1', 'd2', 'wm']
+ data = wt.data.from_KENT(p, ignore=ignore)
+ data.convert('meV', convert_variables=True)
+ assert data.w1.units == 'meV'
+ assert data.w2.units == 'meV'
+ assert data['w2'].units == 'meV'
+ assert data['w2'].units == 'meV'
+ data.close()
+
+
+def test_w1_wa():
+ p = datasets.PyCMDS.w1_wa_000
+ data = wt.data.from_PyCMDS(p)
+ assert data.wa.units == 'nm'
+ data.convert('eV')
+ assert data.wa.units == 'eV'
+ assert np.isclose(data.wa.max(), 1.5802564757220569)
+ assert np.isclose(data.wa.min(), 0.6726385958618104)
+ assert data['wa'].units == 'nm'
+ data.close()
+
+
+def test_wigner():
+ p = datasets.COLORS.v2p2_WL_wigner
+ data = wt.data.from_COLORS(p)
+ data.convert('ns')
+ assert data.d1.units == 'ns'
+ assert data['d1'].units == 'fs'
+ assert data.wm.units == 'nm'
+ assert data['wm'].units == 'nm'
+ data.close()
+
+
+# --- run -----------------------------------------------------------------------------------------
+
+
+if __name__ == '__main__':
+ test_convert_variables()
+ test_w1_wa()
+ test_wigner()
diff --git a/tests/dataset/convert_dataset.py b/tests/dataset/convert_dataset.py
new file mode 100644
index 0000000..6c82d5a
--- /dev/null
+++ b/tests/dataset/convert_dataset.py
@@ -0,0 +1,53 @@
+"""Test dataset unit conversion."""
+
+
+# --- import --------------------------------------------------------------------------------------
+
+
+import numpy as np
+
+import WrightTools as wt
+from WrightTools import datasets
+
+
+# --- define --------------------------------------------------------------------------------------
+
+
+def test_exception():
+ p = datasets.PyCMDS.w1_000
+ data = wt.data.from_PyCMDS(p)
+ try:
+ data['w1'].convert('fs')
+ except wt.exceptions.UnitsError:
+ assert True
+ else:
+ assert False
+ assert data['w1'].units == 'nm'
+ data.close()
+
+
+def test_w1_wa():
+ p = datasets.PyCMDS.w1_wa_000
+ data = wt.data.from_PyCMDS(p)
+ assert data['wa'].units == 'nm'
+ data['wa'].convert('eV')
+ assert np.isclose(data['wa'].max(), 1.5802564757220569)
+ assert np.isclose(data['wa'].min(), 0.6726385958618104)
+ data.close()
+
+
+def test_wigner():
+ p = datasets.COLORS.v2p2_WL_wigner
+ data = wt.data.from_COLORS(p)
+ data['d1'].convert('ns')
+ assert data['d1'].units == 'ns'
+ data.close()
+
+
+# --- run -----------------------------------------------------------------------------------------
+
+
+if __name__ == '__main__':
+ test_exception()
+ test_w1_wa()
+ test_wigner()
diff --git a/tests/kit/valid_index.py b/tests/kit/valid_index.py
new file mode 100644
index 0000000..16a0e53
--- /dev/null
+++ b/tests/kit/valid_index.py
@@ -0,0 +1,65 @@
+"""Test valid index function."""
+
+
+# --- import --------------------------------------------------------------------------------------
+
+
+import WrightTools as wt
+
+
+# --- test ----------------------------------------------------------------------------------------
+
+
+def test__1_5__7():
+ index = (1, 5)
+ shape = (7,)
+ assert wt.kit.valid_index(index, shape) == (5,)
+
+
+def test__4_2_12__1_25_1():
+ index = (4, 2, 12)
+ shape = (1, 25, 1)
+ assert wt.kit.valid_index(index, shape) == (0, 2, 0)
+
+
+def test__s__23():
+ index = (slice(None),)
+ shape = (23,)
+ assert wt.kit.valid_index(index, shape) == (slice(None),)
+
+
+def test__s__1_25():
+ index = (slice(None),)
+ shape = (1, 25,)
+ assert wt.kit.valid_index(index, shape) == (slice(None), slice(None))
+
+
+def test__ss_ss__1_25():
+ index = (slice(20, None, 1), slice(20, None, 1))
+ shape = (1, 25,)
+ assert wt.kit.valid_index(index, shape) == (slice(None), slice(20, None, 1))
+
+
+def test__s__13_25_99():
+ index = (slice(None),)
+ shape = (13, 25, 99)
+ assert wt.kit.valid_index(index, shape) == (slice(None), slice(None), slice(None))
+
+
+def test__s_s__51():
+ index = (slice(None), slice(None))
+ shape = (51,)
+ assert wt.kit.valid_index(index, shape) == (slice(None),)
+
+
+# --- run -----------------------------------------------------------------------------------------
+
+
+if __name__ == '__main__':
+ test__1_5__7()
+ test__4_2_12__1_25_1()
+ test__s__23()
+ test__s__1_25()
+ test__ss_ss__1_25()
+ test__s__13_25_99()
+ test__s_s__51()
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 5
} | 2.13 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | appdirs==1.4.4
contourpy==1.3.0
coverage==7.8.0
cycler==0.12.1
exceptiongroup==1.2.2
fonttools==4.56.0
h5py==3.13.0
imageio==2.37.0
importlib_resources==6.5.2
iniconfig==2.1.0
kiwisolver==1.4.7
matplotlib==3.9.4
numexpr==2.10.2
numpy==2.0.2
packaging==24.2
pillow==11.1.0
pluggy==1.5.0
pyparsing==3.2.3
pytest==8.3.5
pytest-cov==6.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
scipy==1.13.1
six==1.17.0
tidy_headers==1.0.4
tomli==2.2.1
-e git+https://github.com/wright-group/WrightTools.git@49cb335e4b0dd3556304ec72daa65cd812493f3b#egg=WrightTools
zipp==3.21.0
| name: WrightTools
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- appdirs==1.4.4
- contourpy==1.3.0
- coverage==7.8.0
- cycler==0.12.1
- exceptiongroup==1.2.2
- fonttools==4.56.0
- h5py==3.13.0
- imageio==2.37.0
- importlib-resources==6.5.2
- iniconfig==2.1.0
- kiwisolver==1.4.7
- matplotlib==3.9.4
- numexpr==2.10.2
- numpy==2.0.2
- packaging==24.2
- pillow==11.1.0
- pluggy==1.5.0
- pyparsing==3.2.3
- pytest==8.3.5
- pytest-cov==6.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- scipy==1.13.1
- six==1.17.0
- tidy-headers==1.0.4
- tomli==2.2.1
- zipp==3.21.0
prefix: /opt/conda/envs/WrightTools
| [
"tests/kit/valid_index.py::test__s__1_25",
"tests/kit/valid_index.py::test__ss_ss__1_25",
"tests/kit/valid_index.py::test__s__13_25_99"
]
| [
"tests/data/axis/convert_axis.py::test_convert_variables",
"tests/data/axis/convert_axis.py::test_exception",
"tests/data/axis/convert_axis.py::test_w1_wa",
"tests/data/axis/convert_axis.py::test_wigner",
"tests/data/chop.py::test_2D_to_1D",
"tests/data/chop.py::test_3D_to_1D",
"tests/data/chop.py::test_3D_to_1D_at",
"tests/data/chop.py::test_3D_to_2D",
"tests/data/chop.py::test_3D_to_2D_at",
"tests/data/chop.py::test_parent",
"tests/data/convert_data.py::test_convert_variables",
"tests/data/convert_data.py::test_w1_wa",
"tests/data/convert_data.py::test_wigner",
"tests/dataset/convert_dataset.py::test_exception",
"tests/dataset/convert_dataset.py::test_w1_wa",
"tests/dataset/convert_dataset.py::test_wigner"
]
| [
"tests/kit/valid_index.py::test__1_5__7",
"tests/kit/valid_index.py::test__4_2_12__1_25_1",
"tests/kit/valid_index.py::test__s__23",
"tests/kit/valid_index.py::test__s_s__51"
]
| []
| MIT License | 2,041 | [
"WrightTools/kit/_array.py",
"WrightTools/_dataset.py",
"WrightTools/units.py",
"WrightTools/data/_axis.py",
"WrightTools/data/_data.py"
]
| [
"WrightTools/kit/_array.py",
"WrightTools/_dataset.py",
"WrightTools/units.py",
"WrightTools/data/_axis.py",
"WrightTools/data/_data.py"
]
|
mkdocs__mkdocs-1376 | d6449f90808b50979694a3e91b49b532f3bf785d | 2018-01-13 00:06:20 | 27f06517db4d8b73b162f2a2af65826ddcc8db54 | waylan: Awesome! I'll have to take a closer look when I have some time.
djeebus: Just found an issue: plugins aren't getting normalized properly. Should have it fixed soon too.
djeebus: All ready now!
waylan: The py33 tests are being handled in #1453. Guess we can move on with this issue by updating the docs. | diff --git a/docs/about/release-notes.md b/docs/about/release-notes.md
index e5a2abaf..9ae74fb1 100644
--- a/docs/about/release-notes.md
+++ b/docs/about/release-notes.md
@@ -25,6 +25,25 @@ The current and past members of the MkDocs team.
### Major Additions to Development Version
+#### Path Based Settings are Relative to Configuration File (#543)
+
+Previously any relative paths in the various configuration options were
+resolved relative to the current working directory. They are now resolved
+relative to the configuration file. As the documentation has always encouraged
+running the various MkDocs commands from the directory that contains the
+configuration file (project root), this change will not affect most users.
+However, it will make it much easier to implement automated builds or otherwise
+run commands from a location other than the project root.
+
+Simply use the `-f/--config-file` option and point it at the configuration file:
+
+```sh
+mkdocs build --config-file /path/to/my/config/file.yml
+```
+
+As previously, if no file is specified, MkDocs looks for a file named
+`mkdocs.yml` in the current working directory.
+
#### Refactor Search Plugin
The search plugin has been completely refactored to include support for the
diff --git a/docs/user-guide/configuration.md b/docs/user-guide/configuration.md
index 333b7e67..8cac62f9 100644
--- a/docs/user-guide/configuration.md
+++ b/docs/user-guide/configuration.md
@@ -215,9 +215,10 @@ If a set of key/value pairs, the following nested keys can be defined:
#### custom_dir:
- A directory to custom a theme. This can either be a relative directory, in
- which case it is resolved relative to the directory containing your
- configuration file, or it can be an absolute directory path.
+ A directory containing a custom theme. This can either be a relative
+ directory, in which case it is resolved relative to the directory containing
+ your configuration file, or it can be an absolute directory path from the
+ root of your local file system.
See [styling your docs][theme_dir] for details if you would like to tweak an
existing theme.
@@ -240,19 +241,19 @@ If a set of key/value pairs, the following nested keys can be defined:
### docs_dir
-Lets you set the directory containing the documentation source markdown files.
-This can either be a relative directory, in which case it is resolved relative
-to the directory containing your configuration file, or it can be an absolute
-directory path from the root of your local file system.
+The directory containing the documentation source markdown files. This can
+either be a relative directory, in which case it is resolved relative to the
+directory containing your configuration file, or it can be an absolute directory
+path from the root of your local file system.
**default**: `'docs'`
### site_dir
-Lets you set the directory where the output HTML and other files are created.
-This can either be a relative directory, in which case it is resolved relative
-to the directory containing your configuration file, or it can be an absolute
-directory path from the root of your local file system.
+The directory where the output HTML and other files are created. This can either
+be a relative directory, in which case it is resolved relative to the directory
+containing your configuration file, or it can be an absolute directory path from
+the root of your local file system.
**default**: `'site'`
diff --git a/docs/user-guide/custom-themes.md b/docs/user-guide/custom-themes.md
index 2f0021dd..18f72b28 100644
--- a/docs/user-guide/custom-themes.md
+++ b/docs/user-guide/custom-themes.md
@@ -20,41 +20,48 @@ and their usage.
## Creating a custom theme
The bare minimum required for a custom theme is a `main.html` [Jinja2 template]
-file. This should be placed in a directory which will be the `custom_dir` and it
-should be created next to the `mkdocs.yml` configuration file. Within
-`mkdocs.yml`, specify the theme `custom_dir` option and set it to the name of
-the directory containing `main.html`. For example, given this example project
-layout:
-
- mkdocs.yml
- docs/
- index.md
- about.md
- custom_theme/
- main.html
- ...
-
-You would include the following settings in `mkdocs.yml` to use the custom theme
+file which is placed in a directory that is *not* a child of the [docs_dir].
+Within `mkdocs.yml`, set the theme.[custom_dir] option to the path of the
+directory containing `main.html`. The path should be relative to the
+configuration file. For example, given this example project layout:
+
+```no-highlight
+mkdocs.yml
+docs/
+ index.md
+ about.md
+custom_theme/
+ main.html
+ ...
+```
+
+... you would include the following settings in `mkdocs.yml` to use the custom theme
directory:
- theme:
- name: null
- custom_dir: 'custom_theme'
+```yaml
+theme:
+ name: null
+ custom_dir: 'custom_theme/'
+```
!!! Note
- Generally, when building your own custom theme, the theme `name`
- configuration setting would be set to `null`. However, if used in
- combination with the `custom_dir` configuration value a custom theme can be
- used to replace only specific parts of a built-in theme. For example, with
- the above layout and if you set `name: "mkdocs"` then the `main.html` file
- in the `custom_dir` would replace that in the theme but otherwise the
- `mkdocs` theme would remain the same. This is useful if you want to make
+ Generally, when building your own custom theme, the theme.[name]
+ configuration setting would be set to `null`. However, if the
+ theme.[custom_dir] configuration value is used in combination with an
+ existing theme, the theme.[custom_dir] can be used to replace only specific
+ parts of a built-in theme. For example, with the above layout and if you set
+ `name: "mkdocs"` then the `main.html` file in the theme.[custom_dir] would
+ replace the file of the same name in the `mkdocs` theme but otherwise the
+ `mkdocs` theme would remain unchanged. This is useful if you want to make
small adjustments to an existing theme.
For more specific information, see [styling your docs].
[styling your docs]: ./styling-your-docs.md#using-the-theme-custom_dir
+[custom_dir]: ./configuration.md#custom_dir
+[name]: ./configuration.md#name
+[docs_dir]:./configuration.md#docs_dir
## Basic theme
diff --git a/docs/user-guide/styling-your-docs.md b/docs/user-guide/styling-your-docs.md
index 4837e8f6..c97abf99 100644
--- a/docs/user-guide/styling-your-docs.md
+++ b/docs/user-guide/styling-your-docs.md
@@ -138,7 +138,7 @@ And then point your `mkdocs.yml` configuration file at the new directory:
```yaml
theme:
name: mkdocs
- custom_dir: custom_theme
+ custom_dir: custom_theme/
```
To override the 404 error page ("file not found"), add a new template file named
diff --git a/mkdocs/config/base.py b/mkdocs/config/base.py
index afcec0a9..60f9b587 100644
--- a/mkdocs/config/base.py
+++ b/mkdocs/config/base.py
@@ -21,13 +21,14 @@ class Config(utils.UserDict):
for running validation on the structure and contents.
"""
- def __init__(self, schema):
+ def __init__(self, schema, config_file_path=None):
"""
The schema is a Python dict which maps the config name to a validator.
"""
self._schema = schema
self._schema_keys = set(dict(schema).keys())
+ self.config_file_path = config_file_path
self.data = {}
self.user_configs = []
@@ -172,7 +173,7 @@ def load_config(config_file=None, **kwargs):
# Initialise the config with the default schema .
from mkdocs import config
- cfg = Config(schema=config.DEFAULT_SCHEMA)
+ cfg = Config(schema=config.DEFAULT_SCHEMA, config_file_path=options['config_file_path'])
# First load the config file
cfg.load_file(config_file)
# Then load the options to overwrite anything in the config.
diff --git a/mkdocs/config/config_options.py b/mkdocs/config/config_options.py
index 9a7eb0b6..30da2a2a 100644
--- a/mkdocs/config/config_options.py
+++ b/mkdocs/config/config_options.py
@@ -293,6 +293,24 @@ class FilesystemObject(Type):
super(FilesystemObject, self).__init__(type_=utils.string_types, **kwargs)
self.exists = exists
+ def pre_validation(self, config, key_name):
+ value = config[key_name]
+
+ if not value:
+ return
+
+ if os.path.isabs(value):
+ return
+
+ if config.config_file_path is None:
+ # Unable to determine absolute path of the config file; fall back
+ # to trusting the relative path
+ return
+
+ config_dir = os.path.dirname(config.config_file_path)
+ value = os.path.join(config_dir, value)
+ config[key_name] = value
+
def run_validation(self, value):
value = super(FilesystemObject, self).run_validation(value)
if self.exists and not self.existence_test(value):
@@ -311,9 +329,11 @@ class Dir(FilesystemObject):
name = 'directory'
def post_validation(self, config, key_name):
+ if config.config_file_path is None:
+ return
# Validate that the dir is not the parent dir of the config file.
- if os.path.dirname(config['config_file_path']) == config[key_name]:
+ if os.path.dirname(config.config_file_path) == config[key_name]:
raise ValidationError(
("The '{0}' should not be the parent directory of the config "
"file. Use a child directory instead so that the config file "
@@ -430,7 +450,12 @@ class Theme(BaseConfigOption):
# Ensure custom_dir is an absolute path
if 'custom_dir' in theme_config and not os.path.isabs(theme_config['custom_dir']):
- theme_config['custom_dir'] = os.path.abspath(theme_config['custom_dir'])
+ config_dir = os.path.dirname(config.config_file_path)
+ theme_config['custom_dir'] = os.path.join(config_dir, theme_config['custom_dir'])
+
+ if 'custom_dir' in theme_config and not os.path.isdir(theme_config['custom_dir']):
+ raise ValidationError("The path set in {name}.custom_dir ('{path}') does not exist.".
+ format(path=theme_config['custom_dir'], name=self.name))
config[key_name] = theme.Theme(**theme_config)
@@ -621,6 +646,10 @@ class Plugins(OptionallyRequired):
def __init__(self, **kwargs):
super(Plugins, self).__init__(**kwargs)
self.installed_plugins = plugins.get_plugins()
+ self.config_file_path = None
+
+ def pre_validation(self, config, key_name):
+ self.config_file_path = config.config_file_path
def run_validation(self, value):
if not isinstance(value, (list, tuple)):
@@ -635,11 +664,15 @@ class Plugins(OptionallyRequired):
if not isinstance(cfg, dict):
raise ValidationError('Invalid config options for '
'the "{0}" plugin.'.format(name))
- plgins[name] = self.load_plugin(name, cfg)
- elif isinstance(item, utils.string_types):
- plgins[item] = self.load_plugin(item, {})
+ item = name
else:
+ cfg = {}
+
+ if not isinstance(item, utils.string_types):
raise ValidationError('Invalid Plugins configuration')
+
+ plgins[item] = self.load_plugin(item, cfg)
+
return plgins
def load_plugin(self, name, config):
@@ -654,7 +687,7 @@ class Plugins(OptionallyRequired):
plugins.BasePlugin.__name__))
plugin = Plugin()
- errors, warnings = plugin.load_config(config)
+ errors, warnings = plugin.load_config(config, self.config_file_path)
self.warnings.extend(warnings)
errors_message = '\n'.join(
"Plugin value: '{}'. Error: {}".format(x, y)
diff --git a/mkdocs/plugins.py b/mkdocs/plugins.py
index 69d99ed8..de4e837d 100644
--- a/mkdocs/plugins.py
+++ b/mkdocs/plugins.py
@@ -42,10 +42,10 @@ class BasePlugin(object):
config_scheme = ()
config = {}
- def load_config(self, options):
+ def load_config(self, options, config_file_path=None):
""" Load config from a dict of options. Returns a tuple of (errors, warnings)."""
- self.config = Config(schema=self.config_scheme)
+ self.config = Config(schema=self.config_scheme, config_file_path=config_file_path)
self.config.load_dict(options)
return self.config.validate()
diff --git a/tox.ini b/tox.ini
index 52d0d7e0..bee768b2 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,6 +1,6 @@
[tox]
envlist =
- py{27,34,35,36}-{unittests,integration,min-req},
+ py{27,34,35,36,py,py3}-{unittests,integration,min-req},
flake8, markdown-lint, linkchecker, jshint, csslint
[testenv]
@@ -9,6 +9,7 @@ deps=
py{27,34,35,36,py,py3}-{unittests,integration}: -rrequirements/project.txt
py{27,34,35,36,py,py3}-min-req: -rrequirements/project-min.txt
py{27,34,35,36,py,py3}-{unittests,min-req}: -rrequirements/test.txt
+ py{27,py}-{unittests,min-req}: backports.tempfile
commands=
{envpython} --version
py{27,34,35,36,py,py3}-{unittests,min-req}: {envbindir}/nosetests --with-coverage --cover-package mkdocs mkdocs
| Paths are relative to you, not the config file
You can do `mkdocs build --config-file=path/to/mkdocs.yml`, but any path-based configuration values in that file are resolved relative to your current working directory, not to the configuration file itself.
This seems wrong to me, but it's only something I've run into when trying to automate MkDocs builds (for integration testing). I'm not sure if we want to change this, or what people really expect. The common use case is that you are in the same directory as the config file anyway, so then it doesn't matter.
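As a sketch of the resolution behavior I'd expect instead (the helper name is mine, not MkDocs API):

```python
import os

def resolve(value, config_file_path):
    """Resolve a relative path option against the config file's directory."""
    if os.path.isabs(value):
        return value
    return os.path.join(os.path.dirname(config_file_path), value)

# e.g. `docs_dir: docs` in /project/mkdocs.yml -> /project/docs
print(resolve('docs', '/project/mkdocs.yml'))
```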
Something like this should be changed or at least decided for a 1.0 release.
Thoughts?
| mkdocs/mkdocs | diff --git a/mkdocs/tests/build_tests.py b/mkdocs/tests/build_tests.py
index ac5f5116..32b86206 100644
--- a/mkdocs/tests/build_tests.py
+++ b/mkdocs/tests/build_tests.py
@@ -3,8 +3,6 @@
from __future__ import unicode_literals
import os
-import shutil
-import tempfile
import unittest
import mock
import io
@@ -15,6 +13,11 @@ except ImportError:
# In Py3 use builtin zip function
pass
+try:
+ # py>=3.2
+ from tempfile import TemporaryDirectory
+except ImportError:
+ from backports.tempfile import TemporaryDirectory
from mkdocs import nav
from mkdocs.commands import build
@@ -319,9 +322,7 @@ class BuildTests(unittest.TestCase):
self.assertEqual(page.content.strip(), '<p>foo</p>')
def test_copying_media(self):
- docs_dir = tempfile.mkdtemp()
- site_dir = tempfile.mkdtemp()
- try:
+ with TemporaryDirectory() as docs_dir, TemporaryDirectory() as site_dir:
# Create a non-empty markdown file, image, html file, dot file and dot directory.
f = open(os.path.join(docs_dir, 'index.md'), 'w')
f.write(dedent("""
@@ -351,14 +352,9 @@ class BuildTests(unittest.TestCase):
self.assertTrue(os.path.isfile(os.path.join(site_dir, 'example.html')))
self.assertFalse(os.path.isfile(os.path.join(site_dir, '.hidden')))
self.assertFalse(os.path.isfile(os.path.join(site_dir, '.git/hidden')))
- finally:
- shutil.rmtree(docs_dir)
- shutil.rmtree(site_dir)
def test_copy_theme_files(self):
- docs_dir = tempfile.mkdtemp()
- site_dir = tempfile.mkdtemp()
- try:
+ with TemporaryDirectory() as docs_dir, TemporaryDirectory() as site_dir:
# Create a non-empty markdown file.
f = open(os.path.join(docs_dir, 'index.md'), 'w')
f.write(dedent("""
@@ -383,9 +379,6 @@ class BuildTests(unittest.TestCase):
self.assertFalse(os.path.isfile(os.path.join(site_dir, 'base.html')))
self.assertFalse(os.path.isfile(os.path.join(site_dir, 'content.html')))
self.assertFalse(os.path.isfile(os.path.join(site_dir, 'nav.html')))
- finally:
- shutil.rmtree(docs_dir)
- shutil.rmtree(site_dir)
def test_strict_mode_valid(self):
pages = [
@@ -467,9 +460,7 @@ class BuildTests(unittest.TestCase):
self.assertEqual(context['config']['extra']['a'], 1)
def test_BOM(self):
- docs_dir = tempfile.mkdtemp()
- site_dir = tempfile.mkdtemp()
- try:
+ with TemporaryDirectory() as docs_dir, TemporaryDirectory() as site_dir:
# Create an UTF-8 Encoded file with BOM (as Micorsoft editors do). See #1186.
f = io.open(os.path.join(docs_dir, 'index.md'), 'w', encoding='utf-8-sig')
f.write('# An UTF-8 encoded file with a BOM')
@@ -490,7 +481,3 @@ class BuildTests(unittest.TestCase):
self.assertTrue(
'<h1 id="an-utf-8-encoded-file-with-a-bom">An UTF-8 encoded file with a BOM</h1>' in output
)
-
- finally:
- shutil.rmtree(docs_dir)
- shutil.rmtree(site_dir)
diff --git a/mkdocs/tests/config/base_tests.py b/mkdocs/tests/config/base_tests.py
index ce56cd93..5118279f 100644
--- a/mkdocs/tests/config/base_tests.py
+++ b/mkdocs/tests/config/base_tests.py
@@ -3,6 +3,12 @@ import os
import tempfile
import unittest
+try:
+ # py>=3.2
+ from tempfile import TemporaryDirectory
+except ImportError:
+ from backports.tempfile import TemporaryDirectory
+
from mkdocs import exceptions
from mkdocs.config import base, defaults
from mkdocs.config.config_options import BaseConfigOption
@@ -42,7 +48,9 @@ class ConfigBaseTests(unittest.TestCase):
Allows users to specify a config other than the default `mkdocs.yml`.
"""
- config_file = tempfile.NamedTemporaryFile('w', delete=False)
+ temp_dir = TemporaryDirectory()
+ config_file = open(os.path.join(temp_dir.name, 'mkdocs.yml'), 'w')
+ os.mkdir(os.path.join(temp_dir.name, 'docs'))
try:
config_file.write("site_name: MkDocs Test\n")
config_file.flush()
@@ -64,7 +72,12 @@ class ConfigBaseTests(unittest.TestCase):
`load_config` can accept an open file descriptor.
"""
- config_file = tempfile.NamedTemporaryFile('r+', delete=False)
+ temp_dir = TemporaryDirectory()
+ temp_path = temp_dir.name
+ config_fname = os.path.join(temp_path, 'mkdocs.yml')
+
+ config_file = open(config_fname, 'w+')
+ os.mkdir(os.path.join(temp_path, 'docs'))
try:
config_file.write("site_name: MkDocs Test\n")
config_file.flush()
@@ -75,7 +88,7 @@ class ConfigBaseTests(unittest.TestCase):
# load_config will always close the file
self.assertTrue(config_file.closed)
finally:
- os.remove(config_file.name)
+ temp_dir.cleanup()
def test_load_from_closed_file(self):
"""
@@ -83,7 +96,10 @@ class ConfigBaseTests(unittest.TestCase):
Ensure `load_config` reloads the closed file.
"""
- config_file = tempfile.NamedTemporaryFile('w', delete=False)
+ temp_dir = TemporaryDirectory()
+ config_file = open(os.path.join(temp_dir.name, 'mkdocs.yml'), 'w')
+ os.mkdir(os.path.join(temp_dir.name, 'docs'))
+
try:
config_file.write("site_name: MkDocs Test\n")
config_file.flush()
@@ -93,7 +109,7 @@ class ConfigBaseTests(unittest.TestCase):
self.assertTrue(isinstance(cfg, base.Config))
self.assertEqual(cfg['site_name'], 'MkDocs Test')
finally:
- os.remove(config_file.name)
+ temp_dir.cleanup()
def test_load_from_deleted_file(self):
"""
@@ -234,3 +250,28 @@ class ConfigBaseTests(unittest.TestCase):
('invalid_option', 'run_validation warning'),
('invalid_option', 'post_validation warning'),
])
+
+ def test_load_from_file_with_relative_paths(self):
+ """
+ When explicitly setting a config file, paths should be relative to the
+ config file, not the working directory.
+ """
+
+ config_dir = TemporaryDirectory()
+ config_fname = os.path.join(config_dir.name, 'mkdocs.yml')
+ docs_dir = os.path.join(config_dir.name, 'src')
+ os.mkdir(docs_dir)
+
+ config_file = open(config_fname, 'w')
+
+ try:
+ config_file.write("docs_dir: src\nsite_name: MkDocs Test\n")
+ config_file.flush()
+ config_file.close()
+
+ cfg = base.load_config(config_file=config_file)
+ self.assertTrue(isinstance(cfg, base.Config))
+ self.assertEqual(cfg['site_name'], 'MkDocs Test')
+ self.assertEqual(cfg['docs_dir'], docs_dir)
+ finally:
+ config_dir.cleanup()
diff --git a/mkdocs/tests/config/config_options_tests.py b/mkdocs/tests/config/config_options_tests.py
index 17c4141b..56b7a724 100644
--- a/mkdocs/tests/config/config_options_tests.py
+++ b/mkdocs/tests/config/config_options_tests.py
@@ -6,6 +6,7 @@ import unittest
import mkdocs
from mkdocs import utils
from mkdocs.config import config_options
+from mkdocs.config.base import Config
class OptionallyRequiredTest(unittest.TestCase):
@@ -272,18 +273,21 @@ class DirTest(unittest.TestCase):
option.validate, [])
def test_doc_dir_is_config_dir(self):
+ cfg = Config(
+ [('docs_dir', config_options.Dir())],
+ config_file_path=os.path.join(os.path.abspath('.'), 'mkdocs.yml'),
+ )
test_config = {
- 'config_file_path': os.path.join(os.path.abspath('.'), 'mkdocs.yml'),
'docs_dir': '.'
}
- docs_dir = config_options.Dir()
+ cfg.load_dict(test_config)
- test_config['docs_dir'] = docs_dir.validate(test_config['docs_dir'])
+ fails, warns = cfg.validate()
- self.assertRaises(config_options.ValidationError,
- docs_dir.post_validation, test_config, 'docs_dir')
+ self.assertEqual(len(fails), 1)
+ self.assertEqual(len(warns), 0)
class SiteDirTest(unittest.TestCase):
@@ -293,12 +297,23 @@ class SiteDirTest(unittest.TestCase):
site_dir = config_options.SiteDir()
docs_dir = config_options.Dir()
- config['config_file_path'] = os.path.join(os.path.abspath('..'), 'mkdocs.yml')
+ fname = os.path.join(os.path.abspath('..'), 'mkdocs.yml')
config['docs_dir'] = docs_dir.validate(config['docs_dir'])
config['site_dir'] = site_dir.validate(config['site_dir'])
- site_dir.post_validation(config, 'site_dir')
- return True # No errors were raised
+
+ schema = [
+ ('site_dir', site_dir),
+ ('docs_dir', docs_dir),
+ ]
+ cfg = Config(schema, fname)
+ cfg.load_dict(config)
+ failed, warned = cfg.validate()
+
+ if failed:
+ raise config_options.ValidationError(failed)
+
+ return True
def test_doc_dir_in_site_dir(self):
diff --git a/mkdocs/tests/config/config_tests.py b/mkdocs/tests/config/config_tests.py
index ebfdba48..d956bf3b 100644
--- a/mkdocs/tests/config/config_tests.py
+++ b/mkdocs/tests/config/config_tests.py
@@ -3,10 +3,16 @@
from __future__ import unicode_literals
import os
-import shutil
import tempfile
import unittest
+try:
+ # py>=3.2
+ from tempfile import TemporaryDirectory
+except ImportError:
+ from backports.tempfile import TemporaryDirectory
+
+
import mkdocs
from mkdocs import config
from mkdocs import utils
@@ -81,8 +87,11 @@ class ConfigTests(unittest.TestCase):
pages:
- 'Introduction': 'index.md'
""")
- config_file = tempfile.NamedTemporaryFile('w', delete=False)
- try:
+ with TemporaryDirectory() as temp_path:
+ os.mkdir(os.path.join(temp_path, 'docs'))
+ config_path = os.path.join(temp_path, 'mkdocs.yml')
+ config_file = open(config_path, 'w')
+
config_file.write(ensure_utf(file_contents))
config_file.flush()
config_file.close()
@@ -90,93 +99,87 @@ class ConfigTests(unittest.TestCase):
result = config.load_config(config_file=config_file.name)
self.assertEqual(result['site_name'], expected_result['site_name'])
self.assertEqual(result['pages'], expected_result['pages'])
- finally:
- os.remove(config_file.name)
def test_theme(self):
-
- mytheme = tempfile.mkdtemp()
- custom = tempfile.mkdtemp()
-
- configs = [
- dict(), # default theme
- {"theme": "readthedocs"}, # builtin theme
- {"theme_dir": mytheme}, # custom only
- {"theme": "readthedocs", "theme_dir": custom}, # builtin and custom
- {"theme": {'name': 'readthedocs'}}, # builtin as complex
- {"theme": {'name': None, 'custom_dir': mytheme}}, # custom only as complex
- {"theme": {'name': 'readthedocs', 'custom_dir': custom}}, # builtin and custom as complex
- { # user defined variables
- 'theme': {
- 'name': 'mkdocs',
- 'static_templates': ['foo.html'],
- 'show_sidebar': False,
- 'some_var': 'bar'
+ with TemporaryDirectory() as mytheme, TemporaryDirectory() as custom:
+ configs = [
+ dict(), # default theme
+ {"theme": "readthedocs"}, # builtin theme
+ {"theme_dir": mytheme}, # custom only
+ {"theme": "readthedocs", "theme_dir": custom}, # builtin and custom
+ {"theme": {'name': 'readthedocs'}}, # builtin as complex
+ {"theme": {'name': None, 'custom_dir': mytheme}}, # custom only as complex
+ {"theme": {'name': 'readthedocs', 'custom_dir': custom}}, # builtin and custom as complex
+ { # user defined variables
+ 'theme': {
+ 'name': 'mkdocs',
+ 'static_templates': ['foo.html'],
+ 'show_sidebar': False,
+ 'some_var': 'bar'
+ }
}
- }
- ]
-
- mkdocs_dir = os.path.abspath(os.path.dirname(mkdocs.__file__))
- mkdocs_templates_dir = os.path.join(mkdocs_dir, 'templates')
- theme_dir = os.path.abspath(os.path.join(mkdocs_dir, 'themes'))
-
- results = (
- {
- 'dirs': [os.path.join(theme_dir, 'mkdocs'), mkdocs_templates_dir],
- 'static_templates': ['404.html', 'sitemap.xml'],
- 'vars': {'include_search_page': False, 'search_index_only': False}
- }, {
- 'dirs': [os.path.join(theme_dir, 'readthedocs'), mkdocs_templates_dir],
- 'static_templates': ['404.html', 'sitemap.xml'],
- 'vars': {'include_search_page': True, 'search_index_only': False}
- }, {
- 'dirs': [mytheme, mkdocs_templates_dir],
- 'static_templates': ['sitemap.xml'],
- 'vars': {}
- }, {
- 'dirs': [custom, os.path.join(theme_dir, 'readthedocs'), mkdocs_templates_dir],
- 'static_templates': ['404.html', 'sitemap.xml'],
- 'vars': {'include_search_page': True, 'search_index_only': False}
- }, {
- 'dirs': [os.path.join(theme_dir, 'readthedocs'), mkdocs_templates_dir],
- 'static_templates': ['404.html', 'sitemap.xml'],
- 'vars': {'include_search_page': True, 'search_index_only': False}
- }, {
- 'dirs': [mytheme, mkdocs_templates_dir],
- 'static_templates': ['sitemap.xml'],
- 'vars': {}
- }, {
- 'dirs': [custom, os.path.join(theme_dir, 'readthedocs'), mkdocs_templates_dir],
- 'static_templates': ['404.html', 'sitemap.xml'],
- 'vars': {'include_search_page': True, 'search_index_only': False}
- }, {
- 'dirs': [os.path.join(theme_dir, 'mkdocs'), mkdocs_templates_dir],
- 'static_templates': ['404.html', 'sitemap.xml', 'foo.html'],
- 'vars': {
- 'show_sidebar': False,
- 'some_var': 'bar',
- 'include_search_page': False,
- 'search_index_only': False
+ ]
+
+ mkdocs_dir = os.path.abspath(os.path.dirname(mkdocs.__file__))
+ mkdocs_templates_dir = os.path.join(mkdocs_dir, 'templates')
+ theme_dir = os.path.abspath(os.path.join(mkdocs_dir, 'themes'))
+
+ results = (
+ {
+ 'dirs': [os.path.join(theme_dir, 'mkdocs'), mkdocs_templates_dir],
+ 'static_templates': ['404.html', 'sitemap.xml'],
+ 'vars': {'include_search_page': False, 'search_index_only': False}
+ }, {
+ 'dirs': [os.path.join(theme_dir, 'readthedocs'), mkdocs_templates_dir],
+ 'static_templates': ['404.html', 'sitemap.xml'],
+ 'vars': {'include_search_page': True, 'search_index_only': False}
+ }, {
+ 'dirs': [mytheme, mkdocs_templates_dir],
+ 'static_templates': ['sitemap.xml'],
+ 'vars': {}
+ }, {
+ 'dirs': [custom, os.path.join(theme_dir, 'readthedocs'), mkdocs_templates_dir],
+ 'static_templates': ['404.html', 'sitemap.xml'],
+ 'vars': {'include_search_page': True, 'search_index_only': False}
+ }, {
+ 'dirs': [os.path.join(theme_dir, 'readthedocs'), mkdocs_templates_dir],
+ 'static_templates': ['404.html', 'sitemap.xml'],
+ 'vars': {'include_search_page': True, 'search_index_only': False}
+ }, {
+ 'dirs': [mytheme, mkdocs_templates_dir],
+ 'static_templates': ['sitemap.xml'],
+ 'vars': {}
+ }, {
+ 'dirs': [custom, os.path.join(theme_dir, 'readthedocs'), mkdocs_templates_dir],
+ 'static_templates': ['404.html', 'sitemap.xml'],
+ 'vars': {'include_search_page': True, 'search_index_only': False}
+ }, {
+ 'dirs': [os.path.join(theme_dir, 'mkdocs'), mkdocs_templates_dir],
+ 'static_templates': ['404.html', 'sitemap.xml', 'foo.html'],
+ 'vars': {
+ 'show_sidebar': False,
+ 'some_var': 'bar',
+ 'include_search_page': False,
+ 'search_index_only': False
+ }
}
- }
- )
+ )
- for config_contents, result in zip(configs, results):
+ for config_contents, result in zip(configs, results):
- c = config.Config(schema=(
- ('theme', config_options.Theme(default='mkdocs')),
- ('theme_dir', config_options.ThemeDir(exists=True)),
- ))
- c.load_dict(config_contents)
- errors, warnings = c.validate()
- self.assertEqual(len(errors), 0)
- self.assertEqual(c['theme'].dirs, result['dirs'])
- self.assertEqual(c['theme'].static_templates, set(result['static_templates']))
- self.assertEqual(dict([(k, c['theme'][k]) for k in iter(c['theme'])]), result['vars'])
+ c = config.Config(schema=(
+ ('theme', config_options.Theme(default='mkdocs')),
+ ('theme_dir', config_options.ThemeDir(exists=True)),
+ ))
+ c.load_dict(config_contents)
+ errors, warnings = c.validate()
+ self.assertEqual(len(errors), 0)
+ self.assertEqual(c['theme'].dirs, result['dirs'])
+ self.assertEqual(c['theme'].static_templates, set(result['static_templates']))
+ self.assertEqual(dict([(k, c['theme'][k]) for k in iter(c['theme'])]), result['vars'])
def test_default_pages(self):
- tmp_dir = tempfile.mkdtemp()
- try:
+ with TemporaryDirectory() as tmp_dir:
open(os.path.join(tmp_dir, 'index.md'), 'w').close()
open(os.path.join(tmp_dir, 'about.md'), 'w').close()
conf = config.Config(schema=config.DEFAULT_SCHEMA)
@@ -187,12 +190,9 @@ class ConfigTests(unittest.TestCase):
})
conf.validate()
self.assertEqual(['index.md', 'about.md'], conf['pages'])
- finally:
- shutil.rmtree(tmp_dir)
def test_default_pages_nested(self):
- tmp_dir = tempfile.mkdtemp()
- try:
+ with TemporaryDirectory() as tmp_dir:
open(os.path.join(tmp_dir, 'index.md'), 'w').close()
open(os.path.join(tmp_dir, 'getting-started.md'), 'w').close()
open(os.path.join(tmp_dir, 'about.md'), 'w').close()
@@ -228,8 +228,6 @@ class ConfigTests(unittest.TestCase):
os.path.join('subC', 'index.md')
]}
], conf['pages'])
- finally:
- shutil.rmtree(tmp_dir)
def test_doc_dir_in_site_dir(self):
diff --git a/mkdocs/tests/plugin_tests.py b/mkdocs/tests/plugin_tests.py
index 6fa4091a..ac2c3983 100644
--- a/mkdocs/tests/plugin_tests.py
+++ b/mkdocs/tests/plugin_tests.py
@@ -5,6 +5,7 @@ from __future__ import unicode_literals
import unittest
import mock
+import os
from mkdocs import plugins
from mkdocs import utils
@@ -14,7 +15,8 @@ from mkdocs import config
class DummyPlugin(plugins.BasePlugin):
config_scheme = (
('foo', config.config_options.Type(utils.string_types, default='default foo')),
- ('bar', config.config_options.Type(int, default=0))
+ ('bar', config.config_options.Type(int, default=0)),
+ ('dir', config.config_options.Dir(exists=False)),
)
def on_pre_page(self, content, **kwargs):
@@ -29,18 +31,27 @@ class DummyPlugin(plugins.BasePlugin):
class TestPluginClass(unittest.TestCase):
def test_valid_plugin_options(self):
+ test_dir = 'test'
options = {
- 'foo': 'some value'
+ 'foo': 'some value',
+ 'dir': test_dir,
}
+ cfg_fname = os.path.join('tmp', 'test', 'fname.yml')
+ cfg_fname = os.path.abspath(cfg_fname)
+
+ cfg_dirname = os.path.dirname(cfg_fname)
+ expected = os.path.join(cfg_dirname, test_dir)
+
expected = {
'foo': 'some value',
- 'bar': 0
+ 'bar': 0,
+ 'dir': expected,
}
plugin = DummyPlugin()
- errors, warnings = plugin.load_config(options)
+ errors, warnings = plugin.load_config(options, config_file_path=cfg_fname)
self.assertEqual(plugin.config, expected)
self.assertEqual(errors, [])
self.assertEqual(warnings, [])
@@ -132,7 +143,8 @@ class TestPluginConfig(unittest.TestCase):
self.assertIsInstance(cfg['plugins']['sample'], plugins.BasePlugin)
expected = {
'foo': 'default foo',
- 'bar': 0
+ 'bar': 0,
+ 'dir': None,
}
self.assertEqual(cfg['plugins']['sample'].config, expected)
@@ -154,7 +166,8 @@ class TestPluginConfig(unittest.TestCase):
self.assertIsInstance(cfg['plugins']['sample'], plugins.BasePlugin)
expected = {
'foo': 'foo value',
- 'bar': 42
+ 'bar': 42,
+ 'dir': None,
}
self.assertEqual(cfg['plugins']['sample'].config, expected)
@@ -194,7 +207,8 @@ class TestPluginConfig(unittest.TestCase):
self.assertIsInstance(cfg['plugins']['sample'], plugins.BasePlugin)
expected = {
'foo': 'default foo',
- 'bar': 0
+ 'bar': 0,
+ 'dir': None,
}
self.assertEqual(cfg['plugins']['sample'].config, expected)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 3,
"test_score": 0
},
"num_modified_files": 8
} | 0.17 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-mock",
"mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/project.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | click==8.1.8
exceptiongroup==1.2.2
importlib_metadata==8.6.1
iniconfig==2.1.0
Jinja2==3.1.6
livereload==2.7.1
Markdown==3.7
MarkupSafe==3.0.2
mdx-gh-links==0.4
-e git+https://github.com/mkdocs/mkdocs.git@d6449f90808b50979694a3e91b49b532f3bf785d#egg=mkdocs
mock==5.2.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytest-mock==3.14.0
PyYAML==6.0.2
tomli==2.2.1
tornado==6.4.2
zipp==3.21.0
| name: mkdocs
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- click==8.1.8
- exceptiongroup==1.2.2
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- jinja2==3.1.6
- livereload==2.7.1
- markdown==3.7
- markupsafe==3.0.2
- mdx-gh-links==0.4
- mock==5.2.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-mock==3.14.0
- pyyaml==6.0.2
- tomli==2.2.1
- tornado==6.4.2
- zipp==3.21.0
prefix: /opt/conda/envs/mkdocs
| [
"mkdocs/tests/config/base_tests.py::ConfigBaseTests::test_load_from_file_with_relative_paths",
"mkdocs/tests/config/config_options_tests.py::DirTest::test_doc_dir_is_config_dir",
"mkdocs/tests/config/config_options_tests.py::SiteDirTest::test_common_prefix",
"mkdocs/tests/config/config_options_tests.py::SiteDirTest::test_doc_dir_in_site_dir",
"mkdocs/tests/config/config_options_tests.py::SiteDirTest::test_site_dir_in_docs_dir",
"mkdocs/tests/plugin_tests.py::TestPluginClass::test_invalid_plugin_options",
"mkdocs/tests/plugin_tests.py::TestPluginClass::test_valid_plugin_options",
"mkdocs/tests/plugin_tests.py::TestPluginCollection::test_event_returns_None",
"mkdocs/tests/plugin_tests.py::TestPluginCollection::test_run_event_on_collection",
"mkdocs/tests/plugin_tests.py::TestPluginCollection::test_run_event_twice_on_collection",
"mkdocs/tests/plugin_tests.py::TestPluginConfig::test_plugin_config_none_with_default",
"mkdocs/tests/plugin_tests.py::TestPluginConfig::test_plugin_config_with_options",
"mkdocs/tests/plugin_tests.py::TestPluginConfig::test_plugin_config_without_options"
]
| [
"mkdocs/tests/build_tests.py::BuildTests::test_BOM",
"mkdocs/tests/build_tests.py::BuildTests::test_absolute_link",
"mkdocs/tests/build_tests.py::BuildTests::test_anchor_only_link",
"mkdocs/tests/build_tests.py::BuildTests::test_convert_internal_asbolute_media",
"mkdocs/tests/build_tests.py::BuildTests::test_convert_internal_link",
"mkdocs/tests/build_tests.py::BuildTests::test_convert_internal_link_differing_directory",
"mkdocs/tests/build_tests.py::BuildTests::test_convert_internal_link_with_anchor",
"mkdocs/tests/build_tests.py::BuildTests::test_convert_internal_media",
"mkdocs/tests/build_tests.py::BuildTests::test_convert_markdown",
"mkdocs/tests/build_tests.py::BuildTests::test_convert_multiple_internal_links",
"mkdocs/tests/build_tests.py::BuildTests::test_copy_theme_files",
"mkdocs/tests/build_tests.py::BuildTests::test_copying_media",
"mkdocs/tests/build_tests.py::BuildTests::test_dont_convert_code_block_urls",
"mkdocs/tests/build_tests.py::BuildTests::test_empty_document",
"mkdocs/tests/build_tests.py::BuildTests::test_extension_config",
"mkdocs/tests/build_tests.py::BuildTests::test_ignore_email_links",
"mkdocs/tests/build_tests.py::BuildTests::test_ignore_external_link",
"mkdocs/tests/build_tests.py::BuildTests::test_markdown_custom_extension",
"mkdocs/tests/build_tests.py::BuildTests::test_markdown_duplicate_custom_extension",
"mkdocs/tests/build_tests.py::BuildTests::test_markdown_fenced_code_extension",
"mkdocs/tests/build_tests.py::BuildTests::test_markdown_table_extension",
"mkdocs/tests/build_tests.py::BuildTests::test_not_use_directory_urls",
"mkdocs/tests/build_tests.py::BuildTests::test_strict_mode_invalid",
"mkdocs/tests/build_tests.py::BuildTests::test_strict_mode_valid"
]
| [
"mkdocs/tests/build_tests.py::BuildTests::test_extra_context",
"mkdocs/tests/config/base_tests.py::ConfigBaseTests::test_load_from_closed_file",
"mkdocs/tests/config/base_tests.py::ConfigBaseTests::test_load_from_deleted_file",
"mkdocs/tests/config/base_tests.py::ConfigBaseTests::test_load_from_file",
"mkdocs/tests/config/base_tests.py::ConfigBaseTests::test_load_from_missing_file",
"mkdocs/tests/config/base_tests.py::ConfigBaseTests::test_load_from_open_file",
"mkdocs/tests/config/base_tests.py::ConfigBaseTests::test_load_missing_required",
"mkdocs/tests/config/base_tests.py::ConfigBaseTests::test_missing_required",
"mkdocs/tests/config/base_tests.py::ConfigBaseTests::test_post_validation_error",
"mkdocs/tests/config/base_tests.py::ConfigBaseTests::test_pre_and_run_validation_errors",
"mkdocs/tests/config/base_tests.py::ConfigBaseTests::test_pre_validation_error",
"mkdocs/tests/config/base_tests.py::ConfigBaseTests::test_run_and_post_validation_errors",
"mkdocs/tests/config/base_tests.py::ConfigBaseTests::test_run_validation_error",
"mkdocs/tests/config/base_tests.py::ConfigBaseTests::test_unrecognised_keys",
"mkdocs/tests/config/base_tests.py::ConfigBaseTests::test_validation_warnings",
"mkdocs/tests/config/config_options_tests.py::OptionallyRequiredTest::test_default",
"mkdocs/tests/config/config_options_tests.py::OptionallyRequiredTest::test_empty",
"mkdocs/tests/config/config_options_tests.py::OptionallyRequiredTest::test_replace_default",
"mkdocs/tests/config/config_options_tests.py::OptionallyRequiredTest::test_required",
"mkdocs/tests/config/config_options_tests.py::OptionallyRequiredTest::test_required_no_default",
"mkdocs/tests/config/config_options_tests.py::TypeTest::test_length",
"mkdocs/tests/config/config_options_tests.py::TypeTest::test_multiple_types",
"mkdocs/tests/config/config_options_tests.py::TypeTest::test_single_type",
"mkdocs/tests/config/config_options_tests.py::IpAddressTest::test_default_address",
"mkdocs/tests/config/config_options_tests.py::IpAddressTest::test_invalid_address_format",
"mkdocs/tests/config/config_options_tests.py::IpAddressTest::test_invalid_address_missing_port",
"mkdocs/tests/config/config_options_tests.py::IpAddressTest::test_invalid_address_port",
"mkdocs/tests/config/config_options_tests.py::IpAddressTest::test_invalid_address_type",
"mkdocs/tests/config/config_options_tests.py::IpAddressTest::test_named_address",
"mkdocs/tests/config/config_options_tests.py::IpAddressTest::test_valid_IPv6_address",
"mkdocs/tests/config/config_options_tests.py::IpAddressTest::test_valid_address",
"mkdocs/tests/config/config_options_tests.py::URLTest::test_invalid",
"mkdocs/tests/config/config_options_tests.py::URLTest::test_invalid_url",
"mkdocs/tests/config/config_options_tests.py::URLTest::test_valid_url",
"mkdocs/tests/config/config_options_tests.py::RepoURLTest::test_edit_uri_bitbucket",
"mkdocs/tests/config/config_options_tests.py::RepoURLTest::test_edit_uri_custom",
"mkdocs/tests/config/config_options_tests.py::RepoURLTest::test_edit_uri_github",
"mkdocs/tests/config/config_options_tests.py::RepoURLTest::test_edit_uri_gitlab",
"mkdocs/tests/config/config_options_tests.py::RepoURLTest::test_repo_name_bitbucket",
"mkdocs/tests/config/config_options_tests.py::RepoURLTest::test_repo_name_custom",
"mkdocs/tests/config/config_options_tests.py::RepoURLTest::test_repo_name_custom_and_empty_edit_uri",
"mkdocs/tests/config/config_options_tests.py::RepoURLTest::test_repo_name_github",
"mkdocs/tests/config/config_options_tests.py::RepoURLTest::test_repo_name_gitlab",
"mkdocs/tests/config/config_options_tests.py::DirTest::test_file",
"mkdocs/tests/config/config_options_tests.py::DirTest::test_incorrect_type_attribute_error",
"mkdocs/tests/config/config_options_tests.py::DirTest::test_incorrect_type_type_error",
"mkdocs/tests/config/config_options_tests.py::DirTest::test_missing_dir",
"mkdocs/tests/config/config_options_tests.py::DirTest::test_missing_dir_but_required",
"mkdocs/tests/config/config_options_tests.py::DirTest::test_valid_dir",
"mkdocs/tests/config/config_options_tests.py::ThemeTest::test_theme_as_complex_config",
"mkdocs/tests/config/config_options_tests.py::ThemeTest::test_theme_as_simple_config",
"mkdocs/tests/config/config_options_tests.py::ThemeTest::test_theme_as_string",
"mkdocs/tests/config/config_options_tests.py::ThemeTest::test_theme_config_missing_name",
"mkdocs/tests/config/config_options_tests.py::ThemeTest::test_theme_default",
"mkdocs/tests/config/config_options_tests.py::ThemeTest::test_theme_invalid_type",
"mkdocs/tests/config/config_options_tests.py::ThemeTest::test_theme_name_is_none",
"mkdocs/tests/config/config_options_tests.py::ThemeTest::test_uninstalled_theme_as_config",
"mkdocs/tests/config/config_options_tests.py::ThemeTest::test_uninstalled_theme_as_string",
"mkdocs/tests/config/config_options_tests.py::PagesTest::test_invalid_config",
"mkdocs/tests/config/config_options_tests.py::PagesTest::test_invalid_type",
"mkdocs/tests/config/config_options_tests.py::PagesTest::test_old_format",
"mkdocs/tests/config/config_options_tests.py::PagesTest::test_provided_dict",
"mkdocs/tests/config/config_options_tests.py::PagesTest::test_provided_empty",
"mkdocs/tests/config/config_options_tests.py::PrivateTest::test_defined",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_builtins",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_builtins_config",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_configkey",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_duplicates",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_invalid_config_item",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_invalid_config_option",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_invalid_dict_item",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_list_dicts",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_mixed_list",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_none",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_not_list",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_simple_list",
"mkdocs/tests/config/config_tests.py::ConfigTests::test_config_option",
"mkdocs/tests/config/config_tests.py::ConfigTests::test_default_pages",
"mkdocs/tests/config/config_tests.py::ConfigTests::test_default_pages_nested",
"mkdocs/tests/config/config_tests.py::ConfigTests::test_doc_dir_in_site_dir",
"mkdocs/tests/config/config_tests.py::ConfigTests::test_empty_config",
"mkdocs/tests/config/config_tests.py::ConfigTests::test_invalid_config",
"mkdocs/tests/config/config_tests.py::ConfigTests::test_missing_config_file",
"mkdocs/tests/config/config_tests.py::ConfigTests::test_missing_site_name",
"mkdocs/tests/config/config_tests.py::ConfigTests::test_nonexistant_config",
"mkdocs/tests/config/config_tests.py::ConfigTests::test_theme",
"mkdocs/tests/plugin_tests.py::TestPluginCollection::test_run_undefined_event_on_collection",
"mkdocs/tests/plugin_tests.py::TestPluginCollection::test_run_unknown_event_on_collection",
"mkdocs/tests/plugin_tests.py::TestPluginCollection::test_set_multiple_plugins_on_collection",
"mkdocs/tests/plugin_tests.py::TestPluginCollection::test_set_plugin_on_collection",
"mkdocs/tests/plugin_tests.py::TestPluginConfig::test_plugin_config_empty_list_with_default",
"mkdocs/tests/plugin_tests.py::TestPluginConfig::test_plugin_config_empty_list_with_empty_default",
"mkdocs/tests/plugin_tests.py::TestPluginConfig::test_plugin_config_multivalue_dict",
"mkdocs/tests/plugin_tests.py::TestPluginConfig::test_plugin_config_none_with_empty_default",
"mkdocs/tests/plugin_tests.py::TestPluginConfig::test_plugin_config_not_list",
"mkdocs/tests/plugin_tests.py::TestPluginConfig::test_plugin_config_not_string_or_dict",
"mkdocs/tests/plugin_tests.py::TestPluginConfig::test_plugin_config_options_not_dict",
"mkdocs/tests/plugin_tests.py::TestPluginConfig::test_plugin_config_uninstalled"
]
| []
| BSD 2-Clause "Simplified" License | 2,042 | [
"docs/about/release-notes.md",
"mkdocs/plugins.py",
"docs/user-guide/custom-themes.md",
"docs/user-guide/styling-your-docs.md",
"docs/user-guide/configuration.md",
"mkdocs/config/base.py",
"tox.ini",
"mkdocs/config/config_options.py"
]
| [
"docs/about/release-notes.md",
"mkdocs/plugins.py",
"docs/user-guide/custom-themes.md",
"docs/user-guide/styling-your-docs.md",
"docs/user-guide/configuration.md",
"mkdocs/config/base.py",
"tox.ini",
"mkdocs/config/config_options.py"
]
|
tox-dev__tox-742 | 36ff71d18d10e3c0d4275179d8912abc385b20f0 | 2018-01-14 00:17:39 | 36ff71d18d10e3c0d4275179d8912abc385b20f0 | codecov[bot]: # [Codecov](https://codecov.io/gh/tox-dev/tox/pull/742?src=pr&el=h1) Report
> Merging [#742](https://codecov.io/gh/tox-dev/tox/pull/742?src=pr&el=desc) into [master](https://codecov.io/gh/tox-dev/tox/commit/36ff71d18d10e3c0d4275179d8912abc385b20f0?src=pr&el=desc) will **increase** coverage by `<.01%`.
> The diff coverage is `100%`.
[Impacted file tree graph](https://codecov.io/gh/tox-dev/tox/pull/742?src=pr&el=tree)
```diff
@@ Coverage Diff @@
## master #742 +/- ##
==========================================
+ Coverage 94.67% 94.67% +<.01%
==========================================
Files 11 11
Lines 2383 2384 +1
==========================================
+ Hits 2256 2257 +1
Misses 127 127
```
| [Impacted Files](https://codecov.io/gh/tox-dev/tox/pull/742?src=pr&el=tree) | Coverage Δ | |
|---|---|---|
| [tox/config.py](https://codecov.io/gh/tox-dev/tox/pull/742/diff?src=pr&el=tree#diff-dG94L2NvbmZpZy5weQ==) | `97.75% <100%> (ø)` | :arrow_up: |
------
[Continue to review full report at Codecov](https://codecov.io/gh/tox-dev/tox/pull/742?src=pr&el=continue).
> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta)
> `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data`
> Powered by [Codecov](https://codecov.io/gh/tox-dev/tox/pull/742?src=pr&el=footer). Last update [36ff71d...0c5a478](https://codecov.io/gh/tox-dev/tox/pull/742?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).
| diff --git a/changelog/740.feature.rst b/changelog/740.feature.rst
new file mode 100644
index 00000000..236efbd8
--- /dev/null
+++ b/changelog/740.feature.rst
@@ -0,0 +1,2 @@
+Forward ``PROCESSOR_ARCHITECTURE`` by default on Windows to fix
+``platform.machine()``.
diff --git a/tox/config.py b/tox/config.py
index d03c97e0..ec9dc6c2 100755
--- a/tox/config.py
+++ b/tox/config.py
@@ -530,6 +530,7 @@ def tox_addoption(parser):
# for `multiprocessing.cpu_count()` on Windows
# (prior to Python 3.4).
passenv.add("NUMBER_OF_PROCESSORS")
+ passenv.add("PROCESSOR_ARCHITECTURE") # platform.machine()
passenv.add("USERPROFILE") # needed for `os.path.expanduser()`
passenv.add("MSYSTEM") # fixes #429
else:
| Consider whitelisting `PROCESSOR_ARCHITECTURE` environment variable
On Windows, this is necessary for [`platform.machine()`](https://docs.python.org/3/library/platform.html#platform.machine) to function
```
$ tox --version
2.9.1 imported from c:\users\anthony\appdata\local\temp\venv\lib\site-packages\tox\__init__.pyc
```
```ini
[tox]
envlist = py27
skipsdist = true
[testenv]
commands =
python -c 'import platform; print(platform.machine())'
```
```
$ tox -e py27
py27 create: C:\Users\Anthony\AppData\Local\Temp\.tox\py27
py27 installed:
py27 runtests: PYTHONHASHSEED='154'
py27 runtests: commands[0] | python -c import platform; print(platform.machine())
______________________________________________________ summary _______________________________________________________
py27: commands succeeded
congratulations :)
$ TOX_TESTENV_PASSENV=PROCESSOR_ARCHITECTURE tox -e py27
py27 installed:
py27 runtests: PYTHONHASHSEED='864'
py27 runtests: commands[0] | python -c import platform; print(platform.machine())
AMD64
______________________________________________________ summary _______________________________________________________
py27: commands succeeded
congratulations :)
```
Hitting this in https://github.com/pre-commit/pre-commit/pull/685 where I'm working around this by adding `PROCESSOR_ARCHITECTURE` to `passenv` | tox-dev/tox | diff --git a/tests/test_config.py b/tests/test_config.py
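For context on why the fix works: on the Python versions involved, `platform.machine()` on Windows derives its value from environment variables, so the sanitized environment of a tox testenv that drops `PROCESSOR_ARCHITECTURE` yields an empty string. A rough sketch of that lookup, simplified from CPython's `platform.py` and not a verbatim copy:

```python
import os

def machine_from_env():
    # WOW64 processes mask the native architecture, so the *W6432
    # variable takes precedence when present.
    if 'PROCESSOR_ARCHITEW6432' in os.environ:
        return os.environ['PROCESSOR_ARCHITEW6432']
    return os.environ.get('PROCESSOR_ARCHITECTURE', '')

# In a stripped environment this returns '', which is exactly the blank
# output seen in the first tox run above; passing the variable through
# (as the patch's passenv.add does) restores 'AMD64'.
```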
index e8e8d559..1f446caa 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -908,6 +908,7 @@ class TestConfigTestEnv:
assert "TEMP" in envconfig.passenv
assert "TMP" in envconfig.passenv
assert "NUMBER_OF_PROCESSORS" in envconfig.passenv
+ assert "PROCESSOR_ARCHITECTURE" in envconfig.passenv
assert "USERPROFILE" in envconfig.passenv
assert "MSYSTEM" in envconfig.passenv
else:
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 2.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-timeout"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
coverage==6.2
distlib==0.3.9
filelock==3.4.1
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
platformdirs==2.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-cov==4.0.0
pytest-timeout==2.1.0
six==1.17.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
-e git+https://github.com/tox-dev/tox.git@36ff71d18d10e3c0d4275179d8912abc385b20f0#egg=tox
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
virtualenv==20.17.1
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: tox
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==6.2
- distlib==0.3.9
- filelock==3.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- platformdirs==2.4.0
- pytest-cov==4.0.0
- pytest-timeout==2.1.0
- six==1.17.0
- tomli==1.2.3
- virtualenv==20.17.1
prefix: /opt/conda/envs/tox
| [
"tests/test_config.py::TestConfigTestEnv::test_passenv_as_multiline_list[win32]"
]
| [
"tests/test_config.py::TestVenvConfig::test_force_dep_with_url",
"tests/test_config.py::TestIniParser::test_getbool",
"tests/test_config.py::TestCmdInvocation::test_listenvs",
"tests/test_config.py::TestCmdInvocation::test_listenvs_verbose_description",
"tests/test_config.py::TestCmdInvocation::test_listenvs_all",
"tests/test_config.py::TestCmdInvocation::test_no_tox_ini"
]
| [
"tests/test_config.py::TestVenvConfig::test_config_parsing_minimal",
"tests/test_config.py::TestVenvConfig::test_config_parsing_multienv",
"tests/test_config.py::TestVenvConfig::test_envdir_set_manually",
"tests/test_config.py::TestVenvConfig::test_envdir_set_manually_with_substitutions",
"tests/test_config.py::TestVenvConfig::test_force_dep_version",
"tests/test_config.py::TestVenvConfig::test_process_deps",
"tests/test_config.py::TestVenvConfig::test_is_same_dep",
"tests/test_config.py::TestConfigPlatform::test_config_parse_platform",
"tests/test_config.py::TestConfigPlatform::test_config_parse_platform_rex",
"tests/test_config.py::TestConfigPlatform::test_config_parse_platform_with_factors[win]",
"tests/test_config.py::TestConfigPlatform::test_config_parse_platform_with_factors[lin]",
"tests/test_config.py::TestConfigPlatform::test_config_parse_platform_with_factors[osx]",
"tests/test_config.py::TestConfigPackage::test_defaults",
"tests/test_config.py::TestConfigPackage::test_defaults_distshare",
"tests/test_config.py::TestConfigPackage::test_defaults_changed_dir",
"tests/test_config.py::TestConfigPackage::test_project_paths",
"tests/test_config.py::TestParseconfig::test_search_parents",
"tests/test_config.py::TestParseconfig::test_explicit_config_path",
"tests/test_config.py::test_get_homedir",
"tests/test_config.py::TestGetcontextname::test_blank",
"tests/test_config.py::TestGetcontextname::test_jenkins",
"tests/test_config.py::TestGetcontextname::test_hudson_legacy",
"tests/test_config.py::TestIniParserAgainstCommandsKey::test_command_substitution_from_other_section",
"tests/test_config.py::TestIniParserAgainstCommandsKey::test_command_substitution_from_other_section_multiline",
"tests/test_config.py::TestIniParserAgainstCommandsKey::test_command_substitution_from_other_section_posargs",
"tests/test_config.py::TestIniParserAgainstCommandsKey::test_command_section_and_posargs_substitution",
"tests/test_config.py::TestIniParserAgainstCommandsKey::test_command_env_substitution",
"tests/test_config.py::TestIniParserAgainstCommandsKey::test_command_env_substitution_global",
"tests/test_config.py::TestIniParserAgainstCommandsKey::test_regression_issue595",
"tests/test_config.py::TestIniParser::test_getstring_single",
"tests/test_config.py::TestIniParser::test_missing_substitution",
"tests/test_config.py::TestIniParser::test_getstring_fallback_sections",
"tests/test_config.py::TestIniParser::test_getstring_substitution",
"tests/test_config.py::TestIniParser::test_getlist",
"tests/test_config.py::TestIniParser::test_getdict",
"tests/test_config.py::TestIniParser::test_normal_env_sub_works",
"tests/test_config.py::TestIniParser::test_missing_env_sub_raises_config_error_in_non_testenv",
"tests/test_config.py::TestIniParser::test_missing_env_sub_populates_missing_subs",
"tests/test_config.py::TestIniParser::test_getstring_environment_substitution_with_default",
"tests/test_config.py::TestIniParser::test_value_matches_section_substitution",
"tests/test_config.py::TestIniParser::test_value_doesn_match_section_substitution",
"tests/test_config.py::TestIniParser::test_getstring_other_section_substitution",
"tests/test_config.py::TestIniParser::test_argvlist",
"tests/test_config.py::TestIniParser::test_argvlist_windows_escaping",
"tests/test_config.py::TestIniParser::test_argvlist_multiline",
"tests/test_config.py::TestIniParser::test_argvlist_quoting_in_command",
"tests/test_config.py::TestIniParser::test_argvlist_comment_after_command",
"tests/test_config.py::TestIniParser::test_argvlist_command_contains_hash",
"tests/test_config.py::TestIniParser::test_argvlist_positional_substitution",
"tests/test_config.py::TestIniParser::test_argvlist_quoted_posargs",
"tests/test_config.py::TestIniParser::test_argvlist_posargs_with_quotes",
"tests/test_config.py::TestIniParser::test_positional_arguments_are_only_replaced_when_standing_alone",
"tests/test_config.py::TestIniParser::test_posargs_are_added_escaped_issue310",
"tests/test_config.py::TestIniParser::test_substitution_with_multiple_words",
"tests/test_config.py::TestIniParser::test_getargv",
"tests/test_config.py::TestIniParser::test_getpath",
"tests/test_config.py::TestIniParserPrefix::test_basic_section_access",
"tests/test_config.py::TestIniParserPrefix::test_fallback_sections",
"tests/test_config.py::TestIniParserPrefix::test_value_matches_prefixed_section_substitution",
"tests/test_config.py::TestIniParserPrefix::test_value_doesn_match_prefixed_section_substitution",
"tests/test_config.py::TestIniParserPrefix::test_other_section_substitution",
"tests/test_config.py::TestConfigTestEnv::test_commentchars_issue33",
"tests/test_config.py::TestConfigTestEnv::test_defaults",
"tests/test_config.py::TestConfigTestEnv::test_sitepackages_switch",
"tests/test_config.py::TestConfigTestEnv::test_installpkg_tops_develop",
"tests/test_config.py::TestConfigTestEnv::test_specific_command_overrides",
"tests/test_config.py::TestConfigTestEnv::test_whitelist_externals",
"tests/test_config.py::TestConfigTestEnv::test_changedir",
"tests/test_config.py::TestConfigTestEnv::test_ignore_errors",
"tests/test_config.py::TestConfigTestEnv::test_envbindir",
"tests/test_config.py::TestConfigTestEnv::test_envbindir_jython[jython]",
"tests/test_config.py::TestConfigTestEnv::test_envbindir_jython[pypy]",
"tests/test_config.py::TestConfigTestEnv::test_envbindir_jython[pypy3]",
"tests/test_config.py::TestConfigTestEnv::test_passenv_as_multiline_list[linux2]",
"tests/test_config.py::TestConfigTestEnv::test_passenv_as_space_separated_list[win32]",
"tests/test_config.py::TestConfigTestEnv::test_passenv_as_space_separated_list[linux2]",
"tests/test_config.py::TestConfigTestEnv::test_passenv_with_factor",
"tests/test_config.py::TestConfigTestEnv::test_passenv_from_global_env",
"tests/test_config.py::TestConfigTestEnv::test_passenv_glob_from_global_env",
"tests/test_config.py::TestConfigTestEnv::test_changedir_override",
"tests/test_config.py::TestConfigTestEnv::test_install_command_setting",
"tests/test_config.py::TestConfigTestEnv::test_install_command_must_contain_packages",
"tests/test_config.py::TestConfigTestEnv::test_install_command_substitutions",
"tests/test_config.py::TestConfigTestEnv::test_pip_pre",
"tests/test_config.py::TestConfigTestEnv::test_pip_pre_cmdline_override",
"tests/test_config.py::TestConfigTestEnv::test_simple",
"tests/test_config.py::TestConfigTestEnv::test_substitution_error",
"tests/test_config.py::TestConfigTestEnv::test_substitution_defaults",
"tests/test_config.py::TestConfigTestEnv::test_substitution_notfound_issue246",
"tests/test_config.py::TestConfigTestEnv::test_substitution_notfound_issue515",
"tests/test_config.py::TestConfigTestEnv::test_substitution_nested_env_defaults",
"tests/test_config.py::TestConfigTestEnv::test_substitution_positional",
"tests/test_config.py::TestConfigTestEnv::test_substitution_noargs_issue240",
"tests/test_config.py::TestConfigTestEnv::test_substitution_double",
"tests/test_config.py::TestConfigTestEnv::test_posargs_backslashed_or_quoted",
"tests/test_config.py::TestConfigTestEnv::test_rewrite_posargs",
"tests/test_config.py::TestConfigTestEnv::test_rewrite_simple_posargs",
"tests/test_config.py::TestConfigTestEnv::test_take_dependencies_from_other_testenv[envlist0-deps0]",
"tests/test_config.py::TestConfigTestEnv::test_take_dependencies_from_other_testenv[envlist1-deps1]",
"tests/test_config.py::TestConfigTestEnv::test_take_dependencies_from_other_section",
"tests/test_config.py::TestConfigTestEnv::test_multilevel_substitution",
"tests/test_config.py::TestConfigTestEnv::test_recursive_substitution_cycle_fails",
"tests/test_config.py::TestConfigTestEnv::test_single_value_from_other_secton",
"tests/test_config.py::TestConfigTestEnv::test_factors",
"tests/test_config.py::TestConfigTestEnv::test_factor_ops",
"tests/test_config.py::TestConfigTestEnv::test_envconfigs_based_on_factors",
"tests/test_config.py::TestConfigTestEnv::test_default_factors",
"tests/test_config.py::TestConfigTestEnv::test_factors_in_boolean",
"tests/test_config.py::TestConfigTestEnv::test_factors_in_setenv",
"tests/test_config.py::TestConfigTestEnv::test_factor_use_not_checked",
"tests/test_config.py::TestConfigTestEnv::test_factors_groups_touch",
"tests/test_config.py::TestConfigTestEnv::test_period_in_factor",
"tests/test_config.py::TestConfigTestEnv::test_ignore_outcome",
"tests/test_config.py::TestGlobalOptions::test_notest",
"tests/test_config.py::TestGlobalOptions::test_verbosity",
"tests/test_config.py::TestGlobalOptions::test_quiet[args0-0]",
"tests/test_config.py::TestGlobalOptions::test_quiet[args1-1]",
"tests/test_config.py::TestGlobalOptions::test_quiet[args2-2]",
"tests/test_config.py::TestGlobalOptions::test_quiet[args3-3]",
"tests/test_config.py::TestGlobalOptions::test_substitution_jenkins_default",
"tests/test_config.py::TestGlobalOptions::test_substitution_jenkins_context",
"tests/test_config.py::TestGlobalOptions::test_sdist_specification",
"tests/test_config.py::TestGlobalOptions::test_env_selection",
"tests/test_config.py::TestGlobalOptions::test_py_venv",
"tests/test_config.py::TestGlobalOptions::test_default_environments",
"tests/test_config.py::TestGlobalOptions::test_envlist_expansion",
"tests/test_config.py::TestGlobalOptions::test_envlist_cross_product",
"tests/test_config.py::TestGlobalOptions::test_envlist_multiline",
"tests/test_config.py::TestGlobalOptions::test_minversion",
"tests/test_config.py::TestGlobalOptions::test_skip_missing_interpreters_true",
"tests/test_config.py::TestGlobalOptions::test_skip_missing_interpreters_false",
"tests/test_config.py::TestGlobalOptions::test_defaultenv_commandline",
"tests/test_config.py::TestGlobalOptions::test_defaultenv_partial_override",
"tests/test_config.py::TestHashseedOption::test_default",
"tests/test_config.py::TestHashseedOption::test_passing_integer",
"tests/test_config.py::TestHashseedOption::test_passing_string",
"tests/test_config.py::TestHashseedOption::test_passing_empty_string",
"tests/test_config.py::TestHashseedOption::test_passing_no_argument",
"tests/test_config.py::TestHashseedOption::test_setenv",
"tests/test_config.py::TestHashseedOption::test_noset",
"tests/test_config.py::TestHashseedOption::test_noset_with_setenv",
"tests/test_config.py::TestHashseedOption::test_one_random_hashseed",
"tests/test_config.py::TestHashseedOption::test_setenv_in_one_testenv",
"tests/test_config.py::TestSetenv::test_getdict_lazy",
"tests/test_config.py::TestSetenv::test_getdict_lazy_update",
"tests/test_config.py::TestSetenv::test_setenv_uses_os_environ",
"tests/test_config.py::TestSetenv::test_setenv_default_os_environ",
"tests/test_config.py::TestSetenv::test_setenv_uses_other_setenv",
"tests/test_config.py::TestSetenv::test_setenv_recursive_direct",
"tests/test_config.py::TestSetenv::test_setenv_overrides",
"tests/test_config.py::TestSetenv::test_setenv_with_envdir_and_basepython",
"tests/test_config.py::TestSetenv::test_setenv_ordering_1",
"tests/test_config.py::TestSetenv::test_setenv_cross_section_subst_issue294",
"tests/test_config.py::TestSetenv::test_setenv_cross_section_subst_twice",
"tests/test_config.py::TestSetenv::test_setenv_cross_section_mixed",
"tests/test_config.py::TestIndexServer::test_indexserver",
"tests/test_config.py::TestIndexServer::test_parse_indexserver",
"tests/test_config.py::TestIndexServer::test_multiple_homedir_relative_local_indexservers",
"tests/test_config.py::TestConfigConstSubstitutions::test_replace_pathsep_unix[:]",
"tests/test_config.py::TestConfigConstSubstitutions::test_replace_pathsep_unix[;]",
"tests/test_config.py::TestConfigConstSubstitutions::test_pathsep_regex",
"tests/test_config.py::TestParseEnv::test_parse_recreate",
"tests/test_config.py::TestCmdInvocation::test_help",
"tests/test_config.py::TestCmdInvocation::test_version_simple",
"tests/test_config.py::TestCmdInvocation::test_version_no_plugins",
"tests/test_config.py::TestCmdInvocation::test_version_with_normal_plugin",
"tests/test_config.py::TestCmdInvocation::test_version_with_fileless_module",
"tests/test_config.py::TestCmdInvocation::test_listenvs_all_verbose_description",
"tests/test_config.py::TestCmdInvocation::test_listenvs_all_verbose_description_no_additional_environments",
"tests/test_config.py::TestCmdInvocation::test_config_specific_ini",
"tests/test_config.py::TestCmdInvocation::test_override_workdir",
"tests/test_config.py::TestCmdInvocation::test_showconfig_with_force_dep_version",
"tests/test_config.py::test_env_spec[-e",
"tests/test_config.py::TestCommandParser::test_command_parser_for_word",
"tests/test_config.py::TestCommandParser::test_command_parser_for_posargs",
"tests/test_config.py::TestCommandParser::test_command_parser_for_multiple_words",
"tests/test_config.py::TestCommandParser::test_command_parser_for_substitution_with_spaces",
"tests/test_config.py::TestCommandParser::test_command_parser_with_complex_word_set",
"tests/test_config.py::TestCommandParser::test_command_with_runs_of_whitespace",
"tests/test_config.py::TestCommandParser::test_command_with_split_line_in_subst_arguments",
"tests/test_config.py::TestCommandParser::test_command_parsing_for_issue_10"
]
| []
| MIT License | 2,044 | [
"changelog/740.feature.rst",
"tox/config.py"
]
| [
"changelog/740.feature.rst",
"tox/config.py"
]
|