instance_id (string, 10-57) | base_commit (string, 40) | created_at (date, 2014-04-30 14:58:36 to 2025-04-30 20:14:11) | environment_setup_commit (string, 40) | hints_text (string, 0-273k) | patch (string, 251-7.06M) | problem_statement (string, 11-52.5k) | repo (string, 7-53) | test_patch (string, 231-997k) | meta (dict) | version (string, 851 values) | install_config (dict) | requirements (string, 93-34.2k, nullable ⌀) | environment (string, 760-20.5k, nullable ⌀) | FAIL_TO_PASS (list, 1-9.39k) | FAIL_TO_FAIL (list, 0-2.69k) | PASS_TO_PASS (list, 0-7.87k) | PASS_TO_FAIL (list, 0-192) | license_name (string, 55 values) | __index_level_0__ (int64, 0-21.4k) | before_filepaths (list, 1-105) | after_filepaths (list, 1-105)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
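As a minimal sketch of reading one row with the Hugging Face `datasets` library — the dataset id below is a placeholder, since the real one is not shown on this page:

```python
from datasets import load_dataset

# "org/dataset-name" is a hypothetical id standing in for this dataset
ds = load_dataset("org/dataset-name", split="train")

row = ds[0]  # one task instance, with the columns listed above
print(row["instance_id"], row["repo"], row["version"])
print(row["FAIL_TO_PASS"])  # tests that should go from failing to passing
```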
mne-tools__mne-python-2076 | 7f8071891d7a8c2fdaa61ea3a8819394fcd86de2 | 2015-05-09 22:40:37 | edceb8f38349d6dc0cade1c9f8384cc0707ce3e8 | diff --git a/mne/datasets/sample/__init__.py b/mne/datasets/sample/__init__.py
index 1a278cfcc..3e4de83f4 100644
--- a/mne/datasets/sample/__init__.py
+++ b/mne/datasets/sample/__init__.py
@@ -1,4 +1,4 @@
"""MNE sample dataset
"""
-from .sample import data_path, has_sample_data
+from .sample import data_path, has_sample_data, get_version
diff --git a/mne/datasets/sample/sample.py b/mne/datasets/sample/sample.py
index 87ed46446..15d4b1281 100644
--- a/mne/datasets/sample/sample.py
+++ b/mne/datasets/sample/sample.py
@@ -5,7 +5,8 @@
from ...utils import verbose
from ...fixes import partial
-from ..utils import has_dataset, _data_path, _doc
+from ..utils import (has_dataset, _data_path, _data_path_doc,
+ _get_version, _version_doc)
has_sample_data = partial(has_dataset, name='sample')
@@ -18,5 +19,11 @@ def data_path(path=None, force_update=False, update_path=True, download=True,
update_path=update_path, name='sample',
download=download)
-data_path.__doc__ = _doc.format(name='sample',
- conf='MNE_DATASETS_SAMPLE_PATH')
+data_path.__doc__ = _data_path_doc.format(name='sample',
+ conf='MNE_DATASETS_SAMPLE_PATH')
+
+
+def get_version():
+ return _get_version('sample')
+
+get_version.__doc__ = _version_doc.format(name='sample')
diff --git a/mne/datasets/somato/__init__.py b/mne/datasets/somato/__init__.py
index ab144bc9d..aa3f82d8d 100644
--- a/mne/datasets/somato/__init__.py
+++ b/mne/datasets/somato/__init__.py
@@ -1,4 +1,4 @@
"""Somatosensory dataset
"""
-from .somato import data_path, has_somato_data
+from .somato import data_path, has_somato_data, get_version
diff --git a/mne/datasets/somato/somato.py b/mne/datasets/somato/somato.py
index 525f2af3d..d0daf987b 100644
--- a/mne/datasets/somato/somato.py
+++ b/mne/datasets/somato/somato.py
@@ -1,11 +1,12 @@
-# Authors: Alexandre Gramfort <[email protected]>
+# Authors: Alexandre Gramfort <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# License: BSD Style.
from ...utils import verbose
from ...fixes import partial
-from ..utils import has_dataset, _data_path, _doc
+from ..utils import (has_dataset, _data_path, _data_path_doc,
+ _get_version, _version_doc)
has_somato_data = partial(has_dataset, name='somato')
@@ -18,5 +19,11 @@ def data_path(path=None, force_update=False, update_path=True, download=True,
update_path=update_path, name='somato',
download=download)
-data_path.__doc__ = _doc.format(name='somato',
- conf='MNE_DATASETS_SOMATO_PATH')
+data_path.__doc__ = _data_path_doc.format(name='somato',
+ conf='MNE_DATASETS_SOMATO_PATH')
+
+
+def get_version():
+ return _get_version('somato')
+
+get_version.__doc__ = _version_doc.format(name='somato')
diff --git a/mne/datasets/spm_face/__init__.py b/mne/datasets/spm_face/__init__.py
index 43b513e9f..90f01c728 100644
--- a/mne/datasets/spm_face/__init__.py
+++ b/mne/datasets/spm_face/__init__.py
@@ -1,4 +1,4 @@
"""SPM face dataset
"""
-from .spm_data import data_path, has_spm_data
+from .spm_data import data_path, has_spm_data, get_version
diff --git a/mne/datasets/spm_face/spm_data.py b/mne/datasets/spm_face/spm_data.py
index ca681de16..19c6461ee 100644
--- a/mne/datasets/spm_face/spm_data.py
+++ b/mne/datasets/spm_face/spm_data.py
@@ -4,7 +4,8 @@
from ...utils import verbose
from ...fixes import partial
-from ..utils import has_dataset, _data_path, _doc
+from ..utils import (has_dataset, _data_path, _data_path_doc,
+ _get_version, _version_doc)
has_spm_data = partial(has_dataset, name='spm')
@@ -17,5 +18,11 @@ def data_path(path=None, force_update=False, update_path=True, download=True,
update_path=update_path, name='spm',
download=download)
-data_path.__doc__ = _doc.format(name='spm',
- conf='MNE_DATASETS_SPM_DATA_PATH')
+data_path.__doc__ = _data_path_doc.format(name='spm',
+ conf='MNE_DATASETS_SPM_DATA_PATH')
+
+
+def get_version():
+ return _get_version('spm')
+
+get_version.__doc__ = _version_doc.format(name='spm')
diff --git a/mne/datasets/utils.py b/mne/datasets/utils.py
index 3ede4d718..be6f5fb0d 100644
--- a/mne/datasets/utils.py
+++ b/mne/datasets/utils.py
@@ -17,7 +17,7 @@ from ..externals.six import string_types
from ..externals.six.moves import input
-_doc = """Get path to local copy of {name} dataset
+_data_path_doc = """Get path to local copy of {name} dataset
Parameters
----------
@@ -49,6 +49,16 @@ _doc = """Get path to local copy of {name} dataset
"""
+_version_doc = """Get version of the local {name} dataset
+
+ Returns
+ -------
+ version : str | None
+ Version of the {name} local dataset, or None if the dataset
+ does not exist locally.
+"""
+
+
def _dataset_version(path, name):
"""Get the version of the dataset"""
ver_fname = op.join(path, 'version.txt')
@@ -64,7 +74,7 @@ def _dataset_version(path, name):
def _data_path(path=None, force_update=False, update_path=True, download=True,
- name=None, check_version=True):
+ name=None, check_version=False, return_version=False):
"""Aux function
"""
key = {'sample': 'MNE_DATASETS_SAMPLE_PATH',
@@ -229,8 +239,14 @@ def _data_path(path=None, force_update=False, update_path=True, download=True,
'you may need to update the {name} dataset by using '
'mne.datasets.{name}.data_path(force_update=True)'.format(
name=name, current=data_version, newest=mne_version))
+ return (path, data_version) if return_version else path
+
- return path
+def _get_version(name):
+ """Helper to get a dataset version"""
+ if not has_dataset(name):
+ return None
+ return _data_path(name=name, return_version=True)[1]
def has_dataset(name):
diff --git a/mne/io/base.py b/mne/io/base.py
index 501e8a7f1..cb3623324 100644
--- a/mne/io/base.py
+++ b/mne/io/base.py
@@ -1054,6 +1054,7 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin,
block : bool
Whether to halt program execution until the figure is closed.
Useful for setting bad channels on the fly (click on line).
+ May not work on all systems / platforms.
highpass : float | None
Highpass to apply when displaying data.
lowpass : float | None
diff --git a/mne/viz/raw.py b/mne/viz/raw.py
index 518280032..1bea5a1f4 100644
--- a/mne/viz/raw.py
+++ b/mne/viz/raw.py
@@ -373,6 +373,7 @@ def plot_raw(raw, events=None, duration=10.0, start=0.0, n_channels=None,
block : bool
Whether to halt program execution until the figure is closed.
Useful for setting bad channels on the fly by clicking on a line.
+ May not work on all systems / platforms.
highpass : float | None
Highpass to apply when displaying data.
lowpass : float | None
@@ -623,7 +624,10 @@ def plot_raw(raw, events=None, duration=10.0, start=0.0, n_channels=None,
_toggle_options(None, params)
if show:
- plt.show(block=block)
+ try:
+ plt.show(block=block)
+ except TypeError: # not all versions have this
+ plt.show()
return fig
diff --git a/setup.py b/setup.py
index 9cbd8af53..353d7c37d 100755
--- a/setup.py
+++ b/setup.py
@@ -71,6 +71,7 @@ if __name__ == "__main__":
'mne.datasets.somato',
'mne.datasets.spm_face',
'mne.datasets.testing',
+ 'mne.datasets.tests',
'mne.externals',
'mne.io', 'mne.io.tests',
'mne.io.array', 'mne.io.array.tests',
| Q: Dataset versions
Have we gotten any useful information from embedding dataset versions in our tarballs? As far as I can tell it has just led to a bunch of warnings when doing imports (see e.g. Mainak's recent notebook examples). I propose we get rid of them for now, and replace them with a new system in the future if need be. But for now I think it's beyond YAGNI and into the adding-annoyance category... | mne-tools/mne-python | diff --git a/mne/datasets/testing/__init__.py b/mne/datasets/testing/__init__.py
index 816bbf18a..7fa74ee41 100644
--- a/mne/datasets/testing/__init__.py
+++ b/mne/datasets/testing/__init__.py
@@ -1,4 +1,4 @@
"""MNE sample dataset
"""
-from ._testing import data_path, requires_testing_data
+from ._testing import data_path, requires_testing_data, get_version
diff --git a/mne/datasets/testing/_testing.py b/mne/datasets/testing/_testing.py
index fe23cf4fb..932bd2e20 100644
--- a/mne/datasets/testing/_testing.py
+++ b/mne/datasets/testing/_testing.py
@@ -7,7 +7,8 @@ import numpy as np
from ...utils import verbose, get_config
from ...fixes import partial
-from ..utils import has_dataset, _data_path, _doc
+from ..utils import (has_dataset, _data_path, _data_path_doc,
+ _get_version, _version_doc)
has_testing_data = partial(has_dataset, name='testing')
@@ -24,8 +25,14 @@ def data_path(path=None, force_update=False, update_path=True,
update_path=update_path, name='testing',
download=download)
-data_path.__doc__ = _doc.format(name='testing',
- conf='MNE_DATASETS_TESTING_PATH')
+data_path.__doc__ = _data_path_doc.format(name='testing',
+ conf='MNE_DATASETS_TESTING_PATH')
+
+
+def get_version():
+ return _get_version('testing')
+
+get_version.__doc__ = _version_doc.format(name='testing')
# Allow forcing of testing dataset skip (for Debian tests) using:
diff --git a/mne/datasets/tests/__init__.py b/mne/datasets/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/mne/datasets/tests/test_datasets.py b/mne/datasets/tests/test_datasets.py
new file mode 100644
index 000000000..76bacaa14
--- /dev/null
+++ b/mne/datasets/tests/test_datasets.py
@@ -0,0 +1,15 @@
+from nose.tools import assert_true
+
+from mne import datasets
+from mne.externals.six import string_types
+
+
+def test_datasets():
+ """Test simple dataset functions
+ """
+ for dname in ('sample', 'somato', 'spm_face', 'testing'):
+ dataset = getattr(datasets, dname)
+ if dataset.data_path(download=False) != '':
+ assert_true(isinstance(dataset.get_version(), string_types))
+ else:
+ assert_true(dataset.get_version() is None)
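For orientation, a brief sketch of the API this test pins down (the version string shown is illustrative, assuming a local copy of the sample dataset):

```python
from mne import datasets

# get_version() is the new public helper added per dataset module
print(datasets.sample.get_version())  # e.g. '0.7', or None if absent

# data_path(download=False) returns '' when the data are missing locally,
# which is the branch test_datasets() uses to pick its expectation
path = datasets.sample.data_path(download=False)
```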
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 10
} | 0.8 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"numpy>=1.16.0",
"pandas>=1.0.0",
"scikit-learn",
"h5py",
"pysurfer",
"nose",
"nose-timer",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.7",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | apptools==5.2.1
certifi @ file:///croot/certifi_1671487769961/work/certifi
configobj==5.0.9
cycler==0.11.0
envisage==7.0.3
exceptiongroup==1.2.2
fonttools==4.38.0
h5py==3.8.0
importlib-metadata==6.7.0
importlib-resources==5.12.0
iniconfig==2.0.0
joblib==1.3.2
kiwisolver==1.4.5
matplotlib==3.5.3
mayavi==4.8.1
-e git+https://github.com/mne-tools/mne-python.git@7f8071891d7a8c2fdaa61ea3a8819394fcd86de2#egg=mne
nibabel==4.0.2
nose==1.3.7
nose-timer==1.0.1
numpy==1.21.6
packaging==24.0
pandas==1.3.5
Pillow==9.5.0
pluggy==1.2.0
pyface==8.0.0
Pygments==2.17.2
pyparsing==3.1.4
pysurfer==0.11.2
pytest==7.4.4
python-dateutil==2.9.0.post0
pytz==2025.2
scikit-learn==1.0.2
scipy==1.7.3
six==1.17.0
threadpoolctl==3.1.0
tomli==2.0.1
traits==6.4.3
traitsui==8.0.0
typing_extensions==4.7.1
vtk==9.3.1
zipp==3.15.0
| name: mne-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=22.3.1=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- apptools==5.2.1
- configobj==5.0.9
- cycler==0.11.0
- envisage==7.0.3
- exceptiongroup==1.2.2
- fonttools==4.38.0
- h5py==3.8.0
- importlib-metadata==6.7.0
- importlib-resources==5.12.0
- iniconfig==2.0.0
- joblib==1.3.2
- kiwisolver==1.4.5
- matplotlib==3.5.3
- mayavi==4.8.1
- nibabel==4.0.2
- nose==1.3.7
- nose-timer==1.0.1
- numpy==1.21.6
- packaging==24.0
- pandas==1.3.5
- pillow==9.5.0
- pluggy==1.2.0
- pyface==8.0.0
- pygments==2.17.2
- pyparsing==3.1.4
- pysurfer==0.11.2
- pytest==7.4.4
- python-dateutil==2.9.0.post0
- pytz==2025.2
- scikit-learn==1.0.2
- scipy==1.7.3
- six==1.17.0
- threadpoolctl==3.1.0
- tomli==2.0.1
- traits==6.4.3
- traitsui==8.0.0
- typing-extensions==4.7.1
- vtk==9.3.1
- zipp==3.15.0
prefix: /opt/conda/envs/mne-python
| [
"mne/datasets/tests/test_datasets.py::test_datasets"
]
| []
| []
| []
| BSD 3-Clause "New" or "Revised" License | 132 | [
"mne/datasets/utils.py",
"mne/datasets/spm_face/spm_data.py",
"mne/datasets/spm_face/__init__.py",
"mne/datasets/somato/somato.py",
"mne/datasets/sample/sample.py",
"mne/datasets/somato/__init__.py",
"setup.py",
"mne/io/base.py",
"mne/viz/raw.py",
"mne/datasets/sample/__init__.py"
]
| [
"mne/datasets/utils.py",
"mne/datasets/spm_face/spm_data.py",
"mne/datasets/spm_face/__init__.py",
"mne/datasets/somato/somato.py",
"mne/datasets/sample/sample.py",
"mne/datasets/somato/__init__.py",
"setup.py",
"mne/io/base.py",
"mne/viz/raw.py",
"mne/datasets/sample/__init__.py"
]
|
|
pre-commit__pre-commit-226 | d97ea30c4bb309a2877fed95323ac8c793c0679f | 2015-05-10 16:40:06 | 9515ca06378d74f1e4f8013db2b5230c1f15edaa | coveralls:
https://coveralls.io/builds/2530482
Coverage decreased (-0.07%) to 99.93% when pulling **8aa931ef823fbeda568db1d3c50c55b04d83fa2e on Lucas-C:master** into **d97ea30c4bb309a2877fed95323ac8c793c0679f on pre-commit:master**.
asottile: :+1: Looks great, address my last couple comments and I'll merge it
coveralls:
https://coveralls.io/builds/2530626
Coverage remained the same at 100.0% when pulling **e1098d96177b1558c586b369b6d6729d8fa1c19c on Lucas-C:master** into **d97ea30c4bb309a2877fed95323ac8c793c0679f on pre-commit:master**.
Lucas-C: Thanks for all your comments!
Everything should be fixed now :)
coveralls:
https://coveralls.io/builds/2530648
Coverage remained the same at 100.0% when pulling **f5504c1f257e8041bc977de64cfb89139d88d676 on Lucas-C:master** into **d97ea30c4bb309a2877fed95323ac8c793c0679f on pre-commit:master**.
Lucas-C: I guess one last to-do will be to update the docs :smile: | diff --git a/pre_commit/clientlib/validate_config.py b/pre_commit/clientlib/validate_config.py
index 44c7cd8..bdd0e2c 100644
--- a/pre_commit/clientlib/validate_config.py
+++ b/pre_commit/clientlib/validate_config.py
@@ -6,6 +6,13 @@ from pre_commit.clientlib.validate_base import is_regex_valid
from pre_commit.errors import FatalError
+_LOCAL_HOOKS_MAGIC_REPO_STRING = 'local'
+
+
+def is_local_hooks(repo_entry):
+ return repo_entry['repo'] == _LOCAL_HOOKS_MAGIC_REPO_STRING
+
+
class InvalidConfigError(FatalError):
pass
@@ -53,7 +60,12 @@ def try_regex(repo, hook, value, field_name):
def validate_config_extra(config):
for repo in config:
- if 'sha' not in repo:
+ if is_local_hooks(repo):
+ if 'sha' in repo:
+ raise InvalidConfigError(
+ '"sha" property provided for local hooks'
+ )
+ elif 'sha' not in repo:
raise InvalidConfigError(
'Missing "sha" field for repository {0}'.format(repo['repo'])
)
diff --git a/pre_commit/commands/run.py b/pre_commit/commands/run.py
index 4e3fb18..5e8745b 100644
--- a/pre_commit/commands/run.py
+++ b/pre_commit/commands/run.py
@@ -18,15 +18,6 @@ from pre_commit.util import noop_context
logger = logging.getLogger('pre_commit')
-class HookExecutor(object):
- def __init__(self, hook, invoker):
- self.hook = hook
- self._invoker = invoker
-
- def invoke(self, filenames):
- return self._invoker(self.hook, filenames)
-
-
def _get_skips(environ):
skips = environ.get('SKIP', '')
return set(skip.strip() for skip in skips.split(',') if skip.strip())
@@ -80,8 +71,7 @@ def get_filenames(args, include_expr, exclude_expr):
return getter(include_expr, exclude_expr)
-def _run_single_hook(hook_executor, args, write, skips=frozenset()):
- hook = hook_executor.hook
+def _run_single_hook(hook, repo, args, write, skips=frozenset()):
filenames = get_filenames(args, hook['files'], hook['exclude'])
if hook['id'] in skips:
_print_user_skipped(hook, write, args)
@@ -95,7 +85,7 @@ def _run_single_hook(hook_executor, args, write, skips=frozenset()):
write(get_hook_message(_hook_msg_start(hook, args.verbose), end_len=6))
sys.stdout.flush()
- retcode, stdout, stderr = hook_executor.invoke(filenames)
+ retcode, stdout, stderr = repo.run_hook(hook, filenames)
if retcode != hook['expected_return_value']:
retcode = 1
@@ -119,19 +109,19 @@ def _run_single_hook(hook_executor, args, write, skips=frozenset()):
return retcode
-def _run_hooks(hook_executors, args, write, environ):
+def _run_hooks(repo_hooks, args, write, environ):
"""Actually run the hooks."""
skips = _get_skips(environ)
retval = 0
- for hook_executor in hook_executors:
- retval |= _run_single_hook(hook_executor, args, write, skips)
+ for repo, hook in repo_hooks:
+ retval |= _run_single_hook(hook, repo, args, write, skips)
return retval
-def get_hook_executors(runner):
+def get_repo_hooks(runner):
for repo in runner.repositories:
- for _, repo_hook in repo.hooks:
- yield HookExecutor(repo_hook, repo.run_hook)
+ for _, hook in repo.hooks:
+ yield (repo, hook)
def _has_unmerged_paths(runner):
@@ -159,13 +149,13 @@ def run(runner, args, write=sys_stdout_write_wrapper, environ=os.environ):
ctx = staged_files_only(runner.cmd_runner)
with ctx:
- hook_executors = list(get_hook_executors(runner))
+ repo_hooks = list(get_repo_hooks(runner))
if args.hook:
- hook_executors = [
- he for he in hook_executors
- if he.hook['id'] == args.hook
+ repo_hooks = [
+ (repo, hook) for repo, hook in repo_hooks
+ if hook['id'] == args.hook
]
- if not hook_executors:
+ if not repo_hooks:
write('No hook with id `{0}`\n'.format(args.hook))
return 1
- return _run_hooks(hook_executors, args, write, environ)
+ return _run_hooks(repo_hooks, args, write, environ)
diff --git a/pre_commit/repository.py b/pre_commit/repository.py
index cbe0535..7ca6a44 100644
--- a/pre_commit/repository.py
+++ b/pre_commit/repository.py
@@ -5,6 +5,10 @@ import shutil
from cached_property import cached_property
+from pre_commit import git
+from pre_commit.clientlib.validate_config import is_local_hooks
+from pre_commit.clientlib.validate_manifest import MANIFEST_JSON_SCHEMA
+from pre_commit.jsonschema_extensions import apply_defaults
from pre_commit.languages.all import languages
from pre_commit.manifest import Manifest
from pre_commit.prefixed_command_runner import PrefixedCommandRunner
@@ -21,10 +25,13 @@ class Repository(object):
@classmethod
def create(cls, config, store):
- repo_path_getter = store.get_repo_path_getter(
- config['repo'], config['sha']
- )
- return cls(config, repo_path_getter)
+ if is_local_hooks(config):
+ return LocalRepository(config)
+ else:
+ repo_path_getter = store.get_repo_path_getter(
+ config['repo'], config['sha']
+ )
+ return cls(config, repo_path_getter)
@cached_property
def repo_url(self):
@@ -111,3 +118,28 @@ class Repository(object):
return languages[hook['language']].run_hook(
self.cmd_runner, hook, file_args,
)
+
+
+class LocalRepository(Repository):
+ def __init__(self, repo_config, repo_path_getter=None):
+ repo_path_getter = None
+ super(LocalRepository, self).__init__(repo_config, repo_path_getter)
+
+ @cached_property
+ def hooks(self):
+ return tuple(
+ (hook['id'], apply_defaults(hook, MANIFEST_JSON_SCHEMA['items']))
+ for hook in self.repo_config['hooks']
+ )
+
+ @cached_property
+ def cmd_runner(self):
+ return PrefixedCommandRunner(git.get_root())
+
+ @cached_property
+ def sha(self):
+ raise NotImplementedError
+
+ @cached_property
+ def manifest(self):
+ raise NotImplementedError
| Add support for pcre / scripts / system hooks definition in .pre-commit-config.yaml
Everything is in the title :)
_Rationale:_ a `pre-commit` user shouldn't have to set up a git repository to configure a pre-commit check that can be defined in 5 lines or less.
_Example:_ taken from the [do_not_commit](https://github.com/pricematch/pricematch-pre-commit-hooks/blob/master/hooks.yaml) hook:
- id: do_not_commit
  name: Block if "DO NOT COMMIT" is found
  entry: DO NOT COMMIT
  language: pcre
  files: ^(.*)$
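For concreteness, a minimal sketch (assuming the `repo: local` magic string the patch below ends up introducing) of declaring and validating such a hook straight from Python, the way the new tests do:

```python
from pre_commit.clientlib.validate_config import (
    CONFIG_JSON_SCHEMA, validate_config_extra)
from pre_commit.jsonschema_extensions import apply_defaults

# One repository entry with 'local' in place of a git URL; local hooks
# must not carry a 'sha' key, or validate_config_extra() raises
config = [{
    'repo': 'local',
    'hooks': [{
        'id': 'do_not_commit',
        'name': 'Block if "DO NOT COMMIT" is found',
        'entry': 'DO NOT COMMIT',
        'language': 'pcre',
        'files': '^(.*)$',
    }],
}]

config = apply_defaults(config, CONFIG_JSON_SCHEMA)
validate_config_extra(config)  # passes: no 'sha' for a local repo
```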
_Suggested solutions:_
1. Allow for pcre / scripts / system hooks definition in _.pre-commit-config.yaml_
2. Allow for the `repo` field in _.pre-commit-config.yaml_ to point to a subfolder (of the git repo configured with `pre-commit`) that contains a _hooks.yaml_.
This currently crashes because `pre-commit` expects to find a git repository in the folder pointed to by `repo`. | pre-commit/pre-commit | diff --git a/testing/fixtures.py b/testing/fixtures.py
index 1b1b802..1c0184a 100644
--- a/testing/fixtures.py
+++ b/testing/fixtures.py
@@ -60,12 +60,16 @@ def write_config(directory, config):
config_file.write(ordered_dump([config], **C.YAML_DUMP_KWARGS))
-def make_consuming_repo(tmpdir_factory, repo_source):
- path = make_repo(tmpdir_factory, repo_source)
- config = make_config_from_repo(path)
- git_path = git_dir(tmpdir_factory)
+def add_config_to_repo(git_path, config):
write_config(git_path, config)
with cwd(git_path):
cmd_output('git', 'add', C.CONFIG_FILE)
cmd_output('git', 'commit', '-m', 'Add hooks config')
return git_path
+
+
+def make_consuming_repo(tmpdir_factory, repo_source):
+ path = make_repo(tmpdir_factory, repo_source)
+ config = make_config_from_repo(path)
+ git_path = git_dir(tmpdir_factory)
+ return add_config_to_repo(git_path, config)
diff --git a/tests/clientlib/validate_config_test.py b/tests/clientlib/validate_config_test.py
index 51eb7e4..c507f28 100644
--- a/tests/clientlib/validate_config_test.py
+++ b/tests/clientlib/validate_config_test.py
@@ -1,5 +1,6 @@
from __future__ import unicode_literals
+import jsonschema
import pytest
from pre_commit.clientlib.validate_config import CONFIG_JSON_SCHEMA
@@ -25,7 +26,7 @@ def test_run(input, expected_output):
assert run(input) == expected_output
[email protected](('manifest_obj', 'expected'), (
[email protected](('config_obj', 'expected'), (
([], False),
(
[{
@@ -66,8 +67,8 @@ def test_run(input, expected_output):
False,
),
))
-def test_is_valid_according_to_schema(manifest_obj, expected):
- ret = is_valid_according_to_schema(manifest_obj, CONFIG_JSON_SCHEMA)
+def test_is_valid_according_to_schema(config_obj, expected):
+ ret = is_valid_according_to_schema(config_obj, CONFIG_JSON_SCHEMA)
assert ret is expected
@@ -121,3 +122,55 @@ def test_config_with_ok_exclude_regex_passes():
CONFIG_JSON_SCHEMA,
)
validate_config_extra(config)
+
+
[email protected]('config_obj', (
+ [{
+ 'repo': 'local',
+ 'sha': 'foo',
+ 'hooks': [{
+ 'id': 'do_not_commit',
+ 'name': 'Block if "DO NOT COMMIT" is found',
+ 'entry': 'DO NOT COMMIT',
+ 'language': 'pcre',
+ 'files': '^(.*)$',
+ }],
+ }],
+))
+def test_config_with_local_hooks_definition_fails(config_obj):
+ with pytest.raises((
+ jsonschema.exceptions.ValidationError, InvalidConfigError
+ )):
+ jsonschema.validate(config_obj, CONFIG_JSON_SCHEMA)
+ config = apply_defaults(config_obj, CONFIG_JSON_SCHEMA)
+ validate_config_extra(config)
+
+
[email protected]('config_obj', (
+ [{
+ 'repo': 'local',
+ 'hooks': [{
+ 'id': 'arg-per-line',
+ 'name': 'Args per line hook',
+ 'entry': 'bin/hook.sh',
+ 'language': 'script',
+ 'files': '',
+ 'args': ['hello', 'world'],
+ }],
+ }],
+ [{
+ 'repo': 'local',
+ 'hooks': [{
+ 'id': 'arg-per-line',
+ 'name': 'Args per line hook',
+ 'entry': 'bin/hook.sh',
+ 'language': 'script',
+ 'files': '',
+ 'args': ['hello', 'world'],
+ }]
+ }],
+))
+def test_config_with_local_hooks_definition_passes(config_obj):
+ jsonschema.validate(config_obj, CONFIG_JSON_SCHEMA)
+ config = apply_defaults(config_obj, CONFIG_JSON_SCHEMA)
+ validate_config_extra(config)
diff --git a/tests/commands/run_test.py b/tests/commands/run_test.py
index 9cca610..aad0611 100644
--- a/tests/commands/run_test.py
+++ b/tests/commands/run_test.py
@@ -14,10 +14,12 @@ from pre_commit.commands.run import _get_skips
from pre_commit.commands.run import _has_unmerged_paths
from pre_commit.commands.run import get_changed_files
from pre_commit.commands.run import run
+from pre_commit.ordereddict import OrderedDict
from pre_commit.runner import Runner
from pre_commit.util import cmd_output
from pre_commit.util import cwd
from testing.auto_namedtuple import auto_namedtuple
+from testing.fixtures import add_config_to_repo
from testing.fixtures import make_consuming_repo
@@ -81,7 +83,7 @@ def _test_run(repo, options, expected_outputs, expected_ret, stage):
stage_a_file()
args = _get_opts(**options)
ret, printed = _do_run(repo, args)
- assert ret == expected_ret
+ assert ret == expected_ret, (ret, expected_ret, printed)
for expected_output_part in expected_outputs:
assert expected_output_part in printed
@@ -313,9 +315,7 @@ def test_lots_of_files(mock_out_store_directory, tmpdir_factory):
git_path = make_consuming_repo(tmpdir_factory, 'python_hooks_repo')
with cwd(git_path):
# Override files so we run against them
- with io.open(
- '.pre-commit-config.yaml', 'a+',
- ) as config_file:
+ with io.open('.pre-commit-config.yaml', 'a+') as config_file:
config_file.write(' files: ""\n')
# Write a crap ton of files
@@ -334,3 +334,66 @@ def test_lots_of_files(mock_out_store_directory, tmpdir_factory):
stderr=subprocess.STDOUT,
env=env,
)
+
+
+def test_local_hook_passes(
+ repo_with_passing_hook, mock_out_store_directory,
+):
+ config = OrderedDict((
+ ('repo', 'local'),
+ ('hooks', (OrderedDict((
+ ('id', 'pylint'),
+ ('name', 'PyLint'),
+ ('entry', 'python -m pylint.__main__'),
+ ('language', 'system'),
+ ('files', r'\.py$'),
+ )), OrderedDict((
+ ('id', 'do_not_commit'),
+ ('name', 'Block if "DO NOT COMMIT" is found'),
+ ('entry', 'DO NOT COMMIT'),
+ ('language', 'pcre'),
+ ('files', '^(.*)$'),
+ ))))
+ ))
+ add_config_to_repo(repo_with_passing_hook, config)
+
+ with io.open('dummy.py', 'w') as staged_file:
+ staged_file.write('"""TODO: something"""\n')
+ cmd_output('git', 'add', 'dummy.py')
+
+ _test_run(
+ repo_with_passing_hook,
+ options={},
+ expected_outputs=[''],
+ expected_ret=0,
+ stage=False
+ )
+
+
+def test_local_hook_fails(
+ repo_with_passing_hook, mock_out_store_directory,
+):
+ config = OrderedDict((
+ ('repo', 'local'),
+ ('hooks', [OrderedDict((
+ ('id', 'no-todo'),
+ ('name', 'No TODO'),
+ ('entry', 'grep -iI todo'),
+ ('expected_return_value', 1),
+ ('language', 'system'),
+ ('files', ''),
+ ))])
+ ))
+ add_config_to_repo(repo_with_passing_hook, config)
+
+ with io.open('dummy.py', 'w') as staged_file:
+ staged_file.write('"""TODO: something"""\n')
+ cmd_output('git', 'add', 'dummy.py')
+
+ _test_run(
+ repo_with_passing_hook,
+ options={},
+ expected_outputs=[''],
+ expected_ret=1,
+ stage=False
+ )
diff --git a/tests/repository_test.py b/tests/repository_test.py
index cde6a76..c0bd079 100644
--- a/tests/repository_test.py
+++ b/tests/repository_test.py
@@ -12,6 +12,7 @@ from pre_commit.clientlib.validate_config import CONFIG_JSON_SCHEMA
from pre_commit.clientlib.validate_config import validate_config_extra
from pre_commit.jsonschema_extensions import apply_defaults
from pre_commit.languages.python import PythonEnv
+from pre_commit.ordereddict import OrderedDict
from pre_commit.repository import Repository
from pre_commit.util import cmd_output
from pre_commit.util import cwd
@@ -377,3 +378,22 @@ def test_tags_on_repositories(in_tmpdir, tmpdir_factory, store):
ret = repo_2.run_hook(repo_2.hooks[0][1], ['bar'])
assert ret[0] == 0
assert ret[1] == 'bar\nHello World\n'
+
+
+def test_local_repository():
+ config = OrderedDict((
+ ('repo', 'local'),
+ ('hooks', [OrderedDict((
+ ('id', 'do_not_commit'),
+ ('name', 'Block if "DO NOT COMMIT" is found'),
+ ('entry', 'DO NOT COMMIT'),
+ ('language', 'pcre'),
+ ('files', '^(.*)$'),
+ ))])
+ ))
+ local_repo = Repository.create(config, 'dummy')
+ with pytest.raises(NotImplementedError):
+ local_repo.sha
+ with pytest.raises(NotImplementedError):
+ local_repo.manifest
+ assert len(local_repo.hooks) == 1
diff --git a/tests/runner_test.py b/tests/runner_test.py
index b1a5d5d..7399c4d 100644
--- a/tests/runner_test.py
+++ b/tests/runner_test.py
@@ -5,8 +5,10 @@ import os
import os.path
import pre_commit.constants as C
+from pre_commit.ordereddict import OrderedDict
from pre_commit.runner import Runner
from pre_commit.util import cwd
+from testing.fixtures import add_config_to_repo
from testing.fixtures import git_dir
from testing.fixtures import make_consuming_repo
@@ -52,6 +54,31 @@ def test_repositories(tmpdir_factory, mock_out_store_directory):
assert len(runner.repositories) == 1
+def test_local_hooks(tmpdir_factory, mock_out_store_directory):
+ config = OrderedDict((
+ ('repo', 'local'),
+ ('hooks', (OrderedDict((
+ ('id', 'arg-per-line'),
+ ('name', 'Args per line hook'),
+ ('entry', 'bin/hook.sh'),
+ ('language', 'script'),
+ ('files', ''),
+ ('args', ['hello', 'world']),
+ )), OrderedDict((
+ ('id', 'do_not_commit'),
+ ('name', 'Block if "DO NOT COMMIT" is found'),
+ ('entry', 'DO NOT COMMIT'),
+ ('language', 'pcre'),
+ ('files', '^(.*)$'),
+ ))))
+ ))
+ git_path = git_dir(tmpdir_factory)
+ add_config_to_repo(git_path, config)
+ runner = Runner(git_path)
+ assert len(runner.repositories) == 1
+ assert len(runner.repositories[0].hooks) == 2
+
+
def test_pre_commit_path():
runner = Runner(os.path.join('foo', 'bar'))
expected_path = os.path.join('foo', 'bar', '.git', 'hooks', 'pre-commit')
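A compact sketch of the dispatch this change adds: `Repository.create` returns a `LocalRepository` for the magic `'local'` repo string, so the store argument is never touched — which is why the new test can pass a dummy string, as mirrored here:

```python
from pre_commit.repository import Repository

config = {
    'repo': 'local',
    'hooks': [{
        'id': 'do_not_commit',
        'name': 'Block if "DO NOT COMMIT" is found',
        'entry': 'DO NOT COMMIT',
        'language': 'pcre',
        'files': '^(.*)$',
    }],
}
local_repo = Repository.create(config, 'dummy')  # a LocalRepository
assert len(local_repo.hooks) == 1
# local_repo.sha and local_repo.manifest raise NotImplementedError
```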
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 3
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aspy.yaml==1.3.0
astroid==1.3.2
attrs==22.2.0
cached-property==1.5.2
certifi==2021.5.30
coverage==6.2
distlib==0.3.9
filelock==3.4.1
flake8==5.0.4
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
jsonschema==3.2.0
logilab-common==1.9.7
mccabe==0.7.0
mock==5.2.0
mypy-extensions==1.0.0
nodeenv==1.6.0
ordereddict==1.1
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
-e git+https://github.com/pre-commit/pre-commit.git@d97ea30c4bb309a2877fed95323ac8c793c0679f#egg=pre_commit
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
pylint==1.3.1
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
PyYAML==6.0.1
simplejson==3.20.1
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
virtualenv==20.17.1
zipp==3.6.0
| name: pre-commit
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- argparse==1.4.0
- aspy-yaml==1.3.0
- astroid==1.3.2
- attrs==22.2.0
- cached-property==1.5.2
- coverage==6.2
- distlib==0.3.9
- filelock==3.4.1
- flake8==5.0.4
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- jsonschema==3.2.0
- logilab-common==1.9.7
- mccabe==0.7.0
- mock==5.2.0
- mypy-extensions==1.0.0
- nodeenv==1.6.0
- ordereddict==1.1
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pylint==1.3.1
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pyyaml==6.0.1
- simplejson==3.20.1
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- virtualenv==20.17.1
- zipp==3.6.0
prefix: /opt/conda/envs/pre-commit
| [
"tests/clientlib/validate_config_test.py::test_config_with_local_hooks_definition_fails[config_obj0]",
"tests/clientlib/validate_config_test.py::test_config_with_local_hooks_definition_passes[config_obj0]",
"tests/clientlib/validate_config_test.py::test_config_with_local_hooks_definition_passes[config_obj1]",
"tests/repository_test.py::test_local_repository"
]
| [
"tests/clientlib/validate_config_test.py::test_run[input0-0]",
"tests/clientlib/validate_config_test.py::test_run[input1-0]",
"tests/commands/run_test.py::test_lots_of_files",
"tests/repository_test.py::test_python_hook",
"tests/repository_test.py::test_python_hook_args_with_spaces",
"tests/repository_test.py::test_versioned_python_hook",
"tests/repository_test.py::test_run_a_node_hook",
"tests/repository_test.py::test_run_versioned_node_hook",
"tests/repository_test.py::test_run_a_ruby_hook",
"tests/repository_test.py::test_run_versioned_ruby_hook",
"tests/repository_test.py::test_system_hook_with_spaces",
"tests/repository_test.py::test_run_a_script_hook",
"tests/repository_test.py::test_run_hook_with_spaced_args",
"tests/repository_test.py::test_pcre_hook_no_match",
"tests/repository_test.py::test_pcre_hook_matching",
"tests/repository_test.py::test_pcre_many_files",
"tests/repository_test.py::test_cwd_of_hook",
"tests/repository_test.py::test_lots_of_files",
"tests/repository_test.py::test_languages",
"tests/repository_test.py::test_reinstall",
"tests/repository_test.py::test_control_c_control_c_on_install",
"tests/repository_test.py::test_really_long_file_paths",
"tests/repository_test.py::test_config_overrides_repo_specifics",
"tests/repository_test.py::test_tags_on_repositories",
"tests/runner_test.py::test_repositories",
"tests/runner_test.py::test_local_hooks"
]
| [
"tests/clientlib/validate_config_test.py::test_run[input2-1]",
"tests/clientlib/validate_config_test.py::test_run[input3-1]",
"tests/clientlib/validate_config_test.py::test_run[input4-1]",
"tests/clientlib/validate_config_test.py::test_is_valid_according_to_schema[config_obj0-False]",
"tests/clientlib/validate_config_test.py::test_is_valid_according_to_schema[config_obj1-True]",
"tests/clientlib/validate_config_test.py::test_is_valid_according_to_schema[config_obj2-True]",
"tests/clientlib/validate_config_test.py::test_is_valid_according_to_schema[config_obj3-False]",
"tests/clientlib/validate_config_test.py::test_config_with_failing_regexes_fails",
"tests/clientlib/validate_config_test.py::test_config_with_ok_regexes_passes",
"tests/clientlib/validate_config_test.py::test_config_with_invalid_exclude_regex_fails",
"tests/clientlib/validate_config_test.py::test_config_with_ok_exclude_regex_passes",
"tests/commands/run_test.py::test_has_unmerged_paths[some-True]",
"tests/commands/run_test.py::test_has_unmerged_paths[-False]",
"tests/commands/run_test.py::test_get_skips[environ0-expected_output0]",
"tests/commands/run_test.py::test_get_skips[environ1-expected_output1]",
"tests/commands/run_test.py::test_get_skips[environ2-expected_output2]",
"tests/commands/run_test.py::test_get_skips[environ3-expected_output3]",
"tests/commands/run_test.py::test_get_skips[environ4-expected_output4]",
"tests/commands/run_test.py::test_get_skips[environ5-expected_output5]",
"tests/commands/run_test.py::test_get_skips[environ6-expected_output6]",
"tests/commands/run_test.py::test_get_changed_files",
"tests/repository_test.py::test_repo_url",
"tests/repository_test.py::test_sha",
"tests/runner_test.py::test_init_has_no_side_effects",
"tests/runner_test.py::test_create_sets_correct_directory",
"tests/runner_test.py::test_create_changes_to_git_root",
"tests/runner_test.py::test_config_file_path",
"tests/runner_test.py::test_pre_commit_path",
"tests/runner_test.py::test_pre_push_path",
"tests/runner_test.py::test_cmd_runner"
]
| []
| MIT License | 133 | [
"pre_commit/clientlib/validate_config.py",
"pre_commit/repository.py",
"pre_commit/commands/run.py"
]
| [
"pre_commit/clientlib/validate_config.py",
"pre_commit/repository.py",
"pre_commit/commands/run.py"
]
|
mne-tools__mne-python-2080 | f2ab0641f087c63beab715ff5fdaa9399c0807fe | 2015-05-10 19:50:29 | edceb8f38349d6dc0cade1c9f8384cc0707ce3e8 | Eric89GXL: Okay, this has some potentially nice (up to 2x) memory-saving changes for EDF, but I don't think we can get the same benefit for KIT (at least not easily). This unifies `preload_data` and how `preload` is handled across raw types, and allows `data_buffer` to be used for EDF and KIT as well as FIF.
Ready for review from my end. @teonlamont please look closely to make sure it makes sense. Tests still pass, and I've been relying on those being correct to make sure things all worked properly.
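To make the unified behavior concrete, a hedged sketch of the three `preload` modes this PR supports for the EDF reader (file names are hypothetical):

```python
import mne

# preload=False: lazy; _read_segment() reads chunks from disk on demand
raw = mne.io.read_raw_edf('recording.edf', preload=False)

# preload_data() now lives on _BaseRaw, so every raw type shares it
raw.preload_data()  # loads everything into a float64 array in RAM

# preload='fname': the data go into a np.memmap on disk (less memory)
raw_mm = mne.io.read_raw_edf('recording.edf', preload='raw_data.dat')
```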
agramfort: @teonlamont please take a close look | diff --git a/mne/io/base.py b/mne/io/base.py
index 9a3c83be2..578109de7 100644
--- a/mne/io/base.py
+++ b/mne/io/base.py
@@ -215,22 +215,39 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin,
Subclasses must provide the following methods:
- * _read_segment(start, stop, sel, projector, verbose)
+ * _read_segment(start, stop, sel, data_buffer, projector, verbose)
(only needed for types that support on-demand disk reads)
"""
@verbose
- def __init__(self, info, data=None,
+ def __init__(self, info, preload=False,
first_samps=(0,), last_samps=None,
filenames=(), rawdirs=(),
comp=None, orig_comp_grade=None,
- orig_format='double', verbose=None):
- # some functions (e.g., filtering) only work w/64-bit data
- if data is not None:
- if data.dtype not in (np.float64, np.complex128):
+ orig_format='double',
+ verbose=None):
+ # wait until the end to preload data, but triage here
+ if isinstance(preload, np.ndarray):
+ # some functions (e.g., filtering) only work w/64-bit data
+ if preload.dtype not in (np.float64, np.complex128):
raise RuntimeError('datatype must be float64 or complex128, '
- 'not %s' % data.dtype)
+ 'not %s' % preload.dtype)
+ self._data = preload
+ self.preload = True
+ self._last_samps = np.array([self._data.shape[1] - 1])
+ load_from_disk = False
+ else:
+ if last_samps is None:
+ raise ValueError('last_samps must be given unless preload is '
+ 'an ndarray')
+ if preload is False:
+ self.preload = False
+ load_from_disk = False
+ elif preload is not True and not isinstance(preload, string_types):
+ raise ValueError('bad preload: %s' % preload)
+ else:
+ load_from_disk = True
+ self._last_samps = np.array(last_samps)
self.info = info
- self._data = data
cals = np.empty(info['nchan'])
for k in range(info['nchan']):
cals[k] = info['chs'][k]['range'] * info['chs'][k]['cal']
@@ -240,19 +257,70 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin,
self.comp = comp
self._orig_comp_grade = orig_comp_grade
self._filenames = list(filenames)
- self.preload = True if data is not None else False
self._first_samps = np.array(first_samps)
self.orig_format = orig_format
- if data is not None:
- self._last_samps = np.array([self._data.shape[1] - 1])
- else:
- self._last_samps = np.array(last_samps)
self._projectors = list()
self._projector = None
+ # If we have True or a string, actually do the preloading
+ if load_from_disk:
+ self._preload_data(preload)
- def _read_segment(start, stop, sel, projector, verbose):
+ def _read_segment(start, stop, sel, data_buffer, projector, verbose):
+ """Read a chunk of raw data
+
+ Parameters
+ ----------
+ start : int, (optional)
+ first sample to include (first is 0). If omitted, defaults to the
+ first sample in data.
+ stop : int, (optional)
+ First sample to not include.
+ If omitted, data is included to the end.
+ sel : array, optional
+ Indices of channels to select.
+ data_buffer : array or str, optional
+ numpy array to fill with data read, must have the correct shape.
+ If str, a np.memmap with the correct data type will be used
+ to store the data.
+ projector : array
+ SSP operator to apply to the data.
+ verbose : bool, str, int, or None
+ If not None, override default verbose level (see mne.verbose).
+
+ Returns
+ -------
+ data : array, [channels x samples]
+ the data matrix (channels x samples).
+ times : array, [samples]
+ returns the time values corresponding to the samples.
+ """
raise NotImplementedError
+ @verbose
+ def preload_data(self, verbose=None):
+ """Preload raw data
+
+ Parameters
+ ----------
+ verbose : bool, str, int, or None
+ If not None, override default verbose level (see mne.verbose).
+
+ Notes
+ -----
+ This function will preload raw data if it was not already preloaded.
+ If data were already preloaded, it will do nothing.
+ """
+ if not self.preload:
+ self._preload_data(True)
+
+ def _preload_data(self, preload):
+ """This function actually preloads the data"""
+ data_buffer = preload if isinstance(preload, string_types) else None
+ self._data = self._read_segment(data_buffer=data_buffer)[0]
+ assert len(self._data) == self.info['nchan']
+ self.preload = True
+ self.close()
+
@property
def first_samp(self):
return self._first_samps[0]
@@ -1344,7 +1412,6 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin,
raws : list, or Raw instance
list of Raw instances to concatenate to the current instance
(in order), or a single raw instance to concatenate.
-
preload : bool, str, or None (default None)
Preload data into memory for data manipulation and faster indexing.
If True, the data will be preloaded into memory (fast, requires
@@ -1414,7 +1481,8 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin,
def close(self):
"""Clean up the object.
- Does nothing for now.
+ Does nothing for objects that close their file descriptors.
+ Things like RawFIF will override this method.
"""
pass
diff --git a/mne/io/edf/edf.py b/mne/io/edf/edf.py
index 5a8eb8e5f..700849294 100644
--- a/mne/io/edf/edf.py
+++ b/mne/io/edf/edf.py
@@ -60,9 +60,12 @@ class RawEDF(_BaseRaw):
-1 corresponds to the last channel.
If None, the annotation channel is not used.
Note: this is overruled by the annotation file if specified.
- preload : bool
- If True, all data are loaded at initialization.
- If False, data are not read until save.
+ preload : bool or str (default False)
+ Preload data into memory for data manipulation and faster indexing.
+ If True, the data will be preloaded into memory (fast, requires
+ large amount of memory). If preload is a string, preload is the
+ file name of a memory-mapped file which is used to store the data
+ on the hard drive (slower, requires less memory).
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
@@ -89,18 +92,9 @@ class RawEDF(_BaseRaw):
# Raw attributes
last_samps = [self._edf_info['nsamples'] - 1]
super(RawEDF, self).__init__(
- info, last_samps=last_samps, orig_format='int', verbose=verbose)
-
- if preload:
- self.preload = preload
- logger.info('Reading raw data from %s...' % input_fname)
- self._data, _ = self._read_segment()
- assert len(self._data) == self.info['nchan']
- self._last_samps = np.array([self._data.shape[1] - 1])
- logger.info(' Range : %d ... %d = %9.3f ... %9.3f secs'
- % (self.first_samp, self.last_samp,
- float(self.first_samp) / self.info['sfreq'],
- float(self.last_samp) / self.info['sfreq']))
+ info, preload, last_samps=last_samps, orig_format='int',
+ verbose=verbose)
+
logger.info('Ready.')
def __repr__(self):
@@ -110,50 +104,27 @@ class RawEDF(_BaseRaw):
"n_channels x n_times : %s x %s" % (n_chan, data_range))
return "<RawEDF | %s>" % ', '.join(s)
- def _read_segment(self, start=0, stop=None, sel=None, verbose=None,
- projector=None):
- """Read a chunk of raw data
-
- Parameters
- ----------
- start : int, (optional)
- first sample to include (first is 0). If omitted, defaults to the
- first sample in data.
- stop : int, (optional)
- First sample to not include.
- If omitted, data is included to the end.
- sel : array, optional
- Indices of channels to select.
- projector : array
- SSP operator to apply to the data.
- verbose : bool, str, int, or None
- If not None, override default verbose level (see mne.verbose).
-
- Returns
- -------
- data : array, [channels x samples]
- the data matrix (channels x samples).
- times : array, [samples]
- returns the time values corresponding to the samples.
- """
+ @verbose
+ def _read_segment(self, start=0, stop=None, sel=None, data_buffer=None,
+ projector=None, verbose=None):
+ """Read a chunk of raw data"""
from scipy.interpolate import interp1d
if sel is None:
- sel = list(range(self.info['nchan']))
- elif len(sel) == 1 and sel[0] == 0 and start == 0 and stop == 1:
- return (666, 666)
+ sel = np.arange(self.info['nchan'])
if projector is not None:
raise NotImplementedError('Currently does not handle projections.')
if stop is None:
stop = self.last_samp + 1
elif stop > self.last_samp + 1:
stop = self.last_samp + 1
+ sel = np.array(sel)
# Initial checks
start = int(start)
stop = int(stop)
n_samps = self._edf_info['n_samps']
- max_samp = self._edf_info['max_samp']
+ buf_len = self._edf_info['max_samp']
sfreq = self.info['sfreq']
n_chan = self.info['nchan']
data_size = self._edf_info['data_size']
@@ -165,8 +136,8 @@ class RawEDF(_BaseRaw):
subtype = self._edf_info['subtype']
# this is used to deal with indexing in the middle of a sampling period
- blockstart = int(floor(float(start) / max_samp) * max_samp)
- blockstop = int(ceil(float(stop) / max_samp) * max_samp)
+ blockstart = int(floor(float(start) / buf_len) * buf_len)
+ blockstop = int(ceil(float(stop) / buf_len) * buf_len)
if blockstop > self.last_samp:
blockstop = self.last_samp + 1
@@ -188,100 +159,179 @@ class RawEDF(_BaseRaw):
picks = [stim_channel, tal_channel]
offsets[picks] = 0
- with open(self.info['filename'], 'rb') as fid:
+ # set up output array
+ data_shape = (len(sel), stop - start)
+ if isinstance(data_buffer, np.ndarray):
+ if data_buffer.shape != data_shape:
+ raise ValueError('data_buffer has incorrect shape')
+ data = data_buffer
+ else:
+ data = np.empty(data_shape, dtype=float)
+
+ read_size = blockstop - blockstart
+ this_data = np.empty((len(sel), buf_len))
+ """
+ Consider this example:
+
+ tmin, tmax = (2, 27)
+ read_size = 30
+ buf_len = 10
+ sfreq = 1.
+
+ +---------+---------+---------+
+ File structure: | buf0 | buf1 | buf2 |
+ +---------+---------+---------+
+ File time: 0 10 20 30
+ +---------+---------+---------+
+ Requested time: 2 27
+
+ | |
+ blockstart blockstop
+ | |
+ start stop
+
+ We need 27 - 2 = 25 samples (per channel) to store our data, and
+ we need to read from 3 buffers (30 samples) to get all of our data.
+
+ On all reads but the first, the data we read starts at
+ the first sample of the buffer. On all reads but the last,
+ the data we read ends on the last sample of the buffer.
+
+ We call this_data the variable that stores the current buffer's data,
+ and data the variable that stores the total output.
+
+ On the first read, we need to do this::
+
+ >>> data[0:buf_len-2] = this_data[2:buf_len]
+
+ On the second read, we need to do::
+
+ >>> data[1*buf_len-2:2*buf_len-2] = this_data[0:buf_len]
+
+ On the final read, we need to do::
+
+ >>> data[2*buf_len-2:3*buf_len-2-3] = this_data[0:buf_len-3]
+
+ """
+ with open(self.info['filename'], 'rb', buffering=0) as fid:
# extract data
- fid.seek(data_offset)
- buffer_size = blockstop - blockstart
- pointer = blockstart * n_chan * data_size
- fid.seek(data_offset + pointer)
- datas = np.empty((n_chan, buffer_size), dtype=float)
- blocks = int(ceil(float(buffer_size) / max_samp))
- for i in range(blocks):
- data = np.empty((n_chan, max_samp), dtype=np.int32)
+ fid.seek(data_offset + blockstart * n_chan * data_size)
+ n_blk = int(ceil(float(read_size) / buf_len))
+ start_offset = start - blockstart
+ end_offset = blockstop - stop
+ for bi in range(n_blk):
+ # Triage start (sidx) and end (eidx) indices for
+ # data (d) and read (r)
+ if bi == 0:
+ d_sidx = 0
+ r_sidx = start_offset
+ else:
+ d_sidx = bi * buf_len - start_offset
+ r_sidx = 0
+ if bi == n_blk - 1:
+ d_eidx = data_shape[1]
+ r_eidx = buf_len - end_offset
+ else:
+ d_eidx = (bi + 1) * buf_len - start_offset
+ r_eidx = buf_len
+ n_buf_samp = r_eidx - r_sidx
+ count = 0
for j, samp in enumerate(n_samps):
# bdf data: 24bit data
- if subtype in ('24BIT', 'bdf'):
- ch_data = np.fromfile(fid, dtype=np.uint8,
- count=samp * data_size)
- ch_data = ch_data.reshape(-1, 3).astype(np.int32)
- ch_data = ((ch_data[:, 0]) +
- (ch_data[:, 1] << 8) +
- (ch_data[:, 2] << 16))
- # 24th bit determines the sign
- ch_data[ch_data >= (1 << 23)] -= (1 << 24)
- # edf data: 16bit data
+ if j not in sel:
+ fid.seek(samp * data_size, 1)
+ continue
+ if samp == buf_len:
+ # use faster version with skips built in
+ if r_sidx > 0:
+ fid.seek(r_sidx * data_size, 1)
+ ch_data = _read_ch(fid, subtype, n_buf_samp, data_size)
+ if r_eidx < buf_len:
+ fid.seek((buf_len - r_eidx) * data_size, 1)
else:
- ch_data = np.fromfile(fid, dtype='<i2', count=samp)
- if j == tal_channel:
- # don't resample tal_channel,
- # pad with zeros instead.
- n_missing = int(max_samp - samp)
- ch_data = np.hstack([ch_data,
- [0] * n_missing])
- elif j == stim_channel and samp < max_samp:
- if annot and annotmap or \
- tal_channel is not None:
- # don't bother with resampling the stim ch
- # because it gets overwritten later on.
- ch_data = np.zeros(max_samp)
+ # read in all the data and triage appropriately
+ ch_data = _read_ch(fid, subtype, samp, data_size)
+ if j == tal_channel:
+ # don't resample tal_channel,
+ # pad with zeros instead.
+ n_missing = int(buf_len - samp)
+ ch_data = np.hstack([ch_data, [0] * n_missing])
+ ch_data = ch_data[r_sidx:r_eidx]
+ elif j == stim_channel:
+ if annot and annotmap or \
+ tal_channel is not None:
+ # don't bother with resampling the stim ch
+ # because it gets overwritten later on.
+ ch_data = np.zeros(n_buf_samp)
+ else:
+ warnings.warn('Interpolating stim channel.'
+ ' Events may jitter.')
+ oldrange = np.linspace(0, 1, samp + 1, True)
+ newrange = np.linspace(0, 1, buf_len, False)
+ newrange = newrange[r_sidx:r_eidx]
+ ch_data = interp1d(
+ oldrange, np.append(ch_data, 0),
+ kind='zero')(newrange)
else:
- warnings.warn('Interpolating stim channel.'
- ' Events may jitter.')
- oldrange = np.linspace(0, 1, samp + 1,
- True)
- newrange = np.linspace(0, 1, max_samp,
- False)
- ch_data = interp1d(
- oldrange, np.append(ch_data, 0),
- kind='zero')(newrange)
- elif samp != max_samp:
- ch_data = resample(x=ch_data, up=max_samp, down=samp,
- npad=0)
- data[j] = ch_data
- start_pt = int(max_samp * i)
- stop_pt = int(start_pt + max_samp)
- datas[:, start_pt:stop_pt] = data
- datas *= gains.T
- datas += offsets
+ ch_data = resample(ch_data, buf_len, samp,
+ npad=0)[r_sidx:r_eidx]
+ this_data[count, :n_buf_samp] = ch_data
+ count += 1
+ data[:, d_sidx:d_eidx] = this_data[:, :n_buf_samp]
+ data *= gains.T[sel]
+ data += offsets[sel]
if stim_channel is not None:
+ stim_channel_idx = np.where(sel == stim_channel)[0][0]
if annot and annotmap:
- datas[stim_channel] = 0
evts = _read_annot(annot, annotmap, sfreq, self.last_samp)
- datas[stim_channel, :evts.size] = evts[start:stop]
+ stim = evts[start:stop]
elif tal_channel is not None:
- evts = _parse_tal_channel(datas[tal_channel])
+ tal_channel_idx = np.where(sel == tal_channel)[0][0]
+ evts = _parse_tal_channel(data[tal_channel_idx])
self._edf_info['events'] = evts
unique_annots = sorted(set([e[2] for e in evts]))
mapping = dict((a, n + 1) for n, a in enumerate(unique_annots))
- datas[stim_channel] = 0
+ stim = np.zeros(read_size)
for t_start, t_duration, annotation in evts:
evid = mapping[annotation]
n_start = int(t_start * sfreq)
n_stop = int(t_duration * sfreq) + n_start - 1
# make sure events without duration get one sample
n_stop = n_stop if n_stop > n_start else n_start + 1
- if any(datas[stim_channel][n_start:n_stop]):
+ if any(stim[n_start:n_stop]):
raise NotImplementedError('EDF+ with overlapping '
'events not supported.')
- datas[stim_channel][n_start:n_stop] = evid
+ stim[n_start:n_stop] = evid
else:
- # Allows support for up to 16-bit trigger values
- mask = 2 ** 16 - 1
- stim = np.array(datas[stim_channel], int)
- mask = mask * np.ones(stim.shape, int)
- stim = np.bitwise_and(stim, mask)
- datas[stim_channel] = stim
- datastart = start - blockstart
- datastop = stop - blockstart
- datas = datas[sel, datastart:datastop]
+ # Allows support for up to 16-bit trigger values (2 ** 16 - 1)
+ stim = np.bitwise_and(data[stim_channel_idx].astype(int),
+ 65535)
+ data[stim_channel_idx, :] = \
+ stim[start - blockstart:stop - blockstart]
logger.info('[done]')
times = np.arange(start, stop, dtype=float) / self.info['sfreq']
-
- return datas, times
+ return data, times
+
+
+def _read_ch(fid, subtype, samp, data_size):
+ """Helper to read a number of samples for a single channel"""
+ if subtype in ('24BIT', 'bdf'):
+ ch_data = np.fromfile(fid, dtype=np.uint8,
+ count=samp * data_size)
+ ch_data = ch_data.reshape(-1, 3).astype(np.int32)
+ ch_data = ((ch_data[:, 0]) +
+ (ch_data[:, 1] << 8) +
+ (ch_data[:, 2] << 16))
+ # 24th bit determines the sign
+ ch_data[ch_data >= (1 << 23)] -= (1 << 24)
+ # edf data: 16bit data
+ else:
+ ch_data = np.fromfile(fid, dtype='<i2', count=samp)
+ return ch_data
def _parse_tal_channel(tal_channel_data):
@@ -324,46 +374,7 @@ def _parse_tal_channel(tal_channel_data):
def _get_edf_info(fname, stim_channel, annot, annotmap, tal_channel,
eog, misc, preload):
- """Extracts all the information from the EDF+,BDF file.
-
- Parameters
- ----------
- fname : str
- Raw EDF+,BDF file to be read.
- stim_channel : str | int | None
- The channel name or channel index (starting at 0).
- -1 corresponds to the last channel.
- If None, there will be no stim channel added.
- annot : str | None
- Path to annotation file.
- If None, no derived stim channel will be added (for files requiring
- annotation file to interpret stim channel).
- annotmap : str | None
- Path to annotation map file containing mapping from label to trigger.
- Must be specified if annot is not None.
- tal_channel : int | None
- The channel index (starting at 0).
- Index of the channel containing EDF+ annotations.
- -1 corresponds to the last channel.
- If None, the annotation channel is not used.
- Note: this is overruled by the annotation file if specified.
- eog : list of str | None
- Names of channels that should be designated EOG channels. Names should
- correspond to the electrodes in the edf file. Default is None.
- misc : list of str | None
- Names of channels that should be designated MISC channels. Names
- should correspond to the electrodes in the edf file. Default is None.
- preload : bool
- If True, all data are loaded at initialization.
- If False, data are not read until save.
-
- Returns
- -------
- info : instance of Info
- The measurement info.
- edf_info : dict
- A dict containing all the EDF+,BDF specific parameters.
- """
+ """Extracts all the information from the EDF+,BDF file"""
if eog is None:
eog = []
@@ -632,9 +643,12 @@ def read_raw_edf(input_fname, montage=None, eog=None, misc=None,
-1 corresponds to the last channel.
If None, the annotation channel is not used.
Note: this is overruled by the annotation file if specified.
- preload : bool
- If True, all data are loaded at initialization.
- If False, data are not read until save.
+ preload : bool or str (default False)
+ Preload data into memory for data manipulation and faster indexing.
+ If True, the data will be preloaded into memory (fast, requires
+ large amount of memory). If preload is a string, preload is the
+ file name of a memory-mapped file which is used to store the data
+ on the hard drive (slower, requires less memory).
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
diff --git a/mne/io/fiff/raw.py b/mne/io/fiff/raw.py
index 6d3027fcd..9894f8513 100644
--- a/mne/io/fiff/raw.py
+++ b/mne/io/fiff/raw.py
@@ -114,7 +114,7 @@ class RawFIF(_BaseRaw):
_check_raw_compatibility(raws)
super(RawFIF, self).__init__(
- copy.deepcopy(raws[0].info), None,
+ copy.deepcopy(raws[0].info), False,
[r.first_samp for r in raws], [r.last_samp for r in raws],
[r.filename for r in raws], [r._rawdir for r in raws],
copy.deepcopy(raws[0].comp), raws[0]._orig_comp_grade,
@@ -135,36 +135,6 @@ class RawFIF(_BaseRaw):
if proj:
self.apply_proj()
- def _preload_data(self, preload):
- """This function actually preloads the data"""
- if isinstance(preload, string_types):
- # we will use a memmap: preload is a filename
- data_buffer = preload
- else:
- data_buffer = None
-
- self._data = self._read_segment(data_buffer=data_buffer)[0]
- self.preload = True
- # close files once data are preloaded
- self.close()
-
- @verbose
- def preload_data(self, verbose=None):
- """Preload raw data
-
- Parameters
- ----------
- verbose : bool, str, int, or None
- If not None, override default verbose level (see mne.verbose).
-
- Notes
- -----
- This function will preload raw data if it was not already preloaded.
- If data were already preloaded, it will do nothing.
- """
- if not self.preload:
- self._preload_data(True)
-
@verbose
def _read_raw_file(self, fname, allow_maxshield, preload, compensation,
do_check_fname=True, verbose=None):
@@ -376,36 +346,10 @@ class RawFIF(_BaseRaw):
return raw, next_fname
+ @verbose
def _read_segment(self, start=0, stop=None, sel=None, data_buffer=None,
- verbose=None, projector=None):
- """Read a chunk of raw data
-
- Parameters
- ----------
- start : int, (optional)
- first sample to include (first is 0). If omitted, defaults to the
- first sample in data.
- stop : int, (optional)
- First sample to not include.
- If omitted, data is included to the end.
- sel : array, optional
- Indices of channels to select.
- data_buffer : array or str, optional
- numpy array to fill with data read, must have the correct shape.
- If str, a np.memmap with the correct data type will be used
- to store the data.
- verbose : bool, str, int, or None
- If not None, override default verbose level (see mne.verbose).
- projector : array
- SSP operator to apply to the data.
-
- Returns
- -------
- data : array, [channels x samples]
- the data matrix (channels x samples).
- times : array, [samples]
- returns the time values corresponding to the samples.
- """
+ projector=None, verbose=None):
+ """Read a chunk of raw data"""
# Initial checks
start = int(start)
stop = self.n_times if stop is None else min([int(stop), self.n_times])
diff --git a/mne/io/kit/kit.py b/mne/io/kit/kit.py
index 9ccb87ccb..8de71ac9a 100644
--- a/mne/io/kit/kit.py
+++ b/mne/io/kit/kit.py
@@ -63,9 +63,12 @@ class RawKIT(_BaseRaw):
stimthresh : float
The threshold level for accepting voltage changes in KIT trigger
channels as a trigger event. If None, stim must also be set to None.
- preload : bool
- If True, all data are loaded at initialization.
- If False, data are not read until save.
+ preload : bool or str (default False)
+ Preload data into memory for data manipulation and faster indexing.
+ If True, the data will be preloaded into memory (fast, requires
+ large amount of memory). If preload is a string, preload is the
+ file name of a memory-mapped file which is used to store the data
+ on the hard drive (slower, requires less memory).
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
@@ -90,9 +93,9 @@ class RawKIT(_BaseRaw):
logger.info('Creating Info structure...')
last_samps = [self._kit_info['n_samples'] - 1]
+ self._set_stimchannels(info, stim)
super(RawKIT, self).__init__(
- info, last_samps=last_samps, verbose=verbose)
- self._set_stimchannels(stim)
+ info, preload, last_samps=last_samps, verbose=verbose)
if isinstance(mrk, list):
mrk = [read_mrk(marker) if isinstance(marker, string_types)
@@ -103,19 +106,9 @@ class RawKIT(_BaseRaw):
self.info['dig'] = dig_points
self.info['dev_head_t'] = dev_head_t
elif (mrk is not None or elp is not None or hsp is not None):
- err = ("mrk, elp and hsp need to be provided as a group (all or "
- "none)")
- raise ValueError(err)
+ raise ValueError('mrk, elp and hsp need to be provided as a group '
+ '(all or none)')
- if preload:
- self.preload = preload
- logger.info('Reading raw data from %s...' % input_fname)
- self._data, _ = self._read_segment()
- assert self._data.shape == (self.info['nchan'], self.last_samp + 1)
-
- logger.info(' Range : %d ... %d = %9.3f ... %9.3f secs'
- % (self.first_samp, self.last_samp,
- self.times[0], self.times[-1]))
logger.info('Ready.')
def __repr__(self):
@@ -151,7 +144,7 @@ class RawKIT(_BaseRaw):
return stim_ch
- def _set_stimchannels(self, stim='<'):
+ def _set_stimchannels(self, info, stim='<'):
"""Specify how the trigger channel is synthesized from analog channels.
Has to be done before loading data. For a RawKIT instance that has been
@@ -160,6 +153,8 @@ class RawKIT(_BaseRaw):
Parameters
----------
+ info : instance of MeasInfo
+ The measurement info.
stim : list of int | '<' | '>'
Can be submitted as list of trigger channels.
If a list is not specified, the default triggers extracted from
@@ -171,7 +166,7 @@ class RawKIT(_BaseRaw):
"""
if stim is not None:
if isinstance(stim, str):
- picks = pick_types(self.info, meg=False, ref_meg=False,
+ picks = pick_types(info, meg=False, ref_meg=False,
misc=True, exclude=[])[:8]
if stim == '<':
stim = picks[::-1]
@@ -181,16 +176,16 @@ class RawKIT(_BaseRaw):
raise ValueError("stim needs to be list of int, '>' or "
"'<', not %r" % str(stim))
elif np.max(stim) >= self._kit_info['nchan']:
- msg = ("Tried to set stim channel %i, but sqd file only has %i"
- " channels" % (np.max(stim), self._kit_info['nchan']))
- raise ValueError(msg)
+ raise ValueError('Tried to set stim channel %i, but sqd file '
+ 'only has %i channels'
+ % (np.max(stim), self._kit_info['nchan']))
# modify info
- self.info['nchan'] = self._kit_info['nchan'] + 1
+ info['nchan'] = self._kit_info['nchan'] + 1
ch_name = 'STI 014'
chan_info = {}
chan_info['cal'] = KIT.CALIB_FACTOR
- chan_info['logno'] = self.info['nchan']
- chan_info['scanno'] = self.info['nchan']
+ chan_info['logno'] = info['nchan']
+ chan_info['scanno'] = info['nchan']
chan_info['range'] = 1.0
chan_info['unit'] = FIFF.FIFF_UNIT_NONE
chan_info['unit_mul'] = 0
@@ -198,50 +193,27 @@ class RawKIT(_BaseRaw):
chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
chan_info['loc'] = np.zeros(12)
chan_info['kind'] = FIFF.FIFFV_STIM_CH
- self.info['chs'].append(chan_info)
- self.info['ch_names'].append(ch_name)
+ info['chs'].append(chan_info)
+ info['ch_names'].append(ch_name)
if self.preload:
err = "Can't change stim channel after preloading data"
raise NotImplementedError(err)
self._kit_info['stim'] = stim
- def _read_segment(self, start=0, stop=None, sel=None, verbose=None,
- projector=None):
- """Read a chunk of raw data
-
- Parameters
- ----------
- start : int, (optional)
- first sample to include (first is 0). If omitted, defaults to the
- first sample in data.
- stop : int, (optional)
- First sample to not include.
- If omitted, data is included to the end.
- sel : array, optional
- Indices of channels to select.
- projector : array
- SSP operator to apply to the data.
- verbose : bool, str, int, or None
- If not None, override default verbose level (see mne.verbose).
-
- Returns
- -------
- data : array, [channels x samples]
- the data matrix (channels x samples).
- times : array, [samples]
- returns the time values corresponding to the samples.
- """
+ @verbose
+ def _read_segment(self, start=0, stop=None, sel=None, data_buffer=None,
+ projector=None, verbose=None):
+ """Read a chunk of raw data"""
if sel is None:
- sel = list(range(self.info['nchan']))
- elif len(sel) == 1 and sel[0] == 0 and start == 0 and stop == 1:
- return (666, 666)
+ sel = np.arange(self.info['nchan'])
if projector is not None:
raise NotImplementedError('Currently does not handle projections.')
if stop is None:
stop = self.last_samp + 1
elif stop > self.last_samp + 1:
stop = self.last_samp + 1
+ sel = np.array(sel)
# Initial checks
start = int(start)
@@ -294,6 +266,14 @@ class RawKIT(_BaseRaw):
data = np.vstack((data, stim_ch))
data = data[sel]
+ # This maybe should be refactored to actually save memory...
+ data_shape = data.shape
+ if isinstance(data_buffer, np.ndarray):
+ if data_buffer.shape != data_shape:
+ raise ValueError('data_buffer has incorrect shape')
+ data_buffer[...] = data
+ data = data_buffer
+
logger.info('[done]')
times = np.arange(start, stop, dtype=float) / self.info['sfreq']
@@ -413,9 +393,8 @@ class EpochsKIT(EpochsArray):
for key, val in event_id.items():
if val not in events[:, 2]:
- msg = ('No matching events found for %s '
- '(event id %i)' % (key, val))
- raise ValueError(msg)
+ raise ValueError('No matching events found for %s '
+ '(event id %i)' % (key, val))
data = self._read_data()
assert data.shape == (self._kit_info['n_epochs'], self.info['nchan'],
@@ -520,8 +499,8 @@ def _set_dig_kit(mrk, elp, hsp, auto_decimate=True):
msg = ("The selected head shape contained {n_in} points, which is "
"more than recommended ({n_rec}), and was automatically "
"downsampled to {n_new} points. The preferred way to "
- "downsample is using FastScan.")
- msg = msg.format(n_in=n_pts, n_rec=KIT.DIG_POINTS, n_new=n_new)
+ "downsample is using FastScan."
+ ).format(n_in=n_pts, n_rec=KIT.DIG_POINTS, n_new=n_new)
logger.warning(msg)
if isinstance(elp, string_types):
| preload_data is only for RawFIF
I was just working with a RawEDF and I went to call `raw.preload_data`, but I noticed that this method didn't exist for this Raw class. It's currently only implemented for RawFIF. I also saw that there is a new (to me at least) parameter in `_read_segment` called `data_buffer` that is being used there, whereas the other modules don't have it. Should we unify this method's use? | mne-tools/mne-python | diff --git a/mne/io/brainvision/tests/test_brainvision.py b/mne/io/brainvision/tests/test_brainvision.py
index 5131d978a..13762077d 100644
--- a/mne/io/brainvision/tests/test_brainvision.py
+++ b/mne/io/brainvision/tests/test_brainvision.py
@@ -47,6 +47,7 @@ def test_brainvision_data():
assert_raises(TypeError, read_raw_brainvision, vhdr_path, montage,
preload=True, scale="0")
raw_py = read_raw_brainvision(vhdr_path, montage, eog=eog, preload=True)
+ raw_py.preload_data() # currently does nothing
assert_equal(raw_py.info['highpass'], 0.)
assert_equal(raw_py.info['lowpass'], 250.)
diff --git a/mne/io/bti/tests/test_bti.py b/mne/io/bti/tests/test_bti.py
index 914233034..f6dc30465 100644
--- a/mne/io/bti/tests/test_bti.py
+++ b/mne/io/bti/tests/test_bti.py
@@ -53,6 +53,7 @@ def test_read_pdf():
def test_crop():
""" Test crop raw """
raw = read_raw_bti(pdf_fnames[0], config_fnames[0], hs_fnames[0])
+ raw.preload_data() # currently does nothing
y, t = raw[:]
t0, t1 = 0.25 * t[-1], 0.75 * t[-1]
mask = (t0 <= t) * (t <= t1)
diff --git a/mne/io/edf/tests/test_edf.py b/mne/io/edf/tests/test_edf.py
index 326a8ea51..f2959c3e2 100644
--- a/mne/io/edf/tests/test_edf.py
+++ b/mne/io/edf/tests/test_edf.py
@@ -47,9 +47,6 @@ def test_bdf_data():
picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')
data_py, _ = raw_py[picks]
- print(raw_py) # to test repr
- print(raw_py.info) # to test Info repr
-
# this .mat was generated using the EEG Lab Biosemi Reader
raw_eeglab = io.loadmat(bdf_eeglab_path)
raw_eeglab = raw_eeglab['data'] * 1e-6 # data are stored in microvolts
@@ -118,13 +115,20 @@ def test_read_segment():
assert_allclose(data1, data11, rtol=1e-6)
assert_array_almost_equal(times1, times11)
assert_equal(sorted(raw1.info.keys()), sorted(raw11.info.keys()))
-
- raw2 = read_raw_edf(edf_path, stim_channel=None, preload=True)
- raw2_file = op.join(tempdir, 'test2-raw.fif')
- raw2.save(raw2_file, overwrite=True)
- data2, times2 = raw2[:139, :]
- assert_allclose(data1, data2, rtol=1e-6)
- assert_array_equal(times1, times2)
+ data2, times2 = raw1[0, 0:1]
+ assert_array_equal(data2[0], data1[0, 0:1])
+ assert_array_equal(times2, times1[0:1])
+
+ buffer_fname = op.join(tempdir, 'buffer')
+ for preload in (buffer_fname, True, False): # false here means "delayed"
+ raw2 = read_raw_edf(edf_path, stim_channel=None, preload=preload)
+ if preload is False:
+ raw2.preload_data()
+ raw2_file = op.join(tempdir, 'test2-raw.fif')
+ raw2.save(raw2_file, overwrite=True)
+ data2, times2 = raw2[:139, :]
+ assert_allclose(data1, data2, rtol=1e-6)
+ assert_array_equal(times1, times2)
raw1 = Raw(raw1_file, preload=True)
raw2 = Raw(raw2_file, preload=True)
diff --git a/mne/io/egi/tests/test_egi.py b/mne/io/egi/tests/test_egi.py
index b12e59c38..4faf0535b 100644
--- a/mne/io/egi/tests/test_egi.py
+++ b/mne/io/egi/tests/test_egi.py
@@ -26,7 +26,8 @@ def test_io_egi():
tempdir = _TempDir()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', category=RuntimeWarning)
- read_raw_egi(egi_fname, include=None)
+ raw = read_raw_egi(egi_fname, include=None)
+ raw.preload_data() # currently does nothing
assert_equal(len(w), 1)
assert_true(w[0].category == RuntimeWarning)
msg = 'Did not find any event code with more than one event.'
diff --git a/mne/io/kit/tests/test_kit.py b/mne/io/kit/tests/test_kit.py
index b3fe8a434..43f711f2c 100644
--- a/mne/io/kit/tests/test_kit.py
+++ b/mne/io/kit/tests/test_kit.py
@@ -96,12 +96,25 @@ def test_read_segment():
preload=True)
raw2_file = op.join(tempdir, 'test2-raw.fif')
raw2.save(raw2_file, buffer_size_sec=.1, overwrite=True)
+ data1, times1 = raw1[0, 0:1]
+
raw1 = Raw(raw1_file, preload=True)
raw2 = Raw(raw2_file, preload=True)
assert_array_equal(raw1._data, raw2._data)
+ data2, times2 = raw2[0, 0:1]
+ assert_array_almost_equal(data1, data2)
+ assert_array_almost_equal(times1, times2)
raw3 = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path, stim='<',
preload=True)
assert_array_almost_equal(raw1._data, raw3._data)
+ raw4 = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path, stim='<',
+ preload=False)
+ raw4.preload_data()
+ buffer_fname = op.join(tempdir, 'buffer')
+ assert_array_almost_equal(raw1._data, raw4._data)
+ raw5 = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path, stim='<',
+ preload=buffer_fname)
+ assert_array_almost_equal(raw1._data, raw5._data)
def test_ch_loc():
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 4
} | 0.8 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"numpy>=1.16.0",
"pandas>=1.0.0",
"scikit-learn",
"h5py",
"pysurfer",
"nose",
"nose-timer",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.7",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | apptools==5.2.1
certifi @ file:///croot/certifi_1671487769961/work/certifi
configobj==5.0.9
cycler==0.11.0
envisage==7.0.3
exceptiongroup==1.2.2
fonttools==4.38.0
h5py==3.8.0
importlib-metadata==6.7.0
importlib-resources==5.12.0
iniconfig==2.0.0
joblib==1.3.2
kiwisolver==1.4.5
matplotlib==3.5.3
mayavi==4.8.1
-e git+https://github.com/mne-tools/mne-python.git@f2ab0641f087c63beab715ff5fdaa9399c0807fe#egg=mne
nibabel==4.0.2
nose==1.3.7
nose-timer==1.0.1
numpy==1.21.6
packaging==24.0
pandas==1.3.5
Pillow==9.5.0
pluggy==1.2.0
pyface==8.0.0
Pygments==2.17.2
pyparsing==3.1.4
pysurfer==0.11.2
pytest==7.4.4
python-dateutil==2.9.0.post0
pytz==2025.2
scikit-learn==1.0.2
scipy==1.7.3
six==1.17.0
threadpoolctl==3.1.0
tomli==2.0.1
traits==6.4.3
traitsui==8.0.0
typing_extensions==4.7.1
vtk==9.3.1
zipp==3.15.0
| name: mne-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=22.3.1=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- apptools==5.2.1
- configobj==5.0.9
- cycler==0.11.0
- envisage==7.0.3
- exceptiongroup==1.2.2
- fonttools==4.38.0
- h5py==3.8.0
- importlib-metadata==6.7.0
- importlib-resources==5.12.0
- iniconfig==2.0.0
- joblib==1.3.2
- kiwisolver==1.4.5
- matplotlib==3.5.3
- mayavi==4.8.1
- nibabel==4.0.2
- nose==1.3.7
- nose-timer==1.0.1
- numpy==1.21.6
- packaging==24.0
- pandas==1.3.5
- pillow==9.5.0
- pluggy==1.2.0
- pyface==8.0.0
- pygments==2.17.2
- pyparsing==3.1.4
- pysurfer==0.11.2
- pytest==7.4.4
- python-dateutil==2.9.0.post0
- pytz==2025.2
- scikit-learn==1.0.2
- scipy==1.7.3
- six==1.17.0
- threadpoolctl==3.1.0
- tomli==2.0.1
- traits==6.4.3
- traitsui==8.0.0
- typing-extensions==4.7.1
- vtk==9.3.1
- zipp==3.15.0
prefix: /opt/conda/envs/mne-python
| [
"mne/io/brainvision/tests/test_brainvision.py::test_brainvision_data",
"mne/io/bti/tests/test_bti.py::test_crop",
"mne/io/edf/tests/test_edf.py::test_read_segment",
"mne/io/egi/tests/test_egi.py::test_io_egi",
"mne/io/kit/tests/test_kit.py::test_read_segment"
]
| []
| [
"mne/io/brainvision/tests/test_brainvision.py::test_brainvision_data_filters",
"mne/io/brainvision/tests/test_brainvision.py::test_events",
"mne/io/brainvision/tests/test_brainvision.py::test_read_segment",
"mne/io/bti/tests/test_bti.py::test_read_config",
"mne/io/bti/tests/test_bti.py::test_read_pdf",
"mne/io/bti/tests/test_bti.py::test_raw",
"mne/io/bti/tests/test_bti.py::test_setup_headshape",
"mne/io/edf/tests/test_edf.py::test_bdf_data",
"mne/io/edf/tests/test_edf.py::test_edf_data",
"mne/io/edf/tests/test_edf.py::test_append",
"mne/io/edf/tests/test_edf.py::test_parse_annotation",
"mne/io/edf/tests/test_edf.py::test_edf_annotations",
"mne/io/edf/tests/test_edf.py::test_write_annotations",
"mne/io/kit/tests/test_kit.py::test_data",
"mne/io/kit/tests/test_kit.py::test_epochs",
"mne/io/kit/tests/test_kit.py::test_ch_loc",
"mne/io/kit/tests/test_kit.py::test_stim_ch"
]
| []
| BSD 3-Clause "New" or "Revised" License | 134 | [
"mne/io/fiff/raw.py",
"mne/io/kit/kit.py",
"mne/io/base.py",
"mne/io/edf/edf.py"
]
| [
"mne/io/fiff/raw.py",
"mne/io/kit/kit.py",
"mne/io/base.py",
"mne/io/edf/edf.py"
]
|
typesafehub__conductr-cli-57 | 357be15356b14f09069cf542e8a4765edd6a0d0a | 2015-05-13 17:22:23 | 357be15356b14f09069cf542e8a4765edd6a0d0a | diff --git a/conductr_cli/conduct_load.py b/conductr_cli/conduct_load.py
index 38bc3d8..982965d 100644
--- a/conductr_cli/conduct_load.py
+++ b/conductr_cli/conduct_load.py
@@ -19,11 +19,14 @@ def load(args):
"""`conduct load` command"""
print('Retrieving bundle...')
- bundle_file, bundle_headers = urlretrieve(get_url(args.bundle))
+ bundle_name, bundle_url = get_url(args.bundle)
+ bundle_file, bundle_headers = urlretrieve(bundle_url)
+
+ configuration_file, configuration_headers, configuration_name = (None, None, None)
if args.configuration is not None:
print('Retrieving configuration...')
- configuration_file, configuration_headers = urlretrieve(get_url(args.configuration)) \
- if args.configuration is not None else (None, None)
+ configuration_name, configuration_url = get_url(args.configuration)
+ configuration_file, configuration_headers = urlretrieve(configuration_url)
bundle_conf = ConfigFactory.parse_string(bundle_utils.conf(bundle_file))
overlay_bundle_conf = None if configuration_file is None else \
@@ -39,10 +42,10 @@ def load(args):
('roles', ' '.join(with_bundle_configurations(ConfigTree.get_list, 'roles'))),
('bundleName', with_bundle_configurations(ConfigTree.get_string, 'name')),
('system', with_bundle_configurations(ConfigTree.get_string, 'system')),
- ('bundle', open(bundle_file, 'rb'))
+ ('bundle', (bundle_name, open(bundle_file, 'rb')))
]
if configuration_file is not None:
- files.append(('configuration', open(configuration_file, 'rb')))
+ files.append(('configuration', (configuration_name, open(configuration_file, 'rb'))))
print('Loading bundle to ConductR...')
response = requests.post(url, files=files)
@@ -74,4 +77,5 @@ def get_url(uri):
parsed = urlparse(uri, scheme='file')
op = Path(uri)
np = str(op.cwd() / op if parsed.scheme == 'file' and op.root == '' else parsed.path)
- return urlunparse(ParseResult(parsed.scheme, parsed.netloc, np, parsed.params, parsed.query, parsed.fragment))
+ url = urlunparse(ParseResult(parsed.scheme, parsed.netloc, np, parsed.params, parsed.query, parsed.fragment))
+ return (url.split('/')[-1], url)
| Unable to load bundle from https
ConductR RC1, CLI 0.15
Loading a bundle via https hangs. The cli must be terminated.
```bash
> conduct load https://github.com/typesafehub/project-doc/releases/download/1.0/project-doc-1.0-SNAPSHOT-e78ed07d4a895e14595a21aef1bf616b1b0e4d886f3265bc7b152acf93d259b5.zip
Retrieving bundle...
Retrieving configuration...
Loading bundle to ConductR...
```
If I wget the bundle to the local file system, the bundle loads successfully. | typesafehub/conductr-cli | diff --git a/conductr_cli/test/test_conduct_load.py b/conductr_cli/test/test_conduct_load.py
index b5503cb..93ee6f7 100644
--- a/conductr_cli/test/test_conduct_load.py
+++ b/conductr_cli/test/test_conduct_load.py
@@ -2,6 +2,7 @@ from unittest import TestCase
from conductr_cli.test.cli_test_case import CliTestCase, create_temp_bundle, create_temp_bundle_with_contents, strip_margin
from conductr_cli import conduct_load
from urllib.error import URLError
+import os
import shutil
try:
@@ -54,7 +55,7 @@ class TestConductLoadCommand(TestCase, CliTestCase):
('roles', ' '.join(roles)),
('bundleName', bundleName),
('system', system),
- ('bundle', 1)
+ ('bundle', ('bundle.zip', 1))
]
output_template = """|Retrieving bundle...
@@ -177,7 +178,7 @@ class TestConductLoadCommand(TestCase, CliTestCase):
[call(self.bundle_file, 'rb'), call(config_file, 'rb')]
)
- expected_files = self.default_files + [('configuration', 1)]
+ expected_files = self.default_files + [('configuration', ('bundle.zip', 1))]
expected_files[4] = ('bundleName', 'overlaid-name')
http_method.assert_called_with(self.default_url, files=expected_files)
@@ -361,3 +362,16 @@ class TestConductLoadCommand(TestCase, CliTestCase):
strip_margin("""|ERROR: File not found: no_such.conf
|"""),
self.output(stderr))
+
+
+class TestGetUrl(TestCase):
+
+ def test_url(self):
+ filename, url = conduct_load.get_url('https://site.com/bundle-1.0-e78ed07d4a895e14595a21aef1bf616b1b0e4d886f3265bc7b152acf93d259b5.zip')
+ self.assertEqual('bundle-1.0-e78ed07d4a895e14595a21aef1bf616b1b0e4d886f3265bc7b152acf93d259b5.zip', filename)
+ self.assertEqual('https://site.com/bundle-1.0-e78ed07d4a895e14595a21aef1bf616b1b0e4d886f3265bc7b152acf93d259b5.zip', url)
+
+ def test_file(self):
+ filename, url = conduct_load.get_url('bundle-1.0-e78ed07d4a895e14595a21aef1bf616b1b0e4d886f3265bc7b152acf93d259b5.zip')
+ self.assertEqual('bundle-1.0-e78ed07d4a895e14595a21aef1bf616b1b0e4d886f3265bc7b152acf93d259b5.zip', filename)
+ self.assertEqual('file://' + os.getcwd() + '/bundle-1.0-e78ed07d4a895e14595a21aef1bf616b1b0e4d886f3265bc7b152acf93d259b5.zip', url)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 1
} | 0.15 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"nose",
"flake8",
"pep8-naming",
"git+https://github.com/zheller/flake8-quotes#aef86c4f8388e790332757e5921047ad53160a75",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | argcomplete==3.6.1
certifi==2025.1.31
charset-normalizer==3.4.1
-e git+https://github.com/typesafehub/conductr-cli.git@357be15356b14f09069cf542e8a4765edd6a0d0a#egg=conductr_cli
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
flake8==7.2.0
flake8-quotes @ git+https://github.com/zheller/flake8-quotes@e75accbd40f0e1fa1e8f0e746b93c9766db8f106
idna==3.10
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
mccabe==0.7.0
nose==1.3.7
packaging @ file:///croot/packaging_1734472117206/work
pep8-naming==0.14.1
pluggy @ file:///croot/pluggy_1733169602837/work
pycodestyle==2.13.0
pyflakes==3.3.1
pyhocon==0.2.1
pyparsing==2.0.3
pytest @ file:///croot/pytest_1738938843180/work
requests==2.32.3
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
urllib3==2.3.0
| name: conductr-cli
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- argcomplete==3.6.1
- certifi==2025.1.31
- charset-normalizer==3.4.1
- flake8==7.2.0
- flake8-quotes==3.4.0
- idna==3.10
- mccabe==0.7.0
- nose==1.3.7
- pep8-naming==0.14.1
- pycodestyle==2.13.0
- pyflakes==3.3.1
- pyhocon==0.2.1
- pyparsing==2.0.3
- requests==2.32.3
- urllib3==2.3.0
prefix: /opt/conda/envs/conductr-cli
| [
"conductr_cli/test/test_conduct_load.py::TestConductLoadCommand::test_failure",
"conductr_cli/test/test_conduct_load.py::TestConductLoadCommand::test_failure_invalid_address",
"conductr_cli/test/test_conduct_load.py::TestConductLoadCommand::test_success",
"conductr_cli/test/test_conduct_load.py::TestConductLoadCommand::test_success_custom_ip_port",
"conductr_cli/test/test_conduct_load.py::TestConductLoadCommand::test_success_long_ids",
"conductr_cli/test/test_conduct_load.py::TestConductLoadCommand::test_success_verbose",
"conductr_cli/test/test_conduct_load.py::TestConductLoadCommand::test_success_with_configuration",
"conductr_cli/test/test_conduct_load.py::TestGetUrl::test_file",
"conductr_cli/test/test_conduct_load.py::TestGetUrl::test_url"
]
| []
| [
"conductr_cli/test/test_conduct_load.py::TestConductLoadCommand::test_failure_no_bundle",
"conductr_cli/test/test_conduct_load.py::TestConductLoadCommand::test_failure_no_configuration",
"conductr_cli/test/test_conduct_load.py::TestConductLoadCommand::test_failure_no_disk_space",
"conductr_cli/test/test_conduct_load.py::TestConductLoadCommand::test_failure_no_memory",
"conductr_cli/test/test_conduct_load.py::TestConductLoadCommand::test_failure_no_nr_of_cpus",
"conductr_cli/test/test_conduct_load.py::TestConductLoadCommand::test_failure_no_roles",
"conductr_cli/test/test_conduct_load.py::TestConductLoadCommand::test_failure_roles_not_a_list"
]
| []
| Apache License 2.0 | 135 | [
"conductr_cli/conduct_load.py"
]
| [
"conductr_cli/conduct_load.py"
]
|
|
zopefoundation__Acquisition-10 | 6f20ea96fff40814882da580844949ea97f6c4ba | 2015-05-13 21:46:49 | 6f20ea96fff40814882da580844949ea97f6c4ba | diff --git a/CHANGES.rst b/CHANGES.rst
index 3e617c3..0b44a8b 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -4,7 +4,15 @@ Changelog
4.2.2 (unreleased)
------------------
-- TBD
+- Make the pure-Python Acquirer objects cooperatively use the
+ superclass ``__getattribute__`` method, like the C implementation.
+ See https://github.com/zopefoundation/Acquisition/issues/7.
+
+- The pure-Python implicit acquisition wrapper allows wrapped objects
+ to use ``object.__getattribute__(self, name)``. This differs from
+ the C implementation, but is important for compatibility with the
+ pure-Python versions of libraries like ``persistent``. See
+ https://github.com/zopefoundation/Acquisition/issues/9.
4.2.1 (2015-04-23)
------------------
diff --git a/src/Acquisition/__init__.py b/src/Acquisition/__init__.py
index eae7d7c..c44bbba 100644
--- a/src/Acquisition/__init__.py
+++ b/src/Acquisition/__init__.py
@@ -2,10 +2,12 @@ from __future__ import absolute_import, print_function
# pylint:disable=W0212,R0911,R0912
+
import os
import operator
import sys
import types
+import weakref
import ExtensionClass
@@ -39,17 +41,30 @@ def _apply_filter(predicate, inst, name, result, extra, orig):
return predicate(orig, inst, name, result, extra)
if sys.version_info < (3,):
+ import copy_reg
+
def _rebound_method(method, wrapper):
"""Returns a version of the method with self bound to `wrapper`"""
if isinstance(method, types.MethodType):
method = types.MethodType(method.im_func, wrapper, method.im_class)
return method
+ exec("""def _reraise(tp, value, tb=None):
+ raise tp, value, tb
+""")
else: # pragma: no cover (python 2 is currently our reference)
+ import copyreg as copy_reg
+
def _rebound_method(method, wrapper):
"""Returns a version of the method with self bound to `wrapper`"""
if isinstance(method, types.MethodType):
method = types.MethodType(method.__func__, wrapper)
return method
+ def _reraise(tp, value, tb=None):
+ if value is None:
+ value = tp()
+ if value.__traceback__ is not tb:
+ raise value.with_traceback(tb)
+ raise value
###
# Wrapper object protocol, mostly ported from C directly
@@ -283,16 +298,58 @@ def _Wrapper_findattr(wrapper, name,
_NOT_GIVEN = object() # marker
+_OGA = object.__getattribute__
+
+# Map from object types with slots to their generated, derived
+# types (or None if no derived type is needed)
+_wrapper_subclass_cache = weakref.WeakKeyDictionary()
+
+def _make_wrapper_subclass_if_needed(cls, obj, container):
+ # If the type of an object to be wrapped has __slots__, then we
+ # must create a wrapper subclass that has descriptors for those
+ # same slots. In this way, its methods that use object.__getattribute__
+ # directly will continue to work, even when given an instance of _Wrapper
+ if getattr(cls, '_Wrapper__DERIVED', False):
+ return None
+ type_obj = type(obj)
+ wrapper_subclass = _wrapper_subclass_cache.get(type_obj, _NOT_GIVEN)
+ if wrapper_subclass is _NOT_GIVEN:
+ slotnames = copy_reg._slotnames(type_obj)
+ if slotnames and not isinstance(obj, _Wrapper):
+ new_type_dict = {'_Wrapper__DERIVED': True}
+ def _make_property(slotname):
+ return property(lambda s: getattr(s._obj, slotname),
+ lambda s, v: setattr(s._obj, slotname, v),
+ lambda s: delattr(s._obj, slotname))
+ for slotname in slotnames:
+ new_type_dict[slotname] = _make_property(slotname)
+ new_type = type(cls.__name__ + '_' + type_obj.__name__,
+ (cls,),
+ new_type_dict)
+ else:
+ new_type = None
+ wrapper_subclass = _wrapper_subclass_cache[type_obj] = new_type
+ return wrapper_subclass
class _Wrapper(ExtensionClass.Base):
- __slots__ = ('_obj','_container',)
+ __slots__ = ('_obj','_container', '__dict__')
_IS_IMPLICIT = None
def __new__(cls, obj, container):
- inst = super(_Wrapper,cls).__new__(cls)
+ wrapper_subclass = _make_wrapper_subclass_if_needed(cls, obj, container)
+ if wrapper_subclass:
+ inst = wrapper_subclass(obj, container)
+ else:
+ inst = super(_Wrapper,cls).__new__(cls)
inst._obj = obj
inst._container = container
+ if hasattr(obj, '__dict__') and not isinstance(obj, _Wrapper):
+ # Make our __dict__ refer to the same dict
+ # as the other object, so that if it has methods that
+ # use `object.__getattribute__` they still work. Note that because we have
+ # slots, we won't interfere with the contents of that dict
+ object.__setattr__(inst, '__dict__', obj.__dict__)
return inst
def __init__(self, obj, container):
@@ -325,11 +382,11 @@ class _Wrapper(ExtensionClass.Base):
def __getattribute__(self, name):
if name in ('_obj', '_container'):
- return object.__getattribute__(self, name)
- if self._obj is not None or self._container is not None:
+ return _OGA(self, name)
+ if _OGA(self, '_obj') is not None or _OGA(self, '_container') is not None:
return _Wrapper_findattr(self, name, None, None, None,
True, type(self)._IS_IMPLICIT, False, False)
- return object.__getattribute__(self, name)
+ return _OGA(self, name)
def __of__(self, parent):
# Based on __of__ in the C code;
@@ -684,11 +741,15 @@ class _Acquirer(ExtensionClass.Base):
def __getattribute__(self, name):
try:
- return ExtensionClass.Base.__getattribute__(self, name)
+ return super(_Acquirer, self).__getattribute__(name)
except AttributeError:
# the doctests have very specific error message
- # requirements
- raise AttributeError(name)
+ # requirements (but at least we can preserve the traceback)
+ _, _, tb = sys.exc_info()
+ try:
+ _reraise(AttributeError, AttributeError(name), tb)
+ finally:
+ del tb
def __of__(self, context):
return type(self)._Wrapper(self, context)
| Pure-Python Wrapper objects break `object.__getattribute__` in methods of the wrapped object
Certain "base" classes make direct use of `object.__getattribute__` in their method implementations. (i.e., *outside* of their own `__getattribute__` implementation). They may do this for performance reasons, or to simplify their implementation of `__getattribute__`. One prominent example is `persistent.Persistent`, but there are undoubtedly others.
If classes like this are wrapped with a pure-Python acquisition wrapper, those methods fail, because the `self` they have is not the `self` they expect (it's the wrapper), and directly calling `object.__getattribute__` bypasses the wrapper's `__getattribute__` implementation.
Here's an example test that fails with pure-Python, but works under the C implementation:
```python
def test_object_getattribute_in_rebound_method(self):
class Persistent(object):
__slots__ = ('__flags')
def __init__(self):
self.__flags = 42
def get_flags(self):
return object.__getattribute__(self, '_Persistent__flags')
wrapped = Persistent()
wrapper = Acquisition.ImplicitAcquisitionWrapper(wrapped, None)
self.assertEqual(wrapped.get_flags(), wrapper.get_flags())
```
It fails like so:
```
File "/Acquisition/tests.py", line 3171, in test_object_getattribute_in_rebound_method
self.assertEqual(wrapped.get_flags(), wrapper.get_flags())
File "/Acquisition/tests.py", line 3166, in get_flags
return object.__getattribute__(self, '_Persistent__flags')
AttributeError: 'ImplicitAcquisitionWrapper' object has no attribute '_Persistent__flags'
```
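For what it's worth, the failure can be reproduced with a minimal hand-rolled proxy, completely outside Acquisition (a standalone sketch; `Target` and `Proxy` are made-up illustrations that merely mimic how the pure-Python wrapper rebinds methods):
```python
import types


class Target(object):
    def __init__(self):
        self.secret = 42

    def get_secret(self):
        # Deliberately bypasses any custom __getattribute__ on the
        # receiver, the way persistent.Persistent reads its internal state.
        return object.__getattribute__(self, 'secret')


class Proxy(object):
    def __init__(self, target):
        object.__setattr__(self, '_target', target)

    def __getattribute__(self, name):
        target = object.__getattribute__(self, '_target')
        value = getattr(target, name)
        if isinstance(value, types.MethodType):
            # Rebind the method so `self` inside it is the proxy,
            # mirroring what the pure-Python wrapper does.
            value = types.MethodType(value.__func__, self)
        return value


Target().get_secret()         # 42
Proxy(Target()).get_secret()  # AttributeError: 'Proxy' object has no attribute 'secret'
```
The rebinding step is the crux: once `self` inside `get_secret` is the proxy, `object.__getattribute__` consults the proxy's own storage and never reaches the wrapped instance.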
I'm trying to figure out how to fix this. | zopefoundation/Acquisition | diff --git a/src/Acquisition/tests.py b/src/Acquisition/tests.py
index 4bc523f..277d0fe 100644
--- a/src/Acquisition/tests.py
+++ b/src/Acquisition/tests.py
@@ -3124,6 +3124,125 @@ class TestAcquire(unittest.TestCase):
found = self.acquire(self.a.b.c, AQ_PARENT)
self.assertTrue(found.aq_self is self.a.b.aq_self)
+class TestCooperativeBase(unittest.TestCase):
+
+ def _make_acquirer(self, kind):
+ from ExtensionClass import Base
+
+ class ExtendsBase(Base):
+ def __getattribute__(self, name):
+ if name == 'magic':
+ return 42
+ return super(ExtendsBase,self).__getattribute__(name)
+
+ class Acquirer(kind, ExtendsBase):
+ pass
+
+ return Acquirer()
+
+ def _check___getattribute___is_cooperative(self, acquirer):
+ self.assertEqual(getattr(acquirer, 'magic'), 42)
+
+ def test_implicit___getattribute__is_cooperative(self):
+ self._check___getattribute___is_cooperative(self._make_acquirer(Acquisition.Implicit))
+
+ def test_explicit___getattribute__is_cooperative(self):
+ self._check___getattribute___is_cooperative(self._make_acquirer(Acquisition.Explicit))
+
+if 'Acquisition._Acquisition' not in sys.modules:
+ # Implicitly wrapping an object that uses object.__getattribute__
+ # in its implementation of __getattribute__ doesn't break.
+ # This can arise with the `persistent` library or other
+ # "base" classes.
+
+ # The C implementation doesn't directly support this; however,
+ # it is used heavily in the Python implementation of Persistent.
+
+ class TestImplicitWrappingGetattribute(unittest.TestCase):
+
+ def test_object_getattribute_in_rebound_method_with_slots(self):
+
+ class Persistent(object):
+ __slots__ = ('__flags',)
+ def __init__(self):
+ self.__flags = 42
+
+ def get_flags(self):
+ return object.__getattribute__(self, '_Persistent__flags')
+
+ wrapped = Persistent()
+ wrapper = Acquisition.ImplicitAcquisitionWrapper(wrapped, None)
+
+ self.assertEqual(wrapped.get_flags(), wrapper.get_flags())
+
+ # Changing it is not reflected in the wrapper's dict (this is an
+ # implementation detail)
+ wrapper._Persistent__flags = -1
+ self.assertEqual(wrapped.get_flags(), -1)
+ self.assertEqual(wrapped.get_flags(), wrapper.get_flags())
+
+ wrapper_dict = object.__getattribute__(wrapper, '__dict__')
+ self.assertFalse('_Persistent__flags' in wrapper_dict)
+
+ def test_type_with_slots_reused(self):
+
+ class Persistent(object):
+ __slots__ = ('__flags',)
+ def __init__(self):
+ self.__flags = 42
+
+ def get_flags(self):
+ return object.__getattribute__(self, '_Persistent__flags')
+
+ wrapped = Persistent()
+ wrapper = Acquisition.ImplicitAcquisitionWrapper(wrapped, None)
+ wrapper2 = Acquisition.ImplicitAcquisitionWrapper(wrapped, None)
+
+ self.assertTrue( type(wrapper) is type(wrapper2))
+
+ def test_object_getattribute_in_rebound_method_with_dict(self):
+
+ class Persistent(object):
+ def __init__(self):
+ self.__flags = 42
+
+ def get_flags(self):
+ return object.__getattribute__(self, '_Persistent__flags')
+
+ wrapped = Persistent()
+ wrapper = Acquisition.ImplicitAcquisitionWrapper(wrapped, None)
+
+ self.assertEqual(wrapped.get_flags(), wrapper.get_flags())
+
+ # Changing it is also reflected in both dicts (this is an
+ # implementation detail)
+ wrapper._Persistent__flags = -1
+ self.assertEqual(wrapped.get_flags(), -1)
+ self.assertEqual(wrapped.get_flags(), wrapper.get_flags())
+
+ wrapper_dict = object.__getattribute__(wrapper, '__dict__')
+ self.assertTrue('_Persistent__flags' in wrapper_dict)
+
+
+ def test_object_getattribute_in_rebound_method_with_slots_and_dict(self):
+
+ class Persistent(object):
+ __slots__ = ('__flags', '__dict__')
+ def __init__(self):
+ self.__flags = 42
+ self.__oid = 'oid'
+
+ def get_flags(self):
+ return object.__getattribute__(self, '_Persistent__flags')
+
+ def get_oid(self):
+ return object.__getattribute__(self, '_Persistent__oid')
+
+ wrapped = Persistent()
+ wrapper = Acquisition.ImplicitAcquisitionWrapper(wrapped, None)
+
+ self.assertEqual(wrapped.get_flags(), wrapper.get_flags())
+ self.assertEqual(wrapped.get_oid(), wrapper.get_oid())
class TestUnicode(unittest.TestCase):
@@ -3537,6 +3656,7 @@ def test_suite():
unittest.makeSuite(TestAcquire),
unittest.makeSuite(TestUnicode),
unittest.makeSuite(TestProxying),
+ unittest.makeSuite(TestCooperativeBase),
]
# This file is only available in a source checkout, skip it
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 2
} | 4.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.7",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/zopefoundation/Acquisition.git@6f20ea96fff40814882da580844949ea97f6c4ba#egg=Acquisition
certifi @ file:///croot/certifi_1671487769961/work/certifi
exceptiongroup==1.2.2
ExtensionClass==5.1
importlib-metadata==6.7.0
iniconfig==2.0.0
nose==1.3.7
packaging==24.0
pluggy==1.2.0
pytest==7.4.4
tomli==2.0.1
typing_extensions==4.7.1
zipp==3.15.0
zope.interface==6.4.post2
| name: Acquisition
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=22.3.1=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- extensionclass==5.1
- importlib-metadata==6.7.0
- iniconfig==2.0.0
- nose==1.3.7
- packaging==24.0
- pluggy==1.2.0
- pytest==7.4.4
- tomli==2.0.1
- typing-extensions==4.7.1
- zipp==3.15.0
- zope-interface==6.4.post2
prefix: /opt/conda/envs/Acquisition
| [
"src/Acquisition/tests.py::TestCooperativeBase::test_explicit___getattribute__is_cooperative",
"src/Acquisition/tests.py::TestCooperativeBase::test_implicit___getattribute__is_cooperative",
"src/Acquisition/tests.py::TestImplicitWrappingGetattribute::test_object_getattribute_in_rebound_method_with_dict",
"src/Acquisition/tests.py::TestImplicitWrappingGetattribute::test_object_getattribute_in_rebound_method_with_slots",
"src/Acquisition/tests.py::TestImplicitWrappingGetattribute::test_object_getattribute_in_rebound_method_with_slots_and_dict"
]
| [
"src/Acquisition/tests.py::test_suite"
]
| [
"src/Acquisition/tests.py::test_unwrapped",
"src/Acquisition/tests.py::test_simple",
"src/Acquisition/tests.py::test__of__exception",
"src/Acquisition/tests.py::test_muliple",
"src/Acquisition/tests.py::test_pinball",
"src/Acquisition/tests.py::test_explicit",
"src/Acquisition/tests.py::test_mixed_explicit_and_explicit",
"src/Acquisition/tests.py::test_aq_inContextOf",
"src/Acquisition/tests.py::test_AqAlg",
"src/Acquisition/tests.py::test_explicit_acquisition",
"src/Acquisition/tests.py::test_creating_wrappers_directly",
"src/Acquisition/tests.py::test_cant_pickle_acquisition_wrappers_classic",
"src/Acquisition/tests.py::test_cant_pickle_acquisition_wrappers_newstyle",
"src/Acquisition/tests.py::test_cant_persist_acquisition_wrappers_classic",
"src/Acquisition/tests.py::test_cant_persist_acquisition_wrappers_newstyle",
"src/Acquisition/tests.py::test_interfaces",
"src/Acquisition/tests.py::test_mixin_post_class_definition",
"src/Acquisition/tests.py::test_mixin_base",
"src/Acquisition/tests.py::test_Basic_gc",
"src/Acquisition/tests.py::test_Wrapper_gc",
"src/Acquisition/tests.py::test_container_proxying",
"src/Acquisition/tests.py::test___parent__no_wrappers",
"src/Acquisition/tests.py::test_implicit_wrapper_as___parent__",
"src/Acquisition/tests.py::test_explicit_wrapper_as___parent__",
"src/Acquisition/tests.py::test_implicit_wrapper_has_nonwrapper_as_aq_parent",
"src/Acquisition/tests.py::test_explicit_wrapper_has_nonwrapper_as_aq_parent",
"src/Acquisition/tests.py::test___parent__aq_parent_circles",
"src/Acquisition/tests.py::test_python_impl_cycle",
"src/Acquisition/tests.py::test_unwrapped_implicit_acquirer_unwraps__parent__",
"src/Acquisition/tests.py::test__iter__after_AttributeError",
"src/Acquisition/tests.py::test_special_names",
"src/Acquisition/tests.py::test_deleting_parent_attrs",
"src/Acquisition/tests.py::test__cmp__is_called_on_wrapped_object",
"src/Acquisition/tests.py::test_wrapped_methods_have_correct_self",
"src/Acquisition/tests.py::test_cannot_set_attributes_on_empty_wrappers",
"src/Acquisition/tests.py::test_getitem_setitem_not_implemented",
"src/Acquisition/tests.py::test_getitem_setitem_implemented",
"src/Acquisition/tests.py::test_wrapped_objects_are_unwrapped_on_set",
"src/Acquisition/tests.py::test_wrapper_calls_of_on_non_wrapper",
"src/Acquisition/tests.py::test_aq_inContextOf_odd_cases",
"src/Acquisition/tests.py::test_search_repeated_objects",
"src/Acquisition/tests.py::TestParent::test_parent_parent_circles",
"src/Acquisition/tests.py::TestParent::test_parent_parent_parent_circles",
"src/Acquisition/tests.py::TestAcquire::test_explicit_module_default",
"src/Acquisition/tests.py::TestAcquire::test_explicit_module_false",
"src/Acquisition/tests.py::TestAcquire::test_explicit_module_true",
"src/Acquisition/tests.py::TestAcquire::test_explicit_wrapper_default",
"src/Acquisition/tests.py::TestAcquire::test_explicit_wrapper_false",
"src/Acquisition/tests.py::TestAcquire::test_explicit_wrapper_true",
"src/Acquisition/tests.py::TestAcquire::test_no_wrapper_but___parent___falls_back_to_default",
"src/Acquisition/tests.py::TestAcquire::test_unwrapped_falls_back_to_default",
"src/Acquisition/tests.py::TestAcquire::test_w_unicode_attr_name",
"src/Acquisition/tests.py::TestAcquire::test_wrapper_falls_back_to_default",
"src/Acquisition/tests.py::TestImplicitWrappingGetattribute::test_type_with_slots_reused",
"src/Acquisition/tests.py::TestUnicode::test_explicit_aq_unicode_should_be_called",
"src/Acquisition/tests.py::TestUnicode::test_explicit_should_fall_back_to_str",
"src/Acquisition/tests.py::TestUnicode::test_implicit_aq_unicode_should_be_called",
"src/Acquisition/tests.py::TestUnicode::test_implicit_should_fall_back_to_str",
"src/Acquisition/tests.py::TestUnicode::test_str_fallback_should_be_called_with_wrapped_self",
"src/Acquisition/tests.py::TestUnicode::test_unicode_should_be_called_with_wrapped_self",
"src/Acquisition/tests.py::TestProxying::test_explicit_proxy_bool",
"src/Acquisition/tests.py::TestProxying::test_explicit_proxy_call",
"src/Acquisition/tests.py::TestProxying::test_explicit_proxy_comporison",
"src/Acquisition/tests.py::TestProxying::test_explicit_proxy_contains",
"src/Acquisition/tests.py::TestProxying::test_explicit_proxy_hash",
"src/Acquisition/tests.py::TestProxying::test_explicit_proxy_special_meths",
"src/Acquisition/tests.py::TestProxying::test_implicit_proxy_bool",
"src/Acquisition/tests.py::TestProxying::test_implicit_proxy_call",
"src/Acquisition/tests.py::TestProxying::test_implicit_proxy_comporison",
"src/Acquisition/tests.py::TestProxying::test_implicit_proxy_contains",
"src/Acquisition/tests.py::TestProxying::test_implicit_proxy_hash",
"src/Acquisition/tests.py::TestProxying::test_implicit_proxy_special_meths"
]
| []
| Zope Public License 2.1 | 136 | [
"src/Acquisition/__init__.py",
"CHANGES.rst"
]
| [
"src/Acquisition/__init__.py",
"CHANGES.rst"
]
|
|
chimpler__pyhocon-28 | 1dfded0867d7de333d2ee9bc90ee25b2d690f819 | 2015-05-14 03:44:54 | 4683937b1d195ce2f53ca78987571e41bfe273e7 | diff --git a/pyhocon/config_tree.py b/pyhocon/config_tree.py
index 60b2d88..d045c7b 100644
--- a/pyhocon/config_tree.py
+++ b/pyhocon/config_tree.py
@@ -70,6 +70,9 @@ class ConfigTree(OrderedDict):
type=l.__class__.__name__)
)
else:
+ # if there was an override keep overide value
+ if isinstance(value, ConfigValues):
+ value.overriden_value = self.get(key_elt, None)
super(ConfigTree, self).__setitem__(key_elt, value)
else:
next_config_tree = super(ConfigTree, self).get(key_elt)
| ${?foo} unsets previous value if new is undefined
pyhocon==0.3.2
Apparently `${?VAR_NAME}` no longer complains about undefined variables, but it seems to be unsetting the key. My expectation is that `${?foo}` would leave the key referencing the last value unless it had something to replace it with.
```
...
File "/Users/bkuberek/Development/people-service/.venv/lib/python2.7/site-packages/pyhocon/__init__.py", line 203, in parse
ConfigParser._resolve_substitutions(config, substitutions)
File "/Users/bkuberek/Development/people-service/.venv/lib/python2.7/site-packages/pyhocon/__init__.py", line 246, in _resolve_substitutions
is_optional_resolved, resolved_value = ConfigParser._resolve_variable(config, substitution)
File "/Users/bkuberek/Development/people-service/.venv/lib/python2.7/site-packages/pyhocon/__init__.py", line 228, in _resolve_variable
col=col(substitution.loc, substitution.instring)))
pyhocon.exceptions.ConfigSubstitutionException: Cannot resolve variable ${database.name} (line: 20, col: 102)
```
Here is my configuration:
```
{
database {
host = "localhost"
port = "5432"
user = "people"
name = "peopledb"
pass = "dev"
url = "postgresql://"${database.user}":"${database.pass}"@"${database.host}":"${database.port}"/"${database.name}
# Override from environment variables if they exist
host = ${?DB_HOST}
port = ${?DB_PORT}
user = ${?DB_USER}
name = ${?DB_NAME}
pass = ${?DB_PASS}
}
}
```
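To make the expectation concrete, here is a minimal sketch of the semantics I would expect (assuming `NOT_SET` is undefined in the environment; `ConfigFactory.parse_string` is pyhocon's standard entry point):
```python
from pyhocon import ConfigFactory

# With NOT_SET undefined, ${?NOT_SET} should be a no-op and the
# earlier assignment should survive, per my reading of the HOCON spec.
conf = ConfigFactory.parse_string("""
    database {
      name = peopledb
      name = ${?NOT_SET}
    }
""")
assert conf['database.name'] == 'peopledb'  # expected; on 0.3.2 the key is unset instead
```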
| chimpler/pyhocon | diff --git a/tests/test_config_parser.py b/tests/test_config_parser.py
index b343d35..c0dbb15 100644
--- a/tests/test_config_parser.py
+++ b/tests/test_config_parser.py
@@ -797,7 +797,21 @@ class TestConfigParser(object):
assert config['database.user'] == 'test_user'
assert config['database.pass'] == 'test_pass'
- def test_substitution_override(self):
+ def test_substitution_flat_override(self):
+ config = ConfigFactory.parse_string(
+ """
+ database {
+ name = peopledb
+ pass = peoplepass
+ name = ${?NOT_EXISTS}
+ pass = ${?NOT_EXISTS}
+ }
+ """)
+
+ assert config['database.name'] == 'peopledb'
+ assert config['database.pass'] == 'peoplepass'
+
+ def test_substitution_nested_override(self):
config = ConfigFactory.parse_string(
"""
database {
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
-e git+https://github.com/chimpler/pyhocon.git@1dfded0867d7de333d2ee9bc90ee25b2d690f819#egg=pyhocon
pyparsing==2.0.3
pytest==8.3.5
tomli==2.2.1
| name: pyhocon
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pyparsing==2.0.3
- pytest==8.3.5
- tomli==2.2.1
prefix: /opt/conda/envs/pyhocon
| [
"tests/test_config_parser.py::TestConfigParser::test_substitution_flat_override"
]
| []
| [
"tests/test_config_parser.py::TestConfigParser::test_parse_simple_value",
"tests/test_config_parser.py::TestConfigParser::test_parse_with_enclosing_brace",
"tests/test_config_parser.py::TestConfigParser::test_parse_with_enclosing_square_bracket",
"tests/test_config_parser.py::TestConfigParser::test_quoted_key_with_dots",
"tests/test_config_parser.py::TestConfigParser::test_comma_to_separate_expr",
"tests/test_config_parser.py::TestConfigParser::test_dict_merge",
"tests/test_config_parser.py::TestConfigParser::test_parse_with_comments",
"tests/test_config_parser.py::TestConfigParser::test_missing_config",
"tests/test_config_parser.py::TestConfigParser::test_parse_null",
"tests/test_config_parser.py::TestConfigParser::test_parse_empty",
"tests/test_config_parser.py::TestConfigParser::test_parse_override",
"tests/test_config_parser.py::TestConfigParser::test_concat_dict",
"tests/test_config_parser.py::TestConfigParser::test_concat_string",
"tests/test_config_parser.py::TestConfigParser::test_concat_list",
"tests/test_config_parser.py::TestConfigParser::test_bad_concat",
"tests/test_config_parser.py::TestConfigParser::test_string_substitutions",
"tests/test_config_parser.py::TestConfigParser::test_int_substitutions",
"tests/test_config_parser.py::TestConfigParser::test_cascade_string_substitutions",
"tests/test_config_parser.py::TestConfigParser::test_dict_substitutions",
"tests/test_config_parser.py::TestConfigParser::test_list_substitutions",
"tests/test_config_parser.py::TestConfigParser::test_non_existent_substitution",
"tests/test_config_parser.py::TestConfigParser::test_non_compatible_substitution",
"tests/test_config_parser.py::TestConfigParser::test_concat_multi_line_string",
"tests/test_config_parser.py::TestConfigParser::test_concat_multi_line_list",
"tests/test_config_parser.py::TestConfigParser::test_concat_multi_line_dict",
"tests/test_config_parser.py::TestConfigParser::test_parse_URL_from_samples",
"tests/test_config_parser.py::TestConfigParser::test_include_dict_from_samples",
"tests/test_config_parser.py::TestConfigParser::test_list_of_dicts",
"tests/test_config_parser.py::TestConfigParser::test_list_of_lists",
"tests/test_config_parser.py::TestConfigParser::test_list_of_dicts_with_merge",
"tests/test_config_parser.py::TestConfigParser::test_list_of_lists_with_merge",
"tests/test_config_parser.py::TestConfigParser::test_invalid_assignment",
"tests/test_config_parser.py::TestConfigParser::test_invalid_dict",
"tests/test_config_parser.py::TestConfigParser::test_include_list",
"tests/test_config_parser.py::TestConfigParser::test_include_dict",
"tests/test_config_parser.py::TestConfigParser::test_substitution_override",
"tests/test_config_parser.py::TestConfigParser::test_substitution_nested_override",
"tests/test_config_parser.py::TestConfigParser::test_optional_substitution",
"tests/test_config_parser.py::TestConfigParser::test_substitution_cycle",
"tests/test_config_parser.py::TestConfigParser::test_assign_number_with_eol",
"tests/test_config_parser.py::TestConfigParser::test_assign_strings_with_eol",
"tests/test_config_parser.py::TestConfigParser::test_assign_list_numbers_with_eol",
"tests/test_config_parser.py::TestConfigParser::test_assign_list_strings_with_eol",
"tests/test_config_parser.py::TestConfigParser::test_assign_dict_strings_with_equal_sign_with_eol",
"tests/test_config_parser.py::TestConfigParser::test_assign_dict_strings_no_equal_sign_with_eol"
]
| []
| Apache License 2.0 | 137 | [
"pyhocon/config_tree.py"
]
| [
"pyhocon/config_tree.py"
]
|
|
mkdocs__mkdocs-528 | 93a181a208af0440f44e7f51081b9df9d9f30568 | 2015-05-15 04:38:15 | 463c5b647e9ce5992b519708a0b9c4cba891d65c | landscape-bot: [](https://landscape.io/diff/153156)
Code quality remained the same when pulling **[d3fa1bd](https://github.com/waylan/mkdocs/commit/d3fa1bd74f3d126c31e03d9330922b265354fa15) on waylan:519** into **[667fc66](https://github.com/mkdocs/mkdocs/commit/667fc663a2d9f6b3c7fb5c26777161a4ad42368f) on mkdocs:master**.
d0ugal: This looks great! I should be able to review it today.
landscape-bot: [](https://landscape.io/diff/154259)
Code quality remained the same when pulling **[06a74af](https://github.com/waylan/mkdocs/commit/06a74afdc8bfe120398344b021738e305ca246d3) on waylan:519** into **[93a181a](https://github.com/mkdocs/mkdocs/commit/93a181a208af0440f44e7f51081b9df9d9f30568) on mkdocs:master**.
d0ugal: This is lovely, do you plan to do any more updates? I'm happy to merge as it is. Tested it and it works great.
waylan: The only other thing I would consider doing at this point is removing the OrderedDict stuff for the config, as this PR removes the need for it. I don't think it would hurt to leave it in, but removing it is one less thing to maintain long-term. @facelessuser have any thoughts?
Regardless, that could happen later. Feel free to merge any time.
facelessuser: Remove it, unless ordered dicts will be desired somewhere in the future. It hurts nothing, but I only added it because, at the time, it was decided not to do the extension config this way. But now we are, so the original need is gone. | diff --git a/docs/user-guide/configuration.md b/docs/user-guide/configuration.md
index 924e6207..4b9c5b8d 100644
--- a/docs/user-guide/configuration.md
+++ b/docs/user-guide/configuration.md
@@ -8,7 +8,7 @@ Guide to all available configuration settings.
Project settings are always configured by using a YAML configuration file in the project directory named `mkdocs.yml`.
-As a miniumum this configuration file must contain the `site_name` setting. All other settings are optional.
+As a minimum this configuration file must contain the `site_name` setting. All other settings are optional.
## Project information
@@ -16,7 +16,7 @@ As a miniumum this configuration file must contain the `site_name` setting. All
This is a **required setting**, and should be a string that is used as the main title for the project documentation. For example:
- site_name: Mashmallow Generator
+ site_name: Marshmallow Generator
When rendering the theme this setting will be passed as the `site_name` context variable.
@@ -109,9 +109,9 @@ If you have a lot of project documentation you might choose to use headings to b
pages:
- Introduction: 'index.md'
- User Guide:
- - 'Creating a new Mashmallow project': 'user-guide/creating.md'
- - 'Mashmallow API guide': 'user-guide/api.md'
- - 'Configuring Mashmallow': 'user-guide/configuration.md'
+ - 'Creating a new Marshmallow project': 'user-guide/creating.md'
+ - 'Marshmallow API guide': 'user-guide/api.md'
+ - 'Configuring Marshmallow': 'user-guide/configuration.md'
- About:
- License: 'about/license.md'
@@ -150,16 +150,21 @@ Lets you set the directory where the output HTML and other files are created. T
**default**: `'site'`
-**Note**: If you are using source code control you will normally want to ensure that your *build output* files are not commited into the repository, and only keep the *source* files under version control. For example, if using `git` you might add the following line to your `.gitignore` file:
+!!! note "Note:"
+ If you are using source code control you will normally want to ensure
+ that your *build output* files are not committed into the repository, and only
+ keep the *source* files under version control. For example, if using `git` you
+ might add the following line to your `.gitignore` file:
- site/
+ site/
-If you're using another source code control you'll want to check its documentation on how to ignore specific directories.
+ If you're using another source code control you'll want to check its
+ documentation on how to ignore specific directories.
### extra_css
-Set a list of css files to be included by the theme.
+Set a list of CSS files to be included by the theme.
**default**: By default `extra_css` will contain a list of all the CSS files found within the `docs_dir`, if none are found it will be `[]` (an empty list).
@@ -207,7 +212,7 @@ Determines if a broken link to a page within the documentation is considered a w
Determines the address used when running `mkdocs serve`. Setting this allows you to use another port, or allows you to make the service accessible over your local network by using the `0.0.0.0` address.
-As with all settings, you can set this from the command line, which can be usful, for example:
+As with all settings, you can set this from the command line, which can be useful, for example:
mkdocs serve --dev-addr=0.0.0.0:80 # Run on port 80, accessible over the local network.
@@ -217,30 +222,62 @@ As with all settings, you can set this from the command line, which can be usful
### markdown_extensions
-MkDocs uses the [Python Markdown][pymkd] library to translate Markdown files into HTML. Python Markdown supports a variety of [extensions][pymdk-extensions] that customize how pages are formatted. This setting lets you enable a list of extensions beyond the ones that MkDocs uses by default (`meta`, `toc`, `tables`, and `fenced_code`).
+MkDocs uses the [Python Markdown][pymkd] library to translate Markdown files
+into HTML. Python Markdown supports a variety of [extensions][pymdk-extensions]
+that customize how pages are formatted. This setting lets you enable a list of
+extensions beyond the ones that MkDocs uses by default (`meta`, `toc`, `tables`,
+and `fenced_code`).
For example, to enable the [SmartyPants typography extension][smarty], use:
- markdown_extensions: [smartypants]
+ markdown_extensions:
+ - smarty
-Some extensions provide configuration options of their own. If you would like to set any configuration options, then you can define `markdown_extensions` as a key/value mapping rather than a list. The key must be the name of the extension and the value must be a key/value pair (option name/option value) for the configuration option.
+Some extensions provide configuration options of their own. If you would like to
+set any configuration options, then you can nest a key/value mapping
+(`option_name: option value`) of any options that a given extension supports.
+See the documentation for the extension you are using to determine what options
+they support.
For example, to enable permalinks in the (included) `toc` extension, use:
markdown_extensions:
- toc:
+ - toc:
+ permalink: True
+
+Note that a colon (`:`) must follow the extension name (`toc`) and then on a new line
+the option name and value must be indented and separated by a colon. If you would like
+to define multiple options for a single extension, each option must be defined on
+a separate line:
+
+ markdown_extensions:
+ - toc:
permalink: True
+ separator: "_"
-Add additonal items for each extension. If you have no configuration options to set for a specific extension, then you may leave that extensions options blank:
+Add an additional item to the list for each extension. If you have no
+configuration options to set for a specific extension, then simply omit options
+for that extension:
markdown_extensions:
- smartypants:
- toc:
+ - smarty
+ - toc:
permalink: True
+ - sane_lists
+!!! note "See Also:"
+ The Python-Markdown documentation provides a [list of extensions][exts]
+ which are available out-of-the-box. For a list of configuration options
+ available for a given extension, see the documentation for that extension.
+
+ You may also install and use various [third party extensions][3rd]. Consult the
+ documentation provided by those extensions for installation instructions and
+ available configuration options.
**default**: `[]`
[pymdk-extensions]: http://pythonhosted.org/Markdown/extensions/index.html
[pymkd]: http://pythonhosted.org/Markdown/
-[smarty]: https://pypi.python.org/pypi/mdx_smartypants
+[smarty]: https://pythonhosted.org/Markdown/extensions/smarty.html
+[exts]:https://pythonhosted.org/Markdown/extensions/index.html
+[3rd]: https://github.com/waylan/Python-Markdown/wiki/Third-Party-Extensions
\ No newline at end of file
diff --git a/mkdocs.yml b/mkdocs.yml
index 930ccd95..e092f9ec 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -5,9 +5,9 @@ site_description: Project documentation with Markdown.
repo_url: https://github.com/mkdocs/mkdocs/
markdown_extensions:
- toc:
+ - toc:
permalink: ""
- admonition:
+ - admonition:
copyright: Copyright © 2014, <a href="https://twitter.com/_tomchristie">Tom Christie</a>.
google_analytics: ['UA-27795084-5', 'mkdocs.org']
diff --git a/mkdocs/build.py b/mkdocs/build.py
index 73d9a309..70f60e7a 100644
--- a/mkdocs/build.py
+++ b/mkdocs/build.py
@@ -17,30 +17,17 @@ import mkdocs
log = logging.getLogger(__name__)
-def convert_markdown(markdown_source, site_navigation=None, extensions=(), strict=False):
+def convert_markdown(markdown_source, config, site_navigation=None):
"""
- Convert the Markdown source file to HTML content, and additionally
- return the parsed table of contents, and a dictionary of any metadata
- that was specified in the Markdown file.
-
- `extensions` is an optional sequence of Python Markdown extensions to add
- to the default set.
+ Convert the Markdown source file to HTML as per the config and site_navigation.
+ Return a tuple of the HTML as a string, the parsed table of contents,
+ and a dictionary of any metadata that was specified in the Markdown file.
"""
-
- # Generate the HTML from the markdown source
- if isinstance(extensions, dict):
- user_extensions = list(extensions.keys())
- extension_configs = dict([(k, v) for k, v in extensions.items() if isinstance(v, dict)])
- else:
- user_extensions = list(extensions)
- extension_configs = {}
- builtin_extensions = ['meta', 'toc', 'tables', 'fenced_code']
- mkdocs_extensions = [RelativePathExtension(site_navigation, strict), ]
- extensions = utils.reduce_list(builtin_extensions + mkdocs_extensions + user_extensions)
-
- html_content, table_of_contents, meta = utils.convert_markdown(markdown_source, extensions, extension_configs)
-
- return (html_content, table_of_contents, meta)
+ return utils.convert_markdown(
+ markdown_source=markdown_source,
+ extensions=[RelativePathExtension(site_navigation, config['strict'])] + config['markdown_extensions'],
+ extension_configs=config['mdx_configs']
+ )
def get_global_context(nav, config):
@@ -182,8 +169,9 @@ def _build_page(page, config, site_navigation, env, dump_json):
# Process the markdown text
html_content, table_of_contents, meta = convert_markdown(
- input_content, site_navigation,
- extensions=config['markdown_extensions'], strict=config['strict']
+ markdown_source=input_content,
+ config=config,
+ site_navigation=site_navigation
)
context = get_global_context(site_navigation, config)
diff --git a/mkdocs/config/base.py b/mkdocs/config/base.py
index 870d00b7..53ffbad1 100644
--- a/mkdocs/config/base.py
+++ b/mkdocs/config/base.py
@@ -1,9 +1,9 @@
import six
import logging
import os
+import yaml
from mkdocs import exceptions
-from mkdocs import utils
from mkdocs.config import config_options, defaults
log = logging.getLogger('mkdocs.config')
@@ -82,7 +82,7 @@ class Config(six.moves.UserDict):
self.data.update(patch)
def load_file(self, config_file):
- return self.load_dict(utils.yaml_load(config_file))
+ return self.load_dict(yaml.load(config_file))
def _open_config_file(config_file):
diff --git a/mkdocs/config/config_options.py b/mkdocs/config/config_options.py
index c6ac2910..6c47197b 100644
--- a/mkdocs/config/config_options.py
+++ b/mkdocs/config/config_options.py
@@ -343,3 +343,56 @@ class NumPages(BaseConfigOption):
config[key_name] = len(config['pages']) > self.at_lest
except TypeError:
config[key_name] = False
+
+
+class Private(BaseConfigOption):
+ """
+ Private Config Option
+
+ A config option only for internal use. Raises an error if set by the user.
+ """
+
+ def run_validation(self, value):
+ raise ValidationError('For internal use only.')
+
+
+class MarkdownExtensions(BaseConfigOption):
+ """
+ Markdown Extensions Config Option
+
+ A list of extensions. If a list item contains extension configs,
+ those are set on the private setting passed to `configkey`. The
+ `builtins` keyword accepts a list of extensions which cannot be
+    overridden by the user. However, builtins can be duplicated to define
+ config options for them if desired.
+ """
+ def __init__(self, builtins=None, configkey='mdx_configs', **kwargs):
+ super(MarkdownExtensions, self).__init__(**kwargs)
+ self.builtins = builtins or []
+ self.configkey = configkey
+ self.configdata = {}
+
+ def run_validation(self, value):
+ if not isinstance(value, (list, tuple)):
+ raise ValidationError('Invalid Markdown Extensions configuration')
+ extensions = []
+ for item in value:
+ if isinstance(item, dict):
+ if len(item) > 1:
+ raise ValidationError('Invalid Markdown Extensions configuration')
+ ext, cfg = item.popitem()
+ extensions.append(ext)
+ if cfg is None:
+ continue
+ if not isinstance(cfg, dict):
+ raise ValidationError('Invalid config options for Markdown '
+ "Extension '{0}'.".format(ext))
+ self.configdata[ext] = cfg
+ elif isinstance(item, six.string_types):
+ extensions.append(item)
+ else:
+ raise ValidationError('Invalid Markdown Extensions configuration')
+ return utils.reduce_list(self.builtins + extensions)
+
+ def post_validation(self, config, key_name):
+ config[self.configkey] = self.configdata
diff --git a/mkdocs/config/defaults.py b/mkdocs/config/defaults.py
index 9d7bcd6d..2c2d929f 100644
--- a/mkdocs/config/defaults.py
+++ b/mkdocs/config/defaults.py
@@ -90,8 +90,12 @@ DEFAULT_SCHEMA = (
('include_next_prev', config_options.NumPages()),
# PyMarkdown extension names.
- ('markdown_extensions', config_options.Type(
- (list, dict, tuple), default=())),
+ ('markdown_extensions', config_options.MarkdownExtensions(
+ builtins=['meta', 'toc', 'tables', 'fenced_code'],
+ configkey='mdx_configs', default=[])),
+
+ # PyMarkdown Extension Configs. For internal use only.
+ ('mdx_configs', config_options.Private()),
# enabling strict mode causes MkDocs to stop the build when a problem is
# encountered rather than display an error.
diff --git a/mkdocs/utils.py b/mkdocs/utils.py
index de7c67a5..7ff7f07e 100644
--- a/mkdocs/utils.py
+++ b/mkdocs/utils.py
@@ -12,30 +12,8 @@ import shutil
import markdown
import six
-import yaml
from mkdocs import toc
-from mkdocs.legacy import OrderedDict
-
-
-def yaml_load(stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):
- """
- Make all YAML dictionaries load as ordered Dicts.
- http://stackoverflow.com/a/21912744/3609487
- """
- class OrderedLoader(Loader):
- pass
-
- def construct_mapping(loader, node):
- loader.flatten_mapping(node)
- return object_pairs_hook(loader.construct_pairs(node))
-
- OrderedLoader.add_constructor(
- yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
- construct_mapping
- )
-
- return yaml.load(stream, OrderedLoader)
def reduce_list(data_set):
@@ -279,12 +257,9 @@ def convert_markdown(markdown_source, extensions=None, extension_configs=None):
`extensions` is an optional sequence of Python Markdown extensions to add
to the default set.
"""
- extensions = extensions or []
- extension_configs = extension_configs or {}
-
md = markdown.Markdown(
- extensions=extensions,
- extension_configs=extension_configs
+ extensions=extensions or [],
+ extension_configs=extension_configs or {}
)
html_content = md.convert(markdown_source)
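Taken together, the patch changes `markdown_extensions` from a dict into a list whose items are either bare extension names or single-key mappings of per-extension options; `MarkdownExtensions.run_validation` splits those into the extension list plus the private `mdx_configs` dict, which `convert_markdown` then hands to Python-Markdown. A simplified sketch of that round trip (the parsing loop is a stand-in for the validator, not the mkdocs code itself):

```python
import yaml
import markdown

cfg = yaml.safe_load("""
markdown_extensions:
  - smarty
  - toc:
      permalink: True
""")

extensions, configs = [], {}
for item in cfg["markdown_extensions"]:
    if isinstance(item, dict):       # single-key mapping: extension -> options
        name, options = item.popitem()
        extensions.append(name)
        if options:
            configs[name] = options
    else:                            # bare string: extension with no options
        extensions.append(item)

md = markdown.Markdown(extensions=extensions, extension_configs=configs)
print(md.convert("# Heading"))       # permalink anchor added via toc's config
```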
| smartypants extension error
I'm new to mkdocs and have recently installed it. I'm running on Windows and am using Python 3.4.
I was walking through some of the mkdocs documentation and wanted to try to use smartypants. Initially I found out I had to install the extension itself, which wasn't mentioned in the mkdocs documentation (might be helpful to add that). After installing the extension with pip, I tried to use it with all 3 of the configuration formats below:
```
markdown_extensions: [smartypants]
```
```
markdown_extensions:
- smartypants
```
```
markdown_extensions:
smartypants:
```
Either way, when I try to run `mkdocs serve` I get the error below:
```
ERROR - Error building page index.md
[E 150512 11:21:27 build:221] Error building page index.md
Traceback (most recent call last):
File "C:\tools\python\Scripts\mkdocs-script.py", line 9, in <module>
load_entry_point('mkdocs==0.12.2', 'console_scripts', 'mkdocs')()
File "C:\tools\python\lib\site-packages\mkdocs\main.py", line 77, in run_main
main(cmd, args=sys.argv[2:], options=dict(opts))
File "C:\tools\python\lib\site-packages\mkdocs\main.py", line 49, in main
serve(config, options=options)
File "C:\tools\python\lib\site-packages\mkdocs\serve.py", line 22, in serve
builder()
File "C:\tools\python\lib\site-packages\mkdocs\serve.py", line 19, in builder
build(config, live_server=True)
File "C:\tools\python\lib\site-packages\mkdocs\build.py", line 252, in build
build_pages(config)
File "C:\tools\python\lib\site-packages\mkdocs\build.py", line 219, in build_pages
_build_page(page, config, site_navigation, env, dump_json)
File "C:\tools\python\lib\site-packages\mkdocs\build.py", line 173, in _build_page
extensions=config['markdown_extensions'], strict=config['strict']
File "C:\tools\python\lib\site-packages\mkdocs\build.py", line 36, in convert_markdown
extensions=extensions
File "C:\tools\python\lib\site-packages\markdown\__init__.py", line 159, in __init__
configs=kwargs.get('extension_configs', {}))
File "C:\tools\python\lib\site-packages\markdown\__init__.py", line 185, in registerExtensions
ext = self.build_extension(ext, configs.get(ext, {}))
File "C:\tools\python\lib\site-packages\markdown\__init__.py", line 291, in build_extension
return module.makeExtension(**configs)
File "C:\tools\python\lib\site-packages\mdx_smartypants\core.py", line 52, in makeExtension
return SmartypantsExt(configs=configs)
File "C:\tools\python\lib\site-packages\mdx_smartypants\core.py", line 41, in __init__
self.configs = dict(configs)
TypeError: 'NoneType' object is not iterable
``` | mkdocs/mkdocs | diff --git a/mkdocs/tests/build_tests.py b/mkdocs/tests/build_tests.py
index 470dac46..d474925b 100644
--- a/mkdocs/tests/build_tests.py
+++ b/mkdocs/tests/build_tests.py
@@ -9,16 +9,26 @@ import unittest
from six.moves import zip
import mock
-from mkdocs import build, nav
-from mkdocs.config import base as config_base, defaults as config_defaults
+from mkdocs import build, nav, config
from mkdocs.exceptions import MarkdownNotFound
from mkdocs.tests.base import dedent
+def load_config(cfg=None):
+ """ Helper to build a simple config for testing. """
+ cfg = cfg or {}
+ if 'site_name' not in cfg:
+ cfg['site_name'] = 'Example'
+ conf = config.base.Config(schema=config.defaults.DEFAULT_SCHEMA)
+ conf.load_dict(cfg)
+ assert(conf.validate() == ([], []))
+ return conf
+
+
class BuildTests(unittest.TestCase):
def test_empty_document(self):
- html, toc, meta = build.convert_markdown("")
+ html, toc, meta = build.convert_markdown("", load_config())
self.assertEqual(html, '')
self.assertEqual(len(list(toc)), 0)
@@ -28,7 +38,6 @@ class BuildTests(unittest.TestCase):
"""
Ensure that basic Markdown -> HTML and TOC works.
"""
-
html, toc, meta = build.convert_markdown(dedent("""
page_title: custom title
@@ -39,7 +48,7 @@ class BuildTests(unittest.TestCase):
# Heading 2
And some more text.
- """))
+ """), load_config())
expected_html = dedent("""
<h1 id="heading-1">Heading 1</h1>
@@ -62,25 +71,25 @@ class BuildTests(unittest.TestCase):
def test_convert_internal_link(self):
md_text = 'An [internal link](internal.md) to another document.'
expected = '<p>An <a href="internal/">internal link</a> to another document.</p>'
- html, toc, meta = build.convert_markdown(md_text)
+ html, toc, meta = build.convert_markdown(md_text, load_config())
self.assertEqual(html.strip(), expected.strip())
def test_convert_multiple_internal_links(self):
md_text = '[First link](first.md) [second link](second.md).'
expected = '<p><a href="first/">First link</a> <a href="second/">second link</a>.</p>'
- html, toc, meta = build.convert_markdown(md_text)
+ html, toc, meta = build.convert_markdown(md_text, load_config())
self.assertEqual(html.strip(), expected.strip())
def test_convert_internal_link_differing_directory(self):
md_text = 'An [internal link](../internal.md) to another document.'
expected = '<p>An <a href="../internal/">internal link</a> to another document.</p>'
- html, toc, meta = build.convert_markdown(md_text)
+ html, toc, meta = build.convert_markdown(md_text, load_config())
self.assertEqual(html.strip(), expected.strip())
def test_convert_internal_link_with_anchor(self):
md_text = 'An [internal link](internal.md#section1.1) to another document.'
expected = '<p>An <a href="internal/#section1.1">internal link</a> to another document.</p>'
- html, toc, meta = build.convert_markdown(md_text)
+ html, toc, meta = build.convert_markdown(md_text, load_config())
self.assertEqual(html.strip(), expected.strip())
def test_convert_internal_media(self):
@@ -103,7 +112,7 @@ class BuildTests(unittest.TestCase):
for (page, expected) in zip(site_navigation.walk_pages(), expected_results):
md_text = ''
- html, _, _ = build.convert_markdown(md_text, site_navigation=site_navigation)
+ html, _, _ = build.convert_markdown(md_text, load_config(), site_navigation=site_navigation)
self.assertEqual(html, template % expected)
def test_convert_internal_asbolute_media(self):
@@ -126,7 +135,7 @@ class BuildTests(unittest.TestCase):
for (page, expected) in zip(site_navigation.walk_pages(), expected_results):
md_text = ''
- html, _, _ = build.convert_markdown(md_text, site_navigation=site_navigation)
+ html, _, _ = build.convert_markdown(md_text, load_config(), site_navigation=site_navigation)
self.assertEqual(html, template % expected)
def test_dont_convert_code_block_urls(self):
@@ -146,11 +155,10 @@ class BuildTests(unittest.TestCase):
for page in site_navigation.walk_pages():
markdown = 'An HTML Anchor::\n\n <a href="index.md">My example link</a>\n'
- html, _, _ = build.convert_markdown(markdown, site_navigation=site_navigation)
+ html, _, _ = build.convert_markdown(markdown, load_config(), site_navigation=site_navigation)
self.assertEqual(dedent(html), expected)
def test_anchor_only_link(self):
-
pages = [
'index.md',
'internal.md',
@@ -161,13 +169,13 @@ class BuildTests(unittest.TestCase):
for page in site_navigation.walk_pages():
markdown = '[test](#test)'
- html, _, _ = build.convert_markdown(markdown, site_navigation=site_navigation)
+ html, _, _ = build.convert_markdown(markdown, load_config(), site_navigation=site_navigation)
self.assertEqual(html, '<p><a href="#test">test</a></p>')
def test_ignore_external_link(self):
md_text = 'An [external link](http://example.com/external.md).'
expected = '<p>An <a href="http://example.com/external.md">external link</a>.</p>'
- html, toc, meta = build.convert_markdown(md_text)
+ html, toc, meta = build.convert_markdown(md_text, load_config())
self.assertEqual(html.strip(), expected.strip())
def test_not_use_directory_urls(self):
@@ -177,20 +185,19 @@ class BuildTests(unittest.TestCase):
'internal.md',
]
site_navigation = nav.SiteNavigation(pages, use_directory_urls=False)
- html, toc, meta = build.convert_markdown(md_text, site_navigation=site_navigation)
+ html, toc, meta = build.convert_markdown(md_text, load_config(), site_navigation=site_navigation)
self.assertEqual(html.strip(), expected.strip())
def test_markdown_table_extension(self):
"""
Ensure that the table extension is supported.
"""
-
html, toc, meta = build.convert_markdown(dedent("""
First Header | Second Header
-------------- | --------------
Content Cell 1 | Content Cell 2
Content Cell 3 | Content Cell 4
- """))
+ """), load_config())
expected_html = dedent("""
<table>
@@ -219,12 +226,11 @@ class BuildTests(unittest.TestCase):
"""
Ensure that the fenced code extension is supported.
"""
-
html, toc, meta = build.convert_markdown(dedent("""
```
print 'foo'
```
- """))
+ """), load_config())
expected_html = dedent("""
<pre><code>print 'foo'\n</code></pre>
@@ -241,24 +247,29 @@ class BuildTests(unittest.TestCase):
# Check that the plugin is not active when not requested.
expected_without_smartstrong = "<p>foo<strong>bar</strong>baz</p>"
- html_base, _, _ = build.convert_markdown(md_input)
+ html_base, _, _ = build.convert_markdown(md_input, load_config())
self.assertEqual(html_base.strip(), expected_without_smartstrong)
# Check that the plugin is active when requested.
+ cfg = load_config({
+ 'markdown_extensions': ['smart_strong']
+ })
expected_with_smartstrong = "<p>foo__bar__baz</p>"
- html_ext, _, _ = build.convert_markdown(md_input, extensions=['smart_strong'])
+ html_ext, _, _ = build.convert_markdown(md_input, cfg)
self.assertEqual(html_ext.strip(), expected_with_smartstrong)
def test_markdown_duplicate_custom_extension(self):
"""
Duplicated extension names should not cause problems.
"""
+ cfg = load_config({
+ 'markdown_extensions': ['toc']
+ })
md_input = "foo"
- html_ext, _, _ = build.convert_markdown(md_input, ['toc'])
+ html_ext, _, _ = build.convert_markdown(md_input, cfg)
self.assertEqual(html_ext.strip(), '<p>foo</p>')
def test_copying_media(self):
-
docs_dir = tempfile.mkdtemp()
site_dir = tempfile.mkdtemp()
try:
@@ -281,14 +292,11 @@ class BuildTests(unittest.TestCase):
os.mkdir(os.path.join(docs_dir, '.git'))
open(os.path.join(docs_dir, '.git/hidden'), 'w').close()
- conf = config_base.Config(schema=config_defaults.DEFAULT_SCHEMA)
- conf.load_dict({
- 'site_name': 'Example',
+ cfg = load_config({
'docs_dir': docs_dir,
'site_dir': site_dir
})
- conf.validate()
- build.build(conf)
+ build.build(cfg)
# Verify only the markdown (coverted to html) and the image are copied.
self.assertTrue(os.path.isfile(os.path.join(site_dir, 'index.html')))
@@ -308,8 +316,8 @@ class BuildTests(unittest.TestCase):
site_nav = nav.SiteNavigation(pages)
valid = "[test](internal.md)"
- build.convert_markdown(valid, site_nav, strict=False)
- build.convert_markdown(valid, site_nav, strict=True)
+ build.convert_markdown(valid, load_config({'strict': False}), site_nav)
+ build.convert_markdown(valid, load_config({'strict': True}), site_nav)
def test_strict_mode_invalid(self):
pages = [
@@ -320,24 +328,24 @@ class BuildTests(unittest.TestCase):
site_nav = nav.SiteNavigation(pages)
invalid = "[test](bad_link.md)"
- build.convert_markdown(invalid, site_nav, strict=False)
+ build.convert_markdown(invalid, load_config({'strict': False}), site_nav)
self.assertRaises(
MarkdownNotFound,
- build.convert_markdown, invalid, site_nav, strict=True)
+ build.convert_markdown, invalid, load_config({'strict': True}), site_nav)
def test_extension_config(self):
"""
Test that a dictionary of 'markdown_extensions' is recognized as
both a list of extensions and a dictionary of extnesion configs.
"""
- markdown_extensions = {
- 'toc': {'permalink': True},
- 'meta': None # This gets ignored as it is an invalid config
- }
+ cfg = load_config({
+ 'markdown_extensions': [{'toc': {'permalink': True}}]
+ })
+
html, toc, meta = build.convert_markdown(dedent("""
# A Header
- """), extensions=markdown_extensions)
+ """), cfg)
expected_html = dedent("""
<h1 id="a-header">A Header<a class="headerlink" href="#a-header" title="Permanent link">¶</a></h1>
@@ -348,17 +356,14 @@ class BuildTests(unittest.TestCase):
def test_extra_context(self):
# Same as the default schema, but don't verify the docs_dir exists.
- config = config_base.Config(schema=config_defaults.DEFAULT_SCHEMA)
- config.load_dict({
+ cfg = load_config({
'site_name': "Site",
'extra': {
'a': 1
}
})
- self.assertEqual(config.validate(), ([], []))
-
- context = build.get_global_context(mock.Mock(), config)
+ context = build.get_global_context(mock.Mock(), cfg)
self.assertEqual(context['config']['extra'], {
'a': 1
diff --git a/mkdocs/tests/config/config_options_tests.py b/mkdocs/tests/config/config_options_tests.py
index a499ece5..c4a42f94 100644
--- a/mkdocs/tests/config/config_options_tests.py
+++ b/mkdocs/tests/config/config_options_tests.py
@@ -325,3 +325,170 @@ class NumPagesTest(unittest.TestCase):
'key': True,
'pages': None
}, config)
+
+
+class PrivateTest(unittest.TestCase):
+
+ def test_defined(self):
+
+ option = config_options.Private()
+ self.assertRaises(config_options.ValidationError,
+ option.validate, 'somevalue')
+
+
+class MarkdownExtensionsTest(unittest.TestCase):
+
+ def test_simple_list(self):
+ option = config_options.MarkdownExtensions()
+ config = {
+ 'markdown_extensions': ['foo', 'bar']
+ }
+ config['markdown_extensions'] = option.validate(config['markdown_extensions'])
+ option.post_validation(config, 'markdown_extensions')
+ self.assertEqual({
+ 'markdown_extensions': ['foo', 'bar'],
+ 'mdx_configs': {}
+ }, config)
+
+ def test_list_dicts(self):
+ option = config_options.MarkdownExtensions()
+ config = {
+ 'markdown_extensions': [
+ {'foo': {'foo_option': 'foo value'}},
+ {'bar': {'bar_option': 'bar value'}},
+ {'baz': None}
+ ]
+ }
+ config['markdown_extensions'] = option.validate(config['markdown_extensions'])
+ option.post_validation(config, 'markdown_extensions')
+ self.assertEqual({
+ 'markdown_extensions': ['foo', 'bar', 'baz'],
+ 'mdx_configs': {
+ 'foo': {'foo_option': 'foo value'},
+ 'bar': {'bar_option': 'bar value'}
+ }
+ }, config)
+
+ def test_mixed_list(self):
+ option = config_options.MarkdownExtensions()
+ config = {
+ 'markdown_extensions': [
+ 'foo',
+ {'bar': {'bar_option': 'bar value'}}
+ ]
+ }
+ config['markdown_extensions'] = option.validate(config['markdown_extensions'])
+ option.post_validation(config, 'markdown_extensions')
+ self.assertEqual({
+ 'markdown_extensions': ['foo', 'bar'],
+ 'mdx_configs': {
+ 'bar': {'bar_option': 'bar value'}
+ }
+ }, config)
+
+ def test_builtins(self):
+ option = config_options.MarkdownExtensions(builtins=['meta', 'toc'])
+ config = {
+ 'markdown_extensions': ['foo', 'bar']
+ }
+ config['markdown_extensions'] = option.validate(config['markdown_extensions'])
+ option.post_validation(config, 'markdown_extensions')
+ self.assertEqual({
+ 'markdown_extensions': ['meta', 'toc', 'foo', 'bar'],
+ 'mdx_configs': {}
+ }, config)
+
+ def test_duplicates(self):
+ option = config_options.MarkdownExtensions(builtins=['meta', 'toc'])
+ config = {
+ 'markdown_extensions': ['meta', 'toc']
+ }
+ config['markdown_extensions'] = option.validate(config['markdown_extensions'])
+ option.post_validation(config, 'markdown_extensions')
+ self.assertEqual({
+ 'markdown_extensions': ['meta', 'toc'],
+ 'mdx_configs': {}
+ }, config)
+
+ def test_builtins_config(self):
+ option = config_options.MarkdownExtensions(builtins=['meta', 'toc'])
+ config = {
+ 'markdown_extensions': [
+ {'toc': {'permalink': True}}
+ ]
+ }
+ config['markdown_extensions'] = option.validate(config['markdown_extensions'])
+ option.post_validation(config, 'markdown_extensions')
+ self.assertEqual({
+ 'markdown_extensions': ['meta', 'toc'],
+ 'mdx_configs': {'toc': {'permalink': True}}
+ }, config)
+
+ def test_configkey(self):
+ option = config_options.MarkdownExtensions(configkey='bar')
+ config = {
+ 'markdown_extensions': [
+ {'foo': {'foo_option': 'foo value'}}
+ ]
+ }
+ config['markdown_extensions'] = option.validate(config['markdown_extensions'])
+ option.post_validation(config, 'markdown_extensions')
+ self.assertEqual({
+ 'markdown_extensions': ['foo'],
+ 'bar': {
+ 'foo': {'foo_option': 'foo value'}
+ }
+ }, config)
+
+ def test_none(self):
+ option = config_options.MarkdownExtensions(default=[])
+ config = {
+ 'markdown_extensions': None
+ }
+ config['markdown_extensions'] = option.validate(config['markdown_extensions'])
+ option.post_validation(config, 'markdown_extensions')
+ self.assertEqual({
+ 'markdown_extensions': [],
+ 'mdx_configs': {}
+ }, config)
+
+ def test_not_list(self):
+ option = config_options.MarkdownExtensions()
+ self.assertRaises(config_options.ValidationError,
+ option.validate, 'not a list')
+
+ def test_invalid_config_option(self):
+ option = config_options.MarkdownExtensions()
+ config = {
+ 'markdown_extensions': [
+ {'foo': 'not a dict'}
+ ]
+ }
+ self.assertRaises(
+ config_options.ValidationError,
+ option.validate, config['markdown_extensions']
+ )
+
+ def test_invalid_config_item(self):
+ option = config_options.MarkdownExtensions()
+ config = {
+ 'markdown_extensions': [
+ ['not a dict']
+ ]
+ }
+ self.assertRaises(
+ config_options.ValidationError,
+ option.validate, config['markdown_extensions']
+ )
+
+ def test_invalid_dict_item(self):
+ option = config_options.MarkdownExtensions()
+ config = {
+ 'markdown_extensions': [
+ {'key1': 'value', 'key2': 'too many keys'}
+ ]
+ }
+ self.assertRaises(
+ config_options.ValidationError,
+ option.validate, config['markdown_extensions']
+ )
diff --git a/mkdocs/tests/utils_tests.py b/mkdocs/tests/utils_tests.py
index 41d066e6..44af6799 100644
--- a/mkdocs/tests/utils_tests.py
+++ b/mkdocs/tests/utils_tests.py
@@ -5,7 +5,6 @@ import os
import unittest
from mkdocs import nav, utils
-from mkdocs.tests.base import dedent
class UtilsTests(unittest.TestCase):
@@ -70,32 +69,6 @@ class UtilsTests(unittest.TestCase):
urls = utils.create_media_urls(site_navigation, [path])
self.assertEqual(urls[0], expected_result)
- def test_yaml_load(self):
- try:
- from collections import OrderedDict
- except ImportError:
- # Don't test if can't import OrderdDict
- # Exception can be removed when Py26 support is removed
- return
-
- yaml_text = dedent('''
- test:
- key1: 1
- key2: 2
- key3: 3
- key4: 4
- key5: 5
- key5: 6
- key3: 7
- ''')
-
- self.assertEqual(
- utils.yaml_load(yaml_text),
- OrderedDict([('test', OrderedDict([('key1', 1), ('key2', 2),
- ('key3', 7), ('key4', 4),
- ('key5', 6)]))])
- )
-
def test_reduce_list(self):
self.assertEqual(
utils.reduce_list([1, 2, 3, 4, 5, 5, 2, 4, 6, 7, 8]),
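The truncated `test_reduce_list` hunk above pins down `utils.reduce_list`, the order-preserving de-duplication that `MarkdownExtensions.run_validation` relies on when merging builtins with user extensions (see `test_duplicates`, where `['meta', 'toc']` stays `['meta', 'toc']`). A sketch of that contract, not necessarily the mkdocs implementation:

```python
def reduce_list(data_set):
    """Drop duplicates while keeping the first occurrence of each item."""
    seen = set()
    result = []
    for item in data_set:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result

assert reduce_list([1, 2, 3, 4, 5, 5, 2, 4, 6, 7, 8]) == [1, 2, 3, 4, 5, 6, 7, 8]
```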
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 7
} | 0.12 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio",
"mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/project.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | click==8.1.8
coverage==7.8.0
exceptiongroup==1.2.2
execnet==2.1.1
ghp-import==2.1.0
importlib_metadata==8.6.1
iniconfig==2.1.0
Jinja2==3.1.6
livereload==2.7.1
Markdown==3.7
MarkupSafe==3.0.2
-e git+https://github.com/mkdocs/mkdocs.git@93a181a208af0440f44e7f51081b9df9d9f30568#egg=mkdocs
mock==5.2.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
PyYAML==6.0.2
six==1.17.0
tomli==2.2.1
tornado==6.4.2
typing_extensions==4.13.0
zipp==3.21.0
| name: mkdocs
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- click==8.1.8
- coverage==7.8.0
- exceptiongroup==1.2.2
- execnet==2.1.1
- ghp-import==2.1.0
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- jinja2==3.1.6
- livereload==2.7.1
- markdown==3.7
- markupsafe==3.0.2
- mock==5.2.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- pyyaml==6.0.2
- six==1.17.0
- tomli==2.2.1
- tornado==6.4.2
- typing-extensions==4.13.0
- zipp==3.21.0
prefix: /opt/conda/envs/mkdocs
| [
"mkdocs/tests/config/config_options_tests.py::PrivateTest::test_defined",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_builtins",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_builtins_config",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_configkey",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_duplicates",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_invalid_config_item",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_invalid_config_option",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_invalid_dict_item",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_list_dicts",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_mixed_list",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_none",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_not_list",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_simple_list"
]
| [
"mkdocs/tests/build_tests.py::BuildTests::test_anchor_only_link",
"mkdocs/tests/build_tests.py::BuildTests::test_convert_internal_asbolute_media",
"mkdocs/tests/build_tests.py::BuildTests::test_convert_internal_link",
"mkdocs/tests/build_tests.py::BuildTests::test_convert_internal_link_differing_directory",
"mkdocs/tests/build_tests.py::BuildTests::test_convert_internal_link_with_anchor",
"mkdocs/tests/build_tests.py::BuildTests::test_convert_internal_media",
"mkdocs/tests/build_tests.py::BuildTests::test_convert_markdown",
"mkdocs/tests/build_tests.py::BuildTests::test_convert_multiple_internal_links",
"mkdocs/tests/build_tests.py::BuildTests::test_copying_media",
"mkdocs/tests/build_tests.py::BuildTests::test_dont_convert_code_block_urls",
"mkdocs/tests/build_tests.py::BuildTests::test_empty_document",
"mkdocs/tests/build_tests.py::BuildTests::test_extension_config",
"mkdocs/tests/build_tests.py::BuildTests::test_ignore_external_link",
"mkdocs/tests/build_tests.py::BuildTests::test_markdown_custom_extension",
"mkdocs/tests/build_tests.py::BuildTests::test_markdown_duplicate_custom_extension",
"mkdocs/tests/build_tests.py::BuildTests::test_markdown_fenced_code_extension",
"mkdocs/tests/build_tests.py::BuildTests::test_markdown_table_extension",
"mkdocs/tests/build_tests.py::BuildTests::test_not_use_directory_urls",
"mkdocs/tests/build_tests.py::BuildTests::test_strict_mode_invalid",
"mkdocs/tests/build_tests.py::BuildTests::test_strict_mode_valid",
"mkdocs/tests/utils_tests.py::UtilsTests::test_create_media_urls"
]
| [
"mkdocs/tests/build_tests.py::BuildTests::test_extra_context",
"mkdocs/tests/config/config_options_tests.py::BaseConfigOptionTest::test_default",
"mkdocs/tests/config/config_options_tests.py::BaseConfigOptionTest::test_empty",
"mkdocs/tests/config/config_options_tests.py::BaseConfigOptionTest::test_replace_default",
"mkdocs/tests/config/config_options_tests.py::BaseConfigOptionTest::test_required",
"mkdocs/tests/config/config_options_tests.py::BaseConfigOptionTest::test_required_no_default",
"mkdocs/tests/config/config_options_tests.py::TypeTest::test_length",
"mkdocs/tests/config/config_options_tests.py::TypeTest::test_multiple_types",
"mkdocs/tests/config/config_options_tests.py::TypeTest::test_single_type",
"mkdocs/tests/config/config_options_tests.py::URLTest::test_invalid",
"mkdocs/tests/config/config_options_tests.py::URLTest::test_invalid_url",
"mkdocs/tests/config/config_options_tests.py::URLTest::test_valid_url",
"mkdocs/tests/config/config_options_tests.py::RepoURLTest::test_repo_name_bitbucket",
"mkdocs/tests/config/config_options_tests.py::RepoURLTest::test_repo_name_custom",
"mkdocs/tests/config/config_options_tests.py::RepoURLTest::test_repo_name_github",
"mkdocs/tests/config/config_options_tests.py::DirTest::test_file",
"mkdocs/tests/config/config_options_tests.py::DirTest::test_incorrect_type_attribute_error",
"mkdocs/tests/config/config_options_tests.py::DirTest::test_incorrect_type_type_error",
"mkdocs/tests/config/config_options_tests.py::DirTest::test_missing_dir",
"mkdocs/tests/config/config_options_tests.py::DirTest::test_missing_dir_but_required",
"mkdocs/tests/config/config_options_tests.py::DirTest::test_valid_dir",
"mkdocs/tests/config/config_options_tests.py::SiteDirTest::test_doc_dir_in_site_dir",
"mkdocs/tests/config/config_options_tests.py::ThemeTest::test_theme",
"mkdocs/tests/config/config_options_tests.py::ThemeTest::test_theme_invalid",
"mkdocs/tests/config/config_options_tests.py::ExtrasTest::test_empty",
"mkdocs/tests/config/config_options_tests.py::ExtrasTest::test_invalid",
"mkdocs/tests/config/config_options_tests.py::ExtrasTest::test_provided",
"mkdocs/tests/config/config_options_tests.py::PagesTest::test_invalid_config",
"mkdocs/tests/config/config_options_tests.py::PagesTest::test_invalid_type",
"mkdocs/tests/config/config_options_tests.py::PagesTest::test_provided",
"mkdocs/tests/config/config_options_tests.py::PagesTest::test_provided_dict",
"mkdocs/tests/config/config_options_tests.py::PagesTest::test_provided_empty",
"mkdocs/tests/config/config_options_tests.py::NumPagesTest::test_invalid_pages",
"mkdocs/tests/config/config_options_tests.py::NumPagesTest::test_many_pages",
"mkdocs/tests/config/config_options_tests.py::NumPagesTest::test_one_page",
"mkdocs/tests/config/config_options_tests.py::NumPagesTest::test_provided",
"mkdocs/tests/utils_tests.py::UtilsTests::test_get_themes",
"mkdocs/tests/utils_tests.py::UtilsTests::test_html_path",
"mkdocs/tests/utils_tests.py::UtilsTests::test_is_html_file",
"mkdocs/tests/utils_tests.py::UtilsTests::test_is_markdown_file",
"mkdocs/tests/utils_tests.py::UtilsTests::test_nest_paths",
"mkdocs/tests/utils_tests.py::UtilsTests::test_reduce_list",
"mkdocs/tests/utils_tests.py::UtilsTests::test_url_path"
]
| []
| BSD 2-Clause "Simplified" License | 138 | [
"mkdocs/config/defaults.py",
"docs/user-guide/configuration.md",
"mkdocs/config/base.py",
"mkdocs.yml",
"mkdocs/build.py",
"mkdocs/utils.py",
"mkdocs/config/config_options.py"
]
| [
"mkdocs/config/defaults.py",
"docs/user-guide/configuration.md",
"mkdocs/config/base.py",
"mkdocs.yml",
"mkdocs/build.py",
"mkdocs/utils.py",
"mkdocs/config/config_options.py"
]
|
drincruz__python-trie-20 | 57d83c22c0f5eb9faa10a54c138c4e2e5a0f88d8 | 2015-05-15 12:31:04 | 57d83c22c0f5eb9faa10a54c138c4e2e5a0f88d8 | diff --git a/README.md b/README.md
index 77303a0..ba35836 100644
--- a/README.md
+++ b/README.md
@@ -19,7 +19,7 @@ Example
>>> t.contains_word("dogg")
False
>>> t.root
- {'d': {'o': {'g': {'!THIS_IS_THE_END!': {'word': 'dog'}, 's': {'!THIS_IS_THE_END!': {'word': 'dogs'}}, 'g': {'y': {'!THIS_IS_THE_END!': {'word': 'doggy'}}}}}}}
+ {'d': {'o': {'g': {'\0': {'word': 'dog'}, 's': {'\0': {'word': 'dogs'}}, 'g': {'y': {'\0': {'word': 'doggy'}}}}}}}
Unit Testing
diff --git a/trie/trie.py b/trie/trie.py
index 96f3fdb..4bd31f7 100644
--- a/trie/trie.py
+++ b/trie/trie.py
@@ -10,7 +10,7 @@ class TriePy(object):
"""
# A terminator to represent and end of a path
- __TRIE_TERMINATOR = '!THIS_IS_THE_END!'
+ __TRIE_TERMINATOR = '\0'
def __init__(self):
"""
| change the TRIE_TERMINATOR default
'!THIS_IS_THE_END!' was _really_ just an example. Using a \0 terminator makes the most sense. Let's do that. | drincruz/python-trie | diff --git a/tests/test_trie.py b/tests/test_trie.py
index 0b3aa6b..7b1fa5e 100644
--- a/tests/test_trie.py
+++ b/tests/test_trie.py
@@ -15,7 +15,7 @@ def test_abc_add_word():
my_test = {"a":
{"b":
{"c":
- {"!THIS_IS_THE_END!":
+ {"\0":
{"word": "abc"}
}
}
@@ -34,8 +34,8 @@ def test_asterisk_add_word():
my_test = {"*":
{"x":
{"*":
- { "z":
- {"!THIS_IS_THE_END!":
+ {"z":
+ {"\0":
{"word": "*x*z"}
}
}
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 2
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
nose==1.3.7
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
-e git+https://github.com/drincruz/python-trie.git@57d83c22c0f5eb9faa10a54c138c4e2e5a0f88d8#egg=TriePy
| name: python-trie
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- nose==1.3.7
prefix: /opt/conda/envs/python-trie
| [
"tests/test_trie.py::test_abc_add_word",
"tests/test_trie.py::test_asterisk_add_word"
]
| []
| [
"tests/test_trie.py::test_contains_true",
"tests/test_trie.py::test_contains_false"
]
| []
| BSD 3-Clause "New" or "Revised" License | 139 | [
"README.md",
"trie/trie.py"
]
| [
"README.md",
"trie/trie.py"
]
|
|
yolothreat__utilitybelt-53 | ef2526c4e15c886c2a68162b67e7d7fb25ec9ee3 | 2015-05-15 17:38:13 | ef2526c4e15c886c2a68162b67e7d7fb25ec9ee3 | diff --git a/.gitignore b/.gitignore
index c26136f..601e383 100644
--- a/.gitignore
+++ b/.gitignore
@@ -57,6 +57,7 @@ venv
## Data
data/*
+*.json
# swap files
*.swp
diff --git a/utilitybelt/utilitybelt.py b/utilitybelt/utilitybelt.py
index 180865c..8307e7a 100644
--- a/utilitybelt/utilitybelt.py
+++ b/utilitybelt/utilitybelt.py
@@ -143,6 +143,24 @@ def is_url(url):
return bool(re.match(re_url, url))
+def is_hash(fhash):
+ """Returns true for valid hashes, false for invalid."""
+
+ # Intentionally doing if/else statement for ease of testing and reading
+ if re.match(re_md5, fhash):
+ return True
+ elif re.match(re_sha1, fhash):
+ return True
+ elif re.match(re_sha256, fhash):
+ return True
+ elif re.match(re_sha512, fhash):
+ return True
+ elif re.match(re_ssdeep, fhash):
+ return True
+ else:
+ return False
+
+
def ip_to_geo(ipaddress):
"""Convert IP to Geographic Information"""
@@ -245,7 +263,10 @@ def vt_ip_check(ip, vt_api):
url = 'https://www.virustotal.com/vtapi/v2/ip-address/report'
parameters = {'ip': ip, 'apikey': vt_api}
response = requests.get(url, params=parameters)
- return response.json()
+ try:
+ return response.json()
+ except ValueError:
+ return None
def vt_name_check(domain, vt_api):
@@ -256,7 +277,24 @@ def vt_name_check(domain, vt_api):
url = 'https://www.virustotal.com/vtapi/v2/domain/report'
parameters = {'domain': domain, 'apikey': vt_api}
response = requests.get(url, params=parameters)
- return response.json()
+ try:
+ return response.json()
+ except ValueError:
+ return None
+
+
+def vt_hash_check(fhash, vt_api):
+ """Checks VirusTotal for occurrences of a file hash"""
+ if not is_hash(fhash):
+ return None
+
+ url = 'https://www.virustotal.com/vtapi/v2/file/report'
+ parameters = {'resource': fhash, 'apikey': vt_api}
+ response = requests.get(url, params=parameters)
+ try:
+ return response.json()
+ except ValueError:
+ return None
def ipinfo_ip_check(ip):
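The `try/except ValueError` around `response.json()` is what turns VirusTotal's empty rate-limit replies into `None` instead of a crash, and `is_hash` short-circuits obviously invalid input before any request is made. A usage sketch (the API key is a placeholder):

```python
from utilitybelt import utilitybelt as ub

assert ub.is_hash("d41d8cd98f00b204e9800998ecf8427e")   # MD5 of the empty string
assert not ub.is_hash("KilroyWasHere")

report = ub.vt_hash_check("d41d8cd98f00b204e9800998ecf8427e", "YOUR_VT_API_KEY")
if report is None:
    print("invalid hash, rate-limited, or non-JSON response")
else:
    print(report.get("positives"))
```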
| `vt_name_check()` and `vt_ip_check()` do not handle errors properly
If VirusTotal returns an error (such as exceeding rate limits), these functions throw a `JSONDecodeError`. | yolothreat/utilitybelt | diff --git a/utilitybelt/tests/tests.py b/utilitybelt/tests/tests.py
index b826966..ea1f525 100644
--- a/utilitybelt/tests/tests.py
+++ b/utilitybelt/tests/tests.py
@@ -1,4 +1,5 @@
import os
+import time
import unittest
from utilitybelt import utilitybelt as ub
@@ -94,6 +95,21 @@ class TestUB(unittest.TestCase):
self.assertFalse(ub.is_reserved("8.8.4.4"))
self.assertFalse(ub.is_reserved("192.30.252.131"))
+ def test_is_hash(self):
+ # all hashes of the empty string
+ self.assertIsInstance(ub.is_hash("d41d8cd98f00b204e9800998ecf8427e"), bool)
+ # MD5
+ self.assertTrue(ub.is_hash("d41d8cd98f00b204e9800998ecf8427e"))
+ # SHA1
+ self.assertTrue(ub.is_hash("da39a3ee5e6b4b0d3255bfef95601890afd80709"))
+ # SHA256
+ self.assertTrue(ub.is_hash("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"))
+ # SHA512
+ self.assertTrue(ub.is_hash("cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e"))
+ # ssdeep
+ self.assertTrue(ub.is_hash("96:EQOJvOl4ab3hhiNFXc4wwcweomr0cNJDBoqXjmAHKX8dEt001nfEhVIuX0dDcs:3mzpAsZpprbshfu3oujjdENdp21"))
+ self.assertFalse(ub.is_hash("KilroyWasHere"))
+
def test_vt_ip_check(self):
vt_api = os.environ["VT_API"]
self.assertIsNone(ub.vt_ip_check('asdf', vt_api))
@@ -108,6 +124,7 @@ class TestUB(unittest.TestCase):
if resolution['hostname'] == "github.com":
is_gh = True
self.assertTrue(is_gh)
+ time.sleep(15) # VT rate limiting
def test_vt_name_check(self):
vt_api = os.environ["VT_API"]
@@ -120,6 +137,37 @@ class TestUB(unittest.TestCase):
if resolution['ip_address'] == '192.30.252.130':
is_gh = True
self.assertTrue(is_gh)
+ time.sleep(15) # VT rate limiting
+
+ def test_vt_hash_check(self):
+ vt_api = os.environ["VT_API"]
+ self.assertIsNone(ub.vt_hash_check('asdf', vt_api))
+ vt_hash_data = ub.vt_hash_check("fe03b4181707f1ea1f3c69dc0a9904181c6fce91", vt_api)
+ self.assertIsInstance(vt_hash_data, dict)
+ self.assertIn('resource', vt_hash_data)
+ self.assertIn('positives', vt_hash_data)
+ self.assertGreater(vt_hash_data['positives'], 0)
+ time.sleep(15) # VT rate limiting
+ vt_hash_data = ub.vt_hash_check("d41d8cd98f00b204e9800998ecf8427e", vt_api)
+ self.assertIn('positives', vt_hash_data)
+ self.assertEqual(vt_hash_data['positives'], 0)
+ time.sleep(15) # VT rate limiting
+
+ def test_vt_rate_limiting(self):
+ vt_api = os.environ["VT_API"]
+ # Exceed 4x in 60 seconds
+ data = ub.vt_hash_check("d41d8cd98f00b204e9800998ecf8427e", vt_api)
+ self.assertIsInstance(data, dict)
+ data = ub.vt_hash_check("d41d8cd98f00b204e9800998ecf8427e", vt_api)
+ data = ub.vt_hash_check("d41d8cd98f00b204e9800998ecf8427e", vt_api)
+ data = ub.vt_hash_check("d41d8cd98f00b204e9800998ecf8427e", vt_api)
+ data = ub.vt_name_check("example.org", vt_api)
+ self.assertIsNone(data)
+ data = ub.vt_ip_check("192.30.252.130", vt_api)
+ self.assertIsNone(data)
+ data = ub.vt_hash_check("d41d8cd98f00b204e9800998ecf8427e", vt_api)
+ self.assertIsNone(data)
+ time.sleep(15)
def test_ipinfo(self):
self.assertIsNone(ub.ipinfo_ip_check('asdf'))
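The `time.sleep(15)` calls exist because VirusTotal's public API allows 4 requests per minute; `test_vt_rate_limiting` deliberately bursts past that to confirm the checks degrade to `None`. A client wanting to stay under the cap could throttle before each call — a sliding-window sketch, not part of utilitybelt:

```python
import time
from collections import deque

class VTThrottle:
    """Block just long enough to stay within `limit` calls per `period` seconds."""
    def __init__(self, limit=4, period=60.0):
        self.limit, self.period = limit, period
        self.stamps = deque()

    def wait(self):
        now = time.monotonic()
        while self.stamps and now - self.stamps[0] > self.period:
            self.stamps.popleft()            # forget calls outside the window
        if len(self.stamps) >= self.limit:
            time.sleep(self.period - (now - self.stamps[0]))
        self.stamps.append(time.monotonic())
```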
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 2
} | 0.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc libgeoip-dev"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | beautifulsoup4==4.13.3
certifi==2025.1.31
cfgv==3.4.0
charset-normalizer==3.4.1
-e git+https://github.com/yolothreat/utilitybelt.git@ef2526c4e15c886c2a68162b67e7d7fb25ec9ee3#egg=cnd_utilitybelt
coverage==7.8.0
coveralls==4.0.1
distlib==0.3.9
docopt==0.6.2
exceptiongroup==1.2.2
filelock==3.18.0
identify==2.6.9
idna==3.10
iniconfig==2.1.0
netaddr==1.3.0
nodeenv==1.9.1
packaging==24.2
platformdirs==4.3.7
pluggy==1.5.0
pre_commit==4.2.0
pygeoip==0.3.2
pytest==8.3.5
pytest-cov==6.0.0
PyYAML==6.0.2
requests==2.32.3
soupsieve==2.6
tomli==2.2.1
typing_extensions==4.13.0
urllib3==2.3.0
virtualenv==20.29.3
| name: utilitybelt
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- beautifulsoup4==4.13.3
- certifi==2025.1.31
- cfgv==3.4.0
- charset-normalizer==3.4.1
- coverage==7.8.0
- coveralls==4.0.1
- distlib==0.3.9
- docopt==0.6.2
- exceptiongroup==1.2.2
- filelock==3.18.0
- identify==2.6.9
- idna==3.10
- iniconfig==2.1.0
- netaddr==1.3.0
- nodeenv==1.9.1
- packaging==24.2
- platformdirs==4.3.7
- pluggy==1.5.0
- pre-commit==4.2.0
- pygeoip==0.3.2
- pytest==8.3.5
- pytest-cov==6.0.0
- pyyaml==6.0.2
- requests==2.32.3
- soupsieve==2.6
- tomli==2.2.1
- typing-extensions==4.13.0
- urllib3==2.3.0
- virtualenv==20.29.3
prefix: /opt/conda/envs/utilitybelt
| [
"utilitybelt/tests/tests.py::TestUB::test_is_hash"
]
| [
"utilitybelt/tests/tests.py::TestUB::test_domain_to_geo",
"utilitybelt/tests/tests.py::TestUB::test_dshield_ip_check",
"utilitybelt/tests/tests.py::TestUB::test_ipinfo",
"utilitybelt/tests/tests.py::TestUB::test_ipvoid_check",
"utilitybelt/tests/tests.py::TestUB::test_reverse_dns",
"utilitybelt/tests/tests.py::TestUB::test_reverse_dns_sna",
"utilitybelt/tests/tests.py::TestUB::test_urlvoid_check",
"utilitybelt/tests/tests.py::TestUB::test_urlvoid_ip_check",
"utilitybelt/tests/tests.py::TestUB::test_vt_hash_check",
"utilitybelt/tests/tests.py::TestUB::test_vt_ip_check",
"utilitybelt/tests/tests.py::TestUB::test_vt_name_check",
"utilitybelt/tests/tests.py::TestUB::test_vt_rate_limiting"
]
| [
"utilitybelt/tests/tests.py::TestUB::test_ip_between",
"utilitybelt/tests/tests.py::TestUB::test_ip_to_geo",
"utilitybelt/tests/tests.py::TestUB::test_ip_to_geojson",
"utilitybelt/tests/tests.py::TestUB::test_ip_to_long",
"utilitybelt/tests/tests.py::TestUB::test_ips_to_geojson",
"utilitybelt/tests/tests.py::TestUB::test_is_IPv4Address",
"utilitybelt/tests/tests.py::TestUB::test_is_reserved",
"utilitybelt/tests/tests.py::TestUB::test_is_rfc1918",
"utilitybelt/tests/tests.py::TestUB::test_is_url"
]
| []
| MIT License | 140 | [
".gitignore",
"utilitybelt/utilitybelt.py"
]
| [
".gitignore",
"utilitybelt/utilitybelt.py"
]
|
|
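The pysmt record below is largely one recurring bug: `args[1].is_bv_constant` written without parentheses. In Python a bound method object is always truthy, so the unparenthesized form let the constant-folding branch fire even when the second operand was not a constant (the patch also fixes a few misspelled manager methods such as `BVUdiv` → `BVUDiv`). A minimal reproduction of the failure mode:

```python
class Node:
    def is_bv_constant(self):
        return False

n = Node()
# Missing parentheses yield the bound-method object, which is truthy:
assert bool(n.is_bv_constant) is True      # the bug: condition always passes
assert n.is_bv_constant() is False         # the intended check
```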
pysmt__pysmt-114 | 4fd83af49e7784a874f57620809404d530929366 | 2015-05-17 17:12:04 | 689cc79ff2731837903b14daa266afc99b4feb21 | diff --git a/pysmt/simplifier.py b/pysmt/simplifier.py
index 418bc0d..3303577 100644
--- a/pysmt/simplifier.py
+++ b/pysmt/simplifier.py
@@ -445,7 +445,7 @@ class Simplifier(walkers.DagWalker):
res = args[0].bv_unsigned_value() // args[1].bv_unsigned_value()
res = res % 2**formula.bv_width()
return self.manager.BV(res, width=formula.bv_width())
- return self.manager.BVUdiv(*args)
+ return self.manager.BVUDiv(*args)
def walk_bv_urem(self, formula, args):
if args[0].is_bv_constant() and args[1].is_bv_constant:
@@ -454,7 +454,7 @@ class Simplifier(walkers.DagWalker):
else:
res = args[0].bv_unsigned_value() % args[1].bv_unsigned_value()
return self.manager.BV(res, width=formula.bv_width())
- return self.manager.BVUrem(*args)
+ return self.manager.BVURem(*args)
def walk_bv_ult(self, formula, args):
if args[0].is_bv_constant() and args[1].is_bv_constant:
@@ -506,7 +506,7 @@ class Simplifier(walkers.DagWalker):
filler = bitstr[0]
res = filler*formula.bv_extend_step() + bitstr
return self.manager.BV(res, width=formula.bv_width())
- return self.manager.BVSext(args[0], formula.bv_extend_step())
+ return self.manager.BVSExt(args[0], formula.bv_extend_step())
def walk_bv_zext(self, formula, args):
if args[0].is_bv_constant():
@@ -517,7 +517,7 @@ class Simplifier(walkers.DagWalker):
return self.manager.BVZExt(args[0], formula.bv_extend_step())
def walk_bv_concat(self, formula, args):
- if args[0].is_bv_constant() and args[1].is_bv_constant:
+ if args[0].is_bv_constant() and args[1].is_bv_constant():
w0 = args[0].bv_width()
w1 = args[1].bv_width()
res = (2**w1) * args[0].bv_unsigned_value() + \
@@ -526,14 +526,14 @@ class Simplifier(walkers.DagWalker):
return self.manager.BVConcat(*args)
def walk_bv_lshl(self, formula, args):
- if args[0].is_bv_constant() and args[1].is_bv_constant:
+ if args[0].is_bv_constant() and args[1].is_bv_constant():
res = args[0].bv_unsigned_value() << args[1].bv_unsigned_value()
w = args[0].bv_width()
return self.manager.BV(res % (2 ** w), w)
return self.manager.BVLShl(*args)
def walk_bv_lshr(self, formula, args):
- if args[0].is_bv_constant() and args[1].is_bv_constant:
+ if args[0].is_bv_constant() and args[1].is_bv_constant():
res = args[0].bv_unsigned_value() >> args[1].bv_unsigned_value()
w = args[0].bv_width()
return self.manager.BV(res % (2 ** w), w)
diff --git a/pysmt/walkers/identitydag.py b/pysmt/walkers/identitydag.py
index 1fc1915..b422926 100644
--- a/pysmt/walkers/identitydag.py
+++ b/pysmt/walkers/identitydag.py
@@ -31,72 +31,135 @@ class IdentityDagWalker(DagWalker):
DagWalker.__init__(self,
env=env,
invalidate_memoization=invalidate_memoization)
+ self.mgr = self.env.formula_manager
def walk_symbol(self, formula, args):
- return self.env.formula_manager.Symbol(formula.symbol_name(),
+ return self.mgr.Symbol(formula.symbol_name(),
formula.symbol_type())
def walk_real_constant(self, formula, args):
- return self.env.formula_manager.Real(formula.constant_value())
+ return self.mgr.Real(formula.constant_value())
def walk_int_constant(self, formula, args):
- return self.env.formula_manager.Int(formula.constant_value())
+ return self.mgr.Int(formula.constant_value())
def walk_bool_constant(self, formula, args):
- return self.env.formula_manager.Bool(formula.constant_value())
+ return self.mgr.Bool(formula.constant_value())
def walk_and(self, formula, args):
- return self.env.formula_manager.And(args)
+ return self.mgr.And(args)
def walk_or(self, formula, args):
- return self.env.formula_manager.Or(args)
+ return self.mgr.Or(args)
def walk_not(self, formula, args):
- return self.env.formula_manager.Not(args[0])
+ return self.mgr.Not(*args)
def walk_iff(self, formula, args):
- return self.env.formula_manager.Iff(args[0], args[1])
+ return self.mgr.Iff(*args)
def walk_implies(self, formula, args):
- return self.env.formula_manager.Implies(args[0], args[1])
+ return self.mgr.Implies(*args)
def walk_equals(self, formula, args):
- return self.env.formula_manager.Equals(args[0], args[1])
+ return self.mgr.Equals(*args)
def walk_ite(self, formula, args):
- return self.env.formula_manager.Ite(args[0], args[1], args[2])
+ return self.mgr.Ite(*args)
def walk_ge(self, formula, args):
- return self.env.formula_manager.GE(args[0], args[1])
+ return self.mgr.GE(*args)
def walk_le(self, formula, args):
- return self.env.formula_manager.LE(args[0], args[1])
+ return self.mgr.LE(*args)
def walk_gt(self, formula, args):
- return self.env.formula_manager.GT(args[0], args[1])
+ return self.mgr.GT(*args)
def walk_lt(self, formula, args):
- return self.env.formula_manager.LT(args[0], args[1])
+ return self.mgr.LT(*args)
def walk_forall(self, formula, args):
qvars = [self.walk_symbol(v, None) for v in formula.quantifier_vars()]
- return self.env.formula_manager.ForAll(qvars,args[0])
+ return self.mgr.ForAll(qvars, *args)
def walk_exists(self, formula, args):
qvars = [self.walk_symbol(v, None) for v in formula.quantifier_vars()]
- return self.env.formula_manager.Exists(qvars,args[0])
+ return self.mgr.Exists(qvars, *args)
def walk_plus(self, formula, args):
- return self.env.formula_manager.Plus(args)
+ return self.mgr.Plus(args)
def walk_times(self, formula, args):
- return self.env.formula_manager.Times(args[0], args[1])
+ return self.mgr.Times(*args)
def walk_minus(self, formula, args):
- return self.env.formula_manager.Minus(args[0], args[1])
+ return self.mgr.Minus(*args)
def walk_function(self, formula, args):
- return self.env.formula_manager.Function(formula.function_name(), args)
+ return self.mgr.Function(formula.function_name(), args)
def walk_toreal(self, formula, args):
- return self.env.formula_manager.ToReal(args[0])
+ return self.mgr.ToReal(*args)
+
+ def walk_bv_constant(self, formula, args):
+ return self.mgr.BV(formula.constant_value(), formula.bv_width())
+
+ def walk_bv_and(self, formula, args):
+ return self.mgr.BVAnd(*args)
+
+ def walk_bv_not(self, formula, args):
+ return self.mgr.BVNot(*args)
+
+ def walk_bv_neg(self, formula, args):
+ return self.mgr.BVNeg(*args)
+
+ def walk_bv_or(self, formula, args):
+ return self.mgr.BVOr(*args)
+
+ def walk_bv_xor(self, formula, args):
+ return self.mgr.BVXor(*args)
+
+ def walk_bv_add(self, formula, args):
+ return self.mgr.BVAdd(*args)
+
+ def walk_bv_mul(self, formula, args):
+ return self.mgr.BVMul(*args)
+
+ def walk_bv_udiv(self, formula, args):
+ return self.mgr.BVUDiv(*args)
+
+ def walk_bv_urem(self, formula, args):
+ return self.mgr.BVURem(*args)
+
+ def walk_bv_ult(self, formula, args):
+ return self.mgr.BVULT(*args)
+
+ def walk_bv_ule(self, formula, args):
+ return self.mgr.BVULE(*args)
+
+ def walk_bv_extract(self, formula, args):
+ return self.mgr.BVExtract(args[0],
+ start=formula.bv_extract_start(),
+ end=formula.bv_extract_end())
+
+ def walk_bv_ror(self, formula, args):
+ return self.mgr.BVRor(args[0], formula.bv_rotation_step())
+
+ def walk_bv_rol(self, formula, args):
+ return self.mgr.BVRol(args[0], formula.bv_rotation_step())
+
+ def walk_bv_sext(self, formula, args):
+ return self.mgr.BVSExt(args[0], formula.bv_extend_step())
+
+ def walk_bv_zext(self, formula, args):
+ return self.mgr.BVZExt(args[0], formula.bv_extend_step())
+
+ def walk_bv_concat(self, formula, args):
+ return self.mgr.BVConcat(*args)
+
+ def walk_bv_lshl(self, formula, args):
+ return self.mgr.BVLShl(*args)
+
+ def walk_bv_lshr(self, formula, args):
+ return self.mgr.BVLShr(*args)
| IdentityDagWalker lacks BV support | pysmt/pysmt | diff --git a/pysmt/test/test_walkers.py b/pysmt/test/test_walkers.py
index 22a96c7..6defb39 100644
--- a/pysmt/test/test_walkers.py
+++ b/pysmt/test/test_walkers.py
@@ -25,6 +25,7 @@ from pysmt.typing import INT, BOOL, REAL, FunctionType
from pysmt.walkers import TreeWalker, DagWalker, IdentityDagWalker
from pysmt.test import TestCase
from pysmt.formula import FormulaManager
+from pysmt.test.examples import get_example_formulae
from six.moves import xrange
@@ -100,7 +101,7 @@ class TestWalkers(TestCase):
self.assertFalse(tree_walker.is_complete())
- def test_identity_walker(self):
+ def test_identity_walker_simple(self):
def walk_and_to_or(formula, args, **kwargs):
return Or(args)
@@ -125,6 +126,11 @@ class TestWalkers(TestCase):
result = walker.walk(alternation)
self.assertEqual(result, expected)
+ def test_identity_dag_walker(self):
+ idw = IdentityDagWalker()
+ for (f, _, _, _) in get_example_formulae():
+ rebuilt = idw.walk(f)
+ self.assertTrue(rebuilt == f, "Rebuilt formula is not identical")
def test_substitution_on_quantifiers(self):
x, y = FreshSymbol(), FreshSymbol()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 0
},
"num_modified_files": 2
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
iniconfig==2.1.0
nose==1.3.7
packaging==24.2
pluggy==1.5.0
-e git+https://github.com/pysmt/pysmt.git@4fd83af49e7784a874f57620809404d530929366#egg=PySMT
pytest==8.3.5
six==1.17.0
tomli==2.2.1
| name: pysmt
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- nose==1.3.7
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- six==1.17.0
- tomli==2.2.1
prefix: /opt/conda/envs/pysmt
| [
"pysmt/test/test_walkers.py::TestWalkers::test_identity_dag_walker"
]
| []
| [
"pysmt/test/test_walkers.py::TestWalkers::test_identity_walker_simple",
"pysmt/test/test_walkers.py::TestWalkers::test_iterative_get_free_variables",
"pysmt/test/test_walkers.py::TestWalkers::test_subst",
"pysmt/test/test_walkers.py::TestWalkers::test_substituter_conditions",
"pysmt/test/test_walkers.py::TestWalkers::test_substitution_complex",
"pysmt/test/test_walkers.py::TestWalkers::test_substitution_on_functions",
"pysmt/test/test_walkers.py::TestWalkers::test_substitution_on_quantifiers",
"pysmt/test/test_walkers.py::TestWalkers::test_substitution_term",
"pysmt/test/test_walkers.py::TestWalkers::test_undefined_node",
"pysmt/test/test_walkers.py::TestWalkers::test_walker_is_complete"
]
| []
| Apache License 2.0 | 142 | [
"pysmt/walkers/identitydag.py",
"pysmt/simplifier.py"
]
| [
"pysmt/walkers/identitydag.py",
"pysmt/simplifier.py"
]
|
|
pre-commit__pre-commit-231 | 9515ca06378d74f1e4f8013db2b5230c1f15edaa | 2015-05-18 19:48:14 | 9515ca06378d74f1e4f8013db2b5230c1f15edaa | asottile: @Lucas-C look correct?
coveralls:
[](https://coveralls.io/builds/2591860)
Coverage decreased (-0.04%) to 99.96% when pulling **b140f92cd7e20368b27d19ea01227402e71c294a on no_defaults_in_config_227** into **9515ca06378d74f1e4f8013db2b5230c1f15edaa on master**.
| diff --git a/pre_commit/clientlib/validate_config.py b/pre_commit/clientlib/validate_config.py
index bdd0e2c..e4a90a6 100644
--- a/pre_commit/clientlib/validate_config.py
+++ b/pre_commit/clientlib/validate_config.py
@@ -33,7 +33,7 @@ CONFIG_JSON_SCHEMA = {
'properties': {
'id': {'type': 'string'},
'files': {'type': 'string'},
- 'exclude': {'type': 'string', 'default': '^$'},
+ 'exclude': {'type': 'string'},
'language_version': {'type': 'string'},
'args': {
'type': 'array',
@@ -71,7 +71,7 @@ def validate_config_extra(config):
)
for hook in repo['hooks']:
try_regex(repo, hook['id'], hook.get('files', ''), 'files')
- try_regex(repo, hook['id'], hook['exclude'], 'exclude')
+ try_regex(repo, hook['id'], hook.get('exclude', ''), 'exclude')
load_config = get_validator(
diff --git a/pre_commit/clientlib/validate_manifest.py b/pre_commit/clientlib/validate_manifest.py
index 283d7c4..4295014 100644
--- a/pre_commit/clientlib/validate_manifest.py
+++ b/pre_commit/clientlib/validate_manifest.py
@@ -20,6 +20,7 @@ MANIFEST_JSON_SCHEMA = {
'name': {'type': 'string'},
'description': {'type': 'string', 'default': ''},
'entry': {'type': 'string'},
+ 'exclude': {'type': 'string', 'default': '^$'},
'language': {'type': 'string'},
'language_version': {'type': 'string', 'default': 'default'},
'files': {'type': 'string'},
@@ -52,8 +53,14 @@ def validate_files(hook_config):
if not is_regex_valid(hook_config['files']):
raise InvalidManifestError(
'Invalid files regex at {0}: {1}'.format(
- hook_config['id'],
- hook_config['files'],
+ hook_config['id'], hook_config['files'],
+ )
+ )
+
+ if not is_regex_valid(hook_config.get('exclude', '')):
+ raise InvalidManifestError(
+ 'Invalid exclude regex at {0}: {1}'.format(
+ hook_config['id'], hook_config['exclude'],
)
)
| Bug: base manifest value for 'exclude' is always ignored
I stumbled upon this bug while working on #226: the culprit is [`Repository.hooks`](https://github.com/pre-commit/pre-commit/blob/master/pre_commit/repository.py#L48).
A quick fix for this would be to simply remove the default value from `pre_commit/clientlib/validate_config.py`, but the root cause is that any default value defined for a field in this file will make the corresponding manifest field useless.
Basically here is what happens in `Repository.hooks`:
- all the hooks defined in the current repository are enumerated
- at this stage, a `hook` is a dict closely matching the YAML config file content, **plus** default values for fields not defined in the YAML but having a JSON schema 'default'
- when doing the dict merge, **every** (key, value) pair in `hook` overrides the corresponding manifest entry. This includes default config values like `exclude: '^$'` overriding a base manifest value like `exclude: '.bak$'` (see the sketch below)
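A minimal sketch of that clobbering, assuming a plain "config wins" dict merge — the hook dicts and the merge call here are illustrative, not pre-commit's actual code:
```python
# Illustration only -- simplified stand-in for the merge in Repository.hooks.
manifest_hook = {'id': 'my-hook', 'exclude': r'\.bak$'}  # from the manifest
config_hook = {'id': 'my-hook'}                          # user wrote no 'exclude'

# Schema validation fills in defaults for missing keys, so the config
# entry now carries exclude='^$' even though the user never set it:
config_hook.setdefault('exclude', '^$')

# Every config key then overrides the manifest entry, including the
# injected default:
merged = dict(manifest_hook, **config_hook)
print(merged['exclude'])  # '^$' -- the manifest's '\.bak$' is silently lost
```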
Hence I suggest either adding a test ensuring there will never be any 'default' defined in `CONFIG_JSON_SCHEMA`, or improving the merge logic. | pre-commit/pre-commit | diff --git a/tests/clientlib/validate_config_test.py b/tests/clientlib/validate_config_test.py
index c507f28..b474f1b 100644
--- a/tests/clientlib/validate_config_test.py
+++ b/tests/clientlib/validate_config_test.py
@@ -174,3 +174,23 @@ def test_config_with_local_hooks_definition_passes(config_obj):
jsonschema.validate(config_obj, CONFIG_JSON_SCHEMA)
config = apply_defaults(config_obj, CONFIG_JSON_SCHEMA)
validate_config_extra(config)
+
+
+def test_does_not_contain_defaults():
+ """Due to the way our merging works, if this schema has any defaults they
+ will clobber potentially useful values in the backing manifest. #227
+ """
+ to_process = [(CONFIG_JSON_SCHEMA, ())]
+ while to_process:
+ schema, route = to_process.pop()
+ # Check this value
+ if isinstance(schema, dict):
+ if 'default' in schema:
+ raise AssertionError(
+ 'Unexpected default in schema at {0}'.format(
+ ' => '.join(route),
+ )
+ )
+
+ for key, value in schema.items():
+ to_process.append((value, route + (key,)))
diff --git a/tests/clientlib/validate_manifest_test.py b/tests/clientlib/validate_manifest_test.py
index 5e5690e..937f432 100644
--- a/tests/clientlib/validate_manifest_test.py
+++ b/tests/clientlib/validate_manifest_test.py
@@ -46,6 +46,9 @@ def test_additional_manifest_check_passing(obj):
[{'id': 'a', 'language': 'not a language', 'files': ''}],
[{'id': 'a', 'language': 'python3', 'files': ''}],
[{'id': 'a', 'language': 'python', 'files': 'invalid regex('}],
+ [{'id': 'a', 'language': 'not a language', 'files': ''}],
+ [{'id': 'a', 'language': 'python3', 'files': ''}],
+ [{'id': 'a', 'language': 'python', 'files': '', 'exclude': '('}],
),
)
def test_additional_manifest_failing(obj):
diff --git a/tests/manifest_test.py b/tests/manifest_test.py
index ba30d42..39ecc74 100644
--- a/tests/manifest_test.py
+++ b/tests/manifest_test.py
@@ -22,6 +22,7 @@ def test_manifest_contents(manifest):
'args': [],
'description': '',
'entry': 'bin/hook.sh',
+ 'exclude': '^$',
'expected_return_value': 0,
'files': '',
'id': 'bash_hook',
@@ -36,6 +37,7 @@ def test_hooks(manifest):
'args': [],
'description': '',
'entry': 'bin/hook.sh',
+ 'exclude': '^$',
'expected_return_value': 0,
'files': '',
'id': 'bash_hook',
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 2
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aspy.yaml==1.3.0
astroid==1.3.2
attrs==22.2.0
cached-property==1.5.2
certifi==2021.5.30
coverage==6.2
distlib==0.3.9
filelock==3.4.1
flake8==5.0.4
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
jsonschema==3.2.0
logilab-common==1.9.7
mccabe==0.7.0
mock==5.2.0
mypy-extensions==1.0.0
nodeenv==1.6.0
ordereddict==1.1
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
-e git+https://github.com/pre-commit/pre-commit.git@9515ca06378d74f1e4f8013db2b5230c1f15edaa#egg=pre_commit
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
pylint==1.3.1
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
PyYAML==6.0.1
simplejson==3.20.1
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
virtualenv==20.17.1
zipp==3.6.0
| name: pre-commit
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- argparse==1.4.0
- aspy-yaml==1.3.0
- astroid==1.3.2
- attrs==22.2.0
- cached-property==1.5.2
- coverage==6.2
- distlib==0.3.9
- filelock==3.4.1
- flake8==5.0.4
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- jsonschema==3.2.0
- logilab-common==1.9.7
- mccabe==0.7.0
- mock==5.2.0
- mypy-extensions==1.0.0
- nodeenv==1.6.0
- ordereddict==1.1
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pylint==1.3.1
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pyyaml==6.0.1
- simplejson==3.20.1
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- virtualenv==20.17.1
- zipp==3.6.0
prefix: /opt/conda/envs/pre-commit
| [
"tests/clientlib/validate_config_test.py::test_does_not_contain_defaults",
"tests/clientlib/validate_manifest_test.py::test_additional_manifest_failing[obj5]"
]
| [
"tests/clientlib/validate_config_test.py::test_run[input0-0]",
"tests/clientlib/validate_config_test.py::test_run[input1-0]",
"tests/clientlib/validate_manifest_test.py::test_run[input0-0]",
"tests/clientlib/validate_manifest_test.py::test_run[input1-0]",
"tests/manifest_test.py::test_manifest_contents",
"tests/manifest_test.py::test_hooks"
]
| [
"tests/clientlib/validate_config_test.py::test_run[input2-1]",
"tests/clientlib/validate_config_test.py::test_run[input3-1]",
"tests/clientlib/validate_config_test.py::test_run[input4-1]",
"tests/clientlib/validate_config_test.py::test_is_valid_according_to_schema[config_obj0-False]",
"tests/clientlib/validate_config_test.py::test_is_valid_according_to_schema[config_obj1-True]",
"tests/clientlib/validate_config_test.py::test_is_valid_according_to_schema[config_obj2-True]",
"tests/clientlib/validate_config_test.py::test_is_valid_according_to_schema[config_obj3-False]",
"tests/clientlib/validate_config_test.py::test_config_with_failing_regexes_fails",
"tests/clientlib/validate_config_test.py::test_config_with_ok_regexes_passes",
"tests/clientlib/validate_config_test.py::test_config_with_invalid_exclude_regex_fails",
"tests/clientlib/validate_config_test.py::test_config_with_ok_exclude_regex_passes",
"tests/clientlib/validate_config_test.py::test_config_with_local_hooks_definition_fails[config_obj0]",
"tests/clientlib/validate_config_test.py::test_config_with_local_hooks_definition_passes[config_obj0]",
"tests/clientlib/validate_config_test.py::test_config_with_local_hooks_definition_passes[config_obj1]",
"tests/clientlib/validate_manifest_test.py::test_run[input2-1]",
"tests/clientlib/validate_manifest_test.py::test_run[input3-1]",
"tests/clientlib/validate_manifest_test.py::test_run[input4-1]",
"tests/clientlib/validate_manifest_test.py::test_additional_manifest_check_raises_for_bad_language",
"tests/clientlib/validate_manifest_test.py::test_additional_manifest_check_passing[obj0]",
"tests/clientlib/validate_manifest_test.py::test_additional_manifest_check_passing[obj1]",
"tests/clientlib/validate_manifest_test.py::test_additional_manifest_failing[obj0]",
"tests/clientlib/validate_manifest_test.py::test_additional_manifest_failing[obj1]",
"tests/clientlib/validate_manifest_test.py::test_additional_manifest_failing[obj2]",
"tests/clientlib/validate_manifest_test.py::test_additional_manifest_failing[obj3]",
"tests/clientlib/validate_manifest_test.py::test_additional_manifest_failing[obj4]",
"tests/clientlib/validate_manifest_test.py::test_is_valid_according_to_schema[manifest_obj0-False]",
"tests/clientlib/validate_manifest_test.py::test_is_valid_according_to_schema[manifest_obj1-True]",
"tests/clientlib/validate_manifest_test.py::test_is_valid_according_to_schema[manifest_obj2-True]"
]
| []
| MIT License | 143 | [
"pre_commit/clientlib/validate_manifest.py",
"pre_commit/clientlib/validate_config.py"
]
| [
"pre_commit/clientlib/validate_manifest.py",
"pre_commit/clientlib/validate_config.py"
]
|
mne-tools__mne-python-2126 | edceb8f38349d6dc0cade1c9f8384cc0707ce3e8 | 2015-05-20 03:39:59 | edceb8f38349d6dc0cade1c9f8384cc0707ce3e8 | diff --git a/.gitignore b/.gitignore
old mode 100644
new mode 100755
index a32ce2aea..cdad72a3d
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,5 @@
*.pyc
*.pyo
-*.sh
*.so
*.fif
*.tar.gz
diff --git a/Makefile b/Makefile
index 7b7102444..484275006 100755
--- a/Makefile
+++ b/Makefile
@@ -108,10 +108,3 @@ manpages:
>| ../build/manpages/$$f.1; \
done
-build-doc-dev:
- cd doc; make clean
- cd doc; DISPLAY=:1.0 xvfb-run -n 1 -s "-screen 0 1280x1024x24 -noreset -ac +extension GLX +render" make html_dev
-
-build-doc-stable:
- cd doc; make clean
- cd doc; DISPLAY=:1.0 xvfb-run -n 1 -s "-screen 0 1280x1024x24 -noreset -ac +extension GLX +render" make html
diff --git a/doc/source/_static/navy.css b/doc/source/_static/navy.css
index c8419184a..04912f9ad 100755
--- a/doc/source/_static/navy.css
+++ b/doc/source/_static/navy.css
@@ -4,7 +4,7 @@
*/
body {
- font-family: 'Open Sans', 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', sans-serif;
+ font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', sans-serif;
font-size: 14px;
letter-spacing: -0.01em;
line-height: 150%;
diff --git a/doc/source/_templates/layout.html b/doc/source/_templates/layout.html
index 7178e1f11..c71a45092 100755
--- a/doc/source/_templates/layout.html
+++ b/doc/source/_templates/layout.html
@@ -1,9 +1,6 @@
{% extends "!layout.html" %}
{% block extrahead %}
-
-<link href='http://fonts.googleapis.com/css?family=Open+Sans:400italic,700italic,400,700' rel='stylesheet' type='text/css'>
-
{% if use_google_analytics|tobool %}
<script type="text/javascript">
var _gaq = _gaq || [];
diff --git a/examples/README.txt b/examples/README.txt
index 75dff2db0..aebe56991 100644
--- a/examples/README.txt
+++ b/examples/README.txt
@@ -1,4 +1,6 @@
-Introductory examples
----------------------
-Introductory examples to MNE.
+General examples
+-------------------
+
+General-purpose and introductory examples to MNE.
+
diff --git a/examples/connectivity/plot_mne_inverse_coherence_epochs.py b/examples/connectivity/plot_mne_inverse_coherence_epochs.py
index d09357237..c8c973e7b 100644
--- a/examples/connectivity/plot_mne_inverse_coherence_epochs.py
+++ b/examples/connectivity/plot_mne_inverse_coherence_epochs.py
@@ -5,7 +5,7 @@ Compute coherence in source space using a MNE inverse solution
This examples computes the coherence between a seed in the left
auditory cortex and the rest of the brain based on single-trial
-MNE-dSPM inverse solutions.
+MNE-dSPM inverse soltions.
"""
# Author: Martin Luessi <[email protected]>
diff --git a/examples/forward/README.txt b/examples/forward/README.txt
deleted file mode 100644
index 89db5587d..000000000
--- a/examples/forward/README.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-
-Forward modeling
-----------------
-
-From BEM segmentation, coregistration, setting up source spaces
-to actual computation of forward solution.
diff --git a/examples/plot_compute_mne_inverse.py b/examples/inverse/plot_compute_mne_inverse.py
similarity index 100%
rename from examples/plot_compute_mne_inverse.py
rename to examples/inverse/plot_compute_mne_inverse.py
diff --git a/examples/io/README.txt b/examples/io/README.txt
deleted file mode 100644
index 45f88d173..000000000
--- a/examples/io/README.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-
-Input/Ouput
------------
-
-Reading and writing files.
diff --git a/examples/forward/plot_bem_contour_mri.py b/examples/plot_bem_contour_mri.py
similarity index 100%
rename from examples/forward/plot_bem_contour_mri.py
rename to examples/plot_bem_contour_mri.py
diff --git a/examples/visualization/plot_channel_epochs_image.py b/examples/plot_channel_epochs_image.py
similarity index 100%
rename from examples/visualization/plot_channel_epochs_image.py
rename to examples/plot_channel_epochs_image.py
diff --git a/examples/visualization/plot_clickable_image.py b/examples/plot_clickable_image.py
similarity index 100%
rename from examples/visualization/plot_clickable_image.py
rename to examples/plot_clickable_image.py
diff --git a/examples/forward/plot_coregistration_transform.py b/examples/plot_coregistration_transform.py
similarity index 100%
rename from examples/forward/plot_coregistration_transform.py
rename to examples/plot_coregistration_transform.py
diff --git a/examples/forward/plot_decimate_head_surface.py b/examples/plot_decimate_head_surface.py
similarity index 100%
rename from examples/forward/plot_decimate_head_surface.py
rename to examples/plot_decimate_head_surface.py
diff --git a/examples/preprocessing/plot_define_target_events.py b/examples/plot_define_target_events.py
similarity index 100%
rename from examples/preprocessing/plot_define_target_events.py
rename to examples/plot_define_target_events.py
diff --git a/examples/io/plot_epochs_to_data_frame.py b/examples/plot_epochs_to_data_frame.py
similarity index 100%
rename from examples/io/plot_epochs_to_data_frame.py
rename to examples/plot_epochs_to_data_frame.py
diff --git a/examples/preprocessing/plot_estimate_covariance_matrix_baseline.py b/examples/plot_estimate_covariance_matrix_baseline.py
similarity index 100%
rename from examples/preprocessing/plot_estimate_covariance_matrix_baseline.py
rename to examples/plot_estimate_covariance_matrix_baseline.py
diff --git a/examples/preprocessing/plot_estimate_covariance_matrix_raw.py b/examples/plot_estimate_covariance_matrix_raw.py
similarity index 100%
rename from examples/preprocessing/plot_estimate_covariance_matrix_raw.py
rename to examples/plot_estimate_covariance_matrix_raw.py
diff --git a/examples/visualization/plot_evoked_delayed_ssp.py b/examples/plot_evoked_delayed_ssp.py
similarity index 100%
rename from examples/visualization/plot_evoked_delayed_ssp.py
rename to examples/plot_evoked_delayed_ssp.py
diff --git a/examples/visualization/plot_evoked_topomap.py b/examples/plot_evoked_topomap.py
similarity index 100%
rename from examples/visualization/plot_evoked_topomap.py
rename to examples/plot_evoked_topomap.py
diff --git a/examples/visualization/plot_evoked_topomap_delayed_ssp.py b/examples/plot_evoked_topomap_delayed_ssp.py
similarity index 100%
rename from examples/visualization/plot_evoked_topomap_delayed_ssp.py
rename to examples/plot_evoked_topomap_delayed_ssp.py
diff --git a/examples/visualization/plot_evoked_whitening.py b/examples/plot_evoked_whitening.py
similarity index 100%
rename from examples/visualization/plot_evoked_whitening.py
rename to examples/plot_evoked_whitening.py
diff --git a/examples/plot_extract_events_from_raw.py b/examples/plot_extract_events_from_raw.py
index 4d52daf6b..4d8c1a283 100644
--- a/examples/plot_extract_events_from_raw.py
+++ b/examples/plot_extract_events_from_raw.py
@@ -25,7 +25,7 @@ raw = Raw(fname)
events = mne.find_events(raw, stim_channel='STI 014')
# Writing events
-mne.write_events('sample_audvis_raw-eve.fif', events)
+mne.write_events('events.fif', events)
for ind, before, after in events[:5]:
print("At sample %d stim channel went from %d to %d"
diff --git a/examples/plot_from_raw_to_epochs_to_evoked.py b/examples/plot_from_raw_to_epochs_to_evoked.py
index d36bff025..c2934c685 100644
--- a/examples/plot_from_raw_to_epochs_to_evoked.py
+++ b/examples/plot_from_raw_to_epochs_to_evoked.py
@@ -16,6 +16,7 @@ data and then saved to disk.
import mne
from mne import io
from mne.datasets import sample
+import matplotlib.pyplot as plt
print(__doc__)
@@ -25,17 +26,14 @@ data_path = sample.data_path()
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
-tmin, tmax = -0.2, 0.5
-
-# Select events to extract epochs from.
-event_id = {'Auditory/Left': 1, 'Auditory/Right': 2}
+event_id, tmin, tmax = 1, -0.2, 0.5
# Setup for reading the raw data
raw = io.Raw(raw_fname)
events = mne.read_events(event_fname)
# Plot raw data
-fig = raw.plot(events=events, event_color={1: 'cyan', -1: 'lightgray'})
+fig = raw.plot(events=events, event_color={event_id: 'cyan', -1: 'lightgray'})
# Set up pick list: EEG + STI 014 - bad channels (modify to your needs)
include = [] # or stim channels ['STI 014']
@@ -56,21 +54,26 @@ epochs.plot()
epochs.drop_bad_epochs()
epochs.plot_drop_log(subject='sample')
-# Average epochs and get evoked data corresponding to the left stimulation
-evoked = epochs['Left'].average()
+evoked = epochs.average() # average epochs and get an Evoked dataset.
evoked.save('sample_audvis_eeg-ave.fif') # save evoked data to disk
###############################################################################
# View evoked response
+times = 1e3 * epochs.times # time in miliseconds
+
+ch_max_name, latency = evoked.get_peak(mode='neg')
evoked.plot()
-###############################################################################
-# Save evoked responses for different conditions to disk
+plt.xlim([times[0], times[-1]])
+plt.xlabel('time (ms)')
+plt.ylabel('Potential (uV)')
+plt.title('EEG evoked potential')
-# average epochs and get Evoked datasets
-evokeds = [epochs[cond].average() for cond in ['Left', 'Right']]
+plt.axvline(latency * 1e3, color='red',
+ label=ch_max_name, linewidth=2,
+ linestyle='--')
+plt.legend(loc='best')
-# save evoked data to disk
-mne.write_evokeds('sample_auditory_and_visual_eeg-ave.fif', evokeds)
+plt.show()
diff --git a/examples/plot_from_raw_to_multiple_epochs_to_evoked.py b/examples/plot_from_raw_to_multiple_epochs_to_evoked.py
new file mode 100644
index 000000000..410239032
--- /dev/null
+++ b/examples/plot_from_raw_to_multiple_epochs_to_evoked.py
@@ -0,0 +1,72 @@
+"""
+====================================================================
+Extract epochs for multiple conditions, save evoked response to disk
+====================================================================
+
+This script shows how to read the epochs for multiple conditions from
+a raw file given a list of events. The epochs are averaged to produce
+evoked data and then saved to disk.
+
+"""
+# Authors: Alexandre Gramfort <[email protected]>
+# Eric Larson <[email protected]>
+# Denis Engemann <[email protected]>
+#
+# License: BSD (3-clause)
+
+import mne
+from mne import io
+from mne.datasets import sample
+from mne.epochs import combine_event_ids
+import matplotlib.pyplot as plt
+
+print(__doc__)
+
+data_path = sample.data_path()
+
+###############################################################################
+# Set parameters
+raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
+event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
+event_ids = {'AudL': 1, 'AudR': 2, 'VisL': 3, 'VisR': 4}
+tmin = -0.2
+tmax = 0.5
+
+# Setup for reading the raw data
+raw = io.Raw(raw_fname)
+events = mne.read_events(event_fname)
+
+# Set up pick list: EEG + STI 014 - bad channels (modify to your needs)
+include = [] # or stim channels ['STI 014']
+raw.info['bads'] += ['EEG 053'] # bads + 1 more
+
+# pick EEG channels
+picks = mne.pick_types(raw.info, meg=False, eeg=True, stim=False, eog=True,
+ include=include, exclude='bads')
+# Read epochs
+epochs = mne.Epochs(raw, events, event_ids, tmin, tmax, picks=picks,
+ baseline=(None, 0), reject=dict(eeg=80e-6, eog=150e-6))
+# Let's equalize the trial counts in each condition
+epochs.equalize_event_counts(['AudL', 'AudR', 'VisL', 'VisR'], copy=False)
+# Now let's combine some conditions
+combine_event_ids(epochs, ['AudL', 'AudR'], {'Auditory': 12}, copy=False)
+combine_event_ids(epochs, ['VisL', 'VisR'], {'Visual': 34}, copy=False)
+
+# average epochs and get Evoked datasets
+evokeds = [epochs[cond].average() for cond in ['Auditory', 'Visual']]
+
+# save evoked data to disk
+mne.write_evokeds('sample_auditory_and_visual_eeg-ave.fif', evokeds)
+
+###############################################################################
+# View evoked response
+plt.clf()
+ax = plt.subplot(2, 1, 1)
+evokeds[0].plot(axes=ax)
+plt.title('EEG evoked potential, auditory trials')
+plt.ylabel('Potential (uV)')
+ax = plt.subplot(2, 1, 2)
+evokeds[1].plot(axes=ax)
+plt.title('EEG evoked potential, visual trials')
+plt.ylabel('Potential (uV)')
+plt.show()
diff --git a/examples/preprocessing/plot_interpolate_bad_channels.py b/examples/plot_interpolate_bad_channels.py
similarity index 100%
rename from examples/preprocessing/plot_interpolate_bad_channels.py
rename to examples/plot_interpolate_bad_channels.py
diff --git a/examples/forward/plot_left_cerebellum_volume_source.py b/examples/plot_left_cerebellum_volume_source.py
similarity index 100%
rename from examples/forward/plot_left_cerebellum_volume_source.py
rename to examples/plot_left_cerebellum_volume_source.py
diff --git a/examples/forward/plot_make_forward.py b/examples/plot_make_forward.py
similarity index 100%
rename from examples/forward/plot_make_forward.py
rename to examples/plot_make_forward.py
diff --git a/examples/visualization/plot_meg_eeg_fields_3d.py b/examples/plot_meg_eeg_fields_3d.py
similarity index 100%
rename from examples/visualization/plot_meg_eeg_fields_3d.py
rename to examples/plot_meg_eeg_fields_3d.py
diff --git a/examples/io/plot_objects_from_arrays.py b/examples/plot_objects_from_arrays.py
similarity index 100%
rename from examples/io/plot_objects_from_arrays.py
rename to examples/plot_objects_from_arrays.py
diff --git a/examples/io/plot_read_and_write_raw_data.py b/examples/plot_read_and_write_raw_data.py
similarity index 100%
rename from examples/io/plot_read_and_write_raw_data.py
rename to examples/plot_read_and_write_raw_data.py
diff --git a/examples/forward/plot_read_bem_surfaces.py b/examples/plot_read_bem_surfaces.py
similarity index 100%
rename from examples/forward/plot_read_bem_surfaces.py
rename to examples/plot_read_bem_surfaces.py
diff --git a/examples/io/plot_read_epochs.py b/examples/plot_read_epochs.py
similarity index 100%
rename from examples/io/plot_read_epochs.py
rename to examples/plot_read_epochs.py
diff --git a/examples/io/plot_read_evoked.py b/examples/plot_read_evoked.py
similarity index 100%
rename from examples/io/plot_read_evoked.py
rename to examples/plot_read_evoked.py
diff --git a/examples/forward/plot_read_forward.py b/examples/plot_read_forward.py
similarity index 100%
rename from examples/forward/plot_read_forward.py
rename to examples/plot_read_forward.py
diff --git a/examples/io/plot_read_noise_covariance_matrix.py b/examples/plot_read_noise_covariance_matrix.py
similarity index 100%
rename from examples/io/plot_read_noise_covariance_matrix.py
rename to examples/plot_read_noise_covariance_matrix.py
diff --git a/examples/preprocessing/plot_rereference_eeg.py b/examples/plot_rereference_eeg.py
similarity index 100%
rename from examples/preprocessing/plot_rereference_eeg.py
rename to examples/plot_rereference_eeg.py
diff --git a/examples/preprocessing/plot_shift_evoked.py b/examples/plot_shift_evoked.py
similarity index 100%
rename from examples/preprocessing/plot_shift_evoked.py
rename to examples/plot_shift_evoked.py
diff --git a/examples/simulation/plot_simulate_evoked_data.py b/examples/plot_simulate_evoked_data.py
similarity index 100%
rename from examples/simulation/plot_simulate_evoked_data.py
rename to examples/plot_simulate_evoked_data.py
diff --git a/examples/visualization/plot_ssp_projs_sensitivity_map.py b/examples/plot_ssp_projs_sensitivity_map.py
similarity index 100%
rename from examples/visualization/plot_ssp_projs_sensitivity_map.py
rename to examples/plot_ssp_projs_sensitivity_map.py
diff --git a/examples/visualization/plot_ssp_projs_topomaps.py b/examples/plot_ssp_projs_topomaps.py
similarity index 100%
rename from examples/visualization/plot_ssp_projs_topomaps.py
rename to examples/plot_ssp_projs_topomaps.py
diff --git a/examples/visualization/plot_topo_channel_epochs_image.py b/examples/plot_topo_channel_epochs_image.py
similarity index 100%
rename from examples/visualization/plot_topo_channel_epochs_image.py
rename to examples/plot_topo_channel_epochs_image.py
diff --git a/examples/visualization/plot_topo_compare_conditions.py b/examples/plot_topo_compare_conditions.py
similarity index 100%
rename from examples/visualization/plot_topo_compare_conditions.py
rename to examples/plot_topo_compare_conditions.py
diff --git a/examples/visualization/plot_topo_customized.py b/examples/plot_topo_customized.py
similarity index 100%
rename from examples/visualization/plot_topo_customized.py
rename to examples/plot_topo_customized.py
diff --git a/examples/visualization/plot_topography.py b/examples/plot_topography.py
similarity index 100%
rename from examples/visualization/plot_topography.py
rename to examples/plot_topography.py
diff --git a/examples/preprocessing/plot_virtual_evoked.py b/examples/plot_virtual_evoked.py
similarity index 100%
rename from examples/preprocessing/plot_virtual_evoked.py
rename to examples/plot_virtual_evoked.py
diff --git a/examples/io/read_events.py b/examples/read_events.py
similarity index 100%
rename from examples/io/read_events.py
rename to examples/read_events.py
diff --git a/examples/simulation/README.txt b/examples/simulation/README.txt
deleted file mode 100644
index bec6bcce3..000000000
--- a/examples/simulation/README.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-
-Data Simulation
----------------
-
-Tools to generate simulation data.
diff --git a/examples/visualization/README.txt b/examples/visualization/README.txt
deleted file mode 100644
index 25af47af5..000000000
--- a/examples/visualization/README.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-
-Visualization
--------------
-
-Looking at data and processing output.
diff --git a/examples/visualization/plot_evoked_erf_erp.py b/examples/visualization/plot_evoked_erf_erp.py
deleted file mode 100644
index db9f68b01..000000000
--- a/examples/visualization/plot_evoked_erf_erp.py
+++ /dev/null
@@ -1,48 +0,0 @@
-"""
-=================================
-Plotting ERF/ERP with evoked data
-=================================
-
-Load evoked data and plot.
-
-"""
-# Authors: Alexandre Gramfort <[email protected]>
-#
-# License: BSD (3-clause)
-
-import matplotlib.pyplot as plt
-from mne.datasets import sample
-from mne import read_evokeds
-
-print(__doc__)
-
-path = sample.data_path()
-fname = path + '/MEG/sample/sample_audvis-ave.fif'
-
-# load evoked and subtract baseline
-condition = 'Left Auditory'
-evoked = read_evokeds(fname, condition=condition, baseline=(None, 0))
-
-evoked.plot()
-
-###############################################################################
-# Or plot manually after extracting peak latency
-
-evoked = evoked.pick_types(meg=False, eeg=True)
-times = 1e3 * evoked.times # time in miliseconds
-
-ch_max_name, latency = evoked.get_peak(mode='neg')
-
-plt.figure()
-plt.plot(times, 1e6 * evoked.data.T, 'k-')
-plt.xlim([times[0], times[-1]])
-plt.xlabel('time (ms)')
-plt.ylabel('Potential (uV)')
-plt.title('EEG evoked potential')
-
-plt.axvline(latency * 1e3, color='red',
- label=ch_max_name, linewidth=2,
- linestyle='--')
-plt.legend(loc='best')
-
-plt.show()
diff --git a/mne/connectivity/spectral.py b/mne/connectivity/spectral.py
index a1bc27bce..5c0c7cbd5 100644
--- a/mne/connectivity/spectral.py
+++ b/mne/connectivity/spectral.py
@@ -539,8 +539,8 @@ def spectral_connectivity(data, method='coh', indices=None, sfreq=2 * np.pi,
to compute the connectivity between the signal with index 0 and signals
"2, 3, 4" (a total of 3 connections) one can use the following::
- indices = (np.array([0, 0, 0]), # row indices
- np.array([2, 3, 4])) # col indices
+ indices = (np.array([0, 0, 0], # row indices
+ np.array([2, 3, 4]))) # col indices
con_flat = spectral_connectivity(data, method='coh',
indices=indices, ...)
diff --git a/mne/datasets/utils.py b/mne/datasets/utils.py
index e7745e3e7..be6f5fb0d 100644
--- a/mne/datasets/utils.py
+++ b/mne/datasets/utils.py
@@ -120,7 +120,7 @@ def _data_path(path=None, force_update=False, update_path=True, download=True,
archive_name = "MNE-sample-data-processed.tar.gz"
folder_name = "MNE-sample-data"
url = "ftp://surfer.nmr.mgh.harvard.edu/pub/data/MNE/" + archive_name
- hash_ = 'f73186795af820428e5e8e779ce5bfcf'
+ hash_ = '1bb9f993bfba2057e0039c306a717109'
elif name == 'spm':
archive_name = 'MNE-spm-face.tar.bz2'
folder_name = "MNE-spm-face"
diff --git a/mne/decoding/time_gen.py b/mne/decoding/time_gen.py
index 9332ae63a..c6598c8b6 100644
--- a/mne/decoding/time_gen.py
+++ b/mne/decoding/time_gen.py
@@ -119,12 +119,8 @@ class GeneralizationAcrossTime(object):
The categories used for training.
estimators_ : list of list of sklearn.base.BaseEstimator subclasses.
The estimators for each time point and each fold.
- y_pred_ : list of lists of arrays of floats,
- shape (n_train_times, n_test_times, n_epochs, n_prediction_dims)
- The single-trial predictions estimated by self.predict() at each
- training time and each testing time. Note that the number of testing
- times per training time need not be regular, else
- np.shape(y_pred_) = [n_train_time, n_test_time, n_epochs].
+ y_pred_ : np.ndarray, shape (n_train_times, n_test_times, n_epochs, n_prediction_dims)
+ Class labels for samples in X.
scores_ : list of lists of float
The scores estimated by self.scorer_ at each training time and each
testing time (e.g. mean accuracy of self.predict(X)). Note that the
@@ -307,12 +303,9 @@ class GeneralizationAcrossTime(object):
Returns
-------
- y_pred : list of lists of arrays of floats,
- shape (n_train_t, n_test_t, n_epochs, n_prediction_dims)
- The single-trial predictions at each training time and each testing
- time. Note that the number of testing times per training time need
- not be regular;
- else, np.shape(y_pred_) = [n_train_time, n_test_time, n_epochs].
+ y_pred_ : np.ndarray, shape (n_train_time, n_test_time, n_epochs,
+ n_prediction_dim)
+ Class labels for samples in X.
"""
# clean in case gat.predict() is called at unexpected moments
@@ -365,10 +358,12 @@ class GeneralizationAcrossTime(object):
parallel, p_time_gen, _ = parallel_func(_predict_time_loop, n_jobs)
# Loop across estimators (i.e. training times)
- self.y_pred_ = parallel(p_time_gen(X, self.estimators_[t_train], cv,
- slices, self.predict_mode)
- for t_train, slices in
- enumerate(test_times_['slices']))
+ packed = parallel(p_time_gen(X, self.estimators_[t_train], cv,
+ slices, self.predict_mode)
+ for t_train, slices in
+ enumerate(test_times_['slices']))
+
+ self.y_pred_ = np.transpose(tuple(zip(*packed)), (1, 0, 2, 3))
return self.y_pred_
def score(self, epochs=None, y=None, scorer=None, test_times=None):
@@ -463,7 +458,7 @@ class GeneralizationAcrossTime(object):
self.y_true_ = y # true regressor to be compared with y_pred
# Preprocessing for parallelization:
- n_jobs = min(len(self.y_pred_[0][0]), check_n_jobs(self.n_jobs))
+ n_jobs = min(self.y_pred_.shape[2], check_n_jobs(self.n_jobs))
parallel, p_time_gen, n_jobs = parallel_func(_score_loop, n_jobs)
# Score each training and testing time point
diff --git a/mne/forward/forward.py b/mne/forward/forward.py
index c41a70fce..abacd7091 100644
--- a/mne/forward/forward.py
+++ b/mne/forward/forward.py
@@ -1226,6 +1226,7 @@ def apply_forward_raw(fwd, stc, raw_template, start=None, stop=None,
raw.info = _fill_measurement_info(raw.info, fwd, sfreq)
raw.info['projs'] = []
raw._projector = None
+ raw._update_times()
return raw
diff --git a/mne/io/base.py b/mne/io/base.py
index 578109de7..a2ab5b8b1 100644
--- a/mne/io/base.py
+++ b/mne/io/base.py
@@ -233,7 +233,7 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin,
'not %s' % preload.dtype)
self._data = preload
self.preload = True
- self._last_samps = np.array([self._data.shape[1] - 1])
+ last_samps = [self._data.shape[1] - 1]
load_from_disk = False
else:
if last_samps is None:
@@ -246,7 +246,8 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin,
raise ValueError('bad preload: %s' % preload)
else:
load_from_disk = True
- self._last_samps = np.array(last_samps)
+ self._last_samps = np.array(last_samps)
+ self._first_samps = np.array(first_samps)
self.info = info
cals = np.empty(info['nchan'])
for k in range(info['nchan']):
@@ -257,13 +258,13 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin,
self.comp = comp
self._orig_comp_grade = orig_comp_grade
self._filenames = list(filenames)
- self._first_samps = np.array(first_samps)
self.orig_format = orig_format
self._projectors = list()
self._projector = None
# If we have True or a string, actually do the preloading
if load_from_disk:
self._preload_data(preload)
+ self._update_times()
def _read_segment(start, stop, sel, data_buffer, projector, verbose):
"""Read a chunk of raw data
@@ -321,6 +322,12 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin,
self.preload = True
self.close()
+ def _update_times(self):
+ """Helper to update times"""
+ self._times = np.arange(self.n_times) / float(self.info['sfreq'])
+ # make it immutable
+ self._times.flags.writeable = False
+
@property
def first_samp(self):
return self._first_samps[0]
@@ -868,6 +875,7 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin,
# adjust affected variables
self._data = np.concatenate(new_data, axis=1)
self.info['sfreq'] = sfreq
+ self._update_times()
def crop(self, tmin=0.0, tmax=None, copy=True):
"""Crop raw data file.
@@ -922,6 +930,7 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin,
if raw.preload:
# slice and copy to avoid the reference to large array
raw._data = raw._data[:, smin:smax + 1].copy()
+ raw._update_times()
return raw
@verbose
@@ -1355,7 +1364,7 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin,
@property
def times(self):
"""Time points"""
- return np.arange(self.n_times) / float(self.info['sfreq'])
+ return self._times
@property
def n_times(self):
@@ -1477,6 +1486,7 @@ class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin,
self._last_samps = np.r_[self._last_samps, r._last_samps]
self._rawdirs += r._rawdirs
self._filenames += r._filenames
+ self._update_times()
def close(self):
"""Clean up the object.
diff --git a/mne/io/kit/kit.py b/mne/io/kit/kit.py
index 447715681..8de71ac9a 100644
--- a/mne/io/kit/kit.py
+++ b/mne/io/kit/kit.py
@@ -114,8 +114,7 @@ class RawKIT(_BaseRaw):
def __repr__(self):
s = ('%r' % op.basename(self._kit_info['fname']),
"n_channels x n_times : %s x %s" % (len(self.info['ch_names']),
- self.last_samp + 1 -
- self.first_samp))
+ self._kit_info['n_samples']))
return "<RawKIT | %s>" % ', '.join(s)
def read_stim_ch(self, buffer_size=1e5):
@@ -495,7 +494,7 @@ def _set_dig_kit(mrk, elp, hsp, auto_decimate=True):
hsp = _read_dig_points(hsp)
n_pts = len(hsp)
if n_pts > KIT.DIG_POINTS:
- hsp = _decimate_points(hsp, res=5)
+ hsp = _decimate_points(hsp, decim=5)
n_new = len(hsp)
msg = ("The selected head shape contained {n_in} points, which is "
"more than recommended ({n_rec}), and was automatically "
@@ -550,8 +549,6 @@ def get_kit_info(rawfile):
Returns
-------
- info : instance of Info
- An Info for the instance.
sqd : dict
A dict containing all the sqd parameter settings.
"""
@@ -679,7 +676,7 @@ def get_kit_info(rawfile):
info['sfreq'] = float(sqd['sfreq'])
info['bads'] = []
info['acq_pars'], info['acq_stim'] = None, None
- info['filename'] = rawfile
+ info['filename'] = None
info['ctf_head_t'] = None
info['dev_ctf_t'] = []
info['nchan'] = sqd['nchan']
diff --git a/mne/preprocessing/ica.py b/mne/preprocessing/ica.py
index c7ba1ef94..1db0a2678 100644
--- a/mne/preprocessing/ica.py
+++ b/mne/preprocessing/ica.py
@@ -666,6 +666,7 @@ class ICA(ContainsMixin):
out._projector = None
self._export_info(out.info, raw, add_channels)
+ out._update_times()
return out
| Epoch-ing too slow when #events is large..
So I have several ABR datasets with about 8000 trials each. Epoching is way too slow, and I suspect the issue is related to not pre-allocating memory in ```_get_data_from_disk``` and growing the ```_data``` array one epoch at a time, as in the sketch below. And when I don't use ```preload=True```, ```epochs.average()``` is too slow once I eventually have to average.
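A minimal sketch of the growth pattern I suspect, with made-up shapes — an illustration of the cost, not MNE's actual ```_get_data_from_disk```:
```python
import numpy as np

n_epochs, n_channels, n_times = 2000, 60, 240  # scaled down from ~8000 trials

# Growing one epoch at a time: every concatenate re-copies everything
# accumulated so far, so the total copying is O(n_epochs ** 2).
data = np.empty((0, n_channels, n_times))
for _ in range(n_epochs):
    epoch = np.random.randn(1, n_channels, n_times)
    data = np.concatenate([data, epoch], axis=0)

# Pre-allocating once and filling in place copies each epoch exactly
# once, i.e. O(n_epochs).
data = np.empty((n_epochs, n_channels, n_times))
for i in range(n_epochs):
    data[i] = np.random.randn(n_channels, n_times)
```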
Am I missing something or is there room to speed things up? | mne-tools/mne-python | diff --git a/mne/decoding/tests/test_time_gen.py b/mne/decoding/tests/test_time_gen.py
index c1a2328a5..16ef8c904 100644
--- a/mne/decoding/tests/test_time_gen.py
+++ b/mne/decoding/tests/test_time_gen.py
@@ -124,7 +124,7 @@ def test_generalization_across_time():
# --- number of trials
assert_true(gat.y_train_.shape[0] ==
gat.y_true_.shape[0] ==
- len(gat.y_pred_[0][0]) == 14)
+ gat.y_pred_.shape[2] == 14)
# --- number of folds
assert_true(np.shape(gat.estimators_)[1] == gat.cv)
# --- length training size
diff --git a/mne/realtime/tests/test_mockclient.py b/mne/realtime/tests/test_mockclient.py
index 631ca003e..a16217f45 100644
--- a/mne/realtime/tests/test_mockclient.py
+++ b/mne/realtime/tests/test_mockclient.py
@@ -86,6 +86,7 @@ def test_find_events():
raw._data[stim_channel_idx, 520:530] = 6
raw._data[stim_channel_idx, 530:532] = 5
raw._data[stim_channel_idx, 540] = 6
+ raw._update_times()
# consecutive=False
find_events = dict(consecutive=False)
diff --git a/mne/tests/test_event.py b/mne/tests/test_event.py
index 2cde41291..8bffa5e7a 100644
--- a/mne/tests/test_event.py
+++ b/mne/tests/test_event.py
@@ -150,6 +150,7 @@ def test_find_events():
# Reset some data for ease of comparison
raw._first_samps[0] = 0
raw.info['sfreq'] = 1000
+ raw._update_times()
stim_channel = 'STI 014'
stim_channel_idx = pick_channels(raw.info['ch_names'],
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_added_files",
"has_removed_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 15
} | 0.8 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"numpy>=1.16.0",
"pandas>=1.0.0",
"scikit-learn",
"h5py",
"pysurfer",
"nose",
"nose-timer",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.7",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | apptools==5.2.1
certifi @ file:///croot/certifi_1671487769961/work/certifi
configobj==5.0.9
cycler==0.11.0
envisage==7.0.3
exceptiongroup==1.2.2
fonttools==4.38.0
h5py==3.8.0
importlib-metadata==6.7.0
importlib-resources==5.12.0
iniconfig==2.0.0
joblib==1.3.2
kiwisolver==1.4.5
matplotlib==3.5.3
mayavi==4.8.1
-e git+https://github.com/mne-tools/mne-python.git@edceb8f38349d6dc0cade1c9f8384cc0707ce3e8#egg=mne
nibabel==4.0.2
nose==1.3.7
nose-timer==1.0.1
numpy==1.21.6
packaging==24.0
pandas==1.3.5
Pillow==9.5.0
pluggy==1.2.0
pyface==8.0.0
Pygments==2.17.2
pyparsing==3.1.4
pysurfer==0.11.2
pytest==7.4.4
python-dateutil==2.9.0.post0
pytz==2025.2
scikit-learn==1.0.2
scipy==1.7.3
six==1.17.0
threadpoolctl==3.1.0
tomli==2.0.1
traits==6.4.3
traitsui==8.0.0
typing_extensions==4.7.1
vtk==9.3.1
zipp==3.15.0
| name: mne-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=22.3.1=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- apptools==5.2.1
- configobj==5.0.9
- cycler==0.11.0
- envisage==7.0.3
- exceptiongroup==1.2.2
- fonttools==4.38.0
- h5py==3.8.0
- importlib-metadata==6.7.0
- importlib-resources==5.12.0
- iniconfig==2.0.0
- joblib==1.3.2
- kiwisolver==1.4.5
- matplotlib==3.5.3
- mayavi==4.8.1
- nibabel==4.0.2
- nose==1.3.7
- nose-timer==1.0.1
- numpy==1.21.6
- packaging==24.0
- pandas==1.3.5
- pillow==9.5.0
- pluggy==1.2.0
- pyface==8.0.0
- pygments==2.17.2
- pyparsing==3.1.4
- pysurfer==0.11.2
- pytest==7.4.4
- python-dateutil==2.9.0.post0
- pytz==2025.2
- scikit-learn==1.0.2
- scipy==1.7.3
- six==1.17.0
- threadpoolctl==3.1.0
- tomli==2.0.1
- traits==6.4.3
- traitsui==8.0.0
- typing-extensions==4.7.1
- vtk==9.3.1
- zipp==3.15.0
prefix: /opt/conda/envs/mne-python
| [
"mne/tests/test_event.py::test_find_events"
]
| [
"mne/decoding/tests/test_time_gen.py::test_time_generalization",
"mne/decoding/tests/test_time_gen.py::test_generalization_across_time",
"mne/realtime/tests/test_mockclient.py::test_find_events",
"mne/tests/test_event.py::test_io_events"
]
| [
"mne/realtime/tests/test_mockclient.py::test_mockclient",
"mne/realtime/tests/test_mockclient.py::test_get_event_data",
"mne/tests/test_event.py::test_add_events",
"mne/tests/test_event.py::test_merge_events",
"mne/tests/test_event.py::test_pick_events",
"mne/tests/test_event.py::test_make_fixed_length_events",
"mne/tests/test_event.py::test_define_events"
]
| []
| BSD 3-Clause "New" or "Revised" License | 144 | [
"Makefile",
"examples/visualization/plot_topo_customized.py",
"mne/connectivity/spectral.py",
"examples/visualization/plot_channel_epochs_image.py",
"examples/visualization/plot_evoked_topomap_delayed_ssp.py",
"examples/io/read_events.py",
"examples/forward/plot_bem_contour_mri.py",
"examples/io/plot_read_evoked.py",
"mne/decoding/time_gen.py",
".gitignore",
"examples/preprocessing/plot_interpolate_bad_channels.py",
"examples/io/plot_read_epochs.py",
"examples/forward/plot_coregistration_transform.py",
"mne/forward/forward.py",
"examples/forward/plot_read_bem_surfaces.py",
"doc/source/_templates/layout.html",
"examples/visualization/plot_evoked_delayed_ssp.py",
"examples/visualization/plot_meg_eeg_fields_3d.py",
"examples/preprocessing/plot_virtual_evoked.py",
"examples/io/plot_epochs_to_data_frame.py",
"examples/simulation/plot_simulate_evoked_data.py",
"examples/visualization/plot_topography.py",
"examples/visualization/README.txt",
"examples/preprocessing/plot_rereference_eeg.py",
"examples/forward/plot_make_forward.py",
"examples/visualization/plot_ssp_projs_sensitivity_map.py",
"examples/plot_from_raw_to_epochs_to_evoked.py",
"examples/io/plot_read_and_write_raw_data.py",
"examples/preprocessing/plot_define_target_events.py",
"examples/forward/plot_read_forward.py",
"mne/datasets/utils.py",
"examples/preprocessing/plot_shift_evoked.py",
"examples/io/README.txt",
"examples/forward/README.txt",
"examples/visualization/plot_ssp_projs_topomaps.py",
"examples/visualization/plot_topo_channel_epochs_image.py",
"examples/forward/plot_decimate_head_surface.py",
"mne/preprocessing/ica.py",
"examples/io/plot_objects_from_arrays.py",
"mne/io/kit/kit.py",
"examples/visualization/plot_clickable_image.py",
"examples/preprocessing/plot_estimate_covariance_matrix_baseline.py",
"examples/plot_compute_mne_inverse.py",
"examples/README.txt",
"examples/plot_from_raw_to_multiple_epochs_to_evoked.py",
"examples/visualization/plot_evoked_whitening.py",
"doc/source/_static/navy.css",
"examples/visualization/plot_evoked_erf_erp.py",
"examples/visualization/plot_evoked_topomap.py",
"examples/io/plot_read_noise_covariance_matrix.py",
"examples/visualization/plot_topo_compare_conditions.py",
"mne/io/base.py",
"examples/preprocessing/plot_estimate_covariance_matrix_raw.py",
"examples/plot_extract_events_from_raw.py",
"examples/simulation/README.txt",
"examples/connectivity/plot_mne_inverse_coherence_epochs.py",
"examples/forward/plot_left_cerebellum_volume_source.py"
]
| [
"examples/plot_evoked_topomap_delayed_ssp.py",
"Makefile",
"examples/plot_read_forward.py",
"examples/plot_shift_evoked.py",
"examples/plot_virtual_evoked.py",
"mne/connectivity/spectral.py",
"examples/plot_meg_eeg_fields_3d.py",
"mne/decoding/time_gen.py",
".gitignore",
"examples/plot_evoked_delayed_ssp.py",
"examples/plot_estimate_covariance_matrix_raw.py",
"examples/plot_decimate_head_surface.py",
"examples/plot_coregistration_transform.py",
"examples/plot_evoked_whitening.py",
"examples/plot_read_evoked.py",
"examples/plot_topography.py",
"mne/forward/forward.py",
"examples/plot_bem_contour_mri.py",
"doc/source/_templates/layout.html",
"examples/plot_clickable_image.py",
"examples/plot_rereference_eeg.py",
"examples/plot_topo_customized.py",
"examples/plot_read_and_write_raw_data.py",
"examples/visualization/README.txt",
"examples/plot_simulate_evoked_data.py",
"examples/read_events.py",
"examples/plot_read_noise_covariance_matrix.py",
"examples/plot_from_raw_to_epochs_to_evoked.py",
"mne/datasets/utils.py",
"examples/plot_define_target_events.py",
"examples/plot_left_cerebellum_volume_source.py",
"examples/io/README.txt",
"examples/forward/README.txt",
"examples/plot_make_forward.py",
"examples/plot_read_bem_surfaces.py",
"examples/plot_epochs_to_data_frame.py",
"mne/preprocessing/ica.py",
"mne/io/kit/kit.py",
"examples/plot_interpolate_bad_channels.py",
"examples/plot_channel_epochs_image.py",
"examples/inverse/plot_compute_mne_inverse.py",
"examples/README.txt",
"examples/plot_topo_channel_epochs_image.py",
"examples/plot_from_raw_to_multiple_epochs_to_evoked.py",
"examples/plot_ssp_projs_sensitivity_map.py",
"doc/source/_static/navy.css",
"examples/plot_evoked_topomap.py",
"examples/visualization/plot_evoked_erf_erp.py",
"examples/plot_ssp_projs_topomaps.py",
"examples/plot_objects_from_arrays.py",
"examples/plot_read_epochs.py",
"mne/io/base.py",
"examples/plot_topo_compare_conditions.py",
"examples/plot_extract_events_from_raw.py",
"examples/simulation/README.txt",
"examples/connectivity/plot_mne_inverse_coherence_epochs.py",
"examples/plot_estimate_covariance_matrix_baseline.py"
]
|
|
web2py__pydal-195 | 0f4c0f8c47193c6f4957d61d781fafcd36782d3f | 2015-05-20 20:21:15 | 0f4c0f8c47193c6f4957d61d781fafcd36782d3f | diff --git a/CHANGES b/CHANGES
index d14a85c1..0bce22ec 100644
--- a/CHANGES
+++ b/CHANGES
@@ -13,7 +13,16 @@ Next
- Implemented JSON serialization for objects
- Refactored many internal objects to improve performance
- Added python 3.x support (experimental)
-
+- New (and, as such, experimental) unicode handling in MSSQL
+ Use mssql4n:// or mssql3n:// uris. NB: These adapters will probably
+ become the de-facto standard for MSSQL handling.
+ Any other adapter will continue to be supported just for legacy
+ databases.
+- Restricted table and field names to a "dotted-notation-friendly" syntax,
+ meaning basically anything:
+ - alphanumeric
+ - not starting with underscore or an integer
+ you need to use the "rname" attribute for anything funnier than that
Version 15.03
-------------
diff --git a/pydal/helpers/regex.py b/pydal/helpers/regex.py
index 1139aaa4..c79094b6 100644
--- a/pydal/helpers/regex.py
+++ b/pydal/helpers/regex.py
@@ -10,7 +10,7 @@ REGEX_NO_GREEDY_ENTITY_NAME = r'(.+?)'
REGEX_UPLOAD_PATTERN = re.compile('(?P<table>[\w\-]+)\.(?P<field>[\w\-]+)\.(?P<uuidkey>[\w\-]+)(\.(?P<name>\w+))?\.\w+$')
REGEX_CLEANUP_FN = re.compile('[\'"\s;]+')
REGEX_UNPACK = re.compile('(?<!\|)\|(?!\|)')
-REGEX_PYTHON_KEYWORDS = re.compile('^(and|del|from|not|while|as|elif|global|or|with|assert|else|if|pass|yield|break|except|import|print|class|exec|in|raise|continue|finally|is|return|def|for|lambda|try)$')
+REGEX_PYTHON_KEYWORDS = re.compile('^(and|del|from|not|while|as|elif|global|or|with|assert|else|if|pass|yield|break|except|import|print|class|exec|in|raise|continue|finally|is|return|def|for|lambda|try|False|True|nonlocal)$')
REGEX_SELECT_AS_PARSER = re.compile("\s+AS\s+(\S+)")
REGEX_CONST_STRING = re.compile('(\"[^\"]*?\")|(\'[^\']*?\')')
REGEX_SEARCH_PATTERN = re.compile('^{[^\.]+\.[^\.]+(\.(lt|gt|le|ge|eq|ne|contains|startswith|year|month|day|hour|minute|second))?(\.not)?}$')
@@ -20,3 +20,4 @@ REGEX_QUOTES = re.compile("'[^']*'")
REGEX_ALPHANUMERIC = re.compile('^[0-9a-zA-Z]\w*$')
REGEX_PASSWORD = re.compile('\://([^:@]*)\:')
REGEX_NOPASSWD = re.compile('\/\/[\w\.\-]+[\:\/](.+)(?=@)') # was '(?<=[\:\/])([^:@/]+)(?=@.+)'
+REGEX_VALID_TB_FLD = re.compile(r'^[^\d_][_0-9a-zA-Z]*\Z')
diff --git a/pydal/objects.py b/pydal/objects.py
index 7e5e18ca..28d20dde 100644
--- a/pydal/objects.py
+++ b/pydal/objects.py
@@ -19,7 +19,7 @@ from ._gae import Key
from .exceptions import NotFoundException, NotAuthorizedException
from .helpers.regex import REGEX_TABLE_DOT_FIELD, REGEX_ALPHANUMERIC, \
REGEX_PYTHON_KEYWORDS, REGEX_STORE_PATTERN, REGEX_UPLOAD_PATTERN, \
- REGEX_CLEANUP_FN
+ REGEX_CLEANUP_FN, REGEX_VALID_TB_FLD
from .helpers.classes import Reference, MethodAdder, SQLCallableList, SQLALL, \
Serializable, BasicStorage
from .helpers.methods import list_represent, bar_decode_integer, \
@@ -205,8 +205,8 @@ class Table(Serializable, BasicStorage):
self._actual = False # set to True by define_table()
self._db = db
self._tablename = tablename
- if (not isinstance(tablename, str) or tablename[0] == '_'
- or hasattr(DAL, tablename) or '.' in tablename
+ if (not isinstance(tablename, str) or hasattr(DAL, tablename)
+ or not REGEX_VALID_TB_FLD.match(tablename)
or REGEX_PYTHON_KEYWORDS.match(tablename)
):
raise SyntaxError('Field: invalid table name: %s, '
@@ -1396,9 +1396,10 @@ class Field(Expression, Serializable):
except UnicodeEncodeError:
raise SyntaxError('Field: invalid unicode field name')
self.name = fieldname = cleanup(fieldname)
- if not isinstance(fieldname, str) or hasattr(Table, fieldname) or \
- fieldname[0] == '_' or '.' in fieldname or \
- REGEX_PYTHON_KEYWORDS.match(fieldname):
+ if (not isinstance(fieldname, str) or hasattr(Table, fieldname)
+ or not REGEX_VALID_TB_FLD.match(fieldname)
+ or REGEX_PYTHON_KEYWORDS.match(fieldname)
+ ):
raise SyntaxError('Field: invalid field name: %s, '
'use rname for "funny" names' % fieldname)
| fix allowed table and field names
As discussed in web2py-developers (such as [here](https://groups.google.com/d/msg/web2py-developers/1r4bq-ZFFkY/fYNfGSjqKeUJ "discussion")), we should reject any table or field name that is not a valid Python identifier. The cleanup() function needs a revision, and tests should be added to avoid regressions; a sketch of the intended check is shown after the test patch below. We have rname for legacy and/or funny names. | web2py/pydal | diff --git a/tests/sql.py b/tests/sql.py
index a422c6f1..7da2275b 100644
--- a/tests/sql.py
+++ b/tests/sql.py
@@ -52,6 +52,18 @@ def tearDownModule():
class TestFields(unittest.TestCase):
def testFieldName(self):
+ """
+ - a "str" something
+ - not a method or property of Table
+ - "dotted-notation" friendly:
+ - a valid python identifier
+ - not a python keyword
+ - not starting with underscore or an integer
+ - not containing dots
+
+ Basically, anything alphanumeric, no symbols, only underscore as
+ punctuation
+ """
# Check that Fields cannot start with underscores
self.assertRaises(SyntaxError, Field, '_abc', 'string')
@@ -64,31 +76,48 @@ class TestFields(unittest.TestCase):
self.assertRaises(SyntaxError, Field, x, 'string')
# Check that Fields allows underscores in the body of a field name.
- self.assert_(Field('a_bc', 'string'),
+ self.assertTrue(Field('a_bc', 'string'),
"Field isn't allowing underscores in fieldnames. It should.")
+ # Check that Field names don't allow a python keyword
+ self.assertRaises(SyntaxError, Field, 'True', 'string')
+ self.assertRaises(SyntaxError, Field, 'elif', 'string')
+ self.assertRaises(SyntaxError, Field, 'while', 'string')
+
+ # Check that Field names don't allow a non-valid python identifier
+ non_valid_examples = ["1x", "xx$%@%", "xx yy", "yy\na", "yy\n"]
+ for a in non_valid_examples:
+ self.assertRaises(SyntaxError, Field, a, 'string')
+
+ # Check that Field names don't allow a unicode string
+ non_valid_examples = ["ℙƴ☂ℌøἤ", u"ℙƴ☂ℌøἤ",
+ u'àè', u'ṧøмℯ', u'тεṧт', u'♥αłüℯṧ',
+ u'ℊεᾔ℮яαт℮∂', u'♭ƴ', u'ᾔ☤ρℌℓ☺ḓ']
+ for a in non_valid_examples:
+ self.assertRaises(SyntaxError, Field, a, 'string')
+
def testFieldTypes(self):
# Check that string, and password default length is 512
for typ in ['string', 'password']:
- self.assert_(Field('abc', typ).length == 512,
+ self.assertTrue(Field('abc', typ).length == 512,
"Default length for type '%s' is not 512 or 255" % typ)
# Check that upload default length is 512
- self.assert_(Field('abc', 'upload').length == 512,
+ self.assertTrue(Field('abc', 'upload').length == 512,
"Default length for type 'upload' is not 512")
# Check that Tables passed in the type creates a reference
- self.assert_(Field('abc', Table(None, 'temp')).type
+ self.assertTrue(Field('abc', Table(None, 'temp')).type
== 'reference temp',
'Passing an Table does not result in a reference type.')
def testFieldLabels(self):
# Check that a label is successfully built from the supplied fieldname
- self.assert_(Field('abc', 'string').label == 'Abc',
+ self.assertTrue(Field('abc', 'string').label == 'Abc',
'Label built is incorrect')
- self.assert_(Field('abc_def', 'string').label == 'Abc Def',
+ self.assertTrue(Field('abc_def', 'string').label == 'Abc Def',
'Label built is incorrect')
def testFieldFormatters(self): # Formatter should be called Validator
@@ -190,6 +219,18 @@ class TestFields(unittest.TestCase):
class TestTables(unittest.TestCase):
def testTableNames(self):
+ """
+ - a "str" something
+ - not a method or property of DAL
+ - "dotted-notation" friendly:
+ - a valid python identifier
+ - not a python keyword
+ - not starting with underscore or an integer
+ - not containing dots
+
+ Basically, anything alphanumeric, no symbols, only underscore as
+ punctuation
+ """
# Check that Tables cannot start with underscores
self.assertRaises(SyntaxError, Table, None, '_abc')
@@ -202,9 +243,26 @@ class TestTables(unittest.TestCase):
self.assertRaises(SyntaxError, Table, None, x)
# Check that Table allows underscores in the body of a field name.
- self.assert_(Table(None, 'a_bc'),
+ self.assertTrue(Table(None, 'a_bc'),
"Table isn't allowing underscores in tablename. It should.")
+ # Check that Table names don't allow a python keyword
+ self.assertRaises(SyntaxError, Table, None, 'True')
+ self.assertRaises(SyntaxError, Table, None, 'elif')
+ self.assertRaises(SyntaxError, Table, None, 'while')
+
+ # Check that Table names don't allow a non-valid python identifier
+ non_valid_examples = ["1x", "xx$%@%", "xx yy", "yy\na", "yy\n"]
+ for a in non_valid_examples:
+ self.assertRaises(SyntaxError, Table, None, a)
+
+ # Check that Table names don't allow a unicode string
+ non_valid_examples = ["ℙƴ☂ℌøἤ", u"ℙƴ☂ℌøἤ",
+ u'àè', u'ṧøмℯ', u'тεṧт', u'♥αłüℯṧ',
+ u'ℊεᾔ℮яαт℮∂', u'♭ƴ', u'ᾔ☤ρℌℓ☺ḓ']
+ for a in non_valid_examples:
+ self.assertRaises(SyntaxError, Table, None, a)
+
class TestAll(unittest.TestCase):
@@ -230,12 +288,12 @@ class TestTable(unittest.TestCase):
# Does it have the correct fields?
- self.assert_(set(persons.fields).issuperset(set(['firstname',
+ self.assertTrue(set(persons.fields).issuperset(set(['firstname',
'lastname'])))
# ALL is set correctly
- self.assert_('persons.firstname, persons.lastname'
+ self.assertTrue('persons.firstname, persons.lastname'
in str(persons.ALL))
def testTableAlias(self):
@@ -246,8 +304,8 @@ class TestTable(unittest.TestCase):
# Are the different table instances with the same fields
- self.assert_(persons is not aliens)
- self.assert_(set(persons.fields) == set(aliens.fields))
+ self.assertTrue(persons is not aliens)
+ self.assertTrue(set(persons.fields) == set(aliens.fields))
db.close()
def testTableInheritance(self):
@@ -256,7 +314,7 @@ class TestTable(unittest.TestCase):
customers = Table(None, 'customers',
Field('items_purchased', 'integer'),
persons)
- self.assert_(set(customers.fields).issuperset(set(
+ self.assertTrue(set(customers.fields).issuperset(set(
['items_purchased', 'firstname', 'lastname'])))
@@ -1698,7 +1756,6 @@ class TestQuotesByDefault(unittest.TestCase):
def testme(self):
return
-
class TestGis(unittest.TestCase):
def testGeometry(self):
@@ -1897,7 +1954,7 @@ class TestRecordVersioning(unittest.TestCase):
db.t0.insert(name='web2py2')
db(db.t0.name == 'web2py2').delete()
self.assertEqual(len(db(db.t0).select()), 1)
- self.assertEquals(db(db.t0).count(), 1)
+ self.assertEqual(db(db.t0).count(), 1)
db(db.t0.id == i_id).update(name='web2py3')
self.assertEqual(len(db(db.t0).select()), 1)
self.assertEqual(db(db.t0).count(), 1)
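A minimal sketch of the validation the patch above introduces. The regex is copied from `pydal/helpers/regex.py` as patched; the `is_valid_name` helper and the use of `keyword.iskeyword` are illustrative assumptions (pydal itself combines `REGEX_VALID_TB_FLD` with its own `REGEX_PYTHON_KEYWORDS` inside `Table` and `Field`).

```python
import keyword
import re

# Regex from the patch: no leading digit or underscore, then only
# ASCII alphanumerics/underscores up to the very end of the string.
REGEX_VALID_TB_FLD = re.compile(r'^[^\d_][_0-9a-zA-Z]*\Z')


def is_valid_name(name):
    """Hypothetical helper mirroring the checks in Table/Field __init__."""
    return (isinstance(name, str)
            and bool(REGEX_VALID_TB_FLD.match(name))
            and not keyword.iskeyword(name))


# Matches what the new tests assert: 'a_bc' is fine, while names that
# start with '_' or a digit, contain dots or symbols, or are Python
# keywords are rejected and must use rname instead.
assert is_valid_name('a_bc')
assert not any(is_valid_name(n) for n in ('_abc', '1x', 'a.b', 'while'))
```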
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 0
},
"num_modified_files": 3
} | 15.03 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.7",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///croot/attrs_1668696182826/work
certifi @ file:///croot/certifi_1671487769961/work/certifi
flit_core @ file:///opt/conda/conda-bld/flit-core_1644941570762/work/source/flit_core
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1648562407465/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
packaging @ file:///croot/packaging_1671697413597/work
pluggy @ file:///tmp/build/80754af9/pluggy_1648042572264/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
-e git+https://github.com/web2py/pydal.git@0f4c0f8c47193c6f4957d61d781fafcd36782d3f#egg=pyDAL
pytest==7.1.2
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
typing_extensions @ file:///croot/typing_extensions_1669924550328/work
zipp @ file:///croot/zipp_1672387121353/work
| name: pydal
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=22.1.0=py37h06a4308_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- flit-core=3.6.0=pyhd3eb1b0_0
- importlib-metadata=4.11.3=py37h06a4308_0
- importlib_metadata=4.11.3=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=22.0=py37h06a4308_0
- pip=22.3.1=py37h06a4308_0
- pluggy=1.0.0=py37h06a4308_1
- py=1.11.0=pyhd3eb1b0_0
- pytest=7.1.2=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py37h06a4308_0
- typing_extensions=4.4.0=py37h06a4308_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zipp=3.11.0=py37h06a4308_0
- zlib=1.2.13=h5eee18b_1
prefix: /opt/conda/envs/pydal
| [
"tests/sql.py::TestFields::testFieldName",
"tests/sql.py::TestTables::testTableNames"
]
| [
"tests/sql.py::TestSerializers::testSelectIterselect",
"tests/sql.py::TestIterselect::testRun"
]
| [
"tests/sql.py::TestFields::testFieldFormatters",
"tests/sql.py::TestFields::testFieldLabels",
"tests/sql.py::TestFields::testFieldTypes",
"tests/sql.py::TestFields::testRun",
"tests/sql.py::TestAll::testSQLALL",
"tests/sql.py::TestTable::testTableAlias",
"tests/sql.py::TestTable::testTableCreation",
"tests/sql.py::TestTable::testTableInheritance",
"tests/sql.py::TestInsert::testRun",
"tests/sql.py::TestSelect::testListInteger",
"tests/sql.py::TestSelect::testListReference",
"tests/sql.py::TestSelect::testListString",
"tests/sql.py::TestSelect::testRun",
"tests/sql.py::TestSelect::testTestQuery",
"tests/sql.py::TestAddMethod::testRun",
"tests/sql.py::TestBelongs::testRun",
"tests/sql.py::TestContains::testRun",
"tests/sql.py::TestLike::testRun",
"tests/sql.py::TestDatetime::testRun",
"tests/sql.py::TestExpressions::testOps",
"tests/sql.py::TestExpressions::testRun",
"tests/sql.py::TestExpressions::testSubstring",
"tests/sql.py::TestJoin::testRun",
"tests/sql.py::TestMinMaxSumAvg::testRun",
"tests/sql.py::TestMigrations::testRun",
"tests/sql.py::TestReference::testRun",
"tests/sql.py::TestClientLevelOps::testRun",
"tests/sql.py::TestVirtualFields::testRun",
"tests/sql.py::TestComputedFields::testRun",
"tests/sql.py::TestCommonFilters::testRun",
"tests/sql.py::TestImportExportFields::testRun",
"tests/sql.py::TestImportExportUuidFields::testRun",
"tests/sql.py::TestDALDictImportExport::testRun",
"tests/sql.py::TestSelectAsDict::testSelect",
"tests/sql.py::TestRNameTable::testJoin",
"tests/sql.py::TestRNameTable::testSelect",
"tests/sql.py::TestRNameFields::testInsert",
"tests/sql.py::TestRNameFields::testJoin",
"tests/sql.py::TestRNameFields::testRun",
"tests/sql.py::TestRNameFields::testSelect",
"tests/sql.py::TestQuoting::testCase",
"tests/sql.py::TestQuoting::testPKFK",
"tests/sql.py::TestTableAndFieldCase::testme",
"tests/sql.py::TestQuotesByDefault::testme",
"tests/sql.py::TestGis::testGeometry",
"tests/sql.py::TestGis::testGeometryCase",
"tests/sql.py::TestGis::testGisMigration",
"tests/sql.py::TestSQLCustomType::testRun",
"tests/sql.py::TestLazy::testLazyGetter",
"tests/sql.py::TestLazy::testRowNone",
"tests/sql.py::TestLazy::testRun",
"tests/sql.py::TestRedefine::testRun",
"tests/sql.py::TestUpdateInsert::testRun",
"tests/sql.py::TestBulkInsert::testRun",
"tests/sql.py::TestRecordVersioning::testRun",
"tests/sql.py::TestSerializers::testAsJson"
]
| []
| BSD 3-Clause "New" or "Revised" License | 145 | [
"CHANGES",
"pydal/objects.py",
"pydal/helpers/regex.py"
]
| [
"CHANGES",
"pydal/objects.py",
"pydal/helpers/regex.py"
]
|
|
rthalley__dnspython-98 | 0b8ae7fd1107f330f94afc03311f233d51968a66 | 2015-05-21 12:36:37 | a4774eea299e6a0ce01c074973dbddbe6fe20636 | diff --git a/dns/exception.py b/dns/exception.py
index 60b4912..cbcdb57 100644
--- a/dns/exception.py
+++ b/dns/exception.py
@@ -15,14 +15,86 @@
"""Common DNS Exceptions."""
+
class DNSException(Exception):
- """Abstract base class shared by all dnspython exceptions."""
- def __init__(self, *args):
+ """Abstract base class shared by all dnspython exceptions.
+
+ It supports two basic modes of operation:
+
+ a) Old/compatible mode is used if __init__ was called with
+ empty **kwargs.
+ In compatible mode all *args are passed to the standard Python
+ Exception class as before and all *args are printed by the standard
+ __str__ implementation.
+ Class variable msg (or doc string if msg is None) is returned from str()
+ if *args is empty.
+
+ b) New/parametrized mode is used if __init__ was called with
+ non-empty **kwargs.
+ In the new mode *args has to be empty and all kwargs have to exactly
+ match the set in the class variable self.supp_kwargs. All kwargs are
+ stored inside self.kwargs and used in the new __str__ implementation
+ to construct a formatted message based on the self.fmt string.
+
+ In the simplest case it is enough to override supp_kwargs and fmt
+ class variables to get nice parametrized messages.
+ """
+ msg = None # non-parametrized message
+ supp_kwargs = set() # accepted parameters for _fmt_kwargs (sanity check)
+ fmt = None # message parametrized with results from _fmt_kwargs
+
+ def __init__(self, *args, **kwargs):
+ self._check_params(*args, **kwargs)
+ self._check_kwargs(**kwargs)
+ self.kwargs = kwargs
+ if self.msg is None:
+ # doc string is better implicit message than empty string
+ self.msg = self.__doc__
if args:
super(DNSException, self).__init__(*args)
else:
- # doc string is better implicit message than empty string
- super(DNSException, self).__init__(self.__doc__)
+ super(DNSException, self).__init__(self.msg)
+
+ def _check_params(self, *args, **kwargs):
+ """Old exceptions supported only args and not kwargs.
+
+ For sanity we do not allow mixing old and new behavior."""
+ if args or kwargs:
+ assert bool(args) != bool(kwargs), \
+ 'keyword arguments are mutually exclusive with positional args'
+
+ def _check_kwargs(self, **kwargs):
+ if kwargs:
+ assert set(kwargs.keys()) == self.supp_kwargs, \
+ 'following set of keyword args is required: %s' % (
+ self.supp_kwargs)
+
+ def _fmt_kwargs(self, **kwargs):
+ """Format kwargs before printing them.
+
+ The resulting dictionary has to have the keys necessary for the
+ str.format call on the fmt class variable.
+ """
+ fmtargs = {}
+ for kw, data in kwargs.items():
+ if isinstance(data, (list, set)):
+ # convert list of <someobj> to list of str(<someobj>)
+ fmtargs[kw] = list(map(str, data))
+ if len(fmtargs[kw]) == 1:
+ # remove list brackets [] from single-item lists
+ fmtargs[kw] = fmtargs[kw].pop()
+ else:
+ fmtargs[kw] = data
+ return fmtargs
+
+ def __str__(self):
+ if self.kwargs and self.fmt:
+ # provide custom message constructed from keyword arguments
+ fmtargs = self._fmt_kwargs(**self.kwargs)
+ return self.fmt.format(**fmtargs)
+ else:
+ # print *args directly in the same way as old DNSException
+ return super(DNSException, self).__str__()
+
class FormError(DNSException):
"""DNS message is malformed."""
@@ -38,3 +110,5 @@ class TooBig(DNSException):
class Timeout(DNSException):
"""The DNS operation timed out."""
+ supp_kwargs = set(['timeout'])
+ fmt = "%s after {timeout} seconds" % __doc__[:-1]
diff --git a/dns/resolver.py b/dns/resolver.py
index 387bd50..443e1d0 100644
--- a/dns/resolver.py
+++ b/dns/resolver.py
@@ -48,6 +48,21 @@ if sys.platform == 'win32':
class NXDOMAIN(dns.exception.DNSException):
"""The DNS query name does not exist."""
+ supp_kwargs = set(['qname'])
+
+ def __str__(self):
+ if 'qname' not in self.kwargs:
+ return super(NXDOMAIN, self).__str__()
+
+ qname = self.kwargs['qname']
+ msg = self.__doc__[:-1]
+ if isinstance(qname, (list, set)):
+ if len(qname) > 1:
+ msg = 'None of the DNS query names exist'
+ qname = list(map(str, qname))
+ else:
+ qname = qname[0]
+ return "%s: %s" % (msg, (str(qname)))
class YXDOMAIN(dns.exception.DNSException):
"""The DNS query name is too long after DNAME substitution."""
@@ -58,40 +73,36 @@ class YXDOMAIN(dns.exception.DNSException):
Timeout = dns.exception.Timeout
+
class NoAnswer(dns.exception.DNSException):
"""The DNS response does not contain an answer to the question."""
- def __init__(self, question=None):
- super(dns.exception.DNSException, self).__init__()
- self.question = question
-
- def __str__(self):
- message = self.__doc__
- if self.question:
- message = message[0:-1]
- for q in self.question:
- message += ' %s' % q
- return message
+ fmt = '%s: {query}' % __doc__[:-1]
+ supp_kwargs = set(['response'])
+ def _fmt_kwargs(self, **kwargs):
+ return super(NoAnswer, self)._fmt_kwargs(
+ query=kwargs['response'].question)
class NoNameservers(dns.exception.DNSException):
- """No non-broken nameservers are available to answer the query."""
- def __init__(self, errors=[]):
- """Optionally construct message with list of servers and errors.
+ """All nameservers failed to answer the query.
- @param errors: list of servers and respective errors
- @type errors: [(server ip address, any object convertible to string)]
- """
- super(dns.exception.DNSException, self).__init__()
- self.errors = errors
+ @param errors: list of servers and respective errors
+ @type errors: [(server ip address, any object convertible to string)]
+ A non-empty errors list adds an explanatory message.
+ """
+
+ msg = "All nameservers failed to answer the query."
+ fmt = "%s {query}: {errors}" % msg[:-1]
+ supp_kwargs = set(['request', 'errors'])
+
+ def _fmt_kwargs(self, **kwargs):
+ srv_msgs = []
+ for err in kwargs['errors']:
+ srv_msgs.append('Server %s %s port %s answered %s' % (err[0],
+ 'TCP' if err[1] else 'UDP', err[2], err[3]))
+ return super(NoNameservers, self)._fmt_kwargs(
+ query=kwargs['request'].question, errors='; '.join(srv_msgs))
- def __str__(self):
- message = self.__doc__
- if self.errors:
- srv_msgs = []
- for err in self.errors:
- srv_msgs.append('Server %s %s' % (err[0], err[1]))
- message += ' %s' % '; '.join(srv_msgs)
- return message
class NotAbsolute(dns.exception.DNSException):
"""An absolute domain name is required but a relative name was provided."""
@@ -164,9 +175,9 @@ class Answer(object):
if raise_on_no_answer:
raise NoAnswer(question=response.question)
if raise_on_no_answer:
- raise NoAnswer(question=response.question)
+ raise NoAnswer(response=response)
if rrset is None and raise_on_no_answer:
- raise NoAnswer(question=response.question)
+ raise NoAnswer(response=response)
self.canonical_name = qname
self.rrset = rrset
if rrset is None:
@@ -742,18 +753,18 @@ class Resolver(object):
def _compute_timeout(self, start):
now = time.time()
- if now < start:
- if start - now > 1:
+ duration = now - start
+ if duration < 0:
+ if duration < -1:
# Time going backwards is bad. Just give up.
- raise Timeout
+ raise Timeout(timeout=duration)
else:
# Time went backwards, but only a little. This can
# happen, e.g. under vmware with older linux kernels.
# Pretend it didn't happen.
now = start
- duration = now - start
if duration >= self.lifetime:
- raise Timeout
+ raise Timeout(timeout=duration)
return min(self.lifetime - duration, self.timeout)
def query(self, qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,
@@ -839,10 +850,11 @@ class Resolver(object):
backoff = 0.10
while response is None:
if len(nameservers) == 0:
- raise NoNameservers(errors)
+ raise NoNameservers(request=request, errors=errors)
for nameserver in nameservers[:]:
timeout = self._compute_timeout(start)
try:
+ tcp_attempt = tcp
if tcp:
response = dns.query.tcp(request, nameserver,
timeout, self.port,
@@ -855,6 +867,7 @@ class Resolver(object):
source_port=source_port)
if response.flags & dns.flags.TC:
# Response truncated; retry with TCP.
+ tcp_attempt = True
timeout = self._compute_timeout(start)
response = dns.query.tcp(request, nameserver,
timeout, self.port,
@@ -865,15 +878,17 @@ class Resolver(object):
# Communication failure or timeout. Go to the
# next server
#
+ errors.append((nameserver, tcp_attempt, self.port, ex,
+ response))
response = None
- errors.append((nameserver, ex))
continue
except dns.query.UnexpectedSource as ex:
#
# Who knows? Keep going.
#
+ errors.append((nameserver, tcp_attempt, self.port, ex,
+ response))
response = None
- errors.append((nameserver, ex))
continue
except dns.exception.FormError as ex:
#
@@ -882,8 +897,9 @@ class Resolver(object):
# continue.
#
nameservers.remove(nameserver)
+ errors.append((nameserver, tcp_attempt, self.port, ex,
+ response))
response = None
- errors.append((nameserver, ex))
continue
except EOFError as ex:
#
@@ -893,13 +909,15 @@ class Resolver(object):
# mix and continue.
#
nameservers.remove(nameserver)
+ errors.append((nameserver, tcp_attempt, self.port, ex,
+ response))
response = None
- errors.append((nameserver, ex))
continue
rcode = response.rcode()
if rcode == dns.rcode.YXDOMAIN:
ex = YXDOMAIN()
- errors.append((nameserver, ex))
+ errors.append((nameserver, tcp_attempt, self.port, ex,
+ response))
raise ex
if rcode == dns.rcode.NOERROR or \
rcode == dns.rcode.NXDOMAIN:
@@ -911,7 +929,8 @@ class Resolver(object):
#
if rcode != dns.rcode.SERVFAIL or not self.retry_servfail:
nameservers.remove(nameserver)
- errors.append((nameserver, dns.rcode.to_text(rcode)))
+ errors.append((nameserver, tcp_attempt, self.port,
+ dns.rcode.to_text(rcode), response))
response = None
if not response is None:
break
@@ -932,7 +951,7 @@ class Resolver(object):
all_nxdomain = False
break
if all_nxdomain:
- raise NXDOMAIN
+ raise NXDOMAIN(qname=qnames_to_try)
answer = Answer(qname, rdtype, rdclass, response,
raise_on_no_answer)
if self.cache:
| Python3 branch is missing commits from pull #92
Please be so kind as to merge #92 into the python3 branch too. Currently, Python 2 and Python 3 exceptions are very different when it comes to str(); a usage sketch of the new behavior is shown after the test patch below.
Thank you! | rthalley/dnspython | diff --git a/tests/test_exceptions.py b/tests/test_exceptions.py
index 3fd7331..90a6af4 100644
--- a/tests/test_exceptions.py
+++ b/tests/test_exceptions.py
@@ -18,6 +18,12 @@ import unittest
from dns.exception import DNSException
+
+class FormatedError(DNSException):
+ fmt = "Custom format: {parameter}"
+ supp_kwargs = set(['parameter'])
+
+
class ExceptionTestCase(unittest.TestCase):
def test_custom_message(self):
@@ -33,6 +39,24 @@ class ExceptionTestCase(unittest.TestCase):
except DNSException as ex:
self.assertEqual(ex.__class__.__doc__, str(ex))
+ def test_formatted_error(self):
+ """Exceptions with explicit format has to respect it."""
+ params = {'parameter': 'value'}
+ try:
+ raise FormatedError(**params)
+ except FormatedError as ex:
+ msg = FormatedError.fmt.format(**params)
+ self.assertEqual(msg, str(ex))
+
+ def test_kwargs_only(self):
+ """Kwargs cannot be combined with args."""
+ with self.assertRaises(AssertionError):
+ raise FormatedError(1, a=2)
+
+ def test_kwargs_unsupported(self):
+ """Only supported kwargs are accepted."""
+ with self.assertRaises(AssertionError):
+ raise FormatedError(unsupported=2)
if __name__ == '__main__':
unittest.main()
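A usage sketch of the two DNSException modes the patch introduces, reusing the FormatedError class from the test patch above (spelling kept as defined there). The rendered strings in the comments follow from fmt/supp_kwargs and the new __str__ implementation.

```python
from dns.exception import DNSException, Timeout


class FormatedError(DNSException):
    fmt = "Custom format: {parameter}"
    supp_kwargs = set(['parameter'])


# New/parametrized mode: kwargs must equal supp_kwargs and are rendered
# through the fmt template by the new __str__ implementation.
print(FormatedError(parameter='value'))  # Custom format: value

# Built-in example from the patch: Timeout formats its kwarg the same way.
print(Timeout(timeout=3.2))  # The DNS operation timed out after 3.2 seconds

# Old/compatible mode: positional args behave exactly as before.
print(FormatedError('plain message'))    # plain message
```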
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 3,
"test_score": 3
},
"num_modified_files": 2
} | 1.12 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/rthalley/dnspython.git@0b8ae7fd1107f330f94afc03311f233d51968a66#egg=dnspython3
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: dnspython
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
prefix: /opt/conda/envs/dnspython
| [
"tests/test_exceptions.py::ExceptionTestCase::test_formatted_error",
"tests/test_exceptions.py::ExceptionTestCase::test_kwargs_only",
"tests/test_exceptions.py::ExceptionTestCase::test_kwargs_unsupported"
]
| []
| [
"tests/test_exceptions.py::ExceptionTestCase::test_custom_message",
"tests/test_exceptions.py::ExceptionTestCase::test_implicit_message"
]
| []
| ISC License | 146 | [
"dns/resolver.py",
"dns/exception.py"
]
| [
"dns/resolver.py",
"dns/exception.py"
]
|
|
pre-commit__pre-commit-235 | b4bc5e47423635e187d50d8730584d2c8ff06772 | 2015-05-24 03:03:11 | 5791d84236d82f8aa8609c3ff1c69a991d8c6607 | diff --git a/pre_commit/commands/install_uninstall.py b/pre_commit/commands/install_uninstall.py
index c84d29b..47c9484 100644
--- a/pre_commit/commands/install_uninstall.py
+++ b/pre_commit/commands/install_uninstall.py
@@ -48,6 +48,9 @@ def install(runner, overwrite=False, hooks=False, hook_type='pre-commit'):
hook_path = runner.get_hook_path(hook_type)
legacy_path = hook_path + '.legacy'
+ if not os.path.exists(os.path.dirname(hook_path)):
+ os.makedirs(os.path.dirname(hook_path))
+
# If we have an existing hook, move it to pre-commit.legacy
if (
os.path.exists(hook_path) and
| Some versions of git don't create the .git/hooks directory
Noticed here: https://github.com/victorlin/bugbuzz-python/pull/1#issuecomment-104971132. The fix is to create the directory before writing the hook; a sketch follows the test patch below. | pre-commit/pre-commit | diff --git a/tests/commands/install_uninstall_test.py b/tests/commands/install_uninstall_test.py
index 9e1806e..ca82c06 100644
--- a/tests/commands/install_uninstall_test.py
+++ b/tests/commands/install_uninstall_test.py
@@ -5,6 +5,7 @@ import io
import os
import os.path
import re
+import shutil
import subprocess
import sys
@@ -78,6 +79,15 @@ def test_install_pre_commit(tmpdir_factory):
assert pre_push_contents == expected_contents
+def test_install_hooks_directory_not_present(tmpdir_factory):
+ path = git_dir(tmpdir_factory)
+ # Simulate some git clients which don't make .git/hooks #234
+ shutil.rmtree(os.path.join(path, '.git', 'hooks'))
+ runner = Runner(path)
+ install(runner)
+ assert os.path.exists(runner.pre_commit_path)
+
+
def test_uninstall_does_not_blow_up_when_not_there(tmpdir_factory):
path = git_dir(tmpdir_factory)
runner = Runner(path)
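A standalone sketch of the guard the patch adds, for clients whose git never creates .git/hooks. The `write_hook` helper and its arguments are illustrative assumptions, not pre-commit's real API.

```python
import os


def write_hook(hook_path, contents):
    """Hypothetical helper: install a hook, creating .git/hooks if absent."""
    hook_dir = os.path.dirname(hook_path)
    # Mirror the patch: some git clients never create .git/hooks (#234),
    # so make the directory before touching any files inside it.
    if not os.path.exists(hook_dir):
        os.makedirs(hook_dir)
    with open(hook_path, 'w') as hook_file:
        hook_file.write(contents)
    os.chmod(hook_path, 0o755)


write_hook('.git/hooks/pre-commit', '#!/bin/sh\necho pre-commit\n')
```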
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 1
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aspy.yaml==1.3.0
astroid==1.3.2
attrs==22.2.0
cached-property==1.5.2
certifi==2021.5.30
coverage==6.2
distlib==0.3.9
filelock==3.4.1
flake8==5.0.4
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
jsonschema==3.2.0
logilab-common==1.9.7
mccabe==0.7.0
mock==5.2.0
mypy-extensions==1.0.0
nodeenv==1.6.0
ordereddict==1.1
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
-e git+https://github.com/pre-commit/pre-commit.git@b4bc5e47423635e187d50d8730584d2c8ff06772#egg=pre_commit
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
pylint==1.3.1
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
PyYAML==6.0.1
simplejson==3.20.1
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
virtualenv==20.17.1
zipp==3.6.0
| name: pre-commit
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- argparse==1.4.0
- aspy-yaml==1.3.0
- astroid==1.3.2
- attrs==22.2.0
- cached-property==1.5.2
- coverage==6.2
- distlib==0.3.9
- filelock==3.4.1
- flake8==5.0.4
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- jsonschema==3.2.0
- logilab-common==1.9.7
- mccabe==0.7.0
- mock==5.2.0
- mypy-extensions==1.0.0
- nodeenv==1.6.0
- ordereddict==1.1
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pylint==1.3.1
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pyyaml==6.0.1
- simplejson==3.20.1
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- virtualenv==20.17.1
- zipp==3.6.0
prefix: /opt/conda/envs/pre-commit
| [
"tests/commands/install_uninstall_test.py::test_install_hooks_directory_not_present"
]
| [
"tests/commands/install_uninstall_test.py::test_install_pre_commit_and_run",
"tests/commands/install_uninstall_test.py::test_install_idempotent",
"tests/commands/install_uninstall_test.py::test_environment_not_sourced",
"tests/commands/install_uninstall_test.py::test_failing_hooks_returns_nonzero",
"tests/commands/install_uninstall_test.py::test_install_existing_hooks_no_overwrite",
"tests/commands/install_uninstall_test.py::test_install_existing_hook_no_overwrite_idempotent",
"tests/commands/install_uninstall_test.py::test_failing_existing_hook_returns_1",
"tests/commands/install_uninstall_test.py::test_install_overwrite_no_existing_hooks",
"tests/commands/install_uninstall_test.py::test_install_overwrite",
"tests/commands/install_uninstall_test.py::test_uninstall_restores_legacy_hooks",
"tests/commands/install_uninstall_test.py::test_replace_old_commit_script",
"tests/commands/install_uninstall_test.py::test_installs_hooks_with_hooks_True",
"tests/commands/install_uninstall_test.py::test_installed_from_venv",
"tests/commands/install_uninstall_test.py::test_pre_push_integration_failing",
"tests/commands/install_uninstall_test.py::test_pre_push_integration_accepted"
]
| [
"tests/commands/install_uninstall_test.py::test_is_not_our_pre_commit",
"tests/commands/install_uninstall_test.py::test_is_our_pre_commit",
"tests/commands/install_uninstall_test.py::test_is_not_previous_pre_commit",
"tests/commands/install_uninstall_test.py::test_is_also_not_previous_pre_commit",
"tests/commands/install_uninstall_test.py::test_is_previous_pre_commit",
"tests/commands/install_uninstall_test.py::test_install_pre_commit",
"tests/commands/install_uninstall_test.py::test_uninstall_does_not_blow_up_when_not_there",
"tests/commands/install_uninstall_test.py::test_uninstall",
"tests/commands/install_uninstall_test.py::test_uninstall_doesnt_remove_not_our_hooks"
]
| []
| MIT License | 148 | [
"pre_commit/commands/install_uninstall.py"
]
| [
"pre_commit/commands/install_uninstall.py"
]
|
|
wndhydrnt__python-oauth2-48 | f37a26ccb2a0bd5c6682a798f0e1669d75aacd5a | 2015-05-24 16:36:53 | 9d56b2202515aaaf3bed7a5b3bc3b61f7fe17199 | diff --git a/CHANGELOG.md b/CHANGELOG.md
index f15bc9e..b5fcbb5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -13,6 +13,7 @@ Improvements:
- Move WSGI server code into its own module ([@wndhydrnt][])
- Renamed class acting as entrypoint for WSGI server from 'Server' to 'Application' ([@wndhydrnt][])
- Client Credentials Grant example ([@shupp][])
+ - Methods `authenticate` and `render_auth_page` of a Site Adapter accept an instance of `oauth2.datatype.Client` ([@wndhydrnt][])
Bugfixes:
diff --git a/README.rst b/README.rst
index 0b77e9b..a745d83 100644
--- a/README.rst
+++ b/README.rst
@@ -42,14 +42,15 @@ Example Authorization server
# This can be used to display confirmation dialogs and the like.
class ExampleSiteAdapter(oauth2.web.AuthorizationCodeGrantSiteAdapter,
oauth2.web.ImplicitGrantSiteAdapter):
- def authenticate(self, request, environ, scopes):
+ def authenticate(self, request, environ, scopes, client):
# Check if the user has granted access
if request.post_param("confirm") == "confirm":
return {}
raise oauth2.error.UserNotAuthenticated
- def render_auth_page(self, request, response, environ, scopes):
+ def render_auth_page(self, request, response, environ, scopes,
+ client):
response.body = '''
<html>
<body>
diff --git a/docs/examples/authorization_code_grant.py b/docs/examples/authorization_code_grant.py
index a8ecf82..ff82b4c 100644
--- a/docs/examples/authorization_code_grant.py
+++ b/docs/examples/authorization_code_grant.py
@@ -68,12 +68,12 @@ class TestSiteAdapter(AuthorizationCodeGrantSiteAdapter):
</html>
"""
- def render_auth_page(self, request, response, environ, scopes):
+ def render_auth_page(self, request, response, environ, scopes, client):
response.body = self.CONFIRMATION_TEMPLATE
return response
- def authenticate(self, request, environ, scopes):
+ def authenticate(self, request, environ, scopes, client):
if request.method == "POST":
if request.post_param("confirm") == "1":
return
diff --git a/docs/examples/base_server.py b/docs/examples/base_server.py
index be43cb5..27ac9b1 100644
--- a/docs/examples/base_server.py
+++ b/docs/examples/base_server.py
@@ -11,14 +11,14 @@ import oauth2.web.wsgi
# This can be used to display confirmation dialogs and the like.
class ExampleSiteAdapter(oauth2.web.AuthorizationCodeGrantSiteAdapter,
oauth2.web.ImplicitGrantSiteAdapter):
- def authenticate(self, request, environ, scopes):
+ def authenticate(self, request, environ, scopes, client):
# Check if the user has granted access
if request.post_param("confirm") == "confirm":
return {}
raise oauth2.error.UserNotAuthenticated
- def render_auth_page(self, request, response, environ, scopes):
+ def render_auth_page(self, request, response, environ, scopes, client):
response.body = '''
<html>
<body>
diff --git a/docs/examples/implicit_grant.py b/docs/examples/implicit_grant.py
index 4368f98..0d244d3 100644
--- a/docs/examples/implicit_grant.py
+++ b/docs/examples/implicit_grant.py
@@ -40,13 +40,13 @@ class TestSiteAdapter(ImplicitGrantSiteAdapter):
</html>
"""
- def render_auth_page(self, request, response, environ, scopes):
+ def render_auth_page(self, request, response, environ, scopes, client):
# Add check if the user is logged or a redirect to the login page here
response.body = self.CONFIRMATION_TEMPLATE
return response
- def authenticate(self, request, environ, scopes):
+ def authenticate(self, request, environ, scopes, client):
if request.method == "POST":
if request.post_param("confirm") == "1":
return
diff --git a/docs/examples/resource_owner_grant.py b/docs/examples/resource_owner_grant.py
index 16d7990..d539ee6 100644
--- a/docs/examples/resource_owner_grant.py
+++ b/docs/examples/resource_owner_grant.py
@@ -157,7 +157,7 @@ class ClientApplication(object):
class TestSiteAdapter(ResourceOwnerGrantSiteAdapter):
- def authenticate(self, request, environ, scopes):
+ def authenticate(self, request, environ, scopes, client):
username = request.post_param("username")
password = request.post_param("password")
# A real world application could connect to a database, try to
diff --git a/docs/examples/tornado_server.py b/docs/examples/tornado_server.py
index 869be94..f9a77b6 100644
--- a/docs/examples/tornado_server.py
+++ b/docs/examples/tornado_server.py
@@ -70,12 +70,12 @@ class TestSiteAdapter(AuthorizationCodeGrantSiteAdapter):
</html>
"""
- def render_auth_page(self, request, response, environ, scopes):
+ def render_auth_page(self, request, response, environ, scopes, client):
response.body = self.CONFIRMATION_TEMPLATE
return response
- def authenticate(self, request, environ, scopes):
+ def authenticate(self, request, environ, scopes, client):
if request.method == "POST":
if request.post_param("confirm") == "1":
return
diff --git a/docs/migration.rst b/docs/migration.rst
index 218db95..c4ccf14 100644
--- a/docs/migration.rst
+++ b/docs/migration.rst
@@ -83,3 +83,32 @@ After:
# Instantiating storage and provider...
app = Application(provider=provider)
+
+
+Client passed to methods authenticate and render_auth_page of a Site Adapter
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Before:
+
+.. code-block:: python
+
+ class ExampleSiteAdapter(AuthenticatingSiteAdapter, UserFacingSiteAdapter):
+ def authenticate(self, request, environ, scopes):
+ # code
+
+ def render_auth_page(self, request, response, environ, scopes):
+ # code
+
+
+After:
+
+.. code-block:: python
+
+ class ExampleSiteAdapter(AuthenticatingSiteAdapter, UserFacingSiteAdapter):
+ def authenticate(self, request, environ, scopes, client):
+ # code
+
+ def render_auth_page(self, request, response, environ, scopes, client):
+ # code
+
+
diff --git a/oauth2/grant.py b/oauth2/grant.py
index a792edd..1407af0 100644
--- a/oauth2/grant.py
+++ b/oauth2/grant.py
@@ -306,12 +306,14 @@ class AuthorizeMixin(object):
explanation="Authorization denied by user")
try:
- result = self.site_adapter.authenticate(request, environ, scopes)
+ result = self.site_adapter.authenticate(request, environ, scopes,
+ self.client)
return self.sanitize_return_value(result)
except UserNotAuthenticated:
return self.site_adapter.render_auth_page(request, response,
- environ, scopes)
+ environ, scopes,
+ self.client)
@staticmethod
def sanitize_return_value(value):
@@ -816,7 +818,8 @@ class ResourceOwnerGrantHandler(GrantHandler, AccessTokenMixin):
"""
try:
data = self.site_adapter.authenticate(request, environ,
- self.scope_handler.scopes)
+ self.scope_handler.scopes,
+ self.client)
data = AuthorizeMixin.sanitize_return_value(data)
except UserNotAuthenticated:
raise OAuthInvalidError(error="invalid_client",
diff --git a/oauth2/web/__init__.py b/oauth2/web/__init__.py
index 279692e..4e98019 100644
--- a/oauth2/web/__init__.py
+++ b/oauth2/web/__init__.py
@@ -6,7 +6,7 @@ class AuthenticatingSiteAdapter(object):
"""
Extended by site adapters that need to authenticate the user.
"""
- def authenticate(self, request, environ, scopes):
+ def authenticate(self, request, environ, scopes, client):
"""
Authenticates a user and checks if she has authorized access.
@@ -20,6 +20,9 @@ class AuthenticatingSiteAdapter(object):
scope.
:type scopes: list
+ :param client: The client that initiated the authorization process
+ :type client: oauth2.datatype.Client
+
:return: A ``dict`` containing arbitrary data that will be passed to
the current storage adapter and saved with auth code and
access token. Return a tuple in the form
@@ -40,7 +43,7 @@ class UserFacingSiteAdapter(object):
Display HTML or redirect the user agent to another page of your website
where she can do something before being returned to the OAuth 2.0 server.
"""
- def render_auth_page(self, request, response, environ, scopes):
+ def render_auth_page(self, request, response, environ, scopes, client):
"""
Defines how to display a confirmation page to the user.
@@ -57,6 +60,9 @@ class UserFacingSiteAdapter(object):
scope.
:type scopes: list
+ :param client: The client that initiated the authorization process
+ :type client: oauth2.datatype.Client
+
:return: The response passed in as a parameter.
It can contain HTML or issue a redirect.
:rtype: oauth2.web.Response
diff --git a/setup.py b/setup.py
index f026e85..4200b67 100644
--- a/setup.py
+++ b/setup.py
@@ -1,3 +1,4 @@
+import os
import sys
from setuptools import setup
@@ -15,8 +16,7 @@ setup(name="python-oauth2",
author="Markus Meyer",
author_email="[email protected]",
url="https://github.com/wndhydrnt/python-oauth2",
- packages=["oauth2", "oauth2.web", "oauth2.store", "oauth2.store.dbapi",
- "oauth2.test"],
+ packages=[d[0].replace("/", ".") for d in os.walk("oauth2") if not d[0].endswith("__pycache__")],
extras_require={
"memcache": [memcache_require],
"mongodb": ["pymongo"],
| New parameter client
**AuthenticatingSiteAdapter.authenticate** and **UserFacingSiteAdapter.render_auth_page** could have a parameter to receive the client retrieved from the store.
In my case, my client table has extra columns that I need to render the authentication page. If I had access to the client object it would be easier; a sketch of an adapter using the new parameter is shown after the test patch below.
Thanks! | wndhydrnt/python-oauth2 | diff --git a/oauth2/test/functional/test_authorization_code.py b/oauth2/test/functional/test_authorization_code.py
index 6706ad2..bd2ef3a 100644
--- a/oauth2/test/functional/test_authorization_code.py
+++ b/oauth2/test/functional/test_authorization_code.py
@@ -203,7 +203,7 @@ class AuthorizationCodeTestCase(unittest.TestCase):
class TestSiteAdapter(AuthorizationCodeGrantSiteAdapter):
- def authenticate(self, request, environ, scopes):
+ def authenticate(self, request, environ, scopes, client):
return {"additional": "data"}, 1
def user_has_denied_access(self, request):
diff --git a/oauth2/test/test_grant.py b/oauth2/test/test_grant.py
index e3f80b8..cfbb05b 100644
--- a/oauth2/test/test_grant.py
+++ b/oauth2/test/test_grant.py
@@ -149,6 +149,8 @@ class AuthorizeMixinTestCase(unittest.TestCase):
"""
AuthorizeMixin.authorize should return a tuple even if the SiteAdapter returns a dict
"""
+ client_mock = Mock(spec=Client)
+
test_data = {"test": "data"}
site_adapter_mock = Mock(spec=ImplicitGrantSiteAdapter)
@@ -156,6 +158,7 @@ class AuthorizeMixinTestCase(unittest.TestCase):
site_adapter_mock.authenticate.return_value = test_data
auth_mixin = AuthorizeMixin(site_adapter=site_adapter_mock)
+ auth_mixin.client = client_mock
result = auth_mixin.authorize(Mock(spec=Request), Mock(spec=Response),
environ={}, scopes=[])
@@ -167,6 +170,8 @@ class AuthorizeMixinTestCase(unittest.TestCase):
"""
AuthorizeMixin.authorize should return the tuple returned by the SiteAdapter
"""
+ client_mock = Mock(spec=Client)
+
test_data = ({"test": "data"}, 123)
site_adapter_mock = Mock(spec=ImplicitGrantSiteAdapter)
@@ -174,6 +179,7 @@ class AuthorizeMixinTestCase(unittest.TestCase):
site_adapter_mock.authenticate.return_value = test_data
auth_mixin = AuthorizeMixin(site_adapter=site_adapter_mock)
+ auth_mixin.client = client_mock
result = auth_mixin.authorize(Mock(spec=Request), Mock(spec=Response),
environ={}, scopes=[])
@@ -182,6 +188,8 @@ class AuthorizeMixinTestCase(unittest.TestCase):
self.assertEqual(result[1], test_data[1])
def test_authorize_user_not_authenticated(self):
+ client_mock = Mock(spec=Client)
+
response_mock = Mock(spec=Response)
site_adapter_mock = Mock(spec=ImplicitGrantSiteAdapter)
@@ -190,6 +198,7 @@ class AuthorizeMixinTestCase(unittest.TestCase):
site_adapter_mock.render_auth_page.return_value = response_mock
auth_mixin = AuthorizeMixin(site_adapter=site_adapter_mock)
+ auth_mixin.client = client_mock
result = auth_mixin.authorize(Mock(spec=Request), response_mock,
environ={}, scopes=[])
@@ -209,6 +218,9 @@ class AuthorizationCodeAuthHandlerTestCase(unittest.TestCase):
auth_code_store_mock = Mock(spec=AuthCodeStore)
+ client = Client(identifier="abc", secret="xyz",
+ redirect_uris=[redirect_uri])
+
response_mock = Mock(spec=Response)
request_mock = Mock(spec=Request)
@@ -231,14 +243,14 @@ class AuthorizationCodeAuthHandlerTestCase(unittest.TestCase):
token_generator=token_generator_mock
)
- handler.client = Client(identifier="abc", secret="xyz",
- redirect_uris=[redirect_uri])
+ handler.client = client
handler.state = state
response = handler.process(request_mock, response_mock, environ)
token_generator_mock.generate.assert_called_with()
site_adapter_mock.authenticate.assert_called_with(request_mock,
- environ, scopes)
+ environ, scopes,
+ client)
self.assertTrue(auth_code_store_mock.save_code.called)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.body, "")
@@ -252,6 +264,8 @@ class AuthorizationCodeAuthHandlerTestCase(unittest.TestCase):
response_mock = Mock(spec=Response)
scopes = ["scopes"]
+ client_mock = Mock(spec=Client)
+
request_mock = Mock(spec=Request)
scope_handler_mock = Mock(Scope)
@@ -266,12 +280,14 @@ class AuthorizationCodeAuthHandlerTestCase(unittest.TestCase):
scope_handler=scope_handler_mock, site_adapter=site_adapter_mock,
token_generator=Mock()
)
+ handler.client = client_mock
response = handler.process(request_mock, response_mock, environ)
site_adapter_mock.render_auth_page.assert_called_with(request_mock,
response_mock,
environ,
- scopes)
+ scopes,
+ client_mock)
self.assertEqual(response, response_mock)
def test_redirect_oauth_error(self):
@@ -801,7 +817,6 @@ class ImplicitGrantTestCase(unittest.TestCase):
class ImplicitGrantHandlerTestCase(unittest.TestCase):
def test_process_redirect_with_token(self):
- client_id = "abc"
environ = {"session": "data"}
redirect_uri = "http://callback"
scopes = ["scopes"]
@@ -810,6 +825,9 @@ class ImplicitGrantHandlerTestCase(unittest.TestCase):
access_token_store_mock = Mock(spec=AccessTokenStore)
+ client = Client(identifier="abc", secret="xyz",
+ redirect_uris=[redirect_uri])
+
request_mock = Mock(spec=Request)
responseMock = Mock(spec=Response)
@@ -831,12 +849,12 @@ class ImplicitGrantHandlerTestCase(unittest.TestCase):
client_authenticator=Mock(), scope_handler=scope_handler_mock,
site_adapter=site_adapter_mock,
token_generator=token_generator_mock)
- handler.client = Client(identifier="abc", secret="xyz",
- redirect_uris=[redirect_uri])
+ handler.client = client
result_response = handler.process(request_mock, responseMock, environ)
site_adapter_mock.authenticate.assert_called_with(request_mock,
- environ, scopes)
+ environ, scopes,
+ client)
access_token, = access_token_store_mock.save_token.call_args[0]
self.assertTrue(isinstance(access_token, AccessToken))
@@ -934,6 +952,8 @@ class ImplicitGrantHandlerTestCase(unittest.TestCase):
scopes = ["scopes"]
environ = {"session": "data"}
+ client_mock = Mock(spec=Client)
+
request_mock = Mock(spec=Request)
response_mock = Mock(spec=Response)
@@ -950,14 +970,17 @@ class ImplicitGrantHandlerTestCase(unittest.TestCase):
scope_handler=scope_handler_mock, site_adapter=site_adapter_mock,
token_generator=Mock()
)
+ handler.client = client_mock
result_response = handler.process(request_mock, response_mock, environ)
site_adapter_mock.authenticate.assert_called_with(request_mock,
- environ, scopes)
+ environ, scopes,
+ client_mock)
site_adapter_mock.render_auth_page.assert_called_with(request_mock,
response_mock,
environ,
- scopes)
+ scopes,
+ client_mock)
self.assertEqual(result_response, response_mock)
def test_process_user_denied_access(self):
@@ -1057,6 +1080,7 @@ class ResourceOwnerGrantTestCase(unittest.TestCase):
class ResourceOwnerGrantHandlerTestCase(unittest.TestCase):
def test_process(self):
access_token = "0aef"
+ client = Client(identifier="abc", secret="xyz")
expected_response_body = {"access_token": access_token,
"token_type": "Bearer"}
scopes = ["scope"]
@@ -1085,11 +1109,11 @@ class ResourceOwnerGrantHandlerTestCase(unittest.TestCase):
scope_handler=scope_handler_mock,
site_adapter=site_adapter_mock,
token_generator=token_generator_mock)
- handler.client = Client(identifier="abc", secret="xyz")
+ handler.client = client
result = handler.process(request_mock, response_mock, {})
site_adapter_mock.authenticate.assert_called_with(request_mock, {},
- scopes)
+ scopes, client)
token_generator_mock.create_access_token_data.assert_called_with(ResourceOwnerGrant.grant_type)
access_token, = access_token_store_mock.save_token.call_args[0]
self.assertTrue(isinstance(access_token, AccessToken))
@@ -1107,6 +1131,7 @@ class ResourceOwnerGrantHandlerTestCase(unittest.TestCase):
@patch("time.time", mock_time)
def test_process_with_refresh_token(self):
access_token = "0aef"
+ client = Client(identifier="abc", secret="xyz")
expected_response_body = {"access_token": access_token,
"token_type": "Bearer",
"refresh_token": "wxyz", "expires_in": 600}
@@ -1138,11 +1163,11 @@ class ResourceOwnerGrantHandlerTestCase(unittest.TestCase):
scope_handler=scope_handler_mock,
site_adapter=site_adapter_mock,
token_generator=token_generator_mock)
- handler.client = Client(identifier="abc", secret="xyz")
+ handler.client = client
result = handler.process(request_mock, response_mock, {})
site_adapter_mock.authenticate.assert_called_with(request_mock, {},
- scopes)
+ scopes, client)
token_generator_mock.create_access_token_data.assert_called_with(ResourceOwnerGrant.grant_type)
access_token, = access_token_store_mock.save_token.call_args[0]
self.assertTrue(isinstance(access_token, AccessToken))
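A minimal site adapter sketch using the new `client` argument introduced by the patch. The `name` attribute read from the client is a hypothetical extra column of the reporter's client table, not part of oauth2.datatype.Client.

```python
from oauth2.error import UserNotAuthenticated
from oauth2.web import AuthorizationCodeGrantSiteAdapter


class ExampleSiteAdapter(AuthorizationCodeGrantSiteAdapter):
    def authenticate(self, request, environ, scopes, client):
        # `client` is the oauth2.datatype.Client loaded from the store.
        if request.post_param("confirm") == "confirm":
            return {}
        raise UserNotAuthenticated

    def render_auth_page(self, request, response, environ, scopes, client):
        # Extra data stored alongside the client can now drive the page;
        # `name` is a hypothetical attribute used only for illustration.
        app_name = getattr(client, "name", client.identifier)
        response.body = "<html><body>Authorize %s?</body></html>" % app_name
        return response

    def user_has_denied_access(self, request):
        return request.post_param("confirm") == "deny"
```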
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 11
} | 0.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"numpy>=1.16.0",
"pandas>=1.0.0"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | async-timeout==5.0.1
dnspython==2.7.0
exceptiongroup==1.2.2
iniconfig==2.1.0
mock==5.2.0
mysql-connector-python @ http://dev.mysql.com/get/Downloads/Connector-Python/mysql-connector-python-1.1.7.tar.gz#sha256=66f9aeadf2b908be0e31bf683cfa199c1c13401eb7c0acce7cec56d75d76e24a
nose==1.3.7
numpy==2.0.2
packaging==24.2
pandas==2.2.3
pluggy==1.5.0
pymongo==4.11.3
pytest==8.3.5
python-dateutil==2.9.0.post0
-e git+https://github.com/wndhydrnt/python-oauth2.git@f37a26ccb2a0bd5c6682a798f0e1669d75aacd5a#egg=python_oauth2
pytz==2025.2
redis==5.2.1
six==1.17.0
tomli==2.2.1
tornado==6.4.2
tzdata==2025.2
| name: python-oauth2
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- async-timeout==5.0.1
- dnspython==2.7.0
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- mock==5.2.0
- mysql-connector-python==1.1.7
- nose==1.3.7
- numpy==2.0.2
- packaging==24.2
- pandas==2.2.3
- pluggy==1.5.0
- pymongo==4.11.3
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- pytz==2025.2
- redis==5.2.1
- six==1.17.0
- tomli==2.2.1
- tornado==6.4.2
- tzdata==2025.2
prefix: /opt/conda/envs/python-oauth2
| [
"oauth2/test/functional/test_authorization_code.py::AuthorizationCodeTestCase::test_tornado",
"oauth2/test/functional/test_authorization_code.py::AuthorizationCodeTestCase::test_wsgi",
"oauth2/test/test_grant.py::AuthorizationCodeAuthHandlerTestCase::test_process",
"oauth2/test/test_grant.py::AuthorizationCodeAuthHandlerTestCase::test_process_not_confirmed",
"oauth2/test/test_grant.py::ImplicitGrantHandlerTestCase::test_process_redirect_with_token",
"oauth2/test/test_grant.py::ImplicitGrantHandlerTestCase::test_process_unconfirmed",
"oauth2/test/test_grant.py::ResourceOwnerGrantHandlerTestCase::test_process",
"oauth2/test/test_grant.py::ResourceOwnerGrantHandlerTestCase::test_process_with_refresh_token"
]
| []
| [
"oauth2/test/test_grant.py::AuthorizationCodeGrantTestCase::test_create_auth_handler",
"oauth2/test/test_grant.py::AuthorizationCodeGrantTestCase::test_create_no_match",
"oauth2/test/test_grant.py::AuthorizationCodeGrantTestCase::test_create_token_handler",
"oauth2/test/test_grant.py::AuthRequestMixinTestCase::test_read_validate_params_all_valid",
"oauth2/test/test_grant.py::AuthorizeMixinTestCase::test_authorize_dict_return",
"oauth2/test/test_grant.py::AuthorizeMixinTestCase::test_authorize_tuple_return",
"oauth2/test/test_grant.py::AuthorizeMixinTestCase::test_authorize_user_denied_access",
"oauth2/test/test_grant.py::AuthorizeMixinTestCase::test_authorize_user_not_authenticated",
"oauth2/test/test_grant.py::AuthorizationCodeAuthHandlerTestCase::test_redirect_oauth_error",
"oauth2/test/test_grant.py::AuthorizationCodeTokenHandlerTestCase::test_process_no_refresh_token",
"oauth2/test/test_grant.py::AuthorizationCodeTokenHandlerTestCase::test_process_with_refresh_token",
"oauth2/test/test_grant.py::AuthorizationCodeTokenHandlerTestCase::test_process_with_unique_access_token",
"oauth2/test/test_grant.py::AuthorizationCodeTokenHandlerTestCase::test_process_with_unique_access_token_different_scope",
"oauth2/test/test_grant.py::AuthorizationCodeTokenHandlerTestCase::test_process_with_unique_access_token_expired_token",
"oauth2/test/test_grant.py::AuthorizationCodeTokenHandlerTestCase::test_process_with_unique_access_token_no_user_id",
"oauth2/test/test_grant.py::AuthorizationCodeTokenHandlerTestCase::test_process_with_unique_access_token_not_found",
"oauth2/test/test_grant.py::AuthorizationCodeTokenHandlerTestCase::test_read_validate_params",
"oauth2/test/test_grant.py::AuthorizationCodeTokenHandlerTestCase::test_read_validate_params_missing_code",
"oauth2/test/test_grant.py::AuthorizationCodeTokenHandlerTestCase::test_read_validate_params_no_auth_code_found",
"oauth2/test/test_grant.py::AuthorizationCodeTokenHandlerTestCase::test_read_validate_params_token_expired",
"oauth2/test/test_grant.py::AuthorizationCodeTokenHandlerTestCase::test_read_validate_params_unknown_code",
"oauth2/test/test_grant.py::AuthorizationCodeTokenHandlerTestCase::test_read_validate_params_wrong_redirect_uri_in_code_data",
"oauth2/test/test_grant.py::ImplicitGrantTestCase::test_create_matching_response_type",
"oauth2/test/test_grant.py::ImplicitGrantTestCase::test_create_not_matching_response_type",
"oauth2/test/test_grant.py::ImplicitGrantHandlerTestCase::test_process_redirect_with_state",
"oauth2/test/test_grant.py::ImplicitGrantHandlerTestCase::test_process_user_denied_access",
"oauth2/test/test_grant.py::ImplicitGrantHandlerTestCase::test_process_with_scope",
"oauth2/test/test_grant.py::ImplicitGrantHandlerTestCase::test_redirect_oauth_error",
"oauth2/test/test_grant.py::ResourceOwnerGrantTestCase::test_call",
"oauth2/test/test_grant.py::ResourceOwnerGrantTestCase::test_call_no_resource_request",
"oauth2/test/test_grant.py::ResourceOwnerGrantHandlerTestCase::test_handle_error_owner_not_authenticated",
"oauth2/test/test_grant.py::ResourceOwnerGrantHandlerTestCase::test_process_invalid_user",
"oauth2/test/test_grant.py::ResourceOwnerGrantHandlerTestCase::test_process_redirect_with_scope",
"oauth2/test/test_grant.py::ResourceOwnerGrantHandlerTestCase::test_read_validate_params",
"oauth2/test/test_grant.py::ScopeTestCase::test_compare_invalid_scope_requested",
"oauth2/test/test_grant.py::ScopeTestCase::test_compare_scopes_equal",
"oauth2/test/test_grant.py::ScopeTestCase::test_compare_valid_scope_subset",
"oauth2/test/test_grant.py::ScopeTestCase::test_parse_scope_default_on_no_matching_scopes",
"oauth2/test/test_grant.py::ScopeTestCase::test_parse_scope_default_on_no_scope",
"oauth2/test/test_grant.py::ScopeTestCase::test_parse_scope_exception_on_available_scopes_no_scope_given",
"oauth2/test/test_grant.py::ScopeTestCase::test_parse_scope_no_value_on_no_scope_no_default",
"oauth2/test/test_grant.py::ScopeTestCase::test_parse_scope_scope_present_in_body",
"oauth2/test/test_grant.py::ScopeTestCase::test_parse_scope_scope_present_in_query",
"oauth2/test/test_grant.py::RefreshTokenTestCase::test_call",
"oauth2/test/test_grant.py::RefreshTokenTestCase::test_call_other_grant_type",
"oauth2/test/test_grant.py::RefreshTokenTestCase::test_call_wrong_path",
"oauth2/test/test_grant.py::RefreshTokenHandlerTestCase::test_process_no_reissue",
"oauth2/test/test_grant.py::RefreshTokenHandlerTestCase::test_process_with_reissue",
"oauth2/test/test_grant.py::RefreshTokenHandlerTestCase::test_read_validate_params",
"oauth2/test/test_grant.py::RefreshTokenHandlerTestCase::test_read_validate_params_expired_refresh_token",
"oauth2/test/test_grant.py::RefreshTokenHandlerTestCase::test_read_validate_params_invalid_refresh_token",
"oauth2/test/test_grant.py::RefreshTokenHandlerTestCase::test_read_validate_params_no_refresh_token",
"oauth2/test/test_grant.py::ClientCredentialsGrantTestCase::test_call",
"oauth2/test/test_grant.py::ClientCredentialsGrantTestCase::test_call_other_grant_type",
"oauth2/test/test_grant.py::ClientCredentialsGrantTestCase::test_call_wrong_request_path",
"oauth2/test/test_grant.py::ClientCredentialsHandlerTestCase::test_process",
"oauth2/test/test_grant.py::ClientCredentialsHandlerTestCase::test_process_with_refresh_token",
"oauth2/test/test_grant.py::ClientCredentialsHandlerTestCase::test_read_validate_params"
]
| []
| MIT License | 149 | [
"README.rst",
"docs/examples/tornado_server.py",
"oauth2/web/__init__.py",
"docs/examples/resource_owner_grant.py",
"setup.py",
"docs/examples/authorization_code_grant.py",
"oauth2/grant.py",
"CHANGELOG.md",
"docs/migration.rst",
"docs/examples/implicit_grant.py",
"docs/examples/base_server.py"
]
| [
"README.rst",
"docs/examples/tornado_server.py",
"oauth2/web/__init__.py",
"docs/examples/resource_owner_grant.py",
"setup.py",
"docs/examples/authorization_code_grant.py",
"oauth2/grant.py",
"CHANGELOG.md",
"docs/migration.rst",
"docs/examples/implicit_grant.py",
"docs/examples/base_server.py"
]
|
|
scieloorg__xylose-70 | dfb62b8a662baba1c7563ac7815a1e2f41597b22 | 2015-05-25 20:06:35 | dfb62b8a662baba1c7563ac7815a1e2f41597b22 | diff --git a/setup.py b/setup.py
index 3e2e89c..09e541f 100755
--- a/setup.py
+++ b/setup.py
@@ -7,7 +7,7 @@ except ImportError:
setup(
name="xylose",
- version='0.6b',
+ version='0.7b',
description="A SciELO library to abstract a JSON data structure that is a product of the ISIS2JSON conversion using the ISIS2JSON type 3 data model.",
author="SciELO",
author_email="[email protected]",
diff --git a/xylose/scielodocument.py b/xylose/scielodocument.py
index bed9cfc..a16ee47 100644
--- a/xylose/scielodocument.py
+++ b/xylose/scielodocument.py
@@ -788,8 +788,9 @@ class Article(object):
This method deals with the legacy fields (14).
"""
if 'v14' in self.data['article']:
- if 'f' in self.data['article']['v14'][0]:
- return self.data['article']['v14'][0]['f']
+ for item in self.data['article']['v14']:
+ if 'f' in item:
+ return item['f']
@property
def end_page(self):
@@ -798,8 +799,9 @@ class Article(object):
This method deals with the legacy fields (14).
"""
if 'v14' in self.data['article']:
- if 'l' in self.data['article']['v14'][0]:
- return self.data['article']['v14'][0]['l']
+ for item in self.data['article']['v14']:
+ if 'l' in item:
+ return item['l']
@property
def doi(self):
| Bug in the pages fields when reading the JSON record.
New records, when loaded from XML, started arriving with a data structure different from the original one:
before:
```
[
{
_: "",
f: "1099",
l: "1101",
}
],
```
after:
```
[
{
_: "",
f: "1099"
},
{
l: "1101",
_: ""
}
],
```
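For illustration only, an accessor tolerant of both shapes simply scans every dict in `v14` for the wanted subfield instead of assuming the first dict carries it; this standalone sketch mirrors the committed patch shown earlier but is not Xylose's exact code:

```python
def start_page(record):
    """Return subfield 'f' from whichever v14 item carries it (sketch)."""
    for item in record.get('article', {}).get('v14', []):
        if 'f' in item:
            return item['f']
    return None  # neither shape carried a start page

# Works for both persisted shapes:
# start_page({'article': {'v14': [{'_': '', 'f': '1099', 'l': '1101'}]}})            -> '1099'
# start_page({'article': {'v14': [{'_': '', 'f': '1099'}, {'l': '1101', '_': ''}]}}) -> '1099'
```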
Xylose must abstract both forms of page persistence. | scieloorg/xylose | diff --git a/tests/test_document.py b/tests/test_document.py
index 5236fdd..71a7da4 100644
--- a/tests/test_document.py
+++ b/tests/test_document.py
@@ -956,6 +956,22 @@ class ArticleTests(unittest.TestCase):
del(article.data['article']['v14'][0]['f'])
self.assertEqual(article.start_page, None)
+ def test_start_page_loaded_through_xml(self):
+ article = self.article
+
+ article.data['article']['v14'] = [
+ {
+ u'_': u'',
+ u'l': u'122'
+ },
+ {
+ u'_': u'',
+ u'f': u'110'
+ }
+ ]
+
+ self.assertEqual(article.start_page, u'110')
+
def test_last_page(self):
article = self.article
@@ -967,6 +983,23 @@ class ArticleTests(unittest.TestCase):
del(article.data['article']['v14'][0]['l'])
self.assertEqual(article.end_page, None)
+ def test_end_page_loaded_through_xml(self):
+ article = self.article
+
+ article.data['article']['v14'] = [
+ {
+ u'_': u'',
+ u'f': u'110'
+ },
+ {
+ u'_': u'',
+ u'l': u'122'
+ }
+ ]
+
+ self.assertEqual(article.end_page, u'122')
+
+
def test_without_pages(self):
article = self.article
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 2
} | 0.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "",
"pip_packages": [
"nose",
"coverage",
"mocker",
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup==1.2.2
iniconfig==2.1.0
mocker==1.1.1
nose==1.3.7
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
tomli==2.2.1
-e git+https://github.com/scieloorg/xylose.git@dfb62b8a662baba1c7563ac7815a1e2f41597b22#egg=xylose
| name: xylose
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- mocker==1.1.1
- nose==1.3.7
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- tomli==2.2.1
prefix: /opt/conda/envs/xylose
| [
"tests/test_document.py::ArticleTests::test_end_page_loaded_through_xml",
"tests/test_document.py::ArticleTests::test_start_page_loaded_through_xml"
]
| []
| [
"tests/test_document.py::ToolsTests::test_get_language_iso639_1_defined",
"tests/test_document.py::ToolsTests::test_get_language_iso639_1_undefined",
"tests/test_document.py::ToolsTests::test_get_language_iso639_2_defined",
"tests/test_document.py::ToolsTests::test_get_language_iso639_2_undefined",
"tests/test_document.py::ToolsTests::test_get_language_without_iso_format",
"tests/test_document.py::ToolsTests::test_get_publication_date_wrong_day",
"tests/test_document.py::ToolsTests::test_get_publication_date_wrong_day_month",
"tests/test_document.py::ToolsTests::test_get_publication_date_wrong_day_month_not_int",
"tests/test_document.py::ToolsTests::test_get_publication_date_wrong_day_not_int",
"tests/test_document.py::ToolsTests::test_get_publication_date_wrong_month_not_int",
"tests/test_document.py::ToolsTests::test_get_publication_date_year",
"tests/test_document.py::ToolsTests::test_get_publication_date_year_day",
"tests/test_document.py::ToolsTests::test_get_publication_date_year_month",
"tests/test_document.py::ToolsTests::test_get_publication_date_year_month_day",
"tests/test_document.py::JournalTests::test_any_issn_priority_electronic",
"tests/test_document.py::JournalTests::test_any_issn_priority_electronic_without_electronic",
"tests/test_document.py::JournalTests::test_any_issn_priority_print",
"tests/test_document.py::JournalTests::test_any_issn_priority_print_without_print",
"tests/test_document.py::JournalTests::test_collection_acronym",
"tests/test_document.py::JournalTests::test_creation_date",
"tests/test_document.py::JournalTests::test_current_status",
"tests/test_document.py::JournalTests::test_current_status_lots_of_changes_study_case_1",
"tests/test_document.py::JournalTests::test_current_status_some_changes",
"tests/test_document.py::JournalTests::test_current_without_v51",
"tests/test_document.py::JournalTests::test_journal",
"tests/test_document.py::JournalTests::test_journal_abbreviated_title",
"tests/test_document.py::JournalTests::test_journal_acronym",
"tests/test_document.py::JournalTests::test_journal_title",
"tests/test_document.py::JournalTests::test_journal_title_nlm",
"tests/test_document.py::JournalTests::test_journal_url",
"tests/test_document.py::JournalTests::test_languages",
"tests/test_document.py::JournalTests::test_languages_without_v350",
"tests/test_document.py::JournalTests::test_load_issn_with_v935_and_v35_ONLINE",
"tests/test_document.py::JournalTests::test_load_issn_with_v935_and_v35_PRINT",
"tests/test_document.py::JournalTests::test_load_issn_with_v935_equal_v400_and_v35_ONLINE",
"tests/test_document.py::JournalTests::test_load_issn_with_v935_equal_v400_and_v35_PRINT",
"tests/test_document.py::JournalTests::test_load_issn_with_v935_without_v35",
"tests/test_document.py::JournalTests::test_load_issn_without_v935_and_v35_ONLINE",
"tests/test_document.py::JournalTests::test_load_issn_without_v935_and_v35_PRINT",
"tests/test_document.py::JournalTests::test_load_issn_without_v935_without_v35",
"tests/test_document.py::JournalTests::test_permission_id",
"tests/test_document.py::JournalTests::test_permission_text",
"tests/test_document.py::JournalTests::test_permission_url",
"tests/test_document.py::JournalTests::test_permission_without_v540",
"tests/test_document.py::JournalTests::test_permission_without_v540_t",
"tests/test_document.py::JournalTests::test_publisher_loc",
"tests/test_document.py::JournalTests::test_publisher_name",
"tests/test_document.py::JournalTests::test_scielo_issn",
"tests/test_document.py::JournalTests::test_status",
"tests/test_document.py::JournalTests::test_status_lots_of_changes",
"tests/test_document.py::JournalTests::test_status_lots_of_changes_study_case_1",
"tests/test_document.py::JournalTests::test_status_some_changes",
"tests/test_document.py::JournalTests::test_status_without_v51",
"tests/test_document.py::JournalTests::test_subject_areas",
"tests/test_document.py::JournalTests::test_update_date",
"tests/test_document.py::JournalTests::test_without_journal_abbreviated_title",
"tests/test_document.py::JournalTests::test_without_journal_acronym",
"tests/test_document.py::JournalTests::test_without_journal_title",
"tests/test_document.py::JournalTests::test_without_journal_title_nlm",
"tests/test_document.py::JournalTests::test_without_journal_url",
"tests/test_document.py::JournalTests::test_without_publisher_loc",
"tests/test_document.py::JournalTests::test_without_publisher_name",
"tests/test_document.py::JournalTests::test_without_scielo_domain",
"tests/test_document.py::JournalTests::test_without_scielo_domain_title_v690",
"tests/test_document.py::JournalTests::test_without_subject_areas",
"tests/test_document.py::JournalTests::test_without_wos_citation_indexes",
"tests/test_document.py::JournalTests::test_without_wos_subject_areas",
"tests/test_document.py::JournalTests::test_wos_citation_indexes",
"tests/test_document.py::JournalTests::test_wos_subject_areas",
"tests/test_document.py::ArticleTests::test_acceptance_date",
"tests/test_document.py::ArticleTests::test_affiliation_just_with_affiliation_name",
"tests/test_document.py::ArticleTests::test_affiliation_without_affiliation_name",
"tests/test_document.py::ArticleTests::test_affiliations",
"tests/test_document.py::ArticleTests::test_ahead_publication_date",
"tests/test_document.py::ArticleTests::test_article",
"tests/test_document.py::ArticleTests::test_author_with_two_affiliations",
"tests/test_document.py::ArticleTests::test_author_with_two_role",
"tests/test_document.py::ArticleTests::test_author_without_affiliations",
"tests/test_document.py::ArticleTests::test_author_without_surname_and_given_names",
"tests/test_document.py::ArticleTests::test_authors",
"tests/test_document.py::ArticleTests::test_collection_acronym",
"tests/test_document.py::ArticleTests::test_collection_acronym_priorizing_collection",
"tests/test_document.py::ArticleTests::test_collection_acronym_retrieving_v992",
"tests/test_document.py::ArticleTests::test_collection_name_brazil",
"tests/test_document.py::ArticleTests::test_collection_name_undefined",
"tests/test_document.py::ArticleTests::test_corporative_authors",
"tests/test_document.py::ArticleTests::test_document_type",
"tests/test_document.py::ArticleTests::test_doi",
"tests/test_document.py::ArticleTests::test_file_code",
"tests/test_document.py::ArticleTests::test_file_code_crazy_slashs_1",
"tests/test_document.py::ArticleTests::test_file_code_crazy_slashs_2",
"tests/test_document.py::ArticleTests::test_first_author",
"tests/test_document.py::ArticleTests::test_first_author_without_author",
"tests/test_document.py::ArticleTests::test_fulltexts_field_fulltexts",
"tests/test_document.py::ArticleTests::test_fulltexts_without_field_fulltexts",
"tests/test_document.py::ArticleTests::test_html_url",
"tests/test_document.py::ArticleTests::test_invalid_document_type",
"tests/test_document.py::ArticleTests::test_issue",
"tests/test_document.py::ArticleTests::test_issue_label_field_v4",
"tests/test_document.py::ArticleTests::test_issue_label_without_field_v4",
"tests/test_document.py::ArticleTests::test_issue_url",
"tests/test_document.py::ArticleTests::test_journal_abbreviated_title",
"tests/test_document.py::ArticleTests::test_journal_acronym",
"tests/test_document.py::ArticleTests::test_journal_title",
"tests/test_document.py::ArticleTests::test_keywords",
"tests/test_document.py::ArticleTests::test_keywords_iso639_2",
"tests/test_document.py::ArticleTests::test_keywords_with_undefined_language",
"tests/test_document.py::ArticleTests::test_keywords_without_subfield_k",
"tests/test_document.py::ArticleTests::test_keywords_without_subfield_l",
"tests/test_document.py::ArticleTests::test_languages_field_fulltexts",
"tests/test_document.py::ArticleTests::test_languages_field_v40",
"tests/test_document.py::ArticleTests::test_last_page",
"tests/test_document.py::ArticleTests::test_mixed_affiliations",
"tests/test_document.py::ArticleTests::test_normalized_affiliations",
"tests/test_document.py::ArticleTests::test_normalized_affiliations_undefined_ISO_3166_CODE",
"tests/test_document.py::ArticleTests::test_normalized_affiliations_without_p",
"tests/test_document.py::ArticleTests::test_original_abstract_with_just_one_language_defined",
"tests/test_document.py::ArticleTests::test_original_abstract_with_language_defined",
"tests/test_document.py::ArticleTests::test_original_abstract_with_language_defined_but_different_of_the_article_original_language",
"tests/test_document.py::ArticleTests::test_original_abstract_without_language_defined",
"tests/test_document.py::ArticleTests::test_original_language_invalid_iso639_2",
"tests/test_document.py::ArticleTests::test_original_language_iso639_2",
"tests/test_document.py::ArticleTests::test_original_language_original",
"tests/test_document.py::ArticleTests::test_original_title_with_just_one_language_defined",
"tests/test_document.py::ArticleTests::test_original_title_with_language_defined",
"tests/test_document.py::ArticleTests::test_original_title_with_language_defined_but_different_of_the_article_original_language",
"tests/test_document.py::ArticleTests::test_original_title_without_language_defined",
"tests/test_document.py::ArticleTests::test_pdf_url",
"tests/test_document.py::ArticleTests::test_processing_date",
"tests/test_document.py::ArticleTests::test_project_name",
"tests/test_document.py::ArticleTests::test_project_sponsors",
"tests/test_document.py::ArticleTests::test_publication_contract",
"tests/test_document.py::ArticleTests::test_publication_date",
"tests/test_document.py::ArticleTests::test_publisher_id",
"tests/test_document.py::ArticleTests::test_publisher_loc",
"tests/test_document.py::ArticleTests::test_publisher_name",
"tests/test_document.py::ArticleTests::test_receive_date",
"tests/test_document.py::ArticleTests::test_review_date",
"tests/test_document.py::ArticleTests::test_start_page",
"tests/test_document.py::ArticleTests::test_subject_areas",
"tests/test_document.py::ArticleTests::test_supplement_issue",
"tests/test_document.py::ArticleTests::test_supplement_volume",
"tests/test_document.py::ArticleTests::test_thesis_degree",
"tests/test_document.py::ArticleTests::test_thesis_organization",
"tests/test_document.py::ArticleTests::test_thesis_organization_and_division",
"tests/test_document.py::ArticleTests::test_thesis_organization_without_name",
"tests/test_document.py::ArticleTests::test_translated_abstracts",
"tests/test_document.py::ArticleTests::test_translated_abstracts_without_v83",
"tests/test_document.py::ArticleTests::test_translated_abtracts_iso639_2",
"tests/test_document.py::ArticleTests::test_translated_titles",
"tests/test_document.py::ArticleTests::test_translated_titles_iso639_2",
"tests/test_document.py::ArticleTests::test_translated_titles_without_v12",
"tests/test_document.py::ArticleTests::test_volume",
"tests/test_document.py::ArticleTests::test_whitwout_acceptance_date",
"tests/test_document.py::ArticleTests::test_whitwout_ahead_publication_date",
"tests/test_document.py::ArticleTests::test_whitwout_receive_date",
"tests/test_document.py::ArticleTests::test_whitwout_review_date",
"tests/test_document.py::ArticleTests::test_without_affiliations",
"tests/test_document.py::ArticleTests::test_without_authors",
"tests/test_document.py::ArticleTests::test_without_citations",
"tests/test_document.py::ArticleTests::test_without_collection_acronym",
"tests/test_document.py::ArticleTests::test_without_corporative_authors",
"tests/test_document.py::ArticleTests::test_without_document_type",
"tests/test_document.py::ArticleTests::test_without_doi",
"tests/test_document.py::ArticleTests::test_without_html_url",
"tests/test_document.py::ArticleTests::test_without_issue",
"tests/test_document.py::ArticleTests::test_without_issue_url",
"tests/test_document.py::ArticleTests::test_without_journal_abbreviated_title",
"tests/test_document.py::ArticleTests::test_without_journal_acronym",
"tests/test_document.py::ArticleTests::test_without_journal_title",
"tests/test_document.py::ArticleTests::test_without_keywords",
"tests/test_document.py::ArticleTests::test_without_last_page",
"tests/test_document.py::ArticleTests::test_without_normalized_affiliations",
"tests/test_document.py::ArticleTests::test_without_original_abstract",
"tests/test_document.py::ArticleTests::test_without_original_title",
"tests/test_document.py::ArticleTests::test_without_pages",
"tests/test_document.py::ArticleTests::test_without_pdf_url",
"tests/test_document.py::ArticleTests::test_without_processing_date",
"tests/test_document.py::ArticleTests::test_without_project_name",
"tests/test_document.py::ArticleTests::test_without_project_sponsor",
"tests/test_document.py::ArticleTests::test_without_publication_contract",
"tests/test_document.py::ArticleTests::test_without_publication_date",
"tests/test_document.py::ArticleTests::test_without_publisher_id",
"tests/test_document.py::ArticleTests::test_without_publisher_loc",
"tests/test_document.py::ArticleTests::test_without_publisher_name",
"tests/test_document.py::ArticleTests::test_without_scielo_domain",
"tests/test_document.py::ArticleTests::test_without_scielo_domain_article_v69",
"tests/test_document.py::ArticleTests::test_without_scielo_domain_article_v69_and_with_title_v690",
"tests/test_document.py::ArticleTests::test_without_scielo_domain_title_v690",
"tests/test_document.py::ArticleTests::test_without_start_page",
"tests/test_document.py::ArticleTests::test_without_subject_areas",
"tests/test_document.py::ArticleTests::test_without_suplement_issue",
"tests/test_document.py::ArticleTests::test_without_supplement_volume",
"tests/test_document.py::ArticleTests::test_without_thesis_degree",
"tests/test_document.py::ArticleTests::test_without_thesis_organization",
"tests/test_document.py::ArticleTests::test_without_volume",
"tests/test_document.py::ArticleTests::test_without_wos_citation_indexes",
"tests/test_document.py::ArticleTests::test_without_wos_subject_areas",
"tests/test_document.py::ArticleTests::test_wos_citation_indexes",
"tests/test_document.py::ArticleTests::test_wos_subject_areas",
"tests/test_document.py::CitationTest::test_a_link_access_date",
"tests/test_document.py::CitationTest::test_analytic_institution_for_a_article_citation",
"tests/test_document.py::CitationTest::test_analytic_institution_for_a_book_citation",
"tests/test_document.py::CitationTest::test_article_title",
"tests/test_document.py::CitationTest::test_article_without_title",
"tests/test_document.py::CitationTest::test_authors_article",
"tests/test_document.py::CitationTest::test_authors_book",
"tests/test_document.py::CitationTest::test_authors_link",
"tests/test_document.py::CitationTest::test_authors_thesis",
"tests/test_document.py::CitationTest::test_book_chapter_title",
"tests/test_document.py::CitationTest::test_book_edition",
"tests/test_document.py::CitationTest::test_book_volume",
"tests/test_document.py::CitationTest::test_book_without_chapter_title",
"tests/test_document.py::CitationTest::test_citation_sample_congress",
"tests/test_document.py::CitationTest::test_citation_sample_link",
"tests/test_document.py::CitationTest::test_citation_sample_link_without_comment",
"tests/test_document.py::CitationTest::test_conference_edition",
"tests/test_document.py::CitationTest::test_conference_name",
"tests/test_document.py::CitationTest::test_conference_sponsor",
"tests/test_document.py::CitationTest::test_conference_without_name",
"tests/test_document.py::CitationTest::test_conference_without_sponsor",
"tests/test_document.py::CitationTest::test_date",
"tests/test_document.py::CitationTest::test_doi",
"tests/test_document.py::CitationTest::test_editor",
"tests/test_document.py::CitationTest::test_end_page_14",
"tests/test_document.py::CitationTest::test_end_page_514",
"tests/test_document.py::CitationTest::test_end_page_withdout_data",
"tests/test_document.py::CitationTest::test_first_author_article",
"tests/test_document.py::CitationTest::test_first_author_book",
"tests/test_document.py::CitationTest::test_first_author_link",
"tests/test_document.py::CitationTest::test_first_author_thesis",
"tests/test_document.py::CitationTest::test_first_author_without_monographic_authors",
"tests/test_document.py::CitationTest::test_first_author_without_monographic_authors_but_not_a_book_citation",
"tests/test_document.py::CitationTest::test_index_number",
"tests/test_document.py::CitationTest::test_institutions_all_fields",
"tests/test_document.py::CitationTest::test_institutions_v11",
"tests/test_document.py::CitationTest::test_institutions_v17",
"tests/test_document.py::CitationTest::test_institutions_v29",
"tests/test_document.py::CitationTest::test_institutions_v50",
"tests/test_document.py::CitationTest::test_institutions_v58",
"tests/test_document.py::CitationTest::test_invalid_edition",
"tests/test_document.py::CitationTest::test_isbn",
"tests/test_document.py::CitationTest::test_isbn_but_not_a_book",
"tests/test_document.py::CitationTest::test_issn",
"tests/test_document.py::CitationTest::test_issn_but_not_an_article",
"tests/test_document.py::CitationTest::test_issue_part",
"tests/test_document.py::CitationTest::test_issue_title",
"tests/test_document.py::CitationTest::test_journal_issue",
"tests/test_document.py::CitationTest::test_journal_volume",
"tests/test_document.py::CitationTest::test_link",
"tests/test_document.py::CitationTest::test_link_title",
"tests/test_document.py::CitationTest::test_link_without_title",
"tests/test_document.py::CitationTest::test_monographic_authors",
"tests/test_document.py::CitationTest::test_monographic_first_author",
"tests/test_document.py::CitationTest::test_pages_14",
"tests/test_document.py::CitationTest::test_pages_514",
"tests/test_document.py::CitationTest::test_pages_withdout_data",
"tests/test_document.py::CitationTest::test_publication_type_article",
"tests/test_document.py::CitationTest::test_publication_type_book",
"tests/test_document.py::CitationTest::test_publication_type_conference",
"tests/test_document.py::CitationTest::test_publication_type_link",
"tests/test_document.py::CitationTest::test_publication_type_thesis",
"tests/test_document.py::CitationTest::test_publication_type_undefined",
"tests/test_document.py::CitationTest::test_publisher",
"tests/test_document.py::CitationTest::test_publisher_address",
"tests/test_document.py::CitationTest::test_publisher_address_without_e",
"tests/test_document.py::CitationTest::test_series_book",
"tests/test_document.py::CitationTest::test_series_but_neither_journal_book_or_conference_citation",
"tests/test_document.py::CitationTest::test_series_conference",
"tests/test_document.py::CitationTest::test_series_journal",
"tests/test_document.py::CitationTest::test_source_book_title",
"tests/test_document.py::CitationTest::test_source_journal",
"tests/test_document.py::CitationTest::test_source_journal_without_journal_title",
"tests/test_document.py::CitationTest::test_sponsor",
"tests/test_document.py::CitationTest::test_start_page_14",
"tests/test_document.py::CitationTest::test_start_page_514",
"tests/test_document.py::CitationTest::test_start_page_withdout_data",
"tests/test_document.py::CitationTest::test_thesis_institution",
"tests/test_document.py::CitationTest::test_thesis_title",
"tests/test_document.py::CitationTest::test_thesis_without_title",
"tests/test_document.py::CitationTest::test_title_when_article_citation",
"tests/test_document.py::CitationTest::test_title_when_conference_citation",
"tests/test_document.py::CitationTest::test_title_when_link_citation",
"tests/test_document.py::CitationTest::test_title_when_thesis_citation",
"tests/test_document.py::CitationTest::test_with_volume_but_not_a_journal_article_neither_a_book",
"tests/test_document.py::CitationTest::test_without_analytic_institution",
"tests/test_document.py::CitationTest::test_without_authors",
"tests/test_document.py::CitationTest::test_without_date",
"tests/test_document.py::CitationTest::test_without_doi",
"tests/test_document.py::CitationTest::test_without_edition",
"tests/test_document.py::CitationTest::test_without_editor",
"tests/test_document.py::CitationTest::test_without_first_author",
"tests/test_document.py::CitationTest::test_without_index_number",
"tests/test_document.py::CitationTest::test_without_institutions",
"tests/test_document.py::CitationTest::test_without_issue",
"tests/test_document.py::CitationTest::test_without_issue_part",
"tests/test_document.py::CitationTest::test_without_issue_title",
"tests/test_document.py::CitationTest::test_without_link",
"tests/test_document.py::CitationTest::test_without_monographic_authors",
"tests/test_document.py::CitationTest::test_without_monographic_authors_but_not_a_book_citation",
"tests/test_document.py::CitationTest::test_without_publisher",
"tests/test_document.py::CitationTest::test_without_publisher_address",
"tests/test_document.py::CitationTest::test_without_series",
"tests/test_document.py::CitationTest::test_without_sponsor",
"tests/test_document.py::CitationTest::test_without_thesis_institution",
"tests/test_document.py::CitationTest::test_without_volume"
]
| []
| BSD 2-Clause "Simplified" License | 150 | [
"setup.py",
"xylose/scielodocument.py"
]
| [
"setup.py",
"xylose/scielodocument.py"
]
|
|
scieloorg__xylose-72 | 5a22638f77c1428300b0be056387459b9832a42e | 2015-05-25 20:31:06 | 5a22638f77c1428300b0be056387459b9832a42e | diff --git a/setup.py b/setup.py
index 09e541f..e3f1883 100755
--- a/setup.py
+++ b/setup.py
@@ -7,7 +7,7 @@ except ImportError:
setup(
name="xylose",
- version='0.7b',
+ version='0.8b',
description="A SciELO library to abstract a JSON data structure that is a product of the ISIS2JSON conversion using the ISIS2JSON type 3 data model.",
author="SciELO",
author_email="[email protected]",
diff --git a/xylose/scielodocument.py b/xylose/scielodocument.py
index a16ee47..39d94e3 100644
--- a/xylose/scielodocument.py
+++ b/xylose/scielodocument.py
@@ -792,6 +792,12 @@ class Article(object):
if 'f' in item:
return item['f']
+ # if nothing works until now. we will try once more. It's tested.
+
+ pages = sorted(self.data['article']['v14'][0]['_'].split('-'))
+
+ return pages[0] or None
+
@property
def end_page(self):
"""
@@ -803,6 +809,12 @@ class Article(object):
if 'l' in item:
return item['l']
+ # if nothing works until now. we will try once more. It's tested.
+
+ pages = sorted(self.data['article']['v14'][0]['_'].split('-'))
+
+ return pages[-1] or None
+
@property
def doi(self):
"""
| Another way of persisting pages.
A new way of persisting pages has been found in SciELO.
```
v14: [
{
_: "23-32"
}
]
``` | scieloorg/xylose | diff --git a/tests/test_document.py b/tests/test_document.py
index 71a7da4..a409140 100644
--- a/tests/test_document.py
+++ b/tests/test_document.py
@@ -972,6 +972,28 @@ class ArticleTests(unittest.TestCase):
self.assertEqual(article.start_page, u'110')
+ def test_start_page_loaded_crazy_legacy_way_1(self):
+ article = self.article
+
+ article.data['article']['v14'] = [
+ {
+ u'_': u'110-122',
+ }
+ ]
+
+ self.assertEqual(article.start_page, u'110')
+
+ def test_start_page_loaded_crazy_legacy_way_2(self):
+ article = self.article
+
+ article.data['article']['v14'] = [
+ {
+ u'_': u'122-110',
+ }
+ ]
+
+ self.assertEqual(article.start_page, u'110')
+
def test_last_page(self):
article = self.article
@@ -999,6 +1021,27 @@ class ArticleTests(unittest.TestCase):
self.assertEqual(article.end_page, u'122')
+ def test_end_page_loaded_crazy_legacy_way_1(self):
+ article = self.article
+
+ article.data['article']['v14'] = [
+ {
+ u'_': u'110-122',
+ }
+ ]
+
+ self.assertEqual(article.end_page, u'122')
+
+ def test_end_page_loaded_crazy_legacy_way_2(self):
+ article = self.article
+
+ article.data['article']['v14'] = [
+ {
+ u'_': u'122-110',
+ }
+ ]
+
+ self.assertEqual(article.end_page, u'122')
def test_without_pages(self):
article = self.article
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 3,
"test_score": 3
},
"num_modified_files": 2
} | 0.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"coverage",
"mocker"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup==1.2.2
iniconfig==2.1.0
mocker==1.1.1
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
tomli==2.2.1
-e git+https://github.com/scieloorg/xylose.git@5a22638f77c1428300b0be056387459b9832a42e#egg=xylose
| name: xylose
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- mocker==1.1.1
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- tomli==2.2.1
prefix: /opt/conda/envs/xylose
| [
"tests/test_document.py::ArticleTests::test_end_page_loaded_crazy_legacy_way_1",
"tests/test_document.py::ArticleTests::test_end_page_loaded_crazy_legacy_way_2",
"tests/test_document.py::ArticleTests::test_start_page_loaded_crazy_legacy_way_1",
"tests/test_document.py::ArticleTests::test_start_page_loaded_crazy_legacy_way_2"
]
| []
| [
"tests/test_document.py::ToolsTests::test_get_language_iso639_1_defined",
"tests/test_document.py::ToolsTests::test_get_language_iso639_1_undefined",
"tests/test_document.py::ToolsTests::test_get_language_iso639_2_defined",
"tests/test_document.py::ToolsTests::test_get_language_iso639_2_undefined",
"tests/test_document.py::ToolsTests::test_get_language_without_iso_format",
"tests/test_document.py::ToolsTests::test_get_publication_date_wrong_day",
"tests/test_document.py::ToolsTests::test_get_publication_date_wrong_day_month",
"tests/test_document.py::ToolsTests::test_get_publication_date_wrong_day_month_not_int",
"tests/test_document.py::ToolsTests::test_get_publication_date_wrong_day_not_int",
"tests/test_document.py::ToolsTests::test_get_publication_date_wrong_month_not_int",
"tests/test_document.py::ToolsTests::test_get_publication_date_year",
"tests/test_document.py::ToolsTests::test_get_publication_date_year_day",
"tests/test_document.py::ToolsTests::test_get_publication_date_year_month",
"tests/test_document.py::ToolsTests::test_get_publication_date_year_month_day",
"tests/test_document.py::JournalTests::test_any_issn_priority_electronic",
"tests/test_document.py::JournalTests::test_any_issn_priority_electronic_without_electronic",
"tests/test_document.py::JournalTests::test_any_issn_priority_print",
"tests/test_document.py::JournalTests::test_any_issn_priority_print_without_print",
"tests/test_document.py::JournalTests::test_collection_acronym",
"tests/test_document.py::JournalTests::test_creation_date",
"tests/test_document.py::JournalTests::test_current_status",
"tests/test_document.py::JournalTests::test_current_status_lots_of_changes_study_case_1",
"tests/test_document.py::JournalTests::test_current_status_some_changes",
"tests/test_document.py::JournalTests::test_current_without_v51",
"tests/test_document.py::JournalTests::test_journal",
"tests/test_document.py::JournalTests::test_journal_abbreviated_title",
"tests/test_document.py::JournalTests::test_journal_acronym",
"tests/test_document.py::JournalTests::test_journal_title",
"tests/test_document.py::JournalTests::test_journal_title_nlm",
"tests/test_document.py::JournalTests::test_journal_url",
"tests/test_document.py::JournalTests::test_languages",
"tests/test_document.py::JournalTests::test_languages_without_v350",
"tests/test_document.py::JournalTests::test_load_issn_with_v935_and_v35_ONLINE",
"tests/test_document.py::JournalTests::test_load_issn_with_v935_and_v35_PRINT",
"tests/test_document.py::JournalTests::test_load_issn_with_v935_equal_v400_and_v35_ONLINE",
"tests/test_document.py::JournalTests::test_load_issn_with_v935_equal_v400_and_v35_PRINT",
"tests/test_document.py::JournalTests::test_load_issn_with_v935_without_v35",
"tests/test_document.py::JournalTests::test_load_issn_without_v935_and_v35_ONLINE",
"tests/test_document.py::JournalTests::test_load_issn_without_v935_and_v35_PRINT",
"tests/test_document.py::JournalTests::test_load_issn_without_v935_without_v35",
"tests/test_document.py::JournalTests::test_permission_id",
"tests/test_document.py::JournalTests::test_permission_text",
"tests/test_document.py::JournalTests::test_permission_url",
"tests/test_document.py::JournalTests::test_permission_without_v540",
"tests/test_document.py::JournalTests::test_permission_without_v540_t",
"tests/test_document.py::JournalTests::test_publisher_loc",
"tests/test_document.py::JournalTests::test_publisher_name",
"tests/test_document.py::JournalTests::test_scielo_issn",
"tests/test_document.py::JournalTests::test_status",
"tests/test_document.py::JournalTests::test_status_lots_of_changes",
"tests/test_document.py::JournalTests::test_status_lots_of_changes_study_case_1",
"tests/test_document.py::JournalTests::test_status_some_changes",
"tests/test_document.py::JournalTests::test_status_without_v51",
"tests/test_document.py::JournalTests::test_subject_areas",
"tests/test_document.py::JournalTests::test_update_date",
"tests/test_document.py::JournalTests::test_without_journal_abbreviated_title",
"tests/test_document.py::JournalTests::test_without_journal_acronym",
"tests/test_document.py::JournalTests::test_without_journal_title",
"tests/test_document.py::JournalTests::test_without_journal_title_nlm",
"tests/test_document.py::JournalTests::test_without_journal_url",
"tests/test_document.py::JournalTests::test_without_publisher_loc",
"tests/test_document.py::JournalTests::test_without_publisher_name",
"tests/test_document.py::JournalTests::test_without_scielo_domain",
"tests/test_document.py::JournalTests::test_without_scielo_domain_title_v690",
"tests/test_document.py::JournalTests::test_without_subject_areas",
"tests/test_document.py::JournalTests::test_without_wos_citation_indexes",
"tests/test_document.py::JournalTests::test_without_wos_subject_areas",
"tests/test_document.py::JournalTests::test_wos_citation_indexes",
"tests/test_document.py::JournalTests::test_wos_subject_areas",
"tests/test_document.py::ArticleTests::test_acceptance_date",
"tests/test_document.py::ArticleTests::test_affiliation_just_with_affiliation_name",
"tests/test_document.py::ArticleTests::test_affiliation_without_affiliation_name",
"tests/test_document.py::ArticleTests::test_affiliations",
"tests/test_document.py::ArticleTests::test_ahead_publication_date",
"tests/test_document.py::ArticleTests::test_article",
"tests/test_document.py::ArticleTests::test_author_with_two_affiliations",
"tests/test_document.py::ArticleTests::test_author_with_two_role",
"tests/test_document.py::ArticleTests::test_author_without_affiliations",
"tests/test_document.py::ArticleTests::test_author_without_surname_and_given_names",
"tests/test_document.py::ArticleTests::test_authors",
"tests/test_document.py::ArticleTests::test_collection_acronym",
"tests/test_document.py::ArticleTests::test_collection_acronym_priorizing_collection",
"tests/test_document.py::ArticleTests::test_collection_acronym_retrieving_v992",
"tests/test_document.py::ArticleTests::test_collection_name_brazil",
"tests/test_document.py::ArticleTests::test_collection_name_undefined",
"tests/test_document.py::ArticleTests::test_corporative_authors",
"tests/test_document.py::ArticleTests::test_document_type",
"tests/test_document.py::ArticleTests::test_doi",
"tests/test_document.py::ArticleTests::test_end_page_loaded_through_xml",
"tests/test_document.py::ArticleTests::test_file_code",
"tests/test_document.py::ArticleTests::test_file_code_crazy_slashs_1",
"tests/test_document.py::ArticleTests::test_file_code_crazy_slashs_2",
"tests/test_document.py::ArticleTests::test_first_author",
"tests/test_document.py::ArticleTests::test_first_author_without_author",
"tests/test_document.py::ArticleTests::test_fulltexts_field_fulltexts",
"tests/test_document.py::ArticleTests::test_fulltexts_without_field_fulltexts",
"tests/test_document.py::ArticleTests::test_html_url",
"tests/test_document.py::ArticleTests::test_invalid_document_type",
"tests/test_document.py::ArticleTests::test_issue",
"tests/test_document.py::ArticleTests::test_issue_label_field_v4",
"tests/test_document.py::ArticleTests::test_issue_label_without_field_v4",
"tests/test_document.py::ArticleTests::test_issue_url",
"tests/test_document.py::ArticleTests::test_journal_abbreviated_title",
"tests/test_document.py::ArticleTests::test_journal_acronym",
"tests/test_document.py::ArticleTests::test_journal_title",
"tests/test_document.py::ArticleTests::test_keywords",
"tests/test_document.py::ArticleTests::test_keywords_iso639_2",
"tests/test_document.py::ArticleTests::test_keywords_with_undefined_language",
"tests/test_document.py::ArticleTests::test_keywords_without_subfield_k",
"tests/test_document.py::ArticleTests::test_keywords_without_subfield_l",
"tests/test_document.py::ArticleTests::test_languages_field_fulltexts",
"tests/test_document.py::ArticleTests::test_languages_field_v40",
"tests/test_document.py::ArticleTests::test_last_page",
"tests/test_document.py::ArticleTests::test_mixed_affiliations",
"tests/test_document.py::ArticleTests::test_normalized_affiliations",
"tests/test_document.py::ArticleTests::test_normalized_affiliations_undefined_ISO_3166_CODE",
"tests/test_document.py::ArticleTests::test_normalized_affiliations_without_p",
"tests/test_document.py::ArticleTests::test_original_abstract_with_just_one_language_defined",
"tests/test_document.py::ArticleTests::test_original_abstract_with_language_defined",
"tests/test_document.py::ArticleTests::test_original_abstract_with_language_defined_but_different_of_the_article_original_language",
"tests/test_document.py::ArticleTests::test_original_abstract_without_language_defined",
"tests/test_document.py::ArticleTests::test_original_language_invalid_iso639_2",
"tests/test_document.py::ArticleTests::test_original_language_iso639_2",
"tests/test_document.py::ArticleTests::test_original_language_original",
"tests/test_document.py::ArticleTests::test_original_title_with_just_one_language_defined",
"tests/test_document.py::ArticleTests::test_original_title_with_language_defined",
"tests/test_document.py::ArticleTests::test_original_title_with_language_defined_but_different_of_the_article_original_language",
"tests/test_document.py::ArticleTests::test_original_title_without_language_defined",
"tests/test_document.py::ArticleTests::test_pdf_url",
"tests/test_document.py::ArticleTests::test_processing_date",
"tests/test_document.py::ArticleTests::test_project_name",
"tests/test_document.py::ArticleTests::test_project_sponsors",
"tests/test_document.py::ArticleTests::test_publication_contract",
"tests/test_document.py::ArticleTests::test_publication_date",
"tests/test_document.py::ArticleTests::test_publisher_id",
"tests/test_document.py::ArticleTests::test_publisher_loc",
"tests/test_document.py::ArticleTests::test_publisher_name",
"tests/test_document.py::ArticleTests::test_receive_date",
"tests/test_document.py::ArticleTests::test_review_date",
"tests/test_document.py::ArticleTests::test_start_page",
"tests/test_document.py::ArticleTests::test_start_page_loaded_through_xml",
"tests/test_document.py::ArticleTests::test_subject_areas",
"tests/test_document.py::ArticleTests::test_supplement_issue",
"tests/test_document.py::ArticleTests::test_supplement_volume",
"tests/test_document.py::ArticleTests::test_thesis_degree",
"tests/test_document.py::ArticleTests::test_thesis_organization",
"tests/test_document.py::ArticleTests::test_thesis_organization_and_division",
"tests/test_document.py::ArticleTests::test_thesis_organization_without_name",
"tests/test_document.py::ArticleTests::test_translated_abstracts",
"tests/test_document.py::ArticleTests::test_translated_abstracts_without_v83",
"tests/test_document.py::ArticleTests::test_translated_abtracts_iso639_2",
"tests/test_document.py::ArticleTests::test_translated_titles",
"tests/test_document.py::ArticleTests::test_translated_titles_iso639_2",
"tests/test_document.py::ArticleTests::test_translated_titles_without_v12",
"tests/test_document.py::ArticleTests::test_volume",
"tests/test_document.py::ArticleTests::test_whitwout_acceptance_date",
"tests/test_document.py::ArticleTests::test_whitwout_ahead_publication_date",
"tests/test_document.py::ArticleTests::test_whitwout_receive_date",
"tests/test_document.py::ArticleTests::test_whitwout_review_date",
"tests/test_document.py::ArticleTests::test_without_affiliations",
"tests/test_document.py::ArticleTests::test_without_authors",
"tests/test_document.py::ArticleTests::test_without_citations",
"tests/test_document.py::ArticleTests::test_without_collection_acronym",
"tests/test_document.py::ArticleTests::test_without_corporative_authors",
"tests/test_document.py::ArticleTests::test_without_document_type",
"tests/test_document.py::ArticleTests::test_without_doi",
"tests/test_document.py::ArticleTests::test_without_html_url",
"tests/test_document.py::ArticleTests::test_without_issue",
"tests/test_document.py::ArticleTests::test_without_issue_url",
"tests/test_document.py::ArticleTests::test_without_journal_abbreviated_title",
"tests/test_document.py::ArticleTests::test_without_journal_acronym",
"tests/test_document.py::ArticleTests::test_without_journal_title",
"tests/test_document.py::ArticleTests::test_without_keywords",
"tests/test_document.py::ArticleTests::test_without_last_page",
"tests/test_document.py::ArticleTests::test_without_normalized_affiliations",
"tests/test_document.py::ArticleTests::test_without_original_abstract",
"tests/test_document.py::ArticleTests::test_without_original_title",
"tests/test_document.py::ArticleTests::test_without_pages",
"tests/test_document.py::ArticleTests::test_without_pdf_url",
"tests/test_document.py::ArticleTests::test_without_processing_date",
"tests/test_document.py::ArticleTests::test_without_project_name",
"tests/test_document.py::ArticleTests::test_without_project_sponsor",
"tests/test_document.py::ArticleTests::test_without_publication_contract",
"tests/test_document.py::ArticleTests::test_without_publication_date",
"tests/test_document.py::ArticleTests::test_without_publisher_id",
"tests/test_document.py::ArticleTests::test_without_publisher_loc",
"tests/test_document.py::ArticleTests::test_without_publisher_name",
"tests/test_document.py::ArticleTests::test_without_scielo_domain",
"tests/test_document.py::ArticleTests::test_without_scielo_domain_article_v69",
"tests/test_document.py::ArticleTests::test_without_scielo_domain_article_v69_and_with_title_v690",
"tests/test_document.py::ArticleTests::test_without_scielo_domain_title_v690",
"tests/test_document.py::ArticleTests::test_without_start_page",
"tests/test_document.py::ArticleTests::test_without_subject_areas",
"tests/test_document.py::ArticleTests::test_without_suplement_issue",
"tests/test_document.py::ArticleTests::test_without_supplement_volume",
"tests/test_document.py::ArticleTests::test_without_thesis_degree",
"tests/test_document.py::ArticleTests::test_without_thesis_organization",
"tests/test_document.py::ArticleTests::test_without_volume",
"tests/test_document.py::ArticleTests::test_without_wos_citation_indexes",
"tests/test_document.py::ArticleTests::test_without_wos_subject_areas",
"tests/test_document.py::ArticleTests::test_wos_citation_indexes",
"tests/test_document.py::ArticleTests::test_wos_subject_areas",
"tests/test_document.py::CitationTest::test_a_link_access_date",
"tests/test_document.py::CitationTest::test_analytic_institution_for_a_article_citation",
"tests/test_document.py::CitationTest::test_analytic_institution_for_a_book_citation",
"tests/test_document.py::CitationTest::test_article_title",
"tests/test_document.py::CitationTest::test_article_without_title",
"tests/test_document.py::CitationTest::test_authors_article",
"tests/test_document.py::CitationTest::test_authors_book",
"tests/test_document.py::CitationTest::test_authors_link",
"tests/test_document.py::CitationTest::test_authors_thesis",
"tests/test_document.py::CitationTest::test_book_chapter_title",
"tests/test_document.py::CitationTest::test_book_edition",
"tests/test_document.py::CitationTest::test_book_volume",
"tests/test_document.py::CitationTest::test_book_without_chapter_title",
"tests/test_document.py::CitationTest::test_citation_sample_congress",
"tests/test_document.py::CitationTest::test_citation_sample_link",
"tests/test_document.py::CitationTest::test_citation_sample_link_without_comment",
"tests/test_document.py::CitationTest::test_conference_edition",
"tests/test_document.py::CitationTest::test_conference_name",
"tests/test_document.py::CitationTest::test_conference_sponsor",
"tests/test_document.py::CitationTest::test_conference_without_name",
"tests/test_document.py::CitationTest::test_conference_without_sponsor",
"tests/test_document.py::CitationTest::test_date",
"tests/test_document.py::CitationTest::test_doi",
"tests/test_document.py::CitationTest::test_editor",
"tests/test_document.py::CitationTest::test_end_page_14",
"tests/test_document.py::CitationTest::test_end_page_514",
"tests/test_document.py::CitationTest::test_end_page_withdout_data",
"tests/test_document.py::CitationTest::test_first_author_article",
"tests/test_document.py::CitationTest::test_first_author_book",
"tests/test_document.py::CitationTest::test_first_author_link",
"tests/test_document.py::CitationTest::test_first_author_thesis",
"tests/test_document.py::CitationTest::test_first_author_without_monographic_authors",
"tests/test_document.py::CitationTest::test_first_author_without_monographic_authors_but_not_a_book_citation",
"tests/test_document.py::CitationTest::test_index_number",
"tests/test_document.py::CitationTest::test_institutions_all_fields",
"tests/test_document.py::CitationTest::test_institutions_v11",
"tests/test_document.py::CitationTest::test_institutions_v17",
"tests/test_document.py::CitationTest::test_institutions_v29",
"tests/test_document.py::CitationTest::test_institutions_v50",
"tests/test_document.py::CitationTest::test_institutions_v58",
"tests/test_document.py::CitationTest::test_invalid_edition",
"tests/test_document.py::CitationTest::test_isbn",
"tests/test_document.py::CitationTest::test_isbn_but_not_a_book",
"tests/test_document.py::CitationTest::test_issn",
"tests/test_document.py::CitationTest::test_issn_but_not_an_article",
"tests/test_document.py::CitationTest::test_issue_part",
"tests/test_document.py::CitationTest::test_issue_title",
"tests/test_document.py::CitationTest::test_journal_issue",
"tests/test_document.py::CitationTest::test_journal_volume",
"tests/test_document.py::CitationTest::test_link",
"tests/test_document.py::CitationTest::test_link_title",
"tests/test_document.py::CitationTest::test_link_without_title",
"tests/test_document.py::CitationTest::test_monographic_authors",
"tests/test_document.py::CitationTest::test_monographic_first_author",
"tests/test_document.py::CitationTest::test_pages_14",
"tests/test_document.py::CitationTest::test_pages_514",
"tests/test_document.py::CitationTest::test_pages_withdout_data",
"tests/test_document.py::CitationTest::test_publication_type_article",
"tests/test_document.py::CitationTest::test_publication_type_book",
"tests/test_document.py::CitationTest::test_publication_type_conference",
"tests/test_document.py::CitationTest::test_publication_type_link",
"tests/test_document.py::CitationTest::test_publication_type_thesis",
"tests/test_document.py::CitationTest::test_publication_type_undefined",
"tests/test_document.py::CitationTest::test_publisher",
"tests/test_document.py::CitationTest::test_publisher_address",
"tests/test_document.py::CitationTest::test_publisher_address_without_e",
"tests/test_document.py::CitationTest::test_series_book",
"tests/test_document.py::CitationTest::test_series_but_neither_journal_book_or_conference_citation",
"tests/test_document.py::CitationTest::test_series_conference",
"tests/test_document.py::CitationTest::test_series_journal",
"tests/test_document.py::CitationTest::test_source_book_title",
"tests/test_document.py::CitationTest::test_source_journal",
"tests/test_document.py::CitationTest::test_source_journal_without_journal_title",
"tests/test_document.py::CitationTest::test_sponsor",
"tests/test_document.py::CitationTest::test_start_page_14",
"tests/test_document.py::CitationTest::test_start_page_514",
"tests/test_document.py::CitationTest::test_start_page_withdout_data",
"tests/test_document.py::CitationTest::test_thesis_institution",
"tests/test_document.py::CitationTest::test_thesis_title",
"tests/test_document.py::CitationTest::test_thesis_without_title",
"tests/test_document.py::CitationTest::test_title_when_article_citation",
"tests/test_document.py::CitationTest::test_title_when_conference_citation",
"tests/test_document.py::CitationTest::test_title_when_link_citation",
"tests/test_document.py::CitationTest::test_title_when_thesis_citation",
"tests/test_document.py::CitationTest::test_with_volume_but_not_a_journal_article_neither_a_book",
"tests/test_document.py::CitationTest::test_without_analytic_institution",
"tests/test_document.py::CitationTest::test_without_authors",
"tests/test_document.py::CitationTest::test_without_date",
"tests/test_document.py::CitationTest::test_without_doi",
"tests/test_document.py::CitationTest::test_without_edition",
"tests/test_document.py::CitationTest::test_without_editor",
"tests/test_document.py::CitationTest::test_without_first_author",
"tests/test_document.py::CitationTest::test_without_index_number",
"tests/test_document.py::CitationTest::test_without_institutions",
"tests/test_document.py::CitationTest::test_without_issue",
"tests/test_document.py::CitationTest::test_without_issue_part",
"tests/test_document.py::CitationTest::test_without_issue_title",
"tests/test_document.py::CitationTest::test_without_link",
"tests/test_document.py::CitationTest::test_without_monographic_authors",
"tests/test_document.py::CitationTest::test_without_monographic_authors_but_not_a_book_citation",
"tests/test_document.py::CitationTest::test_without_publisher",
"tests/test_document.py::CitationTest::test_without_publisher_address",
"tests/test_document.py::CitationTest::test_without_series",
"tests/test_document.py::CitationTest::test_without_sponsor",
"tests/test_document.py::CitationTest::test_without_thesis_institution",
"tests/test_document.py::CitationTest::test_without_volume"
]
| []
| BSD 2-Clause "Simplified" License | 151 | [
"setup.py",
"xylose/scielodocument.py"
]
| [
"setup.py",
"xylose/scielodocument.py"
]
|
|
nose-devs__nose2-245 | bf9945309d5118ad4723619452c486593964d1b8 | 2015-05-27 19:59:26 | bbf5897eb1aa224100e86ba594042e4399fd2f5f | landscape-bot: [Code Health](https://landscape.io/diff/163690)
Repository health decreased by 0.00% when pulling **[28cda3a](https://github.com/dlax/nose2/commit/28cda3abf4d51b58fd686b0b6c0ff5c46c38f51b) on dlax:generator-exception** into **[bf99453](https://github.com/nose-devs/nose2/commit/bf9945309d5118ad4723619452c486593964d1b8) on nose-devs:master**.
* [1 new problem was found](https://landscape.io/diff/163690) (including 0 errors and 1 code smell).
* [1 problem was fixed](https://landscape.io/diff/163690/fixed) (including 0 errors and 1 code smell).
coveralls:
[Coverage Status](https://coveralls.io/builds/2664738)
Coverage decreased (-0.06%) to 84.25% when pulling **28cda3abf4d51b58fd686b0b6c0ff5c46c38f51b on dlax:generator-exception** into **bf9945309d5118ad4723619452c486593964d1b8 on nose-devs:master**.
landscape-bot: [Code Health](https://landscape.io/diff/163709)
Repository health increased by 0.07% when pulling **[ce05d05](https://github.com/dlax/nose2/commit/ce05d05f4ed1d9bb60cade4ccdb031637385585b) on dlax:generator-exception** into **[bf99453](https://github.com/nose-devs/nose2/commit/bf9945309d5118ad4723619452c486593964d1b8) on nose-devs:master**.
* No new problems were introduced.
* [1 problem was fixed](https://landscape.io/diff/163709/fixed) (including 0 errors and 1 code smell).
coveralls:
[Coverage Status](https://coveralls.io/builds/2664909)
Coverage increased (+0.02%) to 84.32% when pulling **ce05d05f4ed1d9bb60cade4ccdb031637385585b on dlax:generator-exception** into **bf9945309d5118ad4723619452c486593964d1b8 on nose-devs:master**.
dlax: Hmm, this does not actually fix #48 completely, since the exception context is not forwarded down to the test failure. Will work further on this...
dlax: Pushed a new version.
coveralls:
[Coverage Status](https://coveralls.io/builds/2668668)
Coverage decreased (-2.57%) to 81.73% when pulling **778265de7a289dda9ab9755ba2c08c89642b80ac on dlax:generator-exception** into **bf9945309d5118ad4723619452c486593964d1b8 on nose-devs:master**.
landscape-bot: [Code Health](https://landscape.io/diff/164010)
Code quality remained the same when pulling **[778265d](https://github.com/dlax/nose2/commit/778265de7a289dda9ab9755ba2c08c89642b80ac) on dlax:generator-exception** into **[bf99453](https://github.com/nose-devs/nose2/commit/bf9945309d5118ad4723619452c486593964d1b8) on nose-devs:master**. | diff --git a/nose2/loader.py b/nose2/loader.py
index 2778280..394899f 100644
--- a/nose2/loader.py
+++ b/nose2/loader.py
@@ -7,6 +7,8 @@
import logging
import traceback
+import six
+
from nose2 import events
from nose2.compat import unittest
@@ -114,7 +116,11 @@ class PluggableTestLoader(object):
def _makeFailedTest(self, classname, methodname, exception):
def testFailure(self):
- raise exception
+ if isinstance(exception, Exception):
+ raise exception
+ else:
+ # exception tuple (type, value, traceback)
+ six.reraise(*exception)
attrs = {methodname: testFailure}
TestClass = type(classname, (unittest.TestCase,), attrs)
return self.suiteClass((TestClass(methodname),))
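The patched `_makeFailedTest` now accepts either a bare exception or a `(type, value, traceback)` tuple, so `six.reraise` can restore the original traceback when the synthetic failure test finally runs. A minimal standalone sketch of the same pattern (`make_failed_test` and the usage below are illustrative, not the project's API):

```python
import sys
import unittest

import six  # assumed available, as in the patch above


def make_failed_test(classname, methodname, exception):
    # `exception` may be an Exception instance or an exc_info tuple.
    def testFailure(self):
        if isinstance(exception, Exception):
            raise exception
        else:
            six.reraise(*exception)  # re-raise with the original traceback
    TestClass = type(classname, (unittest.TestCase,), {methodname: testFailure})
    return TestClass(methodname)


# Capture the full sys.exc_info() at load time instead of just the
# exception object, so the traceback survives until the test runs.
try:
    raise AttributeError("'Foo' object has no attribute 'bar'")
except AttributeError:
    failed = make_failed_test('LoaderFailure', 'test', sys.exc_info())
```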
| Loader errors throw away tracebacks and exception detail
For instance, if a generator test throws an AttributeError, the details of the attribute error are lost and the user only sees "AttributeError" -- not very helpful. | nose-devs/nose2 | diff --git a/nose2/tests/unit/test_loader.py b/nose2/tests/unit/test_loader.py
index 24114e5..8dc152c 100644
--- a/nose2/tests/unit/test_loader.py
+++ b/nose2/tests/unit/test_loader.py
@@ -8,6 +8,21 @@ class TestPluggableTestLoader(TestCase):
self.session = session.Session()
self.loader = loader.PluggableTestLoader(self.session)
+ def test_failed_load_tests_exception(self):
+ suite = self.loader.failedLoadTests('test', RuntimeError('err'))
+ tc = suite._tests[0]
+ with self.assertRaises(RuntimeError) as cm:
+ tc.test()
+ self.assertEqual(cm.exception.args, ('err', ))
+
+ def test_failed_load_tests_exc_info(self):
+ suite = self.loader.failedLoadTests(
+ 'test', (RuntimeError, RuntimeError('err'), None))
+ tc = suite._tests[0]
+ with self.assertRaises(RuntimeError) as cm:
+ tc.test()
+ self.assertEqual(cm.exception.args, ('err', ))
+
def test_load_from_module_calls_hook(self):
self.session.hooks.register('loadTestsFromModule', FakePlugin())
evt = events.LoadFromModuleEvent(self.loader, 'some_module')
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose2",
"pytest"
],
"pre_install": [],
"python": "3.7",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi @ file:///croot/certifi_1671487769961/work/certifi
cov-core==1.15.0
coverage==7.2.7
exceptiongroup==1.2.2
importlib-metadata==6.7.0
iniconfig==2.0.0
-e git+https://github.com/nose-devs/nose2.git@bf9945309d5118ad4723619452c486593964d1b8#egg=nose2
packaging==24.0
pluggy==1.2.0
pytest==7.4.4
six==1.17.0
tomli==2.0.1
typing_extensions==4.7.1
zipp==3.15.0
| name: nose2
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=22.3.1=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cov-core==1.15.0
- coverage==7.2.7
- exceptiongroup==1.2.2
- importlib-metadata==6.7.0
- iniconfig==2.0.0
- packaging==24.0
- pluggy==1.2.0
- pytest==7.4.4
- six==1.17.0
- tomli==2.0.1
- typing-extensions==4.7.1
- zipp==3.15.0
prefix: /opt/conda/envs/nose2
| [
"nose2/tests/unit/test_loader.py::TestPluggableTestLoader::test_failed_load_tests_exc_info"
]
| []
| [
"nose2/tests/unit/test_loader.py::TestPluggableTestLoader::test_failed_load_tests_exception",
"nose2/tests/unit/test_loader.py::TestPluggableTestLoader::test_load_from_module_calls_hook",
"nose2/tests/unit/test_loader.py::TestPluggableTestLoader::test_load_from_name_calls_hook",
"nose2/tests/unit/test_loader.py::TestPluggableTestLoader::test_load_from_names_calls_hook",
"nose2/tests/unit/test_loader.py::TestPluggableTestLoader::test_loader_from_names_calls_module_hook",
"nose2/tests/unit/test_loader.py::TestPluggableTestLoader::test_loader_from_names_calls_name_hook",
"nose2/tests/unit/test_loader.py::TestPluggableTestLoader::test_loader_from_names_calls_names_hook"
]
| []
| BSD | 152 | [
"nose2/loader.py"
]
| [
"nose2/loader.py"
]
|
docker-tow__tow-62 | 08473f45c43a0f0f6a63e811266c4f1d746ff78a | 2015-05-29 12:55:52 | 08473f45c43a0f0f6a63e811266c4f1d746ff78a | diff --git a/tow/commands/build.py b/tow/commands/build.py
index f072ae0..9343554 100644
--- a/tow/commands/build.py
+++ b/tow/commands/build.py
@@ -31,11 +31,17 @@ class BuildCommand(Command):
# Check if you would like to patch Dockerfile in order to use
# reconfiguration on run phase
if namespace.tow_run:
+ command = []
(entrypoint, cmd) = dockerfile.find_entrypoint_or_cmd()
+ if cmd:
+ command += cmd
+ if entrypoint:
+ command = entrypoint + command
+ command.append("$@")
templates.process_template("tow.sh.tmpl",
os.path.join(workingdir, "tow.sh"),
- {"entrypoint": entrypoint,
- "cmd": cmd, "mapping": file_mapping,
+ {"command": command,
+ "mapping": file_mapping,
"volume_name": TOW_VOLUME})
file_mapping.append(("tow.sh", "/tow.sh", 755))
dockerfile.replace_entrypoint_or_cmd_by_tow_cmd("sh /tow.sh")
diff --git a/tow/dockerfile.py b/tow/dockerfile.py
index afd1290..7d15b2f 100644
--- a/tow/dockerfile.py
+++ b/tow/dockerfile.py
@@ -60,9 +60,9 @@ class Dockerfile(object):
# Handle array command
if command.startswith("[") and command.endswith("]"):
command = command[1:-1]
- return " ".join([sh.strip()[1:-1] for sh in command.split(",")])
+ return [sh.strip()[1:-1] for sh in command.split(",")]
else: # It's just shell notation
- return command.strip()
+ return [command.strip()]
return None
def envs(self):
diff --git a/tow/templates/tow.sh.tmpl b/tow/templates/tow.sh.tmpl
index 64d612f..9cf076b 100644
--- a/tow/templates/tow.sh.tmpl
+++ b/tow/templates/tow.sh.tmpl
@@ -7,12 +7,4 @@ fi
PATH=$PATH:`pwd`
-{% if entrypoint %}
-{% if cmd %}
-"{{entrypoint}}" "{{cmd}}" "$@"
-{% else %}
-"{{entrypoint}}" "$@"
-{% endif %}
-{% else %}
-"{{cmd}}"
-{% endif %}
+{% for cmd in command -%}"{{ cmd }}" {% endfor %}
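The fix collapses ENTRYPOINT and CMD into a single list (with `"$@"` appended in build.py) and lets the template quote each element separately. A quick sketch of the rendering, using jinja2 directly (tow's own template loading is assumed to behave equivalently):

```python
# Render the patched one-line template against a combined command list.
from jinja2 import Template

template = Template('{% for cmd in command -%}"{{ cmd }}" {% endfor %}')

entrypoint, cmd = ["test"], ["arg1", "arg2"]
command = entrypoint + cmd + ["$@"]

print(template.render(command=command))  # "test" "arg1" "arg2" "$@"
```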
| CMD and ENTRYPOINT arrays should stay arrays in tow.sh
```Dockerfile
ENTRYPOINT ["test"]
CMD ["arg1", "arg2"]
```
should be converted to tow.sh
```bash
"test" "arg1" "arg2" "$@"
```
but not to
```bash
"test" "arg1 arg2" "$@"
``` | docker-tow/tow | diff --git a/tests/dockerfile_tests.py b/tests/dockerfile_tests.py
index 160b5f1..00f563a 100644
--- a/tests/dockerfile_tests.py
+++ b/tests/dockerfile_tests.py
@@ -61,22 +61,22 @@ class DockerfileTest(unittest.TestCase):
def test_find_entrypoint_or_cmd(self):
d = Dockerfile("Dockerfile")
d._Dockerfile__dockerfile = ['FROM ubuntu', 'ENTRYPOINT ["/bin/sh"]', 'CMD ["-c"]']
- self.assertEqual(d.find_entrypoint_or_cmd(), ("/bin/sh", "-c"))
+ self.assertEqual(d.find_entrypoint_or_cmd(), (["/bin/sh"], ["-c"]))
def test_find_entrypoint_or_cmd_shell_style(self):
d = Dockerfile("Dockerfile")
d._Dockerfile__dockerfile = ['FROM ubuntu', 'ENTRYPOINT /bin/sh', 'CMD ["-c"]']
- self.assertEqual(d.find_entrypoint_or_cmd(), ("/bin/sh", "-c"))
+ self.assertEqual(d.find_entrypoint_or_cmd(), (["/bin/sh"], ["-c"]))
def test_find_entrypoint_or_cmd_cmd_only(self):
d = Dockerfile("Dockerfile")
d._Dockerfile__dockerfile = ['FROM ubuntu', 'CMD ["/bin/sh", "-c", "-x"]']
- self.assertEqual(d.find_entrypoint_or_cmd(), (None, "/bin/sh -c -x"))
+ self.assertEqual(d.find_entrypoint_or_cmd(), (None, ["/bin/sh", "-c", "-x"]))
def test_find_entrypoint_or_cmd_entrypoint_only(self):
d = Dockerfile("Dockerfile")
d._Dockerfile__dockerfile = ['FROM ubuntu', 'ENTRYPOINT ["/bin/sh"]']
- self.assertEqual(d.find_entrypoint_or_cmd(), ("/bin/sh", None))
+ self.assertEqual(d.find_entrypoint_or_cmd(), (["/bin/sh"], None))
def test_find_entrypoint_or_cmd_none(self):
d = Dockerfile("Dockerfile")
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 3
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==3.1.6
MarkupSafe==3.0.2
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
-e git+https://github.com/docker-tow/tow.git@08473f45c43a0f0f6a63e811266c4f1d746ff78a#egg=tow
| name: tow
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- jinja2==3.1.6
- markupsafe==3.0.2
prefix: /opt/conda/envs/tow
| [
"tests/dockerfile_tests.py::DockerfileTest::test_find_entrypoint_or_cmd",
"tests/dockerfile_tests.py::DockerfileTest::test_find_entrypoint_or_cmd_cmd_only",
"tests/dockerfile_tests.py::DockerfileTest::test_find_entrypoint_or_cmd_entrypoint_only",
"tests/dockerfile_tests.py::DockerfileTest::test_find_entrypoint_or_cmd_shell_style"
]
| []
| [
"tests/dockerfile_tests.py::DockerfileTest::test_add_copy",
"tests/dockerfile_tests.py::DockerfileTest::test_add_copy_after_from",
"tests/dockerfile_tests.py::DockerfileTest::test_add_copy_after_maintainer",
"tests/dockerfile_tests.py::DockerfileTest::test_find_entrypoint_or_cmd_none",
"tests/dockerfile_tests.py::DockerfileTest::test_parse_many_envs",
"tests/dockerfile_tests.py::DockerfileTest::test_parse_multiline",
"tests/dockerfile_tests.py::DockerfileTest::test_parse_spaced_envs"
]
| []
| Apache License 2.0 | 153 | [
"tow/dockerfile.py",
"tow/templates/tow.sh.tmpl",
"tow/commands/build.py"
]
| [
"tow/dockerfile.py",
"tow/templates/tow.sh.tmpl",
"tow/commands/build.py"
]
|
|
scieloorg__xylose-74 | 59090451a1aa5b226666035fc326011f1623c90c | 2015-05-29 20:39:29 | 59090451a1aa5b226666035fc326011f1623c90c | diff --git a/setup.py b/setup.py
index e3f1883..0c97773 100755
--- a/setup.py
+++ b/setup.py
@@ -7,7 +7,7 @@ except ImportError:
setup(
name="xylose",
- version='0.8b',
+ version='0.9b',
description="A SciELO library to abstract a JSON data structure that is a product of the ISIS2JSON conversion using the ISIS2JSON type 3 data model.",
author="SciELO",
author_email="[email protected]",
diff --git a/xylose/scielodocument.py b/xylose/scielodocument.py
index 39d94e3..8e5df08 100644
--- a/xylose/scielodocument.py
+++ b/xylose/scielodocument.py
@@ -27,6 +27,7 @@ else:
LICENSE_REGEX = re.compile(r'a.+href="(.+)"')
LICENSE_CREATIVE_COMMONS = re.compile(r'licenses/(.*)/.') # Extracts the creative commons id from the url.
+DOI_REGEX = re.compile(r'\d{2}\.\d+/.*$')
def html_decode(string):
@@ -820,11 +821,22 @@ class Article(object):
"""
This method retrieves the DOI of the given article, if it exists.
"""
+ raw_doi = None
+
if 'doi' in self.data:
- return self.data['doi']
+ raw_doi = self.data['doi']
if 'v237' in self.data['article']:
- return self.data['article']['v237'][0]['_']
+ raw_doi = self.data['article']['v237'][0]['_']
+
+
+ if not raw_doi:
+ return None
+
+ doi = DOI_REGEX.findall(raw_doi)
+
+ if len(doi) == 1:
+ return doi[0]
@property
def publisher_id(self):
| Retrieval of the document DOI.
Some DOIs are registered in the ISIS databases with the crossref domain included.
Xylose should strip any content that is not part of the DOI itself (a sketch of the extraction follows the examples below).
e.g.:
http://www.crossref.org/10.1590/S2179-975X2012005000004
doi: 10.4322/actalb.02203010
The correct values should be:
10.1590/S2179-975X2012005000004
10.4322/actalb.02203010
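A standalone sketch of that extraction, mirroring the `DOI_REGEX` the patch introduces (the loop and prints are illustrative only):

```python
import re

# Same pattern as the patch: keep everything from the "NN.xxxx/" prefix on.
DOI_REGEX = re.compile(r'\d{2}\.\d+/.*$')

for raw in ('http://www.crossref.org/10.1590/S2179-975X2012005000004',
            'doi: 10.4322/actalb.02203010'):
    found = DOI_REGEX.findall(raw)
    print(found[0] if len(found) == 1 else None)
# -> 10.1590/S2179-975X2012005000004
# -> 10.4322/actalb.02203010
```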
| scieloorg/xylose | diff --git a/tests/test_document.py b/tests/test_document.py
index a409140..4d0af56 100644
--- a/tests/test_document.py
+++ b/tests/test_document.py
@@ -1056,6 +1056,29 @@ class ArticleTests(unittest.TestCase):
self.assertEqual(article.doi, u'10.1590/S2179-975X2012005000004')
+ def test_doi_v237(self):
+ article = self.article
+
+ article.data['article']['v237'] = [{'_': u'10.1590/S2179-975X2012005000004'}]
+
+ self.assertEqual(article.doi, u'10.1590/S2179-975X2012005000004')
+
+
+ def test_doi_clean_1(self):
+ article = self.article
+
+ article.data['doi'] = u'http://www.crossref.org/10.1590/S2179-975X2012005000004'
+
+ self.assertEqual(article.doi, u'10.1590/S2179-975X2012005000004')
+
+
+ def test_doi_clean_2(self):
+ article = self.article
+
+ article.data['doi'] = u'doi: 10.4322/actalb.02203010'
+
+ self.assertEqual(article.doi, u'10.4322/actalb.02203010')
+
def test_without_doi(self):
article = self.article
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 2
} | 0.8 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "",
"pip_packages": [
"nose",
"coverage",
"mocker",
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup==1.2.2
iniconfig==2.1.0
mocker==1.1.1
nose==1.3.7
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
tomli==2.2.1
-e git+https://github.com/scieloorg/xylose.git@59090451a1aa5b226666035fc326011f1623c90c#egg=xylose
| name: xylose
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- mocker==1.1.1
- nose==1.3.7
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- tomli==2.2.1
prefix: /opt/conda/envs/xylose
| [
"tests/test_document.py::ArticleTests::test_doi_clean_1",
"tests/test_document.py::ArticleTests::test_doi_clean_2"
]
| []
| [
"tests/test_document.py::ToolsTests::test_get_language_iso639_1_defined",
"tests/test_document.py::ToolsTests::test_get_language_iso639_1_undefined",
"tests/test_document.py::ToolsTests::test_get_language_iso639_2_defined",
"tests/test_document.py::ToolsTests::test_get_language_iso639_2_undefined",
"tests/test_document.py::ToolsTests::test_get_language_without_iso_format",
"tests/test_document.py::ToolsTests::test_get_publication_date_wrong_day",
"tests/test_document.py::ToolsTests::test_get_publication_date_wrong_day_month",
"tests/test_document.py::ToolsTests::test_get_publication_date_wrong_day_month_not_int",
"tests/test_document.py::ToolsTests::test_get_publication_date_wrong_day_not_int",
"tests/test_document.py::ToolsTests::test_get_publication_date_wrong_month_not_int",
"tests/test_document.py::ToolsTests::test_get_publication_date_year",
"tests/test_document.py::ToolsTests::test_get_publication_date_year_day",
"tests/test_document.py::ToolsTests::test_get_publication_date_year_month",
"tests/test_document.py::ToolsTests::test_get_publication_date_year_month_day",
"tests/test_document.py::JournalTests::test_any_issn_priority_electronic",
"tests/test_document.py::JournalTests::test_any_issn_priority_electronic_without_electronic",
"tests/test_document.py::JournalTests::test_any_issn_priority_print",
"tests/test_document.py::JournalTests::test_any_issn_priority_print_without_print",
"tests/test_document.py::JournalTests::test_collection_acronym",
"tests/test_document.py::JournalTests::test_creation_date",
"tests/test_document.py::JournalTests::test_current_status",
"tests/test_document.py::JournalTests::test_current_status_lots_of_changes_study_case_1",
"tests/test_document.py::JournalTests::test_current_status_some_changes",
"tests/test_document.py::JournalTests::test_current_without_v51",
"tests/test_document.py::JournalTests::test_journal",
"tests/test_document.py::JournalTests::test_journal_abbreviated_title",
"tests/test_document.py::JournalTests::test_journal_acronym",
"tests/test_document.py::JournalTests::test_journal_title",
"tests/test_document.py::JournalTests::test_journal_title_nlm",
"tests/test_document.py::JournalTests::test_journal_url",
"tests/test_document.py::JournalTests::test_languages",
"tests/test_document.py::JournalTests::test_languages_without_v350",
"tests/test_document.py::JournalTests::test_load_issn_with_v935_and_v35_ONLINE",
"tests/test_document.py::JournalTests::test_load_issn_with_v935_and_v35_PRINT",
"tests/test_document.py::JournalTests::test_load_issn_with_v935_equal_v400_and_v35_ONLINE",
"tests/test_document.py::JournalTests::test_load_issn_with_v935_equal_v400_and_v35_PRINT",
"tests/test_document.py::JournalTests::test_load_issn_with_v935_without_v35",
"tests/test_document.py::JournalTests::test_load_issn_without_v935_and_v35_ONLINE",
"tests/test_document.py::JournalTests::test_load_issn_without_v935_and_v35_PRINT",
"tests/test_document.py::JournalTests::test_load_issn_without_v935_without_v35",
"tests/test_document.py::JournalTests::test_permission_id",
"tests/test_document.py::JournalTests::test_permission_text",
"tests/test_document.py::JournalTests::test_permission_url",
"tests/test_document.py::JournalTests::test_permission_without_v540",
"tests/test_document.py::JournalTests::test_permission_without_v540_t",
"tests/test_document.py::JournalTests::test_publisher_loc",
"tests/test_document.py::JournalTests::test_publisher_name",
"tests/test_document.py::JournalTests::test_scielo_issn",
"tests/test_document.py::JournalTests::test_status",
"tests/test_document.py::JournalTests::test_status_lots_of_changes",
"tests/test_document.py::JournalTests::test_status_lots_of_changes_study_case_1",
"tests/test_document.py::JournalTests::test_status_some_changes",
"tests/test_document.py::JournalTests::test_status_without_v51",
"tests/test_document.py::JournalTests::test_subject_areas",
"tests/test_document.py::JournalTests::test_update_date",
"tests/test_document.py::JournalTests::test_without_journal_abbreviated_title",
"tests/test_document.py::JournalTests::test_without_journal_acronym",
"tests/test_document.py::JournalTests::test_without_journal_title",
"tests/test_document.py::JournalTests::test_without_journal_title_nlm",
"tests/test_document.py::JournalTests::test_without_journal_url",
"tests/test_document.py::JournalTests::test_without_publisher_loc",
"tests/test_document.py::JournalTests::test_without_publisher_name",
"tests/test_document.py::JournalTests::test_without_scielo_domain",
"tests/test_document.py::JournalTests::test_without_scielo_domain_title_v690",
"tests/test_document.py::JournalTests::test_without_subject_areas",
"tests/test_document.py::JournalTests::test_without_wos_citation_indexes",
"tests/test_document.py::JournalTests::test_without_wos_subject_areas",
"tests/test_document.py::JournalTests::test_wos_citation_indexes",
"tests/test_document.py::JournalTests::test_wos_subject_areas",
"tests/test_document.py::ArticleTests::test_acceptance_date",
"tests/test_document.py::ArticleTests::test_affiliation_just_with_affiliation_name",
"tests/test_document.py::ArticleTests::test_affiliation_without_affiliation_name",
"tests/test_document.py::ArticleTests::test_affiliations",
"tests/test_document.py::ArticleTests::test_ahead_publication_date",
"tests/test_document.py::ArticleTests::test_article",
"tests/test_document.py::ArticleTests::test_author_with_two_affiliations",
"tests/test_document.py::ArticleTests::test_author_with_two_role",
"tests/test_document.py::ArticleTests::test_author_without_affiliations",
"tests/test_document.py::ArticleTests::test_author_without_surname_and_given_names",
"tests/test_document.py::ArticleTests::test_authors",
"tests/test_document.py::ArticleTests::test_collection_acronym",
"tests/test_document.py::ArticleTests::test_collection_acronym_priorizing_collection",
"tests/test_document.py::ArticleTests::test_collection_acronym_retrieving_v992",
"tests/test_document.py::ArticleTests::test_collection_name_brazil",
"tests/test_document.py::ArticleTests::test_collection_name_undefined",
"tests/test_document.py::ArticleTests::test_corporative_authors",
"tests/test_document.py::ArticleTests::test_document_type",
"tests/test_document.py::ArticleTests::test_doi",
"tests/test_document.py::ArticleTests::test_doi_v237",
"tests/test_document.py::ArticleTests::test_end_page_loaded_crazy_legacy_way_1",
"tests/test_document.py::ArticleTests::test_end_page_loaded_crazy_legacy_way_2",
"tests/test_document.py::ArticleTests::test_end_page_loaded_through_xml",
"tests/test_document.py::ArticleTests::test_file_code",
"tests/test_document.py::ArticleTests::test_file_code_crazy_slashs_1",
"tests/test_document.py::ArticleTests::test_file_code_crazy_slashs_2",
"tests/test_document.py::ArticleTests::test_first_author",
"tests/test_document.py::ArticleTests::test_first_author_without_author",
"tests/test_document.py::ArticleTests::test_fulltexts_field_fulltexts",
"tests/test_document.py::ArticleTests::test_fulltexts_without_field_fulltexts",
"tests/test_document.py::ArticleTests::test_html_url",
"tests/test_document.py::ArticleTests::test_invalid_document_type",
"tests/test_document.py::ArticleTests::test_issue",
"tests/test_document.py::ArticleTests::test_issue_label_field_v4",
"tests/test_document.py::ArticleTests::test_issue_label_without_field_v4",
"tests/test_document.py::ArticleTests::test_issue_url",
"tests/test_document.py::ArticleTests::test_journal_abbreviated_title",
"tests/test_document.py::ArticleTests::test_journal_acronym",
"tests/test_document.py::ArticleTests::test_journal_title",
"tests/test_document.py::ArticleTests::test_keywords",
"tests/test_document.py::ArticleTests::test_keywords_iso639_2",
"tests/test_document.py::ArticleTests::test_keywords_with_undefined_language",
"tests/test_document.py::ArticleTests::test_keywords_without_subfield_k",
"tests/test_document.py::ArticleTests::test_keywords_without_subfield_l",
"tests/test_document.py::ArticleTests::test_languages_field_fulltexts",
"tests/test_document.py::ArticleTests::test_languages_field_v40",
"tests/test_document.py::ArticleTests::test_last_page",
"tests/test_document.py::ArticleTests::test_mixed_affiliations",
"tests/test_document.py::ArticleTests::test_normalized_affiliations",
"tests/test_document.py::ArticleTests::test_normalized_affiliations_undefined_ISO_3166_CODE",
"tests/test_document.py::ArticleTests::test_normalized_affiliations_without_p",
"tests/test_document.py::ArticleTests::test_original_abstract_with_just_one_language_defined",
"tests/test_document.py::ArticleTests::test_original_abstract_with_language_defined",
"tests/test_document.py::ArticleTests::test_original_abstract_with_language_defined_but_different_of_the_article_original_language",
"tests/test_document.py::ArticleTests::test_original_abstract_without_language_defined",
"tests/test_document.py::ArticleTests::test_original_language_invalid_iso639_2",
"tests/test_document.py::ArticleTests::test_original_language_iso639_2",
"tests/test_document.py::ArticleTests::test_original_language_original",
"tests/test_document.py::ArticleTests::test_original_title_with_just_one_language_defined",
"tests/test_document.py::ArticleTests::test_original_title_with_language_defined",
"tests/test_document.py::ArticleTests::test_original_title_with_language_defined_but_different_of_the_article_original_language",
"tests/test_document.py::ArticleTests::test_original_title_without_language_defined",
"tests/test_document.py::ArticleTests::test_pdf_url",
"tests/test_document.py::ArticleTests::test_processing_date",
"tests/test_document.py::ArticleTests::test_project_name",
"tests/test_document.py::ArticleTests::test_project_sponsors",
"tests/test_document.py::ArticleTests::test_publication_contract",
"tests/test_document.py::ArticleTests::test_publication_date",
"tests/test_document.py::ArticleTests::test_publisher_id",
"tests/test_document.py::ArticleTests::test_publisher_loc",
"tests/test_document.py::ArticleTests::test_publisher_name",
"tests/test_document.py::ArticleTests::test_receive_date",
"tests/test_document.py::ArticleTests::test_review_date",
"tests/test_document.py::ArticleTests::test_start_page",
"tests/test_document.py::ArticleTests::test_start_page_loaded_crazy_legacy_way_1",
"tests/test_document.py::ArticleTests::test_start_page_loaded_crazy_legacy_way_2",
"tests/test_document.py::ArticleTests::test_start_page_loaded_through_xml",
"tests/test_document.py::ArticleTests::test_subject_areas",
"tests/test_document.py::ArticleTests::test_supplement_issue",
"tests/test_document.py::ArticleTests::test_supplement_volume",
"tests/test_document.py::ArticleTests::test_thesis_degree",
"tests/test_document.py::ArticleTests::test_thesis_organization",
"tests/test_document.py::ArticleTests::test_thesis_organization_and_division",
"tests/test_document.py::ArticleTests::test_thesis_organization_without_name",
"tests/test_document.py::ArticleTests::test_translated_abstracts",
"tests/test_document.py::ArticleTests::test_translated_abstracts_without_v83",
"tests/test_document.py::ArticleTests::test_translated_abtracts_iso639_2",
"tests/test_document.py::ArticleTests::test_translated_titles",
"tests/test_document.py::ArticleTests::test_translated_titles_iso639_2",
"tests/test_document.py::ArticleTests::test_translated_titles_without_v12",
"tests/test_document.py::ArticleTests::test_volume",
"tests/test_document.py::ArticleTests::test_whitwout_acceptance_date",
"tests/test_document.py::ArticleTests::test_whitwout_ahead_publication_date",
"tests/test_document.py::ArticleTests::test_whitwout_receive_date",
"tests/test_document.py::ArticleTests::test_whitwout_review_date",
"tests/test_document.py::ArticleTests::test_without_affiliations",
"tests/test_document.py::ArticleTests::test_without_authors",
"tests/test_document.py::ArticleTests::test_without_citations",
"tests/test_document.py::ArticleTests::test_without_collection_acronym",
"tests/test_document.py::ArticleTests::test_without_corporative_authors",
"tests/test_document.py::ArticleTests::test_without_document_type",
"tests/test_document.py::ArticleTests::test_without_doi",
"tests/test_document.py::ArticleTests::test_without_html_url",
"tests/test_document.py::ArticleTests::test_without_issue",
"tests/test_document.py::ArticleTests::test_without_issue_url",
"tests/test_document.py::ArticleTests::test_without_journal_abbreviated_title",
"tests/test_document.py::ArticleTests::test_without_journal_acronym",
"tests/test_document.py::ArticleTests::test_without_journal_title",
"tests/test_document.py::ArticleTests::test_without_keywords",
"tests/test_document.py::ArticleTests::test_without_last_page",
"tests/test_document.py::ArticleTests::test_without_normalized_affiliations",
"tests/test_document.py::ArticleTests::test_without_original_abstract",
"tests/test_document.py::ArticleTests::test_without_original_title",
"tests/test_document.py::ArticleTests::test_without_pages",
"tests/test_document.py::ArticleTests::test_without_pdf_url",
"tests/test_document.py::ArticleTests::test_without_processing_date",
"tests/test_document.py::ArticleTests::test_without_project_name",
"tests/test_document.py::ArticleTests::test_without_project_sponsor",
"tests/test_document.py::ArticleTests::test_without_publication_contract",
"tests/test_document.py::ArticleTests::test_without_publication_date",
"tests/test_document.py::ArticleTests::test_without_publisher_id",
"tests/test_document.py::ArticleTests::test_without_publisher_loc",
"tests/test_document.py::ArticleTests::test_without_publisher_name",
"tests/test_document.py::ArticleTests::test_without_scielo_domain",
"tests/test_document.py::ArticleTests::test_without_scielo_domain_article_v69",
"tests/test_document.py::ArticleTests::test_without_scielo_domain_article_v69_and_with_title_v690",
"tests/test_document.py::ArticleTests::test_without_scielo_domain_title_v690",
"tests/test_document.py::ArticleTests::test_without_start_page",
"tests/test_document.py::ArticleTests::test_without_subject_areas",
"tests/test_document.py::ArticleTests::test_without_suplement_issue",
"tests/test_document.py::ArticleTests::test_without_supplement_volume",
"tests/test_document.py::ArticleTests::test_without_thesis_degree",
"tests/test_document.py::ArticleTests::test_without_thesis_organization",
"tests/test_document.py::ArticleTests::test_without_volume",
"tests/test_document.py::ArticleTests::test_without_wos_citation_indexes",
"tests/test_document.py::ArticleTests::test_without_wos_subject_areas",
"tests/test_document.py::ArticleTests::test_wos_citation_indexes",
"tests/test_document.py::ArticleTests::test_wos_subject_areas",
"tests/test_document.py::CitationTest::test_a_link_access_date",
"tests/test_document.py::CitationTest::test_analytic_institution_for_a_article_citation",
"tests/test_document.py::CitationTest::test_analytic_institution_for_a_book_citation",
"tests/test_document.py::CitationTest::test_article_title",
"tests/test_document.py::CitationTest::test_article_without_title",
"tests/test_document.py::CitationTest::test_authors_article",
"tests/test_document.py::CitationTest::test_authors_book",
"tests/test_document.py::CitationTest::test_authors_link",
"tests/test_document.py::CitationTest::test_authors_thesis",
"tests/test_document.py::CitationTest::test_book_chapter_title",
"tests/test_document.py::CitationTest::test_book_edition",
"tests/test_document.py::CitationTest::test_book_volume",
"tests/test_document.py::CitationTest::test_book_without_chapter_title",
"tests/test_document.py::CitationTest::test_citation_sample_congress",
"tests/test_document.py::CitationTest::test_citation_sample_link",
"tests/test_document.py::CitationTest::test_citation_sample_link_without_comment",
"tests/test_document.py::CitationTest::test_conference_edition",
"tests/test_document.py::CitationTest::test_conference_name",
"tests/test_document.py::CitationTest::test_conference_sponsor",
"tests/test_document.py::CitationTest::test_conference_without_name",
"tests/test_document.py::CitationTest::test_conference_without_sponsor",
"tests/test_document.py::CitationTest::test_date",
"tests/test_document.py::CitationTest::test_doi",
"tests/test_document.py::CitationTest::test_editor",
"tests/test_document.py::CitationTest::test_end_page_14",
"tests/test_document.py::CitationTest::test_end_page_514",
"tests/test_document.py::CitationTest::test_end_page_withdout_data",
"tests/test_document.py::CitationTest::test_first_author_article",
"tests/test_document.py::CitationTest::test_first_author_book",
"tests/test_document.py::CitationTest::test_first_author_link",
"tests/test_document.py::CitationTest::test_first_author_thesis",
"tests/test_document.py::CitationTest::test_first_author_without_monographic_authors",
"tests/test_document.py::CitationTest::test_first_author_without_monographic_authors_but_not_a_book_citation",
"tests/test_document.py::CitationTest::test_index_number",
"tests/test_document.py::CitationTest::test_institutions_all_fields",
"tests/test_document.py::CitationTest::test_institutions_v11",
"tests/test_document.py::CitationTest::test_institutions_v17",
"tests/test_document.py::CitationTest::test_institutions_v29",
"tests/test_document.py::CitationTest::test_institutions_v50",
"tests/test_document.py::CitationTest::test_institutions_v58",
"tests/test_document.py::CitationTest::test_invalid_edition",
"tests/test_document.py::CitationTest::test_isbn",
"tests/test_document.py::CitationTest::test_isbn_but_not_a_book",
"tests/test_document.py::CitationTest::test_issn",
"tests/test_document.py::CitationTest::test_issn_but_not_an_article",
"tests/test_document.py::CitationTest::test_issue_part",
"tests/test_document.py::CitationTest::test_issue_title",
"tests/test_document.py::CitationTest::test_journal_issue",
"tests/test_document.py::CitationTest::test_journal_volume",
"tests/test_document.py::CitationTest::test_link",
"tests/test_document.py::CitationTest::test_link_title",
"tests/test_document.py::CitationTest::test_link_without_title",
"tests/test_document.py::CitationTest::test_monographic_authors",
"tests/test_document.py::CitationTest::test_monographic_first_author",
"tests/test_document.py::CitationTest::test_pages_14",
"tests/test_document.py::CitationTest::test_pages_514",
"tests/test_document.py::CitationTest::test_pages_withdout_data",
"tests/test_document.py::CitationTest::test_publication_type_article",
"tests/test_document.py::CitationTest::test_publication_type_book",
"tests/test_document.py::CitationTest::test_publication_type_conference",
"tests/test_document.py::CitationTest::test_publication_type_link",
"tests/test_document.py::CitationTest::test_publication_type_thesis",
"tests/test_document.py::CitationTest::test_publication_type_undefined",
"tests/test_document.py::CitationTest::test_publisher",
"tests/test_document.py::CitationTest::test_publisher_address",
"tests/test_document.py::CitationTest::test_publisher_address_without_e",
"tests/test_document.py::CitationTest::test_series_book",
"tests/test_document.py::CitationTest::test_series_but_neither_journal_book_or_conference_citation",
"tests/test_document.py::CitationTest::test_series_conference",
"tests/test_document.py::CitationTest::test_series_journal",
"tests/test_document.py::CitationTest::test_source_book_title",
"tests/test_document.py::CitationTest::test_source_journal",
"tests/test_document.py::CitationTest::test_source_journal_without_journal_title",
"tests/test_document.py::CitationTest::test_sponsor",
"tests/test_document.py::CitationTest::test_start_page_14",
"tests/test_document.py::CitationTest::test_start_page_514",
"tests/test_document.py::CitationTest::test_start_page_withdout_data",
"tests/test_document.py::CitationTest::test_thesis_institution",
"tests/test_document.py::CitationTest::test_thesis_title",
"tests/test_document.py::CitationTest::test_thesis_without_title",
"tests/test_document.py::CitationTest::test_title_when_article_citation",
"tests/test_document.py::CitationTest::test_title_when_conference_citation",
"tests/test_document.py::CitationTest::test_title_when_link_citation",
"tests/test_document.py::CitationTest::test_title_when_thesis_citation",
"tests/test_document.py::CitationTest::test_with_volume_but_not_a_journal_article_neither_a_book",
"tests/test_document.py::CitationTest::test_without_analytic_institution",
"tests/test_document.py::CitationTest::test_without_authors",
"tests/test_document.py::CitationTest::test_without_date",
"tests/test_document.py::CitationTest::test_without_doi",
"tests/test_document.py::CitationTest::test_without_edition",
"tests/test_document.py::CitationTest::test_without_editor",
"tests/test_document.py::CitationTest::test_without_first_author",
"tests/test_document.py::CitationTest::test_without_index_number",
"tests/test_document.py::CitationTest::test_without_institutions",
"tests/test_document.py::CitationTest::test_without_issue",
"tests/test_document.py::CitationTest::test_without_issue_part",
"tests/test_document.py::CitationTest::test_without_issue_title",
"tests/test_document.py::CitationTest::test_without_link",
"tests/test_document.py::CitationTest::test_without_monographic_authors",
"tests/test_document.py::CitationTest::test_without_monographic_authors_but_not_a_book_citation",
"tests/test_document.py::CitationTest::test_without_publisher",
"tests/test_document.py::CitationTest::test_without_publisher_address",
"tests/test_document.py::CitationTest::test_without_series",
"tests/test_document.py::CitationTest::test_without_sponsor",
"tests/test_document.py::CitationTest::test_without_thesis_institution",
"tests/test_document.py::CitationTest::test_without_volume"
]
| []
| BSD 2-Clause "Simplified" License | 154 | [
"setup.py",
"xylose/scielodocument.py"
]
| [
"setup.py",
"xylose/scielodocument.py"
]
|
|
eve-val__evelink-212 | db9e7bfeef9478a047f4f7db4f88324e185e4873 | 2015-06-01 04:28:15 | 4061903e92787ce22eddb75b3776c407fb70837b | diff --git a/evelink/char.py b/evelink/char.py
index 609d76d..0514dc6 100644
--- a/evelink/char.py
+++ b/evelink/char.py
@@ -137,7 +137,7 @@ class Char(object):
"""Get a list of PI routing entries for a character's planet."""
return api.APIResult(parse_planetary_routes(api_result.result), api_result.timestamp, api_result.expires)
- @auto_call('char/KillLog', map_params={'before_kill': 'beforeKillID'})
+ @auto_call('char/KillMails', map_params={'before_kill': 'beforeKillID'})
def kills(self, before_kill=None, api_result=None):
"""Look up recent kills for a character.
@@ -147,6 +147,19 @@ class Char(object):
return api.APIResult(parse_kills(api_result.result), api_result.timestamp, api_result.expires)
+ @auto_call('char/KillLog', map_params={'before_kill': 'beforeKillID'})
+ def kill_log(self, before_kill=None, api_result=None):
+ """Look up recent kills for a character.
+
+ Note: this method uses the long cache version of the endpoint. If you
+ want to use the short cache version (recommended), use kills().
+
+ before_kill:
+ Optional. Only show kills before this kill id. (Used for paging.)
+ """
+
+ return api.APIResult(parse_kills(api_result.result), api_result.timestamp, api_result.expires)
+
@auto_call('char/Notifications')
def notifications(self, api_result=None):
"""Returns the message headers for notifications."""
diff --git a/evelink/corp.py b/evelink/corp.py
index 5e98fcc..ebf1133 100644
--- a/evelink/corp.py
+++ b/evelink/corp.py
@@ -136,7 +136,7 @@ class Corp(object):
return api.APIResult(results, api_result.timestamp, api_result.expires)
- @api.auto_call('corp/KillLog', map_params={'before_kill': 'beforeKillID'})
+ @api.auto_call('corp/KillMails', map_params={'before_kill': 'beforeKillID'})
def kills(self, before_kill=None, api_result=None):
"""Look up recent kills for a corporation.
@@ -146,6 +146,19 @@ class Corp(object):
return api.APIResult(parse_kills(api_result.result), api_result.timestamp, api_result.expires)
+ @api.auto_call('corp/KillLog', map_params={'before_kill': 'beforeKillID'})
+ def kill_log(self, before_kill=None, api_result=None):
+ """Look up recent kills for a corporation.
+
+ Note: this method uses the long cache version of the endpoint. If you
+ want to use the short cache version (recommended), use kills().
+
+ before_kill:
+ Optional. Only show kills before this kill id. (Used for paging.)
+ """
+
+ return api.APIResult(parse_kills(api_result.result), api_result.timestamp, api_result.expires)
+
@api.auto_call('corp/AccountBalance')
def wallet_info(self, api_result=None):
"""Get information about corp wallets."""
| kills() uses KillLog rather than KillMails
KillLog is a long cache endpoint, so you can only query it once per cache window.
KillMails isn't a long cache, so you can query it as often as you want. | eve-val/evelink | diff --git a/tests/test_char.py b/tests/test_char.py
index ca37666..64e3631 100644
--- a/tests/test_char.py
+++ b/tests/test_char.py
@@ -291,6 +291,23 @@ class CharTestCase(APITestCase):
self.assertEqual(current, 12345)
self.assertEqual(expires, 67890)
+ self.assertEqual(result, mock.sentinel.kills)
+ self.assertEqual(self.api.mock_calls, [
+ mock.call.get('char/KillMails', params={'characterID': 1}),
+ ])
+ self.assertEqual(mock_parse.mock_calls, [
+ mock.call(mock.sentinel.api_result),
+ ])
+
+ @mock.patch('evelink.char.parse_kills')
+ def test_kill_log(self, mock_parse):
+ self.api.get.return_value = API_RESULT_SENTINEL
+ mock_parse.return_value = mock.sentinel.kills
+
+ result, current, expires = self.char.kill_log()
+ self.assertEqual(current, 12345)
+ self.assertEqual(expires, 67890)
+
self.assertEqual(result, mock.sentinel.kills)
self.assertEqual(self.api.mock_calls, [
mock.call.get('char/KillLog', params={'characterID': 1}),
@@ -304,7 +321,7 @@ class CharTestCase(APITestCase):
self.char.kills(before_kill=12345)
self.assertEqual(self.api.mock_calls, [
- mock.call.get('char/KillLog', params={'characterID': 1, 'beforeKillID': 12345}),
+ mock.call.get('char/KillMails', params={'characterID': 1, 'beforeKillID': 12345}),
])
def test_character_sheet(self):
diff --git a/tests/test_corp.py b/tests/test_corp.py
index 0e96533..5738401 100644
--- a/tests/test_corp.py
+++ b/tests/test_corp.py
@@ -165,6 +165,23 @@ class CorpTestCase(APITestCase):
result, current, expires = self.corp.kills()
+ self.assertEqual(result, mock.sentinel.kills)
+ self.assertEqual(self.api.mock_calls, [
+ mock.call.get('corp/KillMails', params={}),
+ ])
+ self.assertEqual(mock_parse.mock_calls, [
+ mock.call(mock.sentinel.api_result),
+ ])
+ self.assertEqual(current, 12345)
+ self.assertEqual(expires, 67890)
+
+ @mock.patch('evelink.corp.parse_kills')
+ def test_kill_log(self, mock_parse):
+ self.api.get.return_value = API_RESULT_SENTINEL
+ mock_parse.return_value = mock.sentinel.kills
+
+ result, current, expires = self.corp.kill_log()
+
self.assertEqual(result, mock.sentinel.kills)
self.assertEqual(self.api.mock_calls, [
mock.call.get('corp/KillLog', params={}),
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 2
} | 0.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"mock",
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements_py3.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
-e git+https://github.com/eve-val/evelink.git@db9e7bfeef9478a047f4f7db4f88324e185e4873#egg=EVELink
exceptiongroup==1.2.2
idna==3.10
iniconfig==2.1.0
mock==1.0b1
nose==1.1.2
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
requests==2.32.3
six==1.17.0
tomli==2.2.1
urllib3==2.3.0
| name: evelink
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- argparse==1.4.0
- certifi==2025.1.31
- charset-normalizer==3.4.1
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- mock==1.0b1
- nose==1.1.2
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- requests==2.32.3
- six==1.17.0
- tomli==2.2.1
- urllib3==2.3.0
prefix: /opt/conda/envs/evelink
| [
"tests/test_char.py::CharTestCase::test_kill_log",
"tests/test_char.py::CharTestCase::test_kills",
"tests/test_char.py::CharTestCase::test_kills_paged",
"tests/test_corp.py::CorpTestCase::test_kill_log",
"tests/test_corp.py::CorpTestCase::test_kills"
]
| []
| [
"tests/test_char.py::CharTestCase::test_assets",
"tests/test_char.py::CharTestCase::test_blueprints",
"tests/test_char.py::CharTestCase::test_calendar_attendees",
"tests/test_char.py::CharTestCase::test_calendar_events",
"tests/test_char.py::CharTestCase::test_character_sheet",
"tests/test_char.py::CharTestCase::test_contact_notifications",
"tests/test_char.py::CharTestCase::test_contacts",
"tests/test_char.py::CharTestCase::test_contract_bids",
"tests/test_char.py::CharTestCase::test_contract_items",
"tests/test_char.py::CharTestCase::test_contracts",
"tests/test_char.py::CharTestCase::test_current_training",
"tests/test_char.py::CharTestCase::test_event_attendees",
"tests/test_char.py::CharTestCase::test_faction_warfare_stats",
"tests/test_char.py::CharTestCase::test_industry_jobs",
"tests/test_char.py::CharTestCase::test_industry_jobs_history",
"tests/test_char.py::CharTestCase::test_locations",
"tests/test_char.py::CharTestCase::test_mailing_lists",
"tests/test_char.py::CharTestCase::test_medals",
"tests/test_char.py::CharTestCase::test_message_bodies",
"tests/test_char.py::CharTestCase::test_messages",
"tests/test_char.py::CharTestCase::test_notification_texts",
"tests/test_char.py::CharTestCase::test_notifications",
"tests/test_char.py::CharTestCase::test_orders",
"tests/test_char.py::CharTestCase::test_planetary_colonies",
"tests/test_char.py::CharTestCase::test_planetary_links",
"tests/test_char.py::CharTestCase::test_planetary_pins",
"tests/test_char.py::CharTestCase::test_planetary_routes",
"tests/test_char.py::CharTestCase::test_research",
"tests/test_char.py::CharTestCase::test_skill_queue",
"tests/test_char.py::CharTestCase::test_standings",
"tests/test_char.py::CharTestCase::test_wallet_balance",
"tests/test_char.py::CharTestCase::test_wallet_info",
"tests/test_char.py::CharTestCase::test_wallet_journal",
"tests/test_char.py::CharTestCase::test_wallet_limit",
"tests/test_char.py::CharTestCase::test_wallet_paged",
"tests/test_char.py::CharTestCase::test_wallet_transactions_limit",
"tests/test_char.py::CharTestCase::test_wallet_transactions_paged",
"tests/test_char.py::CharTestCase::test_wallet_transcations",
"tests/test_corp.py::CorpTestCase::test_assets",
"tests/test_corp.py::CorpTestCase::test_blueprints",
"tests/test_corp.py::CorpTestCase::test_contacts",
"tests/test_corp.py::CorpTestCase::test_container_log",
"tests/test_corp.py::CorpTestCase::test_contract_bids",
"tests/test_corp.py::CorpTestCase::test_contract_items",
"tests/test_corp.py::CorpTestCase::test_contracts",
"tests/test_corp.py::CorpTestCase::test_corporation_sheet",
"tests/test_corp.py::CorpTestCase::test_corporation_sheet_public",
"tests/test_corp.py::CorpTestCase::test_customs_offices",
"tests/test_corp.py::CorpTestCase::test_facilities",
"tests/test_corp.py::CorpTestCase::test_faction_warfare_stats",
"tests/test_corp.py::CorpTestCase::test_industry_jobs",
"tests/test_corp.py::CorpTestCase::test_industry_jobs_history",
"tests/test_corp.py::CorpTestCase::test_locations",
"tests/test_corp.py::CorpTestCase::test_medals",
"tests/test_corp.py::CorpTestCase::test_member_medals",
"tests/test_corp.py::CorpTestCase::test_members",
"tests/test_corp.py::CorpTestCase::test_members_not_extended",
"tests/test_corp.py::CorpTestCase::test_npc_standings",
"tests/test_corp.py::CorpTestCase::test_orders",
"tests/test_corp.py::CorpTestCase::test_permissions",
"tests/test_corp.py::CorpTestCase::test_permissions_log",
"tests/test_corp.py::CorpTestCase::test_shareholders",
"tests/test_corp.py::CorpTestCase::test_starbase_details",
"tests/test_corp.py::CorpTestCase::test_starbases",
"tests/test_corp.py::CorpTestCase::test_station_services",
"tests/test_corp.py::CorpTestCase::test_stations",
"tests/test_corp.py::CorpTestCase::test_titles",
"tests/test_corp.py::CorpTestCase::test_wallet_info",
"tests/test_corp.py::CorpTestCase::test_wallet_journal",
"tests/test_corp.py::CorpTestCase::test_wallet_journal_account_key",
"tests/test_corp.py::CorpTestCase::test_wallet_journal_limit",
"tests/test_corp.py::CorpTestCase::test_wallet_journal_paged",
"tests/test_corp.py::CorpTestCase::test_wallet_transactions_account_key",
"tests/test_corp.py::CorpTestCase::test_wallet_transactions_limit",
"tests/test_corp.py::CorpTestCase::test_wallet_transactions_paged",
"tests/test_corp.py::CorpTestCase::test_wallet_transcations"
]
| []
| MIT License | 155 | [
"evelink/corp.py",
"evelink/char.py"
]
| [
"evelink/corp.py",
"evelink/char.py"
]
|
|
pre-commit__pre-commit-239 | 1c46446427ab0dfa6293221426b855420533ef8d | 2015-06-02 19:44:23 | 5791d84236d82f8aa8609c3ff1c69a991d8c6607 | coveralls:
[Coverage Status](https://coveralls.io/builds/2711896)
Coverage decreased (-0.03%) to 99.97% when pulling **971060d4b9756a9d102e5bb3ee4d04027d35011c on Lucas-C:master** into **1c46446427ab0dfa6293221426b855420533ef8d on pre-commit:master**.
asottile: ++ thanks for the quick code! I was going to get to this later but you beat me to it! test-n-ship | diff --git a/pre_commit/commands/autoupdate.py b/pre_commit/commands/autoupdate.py
index 1a24b09..9e1e79f 100644
--- a/pre_commit/commands/autoupdate.py
+++ b/pre_commit/commands/autoupdate.py
@@ -8,6 +8,7 @@ from aspy.yaml import ordered_load
import pre_commit.constants as C
from pre_commit.clientlib.validate_config import CONFIG_JSON_SCHEMA
+from pre_commit.clientlib.validate_config import is_local_hooks
from pre_commit.clientlib.validate_config import load_config
from pre_commit.jsonschema_extensions import remove_defaults
from pre_commit.ordereddict import OrderedDict
@@ -67,6 +68,8 @@ def autoupdate(runner):
)
for repo_config in input_configs:
+ if is_local_hooks(repo_config):
+ continue
sys.stdout.write('Updating {0}...'.format(repo_config['repo']))
sys.stdout.flush()
try:
diff --git a/pre_commit/repository.py b/pre_commit/repository.py
index 71cc356..83a3c01 100644
--- a/pre_commit/repository.py
+++ b/pre_commit/repository.py
@@ -125,9 +125,8 @@ class Repository(object):
class LocalRepository(Repository):
- def __init__(self, repo_config, repo_path_getter=None):
- repo_path_getter = None
- super(LocalRepository, self).__init__(repo_config, repo_path_getter)
+ def __init__(self, repo_config):
+ super(LocalRepository, self).__init__(repo_config, None)
@cached_property
def hooks(self):
| pre-commit autoupdate fails on `local` hooks repos
```
$ pre-commit autoupdate
Updating [email protected]:pre-commit/pre-commit-hooks...updating 9ce45609a92f648c87b42207410386fd69a5d1e5 -> cf550fcab3f12015f8676b8278b30e1a5bc10e70.
Updating [email protected]:pre-commit/pre-commit...updating 4352d45451296934bc17494073b82bcacca3205c -> 1c46446427ab0dfa6293221426b855420533ef8d.
Updating [email protected]:asottile/reorder_python_imports...updating aeda21eb7df6af8c9f6cd990abb086375c71c953 -> 3d86483455ab5bd06cc1069fdd5ac57be5463f10.
Updating local...An unexpected error has occurred: AttributeError: 'NoneType' object has no attribute 'repo_path'
Check the log at ~/.pre-commit/pre-commit.log
(venv-pre_commit)asottile@work:~/workspace/pre-commit$ cat ~/.pre-commit/pre-commit.log
An unexpected error has occurred: AttributeError: 'NoneType' object has no attribute 'repo_path'
Traceback (most recent call last):
File "/home/asottile/workspace/pre-commit/pre_commit/error_handler.py", line 34, in error_handler
yield
File "/home/asottile/workspace/pre-commit/pre_commit/main.py", line 142, in main
return autoupdate(runner)
File "/home/asottile/workspace/pre-commit/pre_commit/commands/autoupdate.py", line 73, in autoupdate
new_repo_config = _update_repository(repo_config, runner)
File "/home/asottile/workspace/pre-commit/pre_commit/commands/autoupdate.py", line 33, in _update_repository
with cwd(repo.repo_path_getter.repo_path):
AttributeError: 'NoneType' object has no attribute 'repo_path'
(venv-pre_commit)asottile@work:~/workspace/pre-commit$ git diff
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 397ee72..20393a7 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -20,3 +20,10 @@
sha: aeda21eb7df6af8c9f6cd990abb086375c71c953
hooks:
- id: reorder-python-imports
+- repo: local
+ hooks:
+ - id: herp
+ name: Herp
+ entry: echo derp
+ language: system
+ files: ^$
``` | pre-commit/pre-commit | diff --git a/testing/fixtures.py b/testing/fixtures.py
index 1c0184a..820a72b 100644
--- a/testing/fixtures.py
+++ b/testing/fixtures.py
@@ -35,6 +35,19 @@ def make_repo(tmpdir_factory, repo_source):
return path
+def config_with_local_hooks():
+ return OrderedDict((
+ ('repo', 'local'),
+ ('hooks', [OrderedDict((
+ ('id', 'do_not_commit'),
+ ('name', 'Block if "DO NOT COMMIT" is found'),
+ ('entry', 'DO NOT COMMIT'),
+ ('language', 'pcre'),
+ ('files', '^(.*)$'),
+ ))])
+ ))
+
+
def make_config_from_repo(repo_path, sha=None, hooks=None, check=True):
manifest = load_manifest(os.path.join(repo_path, C.MANIFEST_FILE))
config = OrderedDict((
diff --git a/tests/commands/autoupdate_test.py b/tests/commands/autoupdate_test.py
index 5dbc439..771e67b 100644
--- a/tests/commands/autoupdate_test.py
+++ b/tests/commands/autoupdate_test.py
@@ -13,6 +13,9 @@ from pre_commit.runner import Runner
from pre_commit.util import cmd_output
from pre_commit.util import cwd
from testing.auto_namedtuple import auto_namedtuple
+from testing.fixtures import add_config_to_repo
+from testing.fixtures import config_with_local_hooks
+from testing.fixtures import git_dir
from testing.fixtures import make_config_from_repo
from testing.fixtures import make_repo
from testing.fixtures import write_config
@@ -137,3 +140,10 @@ def test_autoupdate_hook_disappearing_repo(
after = open(C.CONFIG_FILE).read()
assert ret == 1
assert before == after
+
+
+def test_autoupdate_local_hooks(tmpdir_factory):
+ git_path = git_dir(tmpdir_factory)
+ config = config_with_local_hooks()
+ path = add_config_to_repo(git_path, config)
+ assert autoupdate(Runner(path)) == 0
diff --git a/tests/repository_test.py b/tests/repository_test.py
index f2e8850..e7ad227 100644
--- a/tests/repository_test.py
+++ b/tests/repository_test.py
@@ -12,10 +12,10 @@ from pre_commit.clientlib.validate_config import CONFIG_JSON_SCHEMA
from pre_commit.clientlib.validate_config import validate_config_extra
from pre_commit.jsonschema_extensions import apply_defaults
from pre_commit.languages.python import PythonEnv
-from pre_commit.ordereddict import OrderedDict
from pre_commit.repository import Repository
from pre_commit.util import cmd_output
from pre_commit.util import cwd
+from testing.fixtures import config_with_local_hooks
from testing.fixtures import git_dir
from testing.fixtures import make_config_from_repo
from testing.fixtures import make_repo
@@ -404,16 +404,7 @@ def test_tags_on_repositories(in_tmpdir, tmpdir_factory, store):
def test_local_repository():
- config = OrderedDict((
- ('repo', 'local'),
- ('hooks', [OrderedDict((
- ('id', 'do_not_commit'),
- ('name', 'Block if "DO NOT COMMIT" is found'),
- ('entry', 'DO NOT COMMIT'),
- ('language', 'pcre'),
- ('files', '^(.*)$'),
- ))])
- ))
+ config = config_with_local_hooks()
local_repo = Repository.create(config, 'dummy')
with pytest.raises(NotImplementedError):
local_repo.sha
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_git_commit_hash",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 2
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aspy.yaml==1.3.0
astroid==1.3.2
attrs==22.2.0
cached-property==1.5.2
certifi==2021.5.30
coverage==6.2
distlib==0.3.9
filelock==3.4.1
flake8==5.0.4
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
jsonschema==3.2.0
logilab-common==1.9.7
mccabe==0.7.0
mock==5.2.0
mypy-extensions==1.0.0
nodeenv==1.6.0
ordereddict==1.1
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
-e git+https://github.com/pre-commit/pre-commit.git@1c46446427ab0dfa6293221426b855420533ef8d#egg=pre_commit
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
pylint==1.3.1
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
PyYAML==6.0.1
simplejson==3.20.1
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
virtualenv==20.17.1
zipp==3.6.0
| name: pre-commit
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- argparse==1.4.0
- aspy-yaml==1.3.0
- astroid==1.3.2
- attrs==22.2.0
- cached-property==1.5.2
- coverage==6.2
- distlib==0.3.9
- filelock==3.4.1
- flake8==5.0.4
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- jsonschema==3.2.0
- logilab-common==1.9.7
- mccabe==0.7.0
- mock==5.2.0
- mypy-extensions==1.0.0
- nodeenv==1.6.0
- ordereddict==1.1
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pylint==1.3.1
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pyyaml==6.0.1
- simplejson==3.20.1
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- virtualenv==20.17.1
- zipp==3.6.0
prefix: /opt/conda/envs/pre-commit
| [
"tests/commands/autoupdate_test.py::test_autoupdate_local_hooks"
]
| [
"tests/commands/autoupdate_test.py::test_up_to_date_repo",
"tests/commands/autoupdate_test.py::test_autoupdate_up_to_date_repo",
"tests/commands/autoupdate_test.py::test_out_of_date_repo",
"tests/commands/autoupdate_test.py::test_autoupdate_out_of_date_repo",
"tests/commands/autoupdate_test.py::test_hook_disppearing_repo_raises",
"tests/commands/autoupdate_test.py::test_autoupdate_hook_disappearing_repo",
"tests/repository_test.py::test_python_hook",
"tests/repository_test.py::test_python_hook_args_with_spaces",
"tests/repository_test.py::test_switch_language_versions_doesnt_clobber",
"tests/repository_test.py::test_versioned_python_hook",
"tests/repository_test.py::test_run_a_node_hook",
"tests/repository_test.py::test_run_versioned_node_hook",
"tests/repository_test.py::test_run_a_ruby_hook",
"tests/repository_test.py::test_run_versioned_ruby_hook",
"tests/repository_test.py::test_system_hook_with_spaces",
"tests/repository_test.py::test_run_a_script_hook",
"tests/repository_test.py::test_run_hook_with_spaced_args",
"tests/repository_test.py::test_pcre_hook_no_match",
"tests/repository_test.py::test_pcre_hook_matching",
"tests/repository_test.py::test_pcre_many_files",
"tests/repository_test.py::test_cwd_of_hook",
"tests/repository_test.py::test_lots_of_files",
"tests/repository_test.py::test_languages",
"tests/repository_test.py::test_reinstall",
"tests/repository_test.py::test_control_c_control_c_on_install",
"tests/repository_test.py::test_really_long_file_paths",
"tests/repository_test.py::test_config_overrides_repo_specifics",
"tests/repository_test.py::test_tags_on_repositories"
]
| [
"tests/repository_test.py::test_repo_url",
"tests/repository_test.py::test_sha",
"tests/repository_test.py::test_local_repository"
]
| []
| MIT License | 156 | [
"pre_commit/commands/autoupdate.py",
"pre_commit/repository.py"
]
| [
"pre_commit/commands/autoupdate.py",
"pre_commit/repository.py"
]
|
ethanrowe__python-merky-6 | a624893701ffa9b3f1b7c990c62e62725383b0cb | 2015-06-06 15:37:29 | a624893701ffa9b3f1b7c990c62e62725383b0cb | diff --git a/README.md b/README.md
index faeee9a..b504439 100644
--- a/README.md
+++ b/README.md
@@ -197,6 +197,44 @@ Some example uses of this:
See `merky.cases.tokendict.TokenDict` for more.
+# Storage options
+
+Complex storage options are outside the scope of `merky`. However, for convenience, `merky`
+does provide some minimal storage options that can serve in some cases.
+
+An arbitrary transformed structure can be stored/restored using various classes found in the
+`merky.store.structure` module. For instance:
+
+```python
+# Suppose this gives me some `merky.cases.attrgraph.AttributeGraph` instance.
+g = get_my_graph()
+
+# Now I'm going to store it in a file.
+transformed = merky.AnnotationTransformer().transform(g)
+
+# Get a JSON file writer for the path 'my-graph.json'
+store = merky.store.structure.JSONFileWriteStructure('my-graph.json')
+
+# Populate the writer with the transformed data.
+store.populate(transformed)
+
+# Write the file.
+store.close()
+```
+
+Later, you can restore it like so:
+
+```python
+# Get the reader.
+store = merky.store.structure.JSONFileReadStructure('my-graph.json')
+
+# And away we go.
+g = merky.cases.attrgraph.AttributeGraph.from_token(store.head, store.get)
+```
+
+For a more complex example, see `merky.test.misc.graph_storage_test`, which
+assembles a `TokenDict` of `AttributeGraphs`, writes to JSON, then restores from JSON.
+
# Don't wear a tie.
It's an anachronistic absurdity that needs to be abolished. Direct your respect elsewhere.
diff --git a/merky/cases/tokendict.py b/merky/cases/tokendict.py
index 3c6ebbc..aba991d 100644
--- a/merky/cases/tokendict.py
+++ b/merky/cases/tokendict.py
@@ -82,13 +82,14 @@ class TokenDict(object):
to be converted to a `TokenDict`.
`reader`: a function that, given a "token", returns the
corresponding structure.
- `builder`: (Optional) a function to apply to each value in the
- `TokenDict` when assembling; use this to, for instance,
+ `builder`: (Optional) a function to apply to each token in the
+ `TokenDict` when assembling; it will be called with the
+            token seen and the `reader` above. Use this to, for instance,
convert each value into a more specific object.
Returns a new `TokenDict` instance.
"""
dict_ = reader(token)
- handler = reader if builder is None else lambda val: builder(reader(val))
+ handler = reader if builder is None else lambda val: builder(val, reader)
return cls(dict((k, handler(v)) for k, v in six.iteritems(dict_)))
diff --git a/merky/serialization.py b/merky/serialization.py
new file mode 100644
index 0000000..0e654ef
--- /dev/null
+++ b/merky/serialization.py
@@ -0,0 +1,14 @@
+import json
+
+def json_serializer(sort=True):
+ """
+ Returns a function that can serialize to JSON, with unicode enabled,
+ nans disallowed, keys sorted, and whitespace-free separators.
+
+ You can override the sorted keys via `sort=False`.
+ """
+ return json.JSONEncoder(ensure_ascii=False,
+ allow_nan=False,
+ sort_keys=sort,
+ separators=(",",":")).encode
+
diff --git a/merky/store/__init__.py b/merky/store/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/merky/store/structure.py b/merky/store/structure.py
new file mode 100644
index 0000000..dabc581
--- /dev/null
+++ b/merky/store/structure.py
@@ -0,0 +1,152 @@
+import codecs
+import collections
+import json
+from .. import serialization
+
+class Structure(object):
+ """
+ Basic interface for a serializ(ed/able) "structure".
+
+ Without providing specifics for read/write operations, gives
+ the basic interface that these things should have in common.
+
+ The purpose of the structure is to combine:
+ * The map of token/structure pairs one gets from transformation
+ * A "head" token indicating which structure is the top-level
+ structure.
+
+ The combination of the `head` token and the `get` method would
+ allow for load of specific cases like the
+ `merky.cases.attrgraph.AttributeGraph` and similar (the `from_token`
+ classmethod).
+ """
+ def populate(self, token_structure_pairs):
+ raise NotImplementedError("populate() not implemented.")
+
+ def get(self, key):
+ raise NotImplementedError("get() not implemented.")
+
+ def __getitem__(self, key):
+ raise NotImplementedError("__getitem()__ not implemented.")
+
+
+
+class TokenMapStructure(Structure):
+ """
+ Maintains an internal in-memory map of token/structure pairs.
+
+ While the `head` can be explicitly set, the `head` defaults to the
+ last token seen via `populate()`.
+ """
+ @staticmethod
+ def default_tokenmap():
+ return collections.OrderedDict()
+
+ def populate(self, token_structure_pairs):
+ """
+ Ingests the `token_structure_pairs` into the internal token map.
+
+ Sets `head` to the last token seen, regardless of the current state of `head`.
+ """
+ self.tokenmap.update(token_structure_pairs)
+ self.head = next(iter(reversed(self.tokenmap))) if len(self.tokenmap) > 0 else None
+
+ def get(self, key):
+ """
+ Returns the corresponding structure from the internal map given the token `key`.
+ """
+ return self.tokenmap.get(key)
+
+
+ def __getitem__(self, key):
+ """
+ Returns the corresponding structure from the internal map given the token 'key'.
+ """
+ return self.tokenmap[key]
+
+
+class InMemoryStructure(TokenMapStructure):
+ """
+ An in-memory "store" that has both the read and write interfaces.
+ """
+ def __init__(self, tokenmap=None, head=None):
+ self.tokenmap = self.default_tokenmap() if tokenmap is None else tokenmap
+ self.head = head
+
+ def close(self):
+ pass
+
+
+class JSONStreamReadStructure(TokenMapStructure):
+ """
+ Reads merkified structure from a utf-8 JSON stream.
+ """
+ def __init__(self, stream):
+ self.stream = stream
+ self.tokenmap, self.head = self.deserialize_from_stream(stream)
+
+ @classmethod
+ def deserialize_from_stream(cls, stream):
+ return json.load(stream, encoding='utf-8')
+
+
+class JSONStreamWriteStructure(TokenMapStructure):
+ """
+ Writes merkified structure to a utf-8 JSON stream.
+
+ The instance accumulates state internally and only serializes to stream
+ at close().
+ """
+ serializer = serialization.json_serializer(sort=False)
+
+ def __init__(self, stream):
+ self.tokenmap = self.default_tokenmap()
+ self.stream = stream
+
+ def serialize_to_stream(self, stream):
+ stream.write(self.serializer([self.tokenmap, self.head]))
+
+ def close(self):
+ self.serialize_to_stream(self.stream)
+ self.stream.flush()
+
+
+class JSONFileReadStructure(JSONStreamReadStructure):
+ """
+ Reads merkified structure from utf-8 JSON file.
+ """
+ def __init__(self, path):
+ self.path = path
+ self.tokenmap, self.head = self.deserialize_from_file(self.path)
+
+ @classmethod
+ def deserialize_from_file(cls, path):
+ with codecs.open(path, encoding="utf-8", mode="rb") as f:
+ return cls.deserialize_from_stream(f)
+
+
+class JSONFileWriteStructure(JSONStreamWriteStructure):
+ """
+ Writes merkified structure to utf-8 JSON file.
+
+ The instance accumulates state internally and only serializes to the file
+ at close().
+ """
+ def __init__(self, path):
+ self.tokenmap = self.default_tokenmap()
+ self.path = path
+
+ def serialize_to_file(self, path):
+ """
+ Writes the current state to the `path` specified.
+ """
+ with codecs.open(path, encoding="utf-8", mode="wb") as f:
+ self.serialize_to_stream(f)
+
+ def close(self):
+ """
+ Writes the state out to the file at `self.path`.
+ """
+ self.serialize_to_file(self.path)
+
+
diff --git a/merky/transformer.py b/merky/transformer.py
index b6be506..a4a370f 100644
--- a/merky/transformer.py
+++ b/merky/transformer.py
@@ -1,6 +1,5 @@
-import json
-
from . import digest
+from . import serialization
from . import tree
class Transformer(object):
@@ -75,10 +74,8 @@ class Transformer(object):
Subclasses could override this to use an alternate serialization approach; the result must
support `encode()`.
"""
- return json.JSONEncoder(ensure_ascii=False,
- allow_nan=False,
- sort_keys=True,
- separators=(",",":")).encode
+ return serialization.json_serializer()
+
def get_dispatcher(self):
"""
diff --git a/setup.py b/setup.py
index a0eed15..a16dfee 100644
--- a/setup.py
+++ b/setup.py
@@ -11,7 +11,10 @@ setup(
url = "https://github.com/ethanrowe/python-merky",
packages = ["merky",
"merky.cases",
+ "merky.store",
"merky.test",
+ "merky.test.misc",
+ "merky.test.store",
"merky.test.usecases",
],
long_description = """
| An in-memory store provides a simple token/structure storage interface
We want a simple interface for storing token/structure pairs, so we can build some higher-level abstractions around them without coupling ourselves to any particular storage mechanism (noting that the most obvious "storage" is an in-memory dict).
Do the in-memory case first, to work out the basics. Probably should have:
* get by key
* set by key; return value should indicate whether key was actually new or not.
* serialize (to JSON, probably)
* deserialize (from JSON, probably)
* close (to flush and release any resources related to the store)
* `__enter__`/`__exit__` for context manager behavior; close on exit. (A rough sketch of this interface follows.)
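For illustration only, a minimal sketch of what such an in-memory store might look like; all names here are hypothetical, not the eventual merky API:

```python
import json


class InMemoryStore(object):
    """Token/structure store backed by a plain dict (illustrative sketch)."""

    def __init__(self, tokenmap=None):
        self.tokenmap = dict(tokenmap or {})

    def get(self, token, default=None):
        return self.tokenmap.get(token, default)

    def set(self, token, structure):
        """Store `structure` under `token`; return True if the token was new."""
        is_new = token not in self.tokenmap
        self.tokenmap[token] = structure
        return is_new

    def serialize(self):
        # Assumes the stored structures are JSON-serializable.
        return json.dumps(self.tokenmap, sort_keys=True)

    @classmethod
    def deserialize(cls, blob):
        return cls(json.loads(blob))

    def close(self):
        # Nothing to flush for the in-memory case; a file-backed variant
        # would write its state out here.
        pass

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        self.close()
        return False
``` | ethanrowe/python-merky | diff --git a/merky/test/misc/__init__.py b/merky/test/misc/__init__.py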
new file mode 100644
index 0000000..e69de29
diff --git a/merky/test/misc/graph_storage_test.py b/merky/test/misc/graph_storage_test.py
new file mode 100644
index 0000000..f1a6984
--- /dev/null
+++ b/merky/test/misc/graph_storage_test.py
@@ -0,0 +1,96 @@
+import os
+import tempfile
+import shutil
+
+from nose import tools
+
+import merky
+from merky.cases import attrgraph
+from merky.cases import tokendict
+from merky.store import structure
+
+def tempdir(func):
+ def wrapper(*p, **kw):
+ dir_ = tempfile.mkdtemp()
+ try:
+ return func(*(p + (dir_,)), **kw)
+ finally:
+ shutil.rmtree(dir_)
+ wrapper.__name__ = func.__name__
+ return wrapper
+
+
+def dag(attrs, members):
+ return attrgraph.AttributeGraph(attrs=attrs, members=members)
+
+def getpath(reader, *keys):
+ current = reader.get(reader.head)
+ for key in keys:
+ current = reader.get(current[key])
+ return current
+
+@tempdir
+def test_versioned_graph_save_and_restore(workdir):
+ path = os.path.join(workdir, 'my-graph.json')
+ static = dag({"unchanging": "eternal"}, {})
+
+ graph_s0 = dag({"name": "graph", "version": 0}, {
+ "static": static,
+ "changing": dag({"a": "A"}, {}),
+ "removed": dag({"bleep": "blorp"}, {}),
+ })
+
+ graph_s1 = dag({"name": "graph", "version": 1}, {
+ "static": static,
+ "changing": dag({"b": "B"}, {}),
+ })
+
+ version_map = tokendict.TokenDict({"v0": graph_s0,
+ "v1": graph_s0,
+ "v2": graph_s1,
+ "v3": graph_s1})
+
+ transformer = merky.AnnotationTransformer()
+ writer = structure.JSONFileWriteStructure(path)
+ writer.populate(transformer.transform(version_map))
+ writer.close()
+
+ reader = structure.JSONFileReadStructure(path)
+ tokens = tokendict.TokenDict.from_token(reader.head, reader.get)
+
+ tools.assert_equal(["v0", "v1", "v2", "v3"], list(tokens.keys()))
+
+ # Same states get same tokens.
+ tools.assert_equal(tokens.dict_["v0"], tokens.dict_["v1"])
+ tools.assert_equal(tokens.dict_["v2"], tokens.dict_["v3"])
+ tools.assert_equal(getpath(reader, "v0", 1, "static"),
+ getpath(reader, "v2", 1, "static"))
+
+ # Different states, different tokens.
+ tools.assert_not_equal(tokens.dict_["v1"], tokens.dict_["v2"])
+ tools.assert_not_equal(getpath(reader, "v0", 1, "changing"),
+ getpath(reader, "v2", 1, "changing"))
+
+
+ restored = tokendict.TokenDict.from_token(
+ reader.head,
+ reader.get,
+ attrgraph.AttributeGraph.from_token
+ )
+
+ tools.assert_equal(["v0", "v1", "v2", "v3"],
+ list(restored.keys()))
+
+ tools.assert_equal(dict(graph_s0.attrs), dict(restored["v0"].attrs))
+ tools.assert_equal(dict(graph_s1.attrs), dict(restored["v2"].attrs))
+ tools.assert_equal(["changing", "removed", "static"],
+ list(sorted(restored["v0"].members.keys())))
+ tools.assert_equal(["changing", "static"],
+ list(sorted(restored["v2"].members.keys())))
+ tools.assert_equal(dict(graph_s0.members["changing"].attrs),
+ dict(restored["v1"].members["changing"].attrs))
+ tools.assert_equal(dict(graph_s1.members["changing"].attrs),
+ dict(restored["v3"].members["changing"].attrs))
+ tools.assert_equal(dict(restored["v0"].members["static"].attrs),
+ dict(restored["v3"].members["static"].attrs))
+
diff --git a/merky/test/store/__init__.py b/merky/test/store/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/merky/test/store/structure_test.py b/merky/test/store/structure_test.py
new file mode 100644
index 0000000..8a2f071
--- /dev/null
+++ b/merky/test/store/structure_test.py
@@ -0,0 +1,166 @@
+import codecs
+import collections
+import os
+import tempfile
+import shutil
+from nose import tools
+from six.moves import StringIO
+from .. import words
+from merky.store import structure
+
+def od(*p, **kw):
+ return collections.OrderedDict(*p, **kw)
+
+class TestInMemoryStructure(object):
+
+ HEAD_TOKEN = 'sometoptoken'
+ LIST_TOKEN = 'somelisttoken'
+ DICT_TOKEN = 'somedicttoken'
+ HEAD = od((('a', 'somelisttoken'), ('b', 'somedicttoken')))
+ DICT = od(((words.EUROS, words.SHEKELS), (words.EPEES, words.ANGSTROM)))
+ LIST = ["some", "list", words.SHEKELS]
+
+ PAIRS = (
+ (LIST_TOKEN, LIST),
+ (DICT_TOKEN, DICT),
+ (HEAD_TOKEN, HEAD),
+ )
+
+
+ def get_read_store(self):
+ return structure.InMemoryStructure(od(self.PAIRS), self.HEAD_TOKEN)
+
+
+ def get_write_store(self):
+ return structure.InMemoryStructure()
+
+
+ def verify_dict(self, received, expect):
+ tools.assert_equal(expect, received)
+
+
+ def verify_list(self, received, expect):
+ tools.assert_equal(expect, received)
+
+
+ def verify_head(self, store, head):
+ tools.assert_equal(head, store.head)
+
+
+ def verify_write(self, store, head):
+ # In-memory guy has same state before and after close() call.
+ tools.assert_equal(od(self.PAIRS), store.tokenmap)
+ store.close()
+ tools.assert_equal(head, store.head)
+ tools.assert_equal(od(self.PAIRS), store.tokenmap)
+
+
+ def test_head_from_last_by_default(self):
+ store = self.get_write_store()
+ store.populate(iter(self.PAIRS))
+ self.verify_head(store, self.HEAD_TOKEN)
+ self.verify_write(store, self.HEAD_TOKEN)
+
+
+ def test_overriddable_head(self):
+ store = self.get_write_store()
+ store.populate(iter(self.PAIRS))
+ store.head = self.DICT_TOKEN
+ self.verify_head(store, self.DICT_TOKEN)
+ self.verify_write(store, self.DICT_TOKEN)
+
+
+ def test_read_operations(self):
+ store = self.get_read_store()
+ self.verify_head(store, self.HEAD_TOKEN)
+ self.verify_dict(store.get(self.HEAD_TOKEN), self.HEAD)
+ self.verify_dict(store[self.HEAD_TOKEN], self.HEAD)
+ self.verify_list(store.get(self.LIST_TOKEN), self.LIST)
+ self.verify_list(store[self.LIST_TOKEN], self.LIST)
+ self.verify_dict(store.get(self.DICT_TOKEN), self.DICT)
+ self.verify_dict(store[self.DICT_TOKEN], self.DICT)
+ tools.assert_equal(None, store.get('no no no'))
+ tools.assert_raises(KeyError, lambda: store['no no no'])
+
+
+q = lambda word: words.unify('"', word, '"')
+def commify(*w):
+ terms = []
+ for word in w[:-1]:
+ terms.append(word)
+ terms.append(',')
+ if len(w) > 0:
+ terms.append(w[-1])
+ return words.unify(*terms)
+
+jl = lambda *w: words.unify('[', commify(*w), ']')
+def jd(*pairs):
+ return words.unify('{',
+ commify(*(words.unify(k, ':', w)
+ for k, w in pairs)),
+ '}')
+
+
+class TestStreamStructure(TestInMemoryStructure):
+ def json(self, head):
+ return jl(
+ jd(
+ (q(self.LIST_TOKEN), jl(*(q(w) for w in self.LIST))),
+ (q(self.DICT_TOKEN), jd(*((q(k), q(v)) for k,v in self.DICT.items()))),
+ (q(self.HEAD_TOKEN), jd(*((q(k), q(v)) for k,v in self.HEAD.items()))),
+ ),
+ q(head),
+ )
+
+ def get_read_store(self):
+ self.stream = StringIO(self.json(self.HEAD_TOKEN))
+ return structure.JSONStreamReadStructure(self.stream)
+
+ def get_write_store(self):
+ self.stream = StringIO()
+ return structure.JSONStreamWriteStructure(self.stream)
+
+ def verify_dict(self, received, expect):
+ tools.assert_equal(dict(expect), received)
+
+ def verify_list(self, received, expect):
+ tools.assert_equal(list(expect), received)
+
+ def verify_write(self, store, head):
+ # Nothing written to stream until close().
+ tools.assert_equal(0, self.stream.tell())
+ tools.assert_equal('', self.stream.read())
+ store.close()
+ self.stream.seek(0)
+ tools.assert_equal(self.json(head), self.stream.read())
+
+ def test_no_close(self):
+ store = self.get_read_store()
+ tools.assert_raises(AttributeError, getattr, store, 'close')
+
+
+class TestFileStructure(TestStreamStructure):
+ def setup(self):
+ self.workdir = tempfile.mkdtemp()
+ self.path = os.path.join(self.workdir, "some-file.json")
+
+ def teardown(self):
+ shutil.rmtree(self.workdir)
+
+ def get_read_store(self):
+ with codecs.open(self.path, mode="wb", encoding="utf-8") as f:
+ f.write(self.json(self.HEAD_TOKEN))
+ return structure.JSONFileReadStructure(self.path)
+
+ def get_write_store(self):
+ return structure.JSONFileWriteStructure(self.path)
+
+ def verify_write(self, store, head):
+ # File doesn't even exist before close.
+ tools.assert_false(os.path.exists(self.path))
+ # But it will after close()
+ store.close()
+ tools.assert_true(os.path.exists(self.path))
+ with codecs.open(self.path, mode='rb', encoding='utf-8') as f:
+ tools.assert_equal(self.json(head), f.read())
+
diff --git a/merky/test/usecases/tokendict_test.py b/merky/test/usecases/tokendict_test.py
index 57c109f..48d6341 100644
--- a/merky/test/usecases/tokendict_test.py
+++ b/merky/test/usecases/tokendict_test.py
@@ -67,9 +67,9 @@ def test_restoration():
def test_restoration_with_builder():
- def wrapper(value):
+ def wrapper(token, reader):
def wrapped():
- return ('called me!', value)
+ return ('called me!', reader(token))
return wrapped
g = tokendict.TokenDict.from_token(TOK.token, CACHE.get, wrapper)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 4
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"mock>=1.0.1",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
iniconfig==2.1.0
-e git+https://github.com/ethanrowe/python-merky.git@a624893701ffa9b3f1b7c990c62e62725383b0cb#egg=merky
mock==5.2.0
nose==1.3.7
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
six==1.17.0
tomli==2.2.1
| name: python-merky
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- mock==5.2.0
- nose==1.3.7
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- six==1.17.0
- tomli==2.2.1
prefix: /opt/conda/envs/python-merky
| [
"merky/test/store/structure_test.py::TestInMemoryStructure::test_head_from_last_by_default",
"merky/test/store/structure_test.py::TestInMemoryStructure::test_overriddable_head",
"merky/test/store/structure_test.py::TestInMemoryStructure::test_read_operations",
"merky/test/store/structure_test.py::TestStreamStructure::test_head_from_last_by_default",
"merky/test/store/structure_test.py::TestStreamStructure::test_overriddable_head",
"merky/test/usecases/tokendict_test.py::test_dict_",
"merky/test/usecases/tokendict_test.py::test_get_and_set",
"merky/test/usecases/tokendict_test.py::test_key_sort",
"merky/test/usecases/tokendict_test.py::test_value_annotation",
"merky/test/usecases/tokendict_test.py::test_tokenization",
"merky/test/usecases/tokendict_test.py::test_restoration",
"merky/test/usecases/tokendict_test.py::test_restoration_with_builder"
]
| [
"merky/test/misc/graph_storage_test.py::test_versioned_graph_save_and_restore",
"merky/test/store/structure_test.py::TestStreamStructure::test_read_operations",
"merky/test/store/structure_test.py::TestStreamStructure::test_no_close",
"merky/test/store/structure_test.py::TestFileStructure::test_head_from_last_by_default",
"merky/test/store/structure_test.py::TestFileStructure::test_overriddable_head",
"merky/test/store/structure_test.py::TestFileStructure::test_read_operations",
"merky/test/store/structure_test.py::TestFileStructure::test_no_close"
]
| []
| []
| MIT License | 158 | [
"merky/store/__init__.py",
"merky/transformer.py",
"setup.py",
"merky/cases/tokendict.py",
"merky/store/structure.py",
"merky/serialization.py",
"README.md"
]
| [
"merky/store/__init__.py",
"merky/transformer.py",
"setup.py",
"merky/cases/tokendict.py",
"merky/store/structure.py",
"merky/serialization.py",
"README.md"
]
|
|
pydoit__doit-58 | dcefd2159bca7fa80e236fc3fec91688a39ba7c4 | 2015-06-07 18:22:26 | e551006a0a5a93a197d413b663bd455061e3f74d | diff --git a/doit/cmd_run.py b/doit/cmd_run.py
index 8a3693a..50e34b9 100644
--- a/doit/cmd_run.py
+++ b/doit/cmd_run.py
@@ -116,6 +116,18 @@ opt_pdb = {
}
+# use ".*" as default regex for delayed tasks without explicitly specified regex
+opt_auto_delayed_regex = {
+ 'name': 'auto_delayed_regex',
+ 'short': '',
+ 'long': 'auto-delayed-regex',
+ 'type': bool,
+ 'default': False,
+ 'help':
+"""Uses the default regex ".*" for every delayed task loader for which no regex was explicitly defined"""
+}
+
+
class Run(DoitCmdBase):
doc_purpose = "run tasks"
doc_usage = "[TASK/TARGET...]"
@@ -124,7 +136,8 @@ class Run(DoitCmdBase):
cmd_options = (opt_always, opt_continue, opt_verbosity,
opt_reporter, opt_outfile, opt_num_process,
- opt_parallel_type, opt_pdb, opt_single)
+ opt_parallel_type, opt_pdb, opt_single,
+ opt_auto_delayed_regex)
def __init__(self, **kwargs):
@@ -162,7 +175,7 @@ class Run(DoitCmdBase):
def _execute(self, outfile,
verbosity=None, always=False, continue_=False,
reporter='console', num_process=0, par_type='process',
- single=False):
+ single=False, auto_delayed_regex=False):
"""
@param reporter:
(str) one of provided reporters or ...
@@ -172,7 +185,7 @@ class Run(DoitCmdBase):
"""
# get tasks to be executed
# self.control is saved on instance to be used by 'auto' command
- self.control = TaskControl(self.task_list)
+ self.control = TaskControl(self.task_list, auto_delayed_regex=auto_delayed_regex)
self.control.process(self.sel_tasks)
if single:
diff --git a/doit/control.py b/doit/control.py
index 6350b99..c60d688 100644
--- a/doit/control.py
+++ b/doit/control.py
@@ -26,9 +26,10 @@ class TaskControl(object):
Value: task_name
"""
- def __init__(self, task_list):
+ def __init__(self, task_list, auto_delayed_regex=False):
self.tasks = {}
self.targets = {}
+ self.auto_delayed_regex = auto_delayed_regex
# name of task in order to be executed
# this the order as in the dodo file. the real execution
@@ -174,22 +175,51 @@ class TaskControl(object):
# by task name
if filter_ in self.tasks:
selected_task.append(filter_)
+ continue
+
# by target
- elif filter_ in self.targets:
+ if filter_ in self.targets:
selected_task.append(self.targets[filter_])
- else:
- # if can not find name check if it is a sub-task of a delayed
- basename = filter_.split(':', 1)[0]
- if basename in self.tasks:
- loader = self.tasks[basename].loader
- loader.basename = basename
- self.tasks[filter_] = Task(filter_, None, loader=loader)
- selected_task.append(filter_)
+ continue
+
+ # if can not find name check if it is a sub-task of a delayed
+ basename = filter_.split(':', 1)[0]
+ if basename in self.tasks:
+ loader = self.tasks[basename].loader
+ loader.basename = basename
+ self.tasks[filter_] = Task(filter_, None, loader=loader)
+ selected_task.append(filter_)
+ continue
+
+ # check if target matches any regex
+ import re
+ tasks = []
+ for task in list(self.tasks.values()):
+ if task.loader and (task.loader.target_regex or self.auto_delayed_regex):
+ if re.match(task.loader.target_regex if task.loader.target_regex else '.*', filter_):
+ tasks.append(task)
+ if len(tasks) > 0:
+ if len(tasks) == 1:
+ task = tasks[0]
+ loader = task.loader
+ loader.basename = task.name
+ name = '_regex_target_' + filter_
+ self.tasks[name] = Task(name, None,
+ loader=loader,
+ file_dep=[filter_])
+ selected_task.append(name)
else:
- msg = ('cmd `run` invalid parameter: "%s".' +
- ' Must be a task, or a target.\n' +
- 'Type "doit list" to see available tasks')
- raise InvalidCommand(msg % filter_)
+ name = '_regex_target_' + filter_
+ self.tasks[name] = Task(name, None,
+ task_dep=[task.name for task in tasks],
+ file_dep=[filter_])
+ selected_task.append(name)
+ else:
+ # not found
+ msg = ('cmd `run` invalid parameter: "%s".' +
+ ' Must be a task, or a target.\n' +
+ 'Type "doit list" to see available tasks')
+ raise InvalidCommand(msg % filter_)
return selected_task
@@ -416,6 +446,9 @@ class TaskDispatcher(object):
basename = this_task.loader.basename or this_task.name
new_tasks = generate_tasks(basename, ref(), ref.__doc__)
TaskControl.set_implicit_deps(self.targets, new_tasks)
+ # check itself for implicit dep (used by regex_target)
+ TaskControl.add_implicit_task_dep(
+ self.targets, this_task, this_task.file_dep)
for nt in new_tasks:
if not nt.loader:
nt.loader = DelayedLoaded
diff --git a/doit/loader.py b/doit/loader.py
index 1f3bcd4..123d0f4 100644
--- a/doit/loader.py
+++ b/doit/loader.py
@@ -98,10 +98,11 @@ def get_module(dodo_file, cwd=None, seek_parent=False):
-def create_after(executed=None):
+def create_after(executed=None, target_regex=None):
"""Annotate a task-creator function with delayed loader info"""
def decorated(func):
- func.doit_create_after = DelayedLoader(func, executed=executed)
+ func.doit_create_after = DelayedLoader(func, executed=executed,
+ target_regex=target_regex)
return func
return decorated
diff --git a/doit/reporter.py b/doit/reporter.py
index 4f5cbe3..18df356 100644
--- a/doit/reporter.py
+++ b/doit/reporter.py
@@ -53,7 +53,8 @@ class ConsoleReporter(object):
def skip_uptodate(self, task):
"""skipped up-to-date task"""
- self.write("-- %s\n" % task.title())
+ if task.actions and (task.name[0] != '_'):
+ self.write("-- %s\n" % task.title())
def skip_ignore(self, task):
"""skipped ignored task"""
diff --git a/doit/task.py b/doit/task.py
index 7595877..3440123 100644
--- a/doit/task.py
+++ b/doit/task.py
@@ -31,9 +31,10 @@ class DelayedLoader(object):
the loader call the creator function
:ivar basename: (str) basename used when creating tasks
"""
- def __init__(self, creator, executed=None):
+ def __init__(self, creator, executed=None, target_regex=None):
self.creator = creator
self.task_dep = executed
+ self.target_regex = target_regex
self.basename = None
self.created = False
| specify target of a DelayedTask on command line
Since not all tasks are created before execution starts, some special handling is required for the names of targets that have not been created yet.
See discussion on https://github.com/getnikola/nikola/issues/1562#issuecomment-70836094
The general idea is that, if a target is not found, then before raising an error to the user, doit should try to load DelayedTasks (as the `list` command does) to look for the given `target` name.
Some considerations:
1) as of now, a DelayedTask creates an implicit `task_dep` for the task given in its `executed` param. But this `task_dep` is not preserved when the DelayedTask is re-created. It should not only be preserved, but all created tasks should include this `task_dep`, because the guarantee that the dependent task was already executed won't exist anymore!
2) if the selected tasks for execution include known tasks and targets, they should be executed **before** any DelayedTask is loaded to look for an unknown `target`. This will ensure that the same command line works nicely even on its first execution.
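For illustration, a minimal `dodo.py`-style sketch of the situation (the task names, file layout, and shell commands below are hypothetical, not taken from a real project):

```python
import glob

from doit import create_after


def task_scan():
    # Produces the inputs that the delayed creator will enumerate.
    return {'actions': ['fetch-pages --into pages/']}


@create_after(executed='scan')
def task_render():
    # These sub-tasks, and hence their targets such as "out/a.html", only
    # exist after "scan" has run, so on a fresh checkout a command line
    # like `doit out/a.html` finds no task for that target.
    for src in glob.glob('pages/*.txt'):
        name = src[len('pages/'):-len('.txt')]
        target = 'out/%s.html' % name
        yield {
            'name': name,
            'actions': ['render %s %s' % (src, target)],
            'file_dep': [src],
            'targets': [target],
        }
```

With a sketch like this, consideration 1) is visible directly: once resolving `doit out/a.html` triggers re-creation of `task_render` on its own, the implicit `task_dep` on `scan` has to be attached to every generated sub-task, since `scan` can no longer be assumed to have run first.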
| pydoit/doit | diff --git a/tests/test_control.py b/tests/test_control.py
index 4237acb..734b449 100644
--- a/tests/test_control.py
+++ b/tests/test_control.py
@@ -111,6 +111,43 @@ class TestTaskControlCmdOptions(object):
assert control.tasks['taskY:foo'].loader.basename == 'taskY'
assert control.tasks['taskY:foo'].loader is t2.loader
+ def test_filter_delayed_regex_single(self):
+ t1 = Task("taskX", None)
+ t2 = Task("taskY", None, loader=DelayedLoader(lambda: None, target_regex='a.*'))
+ t3 = Task("taskZ", None, loader=DelayedLoader(lambda: None, target_regex='b.*'))
+ t4 = Task("taskW", None, loader=DelayedLoader(lambda: None))
+ control = TaskControl([t1, t2, t3, t4], auto_delayed_regex=False)
+ l = control._filter_tasks(['abc'])
+ assert isinstance(t2.loader, DelayedLoader)
+ assert len(l) == 1
+ assert l[0] == '_regex_target_abc'
+ assert control.tasks[l[0]].file_dep == {'abc'}
+ assert control.tasks[l[0]].loader.basename == 'taskY'
+ assert control.tasks[l[0]].loader is t2.loader
+
+ def test_filter_delayed_regex_multiple(self):
+ t1 = Task("taskX", None)
+ t2 = Task("taskY", None, loader=DelayedLoader(lambda: None, target_regex='a.*'))
+ t3 = Task("taskZ", None, loader=DelayedLoader(lambda: None, target_regex='ab.'))
+ t4 = Task("taskW", None, loader=DelayedLoader(lambda: None))
+ control = TaskControl([t1, t2, t3, t4], auto_delayed_regex=False)
+ l = control._filter_tasks(['abc'])
+ assert len(l) == 1
+ assert l[0] == '_regex_target_abc'
+ assert control.tasks[l[0]].file_dep == {'abc'}
+ assert set(control.tasks[l[0]].task_dep) == {t2.name, t3.name}
+
+ def test_filter_delayed_regex_auto(self):
+ t1 = Task("taskX", None)
+ t2 = Task("taskY", None, loader=DelayedLoader(lambda: None, target_regex='a.*'))
+ t3 = Task("taskZ", None, loader=DelayedLoader(lambda: None))
+ control = TaskControl([t1, t2, t3], auto_delayed_regex=True)
+ l = control._filter_tasks(['abc'])
+ assert len(l) == 1
+ assert l[0] == '_regex_target_abc'
+ assert control.tasks[l[0]].file_dep == {'abc'}
+ assert set(control.tasks[l[0]].task_dep) == {t2.name, t3.name}
+
# filter a non-existent task raises an error
def testFilterWrongName(self):
tc = TaskControl(TASKS_SAMPLE)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 5
} | 0.28 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "numpy>=1.10 pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y libgeos-dev"
],
"python": "3.7",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///croot/attrs_1668696182826/work
certifi @ file:///croot/certifi_1671487769961/work/certifi
-e git+https://github.com/pydoit/doit.git@dcefd2159bca7fa80e236fc3fec91688a39ba7c4#egg=doit
flit_core @ file:///opt/conda/conda-bld/flit-core_1644941570762/work/source/flit_core
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1648562407465/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
numpy @ file:///opt/conda/conda-bld/numpy_and_numpy_base_1653915516269/work
packaging @ file:///croot/packaging_1671697413597/work
pluggy @ file:///tmp/build/80754af9/pluggy_1648042572264/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyinotify==0.9.6
pytest==7.1.2
six==1.17.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
typing_extensions @ file:///croot/typing_extensions_1669924550328/work
zipp @ file:///croot/zipp_1672387121353/work
| name: doit
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=22.1.0=py37h06a4308_0
- blas=1.0=openblas
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- flit-core=3.6.0=pyhd3eb1b0_0
- importlib-metadata=4.11.3=py37h06a4308_0
- importlib_metadata=4.11.3=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgfortran-ng=11.2.0=h00389a5_1
- libgfortran5=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libopenblas=0.3.21=h043d6bf_0
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- numpy=1.21.5=py37hf838250_3
- numpy-base=1.21.5=py37h1e6e340_3
- openssl=1.1.1w=h7f8727e_0
- packaging=22.0=py37h06a4308_0
- pip=22.3.1=py37h06a4308_0
- pluggy=1.0.0=py37h06a4308_1
- py=1.11.0=pyhd3eb1b0_0
- pytest=7.1.2=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py37h06a4308_0
- typing_extensions=4.4.0=py37h06a4308_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zipp=3.11.0=py37h06a4308_0
- zlib=1.2.13=h5eee18b_1
- pip:
- pyinotify==0.9.6
- six==1.17.0
prefix: /opt/conda/envs/doit
| [
"tests/test_control.py::TestTaskControlCmdOptions::test_filter_delayed_regex_single",
"tests/test_control.py::TestTaskControlCmdOptions::test_filter_delayed_regex_multiple",
"tests/test_control.py::TestTaskControlCmdOptions::test_filter_delayed_regex_auto"
]
| []
| [
"tests/test_control.py::TestTaskControlInit::test_addTask",
"tests/test_control.py::TestTaskControlInit::test_targetDependency",
"tests/test_control.py::TestTaskControlInit::test_addTaskSameName",
"tests/test_control.py::TestTaskControlInit::test_addInvalidTask",
"tests/test_control.py::TestTaskControlInit::test_userErrorTaskDependency",
"tests/test_control.py::TestTaskControlInit::test_userErrorSetupTask",
"tests/test_control.py::TestTaskControlInit::test_sameTarget",
"tests/test_control.py::TestTaskControlInit::test_wild",
"tests/test_control.py::TestTaskControlInit::test_bug770150_task_dependency_from_target",
"tests/test_control.py::TestTaskControlCmdOptions::testFilter",
"tests/test_control.py::TestTaskControlCmdOptions::testProcessSelection",
"tests/test_control.py::TestTaskControlCmdOptions::testProcessAll",
"tests/test_control.py::TestTaskControlCmdOptions::testFilterPattern",
"tests/test_control.py::TestTaskControlCmdOptions::testFilterSubtask",
"tests/test_control.py::TestTaskControlCmdOptions::testFilterTarget",
"tests/test_control.py::TestTaskControlCmdOptions::test_filter_delayed_subtask",
"tests/test_control.py::TestTaskControlCmdOptions::testFilterWrongName",
"tests/test_control.py::TestTaskControlCmdOptions::testFilterEmptyList",
"tests/test_control.py::TestTaskControlCmdOptions::testOptions",
"tests/test_control.py::TestTaskControlCmdOptions::testPosParam",
"tests/test_control.py::TestExecNode::test_repr",
"tests/test_control.py::TestExecNode::test_ready_select__not_waiting",
"tests/test_control.py::TestExecNode::test_parent_status_failure",
"tests/test_control.py::TestExecNode::test_parent_status_ignore",
"tests/test_control.py::TestExecNode::test_step",
"tests/test_control.py::TestDecoratorNoNone::test_filtering",
"tests/test_control.py::TestTaskDispatcher_GenNone::test_create",
"tests/test_control.py::TestTaskDispatcher_GenNone::test_already_created",
"tests/test_control.py::TestTaskDispatcher_GenNone::test_cyclic",
"tests/test_control.py::TestTaskDispatcher_node_add_wait_run::test_wait",
"tests/test_control.py::TestTaskDispatcher_node_add_wait_run::test_none",
"tests/test_control.py::TestTaskDispatcher_node_add_wait_run::test_deps_not_ok",
"tests/test_control.py::TestTaskDispatcher_node_add_wait_run::test_calc_dep_already_executed",
"tests/test_control.py::TestTaskDispatcher_add_task::test_no_deps",
"tests/test_control.py::TestTaskDispatcher_add_task::test_task_deps",
"tests/test_control.py::TestTaskDispatcher_add_task::test_task_deps_already_created",
"tests/test_control.py::TestTaskDispatcher_add_task::test_task_deps_no_wait",
"tests/test_control.py::TestTaskDispatcher_add_task::test_calc_dep",
"tests/test_control.py::TestTaskDispatcher_add_task::test_calc_dep_already_executed",
"tests/test_control.py::TestTaskDispatcher_add_task::test_setup_task__run",
"tests/test_control.py::TestTaskDispatcher_add_task::test_delayed_creation",
"tests/test_control.py::TestTaskDispatcher_add_task::test_delayed_creation_sub_task",
"tests/test_control.py::TestTaskDispatcher_get_next_node::test_none",
"tests/test_control.py::TestTaskDispatcher_get_next_node::test_ready",
"tests/test_control.py::TestTaskDispatcher_get_next_node::test_to_run",
"tests/test_control.py::TestTaskDispatcher_get_next_node::test_to_run_none",
"tests/test_control.py::TestTaskDispatcher_update_waiting::test_wait_select",
"tests/test_control.py::TestTaskDispatcher_update_waiting::test_wait_run",
"tests/test_control.py::TestTaskDispatcher_update_waiting::test_wait_run_deps_not_ok",
"tests/test_control.py::TestTaskDispatcher_update_waiting::test_waiting_node_updated",
"tests/test_control.py::TestTaskDispatcher_dispatcher_generator::test_normal",
"tests/test_control.py::TestTaskDispatcher_dispatcher_generator::test_delayed_creation"
]
| []
| MIT License | 160 | [
"doit/loader.py",
"doit/cmd_run.py",
"doit/control.py",
"doit/task.py",
"doit/reporter.py"
]
| [
"doit/loader.py",
"doit/cmd_run.py",
"doit/control.py",
"doit/task.py",
"doit/reporter.py"
]
|
|
mne-tools__mne-python-2193 | 6ccf41c6295760dcf36e2a1062132e5b319a4812 | 2015-06-08 23:11:35 | 632e49f0470fc9526936dbb474fd6aa46501fe4d | Eric89GXL: It seems to be working for me now, but it is horribly slow. Hopefully I can speed it up tomorrow.
dgwakeman: You are awesome! I am so excited for this!
Eric89GXL: FYI I added a couple more files to the testing dataset. BEMs have now been created for ico-2 surfaces (320 instead of 5120 or 1280) which is much faster.
The code at this point seems to work for the homogeneous 1-layer case; I just need to track down a bug in the 3-layer case. Speed isn't too much worse than C for small shells (<= 1280) and is actually faster for the standard size (5120) because, while calculating the matrix coefficients is slower in Python, the inversion is much faster:
```
ico (size) type layers time
2 (320) C 1 0.0300931930542
2 (320) Python 1 0.250679969788
2 (320) C 3 0.347531795502
2 (320) Python 3 2.41311502457
3 (1280) C 1 0.621548891068
3 (1280) Python 1 2.16235113144
3 (1280) C 3 12.2742669582
3 (1280) Python 3 21.4681899548
4 (5120) C 1 24.4842910767
4 (5120) Python 1 27.2853899002
4 (5120) C 3 643.578150034
4 (5120) Python 3 282.933717012
```
Just need to track down where that bug is, which I probably won't get to until next week.
Eric89GXL: Okay, 1- and 3-layer model and solution computation works now. Ready for review/merge from my end.
Eric89GXL: Ping @agramfort @dengemann this is ready for review
dengemann: @Eric89GXL amazing! How fast is it compared to the original code?
Eric89GXL: See comment above, faster for normal use case (3-layer 5120)
Eric89GXL: There might be additional ways of optimizing it, too, if there is interest
dengemann: > There might be additional ways of optimizing it, too, if there is interest
I'm already very impressed. Eric, what do we need here, any testing desired?
Eric89GXL: Hopefully the unit tests are sufficient to make us confident in using it. But sure, if you want to test it out and make comments, it's welcome.
dengemann: > But sure, if you want to test it out and make comments, it's welcome.
I will trust you if you're confident ;)
Eric89GXL: Check out the tests, hopefully those will make us confident :) If they don't, then they can be expanded.
Eric89GXL: @dengemann comments addressed. FYI you asked about speed, and that made me optimize a bit more:
```
ico (size) layers type time
2 (320) 1 C 0.283627033234
2 (320) 1 Python 0.291443109512
2 (320) 3 C 0.465950012207
2 (320) 3 Python 2.45959687233
3 (1280) 1 C 0.660896062851
3 (1280) 1 Python 1.62492084503
3 (1280) 3 C 12.4721641541
3 (1280) 3 Python 11.3650319576
4 (5120) 1 C 24.9352688789
4 (5120) 1 Python 11.2639200687
4 (5120) 3 C 644.138396025
4 (5120) 3 Python 118.89906311
```
dengemann: Great!!! I will recompute all my BEMs to enjoy the speedy drive :)
agramfort: please update what's new, rebase and +1 for merge !
thanks awesome ! | diff --git a/doc/source/python_reference.rst b/doc/source/python_reference.rst
index dc0967bcd..8ac585031 100644
--- a/doc/source/python_reference.rst
+++ b/doc/source/python_reference.rst
@@ -159,7 +159,8 @@ Functions:
read_trans
save_stc_as_volume
write_labels_to_annot
- write_bem_surface
+ write_bem_solution
+ write_bem_surfaces
write_cov
write_events
write_evokeds
@@ -544,6 +545,8 @@ Functions:
average_forward_solutions
convert_forward_solution
do_forward_solution
+ make_bem_model
+ make_bem_solution
make_forward_solution
make_field_map
make_sphere_model
diff --git a/doc/source/python_tutorial.rst b/doc/source/python_tutorial.rst
index bc3c9613c..0f2011a59 100644
--- a/doc/source/python_tutorial.rst
+++ b/doc/source/python_tutorial.rst
@@ -18,6 +18,7 @@ What you can do with MNE Python
- **Averaging** to get Evoked data
- **Compute SSP pojectors** to remove ECG and EOG artifacts
- **Compute ICA** to remove artifacts or select latent sources.
+ - **Boundary Element Modeling**: single and three-layer BEM model creation and solution computation.
- **Forward modeling**: BEM computation and mesh creation (see :ref:`ch_forward`)
- **Linear inverse solvers** (dSPM, sLORETA, MNE, LCMV, DICS)
- **Sparse inverse solvers** (L1/L2 mixed norm MxNE, Gamma Map, Time-Frequency MxNE)
@@ -35,8 +36,7 @@ What you can do with MNE Python
What you're not supposed to do with MNE Python
----------------------------------------------
- - **Boundary Element Modeling** use MNE and Freesurfer.
-
+ - **Brain and head surface segmentation** for use with BEM models -- use Freesurfer.
.. note:: Package based on the FIF file format from Neuromag. It can read and convert CTF, BTI/4D, KIT and various EEG formats to FIF.
diff --git a/doc/source/whats_new.rst b/doc/source/whats_new.rst
index 4b2c09803..04bd663b2 100644
--- a/doc/source/whats_new.rst
+++ b/doc/source/whats_new.rst
@@ -23,6 +23,10 @@ Changelog
- Add support to hide/show all sections with a single keypress ('h') in :class:`mne.report.Report` by `Mainak Jas`_
+ - Add support for BEM model creation :func:`mne.make_bem_model` by `Eric Larson`_
+
+ - Add support for BEM solution computation :func:`mne.make_bem_solution` by `Eric Larson`_
+
BUG
~~~
diff --git a/mne/__init__.py b/mne/__init__.py
index 04e1fc1c3..04566bc47 100644
--- a/mne/__init__.py
+++ b/mne/__init__.py
@@ -31,6 +31,9 @@ from .io.base import concatenate_raws
from .io.chpi import get_chpi_positions
from .io.meas_info import create_info
from .io.kit import read_epochs_kit
+from .bem import (make_sphere_model, make_bem_model, make_bem_solution,
+ read_bem_surfaces, write_bem_surface, write_bem_surfaces,
+ read_bem_solution, write_bem_solution)
from .cov import (read_cov, write_cov, Covariance,
compute_covariance, compute_raw_data_covariance,
whiten_evoked, make_ad_hoc_cov)
@@ -52,10 +55,8 @@ from .source_estimate import (read_source_estimate, MixedSourceEstimate,
spatio_temporal_tris_connectivity,
spatio_temporal_dist_connectivity,
save_stc_as_volume, extract_label_time_course)
-from .surface import (read_bem_surfaces, read_surface, write_bem_surface,
- write_surface, decimate_surface, read_morph_map,
- read_bem_solution, get_head_surf,
- get_meg_helmet_surf)
+from .surface import (read_surface, write_surface, decimate_surface,
+ read_morph_map, get_head_surf, get_meg_helmet_surf)
from .source_space import (read_source_spaces, vertex_to_mni,
write_source_spaces, setup_source_space,
setup_volume_source_space, SourceSpaces,
@@ -78,7 +79,6 @@ from .selection import read_selection
from .dipole import read_dipole, Dipole, fit_dipole
from . import channels
from .channels import equalize_channels, rename_channels, find_layout
-from .bem import make_sphere_model
from . import beamformer
from . import commands
diff --git a/mne/bem.py b/mne/bem.py
index d401bd6d2..a14d8f21c 100644
--- a/mne/bem.py
+++ b/mne/bem.py
@@ -12,10 +12,518 @@ import numpy as np
from scipy import linalg
from .fixes import partial
-from .utils import (verbose, logger, run_subprocess, get_subjects_dir)
+from .utils import (verbose, logger, run_subprocess, deprecated,
+ get_subjects_dir)
from .io.constants import FIFF
+from .io.write import (start_file, start_block, write_float, write_int,
+ write_float_matrix, write_int_matrix, end_block,
+ end_file)
+from .io.tag import find_tag
+from .io.tree import dir_tree_find
+from .io.open import fiff_open
from .externals.six import string_types
-from .surface import read_surface, write_bem_surface
+
+
+# ############################################################################
+# Compute BEM solution
+
+# define VEC_DIFF(from,to,diff) {\
+# (diff)[X] = (to)[X] - (from)[X];\
+# (diff)[Y] = (to)[Y] - (from)[Y];\
+# (diff)[Z] = (to)[Z] - (from)[Z];\
+# }
+
+# The following approach is based on:
+#
+# de Munck JC: "A linear discretization of the volume conductor boundary
+# integral equation using analytically integrated elements",
+# IEEE Trans Biomed Eng. 1992 39(9) : 986 - 990
+#
+
+
+def _calc_beta(rk, rk_norm, rk1, rk1_norm):
+ """These coefficients are used to calculate the magic vector omega"""
+ rkk1 = rk1[0] - rk[0]
+ size = np.sqrt(np.dot(rkk1, rkk1))
+ rkk1 /= size
+ num = rk_norm + np.dot(rk, rkk1)
+ den = rk1_norm + np.dot(rk1, rkk1)
+ res = np.log(num / den) / size
+ return res
+
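+# Note: restating what _calc_beta evaluates, for clarity. With the unit
+# triangle-edge direction \hat u = (r_{k+1} - r_k)/\|r_{k+1} - r_k\| and
+# r_k, r_{k+1} taken relative to each observation point, it returns the
+# de Munck line-integral coefficient
+#
+#   \beta = \frac{1}{\|r_{k+1} - r_k\|}
+#           \ln\frac{\|r_k\| + r_k \cdot \hat u}{\|r_{k+1}\| + r_{k+1} \cdot \hat u}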
+
+def _lin_pot_coeff(fros, tri_rr, tri_nn, tri_area):
+ """The linear potential matrix element computations"""
+ from .source_space import _fast_cross_nd_sum
+ omega = np.zeros((len(fros), 3))
+
+ # we replicate a little bit of the _get_solids code here for speed
+ v1 = tri_rr[np.newaxis, 0, :] - fros
+ v2 = tri_rr[np.newaxis, 1, :] - fros
+ v3 = tri_rr[np.newaxis, 2, :] - fros
+ triples = _fast_cross_nd_sum(v1, v2, v3)
+ l1 = np.sqrt(np.sum(v1 * v1, axis=1))
+ l2 = np.sqrt(np.sum(v2 * v2, axis=1))
+ l3 = np.sqrt(np.sum(v3 * v3, axis=1))
+ ss = (l1 * l2 * l3 +
+ np.sum(v1 * v2, axis=1) * l3 +
+ np.sum(v1 * v3, axis=1) * l2 +
+ np.sum(v2 * v3, axis=1) * l1)
+ solids = np.arctan2(triples, ss)
+
+ # We *could* subselect the good points from v1, v2, v3, triples, solids,
+ # l1, l2, and l3, but there are *very* few bad points. So instead we do
+ # some unnecessary calculations, and then omit them from the final
+ # solution. These three lines ensure we don't get invalid values in
+ # _calc_beta.
+ bad_mask = np.abs(solids) < np.pi / 1e6
+ l1[bad_mask] = 1.
+ l2[bad_mask] = 1.
+ l3[bad_mask] = 1.
+
+ # Calculate the magic vector vec_omega
+ beta = [_calc_beta(v1, l1, v2, l2)[:, np.newaxis],
+ _calc_beta(v2, l2, v3, l3)[:, np.newaxis],
+ _calc_beta(v3, l3, v1, l1)[:, np.newaxis]]
+ vec_omega = (beta[2] - beta[0]) * v1
+ vec_omega += (beta[0] - beta[1]) * v2
+ vec_omega += (beta[1] - beta[2]) * v3
+
+ area2 = 2.0 * tri_area
+ n2 = 1.0 / (area2 * area2)
+ # leave omega = 0 otherwise
+ # Put it all together...
+ yys = [v1, v2, v3]
+ idx = [0, 1, 2, 0, 2]
+ for k in range(3):
+ diff = yys[idx[k - 1]] - yys[idx[k + 1]]
+ zdots = _fast_cross_nd_sum(yys[idx[k + 1]], yys[idx[k - 1]], tri_nn)
+ omega[:, k] = -n2 * (area2 * zdots * 2. * solids -
+ triples * (diff * vec_omega).sum(axis=-1))
+ # omit the bad points from the solution
+ omega[bad_mask] = 0.
+ return omega
+
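+# Note: the ``solids = np.arctan2(triples, ss)`` step above is half of the
+# Van Oosterom & Strackee (1983) solid-angle formula for a triangle with
+# vertices v_1, v_2, v_3 seen from the origin (with l_i = \|v_i\|):
+#
+#   \Omega = 2 \arctan\frac{v_1 \cdot (v_2 \times v_3)}
+#            {l_1 l_2 l_3 + (v_1 \cdot v_2) l_3 + (v_1 \cdot v_3) l_2 + (v_2 \cdot v_3) l_1}
+#
+# hence the factor ``2. * solids`` in the omega update.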
+
+def _correct_auto_elements(surf, mat):
+ """Improve auto-element approximation..."""
+ pi2 = 2.0 * np.pi
+ tris_flat = surf['tris'].ravel()
+ misses = pi2 - mat.sum(axis=1)
+ for j, miss in enumerate(misses):
+ # How much is missing?
+ n_memb = len(surf['neighbor_tri'][j])
+ # The node itself receives one half
+ mat[j, j] = miss / 2.0
+ # The rest is divided evenly among the member nodes...
+ miss /= (4.0 * n_memb)
+ members = np.where(j == tris_flat)[0]
+ mods = members % 3
+ offsets = np.array([[1, 2], [-1, 1], [-1, -2]])
+ tri_1 = members + offsets[mods, 0]
+ tri_2 = members + offsets[mods, 1]
+ for t1, t2 in zip(tri_1, tri_2):
+ mat[j, tris_flat[t1]] += miss
+ mat[j, tris_flat[t2]] += miss
+ return
+
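+# Note: the bookkeeping above restores the full 2*pi row sum. The diagonal
+# absorbs miss/2, and each of the n_memb member triangles adds
+# 2 * miss/(4*n_memb) = miss/(2*n_memb) through its two other vertices,
+# which totals the remaining miss/2 over all member triangles.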
+
+def _fwd_bem_lin_pot_coeff(surfs):
+ """Calculate the coefficients for linear collocation approach"""
+ # taken from fwd_bem_linear_collocation.c
+ nps = [surf['np'] for surf in surfs]
+ np_tot = sum(nps)
+ coeff = np.zeros((np_tot, np_tot))
+ offsets = np.cumsum(np.concatenate(([0], nps)))
+ for si_1, surf1 in enumerate(surfs):
+ rr_ord = np.arange(nps[si_1])
+ for si_2, surf2 in enumerate(surfs):
+ logger.info(" %s (%d) -> %s (%d) ..." %
+ (_bem_explain_surface(surf1['id']), nps[si_1],
+ _bem_explain_surface(surf2['id']), nps[si_2]))
+ tri_rr = surf2['rr'][surf2['tris']]
+ tri_nn = surf2['tri_nn']
+ tri_area = surf2['tri_area']
+ submat = coeff[offsets[si_1]:offsets[si_1 + 1],
+ offsets[si_2]:offsets[si_2 + 1]] # view
+ for k in range(surf2['ntri']):
+ tri = surf2['tris'][k]
+ if si_1 == si_2:
+ skip_idx = ((rr_ord == tri[0]) |
+ (rr_ord == tri[1]) |
+ (rr_ord == tri[2]))
+ else:
+ skip_idx = list()
+ # No contribution from a triangle that
+ # this vertex belongs to
+ # if sidx1 == sidx2 and (tri == j).any():
+ # continue
+ # Otherwise do the hard job
+ coeffs = _lin_pot_coeff(surf1['rr'], tri_rr[k], tri_nn[k],
+ tri_area[k])
+ coeffs[skip_idx] = 0.
+ submat[:, tri] -= coeffs
+ if si_1 == si_2:
+ _correct_auto_elements(surf1, submat)
+ return coeff
+
+
+def _fwd_bem_multi_solution(solids, gamma, nps):
+ """Do multi surface solution
+
+ * Invert I - solids/(2*M_PI)
+ * Take deflation into account
+ * The matrix is destroyed after inversion
+ * This is the general multilayer case
+
+ """
+ pi2 = 1.0 / (2 * np.pi)
+ n_tot = np.sum(nps)
+ assert solids.shape == (n_tot, n_tot)
+ nsurf = len(nps)
+ defl = 1.0 / n_tot
+ # Modify the matrix
+ offsets = np.cumsum(np.concatenate(([0], nps)))
+ for si_1 in range(nsurf):
+ for si_2 in range(nsurf):
+ mult = pi2 if gamma is None else pi2 * gamma[si_1, si_2]
+ slice_j = slice(offsets[si_1], offsets[si_1 + 1])
+ slice_k = slice(offsets[si_2], offsets[si_2 + 1])
+ solids[slice_j, slice_k] = defl - solids[slice_j, slice_k] * mult
+ solids += np.eye(n_tot)
+ return linalg.inv(solids, overwrite_a=True)
+
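+# Note: written blockwise over the surfaces, the matrix inverted above is
+#
+#   I + \frac{1}{N}\,\mathbf{1}\mathbf{1}^T - \frac{\gamma_{jk}}{2\pi}\,\Omega_{jk}
+#
+# where \Omega_{jk} holds the potential coefficients between surfaces j and
+# k, N is the total number of nodes (the rank-one term is the deflation),
+# and \gamma_{jk} = 1 throughout when ``gamma is None``.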
+
+def _fwd_bem_homog_solution(solids, nps):
+ """Helper to make a homogeneous solution"""
+ return _fwd_bem_multi_solution(solids, None, nps)
+
+
+def _fwd_bem_ip_modify_solution(solution, ip_solution, ip_mult, n_tri):
+ """Modify the solution according to the IP approach"""
+ n_last = n_tri[-1]
+ mult = (1.0 + ip_mult) / ip_mult
+
+ logger.info(' Combining...')
+ offsets = np.cumsum(np.concatenate(([0], n_tri)))
+ for si in range(len(n_tri)):
+ # Pick the correct submatrix (right column) and multiply
+ sub = solution[offsets[si]:offsets[si + 1], np.sum(n_tri[:-1]):]
+ # Multiply
+ sub -= 2 * np.dot(sub, ip_solution)
+
+ # The lower right corner is a special case
+ sub[-n_last:, -n_last:] += mult * ip_solution
+
+ # Final scaling
+ logger.info(' Scaling...')
+ solution *= ip_mult
+ return
+
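+# Note: this is the "isolated problem" (IP) correction of Hamalainen &
+# Sarvas (1989), applied below when the skull/brain conductivity ratio
+# (``ip_mult``) is small, where the poorly conducting skull would otherwise
+# degrade the accuracy of the plain collocation solution.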
+
+def _fwd_bem_linear_collocation_solution(m):
+ """Compute the linear collocation potential solution"""
+ # first, add surface geometries
+ from .surface import _complete_surface_info
+ for surf in m['surfs']:
+ _complete_surface_info(surf, verbose=False)
+
+ logger.info('Computing the linear collocation solution...')
+ logger.info(' Matrix coefficients...')
+ coeff = _fwd_bem_lin_pot_coeff(m['surfs'])
+ m['nsol'] = len(coeff)
+ logger.info(" Inverting the coefficient matrix...")
+ nps = [surf['np'] for surf in m['surfs']]
+ m['solution'] = _fwd_bem_multi_solution(coeff, m['gamma'], nps)
+ if len(m['surfs']) == 3:
+ ip_mult = m['sigma'][1] / m['sigma'][2]
+ if ip_mult <= FIFF.FWD_BEM_IP_APPROACH_LIMIT:
+ logger.info('IP approach required...')
+ logger.info(' Matrix coefficients (homog)...')
+ coeff = _fwd_bem_lin_pot_coeff([m['surfs'][-1]])
+ logger.info(' Inverting the coefficient matrix (homog)...')
+ ip_solution = _fwd_bem_homog_solution(coeff,
+ [m['surfs'][-1]['np']])
+ logger.info(' Modify the original solution to incorporate '
+ 'IP approach...')
+ _fwd_bem_ip_modify_solution(m['solution'], ip_solution, ip_mult,
+ nps)
+ m['bem_method'] = FIFF.FWD_BEM_LINEAR_COLL
+ logger.info("Solution ready.")
+
+
+@verbose
+def make_bem_solution(surfs, verbose=None):
+ """Create a BEM solution using the linear collocation approach
+
+ Parameters
+ ----------
+ surfs : list of dict
+ The BEM surfaces to use.
+ verbose : bool, str, int, or None
+ If not None, override default verbose level (see mne.verbose).
+
+ Returns
+ -------
+ bem : dict
+ The BEM solution.
+
+ Notes
+ -----
+ .. versionadded:: 0.10.0
+
+ See Also
+ --------
+ make_bem_model
+ read_bem_surfaces
+ write_bem_surfaces
+ read_bem_solution
+ write_bem_solution
+ """
+ logger.info('Approximation method : Linear collocation\n')
+ if isinstance(surfs, string_types):
+ # Load the surfaces
+ logger.info('Loading surfaces...')
+ surfs = read_bem_surfaces(surfs)
+ bem = dict(surfs=surfs)
+ _add_gamma_multipliers(bem)
+ if len(bem['surfs']) == 3:
+ logger.info('Three-layer model surfaces loaded.')
+ elif len(bem['surfs']) == 1:
+ logger.info('Homogeneous model surface loaded.')
+ else:
+ raise RuntimeError('Only 1- or 3-layer BEM computations supported')
+ _fwd_bem_linear_collocation_solution(bem)
+ logger.info('BEM geometry computations complete.')
+ return bem
+
+
+# ############################################################################
+# Make BEM model
+
+def _ico_downsample(surf, dest_grade):
+ """Downsample the surface if isomorphic to a subdivided icosahedron"""
+ from .surface import _get_ico_surface
+ n_tri = surf['ntri']
+ found = -1
+ bad_msg = ("A surface with %d triangles cannot be isomorphic with a "
+ "subdivided icosahedron." % surf['ntri'])
+ if n_tri % 20 != 0:
+ raise RuntimeError(bad_msg)
+ n_tri = n_tri // 20
+ found = int(round(np.log(n_tri) / np.log(4)))
+ if n_tri != 4 ** found:
+ raise RuntimeError(bad_msg)
+ del n_tri
+
+ if dest_grade > found:
+ raise RuntimeError('For this surface, decimation grade should be %d '
+ 'or less, not %s.' % (found, dest_grade))
+
+ source = _get_ico_surface(found)
+ dest = _get_ico_surface(dest_grade, patch_stats=True)
+ del dest['tri_cent']
+ del dest['tri_nn']
+ del dest['neighbor_tri']
+ del dest['tri_area']
+ if not np.array_equal(source['tris'], surf['tris']):
+ raise RuntimeError('The source surface has a matching number of '
+ 'triangles but ordering is wrong')
+ logger.info('Going from %dth to %dth subdivision of an icosahedron '
+ '(n_tri: %d -> %d)' % (found, dest_grade, surf['ntri'],
+ dest['ntri']))
+ # Find the mapping
+ dest['rr'] = surf['rr'][_get_ico_map(source, dest)]
+ return dest
+
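+# Note: the grade detection above relies on a g-times subdivided icosahedron
+# having n_tri = 20 * 4**g triangles, so g = log_4(n_tri / 20); e.g.
+# 20 * 4**4 = 5120 triangles for an ico-4 surface.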
+
+def _get_ico_map(fro, to):
+ """Helper to get a mapping between ico surfaces"""
+ from .surface import _compute_nearest
+ nearest, dists = _compute_nearest(fro['rr'], to['rr'], return_dists=True)
+ n_bads = (dists > 5e-3).sum()
+ if n_bads > 0:
+ raise RuntimeError('No matching vertex for %d destination vertices'
+ % (n_bads))
+ return nearest
+
+
+def _order_surfaces(surfs):
+ """Reorder the surfaces"""
+ if len(surfs) != 3:
+ return surfs
+ # we have three surfaces
+ surf_order = [FIFF.FIFFV_BEM_SURF_ID_HEAD,
+ FIFF.FIFFV_BEM_SURF_ID_SKULL,
+ FIFF.FIFFV_BEM_SURF_ID_BRAIN]
+ ids = np.array([surf['id'] for surf in surfs])
+ if set(ids) != set(surf_order):
+ raise RuntimeError('bad surface ids: %s' % ids)
+ order = [np.where(ids == id_)[0][0] for id_ in surf_order]
+ surfs = [surfs[idx] for idx in order]
+ return surfs
+
+
+def _assert_complete_surface(surf):
+ """Check the sum of solid angles as seen from inside"""
+ # from surface_checks.c
+ from .source_space import _get_solids
+ tot_angle = 0.
+ # Center of mass....
+ cm = surf['rr'].mean(axis=0)
+ logger.info('%s CM is %6.2f %6.2f %6.2f mm' %
+ (_surf_name[surf['id']],
+ 1000 * cm[0], 1000 * cm[1], 1000 * cm[2]))
+ tot_angle = _get_solids(surf['rr'][surf['tris']], cm[np.newaxis, :])[0]
+ if np.abs(tot_angle / (2 * np.pi) - 1.0) > 1e-5:
+ raise RuntimeError('Surface %s is not complete (sum of solid angles '
+ '= %g * 4*PI instead).' %
+ (_surf_name[surf['id']], tot_angle))
+
+
+_surf_name = {
+ FIFF.FIFFV_BEM_SURF_ID_HEAD: 'outer skin ',
+ FIFF.FIFFV_BEM_SURF_ID_SKULL: 'outer skull',
+ FIFF.FIFFV_BEM_SURF_ID_BRAIN: 'inner skull',
+ FIFF.FIFFV_BEM_SURF_ID_UNKNOWN: 'unknown ',
+}
+
+
+def _assert_inside(fro, to):
+ """Helper to check one set of points is inside a surface"""
+ # this is "is_inside" in surface_checks.c
+ from .source_space import _get_solids
+ tot_angle = _get_solids(to['rr'][to['tris']], fro['rr'])
+ if (np.abs(tot_angle / (2 * np.pi) - 1.0) > 1e-5).any():
+ raise RuntimeError('Surface %s is not completely inside surface %s'
+ % (_surf_name[fro['id']], _surf_name[to['id']]))
+
+
+def _check_surfaces(surfs):
+ """Check that the surfaces are complete and non-intersecting"""
+ for surf in surfs:
+ _assert_complete_surface(surf)
+ # Then check the topology
+ for surf_1, surf_2 in zip(surfs[:-1], surfs[1:]):
+ logger.info('Checking that %s surface is inside %s surface...' %
+ (_surf_name[surf_2['id']], _surf_name[surf_1['id']]))
+ _assert_inside(surf_2, surf_1)
+
+
+def _check_surface_size(surf):
+ """Check that the coordinate limits are reasonable"""
+ sizes = surf['rr'].max(axis=0) - surf['rr'].min(axis=0)
+ if (sizes < 0.05).any():
+ raise RuntimeError('Dimensions of the surface %s seem too small '
+                           '(%9.5f mm). Maybe the unit of measure is '
+ 'meters instead of mm' %
+ (_surf_name[surf['id']], 1000 * sizes.min()))
+
+
+def _check_thicknesses(surfs):
+ """How close are we?"""
+ from .surface import _compute_nearest
+ for surf_1, surf_2 in zip(surfs[:-1], surfs[1:]):
+ min_dist = _compute_nearest(surf_1['rr'], surf_2['rr'],
+ return_dists=True)[0]
+ min_dist = min_dist.min()
+ logger.info('Checking distance between %s and %s surfaces...' %
+ (_surf_name[surf_1['id']], _surf_name[surf_2['id']]))
+ logger.info('Minimum distance between the %s and %s surfaces is '
+ 'approximately %6.1f mm' %
+ (_surf_name[surf_1['id']], _surf_name[surf_2['id']],
+ 1000 * min_dist))
+
+
+def _surfaces_to_bem(fname_surfs, ids, sigmas, ico=None):
+ """Convert surfaces to a BEM
+ """
+ from .surface import _read_surface_geom
+ # equivalent of mne_surf2bem
+ surfs = list()
+ assert len(fname_surfs) in (1, 3)
+ for fname in fname_surfs:
+ surfs.append(_read_surface_geom(fname, patch_stats=False,
+ verbose=False))
+ surfs[-1]['rr'] /= 1000.
+ # Downsampling if the surface is isomorphic with a subdivided icosahedron
+ if ico is not None:
+ for si, surf in enumerate(surfs):
+ surfs[si] = _ico_downsample(surf, ico)
+ for surf, id_ in zip(surfs, ids):
+ surf['id'] = id_
+
+ # Shifting surfaces is not implemented here
+
+ # Order the surfaces for the benefit of the topology checks
+ for surf, sigma in zip(surfs, sigmas):
+ surf['sigma'] = sigma
+ surfs = _order_surfaces(surfs)
+
+ # Check topology as best we can
+ _check_surfaces(surfs)
+ for surf in surfs:
+ _check_surface_size(surf)
+ _check_thicknesses(surfs)
+ logger.info('Surfaces passed the basic topology checks.')
+ return surfs
+
+
+@verbose
+def make_bem_model(subject, ico=4, conductivity=(0.3, 0.006, 0.3),
+ subjects_dir=None, verbose=None):
+ """Create a BEM model for a subject
+
+ Parameters
+ ----------
+ subject : str
+ The subject.
+ ico : int | None
+        The surface ico downsampling to use, e.g. 5=20480, 4=5120, 3=1280.
+    conductivity : array of float, shape (3,) or (1,)
+ The conductivities to use for each shell. Should be a single element
+ for a one-layer model, or three elements for a three-layer model.
+ Defaults to ``[0.3, 0.006, 0.3]``. The MNE-C default for a
+ single-layer model would be ``[0.3]``.
+ subjects_dir : string, or None
+ Path to SUBJECTS_DIR if it is not set in the environment.
+ verbose : bool, str, int, or None
+ If not None, override default verbose level (see mne.verbose).
+
+ Returns
+ -------
+ surfaces : list of dict
+ The BEM surfaces.
+
+ Notes
+ -----
+ .. versionadded:: 0.10.0
+
+ See Also
+ --------
+ make_bem_solution
+ make_sphere_model
+ read_bem_surfaces
+ write_bem_surfaces
+ """
+ conductivity = np.array(conductivity, float)
+ if conductivity.ndim != 1 or conductivity.size not in (1, 3):
+ raise ValueError('conductivity must be 1D array-like with 1 or 3 '
+ 'elements')
+ subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+ subject_dir = op.join(subjects_dir, subject)
+ bem_dir = op.join(subject_dir, 'bem')
+ inner_skull = op.join(bem_dir, 'inner_skull.surf')
+ outer_skull = op.join(bem_dir, 'outer_skull.surf')
+ outer_skin = op.join(bem_dir, 'outer_skin.surf')
+ surfaces = [inner_skull, outer_skull, outer_skin]
+ ids = [FIFF.FIFFV_BEM_SURF_ID_BRAIN,
+ FIFF.FIFFV_BEM_SURF_ID_SKULL,
+ FIFF.FIFFV_BEM_SURF_ID_HEAD]
+ logger.info('Creating the BEM geometry...')
+ if len(conductivity) == 1:
+ surfaces = surfaces[:1]
+ ids = ids[:1]
+ surfaces = _surfaces_to_bem(surfaces, ids, conductivity, ico)
+ logger.info('Complete.\n')
+ return surfaces
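+
+
+# A minimal usage sketch for the functions added above (the subjects
+# directory and output filename here are illustrative placeholders):
+#
+#     model = make_bem_model('sample', ico=4,
+#                            conductivity=(0.3, 0.006, 0.3),
+#                            subjects_dir='/path/to/subjects')
+#     bem = make_bem_solution(model)
+#     write_bem_solution('sample-5120-5120-5120-bem-sol.fif', bem)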
# ############################################################################
@@ -134,7 +642,7 @@ def _fwd_eeg_fit_berg_scherg(m, nterms, nfit):
# Do the nonlinear minimization, constraining mu to the interval [-1, +1]
mu_0 = np.random.RandomState(0).rand(nfit) * f
fun = partial(_one_step, u=u)
- cons = []
+ cons = list()
for ii in range(nfit):
for val in [1., -1.]:
cons.append({'type': 'ineq',
@@ -188,6 +696,11 @@ def make_sphere_model(r0=(0., 0., 0.04), head_radius=0.09, info=None,
Notes
-----
.. versionadded:: 0.9.0
+
+ See Also
+ --------
+ make_bem_model
+ make_bem_solution
"""
for name in ('r0', 'head_radius'):
param = locals()[name]
@@ -207,7 +720,7 @@ def make_sphere_model(r0=(0., 0., 0.04), head_radius=0.09, info=None,
head_radius = head_radius_fit / 1000.
sphere = dict(r0=np.array(r0), is_sphere=True,
coord_frame=FIFF.FIFFV_COORD_HEAD)
- sphere['layers'] = []
+ sphere['layers'] = list()
if head_radius is not None:
# Eventually these could be configurable...
relative_radii = np.array(relative_radii, float)
@@ -239,7 +752,7 @@ def make_sphere_model(r0=(0., 0., 0.04), head_radius=0.09, info=None,
rv = _fwd_eeg_fit_berg_scherg(sphere, 200, 3)
logger.info('\nEquiv. model fitting -> RV = %g %%' % (100 * rv))
for k in range(3):
- logger.info('mu%d = %g\tlambda%d = %g'
+ logger.info('mu%d = %g lambda%d = %g'
% (k + 1, sphere['mu'][k], k + 1,
layers[-1]['sigma'] * sphere['lambda'][k]))
logger.info('Set up EEG sphere model with scalp radius %7.1f mm\n'
@@ -371,6 +884,7 @@ def make_watershed_bem(subject, subjects_dir=None, overwrite=False,
.. versionadded:: 0.10
"""
+ from .surface import read_surface
env = os.environ.copy()
if not os.environ.get('FREESURFER_HOME'):
@@ -447,6 +961,358 @@ def make_watershed_bem(subject, subjects_dir=None, overwrite=False,
points *= 1e-3
surf = dict(coord_frame=5, id=4, nn=None, np=len(points),
ntri=len(tris), rr=points, sigma=1, tris=tris)
- write_bem_surface(subject + '-head.fif', surf)
+ write_bem_surfaces(subject + '-head.fif', surf)
logger.info('Created %s/%s-head.fif\n\nComplete.' % (bem_dir, subject))
+
+
+# ############################################################################
+# Read
+
+@verbose
+def read_bem_surfaces(fname, patch_stats=False, s_id=None, verbose=None):
+ """Read the BEM surfaces from a FIF file
+
+ Parameters
+ ----------
+ fname : string
+ The name of the file containing the surfaces.
+ patch_stats : bool, optional (default False)
+ Calculate and add cortical patch statistics to the surfaces.
+ s_id : int | None
+ If int, only read and return the surface with the given s_id.
+ An error will be raised if it doesn't exist. If None, all
+ surfaces are read and returned.
+ verbose : bool, str, int, or None
+ If not None, override default verbose level (see mne.verbose).
+
+ Returns
+ -------
+ surf: list | dict
+ A list of dictionaries that each contain a surface. If s_id
+ is not None, only the requested surface will be returned.
+ """
+ from .surface import _complete_surface_info
+ # Default coordinate frame
+ coord_frame = FIFF.FIFFV_COORD_MRI
+ # Open the file, create directory
+ f, tree, _ = fiff_open(fname)
+ with f as fid:
+ # Find BEM
+ bem = dir_tree_find(tree, FIFF.FIFFB_BEM)
+ if bem is None or len(bem) == 0:
+ raise ValueError('BEM data not found')
+
+ bem = bem[0]
+ # Locate all surfaces
+ bemsurf = dir_tree_find(bem, FIFF.FIFFB_BEM_SURF)
+ if bemsurf is None:
+ raise ValueError('BEM surface data not found')
+
+ logger.info(' %d BEM surfaces found' % len(bemsurf))
+ # Coordinate frame possibly at the top level
+ tag = find_tag(fid, bem, FIFF.FIFF_BEM_COORD_FRAME)
+ if tag is not None:
+ coord_frame = tag.data
+ # Read all surfaces
+ if s_id is not None:
+ surf = [_read_bem_surface(fid, bsurf, coord_frame, s_id)
+ for bsurf in bemsurf]
+ surf = [s for s in surf if s is not None]
+ if not len(surf) == 1:
+ raise ValueError('surface with id %d not found' % s_id)
+ else:
+ surf = list()
+ for bsurf in bemsurf:
+ logger.info(' Reading a surface...')
+ this = _read_bem_surface(fid, bsurf, coord_frame)
+ surf.append(this)
+ logger.info('[done]')
+ logger.info(' %d BEM surfaces read' % len(surf))
+ if patch_stats:
+ for this in surf:
+ _complete_surface_info(this)
+ return surf[0] if s_id is not None else surf
+
+
+def _read_bem_surface(fid, this, def_coord_frame, s_id=None):
+ """Read one bem surface
+ """
+ # fid should be open as a context manager here
+ res = dict()
+ # Read all the interesting stuff
+ tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_ID)
+
+ if tag is None:
+ res['id'] = FIFF.FIFFV_BEM_SURF_ID_UNKNOWN
+ else:
+ res['id'] = int(tag.data)
+
+ if s_id is not None and res['id'] != s_id:
+ return None
+
+ tag = find_tag(fid, this, FIFF.FIFF_BEM_SIGMA)
+ res['sigma'] = 1.0 if tag is None else float(tag.data)
+
+ tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NNODE)
+ if tag is None:
+ raise ValueError('Number of vertices not found')
+
+ res['np'] = int(tag.data)
+
+ tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NTRI)
+ if tag is None:
+ raise ValueError('Number of triangles not found')
+ res['ntri'] = int(tag.data)
+
+ tag = find_tag(fid, this, FIFF.FIFF_MNE_COORD_FRAME)
+ if tag is None:
+ tag = find_tag(fid, this, FIFF.FIFF_BEM_COORD_FRAME)
+ if tag is None:
+ res['coord_frame'] = def_coord_frame
+ else:
+ res['coord_frame'] = tag.data
+ else:
+ res['coord_frame'] = tag.data
+
+ # Vertices, normals, and triangles
+ tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NODES)
+ if tag is None:
+ raise ValueError('Vertex data not found')
+
+ res['rr'] = tag.data.astype(np.float) # XXX : double because of mayavi bug
+ if res['rr'].shape[0] != res['np']:
+ raise ValueError('Vertex information is incorrect')
+
+ tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS)
+ if tag is None:
+        tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NORMALS)
+ if tag is None:
+ res['nn'] = list()
+ else:
+ res['nn'] = tag.data
+ if res['nn'].shape[0] != res['np']:
+ raise ValueError('Vertex normal information is incorrect')
+
+ tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_TRIANGLES)
+ if tag is None:
+ raise ValueError('Triangulation not found')
+
+ res['tris'] = tag.data - 1 # index start at 0 in Python
+ if res['tris'].shape[0] != res['ntri']:
+ raise ValueError('Triangulation information is incorrect')
+
+ return res
+
+
+@verbose
+def read_bem_solution(fname, verbose=None):
+ """Read the BEM solution from a file
+
+ Parameters
+ ----------
+ fname : string
+ The file containing the BEM solution.
+ verbose : bool, str, int, or None
+ If not None, override default verbose level (see mne.verbose).
+
+ Returns
+ -------
+ bem : dict
+ The BEM solution.
+ """
+ # mirrors fwd_bem_load_surfaces from fwd_bem_model.c
+ logger.info('Loading surfaces...')
+ bem_surfs = read_bem_surfaces(fname, patch_stats=True, verbose=False)
+ if len(bem_surfs) == 3:
+ logger.info('Three-layer model surfaces loaded.')
+ needed = np.array([FIFF.FIFFV_BEM_SURF_ID_HEAD,
+ FIFF.FIFFV_BEM_SURF_ID_SKULL,
+ FIFF.FIFFV_BEM_SURF_ID_BRAIN])
+ if not all(x['id'] in needed for x in bem_surfs):
+ raise RuntimeError('Could not find necessary BEM surfaces')
+ # reorder surfaces as necessary (shouldn't need to?)
+ reorder = [None] * 3
+ for x in bem_surfs:
+ reorder[np.where(x['id'] == needed)[0][0]] = x
+ bem_surfs = reorder
+ elif len(bem_surfs) == 1:
+ if not bem_surfs[0]['id'] == FIFF.FIFFV_BEM_SURF_ID_BRAIN:
+ raise RuntimeError('BEM Surfaces not found')
+ logger.info('Homogeneous model surface loaded.')
+
+ # convert from surfaces to solution
+ bem = dict(surfs=bem_surfs)
+ logger.info('\nLoading the solution matrix...\n')
+ f, tree, _ = fiff_open(fname)
+ with f as fid:
+ # Find the BEM data
+ nodes = dir_tree_find(tree, FIFF.FIFFB_BEM)
+ if len(nodes) == 0:
+ raise RuntimeError('No BEM data in %s' % fname)
+ bem_node = nodes[0]
+
+ # Approximation method
+ tag = find_tag(f, bem_node, FIFF.FIFF_BEM_APPROX)
+ if tag is None:
+ raise RuntimeError('No BEM solution found in %s' % fname)
+ method = tag.data[0]
+ if method not in (FIFF.FIFFV_BEM_APPROX_CONST,
+ FIFF.FIFFV_BEM_APPROX_LINEAR):
+ raise RuntimeError('Cannot handle BEM approximation method : %d'
+ % method)
+
+ tag = find_tag(fid, bem_node, FIFF.FIFF_BEM_POT_SOLUTION)
+ dims = tag.data.shape
+ if len(dims) != 2:
+ raise RuntimeError('Expected a two-dimensional solution matrix '
+                               'instead of a %d dimensional one' % len(dims))
+
+ dim = 0
+ for surf in bem['surfs']:
+ if method == FIFF.FIFFV_BEM_APPROX_LINEAR:
+ dim += surf['np']
+ else: # method == FIFF.FIFFV_BEM_APPROX_CONST
+ dim += surf['ntri']
+
+ if dims[0] != dim or dims[1] != dim:
+ raise RuntimeError('Expected a %d x %d solution matrix instead of '
+ 'a %d x %d one' % (dim, dim, dims[1], dims[0]))
+ sol = tag.data
+ nsol = dims[0]
+
+ bem['solution'] = sol
+ bem['nsol'] = nsol
+ bem['bem_method'] = method
+
+ # Gamma factors and multipliers
+ _add_gamma_multipliers(bem)
+ kind = {
+ FIFF.FIFFV_BEM_APPROX_CONST: 'constant collocation',
+        FIFF.FIFFV_BEM_APPROX_LINEAR: 'linear collocation',
+ }[bem['bem_method']]
+ logger.info('Loaded %s BEM solution from %s', kind, fname)
+ return bem
+
+
+def _add_gamma_multipliers(bem):
+ """Helper to add gamma and multipliers in-place"""
+ bem['sigma'] = np.array([surf['sigma'] for surf in bem['surfs']])
+ # Dirty trick for the zero conductivity outside
+ sigma = np.r_[0.0, bem['sigma']]
+ bem['source_mult'] = 2.0 / (sigma[1:] + sigma[:-1])
+ bem['field_mult'] = sigma[1:] - sigma[:-1]
+ # make sure subsequent "zip"s work correctly
+ assert len(bem['surfs']) == len(bem['field_mult'])
+ bem['gamma'] = ((sigma[1:] - sigma[:-1])[np.newaxis, :] /
+ (sigma[1:] + sigma[:-1])[:, np.newaxis])
+ bem['is_sphere'] = False
+
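+# Note: with the surfaces ordered outermost first and sigma = 0 prepended
+# for the space outside the head, writing \sigma_j^- and \sigma_j^+ for the
+# conductivity just inside and just outside surface j, the arrays above are
+#
+#   source\_mult_j = \frac{2}{\sigma_j^- + \sigma_j^+}, \quad
+#   field\_mult_j = \sigma_j^- - \sigma_j^+, \quad
+#   \gamma_{jk} = \frac{\sigma_k^- - \sigma_k^+}{\sigma_j^- + \sigma_j^+}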
+
+_surf_dict = {'inner_skull': FIFF.FIFFV_BEM_SURF_ID_BRAIN,
+ 'outer_skull': FIFF.FIFFV_BEM_SURF_ID_SKULL,
+ 'head': FIFF.FIFFV_BEM_SURF_ID_HEAD}
+
+
+def _bem_find_surface(bem, id_):
+ """Find surface from already-loaded BEM"""
+ if isinstance(id_, string_types):
+ name = id_
+ id_ = _surf_dict[id_]
+ else:
+ name = _bem_explain_surface(id_)
+ idx = np.where(np.array([s['id'] for s in bem['surfs']]) == id_)[0]
+ if len(idx) != 1:
+ raise RuntimeError('BEM model does not have the %s triangulation'
+ % name.replace('_', ' '))
+ return bem['surfs'][idx[0]]
+
+
+def _bem_explain_surface(id_):
+ """Return a string corresponding to the given surface ID"""
+ _rev_dict = dict((val, key) for key, val in _surf_dict.items())
+ return _rev_dict[id_]
+
+
+# ############################################################################
+# Write
+
+@deprecated('write_bem_surface is deprecated and will be removed in 0.11, '
+ 'use write_bem_surfaces instead')
+def write_bem_surface(fname, surf):
+ """Write one bem surface
+
+ Parameters
+ ----------
+ fname : string
+ File to write
+ surf : dict
+ A surface structured as obtained with read_bem_surfaces
+ """
+ write_bem_surfaces(fname, surf)
+
+
+def write_bem_surfaces(fname, surfs):
+ """Write BEM surfaces to a fiff file
+
+ Parameters
+ ----------
+ fname : str
+ Filename to write.
+ surfs : dict | list of dict
+ The surfaces, or a single surface.
+ """
+ if isinstance(surfs, dict):
+ surfs = [surfs]
+ with start_file(fname) as fid:
+ start_block(fid, FIFF.FIFFB_BEM)
+ write_int(fid, FIFF.FIFF_BEM_COORD_FRAME, surfs[0]['coord_frame'])
+ _write_bem_surfaces_block(fid, surfs)
+ end_block(fid, FIFF.FIFFB_BEM)
+ end_file(fid)
+
+
+def _write_bem_surfaces_block(fid, surfs):
+ """Helper to actually write bem surfaces"""
+ for surf in surfs:
+ start_block(fid, FIFF.FIFFB_BEM_SURF)
+ write_float(fid, FIFF.FIFF_BEM_SIGMA, surf['sigma'])
+ write_int(fid, FIFF.FIFF_BEM_SURF_ID, surf['id'])
+ write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, surf['coord_frame'])
+ write_int(fid, FIFF.FIFF_BEM_SURF_NNODE, surf['np'])
+ write_int(fid, FIFF.FIFF_BEM_SURF_NTRI, surf['ntri'])
+ write_float_matrix(fid, FIFF.FIFF_BEM_SURF_NODES, surf['rr'])
+ # index start at 0 in Python
+ write_int_matrix(fid, FIFF.FIFF_BEM_SURF_TRIANGLES,
+ surf['tris'] + 1)
+ if 'nn' in surf and surf['nn'] is not None and len(surf['nn']) > 0:
+ write_float_matrix(fid, FIFF.FIFF_BEM_SURF_NORMALS, surf['nn'])
+ end_block(fid, FIFF.FIFFB_BEM_SURF)
+
+
+def write_bem_solution(fname, bem):
+ """Write a BEM model with solution
+
+ Parameters
+ ----------
+ fname : str
+ The filename to use.
+ bem : dict
+ The BEM model with solution to save.
+ """
+ with start_file(fname) as fid:
+ start_block(fid, FIFF.FIFFB_BEM)
+ # Coordinate frame (mainly for backward compatibility)
+ write_int(fid, FIFF.FIFF_BEM_COORD_FRAME,
+ bem['surfs'][0]['coord_frame'])
+ # Surfaces
+ _write_bem_surfaces_block(fid, bem['surfs'])
+ # The potential solution
+ if 'solution' in bem:
+ if bem['bem_method'] != FIFF.FWD_BEM_LINEAR_COLL:
+ raise RuntimeError('Only linear collocation supported')
+ write_int(fid, FIFF.FIFF_BEM_APPROX, FIFF.FIFFV_BEM_APPROX_LINEAR)
+ write_float_matrix(fid, FIFF.FIFF_BEM_POT_SOLUTION,
+ bem['solution'])
+ end_block(fid, FIFF.FIFFB_BEM)
+ end_file(fid)
diff --git a/mne/coreg.py b/mne/coreg.py
index 0ff42a35d..d3df15089 100644
--- a/mne/coreg.py
+++ b/mne/coreg.py
@@ -21,8 +21,8 @@ from .io.meas_info import read_fiducials, write_fiducials
from .label import read_label, Label
from .source_space import (add_source_space_distances, read_source_spaces,
write_source_spaces)
-from .surface import (read_surface, write_surface, read_bem_surfaces,
- write_bem_surface)
+from .surface import read_surface, write_surface
+from .bem import read_bem_surfaces, write_bem_surfaces
from .transforms import rotation, rotation3d, scaling, translation
from .utils import get_config, get_subjects_dir, logger, pformat
from functools import reduce
@@ -851,7 +851,7 @@ def scale_bem(subject_to, bem_name, subject_from=None, scale=None,
% src)
surf0 = surfs[0]
surf0['rr'] = surf0['rr'] * scale
- write_bem_surface(dst, surf0)
+ write_bem_surfaces(dst, surf0)
def scale_labels(subject_to, pattern=None, overwrite=False, subject_from=None,
diff --git a/mne/dipole.py b/mne/dipole.py
index 38d12bac5..5e86deb2e 100644
--- a/mne/dipole.py
+++ b/mne/dipole.py
@@ -22,9 +22,9 @@ from .forward._compute_forward import (_compute_forwards_meeg,
_prep_field_computation)
from .externals.six import string_types
-from .surface import (_bem_find_surface, transform_surface_to,
- _normalize_vectors, _get_ico_surface,
- _bem_explain_surface, _compute_nearest)
+from .surface import (transform_surface_to, _normalize_vectors,
+ _get_ico_surface, _compute_nearest)
+from .bem import _bem_find_surface, _bem_explain_surface
from .source_space import (_make_volume_source_space, SourceSpaces,
_points_outside_surface)
from .parallel import parallel_func
diff --git a/mne/forward/_compute_forward.py b/mne/forward/_compute_forward.py
index 5db0afb6c..668ca4f8a 100644
--- a/mne/forward/_compute_forward.py
+++ b/mne/forward/_compute_forward.py
@@ -687,7 +687,7 @@ def _prep_field_computation(rr, bem, fwd_data, n_jobs, verbose=None):
bem_rr = mults = mri_Q = head_mri_t = None
if not bem['is_sphere']:
- if bem['bem_method'] != 'linear collocation':
+ if bem['bem_method'] != FIFF.FWD_BEM_LINEAR_COLL:
raise RuntimeError('only linear collocation supported')
# Store (and apply soon) μ_0/(4π) factor before source computations
mults = np.repeat(bem['source_mult'] / (4.0 * np.pi),
diff --git a/mne/forward/_make_forward.py b/mne/forward/_make_forward.py
index bd8b6b312..c70d5347c 100644
--- a/mne/forward/_make_forward.py
+++ b/mne/forward/_make_forward.py
@@ -22,7 +22,8 @@ from ..transforms import (invert_transform, transform_surface_to, apply_trans,
from ..utils import logger, verbose
from ..source_space import (read_source_spaces, _filter_source_spaces,
SourceSpaces)
-from ..surface import read_bem_solution, _normalize_vectors, _bem_find_surface
+from ..surface import _normalize_vectors
+from ..bem import read_bem_solution, _bem_find_surface
from ..externals.six import string_types
diff --git a/mne/source_space.py b/mne/source_space.py
index 3a180db60..e58e52390 100644
--- a/mne/source_space.py
+++ b/mne/source_space.py
@@ -17,8 +17,9 @@ from .io.write import (start_block, end_block, write_int,
write_float_sparse_rcs, write_string,
write_float_matrix, write_int_matrix,
write_coord_trans, start_file, end_file, write_id)
+from .bem import read_bem_surfaces
from .surface import (read_surface, _create_surf_spacing, _get_ico_surface,
- _tessellate_sphere_surf, read_bem_surfaces,
+ _tessellate_sphere_surf,
_read_surface_geom, _normalize_vectors,
_complete_surface_info, _compute_nearest,
fast_cross_3d, _fast_cross_nd_sum)
diff --git a/mne/surface.py b/mne/surface.py
index 012d23c9b..d4a04e9b1 100644
--- a/mne/surface.py
+++ b/mne/surface.py
@@ -13,12 +13,12 @@ from glob import glob
import numpy as np
from scipy import sparse
+from .bem import read_bem_surfaces
from .io.constants import FIFF
from .io.open import fiff_open
from .io.tree import dir_tree_find
from .io.tag import find_tag
-from .io.write import (write_int, write_float, write_float_matrix,
- write_int_matrix, start_file, end_block,
+from .io.write import (write_int, start_file, end_block,
start_block, end_file, write_string,
write_float_sparse_rcs)
from .channels.channels import _get_meg_system
@@ -27,265 +27,6 @@ from .utils import logger, verbose, get_subjects_dir
from .externals.six import string_types
-##############################################################################
-# BEM
-
-@verbose
-def read_bem_surfaces(fname, patch_stats=False, s_id=None, verbose=None):
- """Read the BEM surfaces from a FIF file
-
- Parameters
- ----------
- fname : string
- The name of the file containing the surfaces.
- patch_stats : bool, optional (default False)
- Calculate and add cortical patch statistics to the surfaces.
- s_id : int | None
- If int, only read and return the surface with the given s_id.
- An error will be raised if it doesn't exist. If None, all
- surfaces are read and returned.
- verbose : bool, str, int, or None
- If not None, override default verbose level (see mne.verbose).
-
- Returns
- -------
- surf: list | dict
- A list of dictionaries that each contain a surface. If s_id
- is not None, only the requested surface will be returned.
- """
- # Default coordinate frame
- coord_frame = FIFF.FIFFV_COORD_MRI
- # Open the file, create directory
- f, tree, _ = fiff_open(fname)
- with f as fid:
- # Find BEM
- bem = dir_tree_find(tree, FIFF.FIFFB_BEM)
- if bem is None:
- raise ValueError('BEM data not found')
-
- bem = bem[0]
- # Locate all surfaces
- bemsurf = dir_tree_find(bem, FIFF.FIFFB_BEM_SURF)
- if bemsurf is None:
- raise ValueError('BEM surface data not found')
-
- logger.info(' %d BEM surfaces found' % len(bemsurf))
- # Coordinate frame possibly at the top level
- tag = find_tag(fid, bem, FIFF.FIFF_BEM_COORD_FRAME)
- if tag is not None:
- coord_frame = tag.data
- # Read all surfaces
- if s_id is not None:
- surfs = [_read_bem_surface(fid, bsurf, coord_frame, s_id)
- for bsurf in bemsurf]
- surfs = [s for s in surfs if s is not None]
- if not len(surfs) == 1:
- raise ValueError('surface with id %d not found' % s_id)
- return surfs[0]
-
- surf = []
- for bsurf in bemsurf:
- logger.info(' Reading a surface...')
- this = _read_bem_surface(fid, bsurf, coord_frame)
- logger.info('[done]')
- if patch_stats:
- _complete_surface_info(this)
- surf.append(this)
-
- logger.info(' %d BEM surfaces read' % len(surf))
- return surf
-
-
-def _read_bem_surface(fid, this, def_coord_frame, s_id=None):
- """Read one bem surface
- """
- # fid should be open as a context manager here
- res = dict()
- # Read all the interesting stuff
- tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_ID)
-
- if tag is None:
- res['id'] = FIFF.FIFFV_BEM_SURF_ID_UNKNOWN
- else:
- res['id'] = int(tag.data)
-
- if s_id is not None and res['id'] != s_id:
- return None
-
- tag = find_tag(fid, this, FIFF.FIFF_BEM_SIGMA)
- res['sigma'] = 1.0 if tag is None else float(tag.data)
-
- tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NNODE)
- if tag is None:
- raise ValueError('Number of vertices not found')
-
- res['np'] = int(tag.data)
-
- tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NTRI)
- if tag is None:
- raise ValueError('Number of triangles not found')
- res['ntri'] = int(tag.data)
-
- tag = find_tag(fid, this, FIFF.FIFF_MNE_COORD_FRAME)
- if tag is None:
- tag = find_tag(fid, this, FIFF.FIFF_BEM_COORD_FRAME)
- if tag is None:
- res['coord_frame'] = def_coord_frame
- else:
- res['coord_frame'] = tag.data
- else:
- res['coord_frame'] = tag.data
-
- # Vertices, normals, and triangles
- tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NODES)
- if tag is None:
- raise ValueError('Vertex data not found')
-
- res['rr'] = tag.data.astype(np.float) # XXX : double because of mayavi bug
- if res['rr'].shape[0] != res['np']:
- raise ValueError('Vertex information is incorrect')
-
- tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS)
- if tag is None:
- tag = tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NORMALS)
- if tag is None:
- res['nn'] = []
- else:
- res['nn'] = tag.data
- if res['nn'].shape[0] != res['np']:
- raise ValueError('Vertex normal information is incorrect')
-
- tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_TRIANGLES)
- if tag is None:
- raise ValueError('Triangulation not found')
-
- res['tris'] = tag.data - 1 # index start at 0 in Python
- if res['tris'].shape[0] != res['ntri']:
- raise ValueError('Triangulation information is incorrect')
-
- return res
-
-
-@verbose
-def read_bem_solution(fname, verbose=None):
- """Read the BEM solution from a file
-
- Parameters
- ----------
- fname : string
- The file containing the BEM solution.
- verbose : bool, str, int, or None
- If not None, override default verbose level (see mne.verbose).
-
- Returns
- -------
- bem : dict
- The BEM solution.
- """
- logger.info('Loading surfaces...')
- bem_surfs = read_bem_surfaces(fname, patch_stats=True, verbose=False)
- if len(bem_surfs) == 3:
- logger.info('Three-layer model surfaces loaded.')
- needed = np.array([FIFF.FIFFV_BEM_SURF_ID_HEAD,
- FIFF.FIFFV_BEM_SURF_ID_SKULL,
- FIFF.FIFFV_BEM_SURF_ID_BRAIN])
- if not all(x['id'] in needed for x in bem_surfs):
- raise RuntimeError('Could not find necessary BEM surfaces')
- # reorder surfaces as necessary (shouldn't need to?)
- reorder = [None] * 3
- for x in bem_surfs:
- reorder[np.where(x['id'] == needed)[0][0]] = x
- bem_surfs = reorder
- elif len(bem_surfs) == 1:
- if not bem_surfs[0]['id'] == FIFF.FIFFV_BEM_SURF_ID_BRAIN:
- raise RuntimeError('BEM Surfaces not found')
- logger.info('Homogeneous model surface loaded.')
-
- # convert from surfaces to solution
- bem = dict(surfs=bem_surfs)
- logger.info('\nLoading the solution matrix...\n')
- f, tree, _ = fiff_open(fname)
- with f as fid:
- # Find the BEM data
- nodes = dir_tree_find(tree, FIFF.FIFFB_BEM)
- if len(nodes) == 0:
- raise RuntimeError('No BEM data in %s' % fname)
- bem_node = nodes[0]
-
- # Approximation method
- tag = find_tag(f, bem_node, FIFF.FIFF_BEM_APPROX)
- method = tag.data[0]
- if method == FIFF.FIFFV_BEM_APPROX_CONST:
- method = 'constant collocation'
- elif method == FIFF.FIFFV_BEM_APPROX_LINEAR:
- method = 'linear collocation'
- else:
- raise RuntimeError('Cannot handle BEM approximation method : %d'
- % method)
-
- tag = find_tag(fid, bem_node, FIFF.FIFF_BEM_POT_SOLUTION)
- dims = tag.data.shape
- if len(dims) != 2:
- raise RuntimeError('Expected a two-dimensional solution matrix '
- 'instead of a %d dimensional one' % dims[0])
-
- dim = 0
- for surf in bem['surfs']:
- if method == 'linear collocation':
- dim += surf['np']
- else:
- dim += surf['ntri']
-
- if dims[0] != dim or dims[1] != dim:
- raise RuntimeError('Expected a %d x %d solution matrix instead of '
- 'a %d x %d one' % (dim, dim, dims[1], dims[0]))
- sol = tag.data
- nsol = dims[0]
-
- # Gamma factors and multipliers
- bem['sigma'] = np.array([surf['sigma'] for surf in bem['surfs']])
- # Dirty trick for the zero conductivity outside
- sigma = np.r_[0.0, bem['sigma']]
- bem['source_mult'] = 2.0 / (sigma[1:] + sigma[:-1])
- bem['field_mult'] = sigma[1:] - sigma[:-1]
- # make sure subsequent "zip"s work correctly
- assert len(bem['surfs']) == len(bem['field_mult'])
- bem['gamma'] = ((sigma[1:] - sigma[:-1])[np.newaxis, :] /
- (sigma[1:] + sigma[:-1])[:, np.newaxis])
- bem['sol_name'] = fname
- bem['solution'] = sol
- bem['nsol'] = nsol
- bem['bem_method'] = method
- bem['is_sphere'] = False
- logger.info('Loaded %s BEM solution from %s', bem['bem_method'], fname)
- return bem
-
-
-_surf_dict = {'inner_skull': FIFF.FIFFV_BEM_SURF_ID_BRAIN,
- 'outer_skull': FIFF.FIFFV_BEM_SURF_ID_SKULL,
- 'head': FIFF.FIFFV_BEM_SURF_ID_HEAD}
-
-
-def _bem_find_surface(bem, id_):
- """Find surface from already-loaded BEM"""
- if isinstance(id_, string_types):
- name = id_
- id_ = _surf_dict[id_]
- else:
- name = _bem_explain_surface[id_]
- idx = np.where(np.array([s['id'] for s in bem['surfs']]) == id_)[0]
- if len(idx) != 1:
- raise RuntimeError('BEM model does not have the %s triangulation'
- % name.replace('_', ' '))
- return bem['surfs'][idx[0]]
-
-
-def _bem_explain_surface(id_):
- """Return a string corresponding to the given surface ID"""
- _rev_dict = dict((val, key) for key, val in _surf_dict.items())
- return _rev_dict[id_]
-
-
###############################################################################
# AUTOMATED SURFACE FINDING
@@ -485,7 +226,8 @@ def _triangle_coords(r, geom, best):
return x, y, z
-def _complete_surface_info(this, do_neighbor_vert=False):
+@verbose
+def _complete_surface_info(this, do_neighbor_vert=False, verbose=None):
"""Complete surface info"""
# based on mne_source_space_add_geometry_info() in mne_add_geometry_info.c
@@ -736,13 +478,14 @@ def _read_surface_geom(fname, patch_stats=True, norm_rr=False, verbose=None):
##############################################################################
# SURFACE CREATION
-def _get_ico_surface(grade):
+def _get_ico_surface(grade, patch_stats=False):
"""Return an icosahedral surface of the desired grade"""
# always use verbose=False since users don't need to know we're pulling
# these from a file
ico_file_name = op.join(op.dirname(__file__), 'data',
'icos.fif.gz')
- ico = read_bem_surfaces(ico_file_name, s_id=9000 + grade, verbose=False)
+ ico = read_bem_surfaces(ico_file_name, patch_stats, s_id=9000 + grade,
+ verbose=False)
return ico
@@ -944,43 +687,7 @@ def write_surface(fname, coords, faces, create_stamp=''):
###############################################################################
-# Write
-
-def write_bem_surface(fname, surf):
- """Write one bem surface
-
- Parameters
- ----------
- fname : string
- File to write
- surf : dict
- A surface structured as obtained with read_bem_surfaces
- """
-
- # Create the file and save the essentials
- fid = start_file(fname)
-
- start_block(fid, FIFF.FIFFB_BEM)
- start_block(fid, FIFF.FIFFB_BEM_SURF)
-
- write_int(fid, FIFF.FIFF_BEM_SURF_ID, surf['id'])
- write_float(fid, FIFF.FIFF_BEM_SIGMA, surf['sigma'])
- write_int(fid, FIFF.FIFF_BEM_SURF_NNODE, surf['np'])
- write_int(fid, FIFF.FIFF_BEM_SURF_NTRI, surf['ntri'])
- write_int(fid, FIFF.FIFF_BEM_COORD_FRAME, surf['coord_frame'])
- write_float_matrix(fid, FIFF.FIFF_BEM_SURF_NODES, surf['rr'])
-
- if 'nn' in surf and surf['nn'] is not None and len(surf['nn']) > 0:
- write_float_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS, surf['nn'])
-
- # index start at 0 in Python
- write_int_matrix(fid, FIFF.FIFF_BEM_SURF_TRIANGLES, surf['tris'] + 1)
-
- end_block(fid, FIFF.FIFFB_BEM_SURF)
- end_block(fid, FIFF.FIFFB_BEM)
-
- end_file(fid)
-
+# Decimation
def _decimate_surface(points, triangles, reduction):
"""Aux function"""
| implement bem-sol.fif code in python
for @Eric89GXL ... | mne-tools/mne-python | diff --git a/mne/tests/test_bem.py b/mne/tests/test_bem.py
index 8e7e06c2f..c8c0fc69b 100644
--- a/mne/tests/test_bem.py
+++ b/mne/tests/test_bem.py
@@ -2,16 +2,166 @@
#
# License: BSD 3 clause
+import os.path as op
import numpy as np
-from numpy.testing import assert_almost_equal
+from nose.tools import assert_raises
+from numpy.testing import assert_almost_equal, assert_equal, assert_allclose
+from mne import (make_bem_model, read_bem_surfaces, write_bem_surfaces,
+ make_bem_solution, read_bem_solution, write_bem_solution,
+ make_sphere_model)
from mne.preprocessing.maxfilter import fit_sphere_to_headshape
from mne.io.constants import FIFF
from mne.transforms import rotation
+from mne.datasets import testing
+from mne.utils import run_tests_if_main, _TempDir, slow_test
+from mne.bem import (_ico_downsample, _get_ico_map, _order_surfaces,
+ _assert_complete_surface, _assert_inside,
+ _check_surface_size, _bem_find_surface)
+from mne.io import read_info
+
+fname_raw = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data',
+ 'test_raw.fif')
+subjects_dir = op.join(testing.data_path(download=False), 'subjects')
+fname_bem_3 = op.join(subjects_dir, 'sample', 'bem',
+ 'sample-320-320-320-bem.fif')
+fname_bem_1 = op.join(subjects_dir, 'sample', 'bem',
+ 'sample-320-bem.fif')
+fname_bem_sol_3 = op.join(subjects_dir, 'sample', 'bem',
+ 'sample-320-320-320-bem-sol.fif')
+fname_bem_sol_1 = op.join(subjects_dir, 'sample', 'bem',
+ 'sample-320-bem-sol.fif')
+
+
+def _compare_bem_surfaces(surfs_1, surfs_2):
+ """Helper to compare BEM surfaces"""
+ names = ['id', 'nn', 'rr', 'coord_frame', 'tris', 'sigma', 'ntri', 'np']
+ ignores = ['tri_cent', 'tri_nn', 'tri_area', 'neighbor_tri']
+ for s0, s1 in zip(surfs_1, surfs_2):
+ assert_equal(set(names), set(s0.keys()) - set(ignores))
+ assert_equal(set(names), set(s1.keys()) - set(ignores))
+ for name in names:
+ assert_allclose(s0[name], s1[name], rtol=1e-3, atol=1e-6,
+ err_msg='Mismatch: "%s"' % name)
+
+
+def _compare_bem_solutions(sol_a, sol_b):
+ """Helper to compare BEM solutions"""
+ # compare the surfaces we used
+ _compare_bem_surfaces(sol_a['surfs'], sol_b['surfs'])
+ # compare the actual solutions
+ names = ['bem_method', 'field_mult', 'gamma', 'is_sphere',
+ 'nsol', 'sigma', 'source_mult', 'solution']
+ assert_equal(set(sol_a.keys()), set(sol_b.keys()))
+ assert_equal(set(names + ['surfs']), set(sol_b.keys()))
+ for key in names:
+ assert_allclose(sol_a[key], sol_b[key], rtol=1e-3, atol=1e-5,
+ err_msg='Mismatch: %s' % key)
+
+
[email protected]_testing_data
+def test_io_bem():
+ """Test reading and writing of bem surfaces and solutions
+ """
+ tempdir = _TempDir()
+ temp_bem = op.join(tempdir, 'temp-bem.fif')
+ assert_raises(ValueError, read_bem_surfaces, fname_raw)
+ assert_raises(ValueError, read_bem_surfaces, fname_bem_3, s_id=10)
+ surf = read_bem_surfaces(fname_bem_3, patch_stats=True)
+ surf = read_bem_surfaces(fname_bem_3, patch_stats=False)
+ write_bem_surfaces(temp_bem, surf[0])
+ surf_read = read_bem_surfaces(temp_bem, patch_stats=False)
+ _compare_bem_surfaces(surf, surf_read)
+
+ assert_raises(RuntimeError, read_bem_solution, fname_bem_3)
+ temp_sol = op.join(tempdir, 'temp-sol.fif')
+ sol = read_bem_solution(fname_bem_sol_3)
+ write_bem_solution(temp_sol, sol)
+ sol_read = read_bem_solution(temp_sol)
+ _compare_bem_solutions(sol, sol_read)
+ sol = read_bem_solution(fname_bem_sol_1)
+ assert_raises(RuntimeError, _bem_find_surface, sol, 3)
+
+
+def test_make_sphere_model():
+ """Test making a sphere model"""
+ info = read_info(fname_raw)
+ assert_raises(ValueError, make_sphere_model, 'foo', 'auto', info)
+ assert_raises(ValueError, make_sphere_model, 'auto', 'auto', None)
+ # here we just make sure it works -- the functionality is actually
+ # tested more extensively e.g. in the forward and dipole code
+ make_sphere_model('auto', 'auto', info)
+
+
[email protected]_testing_data
+def test_bem_model():
+ """Test BEM model creation from Python with I/O"""
+ tempdir = _TempDir()
+ fname_temp = op.join(tempdir, 'temp-bem.fif')
+ for kwargs, fname in zip((dict(), dict(conductivity=[0.3])),
+ [fname_bem_3, fname_bem_1]):
+ model = make_bem_model('sample', ico=2, subjects_dir=subjects_dir,
+ **kwargs)
+ model_c = read_bem_surfaces(fname)
+ _compare_bem_surfaces(model, model_c)
+ write_bem_surfaces(fname_temp, model)
+ model_read = read_bem_surfaces(fname_temp)
+ _compare_bem_surfaces(model, model_c)
+ _compare_bem_surfaces(model_read, model_c)
+ assert_raises(ValueError, make_bem_model, 'sample', # bad conductivity
+ conductivity=[0.3, 0.006], subjects_dir=subjects_dir)
+
+
+@slow_test
[email protected]_testing_data
+def test_bem_solution():
+ """Test making a BEM solution from Python with I/O"""
+ # test degenerate conditions
+ surf = read_bem_surfaces(fname_bem_1)[0]
+ assert_raises(RuntimeError, _ico_downsample, surf, 10) # bad dec grade
+ s_bad = dict(tris=surf['tris'][1:], ntri=surf['ntri'] - 1, rr=surf['rr'])
+ assert_raises(RuntimeError, _ico_downsample, s_bad, 1) # not isomorphic
+ s_bad = dict(tris=surf['tris'].copy(), ntri=surf['ntri'],
+ rr=surf['rr']) # bad triangulation
+ s_bad['tris'][0] = [0, 0, 0]
+ assert_raises(RuntimeError, _ico_downsample, s_bad, 1)
+ s_bad['id'] = 1
+ assert_raises(RuntimeError, _assert_complete_surface, s_bad)
+ s_bad = dict(tris=surf['tris'], ntri=surf['ntri'], rr=surf['rr'].copy())
+ s_bad['rr'][0] = 0.
+ assert_raises(RuntimeError, _get_ico_map, surf, s_bad)
+
+ surfs = read_bem_surfaces(fname_bem_3)
+ assert_raises(RuntimeError, _assert_inside, surfs[0], surfs[1]) # outside
+ surfs[0]['id'] = 100 # bad surfs
+ assert_raises(RuntimeError, _order_surfaces, surfs)
+ surfs[1]['rr'] /= 1000.
+ assert_raises(RuntimeError, _check_surface_size, surfs[1])
+
+ # actually test functionality
+ tempdir = _TempDir()
+ fname_temp = op.join(tempdir, 'temp-bem-sol.fif')
+ # use a model and solution made in Python
+ conductivities = [(0.3,), (0.3, 0.006, 0.3)]
+ fnames = [fname_bem_sol_1, fname_bem_sol_3]
+ for cond, fname in zip(conductivities, fnames):
+ for model_type in ('python', 'c'):
+ if model_type == 'python':
+ model = make_bem_model('sample', conductivity=cond, ico=2,
+ subjects_dir=subjects_dir)
+ else:
+ model = fname_bem_1 if len(cond) == 1 else fname_bem_3
+ solution = make_bem_solution(model)
+ solution_c = read_bem_solution(fname)
+ _compare_bem_solutions(solution, solution_c)
+ write_bem_solution(fname_temp, solution)
+ solution_read = read_bem_solution(fname_temp)
+ _compare_bem_solutions(solution, solution_c)
+ _compare_bem_solutions(solution_read, solution_c)
def test_fit_sphere_to_headshape():
- """ Test fitting a sphere to digitization points. """
+ """Test fitting a sphere to digitization points"""
# Create points of various kinds
dig = [
# Left auricular
@@ -89,3 +239,6 @@ def test_fit_sphere_to_headshape():
assert_almost_equal(r / 1000, 1.0, decimal=2)
assert_almost_equal(oh / 1000, [0.0, 0.0, 0.0], decimal=2)
assert_almost_equal(od / 1000, [0.0, 0.0, 0.0], decimal=2)
+
+
+run_tests_if_main()
diff --git a/mne/tests/test_dipole.py b/mne/tests/test_dipole.py
index d93e38074..1ee82e682 100644
--- a/mne/tests/test_dipole.py
+++ b/mne/tests/test_dipole.py
@@ -17,7 +17,8 @@ from mne.proj import make_eeg_average_ref_proj
from mne.io import Raw
-from mne.surface import _bem_find_surface, _compute_nearest, read_bem_solution
+from mne.surface import _compute_nearest
+from mne.bem import _bem_find_surface, read_bem_solution
from mne.transforms import (read_trans, apply_trans, _get_mri_head_t)
warnings.simplefilter('always')
diff --git a/mne/tests/test_docstring_parameters.py b/mne/tests/test_docstring_parameters.py
index a3c50cf65..8cb83466d 100644
--- a/mne/tests/test_docstring_parameters.py
+++ b/mne/tests/test_docstring_parameters.py
@@ -59,7 +59,8 @@ def get_name(func):
_deprecation_ignores = [
'mne.io.write', # always ignore these
'mne.fixes._in1d', # fix function
- 'mne.utils.plot_epochs_trellis'
+ 'mne.utils.plot_epochs_trellis', # deprecated
+ 'mne.utils.write_bem_surface', # deprecated
]
diff --git a/mne/tests/test_surface.py b/mne/tests/test_surface.py
index f03d1b855..a7e0c1db5 100644
--- a/mne/tests/test_surface.py
+++ b/mne/tests/test_surface.py
@@ -6,12 +6,10 @@ import warnings
from shutil import copyfile
from scipy import sparse
from nose.tools import assert_true, assert_raises
-from numpy.testing import (assert_array_equal, assert_array_almost_equal,
- assert_allclose, assert_equal)
+from numpy.testing import assert_array_equal, assert_allclose, assert_equal
from mne.datasets import testing
-from mne import (read_bem_surfaces, write_bem_surface, read_surface,
- write_surface, decimate_surface)
+from mne import read_surface, write_surface, decimate_surface
from mne.surface import (read_morph_map, _compute_nearest,
fast_cross_3d, get_head_surf, read_curvature,
get_meg_helmet_surf)
@@ -117,23 +115,6 @@ def test_make_morph_maps():
assert_true((mm - sparse.eye(mm.shape[0], mm.shape[0])).sum() == 0)
[email protected]_testing_data
-def test_io_bem_surfaces():
- """Test reading of bem surfaces
- """
- tempdir = _TempDir()
- surf = read_bem_surfaces(fname, patch_stats=True)
- surf = read_bem_surfaces(fname, patch_stats=False)
- print("Number of surfaces : %d" % len(surf))
-
- write_bem_surface(op.join(tempdir, 'bem_surf.fif'), surf[0])
- surf_read = read_bem_surfaces(op.join(tempdir, 'bem_surf.fif'),
- patch_stats=False)
-
- for key in surf[0].keys():
- assert_array_almost_equal(surf[0][key], surf_read[0][key])
-
-
@testing.requires_testing_data
def test_io_surface():
"""Test reading and writing of Freesurfer surface mesh files
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 3,
"test_score": 3
},
"num_modified_files": 11
} | 0.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"numpy>=1.16.0",
"pandas>=1.0.0",
"scikit-learn",
"h5py",
"pysurfer",
"nose",
"nose-timer",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.7",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | apptools==5.2.1
certifi @ file:///croot/certifi_1671487769961/work/certifi
configobj==5.0.9
cycler==0.11.0
envisage==7.0.3
exceptiongroup==1.2.2
fonttools==4.38.0
h5py==3.8.0
importlib-metadata==6.7.0
importlib-resources==5.12.0
iniconfig==2.0.0
joblib==1.3.2
kiwisolver==1.4.5
matplotlib==3.5.3
mayavi==4.8.1
-e git+https://github.com/mne-tools/mne-python.git@6ccf41c6295760dcf36e2a1062132e5b319a4812#egg=mne
nibabel==4.0.2
nose==1.3.7
nose-timer==1.0.1
numpy==1.21.6
packaging==24.0
pandas==1.3.5
Pillow==9.5.0
pluggy==1.2.0
pyface==8.0.0
Pygments==2.17.2
pyparsing==3.1.4
pysurfer==0.11.2
pytest==7.4.4
python-dateutil==2.9.0.post0
pytz==2025.2
scikit-learn==1.0.2
scipy==1.7.3
six==1.17.0
threadpoolctl==3.1.0
tomli==2.0.1
traits==6.4.3
traitsui==8.0.0
typing_extensions==4.7.1
vtk==9.3.1
zipp==3.15.0
| name: mne-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=22.3.1=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- apptools==5.2.1
- configobj==5.0.9
- cycler==0.11.0
- envisage==7.0.3
- exceptiongroup==1.2.2
- fonttools==4.38.0
- h5py==3.8.0
- importlib-metadata==6.7.0
- importlib-resources==5.12.0
- iniconfig==2.0.0
- joblib==1.3.2
- kiwisolver==1.4.5
- matplotlib==3.5.3
- mayavi==4.8.1
- nibabel==4.0.2
- nose==1.3.7
- nose-timer==1.0.1
- numpy==1.21.6
- packaging==24.0
- pandas==1.3.5
- pillow==9.5.0
- pluggy==1.2.0
- pyface==8.0.0
- pygments==2.17.2
- pyparsing==3.1.4
- pysurfer==0.11.2
- pytest==7.4.4
- python-dateutil==2.9.0.post0
- pytz==2025.2
- scikit-learn==1.0.2
- scipy==1.7.3
- six==1.17.0
- threadpoolctl==3.1.0
- tomli==2.0.1
- traits==6.4.3
- traitsui==8.0.0
- typing-extensions==4.7.1
- vtk==9.3.1
- zipp==3.15.0
prefix: /opt/conda/envs/mne-python
| [
"mne/tests/test_bem.py::test_make_sphere_model",
"mne/tests/test_bem.py::test_fit_sphere_to_headshape",
"mne/tests/test_docstring_parameters.py::test_docstring_parameters",
"mne/tests/test_surface.py::test_helmet",
"mne/tests/test_surface.py::test_huge_cross",
"mne/tests/test_surface.py::test_compute_nearest"
]
| []
| []
| []
| BSD 3-Clause "New" or "Revised" License | 161 | [
"mne/coreg.py",
"mne/forward/_compute_forward.py",
"doc/source/python_tutorial.rst",
"mne/bem.py",
"mne/__init__.py",
"doc/source/whats_new.rst",
"mne/forward/_make_forward.py",
"mne/surface.py",
"mne/source_space.py",
"mne/dipole.py",
"doc/source/python_reference.rst"
]
| [
"mne/coreg.py",
"mne/forward/_compute_forward.py",
"doc/source/python_tutorial.rst",
"mne/bem.py",
"mne/__init__.py",
"doc/source/whats_new.rst",
"mne/forward/_make_forward.py",
"mne/surface.py",
"mne/source_space.py",
"mne/dipole.py",
"doc/source/python_reference.rst"
]
|
peterbe__premailer-128 | d2a2a4afdd06e5931545bc76b9ee940fdefdb543 | 2015-06-10 16:40:55 | d2a2a4afdd06e5931545bc76b9ee940fdefdb543 | diff --git a/README.rst b/README.rst
index 272a0b9..a3af844 100644
--- a/README.rst
+++ b/README.rst
@@ -221,7 +221,38 @@ attribute ``bgcolor="#eee"``.
Having these extra attributes basically as a "back up" for really shit
email clients that can't even take the style attributes. A lot of
professional HTML newsletters such as Amazon's use this. You can disable
-some attributes in ``disable_basic_attributes``
+some attributes in ``disable_basic_attributes``.
+
+
+Capturing logging from ``cssutils``
+-----------------------------------
+
+`cssutils <https://pypi.python.org/pypi/cssutils/>`__ is the library that
+``premailer`` uses to parse CSS. It will use the python ``logging`` module
+to mention all issues it has with parsing your CSS. If you want to capture
+this, you have to pass in ``cssutils_logging_handler`` and
+``cssutils_logging_level`` (optional). For example like this:
+
+.. code:: python
+
+ >>> import logging
+ >>> import premailer
+ >>> from io import StringIO
+ >>> mylog = StringIO()
+ >>> myhandler = logging.StreamHandler(mylog)
+ >>> p = premailer.Premailer("""
+ ... <html>
+ ... <style type="text/css">
+ ... @keyframes foo { from { opacity: 0; } to { opacity: 1; } }
+ ... </style>
+ ... <p>Hej</p>
+ ... </html>
+ ... """,
+ ... cssutils_logging_handler=myhandler,
+ ... cssutils_logging_level=logging.INFO)
+ >>> result = p.transform()
+ >>> mylog.getvalue()
+ 'CSSStylesheet: Unknown @rule found. [2:1: @keyframes]\n'
Running tests with tox
----------------------
diff --git a/premailer/premailer.py b/premailer/premailer.py
index e0ac5eb..4e5d49f 100644
--- a/premailer/premailer.py
+++ b/premailer/premailer.py
@@ -111,7 +111,9 @@ class Premailer(object):
base_path=None,
disable_basic_attributes=None,
disable_validation=False,
- cache_css_parsing=True):
+ cache_css_parsing=True,
+ cssutils_logging_handler=None,
+ cssutils_logging_level=None):
self.html = html
self.base_url = base_url
self.preserve_internal_links = preserve_internal_links
@@ -138,6 +140,11 @@ class Premailer(object):
self.disable_validation = disable_validation
self.cache_css_parsing = cache_css_parsing
+ if cssutils_logging_handler:
+ cssutils.log.addHandler(cssutils_logging_handler)
+ if cssutils_logging_level:
+ cssutils.log.setLevel(cssutils_logging_level)
+
def _parse_css_string(self, css_body, validate=True):
if self.cache_css_parsing:
return _cache_parse_css_string(css_body, validate=validate)
| Unknown property names
Property: Unknown Property name. [31:17: -ms-interpolation-mode]
Property: Unknown Property name. [4:17: mso-line-height-rule]
Property: Unknown Property name. [106:17: -webkit-text-size-adjust]
Property: Unknown Property name. [107:17: -ms-text-size-adjust]
Property: Unknown Property name. [188:21: text-rendering]
Property: Unknown Property name. [613:17: mso-text-raise]
Property: Unknown Property name. [624:17: transition]
Property: Unknown Property name.
I'm not sure what this is or what I can do about it.
Thanks for any help!
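These diagnostics come from ``cssutils``, the CSS parser premailer uses; they are emitted through the module-level ``cssutils.log`` object rather than raised as exceptions. The patch above adds ``cssutils_logging_handler`` and ``cssutils_logging_level`` so they can be captured or filtered. A minimal sketch using those new keyword arguments (the HTML snippet and the chosen level are illustrative only):

```python
import logging
from io import StringIO

import premailer

log_buffer = StringIO()
handler = logging.StreamHandler(log_buffer)

p = premailer.Premailer(
    '<html><style>p { mso-line-height-rule: exactly; }</style><p>Hi</p></html>',
    cssutils_logging_handler=handler,      # route parser diagnostics into log_buffer
    cssutils_logging_level=logging.ERROR,  # filter out lower-severity messages
)
result = p.transform()

# Diagnostics such as "Property: Unknown Property name." now land in
# log_buffer (or are filtered out by the level) instead of leaking to stderr.
print(log_buffer.getvalue())
```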
| peterbe/premailer | diff --git a/premailer/tests/test_premailer.py b/premailer/tests/test_premailer.py
index 4390a4e..dd9c07e 100644
--- a/premailer/tests/test_premailer.py
+++ b/premailer/tests/test_premailer.py
@@ -2,6 +2,7 @@ from __future__ import absolute_import, unicode_literals
import sys
import re
import unittest
+import logging
from contextlib import contextmanager
if sys.version_info >= (3, ): # As in, Python 3
from urllib.request import urlopen
@@ -2124,6 +2125,47 @@ ent:"" !important;display:block !important}
p = Premailer(html, disable_validation=True)
p.transform() # it should just work
+ def test_capture_cssutils_logging(self):
+ """you can capture all the warnings, errors etc. from cssutils
+ with your own logging. """
+ html = """<!doctype html>
+ <html>
+ <head>
+ <meta charset="UTF-8">
+ <title>Document</title>
+ <style>
+ @keyframes fadein {
+ from { opacity: 0; }
+ to { opacity: 1; }
+ }
+ </style>
+ </head>
+ <body></body>
+ </html>"""
+
+ mylog = StringIO()
+ myhandler = logging.StreamHandler(mylog)
+ p = Premailer(
+ html,
+ cssutils_logging_handler=myhandler,
+ )
+ p.transform() # it should work
+ eq_(
+ mylog.getvalue(),
+ 'CSSStylesheet: Unknown @rule found. [2:13: @keyframes]\n'
+ )
+
+ # only log errors now
+ mylog = StringIO()
+ myhandler = logging.StreamHandler(mylog)
+ p = Premailer(
+ html,
+ cssutils_logging_handler=myhandler,
+ cssutils_logging_level=logging.ERROR,
+ )
+ p.transform() # it should work
+ eq_(mylog.getvalue(), '')
+
def test_type_test(self):
"""test the correct type is returned"""
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 2
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"mock",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.7",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi @ file:///croot/certifi_1671487769961/work/certifi
cssselect==1.2.0
cssutils==2.7.1
exceptiongroup==1.2.2
importlib-metadata==6.7.0
iniconfig==2.0.0
lxml==5.3.1
mock==5.2.0
nose==1.3.7
packaging==24.0
pluggy==1.2.0
-e git+https://github.com/peterbe/premailer.git@d2a2a4afdd06e5931545bc76b9ee940fdefdb543#egg=premailer
pytest==7.4.4
tomli==2.0.1
typing_extensions==4.7.1
zipp==3.15.0
| name: premailer
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=22.3.1=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cssselect==1.2.0
- cssutils==2.7.1
- exceptiongroup==1.2.2
- importlib-metadata==6.7.0
- iniconfig==2.0.0
- lxml==5.3.1
- mock==5.2.0
- nose==1.3.7
- packaging==24.0
- pluggy==1.2.0
- pytest==7.4.4
- tomli==2.0.1
- typing-extensions==4.7.1
- zipp==3.15.0
prefix: /opt/conda/envs/premailer
| [
"premailer/tests/test_premailer.py::Tests::test_capture_cssutils_logging"
]
| []
| [
"premailer/tests/test_premailer.py::Tests::test_apple_newsletter_example",
"premailer/tests/test_premailer.py::Tests::test_base_url_fixer",
"premailer/tests/test_premailer.py::Tests::test_base_url_with_path",
"premailer/tests/test_premailer.py::Tests::test_basic_html",
"premailer/tests/test_premailer.py::Tests::test_basic_html_shortcut_function",
"premailer/tests/test_premailer.py::Tests::test_basic_html_with_pseudo_selector",
"premailer/tests/test_premailer.py::Tests::test_basic_xml",
"premailer/tests/test_premailer.py::Tests::test_broken_xml",
"premailer/tests/test_premailer.py::Tests::test_child_selector",
"premailer/tests/test_premailer.py::Tests::test_command_line_fileinput_from_argument",
"premailer/tests/test_premailer.py::Tests::test_command_line_fileinput_from_stdin",
"premailer/tests/test_premailer.py::Tests::test_command_line_preserve_style_tags",
"premailer/tests/test_premailer.py::Tests::test_comments_in_media_queries",
"premailer/tests/test_premailer.py::Tests::test_css_disable_basic_html_attributes",
"premailer/tests/test_premailer.py::Tests::test_css_text",
"premailer/tests/test_premailer.py::Tests::test_css_text_with_only_body_present",
"premailer/tests/test_premailer.py::Tests::test_css_with_html_attributes",
"premailer/tests/test_premailer.py::Tests::test_css_with_pseudoclasses_excluded",
"premailer/tests/test_premailer.py::Tests::test_css_with_pseudoclasses_included",
"premailer/tests/test_premailer.py::Tests::test_disabled_validator",
"premailer/tests/test_premailer.py::Tests::test_doctype",
"premailer/tests/test_premailer.py::Tests::test_empty_style_tag",
"premailer/tests/test_premailer.py::Tests::test_external_links",
"premailer/tests/test_premailer.py::Tests::test_external_links_unfindable",
"premailer/tests/test_premailer.py::Tests::test_external_styles_and_links",
"premailer/tests/test_premailer.py::Tests::test_external_styles_on_http",
"premailer/tests/test_premailer.py::Tests::test_external_styles_on_https",
"premailer/tests/test_premailer.py::Tests::test_external_styles_with_base_url",
"premailer/tests/test_premailer.py::Tests::test_favour_rule_with_class_over_generic",
"premailer/tests/test_premailer.py::Tests::test_favour_rule_with_element_over_generic",
"premailer/tests/test_premailer.py::Tests::test_favour_rule_with_id_over_others",
"premailer/tests/test_premailer.py::Tests::test_favour_rule_with_important_over_others",
"premailer/tests/test_premailer.py::Tests::test_fontface_selectors_with_no_selectortext",
"premailer/tests/test_premailer.py::Tests::test_ignore_some_external_stylesheets",
"premailer/tests/test_premailer.py::Tests::test_ignore_some_incorrectly",
"premailer/tests/test_premailer.py::Tests::test_ignore_some_inline_stylesheets",
"premailer/tests/test_premailer.py::Tests::test_ignore_style_elements_with_media_attribute",
"premailer/tests/test_premailer.py::Tests::test_include_star_selector",
"premailer/tests/test_premailer.py::Tests::test_inline_wins_over_external",
"premailer/tests/test_premailer.py::Tests::test_keyframe_selectors",
"premailer/tests/test_premailer.py::Tests::test_last_child",
"premailer/tests/test_premailer.py::Tests::test_last_child_exclude_pseudo",
"premailer/tests/test_premailer.py::Tests::test_leftover_important",
"premailer/tests/test_premailer.py::Tests::test_links_without_protocol",
"premailer/tests/test_premailer.py::Tests::test_load_external_url",
"premailer/tests/test_premailer.py::Tests::test_load_external_url_gzip",
"premailer/tests/test_premailer.py::Tests::test_mailto_url",
"premailer/tests/test_premailer.py::Tests::test_mediaquery",
"premailer/tests/test_premailer.py::Tests::test_merge_styles_basic",
"premailer/tests/test_premailer.py::Tests::test_merge_styles_non_trivial",
"premailer/tests/test_premailer.py::Tests::test_merge_styles_with_class",
"premailer/tests/test_premailer.py::Tests::test_mixed_pseudo_selectors",
"premailer/tests/test_premailer.py::Tests::test_multiple_style_elements",
"premailer/tests/test_premailer.py::Tests::test_multithreading",
"premailer/tests/test_premailer.py::Tests::test_parse_style_rules",
"premailer/tests/test_premailer.py::Tests::test_precedence_comparison",
"premailer/tests/test_premailer.py::Tests::test_prefer_inline_to_class",
"premailer/tests/test_premailer.py::Tests::test_shortcut_function",
"premailer/tests/test_premailer.py::Tests::test_strip_important",
"premailer/tests/test_premailer.py::Tests::test_style_attribute_specificity",
"premailer/tests/test_premailer.py::Tests::test_style_block_with_external_urls",
"premailer/tests/test_premailer.py::Tests::test_turnoff_cache_works_as_expected",
"premailer/tests/test_premailer.py::Tests::test_type_test",
"premailer/tests/test_premailer.py::Tests::test_xml_cdata"
]
| []
| BSD 3-Clause "New" or "Revised" License | 163 | [
"README.rst",
"premailer/premailer.py"
]
| [
"README.rst",
"premailer/premailer.py"
]
|
|
Juniper__py-junos-eznc-386 | d850258f53611146f48db76842108beb9c4060b3 | 2015-06-11 18:43:09 | 4097eb9662e6caad8362b98e36a7ca50b122f92c | diff --git a/lib/jnpr/junos/facts/domain.py b/lib/jnpr/junos/facts/domain.py
index ed104f01..92eb03ad 100644
--- a/lib/jnpr/junos/facts/domain.py
+++ b/lib/jnpr/junos/facts/domain.py
@@ -1,4 +1,5 @@
from jnpr.junos.utils.fs import FS
+from jnpr.junos.exception import RpcError
from lxml.builder import E
@@ -11,14 +12,17 @@ def facts_domain(junos, facts):
facts['domain']
facts['fqdn']
"""
- # changes done to fix issue #332
- domain_filter_xml = E('configuration', E('system', E('domain-name')))
- domain = junos.rpc.get_config(domain_filter_xml)
- domain_name = domain.xpath('.//domain-name')
- if len(domain_name) > 0:
- facts['domain'] = domain_name[0].text
- facts['fqdn'] = facts['hostname'] + '.' + facts['domain']
- return
+
+ try:
+ domain_filter_xml = E('configuration', E('system', E('domain-name')))
+ domain = junos.rpc.get_config(domain_filter_xml)
+ domain_name = domain.xpath('.//domain-name')
+ if len(domain_name) > 0:
+ facts['domain'] = domain_name[0].text
+ facts['fqdn'] = facts['hostname'] + '.' + facts['domain']
+ return
+ except RpcError:
+ pass
fs = FS(junos)
file_content = fs.cat('/etc/resolv.conf') or fs.cat('/var/etc/resolv.conf')
| Domain fact gathering not in try block; RpcError uncaught.
I was using 1.1.0 until this morning with SSH key authentication. After upgrading, connecting to the devices fails:
from jnpr.junos import Device
router = 'somerouter'
username = 'netconf'
path2keyfile = 'path2privatekey'
dev = Device(router, user=username, ssh_private_key_file=path2keyfile)
dev.open()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python2.7/dist-packages/jnpr/junos/device.py", line 494, in open
self.facts_refresh()
File "/usr/local/lib/python2.7/dist-packages/jnpr/junos/device.py", line 758, in facts_refresh
gather(self, self._facts)
File "/usr/local/lib/python2.7/dist-packages/jnpr/junos/facts/domain.py", line 16, in facts_domain
domain = junos.rpc.get_config(domain_filter_xml)
File "/usr/local/lib/python2.7/dist-packages/jnpr/junos/rpcmeta.py", line 43, in get_config
return self._junos.execute(rpc)
File "/usr/local/lib/python2.7/dist-packages/jnpr/junos/decorators.py", line 71, in wrapper
return function(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/jnpr/junos/decorators.py", line 26, in wrapper
return function(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/jnpr/junos/device.py", line 569, in execute
raise e(cmd=rpc_cmd_e, rsp=rsp)
jnpr.junos.exception.PermissionError: PermissionError(severity: error, bad_element: system, message: permission denied)
I rolled back to 1.1.0 to confirm, and authentication was successful. With both versions, the target router logs the following in 'messages':
Jun 11 11:43:31 sshd[1258]: Accepted publickey for netconf from x.x.x.x port 40132 ssh2
Jun 11 11:43:31 sshd[1258]: subsystem request for netconf by user netconf
I am able to log in interactively with the username/key combination.
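The change above makes fact gathering tolerate accounts that can read files but not the configuration: the ``PermissionError`` in the traceback above is an ``RpcError`` subclass, so the new ``except`` clause swallows it and execution falls through to the ``resolv.conf`` lookup. A condensed sketch of the patched control flow (names come straight from the diff; it assumes a connected ``Device``, so it is not runnable standalone):

```python
from lxml.builder import E
from jnpr.junos.exception import RpcError
from jnpr.junos.utils.fs import FS

def facts_domain(junos, facts):
    """Fill facts['domain']/facts['fqdn'], tolerating restricted accounts."""
    try:
        # A restricted login may get "permission denied" back from
        # get_config; that surfaces as an RpcError subclass.
        domain_filter_xml = E('configuration', E('system', E('domain-name')))
        domain = junos.rpc.get_config(domain_filter_xml)
        domain_name = domain.xpath('.//domain-name')
        if domain_name:
            facts['domain'] = domain_name[0].text
            facts['fqdn'] = facts['hostname'] + '.' + facts['domain']
            return
    except RpcError:
        pass  # fall through to the file-based lookup below
    fs = FS(junos)
    # Same fallback the pre-1.2 code relied on; parsing of the
    # 'domain'/'search' entries from file_content is omitted here.
    file_content = fs.cat('/etc/resolv.conf') or fs.cat('/var/etc/resolv.conf')
```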
| Juniper/py-junos-eznc | diff --git a/tests/unit/facts/test_domain.py b/tests/unit/facts/test_domain.py
index 62062557..e92a3313 100644
--- a/tests/unit/facts/test_domain.py
+++ b/tests/unit/facts/test_domain.py
@@ -8,6 +8,7 @@ from lxml import etree
from jnpr.junos.facts.domain import facts_domain
from jnpr.junos import Device
+from jnpr.junos.exception import RpcError
@attr('unit')
@@ -61,3 +62,16 @@ class TestDomain(unittest.TestCase):
facts_domain(self.dev, self.facts)
self.assertEqual(self.facts['domain'], 'testing.net')
self.assertEqual(self.facts['fqdn'], 'test.testing.net')
+
+ @patch('jnpr.junos.facts.domain.FS.cat')
+ def test_domain_rpc_error(self, mock_fs_cat):
+ self.dev.rpc.get_config = MagicMock(side_effect=RpcError)
+ mock_fs_cat.return_value =\
+ """# domain juniper.net
+ search englab.juniper.net spglab.juniper.net juniper.net jnpr.net
+ nameserver 10.11.12.13
+ """
+ self.facts['hostname'] = 'test'
+ facts_domain(self.dev, self.facts)
+ self.assertEqual(self.facts['domain'], 'juniper.net')
+ self.assertEqual(self.facts['fqdn'], 'test.juniper.net')
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 1.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"coverage",
"mock",
"nose",
"pep8",
"pyflakes",
"coveralls",
"ntc_templates",
"cryptography==3.2",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.7",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | bcrypt==4.2.1
certifi @ file:///croot/certifi_1671487769961/work/certifi
cffi==1.15.1
charset-normalizer==3.4.1
coverage==6.5.0
coveralls==3.3.1
cryptography==44.0.2
docopt==0.6.2
exceptiongroup==1.2.2
future==1.0.0
idna==3.10
importlib-metadata==6.7.0
iniconfig==2.0.0
Jinja2==3.1.6
-e git+https://github.com/Juniper/py-junos-eznc.git@d850258f53611146f48db76842108beb9c4060b3#egg=junos_eznc
lxml==5.3.1
MarkupSafe==2.1.5
mock==5.2.0
ncclient==0.6.19
netaddr==1.3.0
nose==1.3.7
ntc_templates==4.0.1
packaging==24.0
paramiko==3.5.1
pep8==1.7.1
pluggy==1.2.0
pycparser==2.21
pyflakes==3.0.1
PyNaCl==1.5.0
pytest==7.4.4
PyYAML==6.0.1
requests==2.31.0
scp==0.15.0
six==1.17.0
textfsm==1.1.3
tomli==2.0.1
typing_extensions==4.7.1
urllib3==2.0.7
zipp==3.15.0
| name: py-junos-eznc
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=22.3.1=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- bcrypt==4.2.1
- cffi==1.15.1
- charset-normalizer==3.4.1
- coverage==6.5.0
- coveralls==3.3.1
- cryptography==44.0.2
- docopt==0.6.2
- exceptiongroup==1.2.2
- future==1.0.0
- idna==3.10
- importlib-metadata==6.7.0
- iniconfig==2.0.0
- jinja2==3.1.6
- lxml==5.3.1
- markupsafe==2.1.5
- mock==5.2.0
- ncclient==0.6.19
- netaddr==1.3.0
- nose==1.3.7
- ntc-templates==4.0.1
- packaging==24.0
- paramiko==3.5.1
- pep8==1.7.1
- pluggy==1.2.0
- pycparser==2.21
- pyflakes==3.0.1
- pynacl==1.5.0
- pytest==7.4.4
- pyyaml==6.0.1
- requests==2.31.0
- scp==0.15.0
- six==1.17.0
- textfsm==1.1.3
- tomli==2.0.1
- typing-extensions==4.7.1
- urllib3==2.0.7
- zipp==3.15.0
prefix: /opt/conda/envs/py-junos-eznc
| [
"tests/unit/facts/test_domain.py::TestDomain::test_domain_rpc_error"
]
| []
| [
"tests/unit/facts/test_domain.py::TestDomain::test_domain_in_configuration",
"tests/unit/facts/test_domain.py::TestDomain::test_resolv_conf",
"tests/unit/facts/test_domain.py::TestDomain::test_resolv_conf_file_absent_under_etc",
"tests/unit/facts/test_domain.py::TestDomain::test_resolv_conf_no_domain"
]
| []
| Apache License 2.0 | 164 | [
"lib/jnpr/junos/facts/domain.py"
]
| [
"lib/jnpr/junos/facts/domain.py"
]
|
|
pre-commit__pre-commit-hooks-66 | 6077f2fefb542fa4ffd3e2f305d1986645110645 | 2015-06-11 20:44:35 | f82fb149af2c1b552b50e3e38e38ed3a44d4cda1 | diff --git a/pre_commit_hooks/detect_private_key.py b/pre_commit_hooks/detect_private_key.py
index 215ad56..782b680 100644
--- a/pre_commit_hooks/detect_private_key.py
+++ b/pre_commit_hooks/detect_private_key.py
@@ -1,7 +1,6 @@
from __future__ import print_function
import argparse
-import io
import sys
@@ -13,12 +12,11 @@ def detect_private_key(argv=None):
private_key_files = []
for filename in args.filenames:
- with io.open(filename, 'r') as f:
- content = f.read()
- if 'BEGIN RSA PRIVATE KEY' in content:
- private_key_files.append(content)
- if 'BEGIN DSA PRIVATE KEY' in content:
- private_key_files.append(content)
+ content = open(filename, 'rb').read()
+ if b'BEGIN RSA PRIVATE KEY' in content:
+ private_key_files.append(content)
+ if b'BEGIN DSA PRIVATE KEY' in content:
+ private_key_files.append(content)
if private_key_files:
for private_key_file in private_key_files:
| detect-private-key hook appears to crash with images in the directory
I just added some images and a resources.qrc file to my PyQt4 project and got this error. Is it because the hook tries to decode images as text?
```
Detect Private Key.......................................................Failed
hookid: detect-private-key
Traceback (most recent call last):
File "C:\cygwin64\home\builder\.pre-commit\repo7jrxo4\py_env-default\Scripts\detect-private-key-script.py", line 9, in <module>
load_entry_point('pre-commit-hooks==0.4.2', 'console_scripts', 'detect-private-key')()
File "C:\cygwin64\home\builder\.pre-commit\repo7jrxo4\py_env-default\lib\site-packages\pre_commit_hooks\detect_private_key.py", line 17, in detect_private_key
content = f.read()
File "C:\cygwin64\home\builder\.pre-commit\repo7jrxo4\py_env-default\lib\encodings\cp1252.py", line 23, in decode
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
UnicodeDecodeError: 'charmap' codec can't decode byte 0x8f in position 44: character maps to <undefined>
``` | pre-commit/pre-commit-hooks | diff --git a/tests/detect_private_key_test.py b/tests/detect_private_key_test.py
index 6d2e627..c912624 100644
--- a/tests/detect_private_key_test.py
+++ b/tests/detect_private_key_test.py
@@ -10,6 +10,8 @@ TESTS = (
(b'-----BEGIN DSA PRIVATE KEY-----', 1),
(b'ssh-rsa DATA', 0),
(b'ssh-dsa DATA', 0),
+ # Some arbitrary binary data
+ (b'\xa2\xf1\x93\x12', 0),
)
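The new test row above (``b'\xa2\xf1\x93\x12'``) is exactly the class of input that crashed the hook: bytes that no text codec is guaranteed to accept. The fix reads in binary mode so no decoding happens at all. A small self-contained illustration of the difference (the temp file stands in for a committed image; whether text mode actually raises depends on the locale codec, cp1252 on the reporter's Windows box):

```python
import io
import tempfile

blob = b'\xa2\xf1\x93\x12'  # the arbitrary binary data from the test above
with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(blob)

# Old behaviour: text mode decodes with a locale-dependent codec and may
# blow up on binary files.
try:
    io.open(tmp.name, 'r').read()
except UnicodeDecodeError as exc:
    print('text mode:', exc)

# New behaviour: bytes in, bytes compared -- no codec involved.
content = open(tmp.name, 'rb').read()
print(b'BEGIN RSA PRIVATE KEY' in content)  # -> False
```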
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 1
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | autopep8==2.3.2
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
flake8==7.2.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
mccabe==0.7.0
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
-e git+https://github.com/pre-commit/pre-commit-hooks.git@6077f2fefb542fa4ffd3e2f305d1986645110645#egg=pre_commit_hooks
pycodestyle==2.13.0
pyflakes==3.3.1
pytest @ file:///croot/pytest_1738938843180/work
PyYAML==6.0.2
simplejson==3.20.1
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: pre-commit-hooks
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- argparse==1.4.0
- autopep8==2.3.2
- flake8==7.2.0
- mccabe==0.7.0
- pycodestyle==2.13.0
- pyflakes==3.3.1
- pyyaml==6.0.2
- simplejson==3.20.1
prefix: /opt/conda/envs/pre-commit-hooks
| [
"tests/detect_private_key_test.py::test_detect_private_key[\\xa2\\xf1\\x93\\x12-0]"
]
| []
| [
"tests/detect_private_key_test.py::test_detect_private_key[-----BEGIN",
"tests/detect_private_key_test.py::test_detect_private_key[ssh-rsa",
"tests/detect_private_key_test.py::test_detect_private_key[ssh-dsa"
]
| []
| MIT License | 165 | [
"pre_commit_hooks/detect_private_key.py"
]
| [
"pre_commit_hooks/detect_private_key.py"
]
|
|
scieloorg__xylose-77 | 35ee660d381ec682445b2014aadc4b1fa96e414a | 2015-06-12 18:46:22 | c0be8f42edd0a64900280c871c76b856c2a191f7 | diff --git a/setup.py b/setup.py
index 0193a00..bb78b72 100755
--- a/setup.py
+++ b/setup.py
@@ -7,7 +7,7 @@ except ImportError:
setup(
name="xylose",
- version='0.10b',
+ version='0.11b',
description="A SciELO library to abstract a JSON data structure that is a product of the ISIS2JSON conversion using the ISIS2JSON type 3 data model.",
author="SciELO",
author_email="[email protected]",
diff --git a/xylose/scielodocument.py b/xylose/scielodocument.py
index 5e7c073..59a6a86 100644
--- a/xylose/scielodocument.py
+++ b/xylose/scielodocument.py
@@ -25,8 +25,8 @@ else:
html_parser = unescape
# --------------
-LICENSE_REGEX = re.compile(r'a.+href="(.+)"')
-LICENSE_CREATIVE_COMMONS = re.compile(r'licenses/(.*?)/.') # Extracts the creative commons id from the url.
+LICENSE_REGEX = re.compile(r'a.+?href="(.+?)"')
+LICENSE_CREATIVE_COMMONS = re.compile(r'licenses/(.*?/\d\.\d)') # Extracts the creative commons id from the url.
DOI_REGEX = re.compile(r'\d{2}\.\d+/.*$')
def html_decode(string):
@@ -90,6 +90,7 @@ class Journal(object):
for dlicense in self.data['v540']:
if not 't' in dlicense:
continue
+
license_url = LICENSE_REGEX.findall(dlicense['t'])
if len(license_url) == 0:
continue
@@ -400,6 +401,7 @@ class Article(object):
for dlicense in self.data['v540']:
if not 't' in dlicense:
continue
+
license_url = LICENSE_REGEX.findall(dlicense['t'])
if len(license_url) == 0:
continue
| Adjust license identification
License identification currently collects only the license type, e.g.:
by, by-nc...
It is also important to consider the version, e.g.:
by/3.0
by/4.0
by-nc/3.0 | scieloorg/xylose | diff --git a/tests/test_document.py b/tests/test_document.py
index 745859f..db84c57 100644
--- a/tests/test_document.py
+++ b/tests/test_document.py
@@ -351,13 +351,13 @@ class JournalTests(unittest.TestCase):
journal = Journal(self.fulldoc['title'])
- self.assertEqual(journal.permissions['id'], 'by')
+ self.assertEqual(journal.permissions['id'], 'by/3.0')
def test_permission_id(self):
journal = Journal(self.fulldoc['title'])
- self.assertEqual(journal.permissions['id'], 'by-nc')
+ self.assertEqual(journal.permissions['id'], 'by-nc/3.0')
def test_permission_url(self):
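The new expected values (``'by/3.0'``, ``'by-nc/3.0'``) follow directly from the widened ``LICENSE_CREATIVE_COMMONS`` pattern in the patch. A quick sketch of old versus new behaviour on a Creative Commons URL (the regexes are copied from ``scielodocument.py``; the URL is illustrative):

```python
import re

OLD = re.compile(r'licenses/(.*?)/.')        # stops at the first '/': type only
NEW = re.compile(r'licenses/(.*?/\d\.\d)')   # keeps '<type>/<major>.<minor>'

url = 'http://creativecommons.org/licenses/by-nc/3.0/'
print(OLD.findall(url))  # ['by-nc']      -- version lost
print(NEW.findall(url))  # ['by-nc/3.0']  -- version preserved
```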
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 2
} | 0.10 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "",
"pip_packages": [
"nose",
"coverage",
"mocker",
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup==1.2.2
iniconfig==2.1.0
mocker==1.1.1
nose==1.3.7
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
tomli==2.2.1
-e git+https://github.com/scieloorg/xylose.git@35ee660d381ec682445b2014aadc4b1fa96e414a#egg=xylose
| name: xylose
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- mocker==1.1.1
- nose==1.3.7
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- tomli==2.2.1
prefix: /opt/conda/envs/xylose
| [
"tests/test_document.py::JournalTests::test_permission_id",
"tests/test_document.py::JournalTests::test_permission_t1"
]
| []
| [
"tests/test_document.py::ToolsTests::test_get_language_iso639_1_defined",
"tests/test_document.py::ToolsTests::test_get_language_iso639_1_undefined",
"tests/test_document.py::ToolsTests::test_get_language_iso639_2_defined",
"tests/test_document.py::ToolsTests::test_get_language_iso639_2_undefined",
"tests/test_document.py::ToolsTests::test_get_language_without_iso_format",
"tests/test_document.py::ToolsTests::test_get_publication_date_wrong_day",
"tests/test_document.py::ToolsTests::test_get_publication_date_wrong_day_month",
"tests/test_document.py::ToolsTests::test_get_publication_date_wrong_day_month_not_int",
"tests/test_document.py::ToolsTests::test_get_publication_date_wrong_day_not_int",
"tests/test_document.py::ToolsTests::test_get_publication_date_wrong_month_not_int",
"tests/test_document.py::ToolsTests::test_get_publication_date_year",
"tests/test_document.py::ToolsTests::test_get_publication_date_year_day",
"tests/test_document.py::ToolsTests::test_get_publication_date_year_month",
"tests/test_document.py::ToolsTests::test_get_publication_date_year_month_day",
"tests/test_document.py::JournalTests::test_any_issn_priority_electronic",
"tests/test_document.py::JournalTests::test_any_issn_priority_electronic_without_electronic",
"tests/test_document.py::JournalTests::test_any_issn_priority_print",
"tests/test_document.py::JournalTests::test_any_issn_priority_print_without_print",
"tests/test_document.py::JournalTests::test_collection_acronym",
"tests/test_document.py::JournalTests::test_creation_date",
"tests/test_document.py::JournalTests::test_current_status",
"tests/test_document.py::JournalTests::test_current_status_lots_of_changes_study_case_1",
"tests/test_document.py::JournalTests::test_current_status_some_changes",
"tests/test_document.py::JournalTests::test_current_without_v51",
"tests/test_document.py::JournalTests::test_journal",
"tests/test_document.py::JournalTests::test_journal_abbreviated_title",
"tests/test_document.py::JournalTests::test_journal_acronym",
"tests/test_document.py::JournalTests::test_journal_title",
"tests/test_document.py::JournalTests::test_journal_title_nlm",
"tests/test_document.py::JournalTests::test_journal_url",
"tests/test_document.py::JournalTests::test_languages",
"tests/test_document.py::JournalTests::test_languages_without_v350",
"tests/test_document.py::JournalTests::test_load_issn_with_v935_and_v35_ONLINE",
"tests/test_document.py::JournalTests::test_load_issn_with_v935_and_v35_PRINT",
"tests/test_document.py::JournalTests::test_load_issn_with_v935_equal_v400_and_v35_ONLINE",
"tests/test_document.py::JournalTests::test_load_issn_with_v935_equal_v400_and_v35_PRINT",
"tests/test_document.py::JournalTests::test_load_issn_with_v935_without_v35",
"tests/test_document.py::JournalTests::test_load_issn_without_v935_and_v35_ONLINE",
"tests/test_document.py::JournalTests::test_load_issn_without_v935_and_v35_PRINT",
"tests/test_document.py::JournalTests::test_load_issn_without_v935_without_v35",
"tests/test_document.py::JournalTests::test_permission_text",
"tests/test_document.py::JournalTests::test_permission_url",
"tests/test_document.py::JournalTests::test_permission_without_v540",
"tests/test_document.py::JournalTests::test_permission_without_v540_t",
"tests/test_document.py::JournalTests::test_publisher_loc",
"tests/test_document.py::JournalTests::test_publisher_name",
"tests/test_document.py::JournalTests::test_scielo_issn",
"tests/test_document.py::JournalTests::test_status",
"tests/test_document.py::JournalTests::test_status_lots_of_changes",
"tests/test_document.py::JournalTests::test_status_lots_of_changes_study_case_1",
"tests/test_document.py::JournalTests::test_status_some_changes",
"tests/test_document.py::JournalTests::test_status_without_v51",
"tests/test_document.py::JournalTests::test_subject_areas",
"tests/test_document.py::JournalTests::test_update_date",
"tests/test_document.py::JournalTests::test_without_journal_abbreviated_title",
"tests/test_document.py::JournalTests::test_without_journal_acronym",
"tests/test_document.py::JournalTests::test_without_journal_title",
"tests/test_document.py::JournalTests::test_without_journal_title_nlm",
"tests/test_document.py::JournalTests::test_without_journal_url",
"tests/test_document.py::JournalTests::test_without_publisher_loc",
"tests/test_document.py::JournalTests::test_without_publisher_name",
"tests/test_document.py::JournalTests::test_without_scielo_domain",
"tests/test_document.py::JournalTests::test_without_scielo_domain_title_v690",
"tests/test_document.py::JournalTests::test_without_subject_areas",
"tests/test_document.py::JournalTests::test_without_wos_citation_indexes",
"tests/test_document.py::JournalTests::test_without_wos_subject_areas",
"tests/test_document.py::JournalTests::test_wos_citation_indexes",
"tests/test_document.py::JournalTests::test_wos_subject_areas",
"tests/test_document.py::ArticleTests::test_acceptance_date",
"tests/test_document.py::ArticleTests::test_affiliation_just_with_affiliation_name",
"tests/test_document.py::ArticleTests::test_affiliation_without_affiliation_name",
"tests/test_document.py::ArticleTests::test_affiliations",
"tests/test_document.py::ArticleTests::test_ahead_publication_date",
"tests/test_document.py::ArticleTests::test_article",
"tests/test_document.py::ArticleTests::test_author_with_two_affiliations",
"tests/test_document.py::ArticleTests::test_author_with_two_role",
"tests/test_document.py::ArticleTests::test_author_without_affiliations",
"tests/test_document.py::ArticleTests::test_author_without_surname_and_given_names",
"tests/test_document.py::ArticleTests::test_authors",
"tests/test_document.py::ArticleTests::test_collection_acronym",
"tests/test_document.py::ArticleTests::test_collection_acronym_priorizing_collection",
"tests/test_document.py::ArticleTests::test_collection_acronym_retrieving_v992",
"tests/test_document.py::ArticleTests::test_collection_name_brazil",
"tests/test_document.py::ArticleTests::test_collection_name_undefined",
"tests/test_document.py::ArticleTests::test_corporative_authors",
"tests/test_document.py::ArticleTests::test_document_type",
"tests/test_document.py::ArticleTests::test_doi",
"tests/test_document.py::ArticleTests::test_doi_clean_1",
"tests/test_document.py::ArticleTests::test_doi_clean_2",
"tests/test_document.py::ArticleTests::test_doi_v237",
"tests/test_document.py::ArticleTests::test_end_page_loaded_crazy_legacy_way_1",
"tests/test_document.py::ArticleTests::test_end_page_loaded_crazy_legacy_way_2",
"tests/test_document.py::ArticleTests::test_end_page_loaded_through_xml",
"tests/test_document.py::ArticleTests::test_file_code",
"tests/test_document.py::ArticleTests::test_file_code_crazy_slashs_1",
"tests/test_document.py::ArticleTests::test_file_code_crazy_slashs_2",
"tests/test_document.py::ArticleTests::test_first_author",
"tests/test_document.py::ArticleTests::test_first_author_without_author",
"tests/test_document.py::ArticleTests::test_fulltexts_field_fulltexts",
"tests/test_document.py::ArticleTests::test_fulltexts_without_field_fulltexts",
"tests/test_document.py::ArticleTests::test_html_url",
"tests/test_document.py::ArticleTests::test_invalid_document_type",
"tests/test_document.py::ArticleTests::test_issue",
"tests/test_document.py::ArticleTests::test_issue_label_field_v4",
"tests/test_document.py::ArticleTests::test_issue_label_without_field_v4",
"tests/test_document.py::ArticleTests::test_issue_url",
"tests/test_document.py::ArticleTests::test_journal_abbreviated_title",
"tests/test_document.py::ArticleTests::test_journal_acronym",
"tests/test_document.py::ArticleTests::test_journal_title",
"tests/test_document.py::ArticleTests::test_keywords",
"tests/test_document.py::ArticleTests::test_keywords_iso639_2",
"tests/test_document.py::ArticleTests::test_keywords_with_undefined_language",
"tests/test_document.py::ArticleTests::test_keywords_without_subfield_k",
"tests/test_document.py::ArticleTests::test_keywords_without_subfield_l",
"tests/test_document.py::ArticleTests::test_languages_field_fulltexts",
"tests/test_document.py::ArticleTests::test_languages_field_v40",
"tests/test_document.py::ArticleTests::test_last_page",
"tests/test_document.py::ArticleTests::test_mixed_affiliations",
"tests/test_document.py::ArticleTests::test_normalized_affiliations",
"tests/test_document.py::ArticleTests::test_normalized_affiliations_undefined_ISO_3166_CODE",
"tests/test_document.py::ArticleTests::test_normalized_affiliations_without_p",
"tests/test_document.py::ArticleTests::test_original_abstract_with_just_one_language_defined",
"tests/test_document.py::ArticleTests::test_original_abstract_with_language_defined",
"tests/test_document.py::ArticleTests::test_original_abstract_with_language_defined_but_different_of_the_article_original_language",
"tests/test_document.py::ArticleTests::test_original_abstract_without_language_defined",
"tests/test_document.py::ArticleTests::test_original_language_invalid_iso639_2",
"tests/test_document.py::ArticleTests::test_original_language_iso639_2",
"tests/test_document.py::ArticleTests::test_original_language_original",
"tests/test_document.py::ArticleTests::test_original_title_with_just_one_language_defined",
"tests/test_document.py::ArticleTests::test_original_title_with_language_defined",
"tests/test_document.py::ArticleTests::test_original_title_with_language_defined_but_different_of_the_article_original_language",
"tests/test_document.py::ArticleTests::test_original_title_without_language_defined",
"tests/test_document.py::ArticleTests::test_pdf_url",
"tests/test_document.py::ArticleTests::test_processing_date",
"tests/test_document.py::ArticleTests::test_project_name",
"tests/test_document.py::ArticleTests::test_project_sponsors",
"tests/test_document.py::ArticleTests::test_publication_contract",
"tests/test_document.py::ArticleTests::test_publication_date",
"tests/test_document.py::ArticleTests::test_publisher_id",
"tests/test_document.py::ArticleTests::test_publisher_loc",
"tests/test_document.py::ArticleTests::test_publisher_name",
"tests/test_document.py::ArticleTests::test_receive_date",
"tests/test_document.py::ArticleTests::test_review_date",
"tests/test_document.py::ArticleTests::test_start_page",
"tests/test_document.py::ArticleTests::test_start_page_loaded_crazy_legacy_way_1",
"tests/test_document.py::ArticleTests::test_start_page_loaded_crazy_legacy_way_2",
"tests/test_document.py::ArticleTests::test_start_page_loaded_through_xml",
"tests/test_document.py::ArticleTests::test_subject_areas",
"tests/test_document.py::ArticleTests::test_supplement_issue",
"tests/test_document.py::ArticleTests::test_supplement_volume",
"tests/test_document.py::ArticleTests::test_thesis_degree",
"tests/test_document.py::ArticleTests::test_thesis_organization",
"tests/test_document.py::ArticleTests::test_thesis_organization_and_division",
"tests/test_document.py::ArticleTests::test_thesis_organization_without_name",
"tests/test_document.py::ArticleTests::test_translated_abstracts",
"tests/test_document.py::ArticleTests::test_translated_abstracts_without_v83",
"tests/test_document.py::ArticleTests::test_translated_abtracts_iso639_2",
"tests/test_document.py::ArticleTests::test_translated_titles",
"tests/test_document.py::ArticleTests::test_translated_titles_iso639_2",
"tests/test_document.py::ArticleTests::test_translated_titles_without_v12",
"tests/test_document.py::ArticleTests::test_volume",
"tests/test_document.py::ArticleTests::test_whitwout_acceptance_date",
"tests/test_document.py::ArticleTests::test_whitwout_ahead_publication_date",
"tests/test_document.py::ArticleTests::test_whitwout_receive_date",
"tests/test_document.py::ArticleTests::test_whitwout_review_date",
"tests/test_document.py::ArticleTests::test_without_affiliations",
"tests/test_document.py::ArticleTests::test_without_authors",
"tests/test_document.py::ArticleTests::test_without_citations",
"tests/test_document.py::ArticleTests::test_without_collection_acronym",
"tests/test_document.py::ArticleTests::test_without_corporative_authors",
"tests/test_document.py::ArticleTests::test_without_document_type",
"tests/test_document.py::ArticleTests::test_without_doi",
"tests/test_document.py::ArticleTests::test_without_html_url",
"tests/test_document.py::ArticleTests::test_without_issue",
"tests/test_document.py::ArticleTests::test_without_issue_url",
"tests/test_document.py::ArticleTests::test_without_journal_abbreviated_title",
"tests/test_document.py::ArticleTests::test_without_journal_acronym",
"tests/test_document.py::ArticleTests::test_without_journal_title",
"tests/test_document.py::ArticleTests::test_without_keywords",
"tests/test_document.py::ArticleTests::test_without_last_page",
"tests/test_document.py::ArticleTests::test_without_normalized_affiliations",
"tests/test_document.py::ArticleTests::test_without_original_abstract",
"tests/test_document.py::ArticleTests::test_without_original_title",
"tests/test_document.py::ArticleTests::test_without_pages",
"tests/test_document.py::ArticleTests::test_without_pdf_url",
"tests/test_document.py::ArticleTests::test_without_processing_date",
"tests/test_document.py::ArticleTests::test_without_project_name",
"tests/test_document.py::ArticleTests::test_without_project_sponsor",
"tests/test_document.py::ArticleTests::test_without_publication_contract",
"tests/test_document.py::ArticleTests::test_without_publication_date",
"tests/test_document.py::ArticleTests::test_without_publisher_id",
"tests/test_document.py::ArticleTests::test_without_publisher_loc",
"tests/test_document.py::ArticleTests::test_without_publisher_name",
"tests/test_document.py::ArticleTests::test_without_scielo_domain",
"tests/test_document.py::ArticleTests::test_without_scielo_domain_article_v69",
"tests/test_document.py::ArticleTests::test_without_scielo_domain_article_v69_and_with_title_v690",
"tests/test_document.py::ArticleTests::test_without_scielo_domain_title_v690",
"tests/test_document.py::ArticleTests::test_without_start_page",
"tests/test_document.py::ArticleTests::test_without_subject_areas",
"tests/test_document.py::ArticleTests::test_without_suplement_issue",
"tests/test_document.py::ArticleTests::test_without_supplement_volume",
"tests/test_document.py::ArticleTests::test_without_thesis_degree",
"tests/test_document.py::ArticleTests::test_without_thesis_organization",
"tests/test_document.py::ArticleTests::test_without_volume",
"tests/test_document.py::ArticleTests::test_without_wos_citation_indexes",
"tests/test_document.py::ArticleTests::test_without_wos_subject_areas",
"tests/test_document.py::ArticleTests::test_wos_citation_indexes",
"tests/test_document.py::ArticleTests::test_wos_subject_areas",
"tests/test_document.py::CitationTest::test_a_link_access_date",
"tests/test_document.py::CitationTest::test_analytic_institution_for_a_article_citation",
"tests/test_document.py::CitationTest::test_analytic_institution_for_a_book_citation",
"tests/test_document.py::CitationTest::test_article_title",
"tests/test_document.py::CitationTest::test_article_without_title",
"tests/test_document.py::CitationTest::test_authors_article",
"tests/test_document.py::CitationTest::test_authors_book",
"tests/test_document.py::CitationTest::test_authors_link",
"tests/test_document.py::CitationTest::test_authors_thesis",
"tests/test_document.py::CitationTest::test_book_chapter_title",
"tests/test_document.py::CitationTest::test_book_edition",
"tests/test_document.py::CitationTest::test_book_volume",
"tests/test_document.py::CitationTest::test_book_without_chapter_title",
"tests/test_document.py::CitationTest::test_citation_sample_congress",
"tests/test_document.py::CitationTest::test_citation_sample_link",
"tests/test_document.py::CitationTest::test_citation_sample_link_without_comment",
"tests/test_document.py::CitationTest::test_conference_edition",
"tests/test_document.py::CitationTest::test_conference_name",
"tests/test_document.py::CitationTest::test_conference_sponsor",
"tests/test_document.py::CitationTest::test_conference_without_name",
"tests/test_document.py::CitationTest::test_conference_without_sponsor",
"tests/test_document.py::CitationTest::test_date",
"tests/test_document.py::CitationTest::test_doi",
"tests/test_document.py::CitationTest::test_editor",
"tests/test_document.py::CitationTest::test_end_page_14",
"tests/test_document.py::CitationTest::test_end_page_514",
"tests/test_document.py::CitationTest::test_end_page_withdout_data",
"tests/test_document.py::CitationTest::test_first_author_article",
"tests/test_document.py::CitationTest::test_first_author_book",
"tests/test_document.py::CitationTest::test_first_author_link",
"tests/test_document.py::CitationTest::test_first_author_thesis",
"tests/test_document.py::CitationTest::test_first_author_without_monographic_authors",
"tests/test_document.py::CitationTest::test_first_author_without_monographic_authors_but_not_a_book_citation",
"tests/test_document.py::CitationTest::test_index_number",
"tests/test_document.py::CitationTest::test_institutions_all_fields",
"tests/test_document.py::CitationTest::test_institutions_v11",
"tests/test_document.py::CitationTest::test_institutions_v17",
"tests/test_document.py::CitationTest::test_institutions_v29",
"tests/test_document.py::CitationTest::test_institutions_v50",
"tests/test_document.py::CitationTest::test_institutions_v58",
"tests/test_document.py::CitationTest::test_invalid_edition",
"tests/test_document.py::CitationTest::test_isbn",
"tests/test_document.py::CitationTest::test_isbn_but_not_a_book",
"tests/test_document.py::CitationTest::test_issn",
"tests/test_document.py::CitationTest::test_issn_but_not_an_article",
"tests/test_document.py::CitationTest::test_issue_part",
"tests/test_document.py::CitationTest::test_issue_title",
"tests/test_document.py::CitationTest::test_journal_issue",
"tests/test_document.py::CitationTest::test_journal_volume",
"tests/test_document.py::CitationTest::test_link",
"tests/test_document.py::CitationTest::test_link_title",
"tests/test_document.py::CitationTest::test_link_without_title",
"tests/test_document.py::CitationTest::test_monographic_authors",
"tests/test_document.py::CitationTest::test_monographic_first_author",
"tests/test_document.py::CitationTest::test_pages_14",
"tests/test_document.py::CitationTest::test_pages_514",
"tests/test_document.py::CitationTest::test_pages_withdout_data",
"tests/test_document.py::CitationTest::test_publication_type_article",
"tests/test_document.py::CitationTest::test_publication_type_book",
"tests/test_document.py::CitationTest::test_publication_type_conference",
"tests/test_document.py::CitationTest::test_publication_type_link",
"tests/test_document.py::CitationTest::test_publication_type_thesis",
"tests/test_document.py::CitationTest::test_publication_type_undefined",
"tests/test_document.py::CitationTest::test_publisher",
"tests/test_document.py::CitationTest::test_publisher_address",
"tests/test_document.py::CitationTest::test_publisher_address_without_e",
"tests/test_document.py::CitationTest::test_series_book",
"tests/test_document.py::CitationTest::test_series_but_neither_journal_book_or_conference_citation",
"tests/test_document.py::CitationTest::test_series_conference",
"tests/test_document.py::CitationTest::test_series_journal",
"tests/test_document.py::CitationTest::test_source_book_title",
"tests/test_document.py::CitationTest::test_source_journal",
"tests/test_document.py::CitationTest::test_source_journal_without_journal_title",
"tests/test_document.py::CitationTest::test_sponsor",
"tests/test_document.py::CitationTest::test_start_page_14",
"tests/test_document.py::CitationTest::test_start_page_514",
"tests/test_document.py::CitationTest::test_start_page_withdout_data",
"tests/test_document.py::CitationTest::test_thesis_institution",
"tests/test_document.py::CitationTest::test_thesis_title",
"tests/test_document.py::CitationTest::test_thesis_without_title",
"tests/test_document.py::CitationTest::test_title_when_article_citation",
"tests/test_document.py::CitationTest::test_title_when_conference_citation",
"tests/test_document.py::CitationTest::test_title_when_link_citation",
"tests/test_document.py::CitationTest::test_title_when_thesis_citation",
"tests/test_document.py::CitationTest::test_with_volume_but_not_a_journal_article_neither_a_book",
"tests/test_document.py::CitationTest::test_without_analytic_institution",
"tests/test_document.py::CitationTest::test_without_authors",
"tests/test_document.py::CitationTest::test_without_date",
"tests/test_document.py::CitationTest::test_without_doi",
"tests/test_document.py::CitationTest::test_without_edition",
"tests/test_document.py::CitationTest::test_without_editor",
"tests/test_document.py::CitationTest::test_without_first_author",
"tests/test_document.py::CitationTest::test_without_index_number",
"tests/test_document.py::CitationTest::test_without_institutions",
"tests/test_document.py::CitationTest::test_without_issue",
"tests/test_document.py::CitationTest::test_without_issue_part",
"tests/test_document.py::CitationTest::test_without_issue_title",
"tests/test_document.py::CitationTest::test_without_link",
"tests/test_document.py::CitationTest::test_without_monographic_authors",
"tests/test_document.py::CitationTest::test_without_monographic_authors_but_not_a_book_citation",
"tests/test_document.py::CitationTest::test_without_publisher",
"tests/test_document.py::CitationTest::test_without_publisher_address",
"tests/test_document.py::CitationTest::test_without_series",
"tests/test_document.py::CitationTest::test_without_sponsor",
"tests/test_document.py::CitationTest::test_without_thesis_institution",
"tests/test_document.py::CitationTest::test_without_volume"
]
| []
| BSD 2-Clause "Simplified" License | 166 | [
"setup.py",
"xylose/scielodocument.py"
]
| [
"setup.py",
"xylose/scielodocument.py"
]
|
|
praw-dev__praw-426 | eb91d191eb09d94df144589ba6561f387a3a2922 | 2015-06-15 14:08:27 | eb91d191eb09d94df144589ba6561f387a3a2922 | diff --git a/praw/objects.py b/praw/objects.py
index df96fbf1..f4014f10 100755
--- a/praw/objects.py
+++ b/praw/objects.py
@@ -75,7 +75,7 @@ class RedditContentObject(object):
def __getattr__(self, attr):
"""Return the value of the `attr` attribute."""
- if not self.has_fetched:
+ if attr != '__setstate__' and not self.has_fetched:
self.has_fetched = self._populate(None, True)
return getattr(self, attr)
raise AttributeError('\'%s\' has no attribute \'%s\'' % (type(self),
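The one-token guard added above matters because ``pickle`` probes a freshly created, still-empty instance for ``__setstate__`` (the ``load_build`` frame in the traceback below); a lazy ``__getattr__`` that reads ``self.has_fetched`` at that point re-enters itself forever. A minimal reproduction of the recursion, independent of praw:

```python
import pickle

class Lazy(object):
    def __init__(self):
        self.has_fetched = True  # present normally, absent mid-unpickle

    def __getattr__(self, attr):
        # During unpickling, __dict__ is still empty when pickle does
        # getattr(inst, '__setstate__', None), so reading self.has_fetched
        # below triggers __getattr__ again, recursing without end.
        if not self.has_fetched:
            pass  # (real code would lazy-load here)
        raise AttributeError(attr)

try:
    pickle.loads(pickle.dumps(Lazy()))
except RuntimeError as exc:  # RecursionError on Python 3 is a RuntimeError subclass
    print(type(exc).__name__, exc)
```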
| Can't unpickle Comment objects (maximum recursion depth exceeded)
Here's the stack trace:
```pytb
>>> import pickle
>>> import praw
>>> r = praw.Reddit('test')
>>> comment = r.get_info(thing_id='t1_cs6woop')
>>> pickled = pickle.dumps(comment)
>>> pickle.loads(pickled)
Traceback (most recent call last):
File "<console>", line 1, in <module>
File "/usr/lib/python2.7/pickle.py", line 1382, in loads
return Unpickler(file).load()
File "/usr/lib/python2.7/pickle.py", line 858, in load
dispatch[key](self)
File "/usr/lib/python2.7/pickle.py", line 1215, in load_build
setstate = getattr(inst, "__setstate__", None)
File "/home/___/.virtualenvs/___/local/lib/python2.7/site-packages/praw/objects.py", line 82, in __getattr__
if not self.has_fetched:
File "/home/___/.virtualenvs/___/local/lib/python2.7/site-packages/praw/objects.py", line 82, in __getattr__
if not self.has_fetched:
File "/home/___/.virtualenvs/___/local/lib/python2.7/site-packages/praw/objects.py", line 82, in __getattr__
if not self.has_fetched:
...
RuntimeError: maximum recursion depth exceeded
``` | praw-dev/praw | diff --git a/tests/cassettes/test_unpickle_comment.json b/tests/cassettes/test_unpickle_comment.json
new file mode 100644
index 00000000..ce5e4d79
--- /dev/null
+++ b/tests/cassettes/test_unpickle_comment.json
@@ -0,0 +1,1 @@
+{"recorded_with": "betamax/0.4.2", "http_interactions": [{"recorded_at": "2015-06-15T13:50:36", "response": {"status": {"code": 200, "message": "OK"}, "headers": {"transfer-encoding": ["chunked"], "cache-control": ["private, no-cache", "no-cache"], "server": ["cloudflare-nginx"], "set-cookie": ["__cfduid=d28895e21c18fe156bbe302205f9b22ac1434376236; expires=Tue, 14-Jun-16 13:50:36 GMT; path=/; domain=.reddit.com; HttpOnly", "secure_session=; Domain=reddit.com; Max-Age=-1434376237; Path=/; expires=Thu, 01-Jan-1970 00:00:01 GMT; HttpOnly", "reddit_session=7302867%2C2015-06-15T06%3A50%3A37%2Cc0e114b1f45a1bc763984b34c83d31df49a92fed; Domain=reddit.com; Path=/; HttpOnly"], "x-moose": ["majestic"], "x-xss-protection": ["1; mode=block"], "connection": ["keep-alive"], "content-encoding": ["gzip"], "pragma": ["no-cache"], "x-ua-compatible": ["IE=edge"], "x-frame-options": ["SAMEORIGIN"], "cf-ray": ["1f6ebeb510a504a3-CDG"], "date": ["Mon, 15 Jun 2015 13:50:37 GMT"], "content-type": ["application/json; charset=UTF-8"], "x-content-type-options": ["nosniff"]}, "body": {"encoding": "UTF-8", "base64_string": "H4sIAAAAAAAAAxzLTWrDMBBA4auIWSug0c+MpXNkV0oZWaM6TRMV26GL4LuXdPs+3hO+tnGHYp6g6zrWDYp5e7cGmuzyn++q7WPZ958Xdfne1Bq4jbbItkAxcPt8CD+uv3zBPElGVidEnZPXWmtLVSmRsnOkVTwqR7AG5jGuF339HJyfiK13mE6OTpjOjkpyJbCdnSLGij0mwTozhTzFGuI8hRaw9Zgl+64NjuP4AwAA//8DABH3aj7KAAAA"}, "url": "https://api.reddit.com/api/login/.json"}, "request": {"headers": {"Accept-Encoding": ["gzip, deflate"], "Connection": ["keep-alive"], "Accept": ["*/*"], "Content-Length": ["45"], "User-Agent": ["PRAW_test_suite PRAW/3.0a1 Python/2.7.8 Linux-3.16.0-37-generic-x86_64-with-Ubuntu-14.10-utopic"], "Content-Type": ["application/x-www-form-urlencoded"]}, "body": {"encoding": "utf-8", "string": "passwd=1111&api_type=json&user=PyAPITestUser2"}, "uri": "https://api.reddit.com/api/login/.json", "method": "POST"}}, {"recorded_at": "2015-06-15T13:50:37", "response": {"status": {"code": 200, "message": "OK"}, "headers": {"x-ratelimit-remaining": ["299"], "content-type": ["application/json; charset=UTF-8"], "cache-control": ["private, no-cache", "no-cache"], "content-length": ["2574"], "server": ["cloudflare-nginx"], "x-content-type-options": ["nosniff"], "x-moose": ["majestic"], "x-ratelimit-used": ["1"], "x-frame-options": ["SAMEORIGIN"], "x-sup-id": ["http://www.reddit.com/sup.json#e4356386a7"], "x-xss-protection": ["1; mode=block"], "connection": ["keep-alive"], "content-encoding": ["gzip"], "pragma": ["no-cache"], "x-ua-compatible": ["IE=edge"], "x-reddit-tracking": ["https://pixel.redditmedia.com/pixel/of_destiny.png?v=HcLLGdCwZSCTj%2BYMBgG8Ln0%2FKHA5a%2FIoJVNJ%2Fv%2FXrv6TSUaosRpQ67%2Fyfcg6Z7i5Diz8FZevtuCHzUmagS6TUQ7G47Go4bcv"], "cf-ray": ["1f6ebeb9f0cd04a3-CDG"], "date": ["Mon, 15 Jun 2015 13:50:37 GMT"], "x-ratelimit-reset": ["563"], "vary": ["accept-encoding"]}, "body": {"encoding": "UTF-8", "base64_string": 
"H4sIAC3YflUC/+2cW2/bRhqG/4rgi6KLzcRzPqQoFotFgV1gL3rRYi/qhTAnRox1ikT5FPS/d2ZIKpTkOpJFUrLrG8cmafKd+b734Ts0oy8X1/nUXXwYXPw3Xxb59OPFu8GF04UOm75cTGZupJejuNsvVlO4yPESeWSVdAR7yz3VAmVQQwup4BZxCWVmtDeCIB7PZEf52C38NJzhty/rSxVo4yrLlVl45/JimJe72RAXbJZOMM6n18MiL8Y+7vlffp0PfpoWi/vBbDq4WkFoSfyqbDzW6OnUu6G5D4dOV+Nx2LTwk9mNHg8XXi9nUUW1PZ22uhoZEirvaLG+nF4Vo9ki7vv5/p8//+cXvyx+XfoFLg+49suwq1isfDr/fJynDRdx7yocFq41ny2KuO23/4dtS33j44UyPV7GXymvaj8vPc4f4i99DFOUjoDhB70IU7b5C+X5qiGsT/uUyLkOU76eTTQMF3P5tY67lna2iFOJ4inm88XsZmvG7CxMb9i6WOZ6nBdxT9RlZi5+e/HrNP+88oM47vsPA7mYf75Wo5vra2g4xYwb5KEWkmkBCUYs9IOiOOPcWcKQIDpp8KHUGyMsxzLMxjpfDO1yObRjvUyTeneXOmV2mwZeCxmOisk47v5uXPzg8ptBOv7Hq4uJu7r47mPxQ9w+j98cKzee6DKd6Wqavg9Xiz+lqazbNiqpGljP82ERSrGe6uEody71fz3YqZ6kXi7rUjeBDeUtJwVRArEikKP3qSGac1P4u3i1ZhOvFmkmRkUx/3B56afvb4NH5mGK9fvZ4uNl0yP/yN2PeHFLb8yDcvcCU5tB7Aj3yhAPkUDOkEwo5IThGRQ0k07wZNVK3XBV2FqhVLRSuJrH2sSGCrjY7v7patLYFA93JWdW+XKUxhsH8/vv7wZveNjGA3/peLCf/Kf5IpwCca0QM8QST53KoJRUU8yJ8t6K4DlOENMYKX81vZr+/adQ68EvsdXfnRAWh4tvwqLERfrh63h6oEnZM7s0IfDcaVIpfKNJVzRJQ+yaJs3xtQQTO5tMwhU+DDxC5AGtGJ4xawiTWglFJeXaK42QQ8YjgsL9HGbQKOY9UqcByBGC+yBEGcx3CIHluROiUvhGiE4IUUeAl0SIf4fBDJaziR+sprmdOf9h8H0sgAsHpX/d355IFFVfQaR2O79VLuwh8/TZoa7/NhkQwWx3fs6JDEJUCs+HDLH/B//SdhRKrR1llGUaxAECSsMXwxAHmDHHnIfCGX8cL9C9ZqlFe+QFD4NKmOuBF+vx9cCLDUJ0Hxn2UNS189eVbDofS8aCtfAznH97e/u+lPI+JKHLxeWWrssqHy0vy8Jexo1DG80yrL0SrRKdEo3S9MnlYwwIWinnldbzZACiBlnCDLAaOkCVRUBxYgAh0EKmncBKHMsANvnUOwNocd0HA1Kb4uWtaQMCm8t86K32nmRASmYBJThUhmICFNYUUSuJ4+mqPTLhcIHdI6Iq9C4i+HOWDQchIvR1ExG1laKTopGij5o2egIRldbzRMRrjQl0nmana0S0GRO21/FGIGQyiQHVMFiQQQi0CJXSWei7zHBHTKJ3/4w4SGIPlChr/QglXlCQqLSeJyVeaZDAhU1/ee6FEtX4jqXEHtH9yYcPZbdRuuuMVkmxh8wmGE7y8GFd/y1yUAnFcx4+HESOtvIFpZXW8yRHBhFTMLMgEjHcJjQHRjoGZMawFeFm4XQyzBHkuHsY0f7JgRLwuiZHalP4cJtWyseiYzPhC0eozUQGkFChMj4kP6kMBN5bhgWFWjC4wY7e4sUBAntARFnoXUQ8602JQxAR+7qJiNpKKVsEI0UfNW30BCLO7p2Jv0K4qB4cdI2INsPFdr5XjFPuwrKfhLsMoJxJoLTMAOYaqUw5bLKkqH9GHCSxe0pUtd6lBHtBQaLSep6UeKVBAkEh+6JEPb5jKbFHtv/mEoRSInad0Sop9pB5+iVIXf9tcjBIHlmitUqO1vIFopXWN3L0SI56VfCSyLF98zYSWw8xjX9g4KE0SIfSZAx4I32GrZHEZBvw6D9f7COxc0rUtd6lBH7OG5YnokSl9TwpwSVmQkIFsIAhR9JQZ00EjktOkWWKKlIuh59NCbx88CQp7Y8S05Wa2Gk/lPg6vmMpsceN+5v5AlH4yMP/Vkmxh8xT54uv9d8gBwotztQj75+1SI6yHZrkqB0WDRb9Fe3VdNfj5GhqfSNHr+Rgn/sgR9mm45t0sWPRsfkEkWitnacEMGs9oF6HynAoAVEeQRsWxBSe6GXtAwT2gIiy0I8gottHnO0i4pwfcb5WRFSu7RoRbYaL7XyvEKTaMgbCQtyFfM8EUAwjYJVVxlvJoD3Rm1gHSeyeElWtdykhXlCQqLSeJyWQEZYo5IDkVgPKKQMmw+FuwMOdweqwENXkWEoYlR439UkJWUyT7F4oUY3vWErske2fXoKkbmNk1xmtkmIPmadfgtT13yYHJbzbP46U7bDxx5HKYdFg0V/RXk13/Sk51lrPkxyWZZAqxwCjMuQLIzjQVEGgiPYI4bBHuWPJoa7v+ycHy/sgR9mmZjRvAx1bLzEQBDmlHkBtFKASWaA1lUBCzK1h2GJ8oiecBwjsARFloXcRQTtHROjrJiJqK0UnRSNFHzVt9AQiKq3niYhXGy5g+stE14hoM1xs53tuMJUaZUBLFOiNvANGaQE0F4IbKKGxJ3oT6yCJPVCirPUuJUjnS5D2gkSl9Twp8VqDhJU9vej9dXzHUmKPbP/NJQgl+G0J0qj/NjkIx52To618sdZ6nuQghiEmlAwRMkCDMkyA1FkWH2obmyFvnTv6ESdfrHonh7ntJV+kNhWdfB6WIZZyEyDumIOACmuBzCgCIbAaw1i4cdGU/PqPFwcI7B4RVaF3EYE6X4KEvm4iorZSdFI0UvRR00ZPIKLSep6IeK3honpw0DUi2gwX2/ke+swbRRBA3AR68wwCY6UCRuMMcoQhwklR/4w4SGIPlChrvUMJprr9H+ltBola63lS4pUGCfEJJef2QolqfMdSYo9s/80lCGGPfZxLq6TYQ+bplyB1/bfJgSl6ZH7aJUdb+WKt9TzJoUmGHQ/RAmGO438k40BRG3IklJo5IS1l6XWfI8hx9zC77Z0c+bwXcsQ2LW5uJmmOjkXHZsJXFCsBRRbyHnKAYiKAMpkHmBlnEDImY6f9jO59BHaPiKrQO4ggnYeL2NdNRNRWik6KRoo+atrozxFRaz1PRLzWcNHbp2i2Fy528j0nRGhPASc4viOnGVDCGwCxDs5kmUblZzH2z4iDJPZAid1PzKyc93KCRK31PCnxOoNEuLcX6Zp9UKIe37GU2CPbf3MJEu7mfNcZrZJiD5knX4Ks679JDi4JedancB9CjpbyRUNrK+RIfZ0VPrX15iQZn5WdWx75B7y/uSWyaAAA"}, "url": "https://api.reddit.com/user/PyAPITestUser2/comments.json?sort=new&t=all"}, "request": {"headers": 
{"Connection": ["keep-alive"], "User-Agent": ["PRAW_test_suite PRAW/3.0a1 Python/2.7.8 Linux-3.16.0-37-generic-x86_64-with-Ubuntu-14.10-utopic"], "Accept": ["*/*"], "Accept-Encoding": ["gzip, deflate"], "Cookie": ["reddit_session=7302867%2C2015-06-15T06%3A50%3A37%2Cc0e114b1f45a1bc763984b34c83d31df49a92fed; __cfduid=d28895e21c18fe156bbe302205f9b22ac1434376236"]}, "body": {"encoding": "utf-8", "string": ""}, "uri": "https://api.reddit.com/user/PyAPITestUser2/comments.json?sort=new&t=all", "method": "GET"}}, {"recorded_at": "2015-06-15T13:50:38", "response": {"status": {"code": 200, "message": "OK"}, "headers": {"x-ratelimit-remaining": ["298"], "transfer-encoding": ["chunked"], "cache-control": ["private, no-cache", "no-cache"], "x-moose": ["majestic"], "server": ["cloudflare-nginx"], "x-ratelimit-used": ["2"], "x-frame-options": ["SAMEORIGIN"], "x-xss-protection": ["1; mode=block"], "connection": ["keep-alive"], "content-encoding": ["gzip"], "pragma": ["no-cache"], "x-ua-compatible": ["IE=edge"], "x-reddit-tracking": ["https://pixel.redditmedia.com/pixel/of_destiny.png?v=6qsn21tVL7go5HvVpotLbf0tnpT2hujgxe4105fEz7G1t0%2BHh25pl%2FSGghZNuAZaGB%2FJKaxz67I%3D"], "cf-ray": ["1f6ebebe912c04a3-CDG"], "date": ["Mon, 15 Jun 2015 13:50:38 GMT"], "x-ratelimit-reset": ["562"], "content-type": ["application/json; charset=UTF-8"], "x-content-type-options": ["nosniff"]}, "body": {"encoding": "UTF-8", "base64_string": "H4sIAAAAAAAAA1yRwW7DIBBEfwVxtqrYxgn2rcfecmjPaANLvYqBCnCUNsq/V6AkjXodzQ5vhgs/kjd8Yjx3vGHcQAY+sQufISkHtPCJWVgSNox7cFic++/X/ds7pvyRMNYrSspGwhp0d+uIkLEobSfFth02cnjZNIzPZFDZGJyK4RByerr5DItROqIxVPVid8HMkOby8LYVLq1j+Nl1BzNYsYGulwjCgu7sCOM49LtejhalMJ0WW7krcDcQtWb9gGnFHabUDOZ/1YX8UR0hujJG25WU4Bz6/BDHhvFwwqhaySeW41rOKKlS4SmIavyfozbE8xdFyBQ8n5hfl+UGcsJIltAovOHcY+sHCU1nW9f2h3BWOqw+l42u118AAAD//wMAKxRkF8UBAAA="}, "url": "https://api.reddit.com/user/PyAPITestUser2/about/.json"}, "request": {"headers": {"Connection": ["keep-alive"], "User-Agent": ["PRAW_test_suite PRAW/3.0a1 Python/2.7.8 Linux-3.16.0-37-generic-x86_64-with-Ubuntu-14.10-utopic"], "Accept": ["*/*"], "Accept-Encoding": ["gzip, deflate"], "Cookie": ["reddit_session=7302867%2C2015-06-15T06%3A50%3A37%2Cc0e114b1f45a1bc763984b34c83d31df49a92fed; __cfduid=d28895e21c18fe156bbe302205f9b22ac1434376236"]}, "body": {"encoding": "utf-8", "string": ""}, "uri": "https://api.reddit.com/user/PyAPITestUser2/about/.json", "method": "GET"}}, {"recorded_at": "2015-06-15T13:50:38", "response": {"status": {"code": 200, "message": "OK"}, "headers": {"x-ratelimit-remaining": ["297"], "content-type": ["application/json; charset=UTF-8"], "cache-control": ["private, no-cache", "no-cache"], "content-length": ["682"], "server": ["cloudflare-nginx"], "x-content-type-options": ["nosniff"], "x-moose": ["majestic"], "x-ratelimit-used": ["3"], "x-frame-options": ["SAMEORIGIN"], "x-xss-protection": ["1; mode=block"], "connection": ["keep-alive"], "content-encoding": ["gzip"], "pragma": ["no-cache"], "x-ua-compatible": ["IE=edge"], "x-reddit-tracking": ["https://pixel.redditmedia.com/pixel/of_destiny.png?v=AqxVkrSRF3bN55ymSSdkwfD%2FLdA4QMcYwodHHwNYU4Pccch5s1XTILSk9LACd6ER3FcO3MQmUDmhY5Rn5rzZjrp3D2F%2F%2BvsO"], "cf-ray": ["1f6ebec1714204a3-CDG"], "date": ["Mon, 15 Jun 2015 13:50:38 GMT"], "x-ratelimit-reset": ["562"], "vary": ["accept-encoding"]}, "body": {"encoding": "UTF-8", "base64_string": 
"H4sIAC7YflUC/61UyU7cQBD9FccHTsx4XwaUAyIimgOLICdC1OqlPNOZ9kJ3e2BA/Hu6G5sxHJCQcrHsqvKrV/Wq6tnf8Ib5R56vM//Q8xnW2Hw9+wQ3DUjE65V1WpfqSc010vCo0VrXwtibXgjj6ZWNVMj9Y8EqLBQYB3fIsc7a3CKMcbRttOSk1600fi17GzuBHzMyrjqBd6jBNVibBMZMCO64CVPahqwBsz3NtdbdURCQuV73NVHz1x9qYBzPaVsH0c/0PF5dIX41u87l7cU5e7g0hPqL5RKj/vIpOl1GJ7ebEHV0/rdbOQ6gqOSd5m0zVu0fCH38bTbzbk7R5dmZN5sdrPSxNTK+9ajASn2/82t254/2zr782CN5BS3ytHpI6D0UjEZZXpQQVqQK0yIvUhpnxYLQjMa0giTJwoyFFiZwOHeNeze5RviRy8VAxfLWXAvXtGvXA+/kaun9Mk3z0oeUb7aPOfRJRXLDglR5SnHF4qLMU8DVgkAZFSRMynBB8pDFFo62QuBOAWIgQAMzGtY1NFpN1O56IjhFk47Z/NOyxT3Uuo8SuSuyKCxTVuRltFiYSqO8jG3hizJb4CysGKQljUqXu92CjMpPE/1Hab7O8avScDP/SPEnK8+wQc402bVhrEcRh6hPOvv1gZqstODNBglMYLLTmNK2N/oiTDXfWhLRvvNa4qridCLJQHio6ncShoeeefx5TWJ5EpB2WOJ4n9edkg95x13XGXq7G1QC1u6wREkcZ1lYFNncJPB76UQPZPDhNASuiZwBwmw6ogMS6rUl/wFtOHnvlRlvVt2a+vC7i+Vcyqi0hhoQNJgIR3JwDxuCFG0lIEem5o1lY5OZFgyc9a5zFb/29k0Wpexcj07c7KYXdN/TsbiXl38Stiz/ywUAAA=="}, "url": "https://api.reddit.com/r/reddit_api_test/about/.json"}, "request": {"headers": {"Connection": ["keep-alive"], "User-Agent": ["PRAW_test_suite PRAW/3.0a1 Python/2.7.8 Linux-3.16.0-37-generic-x86_64-with-Ubuntu-14.10-utopic"], "Accept": ["*/*"], "Accept-Encoding": ["gzip, deflate"], "Cookie": ["reddit_session=7302867%2C2015-06-15T06%3A50%3A37%2Cc0e114b1f45a1bc763984b34c83d31df49a92fed; __cfduid=d28895e21c18fe156bbe302205f9b22ac1434376236"]}, "body": {"encoding": "utf-8", "string": ""}, "uri": "https://api.reddit.com/r/reddit_api_test/about/.json", "method": "GET"}}]}
\ No newline at end of file
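The JSON blob above is a betamax cassette: each recorded interaction pairs a request with a response whose body is stored as a gzipped, base64-encoded string. A sketch of how such a cassette is replayed during the test run, using betamax's documented API (the session setup and cassette name here are illustrative, not taken from this repo's test helpers):
```
import betamax
import requests

session = requests.Session()
recorder = betamax.Betamax(session, cassette_library_dir='tests/cassettes')
with recorder.use_cassette('PyAPITestUser2'):  # hypothetical cassette name
    # Matching requests are answered from the recorded bodies instead of
    # hitting api.reddit.com, so the suite runs offline and deterministically.
    resp = session.get('https://api.reddit.com/user/PyAPITestUser2/about/.json')
    print(resp.status_code)  # 200, served straight from the cassette
```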
diff --git a/tests/test_comments.py b/tests/test_comments.py
index 343e8dc3..2acced95 100644
--- a/tests/test_comments.py
+++ b/tests/test_comments.py
@@ -1,6 +1,7 @@
 """Tests for Comment class."""
 
 from __future__ import print_function, unicode_literals
+import pickle
 from praw import helpers
 from praw.objects import Comment, MoreComments
 from .helper import PRAWTest, betamax
@@ -97,6 +98,15 @@ class CommentTest(PRAWTest):
                    lambda item: isinstance(item, Comment))
         self.assertEqual(comment._replies, None)
 
+    @betamax
+    def test_unpickle_comment(self):
+        item = next(self.r.user.get_comments())
+        pkl = pickle.dumps(item)
+        try:
+            pickle.loads(pkl)
+        except RuntimeError:
+            self.fail("unpickling shouldn't throw a RuntimeError exception")
+
 
 class MoreCommentsTest(PRAWTest):
     def betamax_init(self):
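The test above guards against a specific failure mode: PRAW's models fetch attributes lazily via `__getattr__`, and `pickle.loads` probes attributes such as `__setstate__` on a half-built instance before its `__dict__` has been restored. A toy reproduction of that recursion (a minimal sketch with an invented `Lazy` class, not PRAW's actual code):
```
import pickle


class Lazy(object):
    """Toy stand-in for a lazily fetched object; not PRAW's real class."""

    def __init__(self):
        self.has_fetched = True

    def __getattr__(self, attr):
        # pickle.loads() looks up __setstate__ before the instance dict is
        # restored, so `self.has_fetched` is itself missing here and this
        # check re-enters __getattr__ until Python hits "maximum recursion
        # depth exceeded" -- a RuntimeError, hence the test's except clause.
        if not self.has_fetched:
            raise AttributeError('%s has not been fetched yet' % attr)
        raise AttributeError(attr)


try:
    pickle.loads(pickle.dumps(Lazy()))
except RuntimeError as exc:  # RecursionError subclasses RuntimeError on py3
    print('unpickling failed:', exc)
```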
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 2.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"flake8",
"betamax",
"betamax-matchers",
"mock",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | betamax==0.9.0
betamax-matchers==0.4.0
certifi==2025.1.31
charset-normalizer==3.4.1
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
flake8==7.2.0
idna==3.10
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
mccabe==0.7.0
mock==5.2.0
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
-e git+https://github.com/praw-dev/praw.git@eb91d191eb09d94df144589ba6561f387a3a2922#egg=praw
pycodestyle==2.13.0
pyflakes==3.3.1
pytest @ file:///croot/pytest_1738938843180/work
requests==2.32.3
requests-toolbelt==1.0.0
six==1.17.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
update-checker==0.18.0
urllib3==2.3.0
| name: praw
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- betamax==0.9.0
- betamax-matchers==0.4.0
- certifi==2025.1.31
- charset-normalizer==3.4.1
- flake8==7.2.0
- idna==3.10
- mccabe==0.7.0
- mock==5.2.0
- pycodestyle==2.13.0
- pyflakes==3.3.1
- requests==2.32.3
- requests-toolbelt==1.0.0
- six==1.17.0
- update-checker==0.18.0
- urllib3==2.3.0
prefix: /opt/conda/envs/praw
| [
"tests/test_comments.py::CommentTest::test_unpickle_comment"
]
| []
| [
"tests/test_comments.py::CommentTest::test_add_comment",
"tests/test_comments.py::CommentTest::test_add_reply",
"tests/test_comments.py::CommentTest::test_edit",
"tests/test_comments.py::CommentTest::test_front_page_comment_replies_are_none",
"tests/test_comments.py::CommentTest::test_get_comments_permalink",
"tests/test_comments.py::CommentTest::test_inbox_comment_permalink",
"tests/test_comments.py::CommentTest::test_inbox_comment_replies_are_none",
"tests/test_comments.py::CommentTest::test_save_comment",
"tests/test_comments.py::CommentTest::test_spambox_comments_replies_are_none",
"tests/test_comments.py::CommentTest::test_unicode_comment",
"tests/test_comments.py::CommentTest::test_user_comment_permalink",
"tests/test_comments.py::CommentTest::test_user_comment_replies_are_none",
"tests/test_comments.py::MoreCommentsTest::test_all_comments",
"tests/test_comments.py::MoreCommentsTest::test_comments_method"
]
| []
| BSD 2-Clause "Simplified" License | 168 | [
"praw/objects.py"
]
| [
"praw/objects.py"
]
|
|
sigmavirus24__github3.py-397 | 270f6d9c0978d0d2da2e4c98da12cbbb1d10c567 | 2015-06-19 22:55:07 | 05ed0c6a02cffc6ddd0e82ce840c464e1c5fd8c4 | balloob: Tests… working on it. | diff --git a/AUTHORS.rst b/AUTHORS.rst
index 6aeb346b..49e1c5b7 100644
--- a/AUTHORS.rst
+++ b/AUTHORS.rst
@@ -92,3 +92,5 @@ Contributors
- Jürgen Hermann (@jhermann)
- Antoine Giraudmaillet (@antoine-g)
+
+- Paulus Schoutsen (@balloob)
diff --git a/github3/pulls.py b/github3/pulls.py
index 7c257feb..4e00c0a0 100644
--- a/github3/pulls.py
+++ b/github3/pulls.py
@@ -12,7 +12,7 @@ from re import match
 from json import dumps
 
 from . import models
-from . import utils
+from .repos.contents import Contents
 from .repos.commit import RepoCommit
 from .users import User
 from .decorators import requires_auth
@@ -78,39 +78,19 @@ class PullFile(models.GitHubCore):
         self.raw_url = pfile.get('raw_url')
         #: Patch generated by this pull request
         self.patch = pfile.get('patch')
+        #: URL to JSON object with content and metadata
+        self.contents_url = pfile.get('contents_url')
 
     def _repr(self):
         return '<Pull Request File [{0}]>'.format(self.filename)
 
     def contents(self):
-        """Return the contents of the file as bytes.
+        """Return the contents of the file.
 
-        :param stream: When true, the resulting object can be iterated over via
-            ``iter_contents``.
+        :returns: :class:`Contents <github3.repos.contents.Contents>`
         """
-        headers = {'Accept': 'application/octet-stream'}
-        resp = self._get(self.raw_url, headers=headers)
-        if self._boolean(resp, 200, 404):
-            return resp.content
-        return b''
-
-    def download(self, path=None):
-        """Download the contents for this file to disk.
-
-        :param path: (optional), path where the file should be saved
-            to, default is the filename provided in the headers and will be
-            written in the current directory.
-            it can take a file-like object as well
-        :type path: str, file-like object
-        :returns: bool -- True if successful, False otherwise
-        """
-        headers = {'Accept': 'application/octet-stream'}
-        resp = self._get(self.raw_url, stream=True, headers=headers)
-        if path is None:
-            path = self.filename
-        if self._boolean(resp, 200, 404):
-            return utils.stream_response_to_file(resp, path)
-        return None
+        json = self._json(self._get(self.contents_url), 200)
+        return self._instance_or_null(Contents, json)
 
 
 class PullRequest(models.GitHubCore):
diff --git a/github3/repos/contents.py b/github3/repos/contents.py
index 6a563ab3..261d7116 100644
--- a/github3/repos/contents.py
+++ b/github3/repos/contents.py
@@ -62,7 +62,7 @@ class Contents(GitHubCore):
         #: with the character set you wish to use, e.g.,
         #: ``content.decoded.decode('utf-8')``.
         #: .. versionchanged:: 0.5.2
-        self.decoded = ''
+        self.decoded = b''
         if self.encoding == 'base64' and self.content:
             self.decoded = b64decode(self.content.encode())
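The one-character change above matters for type consistency: `Contents.decoded` is documented as bytes, so the empty-file sentinel must be bytes as well, or callers that decode it break on empty files. A minimal sketch of the decode path (ours, not the library's code):
```
from base64 import b64decode

content = ''                      # e.g. an empty file in the API payload
decoded = b''                     # the new bytes sentinel
if content:                       # mirrors the encoding == 'base64' branch
    decoded = b64decode(content.encode())
assert isinstance(decoded, bytes)
print(decoded.decode('utf-8'))    # safe whether or not the file was empty
```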
| PullFile.contents() and PullFile.download() don't work for a private repo
I am on the develop branch.
With a private repo, the `contents()` and `download()` methods on PullFile don't work.
I tried debugging it myself.
The request returns a 404:
```
In [10]: pfile._get(pfile.raw_url)
Out[10]: <Response [404]>
```
Dropping the header `Accept: application/vnd.github.v3.full+json` from the request did not help.
Copy-pasting `pfile.raw_url` into my browser, where I am logged in to GitHub, works. | sigmavirus24/github3.py
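The patch resolves this by routing `contents()` through the new `contents_url` attribute, an api.github.com endpoint that carries the client's credentials, instead of the unauthenticated `raw_url` redirect. A usage sketch of the patched behaviour (the token is a placeholder; PR 286 is the one exercised in the cassette below):
```
import github3

gh = github3.login(token='<token>')
repo = gh.repository('sigmavirus24', 'github3.py')
pull = repo.pull_request(286)
for pfile in pull.files():
    # contents() now GETs pfile.contents_url on api.github.com, so the same
    # credentials apply and a private repository returns 200 instead of 404.
    contents = pfile.contents()
    print(pfile.filename, len(contents.decoded))
```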
index f5bbcc49..8d223722 100644
--- a/tests/cassettes/PullFile_contents.json
+++ b/tests/cassettes/PullFile_contents.json
@@ -1,1 +1,1 @@
-{"recorded_with": "betamax/0.4.1", "http_interactions": [{"recorded_at": "2015-02-21T05:46:46", "response": {"headers": {"Transfer-Encoding": "chunked", "Access-Control-Expose-Headers": "ETag, Link, X-GitHub-OTP, X-RateLimit-Limit, X-RateLimit-Remaining, X-RateLimit-Reset, X-OAuth-Scopes, X-Accepted-OAuth-Scopes, X-Poll-Interval", "Content-Security-Policy": "default-src 'none'", "Access-Control-Allow-Origin": "*", "X-RateLimit-Reset": "1424501206", "X-GitHub-Media-Type": "github.v3; param=full; format=json", "X-RateLimit-Remaining": "59", "Server": "GitHub.com", "Cache-Control": "public, max-age=60, s-maxage=60", "X-XSS-Protection": "1; mode=block", "Access-Control-Allow-Credentials": "true", "ETag": "W/\"ce314491c63b8e4855a6544a8a22dd33\"", "X-Served-By": "318e55760cf7cdb40e61175a4d36cd32", "Date": "Sat, 21 Feb 2015 05:46:46 GMT", "Content-Type": "application/json; charset=utf-8", "Strict-Transport-Security": "max-age=31536000; includeSubdomains; preload", "X-GitHub-Request-Id": "42707723:202B:39AD56E9:54E81BC5", "Status": "200 OK", "Vary": "Accept, Accept-Encoding", "Content-Encoding": "gzip", "X-RateLimit-Limit": "60", "X-Frame-Options": "deny", "X-Content-Type-Options": "nosniff", "Last-Modified": "Fri, 20 Feb 2015 03:03:14 GMT"}, "url": "https://api.github.com/repos/sigmavirus24/github3.py/pulls/286", "body": {"encoding": "utf-8", "string": "", "base64_string": "H4sIAAAAAAAAA+1c32+rOBb+VxDzsqttQoCkSVGns6Ndafc+zWi392Wno8iASawSQPxIby7q/77fsYFAkqZpqWb6gK56lRD787F9fGx/5xxKvUhD3dHXeZ5kjmGwRIxXIl8X7tiLN0bKkzgzMrHasK1Ii8yaGupXe5zsjKQIw8ywFtf6lS583bFsezadz60rwG3CZRe5hXoOr4LzRRC8H2BM1SFUwnJv3QNG1qfOZVnBD3DeNFISoB6qqNi4PMVwLa6v9CxnOccEeGGccR9thbH3iA9OwMKMX+m5yEP6/Wff10QkcsFCLWFZpgVxqnkpZ7mIVlrKt4I/aZiyDY/yDDBFRk2UgFuJCPXbY15P13SysCdXOtuynKWH3ZMPs0oZCM2LoxzgUi8Kw5KVf9r+aANtlVYYpAU6tX5OqQisq1SocbnGoHAQh2H8BJRDobvae9yQ0dRsUDB870RBzdKI8zXH2KFLzzQQIsvfLpSsVWJlZflS+IQDrUhT7r9ZsKoexHqKIFEpF7AELNzMS0WSizh6u4Cd2kCL0xWLxHfo3nvQUJs0VNqWN/dQ1kJtviVFf3N1Va00klRsmbejoUm5x8UWg/1OyIP6QMx3CS3Zr7QEMfQi50vmb2gZykX9fKW7sb9Dif9yrv1gLaZyMW/ilGMFY5F9y8cP6UNEf/e//PMXp/4y0sgK5BzaQo80baT99u137QuqYAXSZLQff4W1aNWkFaORnmp5rP2GYQx3v/+lNvs++h7GCU/bxn9rVxa+NivGD9Lg8BEbVY/+SmAMBinNhVeELD2wRLIftZwk/Ibn6xh9iLX/SJP1D2WxNBFoSco8oLCwrnPQ2Re7W/9AHcZ4Kxn9JcsxwtbEnI7Myci6vjdvHHPuTOf/Q5ki8dGP4zKW5cwWjn1NZZRFPoKhInNnNqEiG56uXkDpFFnScIl8ma0ZyTS7mVvmdH5zPZkvzDljs4XHp+6E+VZgTm1z7nom8z2GBmDoxSri0KYIey0aFCFmP47woDxvZM/v3A1OZtDmHTKXh68spssBDQUHXNoOpua1RbtMvethz6v3NHM8QSGfN5YJg/NF7pQa1kMAVStSfE55SJNFSgNjS185y7gWB9oWWwC0XlNAcuLjYdc72l6HXe9d+9Sw69XXgBMHqffsethgoqU6CuvOorGw9RPL3h+HqegJW26PJouRbd6bM2c6c2Y3L9hya2Ra95YJK+1MLSrj4/geY/9VVrRl2ekB9mNln3tZwOYyZFRgaFYdy6X1f/20ct7AduGrQ34X//xh6BL4ZqMvlbWm49FHCL+//sjBqaSnqw80q9eo1xhGwGe+P52ywObu3DQnbMrMYOHZi2Di3biuZc/s6U0Q0G635gz7Eq5GtOsdXI0cbCUjNaz1GUfOY4ByJ39SW/obWqe1NFzMDm+AwxY1bFG4z/3pFzOykrQ66eRqz80JTAmOrmxD97k98wWTEGDjWFY/vMBnoZS8hg+rfVjtAw3TWSSfhIaRLBDRrxXZejkJ2aLB6XjZucL+ugPHEWmhcFOW7iS3I0DRpAHziKt9Ar0uL7L/Evm/C1f7+dcvWyJRUe6xkeQsgXr2KFch9TpVkShEsj3yXS8cql8a+L+iNT3wtsyNwVXFr3G3Z/uIU2QLqOx8pTNrztmml+ASAEDrOH7sBSQBAKQ8CJcwjOc7Xp2k6/WzP6b3h1YIkLUmnXp1vAEpjZocx3KIvHW/836NURrqk5xttuolKtUHjBvGbi8cnA8MCVIauBUod0C+7CsdoRJGBxQXkt6iEkYDmqe838RIMQmkgfzQ611ZjWjIolXBVv1kbUAw6+QxWbHvr/qSzi/LPQogicNPhVv0N3J7HJJUuXDgOuw19S2YPaj0C/XhD9pcmRwC4rt7yVlBdNT+A2BJTw+hP4LiqDFKY2+TldGvfukzupXVr9swOvSM9Mr2HGqFYZR/g8d7XbE+cOr08nlDWoIwShds/fN4PC6JeCFw6TbpJbFCABRLvTWcd30Gt6wxcOrZsFw6TQMS08ftLYyZ30vSBgSAahr7yKoQ2jomecE+kBKgjbj3DfWB3aO0saM4FwF8fBc4js8b3A5Q+VMmIo9fMbjIlCtSQI9x1qZZ7O/bUgjoBmgAQqwcUL1GvcYoDeXwP3RaWqOJPTJtclrCITmzTxHd
s9EERPf8fmKDCXdsWSYpsnXLI9kUsRxr4pgmFYEFrCYWnxDyo2gNGdbxApFBTmFUzLI6lgbf/76v5pyvBrY9Olygl7e5PdyWXq8KUdfxhic4JoC1IRd300s72Y3hJfRx/fJjLxsjFMGgnonvKLqYm/POgcCLiwhOZGs+vdKfKBSItt72w/oggWbUpY+aZlnj48jTAiE89GRvBpz9wyfxKPZ3T5QikTO9KiEvYXVzNwiq2og0jas4HeXIaDlUGrngX1XXN4fqdFwuFn6r+1F1y+cBK8J8qU7T6EcVCaA/U5QCDPdZopyFCTzabX68fqJocT4x+Y0fWL4Jrzaz5rZnsclsGngTP/BM/C28a/vGngNioMURVTV4bsnqk40d4pVUtOUnIcoGWvzSeFbo7hCdSGeJE8ZsiE5sh9edGKBPstoHWvx0vPtAiw+0eE+adKDFsUHSDXSgxd/v4jOaqKeBFh9o8byKLu/FR2FJDrT4QXpblfCUDbT4QIu3Ui96rbOBFj+bUTrQ4nXI/XlOPRtocUnN/ym0+DIUEYJ6ECWa8RCB4aW+VgHiPZOowbhTANoJwNd9HjJTT+ZOA0V6CU/AvDdluUqQkGH7PXt7KhMA8AeJEn1bOZEscdTIx7Rx7JLfZ5N8TAuY0yajBNj10b8v+NsTJ6RHCHmrS6Wk+m1yR/mrt0wj9f/xoXmHwAXa2qjB9EHXvBC3YtSXD0e0uPBQ5gji4deIuSGn3D/lIj1MdX/Q7yiD9tZgd8dZtLdGcoc82giiykTa5nsRNq3mLHtEo1kOJJQMBf2f3DXJpxfVOUYbIet3oyBFlBS5RknB6A+ciN6jG39r9buRQNYZtUtQYWQ++iKjUfDv2rm+t4aSNRR/WOuUYVs3axQhjVQjRDVsnTTjY914b6IxRhI+iHAn5/mVnOPPNWOXZj23RnKf+Ey6e05X/7CZb+WYfyq962ohfQO/JK0UpdHDn30mxR6Dq9Lr8YH+dYadHnQy6+m7TKmvS3c0XSrna4pZV5Vp/Bdlwl8oE/qsUtHr2AH5jUxGE17QPFmS5afACF+k+a6puXTxQoLhdSEHL7sZXhfynhd8DInTH5w4XZ8ydQfZaEdnZYT5UAHE2+JtCwiJ9H28JgjzhtJzlPfxvoT6O0quEUGNCLGA3iShO7Pn/wOG5zafAkoAAA=="}, "status": {"message": "OK", "code": 200}}, "request": {"uri": "https://api.github.com/repos/sigmavirus24/github3.py/pulls/286", "headers": {"Accept-Charset": "utf-8", "Accept-Encoding": "gzip, deflate", "User-Agent": "github3.py/1.0.0a1", "Connection": "keep-alive", "Content-Type": "application/json", "Accept": "application/vnd.github.v3.full+json"}, "body": {"encoding": "utf-8", "string": ""}, "method": "GET"}}, {"recorded_at": "2015-02-21T05:46:46", "response": {"headers": {"Transfer-Encoding": "chunked", "Access-Control-Expose-Headers": "ETag, Link, X-GitHub-OTP, X-RateLimit-Limit, X-RateLimit-Remaining, X-RateLimit-Reset, X-OAuth-Scopes, X-Accepted-OAuth-Scopes, X-Poll-Interval", "Content-Security-Policy": "default-src 'none'", "Access-Control-Allow-Origin": "*", "X-RateLimit-Reset": "1424501206", "X-GitHub-Media-Type": "github.v3; param=full; format=json", "X-RateLimit-Remaining": "58", "Server": "GitHub.com", "Cache-Control": "public, max-age=60, s-maxage=60", "X-XSS-Protection": "1; mode=block", "Access-Control-Allow-Credentials": "true", "ETag": "W/\"a7649b9ef71da693ea6c76a029bfdcf2\"", "X-Served-By": "065b43cd9674091fec48a221b420fbb3", "Date": "Sat, 21 Feb 2015 05:46:46 GMT", "Content-Type": "application/json; charset=utf-8", "Strict-Transport-Security": "max-age=31536000; includeSubdomains; preload", "X-GitHub-Request-Id": "42707723:202B:39AD5730:54E81BC6", "Status": "200 OK", "Vary": "Accept, Accept-Encoding", "Content-Encoding": "gzip", "X-RateLimit-Limit": "60", "X-Frame-Options": "deny", "X-Content-Type-Options": "nosniff", "Last-Modified": "Fri, 20 Feb 2015 03:03:14 GMT"}, "url": "https://api.github.com/repos/sigmavirus24/github3.py/pulls/286/files?per_page=100", "body": {"encoding": "utf-8", "string": "", "base64_string": 
"H4sIAAAAAAAAA+19aZfayJLoX+H0++C+T7a1b77jmRECxCJWCQRMz/Fol0Ab2pDoc99vfyFBlalyeetteuZ2dZsqpMzIyMzIyMjIWP7j5x8yT//h3Q8MhxsMxWIcbbOExeMOZXMEQfAGzZO8beKYSeG2rWM/vP7B8QM70kMbarl+7hUGiSZFEGRvkxreZrmeFxm8C2PLd3zbgme6Zfm5H0fwmORf/2DZgX37ir3+wfT0yLVvr4wgNj4UaQD1vTxPsncoem3jrRmHaOa7oV76aZER1O05Ca2iTS3UsWnLoijdIW2DxXFMp3Tc4UySczCTNwyCpEmKdxzmoeI90ql+/v5WodKvatSMo9yO8uxZy3riv73rc2oncfbZnj/A+KRT/5bazvtvHRKYokTPTQ9G/d//vfMG5+nXTAdpfhF0B55YttMxgzizf8zswPnbu5+izsPPTz+0/318kNp5kUadpuDbIrH0/Frpbe7ngf36+tyIrfp151UL0nr1N6j9U4Q0EP89tU+Fn9rZB73IvdvDtvXUBkgfUrv07fMHoIUQRq7F5nXnCq155ucffOt1B/riwWectUTXoNsCb36u6IottI7eucLr3OB14qiTe37WacgZ3p0KO8vfQu07AAK8SfQUyD+306yjp3ZbEHC2OkYN1e2O5OfDwugIi9Gzuu/aip0sT1uc33VUKP3Qdm5XecfPmy7dNXdX5bGD13rKUOjETtvg9U0nj+868jKMZmSu1VM70HO/tNvBegDUrOyvgfGj/HFor6ACP7I7fmTZFXy2CMHCB8zu0WlG4mEO3l1JJLv1v50L69lUPC2f14n9rvPODPQse/df/+/Gdd5euc6qpQnxShL/9clc3z0AvtJ5f6XAD0bhB1az8n58dZuB7BWQkp7ZzcP3Lfl+gKUIxPmIOBCzDgB+ftXM3qt3N8Jr67eUB4/uqPBVM7LwqKXGOygPw/DqgUChDJDEj4/0+o+7wocMaPIB5+ZLS/NvP0Dh/EdA9HWnQep98/G31x0Cw+/xvV+KH/wIOHNk2h/i9EME9P3jk3F73WmAN5VvS7lZdM00fm7Fr67L/GG2Yb5gDTfMg6TwhnmQDPaabXhHO2mdJ4392IVhvk3YIzN5ZCR3CHy4MpAPep6nvlHkdnZb8rcpe7K0syKx0+e9avnV2xfgPEC441z/5yOJrTM77fzLA50V8C172zz71//qnL24E+oWrJOPi/cOyJXvNdXfd2ZxZN+98oGNXan0rWvnP75qwL5qutAOG0O9pmHYOOo1QT3w3BfwvrK8u/4/kFMH0J+nvutHevDAEYBHPEctvhVpeWhLtYDoE7Q+LXHPoZvmoCVgbp31Su44cdoOxKLhmKsrx7wj33Y0mmUKnLvlps3iet7g8/dNc/dkCHWT9FM6vFH3q3+50lbnRlCd//gZ+8d//uurt4BaqF/3iLfNUL8NYhicK+wvbzbQXlDfbS4v7CCrpkjD4tr94tk2cgapCDaXyD6/wNXuRufF/aDdB+4YO+xz/yOZ5/NZvWdM183iga01Ak/qwLMfX6Gv/tZBOvjdGPlRM/1B/QHG+q7Cf7Qg3v3n97PKn5+x4mfM/K494Mt33+6q/eO3YLU//OP1g+zN0zZlgnzNExjBMgRBO7Zl8A6jcxTPOiyLYRTh4IT9VPYGbphnqAnc1c7hb7RZgrcV+OFFceltw+LvJXQQy5+L5/hnpXN480cI57+4V7+/CP+LUftNBf1fhsWvOQ5grzE4DMCeBEvg55/acxlIEyD+6mZ7ivvph3fAdH/64cbim6/wrZGRbn+CdONHbvOl2eRfw6cdmXA6fHhW5M4b7idYEPDGs2FvTVuYAEQwTTvJ3/SfFHcvfgJyj+2ABGtf4V3LXVvQkyTwTRBu4wgtI+vhLFWSbx1YIUizCK6Vmg39jeACg71WfBQraxR/i73FDPwe+BvR01NYateyN5QbjMXrMe6NCkLqpxh8bE6AEw3srZcWs2tBkIN8s/MvXUEZiR+EtTr819sowOnCi61rIamvXvEoUv/65OFk/D0nxRtgOFolMGUtol+bpEYYZqgP97M3pLKR8PDDEDt1Pz/MPFFCqZOsX2ptas/3Wz1RQrpWyIV+LmzV7/uCgS9FfyxJW49O0Sg1V9K8zhFpvva5UmKOdqnlhK8RmJvV4nybLQK9Osxd6rQdxX4vMlb0EVNkxDtMXQJXB+MS6dUzcmzxMrk/bKaXzEiWzAbrD81uvNXr5WnKbLpLfZxuOHQs5+Pz0JlyOU7n0qDytxpeGGFvSGLi2czVgZgwpzpOJ/JmrGU7k6UIlN0ONJcl1elIQnrdrl5oC9PcC4Sx8/SeGWTaoNuz8D05quxJMVl6uFCsVrxej4nMXy0sb7VnVXrsals5mGIDZ2QGzl44JLK08JFTpoW1m62Kfd+03CWrjoaeAV3va6zDD2tWiTdLK+sns5O2KZJTZg6iIOwlpTLfoBPLoShJP09HyjY3ldAc6OxgKPfogs0qfRMWNI2Eq+1kNpgUS/tSUkcuG9mrYZFNNBJdBeKSYdYbiWY2wWhvbL1DNV0iDKNgkrVEw1pO2BjluIs6q4hgM0UWeGzi25mYBHEaI5fUGl38/WWj0ZS/Cc3hUJxevHmZ9YgINcbynuVRlDiaS384KMf1ZjtlrQsaHOsoyzwsxvhilwi2OxIUDfWZ6dpDaMPLOCQaTKJ5n9HojWHOD3YmXjAhD1HmqMkTMcBZcX7uCsVmjETFWRD6wUGKBlt9wDv21KyMqe+c/KNqIap1vGy32I4SUuviRhOkzy1kV7QmNbUBcgtW0DnTkkT8QrKpRR+y8qQtF+pgElejojeot+lIxC7zrVOOjLLarK31Lqm6ueUxSr2VRO2E06BAmrpBvSxGgS4P8e0hHnbDnD1UOkccNhwNaqal0Uv5SWpzjm2szAtOdo/JcjAZJOFWQbp8PaLsdZHHXcRmI8zrX5ZpIdLD097kpt0jNwlNbLCvgxw/bzf+4eLKCMp1FXLA+Lrh5FyxI8jJGDPkSh3qQ+eIkpxGo4zLMZpazjHD7G39fjUdHFiFw3PVN7fJ6UQPDgycj1dpn6J9oJddLkfudhNYYkHszzBeg5mVHE7pakWY7HTQwwOYftw5aXtJj0m3HExr1Il8QShCEsPseIRVfortLEa0z8KaGCUiIWWn8jInxv16v3Em9QmfskiwS0+OIekHfh8q0Jlj6R9CH8+S9SimVzo/LeJCWDseXXfFcw8/i8W6PvaXeSozeEytstxM6ODkrndHghTlbjjBewNrUSNuchgy/S0QZ0C6Frqbb2bDDTErDHqyOSPpZocGRU6hq/FwtCMSiZ4tphI7EomhSF88HatE0x1iI8orCmkhjnUQrJAyH8MSILsHARt73MypyhlVIhSKHGaYO6ms7hDjOYQuyXNOqvoAH9Sb+KjvsRlyjAa7PW6ag8WY8FAzWeueEburMFdl+qDLHudve8aUOY7W+WZNjLsbZLi2h7I34HYJyaKqt+6HZnFg9JAuZjU/dfP05OXeeFFFM5crBZdE1J0jjda6qvdCrTwgRWBvyU
GhnPCNg83WIw2vBFyUTwmf4fONrmi90f4wEQeHSRHjvDZOdW5iX6TAmu1W1A6JBwJe7eXleLUXN9UG86wteQnp8UCye+oh6ZJDG8PH2rlLKgu3OilTETeRXOunFYoK8XRmVvSxD8vw+3bxUk9bUeBhP3/debIXvoajW3z0QTm4fXPVnb2Zqwso9Hzzb3bb6g3s2qVtvTFuIHXO4myKJyyGZxhQ9RKkZduEbVsMZTIsp19Rrd5UWfYmSePcboWWKzr43zugpbbfg2BrHh8K3qS1N43S6U2cPIo4P/0QxVkEKpFb53P9QaT5CaQdm8cZzsAIDrMNjtEZktAtzqR5GyNoDuN+gjLXejr0ClBpWknj4I0eBPH5DcjrFsghvh60ks9PP+RpcRNtrvr061MCwzrzyQOiKYg/gQ+H+DepHep+9ChPUTyFPRS6CjVvQtvy9bZLV0g3BXNJ/v2qzXzfyEZ/b47zcGZ+/1FoeYatXYHiyX5zJ6T99ENf1d3XHdmPjs8ncPtmBSjKLYrtZ1Pg46PVA9bPH4OU1TyaN1TyRjHjxM6a71d6gKl//mIRB8GbUSORlnpw7Xie6lHmgHz3VNI0vSI62iBYXUnpNjg3wfWNf5O4KE7AuhjGvSP6WP8dyRA9lqHf0RTVo2mSuFY2ddOzH2bxOqZJ6pfQ39egHqre6K79HlRgnewNfLt+uVYEdVj+5uFu5FpRs0FpTRCduZk3Kjyqg0FrzDsS70jTm+zX6NGvhZUigsLMx8IE9g7H3sGTx8IvkthVrXOF8X9vfbhJrpltgmyZ12+SGCTn27oC+VovgvxNlpqdVxFosl49rfR0YBuZ/Pq+ERfN/E07AUmcAoAb9GvTD2NDwn7KYBj2dzhjm0Fh2UphWHFDxhmQJGinY/02Te16T6+1r+yhuQt6mMKPi6BdCtdiNAB+KOA0uvqn6xiWGnSyIYH7tX6ten96aJbB3ztwP9XI/e/vRP7qzf3aezwU4BROkTRL4PRN2v64dkHWDmHZAyFcm3lYxXAMap/Aym6PP6Aeuxb4VTK+Gadwsge16RVWQ1NvcOwNwagPxNIi+NeZ7c99ZrveUKIEx/wOpzcEN7Yams48mZ9QMpPFBS6vi3rHWSOsxgnXwpDc8nme0IzzSQhqf+cr1Lg/d9YbNkKdKbM81qThzyVpQrqmp3R7Kj0fnOeLejelLxO5XOy2uWtMjJlPqW7WZ+b4cZzXs2WqpPvFRRYJc3kCCKJTZ6UQHkpWGxIbdlyXh2IocsgO47P+JpYvFyfC6HBKcEIhEdyibxTxgBVPyEDsr7dlt/JW5tnfTFdB1pO6a9ihzPVe5bpzaxYGI+GgnxQzLf25tcTLKtp5mqLszrJyEJ14smZ6qzO9WfnnApmK2jIzMHbHe/oo6CvTgT8hZoPdepMY7pre46OjXpfRnJMcuX/ZBb5dC/1qvxbMDTXb+UunKKqkHIfxJUIjz80qQpV7nBWJcaYHINmJ3dl4Km42e2WeVCmaLUpb4GMmvQxH3fByQvfjcd/yK0UfwtlGF+ugr1fIUV33mcEiNSyOYsw81GSh1Pj+eZ1PansjnVlrt/YcZSX5R4nbKql2yibUpFgtPcoUQi6ph0alZ4ZrV5E3Hm0mUXA69+TNangcJVNftaiCGgzSeLKlQ3/ri8SlXGherK70k5XsyAMu4suwQEbYDDXJhZUOJ/V2Xu0y0dnQvYu7c62grBZ10sOWtMocxk6xD/zuWq5XThZ15RoXcubE9Wxak2xepWTE0Sye5mLPuMwyjN4cBI2w+ZofC+h4obKMbaFoWsUL95wT7vrosZvlUVvP0g2Sk0ztYMIyPYr6QUPP+wLTorGO7Xzksj6eInzCL3csibIKu0i8ymNBmI9ZguXnLGqTdJ5XTjA3VI+rR3sc23DjgzTo1af1qE4OwfhSrKSjeRifQ5TeTtSdWyf8CsH3Dt5jcV/zJ+dsfx4stVmNzVdR5nOzvDwJ3SXP61XUG497anawyDEIDYtyny/YghQjBQszFPHUJBxIU2j8KMFRXtt0p455nrG1M8ThrLvaiPIxm8UXhiP0YkHsLiyxZ+bIVkVsHHNTdBpiRJiRA6zP4SJP7OzCHRzP4q7A3XCYWOp5lBnT5BgY8WjgZE4/vRB21A0WhBon212KIN6MErc0x1Wov90gLs5NSDg6pLukt2X75shRQyWIBgkzj4lKiWuMDAXqSB3RkPQu1Wl2nq1IpziwpDbtatTWZfjNzutNN5czJzoYtgnOVTRXk8vWmG6Teb7j08op9N5lrYcwQUk6FBeaja5Tg/JyhiU4uZjNze26ZAhuwl5KB8lzgVJhtePDvBDZlYzltJATOUrHdJQeinOf1QOFM7qSOdf6c8nU8r1Tzrt+vNcZCdWRs35UsRNKOOm2lzgH1VkugtATDuveVMDJUhpqO6m+XI6paQ8de7JdKKztrzR5Y7J1uR1R3UUtLaRRt7/0FrvhOStkWSsBI4sNa6e8BAmNL+bYSV+wurJdqCcZZwPdVw5apY2XCeuMeM6gNj22zoYOkZvzpaifN8t4OImI2Vp2qVV+9udYb4OtbSbD0nl+1MUJnKAMS8awpTcSlt3REU+3hUxsuzjrJF0YlKg7m1GOJo5FnNvr027OiJv5iVixc5HeC32JXbBOPFvNnRidWXlmKMtTrjgVFa0Cb7RgXX0mTFzLdm09pmfj9fw4Cg7ibDkXVSn2xpJYJxfjsKsUoCRd6aO4TKRw1pfkQbg1JCwEvZ2oKaZucARS9pPzNmC0brUS+qm09vFNUmZHy+x7ykUpxIuq64jLToWNsgoUg1ta5mqtxG6c7UcXL/Xr+RkVxkbGd1VmVPvz7mlW96KiJks1NbAa0xCUUVfovK8Sh6WrSad8t1PLNdmV+sTMznpJ3o97wiwRzUmycas+K7nYKfao0bY3YL1gnHb7QuWMXQEz9is52LjUwLjQ+3EFeq+xtBuxNT7XNPeoLzNx2SNm43R88ibM7GSIrqgvRUYcFxIpr8WFLmDZBlSSR78/5ber3rQwWG+4nK0X3toVF6HoHg5rVAbVYSGVqiyetAvhxpI77AkCLvi9cDFfX3RRXnRP3bMFCz/AUtIjugizOAwTDA18BtZzMiycKl8MeMnwCnwZTahyJhIqu58PFqdlQpxnic11ER8bp9paHkoDyp36geLuNn2M2uAcWhYeYY/rKQ00Zx/F0mAXpB1W/DBRfTkZ55dLb7Yc2tJ0YxHBkCG2TL/oxyO+zkF7OukjY683qmbOera2SNRz7QsoeXwh71NLYrGwiF2PX9njdE6GtGfSs7hU2VNYbeRyzeaeWcndsZ7QsrCwhJNTTabUfIzEK3W0VdVF4Klb66IZuxmm2Qm5xTTBrCfjrmWR+lJhC8Wd9IloFWPH6ZRiQmmwkMaBPHaA9+AVvugnSF0OLmEvHNBBPQjtUbVdjrnTvs8hY45dyMZ0MRbOcnbxJIM7S4sDoywnVdhfFwvf6l4cJFW53mqKyDxbUUNsje6AMRwVj7lQuYNwu4mGR+g+3hV9ju/vFzE55fHhkONippuDxhYr0fJsX
MbUliWDakkzGM1t0gGrRpXFORSos/cYxwPwC5WOkQEBMsJooDHbqDrSjXY6QDBn4S8U2HIGJa/PvDhyNA6ZrrOEprZGRWFb39j48DfOX7iIm8gyMV9Rg+OCjI/aYmqo3DS1GdHxvSTg49klEa344k744WID6vN1GgvUabK4nHscT3Hzo0CcbXuxvJBHwe0h48FWzMcmoaFjcVyhakUGyzLYaeSB51QBpKmLDlzlMCMtbo3OiaRfx0dvqJwSda8gNcMmrHKaE/oK2aIoJurENpRyfj+V9tKSJwa75bnGJRtfU8lUGywTbe6b8kLclINoMdlZ4x42EwAzg4mqCcV6POGMSceaW0ceSzPFn6R1waeLvXUhJkFU+X1KFhV1fRC2/MVmjWjip0RSH3p7aZrn2nKUBRN9IxVSpvcmknwu+gnKUjUbrEVc2u5h/U5cbOeQpKVc5pgjS6MMLQfWhJdOjMmddrPZjNieymQyK3Oya9ghwkkXZRj35+imlBkFgU36MhNxZWtavRVbs5knyXx0HF5gsi0VqGuCHE7EbBH3qXWaLfYa5q+HmBdXiwk+C01kZCGHfDUFaXMcLVb7/VYg8h2XaV7k6Weg/8FmH+NmsA2opM/pmQli2P4sn0YINr6o824edsfzS8zby8N5EW9rAsmy2tvWW3Rzcc3p/hBcsKyabvebVTfa5NRk2hP7vehc91JpzFaouBieEoceDUCA80fhcOz1QaV6OPMOj/UEakH2myua9++vZ9xvvWv7XbV0jMVaNg/XSTSHU6A7ISlbJ0B1xxKGTsK5+eG0/vtq6RzT4Q2GN3mawHlS53WcpDkKM3GLoHjO/qO1dCT/0O2/tHT398G/WEsnEL+jlu4TxRvOv8PZdxT7l5YOFFd/aeluKnXQTaJfttluzVafaHV+Y33dz81tQ2t/Ab/hNgE+VTAWgvuBZ/ZoWfMK1I7w+WgO+LEORZFwG87ptmMZNMbjpE3ocCiwMJomDAsnMQK3GvP6j0AeTFg/wmDuXoId7McXN2OLRyP85s0/vm/DejC8kO3IBdDtvQlOslcgn1weNW//ySxHFnPltzAd+aiGRG+GonBJ1Wqrv82apCXHRrHdUGPz8Ut02zckHhBAYf/mSJLjW/pqPuDypoH/8Lx91hgwf2iufB6b/jb3Bih+5+EA35r6119Pf/86t4e2mQbsi84Pj89/vf/DI6jWBeL680l/vs8p4mHYG+t2AHid2JcWdTsRTxgD0z56NDN+4d1TZtSU/i5e9AT8bwCrsR5u+9hScmtG/Njle5ek5uE9LRIUxpFY+0SHm0E9bayfH2s+roL2VXazU2vaut1KtS5PBXqF8m/le+IRvpve4N2Ivnnx+PLFNp76FLUG7U98ih5re3kYvIjm3fb2Yp8duISNzwD3xdrPTNU+RQB9rP+IyvUJ7Fq/CiLU/xmNwUYdRh+6/Y+Pg+iD/ewvBO02dX8GpyuwaPetjzDhwi+Fq/xfCPVWG9A9R4Dpz60ccQe8MDIz9a8WCb+0iXsYj0MRp64e3SwTf+mQAIyrONFAbQWgX4hiW/cRNbt8cJFrHn1p+3iBpq6Vf0bbm3mz/jiUqW3a4PxkffhV4J9BecS5NbN4wLaxMn18Ax5p9gfdCm8MxAF7D/sfLYN4FNeaL1+S1pr3V/Pu9oL3oZkX7nj3j81e3Wi+vfz3MoF7X8xGum2E2/9j+ZlZZBkY23xIP9mtn3sHPPbjV8gGzT0pDEc7nB/AKe3YUtGVabdudY/fPHCO/C1afFEauc5nM4RfafAzHPW7BvPa2P1ofqXR77EZvrt/hqH7x0dC/fDYu6a1f0n+9TME+y9o8q+PU9JQ+IfGu+Vx5D9Tq3n/nUeBBzsOcEx+PAoQ+IN+5Rdptv4YjRSLMxYPvh2GwdAmwRqWzuAcx9KsyfE2a7VUesUETNTuLMd/xTL5lGhbU5gXjYa+2S4N71zFx0fTqi8Yp5Fg6//E/uqfzTitevPthmc96rdSaf15TMj+MhH7FhOxJwvqo50Y/uvtxL50oP8mVdR/Xg//t7KNu+NV+2LYuQ4Gfij2lnoLKn1wZP7pJ/CCbTwhWydxPe/YkdV4Nzae5ncecATm4CbO8jxO8DRvGRhYsOI6QeKsgXMOGL9TJNjX69SXPeCeOBlf3Qb/R/q8faEff7yX2xeQue26v00Ai+d+bZ9r9y9Ptt/Tky26N48/2nYCBupwULpu2A8K1z+31eR36SavHfsWT7cdcph3wdMNS/uWNavEBVa5F0UnrEVBFsbS5gOZHgm9jTuT8mO4XpWkw9fY1NUUDNEO5Xh9GjHoMDUTLTG9cjQb+YeA7NH92LjE80kgzkfu8bIvZ7nkL1z1sptl68pI0TPnZ3SvSCR2xuxF+4DLlDKPD0Y8EPfpVNz1XG2QBEdeTvl+TUaCxVd1qdGYu5Xo3NDG0na6uJjjlVTPsNgHP7c0H22mR52Oa5RlmXw5zbFL3c1cwYrSQUoMJXMKphmRG42NQWgq5EyxaityUyWe5EsnAAMq3LcOq+Phsl9rysbZj8Sa3iXhuaR6x5IZ9a3RDuG2+CbEiX73gF/A9iiTJH08tGW9Jt36oqMUosH9ftrLvUydjAND106sZ1E7l9AxLyzpXokOBVLreqJipcj64qbaLrZ2JZghhoaW4ZE8PJ7n9CrO+3WXMNF1dAxVYsQHXZUomVks9rYKjMJCxJKeYXPT+fgwjFRVO19Ow6xeWYYloUSoJ8iEOtGHM80Nw1MSb8zTnF5kNaFk3NEM9qW9G+8iG68O0/MyTadosiAuhFkhKBpt5XgsHIYZO0nDXbEnLyotVxuBWaXFZbxaqkr/3B3Z3nQon9l9fq4dR6UCZagh631iWDa5BMupc3em1MwamYyVnGIUjRPO4nqCbKN6SQnHnhL2VXvAF9rAqi49r6yTMJIPh7QysmJXE57kBqvC5Oj1cutR3DKIiWiajLw6HaOG0IuJbQEuhcYCLgGU0hAM4YRztuyuzCOjTPPpJcfZekLz+EraSKK1lA3DXsdB5Q3SvLAQSx2NzOPWHYO3Ih9wDFg0pXZ81sos3kdOvjx05XyeIqdpiXP5zmejy9HtHv0Yk1V7htZ90jz6yakilixJFUO1f8jFfCQnJr2pzueJdmYGtBKUg912KxqEPwYjsQo5DyZOpC8SvtzVNJTdyatysR86MU8uHRbVI/Qkqjm4/Tlq6Q5Wo57LaihuXUILLCBOSZLCymBWbJc444NMXunyzLO5CZPLsYpdiK4kUId0vFa3DCOdh93TyT7mRaHliHLpqhubGVBMsTVG55WQU7BkPdyTwimOBELprnBX8atgtKvAb47bz/FJyPaTfbHfjuUVKdPbJSsYW+egn5WpbQ2ma2etJcGo6g1OhpUMBuMLeDXO5660daWsi0/ijaAMc6hWDvVxlM66sAhH/lEn2NVyUmuUnXiSCs4nYIk427JM/8APUEII1D1hH4YBJ47NxYnfppwfIHbMHI5TY30wj5HUc47Idr2piv5B6S37Y0Jw3NKXJuc9RwXpIUBpBAvkZbrsTw9mMDxXyEFzzg5f
Iby2GblBxohouaMtxC4TuwiP41O6zpdxHz0ZwXx6itHT2Z1Yaj5AEiU7iwtVPAQmcDGB2F6qQWHXu81Sn+iiV9Riis7dC/grDtiKTTBkKJ7GO94amuT8YkVHvRrsSHxg+PXWnatdtqD3mRliyHSU4eF0OMu3PdSqz+Vc1UOcCZHR5LTMDbd7WjuaejASc3I+Tol1JWF9WbIcVVYyTZtRG6sgRzqDHVbdCmybAlcpspXITE5imvYYMj/mvj3OsP4x8hfFPEhjxYrRwtx0e/2TPlBYROfm28MWG3GLbPCXp9t1dwSh/s7TjSNMgqHBXwXndQKzGBanbMcwcbgoNkjHaC/krvV+nUbhGz3d8H92ZcJv6enG8Uyv22sVDky/O7hO45/I0w1/R/PvCO4vT7c/hacbQ5HMQ9iOP6Gn25VYwBzkn93T7X/Bme539oQzSQI9FN0hsjmrlkKBF7G/pTdUZaaxqelzNOnBGU+pOdmlR3FIueC4IwfpwNmhLOkcx6OMwKPhIZeLxdKXu/Jwiqiu2fOj+Yw+5xdlIl8mGn1U5VlAG4P1YaCL7BF8xSTHmBDTBAsKWpvwe7VYDYzjJXMUkvUPlkYcVysjCmTJCM9dpVtsV6Z1Oe0vNNalZGPFeD7tbeNxSo3kVddHhlq3TwXCaW/JjjXqrriDQi1ElBLBjD8ATziI/ufvirLqzff6RdiHcgKnppFwpFWIL+HJqG/O16NdXi8OYnDsDniEnC4lcaSt095IAXN8iZAUactK4UqtiOxAqWU3Eqo5QRtD8P5YHPF+f1zI2giMowm7h++P6FAcLvlpHs6VvW731mf0Unvr0WXN++K2F4DwH0zmkmMdOGY5yKPB0qDdfshvyVEiHHJisvbAR4ePh+J+FfTOPXqjJjue41nOO4272TqcI7PJWMjS9aqYS/VwtMWCJeWZ5IlZH/1TlUYTuu8OvbNo1wfBJCe4MV7qZCR1Txu9PJ7OU1lGIejE2lcrdhnbChyFa2uXDQLUtQm+615W9GmW7A2P8Y4Q9oKkpQNZ10W1jtOLqYCTVlar5p6Xo92Z9zJPRZTuZGEmshUucmHl2f6Q9t2NocX703rLWeGAUtjZ0Cfps7G4+LK/SadErR60KQSRGKMBAw4th3kUnRkIpEJu9k7l6rmQj3oh4kHshmo2ZFF0GlFdZbxz1eWeNU0n2xTYGAKiSHMl8o71lITzu75AExXVwiHRyxrf/FQGNSpfnRwGIQd2Nszt4Xm0yrUDSk93wtlY+/pgiy2zOR+O8Z6/jrJdlDi1VmOEOFVUqeQVjVdtflcd/dlsMkXWXW+t4Jdp7up0WFP8ysR2M89gKj8LgVIio89LVnqI1QW7m3E56wT1tOQWhLdKXFAv5OQwS9SZQ7j5icKs8fagjbl9bIRwgEhNzkmn7np5YuveeobK47qfe1xGDJdItFTMyyokQ7sYoKEsgZPcfGEwuwt41HVVijANaT5X68D1zNX+oI/Ds6kj3bVTojJvDDZunaNjrp9CHCBnKTu9TYiYOxQdTcqljnAGrZSXPbhFMUMuNsnsMJ4nKsEfjMFkHoR8lYoityFj25pXQzgJXxbr6FCgDulIY+q8P+A0V0TjIxnyHL07cwciEiWrdmycuxysbpWWDLLlQ5fDJT0f0IxeVb18W1YTBBnNSgc89vZVcompANUiT7VREk1Xyy7GlcVsZahugSm8bC8nVmoRzIIGF9WpBC4ca/BbWx16gdfzF4Z2KktP8kcbjQkvu+qsR8u1jxJluTkkPb/gTGlyWHqrU9cXJuCQNCinvfGePo+LUQ8t+waZmaU0PfQDVspSbDIXVN/zq+PC64/g6MwBB6irA9JHJQdOpijKZ8W8dwpinl/F5kE6laS/PtECE+nJaLBLuaj0kf6ZHQfVHtmwEtG7TLr28iie5kt8WQx9aShNDEHarCo0uYgxTZB7ZSv0BX08Pao0IQwEUZhNAly1PNa2exvFOM3TJToE35ql5EoMng+UhOqTI3DhZS0p2/Wi5pBq2AYqTUSSD+gkzwvmOMlC1Q6GvmXKXcTsKqOJNjj1u5fNUN5s1Fwc786HtbA8Rpt1cBD26316MSrRxcEtruvMnHN6OmdSOgD3nS4+wzGjPz0z2NZL+WKzcctFnR1qVxhPRsm6olaONWPjbi8ME7uvVUTCBOV5VQmDTbT3VD7u1vKk21fX1mxycQ9ejZC2MFY53ttSC/+0kCYTshedziRqnwwZgmhbJcH0kGIUBVLixkyIe56GgPpmp+42zHkeZpup3s1jxRtsmF5fBV+Z5cnfAvVPt4VljXejszjZO5R0AM5I0Tu8e6R4DU7oxwpRQhciKdSet1SsmVQJAhNGWXzKgXcUO8UF59vNUjDMVa7uaMI9wEqVh4fVKF5Wi+kGnFRh8VGr9SJQ5XDi9Co/3nEEbDVudcljzKfSgLks14dd13OFnVuNKogVxVqiAX0UiatnXNJ4xpG63HjGcZuaiRgDvHGQw8SRGe2SEPkylB15Bro4drHsKzOzP9cWRxk9jjQtn+qT3KF6yqQ39dbLy2LKlLty4VyIkawgsmvVC7UCrdGWLNQAFKNULuJroqaVcC7uDtJkjiqH2XjG14vjZjnpb7GhT8PfVX3WQ8cXk3JP0THMwB4boRNldLSSMysXXdBDJvo2JXMP1cbUMWROc0QzWIUBBlifKgF0oPZwoXfxbBZxynJ+Viz9aNK0rOt+Jp9YZHVhk8yvk2lsM9hSGhuT9OLSDAPKoKmoqvqx35vv6oMUGMISh/BgJw10zN4w2HN1ictFT6KRIzjrQmAqUx1nbkrz8TapF8aW2oIDkFugujnfLrYW71FzN+LNVY+ZRZSGZjVvIrbADCPwAnfKDPEidX50xqPS5hB5S4KT7iY3nFAf9ioh24cOC+F3SqUC0kfiecYn1NZBj/N9dvWsW7WecVralxvPOMSm2LmuP3rGTZgB7CNHYZJj5cIf1+strm65cu4MbJwxCdk5YZEYWZZxocAkLxomdimzS3scrC9kbm+HiwtJrSGQ19qv0rVNdhcze6eklA4q7Y3Tn+MG4W4n0YRwS0FmbH4jDMaTk9ur3RNEKBIvgzJlhO5+TKNODwW3uXOPOuKTcZqCvmI/2s6j0FlkjMolWmUuUM/ZpkLU2+FjUywuKG2dNa6MNkIy2q/APVVRw9JJBHqzIdV5j1ygw4rRz/wyKVOdwmJRTjFV5USpX9GzLbBIrFgM1CAcHqiptrerCQg+ijVW6+w85LDhpKiZnD/0oJPbcr9GYvmceqfTqTyV9La/mqW0HKiIiwVYV6yxw5JFFe+0Cxml2JkQec7PJ4Q0GA50ZTQ74apg77tDflyjNNlNyJgC2czYG2tpakuj4ahept0lm/VZi1WKo+Gj/I4dbNkNuUkGq/H+FEwOUytQeaugwPIER91aW3mjAwG+nc5kt2Wjw4wIJ0Hqo119cLisqf1wsS3lFcqDL3vFoXNUZQyHY2CDZLrieMyu9+tqF/X53XqiVp66IR2/hybkXtoz5kDdKXt+s9IcC8ng1mM1R+hYxJmFNggHzsnrDVO
QJ0Mi7jGgFe05GuHahugP90yxXAA3wRqf680mgZhj2hDnqUV3viIgKs8q2wVnLXa3pwVlcPtdXxLCCATMfUEdsDFJ+jPb2SnnkhsO1AH4qyvBaJrYaopyAzyHSggwPnCTuym5vjEi5S+yJfrWWFY8i+sOhZE8ppsUyXOOCRkNaJOzdANzDPvm2vB7x7KiLIzlCAMjbRbUepAWAjIqYAztODplGphF/9Eavsdu/+Ul96u95BoNX7970/CJtyBJv6WG7xMvOQJ/R7Dv8Dul3ZcMkf7S8LXR0v4ksaz+GzV8L2h8vmCa9Jeur7HIAvuM/1W6vkd3s3+DzA4fkiaKHt4EdvsO37NvtO/AN5jGGNOuyg/MnGUm6UoY7ZfLAb7VDXRZarV1nPZDTdwIxn5a+KhcFpUVB8QqxyELQjIWdzYIQ1H37KD8YQnBebFkD9Em1oEVkNYpD0SCKWd7TKEt2Si1or/Lg6gOLInfMSOadPGQtNICSeH63FqXe22+YJSVMFzuq6VXJ12MsZ2ep51keX4yEMbDTGxWOCLoC+crlfQ88zxKl4l+PA2yw6JK9gHiJNv13N5GFmYveAubyUThD3bzecaebWwUBkGgI+GUIY8p7lK0VB8GhBq6LJ0s6qF/oNfzMu+BhgmcCuQla+VLJVSnxeYYo3u+f+jNBGoP0WpmxPo89kjV3JQQuSHcbAhEpIRLiFSRhPbCun+kpYU6rI/GDnL/iP14OKtgbLVqSguyuJYMxUpWGzg4sVg107sxnc82p3iV2HuySC/UtOuiojLN0CgKUMZeLdcjpZhAlCxPpJbbwWK+W20DLFSFsovsoyzAOUnQt6NdAeFJtPN4SLmSwlNyeqGj+VbsXS6hX09GCtcjV+FJN8KDdT5elnZuHo/n2Y6Zal0aFxEqMVJ22k3OzAQCPO+zlZXvprOxCzFhrfOG16IqPMuO2ndN3ScWWI7X8r4Ms+7aCgJH7YmHvL9IfNQoQspx7c2ZWZA5p3EHvjqOZdMpT0Fo1oI8OlilrsRjgut7xqA/2ttotRRABNZ423X3w6G72Ov+aToMSeEMN+K9PkFIXtcdIfx5rUUaNaIoxxS4mTHBba5v8PTJrgQrFupV115dILCLMMG2khQPl5izWxXgrJsO/MtmEhNrD1+mgOSwFgRkauV4YyYAsAqRF06KT2WBkc0VHNsXw91p311pZL/Xc+N4ap4z0xAw/kgCvhx6OZ9Ls9CGqlzOVHLt7XmPjqttgMTcZoqqc7PCIWwsKELsocjI8ZkXQ30EAZFrlQF7GeE8Vc6RO6plB3VL21zPhNE4w0+4dTYOYj21jErTMhlHaTjm9vAJ50KcOiK6OEEoQPqw7QA/mebwdIBIXOPp7EjNDfKUBlx1SbbGdjdfMxN2WgdoHIFVD+MY0ZCyx7qyskQQ+f9EEr+JUYxFQ+AOEgLA8rpBExatmwS4BNgWizl/kMRPG6TJ6zxh6hClA1pnIYgtrH0HTvUY+DD+4Xf6EO/wn9pB4De+0+/jwm8l8f8lu7dhBkB7Cine/tRxaP8UsvtXJLivSvRX84JvjVP7QtyLNk9TE/jiYza6rAMZm5wiett6prXBL+7T/DROgu3De5/F73RHeykyBfM/KzDFf+tB4n974AoKYz66qt4Frmiet+6rfwWuuCbuhLG4y90J35rBuf76+PtPHbjCwnjHgPA5uAOKVNKEa2qT520KshRgpElzBGMwLOT9MpqOPY2L8VfgCkj++9W4EY/j9r0+648V/wpcAaT3V+CK6+r7K3BFsy7aMBu/JHDFV8WtBvBXolfcrgC+OXrFJ+W/lxN8U8CFJ1v2/8LoFQ/9+0OiVzxt7L89esWXqPYrISy+VLWh9e88OLwcx4KmrvqQX3T3/KBK+X0jq9oGbhuUYVK0bpscQ0HcQIxtoq3yFmtaOtuMxRWT3yeORUtRf3AciybNyz+1mupb41jApXOfenAr+dWXzn8eFdQ/exyLb1Mx/eniWNwEBmDOv30cCwhdwREQF9WxCYenHdrSddrkGZykLIfWDYLGSUcniJfiWDQpdSGwX5u3tgmG8KGNE/s2qe9TNj8kDoNnkL3Zb5PT/fCO4D+btrl59cflbf5SH/6o0BVfwuF3iFjx2eZ+TaAKknzNdBD4JDDIu9zkOu60JHHVCWRtbvomK30Tz/PxB5L3da4FIKdaJ2m002BLkv34ScGmRpM2PM1BFQoJ18AzxLR/vBZ/3bkF8WziUb4VWxCQur1z08N8xCS1ISUdBFkpIbloixfE6XvEC3lAqtn1f/pB8eJzJ/cg/IveacL0dUw96lyPAR34y4/A4AXCwwAw2z+3fQBgnTiC4ovV2yuMO0VQ0/u3ELPBNz+0wTh/vM8t/5AQ/UMEed8eUsY/efjjq2vTz5B/dQ+liWnTaduBIH9NNrX0LSD+4QHOj08ANgP82OOm58lDu659XcbQ1KmACfwxKsL3EKfuvqmmwkOX3zfT9uLIPmuhqdQEVHv/6jNB1F69fqHGdYohYuX7V98aUPVFOE3Q1/evboTyGML55aIxxDwELvWeeYbP/RC8TIxAAx+p8coMnwSFaSDclsAjWTaK009Xx3MqhGyGLQmmNiQrBKIDOjPqZoE1eco7DYwmRtET4nsgaKgHKycH0v8CfTUQGnJqog3z3GsS0qfjxGuwgbtfy9egjJ8i+92k94jaN5AehTeL+f7nNvbJ2xs+ydscrDXsvzW3JGqThbQZ5OZ/EzJXwiMYpSez8GOTePO2bw3tACx12uVwm+3ryN8VafkY3L8Ar4KYrJ0noDqxcYD4Mx8X/A3I4+yCdiaov4vJtDWayx+YbRsclNog6E8a/R3YS9vo78JOvLf3h+UfX91H4n31uvOwJEFogG8v8RlgTBGEYPwxAb52z7lhl/g8TzKhcDPwr7502n3S31+4oO8ighlgkoKTYIXA42CNYOk0TRkQUECH46TNGAbPcSBdQZ7PlySpIvLz7xWhcAx/IkPBN0j8Gbk2iFc4RvyRQtSL6P9R0tOLjf8OYtOn7fwaeYl+zXaQ5uPf//0Ze/vIt6/bql3pYRLYH+AUqX/wrvyqFW86ThqHD8JPxw+b3LWd6z1/P03j9KfozX2JdiHCVfK13AK+rK5bPPCsbyr3+ikbuuLQygvph3skYcXeJIIXUP/x1RNV5K1EsxShR83uQ9KtIEl/IkgGkMf5072nGS2I2vc4aFdArWzzyyS/jZ36DjBgYPU3Eai9iW/nAjbYKxN6LvG1O/Bn+PKDuPoZMelVIxQ1nDDz9OZXI6nA71bguu0mTWdayQ7CTINEDQm4IQ50s+n+eFegKQSB1D/APgXi4lUgzV797blQ1VDR+5+f1WvqXvF4d/v9vFpb4lEYewXFWnRfgtN2AApcO/JiiZuE1ZRiXj0rAZP5KJn+cnnpTmp/nEWY0TtBCegf6LmVcR8nrull+9OO9uPEtSLaTTjCcayhT5ygXuP405POywTaCkdJK6u9TcGvzs5+vFujn5x0nrZstjB/NU1/PM3UcdEJiyzvGEDMkKy8SeEOUXptqyXyprVGiHxpbB
4n5esdenKyeNahF48KrxrCv/1rxL1PhOTQTt0XFv9zKfmLvWthfK53jzP/9d61CTlYppWRefo1Rz2hgudCyovn3kSHFN/Z+59fPViJw0oAO3G4yXgq6d6y1r//+RmH+4p8uwZp4nOCbfPuixJtKzS/INA2gfwN4DtXofr9872gnfNnu8A9m3lMFP7jV7KpxGbexI1GoQNB/EaL08C6eVbAbD89tDZNXvMANRH3UYjf+oTdtRlWwDQKv2dT95Yq8A6waecTprOZT+Y11aovksKAvPKwIJoDxEcnhc67F3G4Es9PP3RGH4u+fQtj+AydNvNIa43XBJPF3+YQWRwuce/sRT6mHfkU70/Tj0AZ6mn1Rw59bYaxwP2NN2iDZiFCN6QfpBwG122YRoanbcKmwGIMMiU1wTKfYPHY1DN4vEmBFAv6QNIyaRM3SYtxwEzEtHCHYCnwvmvibEF6uufwGg1Kg9HzfechNUk7JreJf1r3OsUvzOP1xV2SkhbGQ5Dx60G/TUfih+CFkaF2IxGhtzY+eHqS1KAvcl5q7T5XSQv1pUJfS3t/ze3whT493vl+DvEv1H1qAPIEwIvmJw9r6mPGkicTfh3Lp8lLAOjnV+qTzt3yoMAx9QuL47pEmuX6NLXJ0yVyRaRNVdKm4vgOJNpKX0GgafwxAcpLLd/blHxH27dqX2n91vkniVJeROKTpCnfjspXUXhgVWj2JK3KS3h8mmLl2wmtTa3yAo3BqfyWZeXbYbV1XgJ2lwnl20cIvdb62kA1pPKYg+Wl0XkpH8u3Y/G15h+WC6wXSKnzJPPLs13lumTa/C3tkLY2MC+M/F0eFyg3aNK43LH8RvK404C3qV1acFIjr3WyvHCesconRjFtUXC/wN9g1BucUnHmHYa9o/j9JxtB0tyFtmldvrnOPaO8Y0l3LP5zIkMjEbwgMrSD+9MPd+le3jQS/ptP5YcnR9WvMqSWUB+2GPTzTX/khZ/KOJ/icMsLA8P16e7ZCJcv7avXDrY5Y75Ik08xvsfmy9hDerQ2I+TDMbMVvZ6N9BOSuiHUZJh5oSOfRfdXzvEX5vnryN7P/Xch/WK+mgcS/dq4vkwRz7G9X7m3v5tj8+3Pb1M+v6zn+Kh9fqbp+Jpy46ZvdeK4VaveYfjbai4e1RRNS3BqvHOEaNQJ+KtP9QdPlUEtou3tlg/JID/cHYLhzudzmvrHwfp4IRjZtpU1p+aXTtJtI58ZMtAANCmm3nr6tXVQVedFGn2AGH5Fc/33wJvvhvDrh9IvHLlvM/Pqb2A98P8B/KD1kz2lAAA="}, "status": {"message": "OK", "code": 200}}, "request": {"uri": "https://api.github.com/repos/sigmavirus24/github3.py/pulls/286/files?per_page=100", "headers": {"Accept-Charset": "utf-8", "Accept-Encoding": "gzip, deflate", "User-Agent": "github3.py/1.0.0a1", "Connection": "keep-alive", "Content-Type": "application/json", "Accept": "application/vnd.github.v3.full+json"}, "body": {"encoding": "utf-8", "string": ""}, "method": "GET"}}, {"recorded_at": "2015-02-21T05:46:46", "response": {"headers": {"Transfer-Encoding": "chunked", "X-UA-Compatible": "IE=Edge,chrome=1", "X-Runtime": "0.011358", "X-Served-By": "9835a984a05caa405eb61faaa1546741", "X-Request-Id": "c4405a649e6367f514df656a2c5fc4a0", "X-XSS-Protection": "1; mode=block", "Set-Cookie": "logged_in=no; domain=.github.com; path=/; expires=Wed, 21-Feb-2035 05:46:46 GMT; secure; HttpOnly, _gh_sess=eyJzZXNzaW9uX2lkIjoiMzE5MDVlMjAxZWI5ZmYyYTQ0NWQ4MWY1ZWViNTYyMDgiLCJzcHlfcmVwbyI6InNpZ21hdmlydXMyNC9naXRodWIzLnB5Iiwic3B5X3JlcG9fYXQiOjE0MjQ0OTc2MDZ9--53040268da77d9d2cd665d61f4bb2acee1a4699e; path=/; secure; HttpOnly", "Content-Security-Policy": "default-src *; script-src assets-cdn.github.com collector-cdn.github.com; object-src assets-cdn.github.com; style-src 'self' 'unsafe-inline' 'unsafe-eval' assets-cdn.github.com; img-src 'self' data: assets-cdn.github.com identicons.github.com www.google-analytics.com collector.githubapp.com *.githubusercontent.com *.gravatar.com *.wp.com; media-src 'none'; frame-src 'self' render.githubusercontent.com gist.github.com www.youtube.com player.vimeo.com checkout.paypal.com; font-src assets-cdn.github.com; connect-src 'self' ghconduit.com:25035 live.github.com wss://live.github.com uploads.github.com www.google-analytics.com s3.amazonaws.com", "X-RateLimit-Remaining": "100", "X-GitHub-Request-Id": "42707723:12AB:18279253:54E81BC6", "Cache-Control": "no-cache", "Access-Control-Allow-Origin": "https://render.githubusercontent.com", "Strict-Transport-Security": "max-age=31536000; includeSubdomains; preload", "Server": "GitHub.com", "X-Rack-Cache": "miss", "Date": "Sat, 21 Feb 2015 05:46:46 GMT", "Location": "https://raw.githubusercontent.com/sigmavirus24/github3.py/fe5dd44af3eb7110a4a1f8c38f0c9bb235349ff6/github3/pulls.py", 
"Content-Type": "text/html; charset=utf-8", "Status": "302 Found", "Vary": "X-PJAX, Accept-Encoding", "X-RateLimit-Limit": "100", "X-Frame-Options": "deny", "X-Content-Type-Options": "nosniff"}, "url": "https://github.com/sigmavirus24/github3.py/raw/fe5dd44af3eb7110a4a1f8c38f0c9bb235349ff6/github3/pulls.py", "body": {"encoding": "utf-8", "string": "<html><body>You are being <a href=\"https://raw.githubusercontent.com/sigmavirus24/github3.py/fe5dd44af3eb7110a4a1f8c38f0c9bb235349ff6/github3/pulls.py\">redirected</a>.</body></html>"}, "status": {"message": "Found", "code": 302}}, "request": {"uri": "https://github.com/sigmavirus24/github3.py/raw/fe5dd44af3eb7110a4a1f8c38f0c9bb235349ff6/github3/pulls.py", "headers": {"Accept-Charset": "utf-8", "Accept-Encoding": "gzip, deflate", "User-Agent": "github3.py/1.0.0a1", "Connection": "keep-alive", "Content-Type": "application/json", "Accept": "application/octet-stream"}, "body": {"encoding": "utf-8", "string": ""}, "method": "GET"}}, {"recorded_at": "2015-02-21T05:46:47", "response": {"headers": {"Keep-Alive": "timeout=10, max=50", "Accept-Ranges": "bytes", "Content-Length": "3416", "Expires": "Sat, 21 Feb 2015 05:51:47 GMT", "Content-Security-Policy": "default-src 'none'", "X-XSS-Protection": "1; mode=block", "ETag": "\"681b647085e72d91f4e82229b5939ec10c41eea0\"", "Connection": "Keep-Alive", "Date": "Sat, 21 Feb 2015 05:46:47 GMT", "Cache-Control": "max-age=300", "Access-Control-Allow-Origin": "https://render.githubusercontent.com", "Strict-Transport-Security": "max-age=31536000", "X-Served-By": "cache-dfw1831-DFW", "Server": "Apache", "X-Cache": "MISS", "Content-Type": "text/plain; charset=utf-8", "Source-Age": "0", "Via": "1.1 varnish", "X-Content-Type-Options": "nosniff", "Content-Encoding": "gzip", "X-Cache-Hits": "0", "X-Frame-Options": "deny", "Vary": "Authorization,Accept-Encoding"}, "url": "https://raw.githubusercontent.com/sigmavirus24/github3.py/fe5dd44af3eb7110a4a1f8c38f0c9bb235349ff6/github3/pulls.py", "body": {"encoding": "utf-8", "string": "", "base64_string": 
"H4sIAAAAAAAAA+1bX3PcthF/16dA7AdS9YmSkjx0bqykjt3Unskfjy1PHyQPxTvidIh5JEuQUjSq+9m7uwBIAATvJFnp5KF6kO4WwO5isfhhdwE9ZQd/OWDLKhfl5Zx17ergr0jZe/Lkyd6laNfd4puk7opC7p3YP3t7p2sh2abKu4LD8LLNRClZVhSsXQOhyKTkkjW8yFrgzNqKIRcg/KvjspXJHklYNdWGpemqa7uGpykTm7pqWtaVAjTiaSFa3mQgW3VsuOmwydrlWhF/k1VpyHm3qaUiJ6C8Ib+sNhvRajqozAtpmv4h2tfd4tfFb3zZzvS3l1XDZ+yHTHIcyEszspO86Qd+gC+anvNl1WRtNTTiLEXDZZp1rVYzEVLCzJOlYml6vkGqI6ZrYNKbGuzWz/bDuzenmrS3t0e2ZW/Bmq/AkqIEA1dlPKi+P9/bY/AD9j2FpZhT//mFN4A99wjfXbCKzJCo4e85h+WU1Zyt27aeHx7m/IoXVc2bRPkFTuXw6ptD8o7Dp5e8PcgOJCx2wQ+QdqDX2iij2OZ8BQsuStGmaSx5sZoBSYLtczDYEhUB/Zn+kR2Iiz1FZwyH7Sc9Fxy/3495OmevDCtWrcgdN7y55Owa1IalkTU0okO26MH5wHgQC/yTXh92MuhmS/lR+fOKN7xccibbBh1dS1SmdDlCX+QFAhMwVhzB98jRu8gWvDAcJhVTvWxGRHFZmVVHN2XPzUYmD06QBqvd8BqMAX5H+xN1vi7Bpx1ZOABE/VKVvG8QK0s0doisFfNHoqzY7z4jho6+71+/MBNf0m5lWUvf1jzLXZ3kOnNmD9+tuVOXFOZWpWW24dAzikKNNFm31ZkYdgpOzOGN/c9U349qHDZY6kxItcedRUQ1DIrqUpT+hLAnDIt9VjNfp31rl+EC0whrGg0HpC1Z9Pz26DM7uz3+/PG7KFlVDQBq7Lr9zPK2fQd2fhQFj23g3II42FdBDX76UowphGwdbJEHK2ArAxDT1TnAZZq1sC0XXculBpsaB1gGCfleyN9oXMjhgMEv6AyaA3ZzxyNFu4vFxFBdTu/brO2kzWvGYEAyY1GW5zz3XFmq7q52RPMU7DYL8DtgC1wEri7IKBUAjhXu+6TLqoOjymHfN05JgNOV75LQ9wlJ6BunJCzXWXkJwcUmy3mP42MhultIhG5yBXx49xNyuxL8mqy/KKoFg60xxR/b064pXNaGup13k13DobJaqZUOsocuY+6a6DJ/i8EQu+SAB+DyOVvcKJ52wOXyrmmEw5lI0Z3hA3c0e6d4M9rkZ4AoPpgYJ3fxQw+7W8hiZDy3vvgwcnrtRpcMQtE2g0NZsmVWsgXs6zVffgLTFOITnNTVXMvDn/qYnYAlvrYJXxGBKC/KnIEtMaC7ygqM3FpveCJyxQE+eOSvDPkhaHcfUIMBdthEZ0JWC1xiaNJnL/gN4IjrOxjlOlGS5+PYfDIKNgem2AEP9OgH+uCwrvIbw9pZnQ2XMhtJwt62ukgI6DvFNJPs9enPP42Zput2U4w4E/V+7CH+FoBo/Pc2IATJYyFIHQvZhsbTezaAyr2wB4DytKAAOPeCbGy2JaFbtmJj4l43trxe83Kw6DWYcllUknthnaKBb4NA5cAQUdfI1HK3vlO0P3U4qFxKesw1NTAl0+SaTqdkkiEEx2UFHsBMWrY/wVuj9YjzGLFddUVIWzGprPB1VTiKo0jbgSsTOxfbiArpLkKq33elG45nkidUEXetdd/LW2z/MKWDFA6Maino/KM8z2jhJ3Wr1WiuhuhOFA+iEiRgAsKyVcubESq4vKnjNqzEDoSVr+mDLwynFYAeTwjA1mgChjjm2ZUCmDA4inZyxlPM4im2a2iZmwobHi8kjdTsqaGYF85qYB8Kb/nYO+0GD4leCcpesoZwPC1E+Ukm7CUFfDluiOPkyMunsYvDXo36Ir+ns9Rze0Xb4fV9J8/p/7nmIMDyQapd8A1MioZkC4jBIPJTYBAQTB3sWfZUOKF+hLiEB+VBJn5ddUWOUVQGwM0hoFLlFIiMARcnJKW4RBPyVJs+F/vxumAwssTi5t6Vjet1pRdggL/gUixuTIkCx+vCEtYBSBkONnFLHw50m5U4JL9WBytHv6ikaKvmxpVYqnH2BBVpOxBQndNhRKTRjuiptknNAeT0bDgCZ6rbxtvxHbWbsaSMKT4m7O+/11mZSwWxFxdqBhcXfrHCl4BWHmqYsdGLTN0rucPcjaOXNPaeBjRXjdBp6vXYeqpiIuirsD0u8OX7SxZotxdvA52pzI343tYS8oWz8+vz/PzgPDmff3x2GJ+/f7Zvfn8/V/Xlf1Mq8P3hef4smjlVKEuzHoi9hcfcDmal61GwPwdnjvbVVlK+jgiHuR0cK2UyrlSpIah/ctlUXS3jkY8reLB20Rj5+Qjy+XivtKItej5BN1A9bFZEuQe6I2vAHMj8sMdQ3VUBjjpZvLIp5Wm7oH7o5WH9HSBuSk8KmqrG5rbz5B8hr6rS2nXRvt+OSm9P0Fj6SLOCceKy5Pxe0zKD3ExJEwPTM02BKVqshmkaopnqQyonZ08DVROFqprh39y7JCODEiFfxpMnT14iXW1VRxACdtW1dCyCVZMBZ+ZKMzlni6oqbF6+7pZrx8PWmg1ZMCCGStCiHcpTZJ+6AKhLGYqPSj9Skc/wCFzDbwQV93IIZ0uMAIJdZA4mudaUXwC1zppsw1u80csabm7sdPGMa/RnL96+sU1Fg/Cmh9RUCGRkUgUAUiaYRWhAPyM1anzfARnNoH6IAxpCDVa3qlcUHqztYvF2JgKoxo6KEcS5kByUOf9dpYo6l3K4JAFPOTVIAwZzTW/3bm9qPmz7/zi3yYkKM3SafRF0O3VgKvhcdKLI8ciyEnZwlkzSOXbSV7yGbQ1uinX7W1VImjPjoGYdgGR5GQZQayCRt43OzciYDXrAUsS9N37uu9I1tNEWv+i7GujaxqDijBQ6wV+QAn59dLwf3F6pKV2mVZOWYKnYMdSM5FhYg+sVgAF1lvcrGt7uNy3Xl5Y94Gb2rakaGQYEWfeTRVzt7R+KOfQPpr+w3U5uoxfLJa9bsGWU1XUhliT18KrMTR0UJUefRxZCsQm+NaAL9JVxDYAtyEpibEXLHs3Yt0ff7lvxZG8tIVMV+YeQE0vE6PoScF4ESoBDWnc/9JxwY2K1w4cdzzDzHKyOERzO+Fs142GeXmipoVWdKycHxzPG2+zyhC5hrRMKlH7T0h0Cq650qhmMuqeR1UIaJW3O4qqmhBxVLUdlOjS4mmXCXvFV1hWte/4eHOt29bgku8pEQams4ZCEsBLn54r++2l2yeiBRcZqnFXVyX5p6RIJglI4ERzhvMzrCiYTWG593VLRbAzGOXuVPXe+fndxzuQfgHMuesD6xaBwrGOIGSPo8TCEVh9/WS6jC35f4Cqm6PiYLiK+1EPEn8tB1EukIfqFv4miPcg9xKN4h1Ig7BZ0vf4FTkHjA8XZR/APxfrB3kHD/0y+EXoucU+foDk9gkcYBcI+ocoHj3C4qGLC/8+WO7mH/VKQPb
e/TbuJVQAcSt3etdeM3VqRlnKsfv7qesF9qAaxEda9oKdrzbBPjmPpreca1o+KbMnjiNIELEKpwpb3pup+/mxba+zTE1kqBWjau3WmoG+uT6LIjR1/poL4HZzXygQ1L9eTNBGdZsGxCp3rFyiu76gCvPdY6Q6hqM6J/Od8nkaOKD2EXtbGt5HbdcihNGHkSQ8KeydzqS6YSh2NfBPHuNPQXkL3HL7rYO8zfd0QfRxwjmrq25Mr6vKw7Mot7f8vsyv11OeR06uJTdTwquZl0IgH2EJ3SlgycspVf1h5CkXuKE5ZHGaqvExnmapM6Y9UjPaPOFDpA429KxAQc3f/O+Vspyw+qj3ZwxYTz1ZCo0l3d3is7GKX7+5h/r7Qourqc6bt7hVeVAF/ruSP6iYOJJk3rRuIFNISWmLa63v2NkeKu8UnUaPfxmoPafQgQCO+BkRs9mMEUQzGL7BUQcbuqf3xtOlGUKPwxzyGc9Ky2PqHg4n3cNvTS+9NnLqtUWcIBUIahJx4y/+fDBp5PZT/7vOQbkkP6ZZf24SviECUXQ/pluYh3dJ5SLc0D+mWD35IZyZ8nxd1y34h+vWn/0XwMmnCtZBXmPH98HvcXNOz1qGy7G6LiRf5pvx690f5gRHOTYZW+tdGXAqAib5yjD4htIbjV6uV7p723U9c3UYdvPcbb7GkrWPmMXsqeHscqXLrMTHCMdvUle3Q0+4pJQfd8JXDkcM7ULm3bgGEZP7/a/RVZl/KUIp2tT81dfh1V35yedFTJST7vPqGaGL5tK6g/cSCTao57uHJePH2DT0T0JGqc5J79oaWVMNN6qQa2u5e+93fI+tXCwYbQ4+R0ccT+u+K7QEAZiA31m2UH7dAa//q3Lt3ouJGRs/G/GuR6YOcrrHx8sjyKu8m5c997+KvmhWO09WS4UXp3QpocXQIO+sZO7Y6pmT2tK3s7mfEYP5xd2bg3rLcOtDnBSKWLCBa3/pBnx/hhua/XPdKu1g5AAA="}, "status": {"message": "OK", "code": 200}}, "request": {"uri": "https://raw.githubusercontent.com/sigmavirus24/github3.py/fe5dd44af3eb7110a4a1f8c38f0c9bb235349ff6/github3/pulls.py", "headers": {"Accept-Charset": "utf-8", "Accept-Encoding": "gzip, deflate", "User-Agent": "github3.py/1.0.0a1", "Connection": "keep-alive", "Content-Type": "application/json", "Accept": "application/octet-stream"}, "body": {"encoding": "utf-8", "string": ""}, "method": "GET"}}]}
\ No newline at end of file
+{"http_interactions": [{"recorded_at": "2015-06-22T17:07:24", "response": {"status": {"code": 200, "message": "OK"}, "headers": {"transfer-encoding": "chunked", "date": "Mon, 22 Jun 2015 17:05:34 GMT", "cache-control": "public, max-age=60, s-maxage=60", "x-served-by": "07ff1c8a09e44b62e277fae50a1b1dc4", "x-ratelimit-remaining": "58", "x-frame-options": "deny", "status": "200 OK", "content-encoding": "gzip", "x-ratelimit-limit": "60", "access-control-expose-headers": "ETag, Link, X-GitHub-OTP, X-RateLimit-Limit, X-RateLimit-Remaining, X-RateLimit-Reset, X-OAuth-Scopes, X-Accepted-OAuth-Scopes, X-Poll-Interval", "content-security-policy": "default-src 'none'", "x-ratelimit-reset": "1434995993", "x-github-request-id": "AE45F57C:7356:C2431E8:5588405E", "etag": "W/\"3284a16cca05edc9bbed5300363e46c6\"", "strict-transport-security": "max-age=31536000; includeSubdomains; preload", "content-type": "application/json; charset=utf-8", "x-content-type-options": "nosniff", "last-modified": "Mon, 22 Jun 2015 03:13:10 GMT", "access-control-allow-origin": "*", "access-control-allow-credentials": "true", "x-xss-protection": "1; mode=block", "x-github-media-type": "github.v3; param=full; format=json", "server": "GitHub.com", "vary": "Accept"}, "body": {"base64_string": "H4sIAAAAAAAAA+1cW2/juBX+K4L3pUVjy7o4doRstosWaOdpF23mpZuFQUmUTUSWBF2c8Qj57/0OKcmS4ziOFezmQRhkYFHkx0Py8BzyXFSOijQcOaN1nieZo+ssEZOVyNeFO/HijZ7yJM70TKw2bCvSIjNtXb21JslOT4owzHRzcT26Ggl/5JiWNbPnc/MKcJtw2UVuoZ7Cq+B8EQSXA0yoOYhKWO6te8DI9jS4LCv4Ac67ZkoC1FMVFRuXp5iuxfXVKMtZzrEAXhhn3EdfYew94ocTsDDjV6Nc5CG9/9n3NRGJXLBQS1iWaUGcal7KWS6ilZbyreBPGpZsw6M8A0yRURcl4FYiQvv2nNfLZU8X1vRqxLYsZ+nh8GRhVjEDoXlxlANc8kWhm7LxT9sfLaCt0gqDuGBEvZ9iKgLrMhVanM8xqBzEYRg/AeWQ6C73vuxIb1o2KJi+C1HQstTjfM0xdxjSM02EyPL3EyVbldhZWb4UPuGAK9KU++8mrGoHsp4iUFTKDSwBCzfzUpHkIo7eT2CnNdDidMUi8R28dwkaWhOHStny7hHKVmjNt8To726umpV6koot83Y0NSn3uNhisi+EPGgPxHyX0Jb9SlsQUy9yvmT+hrah3NTPVyM39neo8V/OtR/MhS038yZOOXYwNtm3fPKQPkT0d//LP39x6oexRlIg5+AWKtK0sfbbt9+1L2iCHUiL0S7+CmnRakk7RiM+1fJY+w3TGO5+/0st9n2MPYwTnraF/9aqJHwtVvQfpMDhYzauiv5KYAwCKc2FV4QsPZBEchw1nUT8hufrGGOItf9IkfUPJbE0EWhJyjygsLBuczDYV4dbv6ABY74Vjf6S5Zhhc2rYY2M6Nq/vjRvHmDv2/H+oUyQ+xvGyjmk6s4VjXVMdJZFfwFCVuTObUpUNT1evoHSqLGm6RL7M1oxomt3MTcOe31xP5wtjzths4XHbnTLfDAzbMuauZzDfY+gAgl6sIg5uiqBr0aEIsfpxhILytJA9rbkbnEwn5X2+8G2r/xaIMZkCJmQuD9/Yk+fTpSs44JJWsY1rk5RVrTyhOmvVqDr3eSPgMMdfpMLVsK0CcGyR4nfKQ1pz4j3IbHrkLONaHGhbaBJsHk0BSf6JB+X5QksPyvMidTcoz/o2ceQ8donyhJ6KlupEPXIWjaCuS0xrf6qmqkdUgjWeLsaWcW/MHHvmzG5eUQnm2DDvTQPC3rFNquPjFhBDjSth3FIQVAC1rsR8LwnY3Kn0CgzdqtO9VCJvH3pOC9gufHVX6OKfPlOdA18TWupKWtMp6yOI39+i5ORU1NMNCpzVa9ZrDD3gM9+3bRZY3J0bxpTZzAgWnrUIpt6N65rWzLJvgkAqTc6gl3DDIq13cMNyoErGalrro5JcxwD1jr5SJ4N39E57abjfHV4kBxU1qChcC//0+x1JSdqddHK15sYUogRHV7aha+HegAaREEBxLKsXr5jFUEve5ofdPuz2wZrT2SSfxJojjUlkxa1sthddp+l42bnC/rqDqSTSQuGmLN1JE5GApScNmEcm3ydY6eVF9l8i/3fhaj//+mVLtljUe2woOWmHPXmUq5B6naqIFLLVPfJdLxxqX+r4v7KOejD/MjeGySt+ywR8cow4RbaAys4jnVlzzja9CJcAAFrH8WMvIAkAIOWIOMdQeXrg1Um63j/7Y3p/aIUAWmvbVa+BNyClXtvYsR0ib93vvF9jlLr6JVebrXqRSu0B44ax2wsH5wNdgpQ6bgXKq5Av+1JHqITRAcWFpDephNGA5invtzCSTAJpID/0eldWMxqyaFWwVT9aGxCsOjleVuz7my6p09tyjwJIcgWkwi36C7k9DlGqPEHwQPZa+hbMHlS6l/rYD9q2MjkFZDbvRWcF0WH7D4AlPj2E/ggTxzHTiRL61Zs+s1tJ/WN9VM7dnlMtHcSZXv4NjvN1ZfWBb6iX6xzUEoReurDWP08mk5IMLwQuvS+9KFYIgGKpt4YPsM/kljUGTj0blkvfa0Bk+ri9hTHze1HagABQLWMfWhVC2zwn7YJ9ICVAG3HvHeoDu0dpY0dxLgK4Cs/wP58WuB2g8qdMRB6/YvC0KY+mAB/jrE2r2N+3pRAwDJgBCLFyQPWa9Rqj1FXcwKHv0xxPrbFhke8Tfs2ZdczQPRtPr8emcT+1ZZ0Z1UmKbN1ybKoq03vDcqwFwnyoCiRgtbD4hcghZdaQ0SGvGDLIt4yGWVaH5OD57/tmzulmsLZHhxv0/D63h2rp7aYgdR1veIJjAqw25C
lvRmkluwm8hD6uX37sZRNENOg0MvEdVRc383nnQODFRQRftEXuyyeKKCLV2y6sDxLoRl36qGuWNT6OPC0QCUQlezHg7AufxKPY3z1Ri0jORlUNeQmruzNMBGdtRJrGVbyP8mS0PCp1TRvEqvubIxt1nC70sh5JNTCfB6wI86U6T2MkVUjB6JnCHSC6T5rKWZjANd62kNclyjDOpwa/8QPTN+AeZ+bc8kw2ndmBN/UDz8Dfwru2bqw5IAbDOMKzBt8tyX2SskPgkwrb/CSmssEwfm5gLHh3CHOk08QRYTaEObbj9I5M0CfZ7YNh/Hjg/GAYHwzjPQ2lg2EcCpLuoINh/HInn97EPQ2G8cEwnldh6r0sUtiSg2H8IE+uNq4PhvHBMN7K4ei1zwbD+MnU1MEwXgfdn7aqZ4NhXBrn/yTD+DIUEQJ7ECma8RDB4eVorYLEe+Zjw+ZOQWhHAN/2e8ikP5mGDRTpKTwCc2n2c5UkIUP3e472WDYA4A+SJfr2ciRh4kUnH9PHS7f8PqPkY3rAmjZZJcCuD/99wd+fPCF9QkiBXSomHd0md5QKe8s0Yv8fH5rPEZzBrQ0b2A8jzQtxL0Z7WTimzYVCmSeIwq8Rc0NO+X/KTXqYNf8wuqNk3Fud3b1MyL3Vkzuk5EYgVebkNs9F2PSas+wRnWY5kFAzFPR/ctfksZ7V5iXaGAnEGwUpoqTINcovxnjgSPQe3fhba9wNBbLNuF2DKiP70RcZzYJ/104bvtUVraH4w3qnZN26W70IaaYaIqpp62Qsv+SNS3OWMZPwQoQ7uc5vpC9/rhU7N4G6NZP7HGri3VO8+oetfCtd/VPxXZcL6QkWJimlKCMfHu0T2fqYXJWpjx/0rzPtVNBJ0qdnmZ1f1+5wumTOtxizbiq/CHBWUv2ZNGHMKqu9jh+QTyQymhCDpmRJkp+CI3yR5rum5dLFtw2GL48cfDdn+PLIJd8KGZKnPzh5uj5l4spz5KyMQB+qgJhbfHEBYZG+jy8OYd1Qe476Pr6ZUD+j5hpR1IgSC+ijFCNn9vx/mNP9Nk1KAAA=", "string": "", "encoding": "utf-8"}, "url": "https://api.github.com/repos/sigmavirus24/github3.py/pulls/286"}, "request": {"headers": {"Connection": "keep-alive", "Accept-Encoding": "gzip, deflate", "User-Agent": "github3.py/1.0.0a1", "Accept-Charset": "utf-8", "Accept": "application/vnd.github.v3.full+json", "Content-Type": "application/json"}, "uri": "https://api.github.com/repos/sigmavirus24/github3.py/pulls/286", "method": "GET", "body": {"string": "", "encoding": "utf-8"}}}, {"recorded_at": "2015-06-22T17:07:24", "response": {"status": {"code": 200, "message": "OK"}, "headers": {"transfer-encoding": "chunked", "date": "Mon, 22 Jun 2015 17:05:34 GMT", "cache-control": "public, max-age=60, s-maxage=60", "x-served-by": "a6882e5cd2513376cb9481dbcd83f3a2", "x-ratelimit-remaining": "57", "x-frame-options": "deny", "status": "200 OK", "content-encoding": "gzip", "x-ratelimit-limit": "60", "access-control-expose-headers": "ETag, Link, X-GitHub-OTP, X-RateLimit-Limit, X-RateLimit-Remaining, X-RateLimit-Reset, X-OAuth-Scopes, X-Accepted-OAuth-Scopes, X-Poll-Interval", "content-security-policy": "default-src 'none'", "x-ratelimit-reset": "1434995993", "x-github-request-id": "AE45F57C:7356:C243281:5588405E", "etag": "W/\"a7649b9ef71da693ea6c76a029bfdcf2\"", "strict-transport-security": "max-age=31536000; includeSubdomains; preload", "content-type": "application/json; charset=utf-8", "x-content-type-options": "nosniff", "last-modified": "Mon, 22 Jun 2015 03:13:10 GMT", "access-control-allow-origin": "*", "access-control-allow-credentials": "true", "x-xss-protection": "1; mode=block", "x-github-media-type": "github.v3; param=full; format=json", "server": "GitHub.com", "vary": "Accept"}, "body": {"base64_string": 
"H4sIAAAAAAAAA+19aZfayJLoX+H0++C+T7a1b77jmRECxCJWCQRMz/Fol0Ab2pDoc99vfyFBlalyeetteuZ2dZsqpMzIyMzIyMjIWP7j5x8yT//h3Q8MhxsMxWIcbbOExeMOZXMEQfAGzZO8beKYSeG2rWM/vP7B8QM70kMbarl+7hUGiSZFEGRvkxreZrmeFxm8C2PLd3zbgme6Zfm5H0fwmORf/2DZgX37ir3+wfT0yLVvr4wgNj4UaQD1vTxPsncoem3jrRmHaOa7oV76aZER1O05Ca2iTS3UsWnLoijdIW2DxXFMp3Tc4UySczCTNwyCpEmKdxzmoeI90ql+/v5WodKvatSMo9yO8uxZy3riv73rc2oncfbZnj/A+KRT/5bazvtvHRKYokTPTQ9G/d//vfMG5+nXTAdpfhF0B55YttMxgzizf8zswPnbu5+izsPPTz+0/318kNp5kUadpuDbIrH0/Frpbe7ngf36+tyIrfp151UL0nr1N6j9U4Q0EP89tU+Fn9rZB73IvdvDtvXUBkgfUrv07fMHoIUQRq7F5nXnCq155ucffOt1B/riwWectUTXoNsCb36u6IottI7eucLr3OB14qiTe37WacgZ3p0KO8vfQu07AAK8SfQUyD+306yjp3ZbEHC2OkYN1e2O5OfDwugIi9Gzuu/aip0sT1uc33VUKP3Qdm5XecfPmy7dNXdX5bGD13rKUOjETtvg9U0nj+868jKMZmSu1VM70HO/tNvBegDUrOyvgfGj/HFor6ACP7I7fmTZFXy2CMHCB8zu0WlG4mEO3l1JJLv1v50L69lUPC2f14n9rvPODPQse/df/+/Gdd5euc6qpQnxShL/9clc3z0AvtJ5f6XAD0bhB1az8n58dZuB7BWQkp7ZzcP3Lfl+gKUIxPmIOBCzDgB+ftXM3qt3N8Jr67eUB4/uqPBVM7LwqKXGOygPw/DqgUChDJDEj4/0+o+7wocMaPIB5+ZLS/NvP0Dh/EdA9HWnQep98/G31x0Cw+/xvV+KH/wIOHNk2h/i9EME9P3jk3F73WmAN5VvS7lZdM00fm7Fr67L/GG2Yb5gDTfMg6TwhnmQDPaabXhHO2mdJ4392IVhvk3YIzN5ZCR3CHy4MpAPep6nvlHkdnZb8rcpe7K0syKx0+e9avnV2xfgPEC441z/5yOJrTM77fzLA50V8C172zz71//qnL24E+oWrJOPi/cOyJXvNdXfd2ZxZN+98oGNXan0rWvnP75qwL5qutAOG0O9pmHYOOo1QT3w3BfwvrK8u/4/kFMH0J+nvutHevDAEYBHPEctvhVpeWhLtYDoE7Q+LXHPoZvmoCVgbp31Su44cdoOxKLhmKsrx7wj33Y0mmUKnLvlps3iet7g8/dNc/dkCHWT9FM6vFH3q3+50lbnRlCd//gZ+8d//uurt4BaqF/3iLfNUL8NYhicK+wvbzbQXlDfbS4v7CCrpkjD4tr94tk2cgapCDaXyD6/wNXuRufF/aDdB+4YO+xz/yOZ5/NZvWdM183iga01Ak/qwLMfX6Gv/tZBOvjdGPlRM/1B/QHG+q7Cf7Qg3v3n97PKn5+x4mfM/K494Mt33+6q/eO3YLU//OP1g+zN0zZlgnzNExjBMgRBO7Zl8A6jcxTPOiyLYRTh4IT9VPYGbphnqAnc1c7hb7RZgrcV+OFFceltw+LvJXQQy5+L5/hnpXN480cI57+4V7+/CP+LUftNBf1fhsWvOQ5grzE4DMCeBEvg55/acxlIEyD+6mZ7ivvph3fAdH/64cbim6/wrZGRbn+CdONHbvOl2eRfw6cdmXA6fHhW5M4b7idYEPDGs2FvTVuYAEQwTTvJ3/SfFHcvfgJyj+2ABGtf4V3LXVvQkyTwTRBu4wgtI+vhLFWSbx1YIUizCK6Vmg39jeACg71WfBQraxR/i73FDPwe+BvR01NYateyN5QbjMXrMe6NCkLqpxh8bE6AEw3srZcWs2tBkIN8s/MvXUEZiR+EtTr819sowOnCi61rIamvXvEoUv/65OFk/D0nxRtgOFolMGUtol+bpEYYZqgP97M3pLKR8PDDEDt1Pz/MPFFCqZOsX2ptas/3Wz1RQrpWyIV+LmzV7/uCgS9FfyxJW49O0Sg1V9K8zhFpvva5UmKOdqnlhK8RmJvV4nybLQK9Osxd6rQdxX4vMlb0EVNkxDtMXQJXB+MS6dUzcmzxMrk/bKaXzEiWzAbrD81uvNXr5WnKbLpLfZxuOHQs5+Pz0JlyOU7n0qDytxpeGGFvSGLi2czVgZgwpzpOJ/JmrGU7k6UIlN0ONJcl1elIQnrdrl5oC9PcC4Sx8/SeGWTaoNuz8D05quxJMVl6uFCsVrxej4nMXy0sb7VnVXrsals5mGIDZ2QGzl44JLK08JFTpoW1m62Kfd+03CWrjoaeAV3va6zDD2tWiTdLK+sns5O2KZJTZg6iIOwlpTLfoBPLoShJP09HyjY3ldAc6OxgKPfogs0qfRMWNI2Eq+1kNpgUS/tSUkcuG9mrYZFNNBJdBeKSYdYbiWY2wWhvbL1DNV0iDKNgkrVEw1pO2BjluIs6q4hgM0UWeGzi25mYBHEaI5fUGl38/WWj0ZS/Cc3hUJxevHmZ9YgINcbynuVRlDiaS384KMf1ZjtlrQsaHOsoyzwsxvhilwi2OxIUDfWZ6dpDaMPLOCQaTKJ5n9HojWHOD3YmXjAhD1HmqMkTMcBZcX7uCsVmjETFWRD6wUGKBlt9wDv21KyMqe+c/KNqIap1vGy32I4SUuviRhOkzy1kV7QmNbUBcgtW0DnTkkT8QrKpRR+y8qQtF+pgElejojeot+lIxC7zrVOOjLLarK31Lqm6ueUxSr2VRO2E06BAmrpBvSxGgS4P8e0hHnbDnD1UOkccNhwNaqal0Uv5SWpzjm2szAtOdo/JcjAZJOFWQbp8PaLsdZHHXcRmI8zrX5ZpIdLD097kpt0jNwlNbLCvgxw/bzf+4eLKCMp1FXLA+Lrh5FyxI8jJGDPkSh3qQ+eIkpxGo4zLMZpazjHD7G39fjUdHFiFw3PVN7fJ6UQPDgycj1dpn6J9oJddLkfudhNYYkHszzBeg5mVHE7pakWY7HTQwwOYftw5aXtJj0m3HExr1Il8QShCEsPseIRVfortLEa0z8KaGCUiIWWn8jInxv16v3Em9QmfskiwS0+OIekHfh8q0Jlj6R9CH8+S9SimVzo/LeJCWDseXXfFcw8/i8W6PvaXeSozeEytstxM6ODkrndHghTlbjjBewNrUSNuchgy/S0QZ0C6Frqbb2bDDTErDHqyOSPpZocGRU6hq/FwtCMSiZ4tphI7EomhSF88HatE0x1iI8orCmkhjnUQrJAyH8MSILsHARt73MypyhlVIhSKHGaYO6ms7hDjOYQuyXNOqvoAH9Sb+KjvsRlyjAa7PW6ag8WY8FAzWeueEburMFdl+qDLHudve8aUOY7W+WZNjLsbZLi2h7I34HYJyaKqt+6HZnFg9JAuZjU/dfP05OXeeFFFM5crBZdE1J0jjda6qvdCrTwgRWBvyU
GhnPCNg83WIw2vBFyUTwmf4fONrmi90f4wEQeHSRHjvDZOdW5iX6TAmu1W1A6JBwJe7eXleLUXN9UG86wteQnp8UCye+oh6ZJDG8PH2rlLKgu3OilTETeRXOunFYoK8XRmVvSxD8vw+3bxUk9bUeBhP3/debIXvoajW3z0QTm4fXPVnb2Zqwso9Hzzb3bb6g3s2qVtvTFuIHXO4myKJyyGZxhQ9RKkZduEbVsMZTIsp19Rrd5UWfYmSePcboWWKzr43zugpbbfg2BrHh8K3qS1N43S6U2cPIo4P/0QxVkEKpFb53P9QaT5CaQdm8cZzsAIDrMNjtEZktAtzqR5GyNoDuN+gjLXejr0ClBpWknj4I0eBPH5DcjrFsghvh60ks9PP+RpcRNtrvr061MCwzrzyQOiKYg/gQ+H+DepHep+9ChPUTyFPRS6CjVvQtvy9bZLV0g3BXNJ/v2qzXzfyEZ/b47zcGZ+/1FoeYatXYHiyX5zJ6T99ENf1d3XHdmPjs8ncPtmBSjKLYrtZ1Pg46PVA9bPH4OU1TyaN1TyRjHjxM6a71d6gKl//mIRB8GbUSORlnpw7Xie6lHmgHz3VNI0vSI62iBYXUnpNjg3wfWNf5O4KE7AuhjGvSP6WP8dyRA9lqHf0RTVo2mSuFY2ddOzH2bxOqZJ6pfQ39egHqre6K79HlRgnewNfLt+uVYEdVj+5uFu5FpRs0FpTRCduZk3Kjyqg0FrzDsS70jTm+zX6NGvhZUigsLMx8IE9g7H3sGTx8IvkthVrXOF8X9vfbhJrpltgmyZ12+SGCTn27oC+VovgvxNlpqdVxFosl49rfR0YBuZ/Pq+ERfN/E07AUmcAoAb9GvTD2NDwn7KYBj2dzhjm0Fh2UphWHFDxhmQJGinY/02Te16T6+1r+yhuQt6mMKPi6BdCtdiNAB+KOA0uvqn6xiWGnSyIYH7tX6ten96aJbB3ztwP9XI/e/vRP7qzf3aezwU4BROkTRL4PRN2v64dkHWDmHZAyFcm3lYxXAMap/Aym6PP6Aeuxb4VTK+Gadwsge16RVWQ1NvcOwNwagPxNIi+NeZ7c99ZrveUKIEx/wOpzcEN7Yams48mZ9QMpPFBS6vi3rHWSOsxgnXwpDc8nme0IzzSQhqf+cr1Lg/d9YbNkKdKbM81qThzyVpQrqmp3R7Kj0fnOeLejelLxO5XOy2uWtMjJlPqW7WZ+b4cZzXs2WqpPvFRRYJc3kCCKJTZ6UQHkpWGxIbdlyXh2IocsgO47P+JpYvFyfC6HBKcEIhEdyibxTxgBVPyEDsr7dlt/JW5tnfTFdB1pO6a9ihzPVe5bpzaxYGI+GgnxQzLf25tcTLKtp5mqLszrJyEJ14smZ6qzO9WfnnApmK2jIzMHbHe/oo6CvTgT8hZoPdepMY7pre46OjXpfRnJMcuX/ZBb5dC/1qvxbMDTXb+UunKKqkHIfxJUIjz80qQpV7nBWJcaYHINmJ3dl4Km42e2WeVCmaLUpb4GMmvQxH3fByQvfjcd/yK0UfwtlGF+ugr1fIUV33mcEiNSyOYsw81GSh1Pj+eZ1PansjnVlrt/YcZSX5R4nbKql2yibUpFgtPcoUQi6ph0alZ4ZrV5E3Hm0mUXA69+TNangcJVNftaiCGgzSeLKlQ3/ri8SlXGherK70k5XsyAMu4suwQEbYDDXJhZUOJ/V2Xu0y0dnQvYu7c62grBZ10sOWtMocxk6xD/zuWq5XThZ15RoXcubE9Wxak2xepWTE0Sye5mLPuMwyjN4cBI2w+ZofC+h4obKMbaFoWsUL95wT7vrosZvlUVvP0g2Sk0ztYMIyPYr6QUPP+wLTorGO7Xzksj6eInzCL3csibIKu0i8ymNBmI9ZguXnLGqTdJ5XTjA3VI+rR3sc23DjgzTo1af1qE4OwfhSrKSjeRifQ5TeTtSdWyf8CsH3Dt5jcV/zJ+dsfx4stVmNzVdR5nOzvDwJ3SXP61XUG497anawyDEIDYtyny/YghQjBQszFPHUJBxIU2j8KMFRXtt0p455nrG1M8ThrLvaiPIxm8UXhiP0YkHsLiyxZ+bIVkVsHHNTdBpiRJiRA6zP4SJP7OzCHRzP4q7A3XCYWOp5lBnT5BgY8WjgZE4/vRB21A0WhBon212KIN6MErc0x1Wov90gLs5NSDg6pLukt2X75shRQyWIBgkzj4lKiWuMDAXqSB3RkPQu1Wl2nq1IpziwpDbtatTWZfjNzutNN5czJzoYtgnOVTRXk8vWmG6Teb7j08op9N5lrYcwQUk6FBeaja5Tg/JyhiU4uZjNze26ZAhuwl5KB8lzgVJhtePDvBDZlYzltJATOUrHdJQeinOf1QOFM7qSOdf6c8nU8r1Tzrt+vNcZCdWRs35UsRNKOOm2lzgH1VkugtATDuveVMDJUhpqO6m+XI6paQ8de7JdKKztrzR5Y7J1uR1R3UUtLaRRt7/0FrvhOStkWSsBI4sNa6e8BAmNL+bYSV+wurJdqCcZZwPdVw5apY2XCeuMeM6gNj22zoYOkZvzpaifN8t4OImI2Vp2qVV+9udYb4OtbSbD0nl+1MUJnKAMS8awpTcSlt3REU+3hUxsuzjrJF0YlKg7m1GOJo5FnNvr027OiJv5iVixc5HeC32JXbBOPFvNnRidWXlmKMtTrjgVFa0Cb7RgXX0mTFzLdm09pmfj9fw4Cg7ibDkXVSn2xpJYJxfjsKsUoCRd6aO4TKRw1pfkQbg1JCwEvZ2oKaZucARS9pPzNmC0brUS+qm09vFNUmZHy+x7ykUpxIuq64jLToWNsgoUg1ta5mqtxG6c7UcXL/Xr+RkVxkbGd1VmVPvz7mlW96KiJks1NbAa0xCUUVfovK8Sh6WrSad8t1PLNdmV+sTMznpJ3o97wiwRzUmycas+K7nYKfao0bY3YL1gnHb7QuWMXQEz9is52LjUwLjQ+3EFeq+xtBuxNT7XNPeoLzNx2SNm43R88ibM7GSIrqgvRUYcFxIpr8WFLmDZBlSSR78/5ber3rQwWG+4nK0X3toVF6HoHg5rVAbVYSGVqiyetAvhxpI77AkCLvi9cDFfX3RRXnRP3bMFCz/AUtIjugizOAwTDA18BtZzMiycKl8MeMnwCnwZTahyJhIqu58PFqdlQpxnic11ER8bp9paHkoDyp36geLuNn2M2uAcWhYeYY/rKQ00Zx/F0mAXpB1W/DBRfTkZ55dLb7Yc2tJ0YxHBkCG2TL/oxyO+zkF7OukjY683qmbOera2SNRz7QsoeXwh71NLYrGwiF2PX9njdE6GtGfSs7hU2VNYbeRyzeaeWcndsZ7QsrCwhJNTTabUfIzEK3W0VdVF4Klb66IZuxmm2Qm5xTTBrCfjrmWR+lJhC8Wd9IloFWPH6ZRiQmmwkMaBPHaA9+AVvugnSF0OLmEvHNBBPQjtUbVdjrnTvs8hY45dyMZ0MRbOcnbxJIM7S4sDoywnVdhfFwvf6l4cJFW53mqKyDxbUUNsje6AMRwVj7lQuYNwu4mGR+g+3hV9ju/vFzE55fHhkONippuDxhYr0fJsX
MbUliWDakkzGM1t0gGrRpXFORSos/cYxwPwC5WOkQEBMsJooDHbqDrSjXY6QDBn4S8U2HIGJa/PvDhyNA6ZrrOEprZGRWFb39j48DfOX7iIm8gyMV9Rg+OCjI/aYmqo3DS1GdHxvSTg49klEa344k744WID6vN1GgvUabK4nHscT3Hzo0CcbXuxvJBHwe0h48FWzMcmoaFjcVyhakUGyzLYaeSB51QBpKmLDlzlMCMtbo3OiaRfx0dvqJwSda8gNcMmrHKaE/oK2aIoJurENpRyfj+V9tKSJwa75bnGJRtfU8lUGywTbe6b8kLclINoMdlZ4x42EwAzg4mqCcV6POGMSceaW0ceSzPFn6R1waeLvXUhJkFU+X1KFhV1fRC2/MVmjWjip0RSH3p7aZrn2nKUBRN9IxVSpvcmknwu+gnKUjUbrEVc2u5h/U5cbOeQpKVc5pgjS6MMLQfWhJdOjMmddrPZjNieymQyK3Oya9ghwkkXZRj35+imlBkFgU36MhNxZWtavRVbs5knyXx0HF5gsi0VqGuCHE7EbBH3qXWaLfYa5q+HmBdXiwk+C01kZCGHfDUFaXMcLVb7/VYg8h2XaV7k6Weg/8FmH+NmsA2opM/pmQli2P4sn0YINr6o824edsfzS8zby8N5EW9rAsmy2tvWW3Rzcc3p/hBcsKyabvebVTfa5NRk2hP7vehc91JpzFaouBieEoceDUCA80fhcOz1QaV6OPMOj/UEakH2myua9++vZ9xvvWv7XbV0jMVaNg/XSTSHU6A7ISlbJ0B1xxKGTsK5+eG0/vtq6RzT4Q2GN3mawHlS53WcpDkKM3GLoHjO/qO1dCT/0O2/tHT398G/WEsnEL+jlu4TxRvOv8PZdxT7l5YOFFd/aeluKnXQTaJfttluzVafaHV+Y33dz81tQ2t/Ab/hNgE+VTAWgvuBZ/ZoWfMK1I7w+WgO+LEORZFwG87ptmMZNMbjpE3ocCiwMJomDAsnMQK3GvP6j0AeTFg/wmDuXoId7McXN2OLRyP85s0/vm/DejC8kO3IBdDtvQlOslcgn1weNW//ySxHFnPltzAd+aiGRG+GonBJ1Wqrv82apCXHRrHdUGPz8Ut02zckHhBAYf/mSJLjW/pqPuDypoH/8Lx91hgwf2iufB6b/jb3Bih+5+EA35r6119Pf/86t4e2mQbsi84Pj89/vf/DI6jWBeL680l/vs8p4mHYG+t2AHid2JcWdTsRTxgD0z56NDN+4d1TZtSU/i5e9AT8bwCrsR5u+9hScmtG/Njle5ek5uE9LRIUxpFY+0SHm0E9bayfH2s+roL2VXazU2vaut1KtS5PBXqF8m/le+IRvpve4N2Ivnnx+PLFNp76FLUG7U98ih5re3kYvIjm3fb2Yp8duISNzwD3xdrPTNU+RQB9rP+IyvUJ7Fq/CiLU/xmNwUYdRh+6/Y+Pg+iD/ewvBO02dX8GpyuwaPetjzDhwi+Fq/xfCPVWG9A9R4Dpz60ccQe8MDIz9a8WCb+0iXsYj0MRp64e3SwTf+mQAIyrONFAbQWgX4hiW/cRNbt8cJFrHn1p+3iBpq6Vf0bbm3mz/jiUqW3a4PxkffhV4J9BecS5NbN4wLaxMn18Ax5p9gfdCm8MxAF7D/sfLYN4FNeaL1+S1pr3V/Pu9oL3oZkX7nj3j81e3Wi+vfz3MoF7X8xGum2E2/9j+ZlZZBkY23xIP9mtn3sHPPbjV8gGzT0pDEc7nB/AKe3YUtGVabdudY/fPHCO/C1afFEauc5nM4RfafAzHPW7BvPa2P1ofqXR77EZvrt/hqH7x0dC/fDYu6a1f0n+9TME+y9o8q+PU9JQ+IfGu+Vx5D9Tq3n/nUeBBzsOcEx+PAoQ+IN+5Rdptv4YjRSLMxYPvh2GwdAmwRqWzuAcx9KsyfE2a7VUesUETNTuLMd/xTL5lGhbU5gXjYa+2S4N71zFx0fTqi8Yp5Fg6//E/uqfzTitevPthmc96rdSaf15TMj+MhH7FhOxJwvqo50Y/uvtxL50oP8mVdR/Xg//t7KNu+NV+2LYuQ4Gfij2lnoLKn1wZP7pJ/CCbTwhWydxPe/YkdV4Nzae5ncecATm4CbO8jxO8DRvGRhYsOI6QeKsgXMOGL9TJNjX69SXPeCeOBlf3Qb/R/q8faEff7yX2xeQue26v00Ai+d+bZ9r9y9Ptt/Tky26N48/2nYCBupwULpu2A8K1z+31eR36SavHfsWT7cdcph3wdMNS/uWNavEBVa5F0UnrEVBFsbS5gOZHgm9jTuT8mO4XpWkw9fY1NUUDNEO5Xh9GjHoMDUTLTG9cjQb+YeA7NH92LjE80kgzkfu8bIvZ7nkL1z1sptl68pI0TPnZ3SvSCR2xuxF+4DLlDKPD0Y8EPfpVNz1XG2QBEdeTvl+TUaCxVd1qdGYu5Xo3NDG0na6uJjjlVTPsNgHP7c0H22mR52Oa5RlmXw5zbFL3c1cwYrSQUoMJXMKphmRG42NQWgq5EyxaityUyWe5EsnAAMq3LcOq+Phsl9rysbZj8Sa3iXhuaR6x5IZ9a3RDuG2+CbEiX73gF/A9iiTJH08tGW9Jt36oqMUosH9ftrLvUydjAND106sZ1E7l9AxLyzpXokOBVLreqJipcj64qbaLrZ2JZghhoaW4ZE8PJ7n9CrO+3WXMNF1dAxVYsQHXZUomVks9rYKjMJCxJKeYXPT+fgwjFRVO19Ow6xeWYYloUSoJ8iEOtGHM80Nw1MSb8zTnF5kNaFk3NEM9qW9G+8iG68O0/MyTadosiAuhFkhKBpt5XgsHIYZO0nDXbEnLyotVxuBWaXFZbxaqkr/3B3Z3nQon9l9fq4dR6UCZagh631iWDa5BMupc3em1MwamYyVnGIUjRPO4nqCbKN6SQnHnhL2VXvAF9rAqi49r6yTMJIPh7QysmJXE57kBqvC5Oj1cutR3DKIiWiajLw6HaOG0IuJbQEuhcYCLgGU0hAM4YRztuyuzCOjTPPpJcfZekLz+EraSKK1lA3DXsdB5Q3SvLAQSx2NzOPWHYO3Ih9wDFg0pXZ81sos3kdOvjx05XyeIqdpiXP5zmejy9HtHv0Yk1V7htZ90jz6yakilixJFUO1f8jFfCQnJr2pzueJdmYGtBKUg912KxqEPwYjsQo5DyZOpC8SvtzVNJTdyatysR86MU8uHRbVI/Qkqjm4/Tlq6Q5Wo57LaihuXUILLCBOSZLCymBWbJc444NMXunyzLO5CZPLsYpdiK4kUId0vFa3DCOdh93TyT7mRaHliHLpqhubGVBMsTVG55WQU7BkPdyTwimOBELprnBX8atgtKvAb47bz/FJyPaTfbHfjuUVKdPbJSsYW+egn5WpbQ2ma2etJcGo6g1OhpUMBuMLeDXO5660daWsi0/ijaAMc6hWDvVxlM66sAhH/lEn2NVyUmuUnXiSCs4nYIk427JM/8APUEII1D1hH4YBJ47NxYnfppwfIHbMHI5TY30wj5HUc47Idr2piv5B6S37Y0Jw3NKXJuc9RwXpIUBpBAvkZbrsTw9mMDxXyEFzzg5f
Iby2GblBxohouaMtxC4TuwiP41O6zpdxHz0ZwXx6itHT2Z1Yaj5AEiU7iwtVPAQmcDGB2F6qQWHXu81Sn+iiV9Riis7dC/grDtiKTTBkKJ7GO94amuT8YkVHvRrsSHxg+PXWnatdtqD3mRliyHSU4eF0OMu3PdSqz+Vc1UOcCZHR5LTMDbd7WjuaejASc3I+Tol1JWF9WbIcVVYyTZtRG6sgRzqDHVbdCmybAlcpspXITE5imvYYMj/mvj3OsP4x8hfFPEhjxYrRwtx0e/2TPlBYROfm28MWG3GLbPCXp9t1dwSh/s7TjSNMgqHBXwXndQKzGBanbMcwcbgoNkjHaC/krvV+nUbhGz3d8H92ZcJv6enG8Uyv22sVDky/O7hO45/I0w1/R/PvCO4vT7c/hacbQ5HMQ9iOP6Gn25VYwBzkn93T7X/Bme539oQzSQI9FN0hsjmrlkKBF7G/pTdUZaaxqelzNOnBGU+pOdmlR3FIueC4IwfpwNmhLOkcx6OMwKPhIZeLxdKXu/Jwiqiu2fOj+Yw+5xdlIl8mGn1U5VlAG4P1YaCL7BF8xSTHmBDTBAsKWpvwe7VYDYzjJXMUkvUPlkYcVysjCmTJCM9dpVtsV6Z1Oe0vNNalZGPFeD7tbeNxSo3kVddHhlq3TwXCaW/JjjXqrriDQi1ElBLBjD8ATziI/ufvirLqzff6RdiHcgKnppFwpFWIL+HJqG/O16NdXi8OYnDsDniEnC4lcaSt095IAXN8iZAUactK4UqtiOxAqWU3Eqo5QRtD8P5YHPF+f1zI2giMowm7h++P6FAcLvlpHs6VvW731mf0Unvr0WXN++K2F4DwH0zmkmMdOGY5yKPB0qDdfshvyVEiHHJisvbAR4ePh+J+FfTOPXqjJjue41nOO4272TqcI7PJWMjS9aqYS/VwtMWCJeWZ5IlZH/1TlUYTuu8OvbNo1wfBJCe4MV7qZCR1Txu9PJ7OU1lGIejE2lcrdhnbChyFa2uXDQLUtQm+615W9GmW7A2P8Y4Q9oKkpQNZ10W1jtOLqYCTVlar5p6Xo92Z9zJPRZTuZGEmshUucmHl2f6Q9t2NocX703rLWeGAUtjZ0Cfps7G4+LK/SadErR60KQSRGKMBAw4th3kUnRkIpEJu9k7l6rmQj3oh4kHshmo2ZFF0GlFdZbxz1eWeNU0n2xTYGAKiSHMl8o71lITzu75AExXVwiHRyxrf/FQGNSpfnRwGIQd2Nszt4Xm0yrUDSk93wtlY+/pgiy2zOR+O8Z6/jrJdlDi1VmOEOFVUqeQVjVdtflcd/dlsMkXWXW+t4Jdp7up0WFP8ysR2M89gKj8LgVIio89LVnqI1QW7m3E56wT1tOQWhLdKXFAv5OQwS9SZQ7j5icKs8fagjbl9bIRwgEhNzkmn7np5YuveeobK47qfe1xGDJdItFTMyyokQ7sYoKEsgZPcfGEwuwt41HVVijANaT5X68D1zNX+oI/Ds6kj3bVTojJvDDZunaNjrp9CHCBnKTu9TYiYOxQdTcqljnAGrZSXPbhFMUMuNsnsMJ4nKsEfjMFkHoR8lYoityFj25pXQzgJXxbr6FCgDulIY+q8P+A0V0TjIxnyHL07cwciEiWrdmycuxysbpWWDLLlQ5fDJT0f0IxeVb18W1YTBBnNSgc89vZVcompANUiT7VREk1Xyy7GlcVsZahugSm8bC8nVmoRzIIGF9WpBC4ca/BbWx16gdfzF4Z2KktP8kcbjQkvu+qsR8u1jxJluTkkPb/gTGlyWHqrU9cXJuCQNCinvfGePo+LUQ8t+waZmaU0PfQDVspSbDIXVN/zq+PC64/g6MwBB6irA9JHJQdOpijKZ8W8dwpinl/F5kE6laS/PtECE+nJaLBLuaj0kf6ZHQfVHtmwEtG7TLr28iie5kt8WQx9aShNDEHarCo0uYgxTZB7ZSv0BX08Pao0IQwEUZhNAly1PNa2exvFOM3TJToE35ql5EoMng+UhOqTI3DhZS0p2/Wi5pBq2AYqTUSSD+gkzwvmOMlC1Q6GvmXKXcTsKqOJNjj1u5fNUN5s1Fwc786HtbA8Rpt1cBD26316MSrRxcEtruvMnHN6OmdSOgD3nS4+wzGjPz0z2NZL+WKzcctFnR1qVxhPRsm6olaONWPjbi8ME7uvVUTCBOV5VQmDTbT3VD7u1vKk21fX1mxycQ9ejZC2MFY53ttSC/+0kCYTshedziRqnwwZgmhbJcH0kGIUBVLixkyIe56GgPpmp+42zHkeZpup3s1jxRtsmF5fBV+Z5cnfAvVPt4VljXejszjZO5R0AM5I0Tu8e6R4DU7oxwpRQhciKdSet1SsmVQJAhNGWXzKgXcUO8UF59vNUjDMVa7uaMI9wEqVh4fVKF5Wi+kGnFRh8VGr9SJQ5XDi9Co/3nEEbDVudcljzKfSgLks14dd13OFnVuNKogVxVqiAX0UiatnXNJ4xpG63HjGcZuaiRgDvHGQw8SRGe2SEPkylB15Bro4drHsKzOzP9cWRxk9jjQtn+qT3KF6yqQ39dbLy2LKlLty4VyIkawgsmvVC7UCrdGWLNQAFKNULuJroqaVcC7uDtJkjiqH2XjG14vjZjnpb7GhT8PfVX3WQ8cXk3JP0THMwB4boRNldLSSMysXXdBDJvo2JXMP1cbUMWROc0QzWIUBBlifKgF0oPZwoXfxbBZxynJ+Viz9aNK0rOt+Jp9YZHVhk8yvk2lsM9hSGhuT9OLSDAPKoKmoqvqx35vv6oMUGMISh/BgJw10zN4w2HN1ictFT6KRIzjrQmAqUx1nbkrz8TapF8aW2oIDkFugujnfLrYW71FzN+LNVY+ZRZSGZjVvIrbADCPwAnfKDPEidX50xqPS5hB5S4KT7iY3nFAf9ioh24cOC+F3SqUC0kfiecYn1NZBj/N9dvWsW7WecVralxvPOMSm2LmuP3rGTZgB7CNHYZJj5cIf1+strm65cu4MbJwxCdk5YZEYWZZxocAkLxomdimzS3scrC9kbm+HiwtJrSGQ19qv0rVNdhcze6eklA4q7Y3Tn+MG4W4n0YRwS0FmbH4jDMaTk9ur3RNEKBIvgzJlhO5+TKNODwW3uXOPOuKTcZqCvmI/2s6j0FlkjMolWmUuUM/ZpkLU2+FjUywuKG2dNa6MNkIy2q/APVVRw9JJBHqzIdV5j1ygw4rRz/wyKVOdwmJRTjFV5USpX9GzLbBIrFgM1CAcHqiptrerCQg+ijVW6+w85LDhpKiZnD/0oJPbcr9GYvmceqfTqTyV9La/mqW0HKiIiwVYV6yxw5JFFe+0Cxml2JkQec7PJ4Q0GA50ZTQ74apg77tDflyjNNlNyJgC2czYG2tpakuj4ahept0lm/VZi1WKo+Gj/I4dbNkNuUkGq/H+FEwOUytQeaugwPIER91aW3mjAwG+nc5kt2Wjw4wIJ0Hqo119cLisqf1wsS3lFcqDL3vFoXNUZQyHY2CDZLrieMyu9+tqF/X53XqiVp66IR2/hybkXtoz5kDdKXt+s9IcC8ng1mM1R+hYxJmFNggHzsnrDVO
QJ0Mi7jGgFe05GuHahugP90yxXAA3wRqf680mgZhj2hDnqUV3viIgKs8q2wVnLXa3pwVlcPtdXxLCCATMfUEdsDFJ+jPb2SnnkhsO1AH4qyvBaJrYaopyAzyHSggwPnCTuym5vjEi5S+yJfrWWFY8i+sOhZE8ppsUyXOOCRkNaJOzdANzDPvm2vB7x7KiLIzlCAMjbRbUepAWAjIqYAztODplGphF/9Eavsdu/+Ul96u95BoNX7970/CJtyBJv6WG7xMvOQJ/R7Dv8Dul3ZcMkf7S8LXR0v4ksaz+GzV8L2h8vmCa9Jeur7HIAvuM/1W6vkd3s3+DzA4fkiaKHt4EdvsO37NvtO/AN5jGGNOuyg/MnGUm6UoY7ZfLAb7VDXRZarV1nPZDTdwIxn5a+KhcFpUVB8QqxyELQjIWdzYIQ1H37KD8YQnBebFkD9Em1oEVkNYpD0SCKWd7TKEt2Si1or/Lg6gOLInfMSOadPGQtNICSeH63FqXe22+YJSVMFzuq6VXJ12MsZ2ep51keX4yEMbDTGxWOCLoC+crlfQ88zxKl4l+PA2yw6JK9gHiJNv13N5GFmYveAubyUThD3bzecaebWwUBkGgI+GUIY8p7lK0VB8GhBq6LJ0s6qF/oNfzMu+BhgmcCuQla+VLJVSnxeYYo3u+f+jNBGoP0WpmxPo89kjV3JQQuSHcbAhEpIRLiFSRhPbCun+kpYU6rI/GDnL/iP14OKtgbLVqSguyuJYMxUpWGzg4sVg107sxnc82p3iV2HuySC/UtOuiojLN0CgKUMZeLdcjpZhAlCxPpJbbwWK+W20DLFSFsovsoyzAOUnQt6NdAeFJtPN4SLmSwlNyeqGj+VbsXS6hX09GCtcjV+FJN8KDdT5elnZuHo/n2Y6Zal0aFxEqMVJ22k3OzAQCPO+zlZXvprOxCzFhrfOG16IqPMuO2ndN3ScWWI7X8r4Ms+7aCgJH7YmHvL9IfNQoQspx7c2ZWZA5p3EHvjqOZdMpT0Fo1oI8OlilrsRjgut7xqA/2ttotRRABNZ423X3w6G72Ov+aToMSeEMN+K9PkFIXtcdIfx5rUUaNaIoxxS4mTHBba5v8PTJrgQrFupV115dILCLMMG2khQPl5izWxXgrJsO/MtmEhNrD1+mgOSwFgRkauV4YyYAsAqRF06KT2WBkc0VHNsXw91p311pZL/Xc+N4ap4z0xAw/kgCvhx6OZ9Ls9CGqlzOVHLt7XmPjqttgMTcZoqqc7PCIWwsKELsocjI8ZkXQ30EAZFrlQF7GeE8Vc6RO6plB3VL21zPhNE4w0+4dTYOYj21jErTMhlHaTjm9vAJ50KcOiK6OEEoQPqw7QA/mebwdIBIXOPp7EjNDfKUBlx1SbbGdjdfMxN2WgdoHIFVD+MY0ZCyx7qyskQQ+f9EEr+JUYxFQ+AOEgLA8rpBExatmwS4BNgWizl/kMRPG6TJ6zxh6hClA1pnIYgtrH0HTvUY+DD+4Xf6EO/wn9pB4De+0+/jwm8l8f8lu7dhBkB7Cine/tRxaP8UsvtXJLivSvRX84JvjVP7QtyLNk9TE/jiYza6rAMZm5wiett6prXBL+7T/DROgu3De5/F73RHeykyBfM/KzDFf+tB4n974AoKYz66qt4Frmiet+6rfwWuuCbuhLG4y90J35rBuf76+PtPHbjCwnjHgPA5uAOKVNKEa2qT520KshRgpElzBGMwLOT9MpqOPY2L8VfgCkj++9W4EY/j9r0+648V/wpcAaT3V+CK6+r7K3BFsy7aMBu/JHDFV8WtBvBXolfcrgC+OXrFJ+W/lxN8U8CFJ1v2/8LoFQ/9+0OiVzxt7L89esWXqPYrISy+VLWh9e88OLwcx4KmrvqQX3T3/KBK+X0jq9oGbhuUYVK0bpscQ0HcQIxtoq3yFmtaOtuMxRWT3yeORUtRf3AciybNyz+1mupb41jApXOfenAr+dWXzn8eFdQ/exyLb1Mx/eniWNwEBmDOv30cCwhdwREQF9WxCYenHdrSddrkGZykLIfWDYLGSUcniJfiWDQpdSGwX5u3tgmG8KGNE/s2qe9TNj8kDoNnkL3Zb5PT/fCO4D+btrl59cflbf5SH/6o0BVfwuF3iFjx2eZ+TaAKknzNdBD4JDDIu9zkOu60JHHVCWRtbvomK30Tz/PxB5L3da4FIKdaJ2m002BLkv34ScGmRpM2PM1BFQoJ18AzxLR/vBZ/3bkF8WziUb4VWxCQur1z08N8xCS1ISUdBFkpIbloixfE6XvEC3lAqtn1f/pB8eJzJ/cg/IveacL0dUw96lyPAR34y4/A4AXCwwAw2z+3fQBgnTiC4ovV2yuMO0VQ0/u3ELPBNz+0wTh/vM8t/5AQ/UMEed8eUsY/efjjq2vTz5B/dQ+liWnTaduBIH9NNrX0LSD+4QHOj08ANgP82OOm58lDu659XcbQ1KmACfwxKsL3EKfuvqmmwkOX3zfT9uLIPmuhqdQEVHv/6jNB1F69fqHGdYohYuX7V98aUPVFOE3Q1/evboTyGML55aIxxDwELvWeeYbP/RC8TIxAAx+p8coMnwSFaSDclsAjWTaK009Xx3MqhGyGLQmmNiQrBKIDOjPqZoE1eco7DYwmRtET4nsgaKgHKycH0v8CfTUQGnJqog3z3GsS0qfjxGuwgbtfy9egjJ8i+92k94jaN5AehTeL+f7nNvbJ2xs+ydscrDXsvzW3JGqThbQZ5OZ/EzJXwiMYpSez8GOTePO2bw3tACx12uVwm+3ryN8VafkY3L8Ar4KYrJ0noDqxcYD4Mx8X/A3I4+yCdiaov4vJtDWayx+YbRsclNog6E8a/R3YS9vo78JOvLf3h+UfX91H4n31uvOwJEFogG8v8RlgTBGEYPwxAb52z7lhl/g8TzKhcDPwr7502n3S31+4oO8ighlgkoKTYIXA42CNYOk0TRkQUECH46TNGAbPcSBdQZ7PlySpIvLz7xWhcAx/IkPBN0j8Gbk2iFc4RvyRQtSL6P9R0tOLjf8OYtOn7fwaeYl+zXaQ5uPf//0Ze/vIt6/bql3pYRLYH+AUqX/wrvyqFW86ThqHD8JPxw+b3LWd6z1/P03j9KfozX2JdiHCVfK13AK+rK5bPPCsbyr3+ikbuuLQygvph3skYcXeJIIXUP/x1RNV5K1EsxShR83uQ9KtIEl/IkgGkMf5072nGS2I2vc4aFdArWzzyyS/jZ36DjBgYPU3Eai9iW/nAjbYKxN6LvG1O/Bn+PKDuPoZMelVIxQ1nDDz9OZXI6nA71bguu0mTWdayQ7CTINEDQm4IQ50s+n+eFegKQSB1D/APgXi4lUgzV797blQ1VDR+5+f1WvqXvF4d/v9vFpb4lEYewXFWnRfgtN2AApcO/JiiZuE1ZRiXj0rAZP5KJn+cnnpTmp/nEWY0TtBCegf6LmVcR8nrull+9OO9uPEtSLaTTjCcayhT5ygXuP405POywTaCkdJK6u9TcGvzs5+vFujn5x0nrZstjB/NU1/PM3UcdEJiyzvGEDMkKy8SeEOUXptqyXyprVGiHxpbB
4n5esdenKyeNahF48KrxrCv/1rxL1PhOTQTt0XFv9zKfmLvWthfK53jzP/9d61CTlYppWRefo1Rz2hgudCyovn3kSHFN/Z+59fPViJw0oAO3G4yXgq6d6y1r//+RmH+4p8uwZp4nOCbfPuixJtKzS/INA2gfwN4DtXofr9872gnfNnu8A9m3lMFP7jV7KpxGbexI1GoQNB/EaL08C6eVbAbD89tDZNXvMANRH3UYjf+oTdtRlWwDQKv2dT95Yq8A6waecTprOZT+Y11aovksKAvPKwIJoDxEcnhc67F3G4Es9PP3RGH4u+fQtj+AydNvNIa43XBJPF3+YQWRwuce/sRT6mHfkU70/Tj0AZ6mn1Rw59bYaxwP2NN2iDZiFCN6QfpBwG122YRoanbcKmwGIMMiU1wTKfYPHY1DN4vEmBFAv6QNIyaRM3SYtxwEzEtHCHYCnwvmvibEF6uufwGg1Kg9HzfechNUk7JreJf1r3OsUvzOP1xV2SkhbGQ5Dx60G/TUfih+CFkaF2IxGhtzY+eHqS1KAvcl5q7T5XSQv1pUJfS3t/ze3whT493vl+DvEv1H1qAPIEwIvmJw9r6mPGkicTfh3Lp8lLAOjnV+qTzt3yoMAx9QuL47pEmuX6NLXJ0yVyRaRNVdKm4vgOJNpKX0GgafwxAcpLLd/blHxH27dqX2n91vkniVJeROKTpCnfjspXUXhgVWj2JK3KS3h8mmLl2wmtTa3yAo3BqfyWZeXbYbV1XgJ2lwnl20cIvdb62kA1pPKYg+Wl0XkpH8u3Y/G15h+WC6wXSKnzJPPLs13lumTa/C3tkLY2MC+M/F0eFyg3aNK43LH8RvK404C3qV1acFIjr3WyvHCesconRjFtUXC/wN9g1BucUnHmHYa9o/j9JxtB0tyFtmldvrnOPaO8Y0l3LP5zIkMjEbwgMrSD+9MPd+le3jQS/ptP5YcnR9WvMqSWUB+2GPTzTX/khZ/KOJ/icMsLA8P16e7ZCJcv7avXDrY5Y75Ik08xvsfmy9hDerQ2I+TDMbMVvZ6N9BOSuiHUZJh5oSOfRfdXzvEX5vnryN7P/Xch/WK+mgcS/dq4vkwRz7G9X7m3v5tj8+3Pb1M+v6zn+Kh9fqbp+Jpy46ZvdeK4VaveYfjbai4e1RRNS3BqvHOEaNQJ+KtP9QdPlUEtou3tlg/JID/cHYLhzudzmvrHwfp4IRjZtpU1p+aXTtJtI58ZMtAANCmm3nr6tXVQVedFGn2AGH5Fc/33wJvvhvDrh9IvHLlvM/Pqb2A98P8B/KD1kz2lAAA=", "string": "", "encoding": "utf-8"}, "url": "https://api.github.com/repos/sigmavirus24/github3.py/pulls/286/files?per_page=100"}, "request": {"headers": {"Connection": "keep-alive", "Accept-Encoding": "gzip, deflate", "User-Agent": "github3.py/1.0.0a1", "Accept-Charset": "utf-8", "Accept": "application/vnd.github.v3.full+json", "Content-Type": "application/json"}, "uri": "https://api.github.com/repos/sigmavirus24/github3.py/pulls/286/files?per_page=100", "method": "GET", "body": {"string": "", "encoding": "utf-8"}}}, {"recorded_at": "2015-06-22T17:07:24", "response": {"status": {"code": 200, "message": "OK"}, "headers": {"transfer-encoding": "chunked", "date": "Mon, 22 Jun 2015 17:05:34 GMT", "cache-control": "public, max-age=60, s-maxage=60", "x-served-by": "a30e6f9aa7cf5731b87dfb3b9992202d", "x-ratelimit-remaining": "56", "x-frame-options": "deny", "status": "200 OK", "content-encoding": "gzip", "x-ratelimit-limit": "60", "access-control-expose-headers": "ETag, Link, X-GitHub-OTP, X-RateLimit-Limit, X-RateLimit-Remaining, X-RateLimit-Reset, X-OAuth-Scopes, X-Accepted-OAuth-Scopes, X-Poll-Interval", "content-security-policy": "default-src 'none'", "x-ratelimit-reset": "1434995993", "x-github-request-id": "AE45F57C:7356:C2432CB:5588405E", "etag": "W/\"87cbcbfec670418858b9c13f6382092a\"", "strict-transport-security": "max-age=31536000; includeSubdomains; preload", "content-type": "application/json; charset=utf-8", "x-content-type-options": "nosniff", "last-modified": "Sun, 26 Oct 2014 22:47:16 GMT", "access-control-allow-origin": "*", "access-control-allow-credentials": "true", "x-xss-protection": "1; mode=block", "x-github-media-type": "github.v3; param=full; format=json", "server": "GitHub.com", "vary": "Accept"}, "body": {"base64_string": 
"H4sIAAAAAAAAA9Vca28iy7X9L/M1UQYa4zGRois3Ni8bbMDmJUsRdNuAaRoygDFE+e9Za1dVU/2wx2eSq6v7Yc6ZAboeu/Zj7bV39T+/hePl87e/flvvgmDzl/Xh25+/rcfbGT6Zzrez3aTw3fpmMxvji/OL/OT87Efuovj8w/FL+Zez5wvHcUqTYqlQevbyOe8s//w8zmGozfyIwfNn5xe5P3/b/Qzw9Gy7XW/++v37eD3/i5riL95q+f3n83q1+b6ZT5fjt/nP3cY5+64XgEV991bh9jncbsxn0aL+5+fzy99enou+f3Y2fik8T37k87nx2Tj/cuEVLl5yXmkycQrFwlnp5eUcC5ptl8Hf4wuxFvHR9JNgNfn+1VlSa8S0mCMx6x/aPh7/zjVsvv8B2furfRisxn5i4p/jvZb7bvP8UwtWjuCj3f8nG98e1tSul3nwDDHo2fBB/XC5vZnnpkOntBj3i+Hd3M371eH2riyf39Tn9Xl56a/96iw/fG3uvFpvM6k1b+4fcqWnkP+N/rRXN73qbO0d3O3E6eQn1UeOuvOrlfUkbE6H/fdNvdZZjbru66RaOT6FntM71muNAP/KcW58+1avuXhyP/WWvXe/3zv6teauPF1hFfWbUdh4m3Tdl4Ezyj+Ffq13GPVKL/VqsPWqpYNfxpPL4BVzBwPnHSvuHTDnsTxdL71lacuZzG/xdNndDged1/Gl/ra6Pk6cs+m4n99PCo1cvYodDNxjeTk6TJzc9JYyKLtrfPaGlYftadspbSf9IHf6Rf5tVO1tsH/5FX/TcYJct9abPziNf4z6rdxt2a09heNBp+73G1eYJ8An5eGgFajRejvfrKdczEM+B3u0Hn6Huc1+ysUFxnzzlpUcRjpacjiMBpX8eNAIvHxp5g86K7PGp/B2GRy9Qi/wDkVIKr8d9YvYayTBhldo5UePrbdJPx9MwraRed5bBrmncETp4LRwhtEee71Go1ft4QzeZ3718QbylvPFyts+zrxTxSlWg91TCHmvIeOVkQr2fBh1F+c432m9fDmlrkGDgvqVHuG4nj5S36473HcBT2Ov435pV796j42tR/7TsOy+DZfrYFhoU2tk1Eenh6fr1crGcy7O69VZzq9dnt8eSovRYATdK+1Hg/puhJMa46Rul9h7t+Q0D6U9V+8dLl6xb6eXu+1fbz0nwC/fg9uBaOk20tLL0w7MvNCFZb1SesF594trH3+7qbUw33AD3Qq8Qpv/X2OEV0jnbTKHHOaQAeWAPzgjrmv1FBoJ2PuH1hxH/fflTbcIawh20KiXwWGGHbVyN229FoyCfZeb5/Vrex73bTR3c2NY56QPC3Iepz73XnahNa09tVTZYQdzjwfNKfYh56dlfGNWWNdruF3ao1+WzK5gY9yXOVvZF9cyMva9HEFDJ8tWUK+1ctCv3ehwWttTONE2E5OK09uM5sXDqD+c3ndd2S8sM/DL0xCSXDYOC1kf9y1yfIU/2wz7mAeaYfb9qx3RSt5nw2VvU7/Kqf2Xi+Fo0F41HPW5mec0y+X5EN95heY59LzSO+LspvfVk9/0Ybte2NzRhr3X4pTeyOMfBzYP6WLvcibYd8Hf4em0nEM9ate9mzjFwJbLGPLgrujX9EoL8uvw5mFljaR1S0lR/At3KGuazxbQc0uesuK5t4auyXy2XslJVlr1dtc+MXh78YiwNeoQPf1q1K8sMk7wOK5ex06QK8cZOrNZXLZKz28DeAr41IFTnE36j3jyMmzsY3qubGs3gE9FNHjRUkz9Er5uqe0PltzL3ZT9A/ftvmHemAXK2SZHXVa2o67oOCy4M2lgNnim6kU46BoNkRWGcWlR7smdlAqTZQ9zRzpW2VCH95OD7/cP/psPmXtz3zf6DU8FT3IWGxlP6xWqJ7Gy8kw8w0liah7jMWxJQtf6j+uEv6KcD9pTre9s64W2MuZO5pfhfW2zfwpfuu7k+VgpDR7OYDEjRKL8zDfzx7wC4rnSOW2Li5unMBUnlsFm1J3VdHS8136ca9AajPgQUKsuaWPa2oYVeuL36rj/jrihowJHeoCNVUvzMf1r+exG6aD7MIL24GnggslxNR3XOjnvavV2C8zgw94nBRcyLzJCr/x+fUeNvi0Mj7cFmeV4e2ht6BXhdZIRoNbcjjjvoHlarVk5tAW+58UfuLAx+FHgkwqiUGM9DHs5PLFS0oGUqqM1cFNc7patQVO17za29olt7WU9Ri8LLfjzYddb20/UIYMH0WnLO/IpK4YYDcPc/KZvLNCNj+9w5b3dELgh7R3dB0YD7BnxZOIMp0CTwGKyV0TBR3i/s0297M9GlFTZS/k+eOSwlYPk8t4BOl5zlzzvyPMXOjNo5jFj3ju/n5/Tvusy77DfWeBsgUKKwEvAEfBSEuM+2DN/L3hNnhg4rTefeAk+U5+U8SFcORAn4l3YTFqoRL1ifoIo5M1FzsBt74H+NdZ1RnkQN8uu7PMRjMyIZz3ResHZ54HN6D8TZ9ABvugJQvIO3ho2ZiEKOeuwt0VcO2Dv8IKt1RCoG/oHJFBZQAdyE4lCAWQzwtM4ndRJAB/ppz5bh+iaGjvhCRk3et2HsszljPu9go4Uc/i3OeZ9g4yANM0qstfQ2AAdwJ4aEqETZ4Hz5liDAnxVOa3vwKtNhXBG69HAU7pYa8z8g4sMaLQkVoR0fqEZQN0FNUNcH3Uci75Nz/8IOxg60ymiD+I85q32FvVqo8gZFeKSHEvwWCXPiJxh43vJXcqZc6vxKXd1etr7MEIgyivcGEMFnKmDjMpbnk0bR+VD6xVo6wB5QgFZjPgR+nr3A1+vtEVZP+ctL1sbZDXHuvLOXbOTm2sgomtkkgvmLeLnIrx28u4J3x49bfz7o8kNr5SPh9TiXv7Rdy6mSSki8u8Qs2dEnLC112H/bDpcSjRwZsHQ2cLzuJtxf0s8+nb3il3YePwKeOUBunYVQ2aIgs3u5YyW2DTe/rqyw0i0Z0pw7S8r9JqCqpOjQuZX17txH7asxubfbZvbN7vF9aisZ5jj75f6VCvMBXEqswlXO3dXfq2zv5tfMAd14F/ePOS/krUyc58z28u9+a/NN53BI4vV+5PcWsdH+JnhFHq9HzHL6yF7rHUO434DnEDvGOUvyNFpY/sEPpDsZofYth8TI6lM3nhIZJD78LYM3AaMHuFzg9ElBzZ2h11tYRlhVhQaLskcCAZTunVNC0GGBTtiFjWTrM1EBv56hJjHedv672mv2F6WFs9WBExrTh520JohV4qtXCO6ZeNtVFuIN47PzVE9xDUv8oFm34wI6qkoGiZ5j7I7gwbVe9f5ptFD9bSOCHPwHMWBM8tN+vvsuXullV/NbxqH/bSRGQ1+ve8K+BnmT8zeob/Bcy2mnQppyv6DF2jIma88EjM8c+6yzqdwUJCnIw04ZZGSo0mc/ihKy9wSp5Mnk+EZrUisIhQ0VcfK2OlYSOAj3MD1MA+V3ArILo0bfrUe4rWPY7e9HnsGroc7s6wEsVsskmsAgjW5cUYWWRgDhalYAjsSjazXfOoSWJkSTizrBFtAvq
1gVIG9ywnSt4gtF5CZh8AEQHU3cXvWo3VehoN2eNNVebfJh5/II8bPVHJD5LrgDtMMguGbOoKxEEMNypJIaDKr068yTuzKsFnwqQeX3mZ6Uy2+gbObUXPByCEideJZrM6dFJbm2lpgayJU8cHcH+EKYivEkhO6Ioe2/mS/+C6Oobhvy3fpbJpILonjDIOm2Cq1W6WpJi8Q3lByBaC5pG8xbJWV4TIKquwdnKFIL+XP4t9nodwqcLjgzzx9uc69iC6EWwKfSAYPiAvxVq3tUSwaKy+7hSHse4h8mqeEKBcxYSb/iL6ztJQ6St/ysZYC9QjCYsYFrUlJ8inMxINA86Ml8gVgmInTWsuasEbmhbQtY1nxWGKwOlGkhQ1tbygI02hQOhIJ+1ktBsCiwpyASVmSx7bnFJsHMiMjmmXLwDLEE904+6rZu5XyWMWQORH5LPk17BcxIqf/bud+knvKfnuPi32Uu9EjJ3j6jBNDdr2dVNLapDCy9W1GjiBzks9HFeARrHz7j86tZBCLyBgF+07zWOn9Qe5Hzym9jvvwuRgJGf8R68iM/opP/+C818Kn90QGmXPrrJaVEOZFGVIkZmV2yxNrnXIe2ztB/yU3xiiRd0rwrjgxsMBDcujg0LxwIfJknWSy3B5v5+7VuArc6jAXCcCvX++a1lp43grdTfDtuNBMeQczUoZn1pwH8JnwDJrXYYVnjz+ISEVd/fGh85ILA4Er7fIdor16FfgLLG+WV1ActrJvyQVVvvwCqR28WodZbP8x0nql83o08SPeOq0PWAfsXEV/ezWaDQ8wJ6SkRgG3zNxoDvyomatTfsD4bTCi/esUNlZot19BDvwIhORWyU8R5cajKmLqANEdPp0ZPHjWGljC/juzR3iNa1Tb3mHNZ8Lr42kw+5MCeI9lKSuDtObsUVqs3USnSgZcsepavmDD3wP6WXpCgyijuo3EkYidRk5kx7HTCTWKae048eaGNUdV8VPe3F+BOdCrbxvmROw8jpm0JuhfAqdA5+EVe/gc/KasFshItHotrLC2sfoU2ROyhjTLnmJQrEhw6wSsaKKyZngljCCcNWpGrM8VM7DqzqBextqE1DRTk5aY+8g6gUQtzVMwGgmviGpmOm56wgfMPvAcLs6dbE2m36ieEJfYVszngA1eBoGfL6E+ZrDXR9gIGbvieqIR22oHFVb2DDaDF6o815DRLjvALMxcp9Nh1dVSakyHGbsja6HGJnJUVctOtNdEbXJ1QnpNOXP6FmuPrN7Cah6WpV0s90wzeDhZch7kj6wxDZcX+S+sTVWxv7ryOMf4gbwpbcwNv/lVbDhaloAh9IkhEzidGPjUFT0E/QeQhqnixnCStiTZ82jgAyPbiDkTLwPHGe0wuqFxpYqCdg6wpW6BgSQrRkQCZtw9ksno598Lg2rHu+3td4OrtX9zuKCmVpo/b7oXK/P/++NaRdlB60Jp6OL7bf59cXPwxCNYmaadUel4piJ7TzQmHQl0xQPdC6gTMS+Rmgxzej9m27BS44nUWroKzyrGnNjlDDg1wwPERhE5gK9Ht0LeqzVXGZVchcVQj5UafFTRlH1n6Jjx6/CpCXZCf/MRGsoB6TKuyQxkY4HUP9FlxPYc+z2Ss/BzZpLJehhQ5RdzBchcMDFrq/T2giFU9VrXh012a/CLLeWIP897aj7JA7JwQgolgAODjVHO/ewc926ejl125GImmaj5fpYTyS5bqGyzm6OeZsjiZ/05O0wuV9d24z5b14RthC+RT2E7WS94T4t3UFV0g6qSFWPF/6i6dBRL01nNnWaD75Yq+oLHNRW246083TjeBrK2P8G3nHo4MjCiYHXoQms9gi0mvTT9ueLuGRMV9xHx2Bm8A1F+EKIOi9phDt4hJi1gT/NtPAq7a1iF5r5OvwHnEcPcVs0XzKM9j663x0aAlWAVUV/Hf4277x9aP5q1vH8/93aMAvSzmsfdTTTGkJxISedaVR1ivUPiQ1mzQj7sgFd2gKcFZat6uaUtYJLb6hfGG0qXjl1LkF6TainP/ityvNIdE95GNV7x1+fwNagVNnYeeK/hsvQW821l8NVa0nruqDKtTyXU/HVX18LBO/hV1AsjNC7cK3gH+HHDin1dBpqjIGZKR7lTj4+aQ+ML9v+Ax2eFFx1M+D90DWycqiyeJeq92N9CeQLquh23TxyXXb1MspBxabqu9NNVK+iGy6PqyNwA7NAM9Rft0wN4nfaU+Fh8ranRzF338XoR9VFJHL1aC9JszNCHh968Ok9n8fywmirmIuobI5YAG+yS4yLCjmxPTuxcrQaIEllaTEJXeqREb4nhpvA0ewQtFInIatl1YmRgSWBIw2AwJ+qzRy1w6B2Al1eWt5Da62ejx1cOrUX+pdA9663IzQSXMz/G6MiqwXezX1DX6xlLgiWjKbIXq/OutMNpnTDK1Vpqcd6yCASkRiTvJeieDJTGYBbqRG+DhXDOvbBT9KqP53YPHTo10h2cO4PJo87D6qU9klSFOLKxb8UyIsKb/pK530ddUPE7wG02O7uHXqDyAt6D+HxfMln5cODG822JytIJ9KPhiMWQOZaaAWs2J1YSXW6LBr2BZiH5b/5C2ZN3jrPgWccQn+WZ5JeRvXEOau/KtsGXWFRcI5tjZd7a7z/ACO2M7xwUcPaF9oq4EX5lgfOZ3fdlN6w2QdeaV9dxFK19mom27Nsz9cgBOiHR20SmcMVzoVfsFaKTgb8Yh5DvXNUFjDdWGpWu6Mq+cX4NVE6pS5pTUZxm0p5tfZs/Q8uYxbLmx35cu0dNd16e4qtwmTHthWfG3ImeIa9waUtReEdbH27jPJNGHDZq59/BeaCyg37YY/5HI1d5HTq9Pdibc3aNeKhRjfutGbEia4AlZ7JsWx2dwuWiW8lmU1TNm7IhB+WVi5ITFYH2pB82wkQDaOVE8Sor9cv9tPl6uccZ55pXbWSRKmd8CnXWqGxI1dzXyHkMUxGLmdHuJGLOaN/OBj4TVWt0q6r5TyyzYXwFAQOVRHyY7ms66fldGGUsQNhc+T7TptX8pm7wsU0r1op8j7HnpDUn+NRkJA5KsOUS6t+Vk+U4ZJZmZESRQdNWIEf0cbT3rXJcvzMy+1MNulpkDZo9Na+5bRP8NXR9NjrmVa9ior+BncVd6DZ7pZnVoNeLnXeKv0n71Bq7lKKunCSOFAuyfAtioo6I6HNHrqdRFfKDFfrJpBtn2N+zjzLie6hrRFNRTUydvfYQ4EGuR/3RDNgpl8aUl8j9m8ItqTjBzmbUT2boM0A/lXCI9sg7Ox+6C11YCaSAbnjUTOYu8EAF3jC10koPn6MbR3rfh4iW6izEnx8URmTdXeKlkuKR6CKZb8M6diNESUbLODsX11QffcDo1h902P2DTMfOrDTGyrEuqLvSp/foThF+AZ/qz/40rADlWD2hdQsn6jjG7kVd31SdCo08ov5CsVZTi9VokoWdsydBvrvKk41VXZUzrxqvIhmsavl11GnrK+4ZFUjp+4LU5gtkSJx/P01FXnymtVfOI9G5o9kaYMWY9n9V96GpGdqvT02QC
Ef+CE+S40K3YxKfC5rMoS5Db6l7215XqLiia54+GD0vrCnhxOIdZ6Ym+iV9j2wM9v0VfQ/Ah53QkDydQJq93LDPmDFDF4yqhEzKOJfr3iM+n+r7JUDcrvQEoz8RXdQx7wsPrSoCLXb52r5VMByixwKYglUH3J2wYlgsozl1foGVYzy17jsw5lPPwe7bWbLcPilKjXs8aH+q61pqX9J1SgxsVrUxQ9wRDgxMESu1UR/PBzFTV1mgOewcp99b8aQRx2DHPHmNi8yKMYfIviR6rvsbIhyj+mbB7SlcgDqj1pr7bu4dlihWcf9YhF4lemNr1O1GQ2U1xMiQp8PKzanDFCeYqObp+w6x2ji9ouZqo25hxR3SZ4rHBLJHRmN8eEyLcGLUI2MJqstTMgp4SyJ+7a3PdqjCBaNlJT+ptaNucyuWlHPvku/x9MydqWrFQQcOO6fJyOlR/7/rOW+1QGrX0gMMf37q4r5ful491iVCLBvrupP+BOE0P8hFpBcZ/cD/AW6R+2M3cuYzc+aM4ZybzIG14khD+6Lj8Y569F+wKmRVJxBLTiyB8Z1fxzASvyMUo/24YZ2jzBJ6BySc7v9glQn3eX5Dx6nh2Hdcx+35/rt6bo2sdD2ykt/y6XgaXv13fTpkDq/+uz79KexK/0Av6lGCb0/etPvQp1veIVmP03cqEIV/4j6f7uiI5+FP4XMhF8tDTb3GQp+mV48ZTnSHSmMmcBhgLXD/GLkH4kMaj37MC1BT96sUJlQZ3qdoi1gLuUFQItraYVW8dfg66uKGECsdNeKzS97DoFSTff8SiX+Zl/zKvq+V3do5eCZGy2BMDQOuOFN9yy6643FiMKKu0hL7M41kzXmTBVT5F208q/sxC5UhAgOXJXg15KAt4HM/yED60XfE8rRv9BKwOg5GUFUNkYfGOoKEDVZZLW/yKkQSY4q+zt8KQtIckPjzzJttGRJL3dTiyiMuaYG6+t47zMApiTWQfY32GWeR1BkA7Tm9Uqz+9wv8pE9V8gSyc6dM4aM8QTE4qrtS4Su5cRyhnuILdBvcvMoRWHkaPuSFVyKeIucQW5/cYyuyJll2hZnKsEyDOtDBz9uvsdsc8h0tFHeeyS0FG6BO3gftLRp5XIuzWH/VQTBNMRhaUxGdkdEIX6+jkb6dYG5dGRwV4ybQE4wICxauIT2BtHHyK+RfsGfeE446vhS314p5EsyNeWMVVeFxbPkKz2AQAj1JxA5aPjXFN6nOO9ak7gebsN1vvaKLI0etaaCzHbecge6HqCIyr78tjNDrf7ovpiX1KdcEmYNt+l2uiRg56lBQGUaGB4qd3rLHuwDCvwBxodslrimQY4B76H3cg+aNCLmdL33C6u512Y3uX8AjJ/PAL9ZpTP6tNcBUz75YpwHHBz3nHkxnNDzQpcLrqF2gK244QFeU5hHl3nUtXgEizwQsxJr2g2KIaLdkmXVWweql9DZlZRnUFkrJjJlGVZ9hKjytuBY9fzpLNtXzxF2ApE/VObfy7cKVpzNZcH68i2BG4tyf9GmemCup5eiegGhUslS47cfMBndzeTdyonof7JrZR7UL3sIC8xeL/gmLFR8nN0U2ZG7ZQUBmXuQEj8ye1IyagOlcEA5feuLxlorIipUvxL5Rz0318eg6N+t2kz7yxF5pJzebVeSIcLv2DuqmMBn9VLSp67dFZFUHxMbQOa86q6J6gdyKAc+j/bqOUNq/Q8JX9X0T/Ge8es573eKdUZW32ASRueHQeauUmsvOBvhU61apqRmknjTeutI5sKvVfM99n3hxEzOse7rwBNIBFOHYm2thDuTfZMjAeIF51WfOzFxqYlb9PzUCkLB8Rr7lNO798hI6rN+ZYXwOel+IgviHnQqohumMQ7E17C9HFdPiVBlDkrVRMkTaO8otMf6efAu6/1mt4y3TfhEeH73oTmWnOitb6Ett/aRHRHXvJ3Jw3h2O3lphtKX5Dp3DzfDmwdb54fF6WsfdruExumPlojohu+CtON74G814cxbz5+xxFTZqvuONHQszMv9uj45IdFT3x/Qccn9M70/uOn920xk+9bO7zlZF79bwqrTgqPYunpb1P9VbAy+M29TLYI43joD30JG3fKp22u8iEF07QnfJH6W0At5Z4jbfb2Hrt9Fuakvz1M0Hu7F0HPeJ/nBnTk11leK+L2vA6h01+u0oMYyBjAtzz9W7WTJ9jHiNiJU+3X5DPw6iF26uxzwi/ZbdfTPnvTbphLF2d3qjA/eNdzrAj+gOjNgdRelPLTTQGYO3deBWmFXdJFML3mF82l/qrrH2J7iXGbAnY4bOfbwJp6VuNWNdOLH0vj749YkDUefCt2KAMWGFVpjfEzuXccMYfBt/B98acQCnfmRd7418tRkfo0unrbwdRnpHwUpGtXeTQSTPEicZ22PGDtHvcJKCh3ofONury5TcH3Mz1+omMHkR8lC110gjxPLjXQumlyjKVXjjE57EqngYPTJ8/QvvMkR1AhX9eI9O/K3uNEBmgXtUPzP2rG6hODPcr9jE5Gy6vjkCdC3ADUKMcEJdCskn9cxIV94IonNF7RW1bqP/WWvlO2+vy1tCWMGnBUUeO7rnVQL7Pu77fNvPRvNmun8mrVf160q7q2+nmHyV2EHdfrE6j2yMoHKCUDjHl4j31TdVJA+NNC96xwB5hwrfQqLuZCf7pbUvlLrBF96fYepXB/fECVW2P1DZ+6BXS72NpYh775DLqQ8ghoKZhyZxMN+G8l5MdCVlZgBE2O+8hS4VPeEbMrq2NXc+07eUhFFjF7Tp1GAcS+Z9mj9XKFhQt9FU3SUEdB9/r4UaxfLpsfqJ7jWSXklGZWaxqf7yRN2zkXtGrIlFhprK22gldfQh6jd8IBeKVxY705jHTmBYPG3eQqBrIninB9hn1YOOrmGLx4xXWlhnUVhRMEpKt2KVF91PZGe3FXimcRfvQuKbk4DPb8Eh1cubaTPWxxLoN+K8F3ED983ufKW2uOs+32iFXq27II2i7V8rLBll08orJtmKH6nYVvblvIVzkVvWxPS8Taze06NWxW8RcWiJ+4ncJL6wrbWk+Y93w38Y35LIJgMytBq/9UpvXoB3z6H78CZRK4bMQ757S9gXxJG//e0pxCvxnkNv5c/DKd6JNxlvns/P8Nnfg3m42Hz76z+/bZ6Dl//zdybi7YO/u4bfenEh39FoTfi//37Gf/3r3w+pFqYHUwAA", "string": "", "encoding": "utf-8"}, "url": "https://api.github.com/repos/sigmavirus24/github3.py/contents/github3/pulls.py?ref=fe5dd44af3eb7110a4a1f8c38f0c9bb235349ff6"}, "request": {"headers": {"Connection": "keep-alive", "Accept-Encoding": "gzip, deflate", "User-Agent": "github3.py/1.0.0a1", "Accept-Charset": "utf-8", "Accept": 
"application/vnd.github.v3.full+json", "Content-Type": "application/json"}, "uri": "https://api.github.com/repos/sigmavirus24/github3.py/contents/github3/pulls.py?ref=fe5dd44af3eb7110a4a1f8c38f0c9bb235349ff6", "method": "GET", "body": {"string": "", "encoding": "utf-8"}}}], "recorded_with": "betamax/0.4.2"}
\ No newline at end of file
diff --git a/tests/integration/test_pulls.py b/tests/integration/test_pulls.py
index 331689a5..c934e5cf 100644
--- a/tests/integration/test_pulls.py
+++ b/tests/integration/test_pulls.py
@@ -3,6 +3,7 @@
import tempfile
import github3
+from github3 import repos
from .helper import IntegrationHelper
@@ -151,18 +152,6 @@ class TestPullFile(IntegrationHelper):
owner='sigmavirus24', repo='github3.py', pull_number=286,
filename='github3/pulls.py'
)
-
- assert isinstance(pull_file.contents(), bytes)
-
- def test_download(self):
- """Show that a user can download a file in a pull request."""
- cassette_name = self.cassette_name('download')
- with self.recorder.use_cassette(cassette_name):
- pull_file = self.get_pull_request_file(
- owner='sigmavirus24', repo='github3.py', pull_number=286,
- filename='github3/pulls.py'
- )
-
- with tempfile.NamedTemporaryFile() as fd:
- filename = pull_file.download(fd)
- assert filename is not None
+ contents = pull_file.contents()
+ assert isinstance(contents, repos.contents.Contents)
+ assert contents.decoded != b''
diff --git a/tests/unit/test_pulls.py b/tests/unit/test_pulls.py
index f893f478..a7dfdd80 100644
--- a/tests/unit/test_pulls.py
+++ b/tests/unit/test_pulls.py
@@ -298,40 +298,10 @@ class TestPullFile(UnitHelper):
" module Test")
}
- @mock.patch('github3.utils.stream_response_to_file')
- def test_download(self, stream_response_to_file):
- """Verify the proper request is made to download file contents."""
- response_mock = mock.Mock()
- response_mock.status_code = 200
- self.session.get.return_value = response_mock
-
- self.instance.download()
-
- self.session.get.assert_called_once_with(
- self.example_data['raw_url'], stream=True,
- headers={'Accept': 'application/octet-stream'}
- )
- stream_response_to_file.assert_called_once_with(response_mock,
- 'file1.txt')
-
- @mock.patch('github3.utils.stream_response_to_file')
- def test_download_does_not_stream(self, stream_response_to_file):
- """Verify the proper request is made to download file contents."""
- # Since the default return value for self.session.get is None we do
- # not need to mock out the response object in this test.
- self.instance.download()
-
- self.session.get.assert_called_once_with(
- self.example_data['raw_url'], stream=True,
- headers={'Accept': 'application/octet-stream'}
- )
- assert stream_response_to_file.called is False
-
def test_contents(self):
"""Verify the request made to fetch a pull request file contents."""
self.instance.contents()
self.session.get.assert_called_once_with(
- self.example_data['raw_url'],
- headers={'Accept': 'application/octet-stream'}
+ self.example_data['contents_url']
)
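For context, a minimal sketch of the behavior these updated tests pin down: `PullFile.contents()` now issues a GET on the file's `contents_url` and returns a `Contents` object, instead of raw bytes fetched from `raw_url` with an octet-stream Accept header; the `download()` tests are removed along with the helper they exercised. The `pull_file` below is assumed to be a `PullFile` obtained as in the integration test above.

from github3 import repos

contents = pull_file.contents()            # GET on the file's contents_url
assert isinstance(contents, repos.contents.Contents)
data = contents.decoded                    # base64-decoded bytes of the file body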
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 3
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | betamax==0.9.0
betamax-matchers==0.4.0
certifi==2025.1.31
charset-normalizer==3.4.1
coverage==7.8.0
exceptiongroup==1.2.2
execnet==2.1.1
-e git+https://github.com/sigmavirus24/github3.py.git@270f6d9c0978d0d2da2e4c98da12cbbb1d10c567#egg=github3.py
idna==3.10
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
requests==2.32.3
requests-toolbelt==1.0.0
tomli==2.2.1
typing_extensions==4.13.0
uritemplate==4.1.1
uritemplate.py==3.0.2
urllib3==2.3.0
| name: github3.py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- betamax==0.9.0
- betamax-matchers==0.4.0
- certifi==2025.1.31
- charset-normalizer==3.4.1
- coverage==7.8.0
- exceptiongroup==1.2.2
- execnet==2.1.1
- idna==3.10
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- requests==2.32.3
- requests-toolbelt==1.0.0
- tomli==2.2.1
- typing-extensions==4.13.0
- uritemplate==4.1.1
- uritemplate-py==3.0.2
- urllib3==2.3.0
prefix: /opt/conda/envs/github3.py
| [
"tests/integration/test_pulls.py::TestPullFile::test_contents",
"tests/unit/test_pulls.py::TestPullFile::test_contents"
]
| [
"tests/integration/test_pulls.py::TestPullRequest::test_diff",
"tests/integration/test_pulls.py::TestPullRequest::test_patch"
]
| [
"tests/integration/test_pulls.py::TestPullRequest::test_close",
"tests/integration/test_pulls.py::TestPullRequest::test_commits",
"tests/integration/test_pulls.py::TestPullRequest::test_create_review_comment",
"tests/integration/test_pulls.py::TestPullRequest::test_files",
"tests/integration/test_pulls.py::TestPullRequest::test_is_merged",
"tests/integration/test_pulls.py::TestPullRequest::test_issue_comments",
"tests/integration/test_pulls.py::TestPullRequest::test_reopen",
"tests/integration/test_pulls.py::TestPullRequest::test_review_comments",
"tests/integration/test_pulls.py::TestPullRequest::test_update",
"tests/integration/test_pulls.py::TestReviewComment::test_reply",
"tests/unit/test_pulls.py::TestPullRequest::test_close",
"tests/unit/test_pulls.py::TestPullRequest::test_create_review_comment",
"tests/unit/test_pulls.py::TestPullRequest::test_diff",
"tests/unit/test_pulls.py::TestPullRequest::test_is_merged",
"tests/unit/test_pulls.py::TestPullRequest::test_merge",
"tests/unit/test_pulls.py::TestPullRequest::test_patch",
"tests/unit/test_pulls.py::TestPullRequest::test_reopen",
"tests/unit/test_pulls.py::TestPullRequest::test_update",
"tests/unit/test_pulls.py::TestPullRequestRequiresAuthentication::test_close",
"tests/unit/test_pulls.py::TestPullRequestRequiresAuthentication::test_create_review_comment",
"tests/unit/test_pulls.py::TestPullRequestRequiresAuthentication::test_merge",
"tests/unit/test_pulls.py::TestPullRequestRequiresAuthentication::test_reopen",
"tests/unit/test_pulls.py::TestPullRequestRequiresAuthentication::test_update",
"tests/unit/test_pulls.py::TestPullRequestIterator::test_commits",
"tests/unit/test_pulls.py::TestPullRequestIterator::test_files",
"tests/unit/test_pulls.py::TestPullRequestIterator::test_issue_comments",
"tests/unit/test_pulls.py::TestPullRequestIterator::test_review_comments",
"tests/unit/test_pulls.py::TestReviewComment::test_reply",
"tests/unit/test_pulls.py::TestReviewComment::test_reply_requires_authentication"
]
| []
| BSD 3-Clause "New" or "Revised" License | 170 | [
"github3/pulls.py",
"AUTHORS.rst",
"github3/repos/contents.py"
]
| [
"github3/pulls.py",
"AUTHORS.rst",
"github3/repos/contents.py"
]
|
mne-tools__mne-python-2228 | b143c6df244dca2e6121048fae99cb6e1cfa84ab | 2015-06-21 17:43:19 | 632e49f0470fc9526936dbb474fd6aa46501fe4d | diff --git a/doc/source/whats_new.rst b/doc/source/whats_new.rst
index 5ff2f77d1..3c7c0a5fd 100644
--- a/doc/source/whats_new.rst
+++ b/doc/source/whats_new.rst
@@ -14,6 +14,8 @@ BUG
- Fix ``mne.io.add_reference_channels`` not setting ``info[nchan]`` correctly by `Federico Raimondo`_
+ - Fix ``mne.stats.bonferroni_correction`` reject mask output to use corrected p-values by `Denis Engemann`_
+
.. _changes_0_9:
Changelog
diff --git a/mne/stats/multi_comp.py b/mne/stats/multi_comp.py
index 51751ea1a..a26b4a772 100644
--- a/mne/stats/multi_comp.py
+++ b/mne/stats/multi_comp.py
@@ -98,5 +98,5 @@ def bonferroni_correction(pval, alpha=0.05):
"""
pval = np.asarray(pval)
pval_corrected = pval * float(pval.size)
- reject = pval < alpha
+ reject = pval_corrected < alpha
return reject, pval_corrected
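A minimal sketch of what the one-line fix above changes, using plain NumPy with illustrative values (not MNE code):

import numpy as np

pval = np.array([0.01, 0.04])
alpha = 0.05
pval_corrected = pval * float(pval.size)   # Bonferroni: scale by the number of tests -> [0.02, 0.08]
reject_old = pval < alpha                  # [True, True]  -- mask ignored the correction
reject_new = pval_corrected < alpha        # [True, False] -- mask now consistent with pval_corrected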
| BUG/API issue with multi_comp.bonferroni_correction
Do people agree that this is a bug, or at least a very unexpected output:
https://github.com/mne-tools/mne-python/blob/master/mne/stats/multi_comp.py#L101
`pval_corrected` should instead be used for creating the output mask.
@agramfort @Eric89GXL @mainakjas | mne-tools/mne-python | diff --git a/mne/stats/tests/test_multi_comp.py b/mne/stats/tests/test_multi_comp.py
index 4cba14113..76b2c99db 100644
--- a/mne/stats/tests/test_multi_comp.py
+++ b/mne/stats/tests/test_multi_comp.py
@@ -1,5 +1,6 @@
import numpy as np
-from numpy.testing import assert_almost_equal, assert_allclose, assert_raises
+from numpy.testing import (
+ assert_almost_equal, assert_allclose, assert_raises, assert_array_equal)
from nose.tools import assert_true
from scipy import stats
@@ -25,6 +26,8 @@ def test_multi_pval_correction():
assert_true(pval_bonferroni.ndim == 2)
assert_true(reject_bonferroni.ndim == 2)
assert_allclose(pval_bonferroni / 10000, pval)
+ reject_expected = pval_bonferroni < alpha
+ assert_array_equal(reject_bonferroni, reject_expected)
fwer = np.mean(reject_bonferroni)
assert_almost_equal(fwer, alpha, 1)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 3,
"test_score": 1
},
"num_modified_files": 2
} | 0.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"numpy>=1.16.0",
"pandas>=1.0.0",
"scikit-learn",
"h5py",
"pysurfer",
"nose",
"nose-timer",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.7",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | apptools==5.2.1
certifi @ file:///croot/certifi_1671487769961/work/certifi
configobj==5.0.9
cycler==0.11.0
envisage==7.0.3
exceptiongroup==1.2.2
fonttools==4.38.0
h5py==3.8.0
importlib-metadata==6.7.0
importlib-resources==5.12.0
iniconfig==2.0.0
joblib==1.3.2
kiwisolver==1.4.5
matplotlib==3.5.3
mayavi==4.8.1
-e git+https://github.com/mne-tools/mne-python.git@b143c6df244dca2e6121048fae99cb6e1cfa84ab#egg=mne
nibabel==4.0.2
nose==1.3.7
nose-timer==1.0.1
numpy==1.21.6
packaging==24.0
pandas==1.3.5
Pillow==9.5.0
pluggy==1.2.0
pyface==8.0.0
Pygments==2.17.2
pyparsing==3.1.4
pysurfer==0.11.2
pytest==7.4.4
python-dateutil==2.9.0.post0
pytz==2025.2
scikit-learn==1.0.2
scipy==1.7.3
six==1.17.0
threadpoolctl==3.1.0
tomli==2.0.1
traits==6.4.3
traitsui==8.0.0
typing_extensions==4.7.1
vtk==9.3.1
zipp==3.15.0
| name: mne-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=22.3.1=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- apptools==5.2.1
- configobj==5.0.9
- cycler==0.11.0
- envisage==7.0.3
- exceptiongroup==1.2.2
- fonttools==4.38.0
- h5py==3.8.0
- importlib-metadata==6.7.0
- importlib-resources==5.12.0
- iniconfig==2.0.0
- joblib==1.3.2
- kiwisolver==1.4.5
- matplotlib==3.5.3
- mayavi==4.8.1
- nibabel==4.0.2
- nose==1.3.7
- nose-timer==1.0.1
- numpy==1.21.6
- packaging==24.0
- pandas==1.3.5
- pillow==9.5.0
- pluggy==1.2.0
- pyface==8.0.0
- pygments==2.17.2
- pyparsing==3.1.4
- pysurfer==0.11.2
- pytest==7.4.4
- python-dateutil==2.9.0.post0
- pytz==2025.2
- scikit-learn==1.0.2
- scipy==1.7.3
- six==1.17.0
- threadpoolctl==3.1.0
- tomli==2.0.1
- traits==6.4.3
- traitsui==8.0.0
- typing-extensions==4.7.1
- vtk==9.3.1
- zipp==3.15.0
prefix: /opt/conda/envs/mne-python
| [
"mne/stats/tests/test_multi_comp.py::test_multi_pval_correction"
]
| []
| []
| []
| BSD 3-Clause "New" or "Revised" License | 171 | [
"mne/stats/multi_comp.py",
"doc/source/whats_new.rst"
]
| [
"mne/stats/multi_comp.py",
"doc/source/whats_new.rst"
]
|
|
poliastro__poliastro-51 | f81a56b7ab28391b66091bcf002462749d2a8f65 | 2015-06-23 15:32:34 | f47ee2b0790a82b3a76e24794d2916deb8340d1b | diff --git a/README b/README
index f6d8cd9c..f238398c 100644
--- a/README
+++ b/README
@@ -20,10 +20,6 @@
.. image:: https://ci.appveyor.com/api/projects/status/ajg5j8byv8isslso/branch/master?svg=true
:target: https://ci.appveyor.com/project/poliastro/poliastro/branch/master
-.. image:: https://badge.waffle.io/poliastro/poliastro.png?label=Ready
- :target: https://waffle.io/poliastro/poliastro
- :alt: 'Stories in Ready'
-
.. image:: https://img.shields.io/badge/license-MIT-blue.svg
:target: https://raw.githubusercontent.com/poliastro/poliastro/master/COPYING
diff --git a/poliastro/twobody/propagation.py b/poliastro/twobody/propagation.py
index 70a91f4f..567fd624 100644
--- a/poliastro/twobody/propagation.py
+++ b/poliastro/twobody/propagation.py
@@ -85,7 +85,7 @@ def _kepler(k, r0, v0, tof, numiter, rtol):
norm_r = xi * xi * c2_psi + dot_r0v0 / sqrt_mu * xi * (1 - psi * c3_psi) + norm_r0 * (1 - psi * c2_psi)
xi_new = xi + (sqrt_mu * tof - xi * xi * xi * c3_psi - dot_r0v0 / sqrt_mu * xi * xi * c2_psi -
norm_r0 * xi * (1 - psi * c3_psi)) / norm_r
- if abs((xi_new - xi) / xi_new) < rtol:
+ if abs(np.divide(xi_new - xi, xi_new)) < rtol or abs(xi_new - xi) < rtol:
break
else:
count += 1
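A minimal sketch of why the new convergence check above survives a universal anomaly stuck at zero (which is what happens for tof = 0), using plain floats rather than the full `_kepler` loop:

import numpy as np

xi, xi_new, rtol = 0.0, 0.0, 1e-10
# The old test, abs((xi_new - xi) / xi_new), raises ZeroDivisionError on plain
# Python floats when xi_new == 0. np.divide returns nan (with a runtime
# warning) instead of raising, so its comparison is simply False, and the
# added absolute-tolerance term catches the xi_new == xi == 0 case.
converged = abs(np.divide(xi_new - xi, xi_new)) < rtol or abs(xi_new - xi) < rtol
print(converged)  # True, via the absolute-tolerance branch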
| ZeroDivisionError when propagating with time zero
Actually, if NUMBA_DISABLE_JIT is exported this appears as a maximum-number-of-iterations error instead. Why it changes is a matter for further study, but the easiest fix is to handle the special case of time = 0.0 at the very beginning of the `_kepler` function. | poliastro/poliastro | diff --git a/poliastro/tests/test_twobody.py b/poliastro/tests/test_twobody.py
index ac0b0e97..5da1ac2d 100644
--- a/poliastro/tests/test_twobody.py
+++ b/poliastro/tests/test_twobody.py
@@ -228,8 +228,23 @@ def test_propagate():
ss0 = State.from_vectors(Earth, r0, v0)
tof = 40 * u.min
ss1 = ss0.propagate(tof)
- r, v = ss1.r, ss1.v
+ r, v = ss1.rv()
assert_array_almost_equal(r.value, [-4219.7527, 4363.0292, -3958.7666],
decimal=1)
assert_array_almost_equal(v.value, [3.689866, -1.916735, -6.112511],
decimal=4)
+
+
+def test_propagation_zero_time_returns_same_state():
+ # Bug #50
+ r0 = [1131.340, -2282.343, 6672.423] * u.km
+ v0 = [-5.64305, 4.30333, 2.42879] * u.km / u.s
+ ss0 = State.from_vectors(Earth, r0, v0)
+ tof = 0 * u.s
+
+ ss1 = ss0.propagate(tof)
+
+ r, v = ss1.rv()
+
+ assert_array_almost_equal(r.value, r0.value)
+ assert_array_almost_equal(v.value, v0.value)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 2
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "numpy>=1.16.0 astropy>=1.0 numba>=0.18 matplotlib jplephem scipy",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gfortran"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | astropy @ file:///croot/astropy_1697468907928/work
Brotli @ file:///croot/brotli-split_1736182456865/work
contourpy @ file:///croot/contourpy_1738160616259/work
cycler @ file:///tmp/build/80754af9/cycler_1637851556182/work
exceptiongroup==1.2.2
fonttools @ file:///croot/fonttools_1737039080035/work
importlib_resources @ file:///croot/importlib_resources-suite_1720641103994/work
iniconfig==2.1.0
jplephem @ file:///home/conda/feedstock_root/build_artifacts/jplephem_1734349820189/work
kiwisolver @ file:///croot/kiwisolver_1672387140495/work
llvmlite @ file:///croot/llvmlite_1736366675558/work
matplotlib==3.9.2
numba @ file:///croot/numba_1738606613869/work
numpy @ file:///croot/numpy_and_numpy_base_1708638617955/work/dist/numpy-1.26.4-cp39-cp39-linux_x86_64.whl#sha256=b69ac3eb7538c5a224df429dc49031914fb977825ee007f2c77a13f7aa6cd769
packaging @ file:///croot/packaging_1734472117206/work
pillow @ file:///croot/pillow_1738010226202/work
pluggy==1.5.0
-e git+https://github.com/poliastro/poliastro.git@f81a56b7ab28391b66091bcf002462749d2a8f65#egg=poliastro
pyerfa @ file:///croot/pyerfa_1738082786199/work
pyparsing @ file:///croot/pyparsing_1731445506121/work
PyQt6==6.7.1
PyQt6_sip @ file:///croot/pyqt-split_1740498191142/work/pyqt_sip
pytest==8.3.5
python-dateutil @ file:///croot/python-dateutil_1716495738603/work
PyYAML @ file:///croot/pyyaml_1728657952215/work
scipy @ file:///croot/scipy_1733756309941/work/dist/scipy-1.13.1-cp39-cp39-linux_x86_64.whl#sha256=3b247b926209f2d9f719ebae39faf3ff891b2596150ed8f8349adfc3eb19441c
sip @ file:///croot/sip_1738856193618/work
six @ file:///tmp/build/80754af9/six_1644875935023/work
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
tornado @ file:///croot/tornado_1733960490606/work
unicodedata2 @ file:///croot/unicodedata2_1736541023050/work
zipp @ file:///croot/zipp_1732630741423/work
| name: poliastro
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- astropy=5.3.4=py39ha9d4c09_0
- blas=1.0=openblas
- brotli-python=1.0.9=py39h6a678d5_9
- bzip2=1.0.8=h5eee18b_6
- c-ares=1.19.1=h5eee18b_0
- ca-certificates=2025.2.25=h06a4308_0
- contourpy=1.2.1=py39hdb19cb5_1
- cycler=0.11.0=pyhd3eb1b0_0
- cyrus-sasl=2.1.28=h52b45da_1
- expat=2.6.4=h6a678d5_0
- fontconfig=2.14.1=h55d465d_3
- fonttools=4.55.3=py39h5eee18b_0
- freetype=2.12.1=h4a9f257_0
- icu=73.1=h6a678d5_0
- importlib_resources=6.4.0=py39h06a4308_0
- jpeg=9e=h5eee18b_3
- jplephem=2.21=pyh9b8db34_1
- kiwisolver=1.4.4=py39h6a678d5_0
- krb5=1.20.1=h143b758_1
- lcms2=2.16=hb9589c4_0
- ld_impl_linux-64=2.40=h12ee557_0
- lerc=4.0.0=h6a678d5_0
- libabseil=20250127.0=cxx17_h6a678d5_0
- libcups=2.4.2=h2d74bed_1
- libcurl=8.12.1=hc9e6f67_0
- libdeflate=1.22=h5eee18b_0
- libedit=3.1.20230828=h5eee18b_0
- libev=4.33=h7f8727e_1
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgfortran-ng=11.2.0=h00389a5_1
- libgfortran5=11.2.0=h1234567_1
- libglib=2.78.4=hdc74915_0
- libgomp=11.2.0=h1234567_1
- libiconv=1.16=h5eee18b_3
- libllvm14=14.0.6=hecde1de_4
- libnghttp2=1.57.0=h2d74bed_0
- libopenblas=0.3.21=h043d6bf_0
- libpng=1.6.39=h5eee18b_0
- libpq=17.4=hdbd6064_0
- libprotobuf=5.29.3=hc99497a_0
- libssh2=1.11.1=h251f7ec_0
- libstdcxx-ng=11.2.0=h1234567_1
- libtiff=4.5.1=hffd6297_1
- libuuid=1.41.5=h5eee18b_0
- libwebp-base=1.3.2=h5eee18b_1
- libxcb=1.15=h7f8727e_0
- libxkbcommon=1.0.1=h097e994_2
- libxml2=2.13.5=hfdd30dd_0
- llvmlite=0.43.0=py39h6a678d5_1
- lz4-c=1.9.4=h6a678d5_1
- matplotlib=3.9.2=py39h06a4308_1
- matplotlib-base=3.9.2=py39hbfdbfaf_1
- mysql=8.4.0=h721767e_2
- ncurses=6.4=h6a678d5_0
- numba=0.60.0=py39h6a678d5_1
- numpy=1.26.4=py39heeff2f4_0
- numpy-base=1.26.4=py39h8a23956_0
- openjpeg=2.5.2=he7f1fd0_0
- openldap=2.6.4=h42fbc30_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pcre2=10.42=hebb0a14_1
- pillow=11.1.0=py39hcea889d_0
- pip=25.0=py39h06a4308_0
- pybind11-abi=4=hd3eb1b0_1
- pyerfa=2.0.1.5=py39h5eee18b_0
- pyparsing=3.2.0=py39h06a4308_0
- pyqt=6.7.1=py39h6a678d5_0
- pyqt6-sip=13.9.1=py39h5eee18b_0
- python=3.9.21=he870216_1
- python-dateutil=2.9.0post0=py39h06a4308_2
- pyyaml=6.0.2=py39h5eee18b_0
- qtbase=6.7.3=hdaa5aa8_0
- qtdeclarative=6.7.3=h6a678d5_0
- qtsvg=6.7.3=he621ea3_0
- qttools=6.7.3=h80c7b02_0
- qtwebchannel=6.7.3=h6a678d5_0
- qtwebsockets=6.7.3=h6a678d5_0
- readline=8.2=h5eee18b_0
- scipy=1.13.1=py39heeff2f4_1
- setuptools=75.8.0=py39h06a4308_0
- sip=6.10.0=py39h6a678d5_0
- six=1.16.0=pyhd3eb1b0_1
- sqlite=3.45.3=h5eee18b_0
- tbb=2021.8.0=hdb19cb5_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tornado=6.4.2=py39h5eee18b_0
- tzdata=2025a=h04d1e81_0
- unicodedata2=15.1.0=py39h5eee18b_1
- wheel=0.45.1=py39h06a4308_0
- xcb-util-cursor=0.1.4=h5eee18b_0
- xz=5.6.4=h5eee18b_1
- yaml=0.2.5=h7b6447c_0
- zipp=3.21.0=py39h06a4308_0
- zlib=1.2.13=h5eee18b_1
- zstd=1.5.6=hc292b87_0
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- pluggy==1.5.0
- pytest==8.3.5
prefix: /opt/conda/envs/poliastro
| [
"poliastro/tests/test_twobody.py::test_propagation_zero_time_returns_same_state"
]
| [
"poliastro/tests/test_twobody.py::test_state_has_attractor_given_in_constructor",
"poliastro/tests/test_twobody.py::test_default_time_for_new_state",
"poliastro/tests/test_twobody.py::test_state_has_elements_given_in_constructor",
"poliastro/tests/test_twobody.py::test_state_has_individual_elements",
"poliastro/tests/test_twobody.py::test_circular_has_proper_semimajor_axis",
"poliastro/tests/test_twobody.py::test_geosync_has_proper_period",
"poliastro/tests/test_twobody.py::test_parabolic_elements_fail_early",
"poliastro/tests/test_twobody.py::test_parabolic_has_proper_eccentricity",
"poliastro/tests/test_twobody.py::test_parabolic_has_zero_energy",
"poliastro/tests/test_twobody.py::test_perigee_and_apogee",
"poliastro/tests/test_twobody.py::test_convert_from_rv_to_coe",
"poliastro/tests/test_twobody.py::test_apply_zero_maneuver_returns_equal_state",
"poliastro/tests/test_twobody.py::test_apply_maneuver_changes_epoch",
"poliastro/tests/test_twobody.py::test_perifocal_points_to_perigee",
"poliastro/tests/test_twobody.py::test_pqw_for_circular_equatorial_orbit"
]
| [
"poliastro/tests/test_twobody.py::test_state_raises_unitserror_if_elements_units_are_wrong",
"poliastro/tests/test_twobody.py::test_state_has_rv_given_in_constructor",
"poliastro/tests/test_twobody.py::test_state_raises_unitserror_if_rv_units_are_wrong",
"poliastro/tests/test_twobody.py::test_convert_from_coe_to_rv",
"poliastro/tests/test_twobody.py::test_propagate"
]
| []
| MIT License | 172 | [
"poliastro/twobody/propagation.py",
"README"
]
| [
"poliastro/twobody/propagation.py",
"README"
]
|
|
CybOXProject__python-cybox-265 | c889ade168e7e0a411af9c836c95d61d7b5c4583 | 2015-06-23 18:29:58 | a378deb68b3ac56360c5cc35ff5aad1cd3dcab83 | diff --git a/cybox/bindings/extensions/location/ciq_address_3_0.py b/cybox/bindings/extensions/location/ciq_address_3_0.py
index ae0ad70..a148347 100644
--- a/cybox/bindings/extensions/location/ciq_address_3_0.py
+++ b/cybox/bindings/extensions/location/ciq_address_3_0.py
@@ -2,7 +2,9 @@
# See LICENSE.txt for complete terms.
import sys
-from cybox.bindings import *
+
+from mixbox.binding_utils import *
+
import cybox.bindings.cybox_common as cybox_common
XML_NS = "http://cybox.mitre.org/extensions/Address#CIQAddress3.0-1"
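The substance of the one-line import swap above: the star import that supplies this binding module's generateDS runtime helpers now comes from `mixbox.binding_utils` rather than from `cybox.bindings`, whose helpers were moved out during the mixboxification effort, so the extension can be imported again. A minimal usage sketch mirroring the new smoke test below:

from mixbox.vendor.six import StringIO
from cybox.bindings.extensions.location import ciq_address_3_0

addr = ciq_address_3_0.CIQAddress3_0InstanceType()
s = StringIO()
addr.export(s.write, 0)   # serializes an empty CIQAddress3.0 instance; the test checks len == 165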
 | CIQ Address extension not mixboxified
I think the `cybox.extensions.location.ciq_address_3_0` module was left out of the mixboxification effort. | CybOXProject/python-cybox | diff --git a/cybox/test/extensions/__init__.py b/cybox/test/extensions/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/cybox/test/extensions/location/__init__.py b/cybox/test/extensions/location/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/cybox/test/extensions/location/ciq_test.py b/cybox/test/extensions/location/ciq_test.py
new file mode 100644
index 0000000..a9d4318
--- /dev/null
+++ b/cybox/test/extensions/location/ciq_test.py
@@ -0,0 +1,26 @@
+# Copyright (c) 2015, The MITRE Corporation. All rights reserved.
+# See LICENSE.txt for complete terms.
+
+"""Tests for various encoding issues throughout the library"""
+
+import unittest
+
+from mixbox.vendor.six import StringIO
+
+from cybox.bindings.extensions.location import ciq_address_3_0
+
+
+class CIQAddressTests(unittest.TestCase):
+
+ def test_can_load_extension(self):
+ addr = ciq_address_3_0.CIQAddress3_0InstanceType()
+
+ # Really basic test to verify the extension works.
+ s = StringIO()
+ addr.export(s.write, 0)
+ xml = s.getvalue()
+ self.assertEqual(165, len(xml))
+
+
+if __name__ == "__main__":
+ unittest.main()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 1
} | 2.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[docs,test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y libxml2-dev libxslt1-dev zlib1g-dev"
],
"python": "2.7",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
-e git+https://github.com/CybOXProject/python-cybox.git@c889ade168e7e0a411af9c836c95d61d7b5c4583#egg=cybox
distlib==0.3.9
docutils==0.18.1
filelock==3.4.1
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
Jinja2==3.0.3
lxml==5.3.1
MarkupSafe==2.0.1
mixbox==1.0.5
nose==1.3.0
ordered-set==4.0.2
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
py==1.11.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
python-dateutil==2.9.0.post0
pytz==2025.2
six==1.17.0
snowballstemmer==2.2.0
Sphinx==1.3.1
sphinx-rtd-theme==0.1.8
tomli==1.2.3
tox==1.6.1
typing_extensions==4.1.1
virtualenv==20.17.1
weakrefmethod==1.0.3
zipp==3.6.0
| name: python-cybox
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- distlib==0.3.9
- docutils==0.18.1
- filelock==3.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- jinja2==3.0.3
- lxml==5.3.1
- markupsafe==2.0.1
- mixbox==1.0.5
- nose==1.3.0
- ordered-set==4.0.2
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- py==1.11.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==1.3.1
- sphinx-rtd-theme==0.1.8
- tomli==1.2.3
- tox==1.6.1
- typing-extensions==4.1.1
- virtualenv==20.17.1
- weakrefmethod==1.0.3
- zipp==3.6.0
prefix: /opt/conda/envs/python-cybox
| [
"cybox/test/extensions/location/ciq_test.py::CIQAddressTests::test_can_load_extension"
]
| []
| []
| []
| BSD 3-Clause "New" or "Revised" License | 173 | [
"cybox/bindings/extensions/location/ciq_address_3_0.py"
]
| [
"cybox/bindings/extensions/location/ciq_address_3_0.py"
]
|
|
poliastro__poliastro-52 | ee3390ea90914c4be4048717652860b34413d490 | 2015-06-24 08:31:00 | f47ee2b0790a82b3a76e24794d2916deb8340d1b | diff --git a/.travis.yml b/.travis.yml
index 82f85935..6d65bda5 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -20,6 +20,7 @@ install:
- conda create -q -n test-environment python=$TRAVIS_PYTHON_VERSION
- source activate test-environment
- conda install numpy "numba>=0.18" "astropy>=1.0" matplotlib pytest pip coverage requests pyyaml scipy
+ - conda install jplephem -c poliastro
- pip install coveralls pytest-cov
script:
- py.test -vv
diff --git a/appveyor.yml b/appveyor.yml
index bef87b9e..a0d4b621 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -36,6 +36,7 @@ install:
# Install dependencies
- "conda install -q numpy numba>=0.18 astropy>=1.0 matplotlib pytest scipy"
+ - "conda install jplephem -c poliastro"
build: off
diff --git a/poliastro/ephem.py b/poliastro/ephem.py
index cba4b1d8..3aabcc7f 100644
--- a/poliastro/ephem.py
+++ b/poliastro/ephem.py
@@ -129,5 +129,5 @@ def planet_ephem(body, epoch, kernel=default_kernel):
"""
r, v = kernel[0, body].compute_and_differentiate(epoch.jd1, epoch.jd2)
r *= u.km
- v *= u.km / u.s
+ v *= u.km / u.day
return r, v
| Fix velocity units in ephem
See discussion at https://github.com/brandon-rhodes/python-jplephem/issues/11 | poliastro/poliastro | diff --git a/poliastro/tests/test_ephem.py b/poliastro/tests/test_ephem.py
new file mode 100644
index 00000000..ecda97fc
--- /dev/null
+++ b/poliastro/tests/test_ephem.py
@@ -0,0 +1,29 @@
+# coding: utf-8
+import numpy as np
+
+from astropy import time
+from astropy import units as u
+
+from poliastro import ephem
+
+
+class FakeKernel(object):
+ def __getitem__(self, index):
+ return FakeSegment()
+
+
+class FakeSegment(object):
+ def compute_and_differentiate(self, jd1, jd2=None):
+ r = np.array([1, 1, 1])
+ v = np.array([1, 1, 1])
+ return r, v
+
+
+def test_proper_velocity_units():
+ # Bug #49
+ _body = 0
+ _epoch = time.Time("2000-01-01 00:00")
+
+ r, v = ephem.planet_ephem(_body, _epoch, FakeKernel())
+
+ assert v.unit == u.km / u.day
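The test above pins the raw unit that `planet_ephem` now returns. For completeness, a minimal sketch, separate from the patch and with made-up numbers, of how a caller converts the km/day velocity that jplephem reports into km/s using astropy units:

```python
# A minimal sketch, not part of the patch; the numeric values are
# illustrative only. astropy applies the 86400 s/day factor for us.
from astropy import units as u

v = [1.0, 2.0, 3.0] * (u.km / u.day)
print(v.to(u.km / u.s))  # roughly [1.157e-05 2.315e-05 3.472e-05] km / s
```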
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 0
},
"num_modified_files": 3
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "numpy>=1.16.0 astropy>=1.0 numba>=0.18 matplotlib jplephem scipy",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gfortran"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | astropy @ file:///croot/astropy_1697468907928/work
Brotli @ file:///croot/brotli-split_1736182456865/work
contourpy @ file:///croot/contourpy_1738160616259/work
cycler @ file:///tmp/build/80754af9/cycler_1637851556182/work
exceptiongroup==1.2.2
fonttools @ file:///croot/fonttools_1737039080035/work
importlib_resources @ file:///croot/importlib_resources-suite_1720641103994/work
iniconfig==2.1.0
jplephem @ file:///home/conda/feedstock_root/build_artifacts/jplephem_1734349820189/work
kiwisolver @ file:///croot/kiwisolver_1672387140495/work
llvmlite @ file:///croot/llvmlite_1736366675558/work
matplotlib==3.9.2
numba @ file:///croot/numba_1738606613869/work
numpy @ file:///croot/numpy_and_numpy_base_1708638617955/work/dist/numpy-1.26.4-cp39-cp39-linux_x86_64.whl#sha256=b69ac3eb7538c5a224df429dc49031914fb977825ee007f2c77a13f7aa6cd769
packaging @ file:///croot/packaging_1734472117206/work
pillow @ file:///croot/pillow_1738010226202/work
pluggy==1.5.0
-e git+https://github.com/poliastro/poliastro.git@ee3390ea90914c4be4048717652860b34413d490#egg=poliastro
pyerfa @ file:///croot/pyerfa_1738082786199/work
pyparsing @ file:///croot/pyparsing_1731445506121/work
PyQt6==6.7.1
PyQt6_sip @ file:///croot/pyqt-split_1740498191142/work/pyqt_sip
pytest==8.3.5
python-dateutil @ file:///croot/python-dateutil_1716495738603/work
PyYAML @ file:///croot/pyyaml_1728657952215/work
scipy @ file:///croot/scipy_1733756309941/work/dist/scipy-1.13.1-cp39-cp39-linux_x86_64.whl#sha256=3b247b926209f2d9f719ebae39faf3ff891b2596150ed8f8349adfc3eb19441c
sip @ file:///croot/sip_1738856193618/work
six @ file:///tmp/build/80754af9/six_1644875935023/work
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
tornado @ file:///croot/tornado_1733960490606/work
unicodedata2 @ file:///croot/unicodedata2_1736541023050/work
zipp @ file:///croot/zipp_1732630741423/work
| name: poliastro
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- astropy=5.3.4=py39ha9d4c09_0
- blas=1.0=openblas
- brotli-python=1.0.9=py39h6a678d5_9
- bzip2=1.0.8=h5eee18b_6
- c-ares=1.19.1=h5eee18b_0
- ca-certificates=2025.2.25=h06a4308_0
- contourpy=1.2.1=py39hdb19cb5_1
- cycler=0.11.0=pyhd3eb1b0_0
- cyrus-sasl=2.1.28=h52b45da_1
- expat=2.6.4=h6a678d5_0
- fontconfig=2.14.1=h55d465d_3
- fonttools=4.55.3=py39h5eee18b_0
- freetype=2.12.1=h4a9f257_0
- icu=73.1=h6a678d5_0
- importlib_resources=6.4.0=py39h06a4308_0
- jpeg=9e=h5eee18b_3
- jplephem=2.21=pyh9b8db34_1
- kiwisolver=1.4.4=py39h6a678d5_0
- krb5=1.20.1=h143b758_1
- lcms2=2.16=hb9589c4_0
- ld_impl_linux-64=2.40=h12ee557_0
- lerc=4.0.0=h6a678d5_0
- libabseil=20250127.0=cxx17_h6a678d5_0
- libcups=2.4.2=h2d74bed_1
- libcurl=8.12.1=hc9e6f67_0
- libdeflate=1.22=h5eee18b_0
- libedit=3.1.20230828=h5eee18b_0
- libev=4.33=h7f8727e_1
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgfortran-ng=11.2.0=h00389a5_1
- libgfortran5=11.2.0=h1234567_1
- libglib=2.78.4=hdc74915_0
- libgomp=11.2.0=h1234567_1
- libiconv=1.16=h5eee18b_3
- libllvm14=14.0.6=hecde1de_4
- libnghttp2=1.57.0=h2d74bed_0
- libopenblas=0.3.21=h043d6bf_0
- libpng=1.6.39=h5eee18b_0
- libpq=17.4=hdbd6064_0
- libprotobuf=5.29.3=hc99497a_0
- libssh2=1.11.1=h251f7ec_0
- libstdcxx-ng=11.2.0=h1234567_1
- libtiff=4.5.1=hffd6297_1
- libuuid=1.41.5=h5eee18b_0
- libwebp-base=1.3.2=h5eee18b_1
- libxcb=1.15=h7f8727e_0
- libxkbcommon=1.0.1=h097e994_2
- libxml2=2.13.5=hfdd30dd_0
- llvmlite=0.43.0=py39h6a678d5_1
- lz4-c=1.9.4=h6a678d5_1
- matplotlib=3.9.2=py39h06a4308_1
- matplotlib-base=3.9.2=py39hbfdbfaf_1
- mysql=8.4.0=h721767e_2
- ncurses=6.4=h6a678d5_0
- numba=0.60.0=py39h6a678d5_1
- numpy=1.26.4=py39heeff2f4_0
- numpy-base=1.26.4=py39h8a23956_0
- openjpeg=2.5.2=he7f1fd0_0
- openldap=2.6.4=h42fbc30_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pcre2=10.42=hebb0a14_1
- pillow=11.1.0=py39hcea889d_0
- pip=25.0=py39h06a4308_0
- pybind11-abi=4=hd3eb1b0_1
- pyerfa=2.0.1.5=py39h5eee18b_0
- pyparsing=3.2.0=py39h06a4308_0
- pyqt=6.7.1=py39h6a678d5_0
- pyqt6-sip=13.9.1=py39h5eee18b_0
- python=3.9.21=he870216_1
- python-dateutil=2.9.0post0=py39h06a4308_2
- pyyaml=6.0.2=py39h5eee18b_0
- qtbase=6.7.3=hdaa5aa8_0
- qtdeclarative=6.7.3=h6a678d5_0
- qtsvg=6.7.3=he621ea3_0
- qttools=6.7.3=h80c7b02_0
- qtwebchannel=6.7.3=h6a678d5_0
- qtwebsockets=6.7.3=h6a678d5_0
- readline=8.2=h5eee18b_0
- scipy=1.13.1=py39heeff2f4_1
- setuptools=75.8.0=py39h06a4308_0
- sip=6.10.0=py39h6a678d5_0
- six=1.16.0=pyhd3eb1b0_1
- sqlite=3.45.3=h5eee18b_0
- tbb=2021.8.0=hdb19cb5_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tornado=6.4.2=py39h5eee18b_0
- tzdata=2025a=h04d1e81_0
- unicodedata2=15.1.0=py39h5eee18b_1
- wheel=0.45.1=py39h06a4308_0
- xcb-util-cursor=0.1.4=h5eee18b_0
- xz=5.6.4=h5eee18b_1
- yaml=0.2.5=h7b6447c_0
- zipp=3.21.0=py39h06a4308_0
- zlib=1.2.13=h5eee18b_1
- zstd=1.5.6=hc292b87_0
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- pluggy==1.5.0
- pytest==8.3.5
prefix: /opt/conda/envs/poliastro
| [
"poliastro/tests/test_ephem.py::test_proper_velocity_units"
]
| []
| []
| []
| MIT License | 174 | [
"appveyor.yml",
".travis.yml",
"poliastro/ephem.py"
]
| [
"appveyor.yml",
".travis.yml",
"poliastro/ephem.py"
]
|
|
mkdocs__mkdocs-668 | 98814de505c9d8f1849292372eff1c5ae492261c | 2015-06-27 22:50:14 | 3dfd95deae8379473714b346e61c1d63e957bb98 | landscape-bot: [](https://landscape.io/diff/187394)
Code quality remained the same when pulling **[a5ab2e1](https://github.com/d0ugal/mkdocs/commit/a5ab2e12e22a199617fcffd129d4762bfc6b712b) on d0ugal:symlink** into **[98814de](https://github.com/mkdocs/mkdocs/commit/98814de505c9d8f1849292372eff1c5ae492261c) on mkdocs:master**. | diff --git a/mkdocs/config/config_options.py b/mkdocs/config/config_options.py
index 38a61442..28ee62c8 100644
--- a/mkdocs/config/config_options.py
+++ b/mkdocs/config/config_options.py
@@ -332,6 +332,13 @@ class Extras(OptionallyRequired):
dirs.sort()
for filename in sorted(filenames):
fullpath = os.path.join(dirpath, filename)
+
+ # Some editors (namely Emacs) will create temporary symlinks
+ # for internal magic. We can just ignore these files.
+ if os.path.islink(fullpath):
+ if not os.path.exists(os.readlink(fullpath)):
+ continue
+
relpath = os.path.normpath(os.path.relpath(fullpath, docs_dir))
if self.file_match(relpath):
yield relpath
| Ignore broken symlinks in mkdocs serve.
What I am experiencing:
* When I am editing `index.md` in Emacs, Emacs creates files like:
```
➜ docs git:(master) ✗ ls -al .#*
lrwxrwxrwx 1 paulproteus paulproteus 36 Jun 17 17:24 .#index.md -> [email protected]:1434311808
```
* These files are Emacs' way of using symlinks to track which computer+process was Emacs-ing the file, so that in case of a crash, Emacs can figure out how to restore its state.
What I expect:
* When I edit `somethingelse.md` and press save, I expect the mkdocs livereload to reload the browser.
What I see instead:
```
INFO - Building documentation...
ERROR - file not found: /home/paulproteus/projects/sandstorm/docs/.#index.md
ERROR - Error building page .#index.md
[E 150617 17:22:21 ioloop:612] Exception in callback (3, <function null_wrapper at 0x7fc883190500>)
Traceback (most recent call last):
File "/home/paulproteus/.local/lib/python2.7/site-packages/tornado/ioloop.py", line 866, in start
handler_func(fd_obj, events)
File "/home/paulproteus/.local/lib/python2.7/site-packages/tornado/stack_context.py", line 275, in null_wrapper
return fn(*args, **kwargs)
File "/usr/lib/python2.7/dist-packages/pyinotify.py", line 1604, in handle_read
self.process_events()
File "/usr/lib/python2.7/dist-packages/pyinotify.py", line 1321, in process_events
self._default_proc_fun(revent)
File "/home/paulproteus/.local/lib/python2.7/site-packages/livereload/watcher.py", line 152, in inotify_event
self.callback()
File "/home/paulproteus/.local/lib/python2.7/site-packages/livereload/handlers.py", line 65, in poll_tasks
filepath, delay = cls.watcher.examine()
File "/home/paulproteus/.local/lib/python2.7/site-packages/livereload/watcher.py", line 72, in examine
func and func()
File "/usr/lib/python2.7/dist-packages/mkdocs/serve.py", line 74, in builder
build(config, live_server=True)
File "/usr/lib/python2.7/dist-packages/mkdocs/build.py", line 299, in build
build_pages(config)
File "/usr/lib/python2.7/dist-packages/mkdocs/build.py", line 259, in build_pages
dump_json)
File "/usr/lib/python2.7/dist-packages/mkdocs/build.py", line 171, in _build_page
input_content = io.open(input_path, 'r', encoding='utf-8').read()
IOError: [Errno 2] No such file or directory: '/home/paulproteus/projects/sandstorm/docs/.#index.md'
```
What I propose:
* If a "No such file or directory" error occurs, but the problem is a broken symlink, the `mkdocs` build should continue as if the file does not exist. Note that this arguably is a special case to handle Emacs' own weirdness; a different way to do it would be to look at the list of git-ignored files.
* Perhaps in general, `mkdocs` should issue a warning (not an error) on broken symlinks, gracefully ignoring them.
I'm open to a bunch of ideas. I wanted to file this in the hopes of sparking a discussion where the maintainers of mkdocs could express their opinion about the best way forward.
Thanks so much! Also hi! I'm twitter.com/asheeshlaroia and was chatting with a mkdocs developer earlier today. Seems like a great project! I learned about it via http://ericholscher.com/blog/2014/feb/27/how-i-judge-documentation-quality/ ! | mkdocs/mkdocs | diff --git a/mkdocs/tests/config/config_options_tests.py b/mkdocs/tests/config/config_options_tests.py
index b1e7bff7..d16f675b 100644
--- a/mkdocs/tests/config/config_options_tests.py
+++ b/mkdocs/tests/config/config_options_tests.py
@@ -1,6 +1,7 @@
from __future__ import unicode_literals
import os
+import tempfile
import unittest
from mkdocs import utils
@@ -251,6 +252,25 @@ class ExtrasTest(unittest.TestCase):
self.assertRaises(config_options.ValidationError,
option.validate, {})
+ def test_talk(self):
+
+ option = config_options.Extras(utils.is_markdown_file)
+
+ tmp_dir = tempfile.mkdtemp()
+
+ f1 = os.path.join(tmp_dir, 'file1.md')
+ f2 = os.path.join(tmp_dir, 'file2.md')
+
+ open(f1, 'a').close()
+
+ # symlink isn't available on Python 2 on Windows.
+ if hasattr(os, 'symlink'):
+ os.symlink('/path/that/doesnt/exist', f2)
+
+ files = list(option.walk_docs_dir(tmp_dir))
+
+ self.assertEqual(['file1.md', ], files)
+
class PagesTest(unittest.TestCase):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 0.14 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/project.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | click==8.1.8
exceptiongroup==1.2.2
ghp-import==2.1.0
importlib_metadata==8.6.1
iniconfig==2.1.0
Jinja2==3.1.6
livereload==2.7.1
Markdown==3.7
MarkupSafe==3.0.2
mergedeep==1.3.4
-e git+https://github.com/mkdocs/mkdocs.git@98814de505c9d8f1849292372eff1c5ae492261c#egg=mkdocs
mkdocs-bootstrap==0.2.0
mkdocs-bootswatch==0.5.0
mkdocs-get-deps==0.2.0
packaging==24.2
pathspec==0.12.1
platformdirs==4.3.7
pluggy==1.5.0
pytest==8.3.5
python-dateutil==2.9.0.post0
PyYAML==6.0.2
pyyaml_env_tag==0.1
six==1.17.0
tomli==2.2.1
tornado==6.4.2
watchdog==6.0.0
zipp==3.21.0
| name: mkdocs
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- click==8.1.8
- exceptiongroup==1.2.2
- ghp-import==2.1.0
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- jinja2==3.1.6
- livereload==2.7.1
- markdown==3.7
- markupsafe==3.0.2
- mergedeep==1.3.4
- mkdocs-bootstrap==0.2.0
- mkdocs-bootswatch==0.5.0
- mkdocs-get-deps==0.2.0
- packaging==24.2
- pathspec==0.12.1
- platformdirs==4.3.7
- pluggy==1.5.0
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- pyyaml==6.0.2
- pyyaml-env-tag==0.1
- six==1.17.0
- tomli==2.2.1
- tornado==6.4.2
- watchdog==6.0.0
- zipp==3.21.0
prefix: /opt/conda/envs/mkdocs
| [
"mkdocs/tests/config/config_options_tests.py::ExtrasTest::test_talk"
]
| []
| [
"mkdocs/tests/config/config_options_tests.py::OptionallyRequiredTest::test_default",
"mkdocs/tests/config/config_options_tests.py::OptionallyRequiredTest::test_empty",
"mkdocs/tests/config/config_options_tests.py::OptionallyRequiredTest::test_replace_default",
"mkdocs/tests/config/config_options_tests.py::OptionallyRequiredTest::test_required",
"mkdocs/tests/config/config_options_tests.py::OptionallyRequiredTest::test_required_no_default",
"mkdocs/tests/config/config_options_tests.py::TypeTest::test_length",
"mkdocs/tests/config/config_options_tests.py::TypeTest::test_multiple_types",
"mkdocs/tests/config/config_options_tests.py::TypeTest::test_single_type",
"mkdocs/tests/config/config_options_tests.py::URLTest::test_invalid",
"mkdocs/tests/config/config_options_tests.py::URLTest::test_invalid_url",
"mkdocs/tests/config/config_options_tests.py::URLTest::test_valid_url",
"mkdocs/tests/config/config_options_tests.py::RepoURLTest::test_repo_name_bitbucket",
"mkdocs/tests/config/config_options_tests.py::RepoURLTest::test_repo_name_custom",
"mkdocs/tests/config/config_options_tests.py::RepoURLTest::test_repo_name_github",
"mkdocs/tests/config/config_options_tests.py::DirTest::test_file",
"mkdocs/tests/config/config_options_tests.py::DirTest::test_incorrect_type_attribute_error",
"mkdocs/tests/config/config_options_tests.py::DirTest::test_incorrect_type_type_error",
"mkdocs/tests/config/config_options_tests.py::DirTest::test_missing_dir",
"mkdocs/tests/config/config_options_tests.py::DirTest::test_missing_dir_but_required",
"mkdocs/tests/config/config_options_tests.py::DirTest::test_valid_dir",
"mkdocs/tests/config/config_options_tests.py::SiteDirTest::test_doc_dir_in_site_dir",
"mkdocs/tests/config/config_options_tests.py::SiteDirTest::test_site_dir_in_docs_dir",
"mkdocs/tests/config/config_options_tests.py::ThemeTest::test_theme",
"mkdocs/tests/config/config_options_tests.py::ThemeTest::test_theme_invalid",
"mkdocs/tests/config/config_options_tests.py::ExtrasTest::test_empty",
"mkdocs/tests/config/config_options_tests.py::ExtrasTest::test_invalid",
"mkdocs/tests/config/config_options_tests.py::ExtrasTest::test_provided",
"mkdocs/tests/config/config_options_tests.py::PagesTest::test_invalid_config",
"mkdocs/tests/config/config_options_tests.py::PagesTest::test_invalid_type",
"mkdocs/tests/config/config_options_tests.py::PagesTest::test_provided",
"mkdocs/tests/config/config_options_tests.py::PagesTest::test_provided_dict",
"mkdocs/tests/config/config_options_tests.py::PagesTest::test_provided_empty",
"mkdocs/tests/config/config_options_tests.py::NumPagesTest::test_invalid_pages",
"mkdocs/tests/config/config_options_tests.py::NumPagesTest::test_many_pages",
"mkdocs/tests/config/config_options_tests.py::NumPagesTest::test_one_page",
"mkdocs/tests/config/config_options_tests.py::NumPagesTest::test_provided",
"mkdocs/tests/config/config_options_tests.py::PrivateTest::test_defined",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_builtins",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_builtins_config",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_configkey",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_duplicates",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_invalid_config_item",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_invalid_config_option",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_invalid_dict_item",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_list_dicts",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_mixed_list",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_none",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_not_list",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_simple_list"
]
| []
| BSD 2-Clause "Simplified" License | 176 | [
"mkdocs/config/config_options.py"
]
| [
"mkdocs/config/config_options.py"
]
|
google__yapf-164 | 22c52a696d787ac9e8a45ea70dc6e1cae9b398de | 2015-06-30 07:28:58 | 22c52a696d787ac9e8a45ea70dc6e1cae9b398de | diff --git a/yapf/yapflib/subtype_assigner.py b/yapf/yapflib/subtype_assigner.py
index f8c24cd..34391b3 100644
--- a/yapf/yapflib/subtype_assigner.py
+++ b/yapf/yapflib/subtype_assigner.py
@@ -73,7 +73,11 @@ class _SubtypeAssigner(pytree_visitor.PyTreeVisitor):
self._SetFirstLeafTokenSubtype(child,
format_token.Subtype.DICTIONARY_KEY)
elif last_was_colon:
- self._SetSubtypeRec(child, format_token.Subtype.DICTIONARY_VALUE)
+ if pytree_utils.NodeName(child) == 'power':
+ self._SetFirstLeafTokenSubtype(child,
+ format_token.Subtype.DICTIONARY_VALUE)
+ else:
+ self._SetSubtypeRec(child, format_token.Subtype.DICTIONARY_VALUE)
last_was_comma = isinstance(child, pytree.Leaf) and child.value == ','
last_was_colon = isinstance(child, pytree.Leaf) and child.value == ':'
self.Visit(child)
| Regression in formatting an argument expansion in a dictionary
`{a: b(*c)}` gets formatted to `{a: b( * c)}`
This regression occurred in f30c1d26df3449950d95982f17568d03fe4a361f | google/yapf | diff --git a/yapftests/reformatter_test.py b/yapftests/reformatter_test.py
index bd88849..7e35c8b 100644
--- a/yapftests/reformatter_test.py
+++ b/yapftests/reformatter_test.py
@@ -1217,6 +1217,11 @@ format_token.Subtype.NONE))
uwlines = _ParseAndUnwrap(code)
self.assertCodeEqual(code, reformatter.Reformat(uwlines))
+ def testFunctionCallInDict(self):
+ code = "a = {'a': b(c=d, **e)}\n"
+ uwlines = _ParseAndUnwrap(code)
+ self.assertCodeEqual(code, reformatter.Reformat(uwlines))
+
class BuganizerFixes(ReformatterTest):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_git_commit_hash"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 0.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
nose==1.3.7
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
-e git+https://github.com/google/yapf.git@22c52a696d787ac9e8a45ea70dc6e1cae9b398de#egg=yapf
| name: yapf
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- nose==1.3.7
prefix: /opt/conda/envs/yapf
| [
"yapftests/reformatter_test.py::BasicReformatterTest::testFunctionCallInDict"
]
| []
| [
"yapftests/reformatter_test.py::BasicReformatterTest::testBlankLinesAtEndOfFile",
"yapftests/reformatter_test.py::BasicReformatterTest::testBlankLinesBeforeDecorators",
"yapftests/reformatter_test.py::BasicReformatterTest::testBlankLinesBeforeFunctionsNotInColumnZero",
"yapftests/reformatter_test.py::BasicReformatterTest::testClosingBracketIndent",
"yapftests/reformatter_test.py::BasicReformatterTest::testClosingBracketsInlinedInCall",
"yapftests/reformatter_test.py::BasicReformatterTest::testCommentBeforeFuncDef",
"yapftests/reformatter_test.py::BasicReformatterTest::testCommentBetweenDecorators",
"yapftests/reformatter_test.py::BasicReformatterTest::testComments",
"yapftests/reformatter_test.py::BasicReformatterTest::testCommentsWithContinuationMarkers",
"yapftests/reformatter_test.py::BasicReformatterTest::testContinuationIndent",
"yapftests/reformatter_test.py::BasicReformatterTest::testContinuationMarkers",
"yapftests/reformatter_test.py::BasicReformatterTest::testDictSetGenerator",
"yapftests/reformatter_test.py::BasicReformatterTest::testDictionaryMakerFormatting",
"yapftests/reformatter_test.py::BasicReformatterTest::testDocstringAndMultilineComment",
"yapftests/reformatter_test.py::BasicReformatterTest::testDocstrings",
"yapftests/reformatter_test.py::BasicReformatterTest::testEmptyContainers",
"yapftests/reformatter_test.py::BasicReformatterTest::testEndingWhitespaceAfterSimpleStatement",
"yapftests/reformatter_test.py::BasicReformatterTest::testExcessCharacters",
"yapftests/reformatter_test.py::BasicReformatterTest::testExcessLineCountWithDefaultKeywords",
"yapftests/reformatter_test.py::BasicReformatterTest::testExpressionPenalties",
"yapftests/reformatter_test.py::BasicReformatterTest::testFunctionCallContinuationLine",
"yapftests/reformatter_test.py::BasicReformatterTest::testI18n",
"yapftests/reformatter_test.py::BasicReformatterTest::testI18nNonFormatting",
"yapftests/reformatter_test.py::BasicReformatterTest::testIfConditionalParens",
"yapftests/reformatter_test.py::BasicReformatterTest::testLineDepthOfSingleLineStatement",
"yapftests/reformatter_test.py::BasicReformatterTest::testLineWrapInForExpression",
"yapftests/reformatter_test.py::BasicReformatterTest::testListComprehension",
"yapftests/reformatter_test.py::BasicReformatterTest::testMatchingParenSplittingMatching",
"yapftests/reformatter_test.py::BasicReformatterTest::testMultilineComment",
"yapftests/reformatter_test.py::BasicReformatterTest::testMultilineCommentReformatted",
"yapftests/reformatter_test.py::BasicReformatterTest::testMultilineDocstringAndMultilineComment",
"yapftests/reformatter_test.py::BasicReformatterTest::testMultilineShebang",
"yapftests/reformatter_test.py::BasicReformatterTest::testMultilineString",
"yapftests/reformatter_test.py::BasicReformatterTest::testMultipleUgliness",
"yapftests/reformatter_test.py::BasicReformatterTest::testNoBreakOutsideOfBracket",
"yapftests/reformatter_test.py::BasicReformatterTest::testNoKeywordArgumentBreakage",
"yapftests/reformatter_test.py::BasicReformatterTest::testNoPenaltySplitting",
"yapftests/reformatter_test.py::BasicReformatterTest::testNoQueueSeletionInMiddleOfLine",
"yapftests/reformatter_test.py::BasicReformatterTest::testNoSpaceBetweenUnaryOpAndOpeningParen",
"yapftests/reformatter_test.py::BasicReformatterTest::testNoSpacesBetweenOpeningBracketAndStartingOperator",
"yapftests/reformatter_test.py::BasicReformatterTest::testNoSpacesBetweenSubscriptsAndCalls",
"yapftests/reformatter_test.py::BasicReformatterTest::testNoSplittingAroundTermOperators",
"yapftests/reformatter_test.py::BasicReformatterTest::testNoSplittingWithinSubscriptList",
"yapftests/reformatter_test.py::BasicReformatterTest::testOpeningAndClosingBrackets",
"yapftests/reformatter_test.py::BasicReformatterTest::testRelativeImportStatements",
"yapftests/reformatter_test.py::BasicReformatterTest::testRelaxArraySubscriptAffinity",
"yapftests/reformatter_test.py::BasicReformatterTest::testSimple",
"yapftests/reformatter_test.py::BasicReformatterTest::testSimpleFunctions",
"yapftests/reformatter_test.py::BasicReformatterTest::testSimpleFunctionsWithTrailingComments",
"yapftests/reformatter_test.py::BasicReformatterTest::testSimpleMultilineCode",
"yapftests/reformatter_test.py::BasicReformatterTest::testSimpleMultilineWithComments",
"yapftests/reformatter_test.py::BasicReformatterTest::testSingleComment",
"yapftests/reformatter_test.py::BasicReformatterTest::testSingleLineFunctions",
"yapftests/reformatter_test.py::BasicReformatterTest::testSingleLineIfStatements",
"yapftests/reformatter_test.py::BasicReformatterTest::testSingleLineList",
"yapftests/reformatter_test.py::BasicReformatterTest::testSpaceAfterNotOperator",
"yapftests/reformatter_test.py::BasicReformatterTest::testSplitListWithInterspersedComments",
"yapftests/reformatter_test.py::BasicReformatterTest::testSplitListWithTerminatingComma",
"yapftests/reformatter_test.py::BasicReformatterTest::testSplitStringsIfSurroundedByParens",
"yapftests/reformatter_test.py::BasicReformatterTest::testTrailerOnSingleLine",
"yapftests/reformatter_test.py::BasicReformatterTest::testTrailingCommaAndBracket",
"yapftests/reformatter_test.py::BasicReformatterTest::testTupleCommaBeforeLastParen",
"yapftests/reformatter_test.py::BasicReformatterTest::testUnaryNotOperator",
"yapftests/reformatter_test.py::BasicReformatterTest::testUnaryOpInDictionaryValue",
"yapftests/reformatter_test.py::BuganizerFixes::testB13900309",
"yapftests/reformatter_test.py::BuganizerFixes::testB14406499",
"yapftests/reformatter_test.py::BuganizerFixes::testB14468247",
"yapftests/reformatter_test.py::BuganizerFixes::testB15438132",
"yapftests/reformatter_test.py::BuganizerFixes::testB15542157",
"yapftests/reformatter_test.py::BuganizerFixes::testB15597568",
"yapftests/reformatter_test.py::BuganizerFixes::testB15697268",
"yapftests/reformatter_test.py::BuganizerFixes::testB15884241",
"yapftests/reformatter_test.py::BuganizerFixes::testB16572361",
"yapftests/reformatter_test.py::BuganizerFixes::testB16783631",
"yapftests/reformatter_test.py::BuganizerFixes::testB17011869",
"yapftests/reformatter_test.py::BuganizerFixes::testB17133019",
"yapftests/reformatter_test.py::BuganizerFixes::testB17489866",
"yapftests/reformatter_test.py::BuganizerFixes::testB17534869",
"yapftests/reformatter_test.py::BuganizerFixes::testB18255697",
"yapftests/reformatter_test.py::BuganizerFixes::testB18256666",
"yapftests/reformatter_test.py::BuganizerFixes::testB18256826",
"yapftests/reformatter_test.py::BuganizerFixes::testB18257115",
"yapftests/reformatter_test.py::BuganizerFixes::testB19073499",
"yapftests/reformatter_test.py::BuganizerFixes::testB19194420",
"yapftests/reformatter_test.py::BuganizerFixes::testB19287512",
"yapftests/reformatter_test.py::BuganizerFixes::testB19353268",
"yapftests/reformatter_test.py::BuganizerFixes::testB19372573",
"yapftests/reformatter_test.py::BuganizerFixes::testB19377034",
"yapftests/reformatter_test.py::BuganizerFixes::testB19547210",
"yapftests/reformatter_test.py::BuganizerFixes::testB19626808",
"yapftests/reformatter_test.py::BuganizerFixes::testB20073838",
"yapftests/reformatter_test.py::BuganizerFixes::testB20128830",
"yapftests/reformatter_test.py::BuganizerFixes::testB20562732",
"yapftests/reformatter_test.py::BuganizerFixes::testB20605036",
"yapftests/reformatter_test.py::BuganizerFixes::testB20813997",
"yapftests/reformatter_test.py::TestsForPEP8Style::testAlignClosingBracketWithVisualIndentation",
"yapftests/reformatter_test.py::TestsForPEP8Style::testContinuedNonOudentedLine",
"yapftests/reformatter_test.py::TestsForPEP8Style::testIndent4",
"yapftests/reformatter_test.py::TestsForPEP8Style::testNoBlankBetweenClassAndDef",
"yapftests/reformatter_test.py::TestsForPEP8Style::testSingleWhiteBeforeTrailingComment",
"yapftests/reformatter_test.py::TestsForPEP8Style::testSpaceBetweenEndingCommandAndClosingBracket",
"yapftests/reformatter_test.py::TestsForPEP8Style::testSplittingSemicolonStatements",
"yapftests/reformatter_test.py::TestsForPEP8Style::testWrappingPercentExpressions",
"yapftests/reformatter_test.py::TestingNotInParameters::test_notInParams",
"yapftests/reformatter_test.py::TestsForPython3Code::testAnnotations",
"yapftests/reformatter_test.py::TestsForPython3Code::testExecAsNonKeyword",
"yapftests/reformatter_test.py::TestsForPython3Code::testKeywordOnlyArgSpecifier"
]
| []
| Apache License 2.0 | 177 | [
"yapf/yapflib/subtype_assigner.py"
]
| [
"yapf/yapflib/subtype_assigner.py"
]
|
|
kevin1024__vcrpy-162 | d14888ccd87ea82ed4252958393a45eb05aea866 | 2015-07-01 08:51:50 | 7d175b0f91c0048eb0de258be8576b84b7a21f52 | IvanMalison: installs are failing with
error in vcrpy setup command: Invalid environment marker: python_version<=2.4
IvanMalison: pkg_resources.DistributionNotFound: contextlib2
graingert: I've arsed this up pretty bad. But I know how to fix this. Please bear with me 😻
IvanMalison: Hah no worries. Thanks for the PR. This issue was annoying me.
IvanMalison: closes #147 | diff --git a/.travis.yml b/.travis.yml
index 594b492..93fea8b 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -33,7 +33,7 @@ python:
- 3.4
- pypy
install:
-- pip install PyYAML pytest pytest-localserver --use-mirrors
+- pip install .
- if [ $WITH_LIB = "requests1.x" ] ; then pip install requests==1.2.3; fi
- if [ $WITH_LIB = "requests2.2" ] ; then pip install requests==2.2.1; fi
- if [ $WITH_LIB = "requests2.3" ] ; then pip install requests==2.3.0; fi
diff --git a/setup.py b/setup.py
index 1c6ec59..aeeb415 100644
--- a/setup.py
+++ b/setup.py
@@ -19,6 +19,7 @@ class PyTest(TestCommand):
errno = pytest.main(self.test_args)
sys.exit(errno)
+
setup(
name='vcrpy',
version='1.5.2',
@@ -31,8 +32,12 @@ setup(
author_email='[email protected]',
url='https://github.com/kevin1024/vcrpy',
packages=find_packages(exclude=("tests*",)),
- install_requires=['PyYAML', 'mock', 'six>=1.5', 'contextlib2',
- 'wrapt', 'backport_collections'],
+ install_requires=['PyYAML', 'wrapt', 'six>=1.5'],
+ extras_require = {
+ ':python_version in "2.4, 2.5, 2.6"':
+ ['contextlib2', 'backport_collections', 'mock'],
+ ':python_version in "2.7, 3.1, 3.2"': ['contextlib2', 'mock'],
+ },
license='MIT',
tests_require=['pytest', 'mock', 'pytest-localserver'],
cmdclass={'test': PyTest},
diff --git a/vcr/cassette.py b/vcr/cassette.py
index 77b5395..87d2598 100644
--- a/vcr/cassette.py
+++ b/vcr/cassette.py
@@ -2,14 +2,11 @@
import functools
import logging
-import contextlib2
+
import wrapt
-try:
- from collections import Counter
-except ImportError:
- from backport_collections import Counter
# Internal imports
+from .compat import contextlib, collections
from .errors import UnhandledHTTPRequestError
from .matchers import requests_match, uri, method
from .patch import CassettePatcherBuilder
@@ -43,7 +40,7 @@ class CassetteContextDecorator(object):
self.__finish = None
def _patch_generator(self, cassette):
- with contextlib2.ExitStack() as exit_stack:
+ with contextlib.ExitStack() as exit_stack:
for patcher in CassettePatcherBuilder(cassette).build():
exit_stack.enter_context(patcher)
log.debug('Entered context for cassette at {0}.'.format(cassette._path))
@@ -148,7 +145,7 @@ class Cassette(object):
# self.data is the list of (req, resp) tuples
self.data = []
- self.play_counts = Counter()
+ self.play_counts = collections.Counter()
self.dirty = False
self.rewound = False
diff --git a/vcr/compat.py b/vcr/compat.py
new file mode 100644
index 0000000..e76c68f
--- /dev/null
+++ b/vcr/compat.py
@@ -0,0 +1,18 @@
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+try:
+ import contextlib
+except ImportError:
+ import contextlib2 as contextlib
+else:
+ if not hasattr(contextlib, 'ExitStack'):
+ import contextlib2 as contextlib
+
+import collections
+if not hasattr(collections, 'Counter'):
+ import backport_collections as collections
+
+__all__ = ['mock', 'contextlib', 'collections']
diff --git a/vcr/config.py b/vcr/config.py
index fb03136..1faef3b 100644
--- a/vcr/config.py
+++ b/vcr/config.py
@@ -1,4 +1,3 @@
-import collections
import copy
import functools
import inspect
@@ -6,6 +5,7 @@ import os
import six
+from .compat import collections
from .cassette import Cassette
from .serializers import yamlserializer, jsonserializer
from .util import compose
diff --git a/vcr/filters.py b/vcr/filters.py
index 84f06c8..14159d0 100644
--- a/vcr/filters.py
+++ b/vcr/filters.py
@@ -1,12 +1,10 @@
from six import BytesIO, text_type
from six.moves.urllib.parse import urlparse, urlencode, urlunparse
-try:
- from collections import OrderedDict
-except ImportError:
- from backport_collections import OrderedDict
import copy
import json
+from .compat import collections
+
def remove_headers(request, headers_to_remove):
headers = copy.copy(request.headers)
@@ -40,7 +38,7 @@ def remove_post_data_parameters(request, post_data_parameters_to_remove):
del json_data[k]
request.body = json.dumps(json_data).encode('utf-8')
else:
- post_data = OrderedDict()
+ post_data = collections.OrderedDict()
if isinstance(request.body, text_type):
request.body = request.body.encode('utf-8')
diff --git a/vcr/patch.py b/vcr/patch.py
index 1d9edb5..caed9fa 100644
--- a/vcr/patch.py
+++ b/vcr/patch.py
@@ -2,9 +2,7 @@
import functools
import itertools
-import contextlib2
-import mock
-
+from .compat import contextlib, mock
from .stubs import VCRHTTPConnection, VCRHTTPSConnection
from six.moves import http_client as httplib
@@ -323,9 +321,9 @@ def reset_patchers():
_CertValidatingHTTPSConnection)
[email protected]
[email protected]
def force_reset():
- with contextlib2.ExitStack() as exit_stack:
+ with contextlib.ExitStack() as exit_stack:
for patcher in reset_patchers():
exit_stack.enter_context(patcher)
yield
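For context on the `extras_require` keys in the setup.py hunk above: they are the pre-PEP-508 spelling of conditional dependencies that the setuptools of the day understood. A hedged sketch of the same intent in the marker syntax modern setuptools accepts (an assumed modernization, not something the 2015 toolchain supported):

```python
# A sketch using modern PEP 508 environment markers; assumed syntax for
# illustration, not the declaration the original patch could rely on.
from setuptools import setup

setup(
    name='vcrpy',
    install_requires=[
        'PyYAML',
        'wrapt',
        'six>=1.5',
        # backports pulled in only where the stdlib lacks the feature:
        'mock; python_version < "3.3"',
        'contextlib2; python_version < "3.3"',
        'backport_collections; python_version < "2.7"',
    ],
)
```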
| Dependency that is only compatible/needed with/in python 2 in setup.py
This causes `pip install` to fail (at least with my py3.4 system). | kevin1024/vcrpy | diff --git a/tests/unit/test_cassettes.py b/tests/unit/test_cassettes.py
index 947ffa4..e7d8b7c 100644
--- a/tests/unit/test_cassettes.py
+++ b/tests/unit/test_cassettes.py
@@ -3,11 +3,10 @@ import inspect
import os
from six.moves import http_client as httplib
-import contextlib2
-import mock
import pytest
import yaml
+from vcr.compat import mock, contextlib
from vcr.cassette import Cassette
from vcr.errors import UnhandledHTTPRequestError
from vcr.patch import force_reset
@@ -158,7 +157,7 @@ def test_nesting_cassette_context_managers(*args):
second_response = copy.deepcopy(first_response)
second_response['body']['string'] = b'second_response'
- with contextlib2.ExitStack() as exit_stack:
+ with contextlib.ExitStack() as exit_stack:
first_cassette = exit_stack.enter_context(Cassette.use(path='test'))
exit_stack.enter_context(mock.patch.object(first_cassette, 'play_response',
return_value=first_response))
diff --git a/tests/unit/test_serialize.py b/tests/unit/test_serialize.py
index 5f2a9aa..41d97bd 100644
--- a/tests/unit/test_serialize.py
+++ b/tests/unit/test_serialize.py
@@ -1,6 +1,6 @@
-import mock
import pytest
+from vcr.compat import mock
from vcr.serialize import deserialize
from vcr.serializers import yamlserializer, jsonserializer
diff --git a/tests/unit/test_vcr.py b/tests/unit/test_vcr.py
index d58d96c..8b1de97 100644
--- a/tests/unit/test_vcr.py
+++ b/tests/unit/test_vcr.py
@@ -1,9 +1,9 @@
import os
-import mock
import pytest
from vcr import VCR, use_cassette
+from vcr.compat import mock
from vcr.request import Request
from vcr.stubs import VCRHTTPSConnection
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 1
},
"num_modified_files": 6
} | 1.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-localserver",
"mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | backport_collections==0.1
contextlib2==21.6.0
exceptiongroup==1.2.2
iniconfig==2.1.0
MarkupSafe==3.0.2
mock==5.2.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytest-localserver==0.9.0.post0
PyYAML==6.0.2
six==1.17.0
tomli==2.2.1
-e git+https://github.com/kevin1024/vcrpy.git@d14888ccd87ea82ed4252958393a45eb05aea866#egg=vcrpy
Werkzeug==3.1.3
wrapt==1.17.2
| name: vcrpy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- backport-collections==0.1
- contextlib2==21.6.0
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- markupsafe==3.0.2
- mock==5.2.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-localserver==0.9.0.post0
- pyyaml==6.0.2
- six==1.17.0
- tomli==2.2.1
- werkzeug==3.1.3
- wrapt==1.17.2
prefix: /opt/conda/envs/vcrpy
| [
"tests/unit/test_cassettes.py::test_cassette_load",
"tests/unit/test_cassettes.py::test_cassette_not_played",
"tests/unit/test_cassettes.py::test_cassette_append",
"tests/unit/test_cassettes.py::test_cassette_len",
"tests/unit/test_cassettes.py::test_cassette_contains",
"tests/unit/test_cassettes.py::test_cassette_responses_of",
"tests/unit/test_cassettes.py::test_cassette_get_missing_response",
"tests/unit/test_cassettes.py::test_cassette_cant_read_same_request_twice",
"tests/unit/test_cassettes.py::test_function_decorated_with_use_cassette_can_be_invoked_multiple_times",
"tests/unit/test_cassettes.py::test_arg_getter_functionality",
"tests/unit/test_cassettes.py::test_cassette_not_all_played",
"tests/unit/test_cassettes.py::test_cassette_all_played",
"tests/unit/test_cassettes.py::test_before_record_response",
"tests/unit/test_cassettes.py::test_nesting_cassette_context_managers",
"tests/unit/test_cassettes.py::test_nesting_context_managers_by_checking_references_of_http_connection",
"tests/unit/test_cassettes.py::test_custom_patchers",
"tests/unit/test_cassettes.py::test_decorated_functions_are_reentrant",
"tests/unit/test_cassettes.py::test_cassette_use_called_without_path_uses_function_to_generate_path",
"tests/unit/test_cassettes.py::test_path_transformer_with_function_path",
"tests/unit/test_cassettes.py::test_path_transformer_with_context_manager",
"tests/unit/test_cassettes.py::test_func_path_generator",
"tests/unit/test_serialize.py::test_deserialize_old_yaml_cassette",
"tests/unit/test_serialize.py::test_deserialize_old_json_cassette",
"tests/unit/test_serialize.py::test_deserialize_new_yaml_cassette",
"tests/unit/test_serialize.py::test_deserialize_new_json_cassette",
"tests/unit/test_serialize.py::test_serialize_constructs_UnicodeDecodeError",
"tests/unit/test_vcr.py::test_vcr_use_cassette",
"tests/unit/test_vcr.py::test_vcr_before_record_request_params",
"tests/unit/test_vcr.py::test_fixtures_with_use_cassette",
"tests/unit/test_vcr.py::test_custom_patchers",
"tests/unit/test_vcr.py::test_inject_cassette",
"tests/unit/test_vcr.py::test_with_current_defaults",
"tests/unit/test_vcr.py::test_cassette_library_dir_with_decoration_and_no_explicit_path",
"tests/unit/test_vcr.py::test_cassette_library_dir_with_decoration_and_explicit_path",
"tests/unit/test_vcr.py::test_cassette_library_dir_with_decoration_and_super_explicit_path",
"tests/unit/test_vcr.py::test_cassette_library_dir_with_path_transformer",
"tests/unit/test_vcr.py::test_use_cassette_with_no_extra_invocation",
"tests/unit/test_vcr.py::test_path_transformer",
"tests/unit/test_vcr.py::test_cassette_name_generator_defaults_to_using_module_function_defined_in",
"tests/unit/test_vcr.py::test_ensure_suffix"
]
| []
| []
| []
| MIT License | 178 | [
"vcr/config.py",
"setup.py",
"vcr/cassette.py",
".travis.yml",
"vcr/filters.py",
"vcr/patch.py",
"vcr/compat.py"
]
| [
"vcr/config.py",
"setup.py",
"vcr/cassette.py",
".travis.yml",
"vcr/filters.py",
"vcr/patch.py",
"vcr/compat.py"
]
|
praw-dev__praw-441 | c64e3f71841e8f0c996d42eb5dc9a91fc0c25dcb | 2015-07-02 19:21:13 | c64e3f71841e8f0c996d42eb5dc9a91fc0c25dcb | diff --git a/praw/objects.py b/praw/objects.py
index 4640a73a..505438ae 100755
--- a/praw/objects.py
+++ b/praw/objects.py
@@ -81,10 +81,26 @@ class RedditContentObject(object):
raise AttributeError('\'%s\' has no attribute \'%s\'' % (type(self),
attr))
+ def __getstate__(self):
+ """Needed for `pickle`.
+
+ Without this, pickle protocol version 0 will make HTTP requests
+ upon serialization, hence slowing it down significantly.
+ """
+ return self.__dict__
+
def __ne__(self, other):
"""Return whether the other instance differs from the current."""
return not self == other
+ def __reduce_ex__(self, protocol):
+ """Needed for `pickle`.
+
+ Without this, `pickle` protocol version 2 will make HTTP requests
+ upon serialization, hence slowing it down significantly.
+ """
+ return self.__reduce__()
+
def __setattr__(self, name, value):
"""Set the `name` attribute to `value."""
if value and name == 'subreddit':
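Why defining these two hooks helps: `pickle` probes an object for optional hooks such as `__getstate__` using ordinary attribute lookup, and a probe for any name the class does not define falls through to `__getattr__`, the method PRAW uses for lazy HTTP fetching. A minimal, PRAW-free sketch of the mechanism; the `print` stands in for the network round trip, and the exact probes vary by interpreter as noted in the comments:

```python
# Self-contained sketch; no PRAW involved.
import pickle


class Lazy(object):
    def __getattr__(self, name):
        # stands in for the HTTP request PRAW makes on a missing attribute
        print('expensive fetch triggered by lookup of %r' % name)
        raise AttributeError(name)


class Fixed(Lazy):
    def __getstate__(self):              # answers pickle's probe directly
        return self.__dict__

    def __reduce_ex__(self, protocol):   # keeps protocol 2 off __getattr__
        return self.__reduce__()


pickle.dumps(Lazy())   # fires the "expensive fetch" on interpreters where
                       # object.__getstate__ is absent (Py2, CPython < 3.11)
pickle.dumps(Fixed())  # the probes never reach __getattr__
```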
| Pickling Comment objects is slow
Test case:
```python
import pickle, praw
r = praw.Reddit('test')
comment = r.get_info(thing_id='t1_aaaa')
pickle.dumps(comment)
```
Looking at Wireshark, it seems to be caused by an HTTP request.
Good news: implementing `def __getstate__(self): return self.__dict__` in `praw.objects.RedditContentObject` fixes it.
Bad news: I'm not sure why, since `pickle` uses `__dict__` by default, [as per the docs](https://docs.python.org/2/library/pickle.html#object.__getstate__).
I'm not really familiar with the intricacies of `pickle`; any ideas? | praw-dev/praw | diff --git a/tests/cassettes/test_pickling_v0.json b/tests/cassettes/test_pickling_v0.json
new file mode 100644
index 00000000..784fdddf
--- /dev/null
+++ b/tests/cassettes/test_pickling_v0.json
@@ -0,0 +1,1 @@
+{"recorded_with": "betamax/0.4.2", "http_interactions": [{"response": {"body": {"base64_string": "H4sIAAAAAAAAAxzLy4oDIRBA0V+RWhvQKrXU78huGEK1D3ryMrS9C/3vIbO9h/uG6xxPyOoNbdvGNiGrn1+toMou//nZWr2s+/76Upf7bFrBY9RV5gpZwRxcirzqdIIhogTXBD07omZMXyQsS6JUHZFERl99AK2gjHH7a9+fyWAMrNFYfzJ8Mni2mC1lDNraUrm45LupyXPqYWHxyZYYKLEXF52lXgSO4/gAAAD//wMAkqq30coAAAA=", "encoding": "UTF-8"}, "status": {"message": "OK", "code": 200}, "url": "https://api.reddit.com/api/login/.json", "headers": {"x-xss-protection": ["1; mode=block"], "set-cookie": ["__cfduid=d39c55cd52ddf9bd97d92e1bfab8520cb1435864405; expires=Fri, 01-Jul-16 19:13:25 GMT; path=/; domain=.reddit.com; HttpOnly", "secure_session=; Domain=reddit.com; Max-Age=-1435864406; Path=/; expires=Thu, 01-Jan-1970 00:00:01 GMT; HttpOnly", "reddit_session=7302867%2C2015-07-02T12%3A13%3A26%2C11cd7c495f0d9579f6b7a591c863975a48413fca; Domain=reddit.com; Path=/; HttpOnly"], "server": ["cloudflare-nginx"], "date": ["Thu, 02 Jul 2015 19:13:26 GMT"], "x-content-type-options": ["nosniff"], "cache-control": ["private, no-cache", "no-cache"], "content-type": ["application/json; charset=UTF-8"], "x-moose": ["majestic"], "x-ua-compatible": ["IE=edge"], "pragma": ["no-cache"], "transfer-encoding": ["chunked"], "x-frame-options": ["SAMEORIGIN"], "connection": ["keep-alive"], "content-encoding": ["gzip"], "cf-ray": ["1ffcaaf8bc5f0893-FRA"]}}, "request": {"body": {"encoding": "utf-8", "string": "user=PyAPITestUser2&api_type=json&passwd=1111"}, "uri": "https://api.reddit.com/api/login/.json", "method": "POST", "headers": {"Content-Length": ["45"], "User-Agent": ["PRAW_test_suite PRAW/3.0.0 Python/2.7.8 Linux-3.16.0-41-generic-x86_64-with-Ubuntu-14.10-utopic"], "Content-Type": ["application/x-www-form-urlencoded"], "Accept": ["*/*"], "Connection": ["keep-alive"], "Accept-Encoding": ["gzip, deflate"]}}, "recorded_at": "2015-07-02T19:13:26"}, {"response": {"body": {"base64_string": 
"H4sIAFeNlVUC/+2cXW/juBWG/4qRi0WLDif8/pjFoiiKBVqgF3uxi17sFAY/x5o4tmPLySSD/e8lKdkj22nGjiXZSXPj2JIsvuQ576NDSvHXi6ti4i4+DC7+VSzKYvLp4t3gwulSx01fL66nbqQXo7T7Vgj96d5+MUZgKL2lKBDBsJHC0+ACkcFJL7hGhEKLJIM0ncmOirGb+0k8w+9f102VaKOVxdLMvXNFOSyq3WyISzbl6aBxMbkalkU59mnPr35RDv6u7ch/GGiI4K3TfP6AoKWSccWdFNByJgUTzljItVcBO8sxNyFok85n9GTi3dDcx9NNluNx3DT319NbPR7OvV5Mk9J6e266VkSGxFD9uVxL0styNJ2nfb/c/+2XfyZhvy38HFcHXPlF3FXOlz6ffzYu8oaLtHcZD4ttzabzMm37/T9x20Lf+tRQ0ONF+krVql0UMzx16Uuf4jDmI2D8oOdxWDe/UJ2v7sL6tE+JnOkYlvWIN/u3sNN5Gm2UzjCbzae3WwNmp5MybZ0vCj0uyrQnyTJTl95e/CN2ZrCYXvvBclLYqYux+tPHJYQuHpT/uj+nZnwM+EYfKrXDMNbFfGgXi6Ed60XqS92sm97lrq3aGo7K63Fq8Idx+aMrbgf5+J8+Xly7jxc/fCp/TNtn6c0eitKxl/ngj5P8Pp4wfcoDssrP1FidqXpWDMs4nusBG44K53Kir/oz0dc5aUs0bETSxhhV/UaUMCwgI/B9jmqz+6X/klsL06nR83XWLee5w6OynH24vLy7u3tfyXlvp9eX88stbZdx63UM8uKyCu5l2ji0yT6XDSnDZWlXcjhXtZzlLI11yoEIge18nSyvG5vS4a6ix7JYjHLnUsz++OPd4M30zzH9aNyH6eu8VHnXsa7/bVLcLP0g9fv+w+DugY+YkTdTob1lSgZivTcUGwe5olBR5QMWghBliefOK9gzEo6V2wMvqiTY5QVGZ8WLWs4bL07Ii8rCXfOizSKh9l+dcR8GxWgJvfA3DwVXJnAvkZZOEGwlwRRDITjEQZHoPsG8j2WmPw0wjhDcAzKqPNhFBjovZNRyzgcZ/y6uisHPMWHvB9NJLgktSa/KHocDKr/QnnFws/C4eOgDBzHnbhauuNIt8qC+Hsv57OZKjW6vrqDhFDNukIdaSKYFJBgxLuM1GQfOnSUMCaL1x8nHyV9+jrEe/Jry+l2DDXXWSfWICbpAxLP70AREhYj84Vu3OibIt9TZIAjEcU7AHxm8vQjiJ+/vortmMRz6/XT+6bLprr8W7ic8v6O35kG5e4GpDRA7wr0yxEMkkDMkCIWcMDxAQUNELM8m3wELxFLRWuUbWDoCC3/pYLGf/efZPJ4Cca0QMyTW89SpAKWkmmJOlPdWRGNygpjGSPknwNJb0fFs8edAlCpndony7GWPXolydqshr40ouYtdE6XZv5aAsp4IeITIA1oyPGXWECa1EopKmuaVGiGHjEcExQs/DNCoOBVA6jQQOUJwH5TIebBLCSxfAiVqlW+U6IQSq1LgJVFij1sOT1QWdV5BpHazv1U27CHz9DXEKv7bdEAEs93xOTc6CFGrPB86JA+sV0gdZZQFDVIHAaXxxTDEAWbMMeehcCYvtT2fGehesy89M4PHTmXU9cCMdf96YMYGJbovHfZQ1LX715Fsuh9LxqK18DPdf8CqZg5uY1VzuPJLsktySzJL0yuPLntGvZTzWu95cgBRgyxhBlgNHaDKIqA4MYAQaCHTTmAljuUAu/7cOwdoedUHB3Kq4sVdvn90LAg2p/3QW+09CUBKZgElOEaGYgIU1hRRK4njudUeuXC4wO4xUQd6FxP8uVOIgzARc7uJiZWdkpuSmZKXmlZ6AhO13vPExGstF+gsj07XmGizXNie1xuBkAkSA6phtCGDEGgRI6VDzLtguCMmE7x/ThwksQdSVLF+hBQvrKCo9Z4nKV5pQYFLm5+R7YUUdf+OJcUeZfyTixFVtlG6645WabGHzCYcTrIYsY7/Fj2ohOK5ixEH0aOtOoPSWu950iNAxBQMFiQqxsuF5sBIx4AMDFsRLxpOZ9McQY8vDyPaPz1Qhl7X9MipCh/u8sz5WHxsVvvCEWqDCAAJFSPjYwUolYHAe8uwoFALdtqHOfcR2AMmqkDvYuLZT1IcgomU201MrOyUa4xopuSlppWewMTZPVPx/1Bk1AsJXWOizSJju9ZXjFPuMAEkXmkA5UwCpWUAmGukgnLYhKyof04cJLF7UtSx3iUFe2EFRa33PEnxSgsKBIXsixSr/h1Lij3q/O9ORyglYtcdrdJiD5mnn46s4r9NDwbJI9O11unRWp2BaK33jR490mM1Q3hJ9Ni+iBuJrYeYphsPPIYG6RiawIA30gdsjSQmbACk/zpjH4mdk2IV611S4Oc+iXkiUtR6z5MUXGImJFQg/d8NoDTGWhOB0xRUhKCoItX0+NmkwIsHT7LS/kgxWaprO+mHFN/6dywp9riAf7fOQBQ+clOgVVrsIfPUdca3+G/QA8UUZ+qRZ9RapkeVEk16rFyWTJY8lizWdNjj9GjqfaNHr/RgN33Qo0rV8W1u7Fh8bK4qEq2185QAZq0H1OsYGQ4lIMojaOPkmMITPdh9gMAeMFEF+hFMdL/s2S4mznnZ87VionZu15hos8jYrvUVglRbxkCcmLtY6zMBFMMIWGWV8VYyaE/0tNZBErsnRR3rXVKIF1ZQ1HrPkxTICEsUckByqwHllAETcLwq8HiFsDpOTDU5lhRG5SWoPkkhy0mW3Qsp6v4dS4o96vynpyM52xjZdUertNhD5umnI6v4b9ODEt79TZMqJTZumtQuSyZLHksWazrsf9Jjrfc86WFZgFQ5BhiVsc4wggNNFQSKaI8QjntU/lGzY+ihru77pwcr+qBHlapmNGsDH1sPORAEOaUeQG0UoBJZoDWVQELMrWHYYnyiVc8DBPaAiSrQu5igvWAi5nYTEys7JTclMyUvNa30BCZqveeJiVdbZFQ/Yto1JtosMrZrfW4wlRoFoCWKBEfeAaO0AJoLwQ2U0NgTPa11kMQeSFH/YO0OKUgv05H2Copa73mS4rUWFFb29FD4t/4dS4o96vzvTkcowW/TkUb8t+lBOO6FHm3VGWu950kPYhhiQslYTkZwUIYJkDqEtNhtbEDeOnf0siefL3unh7nrpc7IqSo6+V0tQyzlJoLcMQcBFdYCGSgCsXA1hrF48aK5Auy/zDhAYPeYqAO9iwnUy3Qk5nYTEys7JTclMyUvNa30BCZqveeJiddaZNQLCV1jos0iY7vWhz54owgCiJtIcB4gMFYqYDQOkCMMEc6K+ufEQRJ7IEUV6x1SMNX9f7O3WVCs9J4nKV5pQSE+o+zeXkhR9+9YUuxR5393OkLYYz8J0yot9pB5+unIKv7b9MAUPTI+7dOjrTpjrbcVeuTcDqXPqb05UMaHKnurI/8LycOnWhVpAAA=", "encoding": "UTF-8"}, "status": {"message": "OK", "code": 200}, "url": 
"https://api.reddit.com/user/PyAPITestUser2/comments.json?t=all&sort=new", "headers": {"x-xss-protection": ["1; mode=block"], "cache-control": ["private, no-cache", "no-cache"], "content-length": ["2606"], "server": ["cloudflare-nginx"], "date": ["Thu, 02 Jul 2015 19:13:27 GMT"], "x-content-type-options": ["nosniff"], "x-ratelimit-reset": ["393"], "content-type": ["application/json; charset=UTF-8"], "x-reddit-tracking": ["https://pixel.redditmedia.com/pixel/of_destiny.png?v=y3Jhs4XJ5ugsyWdG9pnDnfGKSbeEgrfxPSdmstjKjWA%2BUzeRcc2LGH9Yml8p1SiJZyKNuVriYuO2wvi26Sdq9URNAtTXSZSa"], "x-moose": ["majestic"], "x-ua-compatible": ["IE=edge"], "vary": ["accept-encoding"], "pragma": ["no-cache"], "x-sup-id": ["http://www.reddit.com/sup.json#e4356386a7"], "x-ratelimit-used": ["1"], "x-frame-options": ["SAMEORIGIN"], "connection": ["keep-alive"], "content-encoding": ["gzip"], "x-ratelimit-remaining": ["299"], "cf-ray": ["1ffcaafdccd60893-FRA"]}}, "request": {"body": {"encoding": "utf-8", "string": ""}, "uri": "https://api.reddit.com/user/PyAPITestUser2/comments.json?t=all&sort=new", "method": "GET", "headers": {"Cookie": ["__cfduid=d39c55cd52ddf9bd97d92e1bfab8520cb1435864405; reddit_session=7302867%2C2015-07-02T12%3A13%3A26%2C11cd7c495f0d9579f6b7a591c863975a48413fca"], "Connection": ["keep-alive"], "User-Agent": ["PRAW_test_suite PRAW/3.0.0 Python/2.7.8 Linux-3.16.0-41-generic-x86_64-with-Ubuntu-14.10-utopic"], "Accept": ["*/*"], "Accept-Encoding": ["gzip, deflate"]}}, "recorded_at": "2015-07-02T19:13:27"}]}
\ No newline at end of file
diff --git a/tests/cassettes/test_pickling_v1.json b/tests/cassettes/test_pickling_v1.json
new file mode 100644
index 00000000..2b78d924
--- /dev/null
+++ b/tests/cassettes/test_pickling_v1.json
@@ -0,0 +1,1 @@
+{"recorded_with": "betamax/0.4.2", "http_interactions": [{"response": {"body": {"base64_string": "H4sIAAAAAAAAAxzLSW7DMAxA0asIXCsASYmazpFdURSyhtptEhWWVw189yLd/of/hK85HpDUE9q+j31CUm/vWkHNR/7Pj9bqx3ocPy/q+TabVnAfdc1zhaRg3rft1vDXfrpeqVcWH4SWUthxjjFybmhKw+ZNXshFLKAVlDG+t/b6vUEOzmtGkgv6C/KVOJFJHDVTFYk2SJfgoqm9CKJdfCfLjrAGQhRrBc7z/AMAAP//AwBlfaprygAAAA==", "encoding": "UTF-8"}, "status": {"message": "OK", "code": 200}, "url": "https://api.reddit.com/api/login/.json", "headers": {"x-xss-protection": ["1; mode=block"], "set-cookie": ["__cfduid=dea6ea5a6ed306c9a2fbe4aea3094d7ad1435864409; expires=Fri, 01-Jul-16 19:13:29 GMT; path=/; domain=.reddit.com; HttpOnly", "secure_session=; Domain=reddit.com; Max-Age=-1435864409; Path=/; expires=Thu, 01-Jan-1970 00:00:01 GMT; HttpOnly", "reddit_session=7302867%2C2015-07-02T12%3A13%3A29%2C21d559485f58693dfc5004b7f142610d81005445; Domain=reddit.com; Path=/; HttpOnly"], "server": ["cloudflare-nginx"], "date": ["Thu, 02 Jul 2015 19:13:29 GMT"], "x-content-type-options": ["nosniff"], "cache-control": ["private, no-cache", "no-cache"], "content-type": ["application/json; charset=UTF-8"], "x-moose": ["majestic"], "x-ua-compatible": ["IE=edge"], "pragma": ["no-cache"], "transfer-encoding": ["chunked"], "x-frame-options": ["SAMEORIGIN"], "connection": ["keep-alive"], "content-encoding": ["gzip"], "cf-ray": ["1ffcab0cfb8008b1-FRA"]}}, "request": {"body": {"encoding": "utf-8", "string": "user=PyAPITestUser2&api_type=json&passwd=1111"}, "uri": "https://api.reddit.com/api/login/.json", "method": "POST", "headers": {"Content-Length": ["45"], "User-Agent": ["PRAW_test_suite PRAW/3.0.0 Python/2.7.8 Linux-3.16.0-41-generic-x86_64-with-Ubuntu-14.10-utopic"], "Content-Type": ["application/x-www-form-urlencoded"], "Accept": ["*/*"], "Connection": ["keep-alive"], "Accept-Encoding": ["gzip, deflate"]}}, "recorded_at": "2015-07-02T19:13:29"}, {"response": {"body": {"base64_string": 
"H4sIAFqNlVUC/+2cy27jyBWGX0XwYpAgXe26X3owCIJggATIYhYzyGIcCHVtsS1LskTZbTfm3VNVpGRactySRVKy443bJmnWX3XO//GcEtvfzi6LiTv7NDj7V7Eoi8nnsw+DM6dLHQ99O7uaupFejNLp2WeJb9wdLqnlQmMpMGEYMW8QhlByZRkj3CIRT1sYDBMo3cmOirGb+0m8w+/f1kOV+dx6lMXSzL1zRTksqtNsiEs25emicTG5HJZFOfbpzK9+UQ7+ru3IfxpoiOCN03x+j6ClknHFnRTQciYFE85YyLVXATvLMTchaJPuZ/Rk4t3Q3MXbTZbjcTw091fTGz0ezr1eTJPS+ngeulZEhsRQ/aVcS9LLcjSdp3O/3P3tl38mYb8t/BxXF1z6RTxVzpc+3382LvKBs3R2GS+LY82m8zId+/0/8dhC3/g0UNDjRfqValS7KGZ46tIvfY7LmK+A8Qc9j8v6+Beq+9VTWN/2OZEzHcOyXvHm/BZ2Ok+rjdIdZrP59GZjwex0Uqaj80Whx0WZziRZZurSt2f/iJMZLKZXfrCcFHbqYqz+dLGE0MWL8r/uz2kYHwP+aA6V2mEY62I+tIvF0I71Is2lHtZNb/PUVmMNR+XVOA34w7j80RU3g3z9TxdnV+7i7IfP5Y/p+Cx9s4OidO15vvhikr+PN0w/5QVZ5WcarM5UPSuGZVzP9YINR4VzOdFX85noq5y0JRo2ImljjKp5IxodJCAj8GOOanP6pf+aRwvTqdHzddYt53nCo7KcfTo/v729/VjJ+WinV+fz8w1t5/HoVQzy4rwK7nk6OLTJPucNKcNlaVdyOFe1nOUsrXXKgQiBzXydLK8ah9LlrqLHsliM8uRSzP7448Pg3fQvMf1o3Ifp67xU+dShrv9tUlwv/SDN++7T4Paej5iR11OhvWVKBmK9NxQbB7miUFHlAxaCEGWJ584r2DMSDpXbAy+qJNjmBUYnxYtazjsvjsiLysJd86LNIqH2X51xnwbFaAm98Nf3BVcmcC+Rlk4QbCXBFEMhOMRBkeg+wbznGvnjAOMAwT0go8qDbWSg00JGLed0kPHv4rIY/BwT9m4wneSS0JL0VdnDcEDlV9ozDq4XHhf3feAg5tz1whWXukUe1M9jOZ9dX6rRzeUlNJxixg3yUAvJtIAkdp1cxmcyDpw7SxgSROuLycXkLz/HWA9+TXn9ocGGOuukesIEXSDixXNoAqJCRP7hYVodE+QhdR4RBOLYE/AnFm8ngvjJx9vorlkMh/44nX8+b7rrr4X7Cc9v6Y25V+5OYGoDxI5wrwzxEAnkDAlCIScMD1DQEBHLq42FTbBALBWtVb6DpSOw8NcOFvvFf5nN4y0Q1woxQ2I9T50KUEqqKeZEeW9FNCYniGmMlH8GLL0VHS8WfwpEqXJmmygv3vbolSgntxvy1oiSp9g1UZrzawko60bAI0Tu0ZLhKbOGMKmVUFTS1FdqhBwyHhEUH/wwQKNiK4DUcSBygOA+KJHzYJsSWL4GStQq3ynRCSVWpcBrosQOHzk8U1nUeQWR2s7+Vtmwg8zj1xCr+G/SARHMttfn1OggRK3ydOiQPLDeIXWUURY0SBMElMYvhiEOMGOOOQ+FM3mr7eXMQHeafe2ZGTxOKqOuB2as59cDMx5RovvSYQdFXbt/Hcmm+7FkLFoLv9D9e+xq5uA2djWHK78kuyS3JLM0vfLktmfUSzmv9Z4mBxA1yBJmgNXQAaosAooTAwiBFjLtBFbiUA6wqy+9c4CWl31wIKcqXtzmz48OBcHjth96q70nAUjJLKAEx8hQTIDCmiJqJXE8j9ojF/YX2D0m6kBvY4K/tIXYCxMxt5uYWNkpuSmZKXmpaaVnMFHrPU1MvNVygc7y6nSNiTbLhc2+3giETJAYUA2jDRmEQIsYKR1i3gXDHTGZ4P1zYi+JPZCiivUTpHhlBUWt9zRJ8UYLClza/I5sL6So53coKXYo45/djKiyjdJtd7RKix1kNuFwlM2Idfw36EElFC/djNiLHm3VGZTWek+THgEipmCwIFExPi40B0Y6BmRg2Ir40HA6m+YAeny9H9H+6YEy9LqmR05VeH+bO+dD8fG42heOUBtEAEioGBkfK0CpDATeW4YFhVqw477MuYvAHjBRBXobEy9+k2IfTKTcbmJiZadcY0QzJS81rfQMJk7unYr/hyKj3kjoGhNtFhmbtb5inHKHCSDxSQMoZxIoLQPAXCMVlMMmZEX9c2Ivid2Too71NinYKysoar2nSYo3WlAgKGRfpFjN71BS7FDnf7cdoZSIbXe0SosdZB6/HVnFf5MeDJIn2rXW6dFanYForfedHj3SY9UhvCZ6bD7EjcTWQ0zTBw88hgbpGJrAgDfSB2yNJCY8Akj/dcYuEjsnxSrW26TAL30T80ikqPWeJim4xExIqED6fzeA0hhrTQROLagIQVFFqvb4xaTAi3tPstL+SDFZqis76YcUD/M7lBQ7PMC/W2cgCp/4UKBVWuwg89h1xkP8H9EDxRRn6ol31FqmR5USTXqsXJZMljyWLNZ02NP0aOp9p0ev9GDXfdCjStXxTR7sUHw83lUkWmvnKQHMWg+o1zEyHEpAlEfQxuaYwiO92L2HwB4wUQX6CUx0v+3ZLiZOedvzrWKidm7XmGizyNis9RWCVFvGQGzMXaz1mQCKYQSsssp4Kxm0R3pbay+J3ZOijvU2KcQrKyhqvadJCmSEJQo5ILnVgHLKgAk4PhV4fEJYHRtTTQ4lhVF5C6pPUshykmX3Qop6foeSYoc6//l2JGcbI9vuaJUWO8g8fjuyiv8mPSjh3X9oUqXEow9NapclkyWPJYs1HfY/6bHWe5r0sCxAqhwDjMpYZxjBgaYKAkW0RwjHMyr/UbND6KEu7/qnByv6oEeVqmY0awMfGy85EAQ5pR5AbRSgElmgNZVAQsytYdhifKRdzz0E9oCJKtDbmKC9YCLmdhMTKzslNyUzJS81rfQMJmq9p4mJN1tkwPyJRdeYaLPI2Kz1ucFUahSAligSHHkHjNICaC4EN1BCY4/0ttZeEnsgRRXrbVKQXtqR9gqKWu9pkuKtFhRW9vRS+MP8DiXFDnX+d9sRSvB7O9KI/yY9CMe90KOtOmOt9zTpQQxDTCgZy8kIDsowAVKHkDa7jQ3IW+cO3vbk82Xv9DC3vdQZOVVFJ39XyxBLuYkgd8xBQIW1QAaKQCxcjWEsPrxorgD7LzP2ENg9JupAb2MC9dKOxNxuYmJlp+SmZKbkpaaVnsFErfc0MfFWi4x6I6FrTLRZZGzW+tAHbxRBAHETCc4DBMZKBYzGAXKEIcJZUf+c2EtiD6SoYr1FCqa6/9/sbRYUK72nSYo3WlCILyi7txdS1PM7lBQ71PnfbUcIe+pPwrRKix1kHr8dWcV/kx6YoifWp316tFVnrPW2Qo+c26H0ObUfL5Txocre6sr/Atr8huMVaQAA", "encoding": "UTF-8"}, "status": {"message": "OK", "code": 200}, "url": 
"https://api.reddit.com/user/PyAPITestUser2/comments.json?t=all&sort=new", "headers": {"x-xss-protection": ["1; mode=block"], "cache-control": ["private, no-cache", "no-cache"], "content-length": ["2607"], "server": ["cloudflare-nginx"], "date": ["Thu, 02 Jul 2015 19:13:30 GMT"], "x-content-type-options": ["nosniff"], "x-ratelimit-reset": ["390"], "content-type": ["application/json; charset=UTF-8"], "x-reddit-tracking": ["https://pixel.redditmedia.com/pixel/of_destiny.png?v=9MZApNR3IAPjbhJiKbVbvujo6sivs6GURrLX9KX%2FCvGsONQwEOkhfoAl3ER7yTY2748o3j55GADAHujR7EOrq7oEdFe%2FFsY9"], "x-moose": ["majestic"], "x-ua-compatible": ["IE=edge"], "vary": ["accept-encoding"], "pragma": ["no-cache"], "x-sup-id": ["http://www.reddit.com/sup.json#e4356386a7"], "x-ratelimit-used": ["2"], "x-frame-options": ["SAMEORIGIN"], "connection": ["keep-alive"], "content-encoding": ["gzip"], "x-ratelimit-remaining": ["298"], "cf-ray": ["1ffcab124bf608b1-FRA"]}}, "request": {"body": {"encoding": "utf-8", "string": ""}, "uri": "https://api.reddit.com/user/PyAPITestUser2/comments.json?t=all&sort=new", "method": "GET", "headers": {"Cookie": ["__cfduid=dea6ea5a6ed306c9a2fbe4aea3094d7ad1435864409; reddit_session=7302867%2C2015-07-02T12%3A13%3A29%2C21d559485f58693dfc5004b7f142610d81005445"], "Connection": ["keep-alive"], "User-Agent": ["PRAW_test_suite PRAW/3.0.0 Python/2.7.8 Linux-3.16.0-41-generic-x86_64-with-Ubuntu-14.10-utopic"], "Accept": ["*/*"], "Accept-Encoding": ["gzip, deflate"]}}, "recorded_at": "2015-07-02T19:13:30"}]}
\ No newline at end of file
diff --git a/tests/cassettes/test_pickling_v2.json b/tests/cassettes/test_pickling_v2.json
new file mode 100644
index 00000000..3a4f3110
--- /dev/null
+++ b/tests/cassettes/test_pickling_v2.json
@@ -0,0 +1,1 @@
+{"recorded_with": "betamax/0.4.2", "http_interactions": [{"response": {"body": {"base64_string": "H4sIAAAAAAAAAxzLy2rEMAxA0V8xWnvAkuL48R3dlVJkWyHz9OBkMwz59zLd3sN9w2XrD8jmDTpGHxtk8/1jDTTZ5T8/VNvvuu/PDy1y29QauPe2yrZCNvB6Tc9RxjpuhThh1EQ6YcEkpRL5ElRRQ5uLF6FSp+bBGqi9X8/6+QM7inOw5NCfXDg5+kLKyJnJcuJCsixLQsZJWqzOFU01xsTE4sIslZuPcBzHHwAAAP//AwAKbaoxygAAAA==", "encoding": "UTF-8"}, "status": {"message": "OK", "code": 200}, "url": "https://api.reddit.com/api/login/.json", "headers": {"x-xss-protection": ["1; mode=block"], "set-cookie": ["__cfduid=d49ca6cca210c0b0c94548e0d7457f5e81435864411; expires=Fri, 01-Jul-16 19:13:31 GMT; path=/; domain=.reddit.com; HttpOnly", "secure_session=; Domain=reddit.com; Max-Age=-1435864412; Path=/; expires=Thu, 01-Jan-1970 00:00:01 GMT; HttpOnly", "reddit_session=7302867%2C2015-07-02T12%3A13%3A32%2C393b2afff91314ad8c00be9c889323a076ac3d58; Domain=reddit.com; Path=/; HttpOnly"], "server": ["cloudflare-nginx"], "date": ["Thu, 02 Jul 2015 19:13:32 GMT"], "x-content-type-options": ["nosniff"], "cache-control": ["private, no-cache", "no-cache"], "content-type": ["application/json; charset=UTF-8"], "x-moose": ["majestic"], "x-ua-compatible": ["IE=edge"], "pragma": ["no-cache"], "transfer-encoding": ["chunked"], "x-frame-options": ["SAMEORIGIN"], "connection": ["keep-alive"], "content-encoding": ["gzip"], "cf-ray": ["1ffcab1d4ce40467-FRA"]}}, "request": {"body": {"encoding": "utf-8", "string": "user=PyAPITestUser2&api_type=json&passwd=1111"}, "uri": "https://api.reddit.com/api/login/.json", "method": "POST", "headers": {"Content-Length": ["45"], "User-Agent": ["PRAW_test_suite PRAW/3.0.0 Python/2.7.8 Linux-3.16.0-41-generic-x86_64-with-Ubuntu-14.10-utopic"], "Content-Type": ["application/x-www-form-urlencoded"], "Accept": ["*/*"], "Connection": ["keep-alive"], "Accept-Encoding": ["gzip, deflate"]}}, "recorded_at": "2015-07-02T19:13:32"}, {"response": {"body": {"base64_string": 
"H4sIAF2NlVUC/+2cW2/juBmG/4qRi0WLDic8H2axKIpigRboxV7sohc7hcHjWBPHdiw5mWSw/70kJXtkO83YsSw7aW5ykBTxJb/vffSRYvz14qqYuIsPg4t/FWVVTD5dvBtcOF3peOjrxfXUjXQ5SqevvpTVLUPlohJYOyMcQwZrLoRBKmiDscJeUuo0kp4z5LhId7KjYuzmfhLv8PvXVVMVWmulXJi5d66ohkV9mg1xxaY8XTQuJlfDqqjGPp351ZfV4O/ajvyHgYYI3jrN5w8IWioZV9xJAS1nUjDhjIVcexWwsxxzE6LGdD+jJxPvhuY+3m6yGI/jobm/nt7q8XDudTlNSpvjuelGERkSQ/XnaiVJL6rRdJ7O/XL/t1/+mYT9Vvo5ri+48mU8Vc0XPt9/Ni7ygYt0dhEvi23NpvMqHfv9P/FYqW99aijocZn+pG7VlsUMT136o09xGPMVMP6i53FY1/+gvl/ThdVtnxI50zEsqxFv96+003kabZTuMJvNp7cbA2ankyodnZeFHhdVOpNkmalLP178I3ZmUE6v/WAxKezUxVj96eMCQhcvyt/dn1MzPgZ8rQ+12mEY62I+tGU5tGNdpr40zbrpXe7asq3hqLoepwZ/GFc/uuJ2kK//6ePFtft48cOn6sd0fJZ+2EFRuvYyX/xxkn+ON0y/5QFZ5mdqrMlUPSuGVRzP1YANR4VzOdGX/Zno65y0FRq2ImljjOp+I0oYFpAR+D5Htd39yn/JrYXp1Oj5KusW89zhUVXNPlxe3t3dva/lvLfT68v55Ya2y3j0Oga5vKyDe5kODm2yz2VLynBR2aUczlUjZzFLY51yIEJgM18ni+vWoXS5q+mxKMpR7lyK2R9/vBu8mf45ph+N+zB9k5cqnzrU9b9NipuFH6R+338Y3D3wETPyZiq0t0zJQKz3hmLjIFcUKqp8wEIQoizx3HkFe0bCoXJ74EWdBNu8wOiseNHIeePFCXlRW/jYvOiySGj812Tch0ExWkAv/M1DwZUJ3EukpRMEW0kwxVAIDnFQJLpPMO+5Rv40wDhAcA/IqPNgGxnovJDRyDkfZPy7uCoGP8eEvR9MJ7kktCR9VfYwHFD5hfaMg5vS4+KhDxzEnLspXXGlO+RB8zyW89nNlRrdXl1Bwylm3CAPtZBMC0gwYlzGZzIOnDtLGBJE64+Tj5O//BxjPfg15fW7FhuarJPqERMcAxHP7kMbEDUi8i/funVkgnxLnTWCQBznBPyRwduJIH7y/i66axbDod9P558u2+76a+F+wvM7emselLsXmNoAsSPcK0M8RAI5Q4JQyAnDAxQ0RMTybPItsEAsFW1UvoHlSGDhLx0s9rP/PJvHWyCuFWKGxHqeOhWglFRTzIny3opoTE4Q0xgp/wRYeis6ni3+HIhS58w2UZ697NErUc5uNeS1ESV38dhEafevI6CsJgIeIfKAFgxPmTWESa2EopKmeaVGyCHjEUHxwQ8DNCpOBZA6DUQOENwHJXIebFMCy5dAiUblGyWOQollKfCSKLHDK4cnKosmryBS29nfKRt2kHn6GmIZ/006IILZ9vicGx2EaFSeDx2SB1YrpI4yyoIGqYOA0vjFMMQBZswx56FwJi+1PZ8Z6F6zLz0zg8dOZdT1wIxV/3pgxholjl867KDo2O5fRbLtfiwZi9bCz3T/HquaObitVc3h0i/JLsktySxtrzy67Bn1Us4bvefJAUQNsoQZYDV0gCqLgOLEAEKghUw7gVXe0XEIB9j15945QKurPjiQUxWXd/n90aEgWJ/2Q2+19yQAKZkFlOAYGYoJUFhTRK0kjudWe+TC/gKPj4km0NuY4M+dQuyFiZjbbUws7ZTclMyUvNS20hOYaPSeJyZea7lAZ3l0jo2JLsuFzXm9EQiZIDGgGkYbMgiBFjFSOsS8C4Y7YjLB++fEXhJ7IEUd60dI8cIKikbveZLilRYUuLJ5j2wvpGj6dygpdijjn1yMqLON0m13dEqLHWS24XCSxYhV/DfoQSUUz12M2IseXdUZlDZ6z5MeASKmYLAgUTE+LjQHRjoGZGDYivjQcDqb5gB6fHkY0f7pgTL0jk2PnKrw4S7PnA/Fx3q1LxyhNogAkFAxMj5WgFIZCLy3DAsKtWCn3cy5i8AeMFEHehsTz95JsQ8mUm63MbG0U64xopmSl9pWegITZ7en4v+hyGgWEo6NiS6LjM1aXzFOucMEkPikAZQzCZSWAWCukQrKYROyov45sZfE45OiifU2KdgLKygavedJildaUCAoZF+kWPbvUFLsUOd/dzpCKRHb7uiUFjvIPP10ZBn/TXowSB6ZrnVOj87qDEQbvW/06JEeyxnCS6LH5kPcSGw9xDS9eOAxNEjH0AQGvJE+YGskMWENIP3XGbtIPDoplrHeJgV+7k7ME5Gi0XuepOASMyGhAun/bgClMdaaCJymoCIERRWpp8fPJgUuHzzJSvsjxWShru2kH1J869+hpNjhAf7dOgNR+MhLgU5psYPMU9cZ3+K/Rg8UU5ypR/aodUyPOiXa9Fi6LJkseSxZrO2wx+nR1vtGj17pwW76oEedquPb3Nih+FhfVSRaa+cpAcxaD6jXMTIcSkCUR9DGyTGFJ9rYvYfAHjBRB/oRTBx/2bNbTJzzsudrxUTj3GNjossiY7PWVwhSbRkDcWLuYq3PBFAMI2CVVcZbyaA90W6tvSQenxRNrLdJIV5YQdHoPU9SICMsUcgBya0GlFMGTMDxqcDjE8LqODHV5FBSGJWXoPokhawmWXYvpGj6dygpdqjzn56O5GxjZNsdndJiB5mnn44s479JD0r48V+a1Cmx9tKkcVkyWfJYsljbYf+THiu950kPywKkyjHAqIx1hhEcaKogUER7hHA8o/KHmh1CD3V13z89WNEHPepUNaNZF/jY2ORAEOSUegC1UYBKZIHWVAIJMbeGYYvxiVY99xDYAybqQG9jgvaCiZjbbUws7ZTclMyUvNS20hOYaPSeJyZebZEB8xuLY2OiyyJjs9bnBlOpUQBaokhw5B0wSguQPrmVGyihsSfarbWXxB5IUcd6mxSkl+lIdwVFo/c8SfFaCwore9oU/q1/h5Jihzr/u9MRSvDbdKQV/016EI57oUdXdcZK73nSgxiGmFAylpMRHJRhAqQOIS12GxuQt84dvOzJ54ve6WHueqkzcqqKo3yuliGWchNB7piDgAprgQwUgVi4GsNYfHjRXAH2X2bsIfD4mGgCvY0J1Mt0JOZ2GxNLOyU3JTMlL7Wt9AQmGr3niYnXWmQ0CwnHxkSXRcZmrQ998EYRBBA3keA8QGCsVMBoHCBHGCKcFfXPib0k9kCKOtZbpGDq+P/N3mVBsdR7nqR4pQWF+Iyye3shRdO/Q0mxQ53/3ekIYY99JEyntNhB5umnI8v4b9IDU/TI+HRPj67qjJXeTuiRcztUPqf2+kAZH+rsra/8L3S6MKwVaQAA", "encoding": "UTF-8"}, "status": {"message": "OK", "code": 200}, "url": 
"https://api.reddit.com/user/PyAPITestUser2/comments.json?t=all&sort=new", "headers": {"x-xss-protection": ["1; mode=block"], "cache-control": ["private, no-cache", "no-cache"], "content-length": ["2607"], "server": ["cloudflare-nginx"], "date": ["Thu, 02 Jul 2015 19:13:33 GMT"], "x-content-type-options": ["nosniff"], "x-ratelimit-reset": ["387"], "content-type": ["application/json; charset=UTF-8"], "x-reddit-tracking": ["https://pixel.redditmedia.com/pixel/of_destiny.png?v=NI%2FS0Vy8geEeJ%2BBL7Qy5J2Q26m47mKxDD5uB2y%2B851CRfxV7GGUC7FAOqmbXqrgmuzZ%2BjeU5o6AO95wvK098lEadH8PZql7p"], "x-moose": ["majestic"], "x-ua-compatible": ["IE=edge"], "vary": ["accept-encoding"], "pragma": ["no-cache"], "x-sup-id": ["http://www.reddit.com/sup.json#e4356386a7"], "x-ratelimit-used": ["3"], "x-frame-options": ["SAMEORIGIN"], "connection": ["keep-alive"], "content-encoding": ["gzip"], "x-ratelimit-remaining": ["297"], "cf-ray": ["1ffcab26cdfa0467-FRA"]}}, "request": {"body": {"encoding": "utf-8", "string": ""}, "uri": "https://api.reddit.com/user/PyAPITestUser2/comments.json?t=all&sort=new", "method": "GET", "headers": {"Cookie": ["__cfduid=d49ca6cca210c0b0c94548e0d7457f5e81435864411; reddit_session=7302867%2C2015-07-02T12%3A13%3A32%2C393b2afff91314ad8c00be9c889323a076ac3d58"], "Connection": ["keep-alive"], "User-Agent": ["PRAW_test_suite PRAW/3.0.0 Python/2.7.8 Linux-3.16.0-41-generic-x86_64-with-Ubuntu-14.10-utopic"], "Accept": ["*/*"], "Accept-Encoding": ["gzip, deflate"]}}, "recorded_at": "2015-07-02T19:13:33"}]}
\ No newline at end of file
diff --git a/tests/cassettes/test_unpickle_comment.json b/tests/cassettes/test_unpickle_comment.json
deleted file mode 100644
index ce5e4d79..00000000
--- a/tests/cassettes/test_unpickle_comment.json
+++ /dev/null
@@ -1,1 +0,0 @@
-{"recorded_with": "betamax/0.4.2", "http_interactions": [{"recorded_at": "2015-06-15T13:50:36", "response": {"status": {"code": 200, "message": "OK"}, "headers": {"transfer-encoding": ["chunked"], "cache-control": ["private, no-cache", "no-cache"], "server": ["cloudflare-nginx"], "set-cookie": ["__cfduid=d28895e21c18fe156bbe302205f9b22ac1434376236; expires=Tue, 14-Jun-16 13:50:36 GMT; path=/; domain=.reddit.com; HttpOnly", "secure_session=; Domain=reddit.com; Max-Age=-1434376237; Path=/; expires=Thu, 01-Jan-1970 00:00:01 GMT; HttpOnly", "reddit_session=7302867%2C2015-06-15T06%3A50%3A37%2Cc0e114b1f45a1bc763984b34c83d31df49a92fed; Domain=reddit.com; Path=/; HttpOnly"], "x-moose": ["majestic"], "x-xss-protection": ["1; mode=block"], "connection": ["keep-alive"], "content-encoding": ["gzip"], "pragma": ["no-cache"], "x-ua-compatible": ["IE=edge"], "x-frame-options": ["SAMEORIGIN"], "cf-ray": ["1f6ebeb510a504a3-CDG"], "date": ["Mon, 15 Jun 2015 13:50:37 GMT"], "content-type": ["application/json; charset=UTF-8"], "x-content-type-options": ["nosniff"]}, "body": {"encoding": "UTF-8", "base64_string": "H4sIAAAAAAAAAxzLTWrDMBBA4auIWSug0c+MpXNkV0oZWaM6TRMV26GL4LuXdPs+3hO+tnGHYp6g6zrWDYp5e7cGmuzyn++q7WPZ958Xdfne1Bq4jbbItkAxcPt8CD+uv3zBPElGVidEnZPXWmtLVSmRsnOkVTwqR7AG5jGuF339HJyfiK13mE6OTpjOjkpyJbCdnSLGij0mwTozhTzFGuI8hRaw9Zgl+64NjuP4AwAA//8DABH3aj7KAAAA"}, "url": "https://api.reddit.com/api/login/.json"}, "request": {"headers": {"Accept-Encoding": ["gzip, deflate"], "Connection": ["keep-alive"], "Accept": ["*/*"], "Content-Length": ["45"], "User-Agent": ["PRAW_test_suite PRAW/3.0a1 Python/2.7.8 Linux-3.16.0-37-generic-x86_64-with-Ubuntu-14.10-utopic"], "Content-Type": ["application/x-www-form-urlencoded"]}, "body": {"encoding": "utf-8", "string": "passwd=1111&api_type=json&user=PyAPITestUser2"}, "uri": "https://api.reddit.com/api/login/.json", "method": "POST"}}, {"recorded_at": "2015-06-15T13:50:37", "response": {"status": {"code": 200, "message": "OK"}, "headers": {"x-ratelimit-remaining": ["299"], "content-type": ["application/json; charset=UTF-8"], "cache-control": ["private, no-cache", "no-cache"], "content-length": ["2574"], "server": ["cloudflare-nginx"], "x-content-type-options": ["nosniff"], "x-moose": ["majestic"], "x-ratelimit-used": ["1"], "x-frame-options": ["SAMEORIGIN"], "x-sup-id": ["http://www.reddit.com/sup.json#e4356386a7"], "x-xss-protection": ["1; mode=block"], "connection": ["keep-alive"], "content-encoding": ["gzip"], "pragma": ["no-cache"], "x-ua-compatible": ["IE=edge"], "x-reddit-tracking": ["https://pixel.redditmedia.com/pixel/of_destiny.png?v=HcLLGdCwZSCTj%2BYMBgG8Ln0%2FKHA5a%2FIoJVNJ%2Fv%2FXrv6TSUaosRpQ67%2Fyfcg6Z7i5Diz8FZevtuCHzUmagS6TUQ7G47Go4bcv"], "cf-ray": ["1f6ebeb9f0cd04a3-CDG"], "date": ["Mon, 15 Jun 2015 13:50:37 GMT"], "x-ratelimit-reset": ["563"], "vary": ["accept-encoding"]}, "body": {"encoding": "UTF-8", "base64_string": 
"H4sIAC3YflUC/+2cW2/bRhqG/4rgi6KLzcRzPqQoFotFgV1gL3rRYi/qhTAnRox1ikT5FPS/d2ZIKpTkOpJFUrLrG8cmafKd+b734Ts0oy8X1/nUXXwYXPw3Xxb59OPFu8GF04UOm75cTGZupJejuNsvVlO4yPESeWSVdAR7yz3VAmVQQwup4BZxCWVmtDeCIB7PZEf52C38NJzhty/rSxVo4yrLlVl45/JimJe72RAXbJZOMM6n18MiL8Y+7vlffp0PfpoWi/vBbDq4WkFoSfyqbDzW6OnUu6G5D4dOV+Nx2LTwk9mNHg8XXi9nUUW1PZ22uhoZEirvaLG+nF4Vo9ki7vv5/p8//+cXvyx+XfoFLg+49suwq1isfDr/fJynDRdx7yocFq41ny2KuO23/4dtS33j44UyPV7GXymvaj8vPc4f4i99DFOUjoDhB70IU7b5C+X5qiGsT/uUyLkOU76eTTQMF3P5tY67lna2iFOJ4inm88XsZmvG7CxMb9i6WOZ6nBdxT9RlZi5+e/HrNP+88oM47vsPA7mYf75Wo5vra2g4xYwb5KEWkmkBCUYs9IOiOOPcWcKQIDpp8KHUGyMsxzLMxjpfDO1yObRjvUyTeneXOmV2mwZeCxmOisk47v5uXPzg8ptBOv7Hq4uJu7r47mPxQ9w+j98cKzee6DKd6Wqavg9Xiz+lqazbNiqpGljP82ERSrGe6uEody71fz3YqZ6kXi7rUjeBDeUtJwVRArEikKP3qSGac1P4u3i1ZhOvFmkmRkUx/3B56afvb4NH5mGK9fvZ4uNl0yP/yN2PeHFLb8yDcvcCU5tB7Aj3yhAPkUDOkEwo5IThGRQ0k07wZNVK3XBV2FqhVLRSuJrH2sSGCrjY7v7patLYFA93JWdW+XKUxhsH8/vv7wZveNjGA3/peLCf/Kf5IpwCca0QM8QST53KoJRUU8yJ8t6K4DlOENMYKX81vZr+/adQ68EvsdXfnRAWh4tvwqLERfrh63h6oEnZM7s0IfDcaVIpfKNJVzRJQ+yaJs3xtQQTO5tMwhU+DDxC5AGtGJ4xawiTWglFJeXaK42QQ8YjgsL9HGbQKOY9UqcByBGC+yBEGcx3CIHluROiUvhGiE4IUUeAl0SIf4fBDJaziR+sprmdOf9h8H0sgAsHpX/d355IFFVfQaR2O79VLuwh8/TZoa7/NhkQwWx3fs6JDEJUCs+HDLH/B//SdhRKrR1llGUaxAECSsMXwxAHmDHHnIfCGX8cL9C9ZqlFe+QFD4NKmOuBF+vx9cCLDUJ0Hxn2UNS189eVbDofS8aCtfAznH97e/u+lPI+JKHLxeWWrssqHy0vy8Jexo1DG80yrL0SrRKdEo3S9MnlYwwIWinnldbzZACiBlnCDLAaOkCVRUBxYgAh0EKmncBKHMsANvnUOwNocd0HA1Kb4uWtaQMCm8t86K32nmRASmYBJThUhmICFNYUUSuJ4+mqPTLhcIHdI6Iq9C4i+HOWDQchIvR1ExG1laKTopGij5o2egIRldbzRMRrjQl0nmana0S0GRO21/FGIGQyiQHVMFiQQQi0CJXSWei7zHBHTKJ3/4w4SGIPlChr/QglXlCQqLSeJyVeaZDAhU1/ee6FEtX4jqXEHtH9yYcPZbdRuuuMVkmxh8wmGE7y8GFd/y1yUAnFcx4+HESOtvIFpZXW8yRHBhFTMLMgEjHcJjQHRjoGZMawFeFm4XQyzBHkuHsY0f7JgRLwuiZHalP4cJtWyseiYzPhC0eozUQGkFChMj4kP6kMBN5bhgWFWjC4wY7e4sUBAntARFnoXUQ8602JQxAR+7qJiNpKKVsEI0UfNW30BCLO7p2Jv0K4qB4cdI2INsPFdr5XjFPuwrKfhLsMoJxJoLTMAOYaqUw5bLKkqH9GHCSxe0pUtd6lBHtBQaLSep6UeKVBAkEh+6JEPb5jKbFHtv/mEoRSInad0Sop9pB5+iVIXf9tcjBIHlmitUqO1vIFopXWN3L0SI56VfCSyLF98zYSWw8xjX9g4KE0SIfSZAx4I32GrZHEZBvw6D9f7COxc0rUtd6lBH7OG5YnokSl9TwpwSVmQkIFsIAhR9JQZ00EjktOkWWKKlIuh59NCbx88CQp7Y8S05Wa2Gk/lPg6vmMpsceN+5v5AlH4yMP/Vkmxh8xT54uv9d8gBwotztQj75+1SI6yHZrkqB0WDRb9Fe3VdNfj5GhqfSNHr+Rgn/sgR9mm45t0sWPRsfkEkWitnacEMGs9oF6HynAoAVEeQRsWxBSe6GXtAwT2gIiy0I8gottHnO0i4pwfcb5WRFSu7RoRbYaL7XyvEKTaMgbCQtyFfM8EUAwjYJVVxlvJoD3Rm1gHSeyeElWtdykhXlCQqLSeJyWQEZYo5IDkVgPKKQMmw+FuwMOdweqwENXkWEoYlR439UkJWUyT7F4oUY3vWErske2fXoKkbmNk1xmtkmIPmadfgtT13yYHJbzbP46U7bDxx5HKYdFg0V/RXk13/Sk51lrPkxyWZZAqxwCjMuQLIzjQVEGgiPYI4bBHuWPJoa7v+ycHy/sgR9mmZjRvAx1bLzEQBDmlHkBtFKASWaA1lUBCzK1h2GJ8oiecBwjsARFloXcRQTtHROjrJiJqK0UnRSNFHzVt9AQiKq3niYhXGy5g+stE14hoM1xs53tuMJUaZUBLFOiNvANGaQE0F4IbKKGxJ3oT6yCJPVCirPUuJUjnS5D2gkSl9Twp8VqDhJU9vej9dXzHUmKPbP/NJQgl+G0J0qj/NjkIx52To618sdZ6nuQghiEmlAwRMkCDMkyA1FkWH2obmyFvnTv6ESdfrHonh7ntJV+kNhWdfB6WIZZyEyDumIOACmuBzCgCIbAaw1i4cdGU/PqPFwcI7B4RVaF3EYE6X4KEvm4iorZSdFI0UvRR00ZPIKLSep6IeK3honpw0DUi2gwX2/ke+swbRRBA3AR68wwCY6UCRuMMcoQhwklR/4w4SGIPlChrvUMJprr9H+ltBola63lS4pUGCfEJJef2QolqfMdSYo9s/80lCGGPfZxLq6TYQ+bplyB1/bfJgSl6ZH7aJUdb+WKt9TzJoUmGHQ/RAmGO438k40BRG3IklJo5IS1l6XWfI8hx9zC77Z0c+bwXcsQ2LW5uJmmOjkXHZsJXFCsBRRbyHnKAYiKAMpkHmBlnEDImY6f9jO59BHaPiKrQO4ggnYeL2NdNRNRWik6KRoo+atrozxFRaz1PRLzWcNHbp2i2Fy528j0nRGhPASc4viOnGVDCGwCxDs5kmUblZzH2z4iDJPZAid1PzKyc93KCRK31PCnxOoNEuLcX6Zp9UKIe37GU2CPbf3MJEu7mfNcZrZJiD5knX4Ks679JDi4JedancB9CjpbyRUNrK+RIfZ0VPrX15iQZn5WdWx75B7y/uSWyaAAA"}, "url": "https://api.reddit.com/user/PyAPITestUser2/comments.json?sort=new&t=all"}, "request": {"headers": 
{"Connection": ["keep-alive"], "User-Agent": ["PRAW_test_suite PRAW/3.0a1 Python/2.7.8 Linux-3.16.0-37-generic-x86_64-with-Ubuntu-14.10-utopic"], "Accept": ["*/*"], "Accept-Encoding": ["gzip, deflate"], "Cookie": ["reddit_session=7302867%2C2015-06-15T06%3A50%3A37%2Cc0e114b1f45a1bc763984b34c83d31df49a92fed; __cfduid=d28895e21c18fe156bbe302205f9b22ac1434376236"]}, "body": {"encoding": "utf-8", "string": ""}, "uri": "https://api.reddit.com/user/PyAPITestUser2/comments.json?sort=new&t=all", "method": "GET"}}, {"recorded_at": "2015-06-15T13:50:38", "response": {"status": {"code": 200, "message": "OK"}, "headers": {"x-ratelimit-remaining": ["298"], "transfer-encoding": ["chunked"], "cache-control": ["private, no-cache", "no-cache"], "x-moose": ["majestic"], "server": ["cloudflare-nginx"], "x-ratelimit-used": ["2"], "x-frame-options": ["SAMEORIGIN"], "x-xss-protection": ["1; mode=block"], "connection": ["keep-alive"], "content-encoding": ["gzip"], "pragma": ["no-cache"], "x-ua-compatible": ["IE=edge"], "x-reddit-tracking": ["https://pixel.redditmedia.com/pixel/of_destiny.png?v=6qsn21tVL7go5HvVpotLbf0tnpT2hujgxe4105fEz7G1t0%2BHh25pl%2FSGghZNuAZaGB%2FJKaxz67I%3D"], "cf-ray": ["1f6ebebe912c04a3-CDG"], "date": ["Mon, 15 Jun 2015 13:50:38 GMT"], "x-ratelimit-reset": ["562"], "content-type": ["application/json; charset=UTF-8"], "x-content-type-options": ["nosniff"]}, "body": {"encoding": "UTF-8", "base64_string": "H4sIAAAAAAAAA1yRwW7DIBBEfwVxtqrYxgn2rcfecmjPaANLvYqBCnCUNsq/V6AkjXodzQ5vhgs/kjd8Yjx3vGHcQAY+sQufISkHtPCJWVgSNox7cFic++/X/ds7pvyRMNYrSspGwhp0d+uIkLEobSfFth02cnjZNIzPZFDZGJyK4RByerr5DItROqIxVPVid8HMkOby8LYVLq1j+Nl1BzNYsYGulwjCgu7sCOM49LtejhalMJ0WW7krcDcQtWb9gGnFHabUDOZ/1YX8UR0hujJG25WU4Bz6/BDHhvFwwqhaySeW41rOKKlS4SmIavyfozbE8xdFyBQ8n5hfl+UGcsJIltAovOHcY+sHCU1nW9f2h3BWOqw+l42u118AAAD//wMAKxRkF8UBAAA="}, "url": "https://api.reddit.com/user/PyAPITestUser2/about/.json"}, "request": {"headers": {"Connection": ["keep-alive"], "User-Agent": ["PRAW_test_suite PRAW/3.0a1 Python/2.7.8 Linux-3.16.0-37-generic-x86_64-with-Ubuntu-14.10-utopic"], "Accept": ["*/*"], "Accept-Encoding": ["gzip, deflate"], "Cookie": ["reddit_session=7302867%2C2015-06-15T06%3A50%3A37%2Cc0e114b1f45a1bc763984b34c83d31df49a92fed; __cfduid=d28895e21c18fe156bbe302205f9b22ac1434376236"]}, "body": {"encoding": "utf-8", "string": ""}, "uri": "https://api.reddit.com/user/PyAPITestUser2/about/.json", "method": "GET"}}, {"recorded_at": "2015-06-15T13:50:38", "response": {"status": {"code": 200, "message": "OK"}, "headers": {"x-ratelimit-remaining": ["297"], "content-type": ["application/json; charset=UTF-8"], "cache-control": ["private, no-cache", "no-cache"], "content-length": ["682"], "server": ["cloudflare-nginx"], "x-content-type-options": ["nosniff"], "x-moose": ["majestic"], "x-ratelimit-used": ["3"], "x-frame-options": ["SAMEORIGIN"], "x-xss-protection": ["1; mode=block"], "connection": ["keep-alive"], "content-encoding": ["gzip"], "pragma": ["no-cache"], "x-ua-compatible": ["IE=edge"], "x-reddit-tracking": ["https://pixel.redditmedia.com/pixel/of_destiny.png?v=AqxVkrSRF3bN55ymSSdkwfD%2FLdA4QMcYwodHHwNYU4Pccch5s1XTILSk9LACd6ER3FcO3MQmUDmhY5Rn5rzZjrp3D2F%2F%2BvsO"], "cf-ray": ["1f6ebec1714204a3-CDG"], "date": ["Mon, 15 Jun 2015 13:50:38 GMT"], "x-ratelimit-reset": ["562"], "vary": ["accept-encoding"]}, "body": {"encoding": "UTF-8", "base64_string": 
"H4sIAC7YflUC/61UyU7cQBD9FccHTsx4XwaUAyIimgOLICdC1OqlPNOZ9kJ3e2BA/Hu6G5sxHJCQcrHsqvKrV/Wq6tnf8Ib5R56vM//Q8xnW2Hw9+wQ3DUjE65V1WpfqSc010vCo0VrXwtibXgjj6ZWNVMj9Y8EqLBQYB3fIsc7a3CKMcbRttOSk1600fi17GzuBHzMyrjqBd6jBNVibBMZMCO64CVPahqwBsz3NtdbdURCQuV73NVHz1x9qYBzPaVsH0c/0PF5dIX41u87l7cU5e7g0hPqL5RKj/vIpOl1GJ7ebEHV0/rdbOQ6gqOSd5m0zVu0fCH38bTbzbk7R5dmZN5sdrPSxNTK+9ajASn2/82t254/2zr782CN5BS3ytHpI6D0UjEZZXpQQVqQK0yIvUhpnxYLQjMa0giTJwoyFFiZwOHeNeze5RviRy8VAxfLWXAvXtGvXA+/kaun9Mk3z0oeUb7aPOfRJRXLDglR5SnHF4qLMU8DVgkAZFSRMynBB8pDFFo62QuBOAWIgQAMzGtY1NFpN1O56IjhFk47Z/NOyxT3Uuo8SuSuyKCxTVuRltFiYSqO8jG3hizJb4CysGKQljUqXu92CjMpPE/1Hab7O8avScDP/SPEnK8+wQc402bVhrEcRh6hPOvv1gZqstODNBglMYLLTmNK2N/oiTDXfWhLRvvNa4qridCLJQHio6ncShoeeefx5TWJ5EpB2WOJ4n9edkg95x13XGXq7G1QC1u6wREkcZ1lYFNncJPB76UQPZPDhNASuiZwBwmw6ogMS6rUl/wFtOHnvlRlvVt2a+vC7i+Vcyqi0hhoQNJgIR3JwDxuCFG0lIEem5o1lY5OZFgyc9a5zFb/29k0Wpexcj07c7KYXdN/TsbiXl38Stiz/ywUAAA=="}, "url": "https://api.reddit.com/r/reddit_api_test/about/.json"}, "request": {"headers": {"Connection": ["keep-alive"], "User-Agent": ["PRAW_test_suite PRAW/3.0a1 Python/2.7.8 Linux-3.16.0-37-generic-x86_64-with-Ubuntu-14.10-utopic"], "Accept": ["*/*"], "Accept-Encoding": ["gzip, deflate"], "Cookie": ["reddit_session=7302867%2C2015-06-15T06%3A50%3A37%2Cc0e114b1f45a1bc763984b34c83d31df49a92fed; __cfduid=d28895e21c18fe156bbe302205f9b22ac1434376236"]}, "body": {"encoding": "utf-8", "string": ""}, "uri": "https://api.reddit.com/r/reddit_api_test/about/.json", "method": "GET"}}]}
\ No newline at end of file
diff --git a/tests/test_comments.py b/tests/test_comments.py
index e3c3aa27..2253f27e 100644
--- a/tests/test_comments.py
+++ b/tests/test_comments.py
@@ -2,6 +2,7 @@
from __future__ import print_function, unicode_literals
import pickle
+import mock
from praw import helpers
from praw.objects import Comment, MoreComments
from .helper import PRAWTest, betamax
@@ -98,10 +99,24 @@ class CommentTest(PRAWTest):
lambda item: isinstance(item, Comment))
self.assertEqual(comment._replies, None)
+ def _test_pickling(self, protocol):
+ comment = next(self.r.user.get_comments())
+ with mock.patch('praw.BaseReddit.request_json') as request_json_func:
+ unpickled_comment = pickle.loads(pickle.dumps(comment, protocol))
+ self.assertEqual(comment, unpickled_comment)
+ self.assertEqual(request_json_func.called, 0)
+
@betamax()
- def test_unpickle_comment(self):
- item = next(self.r.user.get_comments())
- self.assertEqual(item, pickle.loads(pickle.dumps(item)))
+ def test_pickling_v0(self):
+ self._test_pickling(0)
+
+ @betamax()
+ def test_pickling_v1(self):
+ self._test_pickling(1)
+
+ @betamax()
+ def test_pickling_v2(self):
+ self._test_pickling(2)
class MoreCommentsTest(PRAWTest):
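As a side note on the test helper above, the assert-no-network-on-unpickle pattern can be sketched in isolation. Everything below is hypothetical stand-in code (only `pickle` and `mock` are real libraries), not PRAW's API:

```python
import pickle
from unittest import mock  # plain `import mock` on Python 2, as the test above does


def fetch():  # hypothetical stand-in for praw.BaseReddit.request_json
    raise RuntimeError("network access during unpickling!")


class Lazy(object):
    def __init__(self):
        self.value = 42


obj = Lazy()
for protocol in (0, 1, 2):
    # Patch the would-be network call and check it is never invoked while
    # the object round-trips through this pickle protocol.
    # (Run as a script so '__main__.fetch' resolves.)
    with mock.patch('__main__.fetch') as fetch_func:
        restored = pickle.loads(pickle.dumps(obj, protocol))
    assert restored.value == obj.value
    assert not fetch_func.called
```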
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 1
} | 3.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"betamax>=0.4.2",
"betamax-matchers>=0.2.0",
"flake8",
"mock>=1.0.0",
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | betamax==0.9.0
betamax-matchers==0.4.0
certifi==2025.1.31
charset-normalizer==3.4.1
coverage==7.8.0
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
execnet==2.1.1
flake8==7.2.0
idna==3.10
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
mccabe==0.7.0
mock==5.2.0
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
-e git+https://github.com/praw-dev/praw.git@c64e3f71841e8f0c996d42eb5dc9a91fc0c25dcb#egg=praw
pycodestyle==2.13.0
pyflakes==3.3.1
pytest @ file:///croot/pytest_1738938843180/work
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
requests==2.32.3
requests-toolbelt==1.0.0
six==1.17.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
typing_extensions==4.13.0
update-checker==0.18.0
urllib3==2.3.0
| name: praw
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- betamax==0.9.0
- betamax-matchers==0.4.0
- certifi==2025.1.31
- charset-normalizer==3.4.1
- coverage==7.8.0
- execnet==2.1.1
- flake8==7.2.0
- idna==3.10
- mccabe==0.7.0
- mock==5.2.0
- pycodestyle==2.13.0
- pyflakes==3.3.1
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- requests==2.32.3
- requests-toolbelt==1.0.0
- six==1.17.0
- typing-extensions==4.13.0
- update-checker==0.18.0
- urllib3==2.3.0
prefix: /opt/conda/envs/praw
| [
"tests/test_comments.py::CommentTest::test_pickling_v0",
"tests/test_comments.py::CommentTest::test_pickling_v1",
"tests/test_comments.py::CommentTest::test_pickling_v2"
]
| []
| [
"tests/test_comments.py::CommentTest::test_add_comment",
"tests/test_comments.py::CommentTest::test_add_reply",
"tests/test_comments.py::CommentTest::test_edit",
"tests/test_comments.py::CommentTest::test_front_page_comment_replies_are_none",
"tests/test_comments.py::CommentTest::test_get_comments_permalink",
"tests/test_comments.py::CommentTest::test_inbox_comment_permalink",
"tests/test_comments.py::CommentTest::test_inbox_comment_replies_are_none",
"tests/test_comments.py::CommentTest::test_save_comment",
"tests/test_comments.py::CommentTest::test_spambox_comments_replies_are_none",
"tests/test_comments.py::CommentTest::test_unicode_comment",
"tests/test_comments.py::CommentTest::test_user_comment_permalink",
"tests/test_comments.py::CommentTest::test_user_comment_replies_are_none",
"tests/test_comments.py::MoreCommentsTest::test_all_comments",
"tests/test_comments.py::MoreCommentsTest::test_comments_method"
]
| []
| BSD 2-Clause "Simplified" License | 180 | [
"praw/objects.py"
]
| [
"praw/objects.py"
]
|
|
mattboyer__git-guilt-34 | 58f92d3e37a115596a890ad3e2fe674636c682ee | 2015-07-02 20:15:01 | 58f92d3e37a115596a890ad3e2fe674636c682ee | diff --git a/git_guilt/guilt.py b/git_guilt/guilt.py
index 23a748a..3b400c5 100644
--- a/git_guilt/guilt.py
+++ b/git_guilt/guilt.py
@@ -78,13 +78,14 @@ class GitRunner(object):
raise GitError("Malformed Git version")
raw_version = self.run_git(GitRunner._version_args)
- if not (raw_version and
- 1 == len(raw_version) and
- raw_version[0].startswith('git version')
- ):
- raise GitError("Couldn't determine Git version %s" % raw_version)
+ version_re = re.compile(r'^git version (\d+.\d+.\d+)')
- return version_string_to_tuple(raw_version[0].split()[-1])
+ if raw_version and 1 == len(raw_version):
+ match = version_re.match(raw_version[0])
+ if match:
+ return version_string_to_tuple(match.group(1))
+
+ raise GitError("Couldn't determine Git version %s" % raw_version)
def _get_git_root(self):
# We should probably go beyond just finding the root dir for the Git
| Git version detection fails on MacOS
Here's what a friendly user had to report:
> my MACBOOK running latest 10.10.3 comes preinstalled with
```
Laurences-MacBook-Pro:security Laurence$ git version
git version 2.3.2 (Apple Git-55)
```
The parsing code that consumes the output of `git version` should be made more robust. | mattboyer/git-guilt | diff --git a/test/test_guilt.py b/test/test_guilt.py
index 1433e33..3abda65 100644
--- a/test/test_guilt.py
+++ b/test/test_guilt.py
@@ -306,6 +306,27 @@ class GitRunnerTestCase(TestCase):
)
mock_process.reset_mock()
+ @patch('git_guilt.guilt.subprocess.Popen')
+ def test_mac_version(self, mock_process):
+ mock_process.return_value.communicate = Mock(
+ return_value=(b'git version 2.3.2 (Apple Git-55)', None)
+ )
+ mock_process.return_value.returncode = 0
+ mock_process.return_value.wait = \
+ Mock(return_value=None)
+
+ version_tuple = self.runner._get_git_version()
+
+ mock_process.assert_called_once_with(
+ ['nosuchgit', '--version'],
+ cwd='/my/arbitrary/path',
+ stderr=-1,
+ stdout=-1
+ )
+ mock_process.reset_mock()
+
+ self.assertEquals((2,3,2), version_tuple)
+
def test_version_comparison(self):
self.assertEquals((1, 0, 0), self.runner.version)
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 0.30 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.4",
"reqs_path": [
"requirements-3.4.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
astroid==2.11.7
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
coveralls==3.3.1
dill==0.3.4
docopt==0.6.2
docutils==0.18.1
-e git+https://github.com/mattboyer/git-guilt.git@58f92d3e37a115596a890ad3e2fe674636c682ee#egg=git_guilt
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
isort==5.10.1
Jinja2==3.0.3
lazy-object-proxy==1.7.1
MarkupSafe==2.0.1
mccabe==0.7.0
mock==5.2.0
nose==1.3.7
packaging==21.3
pep8==1.7.1
platformdirs==2.4.0
pluggy==1.0.0
py==1.11.0
Pygments==2.14.0
pylint==2.13.9
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
pytz==2025.2
requests==2.27.1
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-argparse==0.3.2
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
tomli==1.2.3
typed-ast==1.5.5
typing_extensions==4.1.1
urllib3==1.26.20
wrapt==1.16.0
zipp==3.6.0
| name: git-guilt
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- argparse==1.4.0
- astroid==2.11.7
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- coverage==6.2
- coveralls==3.3.1
- dill==0.3.4
- docopt==0.6.2
- docutils==0.18.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- isort==5.10.1
- jinja2==3.0.3
- lazy-object-proxy==1.7.1
- markupsafe==2.0.1
- mccabe==0.7.0
- mock==5.2.0
- nose==1.3.7
- packaging==21.3
- pep8==1.7.1
- platformdirs==2.4.0
- pluggy==1.0.0
- py==1.11.0
- pygments==2.14.0
- pylint==2.13.9
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- pytz==2025.2
- requests==2.27.1
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-argparse==0.3.2
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- typed-ast==1.5.5
- typing-extensions==4.1.1
- urllib3==1.26.20
- wrapt==1.16.0
- zipp==3.6.0
prefix: /opt/conda/envs/git-guilt
| [
"test/test_guilt.py::GitRunnerTestCase::test_mac_version"
]
| []
| [
"test/test_guilt.py::DeltaTestCase::test_comparison",
"test/test_guilt.py::DeltaTestCase::test_eq",
"test/test_guilt.py::DeltaTestCase::test_repr",
"test/test_guilt.py::BinaryDeltaTestCase::test_comparison",
"test/test_guilt.py::BinaryDeltaTestCase::test_eq",
"test/test_guilt.py::BinaryDeltaTestCase::test_repr",
"test/test_guilt.py::ArgTestCase::test_bad_args",
"test/test_guilt.py::ArgTestCase::test_help",
"test/test_guilt.py::GitRunnerTestCase::test_get_delta_files",
"test/test_guilt.py::GitRunnerTestCase::test_get_delta_no_files",
"test/test_guilt.py::GitRunnerTestCase::test_get_git_root_exception",
"test/test_guilt.py::GitRunnerTestCase::test_populate_rev_tree",
"test/test_guilt.py::GitRunnerTestCase::test_run_git",
"test/test_guilt.py::GitRunnerTestCase::test_run_git_cwd",
"test/test_guilt.py::GitRunnerTestCase::test_run_git_exception",
"test/test_guilt.py::GitRunnerTestCase::test_run_git_no_output_error",
"test/test_guilt.py::GitRunnerTestCase::test_run_git_no_output_no_error",
"test/test_guilt.py::GitRunnerTestCase::test_run_git_non_zerp",
"test/test_guilt.py::GitRunnerTestCase::test_run_git_stderr",
"test/test_guilt.py::GitRunnerTestCase::test_version_comparison",
"test/test_guilt.py::GitRunnerTestCase::test_version_retrieval",
"test/test_guilt.py::TextBlameTests::test_blame_locs",
"test/test_guilt.py::TextBlameTests::test_blame_locs_bad_encoding",
"test/test_guilt.py::TextBlameTests::test_blame_locs_empty_file",
"test/test_guilt.py::TextBlameTests::test_blame_locs_exception",
"test/test_guilt.py::TextBlameTests::test_blame_locs_file_missing",
"test/test_guilt.py::TextBlameTests::test_text_blame_repr",
"test/test_guilt.py::BinaryBlameTests::test_bin_blame_repr",
"test/test_guilt.py::BinaryBlameTests::test_blame_bytes",
"test/test_guilt.py::BinaryBlameTests::test_blame_bytes_empty_file",
"test/test_guilt.py::BinaryBlameTests::test_blame_bytes_file_missing",
"test/test_guilt.py::BinaryBlameTests::test_blame_bytes_locs_exception",
"test/test_guilt.py::GuiltTestCase::test_file_not_in_since_rev",
"test/test_guilt.py::GuiltTestCase::test_file_not_in_until_rev",
"test/test_guilt.py::GuiltTestCase::test_map_binary_blames",
"test/test_guilt.py::GuiltTestCase::test_map_text_blames",
"test/test_guilt.py::GuiltTestCase::test_populate_trees",
"test/test_guilt.py::GuiltTestCase::test_reduce_locs",
"test/test_guilt.py::GuiltTestCase::test_show_run",
"test/test_guilt.py::FormatterTestCase::test_get_width_not_tty",
"test/test_guilt.py::FormatterTestCase::test_get_width_tty",
"test/test_guilt.py::FormatterTestCase::test_green",
"test/test_guilt.py::FormatterTestCase::test_red",
"test/test_guilt.py::FormatterTestCase::test_show_binary_guilt",
"test/test_guilt.py::FormatterTestCase::test_show_text_guilt"
]
| []
| null | 181 | [
"git_guilt/guilt.py"
]
| [
"git_guilt/guilt.py"
]
|
|
kevin1024__vcrpy-167 | 5f8407a8a1a2ebe11aa386c7a4f13816956c419b | 2015-07-04 12:30:53 | 1660cc3a9fee71f1ef98b338609a7ae23eed90ca | Diaoul: No unit test because dict order is not deterministic.
Diaoul: More details in the commit message. | diff --git a/README.rst b/README.rst
index a5dcd90..0b85862 100644
--- a/README.rst
+++ b/README.rst
@@ -5,7 +5,6 @@ VCR.py
:alt: vcr.py
vcr.py
-
This is a Python version of `Ruby's VCR
library <https://github.com/vcr/vcr>`__.
@@ -145,7 +144,9 @@ The following options are available :
- port (the port of the server receiving the request)
- path (the path of the request)
- query (the query string of the request)
-- body (the entire request body)
+- raw\_body (the entire request body as is)
+- body (the entire request body unmarshalled by content-type
+ i.e. xmlrpc, json, form-urlencoded, falling back on raw\_body)
- headers (the headers of the request)
Backwards compatible matchers:
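As a usage note for the matcher list above, opting into the unmarshalling `body` matcher looks like this minimal sketch (the cassette path is made up):

```python
import vcr

# Match on method, URI and the unmarshalled body rather than raw bytes, so
# equivalent JSON/form/xmlrpc payloads with reordered keys still hit the
# recorded interaction; pick 'raw_body' instead to keep byte-exact matching.
my_vcr = vcr.VCR(match_on=['method', 'uri', 'body'])

with my_vcr.use_cassette('fixtures/example.yaml'):  # hypothetical path
    pass  # perform requests here
```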
diff --git a/setup.py b/setup.py
index 4b8c549..eaf20c3 100644
--- a/setup.py
+++ b/setup.py
@@ -3,7 +3,6 @@
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
-import pkg_resources
long_description = open('README.rst', 'r').read()
@@ -21,21 +20,6 @@ class PyTest(TestCommand):
sys.exit(errno)
-install_requires=['PyYAML', 'wrapt', 'six>=1.5']
-
-
-extras_require = {
- ':python_version in "2.4, 2.5, 2.6"':
- ['contextlib2', 'backport_collections', 'mock'],
- ':python_version in "2.7, 3.1, 3.2"': ['contextlib2', 'mock'],
-}
-
-
-if 'bdist_wheel' not in sys.argv:
- for key, value in extras_require.items():
- if key.startswith(':') and pkg_resources.evaluate_marker(key[1:]):
- install_requires.extend(value)
-
setup(
name='vcrpy',
version='1.6.0',
@@ -48,8 +32,12 @@ setup(
author_email='[email protected]',
url='https://github.com/kevin1024/vcrpy',
packages=find_packages(exclude=("tests*",)),
- install_requires=install_requires,
- extras_require=extras_require,
+ install_requires=['PyYAML', 'wrapt', 'six>=1.5'],
+ extras_require = {
+ ':python_version in "2.4, 2.5, 2.6"':
+ ['contextlib2', 'backport_collections', 'mock'],
+ ':python_version in "2.7, 3.1, 3.2"': ['contextlib2', 'mock'],
+ },
license='MIT',
tests_require=['pytest', 'mock', 'pytest-localserver'],
cmdclass={'test': PyTest},
diff --git a/vcr/config.py b/vcr/config.py
index 1faef3b..7655a3a 100644
--- a/vcr/config.py
+++ b/vcr/config.py
@@ -47,6 +47,7 @@ class VCR(object):
'path': matchers.path,
'query': matchers.query,
'headers': matchers.headers,
+ 'raw_body': matchers.raw_body,
'body': matchers.body,
}
self.record_mode = record_mode
diff --git a/vcr/matchers.py b/vcr/matchers.py
index 91bce11..39c5949 100644
--- a/vcr/matchers.py
+++ b/vcr/matchers.py
@@ -1,3 +1,6 @@
+import json
+from six.moves import urllib, xmlrpc_client
+from .util import CaseInsensitiveDict, read_body
import logging
log = logging.getLogger(__name__)
@@ -30,10 +33,23 @@ def query(r1, r2):
return r1.query == r2.query
+def raw_body(r1, r2):
+ return read_body(r1) == read_body(r2)
+
+
def body(r1, r2):
- if hasattr(r1.body, 'read') and hasattr(r2.body, 'read'):
- return r1.body.read() == r2.body.read()
- return r1.body == r2.body
+ r1_body = read_body(r1)
+ r2_body = read_body(r2)
+ r1_headers = CaseInsensitiveDict(r1.headers)
+ r2_headers = CaseInsensitiveDict(r2.headers)
+ if r1_headers.get('Content-Type') == r2_headers.get('Content-Type') == 'application/x-www-form-urlencoded':
+ return urllib.parse.parse_qs(r1_body) == urllib.parse.parse_qs(r2_body)
+ if r1_headers.get('Content-Type') == r2_headers.get('Content-Type') == 'application/json':
+ return json.loads(r1_body) == json.loads(r2_body)
+ if ('xmlrpc' in r1_headers.get('User-Agent', '') and 'xmlrpc' in r2_headers.get('User-Agent', '') and
+ r1_headers.get('Content-Type') == r2_headers.get('Content-Type') == 'text/xml'):
+ return xmlrpc_client.loads(r1_body) == xmlrpc_client.loads(r2_body)
+ return r1_body == r2_body
def headers(r1, r2):
diff --git a/vcr/util.py b/vcr/util.py
index 57f72b1..8c5bd94 100644
--- a/vcr/util.py
+++ b/vcr/util.py
@@ -1,3 +1,74 @@
+import collections
+
+# Shamelessly stolen from https://github.com/kennethreitz/requests/blob/master/requests/structures.py
+class CaseInsensitiveDict(collections.MutableMapping):
+ """
+ A case-insensitive ``dict``-like object.
+ Implements all methods and operations of
+ ``collections.MutableMapping`` as well as dict's ``copy``. Also
+ provides ``lower_items``.
+ All keys are expected to be strings. The structure remembers the
+ case of the last key to be set, and ``iter(instance)``,
+ ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
+ will contain case-sensitive keys. However, querying and contains
+ testing is case insensitive::
+ cid = CaseInsensitiveDict()
+ cid['Accept'] = 'application/json'
+ cid['aCCEPT'] == 'application/json' # True
+ list(cid) == ['Accept'] # True
+ For example, ``headers['content-encoding']`` will return the
+ value of a ``'Content-Encoding'`` response header, regardless
+ of how the header name was originally stored.
+ If the constructor, ``.update``, or equality comparison
+ operations are given keys that have equal ``.lower()``s, the
+ behavior is undefined.
+ """
+ def __init__(self, data=None, **kwargs):
+ self._store = dict()
+ if data is None:
+ data = {}
+ self.update(data, **kwargs)
+
+ def __setitem__(self, key, value):
+ # Use the lowercased key for lookups, but store the actual
+ # key alongside the value.
+ self._store[key.lower()] = (key, value)
+
+ def __getitem__(self, key):
+ return self._store[key.lower()][1]
+
+ def __delitem__(self, key):
+ del self._store[key.lower()]
+
+ def __iter__(self):
+ return (casedkey for casedkey, mappedvalue in self._store.values())
+
+ def __len__(self):
+ return len(self._store)
+
+ def lower_items(self):
+ """Like iteritems(), but with all lowercase keys."""
+ return (
+ (lowerkey, keyval[1])
+ for (lowerkey, keyval)
+ in self._store.items()
+ )
+
+ def __eq__(self, other):
+ if isinstance(other, collections.Mapping):
+ other = CaseInsensitiveDict(other)
+ else:
+ return NotImplemented
+ # Compare insensitively
+ return dict(self.lower_items()) == dict(other.lower_items())
+
+ # Copy is required
+ def copy(self):
+ return CaseInsensitiveDict(self._store.values())
+
+ def __repr__(self):
+ return str(dict(self.items()))
+
def partition_dict(predicate, dictionary):
true_dict = {}
false_dict = {}
@@ -14,3 +85,8 @@ def compose(*functions):
res = function(res)
return res
return composed
+
+def read_body(request):
+ if hasattr(request.body, 'read'):
+ return request.body.read()
+ return request.body
| Error with body matcher for json, xmlrpc and form urlencoded
This is a tricky issue I encountered when using the body matcher on xmlrpc requests.
Symptoms: Sometimes the request won't match, sometimes it will, and this only affects certain requests. This occurs on Python 3.4 and, I believe, other Python 3 versions, but not on 2.7.
Cause: An XML-RPC request has a body with XML inside, which is generated from the parameters passed to the function call. Some parameters can be of dict type. xmlrpclib (or xmlrpc.client) will loop over the items of the dict and generate the appropriate XML, which becomes the body of our POST request. Now, item order is not guaranteed in dicts, and the behavior changed in Python 3 such that the order of the same dict *can change at any time*, rather than staying *more or less constant on the same computer* as in Python 2. So the generated XML won't necessarily be the same as the one you recorded.
Fix suggestion: A custom XML-RPC body matcher that takes this into account and compares the `struct` XML elements independently of their order.
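A minimal sketch of that unmarshal-then-compare idea, using only the standard library's XML-RPC client (the two bodies below are hand-written stand-ins for two recordings of the same call):

```python
try:
    import xmlrpc.client as xmlrpc_client  # Python 3
except ImportError:
    import xmlrpclib as xmlrpc_client      # Python 2

body_a = ("<?xml version='1.0'?><methodCall><methodName>test</methodName>"
          "<params><param><value><struct>"
          "<member><name>a</name><value><string>1</string></value></member>"
          "<member><name>b</name><value><string>2</string></value></member>"
          "</struct></value></param></params></methodCall>")
# Same call, struct members serialized in the opposite order.
body_b = ("<?xml version='1.0'?><methodCall><methodName>test</methodName>"
          "<params><param><value><struct>"
          "<member><name>b</name><value><string>2</string></value></member>"
          "<member><name>a</name><value><string>1</string></value></member>"
          "</struct></value></param></params></methodCall>")

# loads() returns (params, methodname); structs come back as dicts, so
# member order no longer matters in the comparison.
assert xmlrpc_client.loads(body_a) == xmlrpc_client.loads(body_b)
```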
The gzip compression didn't help me in debugging this, as I couldn't even read the cassettes directly... | kevin1024/vcrpy | diff --git a/tests/unit/test_matchers.py b/tests/unit/test_matchers.py
index f942bd2..d4b32de 100644
--- a/tests/unit/test_matchers.py
+++ b/tests/unit/test_matchers.py
@@ -35,6 +35,38 @@ def test_uri_matcher():
assert matched
+def test_body_matcher():
+ # raw
+ req1 = request.Request('POST', 'http://host.com/', '123', {})
+ req2 = request.Request('POST', 'http://another-host.com/', '123', {'Some-Header': 'value'})
+ assert matchers.body(req1, req2)
+
+ # application/x-www-form-urlencoded
+ req1 = request.Request('POST', 'http://host.com/', 'a=1&b=2', {'Content-Type': 'application/x-www-form-urlencoded'})
+ req2 = request.Request('POST', 'http://host.com/', 'b=2&a=1', {'Content-Type': 'application/x-www-form-urlencoded'})
+ assert matchers.body(req1, req2)
+
+ # application/json
+ req1 = request.Request('POST', 'http://host.com/', '{"a": 1, "b": 2}', {'Content-Type': 'application/json'})
+ req2 = request.Request('POST', 'http://host.com/', '{"b": 2, "a": 1}', {'content-type': 'application/json'})
+ assert matchers.body(req1, req2)
+
+ # xmlrpc
+ req1_body = (b"<?xml version='1.0'?><methodCall><methodName>test</methodName>"
+ b"<params><param><value><array><data><value><struct>"
+ b"<member><name>a</name><value><string>1</string></value></member>"
+ b"<member><name>b</name><value><string>2</string></value></member>"
+ b"</struct></value></data></array></value></param></params></methodCall>")
+ req2_body = (b"<?xml version='1.0'?><methodCall><methodName>test</methodName>"
+ b"<params><param><value><array><data><value><struct>"
+ b"<member><name>b</name><value><string>2</string></value></member>"
+ b"<member><name>a</name><value><string>1</string></value></member>"
+ b"</struct></value></data></array></value></param></params></methodCall>")
+ req1 = request.Request('POST', 'http://host.com/', req1_body, {'User-Agent': 'xmlrpclib', 'Content-Type': 'text/xml'})
+ req2 = request.Request('POST', 'http://host.com/', req2_body, {'user-agent': 'somexmlrpc', 'content-type': 'text/xml'})
+ assert matchers.body(req1, req2)
+
+
def test_query_matcher():
req1 = request.Request('GET', 'http://host.com/?a=b&c=d', '', {})
req2 = request.Request('GET', 'http://host.com/?c=d&a=b', '', {})
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 5
} | 1.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-localserver",
"mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.7",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///croot/attrs_1668696182826/work
certifi @ file:///croot/certifi_1671487769961/work/certifi
flit_core @ file:///opt/conda/conda-bld/flit-core_1644941570762/work/source/flit_core
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1648562407465/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
MarkupSafe==2.1.5
mock==5.2.0
packaging @ file:///croot/packaging_1671697413597/work
pluggy @ file:///tmp/build/80754af9/pluggy_1648042572264/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pytest==7.1.2
pytest-localserver==0.9.0.post0
PyYAML==6.0.1
six==1.17.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
typing_extensions @ file:///croot/typing_extensions_1669924550328/work
-e git+https://github.com/kevin1024/vcrpy.git@5f8407a8a1a2ebe11aa386c7a4f13816956c419b#egg=vcrpy
Werkzeug==2.2.3
wrapt==1.16.0
zipp @ file:///croot/zipp_1672387121353/work
| name: vcrpy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=22.1.0=py37h06a4308_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- flit-core=3.6.0=pyhd3eb1b0_0
- importlib-metadata=4.11.3=py37h06a4308_0
- importlib_metadata=4.11.3=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=22.0=py37h06a4308_0
- pip=22.3.1=py37h06a4308_0
- pluggy=1.0.0=py37h06a4308_1
- py=1.11.0=pyhd3eb1b0_0
- pytest=7.1.2=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py37h06a4308_0
- typing_extensions=4.4.0=py37h06a4308_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zipp=3.11.0=py37h06a4308_0
- zlib=1.2.13=h5eee18b_1
- pip:
- markupsafe==2.1.5
- mock==5.2.0
- pytest-localserver==0.9.0.post0
- pyyaml==6.0.1
- six==1.17.0
- werkzeug==2.2.3
- wrapt==1.16.0
prefix: /opt/conda/envs/vcrpy
| [
"tests/unit/test_matchers.py::test_body_matcher"
]
| []
| [
"tests/unit/test_matchers.py::test_uri_matcher",
"tests/unit/test_matchers.py::test_query_matcher",
"tests/unit/test_matchers.py::test_metchers"
]
| []
| MIT License | 182 | [
"README.rst",
"vcr/config.py",
"setup.py",
"vcr/util.py",
"vcr/matchers.py"
]
| [
"README.rst",
"vcr/config.py",
"setup.py",
"vcr/util.py",
"vcr/matchers.py"
]
|
imageio__imageio-98 | d99d9a25cd8db0920d42cf5d3f372471776865fe | 2015-07-04 20:48:09 | 1f53bf2d5794079c9300a3bb46a1adbc5889de59 | diff --git a/imageio/plugins/__init__.py b/imageio/plugins/__init__.py
index 4e0074c..4ee8062 100644
--- a/imageio/plugins/__init__.py
+++ b/imageio/plugins/__init__.py
@@ -78,6 +78,7 @@ For the Format.Writer class:
"""
+from . import tifffile # noqa
from . import freeimage # noqa
from . import freeimagemulti # noqa
from . import example # noqa
diff --git a/imageio/plugins/_tifffile.py b/imageio/plugins/_tifffile.py
new file mode 100644
index 0000000..9ccacbe
--- /dev/null
+++ b/imageio/plugins/_tifffile.py
@@ -0,0 +1,4808 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# tifffile.py
+
+# Copyright (c) 2008-2014, Christoph Gohlke
+# Copyright (c) 2008-2014, The Regents of the University of California
+# Produced at the Laboratory for Fluorescence Dynamics
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# * Neither the name of the copyright holders nor the names of any
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+"""Read and write image data from and to TIFF files.
+
+Image and metadata can be read from TIFF, BigTIFF, OME-TIFF, STK, LSM, NIH,
+SGI, ImageJ, MicroManager, FluoView, SEQ and GEL files.
+Only a subset of the TIFF specification is supported, mainly uncompressed
+and losslessly compressed 2**(0 to 6) bit integer, 16, 32 and 64-bit float,
+grayscale and RGB(A) images, which are commonly used in bio-scientific imaging.
+Specifically, reading JPEG and CCITT compressed image data or EXIF, IPTC, GPS,
+and XMP metadata is not implemented.
+Only primary info records are read for STK, FluoView, MicroManager, and
+NIH image formats.
+
+TIFF, the Tagged Image File Format, is under the control of Adobe Systems.
+BigTIFF allows for files greater than 4 GB. STK, LSM, FluoView, SGI, SEQ, GEL,
+and OME-TIFF, are custom extensions defined by Molecular Devices (Universal
+Imaging Corporation), Carl Zeiss MicroImaging, Olympus, Silicon Graphics
+International, Media Cybernetics, Molecular Dynamics, and the Open Microscopy
+Environment consortium respectively.
+
+For command line usage run ``python tifffile.py --help``
+
+:Author:
+ `Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`_
+
+:Organization:
+ Laboratory for Fluorescence Dynamics, University of California, Irvine
+
+:Version: 2014.08.24
+
+Requirements
+------------
+* `CPython 2.7 or 3.4 <http://www.python.org>`_
+* `Numpy 1.8.2 <http://www.numpy.org>`_
+* `Matplotlib 1.4 <http://www.matplotlib.org>`_ (optional for plotting)
+* `Tifffile.c 2013.11.05 <http://www.lfd.uci.edu/~gohlke/>`_
+ (recommended for faster decoding of PackBits and LZW encoded strings)
+
+Notes
+-----
+The API is not stable yet and might change between revisions.
+
+Tested on little-endian platforms only.
+
+Other Python packages and modules for reading bio-scientific TIFF files:
+
+* `Imread <http://luispedro.org/software/imread>`_
+* `PyLibTiff <http://code.google.com/p/pylibtiff>`_
+* `SimpleITK <http://www.simpleitk.org>`_
+* `PyLSM <https://launchpad.net/pylsm>`_
+* `PyMca.TiffIO.py <http://pymca.sourceforge.net/>`_ (same as fabio.TiffIO)
+* `BioImageXD.Readers <http://www.bioimagexd.net/>`_
+* `Cellcognition.io <http://cellcognition.org/>`_
+* `CellProfiler.bioformats
+ <https://github.com/CellProfiler/python-bioformats>`_
+
+Acknowledgements
+----------------
+* Egor Zindy, University of Manchester, for cz_lsm_scan_info specifics.
+* Wim Lewis for a bug fix and some read_cz_lsm functions.
+* Hadrien Mary for help on reading MicroManager files.
+
+References
+----------
+(1) TIFF 6.0 Specification and Supplements. Adobe Systems Incorporated.
+ http://partners.adobe.com/public/developer/tiff/
+(2) TIFF File Format FAQ. http://www.awaresystems.be/imaging/tiff/faq.html
+(3) MetaMorph Stack (STK) Image File Format.
+ http://support.meta.moleculardevices.com/docs/t10243.pdf
+(4) Image File Format Description LSM 5/7 Release 6.0 (ZEN 2010).
+ Carl Zeiss MicroImaging GmbH. BioSciences. May 10, 2011
+(5) File Format Description - LSM 5xx Release 2.0.
+ http://ibb.gsf.de/homepage/karsten.rodenacker/IDL/Lsmfile.doc
+(6) The OME-TIFF format.
+ http://www.openmicroscopy.org/site/support/file-formats/ome-tiff
+(7) UltraQuant(r) Version 6.0 for Windows Start-Up Guide.
+ http://www.ultralum.com/images%20ultralum/pdf/UQStart%20Up%20Guide.pdf
+(8) Micro-Manager File Formats.
+ http://www.micro-manager.org/wiki/Micro-Manager_File_Formats
+(9) Tags for TIFF and Related Specifications. Digital Preservation.
+ http://www.digitalpreservation.gov/formats/content/tiff_tags.shtml
+
+Examples
+--------
+>>> data = numpy.random.rand(5, 301, 219)
+>>> imsave('temp.tif', data)
+
+>>> image = imread('temp.tif')
+>>> numpy.testing.assert_array_equal(image, data)
+
+>>> with TiffFile('temp.tif') as tif:
+... images = tif.asarray()
+... for page in tif:
+... for tag in page.tags.values():
+... t = tag.name, tag.value
+... image = page.asarray()
+
+"""
+
+from __future__ import division, print_function
+
+import sys
+import os
+import re
+import glob
+import math
+import zlib
+import time
+import json
+import struct
+import warnings
+import tempfile
+import datetime
+import collections
+from fractions import Fraction
+from xml.etree import cElementTree as etree
+
+import numpy
+
+__version__ = '2014.08.24'
+__docformat__ = 'restructuredtext en'
+__all__ = ('imsave', 'imread', 'imshow', 'TiffFile', 'TiffWriter',
+ 'TiffSequence')
+
+
+def imsave(filename, data, **kwargs):
+ """Write image data to TIFF file.
+
+ Refer to the TiffWriter class and member functions for documentation.
+
+ Parameters
+ ----------
+ filename : str
+ Name of file to write.
+ data : array_like
+ Input image. The last dimensions are assumed to be image depth,
+ height, width, and samples.
+ kwargs : dict
+ Parameters 'byteorder', 'bigtiff', and 'software' are passed to
+ the TiffWriter class.
+ Parameters 'photometric', 'planarconfig', 'resolution',
+ 'description', 'compress', 'volume', and 'extratags' are passed to
+ the TiffWriter.save function.
+
+ Examples
+ --------
+ >>> data = numpy.random.rand(2, 5, 3, 301, 219)
+ >>> description = u'{"shape": %s}' % str(list(data.shape))
+ >>> imsave('temp.tif', data, compress=6,
+ ... extratags=[(270, 's', 0, description, True)])
+
+ """
+ tifargs = {}
+ for key in ('byteorder', 'bigtiff', 'software', 'writeshape'):
+ if key in kwargs:
+ tifargs[key] = kwargs[key]
+ del kwargs[key]
+
+ if 'writeshape' not in kwargs:
+ kwargs['writeshape'] = True
+ if 'bigtiff' not in tifargs and data.size*data.dtype.itemsize > 2000*2**20:
+ tifargs['bigtiff'] = True
+
+ with TiffWriter(filename, **tifargs) as tif:
+ tif.save(data, **kwargs)
+
+
+class TiffWriter(object):
+ """Write image data to TIFF file.
+
+ TiffWriter instances must be closed using the close method, which is
+ automatically called when using the 'with' statement.
+
+ Examples
+ --------
+ >>> data = numpy.random.rand(2, 5, 3, 301, 219)
+ >>> with TiffWriter('temp.tif', bigtiff=True) as tif:
+ ... for i in range(data.shape[0]):
+ ... tif.save(data[i], compress=6)
+
+ """
+ TYPES = {'B': 1, 's': 2, 'H': 3, 'I': 4, '2I': 5, 'b': 6,
+ 'h': 8, 'i': 9, 'f': 11, 'd': 12, 'Q': 16, 'q': 17}
+ TAGS = {
+ 'new_subfile_type': 254, 'subfile_type': 255,
+ 'image_width': 256, 'image_length': 257, 'bits_per_sample': 258,
+ 'compression': 259, 'photometric': 262, 'fill_order': 266,
+ 'document_name': 269, 'image_description': 270, 'strip_offsets': 273,
+ 'orientation': 274, 'samples_per_pixel': 277, 'rows_per_strip': 278,
+ 'strip_byte_counts': 279, 'x_resolution': 282, 'y_resolution': 283,
+ 'planar_configuration': 284, 'page_name': 285, 'resolution_unit': 296,
+ 'software': 305, 'datetime': 306, 'predictor': 317, 'color_map': 320,
+ 'tile_width': 322, 'tile_length': 323, 'tile_offsets': 324,
+ 'tile_byte_counts': 325, 'extra_samples': 338, 'sample_format': 339,
+ 'image_depth': 32997, 'tile_depth': 32998}
+
+ def __init__(self, filename, bigtiff=False, byteorder=None,
+ software='tifffile.py'):
+ """Create a new TIFF file for writing.
+
+ Use bigtiff=True when creating files greater than 2 GB.
+
+ Parameters
+ ----------
+ filename : str
+ Name of file to write.
+ bigtiff : bool
+ If True, the BigTIFF format is used.
+ byteorder : {'<', '>'}
+ The endianness of the data in the file.
+ By default this is the system's native byte order.
+ software : str
+ Name of the software used to create the image.
+ Saved with the first page only.
+
+ """
+ if byteorder not in (None, '<', '>'):
+ raise ValueError("invalid byteorder %s" % byteorder)
+ if byteorder is None:
+ byteorder = '<' if sys.byteorder == 'little' else '>'
+
+ self._byteorder = byteorder
+ self._software = software
+
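+        # write the TIFF header: a byte order mark ('II' = little-endian,
+        # 'MM' = big-endian) followed by the magic version number
+        # (42 for classic TIFF, 43 for BigTIFF)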
+ self._fh = open(filename, 'wb')
+ self._fh.write({'<': b'II', '>': b'MM'}[byteorder])
+
+ if bigtiff:
+ self._bigtiff = True
+ self._offset_size = 8
+ self._tag_size = 20
+ self._numtag_format = 'Q'
+ self._offset_format = 'Q'
+ self._val_format = '8s'
+ self._fh.write(struct.pack(byteorder+'HHH', 43, 8, 0))
+ else:
+ self._bigtiff = False
+ self._offset_size = 4
+ self._tag_size = 12
+ self._numtag_format = 'H'
+ self._offset_format = 'I'
+ self._val_format = '4s'
+ self._fh.write(struct.pack(byteorder+'H', 42))
+
+ # first IFD
+ self._ifd_offset = self._fh.tell()
+ self._fh.write(struct.pack(byteorder+self._offset_format, 0))
+
+ def save(self, data, photometric=None, planarconfig=None, resolution=None,
+ description=None, volume=False, writeshape=False, compress=0,
+ extratags=()):
+ """Write image data to TIFF file.
+
+        Image data are written in one strip per plane.
+        Dimensions beyond the last 2 to 4 (depending on photometric mode,
+        planar configuration, and SGI mode) are flattened and saved as
+        separate pages.
+ The 'sample_format' and 'bits_per_sample' TIFF tags are derived from
+ the data type.
+
+ Parameters
+ ----------
+ data : array_like
+ Input image. The last dimensions are assumed to be image depth,
+ height, width, and samples.
+ photometric : {'minisblack', 'miniswhite', 'rgb'}
+ The color space of the image data.
+ By default this setting is inferred from the data shape.
+ planarconfig : {'contig', 'planar'}
+            Specifies if samples are stored contiguously or in separate planes.
+ By default this setting is inferred from the data shape.
+ 'contig': last dimension contains samples.
+ 'planar': third last dimension contains samples.
+ resolution : (float, float) or ((int, int), (int, int))
+ X and Y resolution in dots per inch as float or rational numbers.
+ description : str
+ The subject of the image. Saved with the first page only.
+ compress : int
+ Values from 0 to 9 controlling the level of zlib compression.
+ If 0, data are written uncompressed (default).
+ volume : bool
+ If True, volume data are stored in one tile (if applicable) using
+ the SGI image_depth and tile_depth tags.
+ Image width and depth must be multiple of 16.
+            Few programs can read this format, e.g. MeVisLab.
+ writeshape : bool
+ If True, write the data shape to the image_description tag
+ if necessary and no other description is given.
+        extratags : sequence of tuples
+ Additional tags as [(code, dtype, count, value, writeonce)].
+
+ code : int
+ The TIFF tag Id.
+ dtype : str
+ Data type of items in 'value' in Python struct format.
+ One of B, s, H, I, 2I, b, h, i, f, d, Q, or q.
+ count : int
+ Number of data values. Not used for string values.
+ value : sequence
+ 'Count' values compatible with 'dtype'.
+ writeonce : bool
+ If True, the tag is written to the first page only.
+
+ """
+ if photometric not in (None, 'minisblack', 'miniswhite', 'rgb'):
+ raise ValueError("invalid photometric %s" % photometric)
+ if planarconfig not in (None, 'contig', 'planar'):
+ raise ValueError("invalid planarconfig %s" % planarconfig)
+ if not 0 <= compress <= 9:
+ raise ValueError("invalid compression level %s" % compress)
+
+ fh = self._fh
+ byteorder = self._byteorder
+ numtag_format = self._numtag_format
+ val_format = self._val_format
+ offset_format = self._offset_format
+ offset_size = self._offset_size
+ tag_size = self._tag_size
+
+ data = numpy.asarray(data, dtype=byteorder+data.dtype.char, order='C')
+ data_shape = shape = data.shape
+ data = numpy.atleast_2d(data)
+
+ # normalize shape of data
+ samplesperpixel = 1
+ extrasamples = 0
+ if volume and data.ndim < 3:
+ volume = False
+ if photometric is None:
+ if planarconfig:
+ photometric = 'rgb'
+ elif data.ndim > 2 and shape[-1] in (3, 4):
+ photometric = 'rgb'
+ elif volume and data.ndim > 3 and shape[-4] in (3, 4):
+ photometric = 'rgb'
+ elif data.ndim > 2 and shape[-3] in (3, 4):
+ photometric = 'rgb'
+ else:
+ photometric = 'minisblack'
+ if planarconfig and len(shape) <= (3 if volume else 2):
+ planarconfig = None
+ photometric = 'minisblack'
+ if photometric == 'rgb':
+ if len(shape) < 3:
+ raise ValueError("not a RGB(A) image")
+ if len(shape) < 4:
+ volume = False
+ if planarconfig is None:
+ if shape[-1] in (3, 4):
+ planarconfig = 'contig'
+ elif shape[-4 if volume else -3] in (3, 4):
+ planarconfig = 'planar'
+ elif shape[-1] > shape[-4 if volume else -3]:
+ planarconfig = 'planar'
+ else:
+ planarconfig = 'contig'
+ if planarconfig == 'contig':
+ data = data.reshape((-1, 1) + shape[(-4 if volume else -3):])
+ samplesperpixel = data.shape[-1]
+ else:
+ data = data.reshape(
+ (-1,) + shape[(-4 if volume else -3):] + (1,))
+ samplesperpixel = data.shape[1]
+ if samplesperpixel > 3:
+ extrasamples = samplesperpixel - 3
+ elif planarconfig and len(shape) > (3 if volume else 2):
+ if planarconfig == 'contig':
+ data = data.reshape((-1, 1) + shape[(-4 if volume else -3):])
+ samplesperpixel = data.shape[-1]
+ else:
+ data = data.reshape(
+ (-1,) + shape[(-4 if volume else -3):] + (1,))
+ samplesperpixel = data.shape[1]
+ extrasamples = samplesperpixel - 1
+ else:
+ planarconfig = None
+ # remove trailing 1s
+ while len(shape) > 2 and shape[-1] == 1:
+ shape = shape[:-1]
+ if len(shape) < 3:
+ volume = False
+ if False and (
+ len(shape) > (3 if volume else 2) and shape[-1] < 5 and
+ all(shape[-1] < i
+ for i in shape[(-4 if volume else -3):-1])):
+ # DISABLED: non-standard TIFF, e.g. (220, 320, 2)
+ planarconfig = 'contig'
+ samplesperpixel = shape[-1]
+ data = data.reshape((-1, 1) + shape[(-4 if volume else -3):])
+ else:
+ data = data.reshape(
+ (-1, 1) + shape[(-3 if volume else -2):] + (1,))
+
+ if samplesperpixel == 2:
+ warnings.warn("writing non-standard TIFF (samplesperpixel 2)")
+
+ if volume and (data.shape[-2] % 16 or data.shape[-3] % 16):
+ warnings.warn("volume width or length are not multiple of 16")
+ volume = False
+ data = numpy.swapaxes(data, 1, 2)
+ data = data.reshape(
+ (data.shape[0] * data.shape[1],) + data.shape[2:])
+
+ # data.shape is now normalized 5D or 6D, depending on volume
+ # (pages, planar_samples, (depth,) height, width, contig_samples)
+ assert len(data.shape) in (5, 6)
+ shape = data.shape
+
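+        # text-to-bytes helper: on Python 2, 'bytes' is an alias of 'str';
+        # on Python 3, encode str as UTF-8 and pass bytes through unchanged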
+ bytestr = bytes if sys.version[0] == '2' else (
+ lambda x: bytes(x, 'utf-8') if isinstance(x, str) else x)
+ tags = [] # list of (code, ifdentry, ifdvalue, writeonce)
+
+ if volume:
+ # use tiles to save volume data
+ tag_byte_counts = TiffWriter.TAGS['tile_byte_counts']
+ tag_offsets = TiffWriter.TAGS['tile_offsets']
+ else:
+ # else use strips
+ tag_byte_counts = TiffWriter.TAGS['strip_byte_counts']
+ tag_offsets = TiffWriter.TAGS['strip_offsets']
+
+ def pack(fmt, *val):
+ return struct.pack(byteorder+fmt, *val)
+
+ def addtag(code, dtype, count, value, writeonce=False):
+ # Compute ifdentry & ifdvalue bytes from code, dtype, count, value.
+ # Append (code, ifdentry, ifdvalue, writeonce) to tags list.
+ code = int(TiffWriter.TAGS.get(code, code))
+ try:
+ tifftype = TiffWriter.TYPES[dtype]
+ except KeyError:
+ raise ValueError("unknown dtype %s" % dtype)
+ rawcount = count
+ if dtype == 's':
+ value = bytestr(value) + b'\0'
+ count = rawcount = len(value)
+ value = (value, )
+ if len(dtype) > 1:
+ count *= int(dtype[:-1])
+ dtype = dtype[-1]
+ ifdentry = [pack('HH', code, tifftype),
+ pack(offset_format, rawcount)]
+ ifdvalue = None
+ if count == 1:
+ if isinstance(value, (tuple, list)):
+ value = value[0]
+ ifdentry.append(pack(val_format, pack(dtype, value)))
+ elif struct.calcsize(dtype) * count <= offset_size:
+ ifdentry.append(pack(val_format,
+ pack(str(count)+dtype, *value)))
+ else:
+ ifdentry.append(pack(offset_format, 0))
+ ifdvalue = pack(str(count)+dtype, *value)
+ tags.append((code, b''.join(ifdentry), ifdvalue, writeonce))
+
+ def rational(arg, max_denominator=1000000):
+            # return numerator and denominator from a float or two integers
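+            # e.g. rational(96.0) -> (96, 1); rational((300, 2)) -> (150, 1)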
+ try:
+ f = Fraction.from_float(arg)
+ except TypeError:
+ f = Fraction(arg[0], arg[1])
+ f = f.limit_denominator(max_denominator)
+ return f.numerator, f.denominator
+
+ if self._software:
+ addtag('software', 's', 0, self._software, writeonce=True)
+ self._software = None # only save to first page
+ if description:
+ addtag('image_description', 's', 0, description, writeonce=True)
+ elif writeshape and shape[0] > 1 and shape != data_shape:
+ addtag('image_description', 's', 0,
+ "shape=(%s)" % (",".join('%i' % i for i in data_shape)),
+ writeonce=True)
+ addtag('datetime', 's', 0,
+ datetime.datetime.now().strftime("%Y:%m:%d %H:%M:%S"),
+ writeonce=True)
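+        # compression tag value 32946 is Adobe deflate (zlib); 1 means none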
+ addtag('compression', 'H', 1, 32946 if compress else 1)
+ addtag('orientation', 'H', 1, 1)
+ addtag('image_width', 'I', 1, shape[-2])
+ addtag('image_length', 'I', 1, shape[-3])
+ if volume:
+ addtag('image_depth', 'I', 1, shape[-4])
+ addtag('tile_depth', 'I', 1, shape[-4])
+ addtag('tile_width', 'I', 1, shape[-2])
+ addtag('tile_length', 'I', 1, shape[-3])
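+        # new_subfile_type bit 1 (value 2) marks a single page of a
+        # multi-page image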
+ addtag('new_subfile_type', 'I', 1, 0 if shape[0] == 1 else 2)
+ addtag('sample_format', 'H', 1,
+ {'u': 1, 'i': 2, 'f': 3, 'c': 6}[data.dtype.kind])
+ addtag('photometric', 'H', 1,
+ {'miniswhite': 0, 'minisblack': 1, 'rgb': 2}[photometric])
+ addtag('samples_per_pixel', 'H', 1, samplesperpixel)
+ if planarconfig and samplesperpixel > 1:
+ addtag('planar_configuration', 'H', 1, 1
+ if planarconfig == 'contig' else 2)
+ addtag('bits_per_sample', 'H', samplesperpixel,
+ (data.dtype.itemsize * 8, ) * samplesperpixel)
+ else:
+ addtag('bits_per_sample', 'H', 1, data.dtype.itemsize * 8)
+ if extrasamples:
+ if photometric == 'rgb' and extrasamples == 1:
+ addtag('extra_samples', 'H', 1, 1) # associated alpha channel
+ else:
+ addtag('extra_samples', 'H', extrasamples, (0,) * extrasamples)
+ if resolution:
+ addtag('x_resolution', '2I', 1, rational(resolution[0]))
+ addtag('y_resolution', '2I', 1, rational(resolution[1]))
+ addtag('resolution_unit', 'H', 1, 2)
+ addtag('rows_per_strip', 'I', 1,
+ shape[-3] * (shape[-4] if volume else 1))
+
+ # use one strip or tile per plane
+ strip_byte_counts = (data[0, 0].size * data.dtype.itemsize,) * shape[1]
+ addtag(tag_byte_counts, offset_format, shape[1], strip_byte_counts)
+ addtag(tag_offsets, offset_format, shape[1], (0, ) * shape[1])
+
+ # add extra tags from users
+ for t in extratags:
+ addtag(*t)
+ # the entries in an IFD must be sorted in ascending order by tag code
+ tags = sorted(tags, key=lambda x: x[0])
+
+ if not self._bigtiff and (fh.tell() + data.size*data.dtype.itemsize
+ > 2**31-1):
+ raise ValueError("data too large for non-bigtiff file")
+
+ for pageindex in range(shape[0]):
+ # update pointer at ifd_offset
+ pos = fh.tell()
+ fh.seek(self._ifd_offset)
+ fh.write(pack(offset_format, pos))
+ fh.seek(pos)
+
+ # write ifdentries
+ fh.write(pack(numtag_format, len(tags)))
+ tag_offset = fh.tell()
+ fh.write(b''.join(t[1] for t in tags))
+ self._ifd_offset = fh.tell()
+ fh.write(pack(offset_format, 0)) # offset to next IFD
+
+ # write tag values and patch offsets in ifdentries, if necessary
+ for tagindex, tag in enumerate(tags):
+ if tag[2]:
+ pos = fh.tell()
+ fh.seek(tag_offset + tagindex*tag_size + offset_size + 4)
+ fh.write(pack(offset_format, pos))
+ fh.seek(pos)
+ if tag[0] == tag_offsets:
+ strip_offsets_offset = pos
+ elif tag[0] == tag_byte_counts:
+ strip_byte_counts_offset = pos
+ fh.write(tag[2])
+
+ # write image data
+ data_offset = fh.tell()
+ if compress:
+ strip_byte_counts = []
+ for plane in data[pageindex]:
+ plane = zlib.compress(plane, compress)
+ strip_byte_counts.append(len(plane))
+ fh.write(plane)
+ else:
+                # if this fails, try updating Python/numpy
+ data[pageindex].tofile(fh)
+ fh.flush()
+
+ # update strip and tile offsets and byte_counts if necessary
+ pos = fh.tell()
+ for tagindex, tag in enumerate(tags):
+ if tag[0] == tag_offsets: # strip or tile offsets
+ if tag[2]:
+ fh.seek(strip_offsets_offset)
+ strip_offset = data_offset
+ for size in strip_byte_counts:
+ fh.write(pack(offset_format, strip_offset))
+ strip_offset += size
+ else:
+ fh.seek(tag_offset + tagindex*tag_size +
+ offset_size + 4)
+ fh.write(pack(offset_format, data_offset))
+ elif tag[0] == tag_byte_counts: # strip or tile byte_counts
+ if compress:
+ if tag[2]:
+ fh.seek(strip_byte_counts_offset)
+ for size in strip_byte_counts:
+ fh.write(pack(offset_format, size))
+ else:
+ fh.seek(tag_offset + tagindex*tag_size +
+ offset_size + 4)
+ fh.write(pack(offset_format, strip_byte_counts[0]))
+ break
+ fh.seek(pos)
+ fh.flush()
+ # remove tags that should be written only once
+ if pageindex == 0:
+ tags = [t for t in tags if not t[-1]]
+
+ def close(self):
+ self._fh.close()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self.close()
+
+
+def imread(files, **kwargs):
+ """Return image data from TIFF file(s) as numpy array.
+
+ The first image series is returned if no arguments are provided.
+
+ Parameters
+ ----------
+ files : str or list
+ File name, glob pattern, or list of file names.
+ key : int, slice, or sequence of page indices
+ Defines which pages to return as array.
+ series : int
+ Defines which series of pages in file to return as array.
+ multifile : bool
+ If True (default), OME-TIFF data may include pages from multiple files.
+ pattern : str
+ Regular expression pattern that matches axes names and indices in
+ file names.
+ kwargs : dict
+ Additional parameters passed to the TiffFile or TiffSequence asarray
+ function.
+
+ Examples
+ --------
+ >>> im = imread('test.tif', key=0) # doctest: +SKIP
+ >>> im.shape # doctest: +SKIP
+ (256, 256, 4)
+ >>> ims = imread(['test.tif', 'test.tif']) # doctest: +SKIP
+ >>> ims.shape # doctest: +SKIP
+ (2, 256, 256, 4)
+
+ """
+ kwargs_file = {}
+ if 'multifile' in kwargs:
+ kwargs_file['multifile'] = kwargs['multifile']
+ del kwargs['multifile']
+ else:
+ kwargs_file['multifile'] = True
+ kwargs_seq = {}
+ if 'pattern' in kwargs:
+ kwargs_seq['pattern'] = kwargs['pattern']
+ del kwargs['pattern']
+
+ if isinstance(files, basestring) and any(i in files for i in '?*'):
+ files = glob.glob(files)
+ if not files:
+ raise ValueError('no files found')
+ if len(files) == 1:
+ files = files[0]
+
+ if isinstance(files, basestring):
+ with TiffFile(files, **kwargs_file) as tif:
+ return tif.asarray(**kwargs)
+ else:
+ with TiffSequence(files, **kwargs_seq) as imseq:
+ return imseq.asarray(**kwargs)
+
+
+class lazyattr(object):
+ """Lazy object attribute whose value is computed on first access."""
+ __slots__ = ('func', )
+
+ def __init__(self, func):
+ self.func = func
+
+ def __get__(self, instance, owner):
+ if instance is None:
+ return self
+ value = self.func(instance)
+ if value is NotImplemented:
+ return getattr(super(owner, instance), self.func.__name__)
+ setattr(instance, self.func.__name__, value)
+ return value
+
+
+class TiffFile(object):
+ """Read image and metadata from TIFF, STK, LSM, and FluoView files.
+
+ TiffFile instances must be closed using the close method, which is
+ automatically called when using the 'with' statement.
+
+ Attributes
+ ----------
+ pages : list
+ All TIFF pages in file.
+ series : list of Records(shape, dtype, axes, TiffPages)
+ TIFF pages with compatible shapes and types.
+    micromanager_metadata : dict
+        Extra MicroManager non-TIFF metadata in the file, if present.
+
+ All attributes are read-only.
+
+ Examples
+ --------
+ >>> with TiffFile('test.tif') as tif: # doctest: +SKIP
+ ... data = tif.asarray()
+ ... data.shape
+ (256, 256, 4)
+
+ """
+ def __init__(self, arg, name=None, offset=None, size=None,
+ multifile=True, multifile_close=True):
+ """Initialize instance from file.
+
+ Parameters
+ ----------
+ arg : str or open file
+ Name of file or open file object.
+ The file objects are closed in TiffFile.close().
+ name : str
+ Optional name of file in case 'arg' is a file handle.
+ offset : int
+ Optional start position of embedded file. By default this is
+ the current file position.
+ size : int
+ Optional size of embedded file. By default this is the number
+ of bytes from the 'offset' to the end of the file.
+ multifile : bool
+ If True (default), series may include pages from multiple files.
+ Currently applies to OME-TIFF only.
+ multifile_close : bool
+ If True (default), keep the handles of other files in multifile
+ series closed. This is inefficient when few files refer to
+ many pages. If False, the C runtime may run out of resources.
+
+ """
+ self._fh = FileHandle(arg, name=name, offset=offset, size=size)
+ self.offset_size = None
+ self.pages = []
+ self._multifile = bool(multifile)
+ self._multifile_close = bool(multifile_close)
+ self._files = {self._fh.name: self} # cache of TiffFiles
+ try:
+ self._fromfile()
+ except Exception:
+ self._fh.close()
+ raise
+
+ @property
+ def filehandle(self):
+ """Return file handle."""
+ return self._fh
+
+ @property
+ def filename(self):
+ """Return name of file handle."""
+ return self._fh.name
+
+ def close(self):
+ """Close open file handle(s)."""
+ for tif in self._files.values():
+ tif._fh.close()
+ self._files = {}
+
+ def _fromfile(self):
+ """Read TIFF header and all page records from file."""
+ self._fh.seek(0)
+ try:
+ self.byteorder = {b'II': '<', b'MM': '>'}[self._fh.read(2)]
+ except KeyError:
+ raise ValueError("not a valid TIFF file")
+ version = struct.unpack(self.byteorder+'H', self._fh.read(2))[0]
+ if version == 43: # BigTiff
+ self.offset_size, zero = struct.unpack(self.byteorder+'HH',
+ self._fh.read(4))
+ if zero or self.offset_size != 8:
+ raise ValueError("not a valid BigTIFF file")
+ elif version == 42:
+ self.offset_size = 4
+ else:
+ raise ValueError("not a TIFF file")
+ self.pages = []
+ while True:
+ try:
+ page = TiffPage(self)
+ self.pages.append(page)
+ except StopIteration:
+ break
+ if not self.pages:
+ raise ValueError("empty TIFF file")
+
+ if self.is_micromanager:
+ # MicroManager files contain metadata not stored in TIFF tags.
+ self.micromanager_metadata = read_micromanager_metadata(self._fh)
+
+ if self.is_lsm:
+ self._fix_lsm_strip_offsets()
+ self._fix_lsm_strip_byte_counts()
+
+ def _fix_lsm_strip_offsets(self):
+ """Unwrap strip offsets for LSM files greater than 4 GB."""
+ for series in self.series:
+ wrap = 0
+ previous_offset = 0
+ for page in series.pages:
+ strip_offsets = []
+ for current_offset in page.strip_offsets:
+ if current_offset < previous_offset:
+ wrap += 2**32
+ strip_offsets.append(current_offset + wrap)
+ previous_offset = current_offset
+ page.strip_offsets = tuple(strip_offsets)
+
+ def _fix_lsm_strip_byte_counts(self):
+ """Set strip_byte_counts to size of compressed data.
+
+ The strip_byte_counts tag in LSM files contains the number of bytes
+ for the uncompressed data.
+
+ """
+ if not self.pages:
+ return
+ strips = {}
+ for page in self.pages:
+ assert len(page.strip_offsets) == len(page.strip_byte_counts)
+ for offset, bytecount in zip(page.strip_offsets,
+ page.strip_byte_counts):
+ strips[offset] = bytecount
+ offsets = sorted(strips.keys())
+ offsets.append(min(offsets[-1] + strips[offsets[-1]], self._fh.size))
+ for i, offset in enumerate(offsets[:-1]):
+ strips[offset] = min(strips[offset], offsets[i+1] - offset)
+ for page in self.pages:
+ if page.compression:
+ page.strip_byte_counts = tuple(
+ strips[offset] for offset in page.strip_offsets)
+
+ @lazyattr
+ def series(self):
+ """Return series of TiffPage with compatible shape and properties."""
+ if not self.pages:
+ return []
+
+ series = []
+ page0 = self.pages[0]
+
+ if self.is_ome:
+ series = self._omeseries()
+ elif self.is_fluoview:
+ dims = {b'X': 'X', b'Y': 'Y', b'Z': 'Z', b'T': 'T',
+ b'WAVELENGTH': 'C', b'TIME': 'T', b'XY': 'R',
+ b'EVENT': 'V', b'EXPOSURE': 'L'}
+ mmhd = list(reversed(page0.mm_header.dimensions))
+ series = [Record(
+ axes=''.join(dims.get(i[0].strip().upper(), 'Q')
+ for i in mmhd if i[1] > 1),
+ shape=tuple(int(i[1]) for i in mmhd if i[1] > 1),
+ pages=self.pages, dtype=numpy.dtype(page0.dtype))]
+ elif self.is_lsm:
+ lsmi = page0.cz_lsm_info
+ axes = CZ_SCAN_TYPES[lsmi.scan_type]
+ if page0.is_rgb:
+ axes = axes.replace('C', '').replace('XY', 'XYC')
+ axes = axes[::-1]
+ shape = tuple(getattr(lsmi, CZ_DIMENSIONS[i]) for i in axes)
+ pages = [p for p in self.pages if not p.is_reduced]
+ series = [Record(axes=axes, shape=shape, pages=pages,
+ dtype=numpy.dtype(pages[0].dtype))]
+ if len(pages) != len(self.pages): # reduced RGB pages
+ pages = [p for p in self.pages if p.is_reduced]
+ cp = 1
+ i = 0
+ while cp < len(pages) and i < len(shape)-2:
+ cp *= shape[i]
+ i += 1
+ shape = shape[:i] + pages[0].shape
+ axes = axes[:i] + 'CYX'
+ series.append(Record(axes=axes, shape=shape, pages=pages,
+ dtype=numpy.dtype(pages[0].dtype)))
+ elif self.is_imagej:
+ shape = []
+ axes = []
+ ij = page0.imagej_tags
+ if 'frames' in ij:
+ shape.append(ij['frames'])
+ axes.append('T')
+ if 'slices' in ij:
+ shape.append(ij['slices'])
+ axes.append('Z')
+ if 'channels' in ij and not self.is_rgb:
+ shape.append(ij['channels'])
+ axes.append('C')
+ remain = len(self.pages) // (product(shape) if shape else 1)
+ if remain > 1:
+ shape.append(remain)
+ axes.append('I')
+ shape.extend(page0.shape)
+ axes.extend(page0.axes)
+ axes = ''.join(axes)
+ series = [Record(pages=self.pages, shape=tuple(shape), axes=axes,
+ dtype=numpy.dtype(page0.dtype))]
+ elif self.is_nih:
+ if len(self.pages) == 1:
+ shape = page0.shape
+ axes = page0.axes
+ else:
+ shape = (len(self.pages),) + page0.shape
+ axes = 'I' + page0.axes
+ series = [Record(pages=self.pages, shape=shape, axes=axes,
+ dtype=numpy.dtype(page0.dtype))]
+ elif page0.is_shaped:
+ # TODO: shaped files can contain multiple series
+ shape = page0.tags['image_description'].value[7:-1]
+ shape = tuple(int(i) for i in shape.split(b','))
+ series = [Record(pages=self.pages, shape=shape,
+ axes='Q' * len(shape),
+ dtype=numpy.dtype(page0.dtype))]
+
+ # generic detection of series
+ if not series:
+ shapes = []
+ pages = {}
+ for page in self.pages:
+ if not page.shape:
+ continue
+ shape = page.shape + (page.axes,
+ page.compression in TIFF_DECOMPESSORS)
+ if shape not in pages:
+ shapes.append(shape)
+ pages[shape] = [page]
+ else:
+ pages[shape].append(page)
+ series = [Record(pages=pages[s],
+ axes=(('I' + s[-2])
+ if len(pages[s]) > 1 else s[-2]),
+ dtype=numpy.dtype(pages[s][0].dtype),
+ shape=((len(pages[s]), ) + s[:-2]
+ if len(pages[s]) > 1 else s[:-2]))
+ for s in shapes]
+
+ # remove empty series, e.g. in MD Gel files
+ series = [s for s in series if sum(s.shape) > 0]
+
+ return series
+
+ def asarray(self, key=None, series=None, memmap=False):
+ """Return image data from multiple TIFF pages as numpy array.
+
+ By default the first image series is returned.
+
+ Parameters
+ ----------
+ key : int, slice, or sequence of page indices
+ Defines which pages to return as array.
+ series : int
+ Defines which series of pages to return as array.
+ memmap : bool
+ If True, return an array stored in a binary file on disk
+ if possible.
+
+ """
+ if key is None and series is None:
+ series = 0
+ if series is not None:
+ pages = self.series[series].pages
+ else:
+ pages = self.pages
+
+ if key is None:
+ pass
+ elif isinstance(key, int):
+ pages = [pages[key]]
+ elif isinstance(key, slice):
+ pages = pages[key]
+ elif isinstance(key, collections.Iterable):
+ pages = [pages[k] for k in key]
+ else:
+ raise TypeError("key must be an int, slice, or sequence")
+
+ if not len(pages):
+ raise ValueError("no pages selected")
+
+ if self.is_nih:
+ if pages[0].is_palette:
+ result = stack_pages(pages, colormapped=False, squeeze=False)
+ result = numpy.take(pages[0].color_map, result, axis=1)
+ result = numpy.swapaxes(result, 0, 1)
+ else:
+ result = stack_pages(pages, memmap=memmap,
+ colormapped=False, squeeze=False)
+ elif len(pages) == 1:
+ return pages[0].asarray(memmap=memmap)
+ elif self.is_ome:
+ assert not self.is_palette, "color mapping disabled for ome-tiff"
+ if any(p is None for p in pages):
+ # zero out missing pages
+ firstpage = next(p for p in pages if p)
+ nopage = numpy.zeros_like(
+ firstpage.asarray(memmap=False))
+ s = self.series[series]
+ if memmap:
+ with tempfile.NamedTemporaryFile() as fh:
+ result = numpy.memmap(fh, dtype=s.dtype, shape=s.shape)
+ result = result.reshape(-1)
+ else:
+ result = numpy.empty(s.shape, s.dtype).reshape(-1)
+ index = 0
+
+ class KeepOpen:
+ # keep Tiff files open between consecutive pages
+ def __init__(self, parent, close):
+ self.master = parent
+ self.parent = parent
+ self._close = close
+
+ def open(self, page):
+ if self._close and page and page.parent != self.parent:
+ if self.parent != self.master:
+ self.parent.filehandle.close()
+ self.parent = page.parent
+ self.parent.filehandle.open()
+
+ def close(self):
+ if self._close and self.parent != self.master:
+ self.parent.filehandle.close()
+
+ keep = KeepOpen(self, self._multifile_close)
+ for page in pages:
+ keep.open(page)
+ if page:
+ a = page.asarray(memmap=False, colormapped=False,
+ reopen=False)
+ else:
+ a = nopage
+ try:
+ result[index:index + a.size] = a.reshape(-1)
+ except ValueError as e:
+ warnings.warn("ome-tiff: %s" % e)
+ break
+ index += a.size
+ keep.close()
+ else:
+ result = stack_pages(pages, memmap=memmap)
+
+ if key is None:
+ try:
+ result.shape = self.series[series].shape
+ except ValueError:
+ try:
+ warnings.warn("failed to reshape %s to %s" % (
+ result.shape, self.series[series].shape))
+ # try series of expected shapes
+ result.shape = (-1,) + self.series[series].shape
+ except ValueError:
+ # revert to generic shape
+ result.shape = (-1,) + pages[0].shape
+ else:
+ result.shape = (-1,) + pages[0].shape
+ return result
+
+ def _omeseries(self):
+ """Return image series in OME-TIFF file(s)."""
+ root = etree.fromstring(self.pages[0].tags['image_description'].value)
+ uuid = root.attrib.get('UUID', None)
+ self._files = {uuid: self}
+ dirname = self._fh.dirname
+ modulo = {}
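+        # OME 'modulo' annotations describe extra dimensions stored within
+        # a standard axis (e.g. lifetime bins along T); collected below as
+        # {axis: (new_axis, labels)}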
+ result = []
+ for element in root:
+ if element.tag.endswith('BinaryOnly'):
+ warnings.warn("ome-xml: not an ome-tiff master file")
+ break
+ if element.tag.endswith('StructuredAnnotations'):
+ for annot in element:
+ if not annot.attrib.get('Namespace',
+ '').endswith('modulo'):
+ continue
+ for value in annot:
+ for modul in value:
+ for along in modul:
+ if not along.tag[:-1].endswith('Along'):
+ continue
+ axis = along.tag[-1]
+ newaxis = along.attrib.get('Type', 'other')
+ newaxis = AXES_LABELS[newaxis]
+ if 'Start' in along.attrib:
+ labels = range(
+ int(along.attrib['Start']),
+ int(along.attrib['End']) + 1,
+ int(along.attrib.get('Step', 1)))
+ else:
+ labels = [label.text for label in along
+ if label.tag.endswith('Label')]
+ modulo[axis] = (newaxis, labels)
+ if not element.tag.endswith('Image'):
+ continue
+ for pixels in element:
+ if not pixels.tag.endswith('Pixels'):
+ continue
+ atr = pixels.attrib
+ dtype = atr.get('Type', None)
+ axes = ''.join(reversed(atr['DimensionOrder']))
+ shape = list(int(atr['Size'+ax]) for ax in axes)
+ size = product(shape[:-2])
+ ifds = [None] * size
+ for data in pixels:
+ if not data.tag.endswith('TiffData'):
+ continue
+ atr = data.attrib
+ ifd = int(atr.get('IFD', 0))
+ num = int(atr.get('NumPlanes', 1 if 'IFD' in atr else 0))
+ num = int(atr.get('PlaneCount', num))
+ idx = [int(atr.get('First'+ax, 0)) for ax in axes[:-2]]
+ try:
+ idx = numpy.ravel_multi_index(idx, shape[:-2])
+ except ValueError:
+ # ImageJ produces invalid ome-xml when cropping
+ warnings.warn("ome-xml: invalid TiffData index")
+ continue
+ for uuid in data:
+ if not uuid.tag.endswith('UUID'):
+ continue
+ if uuid.text not in self._files:
+ if not self._multifile:
+ # abort reading multifile OME series
+ # and fall back to generic series
+ return []
+ fname = uuid.attrib['FileName']
+                            try:
+                                tif = TiffFile(os.path.join(dirname, fname))
+                            except (IOError, ValueError):
+                                # TiffFile closes its own handle on error;
+                                # 'tif' may be unbound here, so don't touch it
+                                warnings.warn(
+                                    "ome-xml: failed to read '%s'" % fname)
+                                break
+ self._files[uuid.text] = tif
+ if self._multifile_close:
+ tif.close()
+ pages = self._files[uuid.text].pages
+ try:
+ for i in range(num if num else len(pages)):
+ ifds[idx + i] = pages[ifd + i]
+ except IndexError:
+ warnings.warn("ome-xml: index out of range")
+ # only process first uuid
+ break
+ else:
+ pages = self.pages
+ try:
+ for i in range(num if num else len(pages)):
+ ifds[idx + i] = pages[ifd + i]
+ except IndexError:
+ warnings.warn("ome-xml: index out of range")
+ if all(i is None for i in ifds):
+ # skip images without data
+ continue
+ dtype = next(i for i in ifds if i).dtype
+ result.append(Record(axes=axes, shape=shape, pages=ifds,
+ dtype=numpy.dtype(dtype)))
+
+ for record in result:
+ for axis, (newaxis, labels) in modulo.items():
+ i = record.axes.index(axis)
+ size = len(labels)
+ if record.shape[i] == size:
+ record.axes = record.axes.replace(axis, newaxis, 1)
+ else:
+ record.shape[i] //= size
+ record.shape.insert(i+1, size)
+ record.axes = record.axes.replace(axis, axis+newaxis, 1)
+ record.shape = tuple(record.shape)
+
+ # squeeze dimensions
+ for record in result:
+ record.shape, record.axes = squeeze_axes(record.shape, record.axes)
+
+ return result
+
+ def __len__(self):
+ """Return number of image pages in file."""
+ return len(self.pages)
+
+ def __getitem__(self, key):
+ """Return specified page."""
+ return self.pages[key]
+
+ def __iter__(self):
+ """Return iterator over pages."""
+ return iter(self.pages)
+
+ def __str__(self):
+ """Return string containing information about file."""
+ result = [
+ self._fh.name.capitalize(),
+ format_size(self._fh.size),
+ {'<': 'little endian', '>': 'big endian'}[self.byteorder]]
+ if self.is_bigtiff:
+ result.append("bigtiff")
+ if len(self.pages) > 1:
+ result.append("%i pages" % len(self.pages))
+ if len(self.series) > 1:
+ result.append("%i series" % len(self.series))
+ if len(self._files) > 1:
+ result.append("%i files" % (len(self._files)))
+ return ", ".join(result)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self.close()
+
+ @lazyattr
+ def fstat(self):
+ try:
+ return os.fstat(self._fh.fileno())
+ except Exception: # io.UnsupportedOperation
+ return None
+
+ @lazyattr
+ def is_bigtiff(self):
+ return self.offset_size != 4
+
+ @lazyattr
+ def is_rgb(self):
+ return all(p.is_rgb for p in self.pages)
+
+ @lazyattr
+ def is_palette(self):
+ return all(p.is_palette for p in self.pages)
+
+ @lazyattr
+ def is_mdgel(self):
+ return any(p.is_mdgel for p in self.pages)
+
+ @lazyattr
+ def is_mediacy(self):
+ return any(p.is_mediacy for p in self.pages)
+
+ @lazyattr
+ def is_stk(self):
+ return all(p.is_stk for p in self.pages)
+
+ @lazyattr
+ def is_lsm(self):
+ return self.pages[0].is_lsm
+
+ @lazyattr
+ def is_imagej(self):
+ return self.pages[0].is_imagej
+
+ @lazyattr
+ def is_micromanager(self):
+ return self.pages[0].is_micromanager
+
+ @lazyattr
+ def is_nih(self):
+ return self.pages[0].is_nih
+
+ @lazyattr
+ def is_fluoview(self):
+ return self.pages[0].is_fluoview
+
+ @lazyattr
+ def is_ome(self):
+ return self.pages[0].is_ome
+
+
+class TiffPage(object):
+ """A TIFF image file directory (IFD).
+
+ Attributes
+ ----------
+ index : int
+ Index of page in file.
+ dtype : str {TIFF_SAMPLE_DTYPES}
+ Data type of image, colormapped if applicable.
+ shape : tuple
+ Dimensions of the image array in TIFF page,
+ colormapped and with one alpha channel if applicable.
+ axes : str
+ Axes label codes:
+ 'X' width, 'Y' height, 'S' sample, 'I' image series|page|plane,
+ 'Z' depth, 'C' color|em-wavelength|channel, 'E' ex-wavelength|lambda,
+ 'T' time, 'R' region|tile, 'A' angle, 'P' phase, 'H' lifetime,
+ 'L' exposure, 'V' event, 'Q' unknown, '_' missing
+ tags : TiffTags
+ Dictionary of tags in page.
+ Tag values are also directly accessible as attributes.
+    color_map : numpy array
+        Color look-up table, if present.
+    cz_lsm_scan_info : Record(dict)
+        LSM scan info attributes, if present.
+    imagej_tags : Record(dict)
+        Consolidated ImageJ description and metadata tags, if present.
+    uic_tags : Record(dict)
+        Consolidated MetaMorph STK/UIC tags, if present.
+
+ All attributes are read-only.
+
+ Notes
+ -----
+ The internal, normalized '_shape' attribute is 6 dimensional:
+
+ 0. number planes (stk)
+ 1. planar samples_per_pixel
+ 2. image_depth Z (sgi)
+ 3. image_length Y
+ 4. image_width X
+ 5. contig samples_per_pixel
+
+ """
+ def __init__(self, parent):
+ """Initialize instance from file."""
+ self.parent = parent
+ self.index = len(parent.pages)
+ self.shape = self._shape = ()
+ self.dtype = self._dtype = None
+ self.axes = ""
+ self.tags = TiffTags()
+
+ self._fromfile()
+ self._process_tags()
+
+ def _fromfile(self):
+ """Read TIFF IFD structure and its tags from file.
+
+ File cursor must be at storage position of IFD offset and is left at
+ offset to next IFD.
+
+ Raises StopIteration if offset (first bytes read) is 0.
+
+ """
+ fh = self.parent.filehandle
+ byteorder = self.parent.byteorder
+ offset_size = self.parent.offset_size
+
+ fmt = {4: 'I', 8: 'Q'}[offset_size]
+ offset = struct.unpack(byteorder + fmt, fh.read(offset_size))[0]
+ if not offset:
+ raise StopIteration()
+
+ # read standard tags
+ tags = self.tags
+ fh.seek(offset)
+ fmt, size = {4: ('H', 2), 8: ('Q', 8)}[offset_size]
+ try:
+ numtags = struct.unpack(byteorder + fmt, fh.read(size))[0]
+ except Exception:
+ warnings.warn("corrupted page list")
+ raise StopIteration()
+
+ tagcode = 0
+ for _ in range(numtags):
+ try:
+ tag = TiffTag(self.parent)
+ # print(tag)
+ except TiffTag.Error as e:
+ warnings.warn(str(e))
+ continue
+ if tagcode > tag.code:
+ # expected for early LSM and tifffile versions
+ warnings.warn("tags are not ordered by code")
+ tagcode = tag.code
+ if tag.name not in tags:
+ tags[tag.name] = tag
+ else:
+                # some files contain multiple IFD entries with the same code
+                # e.g. MicroManager files contain two image_description tags
+                i = 1
+                while True:
+                    name = "%s_%i" % (tag.name, i)
+                    if name not in tags:
+                        tags[name] = tag
+                        break
+                    i += 1
+
+ pos = fh.tell()
+
+ if self.is_lsm or (self.index and self.parent.is_lsm):
+ # correct non standard LSM bitspersample tags
+ self.tags['bits_per_sample']._correct_lsm_bitspersample(self)
+
+ if self.is_lsm:
+ # read LSM info subrecords
+ for name, reader in CZ_LSM_INFO_READERS.items():
+ try:
+ offset = self.cz_lsm_info['offset_'+name]
+ except KeyError:
+ continue
+ if offset < 8:
+ # older LSM revision
+ continue
+ fh.seek(offset)
+ try:
+ setattr(self, 'cz_lsm_'+name, reader(fh))
+ except ValueError:
+ pass
+
+ elif self.is_stk and 'uic1tag' in tags and not tags['uic1tag'].value:
+ # read uic1tag now that plane count is known
+ uic1tag = tags['uic1tag']
+ fh.seek(uic1tag.value_offset)
+ tags['uic1tag'].value = Record(
+ read_uic1tag(fh, byteorder, uic1tag.dtype, uic1tag.count,
+ tags['uic2tag'].count))
+ fh.seek(pos)
+
+ def _process_tags(self):
+ """Validate standard tags and initialize attributes.
+
+ Raise ValueError if tag values are not supported.
+
+ """
+ tags = self.tags
+ for code, (name, default, dtype, count, validate) in TIFF_TAGS.items():
+ if not (name in tags or default is None):
+ tags[name] = TiffTag(code, dtype=dtype, count=count,
+ value=default, name=name)
+ if name in tags and validate:
+ try:
+ if tags[name].count == 1:
+ setattr(self, name, validate[tags[name].value])
+ else:
+ setattr(self, name, tuple(
+ validate[value] for value in tags[name].value))
+ except KeyError:
+ raise ValueError("%s.value (%s) not supported" %
+ (name, tags[name].value))
+
+ tag = tags['bits_per_sample']
+ if tag.count == 1:
+ self.bits_per_sample = tag.value
+ else:
+ # LSM might list more items than samples_per_pixel
+ value = tag.value[:self.samples_per_pixel]
+ if any((v-value[0] for v in value)):
+ self.bits_per_sample = value
+ else:
+ self.bits_per_sample = value[0]
+
+ tag = tags['sample_format']
+ if tag.count == 1:
+ self.sample_format = TIFF_SAMPLE_FORMATS[tag.value]
+ else:
+ value = tag.value[:self.samples_per_pixel]
+ if any((v-value[0] for v in value)):
+ self.sample_format = [TIFF_SAMPLE_FORMATS[v] for v in value]
+ else:
+ self.sample_format = TIFF_SAMPLE_FORMATS[value[0]]
+
+ if 'photometric' not in tags:
+ self.photometric = None
+
+ if 'image_depth' not in tags:
+ self.image_depth = 1
+
+ if 'image_length' in tags:
+ self.strips_per_image = int(math.floor(
+ float(self.image_length + self.rows_per_strip - 1) /
+ self.rows_per_strip))
+ else:
+ self.strips_per_image = 0
+
+ key = (self.sample_format, self.bits_per_sample)
+ self.dtype = self._dtype = TIFF_SAMPLE_DTYPES.get(key, None)
+
+ if 'image_length' not in self.tags or 'image_width' not in self.tags:
+ # some GEL file pages are missing image data
+ self.image_length = 0
+ self.image_width = 0
+ self.image_depth = 0
+ self.strip_offsets = 0
+ self._shape = ()
+ self.shape = ()
+ self.axes = ''
+
+ if self.is_palette:
+ self.dtype = self.tags['color_map'].dtype[1]
+ self.color_map = numpy.array(self.color_map, self.dtype)
+ dmax = self.color_map.max()
+ if dmax < 256:
+ self.dtype = numpy.uint8
+ self.color_map = self.color_map.astype(self.dtype)
+ #else:
+ # self.dtype = numpy.uint8
+ # self.color_map >>= 8
+ # self.color_map = self.color_map.astype(self.dtype)
+ self.color_map.shape = (3, -1)
+
+ # determine shape of data
+ image_length = self.image_length
+ image_width = self.image_width
+ image_depth = self.image_depth
+ samples_per_pixel = self.samples_per_pixel
+
+ if self.is_stk:
+ assert self.image_depth == 1
+ planes = self.tags['uic2tag'].count
+ if self.is_contig:
+ self._shape = (planes, 1, 1, image_length, image_width,
+ samples_per_pixel)
+ if samples_per_pixel == 1:
+ self.shape = (planes, image_length, image_width)
+ self.axes = 'YX'
+ else:
+ self.shape = (planes, image_length, image_width,
+ samples_per_pixel)
+ self.axes = 'YXS'
+ else:
+ self._shape = (planes, samples_per_pixel, 1, image_length,
+ image_width, 1)
+ if samples_per_pixel == 1:
+ self.shape = (planes, image_length, image_width)
+ self.axes = 'YX'
+ else:
+ self.shape = (planes, samples_per_pixel, image_length,
+ image_width)
+ self.axes = 'SYX'
+ # detect type of series
+ if planes == 1:
+ self.shape = self.shape[1:]
+ elif numpy.all(self.uic2tag.z_distance != 0):
+ self.axes = 'Z' + self.axes
+ elif numpy.all(numpy.diff(self.uic2tag.time_created) != 0):
+ self.axes = 'T' + self.axes
+ else:
+ self.axes = 'I' + self.axes
+ # DISABLED
+ if self.is_palette:
+ assert False, "color mapping disabled for stk"
+ if self.color_map.shape[1] >= 2**self.bits_per_sample:
+ if image_depth == 1:
+ self.shape = (3, planes, image_length, image_width)
+ else:
+ self.shape = (3, planes, image_depth, image_length,
+ image_width)
+ self.axes = 'C' + self.axes
+ else:
+ warnings.warn("palette cannot be applied")
+ self.is_palette = False
+ elif self.is_palette:
+ samples = 1
+ if 'extra_samples' in self.tags:
+ samples += len(self.extra_samples)
+ if self.is_contig:
+ self._shape = (1, 1, image_depth, image_length, image_width,
+ samples)
+ else:
+ self._shape = (1, samples, image_depth, image_length,
+ image_width, 1)
+ if self.color_map.shape[1] >= 2**self.bits_per_sample:
+ if image_depth == 1:
+ self.shape = (3, image_length, image_width)
+ self.axes = 'CYX'
+ else:
+ self.shape = (3, image_depth, image_length, image_width)
+ self.axes = 'CZYX'
+ else:
+ warnings.warn("palette cannot be applied")
+ self.is_palette = False
+ if image_depth == 1:
+ self.shape = (image_length, image_width)
+ self.axes = 'YX'
+ else:
+ self.shape = (image_depth, image_length, image_width)
+ self.axes = 'ZYX'
+ elif self.is_rgb or samples_per_pixel > 1:
+ if self.is_contig:
+ self._shape = (1, 1, image_depth, image_length, image_width,
+ samples_per_pixel)
+ if image_depth == 1:
+ self.shape = (image_length, image_width, samples_per_pixel)
+ self.axes = 'YXS'
+ else:
+ self.shape = (image_depth, image_length, image_width,
+ samples_per_pixel)
+ self.axes = 'ZYXS'
+ else:
+ self._shape = (1, samples_per_pixel, image_depth,
+ image_length, image_width, 1)
+ if image_depth == 1:
+ self.shape = (samples_per_pixel, image_length, image_width)
+ self.axes = 'SYX'
+ else:
+ self.shape = (samples_per_pixel, image_depth,
+ image_length, image_width)
+ self.axes = 'SZYX'
+ if False and self.is_rgb and 'extra_samples' in self.tags:
+ # DISABLED: only use RGB and first alpha channel if exists
+ extra_samples = self.extra_samples
+ if self.tags['extra_samples'].count == 1:
+ extra_samples = (extra_samples, )
+ for exs in extra_samples:
+ if exs in ('unassalpha', 'assocalpha', 'unspecified'):
+ if self.is_contig:
+ self.shape = self.shape[:-1] + (4,)
+ else:
+ self.shape = (4,) + self.shape[1:]
+ break
+ else:
+ self._shape = (1, 1, image_depth, image_length, image_width, 1)
+ if image_depth == 1:
+ self.shape = (image_length, image_width)
+ self.axes = 'YX'
+ else:
+ self.shape = (image_depth, image_length, image_width)
+ self.axes = 'ZYX'
+ if not self.compression and 'strip_byte_counts' not in tags:
+ self.strip_byte_counts = (
+ product(self.shape) * (self.bits_per_sample // 8), )
+
+ assert len(self.shape) == len(self.axes)
+
+ def asarray(self, squeeze=True, colormapped=True, rgbonly=False,
+ scale_mdgel=False, memmap=False, reopen=True):
+ """Read image data from file and return as numpy array.
+
+ Raise ValueError if format is unsupported.
+ If any of 'squeeze', 'colormapped', or 'rgbonly' are not the default,
+ the shape of the returned array might be different from the page shape.
+
+ Parameters
+ ----------
+ squeeze : bool
+ If True, all length-1 dimensions (except X and Y) are
+ squeezed out from result.
+ colormapped : bool
+ If True, color mapping is applied for palette-indexed images.
+ rgbonly : bool
+ If True, return RGB(A) image without additional extra samples.
+ memmap : bool
+ If True, use numpy.memmap to read arrays from file if possible.
+            For use on 64-bit systems and files with large contiguous data.
+ reopen : bool
+ If True and the parent file handle is closed, the file is
+ temporarily re-opened (and closed if no exception occurs).
+ scale_mdgel : bool
+ If True, MD Gel data will be scaled according to the private
+ metadata in the second TIFF page. The dtype will be float32.
+
+ """
+ if not self._shape:
+ return
+
+ if self.dtype is None:
+ raise ValueError("data type not supported: %s%i" % (
+ self.sample_format, self.bits_per_sample))
+ if self.compression not in TIFF_DECOMPESSORS:
+ raise ValueError("cannot decompress %s" % self.compression)
+ tag = self.tags['sample_format']
+ if tag.count != 1 and any((i-tag.value[0] for i in tag.value)):
+ raise ValueError("sample formats don't match %s" % str(tag.value))
+
+ fh = self.parent.filehandle
+ closed = fh.closed
+ if closed:
+ if reopen:
+ fh.open()
+ else:
+ raise IOError("file handle is closed")
+
+ dtype = self._dtype
+ shape = self._shape
+ image_width = self.image_width
+ image_length = self.image_length
+ image_depth = self.image_depth
+ typecode = self.parent.byteorder + dtype
+ bits_per_sample = self.bits_per_sample
+
+ if self.is_tiled:
+ if 'tile_offsets' in self.tags:
+ byte_counts = self.tile_byte_counts
+ offsets = self.tile_offsets
+ else:
+ byte_counts = self.strip_byte_counts
+ offsets = self.strip_offsets
+ tile_width = self.tile_width
+ tile_length = self.tile_length
+ tile_depth = self.tile_depth if 'tile_depth' in self.tags else 1
+ tw = (image_width + tile_width - 1) // tile_width
+ tl = (image_length + tile_length - 1) // tile_length
+ td = (image_depth + tile_depth - 1) // tile_depth
+ shape = (shape[0], shape[1],
+ td*tile_depth, tl*tile_length, tw*tile_width, shape[-1])
+ tile_shape = (tile_depth, tile_length, tile_width, shape[-1])
+ runlen = tile_width
+ else:
+ byte_counts = self.strip_byte_counts
+ offsets = self.strip_offsets
+ runlen = image_width
+
+ if any(o < 2 for o in offsets):
+ raise ValueError("corrupted page")
+
+ if memmap and self._is_memmappable(rgbonly, colormapped):
+ result = fh.memmap_array(typecode, shape, offset=offsets[0])
+ elif self.is_contiguous:
+ fh.seek(offsets[0])
+ result = fh.read_array(typecode, product(shape))
+ result = result.astype('=' + dtype)
+ else:
+ if self.is_contig:
+ runlen *= self.samples_per_pixel
+ if bits_per_sample in (8, 16, 32, 64, 128):
+ if (bits_per_sample * runlen) % 8:
+ raise ValueError("data and sample size mismatch")
+
+ def unpack(x):
+ try:
+ return numpy.fromstring(x, typecode)
+ except ValueError as e:
+ # strips may be missing EOI
+ warnings.warn("unpack: %s" % e)
+ xlen = ((len(x) // (bits_per_sample // 8))
+ * (bits_per_sample // 8))
+ return numpy.fromstring(x[:xlen], typecode)
+
+ elif isinstance(bits_per_sample, tuple):
+ def unpack(x):
+ return unpackrgb(x, typecode, bits_per_sample)
+ else:
+ def unpack(x):
+ return unpackints(x, typecode, bits_per_sample, runlen)
+
+ decompress = TIFF_DECOMPESSORS[self.compression]
+ if self.compression == 'jpeg':
+ table = self.jpeg_tables if 'jpeg_tables' in self.tags else b''
+ decompress = lambda x: decodejpg(x, table, self.photometric)
+
+ if self.is_tiled:
+ result = numpy.empty(shape, dtype)
+ tw, tl, td, pl = 0, 0, 0, 0
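+                # tw/tl/td are write cursors along width/length/depth;
+                # pl indexes the planar sample dimension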
+ for offset, bytecount in zip(offsets, byte_counts):
+ fh.seek(offset)
+ tile = unpack(decompress(fh.read(bytecount)))
+ tile.shape = tile_shape
+ if self.predictor == 'horizontal':
+ numpy.cumsum(tile, axis=-2, dtype=dtype, out=tile)
+ result[0, pl, td:td+tile_depth,
+ tl:tl+tile_length, tw:tw+tile_width, :] = tile
+ del tile
+ tw += tile_width
+ if tw >= shape[4]:
+ tw, tl = 0, tl + tile_length
+ if tl >= shape[3]:
+ tl, td = 0, td + tile_depth
+ if td >= shape[2]:
+ td, pl = 0, pl + 1
+ result = result[...,
+ :image_depth, :image_length, :image_width, :]
+ else:
+ strip_size = (self.rows_per_strip * self.image_width *
+ self.samples_per_pixel)
+ result = numpy.empty(shape, dtype).reshape(-1)
+ index = 0
+ for offset, bytecount in zip(offsets, byte_counts):
+ fh.seek(offset)
+ strip = fh.read(bytecount)
+ strip = decompress(strip)
+ strip = unpack(strip)
+ size = min(result.size, strip.size, strip_size,
+ result.size - index)
+ result[index:index+size] = strip[:size]
+ del strip
+ index += size
+
+ result.shape = self._shape
+
+ if self.predictor == 'horizontal' and not (self.is_tiled and not
+ self.is_contiguous):
+ # work around bug in LSM510 software
+ if not (self.parent.is_lsm and not self.compression):
+ numpy.cumsum(result, axis=-2, dtype=dtype, out=result)
+
+ if colormapped and self.is_palette:
+ if self.color_map.shape[1] >= 2**bits_per_sample:
+ # FluoView and LSM might fail here
+ result = numpy.take(self.color_map,
+ result[:, 0, :, :, :, 0], axis=1)
+ elif rgbonly and self.is_rgb and 'extra_samples' in self.tags:
+ # return only RGB and first alpha channel if exists
+ extra_samples = self.extra_samples
+ if self.tags['extra_samples'].count == 1:
+ extra_samples = (extra_samples, )
+ for i, exs in enumerate(extra_samples):
+ if exs in ('unassalpha', 'assocalpha', 'unspecified'):
+ if self.is_contig:
+ result = result[..., [0, 1, 2, 3+i]]
+ else:
+ result = result[:, [0, 1, 2, 3+i]]
+ break
+ else:
+ if self.is_contig:
+ result = result[..., :3]
+ else:
+ result = result[:, :3]
+
+ if squeeze:
+ try:
+ result.shape = self.shape
+ except ValueError:
+ warnings.warn("failed to reshape from %s to %s" % (
+ str(result.shape), str(self.shape)))
+
+ if scale_mdgel and self.parent.is_mdgel:
+ # MD Gel stores private metadata in the second page
+ tags = self.parent.pages[1]
+ if tags.md_file_tag in (2, 128):
+ scale = tags.md_scale_pixel
+ scale = scale[0] / scale[1] # rational
+ result = result.astype('float32')
+ if tags.md_file_tag == 2:
+                    result **= 2 # square root data format
+ result *= scale
+
+ if closed:
+ # TODO: file remains open if an exception occurred above
+ fh.close()
+ return result
+
+ def _is_memmappable(self, rgbonly, colormapped):
+ """Return if image data in file can be memory mapped."""
+ if not self.parent.filehandle.is_file or not self.is_contiguous:
+ return False
+ return not (self.predictor or
+ (rgbonly and 'extra_samples' in self.tags) or
+ (colormapped and self.is_palette) or
+ ({'big': '>', 'little': '<'}[sys.byteorder] !=
+ self.parent.byteorder))
+
+ @lazyattr
+ def is_contiguous(self):
+ """Return offset and size of contiguous data, else None.
+
+ Excludes prediction and colormapping.
+
+ """
+ if self.compression or self.bits_per_sample not in (8, 16, 32, 64):
+ return
+ if self.is_tiled:
+ if (self.image_width != self.tile_width or
+ self.image_length % self.tile_length or
+ self.tile_width % 16 or self.tile_length % 16):
+ return
+ if ('image_depth' in self.tags and 'tile_depth' in self.tags and
+ (self.image_length != self.tile_length or
+ self.image_depth % self.tile_depth)):
+ return
+ offsets = self.tile_offsets
+ byte_counts = self.tile_byte_counts
+ else:
+ offsets = self.strip_offsets
+ byte_counts = self.strip_byte_counts
+ if len(offsets) == 1:
+ return offsets[0], byte_counts[0]
+ if self.is_stk or all(offsets[i] + byte_counts[i] == offsets[i+1]
+ or byte_counts[i+1] == 0 # no data/ignore offset
+ for i in range(len(offsets)-1)):
+ return offsets[0], sum(byte_counts)
+
+ def __str__(self):
+ """Return string containing information about page."""
+ s = ', '.join(s for s in (
+ ' x '.join(str(i) for i in self.shape),
+ str(numpy.dtype(self.dtype)),
+ '%s bit' % str(self.bits_per_sample),
+ self.photometric if 'photometric' in self.tags else '',
+ self.compression if self.compression else 'raw',
+ '|'.join(t[3:] for t in (
+ 'is_stk', 'is_lsm', 'is_nih', 'is_ome', 'is_imagej',
+ 'is_micromanager', 'is_fluoview', 'is_mdgel', 'is_mediacy',
+ 'is_sgi', 'is_reduced', 'is_tiled',
+ 'is_contiguous') if getattr(self, t))) if s)
+ return "Page %i: %s" % (self.index, s)
+
+ def __getattr__(self, name):
+ """Return tag value."""
+ if name in self.tags:
+ value = self.tags[name].value
+ setattr(self, name, value)
+ return value
+ raise AttributeError(name)
+
+ @lazyattr
+ def uic_tags(self):
+ """Consolidate UIC tags."""
+ if not self.is_stk:
+ raise AttributeError("uic_tags")
+ tags = self.tags
+ result = Record()
+ result.number_planes = tags['uic2tag'].count
+ if 'image_description' in tags:
+ result.plane_descriptions = self.image_description.split(b'\x00')
+ if 'uic1tag' in tags:
+ result.update(tags['uic1tag'].value)
+ if 'uic3tag' in tags:
+ result.update(tags['uic3tag'].value) # wavelengths
+ if 'uic4tag' in tags:
+ result.update(tags['uic4tag'].value) # override uic1 tags
+ uic2tag = tags['uic2tag'].value
+ result.z_distance = uic2tag.z_distance
+ result.time_created = uic2tag.time_created
+ result.time_modified = uic2tag.time_modified
+ try:
+ result.datetime_created = [
+ julian_datetime(*dt) for dt in
+ zip(uic2tag.date_created, uic2tag.time_created)]
+ result.datetime_modified = [
+ julian_datetime(*dt) for dt in
+ zip(uic2tag.date_modified, uic2tag.time_modified)]
+ except ValueError as e:
+ warnings.warn("uic_tags: %s" % e)
+ return result
+
+ @lazyattr
+ def imagej_tags(self):
+ """Consolidate ImageJ metadata."""
+ if not self.is_imagej:
+ raise AttributeError("imagej_tags")
+ tags = self.tags
+ if 'image_description_1' in tags:
+ # MicroManager
+ result = imagej_description(tags['image_description_1'].value)
+ else:
+ result = imagej_description(tags['image_description'].value)
+ if 'imagej_metadata' in tags:
+ try:
+ result.update(imagej_metadata(
+ tags['imagej_metadata'].value,
+ tags['imagej_byte_counts'].value,
+ self.parent.byteorder))
+ except Exception as e:
+ warnings.warn(str(e))
+ return Record(result)
+
+ @lazyattr
+ def is_rgb(self):
+ """True if page contains a RGB image."""
+ return ('photometric' in self.tags and
+ self.tags['photometric'].value == 2)
+
+ @lazyattr
+ def is_contig(self):
+ """True if page contains a contiguous image."""
+ return ('planar_configuration' in self.tags and
+ self.tags['planar_configuration'].value == 1)
+
+ @lazyattr
+ def is_palette(self):
+ """True if page contains a palette-colored image and not OME or STK."""
+ try:
+ # turn off color mapping for OME-TIFF and STK
+ if self.is_stk or self.is_ome or self.parent.is_ome:
+ return False
+ except IndexError:
+ pass # OME-XML not found in first page
+ return ('photometric' in self.tags and
+ self.tags['photometric'].value == 3)
+
+ @lazyattr
+ def is_tiled(self):
+ """True if page contains tiled image."""
+ return 'tile_width' in self.tags
+
+ @lazyattr
+ def is_reduced(self):
+ """True if page is a reduced image of another image."""
+ return bool(self.tags['new_subfile_type'].value & 1)
+
+ @lazyattr
+ def is_mdgel(self):
+ """True if page contains md_file_tag tag."""
+ return 'md_file_tag' in self.tags
+
+ @lazyattr
+ def is_mediacy(self):
+ """True if page contains Media Cybernetics Id tag."""
+ return ('mc_id' in self.tags and
+ self.tags['mc_id'].value.startswith(b'MC TIFF'))
+
+ @lazyattr
+ def is_stk(self):
+ """True if page contains UIC2Tag tag."""
+ return 'uic2tag' in self.tags
+
+ @lazyattr
+ def is_lsm(self):
+ """True if page contains LSM CZ_LSM_INFO tag."""
+ return 'cz_lsm_info' in self.tags
+
+ @lazyattr
+ def is_fluoview(self):
+ """True if page contains FluoView MM_STAMP tag."""
+ return 'mm_stamp' in self.tags
+
+ @lazyattr
+ def is_nih(self):
+ """True if page contains NIH image header."""
+ return 'nih_image_header' in self.tags
+
+ @lazyattr
+ def is_sgi(self):
+ """True if page contains SGI image and tile depth tags."""
+ return 'image_depth' in self.tags and 'tile_depth' in self.tags
+
+ @lazyattr
+ def is_ome(self):
+ """True if page contains OME-XML in image_description tag."""
+ return ('image_description' in self.tags and self.tags[
+ 'image_description'].value.startswith(b'<?xml version='))
+
+ @lazyattr
+ def is_shaped(self):
+ """True if page contains shape in image_description tag."""
+ return ('image_description' in self.tags and self.tags[
+ 'image_description'].value.startswith(b'shape=('))
+
+ @lazyattr
+ def is_imagej(self):
+ """True if page contains ImageJ description."""
+ return (
+ ('image_description' in self.tags and
+ self.tags['image_description'].value.startswith(b'ImageJ=')) or
+ ('image_description_1' in self.tags and # Micromanager
+ self.tags['image_description_1'].value.startswith(b'ImageJ=')))
+
+ @lazyattr
+ def is_micromanager(self):
+ """True if page contains Micro-Manager metadata."""
+ return 'micromanager_metadata' in self.tags
+
+
+class TiffTag(object):
+ """A TIFF tag structure.
+
+ Attributes
+ ----------
+ name : string
+ Attribute name of tag.
+ code : int
+ Decimal code of tag.
+ dtype : str
+ Datatype of tag data. One of TIFF_DATA_TYPES.
+ count : int
+ Number of values.
+ value : various types
+ Tag data as Python object.
+ value_offset : int
+ Location of value in file, if any.
+
+ All attributes are read-only.
+
+ """
+ __slots__ = ('code', 'name', 'count', 'dtype', 'value', 'value_offset',
+ '_offset', '_value', '_type')
+
+ class Error(Exception):
+ pass
+
+ def __init__(self, arg, **kwargs):
+ """Initialize instance from file or arguments."""
+ self._offset = None
+ if hasattr(arg, '_fh'):
+ self._fromfile(arg, **kwargs)
+ else:
+ self._fromdata(arg, **kwargs)
+
+ def _fromdata(self, code, dtype, count, value, name=None):
+ """Initialize instance from arguments."""
+ self.code = int(code)
+ self.name = name if name else str(code)
+ self.dtype = TIFF_DATA_TYPES[dtype]
+ self.count = int(count)
+ self.value = value
+ self._value = value
+ self._type = dtype
+
+ def _fromfile(self, parent):
+ """Read tag structure from open file. Advance file cursor."""
+ fh = parent.filehandle
+ byteorder = parent.byteorder
+ self._offset = fh.tell()
+ self.value_offset = self._offset + parent.offset_size + 4
+
+ fmt, size = {4: ('HHI4s', 12), 8: ('HHQ8s', 20)}[parent.offset_size]
+ data = fh.read(size)
+ code, dtype = struct.unpack(byteorder + fmt[:2], data[:4])
+ count, value = struct.unpack(byteorder + fmt[2:], data[4:])
+ self._value = value
+ self._type = dtype
+
+ if code in TIFF_TAGS:
+ name = TIFF_TAGS[code][0]
+ elif code in CUSTOM_TAGS:
+ name = CUSTOM_TAGS[code][0]
+ else:
+ name = str(code)
+
+ try:
+ dtype = TIFF_DATA_TYPES[self._type]
+ except KeyError:
+ raise TiffTag.Error("unknown tag data type %i" % self._type)
+
+ fmt = '%s%i%s' % (byteorder, count*int(dtype[0]), dtype[1])
+ size = struct.calcsize(fmt)
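+        # per the TIFF specification, values that fit into the IFD entry
+        # are stored inline; larger values (and custom structures) are
+        # stored at a separate offset that the entry points to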
+ if size > parent.offset_size or code in CUSTOM_TAGS:
+ pos = fh.tell()
+ tof = {4: 'I', 8: 'Q'}[parent.offset_size]
+ self.value_offset = offset = struct.unpack(byteorder+tof, value)[0]
+ if offset < 0 or offset > parent.filehandle.size:
+ raise TiffTag.Error("corrupt file - invalid tag value offset")
+ elif offset < 4:
+ raise TiffTag.Error("corrupt value offset for tag %i" % code)
+ fh.seek(offset)
+ if code in CUSTOM_TAGS:
+ readfunc = CUSTOM_TAGS[code][1]
+ value = readfunc(fh, byteorder, dtype, count)
+ if isinstance(value, dict): # numpy.core.records.record
+ value = Record(value)
+ elif code in TIFF_TAGS or dtype[-1] == 's':
+ value = struct.unpack(fmt, fh.read(size))
+ else:
+ value = read_numpy(fh, byteorder, dtype, count)
+ fh.seek(pos)
+ else:
+ value = struct.unpack(fmt, value[:size])
+
+ if code not in CUSTOM_TAGS and code not in (273, 279, 324, 325):
+ # scalar value if not strip/tile offsets/byte_counts
+ if len(value) == 1:
+ value = value[0]
+
+ if (dtype.endswith('s') and isinstance(value, bytes)
+ and self._type != 7):
+ # TIFF ASCII fields can contain multiple strings,
+ # each terminated with a NUL
+ value = stripascii(value)
+
+ self.code = code
+ self.name = name
+ self.dtype = dtype
+ self.count = count
+ self.value = value
+
+ def _correct_lsm_bitspersample(self, parent):
+ """Correct LSM bitspersample tag.
+
+ Old LSM writers may use a separate region for two 16-bit values,
+        even though the values would fit into the tag's value field.
+
+ """
+ if self.code == 258 and self.count == 2:
+ # TODO: test this. Need example file.
+ warnings.warn("correcting LSM bitspersample tag")
+ fh = parent.filehandle
+ tof = {4: '<I', 8: '<Q'}[parent.offset_size]
+ self.value_offset = struct.unpack(tof, self._value)[0]
+ fh.seek(self.value_offset)
+ self.value = struct.unpack("<HH", fh.read(4))
+
+ def as_str(self):
+ """Return value as human readable string."""
+ return ((str(self.value).split('\n', 1)[0]) if (self._type != 7)
+ else '<undefined>')
+
+ def __str__(self):
+ """Return string containing information about tag."""
+ return ' '.join(str(getattr(self, s)) for s in self.__slots__)
+
+
+class TiffSequence(object):
+ """Sequence of image files.
+
+ The data shape and dtype of all files must match.
+
+    Attributes
+ ----------
+ files : list
+ List of file names.
+ shape : tuple
+ Shape of image sequence.
+ axes : str
+ Labels of axes in shape.
+
+ Examples
+ --------
+ >>> tifs = TiffSequence("test.oif.files/*.tif") # doctest: +SKIP
+ >>> tifs.shape, tifs.axes # doctest: +SKIP
+ ((2, 100), 'CT')
+ >>> data = tifs.asarray() # doctest: +SKIP
+ >>> data.shape # doctest: +SKIP
+ (2, 100, 256, 256)
+
+ """
+ _patterns = {
+ 'axes': r"""
+ # matches Olympus OIF and Leica TIFF series
+ _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))
+ _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
+ _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
+ _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
+ _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
+ _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
+ _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
+ """}
+
+ class ParseError(Exception):
+ pass
+
+ def __init__(self, files, imread=TiffFile, pattern='axes',
+ *args, **kwargs):
+ """Initialize instance from multiple files.
+
+ Parameters
+ ----------
+ files : str, or sequence of str
+ Glob pattern or sequence of file names.
+ imread : function or class
+ Image read function or class with asarray function returning numpy
+ array from single file.
+ pattern : str
+ Regular expression pattern that matches axes names and sequence
+ indices in file names.
+ By default this matches Olympus OIF and Leica TIFF series.
+
+ """
+ if isinstance(files, basestring):
+ files = natural_sorted(glob.glob(files))
+ files = list(files)
+ if not files:
+ raise ValueError("no files found")
+ #if not os.path.isfile(files[0]):
+ # raise ValueError("file not found")
+ self.files = files
+
+ if hasattr(imread, 'asarray'):
+ # redefine imread
+ _imread = imread
+
+ def imread(fname, *args, **kwargs):
+ with _imread(fname) as im:
+ return im.asarray(*args, **kwargs)
+
+ self.imread = imread
+
+ self.pattern = self._patterns.get(pattern, pattern)
+ try:
+ self._parse()
+ if not self.axes:
+ self.axes = 'I'
+ except self.ParseError:
+ self.axes = 'I'
+ self.shape = (len(files),)
+ self._start_index = (0,)
+ self._indices = tuple((i,) for i in range(len(files)))
+
+ def __str__(self):
+ """Return string with information about image sequence."""
+ return "\n".join([
+ self.files[0],
+ '* files: %i' % len(self.files),
+ '* axes: %s' % self.axes,
+ '* shape: %s' % str(self.shape)])
+
+ def __len__(self):
+ return len(self.files)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self.close()
+
+ def close(self):
+ pass
+
+ def asarray(self, memmap=False, *args, **kwargs):
+ """Read image data from all files and return as single numpy array.
+
+ If memmap is True, return an array stored in a binary file on disk.
+ The args and kwargs parameters are passed to the imread function.
+
+ Raise IndexError or ValueError if image shapes don't match.
+
+ """
+ im = self.imread(self.files[0], *args, **kwargs)
+ shape = self.shape + im.shape
+ if memmap:
+ with tempfile.NamedTemporaryFile() as fh:
+ result = numpy.memmap(fh, dtype=im.dtype, shape=shape)
+ else:
+ result = numpy.zeros(shape, dtype=im.dtype)
+ result = result.reshape(-1, *im.shape)
+ for index, fname in zip(self._indices, self.files):
+ index = [i-j for i, j in zip(index, self._start_index)]
+ index = numpy.ravel_multi_index(index, self.shape)
+ im = self.imread(fname, *args, **kwargs)
+ result[index] = im
+ result.shape = shape
+ return result
+
+ def _parse(self):
+ """Get axes and shape from file names."""
+ if not self.pattern:
+ raise self.ParseError("invalid pattern")
+ pattern = re.compile(self.pattern, re.IGNORECASE | re.VERBOSE)
+ matches = pattern.findall(self.files[0])
+ if not matches:
+ raise self.ParseError("pattern doesn't match file names")
+ matches = matches[-1]
+ if len(matches) % 2:
+ raise self.ParseError("pattern doesn't match axis name and index")
+ axes = ''.join(m for m in matches[::2] if m)
+ if not axes:
+ raise self.ParseError("pattern doesn't match file names")
+
+ indices = []
+ for fname in self.files:
+ matches = pattern.findall(fname)[-1]
+ if axes != ''.join(m for m in matches[::2] if m):
+ raise ValueError("axes don't match within the image sequence")
+ indices.append([int(m) for m in matches[1::2] if m])
+ shape = tuple(numpy.max(indices, axis=0))
+ start_index = tuple(numpy.min(indices, axis=0))
+ shape = tuple(i-j+1 for i, j in zip(shape, start_index))
+ if product(shape) != len(self.files):
+ warnings.warn("files are missing. Missing data are zeroed")
+
+ self.axes = axes.upper()
+ self.shape = shape
+ self._indices = indices
+ self._start_index = start_index
+
+
+class Record(dict):
+ """Dictionary with attribute access.
+
+ Can also be initialized with numpy.core.records.record.
+
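+    A minimal example:
+
+    >>> r = Record(spam=1)
+    >>> r.spam
+    1
+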
+ """
+ __slots__ = ()
+
+ def __init__(self, arg=None, **kwargs):
+ if kwargs:
+ arg = kwargs
+ elif arg is None:
+ arg = {}
+ try:
+ dict.__init__(self, arg)
+ except (TypeError, ValueError):
+ for i, name in enumerate(arg.dtype.names):
+ v = arg[i]
+ self[name] = v if v.dtype.char != 'S' else stripnull(v)
+
+ def __getattr__(self, name):
+ return self[name]
+
+ def __setattr__(self, name, value):
+ self.__setitem__(name, value)
+
+ def __str__(self):
+ """Pretty print Record."""
+ s = []
+ lists = []
+ for k in sorted(self):
+ try:
+ if k.startswith('_'): # does not work with byte
+ continue
+ except AttributeError:
+ pass
+ v = self[k]
+ if isinstance(v, (list, tuple)) and len(v):
+ if isinstance(v[0], Record):
+ lists.append((k, v))
+ continue
+ elif isinstance(v[0], TiffPage):
+ v = [i.index for i in v if i]
+ s.append(
+ ("* %s: %s" % (k, str(v))).split("\n", 1)[0]
+ [:PRINT_LINE_LEN].rstrip())
+ for k, v in lists:
+ l = []
+ for i, w in enumerate(v):
+ l.append("* %s[%i]\n %s" % (k, i,
+ str(w).replace("\n", "\n ")))
+ s.append('\n'.join(l))
+ return '\n'.join(s)
+
+
+class TiffTags(Record):
+ """Dictionary of TiffTag with attribute access."""
+
+ def __str__(self):
+ """Return string with information about all tags."""
+ s = []
+ for tag in sorted(self.values(), key=lambda x: x.code):
+ typecode = "%i%s" % (tag.count * int(tag.dtype[0]), tag.dtype[1])
+ line = "* %i %s (%s) %s" % (
+ tag.code, tag.name, typecode, tag.as_str())
+ s.append(line[:PRINT_LINE_LEN].lstrip())
+ return '\n'.join(s)
+
+
+class FileHandle(object):
+ """Binary file handle.
+
+ * Handle embedded files (for CZI within CZI files).
+    * Allow re-opening closed files (for multi-file formats such as OME-TIFF).
+ * Read numpy arrays and records from file like objects.
+
+ Only binary read, seek, tell, and close are supported on embedded files.
+    When initialized from another file handle, do not use the other handle
+    until this FileHandle is closed.
+
+ Attributes
+ ----------
+ name : str
+ Name of the file.
+ path : str
+ Absolute path to file.
+ size : int
+ Size of file in bytes.
+ is_file : bool
+        If True, file has a fileno and can be memory-mapped.
+
+ All attributes are read-only.
+
+ """
+ __slots__ = ('_fh', '_arg', '_mode', '_name', '_dir',
+ '_offset', '_size', '_close', 'is_file')
+
+ def __init__(self, arg, mode='rb', name=None, offset=None, size=None):
+ """Initialize file handle from file name or another file handle.
+
+ Parameters
+ ----------
+ arg : str, File, or FileHandle
+ File name or open file handle.
+ mode : str
+ File open mode in case 'arg' is a file name.
+ name : str
+ Optional name of file in case 'arg' is a file handle.
+ offset : int
+ Optional start position of embedded file. By default this is
+ the current file position.
+ size : int
+ Optional size of embedded file. By default this is the number
+ of bytes from the 'offset' to the end of the file.
+
+ """
+ self._fh = None
+ self._arg = arg
+ self._mode = mode
+ self._name = name
+ self._dir = ''
+ self._offset = offset
+ self._size = size
+ self._close = True
+ self.is_file = False
+ self.open()
+
+ def open(self):
+ """Open or re-open file."""
+ if self._fh:
+ return # file is open
+
+ if isinstance(self._arg, basestring):
+ # file name
+ self._arg = os.path.abspath(self._arg)
+ self._dir, self._name = os.path.split(self._arg)
+ self._fh = open(self._arg, self._mode)
+ self._close = True
+ if self._offset is None:
+ self._offset = 0
+ elif isinstance(self._arg, FileHandle):
+ # FileHandle
+ self._fh = self._arg._fh
+ if self._offset is None:
+ self._offset = 0
+ self._offset += self._arg._offset
+ self._close = False
+ if not self._name:
+ if self._offset:
+ name, ext = os.path.splitext(self._arg._name)
+ self._name = "%s@%i%s" % (name, self._offset, ext)
+ else:
+ self._name = self._arg._name
+ self._dir = self._arg._dir
+ else:
+ # open file object
+ self._fh = self._arg
+ if self._offset is None:
+ self._offset = self._arg.tell()
+ self._close = False
+ if not self._name:
+ try:
+ self._dir, self._name = os.path.split(self._fh.name)
+ except AttributeError:
+ self._name = "Unnamed stream"
+
+ if self._offset:
+ self._fh.seek(self._offset)
+
+ if self._size is None:
+ pos = self._fh.tell()
+            self._fh.seek(0, 2)
+            self._size = self._fh.tell() - self._offset
+ self._fh.seek(pos)
+
+ try:
+ self._fh.fileno()
+ self.is_file = True
+ except Exception:
+ self.is_file = False
+
+ def read(self, size=-1):
+ """Read 'size' bytes from file, or until EOF is reached."""
+ if size < 0 and self._offset:
+ size = self._size
+ return self._fh.read(size)
+
+ def memmap_array(self, dtype, shape, offset=0, mode='r', order='C'):
+ """Return numpy.memmap of data stored in file."""
+ if not self.is_file:
+ raise ValueError("Can not memory map file without fileno.")
+ return numpy.memmap(self._fh, dtype=dtype, mode=mode,
+ offset=self._offset + offset,
+ shape=shape, order=order)
+
+ def read_array(self, dtype, count=-1, sep=""):
+ """Return numpy array from file.
+
+ Work around numpy issue #2230, "numpy.fromfile does not accept
+ StringIO object" https://github.com/numpy/numpy/issues/2230.
+
+ """
+ try:
+ return numpy.fromfile(self._fh, dtype, count, sep)
+ except IOError:
+ if count < 0:
+ size = self._size
+ else:
+ size = count * numpy.dtype(dtype).itemsize
+ data = self._fh.read(size)
+ return numpy.fromstring(data, dtype, count, sep)
+
+ def read_record(self, dtype, shape=1, byteorder=None):
+ """Return numpy record from file."""
+ try:
+ rec = numpy.rec.fromfile(self._fh, dtype, shape,
+ byteorder=byteorder)
+ except Exception:
+ dtype = numpy.dtype(dtype)
+ if shape is None:
+ shape = self._size // dtype.itemsize
+ size = product(sequence(shape)) * dtype.itemsize
+ data = self._fh.read(size)
+ return numpy.rec.fromstring(data, dtype, shape,
+ byteorder=byteorder)
+ return rec[0] if shape == 1 else rec
+
+ def tell(self):
+ """Return file's current position."""
+ return self._fh.tell() - self._offset
+
+ def seek(self, offset, whence=0):
+ """Set file's current position."""
+ if self._offset:
+ if whence == 0:
+ self._fh.seek(self._offset + offset, whence)
+ return
+ elif whence == 2:
+ self._fh.seek(self._offset + self._size + offset, 0)
+ return
+ self._fh.seek(offset, whence)
+
+ def close(self):
+ """Close file."""
+ if self._close and self._fh:
+ self._fh.close()
+ self._fh = None
+ self.is_file = False
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self.close()
+
+ def __getattr__(self, name):
+ """Return attribute from underlying file object."""
+ if self._offset:
+ warnings.warn(
+ "FileHandle: '%s' not implemented for embedded files" % name)
+ return getattr(self._fh, name)
+
+ @property
+ def name(self):
+ return self._name
+
+ @property
+ def dirname(self):
+ return self._dir
+
+ @property
+ def path(self):
+ return os.path.join(self._dir, self._name)
+
+ @property
+ def size(self):
+ return self._size
+
+ @property
+ def closed(self):
+ return self._fh is None
+
+
+def read_bytes(fh, byteorder, dtype, count):
+ """Read tag data from file and return as byte string."""
+ dtype = 'b' if dtype[-1] == 's' else byteorder+dtype[-1]
+ return fh.read_array(dtype, count).tostring()
+
+
+def read_numpy(fh, byteorder, dtype, count):
+ """Read tag data from file and return as numpy array."""
+ dtype = 'b' if dtype[-1] == 's' else byteorder+dtype[-1]
+ return fh.read_array(dtype, count)
+
+
+def read_json(fh, byteorder, dtype, count):
+ """Read JSON tag data from file and return as object."""
+ data = fh.read(count)
+ try:
+ return json.loads(unicode(stripnull(data), 'utf-8'))
+ except ValueError:
+ warnings.warn("invalid JSON `%s`" % data)
+
+
+def read_mm_header(fh, byteorder, dtype, count):
+ """Read MM_HEADER tag from file and return as numpy.rec.array."""
+ return fh.read_record(MM_HEADER, byteorder=byteorder)
+
+
+def read_mm_stamp(fh, byteorder, dtype, count):
+ """Read MM_STAMP tag from file and return as numpy.array."""
+ return fh.read_array(byteorder+'f8', 8)
+
+
+def read_uic1tag(fh, byteorder, dtype, count, plane_count=None):
+ """Read MetaMorph STK UIC1Tag from file and return as dictionary.
+
+ Return empty dictionary if plane_count is unknown.
+
+ """
+ assert dtype in ('2I', '1I') and byteorder == '<'
+ result = {}
+ if dtype == '2I':
+ # pre MetaMorph 2.5 (not tested)
+ values = fh.read_array('<u4', 2*count).reshape(count, 2)
+ result = {'z_distance': values[:, 0] / values[:, 1]}
+ elif plane_count:
+ for i in range(count):
+ tagid = struct.unpack('<I', fh.read(4))[0]
+ if tagid in (28, 29, 37, 40, 41):
+ # silently skip unexpected tags
+ fh.read(4)
+ continue
+ name, value = read_uic_tag(fh, tagid, plane_count, offset=True)
+ result[name] = value
+ return result
+
+
+def read_uic2tag(fh, byteorder, dtype, plane_count):
+ """Read MetaMorph STK UIC2Tag from file and return as dictionary."""
+ assert dtype == '2I' and byteorder == '<'
+ values = fh.read_array('<u4', 6*plane_count).reshape(plane_count, 6)
+ return {
+ 'z_distance': values[:, 0] / values[:, 1],
+ 'date_created': values[:, 2], # julian days
+ 'time_created': values[:, 3], # milliseconds
+ 'date_modified': values[:, 4], # julian days
+ 'time_modified': values[:, 5], # milliseconds
+ }
+
+
+def read_uic3tag(fh, byteorder, dtype, plane_count):
+ """Read MetaMorph STK UIC3Tag from file and return as dictionary."""
+ assert dtype == '2I' and byteorder == '<'
+ values = fh.read_array('<u4', 2*plane_count).reshape(plane_count, 2)
+ return {'wavelengths': values[:, 0] / values[:, 1]}
+
+
+def read_uic4tag(fh, byteorder, dtype, plane_count):
+ """Read MetaMorph STK UIC4Tag from file and return as dictionary."""
+ assert dtype == '1I' and byteorder == '<'
+ result = {}
+ while True:
+ tagid = struct.unpack('<H', fh.read(2))[0]
+ if tagid == 0:
+ break
+ name, value = read_uic_tag(fh, tagid, plane_count, offset=False)
+ result[name] = value
+ return result
+
+
+def read_uic_tag(fh, tagid, plane_count, offset):
+ """Read a single UIC tag value from file and return tag name and value.
+
+ UIC1Tags use an offset.
+
+ """
+ def read_int(count=1):
+ value = struct.unpack('<%iI' % count, fh.read(4*count))
+ return value[0] if count == 1 else value
+
+ try:
+ name, dtype = UIC_TAGS[tagid]
+ except KeyError:
+ # unknown tag
+ return '_tagid_%i' % tagid, read_int()
+
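+    # in UIC1 tags, values other than plain ints are stored at a 4-byte
+    # offset; remember the position so the cursor can be restored after
+    # the value has been read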
+ if offset:
+ pos = fh.tell()
+ if dtype not in (int, None):
+ off = read_int()
+ if off < 8:
+ warnings.warn("invalid offset for uic tag '%s': %i"
+ % (name, off))
+ return name, off
+ fh.seek(off)
+
+ if dtype is None:
+ # skip
+ name = '_' + name
+ value = read_int()
+ elif dtype is int:
+ # int
+ value = read_int()
+ elif dtype is Fraction:
+ # fraction
+ value = read_int(2)
+ value = value[0] / value[1]
+ elif dtype is julian_datetime:
+ # datetime
+ value = julian_datetime(*read_int(2))
+ elif dtype is read_uic_image_property:
+ # ImagePropertyEx
+ value = read_uic_image_property(fh)
+ elif dtype is str:
+ # pascal string
+ size = read_int()
+ if 0 <= size < 2**10:
+ value = struct.unpack('%is' % size, fh.read(size))[0][:-1]
+ value = stripnull(value)
+ elif offset:
+ value = ''
+ warnings.warn("corrupt string in uic tag '%s'" % name)
+ else:
+ raise ValueError("invalid string size %i" % size)
+ elif dtype == '%ip':
+ # sequence of pascal strings
+ value = []
+ for i in range(plane_count):
+ size = read_int()
+ if 0 <= size < 2**10:
+ string = struct.unpack('%is' % size, fh.read(size))[0][:-1]
+ string = stripnull(string)
+ value.append(string)
+ elif offset:
+ warnings.warn("corrupt string in uic tag '%s'" % name)
+ else:
+ raise ValueError("invalid string size %i" % size)
+ else:
+ # struct or numpy type
+ dtype = '<' + dtype
+ if '%i' in dtype:
+ dtype = dtype % plane_count
+ if '(' in dtype:
+ # numpy type
+ value = fh.read_array(dtype, 1)[0]
+ if value.shape[-1] == 2:
+ # assume fractions
+ value = value[..., 0] / value[..., 1]
+ else:
+ # struct format
+ value = struct.unpack(dtype, fh.read(struct.calcsize(dtype)))
+ if len(value) == 1:
+ value = value[0]
+
+ if offset:
+ fh.seek(pos + 4)
+
+ return name, value
+
+
+def read_uic_image_property(fh):
+ """Read UIC ImagePropertyEx tag from file and return as dict."""
+ # TODO: test this
+ size = struct.unpack('B', fh.read(1))[0]
+ name = struct.unpack('%is' % size, fh.read(size))[0][:-1]
+ flags, prop = struct.unpack('<IB', fh.read(5))
+ if prop == 1:
+ value = struct.unpack('II', fh.read(8))
+ value = value[0] / value[1]
+ else:
+ size = struct.unpack('B', fh.read(1))[0]
+ value = struct.unpack('%is' % size, fh.read(size))[0]
+ return dict(name=name, flags=flags, value=value)
+
+
+def read_cz_lsm_info(fh, byteorder, dtype, count):
+ """Read CS_LSM_INFO tag from file and return as numpy.rec.array."""
+ assert byteorder == '<'
+ magic_number, structure_size = struct.unpack('<II', fh.read(8))
+ if magic_number not in (50350412, 67127628):
+ raise ValueError("not a valid CS_LSM_INFO structure")
+ fh.seek(-8, 1)
+
+ if structure_size < numpy.dtype(CZ_LSM_INFO).itemsize:
+ # adjust structure according to structure_size
+ cz_lsm_info = []
+ size = 0
+ for name, dtype in CZ_LSM_INFO:
+ size += numpy.dtype(dtype).itemsize
+ if size > structure_size:
+ break
+ cz_lsm_info.append((name, dtype))
+ else:
+ cz_lsm_info = CZ_LSM_INFO
+
+ return fh.read_record(cz_lsm_info, byteorder=byteorder)
+
+
+def read_cz_lsm_floatpairs(fh):
+ """Read LSM sequence of float pairs from file and return as list."""
+ size = struct.unpack('<i', fh.read(4))[0]
+ return fh.read_array('<2f8', count=size)
+
+
+def read_cz_lsm_positions(fh):
+ """Read LSM positions from file and return as list."""
+ size = struct.unpack('<I', fh.read(4))[0]
+ return fh.read_array('<2f8', count=size)
+
+
+def read_cz_lsm_time_stamps(fh):
+ """Read LSM time stamps from file and return as list."""
+ size, count = struct.unpack('<ii', fh.read(8))
+ if size != (8 + 8 * count):
+ raise ValueError("lsm_time_stamps block is too short")
+ # return struct.unpack('<%dd' % count, fh.read(8*count))
+ return fh.read_array('<f8', count=count)
+
+
+def read_cz_lsm_event_list(fh):
+ """Read LSM events from file and return as list of (time, type, text)."""
+ count = struct.unpack('<II', fh.read(8))[1]
+ events = []
+ while count > 0:
+ esize, etime, etype = struct.unpack('<IdI', fh.read(16))
+ etext = stripnull(fh.read(esize - 16))
+ events.append((etime, etype, etext))
+ count -= 1
+ return events
+
+
+def read_cz_lsm_scan_info(fh):
+ """Read LSM scan information from file and return as Record."""
+ block = Record()
+ blocks = [block]
+ unpack = struct.unpack
+ if 0x10000000 != struct.unpack('<I', fh.read(4))[0]:
+ # not a Recording sub block
+ raise ValueError("not a lsm_scan_info structure")
+ fh.read(8)
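+    # walk the stream of (entry, dtype, size) records; entries listed in
+    # CZ_LSM_SCAN_INFO_ARRAYS/_STRUCTS open nested sub-blocks and
+    # 0xffffffff closes the current block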
+ while True:
+ entry, dtype, size = unpack('<III', fh.read(12))
+ if dtype == 2:
+ # ascii
+ value = stripnull(fh.read(size))
+ elif dtype == 4:
+ # long
+ value = unpack('<i', fh.read(4))[0]
+ elif dtype == 5:
+ # rational
+ value = unpack('<d', fh.read(8))[0]
+ else:
+ value = 0
+ if entry in CZ_LSM_SCAN_INFO_ARRAYS:
+ blocks.append(block)
+ name = CZ_LSM_SCAN_INFO_ARRAYS[entry]
+ newobj = []
+ setattr(block, name, newobj)
+ block = newobj
+ elif entry in CZ_LSM_SCAN_INFO_STRUCTS:
+ blocks.append(block)
+ newobj = Record()
+ block.append(newobj)
+ block = newobj
+ elif entry in CZ_LSM_SCAN_INFO_ATTRIBUTES:
+ name = CZ_LSM_SCAN_INFO_ATTRIBUTES[entry]
+ setattr(block, name, value)
+ elif entry == 0xffffffff:
+ # end sub block
+ block = blocks.pop()
+ else:
+ # unknown entry
+ setattr(block, "entry_0x%x" % entry, value)
+ if not blocks:
+ break
+ return block
+
+
+def read_nih_image_header(fh, byteorder, dtype, count):
+ """Read NIH_IMAGE_HEADER tag from file and return as numpy.rec.array."""
+ a = fh.read_record(NIH_IMAGE_HEADER, byteorder=byteorder)
+ a = a.newbyteorder(byteorder)
+ a.xunit = a.xunit[:a._xunit_len]
+ a.um = a.um[:a._um_len]
+ return a
+
+
+def read_micromanager_metadata(fh):
+ """Read MicroManager non-TIFF settings from open file and return as dict.
+
+ The settings can be used to read image data without parsing the TIFF file.
+
+ Raise ValueError if file does not contain valid MicroManager metadata.
+
+ """
+ fh.seek(0)
+ try:
+ byteorder = {b'II': '<', b'MM': '>'}[fh.read(2)]
+    except KeyError:
+ raise ValueError("not a MicroManager TIFF file")
+
+ results = {}
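+    # the 32 bytes at offset 8 describe where to find the index map,
+    # display settings, comments, and summary blocks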
+ fh.seek(8)
+ (index_header, index_offset, display_header, display_offset,
+ comments_header, comments_offset, summary_header, summary_length
+ ) = struct.unpack(byteorder + "IIIIIIII", fh.read(32))
+
+ if summary_header != 2355492:
+ raise ValueError("invalid MicroManager summary_header")
+ results['summary'] = read_json(fh, byteorder, None, summary_length)
+
+ if index_header != 54773648:
+ raise ValueError("invalid MicroManager index_header")
+ fh.seek(index_offset)
+ header, count = struct.unpack(byteorder + "II", fh.read(8))
+ if header != 3453623:
+ raise ValueError("invalid MicroManager index_header")
+ data = struct.unpack(byteorder + "IIIII"*count, fh.read(20*count))
+ results['index_map'] = {
+ 'channel': data[::5], 'slice': data[1::5], 'frame': data[2::5],
+ 'position': data[3::5], 'offset': data[4::5]}
+
+ if display_header != 483765892:
+ raise ValueError("invalid MicroManager display_header")
+ fh.seek(display_offset)
+ header, count = struct.unpack(byteorder + "II", fh.read(8))
+ if header != 347834724:
+ raise ValueError("invalid MicroManager display_header")
+ results['display_settings'] = read_json(fh, byteorder, None, count)
+
+ if comments_header != 99384722:
+ raise ValueError("invalid MicroManager comments_header")
+ fh.seek(comments_offset)
+ header, count = struct.unpack(byteorder + "II", fh.read(8))
+ if header != 84720485:
+ raise ValueError("invalid MicroManager comments_header")
+ results['comments'] = read_json(fh, byteorder, None, count)
+
+ return results
+
+
+def imagej_metadata(data, bytecounts, byteorder):
+ """Return dict from ImageJ metadata tag value."""
+ _str = str if sys.version_info[0] < 3 else lambda x: str(x, 'cp1252')
+
+ def read_string(data, byteorder):
+ return _str(stripnull(data[0 if byteorder == '<' else 1::2]))
+
+ def read_double(data, byteorder):
+ return struct.unpack(byteorder+('d' * (len(data) // 8)), data)
+
+ def read_bytes(data, byteorder):
+ #return struct.unpack('b' * len(data), data)
+ return numpy.fromstring(data, 'uint8')
+
+ metadata_types = { # big endian
+ b'info': ('info', read_string),
+ b'labl': ('labels', read_string),
+ b'rang': ('ranges', read_double),
+ b'luts': ('luts', read_bytes),
+ b'roi ': ('roi', read_bytes),
+ b'over': ('overlays', read_bytes)}
+ metadata_types.update( # little endian
+ dict((k[::-1], v) for k, v in metadata_types.items()))
+
+ if not bytecounts:
+ raise ValueError("no ImageJ metadata")
+
+    if data[:4] not in (b'IJIJ', b'JIJI'):
+ raise ValueError("invalid ImageJ metadata")
+
+ header_size = bytecounts[0]
+ if header_size < 12 or header_size > 804:
+ raise ValueError("invalid ImageJ metadata header size")
+
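+    # the metadata header is a sequence of (type, count) pairs that
+    # describes the entries following it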
+ ntypes = (header_size - 4) // 8
+ header = struct.unpack(byteorder+'4sI'*ntypes, data[4:4+ntypes*8])
+ pos = 4 + ntypes * 8
+ counter = 0
+ result = {}
+ for mtype, count in zip(header[::2], header[1::2]):
+ values = []
+ name, func = metadata_types.get(mtype, (_str(mtype), read_bytes))
+ for _ in range(count):
+ counter += 1
+ pos1 = pos + bytecounts[counter]
+ values.append(func(data[pos:pos1], byteorder))
+ pos = pos1
+ result[name.strip()] = values[0] if count == 1 else values
+ return result
+
+
+def imagej_description(description):
+ """Return dict from ImageJ image_description tag."""
+ def _bool(val):
+ return {b'true': True, b'false': False}[val.lower()]
+
+ _str = str if sys.version_info[0] < 3 else lambda x: str(x, 'cp1252')
+ result = {}
+ for line in description.splitlines():
+ try:
+ key, val = line.split(b'=')
+ except Exception:
+ continue
+ key = key.strip()
+ val = val.strip()
+ for dtype in (int, float, _bool, _str):
+ try:
+ val = dtype(val)
+ break
+ except Exception:
+ pass
+ result[_str(key)] = val
+ return result
+
+
+def decodejpg(encoded, tables=b'', photometric=None,
+ ycbcr_subsampling=None, ycbcr_positioning=None):
+ """Decode JPEG encoded byte string (using _czifile extension module)."""
+ import _czifile
+ image = _czifile.decodejpg(encoded, tables)
+ if photometric == 'rgb' and ycbcr_subsampling and ycbcr_positioning:
+ # TODO: convert YCbCr to RGB
+ pass
+ return image.tostring()
+
+
+def decodepackbits(encoded):
+ """Decompress PackBits encoded byte string.
+
+ PackBits is a simple byte-oriented run-length compression scheme.
+
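+    A short example (a run of three 0xAA bytes followed by three literal
+    bytes):
+
+    >>> encoded = b'\\xfe\\xaa\\x02\\x80\\x00\\x2a'
+    >>> decodepackbits(encoded) == b'\\xaa\\xaa\\xaa\\x80\\x00\\x2a'
+    True
+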
+ """
+ func = ord if sys.version[0] == '2' else lambda x: x
+ result = []
+ result_extend = result.extend
+ i = 0
+ try:
+ while True:
+ n = func(encoded[i]) + 1
+ i += 1
+ if n < 129:
+ result_extend(encoded[i:i+n])
+ i += n
+ elif n > 129:
+ result_extend(encoded[i:i+1] * (258-n))
+ i += 1
+ except IndexError:
+ pass
+ return b''.join(result) if sys.version[0] == '2' else bytes(result)
+
+
+def decodelzw(encoded):
+ """Decompress LZW (Lempel-Ziv-Welch) encoded TIFF strip (byte string).
+
+ The strip must begin with a CLEAR code and end with an EOI code.
+
+ This is an implementation of the LZW decoding algorithm described in (1).
+    It is not compatible with old-style LZW compressed files such as
+    quad-lzw.tif.
+
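+    A minimal hand-packed strip (CLEAR, the 9-bit code for b'A', EOI):
+
+    >>> decodelzw(b'\\x80\\x10\\x60\\x20') == b'A'
+    True
+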
+ """
+ len_encoded = len(encoded)
+ bitcount_max = len_encoded * 8
+ unpack = struct.unpack
+
+ if sys.version[0] == '2':
+ newtable = [chr(i) for i in range(256)]
+ else:
+ newtable = [bytes([i]) for i in range(256)]
+ newtable.extend((0, 0))
+
+ def next_code():
+ """Return integer of `bitw` bits at `bitcount` position in encoded."""
+ start = bitcount // 8
+ s = encoded[start:start+4]
+ try:
+ code = unpack('>I', s)[0]
+ except Exception:
+ code = unpack('>I', s + b'\x00'*(4-len(s)))[0]
+ code <<= bitcount % 8
+ code &= mask
+ return code >> shr
+
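+    # the decoder switches to wider codes (10, 11, then 12 bits) once the
+    # string table grows past each of the thresholds below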
+ switchbitch = { # code: bit-width, shr-bits, bit-mask
+ 255: (9, 23, int(9*'1'+'0'*23, 2)),
+ 511: (10, 22, int(10*'1'+'0'*22, 2)),
+ 1023: (11, 21, int(11*'1'+'0'*21, 2)),
+ 2047: (12, 20, int(12*'1'+'0'*20, 2)), }
+ bitw, shr, mask = switchbitch[255]
+ bitcount = 0
+
+ if len_encoded < 4:
+ raise ValueError("strip must be at least 4 characters long")
+
+ if next_code() != 256:
+ raise ValueError("strip must begin with CLEAR code")
+
+ code = 0
+ oldcode = 0
+ result = []
+ result_append = result.append
+ while True:
+ code = next_code() # ~5% faster when inlining this function
+ bitcount += bitw
+ if code == 257 or bitcount >= bitcount_max: # EOI
+ break
+ if code == 256: # CLEAR
+ table = newtable[:]
+ table_append = table.append
+ lentable = 258
+ bitw, shr, mask = switchbitch[255]
+ code = next_code()
+ bitcount += bitw
+ if code == 257: # EOI
+ break
+ result_append(table[code])
+ else:
+ if code < lentable:
+ decoded = table[code]
+ newcode = table[oldcode] + decoded[:1]
+ else:
+ newcode = table[oldcode]
+ newcode += newcode[:1]
+ decoded = newcode
+ result_append(decoded)
+ table_append(newcode)
+ lentable += 1
+ oldcode = code
+ if lentable in switchbitch:
+ bitw, shr, mask = switchbitch[lentable]
+
+ if code != 257:
+ warnings.warn("unexpected end of lzw stream (code %i)" % code)
+
+ return b''.join(result)
+
+
+def unpackints(data, dtype, itemsize, runlen=0):
+ """Decompress byte string to array of integers of any bit size <= 32.
+
+ Parameters
+ ----------
+ data : byte str
+ Data to decompress.
+ dtype : numpy.dtype or str
+ A numpy boolean or integer type.
+ itemsize : int
+ Number of bits per integer.
+ runlen : int
+ Number of consecutive integers, after which to start at next byte.
+
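+    Examples
+    --------
+    Unpack two bytes into four 4-bit unsigned integers:
+
+    >>> unpackints(b'\\xff\\x00', 'B', 4, runlen=4).tolist()
+    [15, 15, 0, 0]
+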
+ """
+ if itemsize == 1: # bitarray
+ data = numpy.fromstring(data, '|B')
+ data = numpy.unpackbits(data)
+ if runlen % 8:
+ data = data.reshape(-1, runlen + (8 - runlen % 8))
+ data = data[:, :runlen].reshape(-1)
+ return data.astype(dtype)
+
+ dtype = numpy.dtype(dtype)
+ if itemsize in (8, 16, 32, 64):
+ return numpy.fromstring(data, dtype)
+ if itemsize < 1 or itemsize > 32:
+ raise ValueError("itemsize out of range: %i" % itemsize)
+ if dtype.kind not in "biu":
+ raise ValueError("invalid dtype")
+
+ itembytes = next(i for i in (1, 2, 4, 8) if 8 * i >= itemsize)
+ if itembytes != dtype.itemsize:
+ raise ValueError("dtype.itemsize too small")
+ if runlen == 0:
+ runlen = len(data) // itembytes
+ skipbits = runlen*itemsize % 8
+ if skipbits:
+ skipbits = 8 - skipbits
+ shrbits = itembytes*8 - itemsize
+ bitmask = int(itemsize*'1'+'0'*shrbits, 2)
+ dtypestr = '>' + dtype.char # dtype always big endian?
+
+ unpack = struct.unpack
+    result_len = runlen * (len(data)*8 // (runlen*itemsize + skipbits))
+    result = numpy.empty((result_len, ), dtype)
+ bitcount = 0
+ for i in range(len(result)):
+ start = bitcount // 8
+ s = data[start:start+itembytes]
+ try:
+ code = unpack(dtypestr, s)[0]
+ except Exception:
+ code = unpack(dtypestr, s + b'\x00'*(itembytes-len(s)))[0]
+ code <<= bitcount % 8
+ code &= bitmask
+ result[i] = code >> shrbits
+ bitcount += itemsize
+ if (i+1) % runlen == 0:
+ bitcount += skipbits
+ return result
+
+
+def unpackrgb(data, dtype='<B', bitspersample=(5, 6, 5), rescale=True):
+ """Return array from byte string containing packed samples.
+
+ Use to unpack RGB565 or RGB555 to RGB888 format.
+
+ Parameters
+ ----------
+ data : byte str
+ The data to be decoded. Samples in each pixel are stored consecutively.
+ Pixels are aligned to 8, 16, or 32 bit boundaries.
+ dtype : numpy.dtype
+ The sample data type. The byteorder applies also to the data stream.
+ bitspersample : tuple
+ Number of bits for each sample in a pixel.
+ rescale : bool
+ Upscale samples to the number of bits in dtype.
+
+ Returns
+ -------
+ result : ndarray
+ Flattened array of unpacked samples of native dtype.
+
+ Examples
+ --------
+ >>> data = struct.pack('BBBB', 0x21, 0x08, 0xff, 0xff)
+ >>> print(unpackrgb(data, '<B', (5, 6, 5), False))
+ [ 1 1 1 31 63 31]
+ >>> print(unpackrgb(data, '<B', (5, 6, 5)))
+ [ 8 4 8 255 255 255]
+ >>> print(unpackrgb(data, '<B', (5, 5, 5)))
+ [ 16 8 8 255 255 255]
+
+ """
+ dtype = numpy.dtype(dtype)
+ bits = int(numpy.sum(bitspersample))
+ if not (bits <= 32 and all(i <= dtype.itemsize*8 for i in bitspersample)):
+ raise ValueError("sample size not supported %s" % str(bitspersample))
+ dt = next(i for i in 'BHI' if numpy.dtype(i).itemsize*8 >= bits)
+ data = numpy.fromstring(data, dtype.byteorder+dt)
+ result = numpy.empty((data.size, len(bitspersample)), dtype.char)
+ for i, bps in enumerate(bitspersample):
+ t = data >> int(numpy.sum(bitspersample[i+1:]))
+ t &= int('0b'+'1'*bps, 2)
+ if rescale:
+ o = ((dtype.itemsize * 8) // bps + 1) * bps
+ if o > data.dtype.itemsize * 8:
+ t = t.astype('I')
+ t *= (2**o - 1) // (2**bps - 1)
+ t //= 2**(o - (dtype.itemsize * 8))
+ result[:, i] = t
+ return result.reshape(-1)
+
+
+def reorient(image, orientation):
+ """Return reoriented view of image array.
+
+ Parameters
+ ----------
+ image : numpy array
+ Non-squeezed output of asarray() functions.
+ Axes -3 and -2 must be image length and width respectively.
+ orientation : int or str
+ One of TIFF_ORIENTATIONS keys or values.
+
+ """
+ o = TIFF_ORIENTATIONS.get(orientation, orientation)
+ if o == 'top_left':
+ return image
+ elif o == 'top_right':
+ return image[..., ::-1, :]
+ elif o == 'bottom_left':
+ return image[..., ::-1, :, :]
+ elif o == 'bottom_right':
+ return image[..., ::-1, ::-1, :]
+ elif o == 'left_top':
+ return numpy.swapaxes(image, -3, -2)
+ elif o == 'right_top':
+ return numpy.swapaxes(image, -3, -2)[..., ::-1, :]
+ elif o == 'left_bottom':
+ return numpy.swapaxes(image, -3, -2)[..., ::-1, :, :]
+ elif o == 'right_bottom':
+ return numpy.swapaxes(image, -3, -2)[..., ::-1, ::-1, :]
+
+
+def squeeze_axes(shape, axes, skip='XY'):
+ """Return shape and axes with single-dimensional entries removed.
+
+ Remove unused dimensions unless their axes are listed in 'skip'.
+
+ >>> squeeze_axes((5, 1, 2, 1, 1), 'TZYXC')
+ ((5, 2, 1), 'TYX')
+
+ """
+ if len(shape) != len(axes):
+ raise ValueError("dimensions of axes and shape don't match")
+ shape, axes = zip(*(i for i in zip(shape, axes)
+ if i[0] > 1 or i[1] in skip))
+ return shape, ''.join(axes)
+
+
+def transpose_axes(data, axes, asaxes='CTZYX'):
+ """Return data with its axes permuted to match specified axes.
+
+ A view is returned if possible.
+
+ >>> transpose_axes(numpy.zeros((2, 3, 4, 5)), 'TYXC', asaxes='CTZYX').shape
+ (5, 2, 1, 3, 4)
+
+ """
+ for ax in axes:
+ if ax not in asaxes:
+ raise ValueError("unknown axis %s" % ax)
+ # add missing axes to data
+ shape = data.shape
+ for ax in reversed(asaxes):
+ if ax not in axes:
+ axes = ax + axes
+ shape = (1,) + shape
+ data = data.reshape(shape)
+ # transpose axes
+ data = data.transpose([axes.index(ax) for ax in asaxes])
+ return data
+
+
+def stack_pages(pages, memmap=False, *args, **kwargs):
+ """Read data from sequence of TiffPage and stack them vertically.
+
+ If memmap is True, return an array stored in a binary file on disk.
+    Additional parameters are passed to the page asarray function.
+
+ """
+ if len(pages) == 0:
+ raise ValueError("no pages")
+
+ if len(pages) == 1:
+ return pages[0].asarray(memmap=memmap, *args, **kwargs)
+
+ result = pages[0].asarray(*args, **kwargs)
+ shape = (len(pages),) + result.shape
+ if memmap:
+ with tempfile.NamedTemporaryFile() as fh:
+ result = numpy.memmap(fh, dtype=result.dtype, shape=shape)
+ else:
+ result = numpy.empty(shape, dtype=result.dtype)
+
+ for i, page in enumerate(pages):
+ result[i] = page.asarray(*args, **kwargs)
+
+ return result
+
+
+def stripnull(string):
+ """Return string truncated at first null character.
+
+ Clean NULL terminated C strings.
+
+ >>> stripnull(b'string\\x00') # doctest: +SKIP
+ b'string'
+
+ """
+ i = string.find(b'\x00')
+ return string if (i < 0) else string[:i]
+
+
+def stripascii(string):
+ """Return string truncated at last byte that is 7bit ASCII.
+
+ Clean NULL separated and terminated TIFF strings.
+
+ >>> stripascii(b'string\\x00string\\n\\x01\\x00') # doctest: +SKIP
+ b'string\\x00string\\n'
+ >>> stripascii(b'\\x00') # doctest: +SKIP
+ b''
+
+ """
+ # TODO: pythonize this
+ ord_ = ord if sys.version_info[0] < 3 else lambda x: x
+ i = len(string)
+ while i:
+ i -= 1
+ if 8 < ord_(string[i]) < 127:
+ break
+ else:
+ i = -1
+ return string[:i+1]
+
+
+def format_size(size):
+ """Return file size as string from byte size."""
+ for unit in ('B', 'KB', 'MB', 'GB', 'TB'):
+ if size < 2048:
+ return "%.f %s" % (size, unit)
+ size /= 1024.0
+
+
+def sequence(value):
+ """Return tuple containing value if value is not a sequence.
+
+ >>> sequence(1)
+ (1,)
+ >>> sequence([1])
+ [1]
+
+ """
+ try:
+ len(value)
+ return value
+ except TypeError:
+ return (value, )
+
+
+def product(iterable):
+ """Return product of sequence of numbers.
+
+ Equivalent of functools.reduce(operator.mul, iterable, 1).
+
+ >>> product([2**8, 2**30])
+ 274877906944
+ >>> product([])
+ 1
+
+ """
+ prod = 1
+ for i in iterable:
+ prod *= i
+ return prod
+
+
+def natural_sorted(iterable):
+ """Return human sorted list of strings.
+
+ E.g. for sorting file names.
+
+ >>> natural_sorted(['f1', 'f2', 'f10'])
+ ['f1', 'f2', 'f10']
+
+ """
+ def sortkey(x):
+ return [(int(c) if c.isdigit() else c) for c in re.split(numbers, x)]
+ numbers = re.compile(r'(\d+)')
+ return sorted(iterable, key=sortkey)
+
+
+def excel_datetime(timestamp, epoch=datetime.datetime.fromordinal(693594)):
+ """Return datetime object from timestamp in Excel serial format.
+
+ Convert LSM time stamps.
+
+ >>> excel_datetime(40237.029999999795)
+ datetime.datetime(2010, 2, 28, 0, 43, 11, 999982)
+
+ """
+ return epoch + datetime.timedelta(timestamp)
+
+
+def julian_datetime(julianday, millisecond=0):
+ """Return datetime from days since 1/1/4713 BC and ms since midnight.
+
+ Convert Julian dates according to MetaMorph.
+
+ >>> julian_datetime(2451576, 54362783)
+ datetime.datetime(2000, 2, 2, 15, 6, 2, 783)
+
+ """
+ if julianday <= 1721423:
+ # no datetime before year 1
+ return None
+
+ a = julianday + 1
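+    # apply the Gregorian calendar correction for dates after October 1582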
+ if a > 2299160:
+ alpha = math.trunc((a - 1867216.25) / 36524.25)
+ a += 1 + alpha - alpha // 4
+ b = a + (1524 if a > 1721423 else 1158)
+ c = math.trunc((b - 122.1) / 365.25)
+ d = math.trunc(365.25 * c)
+ e = math.trunc((b - d) / 30.6001)
+
+ day = b - d - math.trunc(30.6001 * e)
+ month = e - (1 if e < 13.5 else 13)
+ year = c - (4716 if month > 2.5 else 4715)
+
+    hour, millisecond = divmod(millisecond, 1000 * 60 * 60)
+    minute, millisecond = divmod(millisecond, 1000 * 60)
+    second, millisecond = divmod(millisecond, 1000)
+
+ return datetime.datetime(year, month, day,
+                             hour, minute, second, millisecond)
+
+
+def test_tifffile(directory='testimages', verbose=True):
+ """Read all images in directory.
+
+ Print error message on failure.
+
+ >>> test_tifffile(verbose=False)
+
+ """
+ successful = 0
+ failed = 0
+ start = time.time()
+ for f in glob.glob(os.path.join(directory, '*.*')):
+ if verbose:
+ print("\n%s>\n" % f.lower(), end='')
+ t0 = time.time()
+ try:
+ tif = TiffFile(f, multifile=True)
+ except Exception as e:
+ if not verbose:
+ print(f, end=' ')
+ print("ERROR:", e)
+ failed += 1
+ continue
+ try:
+ img = tif.asarray()
+ except ValueError:
+ try:
+ img = tif[0].asarray()
+ except Exception as e:
+ if not verbose:
+ print(f, end=' ')
+ print("ERROR:", e)
+ failed += 1
+ continue
+ finally:
+ tif.close()
+ successful += 1
+ if verbose:
+ print("%s, %s %s, %s, %.0f ms" % (
+ str(tif), str(img.shape), img.dtype, tif[0].compression,
+ (time.time()-t0) * 1e3))
+ if verbose:
+ print("\nSuccessfully read %i of %i files in %.3f s\n" % (
+ successful, successful+failed, time.time()-start))
+
+
+class TIFF_SUBFILE_TYPES(object):
+ def __getitem__(self, key):
+ result = []
+ if key & 1:
+ result.append('reduced_image')
+ if key & 2:
+ result.append('page')
+ if key & 4:
+ result.append('mask')
+ return tuple(result)
+
+
+TIFF_PHOTOMETRICS = {
+ 0: 'miniswhite',
+ 1: 'minisblack',
+ 2: 'rgb',
+ 3: 'palette',
+ 4: 'mask',
+ 5: 'separated', # CMYK
+ 6: 'ycbcr',
+ 8: 'cielab',
+ 9: 'icclab',
+ 10: 'itulab',
+ 32803: 'cfa', # Color Filter Array
+ 32844: 'logl',
+ 32845: 'logluv',
+ 34892: 'linear_raw'
+}
+
+TIFF_COMPESSIONS = {
+ 1: None,
+ 2: 'ccittrle',
+ 3: 'ccittfax3',
+ 4: 'ccittfax4',
+ 5: 'lzw',
+ 6: 'ojpeg',
+ 7: 'jpeg',
+ 8: 'adobe_deflate',
+ 9: 't85',
+ 10: 't43',
+ 32766: 'next',
+ 32771: 'ccittrlew',
+ 32773: 'packbits',
+ 32809: 'thunderscan',
+ 32895: 'it8ctpad',
+ 32896: 'it8lw',
+ 32897: 'it8mp',
+ 32898: 'it8bl',
+ 32908: 'pixarfilm',
+ 32909: 'pixarlog',
+ 32946: 'deflate',
+ 32947: 'dcs',
+ 34661: 'jbig',
+ 34676: 'sgilog',
+ 34677: 'sgilog24',
+ 34712: 'jp2000',
+ 34713: 'nef',
+}
+
+TIFF_DECOMPESSORS = {
+ None: lambda x: x,
+ 'adobe_deflate': zlib.decompress,
+ 'deflate': zlib.decompress,
+ 'packbits': decodepackbits,
+ 'lzw': decodelzw,
+ # 'jpeg': decodejpg
+}
+
+TIFF_DATA_TYPES = {
+ 1: '1B', # BYTE 8-bit unsigned integer.
+ 2: '1s', # ASCII 8-bit byte that contains a 7-bit ASCII code;
+ # the last byte must be NULL (binary zero).
+ 3: '1H', # SHORT 16-bit (2-byte) unsigned integer
+ 4: '1I', # LONG 32-bit (4-byte) unsigned integer.
+ 5: '2I', # RATIONAL Two LONGs: the first represents the numerator of
+ # a fraction; the second, the denominator.
+ 6: '1b', # SBYTE An 8-bit signed (twos-complement) integer.
+ 7: '1s', # UNDEFINED An 8-bit byte that may contain anything,
+ # depending on the definition of the field.
+ 8: '1h', # SSHORT A 16-bit (2-byte) signed (twos-complement) integer.
+ 9: '1i', # SLONG A 32-bit (4-byte) signed (twos-complement) integer.
+ 10: '2i', # SRATIONAL Two SLONGs: the first represents the numerator
+ # of a fraction, the second the denominator.
+ 11: '1f', # FLOAT Single precision (4-byte) IEEE format.
+ 12: '1d', # DOUBLE Double precision (8-byte) IEEE format.
+ 13: '1I', # IFD unsigned 4 byte IFD offset.
+ #14: '', # UNICODE
+ #15: '', # COMPLEX
+ 16: '1Q', # LONG8 unsigned 8 byte integer (BigTiff)
+ 17: '1q', # SLONG8 signed 8 byte integer (BigTiff)
+ 18: '1Q', # IFD8 unsigned 8 byte IFD offset (BigTiff)
+}
+
+TIFF_SAMPLE_FORMATS = {
+ 1: 'uint',
+ 2: 'int',
+ 3: 'float',
+ #4: 'void',
+ #5: 'complex_int',
+ 6: 'complex',
+}
+
+TIFF_SAMPLE_DTYPES = {
+ ('uint', 1): '?', # bitmap
+ ('uint', 2): 'B',
+ ('uint', 3): 'B',
+ ('uint', 4): 'B',
+ ('uint', 5): 'B',
+ ('uint', 6): 'B',
+ ('uint', 7): 'B',
+ ('uint', 8): 'B',
+ ('uint', 9): 'H',
+ ('uint', 10): 'H',
+ ('uint', 11): 'H',
+ ('uint', 12): 'H',
+ ('uint', 13): 'H',
+ ('uint', 14): 'H',
+ ('uint', 15): 'H',
+ ('uint', 16): 'H',
+ ('uint', 17): 'I',
+ ('uint', 18): 'I',
+ ('uint', 19): 'I',
+ ('uint', 20): 'I',
+ ('uint', 21): 'I',
+ ('uint', 22): 'I',
+ ('uint', 23): 'I',
+ ('uint', 24): 'I',
+ ('uint', 25): 'I',
+ ('uint', 26): 'I',
+ ('uint', 27): 'I',
+ ('uint', 28): 'I',
+ ('uint', 29): 'I',
+ ('uint', 30): 'I',
+ ('uint', 31): 'I',
+ ('uint', 32): 'I',
+ ('uint', 64): 'Q',
+ ('int', 8): 'b',
+ ('int', 16): 'h',
+ ('int', 32): 'i',
+ ('int', 64): 'q',
+ ('float', 16): 'e',
+ ('float', 32): 'f',
+ ('float', 64): 'd',
+ ('complex', 64): 'F',
+ ('complex', 128): 'D',
+ ('uint', (5, 6, 5)): 'B',
+}
+
+TIFF_ORIENTATIONS = {
+ 1: 'top_left',
+ 2: 'top_right',
+ 3: 'bottom_right',
+ 4: 'bottom_left',
+ 5: 'left_top',
+ 6: 'right_top',
+ 7: 'right_bottom',
+ 8: 'left_bottom',
+}
+
+# TODO: is there a standard for character axes labels?
+AXES_LABELS = {
+ 'X': 'width',
+ 'Y': 'height',
+ 'Z': 'depth',
+ 'S': 'sample', # rgb(a)
+ 'I': 'series', # general sequence, plane, page, IFD
+ 'T': 'time',
+ 'C': 'channel', # color, emission wavelength
+ 'A': 'angle',
+ 'P': 'phase', # formerly F # P is Position in LSM!
+ 'R': 'tile', # region, point, mosaic
+ 'H': 'lifetime', # histogram
+ 'E': 'lambda', # excitation wavelength
+ 'L': 'exposure', # lux
+ 'V': 'event',
+ 'Q': 'other',
+ #'M': 'mosaic', # LSM 6
+}
+
+AXES_LABELS.update(dict((v, k) for k, v in AXES_LABELS.items()))
+
+# Map OME pixel types to numpy dtype
+OME_PIXEL_TYPES = {
+ 'int8': 'i1',
+ 'int16': 'i2',
+ 'int32': 'i4',
+ 'uint8': 'u1',
+ 'uint16': 'u2',
+ 'uint32': 'u4',
+ 'float': 'f4',
+ # 'bit': 'bit',
+ 'double': 'f8',
+ 'complex': 'c8',
+ 'double-complex': 'c16',
+}
+
+# NIH Image PicHeader v1.63
+NIH_IMAGE_HEADER = [
+ ('fileid', 'a8'),
+ ('nlines', 'i2'),
+ ('pixelsperline', 'i2'),
+ ('version', 'i2'),
+ ('oldlutmode', 'i2'),
+ ('oldncolors', 'i2'),
+ ('colors', 'u1', (3, 32)),
+ ('oldcolorstart', 'i2'),
+ ('colorwidth', 'i2'),
+ ('extracolors', 'u2', (6, 3)),
+ ('nextracolors', 'i2'),
+ ('foregroundindex', 'i2'),
+ ('backgroundindex', 'i2'),
+ ('xscale', 'f8'),
+ ('_x0', 'i2'),
+ ('_x1', 'i2'),
+ ('units_t', 'i2'), # NIH_UNITS_TYPE
+ ('p1', [('x', 'i2'), ('y', 'i2')]),
+ ('p2', [('x', 'i2'), ('y', 'i2')]),
+ ('curvefit_t', 'i2'), # NIH_CURVEFIT_TYPE
+ ('ncoefficients', 'i2'),
+ ('coeff', 'f8', 6),
+ ('_um_len', 'u1'),
+ ('um', 'a15'),
+ ('_x2', 'u1'),
+ ('binarypic', 'b1'),
+ ('slicestart', 'i2'),
+ ('sliceend', 'i2'),
+ ('scalemagnification', 'f4'),
+ ('nslices', 'i2'),
+ ('slicespacing', 'f4'),
+ ('currentslice', 'i2'),
+ ('frameinterval', 'f4'),
+ ('pixelaspectratio', 'f4'),
+ ('colorstart', 'i2'),
+ ('colorend', 'i2'),
+ ('ncolors', 'i2'),
+ ('fill1', '3u2'),
+ ('fill2', '3u2'),
+ ('colortable_t', 'u1'), # NIH_COLORTABLE_TYPE
+ ('lutmode_t', 'u1'), # NIH_LUTMODE_TYPE
+ ('invertedtable', 'b1'),
+ ('zeroclip', 'b1'),
+ ('_xunit_len', 'u1'),
+ ('xunit', 'a11'),
+ ('stacktype_t', 'i2'), # NIH_STACKTYPE_TYPE
+]
+
+NIH_COLORTABLE_TYPE = (
+ 'CustomTable', 'AppleDefault', 'Pseudo20', 'Pseudo32', 'Rainbow',
+ 'Fire1', 'Fire2', 'Ice', 'Grays', 'Spectrum')
+
+NIH_LUTMODE_TYPE = (
+ 'PseudoColor', 'OldAppleDefault', 'OldSpectrum', 'GrayScale',
+ 'ColorLut', 'CustomGrayscale')
+
+NIH_CURVEFIT_TYPE = (
+ 'StraightLine', 'Poly2', 'Poly3', 'Poly4', 'Poly5', 'ExpoFit',
+ 'PowerFit', 'LogFit', 'RodbardFit', 'SpareFit1', 'Uncalibrated',
+ 'UncalibratedOD')
+
+NIH_UNITS_TYPE = (
+ 'Nanometers', 'Micrometers', 'Millimeters', 'Centimeters', 'Meters',
+ 'Kilometers', 'Inches', 'Feet', 'Miles', 'Pixels', 'OtherUnits')
+
+NIH_STACKTYPE_TYPE = (
+ 'VolumeStack', 'RGBStack', 'MovieStack', 'HSVStack')
+
+# Map Universal Imaging Corporation MetaMorph internal tag ids to name and type
+UIC_TAGS = {
+ 0: ('auto_scale', int),
+ 1: ('min_scale', int),
+ 2: ('max_scale', int),
+ 3: ('spatial_calibration', int),
+ 4: ('x_calibration', Fraction),
+ 5: ('y_calibration', Fraction),
+ 6: ('calibration_units', str),
+ 7: ('name', str),
+ 8: ('thresh_state', int),
+ 9: ('thresh_state_red', int),
+ 10: ('tagid_10', None), # undefined
+ 11: ('thresh_state_green', int),
+ 12: ('thresh_state_blue', int),
+ 13: ('thresh_state_lo', int),
+ 14: ('thresh_state_hi', int),
+ 15: ('zoom', int),
+ 16: ('create_time', julian_datetime),
+ 17: ('last_saved_time', julian_datetime),
+ 18: ('current_buffer', int),
+ 19: ('gray_fit', None),
+ 20: ('gray_point_count', None),
+ 21: ('gray_x', Fraction),
+ 22: ('gray_y', Fraction),
+ 23: ('gray_min', Fraction),
+ 24: ('gray_max', Fraction),
+ 25: ('gray_unit_name', str),
+ 26: ('standard_lut', int),
+ 27: ('wavelength', int),
+ 28: ('stage_position', '(%i,2,2)u4'), # N xy positions as fractions
+ 29: ('camera_chip_offset', '(%i,2,2)u4'), # N xy offsets as fractions
+ 30: ('overlay_mask', None),
+ 31: ('overlay_compress', None),
+ 32: ('overlay', None),
+ 33: ('special_overlay_mask', None),
+ 34: ('special_overlay_compress', None),
+ 35: ('special_overlay', None),
+ 36: ('image_property', read_uic_image_property),
+ 37: ('stage_label', '%ip'), # N str
+ 38: ('autoscale_lo_info', Fraction),
+ 39: ('autoscale_hi_info', Fraction),
+ 40: ('absolute_z', '(%i,2)u4'), # N fractions
+ 41: ('absolute_z_valid', '(%i,)u4'), # N long
+ 42: ('gamma', int),
+ 43: ('gamma_red', int),
+ 44: ('gamma_green', int),
+ 45: ('gamma_blue', int),
+ 46: ('camera_bin', int),
+ 47: ('new_lut', int),
+ 48: ('image_property_ex', None),
+ 49: ('plane_property', int),
+ 50: ('user_lut_table', '(256,3)u1'),
+ 51: ('red_autoscale_info', int),
+ 52: ('red_autoscale_lo_info', Fraction),
+ 53: ('red_autoscale_hi_info', Fraction),
+ 54: ('red_minscale_info', int),
+ 55: ('red_maxscale_info', int),
+ 56: ('green_autoscale_info', int),
+ 57: ('green_autoscale_lo_info', Fraction),
+ 58: ('green_autoscale_hi_info', Fraction),
+ 59: ('green_minscale_info', int),
+ 60: ('green_maxscale_info', int),
+ 61: ('blue_autoscale_info', int),
+ 62: ('blue_autoscale_lo_info', Fraction),
+ 63: ('blue_autoscale_hi_info', Fraction),
+ 64: ('blue_min_scale_info', int),
+ 65: ('blue_max_scale_info', int),
+ #66: ('overlay_plane_color', read_uic_overlay_plane_color),
+}
+
+
+# Olympus FluoView
+MM_DIMENSION = [
+ ('name', 'a16'),
+ ('size', 'i4'),
+ ('origin', 'f8'),
+ ('resolution', 'f8'),
+ ('unit', 'a64'),
+]
+
+MM_HEADER = [
+ ('header_flag', 'i2'),
+ ('image_type', 'u1'),
+ ('image_name', 'a257'),
+ ('offset_data', 'u4'),
+ ('palette_size', 'i4'),
+ ('offset_palette0', 'u4'),
+ ('offset_palette1', 'u4'),
+ ('comment_size', 'i4'),
+ ('offset_comment', 'u4'),
+ ('dimensions', MM_DIMENSION, 10),
+ ('offset_position', 'u4'),
+ ('map_type', 'i2'),
+ ('map_min', 'f8'),
+ ('map_max', 'f8'),
+ ('min_value', 'f8'),
+ ('max_value', 'f8'),
+ ('offset_map', 'u4'),
+ ('gamma', 'f8'),
+ ('offset', 'f8'),
+ ('gray_channel', MM_DIMENSION),
+ ('offset_thumbnail', 'u4'),
+ ('voice_field', 'i4'),
+ ('offset_voice_field', 'u4'),
+]
+
+# Carl Zeiss LSM
+CZ_LSM_INFO = [
+ ('magic_number', 'u4'),
+ ('structure_size', 'i4'),
+ ('dimension_x', 'i4'),
+ ('dimension_y', 'i4'),
+ ('dimension_z', 'i4'),
+ ('dimension_channels', 'i4'),
+ ('dimension_time', 'i4'),
+ ('data_type', 'i4'), # CZ_DATA_TYPES
+ ('thumbnail_x', 'i4'),
+ ('thumbnail_y', 'i4'),
+ ('voxel_size_x', 'f8'),
+ ('voxel_size_y', 'f8'),
+ ('voxel_size_z', 'f8'),
+ ('origin_x', 'f8'),
+ ('origin_y', 'f8'),
+ ('origin_z', 'f8'),
+ ('scan_type', 'u2'),
+ ('spectral_scan', 'u2'),
+ ('type_of_data', 'u4'), # CZ_TYPE_OF_DATA
+ ('offset_vector_overlay', 'u4'),
+ ('offset_input_lut', 'u4'),
+ ('offset_output_lut', 'u4'),
+ ('offset_channel_colors', 'u4'),
+ ('time_interval', 'f8'),
+ ('offset_channel_data_types', 'u4'),
+ ('offset_scan_info', 'u4'), # CZ_LSM_SCAN_INFO
+ ('offset_ks_data', 'u4'),
+ ('offset_time_stamps', 'u4'),
+ ('offset_event_list', 'u4'),
+ ('offset_roi', 'u4'),
+ ('offset_bleach_roi', 'u4'),
+ ('offset_next_recording', 'u4'),
+ # LSM 2.0 ends here
+ ('display_aspect_x', 'f8'),
+ ('display_aspect_y', 'f8'),
+ ('display_aspect_z', 'f8'),
+ ('display_aspect_time', 'f8'),
+ ('offset_mean_of_roi_overlay', 'u4'),
+ ('offset_topo_isoline_overlay', 'u4'),
+ ('offset_topo_profile_overlay', 'u4'),
+ ('offset_linescan_overlay', 'u4'),
+ ('offset_toolbar_flags', 'u4'),
+ ('offset_channel_wavelength', 'u4'),
+ ('offset_channel_factors', 'u4'),
+ ('objective_sphere_correction', 'f8'),
+ ('offset_unmix_parameters', 'u4'),
+ # LSM 3.2, 4.0 end here
+ ('offset_acquisition_parameters', 'u4'),
+ ('offset_characteristics', 'u4'),
+ ('offset_palette', 'u4'),
+ ('time_difference_x', 'f8'),
+ ('time_difference_y', 'f8'),
+ ('time_difference_z', 'f8'),
+ ('internal_use_1', 'u4'),
+ ('dimension_p', 'i4'),
+ ('dimension_m', 'i4'),
+ ('dimensions_reserved', '16i4'),
+ ('offset_tile_positions', 'u4'),
+ ('reserved_1', '9u4'),
+ ('offset_positions', 'u4'),
+ ('reserved_2', '21u4'), # must be 0
+]
+
+# Import functions for LSM_INFO sub-records
+CZ_LSM_INFO_READERS = {
+ 'scan_info': read_cz_lsm_scan_info,
+ 'time_stamps': read_cz_lsm_time_stamps,
+ 'event_list': read_cz_lsm_event_list,
+ 'channel_colors': read_cz_lsm_floatpairs,
+ 'positions': read_cz_lsm_floatpairs,
+ 'tile_positions': read_cz_lsm_floatpairs,
+}
+
+# Map cz_lsm_info.scan_type to dimension order
+CZ_SCAN_TYPES = {
+ 0: 'XYZCT', # x-y-z scan
+ 1: 'XYZCT', # z scan (x-z plane)
+ 2: 'XYZCT', # line scan
+ 3: 'XYTCZ', # time series x-y
+ 4: 'XYZTC', # time series x-z
+ 5: 'XYTCZ', # time series 'Mean of ROIs'
+ 6: 'XYZTC', # time series x-y-z
+ 7: 'XYCTZ', # spline scan
+ 8: 'XYCZT', # spline scan x-z
+ 9: 'XYTCZ', # time series spline plane x-z
+ 10: 'XYZCT', # point mode
+}
+
+# Map dimension codes to cz_lsm_info attribute
+CZ_DIMENSIONS = {
+ 'X': 'dimension_x',
+ 'Y': 'dimension_y',
+ 'Z': 'dimension_z',
+ 'C': 'dimension_channels',
+ 'T': 'dimension_time',
+}
+
+# Description of cz_lsm_info.data_type
+CZ_DATA_TYPES = {
+ 0: 'varying data types',
+ 1: '8 bit unsigned integer',
+ 2: '12 bit unsigned integer',
+ 5: '32 bit float',
+}
+
+# Description of cz_lsm_info.type_of_data
+CZ_TYPE_OF_DATA = {
+ 0: 'Original scan data',
+ 1: 'Calculated data',
+ 2: '3D reconstruction',
+ 3: 'Topography height map',
+}
+
+CZ_LSM_SCAN_INFO_ARRAYS = {
+ 0x20000000: "tracks",
+ 0x30000000: "lasers",
+ 0x60000000: "detection_channels",
+ 0x80000000: "illumination_channels",
+ 0xa0000000: "beam_splitters",
+ 0xc0000000: "data_channels",
+ 0x11000000: "timers",
+ 0x13000000: "markers",
+}
+
+CZ_LSM_SCAN_INFO_STRUCTS = {
+ # 0x10000000: "recording",
+ 0x40000000: "track",
+ 0x50000000: "laser",
+ 0x70000000: "detection_channel",
+ 0x90000000: "illumination_channel",
+ 0xb0000000: "beam_splitter",
+ 0xd0000000: "data_channel",
+ 0x12000000: "timer",
+ 0x14000000: "marker",
+}
+
+CZ_LSM_SCAN_INFO_ATTRIBUTES = {
+ # recording
+ 0x10000001: "name",
+ 0x10000002: "description",
+ 0x10000003: "notes",
+ 0x10000004: "objective",
+ 0x10000005: "processing_summary",
+ 0x10000006: "special_scan_mode",
+ 0x10000007: "scan_type",
+ 0x10000008: "scan_mode",
+ 0x10000009: "number_of_stacks",
+ 0x1000000a: "lines_per_plane",
+ 0x1000000b: "samples_per_line",
+ 0x1000000c: "planes_per_volume",
+ 0x1000000d: "images_width",
+ 0x1000000e: "images_height",
+ 0x1000000f: "images_number_planes",
+ 0x10000010: "images_number_stacks",
+ 0x10000011: "images_number_channels",
+ 0x10000012: "linscan_xy_size",
+ 0x10000013: "scan_direction",
+ 0x10000014: "time_series",
+ 0x10000015: "original_scan_data",
+ 0x10000016: "zoom_x",
+ 0x10000017: "zoom_y",
+ 0x10000018: "zoom_z",
+ 0x10000019: "sample_0x",
+ 0x1000001a: "sample_0y",
+ 0x1000001b: "sample_0z",
+ 0x1000001c: "sample_spacing",
+ 0x1000001d: "line_spacing",
+ 0x1000001e: "plane_spacing",
+ 0x1000001f: "plane_width",
+ 0x10000020: "plane_height",
+ 0x10000021: "volume_depth",
+ 0x10000023: "nutation",
+ 0x10000034: "rotation",
+ 0x10000035: "precession",
+ 0x10000036: "sample_0time",
+ 0x10000037: "start_scan_trigger_in",
+ 0x10000038: "start_scan_trigger_out",
+ 0x10000039: "start_scan_event",
+ 0x10000040: "start_scan_time",
+ 0x10000041: "stop_scan_trigger_in",
+ 0x10000042: "stop_scan_trigger_out",
+ 0x10000043: "stop_scan_event",
+ 0x10000044: "stop_scan_time",
+ 0x10000045: "use_rois",
+ 0x10000046: "use_reduced_memory_rois",
+ 0x10000047: "user",
+ 0x10000048: "use_bc_correction",
+ 0x10000049: "position_bc_correction1",
+ 0x10000050: "position_bc_correction2",
+ 0x10000051: "interpolation_y",
+ 0x10000052: "camera_binning",
+ 0x10000053: "camera_supersampling",
+ 0x10000054: "camera_frame_width",
+ 0x10000055: "camera_frame_height",
+ 0x10000056: "camera_offset_x",
+ 0x10000057: "camera_offset_y",
+ 0x10000059: "rt_binning",
+ 0x1000005a: "rt_frame_width",
+ 0x1000005b: "rt_frame_height",
+ 0x1000005c: "rt_region_width",
+ 0x1000005d: "rt_region_height",
+ 0x1000005e: "rt_offset_x",
+ 0x1000005f: "rt_offset_y",
+ 0x10000060: "rt_zoom",
+ 0x10000061: "rt_line_period",
+ 0x10000062: "prescan",
+ 0x10000063: "scan_direction_z",
+ # track
+ 0x40000001: "multiplex_type", # 0 after line; 1 after frame
+ 0x40000002: "multiplex_order",
+ 0x40000003: "sampling_mode", # 0 sample; 1 line average; 2 frame average
+ 0x40000004: "sampling_method", # 1 mean; 2 sum
+ 0x40000005: "sampling_number",
+ 0x40000006: "acquire",
+ 0x40000007: "sample_observation_time",
+ 0x4000000b: "time_between_stacks",
+ 0x4000000c: "name",
+ 0x4000000d: "collimator1_name",
+ 0x4000000e: "collimator1_position",
+ 0x4000000f: "collimator2_name",
+ 0x40000010: "collimator2_position",
+ 0x40000011: "is_bleach_track",
+ 0x40000012: "is_bleach_after_scan_number",
+ 0x40000013: "bleach_scan_number",
+ 0x40000014: "trigger_in",
+ 0x40000015: "trigger_out",
+ 0x40000016: "is_ratio_track",
+ 0x40000017: "bleach_count",
+ 0x40000018: "spi_center_wavelength",
+ 0x40000019: "pixel_time",
+ 0x40000021: "condensor_frontlens",
+ 0x40000023: "field_stop_value",
+ 0x40000024: "id_condensor_aperture",
+ 0x40000025: "condensor_aperture",
+ 0x40000026: "id_condensor_revolver",
+ 0x40000027: "condensor_filter",
+ 0x40000028: "id_transmission_filter1",
+ 0x40000029: "id_transmission1",
+ 0x40000030: "id_transmission_filter2",
+ 0x40000031: "id_transmission2",
+ 0x40000032: "repeat_bleach",
+ 0x40000033: "enable_spot_bleach_pos",
+ 0x40000034: "spot_bleach_posx",
+ 0x40000035: "spot_bleach_posy",
+ 0x40000036: "spot_bleach_posz",
+ 0x40000037: "id_tubelens",
+ 0x40000038: "id_tubelens_position",
+ 0x40000039: "transmitted_light",
+ 0x4000003a: "reflected_light",
+ 0x4000003b: "simultan_grab_and_bleach",
+ 0x4000003c: "bleach_pixel_time",
+ # laser
+ 0x50000001: "name",
+ 0x50000002: "acquire",
+ 0x50000003: "power",
+ # detection_channel
+ 0x70000001: "integration_mode",
+ 0x70000002: "special_mode",
+ 0x70000003: "detector_gain_first",
+ 0x70000004: "detector_gain_last",
+ 0x70000005: "amplifier_gain_first",
+ 0x70000006: "amplifier_gain_last",
+ 0x70000007: "amplifier_offs_first",
+ 0x70000008: "amplifier_offs_last",
+ 0x70000009: "pinhole_diameter",
+ 0x7000000a: "counting_trigger",
+ 0x7000000b: "acquire",
+ 0x7000000c: "point_detector_name",
+ 0x7000000d: "amplifier_name",
+ 0x7000000e: "pinhole_name",
+ 0x7000000f: "filter_set_name",
+ 0x70000010: "filter_name",
+ 0x70000013: "integrator_name",
+ 0x70000014: "channel_name",
+ 0x70000015: "detector_gain_bc1",
+ 0x70000016: "detector_gain_bc2",
+ 0x70000017: "amplifier_gain_bc1",
+ 0x70000018: "amplifier_gain_bc2",
+ 0x70000019: "amplifier_offset_bc1",
+ 0x70000020: "amplifier_offset_bc2",
+ 0x70000021: "spectral_scan_channels",
+ 0x70000022: "spi_wavelength_start",
+ 0x70000023: "spi_wavelength_stop",
+ 0x70000026: "dye_name",
+ 0x70000027: "dye_folder",
+ # illumination_channel
+ 0x90000001: "name",
+ 0x90000002: "power",
+ 0x90000003: "wavelength",
+ 0x90000004: "aquire",
+ 0x90000005: "detchannel_name",
+ 0x90000006: "power_bc1",
+ 0x90000007: "power_bc2",
+ # beam_splitter
+ 0xb0000001: "filter_set",
+ 0xb0000002: "filter",
+ 0xb0000003: "name",
+ # data_channel
+ 0xd0000001: "name",
+ 0xd0000003: "acquire",
+ 0xd0000004: "color",
+ 0xd0000005: "sample_type",
+ 0xd0000006: "bits_per_sample",
+ 0xd0000007: "ratio_type",
+ 0xd0000008: "ratio_track1",
+ 0xd0000009: "ratio_track2",
+ 0xd000000a: "ratio_channel1",
+ 0xd000000b: "ratio_channel2",
+ 0xd000000c: "ratio_const1",
+ 0xd000000d: "ratio_const2",
+ 0xd000000e: "ratio_const3",
+ 0xd000000f: "ratio_const4",
+ 0xd0000010: "ratio_const5",
+ 0xd0000011: "ratio_const6",
+ 0xd0000012: "ratio_first_images1",
+ 0xd0000013: "ratio_first_images2",
+ 0xd0000014: "dye_name",
+ 0xd0000015: "dye_folder",
+ 0xd0000016: "spectrum",
+ 0xd0000017: "acquire",
+ # timer
+ 0x12000001: "name",
+ 0x12000002: "description",
+ 0x12000003: "interval",
+ 0x12000004: "trigger_in",
+ 0x12000005: "trigger_out",
+ 0x12000006: "activation_time",
+ 0x12000007: "activation_number",
+ # marker
+ 0x14000001: "name",
+ 0x14000002: "description",
+ 0x14000003: "trigger_in",
+ 0x14000004: "trigger_out",
+}
+
+# Map TIFF tag code to attribute name, default value, type, count, validator
+TIFF_TAGS = {
+ 254: ('new_subfile_type', 0, 4, 1, TIFF_SUBFILE_TYPES()),
+ 255: ('subfile_type', None, 3, 1,
+ {0: 'undefined', 1: 'image', 2: 'reduced_image', 3: 'page'}),
+ 256: ('image_width', None, 4, 1, None),
+ 257: ('image_length', None, 4, 1, None),
+ 258: ('bits_per_sample', 1, 3, 1, None),
+ 259: ('compression', 1, 3, 1, TIFF_COMPESSIONS),
+ 262: ('photometric', None, 3, 1, TIFF_PHOTOMETRICS),
+ 266: ('fill_order', 1, 3, 1, {1: 'msb2lsb', 2: 'lsb2msb'}),
+ 269: ('document_name', None, 2, None, None),
+ 270: ('image_description', None, 2, None, None),
+ 271: ('make', None, 2, None, None),
+ 272: ('model', None, 2, None, None),
+ 273: ('strip_offsets', None, 4, None, None),
+ 274: ('orientation', 1, 3, 1, TIFF_ORIENTATIONS),
+ 277: ('samples_per_pixel', 1, 3, 1, None),
+ 278: ('rows_per_strip', 2**32-1, 4, 1, None),
+ 279: ('strip_byte_counts', None, 4, None, None),
+ 280: ('min_sample_value', None, 3, None, None),
+ 281: ('max_sample_value', None, 3, None, None), # 2**bits_per_sample
+ 282: ('x_resolution', None, 5, 1, None),
+ 283: ('y_resolution', None, 5, 1, None),
+ 284: ('planar_configuration', 1, 3, 1, {1: 'contig', 2: 'separate'}),
+ 285: ('page_name', None, 2, None, None),
+ 286: ('x_position', None, 5, 1, None),
+ 287: ('y_position', None, 5, 1, None),
+ 296: ('resolution_unit', 2, 4, 1, {1: 'none', 2: 'inch', 3: 'centimeter'}),
+ 297: ('page_number', None, 3, 2, None),
+ 305: ('software', None, 2, None, None),
+ 306: ('datetime', None, 2, None, None),
+ 315: ('artist', None, 2, None, None),
+ 316: ('host_computer', None, 2, None, None),
+ 317: ('predictor', 1, 3, 1, {1: None, 2: 'horizontal'}),
+ 318: ('white_point', None, 5, 2, None),
+ 319: ('primary_chromaticities', None, 5, 6, None),
+ 320: ('color_map', None, 3, None, None),
+ 322: ('tile_width', None, 4, 1, None),
+ 323: ('tile_length', None, 4, 1, None),
+ 324: ('tile_offsets', None, 4, None, None),
+ 325: ('tile_byte_counts', None, 4, None, None),
+ 338: ('extra_samples', None, 3, None,
+ {0: 'unspecified', 1: 'assocalpha', 2: 'unassalpha'}),
+ 339: ('sample_format', 1, 3, 1, TIFF_SAMPLE_FORMATS),
+ 340: ('smin_sample_value', None, None, None, None),
+ 341: ('smax_sample_value', None, None, None, None),
+ 347: ('jpeg_tables', None, 7, None, None),
+ 530: ('ycbcr_subsampling', 1, 3, 2, None),
+ 531: ('ycbcr_positioning', 1, 3, 1, None),
+ 32995: ('sgi_matteing', None, None, 1, None), # use extra_samples
+ 32996: ('sgi_datatype', None, None, 1, None), # use sample_format
+ 32997: ('image_depth', None, 4, 1, None),
+ 32998: ('tile_depth', None, 4, 1, None),
+ 33432: ('copyright', None, 1, None, None),
+ 33445: ('md_file_tag', None, 4, 1, None),
+ 33446: ('md_scale_pixel', None, 5, 1, None),
+ 33447: ('md_color_table', None, 3, None, None),
+ 33448: ('md_lab_name', None, 2, None, None),
+ 33449: ('md_sample_info', None, 2, None, None),
+ 33450: ('md_prep_date', None, 2, None, None),
+ 33451: ('md_prep_time', None, 2, None, None),
+ 33452: ('md_file_units', None, 2, None, None),
+ 33550: ('model_pixel_scale', None, 12, 3, None),
+ 33922: ('model_tie_point', None, 12, None, None),
+ 34665: ('exif_ifd', None, None, 1, None),
+ 34735: ('geo_key_directory', None, 3, None, None),
+ 34736: ('geo_double_params', None, 12, None, None),
+ 34737: ('geo_ascii_params', None, 2, None, None),
+ 34853: ('gps_ifd', None, None, 1, None),
+ 37510: ('user_comment', None, None, None, None),
+ 42112: ('gdal_metadata', None, 2, None, None),
+ 42113: ('gdal_nodata', None, 2, None, None),
+ 50289: ('mc_xy_position', None, 12, 2, None),
+ 50290: ('mc_z_position', None, 12, 1, None),
+ 50291: ('mc_xy_calibration', None, 12, 3, None),
+ 50292: ('mc_lens_lem_na_n', None, 12, 3, None),
+ 50293: ('mc_channel_name', None, 1, None, None),
+ 50294: ('mc_ex_wavelength', None, 12, 1, None),
+ 50295: ('mc_time_stamp', None, 12, 1, None),
+ 50838: ('imagej_byte_counts', None, None, None, None),
+ 65200: ('flex_xml', None, 2, None, None),
+ # code: (attribute name, default value, type, count, validator)
+}
+
+# Map custom TIFF tag codes to attribute names and import functions
+CUSTOM_TAGS = {
+ 700: ('xmp', read_bytes),
+ 34377: ('photoshop', read_numpy),
+ 33723: ('iptc', read_bytes),
+ 34675: ('icc_profile', read_bytes),
+ 33628: ('uic1tag', read_uic1tag), # Universal Imaging Corporation STK
+ 33629: ('uic2tag', read_uic2tag),
+ 33630: ('uic3tag', read_uic3tag),
+ 33631: ('uic4tag', read_uic4tag),
+ 34361: ('mm_header', read_mm_header), # Olympus FluoView
+ 34362: ('mm_stamp', read_mm_stamp),
+ 34386: ('mm_user_block', read_bytes),
+ 34412: ('cz_lsm_info', read_cz_lsm_info), # Carl Zeiss LSM
+ 43314: ('nih_image_header', read_nih_image_header),
+ # 40001: ('mc_ipwinscal', read_bytes),
+ 40100: ('mc_id_old', read_bytes),
+ 50288: ('mc_id', read_bytes),
+ 50296: ('mc_frame_properties', read_bytes),
+ 50839: ('imagej_metadata', read_bytes),
+ 51123: ('micromanager_metadata', read_json),
+}
+
+# Max line length of printed output
+PRINT_LINE_LEN = 79
+
+
+def imshow(data, title=None, vmin=0, vmax=None, cmap=None,
+ bitspersample=None, photometric='rgb', interpolation='nearest',
+ dpi=96, figure=None, subplot=111, maxdim=8192, **kwargs):
+ """Plot n-dimensional images using matplotlib.pyplot.
+
+ Return figure, subplot and plot axis.
+ Requires pyplot already imported ``from matplotlib import pyplot``.
+
+ Parameters
+ ----------
+ bitspersample : int or None
+ Number of bits per channel in integer RGB images.
+ photometric : {'miniswhite', 'minisblack', 'rgb', or 'palette'}
+ The color space of the image data.
+ title : str
+ Window and subplot title.
+ figure : matplotlib.figure.Figure (optional).
+ Matplotlib figure to use for plotting.
+ subplot : int
+ A matplotlib.pyplot.subplot axis.
+ maxdim : int
+ Maximum image size in any dimension.
+ kwargs : optional
+ Arguments for matplotlib.pyplot.imshow.
+
+ """
+ #if photometric not in ('miniswhite', 'minisblack', 'rgb', 'palette'):
+ # raise ValueError("Can't handle %s photometrics" % photometric)
+ # TODO: handle photometric == 'separated' (CMYK)
+ isrgb = photometric in ('rgb', 'palette')
+ data = numpy.atleast_2d(data.squeeze())
+ data = data[(slice(0, maxdim), ) * len(data.shape)]
+
+ dims = data.ndim
+ if dims < 2:
+ raise ValueError("not an image")
+ elif dims == 2:
+ dims = 0
+ isrgb = False
+ else:
+ if isrgb and data.shape[-3] in (3, 4):
+ data = numpy.swapaxes(data, -3, -2)
+ data = numpy.swapaxes(data, -2, -1)
+ elif not isrgb and (data.shape[-1] < data.shape[-2] // 16 and
+ data.shape[-1] < data.shape[-3] // 16 and
+ data.shape[-1] < 5):
+ data = numpy.swapaxes(data, -3, -1)
+ data = numpy.swapaxes(data, -2, -1)
+ isrgb = isrgb and data.shape[-1] in (3, 4)
+ dims -= 3 if isrgb else 2
+
+ if photometric == 'palette' and isrgb:
+ datamax = data.max()
+ if datamax > 255:
+ data >>= 8 # possible precision loss
+ data = data.astype('B')
+ elif data.dtype.kind in 'ui':
+ if not (isrgb and data.dtype.itemsize <= 1) or bitspersample is None:
+ try:
+ bitspersample = int(math.ceil(math.log(data.max(), 2)))
+ except Exception:
+ bitspersample = data.dtype.itemsize * 8
+ elif not isinstance(bitspersample, int):
+ # bitspersample can be tuple, e.g. (5, 6, 5)
+ bitspersample = data.dtype.itemsize * 8
+ datamax = 2**bitspersample
+ if isrgb:
+ if bitspersample < 8:
+ data <<= 8 - bitspersample
+ elif bitspersample > 8:
+ data >>= bitspersample - 8 # precision loss
+ data = data.astype('B')
+ elif data.dtype.kind == 'f':
+ datamax = data.max()
+ if isrgb and datamax > 1.0:
+ if data.dtype.char == 'd':
+ data = data.astype('f')
+ data /= datamax
+ elif data.dtype.kind == 'b':
+ datamax = 1
+ elif data.dtype.kind == 'c':
+ raise NotImplementedError("complex type") # TODO: handle complex types
+
+ if not isrgb:
+ if vmax is None:
+ vmax = datamax
+ if vmin is None:
+ if data.dtype.kind == 'i':
+ dtmin = numpy.iinfo(data.dtype).min
+ vmin = numpy.min(data)
+ if vmin == dtmin:
+ vmin = numpy.min(data[data > dtmin])
+ if data.dtype.kind == 'f':
+ dtmin = numpy.finfo(data.dtype).min
+ vmin = numpy.min(data)
+ if vmin == dtmin:
+ vmin = numpy.min(data[data > dtmin])
+ else:
+ vmin = 0
+
+ pyplot = sys.modules['matplotlib.pyplot']
+
+ if figure is None:
+ pyplot.rc('font', family='sans-serif', weight='normal', size=8)
+ figure = pyplot.figure(dpi=dpi, figsize=(10.3, 6.3), frameon=True,
+ facecolor='1.0', edgecolor='w')
+ try:
+ figure.canvas.manager.window.title(title)
+ except Exception:
+ pass
+ pyplot.subplots_adjust(bottom=0.03*(dims+2), top=0.9,
+ left=0.1, right=0.95, hspace=0.05, wspace=0.0)
+ subplot = pyplot.subplot(subplot)
+
+ if title:
+ try:
+ title = unicode(title, 'Windows-1252')
+ except TypeError:
+ pass
+ pyplot.title(title, size=11)
+
+ if cmap is None:
+ if data.dtype.kind in 'ubf' or vmin == 0:
+ cmap = 'cubehelix'
+ else:
+ cmap = 'coolwarm'
+ if photometric == 'miniswhite':
+ cmap += '_r'
+
+ image = pyplot.imshow(data[(0, ) * dims].squeeze(), vmin=vmin, vmax=vmax,
+ cmap=cmap, interpolation=interpolation, **kwargs)
+
+ if not isrgb:
+ pyplot.colorbar() # panchor=(0.55, 0.5), fraction=0.05
+
+ def format_coord(x, y):
+ # callback function to format coordinate display in toolbar
+ x = int(x + 0.5)
+ y = int(y + 0.5)
+ try:
+ if dims:
+ return "%s @ %s [%4i, %4i]" % (cur_ax_dat[1][y, x],
+ current, x, y)
+ else:
+ return "%s @ [%4i, %4i]" % (data[y, x], x, y)
+ except IndexError:
+ return ""
+
+ pyplot.gca().format_coord = format_coord
+
+ if dims:
+ current = list((0, ) * dims)
+ cur_ax_dat = [0, data[tuple(current)].squeeze()]
+ sliders = [pyplot.Slider(
+ pyplot.axes([0.125, 0.03*(axis+1), 0.725, 0.025]),
+ 'Dimension %i' % axis, 0, data.shape[axis]-1, 0, facecolor='0.5',
+ valfmt='%%.0f [%i]' % data.shape[axis]) for axis in range(dims)]
+ for slider in sliders:
+ slider.drawon = False
+
+ def set_image(current, sliders=sliders, data=data):
+ # change image and redraw canvas
+ cur_ax_dat[1] = data[tuple(current)].squeeze()
+ image.set_data(cur_ax_dat[1])
+ for ctrl, index in zip(sliders, current):
+ ctrl.eventson = False
+ ctrl.set_val(index)
+ ctrl.eventson = True
+ figure.canvas.draw()
+
+ def on_changed(index, axis, data=data, current=current):
+ # callback function for slider change event
+ index = int(round(index))
+ cur_ax_dat[0] = axis
+ if index == current[axis]:
+ return
+ if index >= data.shape[axis]:
+ index = 0
+ elif index < 0:
+ index = data.shape[axis] - 1
+ current[axis] = index
+ set_image(current)
+
+ def on_keypressed(event, data=data, current=current):
+ # callback function for key press event
+ key = event.key
+ axis = cur_ax_dat[0]
+ if str(key) in '0123456789':
+ on_changed(key, axis)
+ elif key == 'right':
+ on_changed(current[axis] + 1, axis)
+ elif key == 'left':
+ on_changed(current[axis] - 1, axis)
+ elif key == 'up':
+ cur_ax_dat[0] = 0 if axis == len(data.shape)-1 else axis + 1
+ elif key == 'down':
+ cur_ax_dat[0] = len(data.shape)-1 if axis == 0 else axis - 1
+ elif key == 'end':
+ on_changed(data.shape[axis] - 1, axis)
+ elif key == 'home':
+ on_changed(0, axis)
+
+ figure.canvas.mpl_connect('key_press_event', on_keypressed)
+ for axis, ctrl in enumerate(sliders):
+ ctrl.on_changed(lambda k, a=axis: on_changed(k, a))
+
+ return figure, subplot, image
+
+
+def _app_show():
+ """Block the GUI. For use as skimage plugin."""
+ pyplot = sys.modules['matplotlib.pyplot']
+ pyplot.show()
+
+
+def main(argv=None):
+ """Command line usage main function."""
+ if float(sys.version[0:3]) < 2.6:
+ print("This script requires Python version 2.6 or better.")
+ print("This is Python version %s" % sys.version)
+ return 0
+ if argv is None:
+ argv = sys.argv
+
+ import optparse
+
+ parser = optparse.OptionParser(
+ usage="usage: %prog [options] path",
+ description="Display image data in TIFF files.",
+ version="%%prog %s" % __version__)
+ opt = parser.add_option
+ opt('-p', '--page', dest='page', type='int', default=-1,
+ help="display single page")
+ opt('-s', '--series', dest='series', type='int', default=-1,
+ help="display series of pages of same shape")
+ opt('--nomultifile', dest='nomultifile', action='store_true',
+ default=False, help="don't read OME series from multiple files")
+ opt('--noplot', dest='noplot', action='store_true', default=False,
+ help="don't display images")
+ opt('--interpol', dest='interpol', metavar='INTERPOL', default='bilinear',
+ help="image interpolation method")
+ opt('--dpi', dest='dpi', type='int', default=96,
+ help="set plot resolution")
+ opt('--debug', dest='debug', action='store_true', default=False,
+ help="raise exception on failures")
+ opt('--test', dest='test', action='store_true', default=False,
+ help="try read all images in path")
+ opt('--doctest', dest='doctest', action='store_true', default=False,
+ help="runs the docstring examples")
+ opt('-v', '--verbose', dest='verbose', action='store_true', default=True)
+ opt('-q', '--quiet', dest='verbose', action='store_false')
+
+ settings, path = parser.parse_args()
+ path = ' '.join(path)
+
+ if settings.doctest:
+ import doctest
+ doctest.testmod()
+ return 0
+ if not path:
+ parser.error("No file specified")
+ if settings.test:
+ test_tifffile(path, settings.verbose)
+ return 0
+
+ if any(i in path for i in '?*'):
+ path = glob.glob(path)
+ if not path:
+ print('no files match the pattern')
+ return 0
+ # TODO: handle image sequences
+ #if len(path) == 1:
+ path = path[0]
+
+ print("Reading file structure...", end=' ')
+ start = time.time()
+ try:
+ tif = TiffFile(path, multifile=not settings.nomultifile)
+ except Exception as e:
+ if settings.debug:
+ raise
+ else:
+ print("\n", e)
+ sys.exit(0)
+ print("%.3f ms" % ((time.time()-start) * 1e3))
+
+ if tif.is_ome:
+ settings.norgb = True
+
+ images = [(None, tif[0 if settings.page < 0 else settings.page])]
+ if not settings.noplot:
+ print("Reading image data... ", end=' ')
+
+ def notnone(x):
+ return next(i for i in x if i is not None)
+ start = time.time()
+ try:
+ if settings.page >= 0:
+ images = [(tif.asarray(key=settings.page),
+ tif[settings.page])]
+ elif settings.series >= 0:
+ images = [(tif.asarray(series=settings.series),
+ notnone(tif.series[settings.series].pages))]
+ else:
+ images = []
+ for i, s in enumerate(tif.series):
+ try:
+ images.append(
+ (tif.asarray(series=i), notnone(s.pages)))
+ except ValueError as e:
+ images.append((None, notnone(s.pages)))
+ if settings.debug:
+ raise
+ else:
+ print("\n* series %i failed: %s... " % (i, e),
+ end='')
+ print("%.3f ms" % ((time.time()-start) * 1e3))
+ except Exception as e:
+ if settings.debug:
+ raise
+ else:
+ print(e)
+
+ tif.close()
+
+ print("\nTIFF file:", tif)
+ print()
+ for i, s in enumerate(tif.series):
+ print("Series %i" % i)
+ print(s)
+ print()
+ for i, page in images:
+ print(page)
+ print(page.tags)
+ if page.is_palette:
+ print("\nColor Map:", page.color_map.shape, page.color_map.dtype)
+ for attr in ('cz_lsm_info', 'cz_lsm_scan_info', 'uic_tags',
+ 'mm_header', 'imagej_tags', 'micromanager_metadata',
+ 'nih_image_header'):
+ if hasattr(page, attr):
+ print("", attr.upper(), Record(getattr(page, attr)), sep="\n")
+ print()
+ if page.is_micromanager:
+ print('MICROMANAGER_FILE_METADATA')
+ print(Record(tif.micromanager_metadata))
+
+ if images and not settings.noplot:
+ try:
+ import matplotlib
+ matplotlib.use('TkAgg')
+ from matplotlib import pyplot
+ except ImportError as e:
+ warnings.warn("failed to import matplotlib.\n%s" % e)
+ else:
+ for img, page in images:
+ if img is None:
+ continue
+ vmin, vmax = None, None
+ if 'gdal_nodata' in page.tags:
+ try:
+ vmin = numpy.min(img[img > float(page.gdal_nodata)])
+ except ValueError:
+ pass
+ if page.is_stk:
+ try:
+ vmin = page.uic_tags['min_scale']
+ vmax = page.uic_tags['max_scale']
+ except KeyError:
+ pass
+ else:
+ if vmax <= vmin:
+ vmin, vmax = None, None
+ title = "%s\n %s" % (str(tif), str(page))
+ imshow(img, title=title, vmin=vmin, vmax=vmax,
+ bitspersample=page.bits_per_sample,
+ photometric=page.photometric,
+ interpolation=settings.interpol,
+ dpi=settings.dpi)
+ pyplot.show()
+
+
+TIFFfile = TiffFile # backwards compatibility
+
+if sys.version_info[0] > 2:
+ basestring = str, bytes
+ unicode = str
+
+if __name__ == "__main__":
+ sys.exit(main())
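Before the plugin wrapper (the next file in this diff) gets involved, the vendored module can be driven directly through the same `TiffWriter`/`TiffFile` classes the plugin calls. A minimal sketch, assuming the module is importable as `imageio.plugins._tifffile` and using an invented file path:

```python
import numpy as np
from imageio.plugins import _tifffile

data = np.zeros((2, 16, 16), np.uint16)

# TiffWriter.save() writes one page per call, as the plugin's Writer does.
tif = _tifffile.TiffWriter('/tmp/low_level.tif')  # hypothetical path
for page in data:
    tif.save(page)
tif.close()

# TiffFile.asarray() stacks same-shaped pages back into one array.
reader = _tifffile.TiffFile('/tmp/low_level.tif')
back = reader.asarray()
reader.close()
assert back.shape == (2, 16, 16)
```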
diff --git a/imageio/plugins/tifffile.py b/imageio/plugins/tifffile.py
new file mode 100644
index 0000000..6e4eba0
--- /dev/null
+++ b/imageio/plugins/tifffile.py
@@ -0,0 +1,229 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2015, imageio contributors
+# imageio is distributed under the terms of the (new) BSD License.
+
+""" Storage of image data in tiff format.
+"""
+
+from __future__ import absolute_import, print_function, division
+
+from .. import formats
+from ..core import Format
+
+import numpy as np
+
+_tifffile = None # Defer loading to lib() function.
+
+
+def load_lib():
+ global _tifffile
+ try:
+ import tifffile as _tifffile
+ except ImportError:
+ from . import _tifffile
+ return _tifffile
+
+
+TIFF_FORMATS = ('.tif', '.tiff', '.stk', '.lsm')
+WRITE_METADATA_KEYS = ('photometric', 'planarconfig', 'resolution',
+ 'description', 'compress', 'volume', 'writeshape',
+ 'extratags')
+READ_METADATA_KEYS = ('planar_configuration', 'is_fluoview', 'is_nih',
+ 'is_contig', 'is_micromanager', 'is_ome', 'is_lsm',
+ 'is_palette', 'is_reduced', 'is_rgb', 'is_sgi',
+ 'is_shaped', 'is_stk', 'is_tiled', 'is_mdgel',
+ 'resolution_unit', 'compression', 'is_mediacy',
+ 'orientation')
+
+
+class TiffFormat(Format):
+
+ """ Provides support for a wide range of Tiff images.
+
+ Parameters for reading
+ ----------------------
+ offset : int
+ Optional start position of embedded file. By default this is
+ the current file position.
+ size : int
+ Optional size of embedded file. By default this is the number
+ of bytes from the 'offset' to the end of the file.
+ multifile : bool
+ If True (default), series may include pages from multiple files.
+ Currently applies to OME-TIFF only.
+ multifile_close : bool
+ If True (default), keep the handles of other files in multifile
+ series closed. This is inefficient when few files refer to
+ many pages. If False, the C runtime may run out of resources.
+
+ Parameters for saving
+ ---------------------
+ bigtiff : bool
+ If True, the BigTIFF format is used.
+ byteorder : {'<', '>'}
+ The endianness of the data in the file.
+ By default this is the system's native byte order.
+ software : str
+ Name of the software used to create the image.
+ Saved with the first page only.
+
+ Metadata for reading
+ --------------------
+ planar_configuration : {'contig', 'planar'}
+ Specifies if samples are stored contiguous or in separate planes.
+ By default this setting is inferred from the data shape.
+ 'contig': last dimension contains samples.
+ 'planar': third last dimension contains samples.
+ resolution_unit : (float, float) or ((int, int), (int, int))
+ X and Y resolution in dots per inch as float or rational numbers.
+ compression : int
+ Values from 0 to 9 indicating the level of zlib compression.
+ If 0, data is uncompressed.
+ orientation : {'top_left', 'bottom_right', ...}
+ Orientation of the image array.
+ is_rgb : bool
+ True if page contains a RGB image.
+ is_contig : bool
+ True if page contains a contiguous image.
+ is_tiled : bool
+ True if page contains tiled image.
+ is_palette : bool
+ True if page contains a palette-colored image and not OME or STK.
+ is_reduced : bool
+ True if page is a reduced image of another image.
+ is_shaped : bool
+ True if page contains shape in image_description tag.
+ is_fluoview : bool
+ True if page contains FluoView MM_STAMP tag.
+ is_nih : bool
+ True if page contains NIH image header.
+ is_micromanager : bool
+ True if page contains Micro-Manager metadata.
+ is_ome : bool
+ True if page contains OME-XML in image_description tag.
+ is_sgi : bool
+ True if page contains SGI image and tile depth tags.
+ is_stk : bool
+ True if page contains UIC2Tag tag.
+ is_mdgel : bool
+ True if page contains md_file_tag tag.
+ is_mediacy : bool
+ True if page contains Media Cybernetics Id tag.
+ is_stk : bool
+ True if page contains UIC2Tag tag.
+ is_lsm : bool
+ True if page contains LSM CZ_LSM_INFO tag.
+
+ Metadata for writing
+ --------------------
+ photometric : {'minisblack', 'miniswhite', 'rgb'}
+ The color space of the image data.
+ By default this setting is inferred from the data shape.
+ planarconfig : {'contig', 'planar'}
+ Specifies if samples are stored contiguous or in separate planes.
+ By default this setting is inferred from the data shape.
+ 'contig': last dimension contains samples.
+ 'planar': third last dimension contains samples.
+ resolution : (float, float) or ((int, int), (int, int))
+ X and Y resolution in dots per inch as float or rational numbers.
+ description : str
+ The subject of the image. Saved with the first page only.
+ compress : int
+ Values from 0 to 9 controlling the level of zlib compression.
+ If 0, data are written uncompressed (default).
+ volume : bool
+ If True, volume data are stored in one tile (if applicable) using
+ the SGI image_depth and tile_depth tags.
+ Image width and depth must be multiple of 16.
+ Few software can read this format, e.g. MeVisLab.
+ writeshape : bool
+ If True, write the data shape to the image_description tag
+ if necessary and no other description is given.
+ extratags: sequence of tuples
+ Additional tags as [(code, dtype, count, value, writeonce)].
+
+ code : int
+ The TIFF tag Id.
+ dtype : str
+ Data type of items in 'value' in Python struct format.
+ One of B, s, H, I, 2I, b, h, i, f, d, Q, or q.
+ count : int
+ Number of data values. Not used for string values.
+ value : sequence
+ 'Count' values compatible with 'dtype'.
+ writeonce : bool
+ If True, the tag is written to the first page only.
+ """
+
+ def _can_read(self, request):
+ # Claim any file whose extension is a known TIFF variant
+ return request.filename.lower().endswith(TIFF_FORMATS)
+
+ def _can_write(self, request):
+ # Claim any file whose extension is a known TIFF variant
+ return request.filename.lower().endswith(TIFF_FORMATS)
+
+ # -- reader
+
+ class Reader(Format.Reader):
+
+ def _open(self, **kwargs):
+ if not _tifffile:
+ load_lib()
+ self._tf = _tifffile.TiffFile(self.request.get_file(), **kwargs)
+
+ # metadata is the same for all images
+ self._meta = {}
+
+ def _close(self):
+ self._tf.close()
+
+ def _get_length(self):
+ return len(self._tf)
+
+ def _get_data(self, index):
+ # Get data
+ if index < 0 or index >= len(self._tf):
+ raise IndexError(
+ 'Index out of range while reading from tiff file')
+ im = self._tf[index].asarray()
+ meta = self._meta or self._get_meta_data(index)
+ # Return the array together with its meta data
+ return im, meta
+
+ def _get_meta_data(self, index):
+ page = self._tf[index or 0]
+ for key in READ_METADATA_KEYS:
+ try:
+ self._meta[key] = getattr(page, key)
+ except Exception:
+ pass
+ return self._meta
+
+ # -- writer
+ class Writer(Format.Writer):
+
+ def _open(self, bigtiff=None, byteorder=None, software=None):
+ if not _tifffile:
+ load_lib()
+ self._tf = _tifffile.TiffWriter(self.request.get_local_filename(),
+ bigtiff, byteorder, software)
+ self._meta = {}
+
+ def _close(self):
+ self._tf.close()
+
+ def _append_data(self, im, meta):
+ meta = meta or self._meta
+ print(meta)
+ self._tf.save(np.asanyarray(im), **meta)
+
+ def set_meta_data(self, meta):
+ self._meta = {}
+ for (key, value) in meta.items():
+ if key in WRITE_METADATA_KEYS:
+ self._meta[key] = value
+
+# Register
+format = TiffFormat('tiff', "TIFF format", 'tif tiff stk lsm', 'iIvV')
+formats.add_format(format)
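With the format registered as above, the plugin is reachable through imageio's normal top-level API. A minimal usage sketch (the file path is invented; this assumes numpy and an imageio checkout containing this patch):

```python
import numpy as np
import imageio

# Write a three-page TIFF stack; each page is a 10x10 RGB frame.
frames = [np.full((10, 10, 3), i, np.uint8) for i in range(3)]
imageio.mimsave('/tmp/example.tiff', frames)

# imread returns the first page, mimread returns every page.
first = imageio.imread('/tmp/example.tiff')
stack = imageio.mimread('/tmp/example.tiff')
assert first.shape == (10, 10, 3) and len(stack) == 3

# The Reader exposes the fields collected via READ_METADATA_KEYS.
reader = imageio.read('/tmp/example.tiff')
print(reader.get_meta_data().get('is_rgb'))
reader.close()
```

Per `TIFF_FORMATS`, the same code path also claims the .tif, .stk and .lsm extensions.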
| Plugin: scientific TIFF files
In some fields a variant of the TIFF format is heavily used, e.g. for multiplanar microscopic images, such as OME-TIFF (https://www.openmicroscopy.org/site/support/ome-model/ome-tiff/).
The freeimage TIFF reader only deals with basic TIFF.
This is not really my field, so I could use some input on what is needed.
One implementation that we could potentially use is: http://www.lfd.uci.edu/~gohlke/code/tifffile.py.html
I believe there was another, but I cannot remember. | imageio/imageio | diff --git a/tests/test_meta.py b/tests/test_meta.py
index d8bcf21..2d08a82 100644
--- a/tests/test_meta.py
+++ b/tests/test_meta.py
@@ -122,7 +122,8 @@ def test_import_dependencies():
# Get the difference; what do we import extra?
extra_modules = modnames_new.difference(modnames_ref)
- known_modules = ['zipfile', 'importlib'] # discard these
+ known_modules = ['zipfile', 'importlib', 'json', 'decimal',
+ 'fractions'] # discard these
# Remove modules in standard library
stdloc = os.path.dirname(os.__file__)
diff --git a/tests/test_tifffile.py b/tests/test_tifffile.py
new file mode 100644
index 0000000..67fb089
--- /dev/null
+++ b/tests/test_tifffile.py
@@ -0,0 +1,61 @@
+""" Test tifffile plugin functionality.
+"""
+
+import os
+
+import numpy as np
+
+from pytest import raises
+from imageio.testing import run_tests_if_main, get_test_dir
+
+import imageio
+
+test_dir = get_test_dir()
+
+
+def test_tifffile_format():
+ # Test selection
+ for name in ['tiff', '.tif']:
+ format = imageio.formats[name]
+ assert format.name == 'TIFF'
+
+
+def test_tifffile_reading_writing():
+ """ Test reading and saveing tiff """
+ im2 = np.ones((10, 10, 3), np.uint8) * 2
+
+ filename1 = os.path.join(test_dir, 'test_tiff.tiff')
+
+ # One image
+ imageio.imsave(filename1, im2)
+ im = imageio.imread(filename1)
+ ims = imageio.mimread(filename1)
+ assert (im == im2).all()
+ assert len(ims) == 1
+
+ # Multiple images
+ imageio.mimsave(filename1, [im2, im2, im2])
+ im = imageio.imread(filename1)
+ ims = imageio.mimread(filename1)
+ assert (im == im2).all()
+ assert len(ims) == 3, ims[0].shape
+
+ # Mixed
+ W = imageio.save(filename1)
+ W.set_meta_data({'planarconfig': 'planar'})
+ assert W.format.name == 'TIFF'
+ W.append_data(im2)
+ W.append_data(im2)
+ W.close()
+ #
+ R = imageio.read(filename1)
+ assert R.format.name == 'TIFF'
+ ims = list(R) # == [im for im in R]
+ assert (ims[0] == im2).all()
+ meta = R.get_meta_data()
+ assert meta['is_rgb']
+ # Fail
+ raises(IndexError, R.get_data, -1)
+ raises(IndexError, R.get_data, 3)
+
+run_tests_if_main()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 1
} | 1.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "numpy>=1.16.0",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y libfreeimage3"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup==1.2.2
-e git+https://github.com/imageio/imageio.git@d99d9a25cd8db0920d42cf5d3f372471776865fe#egg=imageio
iniconfig==2.1.0
numpy @ file:///croot/numpy_and_numpy_base_1736283260865/work/dist/numpy-2.0.2-cp39-cp39-linux_x86_64.whl#sha256=3387e3e62932fa288bc18e8f445ce19e998b418a65ed2064dd40a054f976a6c7
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytest-cov==6.0.0
tomli==2.2.1
| name: imageio
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- blas=1.0=openblas
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgfortran-ng=11.2.0=h00389a5_1
- libgfortran5=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libopenblas=0.3.21=h043d6bf_0
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- numpy=2.0.2=py39heeff2f4_0
- numpy-base=2.0.2=py39h8a23956_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=72.1.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-cov==6.0.0
- tomli==2.2.1
prefix: /opt/conda/envs/imageio
| [
"tests/test_tifffile.py::test_tifffile_reading_writing"
]
| []
| [
"tests/test_meta.py::test_namespace",
"tests/test_meta.py::test_import_nothing",
"tests/test_meta.py::test_import_modules",
"tests/test_meta.py::test_import_dependencies",
"tests/test_tifffile.py::test_tifffile_format"
]
| []
| BSD 2-Clause "Simplified" License | 183 | [
"imageio/plugins/__init__.py",
"imageio/plugins/_tifffile.py",
"imageio/plugins/tifffile.py"
]
| [
"imageio/plugins/__init__.py",
"imageio/plugins/_tifffile.py",
"imageio/plugins/tifffile.py"
]
|
|
softlayer__softlayer-python-571 | 7d3772b40ea8cb46065ecb4756d4746d02bfbf61 | 2015-07-06 19:24:20 | 1195b2020ef6efc40462d59eb079f26e5f39a6d8 | diff --git a/CHANGELOG b/CHANGELOG
index e5e1b8ad..3397760f 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,9 +1,3 @@
-4.0.4
-
- * Fixes bug with pulling the userData property for the virtual server detail
-
- * Fixes a class of bugs involving unicode from the API
-
4.0.3
* Fixes bug with `slcli vs ready` command
diff --git a/SoftLayer/CLI/formatting.py b/SoftLayer/CLI/formatting.py
index 6f35a985..4f10980d 100644
--- a/SoftLayer/CLI/formatting.py
+++ b/SoftLayer/CLI/formatting.py
@@ -50,7 +50,7 @@ def format_output(data, fmt='table'): # pylint: disable=R0911,R0912
# responds to .formatted
if hasattr(data, 'formatted'):
if fmt == 'table':
- return data.formatted
+ return str(data.formatted)
# responds to .separator
if hasattr(data, 'separator'):
diff --git a/SoftLayer/CLI/server/detail.py b/SoftLayer/CLI/server/detail.py
index 943b694b..9fc76c5d 100644
--- a/SoftLayer/CLI/server/detail.py
+++ b/SoftLayer/CLI/server/detail.py
@@ -96,8 +96,10 @@ def cli(env, identifier, passwords, price):
table.add_row(['remote users', pass_table])
tag_row = []
- for tag in result['tagReferences']:
- tag_row.append(tag['tag']['name'])
+ for tag_detail in result['tagReferences']:
+ tag = utils.lookup(tag_detail, 'tag', 'name')
+ if tag is not None:
+ tag_row.append(tag)
if tag_row:
table.add_row(['tags', formatting.listing(tag_row, separator=',')])
diff --git a/SoftLayer/CLI/virt/detail.py b/SoftLayer/CLI/virt/detail.py
index 3afe151e..4fc115ce 100644
--- a/SoftLayer/CLI/virt/detail.py
+++ b/SoftLayer/CLI/virt/detail.py
@@ -93,8 +93,10 @@ def cli(self, identifier, passwords=False, price=False):
table.add_row(['users', pass_table])
tag_row = []
- for tag in result['tagReferences']:
- tag_row.append(tag['tag']['name'])
+ for tag_detail in result['tagReferences']:
+ tag = utils.lookup(tag_detail, 'tag', 'name')
+ if tag is not None:
+ tag_row.append(tag)
if tag_row:
table.add_row(['tags', formatting.listing(tag_row, separator=', ')])
diff --git a/SoftLayer/consts.py b/SoftLayer/consts.py
index 0279225c..674f0460 100644
--- a/SoftLayer/consts.py
+++ b/SoftLayer/consts.py
@@ -5,7 +5,7 @@
:license: MIT, see LICENSE for more details.
"""
-VERSION = 'v4.0.4'
+VERSION = 'v4.0.3'
API_PUBLIC_ENDPOINT = 'https://api.softlayer.com/xmlrpc/v3.1/'
API_PRIVATE_ENDPOINT = 'https://api.service.softlayer.com/xmlrpc/v3.1/'
API_PUBLIC_ENDPOINT_REST = 'https://api.softlayer.com/rest/v3.1/'
diff --git a/SoftLayer/managers/vs.py b/SoftLayer/managers/vs.py
index 30de90b3..ebd523f4 100644
--- a/SoftLayer/managers/vs.py
+++ b/SoftLayer/managers/vs.py
@@ -191,6 +191,7 @@ def get_instance(self, instance_id, **kwargs):
'blockDevices',
'blockDeviceTemplateGroup[id, name, globalIdentifier]',
'postInstallScriptUri',
+ 'userData',
'''operatingSystem[passwords[username,password],
softwareLicense.softwareDescription[
manufacturer,name,version,
diff --git a/docs/conf.py b/docs/conf.py
index 8f480496..0472001b 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -55,9 +55,9 @@
# built documents.
#
# The short X.Y version.
-version = '4.0.4'
+version = '4.0.3'
# The full version, including alpha/beta/rc tags.
-release = '4.0.4'
+release = '4.0.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
diff --git a/setup.py b/setup.py
index 3f329b77..88132dbe 100644
--- a/setup.py
+++ b/setup.py
@@ -36,7 +36,7 @@
setup(
name='SoftLayer',
- version='4.0.4',
+ version='4.0.3',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author='SoftLayer Technologies, Inc.',
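The detail-command changes above rely on SoftLayer's `utils.lookup` helper, which walks nested dictionaries and returns None instead of raising KeyError when a level is missing. A simplified re-implementation of that pattern (plain Python, shown only to illustrate why the empty tagReferences entry from the issue below no longer crashes):

```python
def lookup(dic, key, *keys):
    # Descend one level per key; a missing level short-circuits to None.
    if keys:
        return lookup(dic.get(key, {}), *keys)
    return dic.get(key)

# tagReferences from the API can contain empty entries; indexing
# ref['tag']['name'] on the second one is what raised KeyError: 'tag'.
tag_references = [{'tag': {'name': 'example-tag'}}, {}]

tag_row = [name for name in
           (lookup(ref, 'tag', 'name') for ref in tag_references)
           if name is not None]
assert tag_row == ['example-tag']
```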
| slcli server detail example01 (KeyError: 'tag')
Hello
I am running
slcli server detail hostname
against our entire SLR inventory (200 servers). For most of them I get the expected output, but for 4 of them I get this:
```
➜ ~ slcli server detail example01
An unexpected error has occured:
Traceback (most recent call last):
File "/usr/local/lib/python2.7/site-packages/SoftLayer-4.0.2-py2.7.egg/SoftLayer/CLI/core.py", line 181, in main
cli.main()
File "/usr/local/lib/python2.7/site-packages/click-4.0-py2.7.egg/click/core.py", line 644, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python2.7/site-packages/click-4.0-py2.7.egg/click/core.py", line 991, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/local/lib/python2.7/site-packages/click-4.0-py2.7.egg/click/core.py", line 991, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/local/lib/python2.7/site-packages/click-4.0-py2.7.egg/click/core.py", line 837, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python2.7/site-packages/click-4.0-py2.7.egg/click/core.py", line 464, in invoke
return callback(*args, **kwargs)
File "/usr/local/lib/python2.7/site-packages/click-4.0-py2.7.egg/click/decorators.py", line 64, in new_func
return ctx.invoke(f, obj, *args[1:], **kwargs)
File "/usr/local/lib/python2.7/site-packages/click-4.0-py2.7.egg/click/core.py", line 464, in invoke
return callback(*args, **kwargs)
File "/usr/local/lib/python2.7/site-packages/SoftLayer-4.0.2-py2.7.egg/SoftLayer/CLI/server/detail.py", line 100, in cli
tag_row.append(tag['tag']['name'])
KeyError: 'tag'
Feel free to report this error as it is likely a bug:
https://github.com/softlayer/softlayer-python/issues
```
I tried it under
```
➜ ~ python -V
Python 2.7.10
```
and
```
(softlayer)➜ ~ python -V
Python 3.4.3
``` | softlayer/softlayer-python | diff --git a/SoftLayer/tests/CLI/helper_tests.py b/SoftLayer/tests/CLI/helper_tests.py
index 873d58a8..7862ef00 100644
--- a/SoftLayer/tests/CLI/helper_tests.py
+++ b/SoftLayer/tests/CLI/helper_tests.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
SoftLayer.tests.CLI.helper_tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -358,17 +357,6 @@ def test_format_output_python_keyvaluetable(self):
ret = formatting.format_output(t, 'python')
self.assertEqual({'nothing': None}, ret)
- def test_format_output_unicode(self):
- t = formatting.format_output('☃', 'raw')
- self.assertEqual('☃', t)
-
- item = formatting.FormattedItem('raw ☃', '☃')
- t = formatting.format_output(item)
- self.assertEqual('☃', t)
-
- t = formatting.format_output(item, 'raw')
- self.assertEqual('raw ☃', t)
-
class TestTemplateArgs(testing.TestCase):
diff --git a/SoftLayer/tests/CLI/modules/server_tests.py b/SoftLayer/tests/CLI/modules/server_tests.py
index 85df2f42..e41e49cd 100644
--- a/SoftLayer/tests/CLI/modules/server_tests.py
+++ b/SoftLayer/tests/CLI/modules/server_tests.py
@@ -57,6 +57,25 @@ def test_server_details(self):
self.assertEqual(result.exit_code, 0)
self.assertEqual(json.loads(result.output), expected)
+ def test_detail_vs_empty_tag(self):
+ mock = self.set_mock('SoftLayer_Hardware_Server', 'getObject')
+ mock.return_value = {
+ 'id': 100,
+ 'processorPhysicalCoreAmount': 2,
+ 'memoryCapacity': 2,
+ 'tagReferences': [
+ {'tag': {'name': 'example-tag'}},
+ {},
+ ],
+ }
+ result = self.run_command(['server', 'detail', '100'])
+
+ self.assertEqual(result.exit_code, 0)
+ self.assertEqual(
+ json.loads(result.output)['tags'],
+ ['example-tag'],
+ )
+
def test_list_servers(self):
result = self.run_command(['server', 'list', '--tag=openstack'])
diff --git a/SoftLayer/tests/CLI/modules/vs_tests.py b/SoftLayer/tests/CLI/modules/vs_tests.py
index 9c1064bd..76dd75e5 100644
--- a/SoftLayer/tests/CLI/modules/vs_tests.py
+++ b/SoftLayer/tests/CLI/modules/vs_tests.py
@@ -66,6 +66,25 @@ def test_detail_vs(self):
'id': 1}],
'owner': 'chechu'})
+ def test_detail_vs_empty_tag(self):
+ mock = self.set_mock('SoftLayer_Virtual_Guest', 'getObject')
+ mock.return_value = {
+ 'id': 100,
+ 'maxCpu': 2,
+ 'maxMemory': 1024,
+ 'tagReferences': [
+ {'tag': {'name': 'example-tag'}},
+ {},
+ ],
+ }
+ result = self.run_command(['vs', 'detail', '100'])
+
+ self.assertEqual(result.exit_code, 0)
+ self.assertEqual(
+ json.loads(result.output)['tags'],
+ ['example-tag'],
+ )
+
def test_create_options(self):
result = self.run_command(['vs', 'create-options'])
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 8
} | 4.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"tools/test-requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
click==8.0.4
coverage==6.2
distlib==0.3.9
docutils==0.18.1
filelock==3.4.1
fixtures==4.0.1
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
Jinja2==3.0.3
MarkupSafe==2.0.1
mock==5.2.0
nose==1.3.7
packaging==21.3
pbr==6.1.1
platformdirs==2.4.0
pluggy==1.0.0
prettytable==2.5.0
py==1.11.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytz==2025.2
requests==2.27.1
six==1.17.0
snowballstemmer==2.2.0
-e git+https://github.com/softlayer/softlayer-python.git@7d3772b40ea8cb46065ecb4756d4746d02bfbf61#egg=SoftLayer
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
testtools==2.6.0
toml==0.10.2
tomli==1.2.3
tox==3.28.0
typing_extensions==4.1.1
urllib3==1.26.20
virtualenv==20.17.1
wcwidth==0.2.13
zipp==3.6.0
| name: softlayer-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- click==8.0.4
- coverage==6.2
- distlib==0.3.9
- docutils==0.18.1
- filelock==3.4.1
- fixtures==4.0.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- jinja2==3.0.3
- markupsafe==2.0.1
- mock==5.2.0
- nose==1.3.7
- packaging==21.3
- pbr==6.1.1
- platformdirs==2.4.0
- pluggy==1.0.0
- prettytable==2.5.0
- py==1.11.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytz==2025.2
- requests==2.27.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- testtools==2.6.0
- toml==0.10.2
- tomli==1.2.3
- tox==3.28.0
- typing-extensions==4.1.1
- urllib3==1.26.20
- virtualenv==20.17.1
- wcwidth==0.2.13
- zipp==3.6.0
prefix: /opt/conda/envs/softlayer-python
| [
"SoftLayer/tests/CLI/modules/server_tests.py::ServerCLITests::test_detail_vs_empty_tag",
"SoftLayer/tests/CLI/modules/vs_tests.py::DnsTests::test_detail_vs_empty_tag"
]
| []
| [
"SoftLayer/tests/CLI/helper_tests.py::CLIJSONEncoderTest::test_default",
"SoftLayer/tests/CLI/helper_tests.py::CLIJSONEncoderTest::test_fail",
"SoftLayer/tests/CLI/helper_tests.py::PromptTests::test_confirmation",
"SoftLayer/tests/CLI/helper_tests.py::PromptTests::test_do_or_die",
"SoftLayer/tests/CLI/helper_tests.py::FormattedItemTests::test_blank",
"SoftLayer/tests/CLI/helper_tests.py::FormattedItemTests::test_gb",
"SoftLayer/tests/CLI/helper_tests.py::FormattedItemTests::test_init",
"SoftLayer/tests/CLI/helper_tests.py::FormattedItemTests::test_mb_to_gb",
"SoftLayer/tests/CLI/helper_tests.py::FormattedItemTests::test_sort",
"SoftLayer/tests/CLI/helper_tests.py::FormattedItemTests::test_sort_mixed",
"SoftLayer/tests/CLI/helper_tests.py::FormattedListTests::test_init",
"SoftLayer/tests/CLI/helper_tests.py::FormattedListTests::test_str",
"SoftLayer/tests/CLI/helper_tests.py::FormattedListTests::test_to_python",
"SoftLayer/tests/CLI/helper_tests.py::FormattedTxnTests::test_active_txn",
"SoftLayer/tests/CLI/helper_tests.py::FormattedTxnTests::test_active_txn_empty",
"SoftLayer/tests/CLI/helper_tests.py::FormattedTxnTests::test_active_txn_missing",
"SoftLayer/tests/CLI/helper_tests.py::FormattedTxnTests::test_transaction_status",
"SoftLayer/tests/CLI/helper_tests.py::FormattedTxnTests::test_transaction_status_missing",
"SoftLayer/tests/CLI/helper_tests.py::CLIAbortTests::test_init",
"SoftLayer/tests/CLI/helper_tests.py::ResolveIdTests::test_resolve_id_multiple",
"SoftLayer/tests/CLI/helper_tests.py::ResolveIdTests::test_resolve_id_none",
"SoftLayer/tests/CLI/helper_tests.py::ResolveIdTests::test_resolve_id_one",
"SoftLayer/tests/CLI/helper_tests.py::TestFormatOutput::test_format_output_formatted_item",
"SoftLayer/tests/CLI/helper_tests.py::TestFormatOutput::test_format_output_json",
"SoftLayer/tests/CLI/helper_tests.py::TestFormatOutput::test_format_output_json_keyvaluetable",
"SoftLayer/tests/CLI/helper_tests.py::TestFormatOutput::test_format_output_list",
"SoftLayer/tests/CLI/helper_tests.py::TestFormatOutput::test_format_output_python",
"SoftLayer/tests/CLI/helper_tests.py::TestFormatOutput::test_format_output_python_keyvaluetable",
"SoftLayer/tests/CLI/helper_tests.py::TestFormatOutput::test_format_output_raw",
"SoftLayer/tests/CLI/helper_tests.py::TestFormatOutput::test_format_output_string",
"SoftLayer/tests/CLI/helper_tests.py::TestFormatOutput::test_format_output_table",
"SoftLayer/tests/CLI/helper_tests.py::TestFormatOutput::test_sequentialoutput",
"SoftLayer/tests/CLI/helper_tests.py::TestFormatOutput::test_unknown",
"SoftLayer/tests/CLI/helper_tests.py::TestTemplateArgs::test_no_template_option",
"SoftLayer/tests/CLI/helper_tests.py::TestTemplateArgs::test_template_options",
"SoftLayer/tests/CLI/helper_tests.py::TestExportToTemplate::test_export_to_template",
"SoftLayer/tests/CLI/modules/server_tests.py::ServerCLITests::test_cancel_server",
"SoftLayer/tests/CLI/modules/server_tests.py::ServerCLITests::test_create_options",
"SoftLayer/tests/CLI/modules/server_tests.py::ServerCLITests::test_create_server",
"SoftLayer/tests/CLI/modules/server_tests.py::ServerCLITests::test_create_server_missing_required",
"SoftLayer/tests/CLI/modules/server_tests.py::ServerCLITests::test_create_server_test_flag",
"SoftLayer/tests/CLI/modules/server_tests.py::ServerCLITests::test_create_server_with_export",
"SoftLayer/tests/CLI/modules/server_tests.py::ServerCLITests::test_edit_server_failed",
"SoftLayer/tests/CLI/modules/server_tests.py::ServerCLITests::test_edit_server_userdata",
"SoftLayer/tests/CLI/modules/server_tests.py::ServerCLITests::test_edit_server_userdata_and_file",
"SoftLayer/tests/CLI/modules/server_tests.py::ServerCLITests::test_edit_server_userfile",
"SoftLayer/tests/CLI/modules/server_tests.py::ServerCLITests::test_list_servers",
"SoftLayer/tests/CLI/modules/server_tests.py::ServerCLITests::test_nic_edit_server",
"SoftLayer/tests/CLI/modules/server_tests.py::ServerCLITests::test_server_cancel_reasons",
"SoftLayer/tests/CLI/modules/server_tests.py::ServerCLITests::test_server_details",
"SoftLayer/tests/CLI/modules/server_tests.py::ServerCLITests::test_server_power_cycle",
"SoftLayer/tests/CLI/modules/server_tests.py::ServerCLITests::test_server_power_cycle_negative",
"SoftLayer/tests/CLI/modules/server_tests.py::ServerCLITests::test_server_power_off",
"SoftLayer/tests/CLI/modules/server_tests.py::ServerCLITests::test_server_power_on",
"SoftLayer/tests/CLI/modules/server_tests.py::ServerCLITests::test_server_reboot_default",
"SoftLayer/tests/CLI/modules/server_tests.py::ServerCLITests::test_server_reboot_hard",
"SoftLayer/tests/CLI/modules/server_tests.py::ServerCLITests::test_server_reboot_negative",
"SoftLayer/tests/CLI/modules/server_tests.py::ServerCLITests::test_server_reboot_soft",
"SoftLayer/tests/CLI/modules/server_tests.py::ServerCLITests::test_server_reload",
"SoftLayer/tests/CLI/modules/server_tests.py::ServerCLITests::test_update_firmware",
"SoftLayer/tests/CLI/modules/vs_tests.py::DnsTests::test_create",
"SoftLayer/tests/CLI/modules/vs_tests.py::DnsTests::test_create_options",
"SoftLayer/tests/CLI/modules/vs_tests.py::DnsTests::test_detail_vs",
"SoftLayer/tests/CLI/modules/vs_tests.py::DnsTests::test_list_vs"
]
| []
| MIT License | 186 | [
"docs/conf.py",
"SoftLayer/CLI/formatting.py",
"setup.py",
"SoftLayer/consts.py",
"SoftLayer/CLI/virt/detail.py",
"SoftLayer/CLI/server/detail.py",
"SoftLayer/managers/vs.py",
"CHANGELOG"
]
| [
"docs/conf.py",
"SoftLayer/CLI/formatting.py",
"setup.py",
"SoftLayer/consts.py",
"SoftLayer/CLI/virt/detail.py",
"SoftLayer/CLI/server/detail.py",
"SoftLayer/managers/vs.py",
"CHANGELOG"
]
|
|
sigmavirus24__github3.py-407 | 48adfa8a0a3884fd9e2600b90b37e27efd61eee2 | 2015-07-06 22:37:54 | 05ed0c6a02cffc6ddd0e82ce840c464e1c5fd8c4 | diff --git a/AUTHORS.rst b/AUTHORS.rst
index 49e1c5b7..117a8194 100644
--- a/AUTHORS.rst
+++ b/AUTHORS.rst
@@ -94,3 +94,5 @@ Contributors
- Antoine Giraudmaillet (@antoine-g)
- Paulus Schoutsen (@balloob)
+
+- Nolan Brubaker (@nrb)
diff --git a/github3/issues/milestone.py b/github3/issues/milestone.py
index 9c2726e6..96114ddb 100644
--- a/github3/issues/milestone.py
+++ b/github3/issues/milestone.py
@@ -38,6 +38,8 @@ class Milestone(GitHubCore):
self.due_on = self._strptime(mile.get('due_on'))
#: datetime object representing when the milestone was updated.
self.updated_at = self._strptime(mile.get('updated_at'))
+ #: string representing the milestone's ID.
+ self.id = str(mile.get('id'))
def _repr(self):
return '<Milestone [{0}]>'.format(self)
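The added attribute is small enough to check in isolation. This sketch feeds a hand-built, trimmed milestone payload to the same expression (the id value 219754 matches the test fixture below) rather than a live API response:

```python
payload = {'id': 219754, 'title': 'v1.0.0'}  # trimmed milestone JSON

# Mirrors the new line: the integer id from the API is kept as a string.
milestone_id = str(payload.get('id'))
assert milestone_id == '219754'

# Edge case worth noting: a payload without 'id' yields the string 'None',
# because str() is applied to the .get() result unconditionally.
assert str({}.get('id')) == 'None'
```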
| Milestone objects don't have id attributes
```py
>>> r = g.repository('rcbops', 'rpc-openstack')
>>> milestones = list(r.milestones())
>>> milestones
[<Milestone [kilo 11.0.0]>, <Milestone [next]>, <Milestone [icehouse 9.0.10]>, <Milestone [juno 10.1.10]>, <Milestone [Kilo 11.1.0]>, <Milestone [Kilo 11.2.0]>]
>>> milestones[0].id
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'Milestone' object has no attribute 'id'
``` | sigmavirus24/github3.py | diff --git a/tests/test_issues.py b/tests/test_issues.py
index e85aa48c..c58275c4 100644
--- a/tests/test_issues.py
+++ b/tests/test_issues.py
@@ -77,6 +77,9 @@ class TestMilestone(BaseCase):
def test_str(self):
assert str(self.m) == 'v1.0.0'
+ def test_id(self):
+ assert self.m.id == '219754'
+
def test_delete(self):
self.response('', 204)
self.delete(self.api)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 2
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | betamax==0.9.0
betamax-matchers==0.4.0
certifi==2025.1.31
charset-normalizer==3.4.1
coverage==7.8.0
exceptiongroup==1.2.2
execnet==2.1.1
-e git+https://github.com/sigmavirus24/github3.py.git@48adfa8a0a3884fd9e2600b90b37e27efd61eee2#egg=github3.py
idna==3.10
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
requests==2.32.3
requests-toolbelt==1.0.0
tomli==2.2.1
typing_extensions==4.13.0
uritemplate==4.1.1
uritemplate.py==3.0.2
urllib3==2.3.0
| name: github3.py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- betamax==0.9.0
- betamax-matchers==0.4.0
- certifi==2025.1.31
- charset-normalizer==3.4.1
- coverage==7.8.0
- exceptiongroup==1.2.2
- execnet==2.1.1
- idna==3.10
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- requests==2.32.3
- requests-toolbelt==1.0.0
- tomli==2.2.1
- typing-extensions==4.13.0
- uritemplate==4.1.1
- uritemplate-py==3.0.2
- urllib3==2.3.0
prefix: /opt/conda/envs/github3.py
| [
"tests/test_issues.py::TestMilestone::test_id"
]
| []
| [
"tests/test_issues.py::TestLabel::test_delete",
"tests/test_issues.py::TestLabel::test_equality",
"tests/test_issues.py::TestLabel::test_repr",
"tests/test_issues.py::TestLabel::test_str",
"tests/test_issues.py::TestLabel::test_update",
"tests/test_issues.py::TestMilestone::test_delete",
"tests/test_issues.py::TestMilestone::test_due_on",
"tests/test_issues.py::TestMilestone::test_repr",
"tests/test_issues.py::TestMilestone::test_str",
"tests/test_issues.py::TestMilestone::test_update",
"tests/test_issues.py::TestIssue::test_add_labels",
"tests/test_issues.py::TestIssue::test_assign",
"tests/test_issues.py::TestIssue::test_close",
"tests/test_issues.py::TestIssue::test_comment",
"tests/test_issues.py::TestIssue::test_create_comment",
"tests/test_issues.py::TestIssue::test_edit",
"tests/test_issues.py::TestIssue::test_enterprise",
"tests/test_issues.py::TestIssue::test_equality",
"tests/test_issues.py::TestIssue::test_is_closed",
"tests/test_issues.py::TestIssue::test_issue_137",
"tests/test_issues.py::TestIssue::test_remove_all_labels",
"tests/test_issues.py::TestIssue::test_remove_label",
"tests/test_issues.py::TestIssue::test_reopen",
"tests/test_issues.py::TestIssue::test_replace_labels",
"tests/test_issues.py::TestIssue::test_repr",
"tests/test_issues.py::TestIssueEvent::test_equality",
"tests/test_issues.py::TestIssueEvent::test_repr"
]
| []
| BSD 3-Clause "New" or "Revised" License | 188 | [
"github3/issues/milestone.py",
"AUTHORS.rst"
]
| [
"github3/issues/milestone.py",
"AUTHORS.rst"
]
|
|
marshmallow-code__marshmallow-234 | 4e922445601219dc6bfe014d36b3c61d9528e2ad | 2015-07-07 10:29:53 | b8ad05b5342914e857c442d75e8abe9ea8f867fb | diff --git a/marshmallow/schema.py b/marshmallow/schema.py
index 4de0a123..7fe1289f 100644
--- a/marshmallow/schema.py
+++ b/marshmallow/schema.py
@@ -699,8 +699,8 @@ class BaseSchema(base.SchemaABC):
"""
if obj and many:
try: # Homogeneous collection
- obj_prototype = obj[0]
- except IndexError: # Nothing to serialize
+ obj_prototype = next(iter(obj))
+ except StopIteration: # Nothing to serialize
return self.declared_fields
obj = obj_prototype
ret = self.dict_class()
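The fix swaps indexing for iteration so the prototype object can be drawn from any iterable, not just a sequence. A quick sketch of the two code paths on a set, in plain Python and independent of marshmallow:

```python
inners = {('foo', 42), ('foo', 2)}  # a set, as in the regression test below

try:
    prototype = inners[0]        # old path: sets are not indexable
except TypeError as exc:
    print(exc)                   # 'set' object is not subscriptable

prototype = next(iter(inners))   # new path: works for sets, lists, tuples
print(prototype)

try:
    next(iter(set()))            # nothing to serialize
except StopIteration:
    print('empty collection')    # mirrors the new except clause
```

Note the old clause caught IndexError, so the TypeError raised by a set escaped to the caller; the new clause catches the StopIteration from an exhausted iterator instead.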
| fields.Nested does not support sets
Currently `fields.Nested` assumes that the value of the field is a list - https://github.com/marshmallow-code/marshmallow/blob/dev/marshmallow/schema.py#L702 - and fails for a `set` during serialization. | marshmallow-code/marshmallow | diff --git a/tests/test_schema.py b/tests/test_schema.py
index 29dada19..5d0dbbdf 100644
--- a/tests/test_schema.py
+++ b/tests/test_schema.py
@@ -3,6 +3,7 @@
import json
import random
+from collections import namedtuple
import pytest
@@ -687,6 +688,21 @@ def test_nested_only_and_exclude():
assert 'bar' not in result.data['inner']
+def test_nested_with_sets():
+ class Inner(Schema):
+ foo = fields.Field()
+
+ class Outer(Schema):
+ inners = fields.Nested(Inner, many=True)
+
+ sch = Outer()
+
+ DataClass = namedtuple('DataClass', ['foo'])
+ data = dict(inners=set([DataClass(42), DataClass(2)]))
+ result = sch.dump(data)
+ assert len(result.data['inners']) == 2
+
+
def test_meta_serializer_fields():
u = User("John", age=42.3, email="[email protected]",
homepage="http://john.com")
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 2.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"dev-requirements.txt",
"docs/requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster @ git+https://github.com/sloria/alabaster.git@667b1b676c6bf7226db057f098ec826d84d3ae40
babel==2.17.0
cachetools==5.5.2
certifi==2025.1.31
chardet==5.2.0
charset-normalizer==3.4.1
colorama==0.4.6
distlib==0.3.9
docutils==0.20.1
exceptiongroup==1.2.2
filelock==3.18.0
flake8==2.4.0
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
iniconfig==2.1.0
invoke==2.2.0
Jinja2==3.1.6
MarkupSafe==3.0.2
-e git+https://github.com/marshmallow-code/marshmallow.git@4e922445601219dc6bfe014d36b3c61d9528e2ad#egg=marshmallow
mccabe==0.3.1
packaging==24.2
pep8==1.5.7
platformdirs==4.3.7
pluggy==1.5.0
pyflakes==0.8.1
Pygments==2.19.1
pyproject-api==1.9.0
pytest==8.3.5
python-dateutil==2.9.0.post0
pytz==2025.2
requests==2.32.3
six==1.17.0
snowballstemmer==2.2.0
Sphinx==7.2.6
sphinx-issues==0.2.0
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
tomli==2.2.1
tox==4.25.0
typing_extensions==4.13.0
urllib3==2.3.0
virtualenv==20.29.3
zipp==3.21.0
| name: marshmallow
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.11+sloria0
- babel==2.17.0
- cachetools==5.5.2
- certifi==2025.1.31
- chardet==5.2.0
- charset-normalizer==3.4.1
- colorama==0.4.6
- distlib==0.3.9
- docutils==0.20.1
- exceptiongroup==1.2.2
- filelock==3.18.0
- flake8==2.4.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- invoke==2.2.0
- jinja2==3.1.6
- markupsafe==3.0.2
- mccabe==0.3.1
- packaging==24.2
- pep8==1.5.7
- platformdirs==4.3.7
- pluggy==1.5.0
- pyflakes==0.8.1
- pygments==2.19.1
- pyproject-api==1.9.0
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- pytz==2025.2
- requests==2.32.3
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==7.2.6
- sphinx-issues==0.2.0
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- tomli==2.2.1
- tox==4.25.0
- typing-extensions==4.13.0
- urllib3==2.3.0
- virtualenv==20.29.3
- zipp==3.21.0
prefix: /opt/conda/envs/marshmallow
| [
"tests/test_schema.py::test_nested_with_sets"
]
| []
| [
"tests/test_schema.py::test_serializing_basic_object[UserSchema]",
"tests/test_schema.py::test_serializing_basic_object[UserMetaSchema]",
"tests/test_schema.py::test_serializer_dump",
"tests/test_schema.py::test_dump_returns_dict_of_errors",
"tests/test_schema.py::test_dump_with_strict_mode_raises_error[UserSchema]",
"tests/test_schema.py::test_dump_with_strict_mode_raises_error[UserMetaSchema]",
"tests/test_schema.py::test_dump_resets_errors",
"tests/test_schema.py::test_load_resets_errors",
"tests/test_schema.py::test_dump_resets_error_fields",
"tests/test_schema.py::test_load_resets_error_fields",
"tests/test_schema.py::test_errored_fields_do_not_appear_in_output",
"tests/test_schema.py::test_load_many_stores_error_indices",
"tests/test_schema.py::test_dump_many",
"tests/test_schema.py::test_multiple_errors_can_be_stored_for_a_given_index",
"tests/test_schema.py::test_dump_many_stores_error_indices",
"tests/test_schema.py::test_dump_many_doesnt_stores_error_indices_when_index_errors_is_false",
"tests/test_schema.py::test_dump_returns_a_marshalresult",
"tests/test_schema.py::test_dumps_returns_a_marshalresult",
"tests/test_schema.py::test_dumping_single_object_with_collection_schema",
"tests/test_schema.py::test_loading_single_object_with_collection_schema",
"tests/test_schema.py::test_dumps_many",
"tests/test_schema.py::test_load_returns_an_unmarshalresult",
"tests/test_schema.py::test_load_many",
"tests/test_schema.py::test_loads_returns_an_unmarshalresult",
"tests/test_schema.py::test_loads_many",
"tests/test_schema.py::test_loads_deserializes_from_json",
"tests/test_schema.py::test_serializing_none",
"tests/test_schema.py::test_default_many_symmetry",
"tests/test_schema.py::TestValidate::test_validate_returns_errors_dict",
"tests/test_schema.py::TestValidate::test_validate_many",
"tests/test_schema.py::TestValidate::test_validate_many_doesnt_store_index_if_index_errors_option_is_false",
"tests/test_schema.py::TestValidate::test_validate_strict",
"tests/test_schema.py::TestValidate::test_validate_required",
"tests/test_schema.py::test_fields_are_not_copies[UserSchema]",
"tests/test_schema.py::test_fields_are_not_copies[UserMetaSchema]",
"tests/test_schema.py::test_dumps_returns_json",
"tests/test_schema.py::test_naive_datetime_field",
"tests/test_schema.py::test_datetime_formatted_field",
"tests/test_schema.py::test_datetime_iso_field",
"tests/test_schema.py::test_tz_datetime_field",
"tests/test_schema.py::test_local_datetime_field",
"tests/test_schema.py::test_class_variable",
"tests/test_schema.py::test_serialize_many[UserSchema]",
"tests/test_schema.py::test_serialize_many[UserMetaSchema]",
"tests/test_schema.py::test_inheriting_schema",
"tests/test_schema.py::test_custom_field",
"tests/test_schema.py::test_url_field",
"tests/test_schema.py::test_relative_url_field",
"tests/test_schema.py::test_stores_invalid_url_error[UserSchema]",
"tests/test_schema.py::test_stores_invalid_url_error[UserMetaSchema]",
"tests/test_schema.py::test_email_field[UserSchema]",
"tests/test_schema.py::test_email_field[UserMetaSchema]",
"tests/test_schema.py::test_stored_invalid_email",
"tests/test_schema.py::test_integer_field",
"tests/test_schema.py::test_fixed_field",
"tests/test_schema.py::test_as_string",
"tests/test_schema.py::test_decimal_field",
"tests/test_schema.py::test_price_field",
"tests/test_schema.py::test_extra",
"tests/test_schema.py::test_extra_many",
"tests/test_schema.py::test_method_field[UserSchema]",
"tests/test_schema.py::test_method_field[UserMetaSchema]",
"tests/test_schema.py::test_function_field",
"tests/test_schema.py::test_prefix[UserSchema]",
"tests/test_schema.py::test_prefix[UserMetaSchema]",
"tests/test_schema.py::test_fields_must_be_declared_as_instances",
"tests/test_schema.py::test_serializing_generator[UserSchema]",
"tests/test_schema.py::test_serializing_generator[UserMetaSchema]",
"tests/test_schema.py::test_serializing_empty_list_returns_empty_list",
"tests/test_schema.py::test_serializing_dict",
"tests/test_schema.py::test_serializing_dict_with_meta_fields",
"tests/test_schema.py::test_exclude_in_init[UserSchema]",
"tests/test_schema.py::test_exclude_in_init[UserMetaSchema]",
"tests/test_schema.py::test_only_in_init[UserSchema]",
"tests/test_schema.py::test_only_in_init[UserMetaSchema]",
"tests/test_schema.py::test_invalid_only_param",
"tests/test_schema.py::test_can_serialize_uuid",
"tests/test_schema.py::test_can_serialize_time",
"tests/test_schema.py::test_invalid_time",
"tests/test_schema.py::test_invalid_date",
"tests/test_schema.py::test_invalid_email",
"tests/test_schema.py::test_invalid_url",
"tests/test_schema.py::test_invalid_selection",
"tests/test_schema.py::test_custom_json",
"tests/test_schema.py::test_custom_error_message",
"tests/test_schema.py::test_load_errors_with_many",
"tests/test_schema.py::test_error_raised_if_fields_option_is_not_list",
"tests/test_schema.py::test_error_raised_if_additional_option_is_not_list",
"tests/test_schema.py::test_only_and_exclude",
"tests/test_schema.py::test_only_with_invalid_attribute",
"tests/test_schema.py::test_nested_only_and_exclude",
"tests/test_schema.py::test_meta_serializer_fields",
"tests/test_schema.py::test_meta_fields_mapping",
"tests/test_schema.py::test_meta_field_not_on_obj_raises_attribute_error",
"tests/test_schema.py::test_exclude_fields",
"tests/test_schema.py::test_fields_option_must_be_list_or_tuple",
"tests/test_schema.py::test_exclude_option_must_be_list_or_tuple",
"tests/test_schema.py::test_dateformat_option",
"tests/test_schema.py::test_default_dateformat",
"tests/test_schema.py::test_inherit_meta",
"tests/test_schema.py::test_inherit_meta_override",
"tests/test_schema.py::test_additional",
"tests/test_schema.py::test_cant_set_both_additional_and_fields",
"tests/test_schema.py::test_serializing_none_meta",
"tests/test_schema.py::TestErrorHandler::test_dump_with_custom_error_handler",
"tests/test_schema.py::TestErrorHandler::test_load_with_custom_error_handler",
"tests/test_schema.py::TestErrorHandler::test_validate_with_custom_error_handler",
"tests/test_schema.py::TestErrorHandler::test_multiple_serializers_with_same_error_handler",
"tests/test_schema.py::TestErrorHandler::test_setting_error_handler_class_attribute",
"tests/test_schema.py::TestSchemaValidator::test_validator_decorator_is_deprecated",
"tests/test_schema.py::TestSchemaValidator::test_validator_defined_on_class",
"tests/test_schema.py::TestSchemaValidator::test_validator_that_raises_error_with_dict",
"tests/test_schema.py::TestSchemaValidator::test_validator_that_raises_error_with_list",
"tests/test_schema.py::TestSchemaValidator::test_mixed_schema_validators",
"tests/test_schema.py::TestSchemaValidator::test_registered_validators_are_not_shared_with_ancestors",
"tests/test_schema.py::TestSchemaValidator::test_registered_validators_are_not_shared_with_children",
"tests/test_schema.py::TestSchemaValidator::test_inheriting_then_registering_validator",
"tests/test_schema.py::TestSchemaValidator::test_multiple_schema_errors_can_be_stored",
"tests/test_schema.py::TestSchemaValidator::test_schema_validation_error_with_stict_stores_correct_field_name",
"tests/test_schema.py::TestSchemaValidator::test_schema_validation_error_with_strict_when_field_is_specified",
"tests/test_schema.py::TestSchemaValidator::test_schema_validation_error_stored_on_multiple_fields",
"tests/test_schema.py::TestSchemaValidator::test_validator_with_strict",
"tests/test_schema.py::TestSchemaValidator::test_validator_defined_by_decorator",
"tests/test_schema.py::TestSchemaValidator::test_validators_are_inherited",
"tests/test_schema.py::TestSchemaValidator::test_uncaught_validation_errors_are_stored",
"tests/test_schema.py::TestSchemaValidator::test_validation_error_with_error_parameter",
"tests/test_schema.py::TestSchemaValidator::test_store_schema_validation_errors_on_specified_field",
"tests/test_schema.py::TestSchemaValidator::test_errors_are_cleared_on_load",
"tests/test_schema.py::TestSchemaValidator::test_errors_are_cleared_after_loading_collection",
"tests/test_schema.py::TestSchemaValidator::test_raises_error_with_list",
"tests/test_schema.py::TestSchemaValidator::test_raises_error_with_dict",
"tests/test_schema.py::TestSchemaValidator::test_nested_schema_validators",
"tests/test_schema.py::TestPreprocessors::test_preprocessor_decorator_is_deprecated",
"tests/test_schema.py::TestPreprocessors::test_preprocessors_defined_on_class",
"tests/test_schema.py::TestPreprocessors::test_registered_preprocessors_are_not_shared_with_ancestors",
"tests/test_schema.py::TestPreprocessors::test_registered_preprocessors_are_not_shared_with_children",
"tests/test_schema.py::TestPreprocessors::test_preprocessors_defined_by_decorator",
"tests/test_schema.py::TestDataHandler::test_data_handler_is_deprecated",
"tests/test_schema.py::TestDataHandler::test_schema_with_custom_data_handler",
"tests/test_schema.py::TestDataHandler::test_registered_data_handlers_are_not_shared_with_ancestors",
"tests/test_schema.py::TestDataHandler::test_registered_data_handlers_are_not_shared_with_children",
"tests/test_schema.py::TestDataHandler::test_serializer_with_multiple_data_handlers",
"tests/test_schema.py::TestDataHandler::test_setting_data_handlers_class_attribute",
"tests/test_schema.py::TestDataHandler::test_root_data_handler",
"tests/test_schema.py::test_schema_repr",
"tests/test_schema.py::TestNestedSchema::test_flat_nested",
"tests/test_schema.py::TestNestedSchema::test_nested_many_with_missing_attribute",
"tests/test_schema.py::TestNestedSchema::test_nested_with_attribute_none",
"tests/test_schema.py::TestNestedSchema::test_flat_nested2",
"tests/test_schema.py::TestNestedSchema::test_nested_field_does_not_validate_required",
"tests/test_schema.py::TestNestedSchema::test_nested_none",
"tests/test_schema.py::TestNestedSchema::test_nested",
"tests/test_schema.py::TestNestedSchema::test_nested_many_fields",
"tests/test_schema.py::TestNestedSchema::test_nested_meta_many",
"tests/test_schema.py::TestNestedSchema::test_nested_only",
"tests/test_schema.py::TestNestedSchema::test_exclude",
"tests/test_schema.py::TestNestedSchema::test_list_field",
"tests/test_schema.py::TestNestedSchema::test_list_field_parent",
"tests/test_schema.py::TestNestedSchema::test_nested_load_many",
"tests/test_schema.py::TestNestedSchema::test_nested_errors",
"tests/test_schema.py::TestNestedSchema::test_nested_strict",
"tests/test_schema.py::TestNestedSchema::test_nested_method_field",
"tests/test_schema.py::TestNestedSchema::test_nested_function_field",
"tests/test_schema.py::TestNestedSchema::test_nested_prefixed_field",
"tests/test_schema.py::TestNestedSchema::test_nested_prefixed_many_field",
"tests/test_schema.py::TestNestedSchema::test_invalid_float_field",
"tests/test_schema.py::TestNestedSchema::test_serializer_meta_with_nested_fields",
"tests/test_schema.py::TestNestedSchema::test_serializer_with_nested_meta_fields",
"tests/test_schema.py::TestNestedSchema::test_nested_fields_must_be_passed_a_serializer",
"tests/test_schema.py::TestNestedSchema::test_invalid_type_passed_to_nested_many_field",
"tests/test_schema.py::TestSelfReference::test_nesting_schema_within_itself",
"tests/test_schema.py::TestSelfReference::test_nesting_schema_by_passing_class_name",
"tests/test_schema.py::TestSelfReference::test_nesting_within_itself_meta",
"tests/test_schema.py::TestSelfReference::test_nested_self_with_only_param",
"tests/test_schema.py::TestSelfReference::test_multiple_nested_self_fields",
"tests/test_schema.py::TestSelfReference::test_nested_many",
"tests/test_schema.py::test_serialization_with_required_field",
"tests/test_schema.py::test_deserialization_with_required_field",
"tests/test_schema.py::test_deserialization_with_required_field_and_custom_validator",
"tests/test_schema.py::TestContext::test_context_method",
"tests/test_schema.py::TestContext::test_context_method_function",
"tests/test_schema.py::TestContext::test_function_field_raises_error_when_context_not_available",
"tests/test_schema.py::TestContext::test_fields_context",
"tests/test_schema.py::TestContext::test_nested_fields_inherit_context",
"tests/test_schema.py::test_serializer_can_specify_nested_object_as_attribute",
"tests/test_schema.py::TestFieldInheritance::test_inherit_fields_from_schema_subclass",
"tests/test_schema.py::TestFieldInheritance::test_inherit_fields_from_non_schema_subclass",
"tests/test_schema.py::TestFieldInheritance::test_inheritance_follows_mro",
"tests/test_schema.py::TestAccessor::test_accessor_is_used",
"tests/test_schema.py::TestAccessor::test_accessor_with_many",
"tests/test_schema.py::TestAccessor::test_accessor_decorator",
"tests/test_schema.py::TestRequiredFields::test_required_string_field_missing",
"tests/test_schema.py::TestRequiredFields::test_required_string_field_failure",
"tests/test_schema.py::TestRequiredFields::test_allow_none_param",
"tests/test_schema.py::TestRequiredFields::test_allow_none_custom_message",
"tests/test_schema.py::TestDefaults::test_missing_inputs_are_excluded_from_dump_output",
"tests/test_schema.py::TestDefaults::test_none_is_serialized_to_none",
"tests/test_schema.py::TestDefaults::test_default_and_value_missing",
"tests/test_schema.py::TestDefaults::test_loading_none",
"tests/test_schema.py::TestDefaults::test_missing_inputs_are_excluded_from_load_output"
]
| []
| MIT License | 189 | [
"marshmallow/schema.py"
]
| [
"marshmallow/schema.py"
]
|
|
colour-science__colour-194 | 3cc1e58379b92f8254f655ee108b721fd1342713 | 2015-07-14 09:01:43 | 3cd6ab8d4c3483bcdeb2d7ef33967160808c0bb2 | diff --git a/colour/volume/__init__.py b/colour/volume/__init__.py
index 58c0f7e4b..72f6c7b1e 100644
--- a/colour/volume/__init__.py
+++ b/colour/volume/__init__.py
@@ -6,16 +6,24 @@ from __future__ import absolute_import
from .dataset import * # noqa
from . import dataset
from .macadam_limits import is_within_macadam_limits
+from .mesh import is_within_mesh_volume
from .pointer_gamut import is_within_pointer_gamut
+from .spectrum import is_within_visible_spectrum
from .rgb import (
RGB_colourspace_limits,
RGB_colourspace_volume_MonteCarlo,
- RGB_colourspace_pointer_gamut_coverage_MonteCarlo)
+ RGB_colourspace_volume_coverage_MonteCarlo,
+ RGB_colourspace_pointer_gamut_coverage_MonteCarlo,
+ RGB_colourspace_visible_spectrum_coverage_MonteCarlo)
__all__ = []
__all__ += dataset.__all__
__all__ += ['is_within_macadam_limits']
+__all__ += ['is_within_mesh_volume']
__all__ += ['is_within_pointer_gamut']
+__all__ += ['is_within_visible_spectrum']
__all__ += ['RGB_colourspace_limits',
'RGB_colourspace_volume_MonteCarlo',
- 'RGB_colourspace_pointer_gamut_coverage_MonteCarlo']
+ 'RGB_colourspace_volume_coverage_MonteCarlo',
+ 'RGB_colourspace_pointer_gamut_coverage_MonteCarlo',
+ 'RGB_colourspace_visible_spectrum_coverage_MonteCarlo']
diff --git a/colour/volume/mesh.py b/colour/volume/mesh.py
new file mode 100644
index 000000000..e90f458a3
--- /dev/null
+++ b/colour/volume/mesh.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Mesh Volume Computations Helpers
+================================
+
+Defines helpers objects related to volume computations.
+"""
+
+from __future__ import division, unicode_literals
+
+import numpy as np
+
+from colour.utilities import is_scipy_installed
+
+__author__ = 'Colour Developers'
+__copyright__ = 'Copyright (C) 2013 - 2015 - Colour Developers'
+__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
+__maintainer__ = 'Colour Developers'
+__email__ = '[email protected]'
+__status__ = 'Production'
+
+__all__ = ['is_within_mesh_volume']
+
+
+def is_within_mesh_volume(points, mesh, tolerance=None):
+ """
+ Returns if given points are within given mesh volume using Delaunay
+ triangulation.
+
+ Parameters
+ ----------
+ points : array_like
+ Points to check if they are within `mesh` volume.
+ mesh : array_like
+ Points of the volume used to generate the Delaunay triangulation.
+ tolerance : numeric, optional
+ Tolerance allowed in the inside-triangle check.
+
+ Returns
+ -------
+ bool
+ Is within mesh volume.
+
+ Notes
+ -----
+ - This definition requires *scipy* to be installed.
+
+ Examples
+ --------
+ >>> mesh = np.array([[-1.0, -1.0, 1.0],
+ ... [1.0, -1.0, 1.0],
+ ... [1.0, -1.0, -1.0],
+ ... [-1.0, -1.0, -1.0],
+ ... [0.0, 1.0, 0.0]])
+ >>> is_within_mesh_volume(np.array([0.0005, 0.0031, 0.0010]), mesh)
+ array(True, dtype=bool)
+ >>> a = np.array([[0.0005, 0.0031, 0.0010],
+ ... [0.3205, 0.4131, 0.5100]])
+ >>> is_within_mesh_volume(a, mesh)
+ array([ True, False], dtype=bool)
+ """
+
+ if is_scipy_installed(raise_exception=True):
+ from scipy.spatial import Delaunay
+
+ triangulation = Delaunay(mesh)
+
+ simplex = triangulation.find_simplex(points, tol=tolerance)
+ simplex = np.where(simplex >= 0, True, False)
+
+ return simplex
diff --git a/colour/volume/pointer_gamut.py b/colour/volume/pointer_gamut.py
index 57415b898..860cbd0f0 100644
--- a/colour/volume/pointer_gamut.py
+++ b/colour/volume/pointer_gamut.py
@@ -5,7 +5,7 @@
Pointer's Gamut Volume Computations
===================================
-Defines objects related to Pointer's Gamut volume computations
+Defines objects related to Pointer's Gamut volume computations.
See Also
--------
@@ -15,14 +15,12 @@ See Also
from __future__ import division, unicode_literals
-import numpy as np
-
from colour.models import (
Lab_to_XYZ,
LCHab_to_Lab,
POINTER_GAMUT_DATA,
POINTER_GAMUT_ILLUMINANT)
-from colour.utilities import is_scipy_installed
+from colour.volume import is_within_mesh_volume
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013 - 2015 - Colour Developers'
@@ -33,8 +31,6 @@ __status__ = 'Production'
__all__ = ['is_within_pointer_gamut']
-_XYZ_POINTER_GAMUT_TRIANGULATION_CACHE = None
-
def is_within_pointer_gamut(XYZ, tolerance=None):
"""
@@ -60,26 +56,16 @@ def is_within_pointer_gamut(XYZ, tolerance=None):
Examples
--------
- >>> is_within_pointer_gamut(np.array([0.3205, 0.4131, 0.51]))
+ >>> import numpy as np
+ >>> is_within_pointer_gamut(np.array([0.3205, 0.4131, 0.5100]))
array(True, dtype=bool)
- >>> a = np.array([[0.3205, 0.4131, 0.51],
- ... [0.0005, 0.0031, 0.001]])
+ >>> a = np.array([[0.3205, 0.4131, 0.5100],
+ ... [0.0005, 0.0031, 0.0010]])
>>> is_within_pointer_gamut(a)
array([ True, False], dtype=bool)
"""
- if is_scipy_installed(raise_exception=True):
- from scipy.spatial import Delaunay
-
- global _XYZ_POINTER_GAMUT_TRIANGULATION_CACHE
-
- triangulation = _XYZ_POINTER_GAMUT_TRIANGULATION_CACHE
- if triangulation is None:
- _XYZ_POINTER_GAMUT_TRIANGULATION_CACHE = triangulation = (
- Delaunay(Lab_to_XYZ(LCHab_to_Lab(POINTER_GAMUT_DATA),
- POINTER_GAMUT_ILLUMINANT)))
-
- simplex = triangulation.find_simplex(XYZ, tol=tolerance)
- simplex = np.where(simplex >= 0, True, False)
+ XYZ_p = Lab_to_XYZ(LCHab_to_Lab(POINTER_GAMUT_DATA),
+ POINTER_GAMUT_ILLUMINANT)
- return simplex
+ return is_within_mesh_volume(XYZ, XYZ_p, tolerance)
diff --git a/colour/volume/rgb.py b/colour/volume/rgb.py
index a09b41490..0096119e8 100644
--- a/colour/volume/rgb.py
+++ b/colour/volume/rgb.py
@@ -10,6 +10,7 @@ Defines various RGB colourspace volume computation objects:
- :func:`RGB_colourspace_limits`
- :func:`RGB_colourspace_volume_MonteCarlo`
- :func:`RGB_colourspace_pointer_gamut_coverage_MonteCarlo`
+- :func:`RGB_colourspace_visible_spectrum_coverage_MonteCarlo`
See Also
--------
@@ -25,9 +26,13 @@ import numpy as np
from colour.algebra import random_triplet_generator
from colour.colorimetry import ILLUMINANTS
-from colour.models import Lab_to_XYZ, RGB_to_XYZ, XYZ_to_Lab, XYZ_to_RGB
+from colour.models import (
+ Lab_to_XYZ,
+ RGB_to_XYZ,
+ XYZ_to_Lab,
+ XYZ_to_RGB)
from colour.utilities import is_scipy_installed
-from colour.volume import is_within_pointer_gamut
+from colour.volume import is_within_pointer_gamut, is_within_visible_spectrum
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013 - 2014 - Colour Developers'
@@ -39,7 +44,8 @@ __status__ = 'Production'
__all__ = ['sample_RGB_colourspace_volume_MonteCarlo',
'RGB_colourspace_limits',
'RGB_colourspace_volume_MonteCarlo',
- 'RGB_colourspace_pointer_gamut_coverage_MonteCarlo']
+ 'RGB_colourspace_pointer_gamut_coverage_MonteCarlo',
+ 'RGB_colourspace_visible_spectrum_coverage_MonteCarlo']
def _wrapper_RGB_colourspace_volume_MonteCarlo(args):
@@ -262,24 +268,25 @@ def RGB_colourspace_volume_MonteCarlo(
return Lab_volume * np.sum(results) / (process_samples * cpu_count)
-def RGB_colourspace_pointer_gamut_coverage_MonteCarlo(
+def RGB_colourspace_volume_coverage_MonteCarlo(
colourspace,
+ coverage_sampler,
samples=10e6,
random_generator=random_triplet_generator,
random_state=None):
"""
- Returns given *RGB* colourspace percentage coverage of Pointer's Gamut
- volume using *Monte Carlo* method.
+ Returns given *RGB* colourspace percentage coverage of an arbitrary volume.
Parameters
----------
colourspace : RGB_Colourspace
- *RGB* colourspace to compute the Pointer's Gamut coverage percentage.
+ *RGB* colourspace to compute the volume coverage percentage.
+ coverage_sampler : object
+ Python object responsible for checking the volume coverage.
samples : numeric, optional
Samples count.
random_generator : generator, optional
- Random triplet generator providing the random samples within the *Lab*
- colourspace volume.
+ Random triplet generator providing the random samples.
random_state : RandomState, optional
Mersenne Twister pseudo-random number generator to use in the random
number generator.
@@ -287,7 +294,7 @@ def RGB_colourspace_pointer_gamut_coverage_MonteCarlo(
Returns
-------
float
- Percentage coverage of Pointer's Gamut volume.
+ Percentage coverage of volume.
Notes
-----
@@ -297,7 +304,11 @@ def RGB_colourspace_pointer_gamut_coverage_MonteCarlo(
--------
>>> from colour import sRGB_COLOURSPACE as sRGB
>>> prng = np.random.RandomState(2)
- >>> RGB_colourspace_pointer_gamut_coverage_MonteCarlo(sRGB, 10e3, random_state=prng) # noqa # doctest: +ELLIPSIS
+ >>> RGB_colourspace_volume_coverage_MonteCarlo(
+ ... sRGB,
+ ... is_within_pointer_gamut,
+ ... 10e3,
+ ... random_state=prng) # doctest: +ELLIPSIS
83...
"""
@@ -306,11 +317,12 @@ def RGB_colourspace_pointer_gamut_coverage_MonteCarlo(
if random_state is not None else
np.random.RandomState())
+ # TODO: Investigate for generator yielding directly a ndarray.
XYZ = np.asarray(list(random_generator(
samples, random_state=random_state)))
- XYZ_p = XYZ[is_within_pointer_gamut(XYZ)]
+ XYZ_vs = XYZ[coverage_sampler(XYZ)]
- RGB = XYZ_to_RGB(XYZ_p,
+ RGB = XYZ_to_RGB(XYZ_vs,
colourspace.whitepoint,
colourspace.whitepoint,
colourspace.XYZ_to_RGB_matrix)
@@ -318,4 +330,102 @@ def RGB_colourspace_pointer_gamut_coverage_MonteCarlo(
RGB_c = RGB[np.logical_and(np.min(RGB, axis=-1) >= 0,
np.max(RGB, axis=-1) <= 1)]
- return 100 * RGB_c.size / XYZ_p.size
+ return 100 * RGB_c.size / XYZ_vs.size
+
+
+def RGB_colourspace_pointer_gamut_coverage_MonteCarlo(
+ colourspace,
+ samples=10e6,
+ random_generator=random_triplet_generator,
+ random_state=None):
+ """
+ Returns given *RGB* colourspace percentage coverage of Pointer's Gamut
+ volume using *Monte Carlo* method.
+
+ Parameters
+ ----------
+ colourspace : RGB_Colourspace
+ *RGB* colourspace to compute the Pointer's Gamut coverage percentage.
+ samples : numeric, optional
+ Samples count.
+ random_generator : generator, optional
+ Random triplet generator providing the random samples.
+ random_state : RandomState, optional
+ Mersenne Twister pseudo-random number generator to use in the random
+ number generator.
+
+ Returns
+ -------
+ float
+ Percentage coverage of Pointer's Gamut volume.
+
+ Notes
+ -----
+ - This definition requires *scipy* to be installed.
+
+ Examples
+ --------
+ >>> from colour import sRGB_COLOURSPACE as sRGB
+ >>> prng = np.random.RandomState(2)
+ >>> RGB_colourspace_pointer_gamut_coverage_MonteCarlo(
+ ... sRGB,
+ ... 10e3,
+ ... random_state=prng) # doctest: +ELLIPSIS
+ 83...
+ """
+
+ return RGB_colourspace_volume_coverage_MonteCarlo(
+ colourspace,
+ is_within_pointer_gamut,
+ samples,
+ random_generator,
+ random_state)
+
+
+def RGB_colourspace_visible_spectrum_coverage_MonteCarlo(
+ colourspace,
+ samples=10e6,
+ random_generator=random_triplet_generator,
+ random_state=None):
+ """
+ Returns given *RGB* colourspace percentage coverage of visible spectrum
+ volume using *Monte Carlo* method.
+
+ Parameters
+ ----------
+ colourspace : RGB_Colourspace
+ *RGB* colourspace to compute the visible spectrum coverage percentage.
+ samples : numeric, optional
+ Samples count.
+ random_generator : generator, optional
+ Random triplet generator providing the random samples.
+ random_state : RandomState, optional
+ Mersenne Twister pseudo-random number generator to use in the random
+ number generator.
+
+ Returns
+ -------
+ float
+ Percentage coverage of visible spectrum volume.
+
+ Notes
+ -----
+ - This definition requires *scipy* to be installed.
+
+ Examples
+ --------
+ >>> from colour import sRGB_COLOURSPACE as sRGB
+ >>> prng = np.random.RandomState(2)
+ >>> RGB_colourspace_visible_spectrum_coverage_MonteCarlo(
+ ... sRGB,
+ ... 10e3,
+ ... random_state=prng) # doctest: +ELLIPSIS
+ 36...
+ """
+
+ return RGB_colourspace_volume_coverage_MonteCarlo(
+ colourspace,
+ is_within_visible_spectrum,
+ samples,
+ random_generator,
+ random_state)
diff --git a/colour/volume/spectrum.py b/colour/volume/spectrum.py
new file mode 100644
index 000000000..5e4e64241
--- /dev/null
+++ b/colour/volume/spectrum.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Visible Spectrum Volume Computations
+====================================
+
+Defines objects related to visible spectrum volume computations.
+
+See Also
+--------
+`Spectrum IPython Notebook
+<http://nbviewer.ipython.org/github/colour-science/colour-ipython/blob/master/notebooks/colorimetry/spectrum.ipynb>`_ # noqa
+"""
+
+from __future__ import division, unicode_literals
+
+from colour.colorimetry import STANDARD_OBSERVERS_CMFS
+from colour.volume import is_within_mesh_volume
+
+__author__ = 'Colour Developers'
+__copyright__ = 'Copyright (C) 2013 - 2015 - Colour Developers'
+__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
+__maintainer__ = 'Colour Developers'
+__email__ = '[email protected]'
+__status__ = 'Production'
+
+__all__ = ['is_within_visible_spectrum']
+
+
+def is_within_visible_spectrum(XYZ,
+ cmfs=STANDARD_OBSERVERS_CMFS.get(
+ 'CIE 1931 2 Degree Standard Observer'),
+ tolerance=None):
+ """
+ Returns if given *CIE XYZ* tristimulus values are within visible spectrum
+ volume / given colour matching functions volume.
+
+ Parameters
+ ----------
+ XYZ : array_like
+ *CIE XYZ* tristimulus values.
+ cmfs : XYZ_ColourMatchingFunctions
+ Standard observer colour matching functions.
+ tolerance : numeric, optional
+ Tolerance allowed in the inside-triangle check.
+
+ Returns
+ -------
+ bool
+ Is within visible spectrum.
+
+ Notes
+ -----
+ - Input *CIE XYZ* tristimulus values are in domain [0, 1].
+ - This definition requires *scipy* to be installed.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> is_within_visible_spectrum(np.array([0.3205, 0.4131, 0.51]))
+ array(True, dtype=bool)
+ >>> a = np.array([[0.3205, 0.4131, 0.51],
+ ... [-0.0005, 0.0031, 0.001]])
+ >>> is_within_visible_spectrum(a)
+ array([ True, False], dtype=bool)
+ """
+
+ return is_within_mesh_volume(XYZ, cmfs.values, tolerance)
| Implement support for "Visible Spectrum" coverage computation.
Similar needs to those in #190. | colour-science/colour
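For context, the core of the new `is_within_mesh_volume` definition above is a Delaunay triangulation plus `find_simplex`, which returns -1 for points outside the convex hull. A runnable sketch using the toy pyramid mesh from the patch's docstring (requires *scipy*; the sample points are the same ones used in the docstring example):

```python
import numpy as np
from scipy.spatial import Delaunay

# Toy pyramid mesh taken from the docstring example in the patch.
mesh = np.array([[-1.0, -1.0, 1.0],
                 [1.0, -1.0, 1.0],
                 [1.0, -1.0, -1.0],
                 [-1.0, -1.0, -1.0],
                 [0.0, 1.0, 0.0]])

triangulation = Delaunay(mesh)

points = np.array([[0.0005, 0.0031, 0.0010],   # inside the pyramid
                   [0.3205, 0.4131, 0.5100]])  # outside the pyramid

# find_simplex returns the index of the enclosing simplex, or -1 if outside.
print(triangulation.find_simplex(points) >= 0)  # [ True False]
```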
new file mode 100644
index 000000000..2ab1296d9
--- /dev/null
+++ b/colour/volume/tests/tests_mesh.py
@@ -0,0 +1,108 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Defines unit tests for :mod:`colour.volume.mesh` module.
+"""
+
+from __future__ import division, unicode_literals
+
+import numpy as np
+import sys
+
+if sys.version_info[:2] <= (2, 6):
+ import unittest2 as unittest
+else:
+ import unittest
+from itertools import permutations
+
+from colour.volume import is_within_mesh_volume
+from colour.utilities import ignore_numpy_errors
+
+__author__ = 'Colour Developers'
+__copyright__ = 'Copyright (C) 2013 - 2015 - Colour Developers'
+__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
+__maintainer__ = 'Colour Developers'
+__email__ = '[email protected]'
+__status__ = 'Production'
+
+__all__ = ['TestIsWithinMeshVolume']
+
+
+class TestIsWithinMeshVolume(unittest.TestCase):
+ """
+ Defines :func:`colour.volume.mesh.is_within_mesh_volume` definition unit
+ tests methods.
+ """
+
+ def setUp(self):
+ """
+ Initialises common tests attributes.
+ """
+
+ self.__mesh = np.array([[-1.0, -1.0, 1.0],
+ [1.0, -1.0, 1.0],
+ [1.0, -1.0, -1.0],
+ [-1.0, -1.0, -1.0],
+ [0.0, 1.0, 0.0]])
+
+ def test_is_within_mesh_volume(self):
+ """
+ Tests :func:`colour.volume.mesh.is_within_mesh_volume` definition.
+ """
+
+ self.assertTrue(
+ is_within_mesh_volume(np.array([0.0005, 0.0031, 0.0010]),
+ self.__mesh))
+
+ self.assertFalse(
+ is_within_mesh_volume(np.array([0.3205, 0.4131, 0.5100]),
+ self.__mesh))
+
+ self.assertTrue(
+ is_within_mesh_volume(np.array([0.0025, 0.0088, 0.0340]),
+ self.__mesh))
+
+ self.assertFalse(
+ is_within_mesh_volume(np.array([0.4325, 0.3788, 0.1034]),
+ self.__mesh))
+
+ def test_n_dimensional_is_within_mesh_volume(self):
+ """
+ Tests :func:`colour.volume.mesh.is_within_mesh_volume` definition
+ n-dimensional arrays support.
+ """
+
+ a = np.array([0.0005, 0.0031, 0.0010])
+ b = np.array([True])
+ np.testing.assert_almost_equal(
+ is_within_mesh_volume(a, self.__mesh),
+ b)
+
+ a = np.tile(a, (6, 1))
+ b = np.tile(b, 6)
+ np.testing.assert_almost_equal(
+ is_within_mesh_volume(a, self.__mesh),
+ b)
+
+ a = np.reshape(a, (2, 3, 3))
+ b = np.reshape(b, (2, 3))
+ np.testing.assert_almost_equal(
+ is_within_mesh_volume(a, self.__mesh),
+ b)
+
+ @ignore_numpy_errors
+ def test_nan_is_within_mesh_volume(self):
+ """
+ Tests :func:`colour.volume.mesh.is_within_mesh_volume` definition nan
+ support.
+ """
+
+ cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
+ cases = set(permutations(cases * 3, r=3))
+ for case in cases:
+ is_within_mesh_volume(case, self.__mesh)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/colour/volume/tests/tests_rgb.py b/colour/volume/tests/tests_rgb.py
index 04e7509f7..04d5b636d 100644
--- a/colour/volume/tests/tests_rgb.py
+++ b/colour/volume/tests/tests_rgb.py
@@ -3,6 +3,18 @@
"""
Defines unit tests for :mod:`colour.volume.rgb` module.
+
+Notes
+-----
+The MonteCarlo sampling based unit tests are assuming that
+:func:`np.random.RandomState` definition will return the same sequence no
+matter which *OS* or *Python* version is used. There is however no formal
+promise about the *prng* sequence reproducibility of either *Python or *Numpy*
+implementations:
+
+Laurent. (2012). Reproducibility of python pseudo-random numbers across systems
+and versions? Retrieved January 20, 2015, from
+http://stackoverflow.com/questions/8786084/reproducibility-of-python-pseudo-random-numbers-across-systems-and-versions # noqa
"""
from __future__ import division, unicode_literals
@@ -22,7 +34,10 @@ from colour.models import (
from colour.volume import (
RGB_colourspace_limits,
RGB_colourspace_volume_MonteCarlo,
- RGB_colourspace_pointer_gamut_coverage_MonteCarlo)
+ RGB_colourspace_volume_coverage_MonteCarlo,
+ RGB_colourspace_pointer_gamut_coverage_MonteCarlo,
+ RGB_colourspace_visible_spectrum_coverage_MonteCarlo,
+ is_within_pointer_gamut)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013 - 2014 - Colour Developers'
@@ -33,7 +48,9 @@ __status__ = 'Production'
__all__ = ['TestRGB_colourspaceLimits',
'TestRGB_colourspaceVolumeMonteCarlo',
- 'TestRGB_colourspacePointerGamutCoverageMonteCarlo']
+ 'TestRGB_colourspace_volume_coverage_MonteCarlo',
+ 'TestRGB_colourspacePointerGamutCoverageMonteCarlo',
+ 'TestRGB_colourspaceVisibleSpectrumCoverageMonteCarlo']
class TestRGB_colourspaceLimits(unittest.TestCase):
@@ -79,16 +96,6 @@ class TestRGB_colourspaceVolumeMonteCarlo(unittest.TestCase):
"""
Tests :func:`colour.volume.rgb.RGB_colourspace_volume_MonteCarlo`
definition.
-
- Notes
- -----
- The test is assuming that :func:`np.random.RandomState` definition will
- return the same sequence no matter which *OS* or *Python* version is
- used. There is however no formal promise about the *prng* sequence
- reproducibility of either *Python or *Numpy* implementations: Laurent.
- (2012). Reproducibility of python pseudo-random numbers across systems
- and versions? Retrieved January 20, 2015, from
- http://stackoverflow.com/questions/8786084/reproducibility-of-python-pseudo-random-numbers-across-systems-and-versions # noqa
"""
self.assertEquals(
@@ -100,6 +107,30 @@ class TestRGB_colourspaceVolumeMonteCarlo(unittest.TestCase):
859500.0)
+class TestRGB_colourspace_volume_coverage_MonteCarlo(unittest.TestCase):
+ """
+ Defines
+ :func:`colour.volume.rgb.RGB_colourspace_volume_coverage_MonteCarlo`
+ definition unit tests methods.
+ """
+
+ def test_RGB_colourspace_volume_coverage_MonteCarlo(self):
+ """
+ Tests
+ :func:`colour.volume.rgb.RGB_colourspace_volume_coverage_MonteCarlo` # noqa
+ definition.
+ """
+
+ np.testing.assert_almost_equal(
+ RGB_colourspace_volume_coverage_MonteCarlo(
+ REC_709_COLOURSPACE,
+ is_within_pointer_gamut,
+ 10e3,
+ random_state=np.random.RandomState(2)),
+ 83.02013422818791,
+ decimal=7)
+
+
class TestRGB_colourspacePointerGamutCoverageMonteCarlo(unittest.TestCase):
"""
Defines
@@ -112,16 +143,6 @@ class TestRGB_colourspacePointerGamutCoverageMonteCarlo(unittest.TestCase):
Tests
:func:`colour.volume.rgb.RGB_colourspace_pointer_gamut_coverage_MonteCarlo` # noqa
definition.
-
- Notes
- -----
- The test is assuming that :func:`np.random.RandomState` definition will
- return the same sequence no matter which *OS* or *Python* version is
- used. There is however no formal promise about the *prng* sequence
- reproducibility of either *Python or *Numpy* implementations: Laurent.
- (2012). Reproducibility of python pseudo-random numbers across systems
- and versions? Retrieved January 20, 2015, from
- http://stackoverflow.com/questions/8786084/reproducibility-of-python-pseudo-random-numbers-across-systems-and-versions # noqa
"""
np.testing.assert_almost_equal(
@@ -133,5 +154,28 @@ class TestRGB_colourspacePointerGamutCoverageMonteCarlo(unittest.TestCase):
decimal=7)
+class TestRGB_colourspaceVisibleSpectrumCoverageMonteCarlo(unittest.TestCase):
+ """
+ Defines
+ :func:`colour.volume.rgb.RGB_colourspace_visible_spectrum_coverage_MonteCarlo` # noqa
+ definition unit tests methods.
+ """
+
+ def test_RGB_colourspace_visible_spectrum_coverage_MonteCarlo(self):
+ """
+ Tests
+ :func:`colour.volume.rgb.RGB_colourspace_visible_spectrum_coverage_MonteCarlo` # noqa
+ definition.
+ """
+
+ np.testing.assert_almost_equal(
+ RGB_colourspace_visible_spectrum_coverage_MonteCarlo(
+ REC_709_COLOURSPACE,
+ 10e3,
+ random_state=np.random.RandomState(2)),
+ 36.48383937316356,
+ decimal=7)
+
+
if __name__ == '__main__':
unittest.main()
diff --git a/colour/volume/tests/tests_spectrum.py b/colour/volume/tests/tests_spectrum.py
new file mode 100644
index 000000000..8e90ceda6
--- /dev/null
+++ b/colour/volume/tests/tests_spectrum.py
@@ -0,0 +1,94 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Defines unit tests for :mod:`colour.volume.spectrum` module.
+"""
+
+from __future__ import division, unicode_literals
+
+import numpy as np
+import sys
+
+if sys.version_info[:2] <= (2, 6):
+ import unittest2 as unittest
+else:
+ import unittest
+from itertools import permutations
+
+from colour.volume import is_within_visible_spectrum
+from colour.utilities import ignore_numpy_errors
+
+__author__ = 'Colour Developers'
+__copyright__ = 'Copyright (C) 2013 - 2015 - Colour Developers'
+__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
+__maintainer__ = 'Colour Developers'
+__email__ = '[email protected]'
+__status__ = 'Production'
+
+__all__ = ['TestIsWithinVisibleSpectrum']
+
+
+class TestIsWithinVisibleSpectrum(unittest.TestCase):
+ """
+ Defines :func:`colour.volume.spectrum.is_within_visible_spectrum`
+ definition unit tests methods.
+ """
+
+ def test_is_within_visible_spectrum(self):
+ """
+ Tests :func:`colour.volume.spectrum.is_within_visible_spectrum`
+ definition.
+ """
+
+ self.assertTrue(
+ is_within_visible_spectrum(np.array([0.3205, 0.4131, 0.5100])))
+
+ self.assertFalse(
+ is_within_visible_spectrum(np.array([-0.0005, 0.0031, 0.0010])))
+
+ self.assertTrue(
+ is_within_visible_spectrum(np.array([0.4325, 0.3788, 0.1034])))
+
+ self.assertFalse(
+ is_within_visible_spectrum(np.array([0.0025, 0.0088, 0.0340])))
+
+ def test_n_dimensional_is_within_visible_spectrum(self):
+ """
+ Tests :func:`colour.volume.spectrum.is_within_visible_spectrum`
+ definition n-dimensional arrays support.
+ """
+
+ a = np.array([0.3205, 0.4131, 0.5100])
+ b = np.array([True])
+ np.testing.assert_almost_equal(
+ is_within_visible_spectrum(a),
+ b)
+
+ a = np.tile(a, (6, 1))
+ b = np.tile(b, 6)
+ np.testing.assert_almost_equal(
+ is_within_visible_spectrum(a),
+ b)
+
+ a = np.reshape(a, (2, 3, 3))
+ b = np.reshape(b, (2, 3))
+ np.testing.assert_almost_equal(
+ is_within_visible_spectrum(a),
+ b)
+
+ @ignore_numpy_errors
+ def test_nan_is_within_visible_spectrum(self):
+ """
+ Tests :func:`colour.volume.spectrum.is_within_visible_spectrum`
+ definition nan support.
+ """
+
+ cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
+ cases = set(permutations(cases * 3, r=3))
+ for case in cases:
+ is_within_visible_spectrum(case)
+
+
+if __name__ == '__main__':
+ unittest.main()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 3,
"test_score": 2
},
"num_modified_files": 3
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[tests]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"flake8",
"coverage",
"pytest"
],
"pre_install": [],
"python": "3.4",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
-e git+https://github.com/colour-science/colour.git@3cc1e58379b92f8254f655ee108b721fd1342713#egg=colour_science
coverage==6.2
flake8==5.0.4
importlib-metadata==4.2.0
iniconfig==1.1.1
mccabe==0.7.0
nose==1.3.7
numpy==1.19.5
packaging==21.3
pluggy==1.0.0
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing==3.1.4
pytest==7.0.1
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: colour
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- flake8==5.0.4
- importlib-metadata==4.2.0
- iniconfig==1.1.1
- mccabe==0.7.0
- nose==1.3.7
- numpy==1.19.5
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/colour
| [
"colour/volume/tests/tests_rgb.py::TestRGB_colourspaceLimits::test_RGB_colourspace_limits",
"colour/volume/tests/tests_rgb.py::TestRGB_colourspaceVolumeMonteCarlo::test_RGB_colourspace_volume_MonteCarlo"
]
| [
"colour/volume/tests/tests_mesh.py::TestIsWithinMeshVolume::test_is_within_mesh_volume",
"colour/volume/tests/tests_mesh.py::TestIsWithinMeshVolume::test_n_dimensional_is_within_mesh_volume",
"colour/volume/tests/tests_mesh.py::TestIsWithinMeshVolume::test_nan_is_within_mesh_volume",
"colour/volume/tests/tests_rgb.py::TestRGB_colourspace_volume_coverage_MonteCarlo::test_RGB_colourspace_volume_coverage_MonteCarlo",
"colour/volume/tests/tests_rgb.py::TestRGB_colourspacePointerGamutCoverageMonteCarlo::test_RGB_colourspace_pointer_gamut_coverage_MonteCarlo",
"colour/volume/tests/tests_rgb.py::TestRGB_colourspaceVisibleSpectrumCoverageMonteCarlo::test_RGB_colourspace_visible_spectrum_coverage_MonteCarlo",
"colour/volume/tests/tests_spectrum.py::TestIsWithinVisibleSpectrum::test_is_within_visible_spectrum",
"colour/volume/tests/tests_spectrum.py::TestIsWithinVisibleSpectrum::test_n_dimensional_is_within_visible_spectrum",
"colour/volume/tests/tests_spectrum.py::TestIsWithinVisibleSpectrum::test_nan_is_within_visible_spectrum"
]
| []
| []
| BSD 3-Clause "New" or "Revised" License | 192 | [
"colour/volume/__init__.py",
"colour/volume/rgb.py",
"colour/volume/mesh.py",
"colour/volume/spectrum.py",
"colour/volume/pointer_gamut.py"
]
| [
"colour/volume/__init__.py",
"colour/volume/rgb.py",
"colour/volume/mesh.py",
"colour/volume/spectrum.py",
"colour/volume/pointer_gamut.py"
]
|
|
cdent__gabbi-56 | f2a32ad31bc205580834f009f05586a533f390f7 | 2015-07-16 05:25:12 | 081a75f5f0ddfdc31c4bab62db2f084a50c9ee99 | diff --git a/gabbi/handlers.py b/gabbi/handlers.py
index f754d94..d39a987 100644
--- a/gabbi/handlers.py
+++ b/gabbi/handlers.py
@@ -120,6 +120,8 @@ class HeadersResponseHandler(ResponseHandler):
test_key_value = {}
def action(self, test, header, value):
+ header = header.lower() # case-insensitive comparison
+
response = test.response
header_value = test.replace_template(value)
| case-insensitive headers
As far as I can tell, gabbi enforces lowercase headers:
```yaml
response_headers:
content-type: text/html; charset=utf-8
```
```
... ✓ front page returns HTML
```
vs.
```yaml
response_headers:
Content-Type: text/html; charset=utf-8
```
```
... E front page returns HTML
ERROR: front page returns HTML
"'Content-Type' header not available in response keys: dict_keys(['server', 'content-type', 'access-control-allow-origin', 'date', 'access-control-allow-credentials', 'connection', 'content-location', 'content-length', 'status'])"
```
From my perspective, the second version is more readable - so it would be nice if header name comparisons were case-insensitive.
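The eventual fix (the single `header.lower()` call in the patch above) works because the underlying response stores header names lowercased, as the error output shows - every key in `dict_keys([...])` is lowercase. A standalone sketch of the case-insensitive check (the sample headers are illustrative):

```python
# Response objects store header names lowercased, so lowercasing the
# expected name makes the lookup case-insensitive.
response = {'content-type': 'text/html; charset=utf-8', 'status': '200'}
expected = {'Content-Type': 'text/html; charset=utf-8'}

for header, value in expected.items():
    name = header.lower()
    assert name in response, (
        "%r header not available in response keys: %s"
        % (header, list(response)))
    assert response[name] == value
print('headers match')
```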
| cdent/gabbi | diff --git a/gabbi/tests/test_handlers.py b/gabbi/tests/test_handlers.py
index b18e8ec..b647e80 100644
--- a/gabbi/tests/test_handlers.py
+++ b/gabbi/tests/test_handlers.py
@@ -94,10 +94,16 @@ class HandlersTest(unittest.TestCase):
def test_response_headers(self):
handler = handlers.HeadersResponseHandler(self.test_class)
+ self.test.response = {'content-type': 'text/plain'}
+
self.test.test_data = {'response_headers': {
'content-type': 'text/plain',
}}
- self.test.response = {'content-type': 'text/plain'}
+ self._assert_handler(handler)
+
+ self.test.test_data = {'response_headers': {
+ 'Content-Type': 'text/plain',
+ }}
self._assert_handler(handler)
def test_response_headers_regex(self):
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 1.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"tox",
"pytest"
],
"pre_install": [
"pip install tox"
],
"python": "3.4",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
decorator==5.1.1
distlib==0.3.9
filelock==3.4.1
fixtures==4.0.1
-e git+https://github.com/cdent/gabbi.git@f2a32ad31bc205580834f009f05586a533f390f7#egg=gabbi
httplib2==0.22.0
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
jsonpath-rw==1.4.0
packaging==21.3
pbr==6.1.1
platformdirs==2.4.0
pluggy==1.0.0
ply==3.11
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
PyYAML==6.0.1
six==1.17.0
testtools==2.6.0
toml==0.10.2
tomli==1.2.3
tox==3.28.0
typing_extensions==4.1.1
virtualenv==20.17.1
wsgi_intercept==1.13.1
zipp==3.6.0
| name: gabbi
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- decorator==5.1.1
- distlib==0.3.9
- filelock==3.4.1
- fixtures==4.0.1
- httplib2==0.22.0
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- jsonpath-rw==1.4.0
- packaging==21.3
- pbr==6.1.1
- platformdirs==2.4.0
- pluggy==1.0.0
- ply==3.11
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pyyaml==6.0.1
- six==1.17.0
- testtools==2.6.0
- toml==0.10.2
- tomli==1.2.3
- tox==3.28.0
- typing-extensions==4.1.1
- virtualenv==20.17.1
- wsgi-intercept==1.13.1
- zipp==3.6.0
prefix: /opt/conda/envs/gabbi
| [
"gabbi/tests/test_handlers.py::HandlersTest::test_response_headers"
]
| []
| [
"gabbi/tests/test_handlers.py::HandlersTest::test_response_headers_fail_data",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_headers_fail_header",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_headers_regex",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_json_paths",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_json_paths_fail_data",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_json_paths_fail_path",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_strings",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_strings_fail"
]
| []
| Apache License 2.0 | 194 | [
"gabbi/handlers.py"
]
| [
"gabbi/handlers.py"
]
|
|
cdent__gabbi-59 | ad9c7d1743576979ae96a74469937afcb980b5af | 2015-07-16 09:15:44 | 081a75f5f0ddfdc31c4bab62db2f084a50c9ee99 | diff --git a/gabbi/case.py b/gabbi/case.py
index 50b8815..2be6949 100644
--- a/gabbi/case.py
+++ b/gabbi/case.py
@@ -153,7 +153,7 @@ class HTTPTestCase(testcase.TestCase):
def _environ_replace(self, message):
"""Replace an indicator in a message with the environment value."""
- value = re.sub(r"\$ENVIRON\['([^']+)'\]",
+ value = re.sub(self._replacer_regex('ENVIRON'),
self._environ_replacer, message)
if value == "False":
return False
@@ -167,7 +167,7 @@ class HTTPTestCase(testcase.TestCase):
Let KeyError raise if variable not present.
"""
- environ_name = match.group(1)
+ environ_name = match.group('arg')
return os.environ[environ_name]
@staticmethod
@@ -188,17 +188,17 @@ class HTTPTestCase(testcase.TestCase):
"""Replace a header indicator in a message with that headers value from
the prior request.
"""
- return re.sub(r"\$HEADERS\['([^']+)'\]",
+ return re.sub(self._replacer_regex('HEADERS'),
self._header_replacer, message)
def _header_replacer(self, match):
"""Replace a regex match with the value of a prior header."""
- header_key = match.group(1)
+ header_key = match.group('arg')
return self.prior.response[header_key.lower()]
def _json_replacer(self, match):
"""Replace a regex match with the value of a JSON Path."""
- path = match.group(1)
+ path = match.group('arg')
return str(self.extract_json_path_value(self.prior.json_data, path))
def _location_replace(self, message):
@@ -253,9 +253,14 @@ class HTTPTestCase(testcase.TestCase):
return full_url
+ @staticmethod
+ def _replacer_regex(key):
+ message = r"\$%s\[(?P<quote>['\"])(?P<arg>.+?)(?P=quote)\]" % key
+ return message
+
def _response_replace(self, message):
"""Replace a JSON Path from the prior request with a value."""
- return re.sub(r"\$RESPONSE\['([^']+)'\]",
+ return re.sub(self._replacer_regex('RESPONSE'),
self._json_replacer, message)
def _run_request(self, url, method, headers, body):
diff --git a/gabbi/gabbits_intercept/backref.yaml b/gabbi/gabbits_intercept/backref.yaml
index 4ae1168..e0cec5b 100644
--- a/gabbi/gabbits_intercept/backref.yaml
+++ b/gabbi/gabbits_intercept/backref.yaml
@@ -16,7 +16,7 @@ tests:
location: $SCHEME://$NETLOC/posterchild
- name: post some more json
- url: $RESPONSE['link']
+ url: $RESPONSE["link"]
request_headers:
content-type: application/json
method: POST
@@ -26,7 +26,7 @@ tests:
d:
z: $RESPONSE['b']
response_json_paths:
- a: $RESPONSE['a']
+ a: $RESPONSE["a"]
c: /v2
d:
z: $RESPONSE['b']
@@ -48,3 +48,19 @@ tests:
location: $SCHEME://$NETLOC$RESPONSE['c']
x-gabbi-url: $SCHEME://$NETLOC/v2
content-type: $HEADERS['content-type']
+
+- name: post even more json quote different
+ url: $RESPONSE["c"]
+ request_headers:
+ content-type: application/json
+ method: POST
+ data: |
+ {"a": "$RESPONSE["a"]",
+ "c": "$RESPONSE["c"]"}
+ response_strings:
+ - '"a": "$RESPONSE["a"]"'
+ - '"c": "/v2"'
+ response_headers:
+ location: $SCHEME://$NETLOC$RESPONSE['c']
+ x-gabbi-url: $SCHEME://$NETLOC/v2
+ content-type: $HEADERS['content-type']
diff --git a/gabbi/handlers.py b/gabbi/handlers.py
index d39a987..f754d94 100644
--- a/gabbi/handlers.py
+++ b/gabbi/handlers.py
@@ -120,8 +120,6 @@ class HeadersResponseHandler(ResponseHandler):
test_key_value = {}
def action(self, test, header, value):
- header = header.lower() # case-insensitive comparison
-
response = test.response
header_value = test.replace_template(value)
diff --git a/gabbi/reporter.py b/gabbi/reporter.py
index 9776b80..0479588 100644
--- a/gabbi/reporter.py
+++ b/gabbi/reporter.py
@@ -81,7 +81,7 @@ class ConciseTestResult(TextTestResult):
details = details.strip().splitlines()[-1] # traceback's last line
if ':' in details:
details = details.split(':', 1)[1] # discard exception name
- self.stream.writeln('\t%s' % details.strip())
+ self.stream.writeln('\t%s' % details)
class ConciseTestRunner(TextTestRunner):
| magical variables enforce single quotes
One of my tests contained the following line:
```yaml
url: $RESPONSE["$._links.next"]
```
That refused to work, and it took me quite a while to figure out that gabbi seems to accept only single quotes there, because this version worked just fine:
```yaml
url: $RESPONSE['$._links.next']
```
I would argue that users' expectation is that single and double quotes are equivalent.
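The patch above handles this with a backreference: `(?P<quote>['\"])` captures whichever quote character opens the argument and `(?P=quote)` requires the same character to close it. A standalone sketch of that regex:

```python
import re

# Same pattern shape as _replacer_regex('RESPONSE') in the patch.
pattern = r"\$RESPONSE\[(?P<quote>['\"])(?P<arg>.+?)(?P=quote)\]"

for message in ('$RESPONSE["$._links.next"]', "$RESPONSE['$._links.next']"):
    print(re.search(pattern, message).group('arg'))
# Both lines print: $._links.next
```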
| cdent/gabbi | diff --git a/gabbi/tests/test_handlers.py b/gabbi/tests/test_handlers.py
index b647e80..b18e8ec 100644
--- a/gabbi/tests/test_handlers.py
+++ b/gabbi/tests/test_handlers.py
@@ -94,16 +94,10 @@ class HandlersTest(unittest.TestCase):
def test_response_headers(self):
handler = handlers.HeadersResponseHandler(self.test_class)
- self.test.response = {'content-type': 'text/plain'}
-
self.test.test_data = {'response_headers': {
'content-type': 'text/plain',
}}
- self._assert_handler(handler)
-
- self.test.test_data = {'response_headers': {
- 'Content-Type': 'text/plain',
- }}
+ self.test.response = {'content-type': 'text/plain'}
self._assert_handler(handler)
def test_response_headers_regex(self):
diff --git a/gabbi/tests/test_replacers.py b/gabbi/tests/test_replacers.py
index f32fe01..11628c1 100644
--- a/gabbi/tests/test_replacers.py
+++ b/gabbi/tests/test_replacers.py
@@ -44,3 +44,8 @@ class EnvironReplaceTest(testtools.TestCase):
os.environ['moo'] = "cow"
self.assertEqual("cow", http_case._environ_replace(message))
+
+ message = '$ENVIRON["moo"]'
+
+ os.environ['moo'] = "True"
+ self.assertEqual(True, http_case._environ_replace(message))
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 4
} | 1.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio",
"subunit"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
decorator==5.2.1
exceptiongroup==1.2.2
execnet==2.1.1
extras==1.0.0
-e git+https://github.com/cdent/gabbi.git@ad9c7d1743576979ae96a74469937afcb980b5af#egg=gabbi
httplib2==0.22.0
iniconfig==2.1.0
jsonpath-rw==1.4.0
packaging==24.2
pbr==6.1.1
pluggy==1.5.0
ply==3.11
pyparsing==3.2.3
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
PyYAML==6.0.2
six==1.17.0
subunit==0.0.0a0
testtools==2.7.2
tomli==2.2.1
typing_extensions==4.13.0
wsgi_intercept==1.13.1
| name: gabbi
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- decorator==5.2.1
- exceptiongroup==1.2.2
- execnet==2.1.1
- extras==1.0.0
- httplib2==0.22.0
- iniconfig==2.1.0
- jsonpath-rw==1.4.0
- packaging==24.2
- pbr==6.1.1
- pluggy==1.5.0
- ply==3.11
- pyparsing==3.2.3
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- pyyaml==6.0.2
- six==1.17.0
- subunit==0.0.0a0
- testtools==2.7.2
- tomli==2.2.1
- typing-extensions==4.13.0
- wsgi-intercept==1.13.1
prefix: /opt/conda/envs/gabbi
| [
"gabbi/tests/test_replacers.py::EnvironReplaceTest::test_environ_boolean"
]
| []
| [
"gabbi/tests/test_handlers.py::HandlersTest::test_response_headers",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_headers_fail_data",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_headers_fail_header",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_headers_regex",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_json_paths",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_json_paths_fail_data",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_json_paths_fail_path",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_strings",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_strings_fail"
]
| []
| Apache License 2.0 | 195 | [
"gabbi/case.py",
"gabbi/gabbits_intercept/backref.yaml",
"gabbi/reporter.py",
"gabbi/handlers.py"
]
| [
"gabbi/case.py",
"gabbi/gabbits_intercept/backref.yaml",
"gabbi/reporter.py",
"gabbi/handlers.py"
]
|
|
tailhook__injections-6 | 680d3403f0086e0a94d69604bba0bfcbd9596014 | 2015-07-16 17:37:03 | 680d3403f0086e0a94d69604bba0bfcbd9596014 | diff --git a/injections/__init__.py b/injections/__init__.py
index 47bdc0d..dce8a9e 100644
--- a/injections/__init__.py
+++ b/injections/__init__.py
@@ -1,6 +1,7 @@
from .core import (
Container,
Dependency,
+ MissingDependencyError,
has,
depends,
propagate,
@@ -11,6 +12,7 @@ from .core import (
__all__ = [
'Container',
'Dependency',
+ 'MissingDependencyError',
'has',
'depends',
'propagate',
diff --git a/injections/core.py b/injections/core.py
index a4dccae..edff1db 100644
--- a/injections/core.py
+++ b/injections/core.py
@@ -76,6 +76,13 @@ class Dependency:
depends = Dependency # nicer declarative name
+class MissingDependencyError(KeyError):
+ """Required dependency is missed in container"""
+
+ def __str__(self):
+ return "Dependency {!r} is missed in container".format(self.args[0])
+
+
class Container(object):
"""Container for things that will be dependency-injected
@@ -104,7 +111,10 @@ class Container(object):
deps = getattr(inst, '__injections__', None)
if deps:
for attr, dep in deps.items():
- val = pro[dep.name]
+ val = pro.get(dep.name)
+ if val is None:
+ raise MissingDependencyError(dep.name)
+
if not isinstance(val, dep.type):
raise TypeError("Wrong provider for {!r}".format(val))
setattr(inst, attr, val)
diff --git a/setup.py b/setup.py
index 41cf797..c1e7d04 100644
--- a/setup.py
+++ b/setup.py
@@ -16,4 +16,5 @@ setup(name='injections',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2',
],
+ license='MIT',
)
| Use custom exception for injecting container into object with unknown dependencies
I have a class
```
@has
class A:
redis = depends(Redis)
loop = depends(AbstractLoop)
```
When I try to initialize it from a container with missing dependencies, I get a `KeyError`.
```
inj = Container()
inj['redis'] = create_redis()
inj.inject(A()) # raises KeyError
```
Raising an exception at the injection stage is pretty cool, but I suggest using a custom exception class: not `KeyError` itself, but one derived from it. The exception text should also be more informative than just `KeyError: 'loop'`. | tailhook/injections | diff --git a/injections/test_core.py b/injections/test_core.py
index 2e9aabe..06006cd 100644
--- a/injections/test_core.py
+++ b/injections/test_core.py
@@ -126,3 +126,21 @@ class TestCore(TestCase):
c['name'] = 1
with self.assertRaises(TypeError):
c.inject(self.Consumer())
+
+ def test_missing_dependency(self):
+ c = di.Container()
+ c['a'] = 1
+
+ @di.has
+ class Consumer:
+ a = di.depends(int)
+ b = di.depends(int)
+
+ if hasattr(self, 'assertRaisesRegex'):
+ checker = self.assertRaisesRegex
+ else:
+ checker = self.assertRaisesRegexp
+
+ with checker(di.MissingDependencyError,
+ "Dependency 'b' is missed in container"):
+ c.inject(Consumer())
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 3
} | 0.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": null,
"python": "3.4",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
-e git+https://github.com/tailhook/injections.git@680d3403f0086e0a94d69604bba0bfcbd9596014#egg=injections
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
nose==1.3.7
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: injections
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- nose==1.3.7
prefix: /opt/conda/envs/injections
| [
"injections/test_core.py::TestCore::test_missing_dependency"
]
| []
| [
"injections/test_core.py::TestCore::test_clone",
"injections/test_core.py::TestCore::test_cyclic",
"injections/test_core.py::TestCore::test_interconnect",
"injections/test_core.py::TestCore::test_larger_cycle",
"injections/test_core.py::TestCore::test_ok",
"injections/test_core.py::TestCore::test_wrong_type"
]
| []
| MIT License | 196 | [
"setup.py",
"injections/core.py",
"injections/__init__.py"
]
| [
"setup.py",
"injections/core.py",
"injections/__init__.py"
]
|
|
toejough__ugetch-24 | 2616fe2bbe55feb0048e3e973966f83d7f5f9710 | 2015-07-18 12:50:40 | 2616fe2bbe55feb0048e3e973966f83d7f5f9710 | diff --git a/.travis.yml b/.travis.yml
index 0386a42..4d40821 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,5 +1,11 @@
+# basic config
language: python
python: 3.4
install:
- pip install tox
script: tox
+
+# whitelist
+branches:
+ only:
+ - master
diff --git a/README.rst b/README.rst
index 91ee28f..ae7af94 100644
--- a/README.rst
+++ b/README.rst
@@ -24,17 +24,21 @@ API
getkey
------
-Gets a "key" from the ``infile`` and returns it to the user, where "key" is a character or glyph.
-Currently supports parsing ASCII and UTF-8.
+Gets a "key" from the ``infile`` and returns it to the user, where "key" is a character, glyph, or string describing a control key.
+
+Currently supports parsing ASCII, UTF-8, and tab.
Any other keys entered (such as arrow keys, UTF-16, etc) will result in returning individual bytes, one at a time.
+The tab key is represented as a string: 'TAB'. This is not a "character", but it *is* a "key". The implementation
+of keys may change in the future, but for now this seems to be simple and clear.
+
Parameters:
* ``infile`` - defaults to the ``sys.stdin`` file, as it is set at call time.
Returns:
-* ``key`` - a string value corresponding to the key read in from the TTY ('a', 'B', '-', etc).
+* ``key`` - a string value corresponding to the key read in from the TTY ('a', 'B', '-', '☢', 'TAB', etc).
contribution
============
diff --git a/ugetch/__init__.py b/ugetch/__init__.py
index 2c7bd98..b615ca3 100644
--- a/ugetch/__init__.py
+++ b/ugetch/__init__.py
@@ -14,40 +14,62 @@ import termios
def ascii_parser(infile):
'''Parse ascii bytes to ascii keys'''
byte = _get_byte(infile)
+ key = None
if byte < 128:
- return chr(byte)
+ key = chr(byte)
else:
_put_byte(byte)
+ return key
def utf8_parser(infile):
'''Parse utf-8 bytes to a string'''
first_byte = _get_byte(infile)
+ key = None
+ utf8_bytes = [first_byte]
# get byte string in py 2 and 3
- if sys.version_info.major == 3:
- string = chr(first_byte)
+ try:
+ if sys.version_info.major == 3:
+ key = chr(first_byte)
+ else:
+ if 194 <= first_byte <= 223:
+ # 2 bytes
+ utf8_bytes.append(_get_byte(infile))
+ elif 224 <= first_byte <= 239:
+ # 3 bytes
+ utf8_bytes.append(_get_byte(infile))
+ utf8_bytes.append(_get_byte(infile))
+ elif 240 <= first_byte <= 244:
+ # 4 bytes
+ utf8_bytes.append(_get_byte(infile))
+ utf8_bytes.append(_get_byte(infile))
+ utf8_bytes.append(_get_byte(infile))
+ byte_string = ''.join([chr(b) for b in utf8_bytes])
+ key = byte_string.decode('utf-8')
+ except:
+ # couldn't parse utf-8 out. not a failure, just not a success.
+ for b in utf8_bytes:
+ _put_byte(b)
+ return key
+
+
+def tab_parser(infile):
+ '''Parse a tab key out'''
+ first_byte = _get_byte(infile)
+ key = None
+ if first_byte == 9:
+ # Tab key
+ key = 'TAB'
else:
- utf8_bytes = [first_byte]
- if 194 <= first_byte <= 223:
- # 2 bytes
- utf8_bytes.append(_get_byte(infile))
- elif 224 <= first_byte <= 239:
- # 3 bytes
- utf8_bytes.append(_get_byte(infile))
- utf8_bytes.append(_get_byte(infile))
- elif 240 <= first_byte <= 244:
- # 4 bytes
- utf8_bytes.append(_get_byte(infile))
- utf8_bytes.append(_get_byte(infile))
- utf8_bytes.append(_get_byte(infile))
- byte_string = ''.join([chr(b) for b in utf8_bytes])
- string = byte_string.decode('utf-8')
- return string
+ _put_byte(first_byte)
+ return key
# [ Global ]
_DEFAULT=object() # enable dynamic defaults
-_KEY_PARSERS=[ascii_parser, utf8_parser] # ways to parse keys from byte lists
+# Tab needs to go before the ASCII parser, because it is technically in the ASCII range,
+# and would be sucked in raw by the ASCII parser.
+_KEY_PARSERS=[tab_parser, ascii_parser, utf8_parser] # ways to parse keys from byte lists
_BYTES=[] # byte buffer from the input file
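The parsers above share a try-then-put-back pattern: read a byte, return a key if it matches, and otherwise push the byte back for the next parser in the chain. A self-contained sketch with simplified stand-ins for `_get_byte`/`_put_byte`:
```python
import io

_BYTES = []  # simplified put-back buffer, standing in for the module global

def _get_byte(infile):
    # Drain the put-back buffer first, then fall back to the input file.
    return _BYTES.pop(0) if _BYTES else ord(infile.read(1))

def _put_byte(byte):
    _BYTES.insert(0, byte)

def tab_parser(infile):
    first_byte = _get_byte(infile)
    if first_byte == 9:  # ASCII horizontal tab
        return 'TAB'
    _put_byte(first_byte)  # not a tab: hand the byte to the next parser
    return None

assert tab_parser(io.StringIO('\t')) == 'TAB'
assert tab_parser(io.StringIO('a')) is None and _BYTES == [97]
```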
| implement tab key support | toejough/ugetch | diff --git a/test_ugetch.py b/test_ugetch.py
index 52f60b7..072b7bf 100644
--- a/test_ugetch.py
+++ b/test_ugetch.py
@@ -19,8 +19,7 @@ import ugetch
# [ Helpers ]
-def in_vs_out(input_values):
- '''supply ascii chars, use getch, and validate ascii returned'''
+def start_cli_shim():
# choose the correct spawn
spawn = pexpect.spawn
if sys.version_info.major == 3:
@@ -28,23 +27,35 @@ def in_vs_out(input_values):
# spawn the subproc
p = spawn("python ugetch-cli.py", timeout=0.5)
p.expect_exact("hit ctrl-c to exit.")
+ return p
+
+
+def assert_expected(p, expected):
+ '''Assert that we get the expected value'''
+ index = p.expect_exact([expected, pexpect.EOF, pexpect.TIMEOUT])
+ assert index == 0, "did not find expected ({}) in output ({})".format(
+ value, p.before
+ )
+
+
+def in_vs_out(input_values):
+ '''supply ascii chars, use getch, and validate ascii returned'''
+ # get shim
+ p = start_cli_shim()
# test each char
for value in input_values:
p.expect_exact("hit a key to print its representation: ")
p.send(value)
- index = p.expect_exact([value, pexpect.EOF, pexpect.TIMEOUT])
- assert index == 0, "did not find expected ({}) in output ({})".format(
- value, p.before
- )
+ assert_expected(p, value)
# [ Unit tests ]
def test_getch_ascii():
'''supply ascii chars, use getch, and validate ascii returned'''
- in_vs_out(" \n\t")
all_printable_acii = [chr(n) for n in range(32, 127)] # not including 127
in_vs_out(all_printable_acii)
+
def test_getch_utf8():
'''supply utf-8 glyphs, use getch, and validate the keys returned'''
in_vs_out(['☢'])
@@ -52,6 +63,17 @@ def test_getch_utf8():
in_vs_out(['༰','༱','༲','༳','༴',' ༵','༶','༸',' ༹','༺',' ','༻',' ','༼','༽',' ༾',' ༿',' ','ཀ','ཁ','ག','གྷ','ང','ཅ','ཆ']) # tibetan
+def test_getch_tab():
+ # get shim
+ p = start_cli_shim()
+ # test each char
+ p.expect_exact("hit a key to print its representation: ")
+ p.send('\t')
+ assert_expected(p, 'TAB')
+ p.sendcontrol('i')
+ assert_expected(p, 'TAB')
+
+
if __name__ == '__main__':
import cProfile
cProfile.run("test_getch_ascii()")
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 3,
"test_score": 2
},
"num_modified_files": 3
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pexpect",
"pytest-cache"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
execnet==2.1.1
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
packaging @ file:///croot/packaging_1734472117206/work
pexpect==4.9.0
pluggy @ file:///croot/pluggy_1733169602837/work
ptyprocess==0.7.0
pytest @ file:///croot/pytest_1738938843180/work
pytest-cache==1.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
-e git+https://github.com/toejough/ugetch.git@2616fe2bbe55feb0048e3e973966f83d7f5f9710#egg=ugetch
| name: ugetch
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- execnet==2.1.1
- pexpect==4.9.0
- ptyprocess==0.7.0
- pytest-cache==1.0
prefix: /opt/conda/envs/ugetch
| [
"test_ugetch.py::test_getch_tab"
]
| []
| [
"test_ugetch.py::test_getch_ascii",
"test_ugetch.py::test_getch_utf8"
]
| []
| MIT License | 198 | [
"README.rst",
"ugetch/__init__.py",
".travis.yml"
]
| [
"README.rst",
"ugetch/__init__.py",
".travis.yml"
]
|
|
toejough__ugetch-26 | ddf84a5577fa1b7341dd404bdf5b5e9b9a43cf72 | 2015-07-18 13:52:44 | ddf84a5577fa1b7341dd404bdf5b5e9b9a43cf72 | diff --git a/README.rst b/README.rst
index ae7af94..3b949a3 100644
--- a/README.rst
+++ b/README.rst
@@ -26,11 +26,9 @@ getkey
Gets a "key" from the ``infile`` and returns it to the user, where "key" is a character, glyph, or string describing a control key.
-Currently supports parsing ASCII, UTF-8, and tab.
-Any other keys entered (such as arrow keys, UTF-16, etc) will result in returning individual bytes, one at a time.
-
-The tab key is represented as a string: 'TAB'. This is not a "character", but it *is* a "key". The implementation
-of keys may change in the future, but for now this seems to be simple and clear.
+Currently supports parsing ASCII, UTF-8, and some `special characters`_ as listed below.
+Any other keys entered will result in returning individual bytes, one at a time.
+*Pull requests adding special key support are welcome*
Parameters:
@@ -40,6 +38,21 @@ Returns:
* ``key`` - a string value corresponding to the key read in from the TTY ('a', 'B', '-', '☢', 'TAB', etc).
+Special Characters
+==================
+The following special characters are supported for VT220-like terminals. This is all I personally have to test on,
+but pull reqeusts with additional support are welcome. Issue #14 will make integration of new keys easier.
+
+Currently, special keys are supported by returning their name as a string, instead of a single character or glyph, as ASCII and UTF-8 do.
+This is not a "character", but it *is* a "key". The implementation
+of keys may change in the future, but for now this seems to be simple and clear.
+
+* TAB
+* UP
+* DOWN
+* LEFT
+* RIGHT
+
contribution
============
diff --git a/ugetch/__init__.py b/ugetch/__init__.py
index b615ca3..8d714da 100644
--- a/ugetch/__init__.py
+++ b/ugetch/__init__.py
@@ -65,11 +65,42 @@ def tab_parser(infile):
return key
+def arrow_parser(infile):
+ '''Parse arrow keys'''
+ first_byte = _get_byte(infile)
+ key = None
+ if first_byte == 27:
+ # escape. Check for bracket
+ second_byte = _get_byte(infile)
+ if second_byte == 91:
+ # final byte
+ final_byte = _get_byte(infile)
+ if final_byte == 65:
+ key = 'UP'
+ elif final_byte == 66:
+ key = 'DOWN'
+ elif final_byte == 67:
+ key = 'RIGHT'
+ elif final_byte == 68:
+ key = 'LEFT'
+ else:
+ _put_byte(first_byte)
+ _put_byte(second_byte)
+ _put_byte(final_byte)
+ else:
+ _put_byte(first_byte)
+ _put_byte(second_byte)
+ else:
+ _put_byte(first_byte)
+ return key
+
+
# [ Global ]
_DEFAULT=object() # enable dynamic defaults
-# Tab needs to go before the ASCII parser, because it is technically in the ASCII range,
-# and would be sucked in raw by the ASCII parser.
-_KEY_PARSERS=[tab_parser, ascii_parser, utf8_parser] # ways to parse keys from byte lists
+# Special key parsers need to go before the ASCII parser, because
+# their first byte is generally in the ASCII range,
+# and would be sucked in raw by the ASCII parser.
+_KEY_PARSERS=[tab_parser, arrow_parser, ascii_parser, utf8_parser] # ways to parse keys from byte lists
_BYTES=[] # byte buffer from the input file
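A compact sketch of the three-byte VT220 sequences the `arrow_parser` above decodes; the byte values here come straight from the diff:
```python
# ESC (27) followed by '[' (91) opens the sequence; the final byte
# selects the arrow. These codes match the parser in the patch above.
ARROWS = {65: 'UP', 66: 'DOWN', 67: 'RIGHT', 68: 'LEFT'}

def decode_arrow(byte_seq):
    # Return the arrow name for a three-byte CSI sequence, else None.
    if len(byte_seq) == 3 and byte_seq[0] == 27 and byte_seq[1] == 91:
        return ARROWS.get(byte_seq[2])
    return None

assert decode_arrow([27, 91, 65]) == 'UP'
assert decode_arrow([ord(c) for c in '\x1b[D']) == 'LEFT'
```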
| implement arrow key support | toejough/ugetch | diff --git a/test_ugetch.py b/test_ugetch.py
index 072b7bf..498adbd 100644
--- a/test_ugetch.py
+++ b/test_ugetch.py
@@ -34,7 +34,7 @@ def assert_expected(p, expected):
'''Assert that we get the expected value'''
index = p.expect_exact([expected, pexpect.EOF, pexpect.TIMEOUT])
assert index == 0, "did not find expected ({}) in output ({})".format(
- value, p.before
+ expected, p.before
)
@@ -44,7 +44,7 @@ def in_vs_out(input_values):
p = start_cli_shim()
# test each char
for value in input_values:
- p.expect_exact("hit a key to print its representation: ")
+ assert_expected(p, "hit a key to print its representation: ")
p.send(value)
assert_expected(p, value)
@@ -74,6 +74,24 @@ def test_getch_tab():
assert_expected(p, 'TAB')
+def test_getch_arrows():
+ # get shim
+ p = start_cli_shim()
+ # test each arrow
+ KEY_UP = '\x1b[A'
+ KEY_DOWN = '\x1b[B'
+ KEY_RIGHT = '\x1b[C'
+ KEY_LEFT = '\x1b[D'
+ p.send(KEY_UP)
+ assert_expected(p, 'UP')
+ p.send(KEY_DOWN)
+ assert_expected(p, 'DOWN')
+ p.send(KEY_LEFT)
+ assert_expected(p, 'LEFT')
+ p.send(KEY_RIGHT)
+ assert_expected(p, 'RIGHT')
+
+
if __name__ == '__main__':
import cProfile
cProfile.run("test_getch_ascii()")
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 3,
"test_score": 2
},
"num_modified_files": 2
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pexpect",
"pytest-cache"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
execnet==2.1.1
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
packaging @ file:///croot/packaging_1734472117206/work
pexpect==4.9.0
pluggy @ file:///croot/pluggy_1733169602837/work
ptyprocess==0.7.0
pytest @ file:///croot/pytest_1738938843180/work
pytest-cache==1.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
-e git+https://github.com/toejough/ugetch.git@ddf84a5577fa1b7341dd404bdf5b5e9b9a43cf72#egg=ugetch
| name: ugetch
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- execnet==2.1.1
- pexpect==4.9.0
- ptyprocess==0.7.0
- pytest-cache==1.0
prefix: /opt/conda/envs/ugetch
| [
"test_ugetch.py::test_getch_arrows"
]
| []
| [
"test_ugetch.py::test_getch_ascii",
"test_ugetch.py::test_getch_utf8",
"test_ugetch.py::test_getch_tab"
]
| []
| MIT License | 199 | [
"README.rst",
"ugetch/__init__.py"
]
| [
"README.rst",
"ugetch/__init__.py"
]
|
|
cdent__gabbi-61 | 081a75f5f0ddfdc31c4bab62db2f084a50c9ee99 | 2015-07-19 11:29:17 | 081a75f5f0ddfdc31c4bab62db2f084a50c9ee99 | diff --git a/gabbi/handlers.py b/gabbi/handlers.py
index d39a987..0b7a71d 100644
--- a/gabbi/handlers.py
+++ b/gabbi/handlers.py
@@ -128,9 +128,9 @@ class HeadersResponseHandler(ResponseHandler):
try:
response_value = response[header]
except KeyError:
- # Reform KeyError to something more debuggable.
- raise KeyError("'%s' header not available in response keys: %s"
- % (header, response.keys()))
+ raise AssertionError(
+ "'%s' header not present in response: %s" % (
+ header, response.keys()))
if header_value.startswith('/') and header_value.endswith('/'):
header_value = header_value.strip('/').rstrip('/')
| missing response header raises error rather than failure
The following reports an error when it should report a failure instead:
```yaml
tests:
- name: failure
url: http://google.com
status: 302
response_headers:
x-foo: bar
```
AFAICT that's because internally a `KeyError` is raised ("'x-foo' header not available in response keys: dict_keys(...)"): `testtools.TestCase`'s default `exception_handlers` doesn't have a mapping for that particular exception, so it defaults to `_report_error` rather than using `_report_failure`.
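A minimal sketch of the distinction using plain `unittest` (testtools behaves the same way for these two exception types): `AssertionError` is recorded as a failure, while anything else is recorded as an error.
```python
import unittest

class Demo(unittest.TestCase):
    def test_key_error(self):
        raise KeyError('x-foo')         # recorded as an error

    def test_assertion_error(self):
        raise AssertionError('x-foo')   # recorded as a failure

result = unittest.TestResult()
unittest.defaultTestLoader.loadTestsFromTestCase(Demo).run(result)
print(len(result.errors), len(result.failures))  # -> 1 1
```
This is why the fix below swaps the re-raised `KeyError` for an `AssertionError`.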
| cdent/gabbi | diff --git a/gabbi/tests/test_handlers.py b/gabbi/tests/test_handlers.py
index b647e80..af27359 100644
--- a/gabbi/tests/test_handlers.py
+++ b/gabbi/tests/test_handlers.py
@@ -133,9 +133,9 @@ class HandlersTest(unittest.TestCase):
'location': '/somewhere',
}}
self.test.response = {'content-type': 'application/json'}
- with self.assertRaises(KeyError) as failure:
+ with self.assertRaises(AssertionError) as failure:
self._assert_handler(handler)
- self.assertIn("'location' header not available in response keys:",
+ self.assertIn("'location' header not present in response:",
str(failure.exception))
def _assert_handler(self, handler):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 1.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
decorator==5.2.1
exceptiongroup==1.2.2
execnet==2.1.1
-e git+https://github.com/cdent/gabbi.git@081a75f5f0ddfdc31c4bab62db2f084a50c9ee99#egg=gabbi
httplib2==0.22.0
iniconfig==2.1.0
jsonpath-rw==1.4.0
packaging==24.2
pbr==6.1.1
pluggy==1.5.0
ply==3.11
pyparsing==3.2.3
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
PyYAML==6.0.2
six==1.17.0
testtools==2.7.2
tomli==2.2.1
typing_extensions==4.13.0
wsgi_intercept==1.13.1
| name: gabbi
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- decorator==5.2.1
- exceptiongroup==1.2.2
- execnet==2.1.1
- httplib2==0.22.0
- iniconfig==2.1.0
- jsonpath-rw==1.4.0
- packaging==24.2
- pbr==6.1.1
- pluggy==1.5.0
- ply==3.11
- pyparsing==3.2.3
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- pyyaml==6.0.2
- six==1.17.0
- testtools==2.7.2
- tomli==2.2.1
- typing-extensions==4.13.0
- wsgi-intercept==1.13.1
prefix: /opt/conda/envs/gabbi
| [
"gabbi/tests/test_handlers.py::HandlersTest::test_response_headers_fail_header"
]
| []
| [
"gabbi/tests/test_handlers.py::HandlersTest::test_response_headers",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_headers_fail_data",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_headers_regex",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_json_paths",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_json_paths_fail_data",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_json_paths_fail_path",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_strings",
"gabbi/tests/test_handlers.py::HandlersTest::test_response_strings_fail"
]
| []
| Apache License 2.0 | 200 | [
"gabbi/handlers.py"
]
| [
"gabbi/handlers.py"
]
|
|
colour-science__colour-213 | d09934a0cf3d851ce15b3a7556ec24673a9ddb35 | 2015-07-27 10:51:51 | 3cd6ab8d4c3483bcdeb2d7ef33967160808c0bb2 | diff --git a/colour/algebra/__init__.py b/colour/algebra/__init__.py
index f46d14502..77bfc6f5e 100644
--- a/colour/algebra/__init__.py
+++ b/colour/algebra/__init__.py
@@ -6,17 +6,19 @@ from __future__ import absolute_import
from .coordinates import * # noqa
from . import coordinates
from .extrapolation import Extrapolator1d
-from .interpolation import (LinearInterpolator1d,
- SplineInterpolator,
- SpragueInterpolator)
+from .interpolation import (LinearInterpolator,
+ SpragueInterpolator,
+ CubicSplineInterpolator,
+ PchipInterpolator)
from .matrix import is_identity
from .random import random_triplet_generator
__all__ = []
__all__ += coordinates.__all__
__all__ += ['Extrapolator1d']
-__all__ += ['LinearInterpolator1d',
- 'SplineInterpolator',
- 'SpragueInterpolator']
+__all__ += ['LinearInterpolator',
+ 'SpragueInterpolator',
+ 'CubicSplineInterpolator',
+ 'PchipInterpolator']
__all__ += ['is_identity']
__all__ += ['random_triplet_generator']
diff --git a/colour/algebra/extrapolation.py b/colour/algebra/extrapolation.py
index 148074866..3e60ad80a 100644
--- a/colour/algebra/extrapolation.py
+++ b/colour/algebra/extrapolation.py
@@ -74,10 +74,10 @@ class Extrapolator1d(object):
--------
Extrapolating a single numeric variable:
- >>> from colour.algebra import LinearInterpolator1d
+ >>> from colour.algebra import LinearInterpolator
>>> x = np.array([3, 4, 5])
>>> y = np.array([1, 2, 3])
- >>> interpolator = LinearInterpolator1d(x, y)
+ >>> interpolator = LinearInterpolator(x, y)
>>> extrapolator = Extrapolator1d(interpolator)
>>> extrapolator(1)
-1.0
@@ -91,7 +91,7 @@ class Extrapolator1d(object):
>>> x = np.array([3, 4, 5])
>>> y = np.array([1, 2, 3])
- >>> interpolator = LinearInterpolator1d(x, y)
+ >>> interpolator = LinearInterpolator(x, y)
>>> extrapolator = Extrapolator1d(interpolator, method='Constant')
>>> extrapolator(np.array([0.1, 0.2, 8, 9]))
array([ 1., 1., 3., 3.])
@@ -100,7 +100,7 @@ class Extrapolator1d(object):
>>> x = np.array([3, 4, 5])
>>> y = np.array([1, 2, 3])
- >>> interpolator = LinearInterpolator1d(x, y)
+ >>> interpolator = LinearInterpolator(x, y)
>>> extrapolator = Extrapolator1d(interpolator, method='Constant', left=0)
>>> extrapolator(np.array([0.1, 0.2, 8, 9]))
array([ 0., 0., 3., 3.])
diff --git a/colour/algebra/interpolation.py b/colour/algebra/interpolation.py
index 83aa98bb4..4830120da 100644
--- a/colour/algebra/interpolation.py
+++ b/colour/algebra/interpolation.py
@@ -7,10 +7,12 @@ Interpolation
Defines classes for interpolating variables.
-- :class:`LinearInterpolator1d`: 1-D function linear interpolation.
-- :class:`SplineInterpolator`: 1-D function cubic spline interpolation.
+- :class:`LinearInterpolator`: 1-D function linear interpolation.
- :class:`SpragueInterpolator`: 1-D function fifth-order polynomial
interpolation.
+- :class:`CubicSplineInterpolator`: 1-D function cubic spline interpolation.
+- :class:`PchipInterpolator`: 1-D function piecewise cube Hermite
+ interpolation.
"""
from __future__ import division, unicode_literals
@@ -32,12 +34,13 @@ __maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
-__all__ = ['LinearInterpolator1d',
- 'SplineInterpolator',
- 'SpragueInterpolator']
+__all__ = ['LinearInterpolator',
+ 'SpragueInterpolator',
+ 'CubicSplineInterpolator',
+ 'PchipInterpolator']
-class LinearInterpolator1d(object):
+class LinearInterpolator(object):
"""
Linearly interpolates a 1-D function.
@@ -74,7 +77,7 @@ class LinearInterpolator1d(object):
... 27.8007,
... 86.0500])
>>> x = np.arange(len(y))
- >>> f = LinearInterpolator1d(x, y)
+ >>> f = LinearInterpolator(x, y)
>>> # Doctests ellipsis for Python 2.x compatibility.
>>> f(0.5) # doctest: +ELLIPSIS
7.64...
@@ -225,31 +228,6 @@ class LinearInterpolator1d(object):
raise ValueError('"{0}" is above interpolation range.'.format(x))
-if is_scipy_installed():
- from scipy.interpolate import interp1d
-
- class SplineInterpolator(interp1d):
- """
- Interpolates a 1-D function using cubic spline interpolation.
-
- Notes
- -----
- This class is a wrapper around *scipy.interpolate.interp1d* class.
- """
-
- def __init__(self, *args, **kwargs):
- # TODO: Implements proper wrapper to ensure return values
- # consistency and avoid having to cast to numeric in
- # :meth:`SpectralPowerDistribution.interpolate` method.
- super(SplineInterpolator, self).__init__(
- kind='cubic', *args, **kwargs)
-else:
- warning(('"scipy.interpolate.interp1d" interpolator is unavailable, using '
- '"LinearInterpolator1d" instead!'))
-
- SplineInterpolator = LinearInterpolator1d
-
-
class SpragueInterpolator(object):
"""
Constructs a fifth-order polynomial that passes through :math:`y` dependent
@@ -273,7 +251,7 @@ class SpragueInterpolator(object):
See Also
--------
- LinearInterpolator1d
+ LinearInterpolator
Notes
-----
@@ -523,3 +501,29 @@ class SpragueInterpolator(object):
if above_interpolation_range.any():
raise ValueError('"{0}" is above interpolation range.'.format(x))
+
+
+if is_scipy_installed():
+ from scipy.interpolate import PchipInterpolator, interp1d
+
+ class CubicSplineInterpolator(interp1d):
+ """
+ Interpolates a 1-D function using cubic spline interpolation.
+
+ Notes
+ -----
+ This class is a wrapper around *scipy.interpolate.interp1d* class.
+ """
+
+ def __init__(self, *args, **kwargs):
+ # TODO: Implements proper wrapper to ensure return values
+ # consistency and avoid having to cast to numeric in
+ # :meth:`SpectralPowerDistribution.interpolate` method.
+ super(CubicSplineInterpolator, self).__init__(
+ kind='cubic', *args, **kwargs)
+else:
+ warning(('"scipy.interpolate.PchipInterpolator" and '
+ '"scipy.interpolate.interp1d" interpolators are not available, '
+ 'using "LinearInterpolator" instead!'))
+
+ PchipInterpolator = CubicSplineInterpolator = LinearInterpolator
diff --git a/colour/colorimetry/spectrum.py b/colour/colorimetry/spectrum.py
index 37412d73f..bff22dd7d 100644
--- a/colour/colorimetry/spectrum.py
+++ b/colour/colorimetry/spectrum.py
@@ -26,9 +26,10 @@ import numpy as np
from colour.algebra import (
Extrapolator1d,
- LinearInterpolator1d,
- SplineInterpolator,
- SpragueInterpolator)
+ LinearInterpolator,
+ SpragueInterpolator,
+ CubicSplineInterpolator,
+ PchipInterpolator)
from colour.utilities import (
ArbitraryPrecisionMapping,
is_iterable,
@@ -1587,11 +1588,8 @@ class SpectralPowerDistribution(object):
"""
extrapolator = Extrapolator1d(
- LinearInterpolator1d(self.wavelengths,
- self.values),
- method=method,
- left=left,
- right=right)
+ LinearInterpolator(self.wavelengths, self.values),
+ method=method, left=left, right=right)
spd_shape = self.shape
for i in np.arange(spd_shape.start,
@@ -1618,7 +1616,7 @@ class SpectralPowerDistribution(object):
shape : SpectralShape, optional
Spectral shape used for interpolation.
method : unicode, optional
- {None, 'Sprague', 'Cubic Spline', 'Linear'},
+ {None, 'Cubic Spline', 'Linear', 'Pchip', 'Sprague'},
Enforce given interpolation method.
Returns
@@ -1651,9 +1649,11 @@ class SpectralPowerDistribution(object):
-------
- If *scipy* is not unavailable the *Cubic Spline* method will
fallback to legacy *Linear* interpolation.
+ - *Cubic Spline* interpolator requires at least 3 wavelengths
+ :math:`\lambda_n` for interpolation.
- *Linear* interpolator requires at least 2 wavelengths
:math:`\lambda_n` for interpolation.
- - *Cubic Spline* interpolator requires at least 3 wavelengths
+ - *Pchip* interpolator requires at least 2 wavelengths
:math:`\lambda_n` for interpolation.
- Sprague (1880) interpolator requires at least 6 wavelengths
:math:`\lambda_n` for interpolation.
@@ -1683,13 +1683,6 @@ class SpectralPowerDistribution(object):
Non uniform data is using *Cubic Spline* interpolation by default:
- >>> data = {
- ... 510: 49.67,
- ... 520: 69.59,
- ... 530: 81.73,
- ... 540: 88.19,
- ... 550: 86.26,
- ... 560: 77.18}
>>> spd = SpectralPowerDistribution('Spd', data)
>>> spd[511] = 31.41
>>> spd.interpolate(SpectralShape(steps=1)) # doctest: +ELLIPSIS
@@ -1699,18 +1692,23 @@ class SpectralPowerDistribution(object):
Enforcing *Linear* interpolation:
- >>> data = {
- ... 510: 49.67,
- ... 520: 69.59,
- ... 530: 81.73,
- ... 540: 88.19,
- ... 550: 86.26,
- ... 560: 77.18}
>>> spd = SpectralPowerDistribution('Spd', data)
- >>> spd.interpolate(SpectralShape(steps=1), method='Linear') # noqa # doctest: +ELLIPSIS
+ >>> spd.interpolate( # doctest: +ELLIPSIS
+ ... SpectralShape(steps=1),
+ ... method='Linear')
<...SpectralPowerDistribution object at 0x...>
>>> spd[515] # doctest: +ELLIPSIS
array(59.63...)
+
+ Enforcing *Pchip* interpolation:
+
+ >>> spd = SpectralPowerDistribution('Spd', data)
+ >>> spd.interpolate( # doctest: +ELLIPSIS
+ ... SpectralShape(steps=1),
+ ... method='Pchip')
+ <...SpectralPowerDistribution object at 0x...>
+ >>> spd[515] # doctest: +ELLIPSIS
+ array(58.8173260...)
"""
spd_shape = self.shape
@@ -1732,25 +1730,28 @@ class SpectralPowerDistribution(object):
if method is None:
if is_uniform:
- interpolator = SpragueInterpolator(wavelengths, values)
+ interpolator = SpragueInterpolator
else:
- interpolator = SplineInterpolator(wavelengths, values)
+ interpolator = CubicSplineInterpolator
+ elif method == 'cubic spline':
+ interpolator = CubicSplineInterpolator
+ elif method == 'linear':
+ interpolator = LinearInterpolator
+ elif method == 'pchip':
+ interpolator = PchipInterpolator
elif method == 'sprague':
if is_uniform:
- interpolator = SpragueInterpolator(wavelengths, values)
+ interpolator = SpragueInterpolator
else:
raise RuntimeError(
('"Sprague" interpolator can only be used for '
'interpolating functions having a uniformly spaced '
'independent variable!'))
- elif method == 'cubic spline':
- interpolator = SplineInterpolator(wavelengths, values)
- elif method == 'linear':
- interpolator = LinearInterpolator1d(wavelengths, values)
else:
raise ValueError(
'Undefined "{0}" interpolator!'.format(method))
+ interpolator = interpolator(wavelengths, values)
self.__data = SpectralMapping(
[(wavelength, float(interpolator(wavelength)))
for wavelength in shape])
@@ -3351,7 +3352,7 @@ class TriSpectralPowerDistribution(object):
shape : SpectralShape, optional
Spectral shape used for interpolation.
method : unicode, optional
- {None, 'Sprague', 'Cubic Spline', 'Linear'},
+ {None, 'Cubic Spline', 'Linear', 'Pchip', 'Sprague'},
Enforce given interpolation method.
Returns
@@ -3403,71 +3404,43 @@ class TriSpectralPowerDistribution(object):
>>> tri_spd = TriSpectralPowerDistribution('Tri Spd', data, mapping)
>>> tri_spd.interpolate(SpectralShape(steps=1)) # doctest: +ELLIPSIS
<...TriSpectralPowerDistribution object at 0x...>
- >>> tri_spd[515]
- array([ 60.30332087, 93.27163315, 13.86051361])
+ >>> tri_spd[515] # doctest: +ELLIPSIS
+ array([ 60.3033208..., 93.2716331..., 13.8605136...])
Non uniform data is using *Cubic Spline* interpolation by default:
- >>> x_bar = {
- ... 510: 49.67,
- ... 520: 69.59,
- ... 530: 81.73,
- ... 540: 88.19,
- ... 550: 89.76,
- ... 560: 90.28}
- >>> y_bar = {
- ... 510: 90.56,
- ... 520: 87.34,
- ... 530: 45.76,
- ... 540: 23.45,
- ... 550: 15.34,
- ... 560: 10.11}
- >>> z_bar = {
- ... 510: 12.43,
- ... 520: 23.15,
- ... 530: 67.98,
- ... 540: 90.28,
- ... 550: 91.61,
- ... 560: 98.24}
>>> data = {'x_bar': x_bar, 'y_bar': y_bar, 'z_bar': z_bar}
>>> mapping = {'x': 'x_bar', 'y': 'y_bar', 'z': 'z_bar'}
>>> tri_spd = TriSpectralPowerDistribution('Tri Spd', data, mapping)
>>> tri_spd[511] = np.array([31.41, 95.27, 15.06])
>>> tri_spd.interpolate(SpectralShape(steps=1)) # doctest: +ELLIPSIS
<...TriSpectralPowerDistribution object at 0x...>
- >>> tri_spd[515]
- array([ 21.47104053, 100.64300155, 18.8165196 ])
+ >>> tri_spd[515] # doctest: +ELLIPSIS
+ array([ 21.4710405..., 100.6430015..., 18.8165196...])
Enforcing *Linear* interpolation:
- >>> x_bar = {
- ... 510: 49.67,
- ... 520: 69.59,
- ... 530: 81.73,
- ... 540: 88.19,
- ... 550: 89.76,
- ... 560: 90.28}
- >>> y_bar = {
- ... 510: 90.56,
- ... 520: 87.34,
- ... 530: 45.76,
- ... 540: 23.45,
- ... 550: 15.34,
- ... 560: 10.11}
- >>> z_bar = {
- ... 510: 12.43,
- ... 520: 23.15,
- ... 530: 67.98,
- ... 540: 90.28,
- ... 550: 91.61,
- ... 560: 98.24}
>>> data = {'x_bar': x_bar, 'y_bar': y_bar, 'z_bar': z_bar}
>>> mapping = {'x': 'x_bar', 'y': 'y_bar', 'z': 'z_bar'}
>>> tri_spd = TriSpectralPowerDistribution('Tri Spd', data, mapping)
- >>> tri_spd.interpolate(SpectralShape(steps=1), method='Linear') # noqa # doctest: +ELLIPSIS
+ >>> tri_spd.interpolate( # doctest: +ELLIPSIS
+ ... SpectralShape(steps=1),
+ ... method='Linear')
+ <...TriSpectralPowerDistribution object at 0x...>
+ >>> tri_spd[515] # doctest: +ELLIPSIS
+ array([ 59.63..., 88.95..., 17.79...])
+
+ Enforcing *Pchip* interpolation:
+
+ >>> data = {'x_bar': x_bar, 'y_bar': y_bar, 'z_bar': z_bar}
+ >>> mapping = {'x': 'x_bar', 'y': 'y_bar', 'z': 'z_bar'}
+ >>> tri_spd = TriSpectralPowerDistribution('Tri Spd', data, mapping)
+ >>> tri_spd.interpolate( # doctest: +ELLIPSIS
+ ... SpectralShape(steps=1),
+ ... method='Pchip')
<...TriSpectralPowerDistribution object at 0x...>
- >>> tri_spd[515]
- array([ 59.63, 88.95, 17.79])
+ >>> tri_spd[515] # doctest: +ELLIPSIS
+ array([ 58.8173260..., 89.4355596..., 16.4545683...])
"""
for i in self.__mapping.keys():
diff --git a/colour/colorimetry/tristimulus.py b/colour/colorimetry/tristimulus.py
index e3d3eaa32..a901fc0cc 100644
--- a/colour/colorimetry/tristimulus.py
+++ b/colour/colorimetry/tristimulus.py
@@ -19,8 +19,13 @@ from __future__ import division, unicode_literals
import numpy as np
-from colour.algebra import SplineInterpolator, SpragueInterpolator
+from colour.algebra import (
+ CubicSplineInterpolator,
+ LinearInterpolator,
+ PchipInterpolator,
+ SpragueInterpolator)
from colour.colorimetry import STANDARD_OBSERVERS_CMFS, ones_spd
+from colour.utilities import is_string
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013 - 2015 - Colour Developers'
@@ -111,7 +116,8 @@ def spectral_to_XYZ(spd,
def wavelength_to_XYZ(wavelength,
cmfs=STANDARD_OBSERVERS_CMFS.get(
- 'CIE 1931 2 Degree Standard Observer')):
+ 'CIE 1931 2 Degree Standard Observer'),
+ method=None):
"""
Converts given wavelength :math:`\lambda` to *CIE XYZ* tristimulus values
using given colour matching functions.
@@ -128,6 +134,9 @@ def wavelength_to_XYZ(wavelength,
Wavelength :math:`\lambda` in nm.
cmfs : XYZ_ColourMatchingFunctions, optional
Standard observer colour matching functions.
+ method : unicode, optional
+ {None, 'Cubic Spline', 'Linear', 'Pchip', 'Sprague'},
+ Enforce given interpolation method.
Returns
-------
@@ -136,22 +145,60 @@ def wavelength_to_XYZ(wavelength,
Raises
------
+ RuntimeError
+ If Sprague (1880) interpolation method is forced with a
+ non-uniformly spaced independent variable.
ValueError
- If wavelength :math:`\lambda` is not contained in the colour matching
- functions domain.
+ If the interpolation method is not defined or if wavelength
+ :math:`\lambda` is not contained in the colour matching functions
+ domain.
Notes
-----
- Output *CIE XYZ* tristimulus values are in domain [0, 1].
- If *scipy* is not unavailable the *Cubic Spline* method will fallback
to legacy *Linear* interpolation.
+ - Sprague (1880) interpolator cannot be used for interpolating
+ functions having a non-uniformly spaced independent variable.
+
+ Warning
+ -------
+ - If *scipy* is not unavailable the *Cubic Spline* method will fallback
+ to legacy *Linear* interpolation.
+ - *Cubic Spline* interpolator requires at least 3 wavelengths
+ :math:`\lambda_n` for interpolation.
+ - *Linear* interpolator requires at least 2 wavelengths :math:`\lambda_n`
+ for interpolation.
+ - *Pchip* interpolator requires at least 2 wavelengths :math:`\lambda_n`
+ for interpolation.
+ - Sprague (1880) interpolator requires at least 6 wavelengths
+ :math:`\lambda_n` for interpolation.
Examples
--------
+ Uniform data is using Sprague (1880) interpolation by default:
+
>>> from colour import CMFS
>>> cmfs = CMFS.get('CIE 1931 2 Degree Standard Observer')
- >>> wavelength_to_XYZ(480) # doctest: +ELLIPSIS
+ >>> wavelength_to_XYZ(480, cmfs) # doctest: +ELLIPSIS
array([ 0.09564 , 0.13902 , 0.812950...])
+ >>> wavelength_to_XYZ(480.5, cmfs) # doctest: +ELLIPSIS
+ array([ 0.0914287..., 0.1418350..., 0.7915726...])
+
+ Enforcing *Cubic Spline* interpolation:
+
+ >>> wavelength_to_XYZ(480.5, cmfs, 'Cubic Spline') # doctest: +ELLIPSIS
+ array([ 0.0914288..., 0.1418351..., 0.7915729...])
+
+ Enforcing *Linear* interpolation:
+
+ >>> wavelength_to_XYZ(480.5, cmfs, 'Linear') # doctest: +ELLIPSIS
+ array([ 0.0914697..., 0.1418482..., 0.7917337...])
+
+ Enforcing *Pchip* interpolation:
+
+ >>> wavelength_to_XYZ(480.5, cmfs, 'Pchip') # doctest: +ELLIPSIS
+ array([ 0.0914280..., 0.1418341..., 0.7915711...])
"""
cmfs_shape = cmfs.shape
@@ -163,9 +210,34 @@ def wavelength_to_XYZ(wavelength,
if wavelength not in cmfs:
wavelengths, values, = cmfs.wavelengths, cmfs.values
- interpolator = (SpragueInterpolator
- if cmfs.is_uniform() else
- SplineInterpolator)
+
+ if is_string(method):
+ method = method.lower()
+
+ is_uniform = cmfs.is_uniform()
+
+ if method is None:
+ if is_uniform:
+ interpolator = SpragueInterpolator
+ else:
+ interpolator = CubicSplineInterpolator
+ elif method == 'cubic spline':
+ interpolator = CubicSplineInterpolator
+ elif method == 'linear':
+ interpolator = LinearInterpolator
+ elif method == 'pchip':
+ interpolator = PchipInterpolator
+ elif method == 'sprague':
+ if is_uniform:
+ interpolator = SpragueInterpolator
+ else:
+ raise RuntimeError(
+ ('"Sprague" interpolator can only be used for '
+ 'interpolating functions having a uniformly spaced '
+ 'independent variable!'))
+ else:
+ raise ValueError(
+ 'Undefined "{0}" interpolator!'.format(method))
interpolators = [interpolator(wavelengths, values[..., i])
for i in range(values.shape[-1])]
diff --git a/colour/examples/algebra/examples_interpolation.py b/colour/examples/algebra/examples_interpolation.py
index 9f94f50e4..d2edba760 100644
--- a/colour/examples/algebra/examples_interpolation.py
+++ b/colour/examples/algebra/examples_interpolation.py
@@ -16,7 +16,7 @@ from colour.utilities.verbose import message_box
message_box('Interpolation Computations')
message_box(('Comparing Sprague (1880) and "Cubic Spline" recommended '
- 'interpolation methods.'))
+ 'interpolation methods to "Pchip" method.'))
uniform_spd_data = {
340: 0.0000,
@@ -78,11 +78,16 @@ base_spd = colour.SpectralPowerDistribution(
uniform_interpolated_spd = colour.SpectralPowerDistribution(
'Uniform - Sprague Interpolation',
uniform_spd_data)
+uniform_pchip_interpolated_spd = colour.SpectralPowerDistribution(
+ 'Uniform - Pchip Interpolation',
+ uniform_spd_data)
non_uniform_interpolated_spd = colour.SpectralPowerDistribution(
'Non Uniform - Cubic Spline Interpolation',
non_uniform_spd_data)
uniform_interpolated_spd.interpolate(colour.SpectralShape(steps=1))
+uniform_pchip_interpolated_spd.interpolate(colour.SpectralShape(steps=1),
+ method='Pchip')
non_uniform_interpolated_spd.interpolate(colour.SpectralShape(steps=1))
shape = base_spd.shape
@@ -97,6 +102,10 @@ pylab.plot(uniform_interpolated_spd.wavelengths,
uniform_interpolated_spd.values,
label=uniform_interpolated_spd.name,
linewidth=2)
+pylab.plot(uniform_pchip_interpolated_spd.wavelengths,
+ uniform_pchip_interpolated_spd.values,
+ label=uniform_pchip_interpolated_spd.name,
+ linewidth=2)
pylab.plot(non_uniform_interpolated_spd.wavelengths,
non_uniform_interpolated_spd.values,
label=non_uniform_interpolated_spd.name,
diff --git a/colour/notation/munsell.py b/colour/notation/munsell.py
index 9a39cc6f7..93106af11 100644
--- a/colour/notation/munsell.py
+++ b/colour/notation/munsell.py
@@ -53,7 +53,7 @@ except ImportError:
from colour.algebra import (
Extrapolator1d,
- LinearInterpolator1d,
+ LinearInterpolator,
cartesian_to_cylindrical)
from colour.colorimetry import ILLUMINANTS, luminance_ASTMD153508
from colour.constants import (
@@ -197,7 +197,7 @@ def _munsell_value_ASTMD153508_interpolator():
munsell_values = np.arange(0, 10, 0.001)
if _MUNSELL_VALUE_ASTM_D1535_08_INTERPOLATOR_CACHE is None:
_MUNSELL_VALUE_ASTM_D1535_08_INTERPOLATOR_CACHE = Extrapolator1d(
- LinearInterpolator1d(
+ LinearInterpolator(
luminance_ASTMD153508(munsell_values),
munsell_values))
@@ -624,8 +624,8 @@ def munsell_specification_to_xyY(specification):
else:
Y_minus = luminance_ASTMD153508(value_minus)
Y_plus = luminance_ASTMD153508(value_plus)
- x = LinearInterpolator1d((Y_minus, Y_plus), (x_minus, x_plus))(Y)
- y = LinearInterpolator1d((Y_minus, Y_plus), (y_minus, y_plus))(Y)
+ x = LinearInterpolator((Y_minus, Y_plus), (x_minus, x_plus))(Y)
+ y = LinearInterpolator((Y_minus, Y_plus), (y_minus, y_plus))(Y)
return np.array([x, y, Y / 100])
@@ -826,7 +826,7 @@ def xyY_to_munsell_specification(xyY):
theta_differences_indexes]
hue_angle_difference_new = Extrapolator1d(
- LinearInterpolator1d(
+ LinearInterpolator(
theta_differences,
hue_angles_differences))(0) % 360
hue_angle_new = (hue_angle_current + hue_angle_difference_new) % 360
@@ -891,7 +891,7 @@ def xyY_to_munsell_specification(xyY):
rho_bounds = rho_bounds[rhos_bounds_indexes]
chroma_bounds = chroma_bounds[rhos_bounds_indexes]
- chroma_new = LinearInterpolator1d(rho_bounds, chroma_bounds)(rho_input)
+ chroma_new = LinearInterpolator(rho_bounds, chroma_bounds)(rho_input)
specification_current = [hue_current, value, chroma_new, code_current]
x_current, y_current, Y_current = np.ravel(
@@ -1305,7 +1305,7 @@ def hue_to_hue_angle(hue, code):
"""
single_hue = ((17 - code) % 10 + (hue / 10) - 0.5) % 10
- return LinearInterpolator1d(
+ return LinearInterpolator(
(0, 2, 3, 4, 5, 6, 8, 9, 10),
(0, 45, 70, 135, 160, 225, 255, 315, 360))(single_hue)
@@ -1339,7 +1339,7 @@ def hue_angle_to_hue(hue_angle):
(3.2160000..., 4)
"""
- single_hue = LinearInterpolator1d(
+ single_hue = LinearInterpolator(
(0, 45, 70, 135, 160, 225, 255, 315, 360),
(0, 2, 3, 4, 5, 6, 8, 9, 10))(hue_angle)
@@ -1826,15 +1826,15 @@ def xy_from_renotation_ovoid(specification):
specification).lower()
if interpolation_method == 'linear':
- x = LinearInterpolator1d((lower_hue_angle, upper_hue_angle),
- (x_minus, x_plus))(hue_angle)
- y = LinearInterpolator1d((lower_hue_angle, upper_hue_angle),
- (y_minus, y_plus))(hue_angle)
+ x = LinearInterpolator((lower_hue_angle, upper_hue_angle),
+ (x_minus, x_plus))(hue_angle)
+ y = LinearInterpolator((lower_hue_angle, upper_hue_angle),
+ (y_minus, y_plus))(hue_angle)
elif interpolation_method == 'radial':
- theta = LinearInterpolator1d((lower_hue_angle, upper_hue_angle),
- (theta_minus, theta_plus))(hue_angle)
- rho = LinearInterpolator1d((lower_hue_angle, upper_hue_angle),
- (rho_minus, rho_plus))(hue_angle)
+ theta = LinearInterpolator((lower_hue_angle, upper_hue_angle),
+ (theta_minus, theta_plus))(hue_angle)
+ rho = LinearInterpolator((lower_hue_angle, upper_hue_angle),
+ (rho_minus, rho_plus))(hue_angle)
x = rho * np.cos(np.radians(theta)) + x_grey
y = rho * np.sin(np.radians(theta)) + y_grey
@@ -1903,7 +1903,7 @@ def LCHab_to_munsell_specification(LCHab):
else:
code = 8
- hue = LinearInterpolator1d((0, 36), (0, 10))(Hab % 36)
+ hue = LinearInterpolator((0, 36), (0, 10))(Hab % 36)
if hue == 0:
hue = 10
@@ -1986,8 +1986,8 @@ def maximum_chroma_from_renotation(hue, value, code):
L9 = luminance_ASTMD153508(9)
L10 = luminance_ASTMD153508(10)
- max_chroma = min(LinearInterpolator1d((L9, L10), (ma_limit_mcw, 0))(L),
- LinearInterpolator1d((L9, L10), (ma_limit_mccw, 0))(
+ max_chroma = min(LinearInterpolator((L9, L10), (ma_limit_mcw, 0))(L),
+ LinearInterpolator((L9, L10), (ma_limit_mccw, 0))(
L))
return max_chroma
@@ -2065,9 +2065,9 @@ def munsell_specification_to_xy(specification):
x = x_minus
y = y_minus
else:
- x = LinearInterpolator1d((chroma_minus, chroma_plus),
- (x_minus, x_plus))(chroma)
- y = LinearInterpolator1d((chroma_minus, chroma_plus),
- (y_minus, y_plus))(chroma)
+ x = LinearInterpolator((chroma_minus, chroma_plus),
+ (x_minus, x_plus))(chroma)
+ y = LinearInterpolator((chroma_minus, chroma_plus),
+ (y_minus, y_plus))(chroma)
return x, y
diff --git a/colour/plotting/__init__.py b/colour/plotting/__init__.py
index 04dbb252b..0686b6164 100644
--- a/colour/plotting/__init__.py
+++ b/colour/plotting/__init__.py
@@ -12,9 +12,9 @@ from .common import (
DEFAULT_FIGURE_HEIGHT,
DEFAULT_FIGURE_SIZE,
DEFAULT_FONT_SIZE,
- DEFAULT_PARAMETERS,
DEFAULT_COLOUR_CYCLE,
DEFAULT_HATCH_PATTERNS,
+ DEFAULT_PARAMETERS,
DEFAULT_PLOTTING_ILLUMINANT,
DEFAULT_PLOTTING_OECF,
ColourParameter,
@@ -91,9 +91,9 @@ __all__ += [
'DEFAULT_FIGURE_HEIGHT',
'DEFAULT_FIGURE_SIZE',
'DEFAULT_FONT_SIZE',
- 'DEFAULT_PARAMETERS',
'DEFAULT_COLOUR_CYCLE',
'DEFAULT_HATCH_PATTERNS',
+ 'DEFAULT_PARAMETERS',
'DEFAULT_PLOTTING_ILLUMINANT',
'DEFAULT_PLOTTING_OECF',
'ColourParameter',
diff --git a/colour/plotting/colorimetry.py b/colour/plotting/colorimetry.py
index 049493214..730c22501 100644
--- a/colour/plotting/colorimetry.py
+++ b/colour/plotting/colorimetry.py
@@ -256,8 +256,6 @@ def multi_spd_plot(spds,
y_limit_min.append(min(values))
y_limit_max.append(max(values))
- matplotlib.pyplot.rc("axes", color_cycle=["r", "g", "b", "y"])
-
if use_spds_colours:
XYZ = spectral_to_XYZ(spd, cmfs, illuminant) / 100
if normalise_spds_colours:
diff --git a/colour/plotting/common.py b/colour/plotting/common.py
index 1b0eae504..4dd9bfd4e 100644
--- a/colour/plotting/common.py
+++ b/colour/plotting/common.py
@@ -53,9 +53,9 @@ __all__ = ['PLOTTING_RESOURCES_DIRECTORY',
'DEFAULT_FIGURE_HEIGHT',
'DEFAULT_FIGURE_SIZE',
'DEFAULT_FONT_SIZE',
- 'DEFAULT_PARAMETERS',
'DEFAULT_COLOUR_CYCLE',
'DEFAULT_HATCH_PATTERNS',
+ 'DEFAULT_PARAMETERS',
'DEFAULT_PLOTTING_ILLUMINANT',
'DEFAULT_PLOTTING_OECF',
'ColourParameter',
@@ -123,22 +123,6 @@ DEFAULT_FONT_SIZE : numeric
if 'Qt4Agg' in matplotlib.get_backend():
DEFAULT_FONT_SIZE = 10
-DEFAULT_PARAMETERS = {
- 'figure.figsize': DEFAULT_FIGURE_SIZE,
- 'font.size': DEFAULT_FONT_SIZE,
- 'axes.titlesize': DEFAULT_FONT_SIZE * 1.25,
- 'axes.labelsize': DEFAULT_FONT_SIZE * 1.25,
- 'legend.fontsize': DEFAULT_FONT_SIZE * 0.9,
- 'xtick.labelsize': DEFAULT_FONT_SIZE,
- 'ytick.labelsize': DEFAULT_FONT_SIZE}
-"""
-Default plotting parameters.
-
-DEFAULT_PARAMETERS : dict
-"""
-
-pylab.rcParams.update(DEFAULT_PARAMETERS)
-
DEFAULT_COLOUR_CYCLE = ('r', 'g', 'b', 'c', 'm', 'y', 'k')
"""
Default colour cycle for plots.
@@ -155,6 +139,23 @@ DEFAULT_HATCH_PATTERNS : tuple
{'\\\\', 'o', 'x', '.', '*', '//'}
"""
+DEFAULT_PARAMETERS = {
+ 'figure.figsize': DEFAULT_FIGURE_SIZE,
+ 'font.size': DEFAULT_FONT_SIZE,
+ 'axes.titlesize': DEFAULT_FONT_SIZE * 1.25,
+ 'axes.labelsize': DEFAULT_FONT_SIZE * 1.25,
+ 'legend.fontsize': DEFAULT_FONT_SIZE * 0.9,
+ 'xtick.labelsize': DEFAULT_FONT_SIZE,
+ 'ytick.labelsize': DEFAULT_FONT_SIZE,
+ 'axes.color_cycle': DEFAULT_COLOUR_CYCLE}
+"""
+Default plotting parameters.
+
+DEFAULT_PARAMETERS : dict
+"""
+
+pylab.rcParams.update(DEFAULT_PARAMETERS)
+
DEFAULT_PLOTTING_ILLUMINANT = ILLUMINANTS.get(
'CIE 1931 2 Degree Standard Observer').get('D65')
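The feature being added wraps SciPy's piecewise cubic Hermite interpolator; a small usage sketch with the sample spectral data from the docstrings above:
```python
import numpy as np
from scipy.interpolate import PchipInterpolator

wavelengths = np.array([510, 520, 530, 540, 550, 560])
values = np.array([49.67, 69.59, 81.73, 88.19, 86.26, 77.18])

# PCHIP preserves monotonicity between samples, avoiding the overshoot
# a cubic spline can introduce on spectral power distributions.
pchip = PchipInterpolator(wavelengths, values)
print(float(pchip(515)))  # ~58.817, matching the 'Pchip' docstring example
```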
"""
| Implement support for spectral data "Piecewise Cubic Hermite Interpolation". | colour-science/colour | diff --git a/colour/algebra/tests/tests_extrapolation.py b/colour/algebra/tests/tests_extrapolation.py
index 9565bb613..00f7a4262 100644
--- a/colour/algebra/tests/tests_extrapolation.py
+++ b/colour/algebra/tests/tests_extrapolation.py
@@ -16,7 +16,7 @@ else:
import unittest
from itertools import permutations
-from colour.algebra import Extrapolator1d, LinearInterpolator1d
+from colour.algebra import Extrapolator1d, LinearInterpolator
from colour.utilities import ignore_numpy_errors
__author__ = 'Colour Developers'
@@ -62,14 +62,14 @@ class TestExtrapolator1d(unittest.TestCase):
"""
extrapolator = Extrapolator1d(
- LinearInterpolator1d(
+ LinearInterpolator(
np.array([5, 6, 7]),
np.array([5, 6, 7])))
np.testing.assert_almost_equal(extrapolator((4, 8)), (4., 8.))
self.assertEqual(extrapolator(4), 4.)
extrapolator = Extrapolator1d(
- LinearInterpolator1d(
+ LinearInterpolator(
np.array([3, 4, 5]),
np.array([1, 2, 3])),
method='Constant')
@@ -78,7 +78,7 @@ class TestExtrapolator1d(unittest.TestCase):
self.assertEqual(extrapolator(0.1), 1.)
extrapolator = Extrapolator1d(
- LinearInterpolator1d(
+ LinearInterpolator(
np.array([3, 4, 5]),
np.array([1, 2, 3])),
method='Constant',
@@ -88,7 +88,7 @@ class TestExtrapolator1d(unittest.TestCase):
self.assertEqual(extrapolator(0.1), 0.)
extrapolator = Extrapolator1d(
- LinearInterpolator1d(
+ LinearInterpolator(
np.array([3, 4, 5]),
np.array([1, 2, 3])),
method='Constant',
@@ -108,7 +108,7 @@ class TestExtrapolator1d(unittest.TestCase):
cases = set(permutations(cases * 3, r=3))
for case in cases:
extrapolator = Extrapolator1d(
- LinearInterpolator1d(np.array(case), np.array(case)))
+ LinearInterpolator(np.array(case), np.array(case)))
extrapolator(case[0])
diff --git a/colour/algebra/tests/tests_interpolation.py b/colour/algebra/tests/tests_interpolation.py
index ba5113322..0b85c20f8 100644
--- a/colour/algebra/tests/tests_interpolation.py
+++ b/colour/algebra/tests/tests_interpolation.py
@@ -17,7 +17,7 @@ else:
from itertools import permutations
from colour.algebra import (
- LinearInterpolator1d,
+ LinearInterpolator,
SpragueInterpolator)
from colour.utilities import ignore_numpy_errors
@@ -362,7 +362,7 @@ SPRAGUE_INTERPOLATED_POINTS_DATA_A_10_SAMPLES = (
class TestLinearInterpolator1d(unittest.TestCase):
"""
Defines
- :func:`colour.algebra.interpolation.LinearInterpolator1d` class units
+ :func:`colour.algebra.interpolation.LinearInterpolator` class units
tests methods.
"""
@@ -375,7 +375,7 @@ class TestLinearInterpolator1d(unittest.TestCase):
'y')
for attribute in required_attributes:
- self.assertIn(attribute, dir(LinearInterpolator1d))
+ self.assertIn(attribute, dir(LinearInterpolator))
def test_required_methods(self):
"""
@@ -385,18 +385,18 @@ class TestLinearInterpolator1d(unittest.TestCase):
required_methods = ()
for method in required_methods:
- self.assertIn(method, dir(LinearInterpolator1d))
+ self.assertIn(method, dir(LinearInterpolator))
def test___call__(self):
"""
Tests
- :func:`colour.algebra.interpolation.LinearInterpolator1d.__call__`
+ :func:`colour.algebra.interpolation.LinearInterpolator.__call__`
method.
"""
steps = 0.1
x = np.arange(len(POINTS_DATA_A))
- linear_interpolator = LinearInterpolator1d(x, POINTS_DATA_A)
+ linear_interpolator = LinearInterpolator(x, POINTS_DATA_A)
for i, value in enumerate(
np.arange(0, len(POINTS_DATA_A) - 1 + steps, steps)):
@@ -414,7 +414,7 @@ class TestLinearInterpolator1d(unittest.TestCase):
def test_nan__call__(self):
"""
Tests
- :func:`colour.algebra.interpolation.LinearInterpolator1d.__call__`
+ :func:`colour.algebra.interpolation.LinearInterpolator.__call__`
method nan support.
"""
@@ -422,7 +422,7 @@ class TestLinearInterpolator1d(unittest.TestCase):
cases = set(permutations(cases * 3, r=3))
for case in cases:
try:
- linear_interpolator = LinearInterpolator1d(
+ linear_interpolator = LinearInterpolator(
np.array(case), np.array(case))
linear_interpolator(case[0])
except ValueError:
diff --git a/colour/colorimetry/tests/tests_spectrum.py b/colour/colorimetry/tests/tests_spectrum.py
index f32ef44ce..5677a5226 100644
--- a/colour/colorimetry/tests/tests_spectrum.py
+++ b/colour/colorimetry/tests/tests_spectrum.py
@@ -2434,6 +2434,20 @@ class TestSpectralPowerDistribution(unittest.TestCase):
rtol=0.0000001,
atol=0.0000001)
+ np.testing.assert_almost_equal(
+ self.__spd.clone().interpolate(
+ SpectralShape(steps=1),
+ method='Linear')[410],
+ np.array(0.0643),
+ decimal=7)
+
+ np.testing.assert_almost_equal(
+ self.__spd.clone().interpolate(
+ SpectralShape(steps=1),
+ method='Pchip')[410],
+ np.array(0.06439937984496125),
+ decimal=7)
+
def test_align(self):
"""
Tests
@@ -2971,6 +2985,20 @@ class TestTriSpectralPowerDistribution(unittest.TestCase):
rtol=0.0000001,
atol=0.0000001)
+ np.testing.assert_almost_equal(
+ self.__tri_spd.clone().interpolate(
+ SpectralShape(steps=1),
+ method='Linear')[411],
+ np.array([0.050334, 0.001404, 0.24018]),
+ decimal=7)
+
+ np.testing.assert_almost_equal(
+ self.__tri_spd.clone().interpolate(
+ SpectralShape(steps=1),
+ method='Pchip')[411],
+ np.array([0.04895501, 0.00136229, 0.23349933]),
+ decimal=7)
+
def test_align(self):
"""
Tests
diff --git a/colour/colorimetry/tests/tests_tristimulus.py b/colour/colorimetry/tests/tests_tristimulus.py
index 33fecf517..97a72d384 100644
--- a/colour/colorimetry/tests/tests_tristimulus.py
+++ b/colour/colorimetry/tests/tests_tristimulus.py
@@ -208,6 +208,30 @@ class TestWavelength_to_XYZ(unittest.TestCase):
np.array([0.44575583, 0.18184213, 0.]),
decimal=7)
+ np.testing.assert_almost_equal(
+ wavelength_to_XYZ(
+ 480.5,
+ CMFS.get('CIE 2012 2 Degree Standard Observer'),
+ 'Cubic Spline'),
+ np.array([0.07773422, 0.18148028, 0.7337162]),
+ decimal=7)
+
+ np.testing.assert_almost_equal(
+ wavelength_to_XYZ(
+ 480.5,
+ CMFS.get('CIE 2012 2 Degree Standard Observer'),
+ 'Linear'),
+ np.array([0.07779856, 0.18149335, 0.7340129]),
+ decimal=7)
+
+ np.testing.assert_almost_equal(
+ wavelength_to_XYZ(
+ 480.5,
+ CMFS.get('CIE 2012 2 Degree Standard Observer'),
+ 'Pchip'),
+ np.array([0.07773515, 0.18148048, 0.73372294]),
+ decimal=7)
+
def test_n_dimensional_wavelength_to_XYZ(self):
"""
Tests
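The 'Pchip' method these tests exercise is piecewise cubic Hermite interpolation. A minimal standalone sketch using SciPy's `PchipInterpolator` (illustration only; the wavelengths and values below are made up, and the library's own interpolator class may wrap this differently):

```python
# Minimal sketch: shape-preserving piecewise cubic Hermite (Pchip) interpolation.
import numpy as np
from scipy.interpolate import PchipInterpolator

# A coarse spectral power distribution: wavelength (nm) -> value.
wavelengths = np.array([400, 420, 440, 460, 480])
values = np.array([0.0641, 0.0645, 0.0785, 0.0952, 0.0772])

interpolator = PchipInterpolator(wavelengths, values)
# Pchip preserves the shape of the data between samples, avoiding the
# overshoot a plain cubic spline can introduce on spectral data.
print(interpolator(410))
```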
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 10
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[tests]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.4",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
-e git+https://github.com/colour-science/colour.git@d09934a0cf3d851ce15b3a7556ec24673a9ddb35#egg=colour_science
coverage==6.2
flake8==5.0.4
importlib-metadata==4.2.0
iniconfig==1.1.1
mccabe==0.7.0
nose==1.3.7
numpy==1.19.5
packaging==21.3
pluggy==1.0.0
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing==3.1.4
pytest==7.0.1
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: colour
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- flake8==5.0.4
- importlib-metadata==4.2.0
- iniconfig==1.1.1
- mccabe==0.7.0
- nose==1.3.7
- numpy==1.19.5
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/colour
| [
"colour/algebra/tests/tests_extrapolation.py::TestExtrapolator1d::test___call__",
"colour/algebra/tests/tests_extrapolation.py::TestExtrapolator1d::test_nan__call__",
"colour/algebra/tests/tests_extrapolation.py::TestExtrapolator1d::test_required_attributes",
"colour/algebra/tests/tests_extrapolation.py::TestExtrapolator1d::test_required_methods",
"colour/algebra/tests/tests_interpolation.py::TestLinearInterpolator1d::test___call__",
"colour/algebra/tests/tests_interpolation.py::TestLinearInterpolator1d::test_nan__call__",
"colour/algebra/tests/tests_interpolation.py::TestLinearInterpolator1d::test_required_attributes",
"colour/algebra/tests/tests_interpolation.py::TestLinearInterpolator1d::test_required_methods",
"colour/algebra/tests/tests_interpolation.py::TestSpragueInterpolator::test___call__",
"colour/algebra/tests/tests_interpolation.py::TestSpragueInterpolator::test_nan__call__",
"colour/algebra/tests/tests_interpolation.py::TestSpragueInterpolator::test_required_attributes",
"colour/algebra/tests/tests_interpolation.py::TestSpragueInterpolator::test_required_methods",
"colour/colorimetry/tests/tests_spectrum.py::TestSpectralMapping::test_required_attributes",
"colour/colorimetry/tests/tests_spectrum.py::TestSpectralMapping::test_required_methods",
"colour/colorimetry/tests/tests_spectrum.py::TestSpectralShape::test__contains__",
"colour/colorimetry/tests/tests_spectrum.py::TestSpectralShape::test__eq__",
"colour/colorimetry/tests/tests_spectrum.py::TestSpectralShape::test__iter__",
"colour/colorimetry/tests/tests_spectrum.py::TestSpectralShape::test__len__",
"colour/colorimetry/tests/tests_spectrum.py::TestSpectralShape::test__ne__",
"colour/colorimetry/tests/tests_spectrum.py::TestSpectralShape::test_end",
"colour/colorimetry/tests/tests_spectrum.py::TestSpectralShape::test_range",
"colour/colorimetry/tests/tests_spectrum.py::TestSpectralShape::test_required_attributes",
"colour/colorimetry/tests/tests_spectrum.py::TestSpectralShape::test_required_methods",
"colour/colorimetry/tests/tests_spectrum.py::TestSpectralShape::test_start",
"colour/colorimetry/tests/tests_spectrum.py::TestSpectralShape::test_steps",
"colour/colorimetry/tests/tests_spectrum.py::TestSpectralPowerDistribution::test__add__",
"colour/colorimetry/tests/tests_spectrum.py::TestSpectralPowerDistribution::test__contains__",
"colour/colorimetry/tests/tests_spectrum.py::TestSpectralPowerDistribution::test__div__",
"colour/colorimetry/tests/tests_spectrum.py::TestSpectralPowerDistribution::test__eq__",
"colour/colorimetry/tests/tests_spectrum.py::TestSpectralPowerDistribution::test__getitem__",
"colour/colorimetry/tests/tests_spectrum.py::TestSpectralPowerDistribution::test__iter__",
"colour/colorimetry/tests/tests_spectrum.py::TestSpectralPowerDistribution::test__len__",
"colour/colorimetry/tests/tests_spectrum.py::TestSpectralPowerDistribution::test__mul__",
"colour/colorimetry/tests/tests_spectrum.py::TestSpectralPowerDistribution::test__ne__",
"colour/colorimetry/tests/tests_spectrum.py::TestSpectralPowerDistribution::test__pow__",
"colour/colorimetry/tests/tests_spectrum.py::TestSpectralPowerDistribution::test__setitem__",
"colour/colorimetry/tests/tests_spectrum.py::TestSpectralPowerDistribution::test__sub__",
"colour/colorimetry/tests/tests_spectrum.py::TestSpectralPowerDistribution::test_align",
"colour/colorimetry/tests/tests_spectrum.py::TestSpectralPowerDistribution::test_clone",
"colour/colorimetry/tests/tests_spectrum.py::TestSpectralPowerDistribution::test_extrapolate",
"colour/colorimetry/tests/tests_spectrum.py::TestSpectralPowerDistribution::test_get",
"colour/colorimetry/tests/tests_spectrum.py::TestSpectralPowerDistribution::test_is_uniform",
"colour/colorimetry/tests/tests_spectrum.py::TestSpectralPowerDistribution::test_normalise",
"colour/colorimetry/tests/tests_spectrum.py::TestSpectralPowerDistribution::test_required_attributes",
"colour/colorimetry/tests/tests_spectrum.py::TestSpectralPowerDistribution::test_required_methods",
"colour/colorimetry/tests/tests_spectrum.py::TestSpectralPowerDistribution::test_shape",
"colour/colorimetry/tests/tests_spectrum.py::TestSpectralPowerDistribution::test_values",
"colour/colorimetry/tests/tests_spectrum.py::TestSpectralPowerDistribution::test_wavelengths",
"colour/colorimetry/tests/tests_spectrum.py::TestSpectralPowerDistribution::test_zeros",
"colour/colorimetry/tests/tests_spectrum.py::TestTriSpectralPowerDistribution::test__add__",
"colour/colorimetry/tests/tests_spectrum.py::TestTriSpectralPowerDistribution::test__contains__",
"colour/colorimetry/tests/tests_spectrum.py::TestTriSpectralPowerDistribution::test__div__",
"colour/colorimetry/tests/tests_spectrum.py::TestTriSpectralPowerDistribution::test__eq__",
"colour/colorimetry/tests/tests_spectrum.py::TestTriSpectralPowerDistribution::test__getitem__",
"colour/colorimetry/tests/tests_spectrum.py::TestTriSpectralPowerDistribution::test__iter__",
"colour/colorimetry/tests/tests_spectrum.py::TestTriSpectralPowerDistribution::test__len__",
"colour/colorimetry/tests/tests_spectrum.py::TestTriSpectralPowerDistribution::test__mul__",
"colour/colorimetry/tests/tests_spectrum.py::TestTriSpectralPowerDistribution::test__ne__",
"colour/colorimetry/tests/tests_spectrum.py::TestTriSpectralPowerDistribution::test__pow__",
"colour/colorimetry/tests/tests_spectrum.py::TestTriSpectralPowerDistribution::test__setitem__",
"colour/colorimetry/tests/tests_spectrum.py::TestTriSpectralPowerDistribution::test__sub__",
"colour/colorimetry/tests/tests_spectrum.py::TestTriSpectralPowerDistribution::test_align",
"colour/colorimetry/tests/tests_spectrum.py::TestTriSpectralPowerDistribution::test_clone",
"colour/colorimetry/tests/tests_spectrum.py::TestTriSpectralPowerDistribution::test_extrapolate",
"colour/colorimetry/tests/tests_spectrum.py::TestTriSpectralPowerDistribution::test_get",
"colour/colorimetry/tests/tests_spectrum.py::TestTriSpectralPowerDistribution::test_is_uniform",
"colour/colorimetry/tests/tests_spectrum.py::TestTriSpectralPowerDistribution::test_normalise",
"colour/colorimetry/tests/tests_spectrum.py::TestTriSpectralPowerDistribution::test_required_attributes",
"colour/colorimetry/tests/tests_spectrum.py::TestTriSpectralPowerDistribution::test_required_methods",
"colour/colorimetry/tests/tests_spectrum.py::TestTriSpectralPowerDistribution::test_shape",
"colour/colorimetry/tests/tests_spectrum.py::TestTriSpectralPowerDistribution::test_values",
"colour/colorimetry/tests/tests_spectrum.py::TestTriSpectralPowerDistribution::test_wavelengths",
"colour/colorimetry/tests/tests_spectrum.py::TestTriSpectralPowerDistribution::test_zeros",
"colour/colorimetry/tests/tests_spectrum.py::TestConstantSpd::test_constant_spd",
"colour/colorimetry/tests/tests_spectrum.py::TestZerosSpd::test_zeros_spd",
"colour/colorimetry/tests/tests_spectrum.py::TestOnes_spd::test_ones_spd",
"colour/colorimetry/tests/tests_tristimulus.py::TestSpectral_to_XYZ::test_spectral_to_XYZ",
"colour/colorimetry/tests/tests_tristimulus.py::TestWavelength_to_XYZ::test_n_dimensional_wavelength_to_XYZ"
]
| [
"colour/colorimetry/tests/tests_spectrum.py::TestSpectralPowerDistribution::test_interpolate",
"colour/colorimetry/tests/tests_spectrum.py::TestTriSpectralPowerDistribution::test_interpolate",
"colour/colorimetry/tests/tests_tristimulus.py::TestWavelength_to_XYZ::test_wavelength_to_XYZ"
]
| []
| []
| BSD 3-Clause "New" or "Revised" License | 202 | [
"colour/plotting/colorimetry.py",
"colour/algebra/extrapolation.py",
"colour/algebra/__init__.py",
"colour/colorimetry/spectrum.py",
"colour/plotting/__init__.py",
"colour/notation/munsell.py",
"colour/examples/algebra/examples_interpolation.py",
"colour/plotting/common.py",
"colour/colorimetry/tristimulus.py",
"colour/algebra/interpolation.py"
]
| [
"colour/plotting/colorimetry.py",
"colour/algebra/extrapolation.py",
"colour/algebra/__init__.py",
"colour/colorimetry/spectrum.py",
"colour/plotting/__init__.py",
"colour/notation/munsell.py",
"colour/examples/algebra/examples_interpolation.py",
"colour/plotting/common.py",
"colour/colorimetry/tristimulus.py",
"colour/algebra/interpolation.py"
]
|
|
sigmavirus24__github3.py-421 | 95b8f36b296dc58954b053f88fe34a408944a921 | 2015-07-27 17:02:15 | 05ed0c6a02cffc6ddd0e82ce840c464e1c5fd8c4 | sigmavirus24: 2 things:
1. This is failing flake8 checks
2. This needs unittests | diff --git a/github3/github.py b/github3/github.py
index be5f5aab..9c61e4b9 100644
--- a/github3/github.py
+++ b/github3/github.py
@@ -78,6 +78,26 @@ class GitHub(GitHubCore):
url = self._build_url('events')
return self._iter(int(number), url, Event, etag=etag)
+ def all_organizations(self, number=-1, since=None, etag=None,
+ per_page=None):
+ """Iterate over every organization in the order they were created.
+
+ :param int number: (optional), number of organizations to return.
+ Default: -1, returns all of them
+ :param int since: (optional), last organization id seen (allows
+ restarting this iteration)
+ :param str etag: (optional), ETag from a previous request to the same
+ endpoint
+ :param int per_page: (optional), number of organizations to list per
+ request
+ :returns: generator of :class:`Organization
+ <github3.orgs.Organization>`
+ """
+ url = self._build_url('organizations')
+ return self._iter(int(number), url, Organization,
+ params={'since': since, 'per_page': per_page},
+ etag=etag)
+
def all_repositories(self, number=-1, since=None, etag=None,
per_page=None):
"""Iterate over every repository in the order they were created.
| Need a method to list all organizations on GitHub
https://developer.github.com/v3/orgs/#list-all-organizations was added recently. As a result we need to add `GitHub#all_organizations` to correspond with `GitHub#all_users`, `GitHub#all_events`, and `GitHub#all_repositories`. | sigmavirus24/github3.py | diff --git a/tests/cassettes/GitHub_all_organizations.json b/tests/cassettes/GitHub_all_organizations.json
new file mode 100644
index 00000000..d379a91d
--- /dev/null
+++ b/tests/cassettes/GitHub_all_organizations.json
@@ -0,0 +1,1 @@
+{"http_interactions": [{"request": {"uri": "https://api.github.com/organizations?per_page=25", "method": "GET", "headers": {"Accept": "application/vnd.github.v3.full+json", "Accept-Charset": "utf-8", "Content-Type": "application/json", "Accept-Encoding": "gzip, deflate", "Connection": "keep-alive", "User-Agent": "github3.py/1.0.0a2"}, "body": {"string": "", "encoding": "utf-8"}}, "recorded_at": "2015-07-27T16:50:16", "response": {"url": "https://api.github.com/organizations?per_page=25", "headers": {"x-github-media-type": "github.v3; param=full; format=json", "content-type": "application/json; charset=utf-8", "x-github-request-id": "3EBD09C7:4121:BE664F4:55B66147", "x-served-by": "065b43cd9674091fec48a221b420fbb3", "x-ratelimit-remaining": "58", "transfer-encoding": "chunked", "access-control-expose-headers": "ETag, Link, X-GitHub-OTP, X-RateLimit-Limit, X-RateLimit-Remaining, X-RateLimit-Reset, X-OAuth-Scopes, X-Accepted-OAuth-Scopes, X-Poll-Interval", "x-frame-options": "deny", "content-security-policy": "default-src 'none'", "status": "200 OK", "x-xss-protection": "1; mode=block", "date": "Mon, 27 Jul 2015 16:50:16 GMT", "x-ratelimit-limit": "60", "etag": "W/\"467f85325c09d7c93ffa03581d0b7de5\"", "cache-control": "public, max-age=60, s-maxage=60", "content-encoding": "gzip", "access-control-allow-origin": "*", "link": "<https://api.github.com/organizations?per_page=25&since=2595>; rel=\"next\", <https://api.github.com/organizations{?since}>; rel=\"first\"", "x-ratelimit-reset": "1438017277", "access-control-allow-credentials": "true", "vary": "Accept", "x-content-type-options": "nosniff", "server": "GitHub.com", "strict-transport-security": "max-age=31536000; includeSubdomains; preload"}, "status": {"message": "OK", "code": 200}, "body": {"base64_string": "H4sIAAAAAAAAA63ay27jNhSA4VcR1G0QX+JbAhRdzKqLAsV00UVRDGiZlomhRIGi7BqDefceySJFyRrMIXF2gyD/yYQfaElU/vmWSpWLMv1IudZnzXn6kopT+rHZvKSNlvD1izFV/bFYsEq85sJcmuNrpoqF0nm9GBrNK1V/QReL7vvhZ/ErL01I+AigLHhx5Dok7Ytvi8c/vsOQqjlKkX0JnzUO/ZHsygzT06Xovlj3C9jUXGeqNPCrd2vZLDab366/vsF/6MTrTIvKCAUmZSPl9xePqAQqfmf61CsdViglP0NDuSjCamhjuIaaRGwYR4x2WM2hpalPVohS1EbfM7Dmmlu41eodIzdTY/2mabji04QIy6cZFKJPQ4ldAWcOdrobMyUlz4y4cnHirN+Rq/UBA/vUYlnHYTjqpI8gnUygAJ2MpOZcH+Y40795cmyEPCW1Opsb0zwxCv4trzzRnMmk0uooeVG/jja0yjNLjbtGPgKsL3x3OGobRUi2GQVfO4faDHdBrNvf+ia0vWtZbZeY3ednWBjXhPMMaQTSEFNQDdOowbbLuU02/cyEq6C+39jdbqHdHuPlVVgum4RruTICy7UUVm4YNdVuj6EquD72TOsd6mmgL7BE7beH83RVBE3XUbB0g4hJYIFRJAru/uvqfuK6l3nb4u4jRyEaaKginLw4hsvLSdS8ecR4YIDBq+F+ojHCWLk9ak95FZbNJuFmrowAcy2FlhtGTbVH7bOb5mV2kezYnlB0pyEH1DYbdViuIQoH89oIMq+mQPPGEbPB8mN2mKiu8CSmCqa/ciPKs+r1ttst5i5jLsciPrXhls8jIkifh1DIPk8lBgahOeDxUYqG9ZCsOxl77Mo97ll71GFBhyhc0msjCL2aws4bR4wGyz+HNr3l10zIWqqre0Zb4m76/Q6tZn9WBJpLY8xcTELmphGLrWDp0WQnldunNORpZSf2yILAIIn0astYrrYl02qHUWP94Bgy/QSnU0bpOlHn5K8KXtkkrDwl5sKTX1b9Fz6pAh5TMp78DqfOue4+NJM/mTYlPDn4x9MlN3mjGwe92WGulEOFde6LcGYbRijblALZziI33uzmNmT6uTneEzD73G6pzrdQRyF5cmovgqoq4HAguSh4kTTivPHjRdWgPYCiDlj8DkvqmnDUIY1gHWIK2GEaPS3qs7ZmTV0JLq3YdoV6p+dlWDCbhHu5MoLLtRRabhg1Fqz63D6c3svcLvAOL5Oqce/xDhvck6EfYsGGnxZO5rURaF5NweaNo4aD9Z+DGz85GA0v6bo/N2gf5lfvuOeGocKC9UW4lg0jqGxK4WRnUSPBgs8hTXeXFCW8CSylUpWjwp08j0ssl1eFk/lxBJufU9D588j55k+n0z/YV/gDnPY29CLyS1JxfVa6YCXcd8JlNWFVBX9b0917Tt6hlspkl0NvvF4eUCehLsLyPoJw2b6LQO1LCs9+FDFlu9ZzO3H8cclPOc8Y3Fo+Pi/Xb0vUNc7LsEQ2CUdyZQSTaymg3DBqKlj1n1PlTLL/7rDJ7MVtvd2g/gBlFGK5higczGsjyLyaAs0bR80G6/9zttrAIx28DbI7bPuOOqH2MiyZTcLBXBnB5VoKLDeMnOp99tS5uy3593+CzYb+GisAAA==", "string": "", "encoding": "utf-8"}}}], "recorded_with": "betamax/0.4.2"}
\ No newline at end of file
diff --git a/tests/integration/test_github.py b/tests/integration/test_github.py
index 11067a7f..d27eb229 100644
--- a/tests/integration/test_github.py
+++ b/tests/integration/test_github.py
@@ -169,6 +169,13 @@ class TestGitHub(IntegrationHelper):
assert isinstance(i, github3.issues.Issue)
+ def test_all_organizations(self):
+ """Test the ability to iterate over all of the organizations."""
+ cassette_name = self.cassette_name('all_organizations')
+ with self.recorder.use_cassette(cassette_name):
+ for r in self.gh.all_organizations(number=25):
+ assert isinstance(r, github3.orgs.Organization)
+
def test_all_repositories(self):
"""Test the ability to iterate over all of the repositories."""
cassette_name = self.cassette_name('iter_all_repos')
diff --git a/tests/unit/test_github.py b/tests/unit/test_github.py
index dd98da37..8c7e10ca 100644
--- a/tests/unit/test_github.py
+++ b/tests/unit/test_github.py
@@ -317,6 +317,40 @@ class TestGitHubIterators(UnitIteratorHelper):
headers={}
)
+ def test_all_organizations(self):
+ """Show that one can iterate over all organizations."""
+ i = self.instance.all_organizations()
+ self.get_next(i)
+
+ self.session.get.assert_called_once_with(
+ url_for('organizations'),
+ params={'per_page': 100},
+ headers={}
+ )
+
+ def test_all_organizations_per_page(self):
+ """Show that one can iterate over all organizations with per_page."""
+ i = self.instance.all_organizations(per_page=25)
+ self.get_next(i)
+
+ self.session.get.assert_called_once_with(
+ url_for('organizations'),
+ params={'per_page': 25},
+ headers={}
+ )
+
+ def test_all_organizations_since(self):
+ """Show that one can limit the organizations returned."""
+ since = 100000
+ i = self.instance.all_organizations(since=since)
+ self.get_next(i)
+
+ self.session.get.assert_called_once_with(
+ url_for('organizations'),
+ params={'per_page': 100, 'since': since},
+ headers={}
+ )
+
def test_all_repositories(self):
"""Show that one can iterate over all repositories."""
i = self.instance.all_repositories()
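A minimal usage sketch of the `all_organizations` wrapper introduced in this change (the token value is a placeholder; any authenticated client works):

```python
# Minimal sketch: iterate over the first 25 organizations on GitHub.
import github3

gh = github3.login(token='<hypothetical-token>')
for org in gh.all_organizations(number=25):
    # Each item is a github3.orgs.Organization, per the new method's docstring.
    print(org)
```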
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | betamax==0.9.0
betamax-matchers==0.4.0
certifi==2025.1.31
charset-normalizer==3.4.1
coverage==7.8.0
exceptiongroup==1.2.2
execnet==2.1.1
-e git+https://github.com/sigmavirus24/github3.py.git@95b8f36b296dc58954b053f88fe34a408944a921#egg=github3.py
idna==3.10
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
requests==2.32.3
requests-toolbelt==1.0.0
tomli==2.2.1
typing_extensions==4.13.0
uritemplate==4.1.1
uritemplate.py==3.0.2
urllib3==2.3.0
| name: github3.py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- betamax==0.9.0
- betamax-matchers==0.4.0
- certifi==2025.1.31
- charset-normalizer==3.4.1
- coverage==7.8.0
- exceptiongroup==1.2.2
- execnet==2.1.1
- idna==3.10
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- requests==2.32.3
- requests-toolbelt==1.0.0
- tomli==2.2.1
- typing-extensions==4.13.0
- uritemplate==4.1.1
- uritemplate-py==3.0.2
- urllib3==2.3.0
prefix: /opt/conda/envs/github3.py
| [
"tests/integration/test_github.py::TestGitHub::test_all_organizations",
"tests/unit/test_github.py::TestGitHubIterators::test_all_organizations",
"tests/unit/test_github.py::TestGitHubIterators::test_all_organizations_per_page",
"tests/unit/test_github.py::TestGitHubIterators::test_all_organizations_since"
]
| [
"tests/integration/test_github.py::TestGitHub::test_update_me"
]
| [
"tests/integration/test_github.py::TestGitHub::test_all_events",
"tests/integration/test_github.py::TestGitHub::test_all_repositories",
"tests/integration/test_github.py::TestGitHub::test_all_users",
"tests/integration/test_github.py::TestGitHub::test_authorize",
"tests/integration/test_github.py::TestGitHub::test_create_gist",
"tests/integration/test_github.py::TestGitHub::test_create_issue",
"tests/integration/test_github.py::TestGitHub::test_create_key",
"tests/integration/test_github.py::TestGitHub::test_create_repository",
"tests/integration/test_github.py::TestGitHub::test_emojis",
"tests/integration/test_github.py::TestGitHub::test_feeds",
"tests/integration/test_github.py::TestGitHub::test_followers",
"tests/integration/test_github.py::TestGitHub::test_followers_of",
"tests/integration/test_github.py::TestGitHub::test_gist",
"tests/integration/test_github.py::TestGitHub::test_gitignore_template",
"tests/integration/test_github.py::TestGitHub::test_gitignore_templates",
"tests/integration/test_github.py::TestGitHub::test_is_following",
"tests/integration/test_github.py::TestGitHub::test_is_starred",
"tests/integration/test_github.py::TestGitHub::test_issue",
"tests/integration/test_github.py::TestGitHub::test_me",
"tests/integration/test_github.py::TestGitHub::test_meta",
"tests/integration/test_github.py::TestGitHub::test_non_existent_gitignore_template",
"tests/integration/test_github.py::TestGitHub::test_notifications",
"tests/integration/test_github.py::TestGitHub::test_notifications_all",
"tests/integration/test_github.py::TestGitHub::test_octocat",
"tests/integration/test_github.py::TestGitHub::test_organization",
"tests/integration/test_github.py::TestGitHub::test_pull_request",
"tests/integration/test_github.py::TestGitHub::test_rate_limit",
"tests/integration/test_github.py::TestGitHub::test_repositories",
"tests/integration/test_github.py::TestGitHub::test_repositories_by",
"tests/integration/test_github.py::TestGitHub::test_repository",
"tests/integration/test_github.py::TestGitHub::test_repository_with_id",
"tests/integration/test_github.py::TestGitHub::test_search_code",
"tests/integration/test_github.py::TestGitHub::test_search_code_with_text_match",
"tests/integration/test_github.py::TestGitHub::test_search_issues",
"tests/integration/test_github.py::TestGitHub::test_search_repositories",
"tests/integration/test_github.py::TestGitHub::test_search_repositories_with_text_match",
"tests/integration/test_github.py::TestGitHub::test_search_users",
"tests/integration/test_github.py::TestGitHub::test_search_users_with_text_match",
"tests/integration/test_github.py::TestGitHub::test_user",
"tests/integration/test_github.py::TestGitHub::test_user_teams",
"tests/integration/test_github.py::TestGitHub::test_user_with_id",
"tests/integration/test_github.py::TestGitHub::test_zen",
"tests/unit/test_github.py::TestGitHub::test_authorization",
"tests/unit/test_github.py::TestGitHub::test_authorize",
"tests/unit/test_github.py::TestGitHub::test_can_login_without_two_factor_callback",
"tests/unit/test_github.py::TestGitHub::test_check_authorization",
"tests/unit/test_github.py::TestGitHub::test_create_gist",
"tests/unit/test_github.py::TestGitHub::test_create_key",
"tests/unit/test_github.py::TestGitHub::test_create_key_requires_a_key",
"tests/unit/test_github.py::TestGitHub::test_create_key_requires_a_title",
"tests/unit/test_github.py::TestGitHub::test_create_repository",
"tests/unit/test_github.py::TestGitHub::test_emojis",
"tests/unit/test_github.py::TestGitHub::test_follow",
"tests/unit/test_github.py::TestGitHub::test_follow_requires_a_username",
"tests/unit/test_github.py::TestGitHub::test_gist",
"tests/unit/test_github.py::TestGitHub::test_gitignore_template",
"tests/unit/test_github.py::TestGitHub::test_gitignore_templates",
"tests/unit/test_github.py::TestGitHub::test_is_following",
"tests/unit/test_github.py::TestGitHub::test_is_starred",
"tests/unit/test_github.py::TestGitHub::test_is_starred_requires_a_repo",
"tests/unit/test_github.py::TestGitHub::test_is_starred_requires_an_owner",
"tests/unit/test_github.py::TestGitHub::test_issue",
"tests/unit/test_github.py::TestGitHub::test_issue_requires_positive_issue_id",
"tests/unit/test_github.py::TestGitHub::test_issue_requires_repository",
"tests/unit/test_github.py::TestGitHub::test_issue_requires_username",
"tests/unit/test_github.py::TestGitHub::test_me",
"tests/unit/test_github.py::TestGitHub::test_repository",
"tests/unit/test_github.py::TestGitHub::test_repository_with_id",
"tests/unit/test_github.py::TestGitHub::test_repository_with_id_accepts_a_string",
"tests/unit/test_github.py::TestGitHub::test_repository_with_id_requires_a_positive_id",
"tests/unit/test_github.py::TestGitHub::test_repository_with_invalid_repo",
"tests/unit/test_github.py::TestGitHub::test_repository_with_invalid_user",
"tests/unit/test_github.py::TestGitHub::test_repository_with_invalid_user_and_repo",
"tests/unit/test_github.py::TestGitHub::test_two_factor_login",
"tests/unit/test_github.py::TestGitHub::test_update_me",
"tests/unit/test_github.py::TestGitHub::test_user",
"tests/unit/test_github.py::TestGitHub::test_user_with_id",
"tests/unit/test_github.py::TestGitHub::test_user_with_id_accepts_a_string",
"tests/unit/test_github.py::TestGitHub::test_user_with_id_requires_a_positive_id",
"tests/unit/test_github.py::TestGitHubIterators::test_all_events",
"tests/unit/test_github.py::TestGitHubIterators::test_all_repositories",
"tests/unit/test_github.py::TestGitHubIterators::test_all_repositories_per_page",
"tests/unit/test_github.py::TestGitHubIterators::test_all_repositories_since",
"tests/unit/test_github.py::TestGitHubIterators::test_all_users",
"tests/unit/test_github.py::TestGitHubIterators::test_all_users_per_page",
"tests/unit/test_github.py::TestGitHubIterators::test_all_users_since",
"tests/unit/test_github.py::TestGitHubIterators::test_authorizations",
"tests/unit/test_github.py::TestGitHubIterators::test_emails",
"tests/unit/test_github.py::TestGitHubIterators::test_followed_by",
"tests/unit/test_github.py::TestGitHubIterators::test_followers",
"tests/unit/test_github.py::TestGitHubIterators::test_followers_of",
"tests/unit/test_github.py::TestGitHubIterators::test_followers_require_auth",
"tests/unit/test_github.py::TestGitHubIterators::test_following",
"tests/unit/test_github.py::TestGitHubIterators::test_following_require_auth",
"tests/unit/test_github.py::TestGitHubIterators::test_gists",
"tests/unit/test_github.py::TestGitHubIterators::test_gists_by",
"tests/unit/test_github.py::TestGitHubIterators::test_issues",
"tests/unit/test_github.py::TestGitHubIterators::test_issues_on",
"tests/unit/test_github.py::TestGitHubIterators::test_issues_on_with_params",
"tests/unit/test_github.py::TestGitHubIterators::test_issues_with_params",
"tests/unit/test_github.py::TestGitHubIterators::test_keys",
"tests/unit/test_github.py::TestGitHubIterators::test_notifications",
"tests/unit/test_github.py::TestGitHubIterators::test_notifications_all",
"tests/unit/test_github.py::TestGitHubIterators::test_notifications_participating_in",
"tests/unit/test_github.py::TestGitHubIterators::test_organization_issues",
"tests/unit/test_github.py::TestGitHubIterators::test_organization_issues_with_params",
"tests/unit/test_github.py::TestGitHubIterators::test_organizations",
"tests/unit/test_github.py::TestGitHubIterators::test_organizations_with",
"tests/unit/test_github.py::TestGitHubIterators::test_public_gists",
"tests/unit/test_github.py::TestGitHubIterators::test_repositories_by",
"tests/unit/test_github.py::TestGitHubIterators::test_repositories_by_with_type",
"tests/unit/test_github.py::TestGitHubIterators::test_respositories",
"tests/unit/test_github.py::TestGitHubIterators::test_respositories_accepts_params",
"tests/unit/test_github.py::TestGitHubIterators::test_starred",
"tests/unit/test_github.py::TestGitHubIterators::test_starred_by",
"tests/unit/test_github.py::TestGitHubIterators::test_subscriptions",
"tests/unit/test_github.py::TestGitHubIterators::test_subscriptions_for",
"tests/unit/test_github.py::TestGitHubIterators::test_user_issues",
"tests/unit/test_github.py::TestGitHubIterators::test_user_issues_with_parameters",
"tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_authorization",
"tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_authorizations",
"tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_create_issue",
"tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_create_key",
"tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_create_repository",
"tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_emails",
"tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_follow",
"tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_gists",
"tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_is_following",
"tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_is_starred",
"tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_issues",
"tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_keys",
"tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_me",
"tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_notifications",
"tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_organization_issues",
"tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_organizations",
"tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_repositories",
"tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_starred",
"tests/unit/test_github.py::TestGitHubRequiresAuthentication::test_user_issues",
"tests/unit/test_github.py::TestGitHubAuthorizations::test_revoke_authorization",
"tests/unit/test_github.py::TestGitHubAuthorizations::test_revoke_authorizations"
]
| []
| BSD 3-Clause "New" or "Revised" License | 203 | [
"github3/github.py"
]
| [
"github3/github.py"
]
|
PennChopMicrobiomeProgram__PhylogeneticProfiler-4 | fc368fcef164305e45513d27d592f0118229ec0e | 2015-07-29 20:38:42 | fc368fcef164305e45513d27d592f0118229ec0e | diff --git a/phyloprofilerlib/main.py b/phyloprofilerlib/main.py
index c7ba473..4105c14 100644
--- a/phyloprofilerlib/main.py
+++ b/phyloprofilerlib/main.py
@@ -52,7 +52,25 @@ class Metaphlan(object):
def run(self, R1, R2, out_dir):
command = self.make_command(R1, R2)
- subprocess.check_call(command, stdout=self.make_output_handle(R1, out_dir))
+ output = subprocess.check_output(command)
+ revised_output = self.revise_output(output)
+ with self.make_output_handle(R1, out_dir) as f:
+ f.write(revised_output)
+
+ @staticmethod
+ def revise_output(output):
+ output_lines = output.splitlines(True)
+ if len(output_lines) < 2:
+ raise ValueError("Output has fewer than 2 lines.")
+ elif len(output_lines) == 2:
+ return output
+ else:
+ header = output_lines.pop(0)
+ revised_output_lines = [header]
+ for line in output_lines:
+ if ("s__" in line) and not ("t__" in line):
+ revised_output_lines.append(line)
+ return "".join(revised_output_lines)
def main(argv=None):
| Sum of proportions should equal 1
MetaPhlAn prints the proportions for each level of the taxonomy, which results in the sum of proportions being (number of ranks) * 100. This kind of output is difficult for us to use downstream. It would be better for us to have the full taxonomic assignment at the species level only. If we want to summarize at a higher rank of the taxonomy, we can compute this downstream.
One complication is that, if no reads are assigned for a sample, no species-level output is generated. We will have to deal with this as a special case. | PennChopMicrobiomeProgram/PhylogeneticProfiler | diff --git a/test/test_main.py b/test/test_main.py
index 935b6e5..12ebbd8 100644
--- a/test/test_main.py
+++ b/test/test_main.py
@@ -19,6 +19,15 @@ class MetaphlanWrapperTest(unittest.TestCase):
self.r2 = "fake_genome1-R2.fastq"
self.out = "out"
+ def test_revise_output(self):
+ observed = Metaphlan.revise_output(ANELLO_FULL_OUTPUT)
+ self.assertEqual(observed, ANELLO_OUTPUT)
+
+ def test_revise_no_assignments(self):
+ """revise_output should not adjust output if no assignments are made."""
+ self.assertEqual(
+ Metaphlan.revise_output(UNCLASSIFIED_OUTPUT), UNCLASSIFIED_OUTPUT)
+
def test_main(self):
app = Metaphlan(self.config)
observed = app.make_command(self.r1, self.r2)
@@ -872,7 +881,7 @@ TCGTTGTAGTAAATACTGCGTGTCCCGGCAGATCACGCAGTATTTACTACAACGAAGGGGACATTTGAAGCCTATTTTGA
BC@A@GGGEGGGGFFG>CDF/;E=CD<E/CEEE@FB<D>CGGGGCEGGGGGDECEGGGGG/EDGECCFGGEGGGG00FGG>CFGGEGBGGDG0BFGGGGGGGGGDACDGGG...68.@
"""
-ANELLO_OUTPUT = """\
+ANELLO_FULL_OUTPUT = """\
#SampleID Metaphlan2_Analysis
k__Viruses 100.0
k__Viruses|p__Viruses_noname 100.0
@@ -883,3 +892,13 @@ k__Viruses|p__Viruses_noname|c__Viruses_noname|o__Viruses_noname|f__Anellovirida
k__Viruses|p__Viruses_noname|c__Viruses_noname|o__Viruses_noname|f__Anelloviridae|g__Alphatorquevirus|s__Torque_teno_virus_1 100.0
k__Viruses|p__Viruses_noname|c__Viruses_noname|o__Viruses_noname|f__Anelloviridae|g__Alphatorquevirus|s__Torque_teno_virus_1|t__PRJNA15247 100.0
"""
+
+UNCLASSIFIED_OUTPUT = """\
+#SampleID Metaphlan2_Analysis
+unclassified 100.0
+"""
+
+ANELLO_OUTPUT = """\
+#SampleID Metaphlan2_Analysis
+k__Viruses|p__Viruses_noname|c__Viruses_noname|o__Viruses_noname|f__Anelloviridae|g__Alphatorquevirus|s__Torque_teno_virus_1 100.0
+"""
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "",
"pip_packages": [
"numpy>=1.16.0",
"pandas>=1.0.0",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
iniconfig==2.1.0
numpy==2.0.2
packaging==24.2
pandas==2.2.3
-e git+https://github.com/PennChopMicrobiomeProgram/PhylogeneticProfiler.git@fc368fcef164305e45513d27d592f0118229ec0e#egg=PhyloProfiler
pluggy==1.5.0
pytest==8.3.5
python-dateutil==2.9.0.post0
pytz==2025.2
six==1.17.0
tomli==2.2.1
tzdata==2025.2
| name: PhylogeneticProfiler
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- numpy==2.0.2
- packaging==24.2
- pandas==2.2.3
- pluggy==1.5.0
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- pytz==2025.2
- six==1.17.0
- tomli==2.2.1
- tzdata==2025.2
prefix: /opt/conda/envs/PhylogeneticProfiler
| [
"test/test_main.py::MetaphlanWrapperTest::test_revise_no_assignments",
"test/test_main.py::MetaphlanWrapperTest::test_revise_output"
]
| [
"test/test_main.py::MainTests::test_main_function"
]
| [
"test/test_main.py::MetaphlanWrapperTest::test_main"
]
| []
| null | 204 | [
"phyloprofilerlib/main.py"
]
| [
"phyloprofilerlib/main.py"
]
|
|
scrapinghub__shub-53 | 7d6eb64b6bfa5de78f20629bfd92922489bd187d | 2015-08-05 17:06:56 | 7e4919dba6005af10818ce087a19239454c04699 | diff --git a/shub/login.py b/shub/login.py
index af5f8aa..4699e07 100644
--- a/shub/login.py
+++ b/shub/login.py
@@ -10,7 +10,8 @@ from six.moves import input
@click.pass_context
def cli(context):
if auth.get_key_netrc():
- context.fail('Already logged in. To logout use: shub logout')
+ log("You're already logged in. To change credentials, use 'shub logout' first.")
+ return 0
cfg_key = _find_cfg_key()
key = _prompt_for_key(suggestion=cfg_key)
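A minimal sketch of the revised behaviour driven through Click's test runner (the `shub.login` import path is assumed from the project's tests, and `get_key_netrc` is stubbed so no real `.netrc` is touched):

```python
# Minimal sketch: with credentials already stored, login now exits 0 politely.
from unittest import mock
from click.testing import CliRunner
from shub import login  # import path assumed from the test suite

with mock.patch.object(login.auth, 'get_key_netrc', return_value='existing-key'):
    result = CliRunner().invoke(login.cli)

assert result.exit_code == 0
assert 'already logged in' in result.output
```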
| login while already logged-in shows error
If one is already logged in, `shub login` throws an error, even though it could say something less "dramatic".
Current behavior:
```
$ shub login
Usage: shub login [OPTIONS]
Error: Already logged in. To logout use: shub logout
```
- no need to show "Usage" as it's correct
- "Error" is misleading. it could say "You're already logged in. Nothing to do. If you want to login with another API key, use `logout` first", or something along those lines | scrapinghub/shub | diff --git a/tests/test_login.py b/tests/test_login.py
index 48ba3b9..00c195e 100644
--- a/tests/test_login.py
+++ b/tests/test_login.py
@@ -65,3 +65,17 @@ username = KEY_SUGGESTION
# then
self.assertEqual(0, result.exit_code, result.exception)
+
+ def test_login_attempt_after_login_doesnt_lead_to_an_error(self):
+ with self.runner.isolated_filesystem() as fs:
+ login.auth.NETRC_FILE = os.path.join(fs, '.netrc')
+
+ # given
+ self.runner.invoke(login.cli, input=self.VALID_KEY)
+
+ # when
+ result = self.runner.invoke(login.cli, input=self.VALID_KEY)
+
+ # then
+ self.assertEqual(0, result.exit_code)
+ self.assertTrue('already logged in' in result.output)
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 1
} | 1.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"mock",
"scrapy"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
Automat==24.8.1
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
click==8.1.8
constantly==23.10.4
cryptography==44.0.2
cssselect==1.3.0
defusedxml==0.7.1
exceptiongroup==1.2.2
filelock==3.18.0
hyperlink==21.0.0
idna==3.10
incremental==24.7.2
iniconfig==2.1.0
itemadapter==0.11.0
itemloaders==1.3.2
jmespath==1.0.1
lxml==5.3.1
mock==5.2.0
packaging==24.2
parsel==1.10.0
pluggy==1.5.0
Protego==0.4.0
pyasn1==0.6.1
pyasn1_modules==0.4.2
pycparser==2.22
PyDispatcher==2.0.7
pyOpenSSL==25.0.0
pytest==8.3.5
queuelib==1.7.0
requests==2.32.3
requests-file==2.1.0
Scrapy==2.12.0
service-identity==24.2.0
-e git+https://github.com/scrapinghub/shub.git@7d6eb64b6bfa5de78f20629bfd92922489bd187d#egg=shub
six==1.17.0
tldextract==5.1.3
tomli==2.2.1
Twisted==24.11.0
typing_extensions==4.13.0
urllib3==2.3.0
w3lib==2.3.1
zope.interface==7.2
| name: shub
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- automat==24.8.1
- certifi==2025.1.31
- cffi==1.17.1
- charset-normalizer==3.4.1
- click==8.1.8
- constantly==23.10.4
- cryptography==44.0.2
- cssselect==1.3.0
- defusedxml==0.7.1
- exceptiongroup==1.2.2
- filelock==3.18.0
- hyperlink==21.0.0
- idna==3.10
- incremental==24.7.2
- iniconfig==2.1.0
- itemadapter==0.11.0
- itemloaders==1.3.2
- jmespath==1.0.1
- lxml==5.3.1
- mock==5.2.0
- packaging==24.2
- parsel==1.10.0
- pluggy==1.5.0
- protego==0.4.0
- pyasn1==0.6.1
- pyasn1-modules==0.4.2
- pycparser==2.22
- pydispatcher==2.0.7
- pyopenssl==25.0.0
- pytest==8.3.5
- queuelib==1.7.0
- requests==2.32.3
- requests-file==2.1.0
- scrapy==2.12.0
- service-identity==24.2.0
- six==1.17.0
- tldextract==5.1.3
- tomli==2.2.1
- twisted==24.11.0
- typing-extensions==4.13.0
- urllib3==2.3.0
- w3lib==2.3.1
- zope-interface==7.2
prefix: /opt/conda/envs/shub
| [
"tests/test_login.py::LoginTest::test_login_attempt_after_login_doesnt_lead_to_an_error"
]
| []
| [
"tests/test_login.py::LoginTest::test_login_can_handle_invalid_scrapy_cfg",
"tests/test_login.py::LoginTest::test_login_suggests_scrapy_cfg_username_as_key",
"tests/test_login.py::LoginTest::test_login_suggests_shub_apikey_as_key",
"tests/test_login.py::LoginTest::test_login_validates_api_key",
"tests/test_login.py::LoginTest::test_login_writes_input_key_to_netrc_file"
]
| []
| BSD 3-Clause "New" or "Revised" License | 205 | [
"shub/login.py"
]
| [
"shub/login.py"
]
|
|
DataDog__datadogpy-76 | 2c49469f45e6f84f17ba72875a97976277ce1a14 | 2015-08-06 18:16:53 | 2c49469f45e6f84f17ba72875a97976277ce1a14 | diff --git a/datadog/__init__.py b/datadog/__init__.py
index 92c4d55..2dbe54e 100644
--- a/datadog/__init__.py
+++ b/datadog/__init__.py
@@ -7,14 +7,17 @@ It contains:
without hindering performance.
* datadog.dogshell: a command-line tool, wrapping datadog.api, to interact with Datadog REST API.
"""
+# stdlib
from pkg_resources import get_distribution, DistributionNotFound
import os
import os.path
+# datadog
from datadog import api
from datadog.dogstatsd import statsd
from datadog.threadstats import ThreadStats # noqa
from datadog.util.hostname import get_hostname
+from datadog.util.compat import iteritems
try:
@@ -32,7 +35,7 @@ else:
def initialize(api_key=None, app_key=None, host_name=None, api_host=None,
- proxies=None, statsd_host=None, statsd_port=None, cacert=True):
+ statsd_host=None, statsd_port=None, **kwargs):
"""
Initialize and configure Datadog.api and Datadog.statsd modules
@@ -58,17 +61,23 @@ def initialize(api_key=None, app_key=None, host_name=None, api_host=None,
certificates. Can also be set to True (default) to use the systems
certificate store, or False to skip SSL verification
:type cacert: path or boolean
+
+ :param mute: Mute any exceptions before they escape from the library (default: True).
+ :type mute: boolean
"""
- # Configure api
+ # API configuration
api._api_key = api_key if api_key is not None else os.environ.get('DATADOG_API_KEY')
api._application_key = app_key if app_key is not None else os.environ.get('DATADOG_APP_KEY')
api._host_name = host_name if host_name is not None else get_hostname()
api._api_host = api_host if api_host is not None else \
os.environ.get('DATADOG_HOST', 'https://app.datadoghq.com')
- api._proxies = proxies
- api._cacert = cacert
- # Given statsd_host and statsd_port, overrides statsd instance
+ # Statsd configuration -overrides default statsd instance attributes-
if statsd_host and statsd_port:
statsd.host = statsd_host
statsd.port = int(statsd_port)
+
+ # HTTP client and API options
+ for key, value in iteritems(kwargs):
+ attribute = "_{0}".format(key)
+ setattr(api, attribute, value)
diff --git a/datadog/api/__init__.py b/datadog/api/__init__.py
index 9570eff..3ce7b85 100644
--- a/datadog/api/__init__.py
+++ b/datadog/api/__init__.py
@@ -14,7 +14,7 @@ _timeout = 3
_max_timeouts = 3
_max_retries = 3
_backoff_period = 300
-_swallow = True
+_mute = True
# Resources
from datadog.api.comments import Comment
diff --git a/datadog/api/base.py b/datadog/api/base.py
index 4c312ff..b618186 100644
--- a/datadog/api/base.py
+++ b/datadog/api/base.py
@@ -61,7 +61,7 @@ class HTTPClient(object):
# Import API, User and HTTP settings
from datadog.api import _api_key, _application_key, _api_host, \
- _swallow, _host_name, _proxies, _max_retries, _timeout, \
+ _mute, _host_name, _proxies, _max_retries, _timeout, \
_cacert
# Check keys and add then to params
@@ -124,6 +124,7 @@ class HTTPClient(object):
raise HttpTimeout('%s %s timed out after %d seconds.' % (method, url, _timeout))
except requests.exceptions.HTTPError as e:
if e.response.status_code in (400, 403, 404):
+ # This gets caught afterwards and raises an ApiError exception
pass
else:
raise
@@ -160,7 +161,7 @@ class HTTPClient(object):
return response_formatter(response_obj)
except ClientError as e:
- if _swallow:
+ if _mute:
log.error(str(e))
if error_formatter is None:
return {'errors': e.args[0]}
@@ -169,7 +170,7 @@ class HTTPClient(object):
else:
raise
except ApiError as e:
- if _swallow:
+ if _mute:
for error in e.args[0]['errors']:
log.error(str(error))
if error_formatter is None:
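A minimal usage sketch of the `mute` option this change introduces (the keys are placeholders, `Event` stands in for any API resource, and `ApiError` lives in `datadog.api.exceptions`, as the tests below import it):

```python
# Minimal sketch: disable muting so API errors surface as exceptions.
from datadog import initialize, api
from datadog.api.exceptions import ApiError

initialize(api_key='<hypothetical-api-key>',
           app_key='<hypothetical-app-key>',
           mute=False)  # forwarded to api._mute through **kwargs

try:
    api.Event.create(title='deploy', text='v1.2.3 rolled out')
except ApiError as exc:
    # With the default mute=True this would be logged and returned as
    # {'errors': ...} instead of raised.
    print('API rejected the request:', exc)
```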
| Error handling: APIError is never raised
This library never returns the status code of the HTTP request.
Instead, when the status code is 400, 403 or 404, it is supposed to raise an `ApiError` exception with the content of the errors field received from the dispatcher (https://github.com/DataDog/datadogpy/blob/master/datadog/api/base.py#L152).
For all other 3xx, 4xx and 5xx status codes, an `HTTPError` is raised.
So catching errors and interacting with the API should be a matter of catching `ApiError` and `HTTPError` exceptions.
But the `_swallow` attribute prevents `ApiError` from being raised (https://github.com/DataDog/datadogpy/blob/master/datadog/api/base.py#L171-L172). It's set to True in api/__init__.py and is not modifiable at the moment.
A temporary workaround is `from datadog import api; api._swallow = False`.
We could evaluate the impact of keeping this swallowing behaviour enabled by default while making it configurable when initializing the API. | DataDog/datadogpy | diff --git a/tests/unit/api/helper.py b/tests/unit/api/helper.py
index 6db98ca..624c160 100644
--- a/tests/unit/api/helper.py
+++ b/tests/unit/api/helper.py
@@ -5,6 +5,7 @@ import unittest
from datadog import initialize, api
from datadog.api.base import CreateableAPIResource, UpdatableAPIResource, DeletableAPIResource,\
GetableAPIResource, ListableAPIResource, ActionAPIResource
+from datadog.api.exceptions import ApiError
from datadog.util.compat import iteritems, json
# 3p
@@ -20,11 +21,16 @@ FAKE_PROXY = {
}
-class MockReponse(requests.Response):
- content = None
+class MockResponse(requests.Response):
+
+ def __init__(self, raise_for_status=False):
+ super(MockResponse, self).__init__()
+ self._raise_for_status = raise_for_status
def raise_for_status(self):
- pass
+ if not self._raise_for_status:
+ return
+ raise ApiError({'errors': ""})
# A few API Resources
@@ -68,32 +74,42 @@ class DatadogAPITestCase(unittest.TestCase):
self.request_patcher = patch('requests.Session')
request_class_mock = self.request_patcher.start()
self.request_mock = request_class_mock.return_value
- self.request_mock.request = Mock(return_value=MockReponse())
+ self.request_mock.request = Mock(return_value=MockResponse())
- def get_request_data(self):
+ def tearDown(self):
+ self.request_patcher.stop()
+
+ def arm_requests_to_raise(self):
"""
+ Arm the mocked request to raise for status.
+ """
+ self.request_mock.request = Mock(return_value=MockResponse(raise_for_status=True))
+ def get_request_data(self):
+ """
+ Returns JSON formatted data from the submitted `requests`.
"""
_, kwargs = self.request_mock.request.call_args
return json.loads(kwargs['data'])
def request_called_with(self, method, url, data=None, params=None):
(req_method, req_url), others = self.request_mock.request.call_args
- assert method == req_method, req_method
- assert url == req_url, req_url
+ self.assertEquals(method, req_method, req_method)
+ self.assertEquals(url, req_url, req_url)
if data:
- assert 'data' in others
- assert json.dumps(data) == others['data'], others['data']
+ self.assertIn('data', others)
+ self.assertEquals(json.dumps(data), others['data'], others['data'])
if params:
- assert 'params' in others
+ self.assertIn('params', others)
for (k, v) in iteritems(params):
- assert k in others['params'], others['params']
- assert v == others['params'][k]
+ self.assertIn(k, others['params'], others['params'])
+ self.assertEquals(v, others['params'][k])
- def tearDown(self):
- self.request_patcher.stop()
+ def assertIn(self, first, second, msg=None):
+ msg = msg or "{0} not in {1}".format(first, second)
+ self.assertTrue(first in second, msg)
class DatadogAPINoInitialization(DatadogAPITestCase):
diff --git a/tests/unit/api/test_api.py b/tests/unit/api/test_api.py
index b34a49a..c2566f7 100644
--- a/tests/unit/api/test_api.py
+++ b/tests/unit/api/test_api.py
@@ -7,12 +7,11 @@ from time import time
# 3p
import mock
-from nose.tools import assert_raises, assert_true, assert_false
# datadog
from datadog import initialize, api
from datadog.api import Metric
-from datadog.api.exceptions import ApiNotInitialized
+from datadog.api.exceptions import ApiError, ApiNotInitialized
from datadog.util.compat import is_p3k
from tests.unit.api.helper import (
DatadogAPIWithInitialization,
@@ -51,21 +50,27 @@ def preserve_environ_datadog(func):
class TestInitialization(DatadogAPINoInitialization):
- def test_no_initialization_fails(self, test='sisi'):
- assert_raises(ApiNotInitialized, MyCreatable.create)
+ def test_no_initialization_fails(self):
+ """
+ Raise ApiNotInitialized exception when `initialize` has not ran or no API key was set.
+ """
+ self.assertRaises(ApiNotInitialized, MyCreatable.create)
# No API key => only stats in statsd mode should work
initialize()
api._api_key = None
- assert_raises(ApiNotInitialized, MyCreatable.create)
+ self.assertRaises(ApiNotInitialized, MyCreatable.create)
# Finally, initialize with an API key
initialize(api_key=API_KEY, api_host=API_HOST)
MyCreatable.create()
- assert self.request_mock.request.call_count == 1
+ self.assertEquals(self.request_mock.request.call_count, 1)
@mock.patch('datadog.util.config.get_config_path')
def test_get_hostname(self, mock_config_path):
+ """
+ API hostname parameter fallback with Datadog Agent hostname when available.
+ """
# Generate a fake agent config
tmpfilepath = os.path.join(tempfile.gettempdir(), "tmp-agentconfig")
with open(tmpfilepath, "wb") as f:
@@ -79,31 +84,64 @@ class TestInitialization(DatadogAPINoInitialization):
mock_config_path.return_value = tmpfilepath
initialize()
- assert api._host_name == HOST_NAME, api._host_name
+ self.assertEquals(api._host_name, HOST_NAME, api._host_name)
def test_request_parameters(self):
- # Test API, application keys, API host and proxies
- initialize(api_key=API_KEY, app_key=APP_KEY, api_host=API_HOST, proxies=FAKE_PROXY)
+ """
+ API parameters are set with `initialize` method.
+ """
+ # Test API, application keys, API host, and some HTTP client options
+ initialize(api_key=API_KEY, app_key=APP_KEY, api_host=API_HOST)
# Make a simple API call
MyCreatable.create()
_, options = self.request_mock.request.call_args
- assert 'params' in options
+ # Assert `requests` parameters
+ self.assertIn('params', options)
- assert 'api_key' in options['params']
- assert options['params']['api_key'] == API_KEY
- assert 'application_key' in options['params']
- assert options['params']['application_key'] == APP_KEY
+ self.assertIn('api_key', options['params'])
+ self.assertEquals(options['params']['api_key'], API_KEY)
+ self.assertIn('application_key', options['params'])
+ self.assertEquals(options['params']['application_key'], APP_KEY)
- assert 'proxies' in options
- assert options['proxies'] == FAKE_PROXY
+ self.assertIn('headers', options)
+ self.assertEquals(options['headers'], {'Content-Type': 'application/json'})
- assert 'headers' in options
- assert options['headers'] == {'Content-Type': 'application/json'}
+ def test_initialize_options(self):
+ """
+ HTTP client and API options are set with `initialize` method.
+ """
+ initialize(api_key=API_KEY, app_key=APP_KEY, api_host=API_HOST,
+ proxies=FAKE_PROXY, cacert=False)
+
+ # Make a simple API call
+ MyCreatable.create()
+
+ _, options = self.request_mock.request.call_args
+
+ # Assert `requests` parameters
+ self.assertIn('proxies', options)
+ self.assertEquals(options['proxies'], FAKE_PROXY)
+
+ self.assertIn('verify', options)
+ self.assertEquals(options['verify'], False)
+
+        # Arm the `requests` mock to raise
+ self.arm_requests_to_raise()
+
+ # No exception should be raised (mute=True by default)
+ MyCreatable.create()
+
+ # Repeat with mute to False
+ initialize(api_key=API_KEY, mute=False)
+ self.assertRaises(ApiError, MyCreatable.create)
def test_initialization_from_env(self):
+ """
+ Set API parameters in `initialize` from environment variables.
+ """
@preserve_environ_datadog
def test_api_params_from_env(env_name, attr_name, env_value):
"""
@@ -156,6 +194,9 @@ class TestInitialization(DatadogAPINoInitialization):
class TestResources(DatadogAPIWithInitialization):
def test_creatable(self):
+ """
+ Creatable resource logic.
+ """
MyCreatable.create(mydata="val")
self.request_called_with('POST', "host/api/v1/creatables", data={'mydata': "val"})
@@ -164,28 +205,43 @@ class TestResources(DatadogAPIWithInitialization):
data={'mydata': "val", 'host': api._host_name})
def test_getable(self):
+ """
+ Getable resource logic.
+ """
getable_object_id = 123
MyGetable.get(getable_object_id, otherparam="val")
self.request_called_with('GET', "host/api/v1/getables/" + str(getable_object_id),
params={'otherparam': "val"})
def test_listable(self):
+ """
+ Listable resource logic.
+ """
MyListable.get_all(otherparam="val")
self.request_called_with('GET', "host/api/v1/listables", params={'otherparam': "val"})
def test_updatable(self):
+ """
+ Updatable resource logic.
+ """
updatable_object_id = 123
MyUpdatable.update(updatable_object_id, params={'myparam': "val1"}, mydata="val2")
self.request_called_with('PUT', "host/api/v1/updatables/" + str(updatable_object_id),
params={'myparam': "val1"}, data={'mydata': "val2"})
def test_detalable(self):
+ """
+ Deletable resource logic.
+ """
deletable_object_id = 123
MyDeletable.delete(deletable_object_id, otherparam="val")
self.request_called_with('DELETE', "host/api/v1/deletables/" + str(deletable_object_id),
params={'otherparam': "val"})
def test_actionable(self):
+ """
+ Actionable resource logic.
+ """
actionable_object_id = 123
MyActionable.trigger_class_action('POST', "actionname", id=actionable_object_id,
mydata="val")
@@ -214,17 +270,18 @@ class TestMetricResource(DatadogAPIWithInitialization):
payload = self.get_request_data()
for i, metric in enumerate(payload['series']):
- assert set(metric.keys()) == set(['metric', 'points', 'host'])
+ self.assertEquals(set(metric.keys()), set(['metric', 'points', 'host']))
- assert metric['metric'] == serie[i]['metric']
- assert metric['host'] == api._host_name
+ self.assertEquals(metric['metric'], serie[i]['metric'])
+ self.assertEquals(metric['host'], api._host_name)
# points is a list of 1 point
- assert isinstance(metric['points'], list) and len(metric['points']) == 1
+ self.assertTrue(isinstance(metric['points'], list))
+ self.assertEquals(len(metric['points']), 1)
# it consists of a [time, value] pair
- assert len(metric['points'][0]) == 2
+ self.assertEquals(len(metric['points'][0]), 2)
# its value == value we sent
- assert metric['points'][0][1] == serie[i]['points']
+ self.assertEquals(metric['points'][0][1], serie[i]['points'])
# it's time not so far from current time
assert now - 1 < metric['points'][0][0] < now + 1
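For reference, the behaviour exercised by `test_initialize_options` above corresponds to the following usage sketch (built only from the calls visible in this diff; the key and proxy values are placeholders, not real credentials):

    from datadog import initialize

    initialize(api_key='<api key>', app_key='<app key>',
               proxies={'https': 'host:3128'},  # forwarded to `requests` as `proxies`
               cacert=False,                    # forwarded to `requests` as `verify`
               mute=True)                       # swallow ApiError instead of raising (the default)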
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 3
} | 0.8 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"six",
"mock",
"pytest"
],
"pre_install": [],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
-e git+https://github.com/DataDog/datadogpy.git@2c49469f45e6f84f17ba72875a97976277ce1a14#egg=datadog
decorator==5.2.1
exceptiongroup==1.2.2
idna==3.10
iniconfig==2.1.0
mock==5.2.0
nose==1.3.7
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
requests==2.32.3
six==1.17.0
tomli==2.2.1
urllib3==2.3.0
| name: datadogpy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- decorator==5.2.1
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- mock==5.2.0
- nose==1.3.7
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- requests==2.32.3
- six==1.17.0
- tomli==2.2.1
- urllib3==2.3.0
prefix: /opt/conda/envs/datadogpy
| [
"tests/unit/api/test_api.py::TestInitialization::test_initialize_options"
]
| []
| [
"tests/unit/api/test_api.py::TestInitialization::test_get_hostname",
"tests/unit/api/test_api.py::TestInitialization::test_initialization_from_env",
"tests/unit/api/test_api.py::TestInitialization::test_no_initialization_fails",
"tests/unit/api/test_api.py::TestInitialization::test_request_parameters",
"tests/unit/api/test_api.py::TestResources::test_actionable",
"tests/unit/api/test_api.py::TestResources::test_creatable",
"tests/unit/api/test_api.py::TestResources::test_detalable",
"tests/unit/api/test_api.py::TestResources::test_getable",
"tests/unit/api/test_api.py::TestResources::test_listable",
"tests/unit/api/test_api.py::TestResources::test_updatable",
"tests/unit/api/test_api.py::TestMetricResource::test_metric_submit_query_switch",
"tests/unit/api/test_api.py::TestMetricResource::test_points_submission"
]
| []
| BSD-3-Clause | 206 | [
"datadog/__init__.py",
"datadog/api/base.py",
"datadog/api/__init__.py"
]
| [
"datadog/__init__.py",
"datadog/api/base.py",
"datadog/api/__init__.py"
]
|
|
zalando-stups__pierone-cli-15 | 49f45a29f0890a35a6e53754f9cdcadd5924ab21 | 2015-08-07 11:15:04 | 49f45a29f0890a35a6e53754f9cdcadd5924ab21 | diff --git a/pierone/cli.py b/pierone/cli.py
index 786b144..436bacd 100644
--- a/pierone/cli.py
+++ b/pierone/cli.py
@@ -208,5 +208,31 @@ def scm_source(config, team, artifact, tag, output):
max_column_widths={'revision': 10})
[email protected]('image')
[email protected]('image')
+@output_option
[email protected]_obj
+def image(config, image, output):
+ '''List tags that point to this image'''
+ token = get_token()
+
+ resp = request(config.get('url'), '/tags/{}'.format(image), token['access_token'])
+
+ if resp.status_code == 404:
+ click.echo('Image {} not found'.format(image))
+ return
+
+ if resp.status_code == 412:
+ click.echo('Prefix {} matches more than one image.'.format(image))
+ return
+
+ tags = resp.json()
+
+ with OutputFormat(output):
+ print_table(['team', 'artifact', 'name'],
+ tags,
+ titles={'name': 'Tag', 'artifact': 'Artifact', 'team': 'Team'})
+
+
def main():
cli()
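For context, the test patch further below exercises this new command roughly as follows (a sketch: the `pierone` console-script name and the exact table layout are assumptions, and `abcd` is an arbitrary image ID prefix):

    $ pierone image abcd
    Team |Artifact|Tag
    stups|kio     |1.0

A 404 from the API prints 'Image abcd not found', and a 412 prints the 'matches more than one image' message, mirroring the two early returns above.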
| Support reverse image search
This can be implemented as soon as https://github.com/zalando-stups/pierone/issues/33 is done. | zalando-stups/pierone-cli | diff --git a/tests/test_cli.py b/tests/test_cli.py
index f2b360c..b319cb3 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -49,3 +49,20 @@ def test_scm_source(monkeypatch, tmpdir):
result = runner.invoke(cli, ['scm-source', 'myteam', 'myart', '1.0'], catch_exceptions=False)
assert 'myrev123' in result.output
assert 'git:somerepo' in result.output
+
+def test_image(monkeypatch, tmpdir):
+ response = MagicMock()
+ response.json.return_value = [{'name': '1.0', 'team': 'stups', 'artifact': 'kio'}]
+
+ runner = CliRunner()
+ monkeypatch.setattr('pierone.cli.CONFIG_FILE_PATH', 'config.yaml')
+ monkeypatch.setattr('pierone.cli.get_named_token', MagicMock(return_value={'access_token': 'tok123'}))
+ monkeypatch.setattr('os.path.expanduser', lambda x: x.replace('~', str(tmpdir)))
+ monkeypatch.setattr('requests.get', MagicMock(return_value=response))
+ with runner.isolated_filesystem():
+ with open('config.yaml', 'w') as fd:
+ fd.write('')
+ result = runner.invoke(cli, ['image', 'abcd'], catch_exceptions=False)
+ assert 'kio' in result.output
+ assert 'stups' in result.output
+ assert '1.0' in result.output
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 3,
"test_score": 2
},
"num_modified_files": 1
} | 0.16 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | backports.tarfile==1.2.0
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
click==8.1.8
clickclick==20.10.2
coverage==7.8.0
cryptography==44.0.2
dnspython==2.7.0
exceptiongroup==1.2.2
idna==3.10
importlib_metadata==8.6.1
iniconfig==2.1.0
jaraco.classes==3.4.0
jaraco.context==6.0.1
jaraco.functools==4.1.0
jeepney==0.9.0
keyring==25.6.0
more-itertools==10.6.0
packaging==24.2
pluggy==1.5.0
pycparser==2.22
pytest==8.3.5
pytest-cov==6.0.0
PyYAML==6.0.2
requests==2.32.3
SecretStorage==3.3.3
stups-cli-support==1.1.22
-e git+https://github.com/zalando-stups/pierone-cli.git@49f45a29f0890a35a6e53754f9cdcadd5924ab21#egg=stups_pierone
stups-tokens==1.1.19
stups-zign==1.2
tomli==2.2.1
urllib3==2.3.0
zipp==3.21.0
| name: pierone-cli
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- backports-tarfile==1.2.0
- certifi==2025.1.31
- cffi==1.17.1
- charset-normalizer==3.4.1
- click==8.1.8
- clickclick==20.10.2
- coverage==7.8.0
- cryptography==44.0.2
- dnspython==2.7.0
- exceptiongroup==1.2.2
- idna==3.10
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- jaraco-classes==3.4.0
- jaraco-context==6.0.1
- jaraco-functools==4.1.0
- jeepney==0.9.0
- keyring==25.6.0
- more-itertools==10.6.0
- packaging==24.2
- pluggy==1.5.0
- pycparser==2.22
- pytest==8.3.5
- pytest-cov==6.0.0
- pyyaml==6.0.2
- requests==2.32.3
- secretstorage==3.3.3
- stups-cli-support==1.1.22
- stups-tokens==1.1.19
- stups-zign==1.2
- tomli==2.2.1
- urllib3==2.3.0
- zipp==3.21.0
prefix: /opt/conda/envs/pierone-cli
| [
"tests/test_cli.py::test_image"
]
| []
| [
"tests/test_cli.py::test_version",
"tests/test_cli.py::test_login",
"tests/test_cli.py::test_scm_source"
]
| []
| Apache License 2.0 | 207 | [
"pierone/cli.py"
]
| [
"pierone/cli.py"
]
|
|
softlayer__softlayer-python-602 | 1195b2020ef6efc40462d59eb079f26e5f39a6d8 | 2015-08-10 15:51:18 | 1195b2020ef6efc40462d59eb079f26e5f39a6d8 | diff --git a/SoftLayer/CLI/server/detail.py b/SoftLayer/CLI/server/detail.py
index 1b9d588e..9fc76c5d 100644
--- a/SoftLayer/CLI/server/detail.py
+++ b/SoftLayer/CLI/server/detail.py
@@ -62,14 +62,11 @@ def cli(env, identifier, passwords, price):
table.add_row(
['created', result['provisionDate'] or formatting.blank()])
- if utils.lookup(result, 'billingItem') != []:
- table.add_row(['owner', formatting.FormattedItem(
- utils.lookup(result, 'billingItem', 'orderItem',
- 'order', 'userRecord',
- 'username') or formatting.blank(),
- )])
- else:
- table.add_row(['owner', formatting.blank()])
+ table.add_row(['owner', formatting.FormattedItem(
+ utils.lookup(result, 'billingItem', 'orderItem',
+ 'order', 'userRecord',
+ 'username') or formatting.blank()
+ )])
vlan_table = formatting.Table(['type', 'number', 'id'])
diff --git a/SoftLayer/CLI/virt/detail.py b/SoftLayer/CLI/virt/detail.py
index b003e413..4fc115ce 100644
--- a/SoftLayer/CLI/virt/detail.py
+++ b/SoftLayer/CLI/virt/detail.py
@@ -67,14 +67,11 @@ def cli(self, identifier, passwords=False, price=False):
table.add_row(['private_cpu', result['dedicatedAccountHostOnlyFlag']])
table.add_row(['created', result['createDate']])
table.add_row(['modified', result['modifyDate']])
- if utils.lookup(result, 'billingItem') != []:
- table.add_row(['owner', formatting.FormattedItem(
- utils.lookup(result, 'billingItem', 'orderItem',
- 'order', 'userRecord',
- 'username') or formatting.blank(),
- )])
- else:
- table.add_row(['owner', formatting.blank()])
+ table.add_row(['owner', formatting.FormattedItem(
+ utils.lookup(result, 'billingItem', 'orderItem',
+ 'order', 'userRecord',
+ 'username') or formatting.blank(),
+ )])
vlan_table = formatting.Table(['type', 'number', 'id'])
for vlan in result['networkVlans']:
diff --git a/SoftLayer/managers/vs.py b/SoftLayer/managers/vs.py
index 30de90b3..40ea9925 100644
--- a/SoftLayer/managers/vs.py
+++ b/SoftLayer/managers/vs.py
@@ -423,6 +423,7 @@ def verify_create_instance(self, **kwargs):
Without actually placing an order.
See :func:`create_instance` for a list of available options.
"""
+ kwargs.pop('tags', None)
create_options = self._generate_create_dict(**kwargs)
return self.guest.generateOrderTemplate(create_options)
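With `tags` popped before `_generate_create_dict` runs, a dry run like the one reported in the issue below can pass tags without blowing up — a sketch (keyword names follow the manager's create options; the values are illustrative):

    import SoftLayer

    client = SoftLayer.create_client_from_env()
    vsi = SoftLayer.VSManager(client)
    vsi.verify_create_instance(hostname='OS', domain='vm.local',
                               cpus=1, memory=1024,
                               os_code='WIN_LATEST_64', datacenter='lon02',
                               tags=['1234'])  # dropped by the pop() above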
| got an unexpected keyword argument 'tags'
Hi there,
I came across an error when creating a VS and providing tags, either in the form of -g or --tag:
C:\Python34\Scripts>slcli vs create --test --hostname OS --domain vm.local --cpu 1 --memory 1024 --os WIN_LATEST_64 --datacenter lon02 --tag 1234
An unexpected error has occured:
Traceback (most recent call last):
File "C:\Python34\lib\site-packages\SoftLayer\CLI\core.py", line 181, in main
cli.main()
File "C:\Python34\lib\site-packages\click\core.py", line 644, in main
rv = self.invoke(ctx)
File "C:\Python34\lib\site-packages\click\core.py", line 991, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "C:\Python34\lib\site-packages\click\core.py", line 991, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "C:\Python34\lib\site-packages\click\core.py", line 837, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "C:\Python34\lib\site-packages\click\core.py", line 464, in invoke
return callback(*args, **kwargs)
File "C:\Python34\lib\site-packages\click\decorators.py", line 64, in new_func
return ctx.invoke(f, obj, *args[1:], **kwargs)
File "C:\Python34\lib\site-packages\click\core.py", line 464, in invoke
return callback(*args, **kwargs)
File "C:\Python34\lib\site-packages\SoftLayer\CLI\virt\create.py", line 92, in cli
result = vsi.verify_create_instance(**data)
File "C:\Python34\lib\site-packages\SoftLayer\managers\vs.py", line 426, in verify_create_instance
create_options = self._generate_create_dict(**kwargs)
TypeError: _generate_create_dict() got an unexpected keyword argument 'tags' | softlayer/softlayer-python | diff --git a/SoftLayer/tests/managers/vs_tests.py b/SoftLayer/tests/managers/vs_tests.py
index 3a179b46..9bdb3459 100644
--- a/SoftLayer/tests/managers/vs_tests.py
+++ b/SoftLayer/tests/managers/vs_tests.py
@@ -141,7 +141,7 @@ def test_reload_instance(self):
def test_create_verify(self, create_dict):
create_dict.return_value = {'test': 1, 'verify': 1}
- self.vs.verify_create_instance(test=1, verify=1)
+ self.vs.verify_create_instance(test=1, verify=1, tags=['test', 'tags'])
create_dict.assert_called_once_with(test=1, verify=1)
self.assert_called_with('SoftLayer_Virtual_Guest',
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 3
} | 4.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"tools/test-requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
click==8.0.4
coverage==6.2
distlib==0.3.9
docutils==0.18.1
filelock==3.4.1
fixtures==4.0.1
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
Jinja2==3.0.3
MarkupSafe==2.0.1
mock==5.2.0
nose==1.3.7
packaging==21.3
pbr==6.1.1
platformdirs==2.4.0
pluggy==1.0.0
prettytable==2.5.0
py==1.11.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytz==2025.2
requests==2.27.1
six==1.17.0
snowballstemmer==2.2.0
-e git+https://github.com/softlayer/softlayer-python.git@1195b2020ef6efc40462d59eb079f26e5f39a6d8#egg=SoftLayer
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
testtools==2.6.0
toml==0.10.2
tomli==1.2.3
tox==3.28.0
typing_extensions==4.1.1
urllib3==1.26.20
virtualenv==20.17.1
wcwidth==0.2.13
zipp==3.6.0
| name: softlayer-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- click==8.0.4
- coverage==6.2
- distlib==0.3.9
- docutils==0.18.1
- filelock==3.4.1
- fixtures==4.0.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- jinja2==3.0.3
- markupsafe==2.0.1
- mock==5.2.0
- nose==1.3.7
- packaging==21.3
- pbr==6.1.1
- platformdirs==2.4.0
- pluggy==1.0.0
- prettytable==2.5.0
- py==1.11.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytz==2025.2
- requests==2.27.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- testtools==2.6.0
- toml==0.10.2
- tomli==1.2.3
- tox==3.28.0
- typing-extensions==4.1.1
- urllib3==1.26.20
- virtualenv==20.17.1
- wcwidth==0.2.13
- zipp==3.6.0
prefix: /opt/conda/envs/softlayer-python
| [
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_create_verify"
]
| []
| [
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_cancel_instance",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_capture_additional_disks",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_captures",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_change_port_speed_private",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_change_port_speed_public",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_create_instance",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_create_instances",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_edit_blank",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_edit_full",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_edit_metadata",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_edit_tags",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_edit_tags_blank",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_generate_basic",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_generate_datacenter",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_generate_dedicated",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_generate_image_id",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_generate_missing",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_generate_monthly",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_generate_multi_disk",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_generate_network",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_generate_no_disks",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_generate_os_and_image",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_generate_post_uri",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_generate_private_network_only",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_generate_private_vlan",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_generate_public_vlan",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_generate_single_disk",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_generate_sshkey",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_generate_userdata",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_get_create_options",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_get_instance",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_get_item_id_for_upgrade",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_list_instances",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_list_instances_hourly",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_list_instances_monthly",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_list_instances_neither",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_list_instances_with_filters",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_reload_instance",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_rescue",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_resolve_ids_hostname",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_resolve_ids_ip",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_resolve_ids_ip_invalid",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_resolve_ids_ip_private",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_upgrade",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_upgrade_blank",
"SoftLayer/tests/managers/vs_tests.py::VSTests::test_upgrade_full",
"SoftLayer/tests/managers/vs_tests.py::VSWaitReadyGoTests::test_active_and_provisiondate",
"SoftLayer/tests/managers/vs_tests.py::VSWaitReadyGoTests::test_active_not_provisioned",
"SoftLayer/tests/managers/vs_tests.py::VSWaitReadyGoTests::test_active_provision_pending",
"SoftLayer/tests/managers/vs_tests.py::VSWaitReadyGoTests::test_active_reload",
"SoftLayer/tests/managers/vs_tests.py::VSWaitReadyGoTests::test_iter_four_complete",
"SoftLayer/tests/managers/vs_tests.py::VSWaitReadyGoTests::test_iter_once_complete",
"SoftLayer/tests/managers/vs_tests.py::VSWaitReadyGoTests::test_iter_ten_incomplete",
"SoftLayer/tests/managers/vs_tests.py::VSWaitReadyGoTests::test_iter_two_incomplete",
"SoftLayer/tests/managers/vs_tests.py::VSWaitReadyGoTests::test_ready_iter_once_incomplete",
"SoftLayer/tests/managers/vs_tests.py::VSWaitReadyGoTests::test_reload_no_pending",
"SoftLayer/tests/managers/vs_tests.py::VSWaitReadyGoTests::test_reload_pending",
"SoftLayer/tests/managers/vs_tests.py::VSWaitReadyGoTests::test_wait_interface"
]
| []
| MIT License | 209 | [
"SoftLayer/CLI/server/detail.py",
"SoftLayer/managers/vs.py",
"SoftLayer/CLI/virt/detail.py"
]
| [
"SoftLayer/CLI/server/detail.py",
"SoftLayer/managers/vs.py",
"SoftLayer/CLI/virt/detail.py"
]
|
|
SMART-Lab__smartlearner-31 | 0f11484800712ac92d6027754ab0a026193fa985 | 2015-08-13 18:37:49 | 0f11484800712ac92d6027754ab0a026193fa985 | diff --git a/smartlearner/__init__.py b/smartlearner/__init__.py
index 5159ac3..d6347f4 100644
--- a/smartlearner/__init__.py
+++ b/smartlearner/__init__.py
@@ -1,5 +1,10 @@
from .interfaces.model import Model
from .interfaces.dataset import Dataset
+from .interfaces.loss import Loss
+from .interfaces.task import Task
+
from .trainer import Trainer
from .tasks import tasks
+from .tasks import views
+from .tasks import stopping_criteria
diff --git a/smartlearner/batch_schedulers/__init__.py b/smartlearner/batch_schedulers/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/smartlearner/batch_schedulers/minibatch.py b/smartlearner/batch_schedulers/minibatch.py
new file mode 100644
index 0000000..54ea692
--- /dev/null
+++ b/smartlearner/batch_schedulers/minibatch.py
@@ -0,0 +1,46 @@
+import theano
+import numpy as np
+
+from ..interfaces.batch_scheduler import BatchScheduler
+
+
+class MiniBatchScheduler(BatchScheduler):
+ def __init__(self, dataset, batch_size):
+ super(MiniBatchScheduler, self).__init__(dataset)
+ self._shared_batch_size = theano.shared(np.array(0, dtype='i4'))
+ self.batch_size = batch_size
+ self.shared_batch_count = theano.shared(np.array(0, dtype='i4'))
+
+ @property
+ def batch_size(self):
+ return self._shared_batch_size.get_value()
+
+ @batch_size.setter
+ def batch_size(self, value):
+ self._shared_batch_size.set_value(np.array(value, dtype='i4'))
+ self.nb_updates_per_epoch = int(np.ceil(len(self.dataset)/self.batch_size))
+
+ @property
+ def updates(self):
+ return {} # No updates
+
+ @property
+ def givens(self):
+ start = self.shared_batch_count * self._shared_batch_size
+ end = (self.shared_batch_count + 1) * self._shared_batch_size
+
+ if self.dataset.targets is not None:
+ return {self.dataset.symb_inputs: self.dataset.inputs[start:end],
+ self.dataset.symb_targets: self.dataset.targets[start:end]}
+ else:
+ return {self.dataset.symb_inputs: self.dataset.inputs[start:end]}
+
+ def __iter__(self):
+ for batch_count in range(self.nb_updates_per_epoch):
+ self.shared_batch_count.set_value(batch_count)
+ yield batch_count + 1
+
+
+class FullBatchScheduler(MiniBatchScheduler):
+ def __init__(self, dataset):
+        super(FullBatchScheduler, self).__init__(dataset, batch_size=len(dataset))  # use the argument; `self.dataset` is not set yet
diff --git a/smartlearner/update_rules/__init__.py b/smartlearner/direction_modifiers/__init__.py
similarity index 76%
rename from smartlearner/update_rules/__init__.py
rename to smartlearner/direction_modifiers/__init__.py
index f1121ac..33afb25 100644
--- a/smartlearner/update_rules/__init__.py
+++ b/smartlearner/direction_modifiers/__init__.py
@@ -1,3 +1,2 @@
-from .update_rule import UpdateRule
from .decreasing_learning_rate import DecreasingLearningRate
from .constant_learning_rate import ConstantLearningRate
diff --git a/smartlearner/update_rules/constant_learning_rate.py b/smartlearner/direction_modifiers/constant_learning_rate.py
similarity index 100%
rename from smartlearner/update_rules/constant_learning_rate.py
rename to smartlearner/direction_modifiers/constant_learning_rate.py
diff --git a/smartlearner/direction_modifiers/decreasing_learning_rate.py b/smartlearner/direction_modifiers/decreasing_learning_rate.py
new file mode 100644
index 0000000..eaca014
--- /dev/null
+++ b/smartlearner/direction_modifiers/decreasing_learning_rate.py
@@ -0,0 +1,44 @@
+from collections import OrderedDict
+
+import numpy as np
+
+from ..utils import sharedX
+from ..interfaces.direction_modifier import DirectionModifier
+
+
+class DecreasingLearningRate(DirectionModifier):
+ def __init__(self, lr, dc=0.):
+ """
+ Implements a decreasing learning rate update rule.
+
+ Parameters
+ ----------
+ lr: float
+ Learning rate.
+ dc: float in [0,1) (optional)
+ Decreasing constant (decay). Default: 0.
+ """
+ if dc < 0. or dc >= 1:
+ raise ValueError("`dc` ({}) must be between 0 (inclusive) and 1 (exclusive)!".format(dc))
+
+ super(DecreasingLearningRate, self).__init__()
+ self.lr = lr
+ self.dc = dc
+ self._updates = OrderedDict()
+
+ def _get_updates(self):
+ return self._updates
+
+ def apply(self, directions):
+ new_directions = OrderedDict()
+
+ for param, gparam in directions.items():
+ lr = sharedX(self.lr * np.ones_like(param.get_value()), name='lr_' + param.name)
+
+ if self.dc != 0.:
+ # Decrease the learning rate by a factor of `dc` after each update.
+ self._updates[lr] = self.dc * lr
+
+ new_directions[param] = lr * gparam
+
+ return new_directions
diff --git a/smartlearner/interfaces/__init__.py b/smartlearner/interfaces/__init__.py
index e69de29..889fe01 100644
--- a/smartlearner/interfaces/__init__.py
+++ b/smartlearner/interfaces/__init__.py
@@ -0,0 +1,7 @@
+from .batch_scheduler import BatchScheduler
+from .dataset import Dataset
+from .model import Model
+from .loss import Loss
+from .task import Task
+from .direction_modifier import DirectionModifier
+from .direction_modifier import ParamModifier
diff --git a/smartlearner/batch_scheduler.py b/smartlearner/interfaces/batch_scheduler.py
similarity index 84%
rename from smartlearner/batch_scheduler.py
rename to smartlearner/interfaces/batch_scheduler.py
index 99f33ec..2324f97 100644
--- a/smartlearner/batch_scheduler.py
+++ b/smartlearner/interfaces/batch_scheduler.py
@@ -1,4 +1,4 @@
-from abc import ABCMeta, abstractmethod
+from abc import ABCMeta, abstractmethod, abstractproperty
import numpy as np
import theano
@@ -11,8 +11,16 @@ class BatchScheduler(object):
self.dataset = dataset
@property
+ def tasks(self):
+ return []
+
+ @abstractproperty
+ def updates(self):
+ raise NotImplementedError("Subclass of 'BatchScheduler' must implement property 'updates'.")
+
+ @abstractproperty
def givens(self):
- raise NotImplementedError("Subclass of 'BatchScheduler' must implement 'givens'.")
+ raise NotImplementedError("Subclass of 'BatchScheduler' must implement property 'givens'.")
@abstractmethod
def __iter__(self):
@@ -40,6 +48,10 @@ class MiniBatchScheduler(BatchScheduler):
self._shared_batch_size.set_value(np.array(value, dtype='i4'))
self.nb_updates_per_epoch = int(np.ceil(len(self.dataset)/self.batch_size))
+ @property
+ def updates(self):
+ return {} # No updates
+
@property
def givens(self):
start = self.shared_batch_count * self._shared_batch_size
@@ -59,4 +71,4 @@ class MiniBatchScheduler(BatchScheduler):
class FullBatchScheduler(MiniBatchScheduler):
def __init__(self, dataset):
- super(FullBatchScheduler, self).__init__(dataset, batch_size=len(self.dataset))
\ No newline at end of file
+ super(FullBatchScheduler, self).__init__(dataset, batch_size=len(self.dataset))
diff --git a/smartlearner/interfaces/direction_modifier.py b/smartlearner/interfaces/direction_modifier.py
new file mode 100644
index 0000000..9823a42
--- /dev/null
+++ b/smartlearner/interfaces/direction_modifier.py
@@ -0,0 +1,41 @@
+from abc import ABCMeta, abstractmethod
+
+
+class DirectionModifier(object):
+ __metaclass__ = ABCMeta
+
+ @property
+ def tasks(self):
+ return []
+
+ @property
+ def updates(self):
+ return self._get_updates()
+
+ @abstractmethod
+ def _get_updates(self):
+ raise NotImplementedError("Subclass of 'DirectionModifier' must implement '_get_updates()'.")
+
+ @abstractmethod
+ def apply(self, directions):
+ raise NotImplementedError("Subclass of 'DirectionModifier' must implement 'apply(directions)'.")
+
+
+class ParamModifier(object):
+ __metaclass__ = ABCMeta
+
+ @property
+ def tasks(self):
+ return []
+
+ @property
+ def updates(self):
+ return self._get_updates()
+
+ @abstractmethod
+ def _get_updates(self):
+ raise NotImplementedError("Subclass of 'ParamModifier' must implement '_get_updates()'.")
+
+ @abstractmethod
+ def apply(self, params):
+ raise NotImplementedError("Subclass of 'ParamModifier' must implement 'apply(params)'.")
diff --git a/smartlearner/interfaces/loss.py b/smartlearner/interfaces/loss.py
index e9823e9..cd1742f 100644
--- a/smartlearner/interfaces/loss.py
+++ b/smartlearner/interfaces/loss.py
@@ -1,24 +1,53 @@
+from collections import OrderedDict
+
from theano import tensor as T
+from abc import ABCMeta, abstractmethod
+
class Loss(object):
+ __metaclass__ = ABCMeta
+
def __init__(self, model, dataset):
self.model = model
self.dataset = dataset
- self.target = dataset.symb_targets
self.consider_constant = [] # Part of the computational graph to be considered as a constant.
+ self._tasks = []
+ self._gradients = None
+
+ # Build the graph for the loss.
+ model_output = self.model.get_output(self.dataset.symb_inputs)
+ self._loss = self._compute_loss(model_output)
+
+ @abstractmethod
+ def _get_updates(self):
+ raise NotImplementedError("Subclass of 'Loss' must implement '_get_updates()'.")
- def get_graph_output(self):
- output, updates = self.model.get_model_output(self.dataset.symb_inputs)
- return self._loss_function(output), updates
+ @abstractmethod
+ def _compute_loss(self, model_output):
+ raise NotImplementedError("Subclass of 'Loss' must implement '_compute_loss(model_output)'.")
- def get_gradients(self):
- cost, updates = self.get_graph_output()
- gparams = T.grad(cost=cost,
+ @property
+ def gradients(self):
+ if self._gradients is None:
+ self._gradients = self._get_gradients()
+
+ return self._gradients
+
+ @property
+ def tasks(self):
+ return self.model.tasks + self._tasks
+
+ @property
+ def updates(self):
+ updates = OrderedDict()
+ updates.update(self.model.updates)
+ updates.update(self._get_updates())
+ return updates
+
+ def _get_gradients(self):
+ gparams = T.grad(cost=self._loss,
wrt=self.model.parameters,
consider_constant=self.consider_constant)
- gradients = dict(zip(self.model.parameters, gparams))
- return gradients, updates
-
- def _loss_function(self, model_output):
- raise NotImplementedError("Subclass of 'Loss' must implement '_loss_function(model_output)'.")
+ self._gradients = dict(zip(self.model.parameters, gparams))
+ return self.gradients
diff --git a/smartlearner/interfaces/model.py b/smartlearner/interfaces/model.py
index f72e79b..bad865b 100644
--- a/smartlearner/interfaces/model.py
+++ b/smartlearner/interfaces/model.py
@@ -1,5 +1,3 @@
-import theano.tensor as T
-from collections import OrderedDict
from abc import ABCMeta, abstractmethod, abstractproperty
@@ -14,9 +12,18 @@ class abstractclassmethod(classmethod):
class Model(object):
__metaclass__ = ABCMeta
- def get_model_output(self, inputs):
+ @property
+ def tasks(self):
+ return []
+
+ @abstractmethod
+ def get_output(self, inputs):
raise NotImplementedError("Subclass of 'Model' must define a model output (a theano graph)")
+ @abstractproperty
+ def updates(self):
+ raise NotImplementedError("Subclass of 'Model' must implement property 'updates'.")
+
@abstractproperty
def parameters(self):
raise NotImplementedError("Subclass of 'Model' must implement property 'parameters'.")
diff --git a/smartlearner/interfaces/optimizer.py b/smartlearner/interfaces/optimizer.py
new file mode 100644
index 0000000..13da9d1
--- /dev/null
+++ b/smartlearner/interfaces/optimizer.py
@@ -0,0 +1,81 @@
+from collections import OrderedDict
+
+from abc import ABCMeta, abstractmethod
+
+
+class Optimizer(object):
+ __metaclass__ = ABCMeta
+
+ def __init__(self, loss):
+ self.loss = loss
+ self._tasks = []
+
+ self._direction_modifiers = []
+ self._param_modifiers = []
+ self._directions = None
+
+ def append_direction_modifier(self, direction_modifier):
+ self._direction_modifiers.append(direction_modifier)
+
+ def append_param_modifier(self, param_modifier):
+ self._param_modifiers.append(param_modifier)
+
+ @abstractmethod
+ def _get_directions(self):
+ raise NotImplementedError("Subclass of 'Optimizer' must implement '_get_directions()'.")
+
+ @abstractmethod
+ def _get_updates(self):
+ raise NotImplementedError("Subclass of 'Optimizer' must implement private property '_updates'.")
+
+ @property
+ def directions(self):
+ if self._directions is None:
+ self._directions = self._get_directions()
+
+ return self._directions
+
+ @property
+ def tasks(self):
+ tasks = []
+ tasks.extend(self.loss.tasks)
+
+ for direction_modifier in self._direction_modifiers:
+ tasks.extend(direction_modifier.tasks)
+
+ for param_modifier in self._param_modifiers:
+ tasks.extend(param_modifier.tasks)
+
+ tasks.extend(self._tasks)
+ return tasks
+
+ @property
+ def updates(self):
+ updates = OrderedDict()
+
+ directions = self.directions
+ updates.update(self.loss.updates) # Gather updates from the loss.
+ updates.update(self._get_updates()) # Gather updates from the optimizer.
+
+ # Apply directions modifiers and gather updates from these modifiers.
+ updates.update(self._apply_modifiers(self._direction_modifiers, directions))
+
+ # Update parameters
+ params_updates = OrderedDict()
+ for param, direction in directions.items():
+ params_updates[param] = param + direction
+ updates.update(params_updates)
+
+ # Apply parameters modifiers and gather updates from these modifiers.
+ updates.update(self._apply_modifiers(self._param_modifiers, params_updates))
+
+ return updates
+
+ def _apply_modifiers(self, list_modifiers, objects_to_modify):
+ updates = OrderedDict()
+ for modifier in list_modifiers:
+ modified_objects = modifier.apply(objects_to_modify)
+ objects_to_modify.update(modified_objects)
+ updates.update(modifier.updates)
+
+ return updates
diff --git a/smartlearner/losses/classification_losses.py b/smartlearner/losses/classification_losses.py
index b5bfc74..738a899 100644
--- a/smartlearner/losses/classification_losses.py
+++ b/smartlearner/losses/classification_losses.py
@@ -4,13 +4,19 @@ import theano.tensor as T
class NegativeLogLikelihood(Loss):
- def _loss_function(self, model_output):
+ def _get_updates(self):
+        return {}  # There are no updates for NegativeLogLikelihood.
+
+ def _compute_loss(self, model_output):
nll = -T.log(model_output)
- indices = T.cast(self.target[:, 0], dtype="int32") # Targets are floats.
- selected_nll = nll[T.arange(self.target.shape[0]), indices]
+ indices = T.cast(self.dataset.symb_targets[:, 0], dtype="int32") # Targets are floats.
+ selected_nll = nll[T.arange(self.dataset.symb_targets.shape[0]), indices]
return T.mean(selected_nll)
class CategoricalCrossEntropy(Loss):
- def _loss_function(self, model_output):
- return T.mean(T.nnet.categorical_crossentropy(model_output, self.target))
+ def _get_updates(self):
+        return {}  # There are no updates for CategoricalCrossEntropy.
+
+ def _compute_loss(self, model_output):
+ return T.mean(T.nnet.categorical_crossentropy(model_output, self.dataset.symb_targets))
diff --git a/smartlearner/losses/distribution_losses.py b/smartlearner/losses/distribution_losses.py
index c20506b..97a99c7 100644
--- a/smartlearner/losses/distribution_losses.py
+++ b/smartlearner/losses/distribution_losses.py
@@ -4,5 +4,8 @@ import theano.tensor as T
class BinaryCrossEntropy(Loss):
- def _loss_function(self, model_output):
- return T.mean(T.nnet.binary_crossentropy(model_output, self.target))
+ def _get_updates(self):
+        return {}  # There are no updates for BinaryCrossEntropy.
+
+ def _compute_loss(self, model_output):
+ return T.mean(T.nnet.binary_crossentropy(model_output, self.dataset.symb_targets))
diff --git a/smartlearner/losses/reconstruction_losses.py b/smartlearner/losses/reconstruction_losses.py
index 59f3e0e..19f5199 100644
--- a/smartlearner/losses/reconstruction_losses.py
+++ b/smartlearner/losses/reconstruction_losses.py
@@ -4,10 +4,16 @@ import theano.tensor as T
class L2Distance(Loss):
- def _loss_function(self, model_output):
- return T.mean((model_output - self.target)**2)
+ def _get_updates(self):
+        return {}  # There are no updates for L2Distance.
+
+ def _compute_loss(self, model_output):
+ return T.mean((model_output - self.dataset.symb_targets)**2)
class L1Distance(Loss):
- def _loss_function(self, model_output):
- return T.mean(abs(model_output - self.target))
+ def _get_updates(self):
+        return {}  # There are no updates for L1Distance.
+
+ def _compute_loss(self, model_output):
+ return T.mean(abs(model_output - self.dataset.symb_targets))
diff --git a/smartlearner/optimizers/__init__.py b/smartlearner/optimizers/__init__.py
index 3ddc3b4..d2baa34 100644
--- a/smartlearner/optimizers/__init__.py
+++ b/smartlearner/optimizers/__init__.py
@@ -1,2 +1,2 @@
-from .optimizer import Optimizer
from .sgd import SGD
+from .adagrad import AdaGrad
diff --git a/smartlearner/optimizers/adagrad.py b/smartlearner/optimizers/adagrad.py
new file mode 100644
index 0000000..c132e9d
--- /dev/null
+++ b/smartlearner/optimizers/adagrad.py
@@ -0,0 +1,58 @@
+from collections import OrderedDict
+import theano.tensor as T
+
+from . import SGD
+from ..utils import sharedX
+
+
+class AdaGrad(SGD):
+ """ Implements the AdaGrad optimizer [Duchi11]_.
+
+ References
+ ----------
+ .. [Duchi11] Duchi, J., Hazan, E., & Singer, Y., "Adaptive Subgradient
+ Methods for Online Learning and Stochastic Optimization",
+ Journal of Machine Learning Research, vol. 12, pp. 2121-2159,
+ 2011.
+ """
+ def __init__(self, loss, lr, eps=1e-6):
+ """
+ Parameters
+ ----------
+ loss: `smartlearner.interfaces.loss.Loss` object
+ Loss function from which to obtain the gradients.
+ lr: float
+ Initial learning rate.
+ eps: float (optional)
+ Epsilon needed to avoid division by zero.
+ """
+ super().__init__(loss)
+ self.lr = lr
+ self.eps = eps
+ self.parameters = {}
+ self._updates = OrderedDict()
+
+ def _get_updates(self):
+ return self._updates
+
+ def _get_directions(self):
+ """ Produces descending directions. """
+ directions = OrderedDict()
+
+ for i, (param, direction) in enumerate(super()._get_directions().items()):
+ # sum_squared_grad := \sum g_t^2
+ param_name = param.name if param.name is not None else str(i)
+ sum_squared_grad = sharedX(param.get_value() * 0., name='sum_squared_grad_' + param_name)
+ self.parameters[sum_squared_grad.name] = sum_squared_grad
+
+ # Accumulate gradient
+ new_sum_squared_grad = sum_squared_grad + T.sqr(direction)
+
+ # Compute update
+ root_sum_squared = T.sqrt(new_sum_squared_grad + self.eps)
+
+ # Apply update
+ self._updates[sum_squared_grad] = new_sum_squared_grad
+ directions[param] = (self.lr/root_sum_squared) * direction
+
+ return directions
diff --git a/smartlearner/optimizers/optimizer.py b/smartlearner/optimizers/optimizer.py
deleted file mode 100644
index 954a241..0000000
--- a/smartlearner/optimizers/optimizer.py
+++ /dev/null
@@ -1,49 +0,0 @@
-from collections import OrderedDict
-
-from abc import ABCMeta, abstractmethod
-
-
-class Optimizer(object):
- __metaclass__ = ABCMeta
-
- def __init__(self, loss):
- self.loss = loss
-
- self._update_rules = []
- self._param_modifiers = []
-
- def append_update_rule(self, update_rule):
- self._update_rules.append(update_rule)
-
- def append_param_modifier(self, param_modifier):
- self._param_modifiers.append(param_modifier)
-
- @abstractmethod
- def _get_directions(self):
- raise NotImplementedError("Subclass of 'Optimizer' must implement '_get_directions()'.")
-
- def gather_updates(self):
- updates = OrderedDict()
-
- self.directions, updates_from_get_directions = self._get_directions()
- updates.update(updates_from_get_directions)
-
- updates.update(self._apply_updates(self._update_rules, self.directions))
-
- # Update parameters
- params_updates = OrderedDict()
- for param, gparam in self.directions.items():
- params_updates[param] = param + self.directions[param]
- updates.update(params_updates)
-
- updates.update(self._apply_updates(self._param_modifiers, params_updates))
-
- return updates
-
- def _apply_updates(self, list_updates, object_to_update):
- update_dict = OrderedDict()
- for update in list_updates:
- modified_object, updates_to_add = update.apply(object_to_update)
- object_to_update.update(modified_object)
- update_dict.update(updates_to_add)
- return update_dict
\ No newline at end of file
diff --git a/smartlearner/optimizers/sgd.py b/smartlearner/optimizers/sgd.py
index 2130265..8f73365 100644
--- a/smartlearner/optimizers/sgd.py
+++ b/smartlearner/optimizers/sgd.py
@@ -1,16 +1,19 @@
-import numpy as np
-from . import Optimizer
+from collections import OrderedDict
+
+from ..interfaces.optimizer import Optimizer
class SGD(Optimizer):
def __init__(self, loss):
- super(SGD, self).__init__(loss)
+ super().__init__(loss)
- def _get_directions(self):
- self.gradients, updates_from_get_gradients = self.loss.get_gradients()
+ def _get_updates(self):
+        return {}  # There are no updates for SGD.
- # Take the opposite of the gradient.
- for param, gparam in self.gradients.items():
- self.gradients[param] = -gparam
+ def _get_directions(self):
+ # Take the opposite of the gradients as directions.
+ directions = OrderedDict()
+ for param, gradient in self.loss.gradients.items():
+ directions[param] = -gradient
- return self.gradients, updates_from_get_gradients
+ return directions
diff --git a/smartlearner/tasks/tasks.py b/smartlearner/tasks/tasks.py
index cd6021f..75ba0c4 100644
--- a/smartlearner/tasks/tasks.py
+++ b/smartlearner/tasks/tasks.py
@@ -6,6 +6,16 @@ from time import time
from smartlearner.interfaces.task import Task, RecurrentTask
+class MonitorVariable(Task):
+ def __init__(self, var):
+ super().__init__()
+ self.var = self.track_variable(var)
+
+ @property
+ def value(self):
+ return self.var.get_value()
+
+
class PrintVariable(RecurrentTask):
def __init__(self, msg, *variables, **recurrent_options):
# TODO: docstring should include **recurrent_options.
diff --git a/smartlearner/trainer.py b/smartlearner/trainer.py
index c25770f..d9cfda0 100644
--- a/smartlearner/trainer.py
+++ b/smartlearner/trainer.py
@@ -1,9 +1,8 @@
from collections import OrderedDict
import theano
-from time import time
from .status import Status
-from smartlearner.tasks.stopping_criteria import TrainingExit
+from .tasks.stopping_criteria import TrainingExit
class Trainer(object):
@@ -11,8 +10,16 @@ class Trainer(object):
self.status = status if status is not None else Status(self)
self._optimizer = optimizer
self._batch_scheduler = batch_scheduler
- self._updates = OrderedDict()
+
+ # Gather updates from the optimizer and the batch scheduler.
+ self._graph_updates = OrderedDict()
+ self._graph_updates.update(self._optimizer.updates)
+ self._graph_updates.update(self._batch_scheduler.updates)
+
+ # Gather tasks from the optimizer and the batch scheduler.
self._tasks = []
+ self._tasks.extend(self._optimizer.tasks)
+ self._tasks.extend(self._batch_scheduler.tasks)
def train(self):
self._pre_learning()
@@ -20,16 +27,17 @@ class Trainer(object):
self._post_learning()
def append_task(self, task):
- self._updates.update(task.updates)
self._tasks.append(task)
def _build_theano_graph(self):
- updates = self._optimizer.gather_updates()
- updates.update(self._updates)
+ # Get updates from tasks.
+ for task in self._tasks:
+ self._graph_updates.update(task.updates)
+
self._learn = theano.function([],
- updates=updates,
- givens=self._batch_scheduler.givens,
- name="learn")
+ updates=self._graph_updates,
+ givens=self._batch_scheduler.givens,
+ name="learn")
def _pre_learning(self):
self._build_theano_graph()
diff --git a/smartlearner/update_rules/decreasing_learning_rate.py b/smartlearner/update_rules/decreasing_learning_rate.py
deleted file mode 100644
index f721726..0000000
--- a/smartlearner/update_rules/decreasing_learning_rate.py
+++ /dev/null
@@ -1,40 +0,0 @@
-from collections import OrderedDict
-
-import numpy as np
-
-from ..utils import sharedX
-from . import UpdateRule
-
-
-class DecreasingLearningRate(UpdateRule):
- def __init__(self, lr, dc=0.):
- """
- Implements a decreasing learning rate update rule.
-
- Parameters
- ----------
- lr: float
- learning rate
- dc: float
- decreasing constant (decay)
- """
- super(DecreasingLearningRate, self).__init__()
- assert dc <= 1.
- assert dc >= 0.
- self.lr = lr
- self.dc = dc
-
- def apply(self, gradients):
- updates = OrderedDict()
- new_gradients = OrderedDict()
-
- for param, gparam in gradients.items():
- lr = sharedX(self.lr * np.ones_like(param.get_value()), name='lr_' + param.name)
-
- if self.dc != 0.:
- # Decrease the learning rate by a factor of `dc` after each update.
- updates[lr] = self.dc * lr
-
- new_gradients[param] = lr * gparam
-
- return new_gradients, updates
diff --git a/smartlearner/update_rules/update_rule.py b/smartlearner/update_rules/update_rule.py
deleted file mode 100644
index e5933ad..0000000
--- a/smartlearner/update_rules/update_rule.py
+++ /dev/null
@@ -1,9 +0,0 @@
-from abc import ABCMeta, abstractmethod
-
-
-class UpdateRule(object):
- __metaclass__ = ABCMeta
-
- @abstractmethod
- def apply(self, gradients):
- raise NotImplementedError("Subclass of 'UpdateRule' must implement 'apply(gradients)'.")
| Refactor how the updates are handled
I suggest a global updates gatherer (either a module function or a singleton class if you feel fancy) that each part would have access to. This way, every component would be in charge of registering its own updates. A sketch of this idea follows below.
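To make the first suggestion concrete, a minimal sketch of such a gatherer could look like this (hypothetical names, not code from the repository):

    from collections import OrderedDict

    _registry = OrderedDict()  # hypothetical module-level updates gatherer

    def register_updates(updates):
        # Each component (model, loss, optimizer, task) registers the Theano
        # updates it needs while building its part of the graph.
        _registry.update(updates)

    def gather_updates():
        # The trainer collects everything once, when compiling the learn function.
        return OrderedDict(_registry)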
Another solution is a chain call to gather the updates. This would have to be added to the interface of each abstract class (`Model`, `Optimizer`, `Loss`?, `Tasks`?). | SMART-Lab/smartlearner | diff --git a/smartlearner/optimizers/tests/tests_optimizers.py b/smartlearner/optimizers/tests/tests_optimizers.py
new file mode 100644
index 0000000..7735fb9
--- /dev/null
+++ b/smartlearner/optimizers/tests/tests_optimizers.py
@@ -0,0 +1,110 @@
+import numpy as np
+
+import theano
+import theano.tensor as T
+
+from smartlearner.tasks import stopping_criteria
+from smartlearner.utils import sharedX
+
+from smartlearner import Trainer
+from smartlearner.optimizers import SGD, AdaGrad
+
+from smartlearner.tasks import tasks
+
+from numpy.testing import assert_array_almost_equal
+
+from smartlearner.testing import DummyLoss, DummyBatchScheduler
+
+floatX = theano.config.floatX
+
+
+class DummyLossWithGradient(DummyLoss):
+ def __init__(self, cost, param):
+ super().__init__()
+ self.cost = cost
+ self.param = param
+
+ def _get_gradients(self):
+ gparam = T.grad(cost=self.cost, wrt=self.param)
+ return {self.param: gparam}
+
+
+def test_sgd():
+ # Create simple Nd gaussian functions to optimize. These functions are
+ # (perfectly) well-conditioned so it should take only one gradient step
+ # to converge using 1/L, where L is the largest eigenvalue of the hessian.
+ max_epoch = 2
+ for N in range(1, 5):
+ center = np.arange(1, N+1)[None, :].astype(floatX)
+ param = sharedX(np.zeros((1, N)))
+ cost = T.sum(0.5*T.dot(T.dot((param-center), T.eye(N)), (param-center).T))
+ loss = DummyLossWithGradient(cost, param)
+
+ trainer = Trainer(SGD(loss), DummyBatchScheduler())
+ trainer.append_task(stopping_criteria.MaxEpochStopping(max_epoch))
+
+        # Monitor the gradient of `loss` w.r.t. `param`.
+ gparam = tasks.MonitorVariable(loss.gradients[param])
+ trainer.append_task(gparam)
+ trainer.train()
+
+        # Since the problem is well-conditioned and we use an optimal gradient step 1/L,
+        # two epochs should be enough for `param` to be around `center` and the gradients near 0.
+ assert_array_almost_equal(param.get_value(), center)
+ assert_array_almost_equal(gparam.value, 0.)
+
+ # Create an Nd gaussian function to optimize. This function is not
+ # well-conditioned and there exists no perfect gradient step to converge in
+ # only one iteration.
+ #cost = T.sum(N*0.5*T.dot(T.dot((param-center), np.diag(1./np.arange(1, N+1))), ((param-center).T)))
+ max_epoch = 80
+ N = 4
+ center = 5*np.ones((1, N)).astype(floatX)
+ param = sharedX(np.zeros((1, N)))
+ cost = T.sum(0.5*T.dot(T.dot((param-center), np.diag(1./np.arange(1, N+1))), (param-center).T))
+ loss = DummyLossWithGradient(cost, param)
+
+ trainer = Trainer(SGD(loss), DummyBatchScheduler())
+ trainer.append_task(stopping_criteria.MaxEpochStopping(max_epoch))
+ #trainer.append_task(tasks.PrintVariable("Loss param : {}", param))
+ #trainer.append_task(tasks.PrintVariable("Loss gradient: {}", loss.gradients[param]))
+
+    # Monitor the gradient of `loss` w.r.t. `param`.
+ gparam = tasks.MonitorVariable(loss.gradients[param])
+ trainer.append_task(gparam)
+ trainer.train()
+
+    # This problem is ill-conditioned, so convergence takes longer; after `max_epoch` updates
+    # `param` should still end up around `center` with the gradients near 0.
+ assert_array_almost_equal(param.get_value(), center, decimal=6)
+ assert_array_almost_equal(gparam.value, 0.)
+
+
+def test_adagrad():
+ max_epoch = 15
+
+    # Create Nd gaussian functions to optimize. These functions are not
+ # well-conditioned and there exists no perfect gradient step to converge in
+ # only one iteration.
+ for N in range(1, 5):
+ center = 5*np.ones((1, N)).astype(floatX)
+ param = sharedX(np.zeros((1, N)))
+ cost = T.sum(0.5*T.dot(T.dot((param-center), np.diag(1./np.arange(1, N+1))), ((param-center).T)))
+ loss = DummyLossWithGradient(cost, param)
+
+ # Even with a really high gradient step, AdaGrad can still converge.
+ # Actually, it is faster than using the optimal gradient step with SGD.
+ optimizer = AdaGrad(loss, lr=100, eps=1e-1)
+ trainer = Trainer(optimizer, DummyBatchScheduler())
+ trainer.append_task(stopping_criteria.MaxEpochStopping(max_epoch))
+ #trainer.append_task(tasks.PrintVariable("Loss param : {}", param))
+ #trainer.append_task(tasks.PrintVariable("Loss gradient: {}", loss.gradients[param]))
+
+        # Monitor the gradient of `loss` w.r.t. `param`.
+ gparam = tasks.MonitorVariable(loss.gradients[param])
+ trainer.append_task(gparam)
+ trainer.train()
+
+        # After 15 epochs, param should be around the center and gradients near 0.
+ assert_array_almost_equal(param.get_value(), center)
+ assert_array_almost_equal(gparam.value, 0.)
diff --git a/smartlearner/testing.py b/smartlearner/testing.py
index 4036c3a..5edf1ce 100644
--- a/smartlearner/testing.py
+++ b/smartlearner/testing.py
@@ -1,10 +1,10 @@
import numpy as np
-from smartlearner.interfaces.dataset import Dataset
-from smartlearner.interfaces.model import Model
-from smartlearner.interfaces.loss import Loss
-from smartlearner.optimizers.optimizer import Optimizer
-from smartlearner.batch_scheduler import BatchScheduler
+from .interfaces.dataset import Dataset
+from .interfaces.model import Model
+from .interfaces.loss import Loss
+from .interfaces.optimizer import Optimizer
+from .interfaces.batch_scheduler import BatchScheduler
class DummyDataset(Dataset):
@@ -21,8 +21,12 @@ class DummyModel(Model):
def parameters(self):
return self._parameters
- def get_model_output(self, inputs):
- pass
+ @property
+ def updates(self):
+ return {}
+
+ def get_output(self, inputs):
+ return inputs
def save(self, path):
pass
@@ -35,16 +39,22 @@ class DummyLoss(Loss):
def __init__(self):
super(DummyLoss, self).__init__(DummyModel(), DummyDataset())
- def _loss_function(self, model_output):
- pass
+ def _compute_loss(self, model_output):
+ return model_output
+
+ def _get_updates(self):
+ return {}
class DummyOptimizer(Optimizer):
def __init__(self):
super(DummyOptimizer, self).__init__(loss=DummyLoss())
+ def _get_updates(self):
+ return {}
+
def _get_directions(self):
- return {}, {}
+ return {}
class DummyBatchScheduler(BatchScheduler):
@@ -55,5 +65,9 @@ class DummyBatchScheduler(BatchScheduler):
def givens(self):
return {}
+ @property
+ def updates(self):
+ return {}
+
def __iter__(self):
return iter(range(1))
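Judging from the updated dummies, the refactored `Optimizer` interface expects separate `_get_directions` and `_get_updates` hooks. A rough sketch of a custom optimizer, inferred from `DummyOptimizer` (this assumes the base class stores the loss on `self.loss` and that `loss.gradients` maps parameters to gradient expressions, as the tests above suggest):

```python
from smartlearner.interfaces.optimizer import Optimizer

class MyOptimizer(Optimizer):
    def _get_directions(self):
        # Map each parameter to its descent direction (e.g. minus the gradient).
        return {param: -grad for param, grad in self.loss.gradients.items()}

    def _get_updates(self):
        # Extra shared-variable updates; plain gradient descent needs none.
        return {}
```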
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_added_files",
"has_removed_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 12
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "theano numpy scipy nose pyparsing pip flake8 six pep8 pyflakes",
"pip_packages": [
"nose",
"nose-cov",
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cov-core==1.15.0
coverage==7.8.0
exceptiongroup==1.2.2
flake8 @ file:///croot/flake8_1726157165993/work
iniconfig==2.1.0
Mako @ file:///croot/mako_1665472421453/work
MarkupSafe @ file:///croot/markupsafe_1738584038848/work
mccabe @ file:///opt/conda/conda-bld/mccabe_1644221741721/work
nose @ file:///opt/conda/conda-bld/nose_1642704612149/work
nose-cov==1.6
numpy @ file:///opt/conda/conda-bld/numpy_and_numpy_base_1652801679809/work
packaging==24.2
pep8==1.7.1
pluggy==1.5.0
pycodestyle @ file:///croot/pycodestyle_1726150303809/work
pyflakes @ file:///croot/pyflakes_1708962956225/work
pygpu==0.7.6
pyparsing @ file:///croot/pyparsing_1731445506121/work
pytest==8.3.5
scipy @ file:///opt/conda/conda-bld/scipy_1661390393401/work
six @ file:///tmp/build/80754af9/six_1644875935023/work
-e git+https://github.com/SMART-Lab/smartlearner.git@0f11484800712ac92d6027754ab0a026193fa985#egg=smartlearner
Theano==1.0.5
tomli==2.2.1
| name: smartlearner
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- _sysroot_linux-64_curr_repodata_hack=3=haa98f57_10
- binutils_impl_linux-64=2.35.1=h27ae35d_9
- binutils_linux-64=2.35.1=h454624a_30
- blas=1.0=openblas
- ca-certificates=2025.2.25=h06a4308_0
- fftw=3.3.9=h5eee18b_2
- flake8=7.1.1=py39h06a4308_0
- gcc_impl_linux-64=7.5.0=h7105cf2_17
- gcc_linux-64=7.5.0=h8f34230_30
- gxx_impl_linux-64=7.5.0=h0a5bf11_17
- gxx_linux-64=7.5.0=hffc177d_30
- kernel-headers_linux-64=3.10.0=h57e8cba_10
- ld_impl_linux-64=2.35.1=h7274673_9
- libffi=3.4.4=h6a678d5_1
- libgcc-devel_linux-64=7.5.0=hbbeae57_17
- libgcc-ng=11.2.0=h1234567_1
- libgfortran-ng=11.2.0=h00389a5_1
- libgfortran5=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libgpuarray=0.7.6=h7f8727e_1
- libopenblas=0.3.21=h043d6bf_0
- libstdcxx-devel_linux-64=7.5.0=hf0c5c8d_17
- libstdcxx-ng=11.2.0=h1234567_1
- mako=1.2.3=py39h06a4308_0
- markupsafe=3.0.2=py39h5eee18b_0
- mccabe=0.7.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- nose=1.3.7=pyhd3eb1b0_1008
- numpy=1.22.3=py39h7a5d4dd_0
- numpy-base=1.22.3=py39hb8be1f0_0
- openssl=3.0.16=h5eee18b_0
- pep8=1.7.1=py39h06a4308_1
- pip=25.0=py39h06a4308_0
- pycodestyle=2.12.1=py39h06a4308_0
- pyflakes=3.2.0=py39h06a4308_0
- pygpu=0.7.6=py39hce1f21e_1
- pyparsing=3.2.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- scipy=1.7.3=py39hf838250_2
- setuptools=75.8.0=py39h06a4308_0
- six=1.16.0=pyhd3eb1b0_1
- sqlite=3.45.3=h5eee18b_0
- sysroot_linux-64=2.17=h57e8cba_10
- theano=1.0.5=py39h295c915_1
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cov-core==1.15.0
- coverage==7.8.0
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- nose-cov==1.6
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- tomli==2.2.1
prefix: /opt/conda/envs/smartlearner
| [
"smartlearner/optimizers/tests/tests_optimizers.py::test_sgd",
"smartlearner/optimizers/tests/tests_optimizers.py::test_adagrad"
]
| []
| []
| []
| BSD 3-Clause "New" or "Revised" License | 211 | [
"smartlearner/batch_scheduler.py",
"smartlearner/interfaces/optimizer.py",
"smartlearner/batch_schedulers/__init__.py",
"smartlearner/update_rules/constant_learning_rate.py",
"smartlearner/update_rules/update_rule.py",
"smartlearner/tasks/tasks.py",
"smartlearner/interfaces/__init__.py",
"smartlearner/direction_modifiers/decreasing_learning_rate.py",
"smartlearner/optimizers/__init__.py",
"smartlearner/update_rules/decreasing_learning_rate.py",
"smartlearner/losses/distribution_losses.py",
"smartlearner/optimizers/sgd.py",
"smartlearner/interfaces/direction_modifier.py",
"smartlearner/interfaces/loss.py",
"smartlearner/__init__.py",
"smartlearner/optimizers/adagrad.py",
"smartlearner/interfaces/model.py",
"smartlearner/update_rules/__init__.py",
"smartlearner/trainer.py",
"smartlearner/optimizers/optimizer.py",
"smartlearner/losses/classification_losses.py",
"smartlearner/batch_schedulers/minibatch.py",
"smartlearner/losses/reconstruction_losses.py"
]
| [
"smartlearner/interfaces/optimizer.py",
"smartlearner/batch_schedulers/__init__.py",
"smartlearner/update_rules/update_rule.py",
"smartlearner/tasks/tasks.py",
"smartlearner/interfaces/__init__.py",
"smartlearner/direction_modifiers/decreasing_learning_rate.py",
"smartlearner/optimizers/__init__.py",
"smartlearner/update_rules/decreasing_learning_rate.py",
"smartlearner/losses/distribution_losses.py",
"smartlearner/optimizers/sgd.py",
"smartlearner/interfaces/batch_scheduler.py",
"smartlearner/interfaces/direction_modifier.py",
"smartlearner/direction_modifiers/__init__.py",
"smartlearner/interfaces/loss.py",
"smartlearner/__init__.py",
"smartlearner/optimizers/adagrad.py",
"smartlearner/interfaces/model.py",
"smartlearner/trainer.py",
"smartlearner/optimizers/optimizer.py",
"smartlearner/losses/classification_losses.py",
"smartlearner/batch_schedulers/minibatch.py",
"smartlearner/losses/reconstruction_losses.py",
"smartlearner/direction_modifiers/constant_learning_rate.py"
]
|
|
SMART-Lab__smartlearner-35 | b0877d3b961deceb273139f064985e39239351a7 | 2015-08-16 16:29:00 | b0877d3b961deceb273139f064985e39239351a7 | diff --git a/smartlearner/batch_scheduler.py b/smartlearner/batch_scheduler.py
index 664cfb5..99f33ec 100644
--- a/smartlearner/batch_scheduler.py
+++ b/smartlearner/batch_scheduler.py
@@ -26,6 +26,11 @@ class MiniBatchScheduler(BatchScheduler):
self.batch_size = batch_size
self.shared_batch_count = theano.shared(np.array(0, dtype='i4'))
+ # Keep only `batch_size` examples as test values.
+ self.dataset.symb_inputs.tag.test_value = self.dataset.inputs.get_value()[:batch_size]
+ if self.dataset.has_targets:
+ self.dataset.symb_targets.tag.test_value = self.dataset.targets.get_value()[:batch_size]
+
@property
def batch_size(self):
return self._shared_batch_size.get_value()
diff --git a/smartlearner/interfaces/dataset.py b/smartlearner/interfaces/dataset.py
index d2d0a4c..c67da77 100644
--- a/smartlearner/interfaces/dataset.py
+++ b/smartlearner/interfaces/dataset.py
@@ -1,13 +1,48 @@
-import theano
+import numpy as np
+
+import theano.tensor as T
+
+from smartlearner.utils import sharedX
class Dataset(object):
+ """ Dataset interface.
+
+ Attributes
+ ----------
+ symb_inputs : `theano.tensor.TensorType` object
+ Symbolic variables representing the inputs.
+ symb_targets : `theano.tensor.TensorType` object or None
+ Symbolic variables representing the targets.
+
+ Notes
+ -----
+    `symb_inputs` and `symb_targets` have test values already tagged to them. Use
+ THEANO_FLAGS="compute_test_value=warn" to use them.
+ """
def __init__(self, inputs, targets=None, name="dataset"):
+ """
+ Parameters
+ ----------
+ inputs : ndarray
+ Training examples
+ targets : ndarray (optional)
+ Target for each training example.
+ name : str (optional)
+ The name of the dataset is used to name Theano variables. Default: 'dataset'.
+ """
self.name = name
self.inputs = inputs
self.targets = targets
- self.symb_inputs = theano.tensor.matrix(name=self.name+'_inputs')
- self.symb_targets = theano.tensor.matrix(name=self.name+'_targets')
+ self.symb_inputs = T.TensorVariable(type=T.TensorType("floatX", [False]*self.inputs.ndim),
+ name=self.name+'_symb_inputs')
+ self.symb_inputs.tag.test_value = self.inputs.get_value() # For debugging Theano graphs.
+
+ self.symb_targets = None
+ if self.has_targets:
+ self.symb_targets = T.TensorVariable(type=T.TensorType("floatX", [False]*self.targets.ndim),
+ name=self.name+'_symb_targets')
+ self.symb_targets.tag.test_value = self.targets.get_value() # For debugging Theano graphs.
@property
def inputs(self):
@@ -15,7 +50,7 @@ class Dataset(object):
@inputs.setter
def inputs(self, value):
- self._inputs_shared = theano.shared(value, name=self.name + "_inputs", borrow=True)
+ self._inputs_shared = sharedX(value, name=self.name+"_inputs")
@property
def targets(self):
@@ -24,20 +59,37 @@ class Dataset(object):
@targets.setter
def targets(self, value):
if value is not None:
- self._targets_shared = theano.shared(value, name=self.name + "_targets", borrow=True)
+ self._targets_shared = sharedX(np.array(value), name=self.name+"_targets")
else:
self._targets_shared = None
+ @property
+ def has_targets(self):
+ return self.targets is not None
+
+ @property
+ def input_shape(self):
+ return self.inputs.get_value().shape[1:]
+
+ @property
+ def target_shape(self):
+ if self.has_targets:
+ return self.targets.get_value().shape[1:]
+
+ return None
+
@property
def input_size(self):
- return self.inputs.get_value().shape[-1]
+ # TODO: is this property really useful? If needed one could just call directly `dataset.input_shape[-1]`.
+ return self.input_shape[-1]
@property
def target_size(self):
- if self.targets is None:
- return 0
- else:
- return self.targets.get_value().shape[-1]
+ # TODO: is this property really useful? If needed one could just call directly `dataset.target_shape[-1]`.
+ if self.has_targets:
+ return self.target_shape[-1]
+
+ return None
def __len__(self):
return len(self.inputs.get_value())
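A minimal usage sketch of the patched `Dataset` interface above (assuming Theano is installed; `sharedX` casts arrays to `floatX`):

```python
import numpy as np
import theano.tensor as T

from smartlearner.interfaces.dataset import Dataset

inputs = np.random.randn(10, 5)         # 10 examples, 5 features
dataset = Dataset(inputs, name="demo")  # no targets -> symb_targets is None

# symb_inputs has the right ndim and carries the data as a test value,
# so THEANO_FLAGS="compute_test_value=warn" evaluates the graph eagerly.
sqr_norm = T.sum(dataset.symb_inputs ** 2)
print(dataset.input_shape)  # (5,)
```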
| Move tests to the root of the library.
That way the tests won't appear in the autocomplete of your preferred Python IDE. | SMART-Lab/smartlearner | diff --git a/tests/interfaces/test_dataset.py b/tests/interfaces/test_dataset.py
new file mode 100644
index 0000000..6378b93
--- /dev/null
+++ b/tests/interfaces/test_dataset.py
@@ -0,0 +1,140 @@
+import numpy as np
+import theano
+import theano.tensor as T
+
+from nose.tools import assert_true
+from numpy.testing import assert_equal, assert_array_equal
+
+from smartlearner.interfaces.dataset import Dataset
+
+floatX = theano.config.floatX
+ALL_DTYPES = np.sctypes['int'] + np.sctypes['uint'] + np.sctypes['float']
+
+
+def test_dataset_used_in_theano_function():
+ rng = np.random.RandomState(1234)
+
+ nb_examples = 10
+
+ inputs = (rng.randn(nb_examples, 5) * 100).astype(floatX)
+ targets = (rng.randn(nb_examples, 1) > 0.5).astype(floatX)
+ dataset = Dataset(inputs, targets)
+
+ input_sqr_norm = T.sum(dataset.symb_inputs**2)
+ result = input_sqr_norm - dataset.symb_targets
+ f = theano.function([dataset.symb_inputs, dataset.symb_targets], result)
+
+ assert_array_equal(f(inputs, targets), np.sum(inputs**2)-targets)
+
+
+def test_dataset_without_targets():
+ rng = np.random.RandomState(1234)
+
+ nb_examples = 10
+ nb_features = 3
+ sequences_length = 4
+ nb_channels = 2
+ image_shape = (5, 5)
+
+ # Test creating dataset with different example shapes:
+    # scalar feature, vector features, sequences of vector features, multi-channel image features.
+ for example_shape in [(), (nb_features,), (sequences_length, nb_features), (nb_channels,)+image_shape]:
+ inputs_shape = (nb_examples,) + example_shape
+
+ for dtype in ALL_DTYPES:
+ inputs = (rng.randn(*inputs_shape) * 100).astype(dtype)
+ dataset = Dataset(inputs)
+
+ # Data should be converted into `floatX`.
+ assert_equal(dataset.inputs.dtype, floatX)
+ assert_equal(dataset.symb_inputs.dtype, floatX)
+ assert_equal(dataset.symb_inputs.ndim, inputs.ndim)
+ assert_equal(dataset.input_shape, example_shape)
+ assert_array_equal(dataset.inputs.get_value(), inputs.astype(floatX))
+
+ # Everything related to target should be None
+ assert_true(dataset.targets is None)
+ assert_true(dataset.symb_targets is None)
+ assert_true(dataset.target_shape is None)
+ assert_true(dataset.target_size is None)
+
+    # Create dataset from nested Python lists.
+ inputs = [[1, 2, 3]] * nb_examples
+ dataset = Dataset(inputs)
+ # Data should be converted into `floatX`.
+ assert_equal(dataset.inputs.dtype, floatX)
+ assert_equal(dataset.symb_inputs.dtype, floatX)
+ assert_equal(dataset.symb_inputs.ndim, 2)
+ assert_equal(dataset.input_shape, (3,))
+ assert_array_equal(dataset.inputs.get_value(), np.array(inputs, dtype=floatX))
+
+
+def test_dataset_with_targets():
+ rng = np.random.RandomState(1234)
+
+ nb_examples = 10
+ nb_features = 3
+ sequences_length = 4
+ nb_channels = 2
+ image_shape = (5, 5)
+
+ # Test creating dataset with different example shapes and target shapes:
+    # scalar feature, vector features, sequences of vector features, multi-channel image features.
+ for target_shape in [(), (nb_features,), (sequences_length, nb_features), (nb_channels,)+image_shape]:
+ for example_shape in [(), (nb_features,), (sequences_length, nb_features), (nb_channels,)+image_shape]:
+ inputs_shape = (nb_examples,) + example_shape
+ targets_shape = (nb_examples,) + target_shape
+
+ for example_dtype in ALL_DTYPES:
+ for target_dtype in ALL_DTYPES:
+ inputs = (rng.randn(*inputs_shape) * 100).astype(example_dtype)
+ targets = (rng.randn(*targets_shape) * 100).astype(target_dtype)
+ dataset = Dataset(inputs, targets)
+
+ # Data should be converted into `floatX`.
+ assert_equal(dataset.inputs.dtype, floatX)
+ assert_equal(dataset.symb_inputs.dtype, floatX)
+ assert_equal(dataset.symb_inputs.ndim, inputs.ndim)
+ assert_equal(dataset.input_shape, example_shape)
+ assert_array_equal(dataset.inputs.get_value(), inputs.astype(floatX))
+
+ assert_equal(dataset.targets.dtype, floatX)
+ assert_equal(dataset.symb_targets.dtype, floatX)
+ assert_equal(dataset.symb_targets.ndim, targets.ndim)
+ assert_equal(dataset.target_shape, target_shape)
+ assert_array_equal(dataset.targets.get_value(), targets.astype(floatX))
+
+    # Create dataset from nested Python lists.
+ inputs = [[1, 2, 3]] * nb_examples
+ targets = [[1, 2, 3]] * nb_examples
+ dataset = Dataset(inputs, targets)
+ # Data should be converted into `floatX`.
+ assert_equal(dataset.inputs.dtype, floatX)
+ assert_equal(dataset.symb_inputs.dtype, floatX)
+ assert_equal(dataset.symb_inputs.ndim, 2)
+ assert_equal(dataset.input_shape, (3,))
+ assert_array_equal(dataset.inputs.get_value(), np.array(inputs, dtype=floatX))
+
+ assert_equal(dataset.targets.dtype, floatX)
+ assert_equal(dataset.symb_targets.dtype, floatX)
+ assert_equal(dataset.symb_targets.ndim, 2)
+ assert_equal(dataset.target_shape, (3,))
+ assert_array_equal(dataset.targets.get_value(), np.array(targets, dtype=floatX))
+
+
+def test_dataset_with_test_value():
+ rng = np.random.RandomState(1234)
+
+ nb_examples = 10
+
+ theano.config.compute_test_value = 'warn'
+ try:
+ inputs = (rng.randn(nb_examples, 5) * 100).astype(floatX)
+ targets = (rng.randn(nb_examples, 1) > 0.5).astype(floatX)
+ dataset = Dataset(inputs, targets)
+
+ input_sqr_norm = T.sum(dataset.symb_inputs**2)
+ result = input_sqr_norm - dataset.symb_targets
+ assert_array_equal(result.tag.test_value, np.sum(inputs**2)-targets)
+ finally:
+ theano.config.compute_test_value = 'off'
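The last test above leans on Theano's test-value mechanism; as a standalone sketch in plain Theano (independent of smartlearner):

```python
import numpy as np
import theano
import theano.tensor as T

theano.config.compute_test_value = 'warn'
try:
    x = T.matrix('x')
    x.tag.test_value = np.zeros((2, 3), dtype=theano.config.floatX)
    y = T.sum(x ** 2)        # evaluated eagerly on the tagged test value
    print(y.tag.test_value)  # 0.0
finally:
    theano.config.compute_test_value = 'off'
```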
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 2
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "theano numpy scipy nose pyparsing pip flake8 six pep8 pyflakes",
"pip_packages": [
"nose",
"nose-cov",
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cov-core==1.15.0
coverage==7.8.0
exceptiongroup==1.2.2
flake8 @ file:///croot/flake8_1726157165993/work
iniconfig==2.1.0
Mako @ file:///croot/mako_1665472421453/work
MarkupSafe @ file:///croot/markupsafe_1738584038848/work
mccabe @ file:///opt/conda/conda-bld/mccabe_1644221741721/work
nose @ file:///opt/conda/conda-bld/nose_1642704612149/work
nose-cov==1.6
numpy @ file:///opt/conda/conda-bld/numpy_and_numpy_base_1652801679809/work
packaging==24.2
pep8==1.7.1
pluggy==1.5.0
pycodestyle @ file:///croot/pycodestyle_1726150303809/work
pyflakes @ file:///croot/pyflakes_1708962956225/work
pygpu==0.7.6
pyparsing @ file:///croot/pyparsing_1731445506121/work
pytest==8.3.5
scipy @ file:///opt/conda/conda-bld/scipy_1661390393401/work
six @ file:///tmp/build/80754af9/six_1644875935023/work
-e git+https://github.com/SMART-Lab/smartlearner.git@b0877d3b961deceb273139f064985e39239351a7#egg=smartlearner
Theano==1.0.5
tomli==2.2.1
| name: smartlearner
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- _sysroot_linux-64_curr_repodata_hack=3=haa98f57_10
- binutils_impl_linux-64=2.35.1=h27ae35d_9
- binutils_linux-64=2.35.1=h454624a_30
- blas=1.0=openblas
- ca-certificates=2025.2.25=h06a4308_0
- fftw=3.3.9=h5eee18b_2
- flake8=7.1.1=py39h06a4308_0
- gcc_impl_linux-64=7.5.0=h7105cf2_17
- gcc_linux-64=7.5.0=h8f34230_30
- gxx_impl_linux-64=7.5.0=h0a5bf11_17
- gxx_linux-64=7.5.0=hffc177d_30
- kernel-headers_linux-64=3.10.0=h57e8cba_10
- ld_impl_linux-64=2.35.1=h7274673_9
- libffi=3.4.4=h6a678d5_1
- libgcc-devel_linux-64=7.5.0=hbbeae57_17
- libgcc-ng=11.2.0=h1234567_1
- libgfortran-ng=11.2.0=h00389a5_1
- libgfortran5=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libgpuarray=0.7.6=h7f8727e_1
- libopenblas=0.3.21=h043d6bf_0
- libstdcxx-devel_linux-64=7.5.0=hf0c5c8d_17
- libstdcxx-ng=11.2.0=h1234567_1
- mako=1.2.3=py39h06a4308_0
- markupsafe=3.0.2=py39h5eee18b_0
- mccabe=0.7.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- nose=1.3.7=pyhd3eb1b0_1008
- numpy=1.22.3=py39h7a5d4dd_0
- numpy-base=1.22.3=py39hb8be1f0_0
- openssl=3.0.16=h5eee18b_0
- pep8=1.7.1=py39h06a4308_1
- pip=25.0=py39h06a4308_0
- pycodestyle=2.12.1=py39h06a4308_0
- pyflakes=3.2.0=py39h06a4308_0
- pygpu=0.7.6=py39hce1f21e_1
- pyparsing=3.2.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- scipy=1.7.3=py39hf838250_2
- setuptools=75.8.0=py39h06a4308_0
- six=1.16.0=pyhd3eb1b0_1
- sqlite=3.45.3=h5eee18b_0
- sysroot_linux-64=2.17=h57e8cba_10
- theano=1.0.5=py39h295c915_1
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cov-core==1.15.0
- coverage==7.8.0
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- nose-cov==1.6
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- tomli==2.2.1
prefix: /opt/conda/envs/smartlearner
| [
"tests/interfaces/test_dataset.py::test_dataset_without_targets",
"tests/interfaces/test_dataset.py::test_dataset_with_targets"
]
| [
"tests/interfaces/test_dataset.py::test_dataset_used_in_theano_function",
"tests/interfaces/test_dataset.py::test_dataset_with_test_value"
]
| []
| []
| BSD 3-Clause "New" or "Revised" License | 212 | [
"smartlearner/interfaces/dataset.py",
"smartlearner/batch_scheduler.py"
]
| [
"smartlearner/interfaces/dataset.py",
"smartlearner/batch_scheduler.py"
]
|
|
Shopify__shopify_python_api-110 | 657517e9b83e0e99404d994047b3bfea3b73d310 | 2015-08-18 03:32:10 | c29e0ecbed9de67dd923f980a3ac053922dab75e | diff --git a/shopify/resources/image.py b/shopify/resources/image.py
index c5f9fd2..a0e82ef 100644
--- a/shopify/resources/image.py
+++ b/shopify/resources/image.py
@@ -8,6 +8,14 @@ import re
class Image(ShopifyResource):
_prefix_source = "/admin/products/$product_id/"
+ @classmethod
+ def _prefix(cls, options={}):
+ product_id = options.get("product_id")
+ if product_id:
+ return "/admin/products/%s" % (product_id)
+ else:
+ return "/admin"
+
def __getattr__(self, name):
if name in ["pico", "icon", "thumb", "small", "compact", "medium", "large", "grande", "original"]:
return re.sub(r"/(.*)\.(\w{2,4})", r"/\1_%s.\2" % (name), self.src)
@@ -24,3 +32,8 @@ class Image(ShopifyResource):
return []
query_params = { 'metafield[owner_id]': self.id, 'metafield[owner_resource]': 'product_image' }
return Metafield.find(from_ = '/admin/metafields.json?%s' % urllib.parse.urlencode(query_params))
+
+ def save(self):
+ if 'product_id' not in self._prefix_options:
+ self._prefix_options['product_id'] = self.product_id
+ return super(ShopifyResource, self).save()
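To see what the patch above buys us, a quick sketch of the resolved prefixes (the product id is illustrative, and an active `shopify` session is assumed for actual requests):

```python
import shopify

# With a product_id, image requests are scoped under the product:
shopify.Image._prefix({'product_id': 632910392})  # -> '/admin/products/632910392'

# Without one, the prefix falls back to the admin root:
shopify.Image._prefix()                           # -> '/admin'
```

The `save` override then backfills `product_id` into `_prefix_options`, which is what lets the snippet in the issue below POST to the correct endpoint.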
| Support for independently creating/modifying product images.
Please correct me if I'm wrong, but at the moment the only way to create/update product images through the Python API is via a POST/PUT call to the product endpoint.
As per the [Shopify API documentation on product images](http://docs.shopify.com/api/product_image), we should be able to make `POST` calls to `/admin/products/#{id}/images.json` and `PUT` calls to `/admin/products/#{id}/images/#{id}.json`. However, this doesn't currently work because the prefix for the `Image` class isn't built properly. Thus, something like this...
```python
image = shopify.Image()
image.src = "http://example.com/example.png"
image.product_id = 123456789
image.save()
```
... tries to POST to `/admin/products//images.json`, which returns a 406.
Looking at the source, I think a fix would be to implement the `prefix()` class method, similar to what's been done with the `Variant` class. Just wanted to run this by folks who might be more familiar with this to see if that might be a valid solution. | Shopify/shopify_python_api | diff --git a/test/image_test.py b/test/image_test.py
index 935f53b..1234898 100644
--- a/test/image_test.py
+++ b/test/image_test.py
@@ -13,6 +13,17 @@ class ImageTest(TestCase):
self.assertEqual('http://cdn.shopify.com/s/files/1/0006/9093/3842/products/ipod-nano.png?v=1389388540', image.src)
self.assertEqual(850703190, image.id)
+ def test_create_image_then_add_parent_id(self):
+ self.fake("products/632910392/images", method='POST', body=self.load_fixture('image'), headers={'Content-type': 'application/json'})
+ image = shopify.Image()
+ image.position = 1
+ image.product_id = 632910392
+ image.attachment = "R0lGODlhbgCMAPf/APbr48VySrxTO7IgKt2qmKQdJeK8lsFjROG5p/nz7Zg3MNmnd7Q1MLNVS9GId71hSJMZIuzTu4UtKbeEeakhKMl8U8WYjfr18YQaIbAf=="
+ image.save()
+
+ self.assertEqual('http://cdn.shopify.com/s/files/1/0006/9093/3842/products/ipod-nano.png?v=1389388540', image.src)
+ self.assertEqual(850703190, image.id)
+
def test_get_images(self):
self.fake("products/632910392/images", method='GET', body=self.load_fixture('images'))
image = shopify.Image.find(product_id=632910392)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_media"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 2.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup==1.2.2
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pyactiveresource==2.2.2
pytest==8.3.5
pytest-cov==6.0.0
PyYAML==6.0.2
-e git+https://github.com/Shopify/shopify_python_api.git@657517e9b83e0e99404d994047b3bfea3b73d310#egg=ShopifyAPI
six==1.17.0
tomli==2.2.1
| name: shopify_python_api
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pyactiveresource==2.2.2
- pytest==8.3.5
- pytest-cov==6.0.0
- pyyaml==6.0.2
- six==1.17.0
- tomli==2.2.1
prefix: /opt/conda/envs/shopify_python_api
| [
"test/image_test.py::ImageTest::test_create_image_then_add_parent_id"
]
| []
| [
"test/image_test.py::ImageTest::test_create_image",
"test/image_test.py::ImageTest::test_get_image",
"test/image_test.py::ImageTest::test_get_images",
"test/image_test.py::ImageTest::test_get_metafields_for_image"
]
| []
| MIT License | 213 | [
"shopify/resources/image.py"
]
| [
"shopify/resources/image.py"
]
|
|
marshmallow-code__marshmallow-262 | b8ad05b5342914e857c442d75e8abe9ea8f867fb | 2015-08-19 17:33:46 | b8ad05b5342914e857c442d75e8abe9ea8f867fb | diff --git a/AUTHORS.rst b/AUTHORS.rst
index b9a42126..26fc232f 100644
--- a/AUTHORS.rst
+++ b/AUTHORS.rst
@@ -40,3 +40,4 @@ Contributors (chronological)
- Kelvin Hammond `@kelvinhammond <https://github.com/kelvinhammond>`_
- Matt Stobo `@mwstobo <https://github.com/mwstobo>`_
- Max Orhai `@max-orhai <https://github.com/max-orhai>`_
+- Praveen `@praveen-p <https://github.com/praveen-p>`_
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 5090a42a..f1c43987 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -1,10 +1,7 @@
Changelog
---------
-2.0.0x (unreleased)
-+++++++++++++++++++
-
-2.0.0b5 (2015-08-23)
+2.0.0b5 (unreleased)
++++++++++++++++++++
Features:
@@ -17,11 +14,9 @@ Features:
Bug fixes:
- `make_object` is only called after all validators and postprocessors have finished (:issue:`253`). Thanks :user:`sunsongxp` for reporting.
-- If an invalid type is passed to ``Schema`` and ``strict=False``, store a ``_schema`` error in the errors dict rather than raise an exception (:issue:`261`). Thanks :user:`density` for reporting.
Other changes:
-- ``make_object`` is only called when input data are completely valid (:issue:`243`). Thanks :user:`kissgyorgy` for reporting.
- Change default error messages for ``URL`` and ``Email`` validators so that they don't include user input (:issue:`255`).
- ``Email`` validator permits email addresses with non-ASCII characters, as per RFC 6530 (:issue:`221`). Thanks :user:`lextoumbourou` for reporting and :user:`mwstobo` for sending the patch.
diff --git a/MANIFEST.in b/MANIFEST.in
index f6857893..b7f36481 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,11 +1,1 @@
-include *.rst LICENSE NOTICE
-recursive-include tests *
-recursive-include examples *
-recursive-include docs *
-recursive-exclude docs *.pyc
-recursive-exclude docs *.pyo
-recursive-exclude tests *.pyc
-recursive-exclude tests *.pyo
-recursive-exclude examples *.pyc
-recursive-exclude examples *.pyo
-prune docs/_build
+include *.rst LICENSE
\ No newline at end of file
diff --git a/docs/extending.rst b/docs/extending.rst
index 606c246c..fdae3460 100644
--- a/docs/extending.rst
+++ b/docs/extending.rst
@@ -184,7 +184,6 @@ Schema-level Validation
You can register schema-level validation functions for a :class:`Schema` using the :meth:`marshmallow.validates_schema <marshmallow.decorators.validates_schema>` decorator. Schema-level validation errors will be stored on the ``_schema`` key of the errors dictonary.
.. code-block:: python
- :emphasize-lines: 7
from marshmallow import Schema, fields, validates_schema, ValidationError
@@ -269,15 +268,16 @@ However, if you want to specify how values are accessed from an object, you can
return obj.get(key, default)
-Error Handlers and Accessors as Class Members
----------------------------------------------
+Handler Functions as Class Members
+----------------------------------
-You can register a Schema's error handler and accessor as optional class members. This might be useful for defining an abstract `Schema` class.
+You can register a Schema's error handler, validators, and accessor as optional class members. This might be useful for defining an abstract `Schema` class.
.. code-block:: python
class BaseSchema(Schema):
__error_handler__ = handle_errors # A function
+ __validators__ = [validate_schema] # List of functions
__accessor__ = get_from_dict # A function
@@ -362,15 +362,3 @@ Our application schemas can now inherit from our custom schema class.
result = ser.dump(user)
result.data # {"user": {"name": "Keith", "email": "[email protected]"}}
-Using Context
--------------
-
-The ``context`` attribute of a `Schema` is a general-purpose store for extra information that may be needed for (de)serialization. It may be used in both ``Schema`` and ``Field`` methods.
-
-.. code-block:: python
-
- schema = UserSchema()
- # Make current HTTP request available to
- # custom fields, schema methods, schema validators, etc.
- schema.context['request'] = request
- schema.dump(user)
diff --git a/marshmallow/__init__.py b/marshmallow/__init__.py
index fc41de39..b8581d21 100644
--- a/marshmallow/__init__.py
+++ b/marshmallow/__init__.py
@@ -13,7 +13,7 @@ from marshmallow.decorators import (
from marshmallow.utils import pprint, missing
from marshmallow.exceptions import MarshallingError, UnmarshallingError, ValidationError
-__version__ = '2.0.0.dev'
+__version__ = '2.0.0b5-dev'
__author__ = 'Steven Loria'
__license__ = 'MIT'
diff --git a/marshmallow/fields.py b/marshmallow/fields.py
index 0990f115..b45afb9b 100755
--- a/marshmallow/fields.py
+++ b/marshmallow/fields.py
@@ -810,6 +810,11 @@ class DateTime(Field):
return func(value)
except (TypeError, AttributeError, ValueError):
raise err
+ elif self.dateformat:
+ try:
+ return dt.datetime.strptime(value, self.dateformat)
+ except (TypeError, AttributeError, ValueError):
+ raise err
elif utils.dateutil_available:
try:
return utils.from_datestring(value)
diff --git a/marshmallow/marshalling.py b/marshmallow/marshalling.py
index 8bbb42dd..630c51f5 100644
--- a/marshmallow/marshalling.py
+++ b/marshmallow/marshalling.py
@@ -248,20 +248,13 @@ class Unmarshaller(ErrorStore):
key = fields_dict[attr_name].attribute or attr_name
try:
raw_value = data.get(attr_name, missing)
- except AttributeError: # Input data is not a dict
+ except AttributeError:
msg = 'Data must be a dict, got a {0}'.format(data.__class__.__name__)
- errors = self.get_errors(index=index)
- if strict:
- raise ValidationError(
- msg,
- field_names=[SCHEMA],
- fields=[]
- )
- else:
- errors = self.get_errors()
- errors.setdefault(SCHEMA, []).append(msg)
- # Input data type is incorrect, so we can bail out early
- break
+ raise ValidationError(
+ msg,
+ field_names=[attr_name],
+ fields=[field_obj]
+ )
field_name = attr_name
if raw_value is missing and field_obj.load_from:
field_name = field_obj.load_from
diff --git a/marshmallow/schema.py b/marshmallow/schema.py
index 950fce69..8b1e8f92 100644
--- a/marshmallow/schema.py
+++ b/marshmallow/schema.py
@@ -257,6 +257,8 @@ class BaseSchema(base.SchemaABC):
instead of failing silently and storing the errors.
:param bool many: Should be set to `True` if ``obj`` is a collection
so that the object will be serialized to a list.
+ :param bool skip_missing: If `True`, don't include key:value pairs in
+ serialized results if ``value`` is `None`.
:param dict context: Optional context passed to :class:`fields.Method` and
:class:`fields.Function` fields.
:param tuple load_only: A list or tuple of fields to skip during serialization
@@ -596,15 +598,9 @@ class BaseSchema(base.SchemaABC):
"""Override-able method that defines how to create the final deserialization
output. Defaults to noop (i.e. just return ``data`` as is).
- .. note::
-
- This method will only be invoked if when the input data are completely valid.
-
:param dict data: The deserialized data.
.. versionadded:: 1.0.0
- .. versionchanged:: 2.0.0
- Only invoked when data are valid.
"""
return data
@@ -656,7 +652,7 @@ class BaseSchema(base.SchemaABC):
result = self._invoke_load_processors(POST_LOAD, result, many)
- if not errors and postprocess:
+ if postprocess:
if many:
result = [self.make_object(each) for each in result]
else:
diff --git a/marshmallow/validate.py b/marshmallow/validate.py
index 4f8f7a26..680c9019 100644
--- a/marshmallow/validate.py
+++ b/marshmallow/validate.py
@@ -97,7 +97,7 @@ class Email(Validator):
"""
USER_REGEX = re.compile(
- r"(^[-!#$%&'*+/=?^`{}|~\w]+(\.[-!#$%&'*+/=?^`{}|~\w]+)*$" # dot-atom
+ r"(^[-!#$%&'*+/=?^_`{}|~0-9\w]+(\.[-!#$%&'*+/=?^_`{}|~0-9\w]+)*$" # dot-atom
# quoted-string
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]'
r'|\\[\001-\011\013\014\016-\177])*"$)', re.IGNORECASE | re.UNICODE)
diff --git a/tasks.py b/tasks.py
index b1b20780..04eab04a 100644
--- a/tasks.py
+++ b/tasks.py
@@ -12,7 +12,7 @@ build_dir = os.path.join(docs_dir, '_build')
def test():
"""Run the tests."""
flake()
- run('python setup.py test', echo=True)
+ run('python setup.py test', echo=True, pty=True)
@task
def flake():
@@ -48,7 +48,7 @@ def docs(clean=False, browse=False, watch=False):
"""Build the docs."""
if clean:
clean_docs()
- run("sphinx-build %s %s" % (docs_dir, build_dir), echo=True)
+ run("sphinx-build %s %s" % (docs_dir, build_dir), pty=True)
if browse:
browse_docs()
if watch:
@@ -69,7 +69,7 @@ def watch_docs():
@task
def readme(browse=False):
- run("rst2html.py README.rst > README.html")
+ run("rst2html.py README.rst > README.html", pty=True)
if browse:
webbrowser.open_new_tab('README.html')
| DateTime ignores date formatting string
The documentation for `marshmallow.fields.DateTime` says:
Parameters:
* format (str) – Either "rfc" (for RFC822), "iso" (for ISO8601), or a date format string. If None, defaults to "iso".
But the part `or a date format string` is not true. I would've expected this to accept date formatting strings as defined in the [`time` module](https://docs.python.org/2/library/time.html#time.strftime). However, if the `format` parameter is a date formatting string, it is ignored and the parsing is done using `dateutil` (I noticed this [here in the source code](https://github.com/marshmallow-code/marshmallow/blob/dev/marshmallow/fields.py#L771)).
It looks like the documentation is not consistent with the code here. I think it would be very valuable if you could indeed provide a date formatting string and marshmallow would use it with [`time.strptime`](https://docs.python.org/2/library/time.html#time.strptime) to parse it, instead of letting `dateutil` guess the format.
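For concreteness, a minimal sketch of the desired behaviour using only the standard library (the format string is illustrative, not part of marshmallow's API):

```python
import datetime as dt

fmt = '%H:%M:%S %Y-%m-%d'
datestring = dt.datetime.now().strftime(fmt)

# If `format` is neither "rfc" nor "iso", parse with the user-supplied
# format string instead of letting dateutil guess.
parsed = dt.datetime.strptime(datestring, fmt)
```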
I am willing to edit this in the source code, but I would like to discuss the desired behaviour first.
Cheers | marshmallow-code/marshmallow | diff --git a/tests/test_deserialization.py b/tests/test_deserialization.py
index 356f759d..d7c66630 100644
--- a/tests/test_deserialization.py
+++ b/tests/test_deserialization.py
@@ -288,6 +288,26 @@ class TestFieldDeserialization:
msg = 'Could not deserialize {0!r} to a datetime object.'.format(in_value)
assert msg in str(excinfo)
+ def test_custom_date_format_datetime_field_deserialization(self):
+
+ dtime = dt.datetime.now()
+ datestring = dtime.strftime('%H:%M:%S %Y-%m-%d')
+
+ field = fields.DateTime(format='%d-%m-%Y %H:%M:%S')
+        # Deserialization should fail when the datestring is not in the same format.
+ with pytest.raises(ValidationError) as excinfo:
+ field.deserialize(datestring)
+ msg = 'Could not deserialize {0!r} to a datetime object.'.format(datestring)
+ assert msg in str(excinfo)
+ field = fields.DateTime(format='%H:%M:%S %Y-%m-%d')
+ assert_datetime_equal(field.deserialize(datestring), dtime)
+
+ field = fields.DateTime()
+ if utils.dateutil_available:
+ assert_datetime_equal(field.deserialize(datestring), dtime)
+ else:
+ assert msg in str(excinfo)
+
@pytest.mark.parametrize('fmt', ['rfc', 'rfc822'])
def test_rfc_datetime_field_deserialization(self, fmt):
dtime = dt.datetime.now()
@@ -741,16 +761,6 @@ class TestSchemaDeserialization:
assert result.name == 'Monty'
assert_almost_equal(result.age, 42.3)
- # https://github.com/marshmallow-code/marshmallow/issues/243
- def test_make_object_not_called_if_data_are_invalid(self):
- class MySchema(Schema):
- email = fields.Email()
-
- def make_object(self, data):
- assert False, 'make_object should not have been called'
- result, errors = MySchema().load({'email': 'invalid'})
- assert 'email' in errors
-
# Regression test for https://github.com/marshmallow-code/marshmallow/issues/253
def test_validators_run_before_make_object(self):
class UserSchema(Schema):
@@ -1168,24 +1178,3 @@ def test_required_message_can_be_changed(message):
expected = [message] if isinstance(message, basestring) else message
assert expected == errs['age']
assert data == {}
-
-# Regression test for https://github.com/marshmallow-code/marshmallow/issues/261
-def test_deserialize_doesnt_raise_exception_if_strict_is_false_and_input_type_is_incorrect():
- class MySchema(Schema):
- foo = fields.Field()
- bar = fields.Field()
- data, errs = MySchema().load([])
- assert '_schema' in errs
- assert errs['_schema'] == ['Data must be a dict, got a list']
-
-
-def test_deserialize_raises_exception_if_strict_is_true_and_input_type_is_incorrect():
- class MySchema(Schema):
- foo = fields.Field()
- bar = fields.Field()
- with pytest.raises(ValidationError) as excinfo:
- MySchema(strict=True).load([])
- assert 'Data must be a dict, got a list' in str(excinfo)
- exc = excinfo.value
- assert exc.field_names == ['_schema']
- assert exc.fields == []
diff --git a/tests/test_schema.py b/tests/test_schema.py
index 0fda3b71..bfbf5dad 100644
--- a/tests/test_schema.py
+++ b/tests/test_schema.py
@@ -1706,7 +1706,7 @@ class TestNestedSchema:
schema = OuterSchema()
_, errors = schema.load({'inner': 1})
- assert errors['inner']['_schema'] == ['Data must be a dict, got a int']
+ assert errors['inner'] == ['Data must be a dict, got a int']
def test_missing_required_nested_field(self):
class Inner(Schema):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 10
} | 2.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"dev-requirements.txt",
"docs/requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster @ git+https://github.com/sloria/alabaster.git@667b1b676c6bf7226db057f098ec826d84d3ae40
babel==2.17.0
cachetools==5.5.2
certifi==2025.1.31
chardet==5.2.0
charset-normalizer==3.4.1
colorama==0.4.6
distlib==0.3.9
docutils==0.20.1
exceptiongroup==1.2.2
filelock==3.18.0
flake8==2.4.0
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
iniconfig==2.1.0
invoke==2.2.0
Jinja2==3.1.6
MarkupSafe==3.0.2
-e git+https://github.com/marshmallow-code/marshmallow.git@b8ad05b5342914e857c442d75e8abe9ea8f867fb#egg=marshmallow
mccabe==0.3.1
packaging==24.2
pep8==1.5.7
platformdirs==4.3.7
pluggy==1.5.0
pyflakes==0.8.1
Pygments==2.19.1
pyproject-api==1.9.0
pytest==8.3.5
python-dateutil==2.9.0.post0
pytz==2025.2
requests==2.32.3
six==1.17.0
snowballstemmer==2.2.0
Sphinx==7.2.6
sphinx-issues==0.2.0
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
tomli==2.2.1
tox==4.25.0
typing_extensions==4.13.0
urllib3==2.3.0
virtualenv==20.29.3
zipp==3.21.0
| name: marshmallow
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.11+sloria0
- babel==2.17.0
- cachetools==5.5.2
- certifi==2025.1.31
- chardet==5.2.0
- charset-normalizer==3.4.1
- colorama==0.4.6
- distlib==0.3.9
- docutils==0.20.1
- exceptiongroup==1.2.2
- filelock==3.18.0
- flake8==2.4.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- invoke==2.2.0
- jinja2==3.1.6
- markupsafe==3.0.2
- mccabe==0.3.1
- packaging==24.2
- pep8==1.5.7
- platformdirs==4.3.7
- pluggy==1.5.0
- pyflakes==0.8.1
- pygments==2.19.1
- pyproject-api==1.9.0
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- pytz==2025.2
- requests==2.32.3
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==7.2.6
- sphinx-issues==0.2.0
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- tomli==2.2.1
- tox==4.25.0
- typing-extensions==4.13.0
- urllib3==2.3.0
- virtualenv==20.29.3
- zipp==3.21.0
prefix: /opt/conda/envs/marshmallow
| [
"tests/test_deserialization.py::TestFieldDeserialization::test_custom_date_format_datetime_field_deserialization",
"tests/test_schema.py::TestNestedSchema::test_invalid_type_passed_to_nested_field"
]
| []
| [
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[String]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Integer]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Boolean]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Float]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Number]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[DateTime]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[LocalDateTime]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Time]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Date]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[TimeDelta]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Fixed]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Url]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Email]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[FormattedString]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[UUID]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Select]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_allow_none_deserialize_to_none[Decimal]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[String]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Integer]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Boolean]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Float]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Number]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[DateTime]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[LocalDateTime]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Time]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Date]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[TimeDelta]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Fixed]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Url]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Email]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[FormattedString]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[UUID]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Select]",
"tests/test_deserialization.py::TestDeserializingNone::test_fields_dont_allow_none_by_default[Decimal]",
"tests/test_deserialization.py::TestDeserializingNone::test_list_field_deserialize_none_to_empty_list",
"tests/test_deserialization.py::TestFieldDeserialization::test_float_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_float_field_deserialization[bad]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_float_field_deserialization[]",
"tests/test_deserialization.py::TestFieldDeserialization::test_integer_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_decimal_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_decimal_field_with_places",
"tests/test_deserialization.py::TestFieldDeserialization::test_decimal_field_with_places_and_rounding",
"tests/test_deserialization.py::TestFieldDeserialization::test_decimal_field_deserialization_string",
"tests/test_deserialization.py::TestFieldDeserialization::test_decimal_field_special_values",
"tests/test_deserialization.py::TestFieldDeserialization::test_decimal_field_special_values_not_permitted",
"tests/test_deserialization.py::TestFieldDeserialization::test_string_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_boolean_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_boolean_field_deserialization_with_custom_truthy_values",
"tests/test_deserialization.py::TestFieldDeserialization::test_boolean_field_deserialization_with_custom_truthy_values_invalid[notvalid]",
"tests/test_deserialization.py::TestFieldDeserialization::test_boolean_field_deserialization_with_custom_truthy_values_invalid[123]",
"tests/test_deserialization.py::TestFieldDeserialization::test_arbitrary_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_datetime_deserialization[not-a-datetime]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_datetime_deserialization[42]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_datetime_deserialization[]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_datetime_deserialization[in_value3]",
"tests/test_deserialization.py::TestFieldDeserialization::test_rfc_datetime_field_deserialization[rfc]",
"tests/test_deserialization.py::TestFieldDeserialization::test_rfc_datetime_field_deserialization[rfc822]",
"tests/test_deserialization.py::TestFieldDeserialization::test_iso_datetime_field_deserialization[iso]",
"tests/test_deserialization.py::TestFieldDeserialization::test_iso_datetime_field_deserialization[iso8601]",
"tests/test_deserialization.py::TestFieldDeserialization::test_localdatetime_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_time_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_time_field_deserialization[badvalue]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_time_field_deserialization[]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_time_field_deserialization[in_data2]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_time_field_deserialization[42]",
"tests/test_deserialization.py::TestFieldDeserialization::test_fixed_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_fixed_field_deserialize_invalid_value",
"tests/test_deserialization.py::TestFieldDeserialization::test_timedelta_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_timedelta_field_deserialization[]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_timedelta_field_deserialization[badvalue]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_timedelta_field_deserialization[in_value2]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_timedelta_field_deserialization[9999999999]",
"tests/test_deserialization.py::TestFieldDeserialization::test_date_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_date_field_deserialization[]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_date_field_deserialization[123]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_date_field_deserialization[in_value2]",
"tests/test_deserialization.py::TestFieldDeserialization::test_price_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_url_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_relative_url_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_email_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_function_field_deserialization_is_noop_by_default",
"tests/test_deserialization.py::TestFieldDeserialization::test_function_field_deserialization_with_callable",
"tests/test_deserialization.py::TestFieldDeserialization::test_uuid_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_uuid_deserialization[malformed]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_uuid_deserialization[123]",
"tests/test_deserialization.py::TestFieldDeserialization::test_invalid_uuid_deserialization[in_value2]",
"tests/test_deserialization.py::TestFieldDeserialization::test_deserialization_function_must_be_callable",
"tests/test_deserialization.py::TestFieldDeserialization::test_method_field_deserialization_is_noop_by_default",
"tests/test_deserialization.py::TestFieldDeserialization::test_deserialization_method",
"tests/test_deserialization.py::TestFieldDeserialization::test_deserialization_method_must_be_a_method",
"tests/test_deserialization.py::TestFieldDeserialization::test_enum_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_query_select_field_func_key_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_query_select_field_string_key_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_query_select_list_field_func_key_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_query_select_list_field_string_key_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_fixed_list_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_datetime_list_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_list_field_deserialize_single_value",
"tests/test_deserialization.py::TestFieldDeserialization::test_list_field_deserialize_invalid_value",
"tests/test_deserialization.py::TestFieldDeserialization::test_constant_field_deserialization",
"tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_user_validator_function",
"tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_user_validator_class_that_returns_bool",
"tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_user_validator_that_raises_error_with_list",
"tests/test_deserialization.py::TestFieldDeserialization::test_validator_must_return_false_to_raise_error",
"tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_validator_with_nonascii_input",
"tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_user_validators",
"tests/test_deserialization.py::TestFieldDeserialization::test_field_deserialization_with_custom_error_message",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_to_dict",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_missing_values",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_many",
"tests/test_deserialization.py::TestSchemaDeserialization::test_make_object",
"tests/test_deserialization.py::TestSchemaDeserialization::test_validators_run_before_make_object",
"tests/test_deserialization.py::TestSchemaDeserialization::test_make_object_many",
"tests/test_deserialization.py::TestSchemaDeserialization::test_exclude",
"tests/test_deserialization.py::TestSchemaDeserialization::test_nested_single_deserialization_to_dict",
"tests/test_deserialization.py::TestSchemaDeserialization::test_nested_list_deserialization_to_dict",
"tests/test_deserialization.py::TestSchemaDeserialization::test_none_deserialization",
"tests/test_deserialization.py::TestSchemaDeserialization::test_nested_none_deserialization",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_attribute_param",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_attribute_param_error_returns_field_name_not_attribute_name",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_attribute_param_error_returns_load_from_not_attribute_name",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_load_from_param",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_dump_only_param",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_missing_param_value",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_missing_param_callable",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialize_with_missing_param_none",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialization_returns_errors",
"tests/test_deserialization.py::TestSchemaDeserialization::test_deserialization_returns_errors_with_multiple_validators",
"tests/test_deserialization.py::TestSchemaDeserialization::test_strict_mode_deserialization",
"tests/test_deserialization.py::TestSchemaDeserialization::test_strict_mode_many",
"tests/test_deserialization.py::TestSchemaDeserialization::test_strict_mode_deserialization_with_multiple_validators",
"tests/test_deserialization.py::TestSchemaDeserialization::test_uncaught_validation_errors_are_stored",
"tests/test_deserialization.py::TestSchemaDeserialization::test_multiple_errors_can_be_stored_for_a_field",
"tests/test_deserialization.py::TestSchemaDeserialization::test_multiple_errors_can_be_stored_for_an_email_field",
"tests/test_deserialization.py::TestSchemaDeserialization::test_multiple_errors_can_be_stored_for_a_url_field",
"tests/test_deserialization.py::TestSchemaDeserialization::test_required_value_only_passed_to_validators_if_provided",
"tests/test_deserialization.py::TestValidation::test_integer_with_validator",
"tests/test_deserialization.py::TestValidation::test_integer_with_validators[field0]",
"tests/test_deserialization.py::TestValidation::test_integer_with_validators[field1]",
"tests/test_deserialization.py::TestValidation::test_integer_with_validators[field2]",
"tests/test_deserialization.py::TestValidation::test_float_with_validators[field0]",
"tests/test_deserialization.py::TestValidation::test_float_with_validators[field1]",
"tests/test_deserialization.py::TestValidation::test_float_with_validators[field2]",
"tests/test_deserialization.py::TestValidation::test_string_validator",
"tests/test_deserialization.py::TestValidation::test_function_validator",
"tests/test_deserialization.py::TestValidation::test_function_validators[field0]",
"tests/test_deserialization.py::TestValidation::test_function_validators[field1]",
"tests/test_deserialization.py::TestValidation::test_function_validators[field2]",
"tests/test_deserialization.py::TestValidation::test_method_validator",
"tests/test_deserialization.py::test_required_field_failure[String]",
"tests/test_deserialization.py::test_required_field_failure[Integer]",
"tests/test_deserialization.py::test_required_field_failure[Boolean]",
"tests/test_deserialization.py::test_required_field_failure[Float]",
"tests/test_deserialization.py::test_required_field_failure[Number]",
"tests/test_deserialization.py::test_required_field_failure[DateTime]",
"tests/test_deserialization.py::test_required_field_failure[LocalDateTime]",
"tests/test_deserialization.py::test_required_field_failure[Time]",
"tests/test_deserialization.py::test_required_field_failure[Date]",
"tests/test_deserialization.py::test_required_field_failure[TimeDelta]",
"tests/test_deserialization.py::test_required_field_failure[Fixed]",
"tests/test_deserialization.py::test_required_field_failure[Url]",
"tests/test_deserialization.py::test_required_field_failure[Email]",
"tests/test_deserialization.py::test_required_field_failure[UUID]",
"tests/test_deserialization.py::test_required_field_failure[Decimal]",
"tests/test_deserialization.py::test_required_enum",
"tests/test_deserialization.py::test_required_message_can_be_changed[My",
"tests/test_deserialization.py::test_required_message_can_be_changed[message1]",
"tests/test_deserialization.py::test_required_message_can_be_changed[message2]",
"tests/test_schema.py::test_serializing_basic_object[UserSchema]",
"tests/test_schema.py::test_serializing_basic_object[UserMetaSchema]",
"tests/test_schema.py::test_serializer_dump",
"tests/test_schema.py::test_dump_returns_dict_of_errors",
"tests/test_schema.py::test_dump_with_strict_mode_raises_error[UserSchema]",
"tests/test_schema.py::test_dump_with_strict_mode_raises_error[UserMetaSchema]",
"tests/test_schema.py::test_dump_resets_errors",
"tests/test_schema.py::test_load_resets_errors",
"tests/test_schema.py::test_dump_resets_error_fields",
"tests/test_schema.py::test_load_resets_error_fields",
"tests/test_schema.py::test_errored_fields_do_not_appear_in_output",
"tests/test_schema.py::test_load_many_stores_error_indices",
"tests/test_schema.py::test_dump_many",
"tests/test_schema.py::test_multiple_errors_can_be_stored_for_a_given_index",
"tests/test_schema.py::test_dump_many_stores_error_indices",
"tests/test_schema.py::test_dump_many_doesnt_stores_error_indices_when_index_errors_is_false",
"tests/test_schema.py::test_dump_returns_a_marshalresult",
"tests/test_schema.py::test_dumps_returns_a_marshalresult",
"tests/test_schema.py::test_dumping_single_object_with_collection_schema",
"tests/test_schema.py::test_loading_single_object_with_collection_schema",
"tests/test_schema.py::test_dumps_many",
"tests/test_schema.py::test_load_returns_an_unmarshalresult",
"tests/test_schema.py::test_load_many",
"tests/test_schema.py::test_loads_returns_an_unmarshalresult",
"tests/test_schema.py::test_loads_many",
"tests/test_schema.py::test_loads_deserializes_from_json",
"tests/test_schema.py::test_serializing_none",
"tests/test_schema.py::test_default_many_symmetry",
"tests/test_schema.py::TestValidate::test_validate_returns_errors_dict",
"tests/test_schema.py::TestValidate::test_validate_many",
"tests/test_schema.py::TestValidate::test_validate_many_doesnt_store_index_if_index_errors_option_is_false",
"tests/test_schema.py::TestValidate::test_validate_strict",
"tests/test_schema.py::TestValidate::test_validate_required",
"tests/test_schema.py::test_fields_are_not_copies[UserSchema]",
"tests/test_schema.py::test_fields_are_not_copies[UserMetaSchema]",
"tests/test_schema.py::test_dumps_returns_json",
"tests/test_schema.py::test_naive_datetime_field",
"tests/test_schema.py::test_datetime_formatted_field",
"tests/test_schema.py::test_datetime_iso_field",
"tests/test_schema.py::test_tz_datetime_field",
"tests/test_schema.py::test_local_datetime_field",
"tests/test_schema.py::test_class_variable",
"tests/test_schema.py::test_serialize_many[UserSchema]",
"tests/test_schema.py::test_serialize_many[UserMetaSchema]",
"tests/test_schema.py::test_inheriting_schema",
"tests/test_schema.py::test_custom_field",
"tests/test_schema.py::test_url_field",
"tests/test_schema.py::test_relative_url_field",
"tests/test_schema.py::test_stores_invalid_url_error[UserSchema]",
"tests/test_schema.py::test_stores_invalid_url_error[UserMetaSchema]",
"tests/test_schema.py::test_email_field[UserSchema]",
"tests/test_schema.py::test_email_field[UserMetaSchema]",
"tests/test_schema.py::test_stored_invalid_email",
"tests/test_schema.py::test_integer_field",
"tests/test_schema.py::test_fixed_field",
"tests/test_schema.py::test_as_string",
"tests/test_schema.py::test_decimal_field",
"tests/test_schema.py::test_price_field",
"tests/test_schema.py::test_extra",
"tests/test_schema.py::test_extra_many",
"tests/test_schema.py::test_method_field[UserSchema]",
"tests/test_schema.py::test_method_field[UserMetaSchema]",
"tests/test_schema.py::test_function_field",
"tests/test_schema.py::test_prefix[UserSchema]",
"tests/test_schema.py::test_prefix[UserMetaSchema]",
"tests/test_schema.py::test_fields_must_be_declared_as_instances",
"tests/test_schema.py::test_serializing_generator[UserSchema]",
"tests/test_schema.py::test_serializing_generator[UserMetaSchema]",
"tests/test_schema.py::test_serializing_empty_list_returns_empty_list",
"tests/test_schema.py::test_serializing_dict",
"tests/test_schema.py::test_serializing_dict_with_meta_fields",
"tests/test_schema.py::test_exclude_in_init[UserSchema]",
"tests/test_schema.py::test_exclude_in_init[UserMetaSchema]",
"tests/test_schema.py::test_only_in_init[UserSchema]",
"tests/test_schema.py::test_only_in_init[UserMetaSchema]",
"tests/test_schema.py::test_invalid_only_param",
"tests/test_schema.py::test_can_serialize_uuid",
"tests/test_schema.py::test_can_serialize_time",
"tests/test_schema.py::test_invalid_time",
"tests/test_schema.py::test_invalid_date",
"tests/test_schema.py::test_invalid_email",
"tests/test_schema.py::test_invalid_url",
"tests/test_schema.py::test_invalid_selection",
"tests/test_schema.py::test_custom_json",
"tests/test_schema.py::test_custom_error_message",
"tests/test_schema.py::test_load_errors_with_many",
"tests/test_schema.py::test_error_raised_if_fields_option_is_not_list",
"tests/test_schema.py::test_error_raised_if_additional_option_is_not_list",
"tests/test_schema.py::test_only_and_exclude",
"tests/test_schema.py::test_only_with_invalid_attribute",
"tests/test_schema.py::test_nested_only_and_exclude",
"tests/test_schema.py::test_nested_with_sets",
"tests/test_schema.py::test_meta_serializer_fields",
"tests/test_schema.py::test_meta_fields_mapping",
"tests/test_schema.py::test_meta_field_not_on_obj_raises_attribute_error",
"tests/test_schema.py::test_exclude_fields",
"tests/test_schema.py::test_fields_option_must_be_list_or_tuple",
"tests/test_schema.py::test_exclude_option_must_be_list_or_tuple",
"tests/test_schema.py::test_dateformat_option",
"tests/test_schema.py::test_default_dateformat",
"tests/test_schema.py::test_inherit_meta",
"tests/test_schema.py::test_inherit_meta_override",
"tests/test_schema.py::test_additional",
"tests/test_schema.py::test_cant_set_both_additional_and_fields",
"tests/test_schema.py::test_serializing_none_meta",
"tests/test_schema.py::TestErrorHandler::test_dump_with_custom_error_handler",
"tests/test_schema.py::TestErrorHandler::test_load_with_custom_error_handler",
"tests/test_schema.py::TestErrorHandler::test_validate_with_custom_error_handler",
"tests/test_schema.py::TestErrorHandler::test_multiple_serializers_with_same_error_handler",
"tests/test_schema.py::TestErrorHandler::test_setting_error_handler_class_attribute",
"tests/test_schema.py::TestSchemaValidator::test_validator_decorator_is_deprecated",
"tests/test_schema.py::TestSchemaValidator::test_validator_defined_on_class",
"tests/test_schema.py::TestSchemaValidator::test_validator_that_raises_error_with_dict",
"tests/test_schema.py::TestSchemaValidator::test_validator_that_raises_error_with_list",
"tests/test_schema.py::TestSchemaValidator::test_mixed_schema_validators",
"tests/test_schema.py::TestSchemaValidator::test_registered_validators_are_not_shared_with_ancestors",
"tests/test_schema.py::TestSchemaValidator::test_registered_validators_are_not_shared_with_children",
"tests/test_schema.py::TestSchemaValidator::test_inheriting_then_registering_validator",
"tests/test_schema.py::TestSchemaValidator::test_multiple_schema_errors_can_be_stored",
"tests/test_schema.py::TestSchemaValidator::test_schema_validation_error_with_stict_stores_correct_field_name",
"tests/test_schema.py::TestSchemaValidator::test_schema_validation_error_with_strict_when_field_is_specified",
"tests/test_schema.py::TestSchemaValidator::test_schema_validation_error_stored_on_multiple_fields",
"tests/test_schema.py::TestSchemaValidator::test_validator_with_strict",
"tests/test_schema.py::TestSchemaValidator::test_validator_defined_by_decorator",
"tests/test_schema.py::TestSchemaValidator::test_validators_are_inherited",
"tests/test_schema.py::TestSchemaValidator::test_uncaught_validation_errors_are_stored",
"tests/test_schema.py::TestSchemaValidator::test_validation_error_with_error_parameter",
"tests/test_schema.py::TestSchemaValidator::test_store_schema_validation_errors_on_specified_field",
"tests/test_schema.py::TestSchemaValidator::test_errors_are_cleared_on_load",
"tests/test_schema.py::TestSchemaValidator::test_errors_are_cleared_after_loading_collection",
"tests/test_schema.py::TestSchemaValidator::test_raises_error_with_list",
"tests/test_schema.py::TestSchemaValidator::test_raises_error_with_dict",
"tests/test_schema.py::TestSchemaValidator::test_nested_schema_validators",
"tests/test_schema.py::TestPreprocessors::test_preprocessor_decorator_is_deprecated",
"tests/test_schema.py::TestPreprocessors::test_preprocessors_defined_on_class",
"tests/test_schema.py::TestPreprocessors::test_registered_preprocessors_are_not_shared_with_ancestors",
"tests/test_schema.py::TestPreprocessors::test_registered_preprocessors_are_not_shared_with_children",
"tests/test_schema.py::TestPreprocessors::test_preprocessors_defined_by_decorator",
"tests/test_schema.py::TestDataHandler::test_data_handler_is_deprecated",
"tests/test_schema.py::TestDataHandler::test_schema_with_custom_data_handler",
"tests/test_schema.py::TestDataHandler::test_registered_data_handlers_are_not_shared_with_ancestors",
"tests/test_schema.py::TestDataHandler::test_registered_data_handlers_are_not_shared_with_children",
"tests/test_schema.py::TestDataHandler::test_serializer_with_multiple_data_handlers",
"tests/test_schema.py::TestDataHandler::test_setting_data_handlers_class_attribute",
"tests/test_schema.py::TestDataHandler::test_root_data_handler",
"tests/test_schema.py::test_schema_repr",
"tests/test_schema.py::TestNestedSchema::test_flat_nested",
"tests/test_schema.py::TestNestedSchema::test_nested_many_with_missing_attribute",
"tests/test_schema.py::TestNestedSchema::test_nested_with_attribute_none",
"tests/test_schema.py::TestNestedSchema::test_flat_nested2",
"tests/test_schema.py::TestNestedSchema::test_nested_field_does_not_validate_required",
"tests/test_schema.py::TestNestedSchema::test_nested_none",
"tests/test_schema.py::TestNestedSchema::test_nested",
"tests/test_schema.py::TestNestedSchema::test_nested_many_fields",
"tests/test_schema.py::TestNestedSchema::test_nested_meta_many",
"tests/test_schema.py::TestNestedSchema::test_nested_only",
"tests/test_schema.py::TestNestedSchema::test_exclude",
"tests/test_schema.py::TestNestedSchema::test_list_field",
"tests/test_schema.py::TestNestedSchema::test_list_field_parent",
"tests/test_schema.py::TestNestedSchema::test_nested_load_many",
"tests/test_schema.py::TestNestedSchema::test_nested_errors",
"tests/test_schema.py::TestNestedSchema::test_nested_strict",
"tests/test_schema.py::TestNestedSchema::test_nested_method_field",
"tests/test_schema.py::TestNestedSchema::test_nested_function_field",
"tests/test_schema.py::TestNestedSchema::test_nested_prefixed_field",
"tests/test_schema.py::TestNestedSchema::test_nested_prefixed_many_field",
"tests/test_schema.py::TestNestedSchema::test_invalid_float_field",
"tests/test_schema.py::TestNestedSchema::test_serializer_meta_with_nested_fields",
"tests/test_schema.py::TestNestedSchema::test_serializer_with_nested_meta_fields",
"tests/test_schema.py::TestNestedSchema::test_nested_fields_must_be_passed_a_serializer",
"tests/test_schema.py::TestNestedSchema::test_missing_required_nested_field",
"tests/test_schema.py::TestSelfReference::test_nesting_schema_within_itself",
"tests/test_schema.py::TestSelfReference::test_nesting_schema_by_passing_class_name",
"tests/test_schema.py::TestSelfReference::test_nesting_within_itself_meta",
"tests/test_schema.py::TestSelfReference::test_nested_self_with_only_param",
"tests/test_schema.py::TestSelfReference::test_multiple_nested_self_fields",
"tests/test_schema.py::TestSelfReference::test_nested_many",
"tests/test_schema.py::test_serialization_with_required_field",
"tests/test_schema.py::test_deserialization_with_required_field",
"tests/test_schema.py::test_deserialization_with_required_field_and_custom_validator",
"tests/test_schema.py::TestContext::test_context_method",
"tests/test_schema.py::TestContext::test_context_method_function",
"tests/test_schema.py::TestContext::test_function_field_raises_error_when_context_not_available",
"tests/test_schema.py::TestContext::test_fields_context",
"tests/test_schema.py::TestContext::test_nested_fields_inherit_context",
"tests/test_schema.py::test_serializer_can_specify_nested_object_as_attribute",
"tests/test_schema.py::TestFieldInheritance::test_inherit_fields_from_schema_subclass",
"tests/test_schema.py::TestFieldInheritance::test_inherit_fields_from_non_schema_subclass",
"tests/test_schema.py::TestFieldInheritance::test_inheritance_follows_mro",
"tests/test_schema.py::TestAccessor::test_accessor_is_used",
"tests/test_schema.py::TestAccessor::test_accessor_with_many",
"tests/test_schema.py::TestAccessor::test_accessor_decorator",
"tests/test_schema.py::TestRequiredFields::test_required_string_field_missing",
"tests/test_schema.py::TestRequiredFields::test_required_string_field_failure",
"tests/test_schema.py::TestRequiredFields::test_allow_none_param",
"tests/test_schema.py::TestRequiredFields::test_allow_none_custom_message",
"tests/test_schema.py::TestDefaults::test_missing_inputs_are_excluded_from_dump_output",
"tests/test_schema.py::TestDefaults::test_none_is_serialized_to_none",
"tests/test_schema.py::TestDefaults::test_default_and_value_missing",
"tests/test_schema.py::TestDefaults::test_loading_none",
"tests/test_schema.py::TestDefaults::test_missing_inputs_are_excluded_from_load_output",
"tests/test_schema.py::TestLoadOnly::test_load_only",
"tests/test_schema.py::TestLoadOnly::test_dump_only"
]
| []
| MIT License | 214 | [
"marshmallow/marshalling.py",
"marshmallow/schema.py",
"MANIFEST.in",
"tasks.py",
"AUTHORS.rst",
"marshmallow/validate.py",
"CHANGELOG.rst",
"docs/extending.rst",
"marshmallow/fields.py",
"marshmallow/__init__.py"
]
| [
"marshmallow/marshalling.py",
"marshmallow/schema.py",
"MANIFEST.in",
"tasks.py",
"AUTHORS.rst",
"marshmallow/validate.py",
"CHANGELOG.rst",
"docs/extending.rst",
"marshmallow/fields.py",
"marshmallow/__init__.py"
]
|
|
enthought__okonomiyaki-91 | 231ef105738438f69c19c1805d9df1e410b9630a | 2015-08-24 15:34:23 | 5faaff42f15508429bcbd588ccadb4a6e5bbaf97 | diff --git a/CHANGELOG b/CHANGELOG
index 5e8f7ce..b21e68e 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -5,6 +5,8 @@ Improvements:
* EnpkgVersion.from_string now handles versions of the form `1.3.0`, with the
build number being implicitly 0.
+ * EggMetadata.from_egg now handles eggs built for RedHat Enterprise
+ Linux 3 (#88).
Internals:
diff --git a/okonomiyaki/platforms/epd_platform.py b/okonomiyaki/platforms/epd_platform.py
index ec327a9..433605a 100644
--- a/okonomiyaki/platforms/epd_platform.py
+++ b/okonomiyaki/platforms/epd_platform.py
@@ -136,7 +136,9 @@ class EPDPlatform(HasTraits):
elif platform == "win32":
epd_name = "win"
elif platform.startswith("linux"):
- if osdist in (None, "RedHat_5"):
+ if osdist == "RedHat_3":
+ epd_name = "rh3"
+ elif osdist in (None, "RedHat_5"):
epd_name = "rh5"
elif osdist == "RedHat_6":
epd_name = "rh6"
| Okonomiyaki fails to parse existing RedHat3-built eggs
```python
metadata = file_formats.EggMetadata.from_egg(path)
  File "okonomiyaki/file_formats/_egg_info.py", line 627, in from_egg
    spec_depend = LegacySpecDepend.from_egg(path_or_file)
  File "okonomiyaki/file_formats/_egg_info.py", line 387, in from_egg
    return cls.from_string(spec_depend_string)
  File "okonomiyaki/file_formats/_egg_info.py", line 391, in from_string
    data, epd_platform = _normalized_info_from_string(spec_depend_string)
  File "okonomiyaki/file_formats/_egg_info.py", line 562, in _normalized_info_from_string
    epd_platform = _epd_platform_from_raw_spec(data)
  File "okonomiyaki/file_formats/_egg_info.py", line 307, in _epd_platform_from_raw_spec
    platform_string, osdist_string, arch_string
  File "okonomiyaki/platforms/epd_platform.py", line 135, in _from_spec_depend_data
    raise ValueError(msg)
ValueError: Unrecognized platform/osdist combination: 'linux2'/'RedHat_3'
``` | enthought/okonomiyaki | diff --git a/okonomiyaki/platforms/tests/test_epd_platform.py b/okonomiyaki/platforms/tests/test_epd_platform.py
index e2d8729..b566340 100644
--- a/okonomiyaki/platforms/tests/test_epd_platform.py
+++ b/okonomiyaki/platforms/tests/test_epd_platform.py
@@ -327,6 +327,8 @@ class TestGuessEPDPlatform(unittest.TestCase):
examples = (
(("linux2", None, "x86"),
EPDPlatform.from_epd_string("rh5-32"),),
+ (("linux2", "RedHat_3", "x86"),
+ EPDPlatform.from_epd_string("rh3-32"),),
(("linux2", "RedHat_5", "x86"),
EPDPlatform.from_epd_string("rh5-32"),),
(("linux2", "RedHat_5", "amd64"),
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 2
} | 0.9 | {
"env_vars": null,
"env_yml_path": [],
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [],
"python": "3.7",
"reqs_path": [
"dev_requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi @ file:///croot/certifi_1671487769961/work/certifi
coverage==7.2.7
docutils==0.20.1
enum34==1.1.10
exceptiongroup==1.2.2
flake8==5.0.4
haas==0.9.0
importlib-metadata==4.2.0
iniconfig==2.0.0
mccabe==0.7.0
mock==1.0.1
-e git+https://github.com/enthought/okonomiyaki.git@231ef105738438f69c19c1805d9df1e410b9630a#egg=okonomiyaki
packaging==24.0
pbr==6.1.1
pluggy==1.2.0
pycodestyle==2.9.1
pyflakes==2.5.0
pytest==7.4.4
statistics==1.0.3.5
stevedore==3.5.2
tomli==2.0.1
typing_extensions==4.7.1
zipfile2==0.0.12
zipp==3.15.0
| name: okonomiyaki
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=22.3.1=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.2.7
- docutils==0.20.1
- enum34==1.1.10
- exceptiongroup==1.2.2
- flake8==5.0.4
- haas==0.9.0
- importlib-metadata==4.2.0
- iniconfig==2.0.0
- mccabe==0.7.0
- mock==1.0.1
- packaging==24.0
- pbr==6.1.1
- pluggy==1.2.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pytest==7.4.4
- statistics==1.0.3.5
- stevedore==3.5.2
- tomli==2.0.1
- typing-extensions==4.7.1
- zipfile2==0.0.12
- zipp==3.15.0
prefix: /opt/conda/envs/okonomiyaki
| [
"okonomiyaki/platforms/tests/test_epd_platform.py::TestGuessEPDPlatform::test_from_spec_depend_data"
]
| []
| [
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatform::test_epd_platform_from_string",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatform::test_epd_platform_from_string_new_arch",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatform::test_epd_platform_from_string_new_names",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatform::test_epd_platform_from_string_new_names_underscore",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatform::test_from_running_python",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatform::test_from_running_system",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatform::test_guessed_epd_platform",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatform::test_short_names_consistency",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatformApplies::test_all",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatformApplies::test_applies_rh",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatformApplies::test_current_linux",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatformApplies::test_current_windows",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestGuessEPDPlatform::test_from_epd_platform_string",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestGuessEPDPlatform::test_from_epd_platform_string_invalid",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestGuessEPDPlatform::test_guess_darwin_platform",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestGuessEPDPlatform::test_guess_linux2_platform",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestGuessEPDPlatform::test_guess_linux2_unsupported",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestGuessEPDPlatform::test_guess_solaris_unsupported",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestGuessEPDPlatform::test_guess_win32_platform"
]
| []
| BSD License | 217 | [
"okonomiyaki/platforms/epd_platform.py",
"CHANGELOG"
]
| [
"okonomiyaki/platforms/epd_platform.py",
"CHANGELOG"
]
|
|
Yelp__swagger_spec_validator-36 | 0d6a8a94a5ce2ea1a5b19bd9a07cad85192b038c | 2015-08-24 21:30:09 | 0d6a8a94a5ce2ea1a5b19bd9a07cad85192b038c | diff --git a/CHANGELOG b/CHANGELOG
index 1624c2b..c55f47a 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,3 +1,7 @@
+Version 1.1.0 (2015-08-24)
+-------------
+ * Validate crossrefs - #33, #34
+
Version 1.0.12 (2015-07-02)
-------------
* Handle API level parameters - #29
diff --git a/swagger_spec_validator/__about__.py b/swagger_spec_validator/__about__.py
index 2ef8969..75e6007 100644
--- a/swagger_spec_validator/__about__.py
+++ b/swagger_spec_validator/__about__.py
@@ -7,7 +7,7 @@ __title__ = "swagger_spec_validator"
__summary__ = "Validation of Swagger specifications"
__uri__ = "http://github.com/Yelp/swagger_spec_validator"
-__version__ = "1.0.12"
+__version__ = "1.1.0"
__author__ = "John Billings"
__email__ = "[email protected]"
diff --git a/swagger_spec_validator/util.py b/swagger_spec_validator/util.py
index 1975850..ff60fc3 100644
--- a/swagger_spec_validator/util.py
+++ b/swagger_spec_validator/util.py
@@ -44,7 +44,9 @@ def validate_spec_url(spec_url):
:param spec_url:
For Swagger 1.2, this is the URL to the resource listing in api-docs.
- For Swagger 2.0, this is the URL to swagger.json in api-docs.
+ For Swagger 2.0, this is the URL to swagger.json in api-docs. If given
+ as `file://` this must be an absolute url for
+ cross-refs to work correctly.
"""
spec_json = load_json(spec_url)
validator = get_validator(spec_json, spec_url)
diff --git a/swagger_spec_validator/validator20.py b/swagger_spec_validator/validator20.py
index e1d1a94..4882f24 100644
--- a/swagger_spec_validator/validator20.py
+++ b/swagger_spec_validator/validator20.py
@@ -21,16 +21,17 @@ def validate_spec_url(spec_url):
:raises: :py:class:`swagger_spec_validator.SwaggerValidationError`
"""
log.info('Validating %s' % spec_url)
- validate_spec(load_json(spec_url))
+ validate_spec(load_json(spec_url), spec_url)
-def validate_spec(spec_json, _spec_url=None):
+def validate_spec(spec_json, spec_url=None):
"""Validates a Swagger 2.0 API Specification given a Swagger Spec.
:param spec_json: the json dict of the swagger spec.
:type spec_json: dict
- :param _spec_url: url serving the spec json (currently not used)
- :type _spec_url: string
+ :param spec_url: url serving the spec json. Used for dereferencing
+ relative refs. eg: file:///foo/swagger.json
+ :type spec_url: string
:returns: `None` in case of success, otherwise raises an exception.
:raises: :py:class:`swagger_spec_validator.SwaggerValidationError`
"""
@@ -38,7 +39,8 @@ def validate_spec(spec_json, _spec_url=None):
# Dereference all $refs so we don't have to deal with them
fix_malformed_model_refs(spec_json)
- spec_json = jsonref.JsonRef.replace_refs(spec_json)
+ spec_json = jsonref.JsonRef.replace_refs(spec_json,
+ base_uri=spec_url or '')
replace_jsonref_proxies(spec_json)
# TODO: Extract 'parameters', 'responses' from the spec as well.
@@ -159,7 +161,9 @@ def replace_jsonref_proxies(obj):
fragment[k] = v.__subject__
descend(fragment[k])
elif isinstance(fragment, list):
- for element in fragment:
+ for index, element in enumerate(fragment):
+ if isinstance(element, jsonref.JsonRef):
+ fragment[index] = element.__subject__
descend(element)
descend(obj)
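The core of the patch above is threading the spec URL into jsonref as `base_uri`, which is what lets relative references such as `pingpong.json#/ping` be dereferenced. A minimal illustration of the same call (the file path is a hypothetical stand-in):

```python
import jsonref

spec = {"paths": {"/ping": {"$ref": "pingpong.json#/ping"}}}

# With base_uri set, jsonref resolves the relative $ref against the
# spec's own location; without it there is nothing to resolve against.
resolved = jsonref.JsonRef.replace_refs(
    spec, base_uri="file:///abs/path/to/swagger.json")
```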
| Make use of spec_url being passed to `validate_spec`
`spec_url` [here](https://github.com/Yelp/swagger_spec_validator/blob/master/swagger_spec_validator/validator20.py#L27) is currently not being used. Use it to resolve `$ref`s. | Yelp/swagger_spec_validator | diff --git a/tests/data/v2.0/pingpong.json b/tests/data/v2.0/pingpong.json
new file mode 100644
index 0000000..541d9ee
--- /dev/null
+++ b/tests/data/v2.0/pingpong.json
@@ -0,0 +1,21 @@
+{
+ "ping": {
+ "get": {
+ "operationId": "ping",
+ "parameters": [
+ {
+ "name": "pung",
+ "in": "query",
+ "description": "true or false",
+ "type": "boolean"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "Successful response"
+ }
+ }
+ }
+ }
+}
+
diff --git a/tests/data/v2.0/relative_ref.json b/tests/data/v2.0/relative_ref.json
new file mode 100644
index 0000000..0c9f600
--- /dev/null
+++ b/tests/data/v2.0/relative_ref.json
@@ -0,0 +1,19 @@
+{
+ "swagger": "2.0",
+ "info": {
+ "version": "1.0.0",
+ "title": "Simple"
+ },
+ "tags": [
+ {
+ "name": "pingpong",
+ "description": "pingpong related specs"
+ }
+ ],
+ "paths": {
+ "/ping": {
+ "$ref": "pingpong.json#/ping"
+ }
+ }
+}
+
diff --git a/tests/validator20/validate_spec_url_test.py b/tests/validator20/validate_spec_url_test.py
index 890d7d2..4112eeb 100644
--- a/tests/validator20/validate_spec_url_test.py
+++ b/tests/validator20/validate_spec_url_test.py
@@ -1,5 +1,6 @@
import json
import mock
+import os
import pytest
from swagger_spec_validator.common import SwaggerValidationError
@@ -14,6 +15,13 @@ def test_success(petstore_contents):
mock_load_json.assert_called_once_with('http://localhost/api-docs')
+def test_success_crossref_url():
+ my_dir = os.path.abspath(os.path.dirname(__file__))
+ urlpath = "file://{0}".format(os.path.join(
+ my_dir, "../data/v2.0/relative_ref.json"))
+ validate_spec_url(urlpath)
+
+
def test_raise_SwaggerValidationError_on_urlopen_error():
with pytest.raises(SwaggerValidationError) as excinfo:
validate_spec_url('http://foo')
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 4
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"flake8",
"mock",
"coverage"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
coverage==7.8.0
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
flake8==7.2.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
jsonref==1.1.0
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
mccabe==0.7.0
mock==5.2.0
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pycodestyle==2.13.0
pyflakes==3.3.2
pytest @ file:///croot/pytest_1738938843180/work
referencing==0.36.2
rpds-py==0.24.0
six==1.17.0
-e git+https://github.com/Yelp/swagger_spec_validator.git@0d6a8a94a5ce2ea1a5b19bd9a07cad85192b038c#egg=swagger_spec_validator
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
typing_extensions==4.13.0
| name: swagger_spec_validator
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- coverage==7.8.0
- flake8==7.2.0
- jsonref==1.1.0
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- mccabe==0.7.0
- mock==5.2.0
- pycodestyle==2.13.0
- pyflakes==3.3.2
- referencing==0.36.2
- rpds-py==0.24.0
- six==1.17.0
- typing-extensions==4.13.0
prefix: /opt/conda/envs/swagger_spec_validator
| [
"tests/validator20/validate_spec_url_test.py::test_success_crossref_url"
]
| [
"tests/validator20/validate_spec_url_test.py::test_raise_SwaggerValidationError_on_urlopen_error"
]
| [
"tests/validator20/validate_spec_url_test.py::test_success"
]
| []
| Apache License 2.0 | 219 | [
"swagger_spec_validator/validator20.py",
"swagger_spec_validator/__about__.py",
"CHANGELOG",
"swagger_spec_validator/util.py"
]
| [
"swagger_spec_validator/validator20.py",
"swagger_spec_validator/__about__.py",
"CHANGELOG",
"swagger_spec_validator/util.py"
]
|
|
chimpler__pyhocon-42 | cfe9a3c34f0fb5aabfa7f5257b573058be84e490 | 2015-08-24 23:27:53 | 4683937b1d195ce2f53ca78987571e41bfe273e7 | diff --git a/pyhocon/config_parser.py b/pyhocon/config_parser.py
index a3a2191..1c966d6 100644
--- a/pyhocon/config_parser.py
+++ b/pyhocon/config_parser.py
@@ -83,6 +83,16 @@ class ConfigFactory(object):
"""
return ConfigParser().parse(content, basedir, resolve)
+ @staticmethod
+ def from_dict(dictionary):
+ """Convert dictionary (and ordered dictionary) into a ConfigTree
+ :param dictionary: dictionary to convert
+ :type dictionary: dict
+ :return: Config object
+ :type return: Config
+ """
+ return ConfigTree(dictionary)
+
class ConfigParser(object):
"""
diff --git a/pyhocon/config_tree.py b/pyhocon/config_tree.py
index 92b98a0..a11433d 100644
--- a/pyhocon/config_tree.py
+++ b/pyhocon/config_tree.py
@@ -18,6 +18,7 @@ class ConfigTree(OrderedDict):
def __init__(self, *args, **kwds):
super(ConfigTree, self).__init__(*args, **kwds)
+
for key, value in self.items():
if isinstance(value, ConfigValues):
value.parent = self
diff --git a/pyhocon/tool.py b/pyhocon/tool.py
index 267a862..35010bd 100644
--- a/pyhocon/tool.py
+++ b/pyhocon/tool.py
@@ -9,6 +9,7 @@ LOG_FORMAT = '%(asctime)s %(levelname)s: %(message)s'
class HOCONConverter(object):
+
@staticmethod
def to_json(config, indent=2, level=0):
"""Convert HOCON input into a JSON output
| How to dump HOCON file?
Hi,
Just wondering how to do this with pyhocon?
https://marcinkubala.wordpress.com/2013/10/09/typesafe-config-hocon/
Thanks. | chimpler/pyhocon | diff --git a/tests/test_config_parser.py b/tests/test_config_parser.py
index 16bd518..4c02aed 100644
--- a/tests/test_config_parser.py
+++ b/tests/test_config_parser.py
@@ -3,6 +3,10 @@ from pyparsing import ParseSyntaxException, ParseException
import pytest
from pyhocon import ConfigFactory, ConfigSubstitutionException
from pyhocon.exceptions import ConfigMissingException, ConfigWrongTypeException
+try: # pragma: no cover
+ from collections import OrderedDict
+except ImportError: # pragma: no cover
+ from ordereddict import OrderedDict
class TestConfigParser(object):
@@ -1143,3 +1147,22 @@ with-escaped-newline-escape-sequence: \"\"\"
assert config['with-escaped-backslash'] == '\n\\\\\n'
assert config['with-newline-escape-sequence'] == '\n\\n\n'
assert config['with-escaped-newline-escape-sequence'] == '\n\\\\n\n'
+
+ def test_from_dict_with_dict(self):
+ d = {
+ 'banana': 3,
+ 'apple': 4,
+ 'pear': 1,
+ 'orange': 2,
+ }
+ config = ConfigFactory.from_dict(d)
+ assert config == d
+
+ def test_from_dict_with_ordered_dict(self):
+ d = OrderedDict()
+ d['banana'] = 3
+ d['apple'] = 4
+ d['pear'] = 1
+ d['orange'] = 2
+ config = ConfigFactory.from_dict(d)
+ assert config == d
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 3,
"test_score": 3
},
"num_modified_files": 3
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
-e git+https://github.com/chimpler/pyhocon.git@cfe9a3c34f0fb5aabfa7f5257b573058be84e490#egg=pyhocon
pyparsing==2.0.3
pytest==8.3.5
tomli==2.2.1
| name: pyhocon
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pyparsing==2.0.3
- pytest==8.3.5
- tomli==2.2.1
prefix: /opt/conda/envs/pyhocon
| [
"tests/test_config_parser.py::TestConfigParser::test_from_dict_with_dict",
"tests/test_config_parser.py::TestConfigParser::test_from_dict_with_ordered_dict"
]
| []
| [
"tests/test_config_parser.py::TestConfigParser::test_parse_simple_value",
"tests/test_config_parser.py::TestConfigParser::test_parse_with_enclosing_brace",
"tests/test_config_parser.py::TestConfigParser::test_parse_with_enclosing_square_bracket",
"tests/test_config_parser.py::TestConfigParser::test_quoted_key_with_dots",
"tests/test_config_parser.py::TestConfigParser::test_comma_to_separate_expr",
"tests/test_config_parser.py::TestConfigParser::test_dict_merge",
"tests/test_config_parser.py::TestConfigParser::test_parse_with_comments",
"tests/test_config_parser.py::TestConfigParser::test_missing_config",
"tests/test_config_parser.py::TestConfigParser::test_parse_null",
"tests/test_config_parser.py::TestConfigParser::test_parse_empty",
"tests/test_config_parser.py::TestConfigParser::test_parse_override",
"tests/test_config_parser.py::TestConfigParser::test_concat_dict",
"tests/test_config_parser.py::TestConfigParser::test_concat_string",
"tests/test_config_parser.py::TestConfigParser::test_concat_list",
"tests/test_config_parser.py::TestConfigParser::test_bad_concat",
"tests/test_config_parser.py::TestConfigParser::test_string_substitutions",
"tests/test_config_parser.py::TestConfigParser::test_int_substitutions",
"tests/test_config_parser.py::TestConfigParser::test_cascade_string_substitutions",
"tests/test_config_parser.py::TestConfigParser::test_dict_substitutions",
"tests/test_config_parser.py::TestConfigParser::test_list_substitutions",
"tests/test_config_parser.py::TestConfigParser::test_non_existent_substitution",
"tests/test_config_parser.py::TestConfigParser::test_non_compatible_substitution",
"tests/test_config_parser.py::TestConfigParser::test_concat_multi_line_string",
"tests/test_config_parser.py::TestConfigParser::test_concat_multi_line_list",
"tests/test_config_parser.py::TestConfigParser::test_concat_multi_line_dict",
"tests/test_config_parser.py::TestConfigParser::test_parse_URL_from_samples",
"tests/test_config_parser.py::TestConfigParser::test_include_dict_from_samples",
"tests/test_config_parser.py::TestConfigParser::test_list_of_dicts",
"tests/test_config_parser.py::TestConfigParser::test_list_of_lists",
"tests/test_config_parser.py::TestConfigParser::test_list_of_dicts_with_merge",
"tests/test_config_parser.py::TestConfigParser::test_list_of_lists_with_merge",
"tests/test_config_parser.py::TestConfigParser::test_invalid_assignment",
"tests/test_config_parser.py::TestConfigParser::test_invalid_dict",
"tests/test_config_parser.py::TestConfigParser::test_include_list",
"tests/test_config_parser.py::TestConfigParser::test_include_dict",
"tests/test_config_parser.py::TestConfigParser::test_include_substitution",
"tests/test_config_parser.py::TestConfigParser::test_substitution_override",
"tests/test_config_parser.py::TestConfigParser::test_substitution_flat_override",
"tests/test_config_parser.py::TestConfigParser::test_substitution_nested_override",
"tests/test_config_parser.py::TestConfigParser::test_optional_substitution",
"tests/test_config_parser.py::TestConfigParser::test_substitution_cycle",
"tests/test_config_parser.py::TestConfigParser::test_assign_number_with_eol",
"tests/test_config_parser.py::TestConfigParser::test_assign_strings_with_eol",
"tests/test_config_parser.py::TestConfigParser::test_assign_list_numbers_with_eol",
"tests/test_config_parser.py::TestConfigParser::test_assign_list_strings_with_eol",
"tests/test_config_parser.py::TestConfigParser::test_assign_dict_strings_with_equal_sign_with_eol",
"tests/test_config_parser.py::TestConfigParser::test_assign_dict_strings_no_equal_sign_with_eol",
"tests/test_config_parser.py::TestConfigParser::test_substitutions_overwrite",
"tests/test_config_parser.py::TestConfigParser::test_fallback_substitutions_overwrite",
"tests/test_config_parser.py::TestConfigParser::test_fallback_substitutions_overwrite_file",
"tests/test_config_parser.py::TestConfigParser::test_one_line_quote_escape",
"tests/test_config_parser.py::TestConfigParser::test_multi_line_escape"
]
| []
| Apache License 2.0 | 220 | [
"pyhocon/tool.py",
"pyhocon/config_tree.py",
"pyhocon/config_parser.py"
]
| [
"pyhocon/tool.py",
"pyhocon/config_tree.py",
"pyhocon/config_parser.py"
]
|
|
mapbox__mapbox-sdk-py-21 | 7859e5abc3ad69a001a24bb9ae66dcdd38da064c | 2015-08-25 17:27:19 | 7859e5abc3ad69a001a24bb9ae66dcdd38da064c | diff --git a/mapbox/compat.py b/mapbox/compat.py
index baf551f..a46774e 100644
--- a/mapbox/compat.py
+++ b/mapbox/compat.py
@@ -4,7 +4,4 @@ import itertools
import sys
-if sys.version_info < (3,):
- map = itertools.imap
-else:
- map = map
+map = itertools.imap if sys.version_info < (3,) else map
diff --git a/mapbox/scripts/cli.py b/mapbox/scripts/cli.py
index 7a9f867..5159758 100644
--- a/mapbox/scripts/cli.py
+++ b/mapbox/scripts/cli.py
@@ -23,12 +23,27 @@ def configure_logging(verbosity):
@click.group()
@cligj.verbose_opt
@cligj.quiet_opt
[email protected]('--access-token', help="Your Mapbox access token.")
@click.version_option(version=mapbox.__version__, message='%(version)s')
@click.pass_context
-def main_group(ctx, verbose, quiet):
- """Command line interface to Mapbox web services.
+def main_group(ctx, verbose, quiet, access_token):
+ """This is the command line interface to Mapbox web services.
+
+ Mapbox web services require an access token. Your token is shown
+ on the https://www.mapbox.com/developers/api/ page when you are
+ logged in. The token can be provided on the command line
+
+ $ mbx --access-token MY_TOKEN ...
+
+ or as an environment variable.
+
+ \b
+ $ export MapboxAccessToken=MY_TOKEN
+ $ mbx ...
+
"""
verbosity = verbose - quiet
configure_logging(verbosity)
ctx.obj = {}
ctx.obj['verbosity'] = verbosity
+ ctx.obj['access_token'] = access_token
diff --git a/mapbox/scripts/geocoder.py b/mapbox/scripts/geocoder.py
index 3348b7f..4705242 100644
--- a/mapbox/scripts/geocoder.py
+++ b/mapbox/scripts/geocoder.py
@@ -34,15 +34,14 @@ def coords_from_query(query):
@click.command(short_help="Geocode an address or coordinates.")
@click.argument('query', default='-', required=False)
[email protected]('--access-token', help="Your access token")
@click.option(
'--forward/--reverse',
default=True,
help="Perform a forward or reverse geocode. [default: forward]")
@click.pass_context
-def geocode(ctx, query, access_token, forward):
- """Get coordinates for an address (forward geocoding) or addresses
- for coordinates (reverse geocoding)
+def geocode(ctx, query, forward):
+ """This command gets coordinates for an address (forward mode) or
+ addresses for coordinates (reverse mode).
In forward (the default) mode the query argument shall be an address
such as '1600 pennsylvania ave nw'.
@@ -57,6 +56,8 @@ def geocode(ctx, query, access_token, forward):
"""
verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 2
logger = logging.getLogger('mapbox')
+
+ access_token = (ctx.obj and ctx.obj.get('access_token')) or None
geocoder = mapbox.Geocoder(access_token=access_token)
if forward:
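A stripped-down sketch of the click pattern the patch adopts: declare the option once on the group, stash it on the shared context object, and let subcommands read it from there (the command names here are illustrative):

```python
import click


@click.group()
@click.option('--access-token', help="Your Mapbox access token.")
@click.pass_context
def cli(ctx, access_token):
    # The group stores the token on ctx.obj so subcommands never have
    # to redeclare the option themselves.
    ctx.obj = {'access_token': access_token}


@cli.command()
@click.pass_context
def geocode(ctx):
    click.echo('token: {}'.format(ctx.obj.get('access_token')))
```

Invoked as `cli --access-token MY_TOKEN geocode`, mirroring the new `mbx --access-token MY_TOKEN geocode` form described in the issue below.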
| Move --access-token option from command to group
New commands won't have to implement the option if it's on the group.
This means `mbx --access-token MY_TOKEN geocode` instead of `mbx geocode --access-token MY_TOKEN`.
Support for the token environment variable will be unchanged. | mapbox/mapbox-sdk-py | diff --git a/tests/test_cli.py b/tests/test_cli.py
index ad1a75d..278edfe 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -41,7 +41,7 @@ def test_cli_geocode_fwd():
runner = CliRunner()
result = runner.invoke(
main_group,
- ['geocode', '--forward', '1600 pennsylvania ave nw', '--access-token', 'bogus'])
+ ['--access-token', 'bogus', 'geocode', '--forward', '1600 pennsylvania ave nw'])
assert result.exit_code == 0
assert result.output == '{"query": ["1600", "pennsylvania", "ave", "nw"]}\n'
@@ -81,7 +81,7 @@ def test_cli_geocode_reverse():
runner = CliRunner()
result = runner.invoke(
main_group,
- ['geocode', '--reverse', '--access-token', 'pk.test'],
+ ['--access-token', 'pk.test', 'geocode', '--reverse'],
input=','.join([str(x) for x in coords]))
assert result.exit_code == 0
assert result.output == '{"query": %s}\n' % json.dumps(coords)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 3
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": null,
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
click==8.1.8
click-plugins==1.1.1
cligj==0.7.2
coverage==7.8.0
coveralls==4.0.1
docopt==0.6.2
exceptiongroup==1.2.2
idna==3.10
iniconfig==2.1.0
-e git+https://github.com/mapbox/mapbox-sdk-py.git@7859e5abc3ad69a001a24bb9ae66dcdd38da064c#egg=mapbox
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytest-cov==6.0.0
PyYAML==6.0.2
requests==2.32.3
responses==0.25.7
tomli==2.2.1
uritemplate==4.1.1
uritemplate.py==3.0.2
urllib3==2.3.0
| name: mapbox-sdk-py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- click==8.1.8
- click-plugins==1.1.1
- cligj==0.7.2
- coverage==7.8.0
- coveralls==4.0.1
- docopt==0.6.2
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-cov==6.0.0
- pyyaml==6.0.2
- requests==2.32.3
- responses==0.25.7
- tomli==2.2.1
- uritemplate==4.1.1
- uritemplate-py==3.0.2
- urllib3==2.3.0
prefix: /opt/conda/envs/mapbox-sdk-py
| [
"tests/test_cli.py::test_cli_geocode_fwd"
]
| [
"tests/test_cli.py::test_coords_from_query_csv",
"tests/test_cli.py::test_coords_from_query_ws",
"tests/test_cli.py::test_cli_geocode_reverse",
"tests/test_cli.py::test_cli_geocode_reverse_env_token",
"tests/test_cli.py::test_cli_geocode_rev_unauthorized"
]
| [
"tests/test_cli.py::test_iter_query_string",
"tests/test_cli.py::test_iter_query_file",
"tests/test_cli.py::test_coords_from_query_json",
"tests/test_cli.py::test_cli_geocode_fwd_env_token",
"tests/test_cli.py::test_cli_geocode_unauthorized"
]
| []
| MIT License | 221 | [
"mapbox/compat.py",
"mapbox/scripts/cli.py",
"mapbox/scripts/geocoder.py"
]
| [
"mapbox/compat.py",
"mapbox/scripts/cli.py",
"mapbox/scripts/geocoder.py"
]
|
|
praw-dev__praw-509 | fea4a6df0508c93e200ef9fcacecf2b423f2aa27 | 2015-08-25 23:23:35 | c45e5f6ca0c5cd9968b51301989eb82740f8dc85 | diff --git a/CHANGES.rst b/CHANGES.rst
index 5e55a57e..4cd5cc03 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -22,13 +22,35 @@ formatted links that link to the relevant place in the code overview.
.. begin_changelog_body
-UNRELEASED
+Unreleased
----------
* **[BUGFIX]** Fixed login password prompt issue on windows (#485).
* **[BUGFIX]** Fixed unicode user-agent issue (#483).
* **[BUGFIX]** Fix duplicate request issue with comment and submission streams
(#501).
+ * **[BUGFIX]** Stopped :meth:`praw.objects.Redditor.friend` from raising
+ LoginRequired when using OAuth.
+ * **[BUGFIX]** Stopped a json-parsing error from occuring in cases where
+ reddit's response to a request was an empty string. :meth:`request_json`
+ will now simply return that empty string.
* **[CHANGE]** Added messages to all PRAW exceptions (#491).
+ * **[CHANGE]** Made it easier to send JSON dumps instead of form-encoded data
+ for http requests. Some api-v1 endpoints require the request body to be in
+ the json format.
+ * **[CHANGE]** Moved and deprecated
+ :meth:`praw.objects.LoggedInRedditor.get_friends` to
+ :class:`praw.AuthenticatedReddit`, leaving a pointer in its place.
+ Previously, `get_friends` was difficult to access because the only instance
+ of `LoggedInRedditor` was the reddit session's `user` attribute, which is
+ only instantiated if the user has the "identity" scope. By moving
+ `get_friends` to the reddit session, it can be used without having to
+ manipulate a :class:`praw.objects.Redditor` intsance's class.
+ * **[FEATURE]** Added support for adding "notes" to your friends. Users with
+ reddit Gold can set the ``note`` parameter of
+ :meth:`praw.objects.Redditor.friend`. 300 character max enforced by reddit.
+ * **[FEATURE]** New :meth:`praw.objects.Redditor.get_friend_info` to see info
+ about one of your friends. Includes their name, ID, when you added them, and
+ if you have reddit Gold, your note about them.
PRAW 3.2.1
----------
diff --git a/praw/__init__.py b/praw/__init__.py
index 398d64b5..6a31d2dc 100644
--- a/praw/__init__.py
+++ b/praw/__init__.py
@@ -102,6 +102,7 @@ class Config(object): # pylint: disable=R0903
'flairselector': 'api/flairselector/',
'flairtemplate': 'api/flairtemplate/',
'friend': 'api/friend/',
+ 'friend_v1': 'api/v1/me/friends/{user}',
'friends': 'prefs/friends/',
'gild_thing': 'api/v1/gold/gild/{fullname}/',
'gild_user': 'api/v1/gold/give/{username}/',
@@ -612,6 +613,12 @@ class BaseReddit(object):
hook = self._json_reddit_objecter if as_objects else None
# Request url just needs to be available for the objecter to use
self._request_url = url # pylint: disable=W0201
+
+ if response == '':
+ # Some of the v1 urls don't return anything, even when they're
+ # successful.
+ return response
+
data = json.loads(response, object_hook=hook)
delattr(self, '_request_url')
# Update the modhash
@@ -1321,6 +1328,12 @@ class AuthenticatedReddit(OAuth2Reddit, UnauthenticatedReddit):
data = {'r': six.text_type(subreddit), 'link': link}
return self.request_json(self.config['flairselector'], data=data)
+ @decorators.restrict_access(scope='read', login=True)
+ def get_friends(self, **params):
+ """Return a UserList of Redditors with whom the user is friends."""
+ url = self.config['friends']
+ return self.request_json(url, params=params)[0]
+
@decorators.restrict_access(scope='identity', oauth_only=True)
def get_me(self):
"""Return a LoggedInRedditor object.
diff --git a/praw/internal.py b/praw/internal.py
index 7967eb24..62eefe7c 100644
--- a/praw/internal.py
+++ b/praw/internal.py
@@ -158,10 +158,15 @@ def _prepare_request(reddit_session, url, params, data, auth, files,
# Most POST requests require adding `api_type` and `uh` to the data.
if data is True:
data = {}
- if not auth:
- data.setdefault('api_type', 'json')
- if reddit_session.modhash:
- data.setdefault('uh', reddit_session.modhash)
+
+ if isinstance(data, dict):
+ if not auth:
+ data.setdefault('api_type', 'json')
+ if reddit_session.modhash:
+ data.setdefault('uh', reddit_session.modhash)
+ else:
+ request.headers.setdefault('Content-Type', 'application/json')
+
request.data = data
request.files = files
return request
diff --git a/praw/objects.py b/praw/objects.py
index f6a0822a..9f9d557b 100755
--- a/praw/objects.py
+++ b/praw/objects.py
@@ -843,14 +843,38 @@ class Redditor(Gildable, Messageable, Refreshable):
self._case_name = self.name
self.name = tmp
- def friend(self):
+ @restrict_access(scope='subscribe')
+ def friend(self, note=None, _unfriend=False):
"""Friend the user.
+ :param note: A personal note about the user. Requires reddit Gold.
+ :param _unfriend: Unfriend the user. Please use :meth:`unfriend`
+ instead of setting this parameter manually.
+
:returns: The json response from the server.
"""
self.reddit_session.evict(self.reddit_session.config['friends'])
- return _modify_relationship('friend')(self.reddit_session.user, self)
+
+ # Requests through password auth use /api/friend
+ # Requests through oauth use /api/v1/me/friends/%username%
+ if not self.reddit_session.is_oauth_session():
+ modifier = _modify_relationship('friend', unlink=_unfriend)
+ data = {'note': note} if note else {}
+ return modifier(self.reddit_session.user, self, **data)
+
+ url = self.reddit_session.config['friend_v1'].format(user=self.name)
+ # This endpoint wants the data to be a string instead of an actual
+ # dictionary, although it is not required to have any content for adds.
+ # Unfriending does require the 'id' key.
+ if _unfriend:
+ data = {'id': self.name}
+ else:
+ # We cannot send a null or empty note string.
+ data = {'note': note} if note else {}
+ data = dumps(data)
+ method = 'DELETE' if _unfriend else 'PUT'
+ return self.reddit_session.request_json(url, data=data, method=method)
def get_disliked(self, *args, **kwargs):
"""Return a listing of the Submissions the user has downvoted.
@@ -881,6 +905,20 @@ class Redditor(Gildable, Messageable, Refreshable):
kwargs['_use_oauth'] = self.reddit_session.is_oauth_session()
return _get_redditor_listing('downvoted')(self, *args, **kwargs)
+ @restrict_access(scope='mysubreddits')
+ def get_friend_info(self):
+ """Return information about this friend, including personal notes.
+
+ The personal note can be added or overwritten with :meth:friend, but
+ only if the user has reddit Gold.
+
+ :returns: The json response from the server.
+
+ """
+ url = self.reddit_session.config['friend_v1'].format(user=self.name)
+ data = {'id': self.name}
+ return self.reddit_session.request_json(url, data=data, method='GET')
+
def get_liked(self, *args, **kwargs):
"""Return a listing of the Submissions the user has upvoted.
@@ -935,9 +973,7 @@ class Redditor(Gildable, Messageable, Refreshable):
:returns: The json response from the server.
"""
- self.reddit_session.evict(self.reddit_session.config['friends'])
- return _modify_relationship('friend', unlink=True)(
- self.reddit_session.user, self)
+ return self.friend(_unfriend=True)
class LoggedInRedditor(Redditor):
@@ -965,10 +1001,16 @@ class LoggedInRedditor(Redditor):
self._mod_subs[six.text_type(sub).lower()] = sub
return self._mod_subs
+ @deprecated(':meth:`get_friends` has been moved to '
+ ':class:`praw.AuthenticatedReddit` and will be removed from '
+ ':class:`objects.LoggedInRedditor` in PRAW v4.0.0')
def get_friends(self, **params):
- """Return a UserList of Redditors with whom the user has friended."""
- url = self.reddit_session.config['friends']
- return self.reddit_session.request_json(url, params=params)[0]
+ """Return a UserList of Redditors with whom the user is friends.
+
+ This method has been moved to :class:`praw.AuthenticatedReddit.
+
+ """
+ return self.reddit_session.get_friends(**params)
class ModAction(RedditContentObject):
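At the HTTP level, the OAuth path added above amounts to a PUT against `/api/v1/me/friends/{user}` with a JSON body rather than form-encoded data, which appears to be why the form-encoded attempt in the issue below drew an HTTP 400. A rough equivalent with requests (the token, username, and note are placeholders):

```python
import json
import requests

resp = requests.put(
    'https://oauth.reddit.com/api/v1/me/friends/some_user',
    headers={'Authorization': 'bearer <access_token>',
             'Content-Type': 'application/json'},
    # The body must be a JSON document; a "note" may be included,
    # though reddit only honors it for accounts with reddit Gold.
    data=json.dumps({'note': 'met at a meetup'}))
```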
| Redditor.friend() raises LoginRequired through OAuth; doesn't use suggested URL
I decided to try playing around with reddit's Friend system today, and found that Redditor.friend() raises a LoginRequired when used through OAuth.
First, I found that internal.py [chooses a restrict_access decorator with scope=None](https://github.com/praw-dev/praw/blob/c05bab70ff4b26967ca0b114436e0edad24aa6aa/praw/internal.py#L89) causing [all these checks](https://github.com/praw-dev/praw/blob/31d4984fd66930dbc886dfedb27c835dc6609262/praw/decorators.py#L329-L342) to fail until it finally gets to `elif login` and raises the exception. I tried changing that to scope='subscribers', but this causes an OAuthException with the message *"praw.errors.OAuthException: Bearer realm="reddit", error="invalid_request" on url https://oauth.reddit.com/api/friend/.json"*
At this point I looked at the [api docs](https://www.reddit.com/dev/api/oauth#POST_api_friend) and saw that they actually want you to use [PUT /api/v1/me/friends/username](https://www.reddit.com/dev/api/oauth#PUT_api_v1_me_friends_%7Busername%7D) instead (ostensibly so that /api/friend will one day control everything except adding friends). I tried this:
    @restrict_access(scope='subscribe')
    def friend(self):
        # friend_v1 = 'api/v1/me/friends/%s'
        url = self.reddit_session.config['friend_v1'] % self.name
        data = {'name': self.name, 'note': 'u'}
        method = 'PUT'
        self.reddit_session.evict(self.reddit_session.config['friends'])
        self.reddit_session.request_json(url, data=data, method=method)
But I'm still getting HTTPException 400 errors.
I'd love to make a pull request, but I'm not sure what to try next. Any insight? | praw-dev/praw | diff --git a/tests/cassettes/test_friends_oauth.json b/tests/cassettes/test_friends_oauth.json
new file mode 100644
index 00000000..b0a7cc0e
--- /dev/null
+++ b/tests/cassettes/test_friends_oauth.json
@@ -0,0 +1,1 @@
+{"recorded_with": "betamax/0.4.2", "http_interactions": [{"response": {"body": {"encoding": "UTF-8", "base64_string": "H4sIAAAAAAAAA6pWSkxOTi0uji/Jz07NU7JSUDI3NjCyMDPXdS4oDgxwKfByzfILMU8yi6wqiAo3yzYLtyzNV9JRUAKrjy+pLEgFaUpKTSxKLQKJp1YUZBalFsdnggwzNjMw0FFQKk7OhygrLk0qTi7KTEpVqgUAAAD//wMAZlrF43kAAAA="}, "url": "https://api.reddit.com/api/v1/access_token/", "headers": {"connection": ["keep-alive"], "server": ["cloudflare-nginx"], "set-cookie": ["__cfduid=dbfab20b40c347a3f6ae70df0dc539d481440570943; expires=Thu, 25-Aug-16 06:35:43 GMT; path=/; domain=.reddit.com; HttpOnly"], "transfer-encoding": ["chunked"], "content-type": ["application/json; charset=UTF-8"], "x-moose": ["majestic"], "content-encoding": ["gzip"], "date": ["Wed, 26 Aug 2015 06:35:43 GMT"], "cache-control": ["max-age=0, must-revalidate"], "x-xss-protection": ["1; mode=block"], "strict-transport-security": ["max-age=15552000; includeSubDomains; preload"], "x-content-type-options": ["nosniff"], "x-frame-options": ["SAMEORIGIN"], "cf-ray": ["21bd84aa3c7f1413-LAX"]}, "status": {"code": 200, "message": "OK"}}, "recorded_at": "2015-08-26T06:35:41", "request": {"body": {"string": "grant_type=refresh_token&redirect_uri=https%3A%2F%2F127.0.0.1%3A65010%2Fauthorize_callback&refresh_token=LlqwOLjyu_l6GMZIBqhcLWB0hAE", "encoding": "utf-8"}, "uri": "https://api.reddit.com/api/v1/access_token/", "method": "POST", "headers": {"Authorization": ["Basic c3RKbFVTVWJQUWU1bFE6aVUtTHNPenlKSDdCRFZvcS1xT1dORXEyenVJ"], "Accept": ["*/*"], "Connection": ["keep-alive"], "Accept-Encoding": ["gzip, deflate"], "User-Agent": ["PRAW_test_suite PRAW/3.2.1 Python/3.4.3 b'Windows-7-6.1.7601-SP1'"], "Content-Type": ["application/x-www-form-urlencoded"], "Content-Length": ["132"]}}}, {"response": {"body": {"string": "{\"fields\": [\"note\"], \"explanation\": \"you must have an active reddit gold subscription to do that\", \"reason\": \"GOLD_REQUIRED\"}", "encoding": "UTF-8"}, "url": "https://oauth.reddit.com/api/v1/me/friends/PyAPITestUser3.json", "headers": {"x-ratelimit-used": ["2"], "server": ["cloudflare-nginx"], "set-cookie": ["__cfduid=d51d9956cff66a4d8321b1e82c334037e1440570943; expires=Thu, 25-Aug-16 06:35:43 GMT; path=/; domain=.reddit.com; HttpOnly"], "x-frame-options": ["SAMEORIGIN"], "content-type": ["application/json; charset=UTF-8"], "x-moose": ["majestic"], "x-ratelimit-remaining": ["598.0"], "x-ua-compatible": ["IE=edge"], "x-xss-protection": ["1; mode=block"], "cache-control": ["private, s-maxage=0, max-age=0, must-revalidate", "max-age=0, must-revalidate"], "connection": ["keep-alive"], "date": ["Wed, 26 Aug 2015 06:35:43 GMT"], "x-ratelimit-reset": ["257"], "strict-transport-security": ["max-age=15552000; includeSubDomains; preload"], "x-content-type-options": ["nosniff"], "content-length": ["125"], "expires": ["-1"], "cf-ray": ["21bd84ad935022a0-LAX"]}, "status": {"code": 400, "message": "Bad Request"}}, "recorded_at": "2015-08-26T06:35:41", "request": {"body": {"string": "{\"note\": \"note\"}", "encoding": "utf-8"}, "uri": "https://oauth.reddit.com/api/v1/me/friends/PyAPITestUser3.json", "method": "PUT", "headers": {"Authorization": ["bearer 7302867-CpsQPDpJEjNT7b6YzpZW6k6W9uo"], "Accept": ["*/*"], "Connection": ["keep-alive"], "Accept-Encoding": ["gzip, deflate"], "User-Agent": ["PRAW_test_suite PRAW/3.2.1 Python/3.4.3 b'Windows-7-6.1.7601-SP1'"], "Content-Type": ["application/json"], "Content-Length": ["16"]}}}, {"response": {"body": {"string": "{\"date\": 1440570943.0, \"name\": \"PyAPITestUser3\", \"id\": \"t2_6c1xj\"}", "encoding": 
"UTF-8"}, "url": "https://oauth.reddit.com/api/v1/me/friends/PyAPITestUser3.json", "headers": {"x-ratelimit-used": ["3"], "server": ["cloudflare-nginx"], "set-cookie": ["__cfduid=d51d9956cff66a4d8321b1e82c334037e1440570943; expires=Thu, 25-Aug-16 06:35:43 GMT; path=/; domain=.reddit.com; HttpOnly"], "x-frame-options": ["SAMEORIGIN"], "content-type": ["application/json; charset=UTF-8"], "x-moose": ["majestic"], "x-ratelimit-reset": ["257"], "x-ratelimit-remaining": ["597.0"], "x-xss-protection": ["1; mode=block"], "cache-control": ["private, s-maxage=0, max-age=0, must-revalidate", "max-age=0, must-revalidate"], "connection": ["keep-alive"], "date": ["Wed, 26 Aug 2015 06:35:43 GMT"], "strict-transport-security": ["max-age=15552000; includeSubDomains; preload"], "x-content-type-options": ["nosniff"], "content-length": ["66"], "expires": ["-1"], "cf-ray": ["21bd84ae735822a0-LAX"]}, "status": {"code": 201, "message": "Created"}}, "recorded_at": "2015-08-26T06:35:42", "request": {"body": {"string": "{}", "encoding": "utf-8"}, "uri": "https://oauth.reddit.com/api/v1/me/friends/PyAPITestUser3.json", "method": "PUT", "headers": {"Authorization": ["bearer 7302867-CpsQPDpJEjNT7b6YzpZW6k6W9uo"], "Accept": ["*/*"], "Connection": ["keep-alive"], "Accept-Encoding": ["gzip, deflate"], "User-Agent": ["PRAW_test_suite PRAW/3.2.1 Python/3.4.3 b'Windows-7-6.1.7601-SP1'"], "Content-Type": ["application/json"], "Content-Length": ["2"]}}}, {"response": {"body": {"encoding": "UTF-8", "base64_string": "H4sIAAAAAAAAA2SPsQqDMBRFfyW8WUqNWKhbx24O7fxIzUt91SSQxNIi/nuJQxFcD+fAvTMM7DQ0ApKEQoBWSUEjZnDKUsbt99JebxTTPVKossIRTWBaK6PGSIWALpBKlElZSVnX8lxXh2MhoGdNaIK3GPzDp7hvcErdvhvZDTioYPOaMtveWnLpz7LEEZ9+3O7giNZvQa8ivimwYdJIVvEIjUhhWuX196krPy9Ylh8AAAD//wMAK4nJkwoBAAA="}, "url": "https://api.reddit.com/user/PyAPITestUser3/about/.json", "headers": {"connection": ["keep-alive"], "server": ["cloudflare-nginx"], "x-frame-options": ["SAMEORIGIN"], "transfer-encoding": ["chunked"], "content-type": ["application/json; charset=UTF-8"], "x-moose": ["majestic"], "x-ua-compatible": ["IE=edge"], "content-encoding": ["gzip"], "cache-control": ["max-age=0, must-revalidate"], "access-control-allow-origin": ["*"], "date": ["Wed, 26 Aug 2015 06:35:44 GMT"], "x-reddit-tracking": ["https://pixel.redditmedia.com/pixel/of_destiny.png?v=a1F8dTU0p4RfH24s95%2BiRD1fBaZBy1d2xVjexwqCj%2B%2FZ8Q6CupfRyQ9gjwW%2B%2BjAn%2BCuS9y7didg%3D"], "x-xss-protection": ["1; mode=block"], "x-content-type-options": ["nosniff"], "strict-transport-security": ["max-age=15552000; includeSubDomains; preload"], "access-control-expose-headers": ["X-Reddit-Tracking, X-Moose"], "cf-ray": ["21bd84af6cba1413-LAX"]}, "status": {"code": 200, "message": "OK"}}, "recorded_at": "2015-08-26T06:35:42", "request": {"body": {"string": "", "encoding": "utf-8"}, "uri": "https://api.reddit.com/user/PyAPITestUser3/about/.json", "method": "GET", "headers": {"User-Agent": ["PRAW_test_suite PRAW/3.2.1 Python/3.4.3 b'Windows-7-6.1.7601-SP1'"], "Accept": ["*/*"], "Connection": ["keep-alive"], "Cookie": ["__cfduid=d51d9956cff66a4d8321b1e82c334037e1440570943"], "Accept-Encoding": ["gzip, deflate"]}}}, {"response": {"body": {"encoding": "UTF-8", "base64_string": "H4sIAAAAAAAAA6pWSkxOTi0uji/Jz07NU7JSUDI3NjCyMDPX9XML180p0/Wryo4PqLRMDXQpScxx83ZyyXDJVtJRUAKrjy+pLEgFaUpKTSxKLQKJp1YUZBalFsdnggwzNjMw0FFQKk7OhyjLrSwuTSpKTUnJLClWqgUAAAD//wMA6XtVbnwAAAA="}, "url": "https://api.reddit.com/api/v1/access_token/", "headers": {"connection": ["keep-alive"], "server": ["cloudflare-nginx"], "x-frame-options": 
["SAMEORIGIN"], "transfer-encoding": ["chunked"], "content-type": ["application/json; charset=UTF-8"], "x-moose": ["majestic"], "content-encoding": ["gzip"], "date": ["Wed, 26 Aug 2015 06:35:44 GMT"], "cache-control": ["max-age=0, must-revalidate"], "x-xss-protection": ["1; mode=block"], "strict-transport-security": ["max-age=15552000; includeSubDomains; preload"], "x-content-type-options": ["nosniff"], "cf-ray": ["21bd84b18cc71413-LAX"]}, "status": {"code": 200, "message": "OK"}}, "recorded_at": "2015-08-26T06:35:42", "request": {"body": {"string": "grant_type=refresh_token&redirect_uri=https%3A%2F%2F127.0.0.1%3A65010%2Fauthorize_callback&refresh_token=O7tfWhqem6fQZqxhoTiLca1s7VA", "encoding": "utf-8"}, "uri": "https://api.reddit.com/api/v1/access_token/", "method": "POST", "headers": {"Authorization": ["Basic c3RKbFVTVWJQUWU1bFE6aVUtTHNPenlKSDdCRFZvcS1xT1dORXEyenVJ"], "Accept": ["*/*"], "Connection": ["keep-alive"], "Cookie": ["__cfduid=d51d9956cff66a4d8321b1e82c334037e1440570943"], "User-Agent": ["PRAW_test_suite PRAW/3.2.1 Python/3.4.3 b'Windows-7-6.1.7601-SP1'"], "Content-Type": ["application/x-www-form-urlencoded"], "Content-Length": ["132"], "Accept-Encoding": ["gzip, deflate"]}}}, {"response": {"body": {"encoding": "UTF-8", "base64_string": "H4sIAAAAAAAAA6pWSkksSVWyUjA0MTEwNTewNDHWM9BRUMpLzAWJKgVUOgZ4hqQWl4QWpxYZK+koKGWmgMRLjOLNkg0rspRqAQAAAP//AwC0C6M5QgAAAA=="}, "url": "https://oauth.reddit.com/api/v1/me/friends/PyAPITestUser3.json", "headers": {"x-ratelimit-used": ["4"], "server": ["cloudflare-nginx"], "set-cookie": ["__cfduid=dcfacd553cdaabb519e3a258c4c7f962d1440570944; expires=Thu, 25-Aug-16 06:35:44 GMT; path=/; domain=.reddit.com; HttpOnly"], "transfer-encoding": ["chunked"], "content-type": ["application/json; charset=UTF-8"], "x-moose": ["majestic"], "x-ratelimit-reset": ["256"], "x-ratelimit-remaining": ["596.0"], "date": ["Wed, 26 Aug 2015 06:35:44 GMT"], "cache-control": ["private, s-maxage=0, max-age=0, must-revalidate", "max-age=0, must-revalidate"], "strict-transport-security": ["max-age=15552000; includeSubDomains; preload"], "content-encoding": ["gzip"], "connection": ["keep-alive"], "x-xss-protection": ["1; mode=block"], "x-content-type-options": ["nosniff"], "x-frame-options": ["SAMEORIGIN"], "expires": ["-1"], "cf-ray": ["21bd84b2636a22a0-LAX"]}, "status": {"code": 200, "message": "OK"}}, "recorded_at": "2015-08-26T06:35:42", "request": {"body": {"string": "", "encoding": "utf-8"}, "uri": "https://oauth.reddit.com/api/v1/me/friends/PyAPITestUser3.json", "method": "GET", "headers": {"Authorization": ["bearer 7302867-NFW-lv-Nzk_Py9eQDtalFKBDhDk"], "User-Agent": ["PRAW_test_suite PRAW/3.2.1 Python/3.4.3 b'Windows-7-6.1.7601-SP1'"], "Connection": ["keep-alive"], "Accept": ["*/*"], "Accept-Encoding": ["gzip, deflate"]}}}, {"response": {"body": {"encoding": "UTF-8", "base64_string": "H4sIAAAAAAAAA6pWSkxOTi0uji/Jz07NU7JSUDI3NjCyMDPXrTJzNs30rzQK9NR1KQiNN3dPD3XJN9J1DPLIV9JRUAKrjy+pLEgFaUpKTSxKLQKJp1YUZBalFsdnggwzNjMw0FFQKk7OhygrSk1MUaoFAAAA//8DAFnIJVd0AAAA"}, "url": "https://api.reddit.com/api/v1/access_token/", "headers": {"connection": ["keep-alive"], "server": ["cloudflare-nginx"], "x-frame-options": ["SAMEORIGIN"], "transfer-encoding": ["chunked"], "content-type": ["application/json; charset=UTF-8"], "x-moose": ["majestic"], "content-encoding": ["gzip"], "date": ["Wed, 26 Aug 2015 06:35:45 GMT"], "cache-control": ["max-age=0, must-revalidate"], "x-xss-protection": ["1; mode=block"], "strict-transport-security": ["max-age=15552000; includeSubDomains; preload"], 
"x-content-type-options": ["nosniff"], "cf-ray": ["21bd84b49ce71413-LAX"]}, "status": {"code": 200, "message": "OK"}}, "recorded_at": "2015-08-26T06:35:43", "request": {"body": {"string": "grant_type=refresh_token&redirect_uri=https%3A%2F%2F127.0.0.1%3A65010%2Fauthorize_callback&refresh_token=_mmtb8YjDym0eC26G-rTxXUMea0", "encoding": "utf-8"}, "uri": "https://api.reddit.com/api/v1/access_token/", "method": "POST", "headers": {"Authorization": ["Basic c3RKbFVTVWJQUWU1bFE6aVUtTHNPenlKSDdCRFZvcS1xT1dORXEyenVJ"], "Accept": ["*/*"], "Connection": ["keep-alive"], "Cookie": ["__cfduid=dcfacd553cdaabb519e3a258c4c7f962d1440570944"], "User-Agent": ["PRAW_test_suite PRAW/3.2.1 Python/3.4.3 b'Windows-7-6.1.7601-SP1'"], "Content-Type": ["application/x-www-form-urlencoded"], "Content-Length": ["132"], "Accept-Encoding": ["gzip, deflate"]}}}, {"response": {"body": {"encoding": "UTF-8", "base64_string": "H4sIAAAAAAAAA4yOPQvCMBgG/8rLMxdJbVI1m6Pg0EGnEqQfAVM1SpNBDfnvkm7ZXA/uuDbgZuwISTg7PR+N8ygIY+c7SAoYruY+ztpCUhsS1pBUcs74VnBRrVhBsN0jUfT9UyfZLDm/vlSvb60RC8pMsWE7npvNZ98cTtr59FBljXoo3xOiikvm71cVo/oBAAD//wMA8Hm2kdwAAAA="}, "url": "https://oauth.reddit.com/prefs/friends/.json", "headers": {"x-ratelimit-used": ["5"], "server": ["cloudflare-nginx"], "set-cookie": ["__cfduid=dffa64eb3980af2982a62afec11f2fbe11440570945; expires=Thu, 25-Aug-16 06:35:45 GMT; path=/; domain=.reddit.com; HttpOnly"], "transfer-encoding": ["chunked"], "content-type": ["application/json; charset=UTF-8"], "x-moose": ["majestic"], "x-ratelimit-remaining": ["595.0"], "x-ua-compatible": ["IE=edge"], "date": ["Wed, 26 Aug 2015 06:35:45 GMT"], "connection": ["keep-alive"], "cache-control": ["private, s-maxage=0, max-age=0, must-revalidate", "max-age=0, must-revalidate"], "strict-transport-security": ["max-age=15552000; includeSubDomains; preload"], "content-encoding": ["gzip"], "x-ratelimit-reset": ["255"], "x-reddit-tracking": ["https://pixel.redditmedia.com/pixel/of_destiny.png?v=z%2FUU9bl2%2FDxqL2Ui%2BXXj%2Fud5RIcTJqljWl9%2BQCEyV2voMh1ns0FvhGp%2BsSS7bZCNeva0U%2FpmKT6YW6fhEgLpKZe5fztZndwh"], "x-xss-protection": ["1; mode=block"], "x-content-type-options": ["nosniff"], "x-frame-options": ["SAMEORIGIN"], "expires": ["-1"], "cf-ray": ["21bd84b6e38722a0-LAX"]}, "status": {"code": 200, "message": "OK"}}, "recorded_at": "2015-08-26T06:35:43", "request": {"body": {"string": "", "encoding": "utf-8"}, "uri": "https://oauth.reddit.com/prefs/friends/.json", "method": "GET", "headers": {"Authorization": ["bearer 7302867-z6C5iOy2QI-DpU_7GgUDo2-ARHo"], "User-Agent": ["PRAW_test_suite PRAW/3.2.1 Python/3.4.3 b'Windows-7-6.1.7601-SP1'"], "Connection": ["keep-alive"], "Accept": ["*/*"], "Accept-Encoding": ["gzip, deflate"]}}}, {"response": {"body": {"string": "", "encoding": "UTF-8"}, "url": "https://oauth.reddit.com/api/v1/me/friends/PyAPITestUser3.json", "headers": {"x-ratelimit-used": ["6"], "server": ["cloudflare-nginx"], "set-cookie": ["__cfduid=dffa64eb3980af2982a62afec11f2fbe11440570945; expires=Thu, 25-Aug-16 06:35:45 GMT; path=/; domain=.reddit.com; HttpOnly"], "x-frame-options": ["SAMEORIGIN"], "content-type": ["application/json; charset=UTF-8"], "x-moose": ["majestic"], "x-ratelimit-reset": ["255"], "x-ratelimit-remaining": ["594.0"], "x-xss-protection": ["1; mode=block"], "cache-control": ["private, s-maxage=0, max-age=0, must-revalidate", "max-age=0, must-revalidate"], "connection": ["keep-alive"], "date": ["Wed, 26 Aug 2015 06:35:45 GMT"], "strict-transport-security": ["max-age=15552000; includeSubDomains; preload"], "x-content-type-options": 
["nosniff"], "content-length": ["0"], "expires": ["-1"], "cf-ray": ["21bd84b9139322a0-LAX"]}, "status": {"code": 204, "message": "No Content"}}, "recorded_at": "2015-08-26T06:35:44", "request": {"body": {"string": "{\"id\": \"PyAPITestUser3\"}", "encoding": "utf-8"}, "uri": "https://oauth.reddit.com/api/v1/me/friends/PyAPITestUser3.json", "method": "DELETE", "headers": {"Authorization": ["bearer 7302867-CpsQPDpJEjNT7b6YzpZW6k6W9uo"], "Accept": ["*/*"], "Connection": ["keep-alive"], "Accept-Encoding": ["gzip, deflate"], "User-Agent": ["PRAW_test_suite PRAW/3.2.1 Python/3.4.3 b'Windows-7-6.1.7601-SP1'"], "Content-Type": ["application/json"], "Content-Length": ["24"]}}}, {"response": {"body": {"encoding": "UTF-8", "base64_string": "H4sIAAAAAAAAA2SPsQqDMBRFfyW8WUqNWKhbx24O7fxIzUt91SSQxNIi/nuJQxFcD+fAvTMM7DQ0ApKEQoBWSUEjZnDKUsbt99JebxTTPVKossIRTWBaK6PGSIWALpBKlElZSVnX8lxXh2MhoGdNaIK3GPzDp7hvcErdvhvZDTioYPOaMtveWnLpz7LEEZ9+3O7giNZvQa8ivimwYdJIVvEIjUhhWuX196krPy9Ylh8AAAD//wMAK4nJkwoBAAA="}, "url": "https://api.reddit.com/user/PyAPITestUser3/about/.json?uniq=1", "headers": {"connection": ["keep-alive"], "server": ["cloudflare-nginx"], "x-frame-options": ["SAMEORIGIN"], "transfer-encoding": ["chunked"], "content-type": ["application/json; charset=UTF-8"], "x-moose": ["majestic"], "x-ua-compatible": ["IE=edge"], "content-encoding": ["gzip"], "cache-control": ["max-age=0, must-revalidate"], "access-control-allow-origin": ["*"], "date": ["Wed, 26 Aug 2015 06:35:46 GMT"], "x-reddit-tracking": ["https://pixel.redditmedia.com/pixel/of_destiny.png?v=DjXAucekmGU%2FnCJKF2pS6zxzUmQhnQfTxCR7K3JVyyBuaTVh9x%2FgM4uRwRfzENLRP0lB30DU0aQ%3D"], "x-xss-protection": ["1; mode=block"], "x-content-type-options": ["nosniff"], "strict-transport-security": ["max-age=15552000; includeSubDomains; preload"], "access-control-expose-headers": ["X-Reddit-Tracking, X-Moose"], "cf-ray": ["21bd84bb5d1a1413-LAX"]}, "status": {"code": 200, "message": "OK"}}, "recorded_at": "2015-08-26T06:35:44", "request": {"body": {"string": "", "encoding": "utf-8"}, "uri": "https://api.reddit.com/user/PyAPITestUser3/about/.json?uniq=1", "method": "GET", "headers": {"User-Agent": ["PRAW_test_suite PRAW/3.2.1 Python/3.4.3 b'Windows-7-6.1.7601-SP1'"], "Accept": ["*/*"], "Connection": ["keep-alive"], "Cookie": ["__cfduid=dffa64eb3980af2982a62afec11f2fbe11440570945"], "Accept-Encoding": ["gzip, deflate"]}}}]}
\ No newline at end of file
diff --git a/tests/helper.py b/tests/helper.py
index 03700625..c6e5ad6f 100644
--- a/tests/helper.py
+++ b/tests/helper.py
@@ -7,6 +7,7 @@ import time
import unittest
from betamax import Betamax, BaseMatcher
from betamax_matchers.form_urlencoded import URLEncodedBodyMatcher
+from betamax_matchers.json_body import JSONBodyMatcher
from functools import wraps
from praw import Reddit
from requests.compat import urljoin
@@ -30,7 +31,8 @@ class BodyMatcher(BaseMatcher):
to_compare = request.copy()
to_compare.body = text_type(to_compare.body)
- return URLEncodedBodyMatcher().match(to_compare, recorded_request)
+ return URLEncodedBodyMatcher().match(to_compare, recorded_request) or \
+ JSONBodyMatcher().match(to_compare, recorded_request)
class PRAWTest(unittest.TestCase):
diff --git a/tests/test_redditor.py b/tests/test_redditor.py
index a7326063..a1452a9d 100644
--- a/tests/test_redditor.py
+++ b/tests/test_redditor.py
@@ -1,9 +1,10 @@
"""Tests for Redditor class."""
from __future__ import print_function, unicode_literals
+from praw import errors
from praw.objects import LoggedInRedditor
from six import text_type
-from .helper import PRAWTest, betamax
+from .helper import OAuthPRAWTest, PRAWTest, betamax
class RedditorTest(PRAWTest):
@@ -13,16 +14,16 @@ class RedditorTest(PRAWTest):
@betamax()
def test_add_remove_friends(self):
- friends = self.r.user.get_friends()
+ friends = self.r.get_friends()
redditor = friends[0] if friends else self.other_user
redditor.unfriend()
self.delay_for_listing_update()
- self.assertTrue(redditor not in self.r.user.get_friends(u=1))
+ self.assertTrue(redditor not in self.r.get_friends(u=1))
redditor.friend()
self.delay_for_listing_update()
- self.assertTrue(redditor in self.r.user.get_friends(u=2))
+ self.assertTrue(redditor in self.r.get_friends(u=2))
@betamax()
def test_duplicate_login(self):
@@ -94,3 +95,28 @@ class RedditorTest(PRAWTest):
@betamax()
def test_user_set_on_login(self):
self.assertTrue(isinstance(self.r.user, LoggedInRedditor))
+
+
+class OAuthRedditorTest(OAuthPRAWTest):
+ @betamax()
+ def test_friends_oauth(self):
+ self.r.refresh_access_information(self.refresh_token['subscribe'])
+ user = self.r.get_redditor(self.other_user_name)
+
+ # Only Gold users can include personal notes
+ self.assertRaises(errors.HTTPException, user.friend, 'note')
+
+ friendship = user.friend()
+ self.assertEqual(friendship['id'], user.fullname)
+
+ self.r.refresh_access_information(self.refresh_token['mysubreddits'])
+ friendship2 = user.get_friend_info()
+ self.assertEqual(friendship, friendship2)
+
+ self.r.refresh_access_information(self.refresh_token['read'])
+ friends = list(self.r.get_friends())
+ self.assertTrue(user in friends)
+
+ self.r.refresh_access_information(self.refresh_token['subscribe'])
+ user.unfriend()
+ self.assertFalse(user.refresh().is_friend)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 4
} | 3.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"coveralls",
"flake8",
"pep257",
"betamax_matchers",
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | betamax==0.9.0
betamax-matchers==0.4.0
certifi==2025.1.31
charset-normalizer==3.4.1
coverage==7.8.0
coveralls==4.0.1
decorator==5.2.1
docopt==0.6.2
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
flake8==7.2.0
idna==3.10
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
mccabe==0.7.0
packaging @ file:///croot/packaging_1734472117206/work
pep257==0.7.0
pluggy @ file:///croot/pluggy_1733169602837/work
-e git+https://github.com/praw-dev/praw.git@fea4a6df0508c93e200ef9fcacecf2b423f2aa27#egg=praw
pycodestyle==2.13.0
pyflakes==3.3.1
pytest @ file:///croot/pytest_1738938843180/work
requests==2.32.3
requests-toolbelt==1.0.0
six==1.17.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
update-checker==0.18.0
urllib3==2.3.0
| name: praw
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- betamax==0.9.0
- betamax-matchers==0.4.0
- certifi==2025.1.31
- charset-normalizer==3.4.1
- coverage==7.8.0
- coveralls==4.0.1
- decorator==5.2.1
- docopt==0.6.2
- flake8==7.2.0
- idna==3.10
- mccabe==0.7.0
- pep257==0.7.0
- pycodestyle==2.13.0
- pyflakes==3.3.1
- requests==2.32.3
- requests-toolbelt==1.0.0
- six==1.17.0
- update-checker==0.18.0
- urllib3==2.3.0
prefix: /opt/conda/envs/praw
| [
"tests/test_redditor.py::RedditorTest::test_add_remove_friends"
]
| [
"tests/test_redditor.py::OAuthRedditorTest::test_friends_oauth"
]
| [
"tests/test_redditor.py::RedditorTest::test_duplicate_login",
"tests/test_redditor.py::RedditorTest::test_get_hidden",
"tests/test_redditor.py::RedditorTest::test_get_redditor",
"tests/test_redditor.py::RedditorTest::test_get_submitted",
"tests/test_redditor.py::RedditorTest::test_get_upvoted_and_downvoted",
"tests/test_redditor.py::RedditorTest::test_name_lazy_update",
"tests/test_redditor.py::RedditorTest::test_redditor_comparison",
"tests/test_redditor.py::RedditorTest::test_user_set_on_login"
]
| []
| BSD 2-Clause "Simplified" License | 222 | [
"praw/internal.py",
"praw/objects.py",
"CHANGES.rst",
"praw/__init__.py"
]
| [
"praw/internal.py",
"praw/objects.py",
"CHANGES.rst",
"praw/__init__.py"
]
|
|
kevin1024__vcrpy-201 | d620095c3667cc7f9fd7ea1213948dd58db6aefb | 2015-08-28 11:29:16 | 31c358c0350fdb56603fe4b96df2be05aa2c6838 | diff --git a/README.rst b/README.rst
index 23910bb..768e850 100644
--- a/README.rst
+++ b/README.rst
@@ -1,16 +1,14 @@
-|Build Status| |Stories in Ready| |Gitter|
-
VCR.py
======
-.. figure:: https://raw.github.com/kevin1024/vcrpy/master/vcr.png
+.. image:: vcr.png
:alt: vcr.py
- vcr.py
-
This is a Python version of `Ruby's VCR
library <https://github.com/vcr/vcr>`__.
+|Build Status| |Stories in Ready| |Gitter Chat|
+
What it does
------------
@@ -758,6 +756,6 @@ more details
:target: http://travis-ci.org/kevin1024/vcrpy
.. |Stories in Ready| image:: https://badge.waffle.io/kevin1024/vcrpy.png?label=ready&title=Ready
:target: https://waffle.io/kevin1024/vcrpy
-.. |Gitter| image:: https://badges.gitter.im/Join%20Chat.svg
+.. |Gitter Chat| image:: https://badges.gitter.im/Join%20Chat.svg
:alt: Join the chat at https://gitter.im/kevin1024/vcrpy
:target: https://gitter.im/kevin1024/vcrpy?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
diff --git a/vcr/cassette.py b/vcr/cassette.py
index bc7410f..6dc6c40 100644
--- a/vcr/cassette.py
+++ b/vcr/cassette.py
@@ -75,7 +75,7 @@ class CassetteContextDecorator(object):
lambda key, _: key in self._non_cassette_arguments,
self._args_getter()
)
- if 'path_transformer' in other_kwargs:
+ if other_kwargs.get('path_transformer'):
transformer = other_kwargs['path_transformer']
cassette_kwargs['path'] = transformer(cassette_kwargs['path'])
self.__finish = self._patch_generator(self.cls.load(**cassette_kwargs))
diff --git a/vcr/config.py b/vcr/config.py
index 455d074..99e4cf7 100644
--- a/vcr/config.py
+++ b/vcr/config.py
@@ -2,25 +2,19 @@ import copy
import functools
import inspect
import os
-import types
import six
from .compat import collections
from .cassette import Cassette
from .serializers import yamlserializer, jsonserializer
-from .util import compose, auto_decorate
+from .util import compose
from . import matchers
from . import filters
class VCR(object):
- @staticmethod
- def is_test_method(method_name, function):
- return method_name.startswith('test') and \
- isinstance(function, types.FunctionType)
-
@staticmethod
def ensure_suffix(suffix):
def ensure(path):
@@ -29,7 +23,7 @@ class VCR(object):
return path
return ensure
- def __init__(self, path_transformer=lambda x: x, before_record_request=None,
+ def __init__(self, path_transformer=None, before_record_request=None,
custom_patches=(), filter_query_parameters=(), ignore_hosts=(),
record_mode="once", ignore_localhost=False, filter_headers=(),
before_record_response=None, filter_post_data_parameters=(),
@@ -114,7 +108,7 @@ class VCR(object):
matcher_names = kwargs.get('match_on', self.match_on)
path_transformer = kwargs.get(
'path_transformer',
- self.path_transformer or self.ensure_suffix('.yaml')
+ self.path_transformer
)
func_path_generator = kwargs.get(
'func_path_generator',
@@ -208,7 +202,7 @@ class VCR(object):
if filter_query_parameters:
filter_functions.append(functools.partial(
filters.remove_query_parameters,
- query_parameters_to_remove=filter_query_parameters
+ query_parameters_to_remove=filter_query_parameters
))
if filter_post_data_parameters:
filter_functions.append(
@@ -256,7 +250,3 @@ class VCR(object):
def register_matcher(self, name, matcher):
self.matchers[name] = matcher
-
- def test_case(self, predicate=None):
- predicate = predicate or self.is_test_method
- return six.with_metaclass(auto_decorate(self.use_cassette, predicate))
diff --git a/vcr/util.py b/vcr/util.py
index dd66320..b36ff75 100644
--- a/vcr/util.py
+++ b/vcr/util.py
@@ -1,6 +1,4 @@
import collections
-import types
-
# Shamelessly stolen from https://github.com/kennethreitz/requests/blob/master/requests/structures.py
class CaseInsensitiveDict(collections.MutableMapping):
@@ -83,8 +81,9 @@ def partition_dict(predicate, dictionary):
def compose(*functions):
def composed(incoming):
res = incoming
- for function in functions[::-1]:
- res = function(res)
+ for function in reversed(functions):
+ if function:
+ res = function(res)
return res
return composed
@@ -92,30 +91,3 @@ def read_body(request):
if hasattr(request.body, 'read'):
return request.body.read()
return request.body
-
-
-def auto_decorate(
- decorator,
- predicate=lambda name, value: isinstance(value, types.FunctionType)
-):
- def maybe_decorate(attribute, value):
- if predicate(attribute, value):
- value = decorator(value)
- return value
-
- class DecorateAll(type):
-
- def __setattr__(cls, attribute, value):
- return super(DecorateAll, cls).__setattr__(
- attribute, maybe_decorate(attribute, value)
- )
-
- def __new__(cls, name, bases, attributes_dict):
- new_attributes_dict = dict(
- (attribute, maybe_decorate(attribute, value))
- for attribute, value in attributes_dict.items()
- )
- return super(DecorateAll, cls).__new__(
- cls, name, bases, new_attributes_dict
- )
- return DecorateAll
| path_transformer doesn't default properly
In the code, we have
```python
class VCR(object):
    def __init__(self, path_transformer=lambda x: x, before_record_request=None,
```
and then later
```python
def get_merged_config(self, **kwargs):
    path_transformer = kwargs.get(
        'path_transformer',
        self.path_transformer or self.ensure_suffix('.yaml')
    )
```
and then later
```python
if 'path_transformer' in other_kwargs:
    transformer = other_kwargs['path_transformer']
    cassette_kwargs['path'] = transformer(cassette_kwargs['path'])
```
So there are two problems with this code:
1. The default `ensure_suffix` doesn't fire because the parameter default is already the identity function which is truthy. The only way this fires is if the user specifies `path_transformer=None` which is surely not the intent.
2. If the context manager is called with `path_transformer=None` then it will blow up (a minimal sketch of this follows below). | kevin1024/vcrpy
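A minimal sketch of the second failure mode (the cassette path here is hypothetical):

```python
import vcr

# Passing None explicitly puts 'path_transformer' into other_kwargs,
# so the membership check above succeeds and None is then called:
with vcr.use_cassette('fixtures/example.yaml', path_transformer=None):
    pass  # TypeError: 'NoneType' object is not callable
```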
index e1e9fb9..413b154 100644
--- a/tests/unit/test_cassettes.py
+++ b/tests/unit/test_cassettes.py
@@ -245,6 +245,13 @@ def test_path_transformer_with_context_manager():
assert cassette._path == 'a'
+def test_path_transformer_None():
+ with Cassette.use(
+ path='a', path_transformer=None,
+ ) as cassette:
+ assert cassette._path == 'a'
+
+
def test_func_path_generator():
def generator(function):
return os.path.join(os.path.dirname(inspect.getfile(function)),
diff --git a/tests/unit/test_vcr.py b/tests/unit/test_vcr.py
index 7150f15..6cbf21b 100644
--- a/tests/unit/test_vcr.py
+++ b/tests/unit/test_vcr.py
@@ -1,13 +1,11 @@
import os
import pytest
-from six.moves import http_client as httplib
from vcr import VCR, use_cassette
from vcr.compat import mock
from vcr.request import Request
from vcr.stubs import VCRHTTPSConnection
-from vcr.patch import _HTTPConnection, force_reset
def test_vcr_use_cassette():
@@ -100,6 +98,28 @@ def test_vcr_before_record_response_iterable():
assert mock_filter.call_count == 1
+def test_vcr_path_transformer():
+ # Regression test for #199
+
+ # Prevent actually saving the cassette
+ with mock.patch('vcr.cassette.save_cassette'):
+
+ # Baseline: path should be unchanged
+ vcr = VCR()
+ with vcr.use_cassette('test') as cassette:
+ assert cassette._path == 'test'
+
+ # Regression test: path_transformer=None should do the same.
+ vcr = VCR(path_transformer=None)
+ with vcr.use_cassette('test') as cassette:
+ assert cassette._path == 'test'
+
+ # and it should still work with cassette_library_dir
+ vcr = VCR(cassette_library_dir='/foo')
+ with vcr.use_cassette('test') as cassette:
+ assert cassette._path == '/foo/test'
+
+
@pytest.fixture
def random_fixture():
return 1
@@ -245,7 +265,6 @@ def test_path_transformer():
def test_cassette_name_generator_defaults_to_using_module_function_defined_in():
vcr = VCR(inject_cassette=True)
-
@vcr.use_cassette
def function_name(cassette):
assert cassette._path == os.path.join(os.path.dirname(__file__),
@@ -277,29 +296,3 @@ def test_additional_matchers():
function_defaults()
function_additional()
-
-
-class TestVCRClass(VCR().test_case()):
-
- def no_decoration(self):
- assert httplib.HTTPConnection == _HTTPConnection
- self.test_dynamically_added()
- assert httplib.HTTPConnection == _HTTPConnection
-
- def test_one(self):
- with force_reset():
- self.no_decoration()
- with force_reset():
- self.test_two()
- assert httplib.HTTPConnection != _HTTPConnection
-
- def test_two(self):
- assert httplib.HTTPConnection != _HTTPConnection
-
-
-def test_dynamically_added(self):
- assert httplib.HTTPConnection != _HTTPConnection
-
-
-TestVCRClass.test_dynamically_added = test_dynamically_added
-del test_dynamically_added
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 4
} | 1.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-localserver",
"mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.7",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi @ file:///croot/certifi_1671487769961/work/certifi
exceptiongroup==1.2.2
importlib-metadata==6.7.0
iniconfig==2.0.0
MarkupSafe==2.1.5
mock==5.2.0
packaging==24.0
pluggy==1.2.0
pytest==7.4.4
pytest-localserver==0.9.0.post0
PyYAML==6.0.1
six==1.17.0
tomli==2.0.1
typing_extensions==4.7.1
-e git+https://github.com/kevin1024/vcrpy.git@d620095c3667cc7f9fd7ea1213948dd58db6aefb#egg=vcrpy
Werkzeug==2.2.3
wrapt==1.16.0
zipp==3.15.0
| name: vcrpy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=22.3.1=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- importlib-metadata==6.7.0
- iniconfig==2.0.0
- markupsafe==2.1.5
- mock==5.2.0
- packaging==24.0
- pluggy==1.2.0
- pytest==7.4.4
- pytest-localserver==0.9.0.post0
- pyyaml==6.0.1
- six==1.17.0
- tomli==2.0.1
- typing-extensions==4.7.1
- werkzeug==2.2.3
- wrapt==1.16.0
- zipp==3.15.0
prefix: /opt/conda/envs/vcrpy
| [
"tests/unit/test_cassettes.py::test_path_transformer_None",
"tests/unit/test_vcr.py::test_vcr_path_transformer"
]
| [
"tests/unit/test_cassettes.py::test_use_as_decorator_on_coroutine",
"tests/unit/test_cassettes.py::test_use_as_decorator_on_generator"
]
| [
"tests/unit/test_cassettes.py::test_cassette_load",
"tests/unit/test_cassettes.py::test_cassette_not_played",
"tests/unit/test_cassettes.py::test_cassette_append",
"tests/unit/test_cassettes.py::test_cassette_len",
"tests/unit/test_cassettes.py::test_cassette_contains",
"tests/unit/test_cassettes.py::test_cassette_responses_of",
"tests/unit/test_cassettes.py::test_cassette_get_missing_response",
"tests/unit/test_cassettes.py::test_cassette_cant_read_same_request_twice",
"tests/unit/test_cassettes.py::test_function_decorated_with_use_cassette_can_be_invoked_multiple_times",
"tests/unit/test_cassettes.py::test_arg_getter_functionality",
"tests/unit/test_cassettes.py::test_cassette_not_all_played",
"tests/unit/test_cassettes.py::test_cassette_all_played",
"tests/unit/test_cassettes.py::test_before_record_response",
"tests/unit/test_cassettes.py::test_nesting_cassette_context_managers",
"tests/unit/test_cassettes.py::test_nesting_context_managers_by_checking_references_of_http_connection",
"tests/unit/test_cassettes.py::test_custom_patchers",
"tests/unit/test_cassettes.py::test_decorated_functions_are_reentrant",
"tests/unit/test_cassettes.py::test_cassette_use_called_without_path_uses_function_to_generate_path",
"tests/unit/test_cassettes.py::test_path_transformer_with_function_path",
"tests/unit/test_cassettes.py::test_path_transformer_with_context_manager",
"tests/unit/test_cassettes.py::test_func_path_generator",
"tests/unit/test_vcr.py::test_vcr_use_cassette",
"tests/unit/test_vcr.py::test_vcr_before_record_request_params",
"tests/unit/test_vcr.py::test_vcr_before_record_response_iterable",
"tests/unit/test_vcr.py::test_fixtures_with_use_cassette",
"tests/unit/test_vcr.py::test_custom_patchers",
"tests/unit/test_vcr.py::test_inject_cassette",
"tests/unit/test_vcr.py::test_with_current_defaults",
"tests/unit/test_vcr.py::test_cassette_library_dir_with_decoration_and_no_explicit_path",
"tests/unit/test_vcr.py::test_cassette_library_dir_with_decoration_and_explicit_path",
"tests/unit/test_vcr.py::test_cassette_library_dir_with_decoration_and_super_explicit_path",
"tests/unit/test_vcr.py::test_cassette_library_dir_with_path_transformer",
"tests/unit/test_vcr.py::test_use_cassette_with_no_extra_invocation",
"tests/unit/test_vcr.py::test_path_transformer",
"tests/unit/test_vcr.py::test_cassette_name_generator_defaults_to_using_module_function_defined_in",
"tests/unit/test_vcr.py::test_ensure_suffix",
"tests/unit/test_vcr.py::test_additional_matchers"
]
| []
| MIT License | 225 | [
"README.rst",
"vcr/cassette.py",
"vcr/util.py",
"vcr/config.py"
]
| [
"README.rst",
"vcr/cassette.py",
"vcr/util.py",
"vcr/config.py"
]
|
|
paver__paver-152 | e5e75eefb3ccbef2b112419740661d59017d043d | 2015-08-28 15:37:52 | e5e75eefb3ccbef2b112419740661d59017d043d | diff --git a/paver/tasks.py b/paver/tasks.py
index 2eb1c4a..3498321 100644
--- a/paver/tasks.py
+++ b/paver/tasks.py
@@ -147,11 +147,11 @@ class Environment(object):
for option in options:
task._set_value_to_task(task_name, option, None, options[option])
- if args and task.consume_args > 0:
+ if args is not None and task.consume_args > 0:
args = _consume_nargs(task, args)
elif args and (task.consume_args == 0):
raise BuildFailure("Task %s is not decorated with @consume_(n)args,"
- "but has been called with them")
+ "but has been called with them" % task)
task()
def _run_task(self, task_name, needs, func):
 | When an empty args list is given to call_task, the args of the previous task are used.
I have a task that calls another task. The first one gives 0 to n arguments to the second one. The first one also needs a third task, which calls a fourth one with some arguments. When the first one passes at least 1 argument, the behavior is correct. But when the first one passes an empty list of arguments, the arguments from the third task's call to the fourth one are reused.
```
from paver.easy import *

@task
@needs('specific_dep')
@consume_args
def my_task(args):
    call_task('the_other_task', args=args)

@task
@consume_args
def the_other_task(args):
    info('Doing the thing with the args: %s', args)

@task
def specific_dep():
    call_task('generic_dep', args=['specific'])

@task
@consume_args
def generic_dep(args):
    name, = args
    info('Loading the dependency %s', name)
```
```
% ./venv/bin/paver my_task args
---> pavement.my_task
---> pavement.specific_dep
---> pavement.generic_dep
Loading the dependency specific
---> pavement.the_other_task
Doing the thing with the args: ['args']
% ./venv/bin/paver my_task
---> pavement.my_task
---> pavement.specific_dep
---> pavement.generic_dep
Loading the dependency specific
---> pavement.the_other_task
Doing the thing with the args: ['specific'] ### Should be Doing the thing with the args: []
``` | paver/paver | diff --git a/paver/tests/test_tasks.py b/paver/tests/test_tasks.py
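The root cause, visible in the patch above, is a truthiness check: an empty list is falsy, so `if args and task.consume_args > 0:` never consumes the new (empty) args, and the ones set by the earlier call linger. A minimal illustration of the falsy-versus-None distinction:

```
args = []

if args:              # False for an empty list, so this branch is skipped
    print('consumed')

if args is not None:  # True even for [], so this branch runs
    print('consumed')
```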
index b8a419c..62419d7 100644
--- a/paver/tests/test_tasks.py
+++ b/paver/tests/test_tasks.py
@@ -847,6 +847,25 @@ def test_calling_task_with_arguments():
tasks._process_commands(['t1'])
+def test_calling_task_with_empty_arguments():
+ @tasks.task
+ def t1():
+ env.call_task('t3', args=['argument1'])
+ env.call_task('t2', args=[])
+
+ @tasks.task
+ @tasks.consume_args
+ def t2(args):
+ assert args == []
+
+ @tasks.task
+ @tasks.consume_args
+ def t3(args):
+ assert args == ['argument1']
+
+ env = _set_environment(t1=t1, t2=t2, t3=t3)
+ tasks._process_commands(['t1'])
+
def test_calling_nonconsuming_task_with_arguments():
@tasks.task
def t2():
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 1.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"numpy>=1.16.0",
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
numpy==2.0.2
packaging @ file:///croot/packaging_1734472117206/work
-e git+https://github.com/paver/paver.git@e5e75eefb3ccbef2b112419740661d59017d043d#egg=Paver
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: paver
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- numpy==2.0.2
prefix: /opt/conda/envs/paver
| [
"paver/tests/test_tasks.py::test_calling_task_with_empty_arguments"
]
| []
| [
"paver/tests/test_tasks.py::test_basic_dependencies",
"paver/tests/test_tasks.py::test_longname_resolution_in_dependencies",
"paver/tests/test_tasks.py::test_chained_dependencies",
"paver/tests/test_tasks.py::test_backwards_compatible_needs",
"paver/tests/test_tasks.py::test_tasks_dont_repeat",
"paver/tests/test_tasks.py::test_basic_command_line",
"paver/tests/test_tasks.py::test_list_tasks",
"paver/tests/test_tasks.py::test_environment_insertion",
"paver/tests/test_tasks.py::test_add_options_to_environment",
"paver/tests/test_tasks.py::test_shortname_access",
"paver/tests/test_tasks.py::test_longname_access",
"paver/tests/test_tasks.py::test_task_command_line_options",
"paver/tests/test_tasks.py::test_setting_of_options_with_equals",
"paver/tests/test_tasks.py::test_options_inherited_via_needs",
"paver/tests/test_tasks.py::test_options_inherited_via_needs_even_from_grandparents",
"paver/tests/test_tasks.py::test_options_shouldnt_overlap",
"paver/tests/test_tasks.py::test_options_shouldnt_overlap_when_bad_task_specified",
"paver/tests/test_tasks.py::test_options_may_overlap_if_explicitly_allowed",
"paver/tests/test_tasks.py::test_exactly_same_parameters_must_be_specified_in_order_to_allow_sharing",
"paver/tests/test_tasks.py::test_dest_parameter_should_map_opt_to_property",
"paver/tests/test_tasks.py::test_dotted_options",
"paver/tests/test_tasks.py::test_dry_run",
"paver/tests/test_tasks.py::test_consume_args",
"paver/tests/test_tasks.py::test_consume_nargs",
"paver/tests/test_tasks.py::test_consume_nargs_and_options",
"paver/tests/test_tasks.py::test_optional_args_in_tasks",
"paver/tests/test_tasks.py::test_debug_logging",
"paver/tests/test_tasks.py::test_base_logging",
"paver/tests/test_tasks.py::test_error_show_up_no_matter_what",
"paver/tests/test_tasks.py::test_all_messages_for_a_task_are_captured",
"paver/tests/test_tasks.py::test_messages_with_formatting_and_no_args_still_work",
"paver/tests/test_tasks.py::test_alternate_pavement_option",
"paver/tests/test_tasks.py::test_captured_output_shows_up_on_exception",
"paver/tests/test_tasks.py::test_calling_subpavement",
"paver/tests/test_tasks.py::test_task_finders",
"paver/tests/test_tasks.py::test_calling_a_function_rather_than_task",
"paver/tests/test_tasks.py::test_depending_on_a_function_rather_than_task",
"paver/tests/test_tasks.py::test_description_retrieval_trial",
"paver/tests/test_tasks.py::test_description_empty_without_docstring",
"paver/tests/test_tasks.py::test_description_retrieval_first_sentence",
"paver/tests/test_tasks.py::test_description_retrieval_first_sentence_even_with_version_numbers",
"paver/tests/test_tasks.py::test_auto_task_is_not_run_with_noauto",
"paver/tests/test_tasks.py::test_auto_task_is_run_when_present",
"paver/tests/test_tasks.py::test_task_can_be_called_repeatedly",
"paver/tests/test_tasks.py::test_options_passed_to_task",
"paver/tests/test_tasks.py::test_calling_task_with_option_arguments",
"paver/tests/test_tasks.py::test_calling_task_with_arguments_do_not_overwrite_it_for_other_tasks",
"paver/tests/test_tasks.py::test_options_might_be_provided_if_task_might_be_called",
"paver/tests/test_tasks.py::test_calling_task_with_arguments",
"paver/tests/test_tasks.py::test_calling_nonconsuming_task_with_arguments",
"paver/tests/test_tasks.py::test_options_may_overlap_between_multiple_tasks_even_when_specified_in_reverse_order",
"paver/tests/test_tasks.py::test_options_might_be_shared_both_way"
]
| []
| BSD License | 226 | [
"paver/tasks.py"
]
| [
"paver/tasks.py"
]
|
|
docker__docker-py-752 | 33acb9d2e05d0f3abb7897abbe50dd54600da85b | 2015-08-31 22:02:13 | 33acb9d2e05d0f3abb7897abbe50dd54600da85b | diff --git a/docker/client.py b/docker/client.py
index b1f72e97..88bc50de 100644
--- a/docker/client.py
+++ b/docker/client.py
@@ -41,7 +41,7 @@ class Client(clientbase.ClientBase):
'stderr': stderr and 1 or 0,
'stream': stream and 1 or 0,
}
- u = self._url("/containers/{0}/attach".format(container))
+ u = self._url("/containers/{0}/attach", container)
response = self._post(u, params=params, stream=stream)
return self._get_result(container, stream, response)
@@ -58,7 +58,7 @@ class Client(clientbase.ClientBase):
if ws:
return self._attach_websocket(container, params)
- u = self._url("/containers/{0}/attach".format(container))
+ u = self._url("/containers/{0}/attach", container)
return self._get_raw_response_socket(self.post(
u, None, params=self._attach_params(params), stream=True))
@@ -275,8 +275,9 @@ class Client(clientbase.ClientBase):
@check_resource
def diff(self, container):
- return self._result(self._get(self._url("/containers/{0}/changes".
- format(container))), True)
+ return self._result(
+ self._get(self._url("/containers/{0}/changes", container)), True
+ )
def events(self, since=None, until=None, filters=None, decode=None):
if isinstance(since, datetime):
@@ -326,7 +327,7 @@ class Client(clientbase.ClientBase):
'Cmd': cmd
}
- url = self._url('/containers/{0}/exec'.format(container))
+ url = self._url('/containers/{0}/exec', container)
res = self._post_json(url, data=data)
return self._result(res, True)
@@ -337,7 +338,7 @@ class Client(clientbase.ClientBase):
)
if isinstance(exec_id, dict):
exec_id = exec_id.get('Id')
- res = self._get(self._url("/exec/{0}/json".format(exec_id)))
+ res = self._get(self._url("/exec/{0}/json", exec_id))
return self._result(res, True)
def exec_resize(self, exec_id, height=None, width=None):
@@ -347,7 +348,7 @@ class Client(clientbase.ClientBase):
exec_id = exec_id.get('Id')
params = {'h': height, 'w': width}
- url = self._url("/exec/{0}/resize".format(exec_id))
+ url = self._url("/exec/{0}/resize", exec_id)
res = self._post(url, params=params)
self._raise_for_status(res)
@@ -362,27 +363,28 @@ class Client(clientbase.ClientBase):
'Detach': detach
}
- res = self._post_json(self._url('/exec/{0}/start'.format(exec_id)),
- data=data, stream=stream)
+ res = self._post_json(
+ self._url('/exec/{0}/start', exec_id), data=data, stream=stream
+ )
return self._get_result_tty(stream, res, tty)
@check_resource
def export(self, container):
- res = self._get(self._url("/containers/{0}/export".format(container)),
- stream=True)
+ res = self._get(
+ self._url("/containers/{0}/export", container), stream=True
+ )
self._raise_for_status(res)
return res.raw
@check_resource
def get_image(self, image):
- res = self._get(self._url("/images/{0}/get".format(image)),
- stream=True)
+ res = self._get(self._url("/images/{0}/get", image), stream=True)
self._raise_for_status(res)
return res.raw
@check_resource
def history(self, image):
- res = self._get(self._url("/images/{0}/history".format(image)))
+ res = self._get(self._url("/images/{0}/history", image))
return self._result(res, True)
def images(self, name=None, quiet=False, all=False, viz=False,
@@ -496,7 +498,7 @@ class Client(clientbase.ClientBase):
raise errors.DeprecatedMethod(
'insert is not available for API version >=1.12'
)
- api_url = self._url("/images/{0}/insert".format(image))
+ api_url = self._url("/images/{0}/insert", image)
params = {
'url': url,
'path': path
@@ -506,21 +508,18 @@ class Client(clientbase.ClientBase):
@check_resource
def inspect_container(self, container):
return self._result(
- self._get(self._url("/containers/{0}/json".format(container))),
- True)
+ self._get(self._url("/containers/{0}/json", container)), True
+ )
@check_resource
def inspect_image(self, image):
return self._result(
- self._get(
- self._url("/images/{0}/json".format(image.replace('/', '%2F')))
- ),
- True
+ self._get(self._url("/images/{0}/json", image)), True
)
@check_resource
def kill(self, container, signal=None):
- url = self._url("/containers/{0}/kill".format(container))
+ url = self._url("/containers/{0}/kill", container)
params = {}
if signal is not None:
params['signal'] = signal
@@ -583,7 +582,7 @@ class Client(clientbase.ClientBase):
if tail != 'all' and (not isinstance(tail, int) or tail <= 0):
tail = 'all'
params['tail'] = tail
- url = self._url("/containers/{0}/logs".format(container))
+ url = self._url("/containers/{0}/logs", container)
res = self._get(url, params=params, stream=stream)
return self._get_result(container, stream, res)
return self.attach(
@@ -596,7 +595,7 @@ class Client(clientbase.ClientBase):
@check_resource
def pause(self, container):
- url = self._url('/containers/{0}/pause'.format(container))
+ url = self._url('/containers/{0}/pause', container)
res = self._post(url)
self._raise_for_status(res)
@@ -605,7 +604,7 @@ class Client(clientbase.ClientBase):
@check_resource
def port(self, container, private_port):
- res = self._get(self._url("/containers/{0}/json".format(container)))
+ res = self._get(self._url("/containers/{0}/json", container))
self._raise_for_status(res)
json_ = res.json()
s_port = str(private_port)
@@ -692,7 +691,7 @@ class Client(clientbase.ClientBase):
if not tag:
repository, tag = utils.parse_repository_tag(repository)
registry, repo_name = auth.resolve_repository_name(repository)
- u = self._url("/images/{0}/push".format(repository))
+ u = self._url("/images/{0}/push", repository)
params = {
'tag': tag
}
@@ -725,14 +724,15 @@ class Client(clientbase.ClientBase):
@check_resource
def remove_container(self, container, v=False, link=False, force=False):
params = {'v': v, 'link': link, 'force': force}
- res = self._delete(self._url("/containers/" + container),
- params=params)
+ res = self._delete(
+ self._url("/containers/{0}", container), params=params
+ )
self._raise_for_status(res)
@check_resource
def remove_image(self, image, force=False, noprune=False):
params = {'force': force, 'noprune': noprune}
- res = self._delete(self._url("/images/" + image), params=params)
+ res = self._delete(self._url("/images/{0}", image), params=params)
self._raise_for_status(res)
@check_resource
@@ -741,7 +741,7 @@ class Client(clientbase.ClientBase):
raise errors.InvalidVersion(
'rename was only introduced in API version 1.17'
)
- url = self._url("/containers/{0}/rename".format(container))
+ url = self._url("/containers/{0}/rename", container)
params = {'name': name}
res = self._post(url, params=params)
self._raise_for_status(res)
@@ -749,21 +749,22 @@ class Client(clientbase.ClientBase):
@check_resource
def resize(self, container, height, width):
params = {'h': height, 'w': width}
- url = self._url("/containers/{0}/resize".format(container))
+ url = self._url("/containers/{0}/resize", container)
res = self._post(url, params=params)
self._raise_for_status(res)
@check_resource
def restart(self, container, timeout=10):
params = {'t': timeout}
- url = self._url("/containers/{0}/restart".format(container))
+ url = self._url("/containers/{0}/restart", container)
res = self._post(url, params=params)
self._raise_for_status(res)
def search(self, term):
- return self._result(self._get(self._url("/images/search"),
- params={'term': term}),
- True)
+ return self._result(
+ self._get(self._url("/images/search"), params={'term': term}),
+ True
+ )
@check_resource
def start(self, container, binds=None, port_bindings=None, lxc_conf=None,
@@ -829,7 +830,7 @@ class Client(clientbase.ClientBase):
)
start_config = self.create_host_config(**start_config_kwargs)
- url = self._url("/containers/{0}/start".format(container))
+ url = self._url("/containers/{0}/start", container)
res = self._post_json(url, data=start_config)
self._raise_for_status(res)
@@ -839,13 +840,13 @@ class Client(clientbase.ClientBase):
raise errors.InvalidVersion(
'Stats retrieval is not supported in API < 1.17!')
- url = self._url("/containers/{0}/stats".format(container))
+ url = self._url("/containers/{0}/stats", container)
return self._stream_helper(self._get(url, stream=True), decode=decode)
@check_resource
def stop(self, container, timeout=10):
params = {'t': timeout}
- url = self._url("/containers/{0}/stop".format(container))
+ url = self._url("/containers/{0}/stop", container)
res = self._post(url, params=params,
timeout=(timeout + (self.timeout or 0)))
@@ -858,14 +859,14 @@ class Client(clientbase.ClientBase):
'repo': repository,
'force': 1 if force else 0
}
- url = self._url("/images/{0}/tag".format(image))
+ url = self._url("/images/{0}/tag", image)
res = self._post(url, params=params)
self._raise_for_status(res)
return res.status_code == 201
@check_resource
def top(self, container):
- u = self._url("/containers/{0}/top".format(container))
+ u = self._url("/containers/{0}/top", container)
return self._result(self._get(u), True)
def version(self, api_version=True):
@@ -874,13 +875,13 @@ class Client(clientbase.ClientBase):
@check_resource
def unpause(self, container):
- url = self._url('/containers/{0}/unpause'.format(container))
+ url = self._url('/containers/{0}/unpause', container)
res = self._post(url)
self._raise_for_status(res)
@check_resource
def wait(self, container, timeout=None):
- url = self._url("/containers/{0}/wait".format(container))
+ url = self._url("/containers/{0}/wait", container)
res = self._post(url, timeout=timeout)
self._raise_for_status(res)
json_ = res.json()
diff --git a/docker/clientbase.py b/docker/clientbase.py
index ce52ffa7..90dba63d 100644
--- a/docker/clientbase.py
+++ b/docker/clientbase.py
@@ -88,11 +88,21 @@ class ClientBase(requests.Session):
def _delete(self, url, **kwargs):
return self.delete(url, **self._set_request_timeout(kwargs))
- def _url(self, path, versioned_api=True):
+ def _url(self, pathfmt, resource_id=None, versioned_api=True):
+ if resource_id and not isinstance(resource_id, six.string_types):
+ raise ValueError(
+ 'Expected a resource ID string but found {0} ({1}) '
+ 'instead'.format(resource_id, type(resource_id))
+ )
+ elif resource_id:
+ resource_id = six.moves.urllib.parse.quote_plus(resource_id)
+
if versioned_api:
- return '{0}/v{1}{2}'.format(self.base_url, self._version, path)
+ return '{0}/v{1}{2}'.format(
+ self.base_url, self._version, pathfmt.format(resource_id)
+ )
else:
- return '{0}{1}'.format(self.base_url, path)
+ return '{0}{1}'.format(self.base_url, pathfmt.format(resource_id))
def _raise_for_status(self, response, explanation=None):
"""Raises stored :class:`APIError`, if one occurred."""
@@ -136,7 +146,7 @@ class ClientBase(requests.Session):
@check_resource
def _attach_websocket(self, container, params=None):
- url = self._url("/containers/{0}/attach/ws".format(container))
+ url = self._url("/containers/{0}/attach/ws", container)
req = requests.Request("POST", url, params=self._attach_params(params))
full_url = req.prepare().url
full_url = full_url.replace("http://", "ws://", 1)
| Hardening for URL request construction
The `Client` class performs a lot of URL construction like this:
    res = self._get(self._url("/exec/{0}/json".format(exec_id)))
This can be used to manipulate the query string if `exec_id` contains characters like `'?'` and `'/'`, which can lead to vulnerabilities elsewhere. The client code should be fixed to reject such invalid parameters.
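To make the concern concrete, here is a quick sketch with a hypothetical attacker-controlled ID, alongside the escaping the standard library provides (the patch above applies `six.moves.urllib.parse.quote_plus` for this):

    from six.moves.urllib.parse import quote_plus

    exec_id = 'abc/../other?force=1'  # hypothetical malicious value

    print('/exec/{0}/json'.format(exec_id))
    # /exec/abc/../other?force=1/json  -- injects path segments and a query string

    print('/exec/{0}/json'.format(quote_plus(exec_id)))
    # /exec/abc%2F..%2Fother%3Fforce%3D1/json  -- the ID stays one opaque segment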
Downstream bug report: https://bugzilla.redhat.com/show_bug.cgi?id=1248031 | docker/docker-py | diff --git a/tests/test.py b/tests/test.py
index 1bf8c55d..9cf94a18 100644
--- a/tests/test.py
+++ b/tests/test.py
@@ -144,6 +144,28 @@ class DockerClientTest(Cleanup, base.BaseTestCase):
'Version parameter must be a string or None. Found float'
)
+ def test_url_valid_resource(self):
+ url = self.client._url('/hello/{0}/world', 'somename')
+ self.assertEqual(
+ url, '{0}{1}'.format(url_prefix, 'hello/somename/world')
+ )
+
+ url = self.client._url('/hello/{0}/world', '/some?name')
+ self.assertEqual(
+ url, '{0}{1}'.format(url_prefix, 'hello/%2Fsome%3Fname/world')
+ )
+
+ def test_url_invalid_resource(self):
+ with pytest.raises(ValueError):
+ self.client._url('/hello/{0}/world', ['sakuya', 'izayoi'])
+
+ def test_url_no_resource(self):
+ url = self.client._url('/simple')
+ self.assertEqual(url, '{0}{1}'.format(url_prefix, 'simple'))
+
+ url = self.client._url('/simple', None)
+ self.assertEqual(url, '{0}{1}'.format(url_prefix, 'simple'))
+
#########################
# INFORMATION TESTS #
#########################
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 2
} | 1.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.7",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi @ file:///croot/certifi_1671487769961/work/certifi
coverage==7.2.7
-e git+https://github.com/docker/docker-py.git@33acb9d2e05d0f3abb7897abbe50dd54600da85b#egg=docker_py
exceptiongroup==1.2.2
importlib-metadata==6.7.0
iniconfig==2.0.0
packaging==24.0
pluggy==1.2.0
pytest==7.4.4
pytest-cov==4.1.0
requests==2.5.3
six==1.17.0
tomli==2.0.1
typing_extensions==4.7.1
websocket-client==0.32.0
zipp==3.15.0
| name: docker-py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=22.3.1=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.2.7
- exceptiongroup==1.2.2
- importlib-metadata==6.7.0
- iniconfig==2.0.0
- packaging==24.0
- pluggy==1.2.0
- pytest==7.4.4
- pytest-cov==4.1.0
- requests==2.5.3
- six==1.17.0
- tomli==2.0.1
- typing-extensions==4.7.1
- websocket-client==0.32.0
- zipp==3.15.0
prefix: /opt/conda/envs/docker-py
| [
"tests/test.py::DockerClientTest::test_url_invalid_resource",
"tests/test.py::DockerClientTest::test_url_no_resource",
"tests/test.py::DockerClientTest::test_url_valid_resource"
]
| []
| [
"tests/test.py::DockerClientTest::test_auto_retrieve_server_version",
"tests/test.py::DockerClientTest::test_build_container",
"tests/test.py::DockerClientTest::test_build_container_custom_context",
"tests/test.py::DockerClientTest::test_build_container_custom_context_gzip",
"tests/test.py::DockerClientTest::test_build_container_invalid_container_limits",
"tests/test.py::DockerClientTest::test_build_container_pull",
"tests/test.py::DockerClientTest::test_build_container_stream",
"tests/test.py::DockerClientTest::test_build_container_with_container_limits",
"tests/test.py::DockerClientTest::test_build_container_with_named_dockerfile",
"tests/test.py::DockerClientTest::test_build_remote_with_registry_auth",
"tests/test.py::DockerClientTest::test_commit",
"tests/test.py::DockerClientTest::test_container_stats",
"tests/test.py::DockerClientTest::test_create_container",
"tests/test.py::DockerClientTest::test_create_container_empty_volumes_from",
"tests/test.py::DockerClientTest::test_create_container_privileged",
"tests/test.py::DockerClientTest::test_create_container_with_added_capabilities",
"tests/test.py::DockerClientTest::test_create_container_with_binds",
"tests/test.py::DockerClientTest::test_create_container_with_binds_list",
"tests/test.py::DockerClientTest::test_create_container_with_binds_mode",
"tests/test.py::DockerClientTest::test_create_container_with_binds_mode_and_ro_error",
"tests/test.py::DockerClientTest::test_create_container_with_binds_ro",
"tests/test.py::DockerClientTest::test_create_container_with_binds_rw",
"tests/test.py::DockerClientTest::test_create_container_with_cgroup_parent",
"tests/test.py::DockerClientTest::test_create_container_with_cpu_shares",
"tests/test.py::DockerClientTest::test_create_container_with_cpuset",
"tests/test.py::DockerClientTest::test_create_container_with_devices",
"tests/test.py::DockerClientTest::test_create_container_with_dropped_capabilities",
"tests/test.py::DockerClientTest::test_create_container_with_entrypoint",
"tests/test.py::DockerClientTest::test_create_container_with_labels_dict",
"tests/test.py::DockerClientTest::test_create_container_with_labels_list",
"tests/test.py::DockerClientTest::test_create_container_with_links",
"tests/test.py::DockerClientTest::test_create_container_with_links_as_list_of_tuples",
"tests/test.py::DockerClientTest::test_create_container_with_lxc_conf",
"tests/test.py::DockerClientTest::test_create_container_with_lxc_conf_compat",
"tests/test.py::DockerClientTest::test_create_container_with_mac_address",
"tests/test.py::DockerClientTest::test_create_container_with_mem_limit_as_int",
"tests/test.py::DockerClientTest::test_create_container_with_mem_limit_as_string",
"tests/test.py::DockerClientTest::test_create_container_with_mem_limit_as_string_with_g_unit",
"tests/test.py::DockerClientTest::test_create_container_with_mem_limit_as_string_with_k_unit",
"tests/test.py::DockerClientTest::test_create_container_with_mem_limit_as_string_with_m_unit",
"tests/test.py::DockerClientTest::test_create_container_with_mem_limit_as_string_with_wrong_value",
"tests/test.py::DockerClientTest::test_create_container_with_multiple_links",
"tests/test.py::DockerClientTest::test_create_container_with_named_volume",
"tests/test.py::DockerClientTest::test_create_container_with_port_binds",
"tests/test.py::DockerClientTest::test_create_container_with_ports",
"tests/test.py::DockerClientTest::test_create_container_with_restart_policy",
"tests/test.py::DockerClientTest::test_create_container_with_stdin_open",
"tests/test.py::DockerClientTest::test_create_container_with_volume_string",
"tests/test.py::DockerClientTest::test_create_container_with_volumes_from",
"tests/test.py::DockerClientTest::test_create_container_with_working_dir",
"tests/test.py::DockerClientTest::test_create_host_config_secopt",
"tests/test.py::DockerClientTest::test_create_named_container",
"tests/test.py::DockerClientTest::test_ctor",
"tests/test.py::DockerClientTest::test_diff",
"tests/test.py::DockerClientTest::test_diff_with_dict_instead_of_id",
"tests/test.py::DockerClientTest::test_events",
"tests/test.py::DockerClientTest::test_events_with_filters",
"tests/test.py::DockerClientTest::test_events_with_since_until",
"tests/test.py::DockerClientTest::test_exec_create",
"tests/test.py::DockerClientTest::test_exec_inspect",
"tests/test.py::DockerClientTest::test_exec_resize",
"tests/test.py::DockerClientTest::test_exec_start",
"tests/test.py::DockerClientTest::test_export",
"tests/test.py::DockerClientTest::test_export_with_dict_instead_of_id",
"tests/test.py::DockerClientTest::test_get_image",
"tests/test.py::DockerClientTest::test_image_history",
"tests/test.py::DockerClientTest::test_image_ids",
"tests/test.py::DockerClientTest::test_image_viz",
"tests/test.py::DockerClientTest::test_images",
"tests/test.py::DockerClientTest::test_images_filters",
"tests/test.py::DockerClientTest::test_images_quiet",
"tests/test.py::DockerClientTest::test_import_image",
"tests/test.py::DockerClientTest::test_import_image_from_bytes",
"tests/test.py::DockerClientTest::test_import_image_from_image",
"tests/test.py::DockerClientTest::test_info",
"tests/test.py::DockerClientTest::test_insert_image",
"tests/test.py::DockerClientTest::test_inspect_container",
"tests/test.py::DockerClientTest::test_inspect_container_undefined_id",
"tests/test.py::DockerClientTest::test_inspect_image",
"tests/test.py::DockerClientTest::test_inspect_image_undefined_id",
"tests/test.py::DockerClientTest::test_kill_container",
"tests/test.py::DockerClientTest::test_kill_container_with_dict_instead_of_id",
"tests/test.py::DockerClientTest::test_kill_container_with_signal",
"tests/test.py::DockerClientTest::test_list_containers",
"tests/test.py::DockerClientTest::test_load_config",
"tests/test.py::DockerClientTest::test_load_config_no_file",
"tests/test.py::DockerClientTest::test_load_config_with_random_name",
"tests/test.py::DockerClientTest::test_load_image",
"tests/test.py::DockerClientTest::test_log_streaming",
"tests/test.py::DockerClientTest::test_log_tail",
"tests/test.py::DockerClientTest::test_log_tty",
"tests/test.py::DockerClientTest::test_logs",
"tests/test.py::DockerClientTest::test_logs_with_dict_instead_of_id",
"tests/test.py::DockerClientTest::test_pause_container",
"tests/test.py::DockerClientTest::test_port",
"tests/test.py::DockerClientTest::test_pull",
"tests/test.py::DockerClientTest::test_pull_stream",
"tests/test.py::DockerClientTest::test_push_image",
"tests/test.py::DockerClientTest::test_push_image_stream",
"tests/test.py::DockerClientTest::test_push_image_with_tag",
"tests/test.py::DockerClientTest::test_remove_container",
"tests/test.py::DockerClientTest::test_remove_container_with_dict_instead_of_id",
"tests/test.py::DockerClientTest::test_remove_image",
"tests/test.py::DockerClientTest::test_remove_link",
"tests/test.py::DockerClientTest::test_rename_container",
"tests/test.py::DockerClientTest::test_resize_container",
"tests/test.py::DockerClientTest::test_restart_container",
"tests/test.py::DockerClientTest::test_restart_container_with_dict_instead_of_id",
"tests/test.py::DockerClientTest::test_retrieve_server_version",
"tests/test.py::DockerClientTest::test_search",
"tests/test.py::DockerClientTest::test_start_container",
"tests/test.py::DockerClientTest::test_start_container_none",
"tests/test.py::DockerClientTest::test_start_container_privileged",
"tests/test.py::DockerClientTest::test_start_container_regression_573",
"tests/test.py::DockerClientTest::test_start_container_with_binds_ro",
"tests/test.py::DockerClientTest::test_start_container_with_binds_rw",
"tests/test.py::DockerClientTest::test_start_container_with_dict_instead_of_id",
"tests/test.py::DockerClientTest::test_start_container_with_links",
"tests/test.py::DockerClientTest::test_start_container_with_links_as_list_of_tuples",
"tests/test.py::DockerClientTest::test_start_container_with_lxc_conf",
"tests/test.py::DockerClientTest::test_start_container_with_lxc_conf_compat",
"tests/test.py::DockerClientTest::test_start_container_with_multiple_links",
"tests/test.py::DockerClientTest::test_start_container_with_port_binds",
"tests/test.py::DockerClientTest::test_stop_container",
"tests/test.py::DockerClientTest::test_stop_container_with_dict_instead_of_id",
"tests/test.py::DockerClientTest::test_tag_image",
"tests/test.py::DockerClientTest::test_tag_image_force",
"tests/test.py::DockerClientTest::test_tag_image_tag",
"tests/test.py::DockerClientTest::test_tar_with_directory_symlinks",
"tests/test.py::DockerClientTest::test_tar_with_empty_directory",
"tests/test.py::DockerClientTest::test_tar_with_excludes",
"tests/test.py::DockerClientTest::test_tar_with_file_symlinks",
"tests/test.py::DockerClientTest::test_unpause_container",
"tests/test.py::DockerClientTest::test_url_compatibility_http",
"tests/test.py::DockerClientTest::test_url_compatibility_http_unix_triple_slash",
"tests/test.py::DockerClientTest::test_url_compatibility_tcp",
"tests/test.py::DockerClientTest::test_url_compatibility_unix",
"tests/test.py::DockerClientTest::test_url_compatibility_unix_triple_slash",
"tests/test.py::DockerClientTest::test_version",
"tests/test.py::DockerClientTest::test_wait",
"tests/test.py::DockerClientTest::test_wait_with_dict_instead_of_id",
"tests/test.py::StreamTest::test_early_stream_response"
]
| []
| Apache License 2.0 | 228 | [
"docker/client.py",
"docker/clientbase.py"
]
| [
"docker/client.py",
"docker/clientbase.py"
]
|
|
mopidy__mopidy-local-sqlite-72 | df6a368f3649efe4c2d691fdaced0a590553a4a2 | 2015-09-04 09:55:29 | df6a368f3649efe4c2d691fdaced0a590553a4a2 | diff --git a/CHANGES.rst b/CHANGES.rst
index ae7111f..16f16e6 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -8,6 +8,10 @@
If we can find the old data dir, all files are automatically moved to the new
data dir.
+- By default, browsing artists will use the "sortname" field for
+ ordering results, if available. Set ``use_artist_sortname = false``
+ to sort according to sort according to the displayed name only.
+
- Remove file system ("Folders") browsing, since this is already
handled by the ``file`` backend in Mopidy v1.1.
diff --git a/README.rst b/README.rst
index a9d63ce..ba529d3 100644
--- a/README.rst
+++ b/README.rst
@@ -64,6 +64,10 @@ but be aware that these are still subject to change::
# multi-artist tracks [https://github.com/sampsyo/beets/issues/907]
use_artist_mbid_uri = false
+ # whether to use the sortname field for sorting artist browse results;
+ # set to false to sort according to displayed name only
+ use_artist_sortname = true
+
Project Resources
------------------------------------------------------------------------
diff --git a/mopidy_local_sqlite/__init__.py b/mopidy_local_sqlite/__init__.py
index 279d7d0..e6c1939 100644
--- a/mopidy_local_sqlite/__init__.py
+++ b/mopidy_local_sqlite/__init__.py
@@ -26,6 +26,7 @@ class Extension(ext.Extension):
schema['timeout'] = config.Integer(optional=True, minimum=1)
schema['use_album_mbid_uri'] = config.Boolean()
schema['use_artist_mbid_uri'] = config.Boolean()
+ schema['use_artist_sortname'] = config.Boolean()
# no longer used
schema['search_limit'] = config.Deprecated()
schema['extract_images'] = config.Deprecated()
diff --git a/mopidy_local_sqlite/ext.conf b/mopidy_local_sqlite/ext.conf
index 5919d64..cfbfa46 100644
--- a/mopidy_local_sqlite/ext.conf
+++ b/mopidy_local_sqlite/ext.conf
@@ -23,3 +23,7 @@ use_album_mbid_uri = true
# disabled by default, since some taggers do not handle this well for
# multi-artist tracks [https://github.com/sampsyo/beets/issues/907]
use_artist_mbid_uri = false
+
+# whether to use the sortname field for sorting artist browse results;
+# set to false to sort according to displayed name only
+use_artist_sortname = true
diff --git a/mopidy_local_sqlite/library.py b/mopidy_local_sqlite/library.py
index d3623c5..da67cda 100644
--- a/mopidy_local_sqlite/library.py
+++ b/mopidy_local_sqlite/library.py
@@ -170,7 +170,8 @@ class SQLiteLibrary(local.Library):
# to composers and performers
if type == Ref.TRACK and 'album' in query:
order = ('disc_no', 'track_no', 'name')
-
+ if type == Ref.ARTIST and self._config['use_artist_sortname']:
+ order = ('coalesce(sortname, name)',)
roles = role or ('artist', 'albumartist') # FIXME: re-think 'roles'...
refs = []
diff --git a/mopidy_local_sqlite/schema.py b/mopidy_local_sqlite/schema.py
index df02db2..a169077 100644
--- a/mopidy_local_sqlite/schema.py
+++ b/mopidy_local_sqlite/schema.py
@@ -146,7 +146,7 @@ _SEARCH_FIELDS = {
'comment'
}
-schema_version = 5
+schema_version = 6
logger = logging.getLogger(__name__)
@@ -167,16 +167,18 @@ class Connection(sqlite3.Connection):
def load(c):
sql_dir = os.path.join(os.path.dirname(__file__), b'sql')
user_version = c.execute('PRAGMA user_version').fetchone()[0]
- if not user_version:
- logger.info('Creating SQLite database schema v%s', schema_version)
- script = os.path.join(sql_dir, 'create-v%s.sql' % schema_version)
- c.executescript(open(script).read())
- user_version = c.execute('PRAGMA user_version').fetchone()[0]
while user_version != schema_version:
- logger.info('Upgrading SQLite database schema v%s', user_version)
- script = os.path.join(sql_dir, 'upgrade-v%s.sql' % user_version)
- c.executescript(open(script).read())
- user_version = c.execute('PRAGMA user_version').fetchone()[0]
+ if user_version:
+ logger.info('Upgrading SQLite database schema v%s', user_version)
+ filename = 'upgrade-v%s.sql' % user_version
+ else:
+ logger.info('Creating SQLite database schema v%s', schema_version)
+ filename = 'schema.sql'
+ with open(os.path.join(sql_dir, filename)) as fh:
+ c.executescript(fh.read())
+ new_version = c.execute('PRAGMA user_version').fetchone()[0]
+ assert new_version != user_version
+ user_version = new_version
return user_version
@@ -269,6 +271,7 @@ def insert_artists(c, artists):
_insert(c, 'artist', {
'uri': artist.uri,
'name': artist.name,
+ 'sortname': artist.sortname,
'musicbrainz_id': artist.musicbrainz_id
})
return artist.uri
@@ -422,6 +425,7 @@ def _track(row):
albumartists = [Artist(
uri=row.albumartist_uri,
name=row.albumartist_name,
+ sortname=row.albumartist_sortname,
musicbrainz_id=row.albumartist_musicbrainz_id
)]
else:
@@ -440,18 +444,21 @@ def _track(row):
kwargs['artists'] = [Artist(
uri=row.artist_uri,
name=row.artist_name,
+ sortname=row.artist_sortname,
musicbrainz_id=row.artist_musicbrainz_id
)]
if row.composer_uri is not None:
kwargs['composers'] = [Artist(
uri=row.composer_uri,
name=row.composer_name,
+ sortname=row.composer_sortname,
musicbrainz_id=row.composer_musicbrainz_id
)]
if row.performer_uri is not None:
kwargs['performers'] = [Artist(
uri=row.performer_uri,
name=row.performer_name,
+ sortname=row.performer_sortname,
musicbrainz_id=row.performer_musicbrainz_id
)]
return Track(**kwargs)
diff --git a/mopidy_local_sqlite/sql/create-v5.sql b/mopidy_local_sqlite/sql/schema.sql
similarity index 94%
rename from mopidy_local_sqlite/sql/create-v5.sql
rename to mopidy_local_sqlite/sql/schema.sql
index d6c119c..c633016 100644
--- a/mopidy_local_sqlite/sql/create-v5.sql
+++ b/mopidy_local_sqlite/sql/schema.sql
@@ -2,11 +2,12 @@
BEGIN EXCLUSIVE TRANSACTION;
-PRAGMA user_version = 5; -- schema version
+PRAGMA user_version = 6; -- schema version
CREATE TABLE artist (
uri TEXT PRIMARY KEY, -- artist URI
name TEXT NOT NULL, -- artist name
+ sortname TEXT, -- artist name for sorting
musicbrainz_id TEXT -- MusicBrainz ID
);
@@ -66,6 +67,7 @@ SELECT album.uri AS uri,
album.name AS name,
artist.uri AS artist_uri,
artist.name AS artist_name,
+ artist.sortname AS artist_sortname,
artist.musicbrainz_id AS artist_musicbrainz_id,
album.num_tracks AS num_tracks,
album.num_discs AS num_discs,
@@ -97,15 +99,19 @@ SELECT track.rowid AS docid,
album.images AS album_images,
artist.uri AS artist_uri,
artist.name AS artist_name,
+ artist.sortname AS artist_sortname,
artist.musicbrainz_id AS artist_musicbrainz_id,
composer.uri AS composer_uri,
composer.name AS composer_name,
+ composer.sortname AS composer_sortname,
composer.musicbrainz_id AS composer_musicbrainz_id,
performer.uri AS performer_uri,
performer.name AS performer_name,
+ performer.sortname AS performer_sortname,
performer.musicbrainz_id AS performer_musicbrainz_id,
albumartist.uri AS albumartist_uri,
albumartist.name AS albumartist_name,
+ albumartist.sortname AS albumartist_sortname,
albumartist.musicbrainz_id AS albumartist_musicbrainz_id
FROM track
LEFT OUTER JOIN album ON track.album = album.uri
diff --git a/mopidy_local_sqlite/sql/upgrade-v5.sql b/mopidy_local_sqlite/sql/upgrade-v5.sql
new file mode 100644
index 0000000..45da1c1
--- /dev/null
+++ b/mopidy_local_sqlite/sql/upgrade-v5.sql
@@ -0,0 +1,70 @@
+-- Mopidy-Local-SQLite schema upgrade v5 -> v6
+
+BEGIN EXCLUSIVE TRANSACTION;
+
+ALTER TABLE artist ADD COLUMN sortname TEXT;
+
+DROP VIEW albums;
+DROP VIEW tracks;
+
+CREATE VIEW albums AS
+SELECT album.uri AS uri,
+ album.name AS name,
+ artist.uri AS artist_uri,
+ artist.name AS artist_name,
+ artist.sortname AS artist_sortname,
+ artist.musicbrainz_id AS artist_musicbrainz_id,
+ album.num_tracks AS num_tracks,
+ album.num_discs AS num_discs,
+ album.date AS date,
+ album.musicbrainz_id AS musicbrainz_id,
+ album.images AS images
+ FROM album
+ LEFT OUTER JOIN artist ON album.artists = artist.uri;
+
+CREATE VIEW tracks AS
+SELECT track.rowid AS docid,
+ track.uri AS uri,
+ track.name AS name,
+ track.genre AS genre,
+ track.track_no AS track_no,
+ track.disc_no AS disc_no,
+ track.date AS date,
+ track.length AS length,
+ track.bitrate AS bitrate,
+ track.comment AS comment,
+ track.musicbrainz_id AS musicbrainz_id,
+ track.last_modified AS last_modified,
+ album.uri AS album_uri,
+ album.name AS album_name,
+ album.num_tracks AS album_num_tracks,
+ album.num_discs AS album_num_discs,
+ album.date AS album_date,
+ album.musicbrainz_id AS album_musicbrainz_id,
+ album.images AS album_images,
+ artist.uri AS artist_uri,
+ artist.name AS artist_name,
+ artist.sortname AS artist_sortname,
+ artist.musicbrainz_id AS artist_musicbrainz_id,
+ composer.uri AS composer_uri,
+ composer.name AS composer_name,
+ composer.sortname AS composer_sortname,
+ composer.musicbrainz_id AS composer_musicbrainz_id,
+ performer.uri AS performer_uri,
+ performer.name AS performer_name,
+ performer.sortname AS performer_sortname,
+ performer.musicbrainz_id AS performer_musicbrainz_id,
+ albumartist.uri AS albumartist_uri,
+ albumartist.name AS albumartist_name,
+ albumartist.sortname AS albumartist_sortname,
+ albumartist.musicbrainz_id AS albumartist_musicbrainz_id
+ FROM track
+ LEFT OUTER JOIN album ON track.album = album.uri
+ LEFT OUTER JOIN artist ON track.artists = artist.uri
+ LEFT OUTER JOIN artist AS composer ON track.composers = composer.uri
+ LEFT OUTER JOIN artist AS performer ON track.performers = performer.uri
+ LEFT OUTER JOIN artist AS albumartist ON album.artists = albumartist.uri;
+
+PRAGMA user_version = 6; -- update schema version
+
+END TRANSACTION;
| Add "sortname" fields
See mopidy/mopidy#940.
Even if this doesn't get added to models, the fields can still be extracted from gstreamer tags passed to `Library.add()` in Mopidy v0.20. `Library.browse()` can then return results using
```sql
ORDER BY coalesce(sortname, name)
```
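A minimal, self-contained sketch of what that ordering gives (the `artist` table and `sortname` column match the patch above; the sample names are illustrative):
```python
# Browse artists ordered by sortname when present, else by displayed name.
import sqlite3

db = sqlite3.connect(':memory:')
db.execute('CREATE TABLE artist (uri TEXT, name TEXT, sortname TEXT)')
db.execute("INSERT INTO artist VALUES "
           "('local:artist:1', 'The Beatles', 'Beatles, The')")
db.execute("INSERT INTO artist VALUES ('local:artist:2', 'ABBA', NULL)")
rows = db.execute(
    'SELECT name FROM artist ORDER BY coalesce(sortname, name)').fetchall()
print([r[0] for r in rows])  # ['ABBA', 'The Beatles']
```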
| mopidy/mopidy-local-sqlite | diff --git a/tests/test_extension.py b/tests/test_extension.py
index a865514..099be12 100644
--- a/tests/test_extension.py
+++ b/tests/test_extension.py
@@ -17,3 +17,4 @@ def test_get_config_schema():
assert 'timeout' in schema
assert 'use_album_mbid_uri' in schema
assert 'use_artist_mbid_uri' in schema
+ assert 'use_artist_sortname' in schema
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 7
} | 0.10 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"mock",
"pytest",
"pytest-cov",
"pytest-xdist"
],
"pre_install": [
"apt-get update",
"apt-get install -y mopidy"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
coverage==7.8.0
exceptiongroup==1.2.2
execnet==2.1.1
idna==3.10
iniconfig==2.1.0
mock==5.2.0
Mopidy==3.4.2
-e git+https://github.com/mopidy/mopidy-local-sqlite.git@df6a368f3649efe4c2d691fdaced0a590553a4a2#egg=Mopidy_Local_SQLite
packaging==24.2
pluggy==1.5.0
pykka==4.2.0
pytest==8.3.5
pytest-cov==6.0.0
pytest-xdist==3.6.1
requests==2.32.3
tomli==2.2.1
tornado==6.4.2
typing_extensions==4.13.0
uritools==4.0.3
urllib3==2.3.0
| name: mopidy-local-sqlite
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- coverage==7.8.0
- exceptiongroup==1.2.2
- execnet==2.1.1
- idna==3.10
- iniconfig==2.1.0
- mock==5.2.0
- mopidy==3.4.2
- packaging==24.2
- pluggy==1.5.0
- pykka==4.2.0
- pytest==8.3.5
- pytest-cov==6.0.0
- pytest-xdist==3.6.1
- requests==2.32.3
- tomli==2.2.1
- tornado==6.4.2
- typing-extensions==4.13.0
- uritools==4.0.3
- urllib3==2.3.0
prefix: /opt/conda/envs/mopidy-local-sqlite
| [
"tests/test_extension.py::test_get_config_schema"
]
| []
| [
"tests/test_extension.py::test_get_default_config"
]
| []
| Apache License 2.0 | 230 | [
"README.rst",
"mopidy_local_sqlite/sql/create-v5.sql",
"mopidy_local_sqlite/sql/upgrade-v5.sql",
"mopidy_local_sqlite/schema.py",
"mopidy_local_sqlite/__init__.py",
"mopidy_local_sqlite/library.py",
"mopidy_local_sqlite/ext.conf",
"CHANGES.rst"
]
| [
"README.rst",
"mopidy_local_sqlite/sql/upgrade-v5.sql",
"mopidy_local_sqlite/schema.py",
"mopidy_local_sqlite/__init__.py",
"mopidy_local_sqlite/library.py",
"mopidy_local_sqlite/ext.conf",
"CHANGES.rst",
"mopidy_local_sqlite/sql/schema.sql"
]
|
|
falconry__falcon-597 | dcbde2512cac6f282bc209702c7477379c9546c1 | 2015-09-06 00:04:56 | b78ffaac7c412d3b3d6cd3c70dd05024d79d2cce | philiptzou: Rebased onto the latest version.
philiptzou: Rebased onto the latest version. Again.
kgriffs: @philiptzou Nice work! Just a couple of nits to address and this should be ready to merge.
philiptzou: @kgriffs Sorry for the delay. Just fixed the issues you addressed. | diff --git a/falcon/util/uri.py b/falcon/util/uri.py
index f014e5a..2359672 100644
--- a/falcon/util/uri.py
+++ b/falcon/util/uri.py
@@ -178,9 +178,12 @@ if six.PY2:
tokens = decoded_uri.split('%')
decoded_uri = tokens[0]
for token in tokens[1:]:
- char, byte = _HEX_TO_BYTE[token[:2]]
- decoded_uri += char + token[2:]
-
+ token_partial = token[:2]
+ if token_partial in _HEX_TO_BYTE:
+ char, byte = _HEX_TO_BYTE[token_partial]
+ else:
+ char, byte = '%', 0
+ decoded_uri += char + (token[2:] if byte else token)
only_ascii = only_ascii and (byte <= 127)
# PERF(kgriffs): Only spend the time to do this if there
@@ -235,7 +238,12 @@ else:
tokens = decoded_uri.split(b'%')
decoded_uri = tokens[0]
for token in tokens[1:]:
- decoded_uri += _HEX_TO_BYTE[token[:2]] + token[2:]
+ token_partial = token[:2]
+ if token_partial in _HEX_TO_BYTE:
+ decoded_uri += _HEX_TO_BYTE[token_partial] + token[2:]
+ else:
+ # malformed percentage like "x=%" or "y=%+"
+ decoded_uri += b'%' + token
# Convert back to str
return decoded_uri.decode('utf-8', 'replace')
| Falcon crashes on decoding query string
When Falcon receives a request with a query string that cannot be percent-decoded (e.g. `x=%`), it crashes, resulting in a rather unexpected 502 response:
```plain
Traceback (most recent call last):
  File "/opt/ads/venv/local/lib/python2.7/site-packages/falcon/api.py", line 154, in __call__
    req = self._request_type(env, options=self.req_options)
  File "/opt/ads/venv/local/lib/python2.7/site-packages/falcon/request.py", line 237, in __init__
    keep_blank_qs_values=self.options.keep_blank_qs_values,
  File "/opt/ads/venv/local/lib/python2.7/site-packages/falcon/util/uri.py", line 327, in parse_query_string
    params[k] = decode(v)
  File "/opt/ads/venv/local/lib/python2.7/site-packages/falcon/util/uri.py", line 181, in decode
    char, byte = _HEX_TO_BYTE[token[:2]]
KeyError: ''
```
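For context on the question below: the fix that was eventually merged (the patch above) makes the decode loop tolerant, passing malformed sequences through unchanged. The idea in isolation looks roughly like this (a self-contained sketch with its own hex table, not falcon's actual internals):
```python
# Tolerant percent-decoding: malformed sequences such as a bare trailing
# '%' are emitted verbatim instead of raising KeyError.
import itertools

_HEXDIG = '0123456789abcdefABCDEF'
HEX_TO_BYTE = {(a + b).encode(): bytes([int(a + b, 16)])
               for a, b in itertools.product(_HEXDIG, repeat=2)}

def decode(encoded):
    tokens = encoded.encode('utf-8').split(b'%')
    out = tokens[0]
    for token in tokens[1:]:
        if token[:2] in HEX_TO_BYTE:
            out += HEX_TO_BYTE[token[:2]] + token[2:]
        else:
            out += b'%' + token  # malformed, e.g. 'x=%' or 'y=%+'
    return out.decode('utf-8', 'replace')

print(decode('x=%'))     # x=%
print(decode('%7E/ok'))  # ~/ok
```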
Would it make more sense to pass through the original value in this case without trying to decode it? It's a rather small change so I could prepare a pull request for it, but I would like to confirm first that this was not intentional behavior. | falconry/falcon | diff --git a/tests/test_query_params.py b/tests/test_query_params.py
index 5604e9a..50ed010 100644
--- a/tests/test_query_params.py
+++ b/tests/test_query_params.py
@@ -64,6 +64,16 @@ class _TestQueryParams(testing.TestBase):
self.assertEqual(req.get_param_as_list('id', int), [23, 42])
self.assertEqual(req.get_param('q'), u'\u8c46 \u74e3')
+ def test_bad_percentage(self):
+ query_string = 'x=%%20%+%&y=peregrine&z=%a%z%zz%1%20e'
+ self.simulate_request('/', query_string=query_string)
+ self.assertEqual(self.srmock.status, falcon.HTTP_200)
+
+ req = self.resource.req
+ self.assertEqual(req.get_param('x'), '% % %')
+ self.assertEqual(req.get_param('y'), 'peregrine')
+ self.assertEqual(req.get_param('z'), '%a%z%zz%1 e')
+
def test_allowed_names(self):
query_string = ('p=0&p1=23&2p=foo&some-thing=that&blank=&'
'some_thing=x&-bogus=foo&more.things=blah&'
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"nose",
"coverage",
"ddt",
"pyyaml",
"requests",
"testtools",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.7",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///croot/attrs_1668696182826/work
certifi @ file:///croot/certifi_1671487769961/work/certifi
charset-normalizer==3.4.1
coverage==7.2.7
ddt==1.7.2
-e git+https://github.com/falconry/falcon.git@dcbde2512cac6f282bc209702c7477379c9546c1#egg=falcon
flit_core @ file:///opt/conda/conda-bld/flit-core_1644941570762/work/source/flit_core
idna==3.10
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1648562407465/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
nose==1.3.7
packaging @ file:///croot/packaging_1671697413597/work
pluggy @ file:///tmp/build/80754af9/pluggy_1648042572264/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pytest==7.1.2
python-mimeparse==1.6.0
PyYAML==6.0.1
requests==2.31.0
six==1.17.0
testtools==2.7.1
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
typing_extensions @ file:///croot/typing_extensions_1669924550328/work
urllib3==2.0.7
zipp @ file:///croot/zipp_1672387121353/work
| name: falcon
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=22.1.0=py37h06a4308_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- flit-core=3.6.0=pyhd3eb1b0_0
- importlib-metadata=4.11.3=py37h06a4308_0
- importlib_metadata=4.11.3=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=22.0=py37h06a4308_0
- pip=22.3.1=py37h06a4308_0
- pluggy=1.0.0=py37h06a4308_1
- py=1.11.0=pyhd3eb1b0_0
- pytest=7.1.2=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py37h06a4308_0
- typing_extensions=4.4.0=py37h06a4308_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zipp=3.11.0=py37h06a4308_0
- zlib=1.2.13=h5eee18b_1
- pip:
- charset-normalizer==3.4.1
- coverage==7.2.7
- ddt==1.7.2
- idna==3.10
- nose==1.3.7
- python-mimeparse==1.6.0
- pyyaml==6.0.1
- requests==2.31.0
- six==1.17.0
- testtools==2.7.1
- urllib3==2.0.7
prefix: /opt/conda/envs/falcon
| [
"tests/test_query_params.py::_TestQueryParams::test_bad_percentage",
"tests/test_query_params.py::PostQueryParams::test_bad_percentage",
"tests/test_query_params.py::GetQueryParams::test_bad_percentage"
]
| []
| [
"tests/test_query_params.py::_TestQueryParams::test_allowed_names",
"tests/test_query_params.py::_TestQueryParams::test_blank",
"tests/test_query_params.py::_TestQueryParams::test_boolean",
"tests/test_query_params.py::_TestQueryParams::test_boolean_blank",
"tests/test_query_params.py::_TestQueryParams::test_get_date_invalid",
"tests/test_query_params.py::_TestQueryParams::test_get_date_missing_param",
"tests/test_query_params.py::_TestQueryParams::test_get_date_store",
"tests/test_query_params.py::_TestQueryParams::test_get_date_valid",
"tests/test_query_params.py::_TestQueryParams::test_get_date_valid_with_format",
"tests/test_query_params.py::_TestQueryParams::test_int",
"tests/test_query_params.py::_TestQueryParams::test_int_neg",
"tests/test_query_params.py::_TestQueryParams::test_list_transformer",
"tests/test_query_params.py::_TestQueryParams::test_list_type",
"tests/test_query_params.py::_TestQueryParams::test_list_type_blank",
"tests/test_query_params.py::_TestQueryParams::test_multiple_form_keys",
"tests/test_query_params.py::_TestQueryParams::test_multiple_form_keys_as_list",
"tests/test_query_params.py::_TestQueryParams::test_multiple_keys_as_bool",
"tests/test_query_params.py::_TestQueryParams::test_multiple_keys_as_int",
"tests/test_query_params.py::_TestQueryParams::test_none",
"tests/test_query_params.py::_TestQueryParams::test_param_property",
"tests/test_query_params.py::_TestQueryParams::test_percent_encoded",
"tests/test_query_params.py::_TestQueryParams::test_required_1_get_param",
"tests/test_query_params.py::_TestQueryParams::test_required_2_get_param_as_int",
"tests/test_query_params.py::_TestQueryParams::test_required_3_get_param_as_bool",
"tests/test_query_params.py::_TestQueryParams::test_required_4_get_param_as_list",
"tests/test_query_params.py::_TestQueryParams::test_simple",
"tests/test_query_params.py::PostQueryParams::test_allowed_names",
"tests/test_query_params.py::PostQueryParams::test_blank",
"tests/test_query_params.py::PostQueryParams::test_boolean",
"tests/test_query_params.py::PostQueryParams::test_boolean_blank",
"tests/test_query_params.py::PostQueryParams::test_get_date_invalid",
"tests/test_query_params.py::PostQueryParams::test_get_date_missing_param",
"tests/test_query_params.py::PostQueryParams::test_get_date_store",
"tests/test_query_params.py::PostQueryParams::test_get_date_valid",
"tests/test_query_params.py::PostQueryParams::test_get_date_valid_with_format",
"tests/test_query_params.py::PostQueryParams::test_int",
"tests/test_query_params.py::PostQueryParams::test_int_neg",
"tests/test_query_params.py::PostQueryParams::test_list_transformer",
"tests/test_query_params.py::PostQueryParams::test_list_type",
"tests/test_query_params.py::PostQueryParams::test_list_type_blank",
"tests/test_query_params.py::PostQueryParams::test_multiple_form_keys",
"tests/test_query_params.py::PostQueryParams::test_multiple_form_keys_as_list",
"tests/test_query_params.py::PostQueryParams::test_multiple_keys_as_bool",
"tests/test_query_params.py::PostQueryParams::test_multiple_keys_as_int",
"tests/test_query_params.py::PostQueryParams::test_non_ascii",
"tests/test_query_params.py::PostQueryParams::test_none",
"tests/test_query_params.py::PostQueryParams::test_param_property",
"tests/test_query_params.py::PostQueryParams::test_percent_encoded",
"tests/test_query_params.py::PostQueryParams::test_required_1_get_param",
"tests/test_query_params.py::PostQueryParams::test_required_2_get_param_as_int",
"tests/test_query_params.py::PostQueryParams::test_required_3_get_param_as_bool",
"tests/test_query_params.py::PostQueryParams::test_required_4_get_param_as_list",
"tests/test_query_params.py::PostQueryParams::test_simple",
"tests/test_query_params.py::GetQueryParams::test_allowed_names",
"tests/test_query_params.py::GetQueryParams::test_blank",
"tests/test_query_params.py::GetQueryParams::test_boolean",
"tests/test_query_params.py::GetQueryParams::test_boolean_blank",
"tests/test_query_params.py::GetQueryParams::test_get_date_invalid",
"tests/test_query_params.py::GetQueryParams::test_get_date_missing_param",
"tests/test_query_params.py::GetQueryParams::test_get_date_store",
"tests/test_query_params.py::GetQueryParams::test_get_date_valid",
"tests/test_query_params.py::GetQueryParams::test_get_date_valid_with_format",
"tests/test_query_params.py::GetQueryParams::test_int",
"tests/test_query_params.py::GetQueryParams::test_int_neg",
"tests/test_query_params.py::GetQueryParams::test_list_transformer",
"tests/test_query_params.py::GetQueryParams::test_list_type",
"tests/test_query_params.py::GetQueryParams::test_list_type_blank",
"tests/test_query_params.py::GetQueryParams::test_multiple_form_keys",
"tests/test_query_params.py::GetQueryParams::test_multiple_form_keys_as_list",
"tests/test_query_params.py::GetQueryParams::test_multiple_keys_as_bool",
"tests/test_query_params.py::GetQueryParams::test_multiple_keys_as_int",
"tests/test_query_params.py::GetQueryParams::test_none",
"tests/test_query_params.py::GetQueryParams::test_param_property",
"tests/test_query_params.py::GetQueryParams::test_percent_encoded",
"tests/test_query_params.py::GetQueryParams::test_required_1_get_param",
"tests/test_query_params.py::GetQueryParams::test_required_2_get_param_as_int",
"tests/test_query_params.py::GetQueryParams::test_required_3_get_param_as_bool",
"tests/test_query_params.py::GetQueryParams::test_required_4_get_param_as_list",
"tests/test_query_params.py::GetQueryParams::test_simple"
]
| []
| Apache License 2.0 | 231 | [
"falcon/util/uri.py"
]
| [
"falcon/util/uri.py"
]
|
spendright__msd-20 | 02d85249b9e1f5b2824b9a268220a30301084e42 | 2015-09-07 05:17:33 | 02d85249b9e1f5b2824b9a268220a30301084e42 | diff --git a/msd/db.py b/msd/db.py
index 87ee840..9014087 100644
--- a/msd/db.py
+++ b/msd/db.py
@@ -15,6 +15,27 @@ import sqlite3
from itertools import groupby
+def create_table(db, table_name, columns, primary_key=None):
+ """Create a table with the given columns and, optionally, primary key.
+
+ *columns* is a map from column name to type
+ *primary_key* is a list of column names
+ """
+
+ col_def_sql = ', '.join('`{}` {}'.format(col_name, col_type)
+ for col_name, col_type in sorted(columns.items()))
+
+ # optional PRIMARY KEY
+ primary_key_sql = ''
+ if primary_key:
+ primary_key_sql = ', PRIMARY KEY({})'.format(col_sql(primary_key))
+
+ create_sql = 'CREATE TABLE `{}` ({}{})'.format(
+ table_name, col_def_sql, primary_key_sql)
+
+ db.execute(create_sql)
+
+
def create_index(db, table_name, index_cols):
if isinstance(index_cols, str):
raise TypeError
diff --git a/msd/merge.py b/msd/merge.py
index ba95d56..bff8f3f 100644
--- a/msd/merge.py
+++ b/msd/merge.py
@@ -14,6 +14,7 @@
"""Supporting code to merge data from the scratch table and write it
to the output table."""
from .db import create_index
+from .db import create_table
from .db import insert_row
from .table import TABLES
@@ -24,12 +25,7 @@ def create_output_table(output_db, table_name):
primary_key = table_def['primary_key']
indexes = table_def.get('indexes', ())
- create_sql = 'CREATE TABLE `{}` ({}, PRIMARY KEY ({}))'.format(
- table_name,
- ', '.join('`{}` {}'.format(col_name, col_type)
- for col_name, col_type in sorted(columns.items())),
- ', '.join('`{}`'.format(pk_col) for pk_col in primary_key))
- output_db.execute(create_sql)
+ create_table(output_db, table_name, columns, primary_key)
for index_cols in indexes:
create_index(output_db, table_name, index_cols)
diff --git a/msd/norm.py b/msd/norm.py
index fa70768..c6d6543 100644
--- a/msd/norm.py
+++ b/msd/norm.py
@@ -72,4 +72,4 @@ def norm(s):
def smunch(s):
"""Like norm(), except we remove whitespace too."""
- return WHITESPACE_RE.sub('', s)
+ return WHITESPACE_RE.sub('', norm(s))
diff --git a/msd/scratch.py b/msd/scratch.py
index dd64aec..60ef616 100644
--- a/msd/scratch.py
+++ b/msd/scratch.py
@@ -19,6 +19,7 @@ from os.path import exists
from os.path import getmtime
from .db import create_index
+from .db import create_table
from .db import insert_row
from .db import open_db
from .db import show_tables
@@ -63,7 +64,7 @@ def build_scratch_db(
with open_db(scratch_db_tmp_path) as scratch_db:
- init_scratch_tables(scratch_db)
+ create_scratch_tables(scratch_db)
for input_db_path in input_db_paths:
log.info('dumping data from {} -> {}'.format(
@@ -78,27 +79,29 @@ def build_scratch_db(
rename(scratch_db_tmp_path, scratch_db_path)
-def init_scratch_tables(scratch_db):
+def create_scratch_tables(scratch_db):
"""Add tables to the given (open) SQLite DB."""
- for table_name, table_def in sorted(TABLES.items()):
- columns = table_def['columns'].copy()
- columns['scraper_id'] = 'text'
-
- create_sql = 'CREATE TABLE `{}` ({})'.format(
- table_name, ', '.join(
- '`{}` {}'.format(col_name, col_type)
- for col_name, col_type in sorted(columns.items())))
- scratch_db.execute(create_sql)
-
- # add "primary key" index
- index_cols = list(table_def.get('primary_key', ()))
- if 'scraper_id' not in index_cols:
- index_cols = ['scraper_id'] + index_cols
- create_index(scratch_db, table_name, index_cols)
+ for table_name in sorted(TABLES):
+ create_scratch_table(scratch_db, table_name)
+
+
+def create_scratch_table(scratch_db, table_name):
+ table_def = TABLES[table_name]
- # add other indexes
- for index_cols in table_def.get('indexes', ()):
- create_index(scratch_db, table_name, index_cols)
+ columns = table_def['columns'].copy()
+ columns['scraper_id'] = 'text'
+
+ create_table(scratch_db, table_name, columns)
+
+ # add "primary key" (non-unique) index
+ index_cols = list(table_def.get('primary_key', ()))
+ if 'scraper_id' not in index_cols:
+ index_cols = ['scraper_id'] + index_cols
+ create_index(scratch_db, table_name, index_cols)
+
+ # add other indexes
+ for index_cols in table_def.get('indexes', ()):
+ create_index(scratch_db, table_name, index_cols)
def db_path_to_scraper_prefix(path):
diff --git a/srs b/srs
new file mode 120000
index 0000000..6ed0827
--- /dev/null
+++ b/srs
@@ -0,0 +1,1 @@
+submodules/srs/srs
\ No newline at end of file
diff --git a/titlecase b/titlecase
new file mode 120000
index 0000000..f64a324
--- /dev/null
+++ b/titlecase
@@ -0,0 +1,1 @@
+submodules/titlecase/titlecase
\ No newline at end of file
| fails to merge 'CardScan' and 'Cardscan'
Somehow, `msd` is producing two brand entries for `Newell Rubbermaid`, `CardScan` and `Cardscan`, instead of merging them. | spendright/msd | diff --git a/test/db.py b/test/db.py
new file mode 100644
index 0000000..a4686ed
--- /dev/null
+++ b/test/db.py
@@ -0,0 +1,69 @@
+# Copyright 2014-2015 SpendRight, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Utilities for testing databases."""
+import sqlite3
+from unittest import TestCase
+
+from msd.db import create_table
+from msd.db import create_index
+from msd.db import insert_row
+from msd.db import open_db
+from msd.merge import create_output_table
+from msd.scratch import create_scratch_table
+from msd.table import TABLES
+
+
+# stuff that could be in msd.db, but that we only use for testing
+
+def select_all(db, table_name):
+ """Get all rows from a table, and sort (for easy testing of equality)."""
+ return sorted(
+ (dict(row) for row in
+ db.execute('SELECT * FROM `{}`'.format(table_name))),
+ key=lambda row: [(k, repr(v)) for (k, v) in row.items()])
+
+
+def insert_rows(db, table_name, rows):
+ """Call insert_row() multiple times."""
+ for row in rows:
+ insert_row(db, table_name, row)
+
+
+
+class DBTestCase(TestCase):
+
+ # output_tables to create at setup time
+ OUTPUT_TABLES = []
+
+ # scratch tables to create at setup time
+ SCRATCH_TABLES = []
+
+ def setUp(self):
+ self.output_db = open_db(':memory:')
+ self.scratch_db = open_db(':memory:')
+ self._tmp_dir = None
+
+ for table_name in self.SCRATCH_TABLES:
+ create_scratch_table(self.scratch_db, table_name)
+
+ for table_name in self.OUTPUT_TABLES:
+ create_output_table(self.output_db, table_name)
+
+ @property
+ def tmp_dir(self):
+ if self._tmp_dir is None:
+ self._tmp_dir = mkdtemp()
+ self.addCleanup(rmtree, self._tmp_dir)
+
+ return self._tmp_dir
diff --git a/test/unit/msd/test_brand.py b/test/unit/msd/test_brand.py
index 68834fc..478f4e1 100644
--- a/test/unit/msd/test_brand.py
+++ b/test/unit/msd/test_brand.py
@@ -14,7 +14,13 @@
# limitations under the License.
from unittest import TestCase
+from msd.brand import build_scraper_brand_map_table
from msd.brand import split_brand_and_tm
+from msd.db import insert_row
+
+from ...db import DBTestCase
+from ...db import insert_rows
+from ...db import select_all
class TestSplitBrandAndTM(TestCase):
@@ -39,3 +45,44 @@ class TestSplitBrandAndTM(TestCase):
def test_on_tm(self):
self.assertEqual(split_brand_and_tm('™'), ('', '™'))
+
+
+class TestBuildScraperBrandMapTable(DBTestCase):
+
+ SCRATCH_TABLES = [
+ 'brand', 'category', 'claim', 'rating', 'scraper_brand_map']
+
+ OUTPUT_TABLES = ['company_name', 'scraper_company_map']
+
+ def test_merge_differing_capitalization(self):
+ # this tests #19
+ insert_rows(self.scratch_db, 'brand', [
+ dict(brand='CardScan',
+ company='Newell Rubbermaid',
+ scraper_id='sr.campaign.hrc'),
+ dict(brand='Cardscan',
+ company='Newell Rubbermaid',
+ scraper_id='sr.campaign.hrc'),
+ ])
+
+ insert_row(self.output_db, 'scraper_company_map', dict(
+ company='Newell Rubbermaid',
+ scraper_company='Newell Rubbermaid',
+ scraper_id='sr.campaign.hrc')
+ )
+
+ build_scraper_brand_map_table(self.output_db, self.scratch_db)
+
+ self.assertEqual(
+ select_all(self.output_db, 'scraper_brand_map'),
+ [dict(brand='CardScan',
+ company='Newell Rubbermaid',
+ scraper_brand='CardScan',
+ scraper_company='Newell Rubbermaid',
+ scraper_id='sr.campaign.hrc'),
+ dict(brand='CardScan',
+ company='Newell Rubbermaid',
+ scraper_brand='Cardscan',
+ scraper_company='Newell Rubbermaid',
+ scraper_id='sr.campaign.hrc'),
+ ])
diff --git a/test/unit/suite.py b/test/unit/suite.py
index fe78ab0..3448e2c 100644
--- a/test/unit/suite.py
+++ b/test/unit/suite.py
@@ -1,4 +1,4 @@
-# Copyright 2014 SpendRight, Inc.
+# Copyright 2014-2015 SpendRight, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 4
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
iniconfig==2.1.0
-e git+https://github.com/spendright/msd.git@02d85249b9e1f5b2824b9a268220a30301084e42#egg=msd
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
titlecase==2.4.1
tomli==2.2.1
Unidecode==1.3.8
| name: msd
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- titlecase==2.4.1
- tomli==2.2.1
- unidecode==1.3.8
prefix: /opt/conda/envs/msd
| [
"test/unit/msd/test_brand.py::TestSplitBrandAndTM::test_brand_with_symbol",
"test/unit/msd/test_brand.py::TestSplitBrandAndTM::test_discard_part_after_symbol",
"test/unit/msd/test_brand.py::TestSplitBrandAndTM::test_empty",
"test/unit/msd/test_brand.py::TestSplitBrandAndTM::test_on_tm",
"test/unit/msd/test_brand.py::TestSplitBrandAndTM::test_plain_brand",
"test/unit/msd/test_brand.py::TestSplitBrandAndTM::test_strip",
"test/unit/msd/test_brand.py::TestBuildScraperBrandMapTable::test_merge_differing_capitalization"
]
| []
| []
| []
| Apache License 2.0 | 232 | [
"msd/merge.py",
"msd/scratch.py",
"msd/db.py",
"srs",
"titlecase",
"msd/norm.py"
]
| [
"msd/merge.py",
"msd/scratch.py",
"msd/db.py",
"srs",
"titlecase",
"msd/norm.py"
]
|
|
johnpaulett__python-hl7-15 | 253b2071aa376e7f37854b30e38fc6a0ab5c203d | 2015-09-07 17:01:12 | 253b2071aa376e7f37854b30e38fc6a0ab5c203d | diff --git a/hl7/containers.py b/hl7/containers.py
index 231e3b1..a57f570 100644
--- a/hl7/containers.py
+++ b/hl7/containers.py
@@ -319,7 +319,7 @@ class Message(Container):
elif c in DEFAULT_MAP:
rv.append(esc + DEFAULT_MAP[c] + esc)
elif ord(c) >= 0x20 and ord(c) <= 0x7E:
- rv.append(c.encode('ascii'))
+ rv.append(c)
else:
rv.append('%sX%2x%s' % (esc, ord(c), esc))
| Buggy ASCII escaping
If you escape a string containing ordinary ASCII characters, this leads to a `TypeError` on Python 3:
``` python
import hl7
msg = hl7.Message(separator='\r', separators='\r|^~\&')
msg.escape('asdf')
```
``` plain
Traceback (most recent call last):
File "hl7/__init__.py", line 630, in escape
return ''.join(rv)
TypeError: sequence item 0: expected str instance, bytes found
```
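The root cause is the `c.encode('ascii')` call removed by the patch above: on Python 3 it produces `bytes`, and `str.join()` refuses a sequence mixing `bytes` with `str`. A minimal reproduction of just that failure mode:
```python
# What Message.escape() built internally: bytes (from encode) next to str.
rv = ['a'.encode('ascii'), '\\F\\']
try:
    ''.join(rv)
except TypeError as exc:
    print(exc)  # sequence item 0: expected str instance, bytes found
```
Keeping the character as `str` (`rv.append(c)`), as the patch does, avoids the mixed-type list entirely.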
| johnpaulett/python-hl7 | diff --git a/tests/test_parse.py b/tests/test_parse.py
index e7ad1d4..dfed995 100644
--- a/tests/test_parse.py
+++ b/tests/test_parse.py
@@ -172,13 +172,19 @@ class ParseTest(unittest.TestCase):
def test_escape(self):
msg = hl7.parse(rep_sample_hl7)
+ # Escape Separators
self.assertEqual(msg.escape('\\'), '\\E\\')
self.assertEqual(msg.escape('|'), '\\F\\')
self.assertEqual(msg.escape('^'), '\\S\\')
self.assertEqual(msg.escape('&'), '\\T\\')
self.assertEqual(msg.escape('~'), '\\R\\')
+ # Escape ASCII characters
+ self.assertEqual(msg.escape('asdf'), 'asdf')
+
+ # Escape non-ASCII characters
self.assertEqual(msg.escape('áéíóú'), '\\Xe1\\\\Xe9\\\\Xed\\\\Xf3\\\\Xfa\\')
+ self.assertEqual(msg.escape('äsdf'), '\\Xe4\\sdf')
def test_file(self):
# Extract message from file
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | distlib==0.3.9
docutils==0.21.2
exceptiongroup==1.2.2
filelock==3.18.0
flake8==2.1.0
-e git+https://github.com/johnpaulett/python-hl7.git@253b2071aa376e7f37854b30e38fc6a0ab5c203d#egg=hl7
iniconfig==2.1.0
Jinja2==3.1.6
MarkupSafe==3.0.2
mccabe==0.7.0
packaging==24.2
pep8==1.7.1
platformdirs==4.3.7
pluggy==1.5.0
py==1.11.0
pyflakes==3.3.2
Pygments==2.19.1
pytest==8.3.5
six==1.17.0
Sphinx==1.2.1
tomli==2.2.1
tox==1.7.0
virtualenv==20.30.0
| name: python-hl7
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- distlib==0.3.9
- docutils==0.21.2
- exceptiongroup==1.2.2
- filelock==3.18.0
- flake8==2.1.0
- iniconfig==2.1.0
- jinja2==3.1.6
- markupsafe==3.0.2
- mccabe==0.7.0
- packaging==24.2
- pep8==1.7.1
- platformdirs==4.3.7
- pluggy==1.5.0
- py==1.11.0
- pyflakes==3.3.2
- pygments==2.19.1
- pytest==8.3.5
- six==1.17.0
- sphinx==1.2.1
- tomli==2.2.1
- tox==1.7.0
- virtualenv==20.30.0
prefix: /opt/conda/envs/python-hl7
| [
"tests/test_parse.py::ParseTest::test_escape"
]
| []
| [
"tests/test_parse.py::ParseTest::test_assign",
"tests/test_parse.py::ParseTest::test_bytestring_converted_to_unicode",
"tests/test_parse.py::ParseTest::test_elementnumbering",
"tests/test_parse.py::ParseTest::test_empty_initial_repetition",
"tests/test_parse.py::ParseTest::test_extract",
"tests/test_parse.py::ParseTest::test_file",
"tests/test_parse.py::ParseTest::test_non_ascii_bytestring",
"tests/test_parse.py::ParseTest::test_non_ascii_bytestring_no_encoding",
"tests/test_parse.py::ParseTest::test_nonstandard_separators",
"tests/test_parse.py::ParseTest::test_parse",
"tests/test_parse.py::ParseTest::test_parsing_classes",
"tests/test_parse.py::ParseTest::test_repetition",
"tests/test_parse.py::ParseTest::test_subcomponent",
"tests/test_parse.py::ParseTest::test_unescape",
"tests/test_parse.py::ParsePlanTest::test_create_parse_plan",
"tests/test_parse.py::ParsePlanTest::test_parse_plan",
"tests/test_parse.py::ParsePlanTest::test_parse_plan_next"
]
| []
| BSD License | 233 | [
"hl7/containers.py"
]
| [
"hl7/containers.py"
]
|
|
enthought__okonomiyaki-119 | 9830662935a3432c0619f60bf72dd1f312f0f2a8 | 2015-09-09 14:07:38 | 9830662935a3432c0619f60bf72dd1f312f0f2a8 | diff --git a/CHANGELOG b/CHANGELOG
index da8cdd4..74c7922 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -6,6 +6,8 @@ Improvements:
* SemanticVersion class added
* MetadataVersion class added to easily manipulate metadata versions.
* runtime_metadata_factory function to parse and validate runtime packages.
+ * __str__ is now implemented for EPDPlatform. The implementation supports
+ EPDPlatform.from_epd_string(str(epd_platform)) == epd_platform (#117)
2015-00-01 0.10.0:
--------------------
diff --git a/okonomiyaki/platforms/epd_platform.py b/okonomiyaki/platforms/epd_platform.py
index 433605a..33ab4bd 100644
--- a/okonomiyaki/platforms/epd_platform.py
+++ b/okonomiyaki/platforms/epd_platform.py
@@ -252,6 +252,9 @@ class EPDPlatform(HasTraits):
def short(self):
return "{0}-{1}".format(self.platform_name, self.arch_bits)
+ def __str__(self):
+ return "{0.platform_name}_{0.arch}".format(self)
+
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
| Implement str(epd_platform)
Should look as follows:
```python
>>> platform = EPDPlatform.from_epd_string("rh5-64")
>>> str(platform)
rh5_x86_64
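>>> # per the changelog entry in the patch above, str() is also expected
>>> # to round-trip through from_epd_string
>>> EPDPlatform.from_epd_string(str(platform)) == platform
True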
``` | enthought/okonomiyaki | diff --git a/okonomiyaki/platforms/tests/test_epd_platform.py b/okonomiyaki/platforms/tests/test_epd_platform.py
index 9dfa96e..e159950 100644
--- a/okonomiyaki/platforms/tests/test_epd_platform.py
+++ b/okonomiyaki/platforms/tests/test_epd_platform.py
@@ -161,6 +161,28 @@ class TestEPDPlatform(unittest.TestCase):
epd_platform = EPDPlatform.from_running_system("amd64")
self.assertEqual(epd_platform.short, "rh5-64")
+ def test_str(self):
+ # Given
+ epd_platform = EPDPlatform.from_epd_string("rh5-64")
+
+ # When/Then
+ self.assertEqual(str(epd_platform), "rh5_x86_64")
+
+ # Given
+ epd_platform = EPDPlatform.from_epd_string("osx-32")
+
+ # When/Then
+ self.assertEqual(str(epd_platform), "osx_x86")
+
+ # Given
+ s = "osx_x86"
+
+ # When
+ epd_platform = EPDPlatform.from_epd_string(s)
+
+ # Then
+ self.assertEqual(str(epd_platform), s)
+
class TestEPDPlatformApplies(unittest.TestCase):
@mock_centos_5_8
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 2
} | 0.10 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"dev_requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
coverage==7.8.0
docutils==0.21.2
enum34==1.1.10
exceptiongroup==1.2.2
flake8==7.2.0
haas==0.9.0
iniconfig==2.1.0
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
mccabe==0.7.0
mock==1.0.1
-e git+https://github.com/enthought/okonomiyaki.git@9830662935a3432c0619f60bf72dd1f312f0f2a8#egg=okonomiyaki
packaging==24.2
pbr==6.1.1
pluggy==1.5.0
pycodestyle==2.13.0
pyflakes==3.3.2
pytest==8.3.5
pytest-cov==6.0.0
referencing==0.36.2
rpds-py==0.24.0
six==1.17.0
statistics==1.0.3.5
stevedore==4.1.1
tomli==2.2.1
typing_extensions==4.13.0
zipfile2==0.0.12
| name: okonomiyaki
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- coverage==7.8.0
- docutils==0.21.2
- enum34==1.1.10
- exceptiongroup==1.2.2
- flake8==7.2.0
- haas==0.9.0
- iniconfig==2.1.0
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- mccabe==0.7.0
- mock==1.0.1
- packaging==24.2
- pbr==6.1.1
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyflakes==3.3.2
- pytest==8.3.5
- pytest-cov==6.0.0
- referencing==0.36.2
- rpds-py==0.24.0
- six==1.17.0
- statistics==1.0.3.5
- stevedore==4.1.1
- tomli==2.2.1
- typing-extensions==4.13.0
- zipfile2==0.0.12
prefix: /opt/conda/envs/okonomiyaki
| [
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatform::test_str"
]
| [
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatform::test_guessed_epd_platform",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatformApplies::test_all",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatformApplies::test_applies_rh",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatformApplies::test_current_linux",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestGuessEPDPlatform::test_guess_linux2_platform",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestGuessEPDPlatform::test_guess_linux2_unsupported"
]
| [
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatform::test_epd_platform_from_string",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatform::test_epd_platform_from_string_new_arch",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatform::test_epd_platform_from_string_new_names",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatform::test_epd_platform_from_string_new_names_underscore",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatform::test_from_running_python",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatform::test_from_running_system",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatform::test_short_names_consistency",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatformApplies::test_current_windows",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestGuessEPDPlatform::test_from_epd_platform_string",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestGuessEPDPlatform::test_from_epd_platform_string_invalid",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestGuessEPDPlatform::test_from_spec_depend_data",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestGuessEPDPlatform::test_guess_darwin_platform",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestGuessEPDPlatform::test_guess_solaris_unsupported",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestGuessEPDPlatform::test_guess_win32_platform"
]
| []
| BSD License | 234 | [
"okonomiyaki/platforms/epd_platform.py",
"CHANGELOG"
]
| [
"okonomiyaki/platforms/epd_platform.py",
"CHANGELOG"
]
|
|
mapbox__mapbox-sdk-py-26 | f56c2ffa9477819ce29fb42e87534244f14e43fe | 2015-09-09 14:44:31 | f56c2ffa9477819ce29fb42e87534244f14e43fe | diff --git a/mapbox/__init__.py b/mapbox/__init__.py
index c1889b3..362ae7c 100644
--- a/mapbox/__init__.py
+++ b/mapbox/__init__.py
@@ -9,11 +9,18 @@ from uritemplate import URITemplate
__version__ = "0.1.0"
+class InvalidPlaceTypeError(KeyError):
+ pass
+
+
class Service:
"""Base service class"""
def get_session(self, token=None, env=None):
- access_token = token or (env or os.environ).get('MapboxAccessToken')
+ access_token = (
+ token or
+ (env or os.environ).get('MapboxAccessToken') or
+ (env or os.environ).get('MAPBOX_ACCESS_TOKEN'))
session = requests.Session()
session.params.update(access_token=access_token)
return session
@@ -27,18 +34,47 @@ class Geocoder(Service):
self.baseuri = 'https://api.mapbox.com/v4/geocode'
self.session = self.get_session(access_token)
- def forward(self, address, params=None):
+ def _validate_place_types(self, types):
+ """Validate place types and return a mapping for use in requests"""
+ for pt in types:
+ if pt not in self.place_types:
+ raise InvalidPlaceTypeError(pt)
+ return {'types': ",".join(types)}
+
+ def forward(self, address, types=None, lng=None, lat=None):
"""A forward geocoding request
- See: https://www.mapbox.com/developers/api/geocoding/#forward"""
+ Results may be constrained to those in a sequence of place_types or
+ biased toward a given longitude and latitude.
+
+ See: https://www.mapbox.com/developers/api/geocoding/#forward."""
uri = URITemplate('%s/{dataset}/{query}.json' % self.baseuri).expand(
dataset=self.name, query=address)
+ params = {}
+ if types:
+ params.update(self._validate_place_types(types))
+ if lng is not None and lat is not None:
+ params.update(proximity='{0},{1}'.format(lng, lat))
return self.session.get(uri, params=params)
- def reverse(self, lon, lat, params=None):
+ def reverse(self, lon, lat, types=None):
"""A reverse geocoding request
- See: https://www.mapbox.com/developers/api/geocoding/#reverse"""
+ See: https://www.mapbox.com/developers/api/geocoding/#reverse."""
uri = URITemplate(self.baseuri + '/{dataset}/{lon},{lat}.json').expand(
dataset=self.name, lon=str(lon), lat=str(lat))
+ params = {}
+ if types:
+ params.update(self._validate_place_types(types))
return self.session.get(uri, params=params)
+
+ @property
+ def place_types(self):
+ """A mapping of place type names to descriptions"""
+ return {
+ 'address': "A street address with house number. Examples: 1600 Pennsylvania Ave NW, 1051 Market St, Oberbaumstrasse 7.",
+ 'country': "Sovereign states and other political entities. Examples: United States, France, China, Russia.",
+ 'place': "City, town, village or other municipality relevant to a country's address or postal system. Examples: Cleveland, Saratoga Springs, Berlin, Paris.",
+ 'poi': "Places of interest including commercial venues, major landmarks, parks, and other features. Examples: Yosemite National Park, Lake Superior.",
+ 'postcode': "Postal code, varies by a country's postal system. Examples: 20009, CR0 3RL.",
+ 'region': "First order administrative divisions within a country, usually provinces or states. Examples: California, Ontario, Essonne."}
diff --git a/mapbox/scripts/cli.py b/mapbox/scripts/cli.py
index f1bf3a3..b18509a 100644
--- a/mapbox/scripts/cli.py
+++ b/mapbox/scripts/cli.py
@@ -35,10 +35,11 @@ def main_group(ctx, verbose, quiet, access_token):
$ mbx --access-token MY_TOKEN ...
- or as an environment variable.
+ or as an environment variable named MAPBOX_ACCESS_TOKEN or
+ MapboxAccessToken.
\b
- $ export MapboxAccessToken=MY_TOKEN
+ $ export MAPBOX_ACCESS_TOKEN=MY_TOKEN
$ mbx ...
"""
diff --git a/mapbox/scripts/geocoder.py b/mapbox/scripts/geocoder.py
index f8f0b3a..e97f11e 100644
--- a/mapbox/scripts/geocoder.py
+++ b/mapbox/scripts/geocoder.py
@@ -8,7 +8,7 @@ import mapbox
from mapbox.compat import map
-class MapboxException(click.ClickException):
+class MapboxCLIException(click.ClickException):
pass
@@ -41,18 +41,30 @@ def echo_headers(headers, file=None):
@click.command(short_help="Geocode an address or coordinates.")
@click.argument('query', default='-', required=False)
[email protected]('--include', '-i', 'include_headers',
- is_flag=True, default=False,
- help="Include HTTP headers in the output.")
@click.option(
'--forward/--reverse',
default=True,
help="Perform a forward or reverse geocode. [default: forward]")
[email protected]('--include', '-i', 'include_headers',
+ is_flag=True, default=False,
+ help="Include HTTP headers in the output.")
[email protected](
+ '--lat', type=float, default=None,
+ help="Bias results toward this latitude (decimal degrees). --lng "
+ "is also required.")
[email protected](
+ '--lng', type=float, default=None,
+ help="Bias results toward this longitude (decimal degrees). --lat "
+ "is also required.")
[email protected](
+ '--place-type', '-t', multiple=True, metavar='NAME', default=None,
+ help="Restrict results to one or more of these place types: {0}.".format(
+ sorted(mapbox.Geocoder().place_types.keys())))
@click.option('--output', '-o', default='-', help="Save output to a file.")
@click.pass_context
-def geocode(ctx, query, include_headers, forward, output):
- """This command gets coordinates for an address (forward mode) or
- addresses for coordinates (reverse mode).
+def geocode(ctx, query, include_headers, forward, lat, lng, place_type, output):
+ """This command returns places matching an address (forward mode) or
+ places matching coordinates (reverse mode).
In forward (the default) mode the query argument shall be an address
such as '1600 pennsylvania ave nw'.
@@ -64,6 +76,7 @@ def geocode(ctx, query, include_headers, forward, output):
$ mbx geocode --reverse '[-77.4371, 37.5227]'
+ An access token is required, see `mbx --help`.
"""
verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 2
logger = logging.getLogger('mapbox')
@@ -75,19 +88,20 @@ def geocode(ctx, query, include_headers, forward, output):
if forward:
for q in iter_query(query):
- resp = geocoder.forward(q)
+ resp = geocoder.forward(
+ q, types=place_type, lat=lat, lng=lng)
if include_headers:
echo_headers(resp.headers, file=stdout)
if resp.status_code == 200:
click.echo(resp.text, file=stdout)
else:
- raise MapboxException(resp.text.strip())
+ raise MapboxCLIException(resp.text.strip())
else:
for coords in map(coords_from_query, iter_query(query)):
- resp = geocoder.reverse(*coords)
+ resp = geocoder.reverse(*coords, types=place_type)
if include_headers:
echo_headers(resp.headers, file=stdout)
if resp.status_code == 200:
click.echo(resp.text, file=stdout)
else:
- raise MapboxException(resp.text.strip())
+ raise MapboxCLIException(resp.text.strip())
| Add proximity feature to forward geocoding
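With the patch above, a proximity-biased, type-filtered forward geocode would look roughly like this (the token and coordinates are illustrative):
```python
import mapbox

geocoder = mapbox.Geocoder(access_token='pk.test')  # illustrative token
response = geocoder.forward(
    '200 queen st',
    types=('address',),      # restrict results to street addresses
    lng=-77.04, lat=38.91)   # bias results toward this point
print(response.status_code, response.json())
```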
https://www.mapbox.com/developers/api/geocoding/#proximity | mapbox/mapbox-sdk-py | diff --git a/tests/test_geocoder.py b/tests/test_geocoder.py
index 84dd846..86ae506 100644
--- a/tests/test_geocoder.py
+++ b/tests/test_geocoder.py
@@ -1,5 +1,6 @@
import json
import responses
+
import mapbox
@@ -24,6 +25,14 @@ def test_service_session_os_environ(monkeypatch):
monkeypatch.undo()
+def test_service_session_os_environ_caps(monkeypatch):
+ """Get a session using os.environ's token"""
+ monkeypatch.setenv('MAPBOX_ACCESS_TOKEN', 'pk.test_os_environ')
+ session = mapbox.Service().get_session()
+ assert session.params.get('access_token') == 'pk.test_os_environ'
+ monkeypatch.undo()
+
+
def test_geocoder_default_name():
"""Default name is set"""
geocoder = mapbox.Geocoder()
@@ -69,3 +78,85 @@ def test_geocoder_reverse():
response = mapbox.Geocoder(access_token='pk.test').reverse(*[str(x) for x in coords])
assert response.status_code == 200
assert response.json()['query'] == coords
+
+
+def test_geocoder_place_types():
+ """Place types are enumerated"""
+ assert sorted(mapbox.Geocoder().place_types.items()) == [
+ ('address', "A street address with house number. Examples: 1600 Pennsylvania Ave NW, 1051 Market St, Oberbaumstrasse 7."),
+ ('country', "Sovereign states and other political entities. Examples: United States, France, China, Russia."),
+ ('place', "City, town, village or other municipality relevant to a country's address or postal system. Examples: Cleveland, Saratoga Springs, Berlin, Paris."),
+ ('poi', "Places of interest including commercial venues, major landmarks, parks, and other features. Examples: Yosemite National Park, Lake Superior."),
+ ('postcode', "Postal code, varies by a country's postal system. Examples: 20009, CR0 3RL."),
+ ('region', "First order administrative divisions within a country, usually provinces or states. Examples: California, Ontario, Essonne.")]
+
+
+def test_validate_place_types_err():
+ try:
+ mapbox.Geocoder()._validate_place_types(('address', 'bogus'))
+ except mapbox.InvalidPlaceTypeError as err:
+ assert str(err) == "'bogus'"
+
+
+def test_validate_place_types():
+ assert mapbox.Geocoder()._validate_place_types(
+ ('address', 'poi')) == {'types': 'address,poi'}
+
+
[email protected]
+def test_geocoder_forward_types():
+ """Type filtering of forward geocoding works"""
+
+ responses.add(
+ responses.GET,
+ 'https://api.mapbox.com/v4/geocode/mapbox.places/1600%20pennsylvania%20ave%20nw.json?types=address,country,place,poi,postcode,region&access_token=pk.test',
+ match_querystring=True,
+ body='{"query": ["1600", "pennsylvania", "ave", "nw"]}', status=200,
+ content_type='application/json')
+
+ response = mapbox.Geocoder(
+ access_token='pk.test').forward(
+ '1600 pennsylvania ave nw',
+ types=('address', 'country', 'place', 'poi', 'postcode', 'region'))
+ assert response.status_code == 200
+ assert response.json()['query'] == ["1600", "pennsylvania", "ave", "nw"]
+
+
[email protected]
+def test_geocoder_reverse_types():
+ """Type filtering of reverse geocoding works"""
+
+ coords = [-77.4371, 37.5227]
+
+ responses.add(
+ responses.GET,
+ 'https://api.mapbox.com/v4/geocode/mapbox.places/%s.json?types=address,country,place,poi,postcode,region&access_token=pk.test' % ','.join([str(x) for x in coords]),
+ match_querystring=True,
+ body='{"query": %s}' % json.dumps(coords),
+ status=200,
+ content_type='application/json')
+
+ response = mapbox.Geocoder(
+ access_token='pk.test').reverse(
+ *[str(x) for x in coords],
+ types=('address', 'country', 'place', 'poi', 'postcode', 'region'))
+ assert response.status_code == 200
+ assert response.json()['query'] == coords
+
+
[email protected]
+def test_geocoder_forward_proximity():
+ """Proximity parameter works"""
+
+ responses.add(
+ responses.GET,
+ 'https://api.mapbox.com/v4/geocode/mapbox.places/1600%20pennsylvania%20ave%20nw.json?proximity=0,0&access_token=pk.test',
+ match_querystring=True,
+ body='{"query": ["1600", "pennsylvania", "ave", "nw"]}', status=200,
+ content_type='application/json')
+
+ response = mapbox.Geocoder(
+ access_token='pk.test').forward(
+ '1600 pennsylvania ave nw', lng=0, lat=0)
+ assert response.status_code == 200
+ assert response.json()['query'] == ["1600", "pennsylvania", "ave", "nw"]
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 3
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": null,
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
click==8.1.8
click-plugins==1.1.1
cligj==0.7.2
coverage==7.8.0
coveralls==4.0.1
docopt==0.6.2
exceptiongroup==1.2.2
idna==3.10
iniconfig==2.1.0
-e git+https://github.com/mapbox/mapbox-sdk-py.git@f56c2ffa9477819ce29fb42e87534244f14e43fe#egg=mapbox
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytest-cov==6.0.0
PyYAML==6.0.2
requests==2.32.3
responses==0.25.7
tomli==2.2.1
uritemplate==4.1.1
uritemplate.py==3.0.2
urllib3==2.3.0
| name: mapbox-sdk-py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- click==8.1.8
- click-plugins==1.1.1
- cligj==0.7.2
- coverage==7.8.0
- coveralls==4.0.1
- docopt==0.6.2
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-cov==6.0.0
- pyyaml==6.0.2
- requests==2.32.3
- responses==0.25.7
- tomli==2.2.1
- uritemplate==4.1.1
- uritemplate-py==3.0.2
- urllib3==2.3.0
prefix: /opt/conda/envs/mapbox-sdk-py
| [
"tests/test_geocoder.py::test_service_session_os_environ_caps",
"tests/test_geocoder.py::test_geocoder_place_types",
"tests/test_geocoder.py::test_validate_place_types_err",
"tests/test_geocoder.py::test_validate_place_types",
"tests/test_geocoder.py::test_geocoder_forward_types",
"tests/test_geocoder.py::test_geocoder_reverse_types",
"tests/test_geocoder.py::test_geocoder_forward_proximity"
]
| []
| [
"tests/test_geocoder.py::test_service_session",
"tests/test_geocoder.py::test_service_session_env",
"tests/test_geocoder.py::test_service_session_os_environ",
"tests/test_geocoder.py::test_geocoder_default_name",
"tests/test_geocoder.py::test_geocoder_name",
"tests/test_geocoder.py::test_geocoder_forward",
"tests/test_geocoder.py::test_geocoder_reverse"
]
| []
| MIT License | 235 | [
"mapbox/__init__.py",
"mapbox/scripts/geocoder.py",
"mapbox/scripts/cli.py"
]
| [
"mapbox/__init__.py",
"mapbox/scripts/geocoder.py",
"mapbox/scripts/cli.py"
]
|
|
bottlepy__bottle-787 | 534a2e08ac0ef55dd542a3a83b1118188c6a399b | 2015-09-11 12:49:06 | 90d749bef49120396ecc347ea67df98881157030 | diff --git a/bottle.py b/bottle.py
index e4b17d8..5287147 100644
--- a/bottle.py
+++ b/bottle.py
@@ -2176,6 +2176,22 @@ class ConfigDict(dict):
self._meta = {}
self._on_change = lambda name, value: None
+ def load_module(self, path, squash):
+ """ Load values from a Python module.
+ :param squash: Squash nested dicts into namespaces by using
+ load_dict(), otherwise use update()
Example: load_module('my.app.settings', True)
Example: load_module('my.app.settings', False)
+ """
+ config_obj = __import__(path)
+ obj = dict([(key, getattr(config_obj, key))
+ for key in dir(config_obj) if key.isupper()])
+ if squash:
+ self.load_dict(obj)
+ else:
+ self.update(obj)
+ return self
+
def load_config(self, filename):
""" Load values from an ``*.ini`` style config file.
| Ability to load config from module
Currently it is not possible to load config from a Python module (similar to the way Django does it).
I'd like to propose the following:
```py
import importlib


class ConfigDict(dict):
    def load_module(self, module_path):
        """
        Load configuration from a module path.

        >>> config.load_module('settings.prod')
        """
        assert isinstance(module_path, str)
        config = importlib.import_module(module_path)
        obj = {key: getattr(config, key) for key in dir(config) if key.isupper()}
        self.update(obj)
```
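Usage of the proposed API might look like this (the settings module and values are illustrative):
```py
# settings/prod.py (illustrative)
DEBUG = False
DB_HOST = 'db.example.com'

# application code
import bottle

app = bottle.Bottle()
app.config.load_module('settings.prod')
assert app.config['DEBUG'] is False
```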
Happy to put together a tested PR if this proposal is accepted | bottlepy/bottle | diff --git a/test/example_settings.py b/test/example_settings.py
new file mode 100644
index 0000000..58dd992
--- /dev/null
+++ b/test/example_settings.py
@@ -0,0 +1,5 @@
+A = {
+ "B": {
+ "C": 3
+ }
+}
\ No newline at end of file
diff --git a/test/test_config.py b/test/test_config.py
index a4cfd9f..a9ea4ab 100644
--- a/test/test_config.py
+++ b/test/test_config.py
@@ -1,3 +1,4 @@
+import sys
import unittest
from bottle import ConfigDict
@@ -69,6 +70,17 @@ class TestConfDict(unittest.TestCase):
c = ConfigDict()
c.load_dict({key: {'subkey': 'value'}})
self.assertEqual('value', c[key + '.subkey'])
+
+ def test_load_module(self):
+ c = ConfigDict()
+ c.load_module('example_settings', True)
+ self.assertEqual(c['A.B.C'], 3)
+
+ c = ConfigDict()
+ c.load_module('example_settings', False)
+ self.assertEqual(c['A']['B']['C'], 3)
+
+
if __name__ == '__main__': #pragma: no cover
unittest.main()
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 1
} | 0.12 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"Mako",
"jinja2",
"eventlet",
"cherrypy",
"paste",
"twisted",
"tornado",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
autocommand==2.2.2
Automat==24.8.1
backports.tarfile==1.2.0
-e git+https://github.com/bottlepy/bottle.git@534a2e08ac0ef55dd542a3a83b1118188c6a399b#egg=bottle
cheroot==10.0.1
CherryPy==18.10.0
constantly==23.10.4
dnspython==2.7.0
eventlet==0.39.1
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
greenlet==3.1.1
hyperlink==21.0.0
idna==3.10
incremental==24.7.2
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
jaraco.collections==5.1.0
jaraco.context==6.0.1
jaraco.functools==4.1.0
jaraco.text==4.0.0
Jinja2==3.1.6
Mako==1.3.9
MarkupSafe==3.0.2
more-itertools==10.6.0
packaging @ file:///croot/packaging_1734472117206/work
Paste==3.10.1
pluggy @ file:///croot/pluggy_1733169602837/work
portend==3.2.0
pytest @ file:///croot/pytest_1738938843180/work
python-dateutil==2.9.0.post0
six==1.17.0
tempora==5.8.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
tornado==6.4.2
Twisted==24.11.0
typing_extensions==4.13.0
zc.lockfile==3.0.post1
zope.interface==7.2
| name: bottle
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- autocommand==2.2.2
- automat==24.8.1
- backports-tarfile==1.2.0
- cheroot==10.0.1
- cherrypy==18.10.0
- constantly==23.10.4
- dnspython==2.7.0
- eventlet==0.39.1
- greenlet==3.1.1
- hyperlink==21.0.0
- idna==3.10
- incremental==24.7.2
- jaraco-collections==5.1.0
- jaraco-context==6.0.1
- jaraco-functools==4.1.0
- jaraco-text==4.0.0
- jinja2==3.1.6
- mako==1.3.9
- markupsafe==3.0.2
- more-itertools==10.6.0
- paste==3.10.1
- portend==3.2.0
- python-dateutil==2.9.0.post0
- six==1.17.0
- tempora==5.8.0
- tornado==6.4.2
- twisted==24.11.0
- typing-extensions==4.13.0
- zc-lockfile==3.0.post1
- zope-interface==7.2
prefix: /opt/conda/envs/bottle
| [
"test/test_config.py::TestConfDict::test_load_module"
]
| []
| [
"test/test_config.py::TestConfDict::test_isadict",
"test/test_config.py::TestConfDict::test_load_dict",
"test/test_config.py::TestConfDict::test_meta",
"test/test_config.py::TestConfDict::test_namespaces",
"test/test_config.py::TestConfDict::test_update",
"test/test_config.py::TestConfDict::test_write"
]
| []
| MIT License | 236 | [
"bottle.py"
]
| [
"bottle.py"
]
|
|
tobgu__pyrsistent-57 | d35ea98728473bd070ff1e6ac5304e4e12ea816c | 2015-09-14 21:17:17 | 87706acb8297805b56bcc2c0f89ffa73eb1de0d1 | diff --git a/pyrsistent/_field_common.py b/pyrsistent/_field_common.py
index 6934978..04231c0 100644
--- a/pyrsistent/_field_common.py
+++ b/pyrsistent/_field_common.py
@@ -2,7 +2,8 @@ from collections import Iterable
import six
from pyrsistent._checked_types import (
CheckedType, CheckedPSet, CheckedPMap, CheckedPVector,
- optional as optional_type, InvariantException, get_type, wrap_invariant)
+ optional as optional_type, InvariantException, get_type, wrap_invariant,
+ _restore_pickle)
def set_fields(dct, bases, name):
@@ -121,12 +122,42 @@ class PTypeError(TypeError):
self.actual_type = actual_type
-def _sequence_field(checked_class, suffix, item_type, optional, initial):
+SEQ_FIELD_TYPE_SUFFIXES = {
+ CheckedPVector: "PVector",
+ CheckedPSet: "PSet",
+}
+
+# Global dictionary to hold auto-generated field types: used for unpickling
+_seq_field_types = {}
+
+def _restore_seq_field_pickle(checked_class, item_type, data):
+ """Unpickling function for auto-generated PVec/PSet field types."""
+ type_ = _seq_field_types[checked_class, item_type]
+ return _restore_pickle(type_, data)
+
+def _make_seq_field_type(checked_class, item_type):
+ """Create a subclass of the given checked class with the given item type."""
+ type_ = _seq_field_types.get((checked_class, item_type))
+ if type_ is not None:
+ return type_
+
+ class TheType(checked_class):
+ __type__ = item_type
+
+ def __reduce__(self):
+ return (_restore_seq_field_pickle,
+ (checked_class, item_type, list(self)))
+
+ suffix = SEQ_FIELD_TYPE_SUFFIXES[checked_class]
+ TheType.__name__ = item_type.__name__.capitalize() + suffix
+ _seq_field_types[checked_class, item_type] = TheType
+ return TheType
+
+def _sequence_field(checked_class, item_type, optional, initial):
"""
Create checked field for either ``PSet`` or ``PVector``.
:param checked_class: ``CheckedPSet`` or ``CheckedPVector``.
- :param suffix: Suffix for new type name.
:param item_type: The required type for the items in the set.
:param optional: If true, ``None`` can be used as a value for
this field.
@@ -134,9 +165,7 @@ def _sequence_field(checked_class, suffix, item_type, optional, initial):
:return: A ``field`` containing a checked class.
"""
- class TheType(checked_class):
- __type__ = item_type
- TheType.__name__ = item_type.__name__.capitalize() + suffix
+ TheType = _make_seq_field_type(checked_class, item_type)
if optional:
def factory(argument):
@@ -164,7 +193,7 @@ def pset_field(item_type, optional=False, initial=()):
:return: A ``field`` containing a ``CheckedPSet`` of the given type.
"""
- return _sequence_field(CheckedPSet, "PSet", item_type, optional,
+ return _sequence_field(CheckedPSet, item_type, optional,
initial)
@@ -180,13 +209,41 @@ def pvector_field(item_type, optional=False, initial=()):
:return: A ``field`` containing a ``CheckedPVector`` of the given type.
"""
- return _sequence_field(CheckedPVector, "PVector", item_type, optional,
+ return _sequence_field(CheckedPVector, item_type, optional,
initial)
_valid = lambda item: (True, "")
+# Global dictionary to hold auto-generated field types: used for unpickling
+_pmap_field_types = {}
+
+def _restore_pmap_field_pickle(key_type, value_type, data):
+ """Unpickling function for auto-generated PMap field types."""
+ type_ = _pmap_field_types[key_type, value_type]
+ return _restore_pickle(type_, data)
+
+def _make_pmap_field_type(key_type, value_type):
+ """Create a subclass of CheckedPMap with the given key and value types."""
+ type_ = _pmap_field_types.get((key_type, value_type))
+ if type_ is not None:
+ return type_
+
+ class TheMap(CheckedPMap):
+ __key_type__ = key_type
+ __value_type__ = value_type
+
+ def __reduce__(self):
+ return (_restore_pmap_field_pickle,
+ (self.__key_type__, self.__value_type__, dict(self)))
+
+ TheMap.__name__ = (key_type.__name__.capitalize() +
+ value_type.__name__.capitalize() + "PMap")
+ _pmap_field_types[key_type, value_type] = TheMap
+ return TheMap
+
+
def pmap_field(key_type, value_type, optional=False, invariant=PFIELD_NO_INVARIANT):
"""
Create a checked ``PMap`` field.
@@ -199,11 +256,7 @@ def pmap_field(key_type, value_type, optional=False, invariant=PFIELD_NO_INVARIA
:return: A ``field`` containing a ``CheckedPMap``.
"""
- class TheMap(CheckedPMap):
- __key_type__ = key_type
- __value_type__ = value_type
- TheMap.__name__ = (key_type.__name__.capitalize() +
- value_type.__name__.capitalize() + "PMap")
+ TheMap = _make_pmap_field_type(key_type, value_type)
if optional:
def factory(argument):
| p*_field prevents pickle from working on a PClass
I would never use pickle in production, but while writing some strawman example storage code for an example app I discovered that although I can pickle basic PClasses, I can't pickle any that use pmap_field or pvector_field (and probably other field types with similar implementations).
e.g.:
```
>>> class Foo(PClass):
...     v = pvector_field(int)
...
>>> Foo()
Foo(v=IntPVector([]))
>>> dumps(Foo())
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
cPickle.PicklingError: Can't pickle <class 'pyrsistent._field_common.IntPVector'>: attribute lookup pyrsistent._field_common.IntPVector failed
```
The same happens for `pmap_field`.
I guess this is because of the way that those functions generate classes at runtime?
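For background, the usual workaround for pickling instances of dynamically generated classes (and what the patch above applies) is to give each class a `__reduce__` that routes through a module-level factory. A minimal standalone sketch, with names that are illustrative rather than pyrsistent's API:
```
import pickle

_types = {}

def _restore(item_type, data):
    # module-level function, so pickle can look it up by name
    return _make_type(item_type)(data)

def _make_type(item_type):
    cls = _types.get(item_type)
    if cls is None:
        class DynList(list):
            def __reduce__(self):
                # serialize as "call _restore with (item_type, contents)"
                return (_restore, (item_type, list(self)))
        DynList.__name__ = item_type.__name__.capitalize() + 'List'
        _types[item_type] = cls = DynList
    return cls

IntList = _make_type(int)
roundtripped = pickle.loads(pickle.dumps(IntList([1, 2])))
assert roundtripped == [1, 2] and type(roundtripped).__name__ == 'IntList'
```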
@tobgu if you can let me know what needs to be done to fix this I can try to submit a PR. | tobgu/pyrsistent | diff --git a/tests/class_test.py b/tests/class_test.py
index f7254b4..0caddba 100644
--- a/tests/class_test.py
+++ b/tests/class_test.py
@@ -2,7 +2,9 @@ from collections import Hashable
import math
import pickle
import pytest
-from pyrsistent import field, InvariantException, PClass, optional, CheckedPVector
+from pyrsistent import (
+ field, InvariantException, PClass, optional, CheckedPVector,
+ pmap_field, pset_field, pvector_field)
class Point(PClass):
@@ -11,6 +13,12 @@ class Point(PClass):
z = field(type=int, initial=0)
+class TypedContainerObj(PClass):
+ map = pmap_field(str, str)
+ set = pset_field(str)
+ vec = pvector_field(str)
+
+
def test_evolve_pclass_instance():
p = Point(x=1, y=2)
p2 = p.set(x=p.x+2)
@@ -165,6 +173,12 @@ def test_supports_pickling():
assert isinstance(p2, Point)
+def test_supports_pickling_with_typed_container_fields():
+ obj = TypedContainerObj(map={'foo': 'bar'}, set=['hello', 'there'], vec=['a', 'b'])
+ obj2 = pickle.loads(pickle.dumps(obj))
+ assert obj == obj2
+
+
def test_can_remove_optional_member():
p1 = Point(x=1, y=2)
p2 = p1.remove('y')
@@ -250,4 +264,4 @@ def test_multiple_global_invariants():
MultiInvariantGlobal(one=1)
assert False
except InvariantException as e:
- assert e.invariant_errors == (('x', 'y'),)
\ No newline at end of file
+ assert e.invariant_errors == (('x', 'y'),)
diff --git a/tests/record_test.py b/tests/record_test.py
index 9146bd0..b2439e5 100644
--- a/tests/record_test.py
+++ b/tests/record_test.py
@@ -13,6 +13,12 @@ class ARecord(PRecord):
y = field()
+class RecordContainingContainers(PRecord):
+ map = pmap_field(str, str)
+ vec = pvector_field(str)
+ set = pset_field(str)
+
+
def test_create():
r = ARecord(x=1, y='foo')
assert r.x == 1
@@ -223,6 +229,11 @@ def test_pickling():
assert x == y
assert isinstance(y, ARecord)
+def test_supports_pickling_with_typed_container_fields():
+ obj = RecordContainingContainers(
+ map={'foo': 'bar'}, set=['hello', 'there'], vec=['a', 'b'])
+ obj2 = pickle.loads(pickle.dumps(obj))
+ assert obj == obj2
def test_all_invariant_errors_reported():
class BRecord(PRecord):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 1
},
"num_modified_files": 1
} | 0.11 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.16
attrs==25.3.0
babel==2.17.0
cachetools==5.5.2
certifi==2025.1.31
chardet==5.2.0
charset-normalizer==3.4.1
colorama==0.4.6
coverage==7.8.0
distlib==0.3.9
docutils==0.21.2
exceptiongroup==1.2.2
filelock==3.18.0
hypothesis==6.130.5
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
iniconfig==2.1.0
Jinja2==3.1.6
MarkupSafe==3.0.2
memory_profiler==0.31
packaging==24.2
platformdirs==4.3.7
pluggy==1.5.0
psutil==2.1.1
py==1.11.0
Pygments==2.19.1
pyperform==1.86
pyproject-api==1.9.0
-e git+https://github.com/tobgu/pyrsistent.git@d35ea98728473bd070ff1e6ac5304e4e12ea816c#egg=pyrsistent
pytest==8.3.5
pytest-cov==6.0.0
requests==2.32.3
six==1.17.0
snowballstemmer==2.2.0
sortedcontainers==2.4.0
Sphinx==7.4.7
sphinx_rtd_theme==0.1.5
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
tomli==2.2.1
tox==4.25.0
typing_extensions==4.13.0
urllib3==2.3.0
virtualenv==20.29.3
zipp==3.21.0
| name: pyrsistent
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.16
- attrs==25.3.0
- babel==2.17.0
- cachetools==5.5.2
- certifi==2025.1.31
- chardet==5.2.0
- charset-normalizer==3.4.1
- colorama==0.4.6
- coverage==7.8.0
- distlib==0.3.9
- docutils==0.21.2
- exceptiongroup==1.2.2
- filelock==3.18.0
- hypothesis==6.130.5
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- jinja2==3.1.6
- markupsafe==3.0.2
- memory-profiler==0.31
- packaging==24.2
- platformdirs==4.3.7
- pluggy==1.5.0
- psutil==2.1.1
- py==1.11.0
- pygments==2.19.1
- pyperform==1.86
- pyproject-api==1.9.0
- pytest==8.3.5
- pytest-cov==6.0.0
- requests==2.32.3
- six==1.17.0
- snowballstemmer==2.2.0
- sortedcontainers==2.4.0
- sphinx==7.4.7
- sphinx-rtd-theme==0.1.5
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- tomli==2.2.1
- tox==4.25.0
- typing-extensions==4.13.0
- urllib3==2.3.0
- virtualenv==20.29.3
- zipp==3.21.0
prefix: /opt/conda/envs/pyrsistent
| [
"tests/class_test.py::test_supports_pickling_with_typed_container_fields",
"tests/record_test.py::test_supports_pickling_with_typed_container_fields"
]
| []
| [
"tests/class_test.py::test_evolve_pclass_instance",
"tests/class_test.py::test_direct_assignment_not_possible",
"tests/class_test.py::test_direct_delete_not_possible",
"tests/class_test.py::test_cannot_construct_with_undeclared_fields",
"tests/class_test.py::test_cannot_construct_with_wrong_type",
"tests/class_test.py::test_cannot_construct_without_mandatory_fields",
"tests/class_test.py::test_field_invariant_must_hold",
"tests/class_test.py::test_initial_value_set_when_not_present_in_arguments",
"tests/class_test.py::test_can_create_nested_structures_from_dict_and_serialize_back_to_dict",
"tests/class_test.py::test_can_serialize_with_custom_serializer",
"tests/class_test.py::test_implements_proper_equality_based_on_equality_of_fields",
"tests/class_test.py::test_is_hashable",
"tests/class_test.py::test_supports_nested_transformation",
"tests/class_test.py::test_repr",
"tests/class_test.py::test_global_invariant_check",
"tests/class_test.py::test_supports_pickling",
"tests/class_test.py::test_can_remove_optional_member",
"tests/class_test.py::test_cannot_remove_mandatory_member",
"tests/class_test.py::test_cannot_remove_non_existing_member",
"tests/class_test.py::test_evolver_without_evolution_returns_original_instance",
"tests/class_test.py::test_evolver_with_evolution_to_same_element_returns_original_instance",
"tests/class_test.py::test_evolver_supports_chained_set_and_remove",
"tests/class_test.py::test_string_as_type_specifier",
"tests/class_test.py::test_multiple_invariants_on_field",
"tests/class_test.py::test_multiple_global_invariants",
"tests/record_test.py::test_create",
"tests/record_test.py::test_correct_assignment",
"tests/record_test.py::test_direct_assignment_not_possible",
"tests/record_test.py::test_cannot_assign_undeclared_fields",
"tests/record_test.py::test_cannot_assign_wrong_type_to_fields",
"tests/record_test.py::test_cannot_construct_with_undeclared_fields",
"tests/record_test.py::test_cannot_construct_with_fields_of_wrong_type",
"tests/record_test.py::test_support_record_inheritance",
"tests/record_test.py::test_single_type_spec",
"tests/record_test.py::test_remove",
"tests/record_test.py::test_remove_non_existing_member",
"tests/record_test.py::test_field_invariant_must_hold",
"tests/record_test.py::test_global_invariant_must_hold",
"tests/record_test.py::test_set_multiple_fields",
"tests/record_test.py::test_initial_value",
"tests/record_test.py::test_type_specification_must_be_a_type",
"tests/record_test.py::test_initial_must_be_of_correct_type",
"tests/record_test.py::test_invariant_must_be_callable",
"tests/record_test.py::test_global_invariants_are_inherited",
"tests/record_test.py::test_global_invariants_must_be_callable",
"tests/record_test.py::test_repr",
"tests/record_test.py::test_factory",
"tests/record_test.py::test_factory_must_be_callable",
"tests/record_test.py::test_nested_record_construction",
"tests/record_test.py::test_pickling",
"tests/record_test.py::test_all_invariant_errors_reported",
"tests/record_test.py::test_precord_factory_method_is_idempotent",
"tests/record_test.py::test_serialize",
"tests/record_test.py::test_nested_serialize",
"tests/record_test.py::test_serializer_must_be_callable",
"tests/record_test.py::test_transform_without_update_returns_same_precord",
"tests/record_test.py::test_nested_create_serialize",
"tests/record_test.py::test_pset_field_initial_value",
"tests/record_test.py::test_pset_field_custom_initial",
"tests/record_test.py::test_pset_field_factory",
"tests/record_test.py::test_pset_field_checked_set",
"tests/record_test.py::test_pset_field_type",
"tests/record_test.py::test_pset_field_mandatory",
"tests/record_test.py::test_pset_field_default_non_optional",
"tests/record_test.py::test_pset_field_explicit_non_optional",
"tests/record_test.py::test_pset_field_optional",
"tests/record_test.py::test_pset_field_name",
"tests/record_test.py::test_pvector_field_initial_value",
"tests/record_test.py::test_pvector_field_custom_initial",
"tests/record_test.py::test_pvector_field_factory",
"tests/record_test.py::test_pvector_field_checked_vector",
"tests/record_test.py::test_pvector_field_type",
"tests/record_test.py::test_pvector_field_mandatory",
"tests/record_test.py::test_pvector_field_default_non_optional",
"tests/record_test.py::test_pvector_field_explicit_non_optional",
"tests/record_test.py::test_pvector_field_optional",
"tests/record_test.py::test_pvector_field_name",
"tests/record_test.py::test_pvector_field_create_from_nested_serialized_data",
"tests/record_test.py::test_pmap_field_initial_value",
"tests/record_test.py::test_pmap_field_factory",
"tests/record_test.py::test_pmap_field_checked_map_key",
"tests/record_test.py::test_pmap_field_checked_map_value",
"tests/record_test.py::test_pmap_field_mandatory",
"tests/record_test.py::test_pmap_field_default_non_optional",
"tests/record_test.py::test_pmap_field_explicit_non_optional",
"tests/record_test.py::test_pmap_field_optional",
"tests/record_test.py::test_pmap_field_name",
"tests/record_test.py::test_pmap_field_invariant",
"tests/record_test.py::test_pmap_field_create_from_nested_serialized_data"
]
| []
| MIT License | 238 | [
"pyrsistent/_field_common.py"
]
| [
"pyrsistent/_field_common.py"
]
|
|
docker__docker-py-770 | 02f330d8dc3da47215bed47b44fac73941ea6920 | 2015-09-15 21:47:09 | f479720d517a7db7f886916190b3032d29d18f10 | shin-: #764 needs to be merged for the tests to pass. | diff --git a/docker/utils/utils.py b/docker/utils/utils.py
index 46b35160..36edf8de 100644
--- a/docker/utils/utils.py
+++ b/docker/utils/utils.py
@@ -457,7 +457,8 @@ def create_host_config(
restart_policy=None, cap_add=None, cap_drop=None, devices=None,
extra_hosts=None, read_only=None, pid_mode=None, ipc_mode=None,
security_opt=None, ulimits=None, log_config=None, mem_limit=None,
- memswap_limit=None, cgroup_parent=None, group_add=None, version=None
+ memswap_limit=None, cgroup_parent=None, group_add=None, cpu_quota=None,
+ cpu_period=None, version=None
):
host_config = {}
@@ -518,7 +519,7 @@ def create_host_config(
host_config['Devices'] = parse_devices(devices)
if group_add:
- if compare_version(version, '1.20') < 0:
+ if version_lt(version, '1.20'):
raise errors.InvalidVersion(
'group_add param not supported for API version < 1.20'
)
@@ -601,6 +602,30 @@ def create_host_config(
log_config = LogConfig(**log_config)
host_config['LogConfig'] = log_config
+ if cpu_quota:
+ if not isinstance(cpu_quota, int):
+ raise TypeError(
+ 'Invalid type for cpu_quota param: expected int but'
+ ' found {0}'.format(type(cpu_quota))
+ )
+ if version_lt(version, '1.19'):
+ raise errors.InvalidVersion(
+ 'cpu_quota param not supported for API version < 1.19'
+ )
+ host_config['CpuQuota'] = cpu_quota
+
+ if cpu_period:
+ if not isinstance(cpu_period, int):
+ raise TypeError(
+ 'Invalid type for cpu_period param: expected int but'
+ ' found {0}'.format(type(cpu_period))
+ )
+ if version_lt(version, '1.19'):
+ raise errors.InvalidVersion(
+ 'cpu_period param not supported for API version < 1.19'
+ )
+ host_config['CpuPeriod'] = cpu_period
+
return host_config
| Add support for --cpu-quota & --cpu-period run flags
Any chance we could add support for the above run flags?
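With the patch above, the new parameters would be used roughly like this; the values are illustrative, and since CFS quota/period are given in microseconds, 50000/100000 caps the container at about half a CPU:
```python
from docker.utils import create_host_config

# CFS bandwidth control: allow 50ms of CPU time per 100ms period (~0.5 CPU)
host_config = create_host_config(
    version='1.19',
    cpu_quota=50000,
    cpu_period=100000,
)
```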
https://docs.docker.com/reference/commandline/run/ | docker/docker-py | diff --git a/tests/utils_test.py b/tests/utils_test.py
index b67ac4ec..45929f73 100644
--- a/tests/utils_test.py
+++ b/tests/utils_test.py
@@ -25,6 +25,159 @@ TEST_CERT_DIR = os.path.join(
)
+class HostConfigTest(base.BaseTestCase):
+ def test_create_host_config_no_options(self):
+ config = create_host_config(version='1.19')
+ self.assertFalse('NetworkMode' in config)
+
+ def test_create_host_config_no_options_newer_api_version(self):
+ config = create_host_config(version='1.20')
+ self.assertEqual(config['NetworkMode'], 'default')
+
+ def test_create_host_config_invalid_cpu_cfs_types(self):
+ with pytest.raises(TypeError):
+ create_host_config(version='1.20', cpu_quota='0')
+
+ with pytest.raises(TypeError):
+ create_host_config(version='1.20', cpu_period='0')
+
+ with pytest.raises(TypeError):
+ create_host_config(version='1.20', cpu_quota=23.11)
+
+ with pytest.raises(TypeError):
+ create_host_config(version='1.20', cpu_period=1999.0)
+
+ def test_create_host_config_with_cpu_quota(self):
+ config = create_host_config(version='1.20', cpu_quota=1999)
+ self.assertEqual(config.get('CpuQuota'), 1999)
+
+ def test_create_host_config_with_cpu_period(self):
+ config = create_host_config(version='1.20', cpu_period=1999)
+ self.assertEqual(config.get('CpuPeriod'), 1999)
+
+
+class UlimitTest(base.BaseTestCase):
+ def test_create_host_config_dict_ulimit(self):
+ ulimit_dct = {'name': 'nofile', 'soft': 8096}
+ config = create_host_config(
+ ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
+ )
+ self.assertIn('Ulimits', config)
+ self.assertEqual(len(config['Ulimits']), 1)
+ ulimit_obj = config['Ulimits'][0]
+ self.assertTrue(isinstance(ulimit_obj, Ulimit))
+ self.assertEqual(ulimit_obj.name, ulimit_dct['name'])
+ self.assertEqual(ulimit_obj.soft, ulimit_dct['soft'])
+ self.assertEqual(ulimit_obj['Soft'], ulimit_obj.soft)
+
+ def test_create_host_config_dict_ulimit_capitals(self):
+ ulimit_dct = {'Name': 'nofile', 'Soft': 8096, 'Hard': 8096 * 4}
+ config = create_host_config(
+ ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
+ )
+ self.assertIn('Ulimits', config)
+ self.assertEqual(len(config['Ulimits']), 1)
+ ulimit_obj = config['Ulimits'][0]
+ self.assertTrue(isinstance(ulimit_obj, Ulimit))
+ self.assertEqual(ulimit_obj.name, ulimit_dct['Name'])
+ self.assertEqual(ulimit_obj.soft, ulimit_dct['Soft'])
+ self.assertEqual(ulimit_obj.hard, ulimit_dct['Hard'])
+ self.assertEqual(ulimit_obj['Soft'], ulimit_obj.soft)
+
+ def test_create_host_config_obj_ulimit(self):
+ ulimit_dct = Ulimit(name='nofile', soft=8096)
+ config = create_host_config(
+ ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
+ )
+ self.assertIn('Ulimits', config)
+ self.assertEqual(len(config['Ulimits']), 1)
+ ulimit_obj = config['Ulimits'][0]
+ self.assertTrue(isinstance(ulimit_obj, Ulimit))
+ self.assertEqual(ulimit_obj, ulimit_dct)
+
+ def test_ulimit_invalid_type(self):
+ self.assertRaises(ValueError, lambda: Ulimit(name=None))
+ self.assertRaises(ValueError, lambda: Ulimit(name='hello', soft='123'))
+ self.assertRaises(ValueError, lambda: Ulimit(name='hello', hard='456'))
+
+
+class LogConfigTest(base.BaseTestCase):
+ def test_create_host_config_dict_logconfig(self):
+ dct = {'type': LogConfig.types.SYSLOG, 'config': {'key1': 'val1'}}
+ config = create_host_config(
+ version=DEFAULT_DOCKER_API_VERSION, log_config=dct
+ )
+ self.assertIn('LogConfig', config)
+ self.assertTrue(isinstance(config['LogConfig'], LogConfig))
+ self.assertEqual(dct['type'], config['LogConfig'].type)
+
+ def test_create_host_config_obj_logconfig(self):
+ obj = LogConfig(type=LogConfig.types.SYSLOG, config={'key1': 'val1'})
+ config = create_host_config(
+ version=DEFAULT_DOCKER_API_VERSION, log_config=obj
+ )
+ self.assertIn('LogConfig', config)
+ self.assertTrue(isinstance(config['LogConfig'], LogConfig))
+ self.assertEqual(obj, config['LogConfig'])
+
+ def test_logconfig_invalid_config_type(self):
+ with pytest.raises(ValueError):
+ LogConfig(type=LogConfig.types.JSON, config='helloworld')
+
+
+class KwargsFromEnvTest(base.BaseTestCase):
+ def setUp(self):
+ self.os_environ = os.environ.copy()
+
+ def tearDown(self):
+ os.environ = self.os_environ
+
+ def test_kwargs_from_env_empty(self):
+ os.environ.update(DOCKER_HOST='',
+ DOCKER_CERT_PATH='',
+ DOCKER_TLS_VERIFY='')
+
+ kwargs = kwargs_from_env()
+ self.assertEqual(None, kwargs.get('base_url'))
+ self.assertEqual(None, kwargs.get('tls'))
+
+ def test_kwargs_from_env_tls(self):
+ os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
+ DOCKER_CERT_PATH=TEST_CERT_DIR,
+ DOCKER_TLS_VERIFY='1')
+ kwargs = kwargs_from_env(assert_hostname=False)
+ self.assertEqual('https://192.168.59.103:2376', kwargs['base_url'])
+ self.assertTrue('ca.pem' in kwargs['tls'].verify)
+ self.assertTrue('cert.pem' in kwargs['tls'].cert[0])
+ self.assertTrue('key.pem' in kwargs['tls'].cert[1])
+ self.assertEqual(False, kwargs['tls'].assert_hostname)
+ try:
+ client = Client(**kwargs)
+ self.assertEqual(kwargs['base_url'], client.base_url)
+ self.assertEqual(kwargs['tls'].verify, client.verify)
+ self.assertEqual(kwargs['tls'].cert, client.cert)
+ except TypeError as e:
+ self.fail(e)
+
+ def test_kwargs_from_env_no_cert_path(self):
+ try:
+ temp_dir = tempfile.mkdtemp()
+ cert_dir = os.path.join(temp_dir, '.docker')
+ shutil.copytree(TEST_CERT_DIR, cert_dir)
+
+ os.environ.update(HOME=temp_dir,
+ DOCKER_CERT_PATH='',
+ DOCKER_TLS_VERIFY='1')
+
+ kwargs = kwargs_from_env()
+ self.assertIn(cert_dir, kwargs['tls'].verify)
+ self.assertIn(cert_dir, kwargs['tls'].cert[0])
+ self.assertIn(cert_dir, kwargs['tls'].cert[1])
+ finally:
+ if temp_dir:
+ shutil.rmtree(temp_dir)
+
+
class UtilsTest(base.BaseTestCase):
longMessage = True
@@ -39,12 +192,6 @@ class UtilsTest(base.BaseTestCase):
local_tempfile.close()
return local_tempfile.name
- def setUp(self):
- self.os_environ = os.environ.copy()
-
- def tearDown(self):
- os.environ = self.os_environ
-
def test_parse_repository_tag(self):
self.assertEqual(parse_repository_tag("root"),
("root", None))
@@ -103,51 +250,6 @@ class UtilsTest(base.BaseTestCase):
assert parse_host(val, 'win32') == tcp_port
- def test_kwargs_from_env_empty(self):
- os.environ.update(DOCKER_HOST='',
- DOCKER_CERT_PATH='',
- DOCKER_TLS_VERIFY='')
-
- kwargs = kwargs_from_env()
- self.assertEqual(None, kwargs.get('base_url'))
- self.assertEqual(None, kwargs.get('tls'))
-
- def test_kwargs_from_env_tls(self):
- os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
- DOCKER_CERT_PATH=TEST_CERT_DIR,
- DOCKER_TLS_VERIFY='1')
- kwargs = kwargs_from_env(assert_hostname=False)
- self.assertEqual('https://192.168.59.103:2376', kwargs['base_url'])
- self.assertTrue('ca.pem' in kwargs['tls'].verify)
- self.assertTrue('cert.pem' in kwargs['tls'].cert[0])
- self.assertTrue('key.pem' in kwargs['tls'].cert[1])
- self.assertEqual(False, kwargs['tls'].assert_hostname)
- try:
- client = Client(**kwargs)
- self.assertEqual(kwargs['base_url'], client.base_url)
- self.assertEqual(kwargs['tls'].verify, client.verify)
- self.assertEqual(kwargs['tls'].cert, client.cert)
- except TypeError as e:
- self.fail(e)
-
- def test_kwargs_from_env_no_cert_path(self):
- try:
- temp_dir = tempfile.mkdtemp()
- cert_dir = os.path.join(temp_dir, '.docker')
- shutil.copytree(TEST_CERT_DIR, cert_dir)
-
- os.environ.update(HOME=temp_dir,
- DOCKER_CERT_PATH='',
- DOCKER_TLS_VERIFY='1')
-
- kwargs = kwargs_from_env()
- self.assertIn(cert_dir, kwargs['tls'].verify)
- self.assertIn(cert_dir, kwargs['tls'].cert[0])
- self.assertIn(cert_dir, kwargs['tls'].cert[1])
- finally:
- if temp_dir:
- shutil.rmtree(temp_dir)
-
def test_parse_env_file_proper(self):
env_file = self.generate_tempfile(
file_content='USER=jdoe\nPASS=secret')
@@ -181,79 +283,6 @@ class UtilsTest(base.BaseTestCase):
for filters, expected in tests:
self.assertEqual(convert_filters(filters), expected)
- def test_create_host_config_no_options(self):
- config = create_host_config(version='1.19')
- self.assertFalse('NetworkMode' in config)
-
- def test_create_host_config_no_options_newer_api_version(self):
- config = create_host_config(version='1.20')
- self.assertEqual(config['NetworkMode'], 'default')
-
- def test_create_host_config_dict_ulimit(self):
- ulimit_dct = {'name': 'nofile', 'soft': 8096}
- config = create_host_config(
- ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
- )
- self.assertIn('Ulimits', config)
- self.assertEqual(len(config['Ulimits']), 1)
- ulimit_obj = config['Ulimits'][0]
- self.assertTrue(isinstance(ulimit_obj, Ulimit))
- self.assertEqual(ulimit_obj.name, ulimit_dct['name'])
- self.assertEqual(ulimit_obj.soft, ulimit_dct['soft'])
- self.assertEqual(ulimit_obj['Soft'], ulimit_obj.soft)
-
- def test_create_host_config_dict_ulimit_capitals(self):
- ulimit_dct = {'Name': 'nofile', 'Soft': 8096, 'Hard': 8096 * 4}
- config = create_host_config(
- ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
- )
- self.assertIn('Ulimits', config)
- self.assertEqual(len(config['Ulimits']), 1)
- ulimit_obj = config['Ulimits'][0]
- self.assertTrue(isinstance(ulimit_obj, Ulimit))
- self.assertEqual(ulimit_obj.name, ulimit_dct['Name'])
- self.assertEqual(ulimit_obj.soft, ulimit_dct['Soft'])
- self.assertEqual(ulimit_obj.hard, ulimit_dct['Hard'])
- self.assertEqual(ulimit_obj['Soft'], ulimit_obj.soft)
-
- def test_create_host_config_obj_ulimit(self):
- ulimit_dct = Ulimit(name='nofile', soft=8096)
- config = create_host_config(
- ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
- )
- self.assertIn('Ulimits', config)
- self.assertEqual(len(config['Ulimits']), 1)
- ulimit_obj = config['Ulimits'][0]
- self.assertTrue(isinstance(ulimit_obj, Ulimit))
- self.assertEqual(ulimit_obj, ulimit_dct)
-
- def test_ulimit_invalid_type(self):
- self.assertRaises(ValueError, lambda: Ulimit(name=None))
- self.assertRaises(ValueError, lambda: Ulimit(name='hello', soft='123'))
- self.assertRaises(ValueError, lambda: Ulimit(name='hello', hard='456'))
-
- def test_create_host_config_dict_logconfig(self):
- dct = {'type': LogConfig.types.SYSLOG, 'config': {'key1': 'val1'}}
- config = create_host_config(
- version=DEFAULT_DOCKER_API_VERSION, log_config=dct
- )
- self.assertIn('LogConfig', config)
- self.assertTrue(isinstance(config['LogConfig'], LogConfig))
- self.assertEqual(dct['type'], config['LogConfig'].type)
-
- def test_create_host_config_obj_logconfig(self):
- obj = LogConfig(type=LogConfig.types.SYSLOG, config={'key1': 'val1'})
- config = create_host_config(
- version=DEFAULT_DOCKER_API_VERSION, log_config=obj
- )
- self.assertIn('LogConfig', config)
- self.assertTrue(isinstance(config['LogConfig'], LogConfig))
- self.assertEqual(obj, config['LogConfig'])
-
- def test_logconfig_invalid_config_type(self):
- with pytest.raises(ValueError):
- LogConfig(type=LogConfig.types.JSON, config='helloworld')
-
def test_resolve_repository_name(self):
# docker hub library image
self.assertEqual(
@@ -407,6 +436,8 @@ class UtilsTest(base.BaseTestCase):
None,
)
+
+class PortsTest(base.BaseTestCase):
def test_split_port_with_host_ip(self):
internal_port, external_port = split_port("127.0.0.1:1000:2000")
self.assertEqual(internal_port, ["2000"])
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 1.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"flake8"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/docker/docker-py.git@02f330d8dc3da47215bed47b44fac73941ea6920#egg=docker_py
exceptiongroup==1.2.2
flake8==7.2.0
iniconfig==2.1.0
mccabe==0.7.0
packaging==24.2
pluggy==1.5.0
pycodestyle==2.13.0
pyflakes==3.3.2
pytest==8.3.5
requests==2.5.3
six==1.17.0
tomli==2.2.1
websocket_client==0.32.0
| name: docker-py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- flake8==7.2.0
- iniconfig==2.1.0
- mccabe==0.7.0
- packaging==24.2
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyflakes==3.3.2
- pytest==8.3.5
- requests==2.5.3
- six==1.17.0
- tomli==2.2.1
- websocket-client==0.32.0
prefix: /opt/conda/envs/docker-py
| [
"tests/utils_test.py::HostConfigTest::test_create_host_config_with_cpu_period",
"tests/utils_test.py::HostConfigTest::test_create_host_config_with_cpu_quota"
]
| []
| [
"tests/utils_test.py::HostConfigTest::test_create_host_config_invalid_cpu_cfs_types",
"tests/utils_test.py::HostConfigTest::test_create_host_config_no_options",
"tests/utils_test.py::HostConfigTest::test_create_host_config_no_options_newer_api_version",
"tests/utils_test.py::UlimitTest::test_create_host_config_dict_ulimit",
"tests/utils_test.py::UlimitTest::test_create_host_config_dict_ulimit_capitals",
"tests/utils_test.py::UlimitTest::test_create_host_config_obj_ulimit",
"tests/utils_test.py::UlimitTest::test_ulimit_invalid_type",
"tests/utils_test.py::LogConfigTest::test_create_host_config_dict_logconfig",
"tests/utils_test.py::LogConfigTest::test_create_host_config_obj_logconfig",
"tests/utils_test.py::LogConfigTest::test_logconfig_invalid_config_type",
"tests/utils_test.py::KwargsFromEnvTest::test_kwargs_from_env_empty",
"tests/utils_test.py::KwargsFromEnvTest::test_kwargs_from_env_no_cert_path",
"tests/utils_test.py::KwargsFromEnvTest::test_kwargs_from_env_tls",
"tests/utils_test.py::UtilsTest::test_convert_filters",
"tests/utils_test.py::UtilsTest::test_parse_bytes",
"tests/utils_test.py::UtilsTest::test_parse_env_file_commented_line",
"tests/utils_test.py::UtilsTest::test_parse_env_file_invalid_line",
"tests/utils_test.py::UtilsTest::test_parse_env_file_proper",
"tests/utils_test.py::UtilsTest::test_parse_host",
"tests/utils_test.py::UtilsTest::test_parse_host_empty_value",
"tests/utils_test.py::UtilsTest::test_parse_repository_tag",
"tests/utils_test.py::UtilsTest::test_resolve_authconfig",
"tests/utils_test.py::UtilsTest::test_resolve_registry_and_auth",
"tests/utils_test.py::UtilsTest::test_resolve_repository_name",
"tests/utils_test.py::PortsTest::test_build_port_bindings_with_matching_internal_port_ranges",
"tests/utils_test.py::PortsTest::test_build_port_bindings_with_matching_internal_ports",
"tests/utils_test.py::PortsTest::test_build_port_bindings_with_nonmatching_internal_port_ranges",
"tests/utils_test.py::PortsTest::test_build_port_bindings_with_nonmatching_internal_ports",
"tests/utils_test.py::PortsTest::test_build_port_bindings_with_one_port",
"tests/utils_test.py::PortsTest::test_build_port_bindings_with_port_range",
"tests/utils_test.py::PortsTest::test_host_only_with_colon",
"tests/utils_test.py::PortsTest::test_non_matching_length_port_ranges",
"tests/utils_test.py::PortsTest::test_port_and_range_invalid",
"tests/utils_test.py::PortsTest::test_port_only_with_colon",
"tests/utils_test.py::PortsTest::test_split_port_invalid",
"tests/utils_test.py::PortsTest::test_split_port_no_host_port",
"tests/utils_test.py::PortsTest::test_split_port_range_no_host_port",
"tests/utils_test.py::PortsTest::test_split_port_range_with_host_ip_no_port",
"tests/utils_test.py::PortsTest::test_split_port_range_with_host_port",
"tests/utils_test.py::PortsTest::test_split_port_range_with_protocol",
"tests/utils_test.py::PortsTest::test_split_port_with_host_ip",
"tests/utils_test.py::PortsTest::test_split_port_with_host_ip_no_port",
"tests/utils_test.py::PortsTest::test_split_port_with_host_port",
"tests/utils_test.py::PortsTest::test_split_port_with_protocol",
"tests/utils_test.py::ExcludePathsTest::test_directory",
"tests/utils_test.py::ExcludePathsTest::test_directory_with_single_exception",
"tests/utils_test.py::ExcludePathsTest::test_directory_with_subdir_exception",
"tests/utils_test.py::ExcludePathsTest::test_directory_with_trailing_slash",
"tests/utils_test.py::ExcludePathsTest::test_directory_with_wildcard_exception",
"tests/utils_test.py::ExcludePathsTest::test_exclude_custom_dockerfile",
"tests/utils_test.py::ExcludePathsTest::test_exclude_dockerfile_dockerignore",
"tests/utils_test.py::ExcludePathsTest::test_no_dupes",
"tests/utils_test.py::ExcludePathsTest::test_no_excludes",
"tests/utils_test.py::ExcludePathsTest::test_question_mark",
"tests/utils_test.py::ExcludePathsTest::test_single_filename",
"tests/utils_test.py::ExcludePathsTest::test_single_filename_trailing_slash",
"tests/utils_test.py::ExcludePathsTest::test_single_subdir_single_filename",
"tests/utils_test.py::ExcludePathsTest::test_single_subdir_wildcard_filename",
"tests/utils_test.py::ExcludePathsTest::test_subdirectory",
"tests/utils_test.py::ExcludePathsTest::test_wildcard_exclude",
"tests/utils_test.py::ExcludePathsTest::test_wildcard_filename_end",
"tests/utils_test.py::ExcludePathsTest::test_wildcard_filename_start",
"tests/utils_test.py::ExcludePathsTest::test_wildcard_subdir_single_filename",
"tests/utils_test.py::ExcludePathsTest::test_wildcard_subdir_wildcard_filename",
"tests/utils_test.py::ExcludePathsTest::test_wildcard_with_exception",
"tests/utils_test.py::ExcludePathsTest::test_wildcard_with_wildcard_exception"
]
| []
| Apache License 2.0 | 239 | [
"docker/utils/utils.py"
]
| [
"docker/utils/utils.py"
]
|
rbarrois__python-semanticversion-27 | 2ed3d39c291080c61edd9139370939e1fdc3209a | 2015-09-15 22:15:55 | 2ed3d39c291080c61edd9139370939e1fdc3209a | diff --git a/CREDITS b/CREDITS
index c700c77..1506f2a 100644
--- a/CREDITS
+++ b/CREDITS
@@ -18,8 +18,9 @@ Contributors
The project has received contributions from (in alphabetical order):
* Raphaël Barrois <[email protected]> (https://github.com/rbarrois)
-* Michael Hrivnak <[email protected]> (https://github.com/mhrivnak)
* Rick Eyre <[email protected]> (https://github.com/rickeyre)
+* Michael Hrivnak <[email protected]> (https://github.com/mhrivnak)
+* William Minchin <[email protected]> (https://github.com/minchinweb)
Contributor license agreement
diff --git a/ChangeLog b/ChangeLog
index cd7c3db..7b233fe 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,35 +1,12 @@
ChangeLog
=========
-2.5.0 (master)
---------------
+2.4.3 (unreleased)
+------------------
*Bugfix:*
- `#18 <https://github.com/rbarrois/python-semanticversion/issues/18>`_: According to SemVer 2.0.0, build numbers aren't ordered.
-
- * Remove specs of the ``Spec('<1.1.3+')`` form
- * Comparing ``Version('0.1.0')`` to ``Version('0.1.0+bcd')`` has new
- rules::
-
- >>> Version('0.1.0+1') == Version('0.1.0+bcd')
- False
- >>> Version('0.1.0+1') != Version('0.1.0+bcd')
- True
- >>> Version('0.1.0+1') < Version('0.1.0+bcd')
- False
- >>> Version('0.1.0+1') > Version('0.1.0+bcd')
- False
- >>> Version('0.1.0+1') <= Version('0.1.0+bcd')
- False
- >>> Version('0.1.0+1') >= Version('0.1.0+bcd')
- False
- >>> compare(Version('0.1.0+1'), Version('0.1.0+bcd'))
- NotImplemented
-
- * :func:`semantic_version.compare` returns ``NotImplemented`` when its
- parameters differ only by build metadata
- * ``Spec('<=1.3.0')`` now matches ``Version('1.3.0+abde24fe883')``
+ * Fix handling of bumping pre-release versions, thanks to @minchinweb.
2.4.2 (2015-07-02)
------------------
diff --git a/README.rst b/README.rst
index 484692e..5923a30 100644
--- a/README.rst
+++ b/README.rst
@@ -247,18 +247,19 @@ definition or (for the empty pre-release number) if a single dash is appended
False
-Including build metadata in specifications
-""""""""""""""""""""""""""""""""""""""""""
+Including build identifiers in specifications
+"""""""""""""""""""""""""""""""""""""""""""""
-Build metadata has no ordering; thus, the only meaningful comparison including
-build metadata is equality.
+The same rule applies for the build identifier: comparisons will include it only
+if it was included in the :class:`Spec` definition, or - for the unnumbered build
+version - if a single + is appended to the definition(``1.0.0+``, ``1.0.0-alpha+``):
.. code-block:: pycon
- >>> Version('1.0.0+build2') in Spec('<=1.0.0') # Build metadata ignored
+ >>> Version('1.0.0+build2') in Spec('<=1.0.0') # Build identifier ignored
True
- >>> Version('1.0.0+build2') in Spec('==1.0.0+build2') # Include build in checks
+ >>> Version('1.0.0+build2') in Spec('<=1.0.0+') # Include build in checks
False
diff --git a/docs/reference.rst b/docs/reference.rst
index 261e738..3550c25 100644
--- a/docs/reference.rst
+++ b/docs/reference.rst
@@ -22,13 +22,9 @@ Module-level functions
:param str v1: The first version to compare
:param str v2: The second version to compare
:raises: :exc:`ValueError`, if any version string is invalid
- :rtype: ``int``, -1 / 0 / 1 as for a :func:`cmp` comparison;
- ``NotImplemented`` if versions only differ by build metadata
+ :rtype: ``int``, -1 / 0 / 1 as for a :func:`cmp` comparison
-.. warning:: Since build metadata has no ordering,
- ``compare(Version('0.1.1'), Version('0.1.1+3'))`` returns ``NotImplemented``
-
.. function:: match(spec, version)
@@ -111,9 +107,9 @@ Representing a version (the Version class)
.. attribute:: build
- ``tuple`` of ``strings``, the build metadata.
+ ``tuple`` of ``strings``, the build component.
- It contains the various dot-separated identifiers in the build metadata.
+ It contains the various dot-separated identifiers in the build component.
May be ``None`` for a :attr:`partial` version number in a ``<major>``, ``<major>.<minor>``,
``<major>.<minor>.<patch>`` or ``<major>.<minor>.<patch>-<prerelease>`` format.
@@ -155,7 +151,7 @@ Representing a version (the Version class)
For instance, ``Version('1.0', partial=True)`` means "any version beginning in ``1.0``".
``Version('1.0.1-alpha', partial=True)`` means "The ``1.0.1-alpha`` version or any
- any release differing only in build metadata": ``1.0.1-alpha+build3`` matches, ``1.0.1-alpha.2`` doesn't.
+ ulterior build of that same version": ``1.0.1-alpha+build3`` matches, ``1.0.1-alpha.2`` doesn't.
Examples::
@@ -250,6 +246,7 @@ The main issue with representing version specifications is that the usual syntax
does not map well onto `SemVer`_ precedence rules:
* A specification of ``<1.3.4`` is not expected to allow ``1.3.4-rc2``, but strict `SemVer`_ comparisons allow it ;
+* Converting the previous specification to ``<=1.3.3`` in order to avoid ``1.3.4``
prereleases has the issue of excluding ``1.3.3+build3`` ;
* It may be necessary to exclude either all variations on a patch-level release
(``!=1.3.3``) or specifically one build-level release (``1.3.3-build.434``).
@@ -259,7 +256,7 @@ In order to have version specification behave naturally, the rules are the follo
* If no pre-release number was included in the specification, pre-release numbers
are ignored when deciding whether a version satisfies a specification.
-* If no build metadata was included in the specification, build metadata is ignored
+* If no build number was included in the specification, build numbers are ignored
when deciding whether a version satisfies a specification.
This means that::
@@ -270,7 +267,7 @@ This means that::
True
>>> Version('1.1.1-rc1+build4') in Spec('<=1.1.1-rc1')
True
- >>> Version('1.1.1-rc1+build4') in Spec('==1.1.1-rc1+build2')
+ >>> Version('1.1.1-rc1+build4') in Spec('<=1.1.1-rc1+build2')
False
@@ -288,31 +285,20 @@ rules apply:
>>> Version('1.1.1-rc1') in Spec('<1.1.1-')
True
-* Setting a build metadata separator without build metadata (``<=1.1.1+``)
- forces matches "up to the build metadata"; use this to include/exclude a
- release lacking build metadata while excluding/including all other builds
- of that release
+* Setting a build separator without a build identifier (``>1.1.1+``) forces
+ satisfaction tests to include both prerelease and build identifiers::
- >>> Version('1.1.1') in Spec('==1.1.1+')
- True
- >>> Version('1.1.1+2') in Spec('==1.1.1+')
+ >>> Version('1.1.1+build2') in Spec('>1.1.1')
False
-
-
-.. warning:: As stated in the `SemVer`_ specification, the ordering of build metadata is *undefined*.
- Thus, a :class:`Spec` string can only mention build metadata to include or exclude a specific version:
-
- * ``==1.1.1+b1234`` includes this specific build
- * ``!=1.1.1+b1234`` excludes it (but would match ``1.1.1+b1235``
- * ``<1.1.1+b1`` is invalid
-
+ >>> Version('1.1.1+build2') in Spec('>1.1.1+')
+ True
.. class:: Spec(spec_string[, spec_string[, ...]])
Stores a list of :class:`SpecItem` and matches any :class:`Version` against all
contained :class:`specs <SpecItem>`.
- It is built from a comma-separated list of version specifications::
+ It is build from a comma-separated list of version specifications::
>>> Spec('>=1.0.0,<1.2.0,!=1.1.4')
<Spec: (
@@ -441,16 +427,16 @@ rules apply:
>>> SpecItem('>=0.1.1').match(Version('0.1.1-rc1')) # pre-release satisfy conditions
True
- >>> Version('0.1.1+build2') in SpecItem('>=0.1.1') # build metadata is ignored when checking for precedence
+ >>> Version('0.1.1+build2') in SpecItem('>=0.1.1') # build version satisfy specifications
True
>>>
>>> # Use the '-' marker to include the pre-release component in checks
>>> SpecItem('>=0.1.1-').match(Version('0.1.1-rc1')
False
- >>> # Use the '+' marker to include the build metadata in checks
- >>> SpecItem('==0.1.1+').match(Version('0.1.1+b1234')
- False
>>>
+ >>> # Use the '+' marker to include the build identifier in checks
+ >>> SpecItem('<=0.1.1-alpha+').match(Version('0.1.1-alpha+build1'))
+ False
.. rubric:: Attributes
diff --git a/semantic_version/base.py b/semantic_version/base.py
index 982fcc8..4d9b87e 100644
--- a/semantic_version/base.py
+++ b/semantic_version/base.py
@@ -89,15 +89,24 @@ class Version(object):
return int(value)
def next_major(self):
- return Version('.'.join(str(x) for x in [self.major + 1, 0, 0]))
+ if self.prerelease and self.minor is 0 and self.patch is 0:
+ return Version('.'.join(str(x) for x in [self.major, self.minor, self.patch]))
+ else:
+ return Version('.'.join(str(x) for x in [self.major + 1, 0, 0]))
def next_minor(self):
- return Version(
- '.'.join(str(x) for x in [self.major, self.minor + 1, 0]))
+ if self.prerelease and self.patch is 0:
+ return Version('.'.join(str(x) for x in [self.major, self.minor, self.patch]))
+ else:
+ return Version(
+ '.'.join(str(x) for x in [self.major, self.minor + 1, 0]))
def next_patch(self):
- return Version(
- '.'.join(str(x) for x in [self.major, self.minor, self.patch + 1]))
+ if self.prerelease:
+ return Version('.'.join(str(x) for x in [self.major, self.minor, self.patch]))
+ else:
+ return Version(
+ '.'.join(str(x) for x in [self.major, self.minor, self.patch + 1]))
@classmethod
def coerce(cls, version_string, partial=False):
@@ -290,14 +299,20 @@ class Version(object):
return 0
def build_cmp(a, b):
- """Compare build metadata.
+ """Compare build components.
- Special rule: there is no ordering on build metadata.
+ Special rule: a version without build component has lower
+ precedence than one with a build component.
"""
- if a == b:
- return 0
+ if a and b:
+ return identifier_list_cmp(a, b)
+ elif a:
+ # Versions with build field have higher precedence
+ return 1
+ elif b:
+ return -1
else:
- return NotImplemented
+ return 0
def make_optional(orig_cmp_fun):
"""Convert a cmp-like function to consider 'None == *'."""
@@ -326,7 +341,10 @@ class Version(object):
build_cmp,
]
- def __compare(self, other):
+ def __cmp__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+
field_pairs = zip(self, other)
comparison_functions = self._comparison_functions(partial=self.partial or other.partial)
comparisons = zip(comparison_functions, self, other)
@@ -338,48 +356,44 @@ class Version(object):
return 0
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+
+ return self.__cmp__(other) == 0
+
def __hash__(self):
return hash((self.major, self.minor, self.patch, self.prerelease, self.build))
- def __cmp__(self, other):
+ def __ne__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
- return self.__compare(other)
- def __compare_helper(self, other, condition, notimpl_target):
- """Helper for comparison.
+ return self.__cmp__(other) != 0
- Allows the caller to provide:
- - The condition
- - The return value if the comparison is meaningless (ie versions with
- build metadata).
- """
+ def __lt__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
- cmp_res = self.__cmp__(other)
- if cmp_res is NotImplemented:
- return notimpl_target
-
- return condition(cmp_res)
-
- def __eq__(self, other):
- return self.__compare_helper(other, lambda x: x == 0, notimpl_target=False)
-
- def __ne__(self, other):
- return self.__compare_helper(other, lambda x: x != 0, notimpl_target=True)
-
- def __lt__(self, other):
- return self.__compare_helper(other, lambda x: x < 0, notimpl_target=False)
+ return self.__cmp__(other) < 0
def __le__(self, other):
- return self.__compare_helper(other, lambda x: x <= 0, notimpl_target=False)
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+
+ return self.__cmp__(other) <= 0
def __gt__(self, other):
- return self.__compare_helper(other, lambda x: x > 0, notimpl_target=False)
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+
+ return self.__cmp__(other) > 0
def __ge__(self, other):
- return self.__compare_helper(other, lambda x: x >= 0, notimpl_target=False)
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+
+ return self.__cmp__(other) >= 0
class SpecItem(object):
@@ -415,10 +429,6 @@ class SpecItem(object):
kind, version = match.groups()
spec = Version(version, partial=True)
- if spec.build is not None and kind not in (cls.KIND_EQUAL, cls.KIND_NEQ):
- raise ValueError(
- "Invalid requirement specification %r: build numbers have no ordering."
- % requirement_string)
return (kind, spec)
def match(self, version):
diff --git a/semantic_version/compat.py b/semantic_version/compat.py
index 4dd60fe..bea6f67 100644
--- a/semantic_version/compat.py
+++ b/semantic_version/compat.py
@@ -2,14 +2,17 @@
# Copyright (c) 2012-2014 The python-semanticversion project
# This code is distributed under the two-clause BSD License.
+import sys
-def base_cmp(x, y):
- if x == y:
- return 0
- elif x > y:
- return 1
- elif x < y:
- return -1
- else:
- # Fix Py2's behavior: cmp(x, y) returns -1 for unorderable types
- return NotImplemented
+is_python2 = (sys.version_info[0] == 2)
+
+if is_python2: # pragma: no cover
+ base_cmp = cmp
+else: # pragma: no cover
+ def base_cmp(x, y):
+ if x < y:
+ return -1
+ elif x > y:
+ return 1
+ else:
+ return 0
| Version bump from prereleases not working as expected
I was expecting that a prerelease version, when bumped, would go up to the 'full' (release) version without bumping the main version number if not needed.
It probably makes more sense if I give examples.
What I expected:
```
1.0.0-dev --[major]--> 1.0.0
1.0.1-dev --[major]--> 2.0.0
1.1.0-dev --[major]--> 2.0.0
1.0.0-dev --[minor]--> 1.0.0
1.0.1-dev --[minor]--> 1.1.0
1.1.0-dev --[minor]--> 1.1.0
1.0.0-dev --[patch]--> 1.0.0
1.0.1-dev --[patch]--> 1.0.1
1.1.0-dev --[patch]--> 1.1.0
```
What currently happens:
```
1.0.0-dev --[major]--> 2.0.0
1.0.1-dev --[major]--> 2.0.0
1.1.0-dev --[major]--> 2.0.0
1.0.0-dev --[minor]--> 1.1.0
1.0.1-dev --[minor]--> 1.1.0
1.1.0-dev --[minor]--> 1.2.0
1.0.0-dev --[patch]--> 1.0.1
1.0.1-dev --[patch]--> 1.0.2
1.1.0-dev --[patch]--> 1.1.1
```
Is this a bug in the implementation, or have I missed something in the spec? Thanks!
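For reference, here is a plain-function sketch of the rule the expected table encodes (illustrative helpers only, not the library's API; they take the version components directly and treat `prerelease` as a boolean): a prerelease finalizes in place when every component below the bumped one is already zero, and bumps normally otherwise.
```python
def next_major(major, minor, patch, prerelease):
    # 1.0.0-dev finalizes to 1.0.0; 1.0.1-dev still bumps to 2.0.0
    if prerelease and minor == 0 and patch == 0:
        return (major, 0, 0)
    return (major + 1, 0, 0)

def next_minor(major, minor, patch, prerelease):
    # 1.1.0-dev finalizes to 1.1.0; 1.0.1-dev bumps to 1.1.0
    if prerelease and patch == 0:
        return (major, minor, 0)
    return (major, minor + 1, 0)

def next_patch(major, minor, patch, prerelease):
    # any prerelease finalizes to its own release version
    if prerelease:
        return (major, minor, patch)
    return (major, minor, patch + 1)

assert next_major(1, 0, 0, True) == (1, 0, 0)  # 1.0.0-dev --[major]--> 1.0.0
assert next_minor(1, 0, 1, True) == (1, 1, 0)  # 1.0.1-dev --[minor]--> 1.1.0
assert next_patch(1, 1, 0, True) == (1, 1, 0)  # 1.1.0-dev --[patch]--> 1.1.0
```
This is the same check the patch adds to `next_major`/`next_minor`/`next_patch` in `semantic_version/base.py` above (the patch spells the zero test as `is 0` rather than `== 0`).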
| rbarrois/python-semanticversion | diff --git a/tests/test_base.py b/tests/test_base.py
index ae23d86..00c1216 100755
--- a/tests/test_base.py
+++ b/tests/test_base.py
@@ -64,7 +64,7 @@ class TopLevelTestCase(unittest.TestCase):
('0.1.1', '0.1.1', 0),
('0.1.1', '0.1.0', 1),
('0.1.0-alpha', '0.1.0', -1),
- ('0.1.0-alpha+2', '0.1.0-alpha', NotImplemented),
+ ('0.1.0-alpha+2', '0.1.0-alpha', 1),
)
def test_compare(self):
@@ -179,6 +179,7 @@ class VersionTestCase(unittest.TestCase):
'1.1.2': (1, 1, 2, None, None),
'1.1.3-rc4.5': (1, 1, 3, ('rc4', '5'), None),
'1.0.0-': (1, 0, 0, (), None),
+ '1.0.0+': (1, 0, 0, (), ()),
'1.0.0-rc.1+build.1': (1, 0, 0, ('rc', '1'), ('build', '1')),
'1.0.0+0.3.7': (1, 0, 0, (), ('0', '3', '7')),
'1.3.7+build': (1, 3, 7, (), ('build',)),
@@ -241,11 +242,111 @@ class VersionTestCase(unittest.TestCase):
self.assertTrue(v != '0.1.0')
self.assertFalse(v == '0.1.0')
- def test_bump_versions(self):
+ def test_bump_clean_versions(self):
# We Test each property explicitly as the == comparator for versions
# does not distinguish between prerelease or builds for equality.
+ v = base.Version('1.0.0+build')
+ v = v.next_major()
+ self.assertEqual(v.major, 2)
+ self.assertEqual(v.minor, 0)
+ self.assertEqual(v.patch, 0)
+ self.assertEqual(v.prerelease, ())
+ self.assertEqual(v.build, ())
+
+ v = base.Version('1.0.0+build')
+ v = v.next_minor()
+ self.assertEqual(v.major, 1)
+ self.assertEqual(v.minor, 1)
+ self.assertEqual(v.patch, 0)
+ self.assertEqual(v.prerelease, ())
+ self.assertEqual(v.build, ())
+
+ v = base.Version('1.0.0+build')
+ v = v.next_patch()
+ self.assertEqual(v.major, 1)
+ self.assertEqual(v.minor, 0)
+ self.assertEqual(v.patch, 1)
+ self.assertEqual(v.prerelease, ())
+ self.assertEqual(v.build, ())
+
+ v = base.Version('1.1.0+build')
+ v = v.next_major()
+ self.assertEqual(v.major, 2)
+ self.assertEqual(v.minor, 0)
+ self.assertEqual(v.patch, 0)
+ self.assertEqual(v.prerelease, ())
+ self.assertEqual(v.build, ())
+
+ v = base.Version('1.1.0+build')
+ v = v.next_minor()
+ self.assertEqual(v.major, 1)
+ self.assertEqual(v.minor, 2)
+ self.assertEqual(v.patch, 0)
+ self.assertEqual(v.prerelease, ())
+ self.assertEqual(v.build, ())
+
+ v = base.Version('1.1.0+build')
+ v = v.next_patch()
+ self.assertEqual(v.major, 1)
+ self.assertEqual(v.minor, 1)
+ self.assertEqual(v.patch, 1)
+ self.assertEqual(v.prerelease, ())
+ self.assertEqual(v.build, ())
+
+ v = base.Version('1.0.1+build')
+ v = v.next_major()
+ self.assertEqual(v.major, 2)
+ self.assertEqual(v.minor, 0)
+ self.assertEqual(v.patch, 0)
+ self.assertEqual(v.prerelease, ())
+ self.assertEqual(v.build, ())
+
+ v = base.Version('1.0.1+build')
+ v = v.next_minor()
+ self.assertEqual(v.major, 1)
+ self.assertEqual(v.minor, 1)
+ self.assertEqual(v.patch, 0)
+ self.assertEqual(v.prerelease, ())
+ self.assertEqual(v.build, ())
+
+ v = base.Version('1.0.1+build')
+ v = v.next_patch()
+ self.assertEqual(v.major, 1)
+ self.assertEqual(v.minor, 0)
+ self.assertEqual(v.patch, 2)
+ self.assertEqual(v.prerelease, ())
+ self.assertEqual(v.build, ())
+
+ def test_bump_prerelease_versions(self):
+ # We Test each property explicitly as the == comparator for versions
+ # does not distinguish between prerelease or builds for equality.
+
+ v = base.Version('1.0.0-pre+build')
+ v = v.next_major()
+ self.assertEqual(v.major, 1)
+ self.assertEqual(v.minor, 0)
+ self.assertEqual(v.patch, 0)
+ self.assertEqual(v.prerelease, ())
+ self.assertEqual(v.build, ())
+
+ v = base.Version('1.0.0-pre+build')
+ v = v.next_minor()
+ self.assertEqual(v.major, 1)
+ self.assertEqual(v.minor, 0)
+ self.assertEqual(v.patch, 0)
+ self.assertEqual(v.prerelease, ())
+ self.assertEqual(v.build, ())
+
v = base.Version('1.0.0-pre+build')
+ v = v.next_patch()
+ self.assertEqual(v.major, 1)
+ self.assertEqual(v.minor, 0)
+ self.assertEqual(v.patch, 0)
+ self.assertEqual(v.prerelease, ())
+ self.assertEqual(v.build, ())
+
+ v = base.Version('1.1.0-pre+build')
v = v.next_major()
self.assertEqual(v.major, 2)
self.assertEqual(v.minor, 0)
@@ -253,7 +354,7 @@ class VersionTestCase(unittest.TestCase):
self.assertEqual(v.prerelease, ())
self.assertEqual(v.build, ())
- v = base.Version('1.0.1-pre+build')
+ v = base.Version('1.1.0-pre+build')
v = v.next_minor()
self.assertEqual(v.major, 1)
self.assertEqual(v.minor, 1)
@@ -265,35 +366,48 @@ class VersionTestCase(unittest.TestCase):
v = v.next_patch()
self.assertEqual(v.major, 1)
self.assertEqual(v.minor, 1)
- self.assertEqual(v.patch, 1)
+ self.assertEqual(v.patch, 0)
self.assertEqual(v.prerelease, ())
self.assertEqual(v.build, ())
+ v = base.Version('1.0.1-pre+build')
+ v = v.next_major()
+ self.assertEqual(v.major, 2)
+ self.assertEqual(v.minor, 0)
+ self.assertEqual(v.patch, 0)
+ self.assertEqual(v.prerelease, ())
+ self.assertEqual(v.build, ())
-class SpecItemTestCase(unittest.TestCase):
- invalids = [
- '<=0.1.1+build3',
- '<=0.1.1+',
- '>0.2.3-rc2+',
- ]
+ v = base.Version('1.0.1-pre+build')
+ v = v.next_minor()
+ self.assertEqual(v.major, 1)
+ self.assertEqual(v.minor, 1)
+ self.assertEqual(v.patch, 0)
+ self.assertEqual(v.prerelease, ())
+ self.assertEqual(v.build, ())
+
+ v = base.Version('1.0.1-pre+build')
+ v = v.next_patch()
+ self.assertEqual(v.major, 1)
+ self.assertEqual(v.minor, 0)
+ self.assertEqual(v.patch, 1)
+ self.assertEqual(v.prerelease, ())
+ self.assertEqual(v.build, ())
- def test_invalids(self):
- for invalid in self.invalids:
- with self.assertRaises(ValueError, msg="SpecItem(%r) should be invalid" % invalid):
- _v = base.SpecItem(invalid)
+class SpecItemTestCase(unittest.TestCase):
components = {
'==0.1.0': (base.SpecItem.KIND_EQUAL, 0, 1, 0, None, None),
'==0.1.2-rc3': (base.SpecItem.KIND_EQUAL, 0, 1, 2, ('rc3',), None),
'==0.1.2+build3.14': (base.SpecItem.KIND_EQUAL, 0, 1, 2, (), ('build3', '14')),
- '<=0.1.1': (base.SpecItem.KIND_LTE, 0, 1, 1, None, None),
+ '<=0.1.1+': (base.SpecItem.KIND_LTE, 0, 1, 1, (), ()),
'<0.1.1': (base.SpecItem.KIND_LT, 0, 1, 1, None, None),
'<=0.1.1': (base.SpecItem.KIND_LTE, 0, 1, 1, None, None),
- '!=0.1.1+': (base.SpecItem.KIND_NEQ, 0, 1, 1, (), ()),
'<=0.1.1-': (base.SpecItem.KIND_LTE, 0, 1, 1, (), None),
'>=0.2.3-rc2': (base.SpecItem.KIND_GTE, 0, 2, 3, ('rc2',), None),
+ '>0.2.3-rc2+': (base.SpecItem.KIND_GT, 0, 2, 3, ('rc2',), ()),
'>=2.0.0': (base.SpecItem.KIND_GTE, 2, 0, 0, None, None),
- '!=0.1.1+rc3': (base.SpecItem.KIND_NEQ, 0, 1, 1, (), ('rc3',)),
+ '!=0.1.1+': (base.SpecItem.KIND_NEQ, 0, 1, 1, (), ()),
'!=0.3.0': (base.SpecItem.KIND_NEQ, 0, 3, 0, None, None),
}
@@ -345,17 +459,13 @@ class SpecItemTestCase(unittest.TestCase):
['0.2.3-rc3', '0.2.3', '0.2.3+1', '0.2.3-rc2', '0.2.3-rc2+1'],
['0.2.3-rc1', '0.2.2'],
),
- '==0.2.3+': (
- ['0.2.3'],
- ['0.2.3+rc1', '0.2.4', '0.2.3-rc2'],
- ),
- '!=0.2.3-rc2+12': (
- ['0.2.3-rc3', '0.2.3', '0.2.3-rc2+1', '0.2.4', '0.2.3-rc3+12'],
- ['0.2.3-rc2+12'],
+ '>0.2.3-rc2+': (
+ ['0.2.3-rc3', '0.2.3', '0.2.3-rc2+1'],
+ ['0.2.3-rc1', '0.2.2', '0.2.3-rc2'],
),
- '==2.0.0+b1': (
- ['2.0.0+b1'],
- ['2.1.1', '1.9.9', '1.9.9999', '2.0.0', '2.0.0-rc4'],
+ '>2.0.0+': (
+ ['2.1.1', '2.0.0+b1', '3.1.4'],
+ ['1.9.9', '1.9.9999', '2.0.0', '2.0.0-rc4'],
),
'!=0.1.1': (
['0.1.2', '0.1.0', '1.4.2'],
@@ -454,17 +564,13 @@ class SpecTestCase(unittest.TestCase):
self.assertTrue(repr(base.SpecItem(spec_text)) in repr(spec_list))
matches = {
- # At least 0.1.1 including pre-releases, less than 0.1.2 excluding pre-releases
'>=0.1.1,<0.1.2': (
['0.1.1', '0.1.1+4', '0.1.1-alpha'],
['0.1.2-alpha', '0.1.2', '1.3.4'],
),
- # At least 0.1.0 without pre-releases, less than 0.1.4 excluding pre-releases,
- # neither 0.1.3-rc1 nor any build of that version,
- # not 0.1.0+b3 precisely
- '>=0.1.0-,!=0.1.3-rc1,!=0.1.0+b3,<0.1.4': (
+ '>=0.1.0+,!=0.1.3-rc1,<0.1.4': (
['0.1.1', '0.1.0+b4', '0.1.2', '0.1.3-rc2'],
- ['0.0.1', '0.1.0+b3', '0.1.4', '0.1.4-alpha', '0.1.3-rc1+4',
+ ['0.0.1', '0.1.4', '0.1.4-alpha', '0.1.3-rc1+4',
'0.1.0-alpha', '0.2.2', '0.1.4-rc1'],
),
}
diff --git a/tests/test_match.py b/tests/test_match.py
index 6926e0a..155a612 100755
--- a/tests/test_match.py
+++ b/tests/test_match.py
@@ -15,7 +15,6 @@ class MatchTestCase(unittest.TestCase):
'<=0.1.4a',
'>0.1.1.1',
'~0.1.2-rc23,1',
- '<0.1.2-rc1.3-14.15+build.2012-01-01.11h34',
]
valid_specs = [
@@ -26,7 +25,7 @@ class MatchTestCase(unittest.TestCase):
'>0.1.2-rc1',
'>=0.1.2-rc1.3.4',
'==0.1.2+build42-12.2012-01-01.12h23',
- '!=0.1.2-rc1.3-14.15+build.2012-01-01.11h34',
+ '<0.1.2-rc1.3-14.15+build.2012-01-01.11h34',
]
matches = {
@@ -54,19 +53,11 @@ class MatchTestCase(unittest.TestCase):
'0.1.2',
'0.1.2+build4',
],
- '!=0.1.2+': [
- '0.1.2+1',
- '0.1.2-rc1',
- ],
- '!=0.1.2-': [
+ '<0.1.2+': [
'0.1.1',
'0.1.2-rc1',
- ],
- '!=0.1.2+345': [
- '0.1.1',
- '0.1.2-rc1+345',
- '0.1.2+346',
- '0.2.3+345',
+ '0.1.2-rc1.3.4',
+ '0.1.2-rc1+build4.5',
],
'>=0.1.1': [
'0.1.1',
@@ -81,6 +72,12 @@ class MatchTestCase(unittest.TestCase):
'0.2.0',
'1.0.0',
],
+ '>0.1.1+': [
+ '0.1.1+b2',
+ '0.1.2-rc1',
+ '1.1.1',
+ '2.0.4',
+ ],
'<0.1.1-': [
'0.1.1-alpha',
'0.1.1-rc4',
@@ -90,8 +87,7 @@ class MatchTestCase(unittest.TestCase):
def test_invalid(self):
for invalid in self.invalid_specs:
- with self.assertRaises(ValueError, msg="Spec(%r) should be invalid" % invalid):
- semantic_version.Spec(invalid)
+ self.assertRaises(ValueError, semantic_version.Spec, invalid)
def test_simple(self):
for valid in self.valid_specs:
@@ -126,9 +122,11 @@ class MatchTestCase(unittest.TestCase):
self.assertFalse(version in strict_spec, "%r should not be in %r" % (version, strict_spec))
def test_build_check(self):
- spec = semantic_version.Spec('<=0.1.1-rc1')
+ strict_spec = semantic_version.Spec('<=0.1.1-rc1+')
+ lax_spec = semantic_version.Spec('<=0.1.1-rc1')
version = semantic_version.Version('0.1.1-rc1+4.2')
- self.assertTrue(version in spec, "%r should be in %r" % (version, spec))
+ self.assertTrue(version in lax_spec, "%r should be in %r" % (version, lax_spec))
+ self.assertFalse(version in strict_spec, "%r should not be in %r" % (version, strict_spec))
if __name__ == '__main__': # pragma: no cover
diff --git a/tests/test_parsing.py b/tests/test_parsing.py
index c7651d2..5112ca5 100755
--- a/tests/test_parsing.py
+++ b/tests/test_parsing.py
@@ -3,7 +3,6 @@
# Copyright (c) 2012-2014 The python-semanticversion project
# This code is distributed under the two-clause BSD License.
-import itertools
import unittest
import semantic_version
@@ -45,8 +44,12 @@ class ComparisonTestCase(unittest.TestCase):
'1.0.0-beta.2',
'1.0.0-beta.11',
'1.0.0-rc.1',
+ '1.0.0-rc.1+build.1',
'1.0.0',
+ '1.0.0+0.3.7',
'1.3.7+build',
+ '1.3.7+build.2.b8f12d7',
+ '1.3.7+build.11.e0f985a',
]
def test_comparisons(self):
@@ -64,36 +67,6 @@ class ComparisonTestCase(unittest.TestCase):
cmp_res = -1 if i < j else (1 if i > j else 0)
self.assertEqual(cmp_res, semantic_version.compare(first, second))
- unordered = [
- [
- '1.0.0-rc.1',
- '1.0.0-rc.1+build.1',
- ],
- [
- '1.0.0',
- '1.0.0+0.3.7',
- ],
- [
- '1.3.7',
- '1.3.7+build',
- '1.3.7+build.2.b8f12d7',
- '1.3.7+build.11.e0f985a',
- ],
- ]
-
- def test_unordered(self):
- for group in self.unordered:
- for a, b in itertools.combinations(group, 2):
- v1 = semantic_version.Version(a)
- v2 = semantic_version.Version(b)
- self.assertTrue(v1 == v1, "%r != %r" % (v1, v1))
- self.assertFalse(v1 != v1, "%r != %r" % (v1, v1))
- self.assertFalse(v1 == v2, "%r == %r" % (v1, v2))
- self.assertTrue(v1 != v2, "%r !!= %r" % (v1, v2))
- self.assertFalse(v1 < v2, "%r !< %r" % (v1, v2))
- self.assertFalse(v1 <= v2, "%r !<= %r" % (v1, v2))
- self.assertFalse(v2 > v1, "%r !> %r" % (v2, v1))
- self.assertFalse(v2 >= v1, "%r !>= %r" % (v2, v1))
if __name__ == '__main__': # pragma: no cover
unittest.main()
diff --git a/tests/test_spec.py b/tests/test_spec.py
index a13cb0b..7a645f9 100644
--- a/tests/test_spec.py
+++ b/tests/test_spec.py
@@ -154,3 +154,10 @@ class FormatTests(unittest.TestCase):
self.assertLess(Version('1.0.0-beta.2'), Version('1.0.0-beta.11'))
self.assertLess(Version('1.0.0-beta.11'), Version('1.0.0-rc.1'))
self.assertLess(Version('1.0.0-rc.1'), Version('1.0.0'))
+
+
+
+class PrecedenceTestCase(unittest.TestCase):
+ pass
+
+
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 6
} | 2.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"coverage"
],
"pre_install": null,
"python": "3.4",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
coverage==6.2
importlib-metadata==4.8.3
iniconfig==1.1.1
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
-e git+https://github.com/rbarrois/python-semanticversion.git@2ed3d39c291080c61edd9139370939e1fdc3209a#egg=semantic_version
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: python-semanticversion
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/python-semanticversion
| [
"tests/test_base.py::TopLevelTestCase::test_compare",
"tests/test_base.py::VersionTestCase::test_bump_prerelease_versions",
"tests/test_base.py::SpecItemTestCase::test_components",
"tests/test_base.py::SpecItemTestCase::test_matches",
"tests/test_base.py::SpecTestCase::test_matches",
"tests/test_match.py::MatchTestCase::test_build_check",
"tests/test_match.py::MatchTestCase::test_match",
"tests/test_match.py::MatchTestCase::test_simple",
"tests/test_parsing.py::ComparisonTestCase::test_comparisons"
]
| []
| [
"tests/test_base.py::ComparisonTestCase::test_identifier_cmp",
"tests/test_base.py::ComparisonTestCase::test_identifier_list_cmp",
"tests/test_base.py::TopLevelTestCase::test_match",
"tests/test_base.py::TopLevelTestCase::test_validate_invalid",
"tests/test_base.py::TopLevelTestCase::test_validate_valid",
"tests/test_base.py::VersionTestCase::test_bump_clean_versions",
"tests/test_base.py::VersionTestCase::test_compare_partial_to_self",
"tests/test_base.py::VersionTestCase::test_compare_to_self",
"tests/test_base.py::VersionTestCase::test_hash",
"tests/test_base.py::VersionTestCase::test_invalid_comparisons",
"tests/test_base.py::VersionTestCase::test_parsing",
"tests/test_base.py::VersionTestCase::test_parsing_partials",
"tests/test_base.py::VersionTestCase::test_str",
"tests/test_base.py::VersionTestCase::test_str_partials",
"tests/test_base.py::SpecItemTestCase::test_equality",
"tests/test_base.py::SpecItemTestCase::test_hash",
"tests/test_base.py::SpecItemTestCase::test_to_string",
"tests/test_base.py::CoerceTestCase::test_coerce",
"tests/test_base.py::CoerceTestCase::test_invalid",
"tests/test_base.py::SpecTestCase::test_contains",
"tests/test_base.py::SpecTestCase::test_equality",
"tests/test_base.py::SpecTestCase::test_filter_compatible",
"tests/test_base.py::SpecTestCase::test_filter_empty",
"tests/test_base.py::SpecTestCase::test_filter_incompatible",
"tests/test_base.py::SpecTestCase::test_hash",
"tests/test_base.py::SpecTestCase::test_parsing",
"tests/test_base.py::SpecTestCase::test_parsing_split",
"tests/test_base.py::SpecTestCase::test_select_compatible",
"tests/test_base.py::SpecTestCase::test_select_empty",
"tests/test_base.py::SpecTestCase::test_select_incompatible",
"tests/test_match.py::MatchTestCase::test_contains",
"tests/test_match.py::MatchTestCase::test_invalid",
"tests/test_match.py::MatchTestCase::test_prerelease_check",
"tests/test_parsing.py::ParsingTestCase::test_invalid",
"tests/test_parsing.py::ParsingTestCase::test_simple",
"tests/test_spec.py::FormatTests::test_build",
"tests/test_spec.py::FormatTests::test_major_minor_patch",
"tests/test_spec.py::FormatTests::test_precedence",
"tests/test_spec.py::FormatTests::test_prerelease"
]
| []
| BSD 2-Clause "Simplified" License | 240 | [
"README.rst",
"CREDITS",
"semantic_version/compat.py",
"ChangeLog",
"docs/reference.rst",
"semantic_version/base.py"
]
| [
"README.rst",
"CREDITS",
"semantic_version/compat.py",
"ChangeLog",
"docs/reference.rst",
"semantic_version/base.py"
]
|
|
pypa__twine-131 | a0c87357d9d5d588082c9a59f6efc6f6bc3d3498 | 2015-09-19 01:38:19 | f487b7da9c42e4932bc33bf10d70cdc59fd16fd5 | diff --git a/twine/package.py b/twine/package.py
index 230e5ac..e80116a 100644
--- a/twine/package.py
+++ b/twine/package.py
@@ -143,9 +143,10 @@ class PackageFile(object):
def sign(self, sign_with, identity):
print("Signing {0}".format(self.basefilename))
- gpg_args = (sign_with, "--detach-sign", "-a", self.filename)
+ gpg_args = (sign_with, "--detach-sign")
if identity:
gpg_args += ("--local-user", identity)
+ gpg_args += ("-a", self.filename)
subprocess.check_call(gpg_args)
with open(self.signed_filename, "rb") as gpg:
| Signing fails in version 1.6.0
Signing has stopped working in the latest version, with the following message:
```
You need a passphrase to unlock the secret key for
user: "foo bar <[email protected]>"
1024-bit DSA key, ID ABCDEFGH, created 2015-01-01
gpg: can't open `--local-user': No such file or directory
gpg: signing failed: file open error
```
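The one-line cause is visible in the `twine/package.py` diff above: in 1.6.0 the identity flags were appended after the file operand, and the log shows gpg then trying to open `--local-user` as a second input file. A condensed before/after sketch (the filename and key id here are hypothetical):
```python
import subprocess

filename = "dist/example-1.0.tar.gz"  # hypothetical distribution file
identity = "me"                       # hypothetical gpg key id

# 1.6.0 (broken): "--local-user" lands after the file operand, so gpg
# treats it as another file to sign -- "can't open `--local-user'".
broken_args = ("gpg", "--detach-sign", "-a", filename, "--local-user", identity)

# Patched: all options first, the file operand last.
fixed_args = ("gpg", "--detach-sign", "--local-user", identity, "-a", filename)
subprocess.check_call(fixed_args)  # what twine's sign() then runs
```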
1.5.0 is ok. | pypa/twine | diff --git a/tests/test_package.py b/tests/test_package.py
index 7b15ba9..fcc827a 100644
--- a/tests/test_package.py
+++ b/tests/test_package.py
@@ -53,5 +53,5 @@ def test_sign_file_with_identity(monkeypatch):
pkg.sign('gpg', 'identity')
except IOError:
pass
- args = ('gpg', '--detach-sign', '-a', filename, '--local-user', 'identity')
+ args = ('gpg', '--detach-sign', '--local-user', 'identity', '-a', filename)
assert replaced_check_call.calls == [pretend.call(args)]
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 1.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"coverage",
"pretend",
"flake8"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
coverage==7.8.0
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
flake8==7.2.0
idna==3.10
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
mccabe==0.7.0
packaging @ file:///croot/packaging_1734472117206/work
pkginfo==1.12.1.2
pluggy @ file:///croot/pluggy_1733169602837/work
pretend==1.0.9
pycodestyle==2.13.0
pyflakes==3.3.1
pytest @ file:///croot/pytest_1738938843180/work
requests==2.32.3
requests-toolbelt==1.0.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
-e git+https://github.com/pypa/twine.git@a0c87357d9d5d588082c9a59f6efc6f6bc3d3498#egg=twine
urllib3==2.3.0
| name: twine
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- coverage==7.8.0
- flake8==7.2.0
- idna==3.10
- mccabe==0.7.0
- pkginfo==1.12.1.2
- pretend==1.0.9
- pycodestyle==2.13.0
- pyflakes==3.3.1
- requests==2.32.3
- requests-toolbelt==1.0.0
- urllib3==2.3.0
prefix: /opt/conda/envs/twine
| [
"tests/test_package.py::test_sign_file_with_identity"
]
| []
| [
"tests/test_package.py::test_sign_file"
]
| []
| Apache License 2.0 | 241 | [
"twine/package.py"
]
| [
"twine/package.py"
]
|
|
tomerfiliba__plumbum-225 | 8d19a7b6c1f1a9caa2eb03f461e9a31a085d7ad5 | 2015-09-21 23:27:31 | 8d19a7b6c1f1a9caa2eb03f461e9a31a085d7ad5 | diff --git a/docs/cli.rst b/docs/cli.rst
index 559fa94..18e46a5 100644
--- a/docs/cli.rst
+++ b/docs/cli.rst
@@ -198,6 +198,28 @@ these values. Here's an example ::
All of these convert the argument to a :ref:`local path <guide-paths>`.
+Positional Arguments
+^^^^^^^^^^^^^^^^^^^^^
+
+You can supply positional argument validators using the ``cli.positional`` decorator. Simply
+pass the validators in the decorator matching the names in the main function. For example::
+
+ class MyApp(cli.Application):
+        @positional(cli.ExistingFile, cli.NonexistentPath)
+ def main(self, infile, *outfiles):
+ "infile is a path, outfiles are a list of paths, proper errors are given"
+
+If you only want to run your application in Python 3, you can also use annotations to
+specify the validators. For example::
+
+ class MyApp(cli.Application):
+        def main(self, infile : cli.ExistingFile, *outfiles : cli.NonexistentPath):
+ "Identical to above MyApp"
+
+Annotations are ignored if the positional decorator is present.
+
+.. versionadded:: 1.5.0
+
Repeatable Switches
^^^^^^^^^^^^^^^^^^^
Many times, you would like to allow a certain switch to be given multiple times. For instance,
diff --git a/plumbum/cli/__init__.py b/plumbum/cli/__init__.py
index 7ac0e6e..068aaef 100644
--- a/plumbum/cli/__init__.py
+++ b/plumbum/cli/__init__.py
@@ -1,3 +1,3 @@
-from plumbum.cli.switches import SwitchError, switch, autoswitch, SwitchAttr, Flag, CountOf
+from plumbum.cli.switches import SwitchError, switch, autoswitch, SwitchAttr, Flag, CountOf, positional
from plumbum.cli.switches import Range, Set, ExistingDirectory, ExistingFile, NonexistentPath, Predicate
from plumbum.cli.application import Application, ColorfulApplication
diff --git a/plumbum/cli/application.py b/plumbum/cli/application.py
index 3600683..8d7786a 100644
--- a/plumbum/cli/application.py
+++ b/plumbum/cli/application.py
@@ -1,7 +1,6 @@
from __future__ import division, print_function, absolute_import
import os
import sys
-import inspect
import functools
from textwrap import TextWrapper
from collections import defaultdict
@@ -299,7 +298,7 @@ class Application(object):
continue
# handle argument
- val = self._handle_argument(val, swinfo, name)
+ val = self._handle_argument(val, swinfo.argtype, name)
if swinfo.func in swfuncs:
if swinfo.list:
@@ -329,7 +328,7 @@ class Application(object):
if swinfo.func in swfuncs:
continue # skip if overridden by command line arguments
- val = self._handle_argument(envval, swinfo, env)
+ val = self._handle_argument(envval, swinfo.argtype, env)
envname = "$%s" % (env,)
if swinfo.list:
# multiple values over environment variables are not supported,
@@ -343,18 +342,19 @@ class Application(object):
return swfuncs, tailargs
@classmethod
- def autocomplete(self, argv):
+ def autocomplete(cls, argv):
"""This is supplied to make subclassing and testing argument completion methods easier"""
pass
- def _handle_argument(self, val, swinfo, name):
- if swinfo.argtype:
+ @staticmethod
+ def _handle_argument(val, argtype, name):
+ if argtype:
try:
- return swinfo.argtype(val)
+ return argtype(val)
except (TypeError, ValueError):
ex = sys.exc_info()[1] # compat
raise WrongArgumentType("Argument of %s expected to be %r, not %r:\n %r" % (
- name, swinfo.argtype, val, ex))
+ name, argtype, val, ex))
else:
return NotImplemented
@@ -388,20 +388,58 @@ class Application(object):
raise SwitchCombinationError("Given %s, the following are invalid %r" %
(swfuncs[func].swname, [swfuncs[f].swname for f in invalid]))
- m_args, m_varargs, _, m_defaults = six.getargspec(self.main)
- max_args = six.MAXSIZE if m_varargs else len(m_args) - 1
- min_args = len(m_args) - 1 - (len(m_defaults) if m_defaults else 0)
+ m = six.getfullargspec(self.main)
+ max_args = six.MAXSIZE if m.varargs else len(m.args) - 1
+ min_args = len(m.args) - 1 - (len(m.defaults) if m.defaults else 0)
if len(tailargs) < min_args:
raise PositionalArgumentsError("Expected at least %d positional arguments, got %r" %
(min_args, tailargs))
elif len(tailargs) > max_args:
raise PositionalArgumentsError("Expected at most %d positional arguments, got %r" %
- (max_args, tailargs))
+ (max_args, tailargs))
+
+        # Positional argument validation
+ if hasattr(self.main, 'positional'):
+ tailargs = self._positional_validate(tailargs, self.main.positional, self.main.positional_varargs, m.args[1:], m.varargs)
+
+ elif hasattr(m, 'annotations'):
+ args_names = list(m.args[1:])
+ positional = [None]*len(args_names)
+ varargs = None
+
+
+ # All args are positional, so convert kargs to positional
+ for item in m.annotations:
+ if item == m.varargs:
+ varargs = m.annotations[item]
+ else:
+ positional[args_names.index(item)] = m.annotations[item]
+
+ tailargs = self._positional_validate(tailargs, positional, varargs,
+ m.args[1:], m.varargs)
ordered = [(f, a) for _, f, a in
sorted([(sf.index, f, sf.val) for f, sf in swfuncs.items()])]
return ordered, tailargs
+ def _positional_validate(self, args, validator_list, varargs, argnames, varargname):
+ """Makes sure args follows the validation given input"""
+ out_args = list(args)
+
+ for i in range(min(len(args),len(validator_list))):
+
+ if validator_list[i] is not None:
+ out_args[i] = self._handle_argument(args[i], validator_list[i], argnames[i])
+
+ if len(args) > len(validator_list):
+ if varargs is not None:
+ out_args[len(validator_list):] = [
+ self._handle_argument(a, varargs, varargname) for a in args[len(validator_list):]]
+ else:
+ out_args[len(validator_list):] = args[len(validator_list):]
+
+ return out_args
+
@classmethod
def run(cls, argv = None, exit = True): # @ReservedAssignment
"""
@@ -473,23 +511,8 @@ class Application(object):
"""
inst = cls("")
- swfuncs = {}
- for index, (swname, val) in enumerate(switches.items(), 1):
- switch = getattr(cls, swname)
- swinfo = inst._switches_by_func[switch._switch_info.func]
- if isinstance(switch, CountOf):
- p = (range(val),)
- elif swinfo.list and not hasattr(val, "__iter__"):
- raise SwitchError("Switch %r must be a sequence (iterable)" % (swname,))
- elif not swinfo.argtype:
- # a flag
- if val not in (True, False, None, Flag):
- raise SwitchError("Switch %r is a boolean flag" % (swname,))
- p = ()
- else:
- p = (val,)
- swfuncs[swinfo.func] = SwitchParseInfo(swname, p, index)
-
+
+ swfuncs = inst._parse_kwd_args(switches)
ordered, tailargs = inst._validate_args(swfuncs, args)
for f, a in ordered:
f(inst, *a)
@@ -508,6 +531,26 @@ class Application(object):
return inst, retcode
+ def _parse_kwd_args(self, switches):
+ """Parses keywords (positional arguments), used by invoke."""
+ swfuncs = {}
+ for index, (swname, val) in enumerate(switches.items(), 1):
+ switch = getattr(type(self), swname)
+ swinfo = self._switches_by_func[switch._switch_info.func]
+ if isinstance(switch, CountOf):
+ p = (range(val),)
+ elif swinfo.list and not hasattr(val, "__iter__"):
+ raise SwitchError("Switch %r must be a sequence (iterable)" % (swname,))
+ elif not swinfo.argtype:
+ # a flag
+ if val not in (True, False, None, Flag):
+ raise SwitchError("Switch %r is a boolean flag" % (swname,))
+ p = ()
+ else:
+ p = (val,)
+ swfuncs[swinfo.func] = SwitchParseInfo(swname, p, index)
+ return swfuncs
+
def main(self, *args):
"""Implement me (no need to call super)"""
if self._subcommands:
@@ -555,13 +598,13 @@ class Application(object):
if self.DESCRIPTION:
print(self.COLOR_DISCRIPTION[self.DESCRIPTION.strip() + '\n'])
- m_args, m_varargs, _, m_defaults = six.getargspec(self.main)
- tailargs = m_args[1:] # skip self
- if m_defaults:
- for i, d in enumerate(reversed(m_defaults)):
+ m = six.getfullargspec(self.main)
+ tailargs = m.args[1:] # skip self
+ if m.defaults:
+ for i, d in enumerate(reversed(m.defaults)):
tailargs[-i - 1] = "[%s=%r]" % (tailargs[-i - 1], d)
- if m_varargs:
- tailargs.append("%s..." % (m_varargs,))
+ if m.varargs:
+ tailargs.append("%s..." % (m.varargs,))
tailargs = " ".join(tailargs)
with self.COLOR_USAGE:
diff --git a/plumbum/cli/switches.py b/plumbum/cli/switches.py
index e3fc915..2512b77 100644
--- a/plumbum/cli/switches.py
+++ b/plumbum/cli/switches.py
@@ -1,7 +1,8 @@
-import inspect
from plumbum.lib import six, getdoc
from plumbum import local
+from abc import abstractmethod
+
class SwitchError(Exception):
"""A general switch related-error (base class of all other switch errors)"""
@@ -133,7 +134,7 @@ def switch(names, argtype = None, argname = None, list = False, mandatory = Fals
def deco(func):
if argname is None:
- argspec = six.getargspec(func)[0]
+ argspec = six.getfullargspec(func).args
if len(argspec) == 2:
argname2 = argspec[1]
else:
@@ -252,10 +253,95 @@ class CountOf(SwitchAttr):
def __call__(self, inst, v):
self.__set__(inst, len(v))
+#===================================================================================================
+# Decorator for function that adds argument checking
+#===================================================================================================
+
+
+
+class positional(object):
+ """
+ Runs a validator on the main function for a class.
+ This should be used like this:
+
+ class MyApp(cli.Application):
+ @cli.positional(cli.Range(1,10), cli.ExistingFile)
+ def main(self, x, *f):
+ # x is a range, f's are all ExistingFile's)
+
+ Or, Python 3 only:
+
+ class MyApp(cli.Application):
+ def main(self, x : cli.Range(1,10), *f : cli.ExistingFile):
+ # x is a range, f's are all ExistingFile's)
+
+
+ If you do not want to validate on the annotations, use this decorator (
+ even if empty) to override annotation validation.
+
+ Validators should be callable, and should have a .choices() function with
+ possible choices. (For future argument completion, for example)
+
+ Default arguments do not go through the validator.
+
+ #TODO: Check with MyPy
+
+ """
+
+ def __init__(self, *args, **kargs):
+ self.args = args
+ self.kargs = kargs
+
+ def __call__(self, function):
+ m = six.getfullargspec(function)
+ args_names = list(m.args[1:])
+
+ positional = [None]*len(args_names)
+ varargs = None
+
+ for i in range(min(len(positional),len(self.args))):
+ positional[i] = self.args[i]
+
+ if len(args_names) + 1 == len(self.args):
+ varargs = self.args[-1]
+
+ # All args are positional, so convert kargs to positional
+ for item in self.kargs:
+ if item == m.varargs:
+ varargs = self.kargs[item]
+ else:
+ positional[args_names.index(item)] = self.kargs[item]
+
+ function.positional = positional
+ function.positional_varargs = varargs
+ return function
+
+class Validator(six.ABC):
+ __slots__ = ()
+
+ @abstractmethod
+ def __call__(self, obj):
+ "Must be implemented for a Validator to work"
+
+ def choices(self, partial=""):
+ """Should return set of valid choices, can be given optional partial info"""
+ return set()
+
+ def __repr__(self):
+ """If not overridden, will print the slots as args"""
+
+ slots = {}
+ for cls in self.__mro__:
+ for prop in getattr(cls, "__slots__", ()):
+ if prop[0] != '_':
+ slots[prop] = getattr(self, prop)
+ mystrs = ("{0} = {1}".format(name, slots[name]) for name in slots)
+ return "{0}({1})".format(self.__class__.__name__, ", ".join(mystrs))
+
#===================================================================================================
# Switch type validators
#===================================================================================================
-class Range(object):
+class Range(Validator):
"""
A switch-type validator that checks for the inclusion of a value in a certain range.
Usage::
@@ -266,6 +352,8 @@ class Range(object):
:param start: The minimal value
:param end: The maximal value
"""
+ __slots__ = ("start", "end")
+
def __init__(self, start, end):
self.start = start
self.end = end
@@ -276,18 +364,21 @@ class Range(object):
if obj < self.start or obj > self.end:
raise ValueError("Not in range [%d..%d]" % (self.start, self.end))
return obj
+ def choices(self, partial=""):
+ # TODO: Add partial handling
+ return set(range(self.start, self.end+1))
-class Set(object):
+class Set(Validator):
"""
A switch-type validator that checks that the value is contained in a defined
set of values. Usage::
class MyApp(Application):
- mode = SwitchAttr(["--mode"], Set("TCP", "UDP", case_insensitive = False))
+ mode = SwitchAttr(["--mode"], Set("TCP", "UDP", case_sensitive = False))
:param values: The set of values (strings)
- :param case_insensitive: A keyword argument that indicates whether to use case-sensitive
- comparison or not. The default is ``True``
+ :param case_sensitive: A keyword argument that indicates whether to use case-sensitive
+ comparison or not. The default is ``False``
"""
def __init__(self, *values, **kwargs):
self.case_sensitive = kwargs.pop("case_sensitive", False)
@@ -302,6 +393,9 @@ class Set(object):
if obj not in self.values:
raise ValueError("Expected one of %r" % (list(self.values.values()),))
return self.values[obj]
+ def choices(self, partial=""):
+ # TODO: Add case sensitive/insensitive parital completion
+ return set(self.values)
class Predicate(object):
"""A wrapper for a single-argument function with pretty printing"""
@@ -311,6 +405,8 @@ class Predicate(object):
return self.func.__name__
def __call__(self, val):
return self.func(val)
+ def choices(self, partial=""):
+ return set()
@Predicate
def ExistingDirectory(val):
diff --git a/plumbum/colorlib/styles.py b/plumbum/colorlib/styles.py
index 73c7938..1c5e4f3 100644
--- a/plumbum/colorlib/styles.py
+++ b/plumbum/colorlib/styles.py
@@ -10,7 +10,6 @@ With the ``Style`` class, any color can be directly called or given to a with st
from __future__ import print_function
import sys
import os
-import platform
import re
from copy import copy
from plumbum.colorlib.names import color_names, color_html
@@ -127,9 +126,6 @@ class Color(ABC):
self.exact = True
'This is false if the named color does not match the real color'
- self.use_color = True
- 'This is a toggle for color (or max representation)'
-
if None in (g,b):
if not r_or_color:
return
diff --git a/plumbum/lib.py b/plumbum/lib.py
index 35c3fc6..77c6f06 100644
--- a/plumbum/lib.py
+++ b/plumbum/lib.py
@@ -27,14 +27,13 @@ class six(object):
A light-weight version of six (which works on IronPython)
"""
PY3 = sys.version_info[0] >= 3
- ABC = ABCMeta('ABC', (object,), {'__module__':__name__})
+ ABC = ABCMeta('ABC', (object,), {'__module__':__name__, '__slots__':()})
- # Be sure to use named-tuple access, so that different order doesn't affect usage
+ # Be sure to use named-tuple access, so that usage is not affected
try:
- getargspec = staticmethod(inspect.getargspec)
+ getfullargspec = staticmethod(inspect.getfullargspec)
except AttributeError:
- getargspec = staticmethod(lambda func: inspect.getfullargspec(func)[:4])
-
+ getfullargspec = staticmethod(inspect.getargspec) # extra fields will not be available
if PY3:
integer_types = (int,)
| Feature request: Validator on main args
I've been digging around in the src, and I don't see a way to provide validators on the positional input arguments. Since the validators are cleanly implemented, this seems to work pretty well:
```python
def main(self, outfile, *srcfiles):
outfile = cli.ExistingFile(outfile)
srcfiles = map(cli.ExistingFile, srcfiles)
```
But is there a better, integrated way of doing this? I can think of two options. First, a Python-3-only way would be to use annotations, like so:
```python
def main(self, outfile : cli.ExistingFile, *srcfiles : cli.ExistingFile):
```
This has the advantage of being clean and integrated, and looks pretty easy to implement without affecting library usage in Python 2 (just try getargspec; if there's an annotation, that fails, so then getfullargspec works), but has the minor downside that the application is then Python 3 only. Another minor downside is that this is a use of annotations that does not fall under PEP 0484.
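That try/fallback is cheap to sketch, and is essentially what the `plumbum/lib.py` hunk above ends up doing (just with the probe reversed: it tries `getfullargspec` first and falls back on `AttributeError` under Python 2). The function name below is only a stand-in:
```python
import inspect

def main(self, outfile, *srcfiles):  # stand-in for a cli.Application.main
    pass

try:
    spec = inspect.getfullargspec(main)  # Python 3: exposes .annotations
except AttributeError:
    spec = inspect.getargspec(main)      # Python 2: no annotations field

annotations = getattr(spec, "annotations", {})  # {} when running on Python 2
```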
Second, a decorator, similar to the switches, but for the main function and built for multiple args (a little like slots in PyQt):
```python
@cli.main_validators(cli.ExistingFile, cli.ExistingFile)
def main(self, outfile, *srcfiles):
```
A very simple, incomplete implementation to show that this is possible:
```python
import decorator
def main_validator(*types):
    @decorator.decorator
    def main_validator(func, *args, **kargs):
        newargs = []
        for i in range(len(args)):
            if isinstance(args[i], str):
                # single argument: run its validator directly
                newargs.append(types[i](args[i]))
            else:
                # collected *varargs: validate each element
                newargs.append(list(map(types[i], args[i])))
        return func(*newargs, **kargs)
    return main_validator
```
Just a suggestion! Thanks for such a great library. | tomerfiliba/plumbum | diff --git a/tests/test_cli.py b/tests/test_cli.py
index 22d2f01..674074f 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -1,11 +1,11 @@
-import sys
import unittest
-from plumbum import cli
import time
from plumbum import cli, local
from plumbum.cli.terminal import ask, choose, hexdump, Progress
-from plumbum.lib import six, captured_stdout, StringIO
+from plumbum.lib import captured_stdout, ensure_skipIf
+
+ensure_skipIf(unittest)
class TestApp(cli.Application):
@cli.switch(["a"])
@@ -28,6 +28,8 @@ class TestApp(cli.Application):
self.eggs = "lalala"
self.eggs = old
self.tailargs = args
+
+
class Geet(cli.Application):
debug = cli.Flag("--debug")
diff --git a/tests/test_validate.py b/tests/test_validate.py
new file mode 100644
index 0000000..42f60ea
--- /dev/null
+++ b/tests/test_validate.py
@@ -0,0 +1,107 @@
+# -*- coding: utf-8 -*-
+from __future__ import print_function, division
+import unittest
+from plumbum import cli
+from plumbum.lib import captured_stdout
+from plumbum.lib import six
+
+class TestValidator(unittest.TestCase):
+ def test_named(self):
+ class Try(object):
+ @cli.positional(x=abs, y=str)
+ def main(selfy, x, y):
+ pass
+
+ self.assertEqual(Try.main.positional, [abs, str])
+ self.assertEqual(Try.main.positional_varargs, None)
+
+ def test_position(self):
+ class Try(object):
+ @cli.positional(abs, str)
+ def main(selfy, x, y):
+ pass
+
+ self.assertEqual(Try.main.positional, [abs, str])
+ self.assertEqual(Try.main.positional_varargs, None)
+
+ def test_mix(self):
+ class Try(object):
+ @cli.positional(abs, str, d=bool)
+ def main(selfy, x, y, z, d):
+ pass
+
+ self.assertEqual(Try.main.positional, [abs, str, None, bool])
+ self.assertEqual(Try.main.positional_varargs, None)
+
+ def test_var(self):
+ class Try(object):
+ @cli.positional(abs, str, int)
+ def main(selfy, x, y, *g):
+ pass
+
+ self.assertEqual(Try.main.positional, [abs, str])
+ self.assertEqual(Try.main.positional_varargs, int)
+
+ def test_defaults(self):
+ class Try(object):
+ @cli.positional(abs, str)
+ def main(selfy, x, y = 'hello'):
+ pass
+
+ self.assertEqual(Try.main.positional, [abs, str])
+
+
+class TestProg(unittest.TestCase):
+ def test_prog(self):
+ class MainValidator(cli.Application):
+ @cli.positional(int, int, int)
+ def main(self, myint, myint2, *mylist):
+ print(repr(myint), myint2, mylist)
+
+ with captured_stdout() as stream:
+ _, rc = MainValidator.run(["prog", "1", "2", '3', '4', '5'], exit = False)
+ self.assertEqual(rc, 0)
+ self.assertEqual("1 2 (3, 4, 5)", stream.getvalue().strip())
+
+
+ def test_failure(self):
+ class MainValidator(cli.Application):
+ @cli.positional(int, int, int)
+ def main(self, myint, myint2, *mylist):
+ print(myint, myint2, mylist)
+ with captured_stdout() as stream:
+ _, rc = MainValidator.run(["prog", "1.2", "2", '3', '4', '5'], exit = False)
+ self.assertEqual(rc, 2)
+ value = stream.getvalue().strip()
+ self.assertTrue("'int'>, not '1.2':" in value)
+ self.assertTrue(" 'int'>, not '1.2':" in value)
+ self.assertTrue('''ValueError("invalid literal for int() with base 10: '1.2'"''' in value)
+
+ def test_defaults(self):
+ class MainValidator(cli.Application):
+ @cli.positional(int, int)
+ def main(self, myint, myint2=2):
+ print(repr(myint), repr(myint2))
+
+ with captured_stdout() as stream:
+ _, rc = MainValidator.run(["prog", "1"], exit = False)
+ self.assertEqual(rc, 0)
+ self.assertEqual("1 2", stream.getvalue().strip())
+
+ with captured_stdout() as stream:
+ _, rc = MainValidator.run(["prog", "1", "3"], exit = False)
+ self.assertEqual(rc, 0)
+ self.assertEqual("1 3", stream.getvalue().strip())
+
+# Unfortunately, Py3 annotations are a syntax error in Py2, so using exec to add a test for Py3
+if six.PY3:
+ exec("""
+class Main3Validator(cli.Application):
+ def main(self, myint:int, myint2:int, *mylist:int):
+ print(myint, myint2, mylist)
+class TestProg3(unittest.TestCase):
+ def test_prog(self):
+ with captured_stdout() as stream:
+ _, rc = Main3Validator.run(["prog", "1", "2", '3', '4', '5'], exit = False)
+ self.assertEqual(rc, 0)
+ self.assertIn("1 2 (3, 4, 5)", stream.getvalue())""")
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 6
} | 1.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "paramiko",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio",
"paramiko"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
bcrypt @ file:///tmp/build/80754af9/bcrypt_1597936153616/work
certifi==2021.5.30
cffi @ file:///tmp/build/80754af9/cffi_1625814693874/work
coverage==6.2
cryptography @ file:///tmp/build/80754af9/cryptography_1635366128178/work
execnet==1.9.0
importlib-metadata==4.8.3
iniconfig==1.1.1
packaging==21.3
paramiko @ file:///opt/conda/conda-bld/paramiko_1640109032755/work
pluggy==1.0.0
-e git+https://github.com/tomerfiliba/plumbum.git@8d19a7b6c1f1a9caa2eb03f461e9a31a085d7ad5#egg=plumbum
py==1.11.0
pycparser @ file:///tmp/build/80754af9/pycparser_1636541352034/work
PyNaCl @ file:///tmp/build/80754af9/pynacl_1595009112226/work
pyparsing==3.1.4
pytest==7.0.1
pytest-asyncio==0.16.0
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-xdist==3.0.2
six @ file:///tmp/build/80754af9/six_1644875935023/work
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: plumbum
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- bcrypt=3.2.0=py36h7b6447c_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- cffi=1.14.6=py36h400218f_0
- cryptography=35.0.0=py36hd23ed53_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libsodium=1.0.18=h7b6447c_0
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- paramiko=2.8.1=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pycparser=2.21=pyhd3eb1b0_0
- pynacl=1.4.0=py36h7b6447c_1
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- six=1.16.0=pyhd3eb1b0_1
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- execnet==1.9.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-asyncio==0.16.0
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-xdist==3.0.2
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/plumbum
| [
"tests/test_validate.py::TestValidator::test_defaults",
"tests/test_validate.py::TestValidator::test_mix",
"tests/test_validate.py::TestValidator::test_named",
"tests/test_validate.py::TestValidator::test_position",
"tests/test_validate.py::TestValidator::test_var",
"tests/test_validate.py::TestProg::test_defaults",
"tests/test_validate.py::TestProg::test_failure",
"tests/test_validate.py::TestProg::test_prog",
"tests/test_validate.py::TestProg3::test_prog"
]
| []
| [
"tests/test_cli.py::CLITest::test_default_main",
"tests/test_cli.py::CLITest::test_env_var",
"tests/test_cli.py::CLITest::test_failures",
"tests/test_cli.py::CLITest::test_invoke",
"tests/test_cli.py::CLITest::test_lazy_subcommand",
"tests/test_cli.py::CLITest::test_mandatory_env_var",
"tests/test_cli.py::CLITest::test_meta_switches",
"tests/test_cli.py::CLITest::test_okay",
"tests/test_cli.py::CLITest::test_reset_switchattr",
"tests/test_cli.py::CLITest::test_subcommands",
"tests/test_cli.py::CLITest::test_unbind",
"tests/test_cli.py::TestTerminal::test_ask",
"tests/test_cli.py::TestTerminal::test_choose",
"tests/test_cli.py::TestTerminal::test_hexdump",
"tests/test_cli.py::TestTerminal::test_progress"
]
| []
| MIT License | 242 | [
"plumbum/colorlib/styles.py",
"plumbum/lib.py",
"docs/cli.rst",
"plumbum/cli/application.py",
"plumbum/cli/switches.py",
"plumbum/cli/__init__.py"
]
| [
"plumbum/colorlib/styles.py",
"plumbum/lib.py",
"docs/cli.rst",
"plumbum/cli/application.py",
"plumbum/cli/switches.py",
"plumbum/cli/__init__.py"
]
|
|
ipython__ipykernel-59 | 4cda22a855a3e6833fac5ea1ee229eea0a444be6 | 2015-09-22 00:21:05 | 4cda22a855a3e6833fac5ea1ee229eea0a444be6 | diff --git a/ipykernel/kernelspec.py b/ipykernel/kernelspec.py
index 1d8541f..0203a87 100644
--- a/ipykernel/kernelspec.py
+++ b/ipykernel/kernelspec.py
@@ -153,7 +153,7 @@ class InstallIPythonKernelSpecApp(Application):
opts = parser.parse_args(self.argv)
try:
dest = install(user=opts.user, kernel_name=opts.name, prefix=opts.prefix,
- dispay_name=opts.display_name,
+ display_name=opts.display_name,
)
except OSError as e:
if e.errno == errno.EACCES:
| Typo in install_kernelspec causes it to break
#57 causes
`The command "python -m ipykernel.kernelspec --user" failed and exited with 1 during .`
as reported in https://travis-ci.org/jupyter/nbconvert/jobs/81479990.
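The failure mode is plain keyword-argument checking: `install()` has no parameter named `dispay_name`, so the call raises before anything is written. A minimal sketch (the signature below mirrors the keywords used at the call site in the patch, not the exact one in `ipykernel.kernelspec`):
```python
def install(user=False, kernel_name=None, prefix=None, display_name=None):
    """Stand-in for ipykernel.kernelspec.install."""
    return display_name

install(user=True, dispay_name='Python 3')
# TypeError: install() got an unexpected keyword argument 'dispay_name'
```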
Working on a fix + test now. | ipython/ipykernel | diff --git a/ipykernel/tests/test_kernelspec.py b/ipykernel/tests/test_kernelspec.py
index 409802c..c849436 100644
--- a/ipykernel/tests/test_kernelspec.py
+++ b/ipykernel/tests/test_kernelspec.py
@@ -7,7 +7,7 @@ import os
import shutil
import sys
import tempfile
-
+
try:
from unittest import mock
except ImportError:
@@ -20,6 +20,7 @@ from ipykernel.kernelspec import (
get_kernel_dict,
write_kernel_spec,
install,
+ InstallIPythonKernelSpecApp,
KERNEL_NAME,
RESOURCES,
)
@@ -75,6 +76,17 @@ def test_write_kernel_spec_path():
shutil.rmtree(path)
+def test_install_kernelspec():
+
+ path = tempfile.mkdtemp()
+ try:
+ test = InstallIPythonKernelSpecApp.launch_instance(argv=['--prefix', path])
+ assert_is_spec(os.path.join(
+ path, 'share', 'jupyter', 'kernels', KERNEL_NAME))
+ finally:
+ shutil.rmtree(path)
+
+
def test_install_user():
tmp = tempfile.mkdtemp()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 3,
"test_score": 0
},
"num_modified_files": 1
} | 4.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"nose",
"nose-warnings-filters",
"pytest"
],
"pre_install": null,
"python": "3.5",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
backcall==0.2.0
certifi==2021.5.30
decorator==5.1.1
entrypoints==0.4
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
-e git+https://github.com/ipython/ipykernel.git@4cda22a855a3e6833fac5ea1ee229eea0a444be6#egg=ipykernel
ipython==7.16.3
ipython-genutils==0.2.0
jedi==0.17.2
jupyter-client==7.1.2
jupyter-core==4.9.2
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
nest-asyncio==1.6.0
nose==1.3.7
nose-warnings-filters==0.1.5
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
parso==0.7.1
pexpect==4.9.0
pickleshare==0.7.5
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
prompt-toolkit==3.0.36
ptyprocess==0.7.0
py @ file:///opt/conda/conda-bld/py_1644396412707/work
Pygments==2.14.0
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
python-dateutil==2.9.0.post0
pyzmq==25.1.2
six==1.17.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tornado==6.1
traitlets==4.3.3
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
wcwidth==0.2.13
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: ipykernel
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- backcall==0.2.0
- decorator==5.1.1
- entrypoints==0.4
- ipython==7.16.3
- ipython-genutils==0.2.0
- jedi==0.17.2
- jupyter-client==7.1.2
- jupyter-core==4.9.2
- nest-asyncio==1.6.0
- nose==1.3.7
- nose-warnings-filters==0.1.5
- parso==0.7.1
- pexpect==4.9.0
- pickleshare==0.7.5
- prompt-toolkit==3.0.36
- ptyprocess==0.7.0
- pygments==2.14.0
- python-dateutil==2.9.0.post0
- pyzmq==25.1.2
- six==1.17.0
- tornado==6.1
- traitlets==4.3.3
- wcwidth==0.2.13
prefix: /opt/conda/envs/ipykernel
| [
"ipykernel/tests/test_kernelspec.py::test_install_kernelspec"
]
| []
| [
"ipykernel/tests/test_kernelspec.py::test_make_ipkernel_cmd",
"ipykernel/tests/test_kernelspec.py::test_get_kernel_dict",
"ipykernel/tests/test_kernelspec.py::test_write_kernel_spec",
"ipykernel/tests/test_kernelspec.py::test_write_kernel_spec_path",
"ipykernel/tests/test_kernelspec.py::test_install_user",
"ipykernel/tests/test_kernelspec.py::test_install"
]
| []
| BSD 3-Clause "New" or "Revised" License | 243 | [
"ipykernel/kernelspec.py"
]
| [
"ipykernel/kernelspec.py"
]
|
|
keleshev__schema-73 | e5c489cbbbbb8671ee32c43d518fea0076563da4 | 2015-09-22 18:04:16 | eb7670f0f4615195393dc5350d49fa9a33304137 | diff --git a/README.rst b/README.rst
index b9d5d9d..b5a9ffd 100644
--- a/README.rst
+++ b/README.rst
@@ -85,7 +85,7 @@ otherwise it will raise ``SchemaError``.
>>> Schema(int).validate('123')
Traceback (most recent call last):
...
- SchemaError: '123' should be instance of 'int'
+ SchemaError: '123' should be instance of <type 'int'>
>>> Schema(object).validate('hai')
'hai'
@@ -172,7 +172,7 @@ against schemas listed inside that container:
Traceback (most recent call last):
...
SchemaError: Or(<type 'int'>, <type 'float'>) did not validate 'not int or float here'
- 'not int or float here' should be instance of 'float'
+ 'not int or float here' should be instance of <type 'float'>
Dictionaries
~~~~~~~~~~~~
@@ -232,9 +232,8 @@ data matches:
>>> from schema import Optional
>>> Schema({Optional('color', default='blue'): str,
- ... str: str}).validate({'texture': 'furry'}
- ... ) == {'color': 'blue', 'texture': 'furry'}
- True
+ ... str: str}).validate({'texture': 'furry'})
+ {'color': 'blue', 'texture': 'furry'}
Defaults are used verbatim, not passed through any validators specified in the
value.
diff --git a/schema.py b/schema.py
index bd9d4dd..9158c68 100644
--- a/schema.py
+++ b/schema.py
@@ -140,15 +140,15 @@ class Schema(object):
new[nkey] = nvalue
elif skey is not None:
if x is not None:
- raise SchemaError(['Invalid value for key %r' % key] +
+ raise SchemaError(['invalid value for key %r' % key] +
x.autos, [e] + x.errors)
required = set(k for k in s if type(k) is not Optional)
if coverage != required:
- raise SchemaError('Missing keys: %s' % ", ".join(required - coverage), e)
+ raise SchemaError('missed keys %r' % (required - coverage), e)
if len(new) != len(data):
wrong_keys = set(data.keys()) - set(new.keys())
s_wrong_keys = ', '.join('%r' % (k,) for k in sorted(wrong_keys))
- raise SchemaError('Wrong keys %s in %r' % (s_wrong_keys, data),
+ raise SchemaError('wrong keys %s in %r' % (s_wrong_keys, data),
e)
# Apply default-having optionals that haven't been used:
@@ -162,7 +162,7 @@ class Schema(object):
if isinstance(data, s):
return data
else:
- raise SchemaError('%r should be instance of %r' % (data, s.__name__), e)
+ raise SchemaError('%r should be instance of %r' % (data, s), e)
if flavor == VALIDATOR:
try:
return s.validate(data)
diff --git a/setup.py b/setup.py
index a539f6e..1bfb248 100644
--- a/setup.py
+++ b/setup.py
@@ -1,5 +1,6 @@
from setuptools import setup
+import codecs
import schema
@@ -13,7 +14,7 @@ setup(
keywords="schema json validation",
url="http://github.com/halst/schema",
py_modules=['schema'],
- long_description=open('README.rst').read(),
+ long_description=codecs.open('README.rst', 'r', 'utf-8').read(),
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
| Cannot install with pip3 -- README.rst contains non-ascii character.
```bash
$ pip3 install schema
Downloading/unpacking schema
Downloading schema-0.3.1.tar.gz
Running setup.py (path:/private/tmp/pip_build_bla/schema/setup.py) egg_info for package schema
Traceback (most recent call last):
File "<string>", line 17, in <module>
File "/private/tmp/pip_build_bla/schema/setup.py", line 16, in <module>
long_description=open('README.rst').read(),
File "/usr/local/Cellar/python3/3.4.2_1/Frameworks/Python.framework/Versions/3.4/lib/python3.4/encodings/ascii.py", line 26, in decode
return codecs.ascii_decode(input, self.errors)[0]
UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 2360: ordinal not in range(128)
Complete output from command python setup.py egg_info:
Traceback (most recent call last):
File "<string>", line 17, in <module>
File "/private/tmp/pip_build_bla/schema/setup.py", line 16, in <module>
long_description=open('README.rst').read(),
File "/usr/local/Cellar/python3/3.4.2_1/Frameworks/Python.framework/Versions/3.4/lib/python3.4/encodings/ascii.py", line 26, in decode
return codecs.ascii_decode(input, self.errors)[0]
UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 2360: ordinal not in range(128)
----------------------------------------
Cleaning up...
Command python setup.py egg_info failed with error code 1 in /private/tmp/pip_build_bla/schema
Storing debug log for failure in /Users/bla/.pip/pip.log
```
The culprit line in the readme is here:
```
Alternatively, you can just drop `schema.py` file into your project—it is self-contained.
```
(The em dash)
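A locale-independent fix (this is what the `setup.py` change above does via `codecs.open`) is to pass the encoding explicitly instead of relying on the locale:
```python
import io

with io.open('README.rst', encoding='utf-8') as f:
    long_description = f.read()
```
`io.open` behaves the same on Python 2 and Python 3, so the sketch above is portable.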
If I set LANG=utf-8, it works:
```bash
$ LANG=utf-8 pip install schema
Downloading/unpacking schema
Downloading schema-0.3.1.tar.gz
Running setup.py (path:/Users/bla/tmpvenv/build/schema/setup.py) egg_info for package schema
Installing collected packages: schema
Running setup.py install for schema
Successfully installed schema
Cleaning up...
``` | keleshev/schema | diff --git a/test_schema.py b/test_schema.py
index aee25dc..4ec9993 100644
--- a/test_schema.py
+++ b/test_schema.py
@@ -106,33 +106,35 @@ def test_dict():
try:
Schema({'key': 5}).validate({})
except SchemaError as e:
- assert e.args[0] == "Missing keys: key"
+ assert e.args[0] in ["missed keys set(['key'])",
+ "missed keys {'key'}"] # Python 3 style
raise
with SE:
try:
Schema({'key': 5}).validate({'n': 5})
except SchemaError as e:
- assert e.args[0] == "Missing keys: key"
+ assert e.args[0] in ["missed keys set(['key'])",
+ "missed keys {'key'}"] # Python 3 style
raise
with SE:
try:
Schema({}).validate({'n': 5})
except SchemaError as e:
- assert e.args[0] == "Wrong keys 'n' in {'n': 5}"
+ assert e.args[0] == "wrong keys 'n' in {'n': 5}"
raise
with SE:
try:
Schema({'key': 5}).validate({'key': 5, 'bad': 5})
except SchemaError as e:
- assert e.args[0] in ["Wrong keys 'bad' in {'key': 5, 'bad': 5}",
- "Wrong keys 'bad' in {'bad': 5, 'key': 5}"]
+ assert e.args[0] in ["wrong keys 'bad' in {'key': 5, 'bad': 5}",
+ "wrong keys 'bad' in {'bad': 5, 'key': 5}"]
raise
with SE:
try:
Schema({}).validate({'a': 5, 'b': 5})
except SchemaError as e:
- assert e.args[0] in ["Wrong keys 'a', 'b' in {'a': 5, 'b': 5}",
- "Wrong keys 'a', 'b' in {'b': 5, 'a': 5}"]
+ assert e.args[0] in ["wrong keys 'a', 'b' in {'a': 5, 'b': 5}",
+ "wrong keys 'a', 'b' in {'b': 5, 'a': 5}"]
raise
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 3
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
pytest-cov==6.0.0
-e git+https://github.com/keleshev/schema.git@e5c489cbbbbb8671ee32c43d518fea0076563da4#egg=schema
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: schema
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- pytest-cov==6.0.0
prefix: /opt/conda/envs/schema
| [
"test_schema.py::test_dict"
]
| []
| [
"test_schema.py::test_schema",
"test_schema.py::test_validate_file",
"test_schema.py::test_and",
"test_schema.py::test_or",
"test_schema.py::test_validate_list",
"test_schema.py::test_list_tuple_set_frozenset",
"test_schema.py::test_strictly",
"test_schema.py::test_dict_keys",
"test_schema.py::test_dict_optional_keys",
"test_schema.py::test_dict_optional_defaults",
"test_schema.py::test_complex",
"test_schema.py::test_nice_errors",
"test_schema.py::test_use_error_handling",
"test_schema.py::test_or_error_handling",
"test_schema.py::test_and_error_handling",
"test_schema.py::test_schema_error_handling",
"test_schema.py::test_use_json",
"test_schema.py::test_error_reporting",
"test_schema.py::test_schema_repr",
"test_schema.py::test_validate_object",
"test_schema.py::test_issue_9_prioritized_key_comparison",
"test_schema.py::test_issue_9_prioritized_key_comparison_in_dicts"
]
| []
| MIT License | 244 | [
"README.rst",
"schema.py",
"setup.py"
]
| [
"README.rst",
"schema.py",
"setup.py"
]
|
|
Turbo87__utm-16 | a121973a02f2e24cfc9d45a24cd37b7c0bef3af9 | 2015-09-23 19:42:26 | 4c7c13f2b2b9c01a8581392641aeb8bbda6aba6f | diff --git a/utm/conversion.py b/utm/conversion.py
index ec30333..33fbfe9 100644
--- a/utm/conversion.py
+++ b/utm/conversion.py
@@ -176,7 +176,7 @@ def latitude_to_zone_letter(latitude):
def latlon_to_zone_number(latitude, longitude):
- if 56 <= latitude <= 64 and 3 <= longitude <= 12:
+ if 56 <= latitude < 64 and 3 <= longitude < 12:
return 32
if 72 <= latitude <= 84 and longitude >= 0:
| Borders of zones 31V and 32V
In `latlon_to_zone_number` you handle the zones 31V and 32V differently from all the other zones.
Usually, you compute the zone number `[left..right)` and the zone letter `[bottom..top)`. But for the corrections of 31V and 32V you use `[left..right]` and `[bottom..top]`.
This leads to the following behavior:
* for 52°N 12°E, 33U is returned (which, in my opinion, is correct).
* for 60°N 12°E, 32V is returned. Shouldn't it be 33V?
* for 64°N 5°E, 32W is returned. Shouldn't it be 31W?
I would replace line 179 with
```python
if 56 <= latitude < 64 and 3 <= longitude < 12:
```
I didn't check your code for the corrections in the X-band in detail. (I'm not interested in places this far north...) But I believe we have the same issue there.
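With the half-open bounds, the boundary points above resolve the way this issue expects. A quick check (`from_latlon` returns an `(easting, northing, zone_number, zone_letter)` tuple; letter case can vary, which is why the tests compare case-insensitively):
```python
import utm

print(utm.from_latlon(52, 12)[2:])  # (33, 'U'), already correct
print(utm.from_latlon(60, 12)[2:])  # (33, 'V') once the bound is half-open
print(utm.from_latlon(64, 5)[2:])   # (31, 'W') once the bound is half-open
```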
| Turbo87/utm | diff --git a/test/test_utm.py b/test/test_utm.py
index 1ffcab2..5cbabe8 100644
--- a/test/test_utm.py
+++ b/test/test_utm.py
@@ -173,5 +173,57 @@ class BadInput(UTMTestCase):
self.assertRaises(
UTM.OutOfRangeError, UTM.to_latlon, 500000, 5000000, 32, 'Z')
+
+class Zone32V(unittest.TestCase):
+
+ def assert_zone_equal(self, result, expected_number, expected_letter):
+ self.assertEqual(result[2], expected_number)
+ self.assertEqual(result[3].upper(), expected_letter.upper())
+
+ def test_inside(self):
+ self.assert_zone_equal(UTM.from_latlon(56, 3), 32, 'V')
+ self.assert_zone_equal(UTM.from_latlon(56, 6), 32, 'V')
+ self.assert_zone_equal(UTM.from_latlon(56, 9), 32, 'V')
+ self.assert_zone_equal(UTM.from_latlon(56, 11.999999), 32, 'V')
+
+ self.assert_zone_equal(UTM.from_latlon(60, 3), 32, 'V')
+ self.assert_zone_equal(UTM.from_latlon(60, 6), 32, 'V')
+ self.assert_zone_equal(UTM.from_latlon(60, 9), 32, 'V')
+ self.assert_zone_equal(UTM.from_latlon(60, 11.999999), 32, 'V')
+
+ self.assert_zone_equal(UTM.from_latlon(63.999999, 3), 32, 'V')
+ self.assert_zone_equal(UTM.from_latlon(63.999999, 6), 32, 'V')
+ self.assert_zone_equal(UTM.from_latlon(63.999999, 9), 32, 'V')
+ self.assert_zone_equal(UTM.from_latlon(63.999999, 11.999999), 32, 'V')
+
+ def test_left_of(self):
+ self.assert_zone_equal(UTM.from_latlon(55.999999, 2.999999), 31, 'U')
+ self.assert_zone_equal(UTM.from_latlon(56, 2.999999), 31, 'V')
+ self.assert_zone_equal(UTM.from_latlon(60, 2.999999), 31, 'V')
+ self.assert_zone_equal(UTM.from_latlon(63.999999, 2.999999), 31, 'V')
+ self.assert_zone_equal(UTM.from_latlon(64, 2.999999), 31, 'W')
+
+ def test_right_of(self):
+ self.assert_zone_equal(UTM.from_latlon(55.999999, 12), 33, 'U')
+ self.assert_zone_equal(UTM.from_latlon(56, 12), 33, 'V')
+ self.assert_zone_equal(UTM.from_latlon(60, 12), 33, 'V')
+ self.assert_zone_equal(UTM.from_latlon(63.999999, 12), 33, 'V')
+ self.assert_zone_equal(UTM.from_latlon(64, 12), 33, 'W')
+
+ def test_below(self):
+ self.assert_zone_equal(UTM.from_latlon(55.999999, 3), 31, 'U')
+ self.assert_zone_equal(UTM.from_latlon(55.999999, 6), 32, 'U')
+ self.assert_zone_equal(UTM.from_latlon(55.999999, 9), 32, 'U')
+ self.assert_zone_equal(UTM.from_latlon(55.999999, 11.999999), 32, 'U')
+ self.assert_zone_equal(UTM.from_latlon(55.999999, 12), 33, 'U')
+
+ def test_above(self):
+ self.assert_zone_equal(UTM.from_latlon(64, 3), 31, 'W')
+ self.assert_zone_equal(UTM.from_latlon(64, 6), 32, 'W')
+ self.assert_zone_equal(UTM.from_latlon(64, 9), 32, 'W')
+ self.assert_zone_equal(UTM.from_latlon(64, 11.999999), 32, 'W')
+ self.assert_zone_equal(UTM.from_latlon(64, 12), 33, 'W')
+
+
if __name__ == '__main__':
unittest.main()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 1
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
tomli==2.2.1
-e git+https://github.com/Turbo87/utm.git@a121973a02f2e24cfc9d45a24cd37b7c0bef3af9#egg=utm
| name: utm
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- tomli==2.2.1
prefix: /opt/conda/envs/utm
| [
"test/test_utm.py::Zone32V::test_above",
"test/test_utm.py::Zone32V::test_right_of"
]
| []
| [
"test/test_utm.py::KnownValues::test_from_latlon",
"test/test_utm.py::KnownValues::test_to_latlon",
"test/test_utm.py::BadInput::test_from_latlon_range_checks",
"test/test_utm.py::BadInput::test_to_latlon_range_checks",
"test/test_utm.py::Zone32V::test_below",
"test/test_utm.py::Zone32V::test_inside",
"test/test_utm.py::Zone32V::test_left_of"
]
| []
| MIT License | 245 | [
"utm/conversion.py"
]
| [
"utm/conversion.py"
]
|
|
docker__docker-py-787 | 5e331a55a8e8e10354693172dce1aa63f58ebe97 | 2015-09-23 21:07:40 | f479720d517a7db7f886916190b3032d29d18f10 | shin-: @aanand Thanks, didn't know about `six.u`. PTAL?
aanand: Cool. Perhaps the first test should be rewritten so it tests byte string input on all Python versions:
```python
def test_convert_volume_bindings_binary_input(self):
expected = [six.u('/mnt/지연:/unicode/박:rw')]
data = {
b'/mnt/지연': {
'bind': b'/unicode/박',
'mode': 'rw'
}
}
self.assertEqual(
convert_volume_binds(data), expected
)
```
shin-: Okay, I've tried all sorts of combinations, and I've settled on just separating the py2 and py3 tests completely (so it's 2+2 now). On the bright side, results were consistent all along (and we have tests to prove it)!
aanand: OK, I see the rationale, but instead of entirely separate test methods, could we just switch on the Python version *inside* the method?
```python
def test_convert_volume_binds_unicode_bytes_input(self):
if six.PY2:
expected = [unicode('/mnt/지연:/unicode/박:rw', 'utf-8')]
data = {
'/mnt/지연': {
'bind': '/unicode/박',
'mode': 'rw'
}
}
else:
expected = ['/mnt/지연:/unicode/박:rw']
data = {
bytes('/mnt/지연', 'utf-8'): {
'bind': bytes('/unicode/박', 'utf-8'),
'mode': 'rw'
}
}
self.assertEqual(convert_volume_binds(data), expected)
def test_convert_volume_binds_unicode_unicode_input(self):
if six.PY2:
expected = [unicode('/mnt/지연:/unicode/박:rw', 'utf-8')]
data = {
unicode('/mnt/지연', 'utf-8'): {
'bind': unicode('/unicode/박', 'utf-8'),
'mode': 'rw'
}
}
else:
expected = ['/mnt/지연:/unicode/박:rw']
data = {
'/mnt/지연': {
'bind': '/unicode/박',
'mode': 'rw'
}
}
self.assertEqual(convert_volume_binds(data), expected)
``` | diff --git a/docker/utils/utils.py b/docker/utils/utils.py
index 36edf8de..1fce1377 100644
--- a/docker/utils/utils.py
+++ b/docker/utils/utils.py
@@ -242,6 +242,9 @@ def convert_volume_binds(binds):
result = []
for k, v in binds.items():
+ if isinstance(k, six.binary_type):
+ k = k.decode('utf-8')
+
if isinstance(v, dict):
if 'ro' in v and 'mode' in v:
raise ValueError(
@@ -249,6 +252,10 @@ def convert_volume_binds(binds):
.format(repr(v))
)
+ bind = v['bind']
+ if isinstance(bind, six.binary_type):
+ bind = bind.decode('utf-8')
+
if 'ro' in v:
mode = 'ro' if v['ro'] else 'rw'
elif 'mode' in v:
@@ -256,11 +263,15 @@ def convert_volume_binds(binds):
else:
mode = 'rw'
- result.append('{0}:{1}:{2}'.format(
- k, v['bind'], mode
- ))
+ result.append(
+ six.text_type('{0}:{1}:{2}').format(k, bind, mode)
+ )
else:
- result.append('{0}:{1}:rw'.format(k, v))
+ if isinstance(v, six.binary_type):
+ v = v.decode('utf-8')
+ result.append(
+ six.text_type('{0}:{1}:rw').format(k, v)
+ )
return result
| Create container bind volume with Unicode folder name
If a bind volume folder name is Unicode, for example Japanese, it raises an exception.
Both the host volume and the container volume should handle Unicode.
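A minimal sketch of the failing call (the mount path here is illustrative):
```python
# -*- coding: utf-8 -*-
from docker.utils import create_host_config

binds = {'/mnt/ボリューム': {'bind': '/データ', 'mode': 'rw'}}
create_host_config(binds=binds)
```
On Python 2, formatting the bind string then fails with: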
```
File "/home/vagrant/.local/share/virtualenvs/qnap/local/lib/python2.7/site-packages/docker/utils/utils.py", line 461, in create_host_config
host_config['Binds'] = convert_volume_binds(binds)
File "/home/vagrant/.local/share/virtualenvs/qnap/local/lib/python2.7/site-packages/docker/utils/utils.py", line 208, in convert_volume_binds
k, v['bind'], mode
``` | docker/docker-py | diff --git a/tests/utils_test.py b/tests/utils_test.py
index 45929f73..8ac1dcb9 100644
--- a/tests/utils_test.py
+++ b/tests/utils_test.py
@@ -1,15 +1,20 @@
+# -*- coding: utf-8 -*-
+
import os
import os.path
import shutil
import tempfile
+import pytest
+import six
+
from docker.client import Client
from docker.constants import DEFAULT_DOCKER_API_VERSION
from docker.errors import DockerException
from docker.utils import (
parse_repository_tag, parse_host, convert_filters, kwargs_from_env,
create_host_config, Ulimit, LogConfig, parse_bytes, parse_env_file,
- exclude_paths,
+ exclude_paths, convert_volume_binds,
)
from docker.utils.ports import build_port_bindings, split_port
from docker.auth import resolve_repository_name, resolve_authconfig
@@ -17,7 +22,6 @@ from docker.auth import resolve_repository_name, resolve_authconfig
from . import base
from .helpers import make_tree
-import pytest
TEST_CERT_DIR = os.path.join(
os.path.dirname(__file__),
@@ -192,6 +196,89 @@ class UtilsTest(base.BaseTestCase):
local_tempfile.close()
return local_tempfile.name
+ def test_convert_volume_binds_empty(self):
+ self.assertEqual(convert_volume_binds({}), [])
+ self.assertEqual(convert_volume_binds([]), [])
+
+ def test_convert_volume_binds_list(self):
+ data = ['/a:/a:ro', '/b:/c:z']
+ self.assertEqual(convert_volume_binds(data), data)
+
+ def test_convert_volume_binds_complete(self):
+ data = {
+ '/mnt/vol1': {
+ 'bind': '/data',
+ 'mode': 'ro'
+ }
+ }
+ self.assertEqual(convert_volume_binds(data), ['/mnt/vol1:/data:ro'])
+
+ def test_convert_volume_binds_compact(self):
+ data = {
+ '/mnt/vol1': '/data'
+ }
+ self.assertEqual(convert_volume_binds(data), ['/mnt/vol1:/data:rw'])
+
+ def test_convert_volume_binds_no_mode(self):
+ data = {
+ '/mnt/vol1': {
+ 'bind': '/data'
+ }
+ }
+ self.assertEqual(convert_volume_binds(data), ['/mnt/vol1:/data:rw'])
+
+ def test_convert_volume_binds_unicode_bytes_input(self):
+ if six.PY2:
+ expected = [unicode('/mnt/지연:/unicode/박:rw', 'utf-8')]
+
+ data = {
+ '/mnt/지연': {
+ 'bind': '/unicode/박',
+ 'mode': 'rw'
+ }
+ }
+ self.assertEqual(
+ convert_volume_binds(data), expected
+ )
+ else:
+ expected = ['/mnt/지연:/unicode/박:rw']
+
+ data = {
+ bytes('/mnt/지연', 'utf-8'): {
+ 'bind': bytes('/unicode/박', 'utf-8'),
+ 'mode': 'rw'
+ }
+ }
+ self.assertEqual(
+ convert_volume_binds(data), expected
+ )
+
+ def test_convert_volume_binds_unicode_unicode_input(self):
+ if six.PY2:
+ expected = [unicode('/mnt/지연:/unicode/박:rw', 'utf-8')]
+
+ data = {
+ unicode('/mnt/지연', 'utf-8'): {
+ 'bind': unicode('/unicode/박', 'utf-8'),
+ 'mode': 'rw'
+ }
+ }
+ self.assertEqual(
+ convert_volume_binds(data), expected
+ )
+ else:
+ expected = ['/mnt/지연:/unicode/박:rw']
+
+ data = {
+ '/mnt/지연': {
+ 'bind': '/unicode/박',
+ 'mode': 'rw'
+ }
+ }
+ self.assertEqual(
+ convert_volume_binds(data), expected
+ )
+
def test_parse_repository_tag(self):
self.assertEqual(parse_repository_tag("root"),
("root", None))
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 1.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest pytest-cov",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.7",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi @ file:///croot/certifi_1671487769961/work/certifi
coverage==7.2.7
-e git+https://github.com/docker/docker-py.git@5e331a55a8e8e10354693172dce1aa63f58ebe97#egg=docker_py
exceptiongroup==1.2.2
importlib-metadata==6.7.0
iniconfig==2.0.0
packaging==24.0
pluggy==1.2.0
pytest==7.4.4
pytest-cov==4.1.0
requests==2.5.3
six==1.17.0
tomli==2.0.1
typing_extensions==4.7.1
websocket-client==0.32.0
zipp==3.15.0
| name: docker-py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=22.3.1=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.2.7
- exceptiongroup==1.2.2
- importlib-metadata==6.7.0
- iniconfig==2.0.0
- packaging==24.0
- pluggy==1.2.0
- pytest==7.4.4
- pytest-cov==4.1.0
- requests==2.5.3
- six==1.17.0
- tomli==2.0.1
- typing-extensions==4.7.1
- websocket-client==0.32.0
- zipp==3.15.0
prefix: /opt/conda/envs/docker-py
| [
"tests/utils_test.py::UtilsTest::test_convert_volume_binds_unicode_bytes_input"
]
| []
| [
"tests/utils_test.py::HostConfigTest::test_create_host_config_invalid_cpu_cfs_types",
"tests/utils_test.py::HostConfigTest::test_create_host_config_no_options",
"tests/utils_test.py::HostConfigTest::test_create_host_config_no_options_newer_api_version",
"tests/utils_test.py::HostConfigTest::test_create_host_config_with_cpu_period",
"tests/utils_test.py::HostConfigTest::test_create_host_config_with_cpu_quota",
"tests/utils_test.py::UlimitTest::test_create_host_config_dict_ulimit",
"tests/utils_test.py::UlimitTest::test_create_host_config_dict_ulimit_capitals",
"tests/utils_test.py::UlimitTest::test_create_host_config_obj_ulimit",
"tests/utils_test.py::UlimitTest::test_ulimit_invalid_type",
"tests/utils_test.py::LogConfigTest::test_create_host_config_dict_logconfig",
"tests/utils_test.py::LogConfigTest::test_create_host_config_obj_logconfig",
"tests/utils_test.py::LogConfigTest::test_logconfig_invalid_config_type",
"tests/utils_test.py::KwargsFromEnvTest::test_kwargs_from_env_empty",
"tests/utils_test.py::KwargsFromEnvTest::test_kwargs_from_env_no_cert_path",
"tests/utils_test.py::KwargsFromEnvTest::test_kwargs_from_env_tls",
"tests/utils_test.py::UtilsTest::test_convert_filters",
"tests/utils_test.py::UtilsTest::test_convert_volume_binds_compact",
"tests/utils_test.py::UtilsTest::test_convert_volume_binds_complete",
"tests/utils_test.py::UtilsTest::test_convert_volume_binds_empty",
"tests/utils_test.py::UtilsTest::test_convert_volume_binds_list",
"tests/utils_test.py::UtilsTest::test_convert_volume_binds_no_mode",
"tests/utils_test.py::UtilsTest::test_convert_volume_binds_unicode_unicode_input",
"tests/utils_test.py::UtilsTest::test_parse_bytes",
"tests/utils_test.py::UtilsTest::test_parse_env_file_commented_line",
"tests/utils_test.py::UtilsTest::test_parse_env_file_invalid_line",
"tests/utils_test.py::UtilsTest::test_parse_env_file_proper",
"tests/utils_test.py::UtilsTest::test_parse_host",
"tests/utils_test.py::UtilsTest::test_parse_host_empty_value",
"tests/utils_test.py::UtilsTest::test_parse_repository_tag",
"tests/utils_test.py::UtilsTest::test_resolve_authconfig",
"tests/utils_test.py::UtilsTest::test_resolve_registry_and_auth",
"tests/utils_test.py::UtilsTest::test_resolve_repository_name",
"tests/utils_test.py::PortsTest::test_build_port_bindings_with_matching_internal_port_ranges",
"tests/utils_test.py::PortsTest::test_build_port_bindings_with_matching_internal_ports",
"tests/utils_test.py::PortsTest::test_build_port_bindings_with_nonmatching_internal_port_ranges",
"tests/utils_test.py::PortsTest::test_build_port_bindings_with_nonmatching_internal_ports",
"tests/utils_test.py::PortsTest::test_build_port_bindings_with_one_port",
"tests/utils_test.py::PortsTest::test_build_port_bindings_with_port_range",
"tests/utils_test.py::PortsTest::test_host_only_with_colon",
"tests/utils_test.py::PortsTest::test_non_matching_length_port_ranges",
"tests/utils_test.py::PortsTest::test_port_and_range_invalid",
"tests/utils_test.py::PortsTest::test_port_only_with_colon",
"tests/utils_test.py::PortsTest::test_split_port_invalid",
"tests/utils_test.py::PortsTest::test_split_port_no_host_port",
"tests/utils_test.py::PortsTest::test_split_port_range_no_host_port",
"tests/utils_test.py::PortsTest::test_split_port_range_with_host_ip_no_port",
"tests/utils_test.py::PortsTest::test_split_port_range_with_host_port",
"tests/utils_test.py::PortsTest::test_split_port_range_with_protocol",
"tests/utils_test.py::PortsTest::test_split_port_with_host_ip",
"tests/utils_test.py::PortsTest::test_split_port_with_host_ip_no_port",
"tests/utils_test.py::PortsTest::test_split_port_with_host_port",
"tests/utils_test.py::PortsTest::test_split_port_with_protocol",
"tests/utils_test.py::ExcludePathsTest::test_directory",
"tests/utils_test.py::ExcludePathsTest::test_directory_with_single_exception",
"tests/utils_test.py::ExcludePathsTest::test_directory_with_subdir_exception",
"tests/utils_test.py::ExcludePathsTest::test_directory_with_trailing_slash",
"tests/utils_test.py::ExcludePathsTest::test_directory_with_wildcard_exception",
"tests/utils_test.py::ExcludePathsTest::test_exclude_custom_dockerfile",
"tests/utils_test.py::ExcludePathsTest::test_exclude_dockerfile_dockerignore",
"tests/utils_test.py::ExcludePathsTest::test_no_dupes",
"tests/utils_test.py::ExcludePathsTest::test_no_excludes",
"tests/utils_test.py::ExcludePathsTest::test_question_mark",
"tests/utils_test.py::ExcludePathsTest::test_single_filename",
"tests/utils_test.py::ExcludePathsTest::test_single_filename_trailing_slash",
"tests/utils_test.py::ExcludePathsTest::test_single_subdir_single_filename",
"tests/utils_test.py::ExcludePathsTest::test_single_subdir_wildcard_filename",
"tests/utils_test.py::ExcludePathsTest::test_subdirectory",
"tests/utils_test.py::ExcludePathsTest::test_wildcard_exclude",
"tests/utils_test.py::ExcludePathsTest::test_wildcard_filename_end",
"tests/utils_test.py::ExcludePathsTest::test_wildcard_filename_start",
"tests/utils_test.py::ExcludePathsTest::test_wildcard_subdir_single_filename",
"tests/utils_test.py::ExcludePathsTest::test_wildcard_subdir_wildcard_filename",
"tests/utils_test.py::ExcludePathsTest::test_wildcard_with_exception",
"tests/utils_test.py::ExcludePathsTest::test_wildcard_with_wildcard_exception"
]
| []
| Apache License 2.0 | 246 | [
"docker/utils/utils.py"
]
| [
"docker/utils/utils.py"
]
|
MITLibraries__oastats-backend-46 | ed278663c240daf3a71571f94812bc9f5b2da9dd | 2015-09-24 20:20:13 | ed278663c240daf3a71571f94812bc9f5b2da9dd | diff --git a/pipeline/summary.py b/pipeline/summary.py
index 982c7f5..c3a4360 100644
--- a/pipeline/summary.py
+++ b/pipeline/summary.py
@@ -29,6 +29,7 @@ def index(requests, solr_url):
request.get('authors', []))),
'author_name': list(map(itemgetter('name'),
request.get('authors', []))),
+ 'author': list(map(join_author, request.get('authors', [])))
}
solr.write(doc)
@@ -209,4 +210,8 @@ def split_author(author):
except ValueError:
return
if mitid and name:
- return {'mitid': int(mitid), 'name': name}
+ return {'mitid': mitid, 'name': name}
+
+
+def join_author(author):
+ return "%s:%s" % (author.get('mitid', ''), author.get('name', ''))
| Add combined author/MIT ID field to Solr during summary
Changes in the summary script depend on a string field with `author:mitid`. This is in addition to the two separate `author` and `mitid` fields. | MITLibraries/oastats-backend | diff --git a/tests/integration/test_cli.py b/tests/integration/test_cli.py
index 34933b2..757d504 100644
--- a/tests/integration/test_cli.py
+++ b/tests/integration/test_cli.py
@@ -62,7 +62,13 @@ def test_index_adds_mongo_records_to_solr(runner, solr_port, mongo_port):
r = solr.search('*:*')
assert len(r) == 2
doc = next(iter(r))
- assert doc['title'] == 'The Foobar'
+ assert doc == {'title': 'The Foobar', 'handle': 'foobar', 'country': 'USA',
+ 'time': '2015-01-01T00:00:00Z',
+ 'dlc_display': ['Stuff', 'Things'],
+ 'dlc_canonical': ['Dept of Stuff', 'Dept of Things'],
+ 'author_id': ['1234', '5678'],
+ 'author_name': ['Bar, Foo', 'Baz, Foo'],
+ 'author': ['1234:Bar, Foo', '5678:Baz, Foo']}
@pytest.mark.usefixtures('load_mongo_records', 'load_solr_records')
diff --git a/tests/unit/test_summary.py b/tests/unit/test_summary.py
index b86c192..034d163 100644
--- a/tests/unit/test_summary.py
+++ b/tests/unit/test_summary.py
@@ -75,7 +75,8 @@ def test_index_maps_mongo_request_to_solr(solr, mongo_req):
'dlc_display': ['Stuff n Such', 'Other Things'],
'dlc_canonical': ['Dept of Stuff', 'Dept of Things'],
'author_id': ['1234', '7890'],
- 'author_name': ['Captain Snuggles', 'Muffinpants']
+ 'author_name': ['Captain Snuggles', 'Muffinpants'],
+ 'author': ['1234:Captain Snuggles', '7890:Muffinpants']
}]
@@ -126,7 +127,7 @@ def test_authors_filters_out_authors_with_empty_mitid(mongo):
def test_get_author_returns_solr_query():
- assert get_author({'mitid': 1234, 'name': 'Fluffy'}) == \
+ assert get_author({'mitid': '1234', 'name': 'Fluffy'}) == \
('author_id:"1234"', {'rows': 0, 'group': 'true',
'group.field': 'handle', 'group.ngroups': 'true'})
@@ -181,8 +182,8 @@ def test_create_handle_creates_mongo_insert(handle_result):
{'country': 'ISL', 'downloads': 4}],
'dates': [{'date': '2015-01-01', 'downloads': 3},
{'date': '2015-02-01', 'downloads': 5}],
- 'parents': [{'mitid': 1234, 'name': 'Fluffy'},
- {'mitid': 5678, 'name': 'Captain Snuggles'}]
+ 'parents': [{'mitid': '1234', 'name': 'Fluffy'},
+ {'mitid': '5678', 'name': 'Captain Snuggles'}]
}
}
@@ -214,9 +215,17 @@ def test_dictify_converts_facet_count_to_dictionary():
def test_split_author_turns_string_into_compound_field():
- assert split_author('1234:Foo Bar') == {'mitid': 1234, 'name': 'Foo Bar'}
+ assert split_author('1234:Foo Bar') == {'mitid': '1234', 'name': 'Foo Bar'}
def test_split_author_returns_none_for_invalid_author():
assert split_author(':Foo Bar') is None
assert split_author('Foo Bar') is None
+
+
+def test_join_author_joins_author_parts():
+ assert join_author({'name': 'Cat, Lucy', 'mitid': '1234'}) == \
+ '1234:Cat, Lucy'
+
+def test_join_author_uses_empty_string_for_missing_items():
+ assert join_author({'name': 'Cat, Lucy'}) == ':Cat, Lucy'
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"mock",
"mongobox"
],
"pre_install": null,
"python": "3.4",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | apache-log-parser==1.7.0
arrow==0.6.0
attrs==22.2.0
certifi==2021.5.30
click==5.1
futures==2.2.0
geoip2==2.2.0
importlib-metadata==4.8.3
iniconfig==1.1.1
maxminddb==1.2.0
mock==5.2.0
mongobox==0.1.8
-e git+https://github.com/MITLibraries/oastats-backend.git@ed278663c240daf3a71571f94812bc9f5b2da9dd#egg=OAStats_Pipeline
packaging==21.3
pluggy==1.0.0
py==1.11.0
pycountry==1.15
pymongo==3.0.3
pyparsing==3.1.4
pysolr==3.3.2
pytest==7.0.1
python-dateutil==2.4.2
PyYAML==3.11
requests==2.7.0
six==1.9.0
tomli==1.2.3
typing_extensions==4.1.1
ua-parser==0.4.1
user-agents==1.0.1
zipp==3.6.0
| name: oastats-backend
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- apache-log-parser==1.7.0
- arrow==0.6.0
- attrs==22.2.0
- click==5.1
- futures==2.2.0
- geoip2==2.2.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- maxminddb==1.2.0
- mock==5.2.0
- mongobox==0.1.8
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pycountry==1.15
- pymongo==3.0.3
- pyparsing==3.1.4
- pysolr==3.3.2
- pytest==7.0.1
- python-dateutil==2.4.2
- pyyaml==3.11
- requests==2.7.0
- six==1.9.0
- tomli==1.2.3
- typing-extensions==4.1.1
- ua-parser==0.4.1
- user-agents==1.0.1
- wheel==0.24.0
- zipp==3.6.0
prefix: /opt/conda/envs/oastats-backend
| [
"tests/unit/test_summary.py::test_index_maps_mongo_request_to_solr",
"tests/unit/test_summary.py::test_create_handle_creates_mongo_insert",
"tests/unit/test_summary.py::test_split_author_turns_string_into_compound_field",
"tests/unit/test_summary.py::test_join_author_joins_author_parts",
"tests/unit/test_summary.py::test_join_author_uses_empty_string_for_missing_items"
]
| []
| [
"tests/unit/test_summary.py::test_index_adds_requests_to_solr",
"tests/unit/test_summary.py::test_query_solr_merges_query_params",
"tests/unit/test_summary.py::test_query_solr_queries_solr",
"tests/unit/test_summary.py::test_query_solr_sets_default_params",
"tests/unit/test_summary.py::test_get_author_returns_solr_query",
"tests/unit/test_summary.py::test_create_author_creates_mongo_insert",
"tests/unit/test_summary.py::test_get_dlc_returns_solr_query",
"tests/unit/test_summary.py::test_create_dlc_creates_mongo_insert",
"tests/unit/test_summary.py::test_get_handle_returns_solr_query",
"tests/unit/test_summary.py::test_get_overall_returns_solr_query",
"tests/unit/test_summary.py::test_create_overall_creates_mongo_insert",
"tests/unit/test_summary.py::test_dictify_converts_facet_count_to_dictionary",
"tests/unit/test_summary.py::test_split_author_returns_none_for_invalid_author"
]
| []
| Apache License 2.0 | 247 | [
"pipeline/summary.py"
]
| [
"pipeline/summary.py"
]
|
|
peterbe__premailer-138 | e5e1feb7d63fc7499702d70e3197ad3d293e90bf | 2015-09-25 22:24:07 | e5e1feb7d63fc7499702d70e3197ad3d293e90bf | lavr: csstext_to_pairs results are stripped and sorted again. | diff --git a/premailer/merge_style.py b/premailer/merge_style.py
index 0fe93f7..9842e63 100644
--- a/premailer/merge_style.py
+++ b/premailer/merge_style.py
@@ -1,5 +1,6 @@
import cssutils
import threading
+from operator import itemgetter
def csstext_to_pairs(csstext):
@@ -10,11 +11,10 @@ def csstext_to_pairs(csstext):
# The lock is required to avoid ``cssutils`` concurrency
# issues documented in issue #65
with csstext_to_pairs._lock:
- parsed = cssutils.css.CSSVariablesDeclaration(csstext)
- return [
- (key.strip(), parsed.getVariableValue(key).strip())
- for key in sorted(parsed)
- ]
+ return sorted([(prop.name.strip(), prop.propertyValue.cssText.strip())
+ for prop in cssutils.parseStyle(csstext)],
+ key=itemgetter(0))
+
csstext_to_pairs._lock = threading.RLock()
| SyntaxErr on <h1> with !important
Feeding the following into `transform()` results in a `SyntaxErr`:
```
<style type="text/css">
h1 { border:1px solid black }
p { color:red;}
</style>
<p>Hey</p>
<h1 style="display: block;font-family: Helvetica;font-size: 26px;font-style: normal;font-weight: bold;line-height: 100%;letter-spacing: normal;margin-top: 0;margin-right: 0;margin-bottom: 10px;margin-left: 0;text-align: left;color: #202020 !important;">Some Stuff</h1>
```
Results in:
```
SyntaxErr: PropertyValue: Missing token for production Choice(ColorValue, Dimension, URIValue, Value, variable, MSValue, CSSCalc, function): ('CHAR', '!', 1, 230)
```
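For reference, a trimmed version of the HTML above is enough to trigger it; the `<style>` rule must match an element that also carries an inline style, since that is what gets merged:
```python
from premailer import transform

html = '''<style>h1 { border: 1px solid black }</style>
<h1 style="color: #202020 !important;">Some Stuff</h1>'''
transform(html)  # raised xml.dom.SyntaxErr before this change
```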
Removing the `!important` from the inline style on the H1 allows it to be inlined as expected. | peterbe/premailer | diff --git a/premailer/tests/test_merge_style.py b/premailer/tests/test_merge_style.py
index 7a8a215..0d4d11f 100644
--- a/premailer/tests/test_merge_style.py
+++ b/premailer/tests/test_merge_style.py
@@ -1,7 +1,5 @@
from __future__ import absolute_import, unicode_literals
import unittest
-import xml
-from nose.tools import raises
from premailer.merge_style import csstext_to_pairs, merge_styles
@@ -14,9 +12,13 @@ class TestMergeStyle(unittest.TestCase):
parsed_csstext = csstext_to_pairs(csstext)
self.assertEqual(('font-size', '1px'), parsed_csstext[0])
- @raises(xml.dom.SyntaxErr)
def test_inline_invalid_syntax(self):
- # inline shouldn't have those as I understand
- # but keep the behaviour
+ # Invalid syntax does not raise
inline = '{color:pink} :hover{color:purple} :active{color:red}'
merge_styles(inline, [], [])
+
+ def test_important_rule(self):
+ # No exception after #133
+ csstext = 'font-size:1px !important'
+ parsed_csstext = csstext_to_pairs(csstext)
+ self.assertEqual(('font-size', '1px'), parsed_csstext[0])
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": [],
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"mock",
"coverage",
"pytest"
],
"pre_install": [],
"python": "3.4",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
coverage==6.2
cssselect==1.1.0
cssutils==2.3.1
importlib-metadata==4.8.3
iniconfig==1.1.1
lxml==5.3.1
mock==5.2.0
nose==1.3.7
packaging==21.3
pluggy==1.0.0
-e git+https://github.com/peterbe/premailer.git@e5e1feb7d63fc7499702d70e3197ad3d293e90bf#egg=premailer
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: premailer
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- cssselect==1.1.0
- cssutils==2.3.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- lxml==5.3.1
- mock==5.2.0
- nose==1.3.7
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/premailer
| [
"premailer/tests/test_merge_style.py::TestMergeStyle::test_important_rule",
"premailer/tests/test_merge_style.py::TestMergeStyle::test_inline_invalid_syntax"
]
| []
| [
"premailer/tests/test_merge_style.py::TestMergeStyle::test_csstext_to_pairs"
]
| []
| BSD 3-Clause "New" or "Revised" License | 249 | [
"premailer/merge_style.py"
]
| [
"premailer/merge_style.py"
]
|
keleshev__schema-85 | 29286c1f9cce20cf70f2b15d9247f2ca6ef1d6c9 | 2015-09-26 00:20:07 | eb7670f0f4615195393dc5350d49fa9a33304137 | codecov-io: ## [Current coverage][1] is `98.07%`
> Merging **#85** into **master** will increase coverage by **+0.05%** as of [`ea3e9e6`][3]
```diff
@@ master #85 diff @@
======================================
Files 1 1
Stmts 152 156 +4
Branches 0 0
Methods 0 0
======================================
+ Hit 149 153 +4
Partial 0 0
Missed 3 3
```
> Review entire [Coverage Diff][4] as of [`ea3e9e6`][3]
[1]: https://codecov.io/github/keleshev/schema?ref=ea3e9e6de11ed22e995df1b30ede806b9912bba5
[2]: https://codecov.io/github/keleshev/schema/features/suggestions?ref=ea3e9e6de11ed22e995df1b30ede806b9912bba5
[3]: https://codecov.io/github/keleshev/schema/commit/ea3e9e6de11ed22e995df1b30ede806b9912bba5
[4]: https://codecov.io/github/keleshev/schema/compare/e94b7144f3016654d1360eb1c070fd2db0d54a43...ea3e9e6de11ed22e995df1b30ede806b9912bba5
> Powered by [Codecov](https://codecov.io). Updated on successful CI builds.
sjakobi: Anybody know a better name than `callable_str`?
And does that function possibly need `_` as a prefix?
skorokithakis: I think that the method does need an underscore as a prefix, it's not meant to be exported by the module. Other than that, I'm ready to merge this.
skorokithakis: Oops, looks like there's a merge conflict. Can you resolve that so I can merge this? | diff --git a/schema.py b/schema.py
index 1ecf845..d24744d 100644
--- a/schema.py
+++ b/schema.py
@@ -70,7 +70,7 @@ class Use(object):
except SchemaError as x:
raise SchemaError([None] + x.autos, [self._error] + x.errors)
except BaseException as x:
- f = self._callable.__name__
+ f = _callable_str(self._callable)
raise SchemaError('%s(%r) raised %r' % (f, data, x), self._error)
@@ -176,7 +176,7 @@ class Schema(object):
raise SchemaError('%r.validate(%r) raised %r' % (s, data, x),
self._error)
if flavor == CALLABLE:
- f = s.__name__
+ f = _callable_str(s)
try:
if s(data):
return data
@@ -211,3 +211,9 @@ class Optional(Schema):
'"%r" is too complex.' % (self._schema,))
self.default = default
self.key = self._schema
+
+
+def _callable_str(callable_):
+ if hasattr(callable_, '__name__'):
+ return callable_.__name__
+ return str(callable_)
| doesn't work with operator.methodcaller
```python
from operator import methodcaller
from schema import Schema

f = methodcaller('endswith', '.csv')
assert f('test.csv')
Schema(f).validate('test.csv')
# AttributeError: 'operator.methodcaller' object has no attribute '__name__'
```
We can't assume that all callables have a `__name__`, it would seem. | keleshev/schema | diff --git a/test_schema.py b/test_schema.py
index ad49343..967dec0 100644
--- a/test_schema.py
+++ b/test_schema.py
@@ -1,5 +1,6 @@
from __future__ import with_statement
from collections import defaultdict, namedtuple
+from operator import methodcaller
import os
from pytest import raises
@@ -383,8 +384,18 @@ def test_missing_keys_exception_with_non_str_dict_keys():
try:
Schema({1: 'x'}).validate(dict())
except SchemaError as e:
- assert (e.args[0] ==
- "Missing keys: 1")
+ assert e.args[0] == "Missing keys: 1"
+ raise
+
+
+def test_issue_56_cant_rely_on_callables_to_have_name():
+ s = Schema(methodcaller('endswith', '.csv'))
+ assert s.validate('test.csv') == 'test.csv'
+ with SE:
+ try:
+ s.validate('test.py')
+ except SchemaError as e:
+ assert "operator.methodcaller" in e.args[0]
raise
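Taken together with the `_callable_str` fallback in the patch, the new test above encodes the observable behavior; a short usage sketch (hypothetical session) of what callers see after the fix:

```python
from operator import methodcaller
from schema import Schema, SchemaError

s = Schema(methodcaller('endswith', '.csv'))
assert s.validate('test.csv') == 'test.csv'
try:
    s.validate('test.py')
except SchemaError as e:
    # the error text now falls back to str(callable_), so it names
    # 'operator.methodcaller' instead of raising AttributeError
    assert 'operator.methodcaller' in e.args[0]
```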
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup==1.2.2
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytest-cov==6.0.0
-e git+https://github.com/keleshev/schema.git@29286c1f9cce20cf70f2b15d9247f2ca6ef1d6c9#egg=schema
tomli==2.2.1
| name: schema
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-cov==6.0.0
- tomli==2.2.1
prefix: /opt/conda/envs/schema
| [
"test_schema.py::test_issue_56_cant_rely_on_callables_to_have_name"
]
| []
| [
"test_schema.py::test_schema",
"test_schema.py::test_validate_file",
"test_schema.py::test_and",
"test_schema.py::test_or",
"test_schema.py::test_validate_list",
"test_schema.py::test_list_tuple_set_frozenset",
"test_schema.py::test_strictly",
"test_schema.py::test_dict",
"test_schema.py::test_dict_keys",
"test_schema.py::test_dict_optional_keys",
"test_schema.py::test_dict_optional_defaults",
"test_schema.py::test_dict_subtypes",
"test_schema.py::test_complex",
"test_schema.py::test_nice_errors",
"test_schema.py::test_use_error_handling",
"test_schema.py::test_or_error_handling",
"test_schema.py::test_and_error_handling",
"test_schema.py::test_schema_error_handling",
"test_schema.py::test_use_json",
"test_schema.py::test_error_reporting",
"test_schema.py::test_schema_repr",
"test_schema.py::test_validate_object",
"test_schema.py::test_issue_9_prioritized_key_comparison",
"test_schema.py::test_issue_9_prioritized_key_comparison_in_dicts",
"test_schema.py::test_missing_keys_exception_with_non_str_dict_keys",
"test_schema.py::test_exception_handling_with_bad_validators"
]
| []
| MIT License | 250 | [
"schema.py"
]
| [
"schema.py"
]
|
mne-tools__mne-python-2495 | a9cc9c9b5e9433fd06258e05e41cc7edacfe4391 | 2015-09-26 16:48:32 | 632e49f0470fc9526936dbb474fd6aa46501fe4d | diff --git a/doc/manual/cookbook.rst b/doc/manual/cookbook.rst
index ac96a2780..17a256d13 100644
--- a/doc/manual/cookbook.rst
+++ b/doc/manual/cookbook.rst
@@ -347,9 +347,9 @@ ways:
- Employ empty room data (collected without the subject) to
calculate the full noise covariance matrix. This is recommended
for analyzing ongoing spontaneous activity. This can be done using
- :func:`mne.compute_raw_data_covariance` as::
+ :func:`mne.compute_raw_covariance` as::
- >>> cov = mne.compute_raw_data_covariance(raw_erm)
+ >>> cov = mne.compute_raw_covariance(raw_erm)
- Employ a section of continuous raw data collected in the presence
of the subject to calculate the full noise covariance matrix. This
@@ -360,7 +360,7 @@ ways:
(``*> 200 s``) segment of data with epileptic spikes present provided
that the spikes occur infrequently and that the segment is apparently
stationary with respect to background brain activity. This can also
- use :func:`mne.compute_raw_data_covariance`.
+ use :func:`mne.compute_raw_covariance`.
See :ref:`covariance` for more information.
diff --git a/doc/python_reference.rst b/doc/python_reference.rst
index e700b9131..4a9df6655 100644
--- a/doc/python_reference.rst
+++ b/doc/python_reference.rst
@@ -512,7 +512,7 @@ Covariance
:template: function.rst
compute_covariance
- compute_raw_data_covariance
+ compute_raw_covariance
make_ad_hoc_cov
read_cov
write_cov
diff --git a/examples/preprocessing/plot_estimate_covariance_matrix_raw.py b/examples/preprocessing/plot_estimate_covariance_matrix_raw.py
index a0d799cc0..efafce8e8 100644
--- a/examples/preprocessing/plot_estimate_covariance_matrix_raw.py
+++ b/examples/preprocessing/plot_estimate_covariance_matrix_raw.py
@@ -29,7 +29,7 @@ picks = mne.pick_types(raw.info, meg=True, eeg=True, stim=False, eog=True,
reject = dict(eeg=80e-6, eog=150e-6)
# Compute the covariance from the raw data
-cov = mne.compute_raw_data_covariance(raw, picks=picks, reject=reject)
+cov = mne.compute_raw_covariance(raw, picks=picks, reject=reject)
print(cov)
###############################################################################
diff --git a/examples/preprocessing/plot_xdawn_denoising.py b/examples/preprocessing/plot_xdawn_denoising.py
index b861a7cd0..76b7ce824 100644
--- a/examples/preprocessing/plot_xdawn_denoising.py
+++ b/examples/preprocessing/plot_xdawn_denoising.py
@@ -31,7 +31,7 @@ efficient sensor selection in a P300 BCI. In Signal Processing Conference,
# License: BSD (3-clause)
-from mne import (io, compute_raw_data_covariance, read_events, pick_types,
+from mne import (io, compute_raw_covariance, read_events, pick_types,
Epochs)
from mne.datasets import sample
from mne.preprocessing import Xdawn
@@ -65,7 +65,7 @@ epochs = Epochs(raw, events, event_id, tmin, tmax, proj=False,
plot_epochs_image(epochs['vis_r'], picks=[230], vmin=-500, vmax=500)
# Estimates signal covariance
-signal_cov = compute_raw_data_covariance(raw, picks=picks)
+signal_cov = compute_raw_covariance(raw, picks=picks)
# Xdawn instance
xd = Xdawn(n_components=2, signal_cov=signal_cov)
diff --git a/mne/__init__.py b/mne/__init__.py
index 43e82ce21..19cc4a46c 100644
--- a/mne/__init__.py
+++ b/mne/__init__.py
@@ -36,7 +36,7 @@ from .bem import (make_sphere_model, make_bem_model, make_bem_solution,
read_bem_solution, write_bem_solution)
from .cov import (read_cov, write_cov, Covariance,
compute_covariance, compute_raw_data_covariance,
- whiten_evoked, make_ad_hoc_cov)
+ compute_raw_covariance, whiten_evoked, make_ad_hoc_cov)
from .event import (read_events, write_events, find_events, merge_events,
pick_events, make_fixed_length_events, concatenate_events,
find_stim_steps)
diff --git a/mne/cov.py b/mne/cov.py
index 0cd17ab27..bbf7fff5e 100644
--- a/mne/cov.py
+++ b/mne/cov.py
@@ -33,6 +33,7 @@ from .defaults import _handle_default
from .epochs import _is_good
from .utils import (check_fname, logger, verbose, estimate_rank,
_compute_row_norms, check_sklearn_version, _time_mask)
+from .utils import deprecated
from .externals.six.moves import zip
from .externals.six import string_types
@@ -100,7 +101,7 @@ class Covariance(dict):
See Also
--------
compute_covariance
- compute_raw_data_covariance
+ compute_raw_covariance
make_ad_hoc_cov
read_cov
"""
@@ -275,7 +276,7 @@ def read_cov(fname, verbose=None):
See Also
--------
- write_cov, compute_covariance, compute_raw_data_covariance
+ write_cov, compute_covariance, compute_raw_covariance
"""
check_fname(fname, 'covariance', ('-cov.fif', '-cov.fif.gz'))
f, tree = fiff_open(fname)[:2]
@@ -338,10 +339,20 @@ def _check_n_samples(n_samples, n_chan):
logger.warning(text)
+@deprecated('"compute_raw_data_covariance" is deprecated and will be '
+ 'removed in MNE-0.11. Please use compute_raw_covariance instead')
@verbose
def compute_raw_data_covariance(raw, tmin=None, tmax=None, tstep=0.2,
reject=None, flat=None, picks=None,
verbose=None):
+ return compute_raw_covariance(raw, tmin, tmax, tstep,
+ reject, flat, picks, verbose)
+
+
+@verbose
+def compute_raw_covariance(raw, tmin=None, tmax=None, tstep=0.2,
+ reject=None, flat=None, picks=None,
+ verbose=None):
"""Estimate noise covariance matrix from a continuous segment of raw data.
It is typically useful to estimate a noise covariance
@@ -557,7 +568,7 @@ def compute_covariance(epochs, keep_sample_mean=True, tmin=None, tmax=None,
See Also
--------
- compute_raw_data_covariance : Estimate noise covariance from raw data
+ compute_raw_covariance : Estimate noise covariance from raw data
References
----------
| rename compute_raw_data_covariance to compute_raw_covariance
Any objection to renaming compute_raw_data_covariance to compute_raw_covariance?
The `_data` part of the name is not consistent with the rest of the API; see e.g. compute_raw_psd. | mne-tools/mne-python | diff --git a/mne/beamformer/tests/test_lcmv.py b/mne/beamformer/tests/test_lcmv.py
index b9435b236..d92c60a11 100644
--- a/mne/beamformer/tests/test_lcmv.py
+++ b/mne/beamformer/tests/test_lcmv.py
@@ -205,7 +205,7 @@ def test_lcmv_raw():
picks = mne.pick_types(raw.info, meg=True, exclude='bads',
selection=left_temporal_channels)
- data_cov = mne.compute_raw_data_covariance(raw, tmin=tmin, tmax=tmax)
+ data_cov = mne.compute_raw_covariance(raw, tmin=tmin, tmax=tmax)
stc = lcmv_raw(raw, forward, noise_cov, data_cov, reg=0.01, label=label,
start=start, stop=stop, picks=picks)
diff --git a/mne/preprocessing/tests/test_maxwell.py b/mne/preprocessing/tests/test_maxwell.py
index 118f74aa9..c7f07ff0a 100644
--- a/mne/preprocessing/tests/test_maxwell.py
+++ b/mne/preprocessing/tests/test_maxwell.py
@@ -9,7 +9,7 @@ from numpy.testing import (assert_equal, assert_allclose,
assert_array_almost_equal)
from nose.tools import assert_true, assert_raises
-from mne import compute_raw_data_covariance, pick_types
+from mne import compute_raw_covariance, pick_types
from mne.cov import _estimate_rank_meeg_cov
from mne.datasets import testing
from mne.forward._make_forward import _prep_meg_channels
@@ -142,8 +142,8 @@ def test_maxwell_filter_additional():
rtol=1e-6, atol=1e-20)
# Test rank of covariance matrices for raw and SSS processed data
- cov_raw = compute_raw_data_covariance(raw)
- cov_sss = compute_raw_data_covariance(raw_sss)
+ cov_raw = compute_raw_covariance(raw)
+ cov_sss = compute_raw_covariance(raw_sss)
scalings = None
cov_raw_rank = _estimate_rank_meeg_cov(cov_raw['data'], raw.info, scalings)
diff --git a/mne/preprocessing/tests/test_xdawn.py b/mne/preprocessing/tests/test_xdawn.py
index 3be444350..453ead003 100644
--- a/mne/preprocessing/tests/test_xdawn.py
+++ b/mne/preprocessing/tests/test_xdawn.py
@@ -7,7 +7,7 @@ import os.path as op
from nose.tools import (assert_equal, assert_raises)
from numpy.testing import assert_array_equal
from mne import (io, Epochs, read_events, pick_types,
- compute_raw_data_covariance)
+ compute_raw_covariance)
from mne.utils import requires_sklearn, run_tests_if_main
from mne.preprocessing.xdawn import Xdawn
@@ -56,7 +56,7 @@ def test_xdawn_fit():
# ========== with signal cov provided ====================
# provide covariance object
- signal_cov = compute_raw_data_covariance(raw, picks=picks)
+ signal_cov = compute_raw_covariance(raw, picks=picks)
xd = Xdawn(n_components=2, correct_overlap=False,
signal_cov=signal_cov, reg=None)
xd.fit(epochs)
diff --git a/mne/tests/test_cov.py b/mne/tests/test_cov.py
index 3f9fc1dd2..6619b047c 100644
--- a/mne/tests/test_cov.py
+++ b/mne/tests/test_cov.py
@@ -18,7 +18,7 @@ from mne.cov import (regularize, whiten_evoked, _estimate_rank_meeg_cov,
_undo_scaling_cov)
from mne import (read_cov, write_cov, Epochs, merge_events,
- find_events, compute_raw_data_covariance,
+ find_events, compute_raw_covariance,
compute_covariance, read_evokeds, compute_proj_raw,
pick_channels_cov, pick_channels, pick_types, pick_info,
make_ad_hoc_cov)
@@ -98,7 +98,7 @@ def test_cov_estimation_on_raw_segment():
"""
tempdir = _TempDir()
raw = Raw(raw_fname, preload=False)
- cov = compute_raw_data_covariance(raw)
+ cov = compute_raw_covariance(raw)
cov_mne = read_cov(erm_cov_fname)
assert_true(cov_mne.ch_names == cov.ch_names)
assert_true(linalg.norm(cov.data - cov_mne.data, ord='fro') /
@@ -113,7 +113,7 @@ def test_cov_estimation_on_raw_segment():
# test with a subset of channels
picks = pick_channels(raw.ch_names, include=raw.ch_names[:5])
- cov = compute_raw_data_covariance(raw, picks=picks)
+ cov = compute_raw_covariance(raw, picks=picks)
assert_true(cov_mne.ch_names[:5] == cov.ch_names)
assert_true(linalg.norm(cov.data - cov_mne.data[picks][:, picks],
ord='fro') / linalg.norm(cov.data, ord='fro') < 1e-4)
@@ -121,7 +121,7 @@ def test_cov_estimation_on_raw_segment():
raw_2 = raw.crop(0, 1)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
- cov = compute_raw_data_covariance(raw_2)
+ cov = compute_raw_covariance(raw_2)
assert_true(len(w) == 1)
@@ -264,12 +264,12 @@ def test_rank():
raw_sss = Raw(hp_fif_fname)
raw_sss.add_proj(compute_proj_raw(raw_sss))
- cov_sample = compute_raw_data_covariance(raw_sample)
- cov_sample_proj = compute_raw_data_covariance(
+ cov_sample = compute_raw_covariance(raw_sample)
+ cov_sample_proj = compute_raw_covariance(
raw_sample.copy().apply_proj())
- cov_sss = compute_raw_data_covariance(raw_sss)
- cov_sss_proj = compute_raw_data_covariance(
+ cov_sss = compute_raw_covariance(raw_sss)
+ cov_sss_proj = compute_raw_covariance(
raw_sss.copy().apply_proj())
picks_all_sample = pick_types(raw_sample.info, meg=True, eeg=True)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 6
} | 0.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"numpy>=1.16.0",
"pandas>=1.0.0",
"scikit-learn",
"h5py",
"pysurfer",
"nose",
"nose-timer",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.7",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | apptools==5.2.1
certifi @ file:///croot/certifi_1671487769961/work/certifi
configobj==5.0.9
cycler==0.11.0
envisage==7.0.3
exceptiongroup==1.2.2
fonttools==4.38.0
h5py==3.8.0
importlib-metadata==6.7.0
importlib-resources==5.12.0
iniconfig==2.0.0
joblib==1.3.2
kiwisolver==1.4.5
matplotlib==3.5.3
mayavi==4.8.1
-e git+https://github.com/mne-tools/mne-python.git@a9cc9c9b5e9433fd06258e05e41cc7edacfe4391#egg=mne
nibabel==4.0.2
nose==1.3.7
nose-timer==1.0.1
numpy==1.21.6
packaging==24.0
pandas==1.3.5
Pillow==9.5.0
pluggy==1.2.0
pyface==8.0.0
Pygments==2.17.2
pyparsing==3.1.4
pysurfer==0.11.2
pytest==7.4.4
python-dateutil==2.9.0.post0
pytz==2025.2
scikit-learn==1.0.2
scipy==1.7.3
six==1.17.0
threadpoolctl==3.1.0
tomli==2.0.1
traits==6.4.3
traitsui==8.0.0
typing_extensions==4.7.1
vtk==9.3.1
zipp==3.15.0
| name: mne-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=22.3.1=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- apptools==5.2.1
- configobj==5.0.9
- cycler==0.11.0
- envisage==7.0.3
- exceptiongroup==1.2.2
- fonttools==4.38.0
- h5py==3.8.0
- importlib-metadata==6.7.0
- importlib-resources==5.12.0
- iniconfig==2.0.0
- joblib==1.3.2
- kiwisolver==1.4.5
- matplotlib==3.5.3
- mayavi==4.8.1
- nibabel==4.0.2
- nose==1.3.7
- nose-timer==1.0.1
- numpy==1.21.6
- packaging==24.0
- pandas==1.3.5
- pillow==9.5.0
- pluggy==1.2.0
- pyface==8.0.0
- pygments==2.17.2
- pyparsing==3.1.4
- pysurfer==0.11.2
- pytest==7.4.4
- python-dateutil==2.9.0.post0
- pytz==2025.2
- scikit-learn==1.0.2
- scipy==1.7.3
- six==1.17.0
- threadpoolctl==3.1.0
- tomli==2.0.1
- traits==6.4.3
- traitsui==8.0.0
- typing-extensions==4.7.1
- vtk==9.3.1
- zipp==3.15.0
prefix: /opt/conda/envs/mne-python
| [
"mne/preprocessing/tests/test_xdawn.py::test_xdawn_init",
"mne/preprocessing/tests/test_xdawn.py::test_xdawn_fit",
"mne/preprocessing/tests/test_xdawn.py::test_xdawn_apply_transform",
"mne/preprocessing/tests/test_xdawn.py::test_xdawn_regularization",
"mne/tests/test_cov.py::test_ad_hoc_cov",
"mne/tests/test_cov.py::test_arithmetic_cov",
"mne/tests/test_cov.py::test_regularize_cov",
"mne/tests/test_cov.py::test_evoked_whiten",
"mne/tests/test_cov.py::test_rank",
"mne/tests/test_cov.py::test_cov_scaling"
]
| [
"mne/tests/test_cov.py::test_io_cov",
"mne/tests/test_cov.py::test_cov_estimation_on_raw_segment",
"mne/tests/test_cov.py::test_cov_estimation_with_triggers",
"mne/tests/test_cov.py::test_auto_low_rank",
"mne/tests/test_cov.py::test_compute_covariance_auto_reg"
]
| []
| []
| BSD 3-Clause "New" or "Revised" License | 251 | [
"doc/manual/cookbook.rst",
"mne/__init__.py",
"examples/preprocessing/plot_estimate_covariance_matrix_raw.py",
"examples/preprocessing/plot_xdawn_denoising.py",
"mne/cov.py",
"doc/python_reference.rst"
]
| [
"doc/manual/cookbook.rst",
"mne/__init__.py",
"examples/preprocessing/plot_estimate_covariance_matrix_raw.py",
"examples/preprocessing/plot_xdawn_denoising.py",
"mne/cov.py",
"doc/python_reference.rst"
]
|
|
tornadoweb__tornado-1526 | 7493c94369299020eda0d452e7dda793073b7f63 | 2015-09-27 17:43:48 | 4ee9ba94de11aaa4f932560fa2b3d8ceb8c61d2a | diff --git a/.travis.yml b/.travis.yml
index 4b199eb8..0a184314 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -35,6 +35,7 @@ install:
# codecov tries to install the latest.
- if [[ $TRAVIS_PYTHON_VERSION == '3.2' ]]; then travis_retry pip install 'coverage<4.0'; fi
- travis_retry pip install codecov
+ - curl-config --version; pip freeze
script:
# Get out of the source directory before running tests to avoid PYTHONPATH
diff --git a/tornado/simple_httpclient.py b/tornado/simple_httpclient.py
index 81ed8873..074d18b8 100644
--- a/tornado/simple_httpclient.py
+++ b/tornado/simple_httpclient.py
@@ -462,9 +462,12 @@ class _HTTPConnection(httputil.HTTPMessageDelegate):
if self.request.expect_100_continue and first_line.code == 100:
self._write_body(False)
return
- self.headers = headers
self.code = first_line.code
self.reason = first_line.reason
+ self.headers = headers
+
+ if self._should_follow_redirect():
+ return
if self.request.header_callback is not None:
# Reassemble the start line.
@@ -473,14 +476,17 @@ class _HTTPConnection(httputil.HTTPMessageDelegate):
self.request.header_callback("%s: %s\r\n" % (k, v))
self.request.header_callback('\r\n')
+ def _should_follow_redirect(self):
+ return (self.request.follow_redirects and
+ self.request.max_redirects > 0 and
+ self.code in (301, 302, 303, 307))
+
def finish(self):
data = b''.join(self.chunks)
self._remove_timeout()
original_request = getattr(self.request, "original_request",
self.request)
- if (self.request.follow_redirects and
- self.request.max_redirects > 0 and
- self.code in (301, 302, 303, 307)):
+ if self._should_follow_redirect():
assert isinstance(self.request, _RequestProxy)
new_request = copy.copy(self.request.request)
new_request.url = urlparse.urljoin(self.request.url,
@@ -527,6 +533,9 @@ class _HTTPConnection(httputil.HTTPMessageDelegate):
self.stream.close()
def data_received(self, chunk):
+ if self._should_follow_redirect():
+ # We're going to follow a redirect so just discard the body.
+ return
if self.request.streaming_callback is not None:
self.request.streaming_callback(chunk)
else:
diff --git a/tornado/web.py b/tornado/web.py
index afa9ca58..aa203495 100644
--- a/tornado/web.py
+++ b/tornado/web.py
@@ -1065,33 +1065,12 @@ class RequestHandler(object):
def current_user(self):
"""The authenticated user for this request.
- This is set in one of two ways:
+ This is a cached version of `get_current_user`, which you can
+ override to set the user based on, e.g., a cookie. If that
+ method is not overridden, this method always returns None.
- * A subclass may override `get_current_user()`, which will be called
- automatically the first time ``self.current_user`` is accessed.
- `get_current_user()` will only be called once per request,
- and is cached for future access::
-
- def get_current_user(self):
- user_cookie = self.get_secure_cookie("user")
- if user_cookie:
- return json.loads(user_cookie)
- return None
-
- * It may be set as a normal variable, typically from an overridden
- `prepare()`::
-
- @gen.coroutine
- def prepare(self):
- user_id_cookie = self.get_secure_cookie("user_id")
- if user_id_cookie:
- self.current_user = yield load_user(user_id_cookie)
-
- Note that `prepare()` may be a coroutine while `get_current_user()`
- may not, so the latter form is necessary if loading the user requires
- asynchronous operations.
-
- The user object may any type of the application's choosing.
+ We lazy-load the current user the first time this method is called
+ and cache the result after that.
"""
if not hasattr(self, "_current_user"):
self._current_user = self.get_current_user()
@@ -1102,10 +1081,7 @@ class RequestHandler(object):
self._current_user = value
def get_current_user(self):
- """Override to determine the current user from, e.g., a cookie.
-
- This method may not be a coroutine.
- """
+ """Override to determine the current user from, e.g., a cookie."""
return None
def get_login_url(self):
| tornado.httpclient.HTTPClient.fetch() executes callback on contents of redirect response
Assume the following example of using the HTTPClient to retrieve a file:
```python
>>> tornado.version
'4.2.1'
>>> try:
... fh = open('/tmp/foo.msi', 'wb')
... def handle_chunk(chunk):
... fh.write(chunk)
... tornado.httpclient.HTTPClient(max_body_size=107374182400).fetch('http://download.macromedia.com/get/flashplayer/current/licensing/win/install_flash_player_18_plugin.msi', method='GET', streaming_callback=handle_chunk)
... finally:
... fh.close()
...
HTTPResponse(_body=None,buffer=<_io.BytesIO object at 0x7f933e4a7470>,code=200,effective_url='http://fpdownload.macromedia.com/get/flashplayer/current/licensing/win/install_flash_player_18_plugin.msi',error=None,headers={'Content-Length': '19924480', 'Accept-Ranges': 'bytes', 'Server': 'Apache', 'Last-Modified': 'Fri, 07 Aug 2015 12:22:31 GMT', 'Connection': 'close', 'Etag': '"1300600-51cb7b0998fc0"', 'Date': 'Mon, 14 Sep 2015 17:24:22 GMT', 'Content-Type': 'application/x-msi'},reason='OK',request=<tornado.httpclient.HTTPRequest object at 0x7f933e196250>,request_time=4.273449897766113,time_info={})
>>> with open('/tmp/foo.msi', 'rb') as fh:
... first_288_bytes = fh.read(288)
...
>>> first_288_bytes
'<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">\n<html><head>\n<title>302 Found</title>\n</head><body>\n<h1>Found</h1>\n<p>The document has moved <a href="http://fpdownload.macromedia.com/get/flashplayer/current/licensing/win/install_flash_player_18_plugin.msi">here</a>.</p>\n</body></html>'
```
This results in a corrupted file, as the HTTP response from the 302 redirect is written to the destination file via the callback.
It's entirely possible that I am doing something wrong, but this looks like a bug. I can conceive of no way to reliably determine that the callback is being executed on a 302 redirect response, since the raw data from the chunk of the file is what is passed to the callback function, leaving nothing from the HTTPResponse object to be examined in order to determine whether or not to write the chunk to the destination file. | tornadoweb/tornado | diff --git a/tornado/test/httpclient_test.py b/tornado/test/httpclient_test.py
index ecc63e4a..6254c266 100644
--- a/tornado/test/httpclient_test.py
+++ b/tornado/test/httpclient_test.py
@@ -48,6 +48,7 @@ class PutHandler(RequestHandler):
class RedirectHandler(RequestHandler):
def prepare(self):
+ self.write('redirects can have bodies too')
self.redirect(self.get_argument("url"),
status=int(self.get_argument("status", "302")))
diff --git a/tornado/test/simple_httpclient_test.py b/tornado/test/simple_httpclient_test.py
index dc4c865b..5214c1e4 100644
--- a/tornado/test/simple_httpclient_test.py
+++ b/tornado/test/simple_httpclient_test.py
@@ -11,14 +11,15 @@ import socket
import ssl
import sys
+from tornado.escape import to_unicode
from tornado import gen
from tornado.httpclient import AsyncHTTPClient
from tornado.httputil import HTTPHeaders, ResponseStartLine
from tornado.ioloop import IOLoop
from tornado.log import gen_log
from tornado.netutil import Resolver, bind_sockets
-from tornado.simple_httpclient import SimpleAsyncHTTPClient, _default_ca_certs
-from tornado.test.httpclient_test import ChunkHandler, CountdownHandler, HelloWorldHandler
+from tornado.simple_httpclient import SimpleAsyncHTTPClient
+from tornado.test.httpclient_test import ChunkHandler, CountdownHandler, HelloWorldHandler, RedirectHandler
from tornado.test import httpclient_test
from tornado.testing import AsyncHTTPTestCase, AsyncHTTPSTestCase, AsyncTestCase, ExpectLog
from tornado.test.util import skipOnTravis, skipIfNoIPv6, refusing_port, unittest
@@ -145,6 +146,7 @@ class SimpleHTTPClientTestMixin(object):
url("/no_content_length", NoContentLengthHandler),
url("/echo_post", EchoPostHandler),
url("/respond_in_prepare", RespondInPrepareHandler),
+ url("/redirect", RedirectHandler),
], gzip=True)
def test_singleton(self):
@@ -416,6 +418,24 @@ class SimpleHTTPClientTestMixin(object):
expect_100_continue=True)
self.assertEqual(response.code, 403)
+ def test_streaming_follow_redirects(self):
+ # When following redirects, header and streaming callbacks
+ # should only be called for the final result.
+ # TODO(bdarnell): this test belongs in httpclient_test instead of
+ # simple_httpclient_test, but it fails with the version of libcurl
+ # available on travis-ci. Move it when that has been upgraded
+ # or we have a better framework to skip tests based on curl version.
+ headers = []
+ chunks = []
+ self.fetch("/redirect?url=/hello",
+ header_callback=headers.append,
+ streaming_callback=chunks.append)
+ chunks = list(map(to_unicode, chunks))
+ self.assertEqual(chunks, ['Hello world!'])
+ # Make sure we only got one set of headers.
+ num_start_lines = len([h for h in headers if h.startswith("HTTP/")])
+ self.assertEqual(num_start_lines, 1)
+
class SimpleHTTPClientTestCase(SimpleHTTPClientTestMixin, AsyncHTTPTestCase):
def setUp(self):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 3
} | 4.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"futures",
"mock",
"monotonic",
"trollius",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
futures==2.2.0
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
mock==5.2.0
monotonic==1.6
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
six==1.17.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
-e git+https://github.com/tornadoweb/tornado.git@7493c94369299020eda0d452e7dda793073b7f63#egg=tornado
trollius==2.1.post2
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: tornado
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- futures==2.2.0
- mock==5.2.0
- monotonic==1.6
- six==1.17.0
- trollius==2.1.post2
prefix: /opt/conda/envs/tornado
| [
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_streaming_follow_redirects",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_streaming_follow_redirects"
]
| [
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_no_content",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_no_content",
"tornado/test/simple_httpclient_test.py::MaxHeaderSizeTest::test_large_headers",
"tornado/test/simple_httpclient_test.py::MaxBodySizeTest::test_large_body"
]
| [
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_304_with_content_length",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_all_methods",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_basic_auth",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_basic_auth_explicit_mode",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_body_encoding",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_body_sanity_checks",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_chunked",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_chunked_close",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_configure_defaults",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_credentials_in_url",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_final_callback_stack_context",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_follow_redirect",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_future_http_error",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_future_http_error_no_raise",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_future_interface",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_header_callback",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_header_callback_stack_context",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_header_types",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_hello_world",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_patch_receives_payload",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_post",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_put_307",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_reuse_request_from_response",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_streaming_callback",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_streaming_stack_context",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_types",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_unsupported_auth_mode",
"tornado/test/httpclient_test.py::RequestProxyTest::test_bad_attribute",
"tornado/test/httpclient_test.py::RequestProxyTest::test_both_set",
"tornado/test/httpclient_test.py::RequestProxyTest::test_default_set",
"tornado/test/httpclient_test.py::RequestProxyTest::test_defaults_none",
"tornado/test/httpclient_test.py::RequestProxyTest::test_neither_set",
"tornado/test/httpclient_test.py::RequestProxyTest::test_request_set",
"tornado/test/httpclient_test.py::HTTPResponseTestCase::test_str",
"tornado/test/httpclient_test.py::SyncHTTPClientTest::test_sync_client",
"tornado/test/httpclient_test.py::SyncHTTPClientTest::test_sync_client_error",
"tornado/test/httpclient_test.py::HTTPRequestTestCase::test_body",
"tornado/test/httpclient_test.py::HTTPRequestTestCase::test_body_setter",
"tornado/test/httpclient_test.py::HTTPRequestTestCase::test_headers",
"tornado/test/httpclient_test.py::HTTPRequestTestCase::test_headers_setter",
"tornado/test/httpclient_test.py::HTTPRequestTestCase::test_if_modified_since",
"tornado/test/httpclient_test.py::HTTPRequestTestCase::test_null_headers_setter",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_304_with_content_length",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_all_methods",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_basic_auth",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_basic_auth_explicit_mode",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_body_encoding",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_body_sanity_checks",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_chunked",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_chunked_close",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_configure_defaults",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_credentials_in_url",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_final_callback_stack_context",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_follow_redirect",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_future_http_error",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_future_http_error_no_raise",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_future_interface",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_header_callback",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_header_callback_stack_context",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_header_types",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_hello_world",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_patch_receives_payload",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_post",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_put_307",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_reuse_request_from_response",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_streaming_callback",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_streaming_stack_context",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_types",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientCommonTestCase::test_unsupported_auth_mode",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_100_continue",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_100_continue_early_response",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_async_body_producer_chunked",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_async_body_producer_content_length",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_connection_limit",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_connection_refused",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_gzip",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_head_request",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_header_reuse",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_host_header",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_ipv6",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_max_redirects",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_no_content_length",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_options_request",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_queue_timeout",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_redirect_connection_limit",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_request_timeout",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_see_other_redirect",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_singleton",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_sync_body_producer_chunked",
"tornado/test/simple_httpclient_test.py::SimpleHTTPClientTestCase::test_sync_body_producer_content_length",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_100_continue",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_100_continue_early_response",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_async_body_producer_chunked",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_async_body_producer_content_length",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_connection_limit",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_connection_refused",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_error_logging",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_gzip",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_head_request",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_header_reuse",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_host_header",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_ipv6",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_max_redirects",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_no_content_length",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_options_request",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_queue_timeout",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_redirect_connection_limit",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_request_timeout",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_see_other_redirect",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_singleton",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_ssl_context",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_ssl_context_handshake_fail",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_ssl_options",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_ssl_options_handshake_fail",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_sync_body_producer_chunked",
"tornado/test/simple_httpclient_test.py::SimpleHTTPSClientTestCase::test_sync_body_producer_content_length",
"tornado/test/simple_httpclient_test.py::CreateAsyncHTTPClientTestCase::test_max_clients",
"tornado/test/simple_httpclient_test.py::HTTP100ContinueTestCase::test_100_continue",
"tornado/test/simple_httpclient_test.py::HTTP204NoContentTestCase::test_204_no_content",
"tornado/test/simple_httpclient_test.py::HostnameMappingTestCase::test_hostname_mapping",
"tornado/test/simple_httpclient_test.py::HostnameMappingTestCase::test_port_mapping",
"tornado/test/simple_httpclient_test.py::ResolveTimeoutTestCase::test_resolve_timeout",
"tornado/test/simple_httpclient_test.py::MaxHeaderSizeTest::test_small_headers",
"tornado/test/simple_httpclient_test.py::MaxBodySizeTest::test_small_body",
"tornado/test/simple_httpclient_test.py::MaxBufferSizeTest::test_large_body"
]
| []
| Apache License 2.0 | 252 | [
"tornado/web.py",
".travis.yml",
"tornado/simple_httpclient.py"
]
| [
"tornado/web.py",
".travis.yml",
"tornado/simple_httpclient.py"
]
|
|
pypa__twine-134 | b34f042da78aed22b6e512df61b495638b06ba03 | 2015-09-27 21:52:01 | f487b7da9c42e4932bc33bf10d70cdc59fd16fd5 | diff --git a/twine/commands/upload.py b/twine/commands/upload.py
index 032cc21..2bd4a52 100644
--- a/twine/commands/upload.py
+++ b/twine/commands/upload.py
@@ -67,11 +67,13 @@ def upload(dists, repository, sign, identity, username, password, comment,
if not sign and identity:
raise ValueError("sign must be given along with identity")
+ dists = find_dists(dists)
+
# Determine if the user has passed in pre-signed distributions
signatures = dict(
(os.path.basename(d), d) for d in dists if d.endswith(".asc")
)
- dists = [i for i in dists if not i.endswith(".asc")]
+ uploads = [i for i in dists if not i.endswith(".asc")]
config = utils.get_repository_from_config(config_file, repository)
@@ -86,24 +88,14 @@ def upload(dists, repository, sign, identity, username, password, comment,
repository = Repository(config["repository"], username, password)
- uploads = find_dists(dists)
-
for filename in uploads:
package = PackageFile.from_filename(filename, comment)
- # Sign the dist if requested
- # if sign:
- # sign_file(sign_with, filename, identity)
- # signed_name = os.path.basename(filename) + ".asc"
- signed_name = package.signed_filename
+ signed_name = package.signed_basefilename
if signed_name in signatures:
- with open(signatures[signed_name], "rb") as gpg:
- package.gpg_signature = (signed_name, gpg.read())
- # data["gpg_signature"] = (signed_name, gpg.read())
+ package.add_gpg_signature(signatures[signed_name], signed_name)
elif sign:
package.sign(sign_with, identity)
- # with open(filename + ".asc", "rb") as gpg:
- # data["gpg_signature"] = (signed_name, gpg.read())
resp = repository.upload(package)
diff --git a/twine/package.py b/twine/package.py
index e80116a..e062c71 100644
--- a/twine/package.py
+++ b/twine/package.py
@@ -49,6 +49,7 @@ class PackageFile(object):
self.filetype = filetype
self.safe_name = pkg_resources.safe_name(metadata.name)
self.signed_filename = self.filename + '.asc'
+ self.signed_basefilename = self.basefilename + '.asc'
self.gpg_signature = None
md5_hash = hashlib.md5()
@@ -141,6 +142,13 @@ class PackageFile(object):
return data
+ def add_gpg_signature(self, signature_filepath, signature_filename):
+ if self.gpg_signature is not None:
+ raise ValueError('GPG Signature can only be added once')
+
+ with open(signature_filepath, "rb") as gpg:
+ self.gpg_signature = (signature_filename, gpg.read())
+
def sign(self, sign_with, identity):
print("Signing {0}".format(self.basefilename))
gpg_args = (sign_with, "--detach-sign")
@@ -149,5 +157,4 @@ class PackageFile(object):
gpg_args += ("-a", self.filename)
subprocess.check_call(gpg_args)
- with open(self.signed_filename, "rb") as gpg:
- self.pg_signature = (self.signed_filename, gpg.read())
+ self.add_gpg_signature(self.signed_filename, self.signed_basefilename)
| "twine upload" usually fails to upload .asc files
On the most recent Foolscap release, I signed the sdist tarballs as usual, and tried to use twine to upload everything:
```
% python setup.py sdist --formats=zip,gztar bdist_wheel
% ls dist
foolscap-0.9.1-py2-none-any.whl foolscap-0.9.1.tar.gz foolscap-0.9.1.zip
% (gpg sign them all)
% ls dist
foolscap-0.9.1-py2-none-any.whl foolscap-0.9.1.tar.gz foolscap-0.9.1.zip
foolscap-0.9.1-py2-none-any.whl.asc foolscap-0.9.1.tar.gz.asc foolscap-0.9.1.zip.asc
% python setup.py register
% twine upload dist/*
```
Twine uploaded the tar/zip/whl files, but ignored the .asc signatures, and the resulting [pypi page](https://pypi.python.org/pypi/foolscap/0.9.1) doesn't show them either.
After some digging, I found that `twine/upload.py upload()` will only use pre-signed .asc files if the command was run like `cd dist; twine upload *`. It won't use them if it was run as `cd dist; twine upload ./*` or `twine upload dist/*`. The problem seems to be that the `signatures` dictionary is indexed by the basename of the signature files, while the lookup key is using the full (original) filename of the tarball/etc with ".asc" appended.
I think it might be simpler and safer to have the code just check for a neighboring .asc file inside the upload loop, something like:
```python
for filename in uploads:
    package = PackageFile.from_filename(filename, comment)
    maybe_sig = package.signed_filename  # already filename + ".asc"
    if os.path.exists(maybe_sig):
        with open(maybe_sig, "rb") as gpg:
            package.gpg_signature = (os.path.basename(maybe_sig), gpg.read())
...
```
I'll write up a patch for this. I started to look for a way of adding a test, but the code that looks for signatures happens deep enough in `upload()` that it'd need an oversized mock "Repository" class to exercise the .asc check without actually uploading anything. I'm not sure what the best way to approach the test would be.
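One way to cover this without a mock `Repository` — a sketch in the style of `tests/test_package.py`, reusing `pretend.stub` and the wheel fixture that the `test_upload.py` patch references; it checks only the basename-keyed matching, not the network path:

```python
# Sketch: exercise the .asc name matching without uploading anything.
import pretend
from twine import package

def test_signed_basefilename_ignores_dist_prefix():
    pkg = package.PackageFile(
        filename="tests/fixtures/twine-1.5.0-py2.py3-none-any.whl",
        comment=None,
        metadata=pretend.stub(name="twine"),
        python_version=None,
        filetype="bdist_wheel",
    )
    # the signature lookup keys on the basename, so "twine upload dist/*"
    # and "cd dist; twine upload *" resolve to the same .asc name
    assert pkg.signed_basefilename == "twine-1.5.0-py2.py3-none-any.whl.asc"
```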
| pypa/twine | diff --git a/tests/test_package.py b/tests/test_package.py
index fcc827a..d28eec1 100644
--- a/tests/test_package.py
+++ b/tests/test_package.py
@@ -55,3 +55,18 @@ def test_sign_file_with_identity(monkeypatch):
pass
args = ('gpg', '--detach-sign', '--local-user', 'identity', '-a', filename)
assert replaced_check_call.calls == [pretend.call(args)]
+
+
+def test_package_signed_name_is_correct():
+ filename = 'tests/fixtures/deprecated-pypirc'
+
+ pkg = package.PackageFile(
+ filename=filename,
+ comment=None,
+ metadata=pretend.stub(name="deprecated-pypirc"),
+ python_version=None,
+ filetype=None
+ )
+
+ assert pkg.signed_basefilename == "deprecated-pypirc.asc"
+ assert pkg.signed_filename == (filename + '.asc')
diff --git a/tests/test_upload.py b/tests/test_upload.py
index b40660f..7f99510 100644
--- a/tests/test_upload.py
+++ b/tests/test_upload.py
@@ -66,6 +66,7 @@ def test_find_dists_handles_real_files():
def test_get_config_old_format(tmpdir):
pypirc = os.path.join(str(tmpdir), ".pypirc")
+ dists = ["tests/fixtures/twine-1.5.0-py2.py3-none-any.whl"]
with open(pypirc, "w") as fp:
fp.write(textwrap.dedent("""
@@ -75,7 +76,7 @@ def test_get_config_old_format(tmpdir):
"""))
try:
- upload.upload(dists="foo", repository="pypi", sign=None, identity=None,
+ upload.upload(dists=dists, repository="pypi", sign=None, identity=None,
username=None, password=None, comment=None,
sign_with=None, config_file=pypirc, skip_existing=False)
except KeyError as err:
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 2
} | 1.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"coverage",
"pretend",
"flake8"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
coverage==7.8.0
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
flake8==7.2.0
idna==3.10
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
mccabe==0.7.0
packaging @ file:///croot/packaging_1734472117206/work
pkginfo==1.12.1.2
pluggy @ file:///croot/pluggy_1733169602837/work
pretend==1.0.9
pycodestyle==2.13.0
pyflakes==3.3.1
pytest @ file:///croot/pytest_1738938843180/work
requests==2.32.3
requests-toolbelt==1.0.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
-e git+https://github.com/pypa/twine.git@b34f042da78aed22b6e512df61b495638b06ba03#egg=twine
urllib3==2.3.0
| name: twine
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- coverage==7.8.0
- flake8==7.2.0
- idna==3.10
- mccabe==0.7.0
- pkginfo==1.12.1.2
- pretend==1.0.9
- pycodestyle==2.13.0
- pyflakes==3.3.1
- requests==2.32.3
- requests-toolbelt==1.0.0
- urllib3==2.3.0
prefix: /opt/conda/envs/twine
| [
"tests/test_package.py::test_package_signed_name_is_correct"
]
| []
| [
"tests/test_package.py::test_sign_file",
"tests/test_package.py::test_sign_file_with_identity",
"tests/test_upload.py::test_ensure_wheel_files_uploaded_first",
"tests/test_upload.py::test_ensure_if_no_wheel_files",
"tests/test_upload.py::test_find_dists_expands_globs",
"tests/test_upload.py::test_find_dists_errors_on_invalid_globs",
"tests/test_upload.py::test_find_dists_handles_real_files",
"tests/test_upload.py::test_get_config_old_format",
"tests/test_upload.py::test_skip_existing_skips_files_already_on_PyPI",
"tests/test_upload.py::test_skip_upload_respects_skip_existing"
]
| []
| Apache License 2.0 | 253 | [
"twine/package.py",
"twine/commands/upload.py"
]
| [
"twine/package.py",
"twine/commands/upload.py"
]
|
|
twisted__tubes-26 | 71894325180c6337d257e457d96e85075a560020 | 2015-09-28 08:11:28 | 71894325180c6337d257e457d96e85075a560020 | diff --git a/docs/listings/echoflow.py b/docs/listings/echoflow.py
index beea5b3..d2f3c1a 100644
--- a/docs/listings/echoflow.py
+++ b/docs/listings/echoflow.py
@@ -1,15 +1,18 @@
-from tubes.protocol import factoryFromFlow
+from tubes.protocol import flowFountFromEndpoint
+from tubes.listening import Listener
from twisted.internet.endpoints import serverFromString
-from twisted.internet.defer import Deferred
+from twisted.internet.defer import Deferred, inlineCallbacks
-def echoFlow(fount, drain):
- fount.flowTo(drain)
+def echoFlow(flow):
+ flow.fount.flowTo(flow.drain)
+@inlineCallbacks
def main(reactor, listenOn="stdio:"):
endpoint = serverFromString(reactor, listenOn)
- endpoint.listen(factoryFromFlow(echoFlow))
- return Deferred()
+ flowFount = yield flowFountFromEndpoint(endpoint)
+ flowFount.flowTo(Listener(echoFlow))
+ yield Deferred()
if __name__ == '__main__':
from twisted.internet.task import react
diff --git a/docs/listings/echonetstrings.py b/docs/listings/echonetstrings.py
index bf05626..1287f0f 100644
--- a/docs/listings/echonetstrings.py
+++ b/docs/listings/echonetstrings.py
@@ -1,17 +1,21 @@
from tubes.tube import Tube
from tubes.framing import stringsToNetstrings
-from tubes.protocol import factoryFromFlow
+from tubes.protocol import flowFountFromEndpoint
+from tubes.listening import Listener
+
from twisted.internet.endpoints import TCP4ServerEndpoint
-from twisted.internet.defer import Deferred
+from twisted.internet.defer import inlineCallbacks, Deferred
-def echoTubeFactory(fount, drain):
- return (fount.flowTo(Tube(stringsToNetstrings()))
- .flowTo(drain))
+def echoTubeFactory(flow):
+ return (flow.fount.flowTo(Tube(stringsToNetstrings()))
+ .flowTo(flow.drain))
+@inlineCallbacks
def main(reactor):
endpoint = TCP4ServerEndpoint(reactor, 4321)
- endpoint.listen(factoryFromFlow(echoTubeFactory))
- return Deferred()
+ flowFount = yield flowFountFromEndpoint(endpoint)
+ flowFount.flowTo(Listener(echoTubeFactory))
+ yield Deferred()
if __name__ == '__main__':
from twisted.internet.task import react
diff --git a/docs/listings/portforward.py b/docs/listings/portforward.py
index ec723dc..0941743 100644
--- a/docs/listings/portforward.py
+++ b/docs/listings/portforward.py
@@ -1,21 +1,22 @@
-import os
+from tubes.protocol import flowFountFromEndpoint, flowFromEndpoint
+from tubes.listening import Listener
-from tubes.protocol import factoryFromFlow
from twisted.internet.endpoints import serverFromString, clientFromString
-from twisted.internet.defer import Deferred
+from twisted.internet.defer import Deferred, inlineCallbacks
+@inlineCallbacks
def main(reactor, listen="tcp:4321", connect="tcp:localhost:6543"):
clientEndpoint = clientFromString(reactor, connect)
serverEndpoint = serverFromString(reactor, listen)
- def incomingTubeFactory(listeningFount, listeningDrain):
- def outgoingTubeFactory(connectingFount, connectingDrain):
- listeningFount.flowTo(connectingDrain)
- connectingFount.flowTo(listeningDrain)
- clientEndpoint.connect(factoryFromFlow(outgoingTubeFactory))
-
- serverEndpoint.listen(factoryFromFlow(incomingTubeFactory))
- return Deferred()
+ def incoming(listening):
+ def outgoing(connecting):
+ listening.fount.flowTo(connecting.drain)
+ connecting.fount.flowTo(listening.drain)
+ flowFromEndpoint(clientEndpoint).addCallback(outgoing)
+ flowFount = yield flowFountFromEndpoint(serverEndpoint)
+ flowFount.flowTo(Listener(incoming))
+ yield Deferred()
if __name__ == '__main__':
from twisted.internet.task import react
diff --git a/docs/listings/reversetube.py b/docs/listings/reversetube.py
index cf99b0c..4e9096b 100644
--- a/docs/listings/reversetube.py
+++ b/docs/listings/reversetube.py
@@ -1,6 +1,8 @@
-from tubes.protocol import factoryFromFlow
from twisted.internet.endpoints import serverFromString
-from twisted.internet.defer import Deferred
+from twisted.internet.defer import Deferred, inlineCallbacks
+
+from tubes.protocol import flowFountFromEndpoint
+from tubes.listening import Listener
from tubes.tube import tube, series
@tube
@@ -8,15 +10,17 @@ class Reverser(object):
def received(self, item):
yield b"".join(reversed(item))
-def reverseFlow(fount, drain):
+def reverseFlow(flow):
from tubes.framing import bytesToLines, linesToBytes
lineReverser = series(bytesToLines(), Reverser(), linesToBytes())
- fount.flowTo(lineReverser).flowTo(drain)
+ flow.fount.flowTo(lineReverser).flowTo(flow.drain)
+@inlineCallbacks
def main(reactor, listenOn="stdio:"):
endpoint = serverFromString(reactor, listenOn)
- endpoint.listen(factoryFromFlow(reverseFlow))
- return Deferred()
+ flowFount = yield flowFountFromEndpoint(endpoint)
+ flowFount.flowTo(Listener(reverseFlow))
+ yield Deferred()
if __name__ == '__main__':
from twisted.internet.task import react
diff --git a/docs/listings/rpn.py b/docs/listings/rpn.py
index fe6f96c..697957e 100644
--- a/docs/listings/rpn.py
+++ b/docs/listings/rpn.py
@@ -1,9 +1,10 @@
-from tubes.protocol import factoryFromFlow
from tubes.itube import IFrame, ISegment
from tubes.tube import tube, receiver
+from tubes.listening import Listener
from twisted.internet.endpoints import serverFromString
-from twisted.internet.defer import Deferred
+from twisted.internet.defer import Deferred, inlineCallbacks
+from tubes.protocol import flowFountFromEndpoint
class Calculator(object):
def __init__(self):
@@ -79,15 +80,17 @@ def calculatorSeries():
linesToBytes()
)
-def mathFlow(fount, drain):
+def mathFlow(flow):
processor = calculatorSeries()
- nextDrain = fount.flowTo(processor)
- nextDrain.flowTo(drain)
+ nextDrain = flow.fount.flowTo(processor)
+ nextDrain.flowTo(flow.drain)
+@inlineCallbacks
def main(reactor, port="stdio:"):
endpoint = serverFromString(reactor, port)
- endpoint.listen(factoryFromFlow(mathFlow))
- return Deferred()
+ flowFount = yield flowFountFromEndpoint(endpoint)
+ flowFount.flowTo(Listener(mathFlow))
+ yield Deferred()
if __name__ == '__main__':
from twisted.internet.task import react
diff --git a/sketches/amptube.py b/sketches/amptube.py
index 128d183..0a7e8e9 100644
--- a/sketches/amptube.py
+++ b/sketches/amptube.py
@@ -1,18 +1,19 @@
-
-
from zope.interface import implementer
from ampserver import Math
-from twisted.tubes.protocol import factoryFromFlow
from twisted.internet.endpoints import serverFromString
+from twisted.internet.defer import Deferred, inlineCallbacks
from twisted.internet import reactor
from twisted.protocols.amp import AmpBox, IBoxSender
-from twisted.tubes.itube import ISegment
-from twisted.tubes.tube import Pump, series
-from twisted.tubes.framing import packedPrefixToStrings
+
+from tubes.protocol import flowFountFromEndpoint
+from tubes.listening import Listener
+from tubes.itube import ISegment
+from tubes.tube import Pump, series
+from tubes.framing import packedPrefixToStrings
class StringsToBoxes(Pump):
@@ -96,14 +97,22 @@ class BoxConsumer(Pump):
-def mathFlow(fount, drain):
+def mathFlow(fount):
fount.flowTo(series(packedPrefixToStrings(16), StringsToBoxes(),
- BoxConsumer(Math()), BoxesToData(), drain))
+ BoxConsumer(Math()), BoxesToData(), fount.drain))
-serverEndpoint = serverFromString(reactor, "tcp:1234")
-serverEndpoint.listen(factoryFromFlow(mathFlow))
-from twisted.internet import reactor
-reactor.run()
+
+@inlineCallbacks
+def main():
+ serverEndpoint = serverFromString(reactor, "tcp:1234")
+ flowFount = yield flowFountFromEndpoint(serverEndpoint)
+ flowFount.flowTo(Listener(mathFlow))
+ yield Deferred()
+
+
+from twisted.internet.task import react
+from sys import argv
+react(main, argv[1:])
diff --git a/sketches/fanchat.py b/sketches/fanchat.py
index c26a013..92cdcb3 100644
--- a/sketches/fanchat.py
+++ b/sketches/fanchat.py
@@ -5,14 +5,15 @@ from json import loads, dumps
from zope.interface.common import IMapping
from twisted.internet.endpoints import serverFromString
-from twisted.internet.defer import Deferred
+from twisted.internet.defer import Deferred, inlineCallbacks
-from twisted.tubes.routing import Router, Routed, to
-from twisted.tubes.protocol import factoryFromFlow
-from twisted.tubes.itube import IFrame
-from twisted.tubes.tube import series, tube, receiver
-from twisted.tubes.framing import bytesToLines, linesToBytes
-from twisted.tubes.fan import Out, In
+from tubes.routing import Router, Routed, to
+from tubes.itube import IFrame
+from tubes.tube import series, tube, receiver
+from tubes.framing import bytesToLines, linesToBytes
+from tubes.fan import Out, In
+from tubes.listening import Listener
+from tubes.protocol import flowFountFromEndpoint
@@ -130,12 +131,12 @@ class Hub(object):
self.participants = []
self.channels = defaultdict(Channel)
- def newParticipantFlow(self, fount, drain):
- commandFount = fount.flowTo(
+ def newParticipantFlow(self, flow):
+ commandFount = flow.fount.flowTo(
series(OnStop(lambda: self.participants.remove(participant)),
bytesToLines(), linesToCommands)
)
- commandDrain = series(commandsToLines, linesToBytes(), drain)
+ commandDrain = series(commandsToLines, linesToBytes(), flow.drain)
participant = Participant(self, commandFount, commandDrain)
self.participants.append(participant)
@@ -144,10 +145,12 @@ class Hub(object):
+@inlineCallbacks
def main(reactor, port="stdio:"):
endpoint = serverFromString(reactor, port)
- endpoint.listen(factoryFromFlow(Hub().newParticipantFlow))
- return Deferred()
+ flowFount = yield flowFountFromEndpoint(endpoint)
+ flowFount.flowTo(Listener(Hub().newParticipantFlow))
+ yield Deferred()
diff --git a/sketches/notes.rst b/sketches/notes.rst
index 91e3587..7425177 100644
--- a/sketches/notes.rst
+++ b/sketches/notes.rst
@@ -3,18 +3,18 @@ In the interest of making this branch more accessible to additional contributors,
Framing needs a ton of tests.
It hasn't changed a whole lot so documenting and testing this module might be a good way to get started.
-``twisted.tubes.protocol`` is pretty well tested and roughly complete but could really use some docstrings, and improve the ones it has.
-See for example the docstring for factoryFromFlow.
+``tubes.protocol`` is pretty well tested and roughly complete but could really use some docstrings, and improve the ones it has.
+See for example the docstring for flowFountFromEndpoint.
-The objects in ``twisted.tubes.protocol``, especially those that show up in log messages, could really use nicer reprs that indicate what they're doing.
+The objects in ``tubes.protocol``, especially those that show up in log messages, could really use nicer reprs that indicate what they're doing.
For example ``_ProtocolPlumbing`` and ``_FlowFactory`` should both include information about the flow function they're working on behalf of.
-Similarly, ``twisted.tubes.fan`` is a pretty rough sketch, although it's a bit less self-evident what is going on there since it's not fully implemented.
+Similarly, ``tubes.fan`` is a pretty rough sketch, although it's a bit less self-evident what is going on there since it's not fully implemented.
(*Hopefully* it's straightforward, but let's not count on hope.)
There are a bunch of un-covered `__repr__`s, probably.
-`twisted.tubes.tube.Diverter` could use some better docstrings, as could its helpers `_DrainingFount` and `_DrainingTube`.
+`tubes.tube.Diverter` could use some better docstrings, as could its helpers `_DrainingFount` and `_DrainingTube`.
We need a decorator for a function so that this:
diff --git a/tox.ini b/tox.ini
index ac04142..e689da0 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
[tox]
-envlist = py26, py27, pypy, docs, lint
+envlist = py26, py27, pypy, docs, lint, apidocs
[testenv]
deps =
diff --git a/tubes/_siphon.py b/tubes/_siphon.py
index ad4c542..665c901 100644
--- a/tubes/_siphon.py
+++ b/tubes/_siphon.py
@@ -10,8 +10,8 @@ from collections import deque
from zope.interface import implementer
-from .itube import IPause, IDrain, IFount, ITube
-from .kit import Pauser, beginFlowingFrom, beginFlowingTo
+from .itube import IDrain, IFount, ITube
+from .kit import Pauser, beginFlowingFrom, beginFlowingTo, NoPause, OncePause
from ._components import _registryAdapting
from twisted.python.failure import Failure
@@ -214,10 +214,7 @@ class _SiphonFount(_SiphonPiece):
siphon's tube.
"""
result = beginFlowingTo(self, drain)
- if self._siphon._pauseBecauseNoDrain:
- pbnd = self._siphon._pauseBecauseNoDrain
- self._siphon._pauseBecauseNoDrain = None
- pbnd.unpause()
+ self._siphon._pauseBecauseNoDrain.maybeUnpause()
self._siphon._unbufferIterator()
return result
@@ -245,19 +242,6 @@ class _SiphonFount(_SiphonPiece):
-@implementer(IPause)
-class _PlaceholderPause(object):
- """
- L{IPause} provider that does nothing.
- """
-
- def unpause(self):
- """
- No-op.
- """
-
-
-
@implementer(IDrain)
class _SiphonDrain(_SiphonPiece):
"""
@@ -295,7 +279,7 @@ class _SiphonDrain(_SiphonPiece):
pbpc = self._siphon._pauseBecausePauseCalled
self._siphon._pauseBecausePauseCalled = None
if fount is None:
- pauseFlow = _PlaceholderPause
+ pauseFlow = NoPause
else:
pauseFlow = fount.pauseFlow
self._siphon._pauseBecausePauseCalled = pauseFlow()
@@ -382,9 +366,9 @@ class _Siphon(object):
self._everStarted = False
self._unbuffering = False
self._flowStoppingReason = None
- self._pauseBecauseNoDrain = None
self._tfount = _SiphonFount(self)
+ self._pauseBecauseNoDrain = OncePause(self._tfount._pauser)
self._tdrain = _SiphonDrain(self)
self._tube = tube
self._pending = SiphonPendingValues()
@@ -433,9 +417,7 @@ class _Siphon(object):
return
self._pending.append(iter(iterableOrNot))
if self._tfount.drain is None:
- if self._pauseBecauseNoDrain is None:
- self._pauseBecauseNoDrain = self._tfount.pauseFlow()
-
+ self._pauseBecauseNoDrain.pauseOnce()
self._unbufferIterator()
diff --git a/tubes/itube.py b/tubes/itube.py
index 9656155..8bfd04a 100644
--- a/tubes/itube.py
+++ b/tubes/itube.py
@@ -9,8 +9,8 @@ Interfaces related to data flows.
from zope.interface import Interface, Attribute
if 0:
- from zope.interface.interfaces import IInterface
- IInterface
+ from zope.interface.interfaces import ISpecification
+ ISpecification
from twisted.python.failure import Failure
Failure
@@ -60,7 +60,7 @@ class IFount(Interface):
"""
The type of output produced by this Fount.
- This may be an L{IInterface} provider.
+ This may be an L{ISpecification} provider.
""")
drain = Attribute(
@@ -126,6 +126,8 @@ class IDrain(Interface):
inputType = Attribute(
"""
Similar to L{IFount.outputType}.
+
+ This is an L{ISpecification} provider.
""")
fount = Attribute(
@@ -175,7 +177,7 @@ class IDrain(Interface):
class ITube(Interface):
"""
- A tube translates input to output.
+ A tube transforms input into output.
Look at this awesome ASCII art::
diff --git a/tubes/kit.py b/tubes/kit.py
index 7ac796f..ee73633 100644
--- a/tubes/kit.py
+++ b/tubes/kit.py
@@ -132,3 +132,46 @@ def beginFlowingFrom(drain, fount):
(oldFount.drain is drain) ):
oldFount.flowTo(None)
+
+
+@implementer(IPause)
+class NoPause(object):
+ """
+ A null implementation of L{IPause} that does nothing.
+ """
+
+ def unpause(self):
+ """
+ No-op.
+ """
+
+
+
+class OncePause(object):
+ """
+ Pause a pauser once, unpause it if necessary.
+ """
+ def __init__(self, pauser):
+ """
+ Create a L{OncePause} with the given L{Pauser}.
+ """
+ self._pauser = pauser
+ self._currentlyPaused = False
+
+
+ def pauseOnce(self):
+ """
+ If this L{OncePause} is not currently paused, pause its pauser.
+ """
+ if not self._currentlyPaused:
+ self._currentlyPaused = True
+ self._pause = self._pauser.pause()
+
+
+ def maybeUnpause(self):
+ """
+ If this L{OncePause} is currently paused, unpause it.
+ """
+ if self._currentlyPaused:
+ self._currentlyPaused = False
+ self._pause.unpause()
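A quick sketch of the new helper's contract (the pause/resume callables here are invented for illustration): `pauseOnce` pauses the wrapped `Pauser` at most once, and `maybeUnpause` releases that single pause if one is held:

```python
from tubes.kit import Pauser, OncePause

events = []
pauser = Pauser(lambda: events.append("pause"),    # called on first pause
                lambda: events.append("resume"))   # called when all pauses release

once = OncePause(pauser)
once.pauseOnce()       # pauses the underlying Pauser
once.pauseOnce()       # no-op: already holding a pause
once.maybeUnpause()    # releases the single held pause
once.maybeUnpause()    # no-op: nothing held
assert events == ["pause", "resume"]
```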
diff --git a/tubes/listening.py b/tubes/listening.py
new file mode 100644
index 0000000..5b2657a
--- /dev/null
+++ b/tubes/listening.py
@@ -0,0 +1,135 @@
+# -*- test-case-name: tubes.test.test_listening -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+"""
+Listening.
+"""
+
+from zope.interface import implementer, implementedBy
+
+from .itube import IDrain
+from .kit import beginFlowingFrom, NoPause
+from .tube import tube, series
+
+class Flow(object):
+ """
+ A L{Flow} is a combination of a Fount and a Drain, representing a
+ bi-directional communication channel such as a TCP connection.
+
+ @ivar fount: A fount.
+ @type fount: L{IFount}
+
+ @ivar drain: A drain.
+ @type drain: L{IDrain}
+ """
+
+ def __init__(self, fount, drain):
+ """
+ @param fount: Fount.
+ @type fount: L{IFount}
+
+ @param drain: Drain.
+ @type drain: L{IDrain}
+ """
+ self.fount = fount
+ self.drain = drain
+
+
+
+@implementer(IDrain)
+class Listener(object):
+ """
+ A L{Listener} is a drain that accepts L{Flow}s and sets them up.
+ """
+
+ inputType = implementedBy(Flow)
+
+ def __init__(self, flowConnector, maxConnections=100):
+ """
+ @param flowConnector: a 1-argument callable taking a L{Flow} and
+ returning nothing, which connects the flow.
+
+ @param maxConnections: The number of concurrent L{Flow} objects
+ to maintain active at once.
+ @type maxConnections: L{int}
+ """
+ self.fount = None
+ self._flowConnector = flowConnector
+ self._maxConnections = maxConnections
+ self._currentConnections = 0
+ self._paused = NoPause()
+
+
+ def flowingFrom(self, fount):
+ """
+ The flow has begun from the given L{fount} of L{Flow}s.
+
+ @param fount: A fount of flows. One example of such a suitable fount
+ would be the return value of
+ L{tubes.protocol.flowFountFromEndpoint}.
+
+ @return: L{None}, since this is a "terminal" drain, where founts of
+            L{Flow} must end up in order for new connections to be
+ established.
+ """
+ beginFlowingFrom(self, fount)
+
+
+ def receive(self, item):
+ """
+ Receive the given flow, applying backpressure if too many connections
+ are active.
+
+ @param item: The inbound L{Flow}.
+ """
+ self._currentConnections += 1
+ if self._currentConnections >= self._maxConnections:
+ self._paused = self.fount.pauseFlow()
+ def dec():
+ self._currentConnections -= 1
+ self._paused.unpause()
+ self._paused = NoPause()
+ self._flowConnector(Flow(item.fount.flowTo(series(_OnStop(dec))),
+ item.drain))
+
+
+ def flowStopped(self, reason):
+ """
+ No more L{Flow}s are incoming; nothing to do.
+
+ @param reason: the reason the flow stopped.
+ """
+
+
+
+@tube
+class _OnStop(object):
+ """
+ Call a callback when the flow stops.
+ """
+ def __init__(self, callback):
+ """
+ Call the given callback.
+ """
+ self.callback = callback
+
+
+ def received(self, item):
+ """
+ Pass through all received items.
+
+ @param item: An item being passed through (type unknown).
+ """
+ yield item
+
+
+ def stopped(self, reason):
+ """
+ Call the callback on stop.
+
+ @param reason: the reason that the flow stopped; ignored.
+
+ @return: no items.
+ """
+ self.callback()
+ return ()
diff --git a/tubes/protocol.py b/tubes/protocol.py
index 6577f4c..6d44259 100644
--- a/tubes/protocol.py
+++ b/tubes/protocol.py
@@ -5,28 +5,34 @@
"""
Objects to connect L{real data <_Protocol>} to L{tubes}.
-@see: L{factoryFromFlow}
+@see: L{flowFountFromEndpoint}
"""
__all__ = [
- 'factoryFromFlow',
+ 'flowFountFromEndpoint',
+ 'flowFromEndpoint',
]
-from zope.interface import implementer
+from zope.interface import implementer, implementedBy
-from .kit import Pauser, beginFlowingFrom, beginFlowingTo
-from .itube import IDrain, IFount, ISegment
+from .kit import Pauser, beginFlowingFrom, beginFlowingTo, OncePause
+from .itube import StopFlowCalled, IDrain, IFount, ISegment
+from .listening import Flow
-from twisted.internet.interfaces import IPushProducer
+from twisted.python.failure import Failure
+from twisted.internet.interfaces import IPushProducer, IListeningPort
from twisted.internet.protocol import Protocol as _Protocol
if 0:
# Workaround for inability of pydoctor to resolve references.
from twisted.internet.interfaces import (
- IProtocol, ITransport, IConsumer, IProtocolFactory, IProducer)
- IProtocol, ITransport, IConsumer, IProtocolFactory, IProducer
- from twisted.python.failure import Failure
- Failure
+ IProtocol, ITransport, IConsumer, IProtocolFactory, IProducer,
+ IStreamServerEndpoint
+ )
+ (IProtocol, ITransport, IConsumer, IProtocolFactory, IProducer,
+ IStreamServerEndpoint)
+ from twisted.internet.defer import Deferred
+ Deferred
@@ -205,7 +211,7 @@ class _ProtocolPlumbing(_Protocol):
A L{_ProtocolPlumbing} implements L{IProtocol} to deliver all incoming data
to the drain associated with its L{fount <IFount>}.
- @ivar _flow: A flow function, as described in L{factoryFromFlow}.
+ @ivar _flow: A flow function, as described in L{_factoryFromFlow}.
@type _flow: L{callable}
@ivar _drain: The drain that is passed on to the application, created after
@@ -267,17 +273,17 @@ class _ProtocolPlumbing(_Protocol):
-def factoryFromFlow(flow):
+def _factoryFromFlow(flow):
"""
Convert a flow function into an L{IProtocolFactory}.
A "flow function" is a function which takes a L{fount <IFount>} and an
L{drain <IDrain>}.
- L{factoryFromFlow} takes such a function and creates an L{IProtocolFactory}
- which, upon each new connection, provides the flow function with an
- L{IFount} and an L{IDrain} representing the read end and the write end of
- the incoming connection, respectively.
+ L{_factoryFromFlow} takes such a function and creates an
+ L{IProtocolFactory} which, upon each new connection, provides the flow
+ function with an L{IFount} and an L{IDrain} representing the read end and
+ the write end of the incoming connection, respectively.
@param flow: a 2-argument callable, taking (fount, drain).
@type flow: L{callable}
@@ -287,3 +293,134 @@ def factoryFromFlow(flow):
"""
from twisted.internet.protocol import Factory
return Factory.forProtocol(lambda: _ProtocolPlumbing(flow))
+
+
+
+@implementer(IFount)
+class _FountImpl(object):
+ """
+ Implementation of fount for listening port.
+ """
+
+ outputType = implementedBy(Flow)
+
+ def __init__(self, portObject, aFlowFunction, preListen):
+ """
+ Create a fount implementation from a provider of L{IPushProducer} and a
+ function that takes a fount and a drain.
+
+ @param portObject: the result of the L{Deferred} from
+ L{IStreamServerEndpoint.listen}
+ @type portObject: L{IListeningPort} and L{IPushProducer} provider
+ (probably; workarounds are in place for other cases)
+
+ @param aFlowFunction: a 2-argument callable, invoked when a connection
+ arrives, with a fount and drain.
+ @type aFlowFunction: L{callable}
+
+ @param preListen: the founts and drains accepted before the C{listen}
+ L{Deferred} has fired. Because these might be arriving before this
+ L{_FountImpl} even I{exists}, this needs to be passed in. That is
+ OK because L{_FountImpl} is very tightly coupled to
+ L{flowFountFromEndpoint}, which is the only thing that constructs
+ it.
+ @type preListen: L{list} of 2-L{tuple}s of C{(fount, drain)}
+ """
+ self.drain = None
+ self._preListen = preListen
+ self._pauser = Pauser(portObject.pauseProducing,
+ portObject.resumeProducing)
+ self._noDrainPause = OncePause(self._pauser)
+ self._aFlowFunction = aFlowFunction
+ self._portObject = portObject
+ if preListen:
+ self._noDrainPause.pauseOnce()
+
+
+ def flowTo(self, drain):
+ """
+ Start flowing to the given drain.
+
+ @param drain: The drain to send flows to.
+
+ @return: the next fount in the chain.
+ """
+ result = beginFlowingTo(self, drain)
+ self._noDrainPause.maybeUnpause()
+ for f, d in self._preListen:
+ self._aFlowFunction(f, d)
+ return result
+
+
+ def pauseFlow(self):
+ """
+ Allow backpressure to build up in the listening socket; ask Twisted to
+ stop calling C{accept}.
+
+ @return: An L{IPause}.
+ """
+ return self._pauser.pause()
+
+
+ def stopFlow(self):
+ """
+ Stop the delivery of L{Flow} objects to this L{_FountImpl}'s drain, and
+ stop listening on the port represented by this fount.
+ """
+ self.drain.flowStopped(Failure(StopFlowCalled()))
+ self.drain = None
+ if IListeningPort.providedBy(self._portObject):
+ self._portObject.stopListening()
+
+
+
+def flowFountFromEndpoint(endpoint):
+ """
+ Listen on the given endpoint, and thereby create a L{fount <IFount>} which
+ outputs a new L{Flow} for each connection.
+
+ @note: L{IStreamServerEndpoint} formally specifies that its C{connect}
+ method returns a L{Deferred} that fires with an L{IListeningPort}.
+ However, L{IListeningPort} is insufficient to express the requisite
+ flow-control to implement a fount; so the C{endpoint} parameter must be
+ an extended endpoint whose C{listen} L{Deferred} fires with a provider
+ of both L{IListeningPort} and L{IPushProducer}. Luckily, the
+ real-world implementations of L{IListeningPort} within Twisted are all
+ L{IPushProducer}s as well, so practically speaking you will not notice
+ this, but for testing it is important to know this is necessary.
+
+ @param endpoint: a server endpoint.
+ @type endpoint: L{IStreamServerEndpoint}
+
+ @return: a L{twisted.internet.defer.Deferred} that fires with a L{IFount}
+ whose C{outputType} is L{Flow}.
+ """
+ preListen = []
+ def listening(portObject):
+ listening.impl = _FountImpl(portObject, aFlowFunction, preListen)
+ return listening.impl
+ listening.impl = None
+ def aFlowFunction(fount, drain):
+ if listening.impl is None or listening.impl.drain is None:
+ preListen.append((fount, drain))
+ if listening.impl is not None:
+ listening.impl._pauseForNoDrain()
+ else:
+ listening.impl.drain.receive(Flow(fount, drain))
+ aFactory = _factoryFromFlow(aFlowFunction)
+ return endpoint.listen(aFactory).addCallback(listening)
+
+
+
+def flowFromEndpoint(endpoint):
+ """
+ Convert a client endpoint into a L{Deferred} that fires with a L{Flow}.
+
+ @param endpoint: a client endpoint that will be connected to, once.
+
+ @return: a L{Deferred} that fires with a L{Flow}.
+ """
+ def cb(fount, drain):
+ cb.result = Flow(fount, drain)
+ return (endpoint.connect(_factoryFromFlow(cb))
+ .addCallback(lambda whatever: cb.result))
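As a client-side usage sketch, mirroring how the updated `docs/listings/portforward.py` above uses this helper (the endpoint string is illustrative):

```python
from twisted.internet import reactor
from twisted.internet.endpoints import clientFromString

from tubes.protocol import flowFromEndpoint

def connected(flow):
    # flow.fount reads from the peer; flow.drain writes back to it.
    flow.fount.flowTo(flow.drain)  # trivially echo the server's output

d = flowFromEndpoint(clientFromString(reactor, "tcp:localhost:6543"))
d.addCallback(connected)
```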
diff --git a/tubes/tube.py b/tubes/tube.py
index df954ef..fe23b49 100644
--- a/tubes/tube.py
+++ b/tubes/tube.py
@@ -15,8 +15,9 @@ from twisted.python.components import proxyForInterface
from twisted.python.failure import Failure
from .itube import IDrain, ITube, IDivertable, IFount, StopFlowCalled
-from ._siphon import _tubeRegistry, _Siphon, _PlaceholderPause, skip
+from ._siphon import _tubeRegistry, _Siphon, skip
from ._components import _registryActive
+from .kit import NoPause as _PlaceholderPause
__all__ = [
"Diverter",
| replace conversion of flow-function → factory with conversion of endpoint → fount of founts
Presently on trunk, tubes are hooked up to the outside world (i.e. Twisted) by converting a "flow function" (a function taking a fount and a drain) into a Factory, which is then handed to Twisted.
However, this is limiting in one major regard: the backpressure of *not accepting future connections* is impossible to model properly.
A better way to conceptualize a listening socket is a fount whose `outputType` is an object with both `Fount` and `Drain` attributes. This way, we can flow the stream of incoming connections to a "listening" drain, which can exert backpressure (most obviously by simply limiting the number of concurrently active connections).
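The updated listings in this patch show the shape of that API; a minimal echo server, adapted from `docs/listings/echoflow.py` (the port string is chosen for illustration):

```python
from twisted.internet.defer import Deferred, inlineCallbacks
from twisted.internet.endpoints import serverFromString

from tubes.listening import Listener
from tubes.protocol import flowFountFromEndpoint

def echoFlow(flow):
    # Each accepted connection arrives as a Flow; wiring its read end
    # (fount) to its write end (drain) echoes bytes straight back.
    flow.fount.flowTo(flow.drain)

@inlineCallbacks
def main(reactor, listenOn="tcp:4321"):
    endpoint = serverFromString(reactor, listenOn)
    # flowFountFromEndpoint listens, then fires with a fount of Flows...
    flowFount = yield flowFountFromEndpoint(endpoint)
    # ...which a Listener consumes; the Listener pauses the fount (stops
    # accepting) once maxConnections flows are concurrently active.
    flowFount.flowTo(Listener(echoFlow, maxConnections=100))
    yield Deferred()  # serve forever
```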
As it happens, this is also a feature missing from Twisted which makes it hard to implement fairness in queueing. By making tubes treat listening sockets and connected sockets consistently, we can open up a whole new area of correct-by-default behavior under high levels of load. | twisted/tubes | diff --git a/tubes/test/test_listening.py b/tubes/test/test_listening.py
new file mode 100644
index 0000000..5b0b3e3
--- /dev/null
+++ b/tubes/test/test_listening.py
@@ -0,0 +1,56 @@
+# -*- test-case-name: tubes.test.test_listening -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+Tests for L{tubes.listening}.
+"""
+
+from unittest import TestCase
+
+from ..listening import Flow, Listener
+from ..memory import iteratorFount
+
+from .util import FakeDrain
+
+class ListeningTests(TestCase):
+ """
+ Test cases for listening.
+ """
+
+ def test_listenerCallsFlowConnector(self):
+ """
+ A L{Listener} is a drain which calls the function given to it to
+ connect a flow
+ """
+ drained = FakeDrain()
+ flow = Flow(iteratorFount([1, 2, 3]),
+ drained)
+ flows = []
+ fi = iteratorFount([flow])
+ listener = Listener(flows.append)
+ fi.flowTo(listener)
+ self.assertEqual(len(flows), 1)
+ results = FakeDrain()
+ flows[0].fount.flowTo(results)
+ # The listener might need to (and in fact does) interpose a different
+ # value for 'fount' and 'drain' to add hooks to them. We assert about
+ # the values passed through them.
+ self.assertEqual(results.received, [1, 2, 3])
+ iteratorFount([4, 5, 6]).flowTo(flows[0].drain)
+ self.assertEqual(drained.received, [4, 5, 6])
+
+
+ def test_listenerLimitsConcurrentConnections(self):
+ """
+ L{Listener} will pause its fount when too many connections are
+ received.
+ """
+ connectorCalled = []
+ listener = Listener(connectorCalled.append, maxConnections=3)
+ tenFlows = iteratorFount([Flow(iteratorFount([1, 2, 3]),
+ FakeDrain())
+ for each in range(10)])
+ tenFlows.flowTo(listener)
+ self.assertEqual(len(connectorCalled), 3)
+ connectorCalled[0].fount.flowTo(connectorCalled[0].drain)
diff --git a/tubes/test/test_protocol.py b/tubes/test/test_protocol.py
index 8c64452..9586f82 100644
--- a/tubes/test/test_protocol.py
+++ b/tubes/test/test_protocol.py
@@ -6,14 +6,20 @@
Tests for L{tubes.protocol}.
"""
+from zope.interface import implementer
+
from twisted.trial.unittest import SynchronousTestCase as TestCase
from twisted.python.failure import Failure
+from twisted.test.proto_helpers import StringTransport
+from twisted.internet.interfaces import IStreamServerEndpoint
-from ..protocol import factoryFromFlow
+from ..protocol import flowFountFromEndpoint, flowFromEndpoint
from ..tube import tube, series
+from ..listening import Flow, Listener
+from ..itube import IFount
-from ..test.util import StringEndpoint, FakeDrain, FakeFount
+from .util import StringEndpoint, FakeDrain, FakeFount, fakeEndpointWithPorts
@tube
class RememberingTube(object):
@@ -50,9 +56,9 @@ class RememberingTube(object):
-class FlowingAdapterTests(TestCase):
+class FlowConnectorTests(TestCase):
"""
- Tests for L{factoryFromFlow} and the drain/fount/factory adapters it
+ Tests for L{flowFromEndpoint} and the drain/fount/factory adapters it
constructs.
"""
@@ -61,17 +67,25 @@ class FlowingAdapterTests(TestCase):
Set up these tests.
"""
self.endpoint = StringEndpoint()
- def flowFunction(fount, drain):
- self.adaptedDrain = drain
- self.adaptedFount = fount
- self.adaptedProtocol = self.successResultOf(
- self.endpoint.connect(factoryFromFlow(flowFunction))
- )
-
+ flow = self.successResultOf(flowFromEndpoint(self.endpoint))
+ self.adaptedDrain = flow.drain
+ self.adaptedFount = flow.fount
self.tube = RememberingTube()
self.drain = series(self.tube)
+ def adaptedProtocol(self):
+ """
+ Retrieve a protocol for testing with.
+
+ @return: the first protocol instance to have been created by making the
+ virtual outbound connection associated with the call to
+ L{flowFromEndpoint} performed in L{FlowConnectorTests.setUp}.
+ @rtype: L{IProtocol}
+ """
+ return self.endpoint.transports[0].protocol
+
+
def test_flowToSetsDrain(self):
"""
L{_ProtocolFount.flowTo} will set the C{drain} attribute of the
@@ -87,7 +101,7 @@ class FlowingAdapterTests(TestCase):
L{_ProtocolFount.dataReceived} to invoke L{receive} on its drain.
"""
self.adaptedFount.flowTo(self.drain)
- self.adaptedProtocol.dataReceived("some data")
+ self.adaptedProtocol().dataReceived("some data")
self.assertEqual(self.tube.items, ["some data"])
@@ -108,7 +122,7 @@ class FlowingAdapterTests(TestCase):
"""
self.adaptedFount.flowTo(self.drain)
self.adaptedFount.stopFlow()
- self.assertEqual(self.adaptedProtocol.transport.disconnecting, True)
+ self.assertEqual(self.adaptedProtocol().transport.disconnecting, True)
# The connection has not been closed yet; we *asked* the flow to stop,
# but it may not have done.
self.assertEqual(self.tube.wasStopped, False)
@@ -121,7 +135,7 @@ class FlowingAdapterTests(TestCase):
"""
self.adaptedFount.flowTo(self.drain)
self.adaptedDrain.flowStopped(Failure(ZeroDivisionError()))
- self.assertEqual(self.adaptedProtocol.transport.disconnecting, True)
+ self.assertEqual(self.adaptedProtocol().transport.disconnecting, True)
self.assertEqual(self.tube.wasStopped, False)
@@ -137,7 +151,7 @@ class FlowingAdapterTests(TestCase):
class MyFunException(Exception):
pass
f = Failure(MyFunException())
- self.adaptedProtocol.connectionLost(f)
+ self.adaptedProtocol().connectionLost(f)
self.assertEqual(self.tube.wasStopped, True)
self.assertIdentical(f, self.tube.reason)
@@ -150,7 +164,7 @@ class FlowingAdapterTests(TestCase):
ff = FakeFount()
ff.flowTo(self.adaptedDrain)
self.assertEqual(ff.flowIsStopped, False)
- self.adaptedProtocol.connectionLost(Failure(ZeroDivisionError))
+ self.adaptedProtocol().connectionLost(Failure(ZeroDivisionError))
self.assertEqual(ff.flowIsStopped, True)
@@ -160,16 +174,16 @@ class FlowingAdapterTests(TestCase):
L{_ProtocolFount} is flowing to anything, then it will pause the
transport but only until the L{_ProtocolFount} is flowing to something.
"""
- self.adaptedProtocol.dataReceived("hello, ")
- self.assertEqual(self.adaptedProtocol.transport.producerState,
+ self.adaptedProtocol().dataReceived("hello, ")
+ self.assertEqual(self.adaptedProtocol().transport.producerState,
'paused')
# It would be invalid to call dataReceived again in this state, so no
# need to test that...
fd = FakeDrain()
self.adaptedFount.flowTo(fd)
- self.assertEqual(self.adaptedProtocol.transport.producerState,
+ self.assertEqual(self.adaptedProtocol().transport.producerState,
'producing')
- self.adaptedProtocol.dataReceived("world!")
+ self.adaptedProtocol().dataReceived("world!")
self.assertEqual(fd.received, ["hello, ", "world!"])
@@ -181,7 +195,7 @@ class FlowingAdapterTests(TestCase):
self.test_dataReceivedBeforeFlowing()
fd2 = FakeDrain()
self.adaptedFount.flowTo(fd2)
- self.adaptedProtocol.dataReceived("hooray")
+ self.adaptedProtocol().dataReceived("hooray")
self.assertEqual(fd2.received, ["hooray"])
@@ -201,7 +215,7 @@ class FlowingAdapterTests(TestCase):
"""
fd = FakeDrain()
self.adaptedFount.flowTo(fd)
- self.adaptedProtocol.dataReceived("a")
+ self.adaptedProtocol().dataReceived("a")
self.adaptedFount.flowTo(None)
self.assertEqual(fd.fount, None)
self.test_dataReceivedBeforeFlowing()
@@ -231,10 +245,10 @@ class FlowingAdapterTests(TestCase):
self.assertEqual(ff.flowIsPaused, False)
self.adaptedDrain.flowingFrom(ff)
# The connection is too full! Back off!
- self.adaptedProtocol.transport.producer.pauseProducing()
+ self.adaptedProtocol().transport.producer.pauseProducing()
self.assertEqual(ff.flowIsPaused, True)
# All clear, start writing again.
- self.adaptedProtocol.transport.producer.resumeProducing()
+ self.adaptedProtocol().transport.producer.resumeProducing()
self.assertEqual(ff.flowIsPaused, False)
@@ -249,17 +263,17 @@ class FlowingAdapterTests(TestCase):
producing = 'producing'
paused = 'paused'
# Sanity check.
- self.assertEqual(self.adaptedProtocol.transport.producerState,
+ self.assertEqual(self.adaptedProtocol().transport.producerState,
producing)
self.adaptedFount.flowTo(fd)
# Steady as she goes.
- self.assertEqual(self.adaptedProtocol.transport.producerState,
+ self.assertEqual(self.adaptedProtocol().transport.producerState,
producing)
anPause = fd.fount.pauseFlow()
- self.assertEqual(self.adaptedProtocol.transport.producerState,
+ self.assertEqual(self.adaptedProtocol().transport.producerState,
paused)
anPause.unpause()
- self.assertEqual(self.adaptedProtocol.transport.producerState,
+ self.assertEqual(self.adaptedProtocol().transport.producerState,
producing)
@@ -288,3 +302,99 @@ class FlowingAdapterTests(TestCase):
return another
anotherOther = self.adaptedFount.flowTo(ReflowingFakeDrain())
self.assertIdentical(another, anotherOther)
+
+
+
+class FlowListenerTests(TestCase):
+ """
+ Tests for L{flowFountFromEndpoint} and the fount adapter it constructs.
+ """
+
+ def test_fromEndpoint(self):
+ """
+ L{flowFountFromEndpoint} returns a L{Deferred} that fires when the
+ listening port is ready.
+ """
+ endpoint, ports = fakeEndpointWithPorts()
+ deferred = flowFountFromEndpoint(endpoint)
+ self.assertNoResult(deferred)
+ deferred.callback(None)
+ result = self.successResultOf(deferred)
+ self.assertTrue(IFount.providedBy(result))
+ self.assertEqual(result.outputType.implementedBy(Flow), True)
+
+
+ def test_oneConnectionAccepted(self):
+ """
+ When a connection comes in to a listening L{flowFountFromEndpoint}, the
+ L{Listener} that it's flowing to's callback is called.
+ """
+ endpoint, ports = fakeEndpointWithPorts()
+ deferred = flowFountFromEndpoint(endpoint)
+ self.assertNoResult(deferred)
+ deferred.callback(None)
+ result = self.successResultOf(deferred)
+ connected = []
+ result.flowTo(Listener(connected.append))
+ protocol = ports[0].factory.buildProtocol(None)
+ self.assertEqual(len(connected), 0)
+ protocol.makeConnection(StringTransport())
+ self.assertEqual(len(connected), 1)
+
+
+ def test_acceptBeforeActuallyListening(self):
+ """
+ Sometimes a connection is established reentrantly by C{listen}, without
+ waiting for the L{Deferred} returned to fire. In this case the
+ connection will be buffered until said L{Deferred} fires.
+ """
+ immediateTransport = StringTransport()
+ subEndpoint, ports = fakeEndpointWithPorts()
+ @implementer(IStreamServerEndpoint)
+ class ImmediateFakeEndpoint(object):
+ def listen(self, factory):
+ protocol = factory.buildProtocol(None)
+ protocol.makeConnection(immediateTransport)
+ return subEndpoint.listen(factory)
+ endpoint = ImmediateFakeEndpoint()
+ deferred = flowFountFromEndpoint(endpoint)
+ deferred.callback(None)
+ fount = self.successResultOf(deferred)
+ connected = []
+ self.assertEqual(ports[0].currentlyProducing, False)
+ fount.flowTo(Listener(connected.append))
+ self.assertEqual(ports[0].currentlyProducing, True)
+ self.assertEqual(len(connected), 1)
+
+
+ def test_backpressure(self):
+ """
+ When the L{IFount} returned by L{flowFountFromEndpoint} is paused, it
+ removes its listening port from the reactor. When resumed, it re-adds
+ it.
+ """
+ endpoint, ports = fakeEndpointWithPorts()
+ deferred = flowFountFromEndpoint(endpoint)
+ deferred.callback(None)
+ fount = self.successResultOf(deferred)
+ fount.flowTo(FakeDrain())
+ pause = fount.pauseFlow()
+ self.assertEqual(ports[0].currentlyProducing, False)
+ pause.unpause()
+ self.assertEqual(ports[0].currentlyProducing, True)
+
+
+ def test_stopping(self):
+ """
+ The L{IFount} returned by L{flowFountFromEndpoint} will stop listening
+        on the endpoint when C{stopFlow} is called.
+ """
+ endpoint, ports = fakeEndpointWithPorts()
+ deferred = flowFountFromEndpoint(endpoint)
+ deferred.callback(None)
+ fount = self.successResultOf(deferred)
+ fd = FakeDrain()
+ fount.flowTo(fd)
+ fount.stopFlow()
+ self.assertEqual(ports[0].listenStopping, True)
+ self.assertEqual(len(fd.stopped), 1)
diff --git a/tubes/test/util.py b/tubes/test/util.py
index 2c29c0e..dd36224 100644
--- a/tubes/test/util.py
+++ b/tubes/test/util.py
@@ -11,12 +11,15 @@ from zope.interface.verify import verifyClass
from twisted.test.proto_helpers import StringTransport
from twisted.internet.defer import succeed
-from twisted.internet.interfaces import IStreamClientEndpoint
+from twisted.internet.interfaces import (
+ IStreamClientEndpoint, IStreamServerEndpoint, IListeningPort, IPushProducer
+)
from ..itube import IDrain, IFount, IDivertable
from ..tube import tube
from ..kit import Pauser, beginFlowingFrom, beginFlowingTo
+from twisted.internet.defer import Deferred
@implementer(IStreamClientEndpoint)
class StringEndpoint(object):
@@ -311,3 +314,123 @@ class NullTube(object):
+@implementer(IListeningPort, IPushProducer)
+class FakeListeningProducerPort(object):
+ """
+ This is a fake L{IListeningPort}, also implementing L{IPushProducer}, which
+ L{flowFountFromEndpoint} needs to make backpressure work.
+ """
+ def __init__(self, factory):
+ """
+ Create a L{FakeListeningProducerPort} with the given protocol
+ factory.
+ """
+ self.factory = factory
+ self.stopper = Deferred()
+ self.listenStopping = False
+ self.currentlyProducing = True
+
+
+ def pauseProducing(self):
+ """
+ Pause producing new connections.
+ """
+ self.currentlyProducing = False
+
+
+ def resumeProducing(self):
+ """
+ Resume producing new connections.
+ """
+ self.currentlyProducing = True
+
+
+ def startListening(self):
+ """
+ Start listening on this port.
+
+ @raise CannotListenError: If it cannot listen on this port (e.g., it is
+ a TCP port and it cannot bind to the required port number).
+ """
+
+
+ def stopListening(self):
+ """
+ Stop listening on this fake port.
+
+ @return: a L{Deferred} that should be fired when the test wants to
+ complete stopping listening.
+ """
+ self.listenStopping = True
+ return self.stopper
+
+
+ def stopProducing(self):
+ """
+ Stop producing more data.
+ """
+ self.stopListening()
+
+
+ def getHost(self):
+ """
+ Get the host that this port is listening for.
+
+ @return: An L{IAddress} provider.
+ """
+
+verifyClass(IListeningPort, FakeListeningProducerPort)
+verifyClass(IPushProducer, FakeListeningProducerPort)
+
+
+@implementer(IStreamServerEndpoint)
+class FakeEndpoint(object):
+ """
+ A fake implementation of L{IStreamServerEndpoint} with a L{Deferred} that
+ fires controllably.
+
+ @ivar _listening: deferreds that will fire with listening ports when their
+ C{.callback} is invoked (input to C{.callback} ignored); added to when
+ C{listen} is called.
+ @type _listening: L{list} of L{Deferred}
+
+ @ivar _ports: list of ports that have already started listening
+ @type _ports: L{list} of L{IListeningPort}
+ """
+ def __init__(self):
+ """
+ Create a L{FakeEndpoint}.
+ """
+ self._listening = []
+ self._ports = []
+
+
+ def listen(self, factory):
+ """
+ Listen with the given factory.
+
+ @param factory: The factory to use for future connections.
+
+ @return: a L{Deferred} that fires with a new listening port.
+ """
+ self._listening.append(Deferred())
+ def newListener(ignored):
+ result = FakeListeningProducerPort(factory)
+ self._ports.append(result)
+ return result
+ return self._listening[-1].addCallback(newListener)
+
+
+
+def fakeEndpointWithPorts():
+ """
+ Create a L{FakeEndpoint} and expose the list of ports that it uses.
+
+ @return: a fake endpoint and a list of the ports it has listened on
+ @rtype: a 2-tuple of C{(endpoint, ports)}, where C{ports} is a L{list} of
+ L{IListeningPort}.
+ """
+ self = FakeEndpoint()
+ return self, self._ports
+
+verifyClass(IStreamServerEndpoint, FakeEndpoint)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 14
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio",
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
Automat==24.8.1
characteristic==14.3.0
constantly==23.10.4
coverage==7.8.0
exceptiongroup==1.2.2
execnet==2.1.1
hyperlink==21.0.0
idna==3.10
incremental==24.7.2
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
six==1.17.0
tomli==2.2.1
-e git+https://github.com/twisted/tubes.git@71894325180c6337d257e457d96e85075a560020#egg=Tubes
Twisted==24.11.0
typing_extensions==4.13.0
zope.interface==7.2
| name: tubes
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- automat==24.8.1
- characteristic==14.3.0
- constantly==23.10.4
- coverage==7.8.0
- exceptiongroup==1.2.2
- execnet==2.1.1
- hyperlink==21.0.0
- idna==3.10
- incremental==24.7.2
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- six==1.17.0
- tomli==2.2.1
- twisted==24.11.0
- typing-extensions==4.13.0
- zope-interface==7.2
prefix: /opt/conda/envs/tubes
| [
"tubes/test/test_listening.py::ListeningTests::test_listenerCallsFlowConnector",
"tubes/test/test_listening.py::ListeningTests::test_listenerLimitsConcurrentConnections",
"tubes/test/test_protocol.py::FlowConnectorTests::test_connectionLostSendsFlowStopped",
"tubes/test/test_protocol.py::FlowConnectorTests::test_connectionLostSendsStopFlow",
"tubes/test/test_protocol.py::FlowConnectorTests::test_dataReceivedBeforeFlowing",
"tubes/test/test_protocol.py::FlowConnectorTests::test_dataReceivedBeforeFlowingThenFlowTo",
"tubes/test/test_protocol.py::FlowConnectorTests::test_dataReceivedWhenFlowingToNone",
"tubes/test/test_protocol.py::FlowConnectorTests::test_drainReceivingWritesToTransport",
"tubes/test/test_protocol.py::FlowConnectorTests::test_flowStoppedStopsConnection",
"tubes/test/test_protocol.py::FlowConnectorTests::test_flowToDeliversData",
"tubes/test/test_protocol.py::FlowConnectorTests::test_flowToSetsDrain",
"tubes/test/test_protocol.py::FlowConnectorTests::test_flowingFrom",
"tubes/test/test_protocol.py::FlowConnectorTests::test_flowingFromAttribute",
"tubes/test/test_protocol.py::FlowConnectorTests::test_flowingToNoneAfterFlowingToSomething",
"tubes/test/test_protocol.py::FlowConnectorTests::test_pauseUnpauseFromOtherDrain",
"tubes/test/test_protocol.py::FlowConnectorTests::test_pauseUnpauseFromTransport",
"tubes/test/test_protocol.py::FlowConnectorTests::test_stopFlowStopsConnection",
"tubes/test/test_protocol.py::FlowConnectorTests::test_stopProducing",
"tubes/test/test_protocol.py::FlowListenerTests::test_acceptBeforeActuallyListening",
"tubes/test/test_protocol.py::FlowListenerTests::test_backpressure",
"tubes/test/test_protocol.py::FlowListenerTests::test_fromEndpoint",
"tubes/test/test_protocol.py::FlowListenerTests::test_oneConnectionAccepted",
"tubes/test/test_protocol.py::FlowListenerTests::test_stopping"
]
| []
| []
| []
| MIT License | 254 | [
"tubes/protocol.py",
"tubes/tube.py",
"tubes/itube.py",
"docs/listings/portforward.py",
"sketches/notes.rst",
"docs/listings/echonetstrings.py",
"docs/listings/reversetube.py",
"sketches/fanchat.py",
"sketches/amptube.py",
"docs/listings/echoflow.py",
"tubes/kit.py",
"tox.ini",
"tubes/_siphon.py",
"tubes/listening.py",
"docs/listings/rpn.py"
]
| [
"tubes/protocol.py",
"tubes/tube.py",
"tubes/itube.py",
"docs/listings/portforward.py",
"sketches/notes.rst",
"docs/listings/echonetstrings.py",
"docs/listings/reversetube.py",
"sketches/fanchat.py",
"sketches/amptube.py",
"docs/listings/echoflow.py",
"tubes/kit.py",
"tox.ini",
"tubes/_siphon.py",
"tubes/listening.py",
"docs/listings/rpn.py"
]
|
|
tornadoweb__tornado-1533 | 4ee9ba94de11aaa4f932560fa2b3d8ceb8c61d2a | 2015-09-29 02:40:11 | 4ee9ba94de11aaa4f932560fa2b3d8ceb8c61d2a | diff --git a/tornado/auth.py b/tornado/auth.py
index 32d0e226..ff7172aa 100644
--- a/tornado/auth.py
+++ b/tornado/auth.py
@@ -75,7 +75,7 @@ import hmac
import time
import uuid
-from tornado.concurrent import TracebackFuture, return_future
+from tornado.concurrent import TracebackFuture, return_future, chain_future
from tornado import gen
from tornado import httpclient
from tornado import escape
@@ -985,7 +985,7 @@ class FacebookGraphMixin(OAuth2Mixin):
future.set_exception(AuthError('Facebook auth error: %s' % str(response)))
return
- args = escape.parse_qs_bytes(escape.native_str(response.body))
+ args = urlparse.parse_qs(escape.native_str(response.body))
session = {
"access_token": args["access_token"][-1],
"expires": args.get("expires")
@@ -1062,8 +1062,13 @@ class FacebookGraphMixin(OAuth2Mixin):
Added the ability to override ``self._FACEBOOK_BASE_URL``.
"""
url = self._FACEBOOK_BASE_URL + path
- return self.oauth2_request(url, callback, access_token,
- post_args, **args)
+ # Thanks to the _auth_return_future decorator, our "callback"
+ # argument is a Future, which we cannot pass as a callback to
+ # oauth2_request. Instead, have oauth2_request return a
+ # future and chain them together.
+ oauth_future = self.oauth2_request(url, access_token=access_token,
+ post_args=post_args, **args)
+ chain_future(oauth_future, callback)
def _oauth_signature(consumer_token, method, url, parameters={}, token=None):
| FacebookGraphMixin failing
I had to modify `_on_access_token` so that it did not attempt to make a `facebook_request` to `/me`. With the code as it stands, I get this error:
`TypeError: 'Future' object is not callable`
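For context, the `_auth_return_future` decorator substitutes a `Future` for the `callback` argument, so `facebook_request` was handing `oauth2_request` a `Future` where a callable was expected. The patch above chains the inner request's future onto it instead; a minimal sketch of that chaining pattern (values illustrative):

```python
from tornado.concurrent import Future, chain_future

outer = Future()                # what _auth_return_future supplies as "callback"
inner = Future()                # what oauth2_request returns with no callback
chain_future(inner, outer)      # copy inner's result (or exception) to outer

inner.set_result({"id": "me"})  # the graph request completes...
assert outer.result() == {"id": "me"}  # ...and the outer future resolves too
```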
However, removing that extra request fixes this, and I can use the access_token in the next part of my auth pipeline. | tornadoweb/tornado | diff --git a/tornado/test/auth_test.py b/tornado/test/auth_test.py
index 56de93a5..3ed40e45 100644
--- a/tornado/test/auth_test.py
+++ b/tornado/test/auth_test.py
@@ -5,7 +5,7 @@
from __future__ import absolute_import, division, print_function, with_statement
-from tornado.auth import OpenIdMixin, OAuthMixin, OAuth2Mixin, TwitterMixin, AuthError, GoogleOAuth2Mixin
+from tornado.auth import OpenIdMixin, OAuthMixin, OAuth2Mixin, TwitterMixin, AuthError, GoogleOAuth2Mixin, FacebookGraphMixin
from tornado.concurrent import Future
from tornado.escape import json_decode
from tornado import gen
@@ -126,6 +126,38 @@ class OAuth2ClientLoginHandler(RequestHandler, OAuth2Mixin):
assert res.done()
+class FacebookClientLoginHandler(RequestHandler, FacebookGraphMixin):
+ def initialize(self, test):
+ self._OAUTH_AUTHORIZE_URL = test.get_url('/facebook/server/authorize')
+ self._OAUTH_ACCESS_TOKEN_URL = test.get_url('/facebook/server/access_token')
+ self._FACEBOOK_BASE_URL = test.get_url('/facebook/server')
+
+ @gen.coroutine
+ def get(self):
+ if self.get_argument("code", None):
+ user = yield self.get_authenticated_user(
+ redirect_uri=self.request.full_url(),
+ client_id=self.settings["facebook_api_key"],
+ client_secret=self.settings["facebook_secret"],
+ code=self.get_argument("code"))
+ self.write(user)
+ else:
+ yield self.authorize_redirect(
+ redirect_uri=self.request.full_url(),
+ client_id=self.settings["facebook_api_key"],
+ extra_params={"scope": "read_stream,offline_access"})
+
+
+class FacebookServerAccessTokenHandler(RequestHandler):
+ def get(self):
+ self.write('access_token=asdf')
+
+
+class FacebookServerMeHandler(RequestHandler):
+ def get(self):
+ self.write('{}')
+
+
class TwitterClientHandler(RequestHandler, TwitterMixin):
def initialize(self, test):
self._OAUTH_REQUEST_TOKEN_URL = test.get_url('/oauth1/server/request_token')
@@ -260,6 +292,8 @@ class AuthTest(AsyncHTTPTestCase):
dict(version='1.0a')),
('/oauth2/client/login', OAuth2ClientLoginHandler, dict(test=self)),
+ ('/facebook/client/login', FacebookClientLoginHandler, dict(test=self)),
+
('/twitter/client/login', TwitterClientLoginHandler, dict(test=self)),
('/twitter/client/login_gen_engine', TwitterClientLoginGenEngineHandler, dict(test=self)),
('/twitter/client/login_gen_coroutine', TwitterClientLoginGenCoroutineHandler, dict(test=self)),
@@ -271,13 +305,17 @@ class AuthTest(AsyncHTTPTestCase):
('/oauth1/server/request_token', OAuth1ServerRequestTokenHandler),
('/oauth1/server/access_token', OAuth1ServerAccessTokenHandler),
+ ('/facebook/server/access_token', FacebookServerAccessTokenHandler),
+ ('/facebook/server/me', FacebookServerMeHandler),
('/twitter/server/access_token', TwitterServerAccessTokenHandler),
(r'/twitter/api/users/show/(.*)\.json', TwitterServerShowUserHandler),
(r'/twitter/api/account/verify_credentials\.json', TwitterServerVerifyCredentialsHandler),
],
http_client=self.http_client,
twitter_consumer_key='test_twitter_consumer_key',
- twitter_consumer_secret='test_twitter_consumer_secret')
+ twitter_consumer_secret='test_twitter_consumer_secret',
+ facebook_api_key='test_facebook_api_key',
+ facebook_secret='test_facebook_secret')
def test_openid_redirect(self):
response = self.fetch('/openid/client/login', follow_redirects=False)
@@ -358,6 +396,13 @@ class AuthTest(AsyncHTTPTestCase):
self.assertEqual(response.code, 302)
self.assertTrue('/oauth2/server/authorize?' in response.headers['Location'])
+ def test_facebook_login(self):
+ response = self.fetch('/facebook/client/login', follow_redirects=False)
+ self.assertEqual(response.code, 302)
+ self.assertTrue('/facebook/server/authorize?' in response.headers['Location'])
+ response = self.fetch('/facebook/client/login?code=1234', follow_redirects=False)
+ self.assertEqual(response.code, 200)
+
def base_twitter_redirect(self, url):
# Same as test_oauth10a_redirect
response = self.fetch(url, follow_redirects=False)
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 1
} | 4.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"futures",
"mock",
"monotonic",
"trollius",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
futures==2.2.0
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
mock==5.2.0
monotonic==1.6
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
six==1.17.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
-e git+https://github.com/tornadoweb/tornado.git@4ee9ba94de11aaa4f932560fa2b3d8ceb8c61d2a#egg=tornado
trollius==2.1.post2
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: tornado
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- futures==2.2.0
- mock==5.2.0
- monotonic==1.6
- six==1.17.0
- trollius==2.1.post2
prefix: /opt/conda/envs/tornado
| [
"tornado/test/auth_test.py::AuthTest::test_facebook_login"
]
| []
| [
"tornado/test/auth_test.py::AuthTest::test_oauth10_get_user",
"tornado/test/auth_test.py::AuthTest::test_oauth10_redirect",
"tornado/test/auth_test.py::AuthTest::test_oauth10_request_parameters",
"tornado/test/auth_test.py::AuthTest::test_oauth10a_get_user",
"tornado/test/auth_test.py::AuthTest::test_oauth10a_get_user_coroutine_exception",
"tornado/test/auth_test.py::AuthTest::test_oauth10a_redirect",
"tornado/test/auth_test.py::AuthTest::test_oauth10a_request_parameters",
"tornado/test/auth_test.py::AuthTest::test_oauth2_redirect",
"tornado/test/auth_test.py::AuthTest::test_openid_get_user",
"tornado/test/auth_test.py::AuthTest::test_openid_redirect",
"tornado/test/auth_test.py::AuthTest::test_twitter_get_user",
"tornado/test/auth_test.py::AuthTest::test_twitter_redirect",
"tornado/test/auth_test.py::AuthTest::test_twitter_redirect_gen_coroutine",
"tornado/test/auth_test.py::AuthTest::test_twitter_redirect_gen_engine",
"tornado/test/auth_test.py::AuthTest::test_twitter_show_user",
"tornado/test/auth_test.py::AuthTest::test_twitter_show_user_error",
"tornado/test/auth_test.py::AuthTest::test_twitter_show_user_future",
"tornado/test/auth_test.py::AuthTest::test_twitter_show_user_future_error",
"tornado/test/auth_test.py::GoogleOAuth2Test::test_google_login"
]
| []
| Apache License 2.0 | 255 | [
"tornado/auth.py"
]
| [
"tornado/auth.py"
]
|
|
networkx__networkx-1781 | 328d899fb17bac857eedd9ec3e03f4ede9b11c63 | 2015-09-29 20:34:48 | bd19b43751b5bd433ebc5a9ab173938f721af2dd | diff --git a/networkx/readwrite/gml.py b/networkx/readwrite/gml.py
index d9e49d336..d3c8b8527 100644
--- a/networkx/readwrite/gml.py
+++ b/networkx/readwrite/gml.py
@@ -286,7 +286,7 @@ def parse_gml_lines(lines, label, destringizer):
"""
def tokenize():
patterns = [
- r'[A-Za-z][0-9A-Za-z]*\s+', # keys
+ r'[A-Za-z][0-9A-Za-z_]*\s+', # keys
r'[+-]?(?:[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)(?:[Ee][+-]?[0-9]+)?', # reals
r'[+-]?[0-9]+', # ints
r'".*?"', # strings
| Cannot parse GML files from Cytoscape, possible regression?
This does not appear to be a duplicate of #321 -- I cannot parse .GML files produced from Cytoscape, including the "simple.gml" [file](https://networkx.lanl.gov/trac/attachment/ticket/324/simple.gml) as reported in #321. Below is the output I receive when parsing the file provided in that issue.
From reading the comments, it looks like the suggestion is to either modify the GML parser directly in networkx (allowing slightly out of format GML files), or to modify the output from Cytoscape. If I issue a PR for the GML parser modification, would there be support for it?
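For reference, the tokenizer's key pattern `[A-Za-z][0-9A-Za-z]*` rejects the underscore in keys such as Cytoscape's `root_index`; the one-character patch above widens it to `[A-Za-z][0-9A-Za-z_]*`. A minimal reproduction sketch (the GML snippet is invented to mirror the failing key):

```python
import networkx as nx

gml = """graph [
  node [
    id 0
    root_index -3
  ]
]"""

# Before the fix: NetworkXError: cannot tokenize 'root_index -3' ...
# After adding '_' to the key pattern, this parses cleanly.
g = nx.parse_gml(gml, label='id')
```

The full traceback from the original report: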
```python
10:20:28 (mcdonadt@8086):~> nx.__version__
Out[5]: '1.10'
10:20:32 (mcdonadt@8086):~> g = nx.read_gml('Downloads/simple.gml')
---------------------------------------------------------------------------
NetworkXError Traceback (most recent call last)
<ipython-input-6-e05f5232526f> in <module>()
----> 1 g = nx.read_gml('Downloads/simple.gml')
/Users/mcdonadt/miniconda3/envs/unifrac-network/lib/python3.4/site-packages/networkx/readwrite/gml.py in read_gml(path, label, destringizer)
/Users/mcdonadt/miniconda3/envs/unifrac-network/lib/python3.4/site-packages/networkx/utils/decorators.py in _open_file(func, *args, **kwargs)
218 # Finally, we call the original function, making sure to close the fobj.
219 try:
--> 220 result = func(*new_args, **kwargs)
221 finally:
222 if close_fobj:
/Users/mcdonadt/miniconda3/envs/unifrac-network/lib/python3.4/site-packages/networkx/readwrite/gml.py in read_gml(path, label, destringizer)
208 yield line
209
--> 210 G = parse_gml_lines(filter_lines(path), label, destringizer)
211 return G
212
/Users/mcdonadt/miniconda3/envs/unifrac-network/lib/python3.4/site-packages/networkx/readwrite/gml.py in parse_gml_lines(lines, label, destringizer)
381
382 tokens = tokenize()
--> 383 graph = parse_graph()
384
385 directed = graph.pop('directed', False)
/Users/mcdonadt/miniconda3/envs/unifrac-network/lib/python3.4/site-packages/networkx/readwrite/gml.py in parse_graph()
370
371 def parse_graph():
--> 372 curr_token, dct = parse_kv(next(tokens))
373 if curr_token[0] is not None: # EOF
374 unexpected(curr_token, 'EOF')
/Users/mcdonadt/miniconda3/envs/unifrac-network/lib/python3.4/site-packages/networkx/readwrite/gml.py in parse_kv(curr_token)
355 curr_token = next(tokens)
356 elif type == 4: # dict start
--> 357 curr_token, value = parse_dict(curr_token)
358 else:
359 unexpected(curr_token, "an int, float, string or '['")
/Users/mcdonadt/miniconda3/envs/unifrac-network/lib/python3.4/site-packages/networkx/readwrite/gml.py in parse_dict(curr_token)
365 def parse_dict(curr_token):
366 curr_token = consume(curr_token, 4, "'['") # dict start
--> 367 curr_token, dct = parse_kv(curr_token)
368 curr_token = consume(curr_token, 5, "']'") # dict end
369 return curr_token, dct
/Users/mcdonadt/miniconda3/envs/unifrac-network/lib/python3.4/site-packages/networkx/readwrite/gml.py in parse_kv(curr_token)
355 curr_token = next(tokens)
356 elif type == 4: # dict start
--> 357 curr_token, value = parse_dict(curr_token)
358 else:
359 unexpected(curr_token, "an int, float, string or '['")
/Users/mcdonadt/miniconda3/envs/unifrac-network/lib/python3.4/site-packages/networkx/readwrite/gml.py in parse_dict(curr_token)
364
365 def parse_dict(curr_token):
--> 366 curr_token = consume(curr_token, 4, "'['") # dict start
367 curr_token, dct = parse_kv(curr_token)
368 curr_token = consume(curr_token, 5, "']'") # dict end
/Users/mcdonadt/miniconda3/envs/unifrac-network/lib/python3.4/site-packages/networkx/readwrite/gml.py in consume(curr_token, type, expected)
334 def consume(curr_token, type, expected):
335 if curr_token[0] == type:
--> 336 return next(tokens)
337 unexpected(curr_token, expected)
338
/Users/mcdonadt/miniconda3/envs/unifrac-network/lib/python3.4/site-packages/networkx/readwrite/gml.py in tokenize()
321 else:
322 raise NetworkXError('cannot tokenize %r at (%d, %d)' %
--> 323 (line[pos:], lineno + 1, pos + 1))
324 lineno += 1
325 yield (None, None, lineno + 1, 1) # EOF
NetworkXError: cannot tokenize 'root_index\t-3' at (5, 3)
``` | networkx/networkx | diff --git a/networkx/readwrite/tests/test_gml.py b/networkx/readwrite/tests/test_gml.py
index 85b069127..042061032 100644
--- a/networkx/readwrite/tests/test_gml.py
+++ b/networkx/readwrite/tests/test_gml.py
@@ -65,6 +65,90 @@ graph [
]
]
"""
+ def test_parse_gml_cytoscape_bug(self):
+ # example from issue #321, originally #324 in trac
+ cytoscape_example = """
+Creator "Cytoscape"
+Version 1.0
+graph [
+ node [
+ root_index -3
+ id -3
+ graphics [
+ x -96.0
+ y -67.0
+ w 40.0
+ h 40.0
+ fill "#ff9999"
+ type "ellipse"
+ outline "#666666"
+ outline_width 1.5
+ ]
+ label "node2"
+ ]
+ node [
+ root_index -2
+ id -2
+ graphics [
+ x 63.0
+ y 37.0
+ w 40.0
+ h 40.0
+ fill "#ff9999"
+ type "ellipse"
+ outline "#666666"
+ outline_width 1.5
+ ]
+ label "node1"
+ ]
+ node [
+ root_index -1
+ id -1
+ graphics [
+ x -31.0
+ y -17.0
+ w 40.0
+ h 40.0
+ fill "#ff9999"
+ type "ellipse"
+ outline "#666666"
+ outline_width 1.5
+ ]
+ label "node0"
+ ]
+ edge [
+ root_index -2
+ target -2
+ source -1
+ graphics [
+ width 1.5
+ fill "#0000ff"
+ type "line"
+ Line [
+ ]
+ source_arrow 0
+ target_arrow 3
+ ]
+ label "DirectedEdge"
+ ]
+ edge [
+ root_index -1
+ target -1
+ source -3
+ graphics [
+ width 1.5
+ fill "#0000ff"
+ type "line"
+ Line [
+ ]
+ source_arrow 0
+ target_arrow 3
+ ]
+ label "DirectedEdge"
+ ]
+]
+"""
+ nx.parse_gml(cytoscape_example)
def test_parse_gml(self):
G = nx.parse_gml(self.simple_data, label='label')
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 1
} | 1.102 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y libgdal-dev graphviz"
],
"python": "3.6",
"reqs_path": [
"requirements/default.txt",
"requirements/test.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
decorator==5.1.1
importlib-metadata==4.8.3
iniconfig==1.1.1
-e git+https://github.com/networkx/networkx.git@328d899fb17bac857eedd9ec3e03f4ede9b11c63#egg=networkx
nose==1.3.7
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: networkx
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- decorator==5.1.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- nose==1.3.7
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/networkx
| [
"networkx/readwrite/tests/test_gml.py::TestGraph::test_parse_gml_cytoscape_bug"
]
| [
"networkx/readwrite/tests/test_gml.py::TestGraph::test_parse_gml",
"networkx/readwrite/tests/test_gml.py::TestGraph::test_read_gml"
]
| [
"networkx/readwrite/tests/test_gml.py::TestGraph::test_relabel_duplicate",
"networkx/readwrite/tests/test_gml.py::TestGraph::test_tuplelabels",
"networkx/readwrite/tests/test_gml.py::TestGraph::test_quotes",
"networkx/readwrite/tests/test_gml.py::TestGraph::test_name",
"networkx/readwrite/tests/test_gml.py::TestGraph::test_graph_types",
"networkx/readwrite/tests/test_gml.py::TestGraph::test_data_types",
"networkx/readwrite/tests/test_gml.py::TestGraph::test_escape_unescape",
"networkx/readwrite/tests/test_gml.py::TestGraph::test_exceptions"
]
| []
| BSD 3-Clause | 256 | [
"networkx/readwrite/gml.py"
]
| [
"networkx/readwrite/gml.py"
]
|
|
keleshev__schema-91 | eb7670f0f4615195393dc5350d49fa9a33304137 | 2015-09-30 21:18:29 | eb7670f0f4615195393dc5350d49fa9a33304137 | diff --git a/schema.py b/schema.py
index b8abee4..f4f5460 100644
--- a/schema.py
+++ b/schema.py
@@ -109,7 +109,7 @@ class Schema(object):
if flavor == ITERABLE:
data = Schema(type(s), error=e).validate(data)
o = Or(*s, error=e)
- return type(s)(o.validate(d) for d in data)
+ return type(data)(o.validate(d) for d in data)
if flavor == DICT:
data = Schema(dict, error=e).validate(data)
new = type(data)() # new - is a dict of the validated values
| Inconsistent return types for validate: type(schema) or type(data)?
I've noted a small inconsistency when validating with iterable and dict schemas:
```python
if flavor == ITERABLE:
data = Schema(type(s), error=e).validate(data)
return type(s)(Or(*s, error=e).validate(d) for d in data)
if flavor == DICT:
data = Schema(dict, error=e).validate(data)
new = type(data)() # new - is a dict of the validated values
...
return new
```
Briefly, when validating with iterable schemas the return type is that of the schema, whereas when validating with dict schemas it is that of the input.
Personally I think that it's more flexible to have the return type match the one of the input.
Ideas, preferences? | keleshev/schema | diff --git a/test_schema.py b/test_schema.py
index 967dec0..e124fc9 100644
--- a/test_schema.py
+++ b/test_schema.py
@@ -408,3 +408,10 @@ def test_exception_handling_with_bad_validators():
except SchemaError as e:
assert "TypeError" in e.args[0]
raise
+
+
+def test_issue_83_iterable_validation_return_type():
+ TestSetType = type("TestSetType", (set,), dict())
+ data = TestSetType(["test", "strings"])
+ s = Schema(set([str]))
+ assert isinstance(s.validate(data), TestSetType)
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"coverage"
],
"pre_install": [],
"python": "3.5",
"reqs_path": null,
"test_cmd": "pytest --doctest-glob=README.rst --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
coverage==6.2
importlib-metadata==4.8.3
iniconfig==1.1.1
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
-e git+https://github.com/keleshev/schema.git@eb7670f0f4615195393dc5350d49fa9a33304137#egg=schema
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: schema
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/schema
| [
"test_schema.py::test_issue_83_iterable_validation_return_type"
]
| []
| [
"test_schema.py::test_schema",
"test_schema.py::test_validate_file",
"test_schema.py::test_and",
"test_schema.py::test_or",
"test_schema.py::test_validate_list",
"test_schema.py::test_list_tuple_set_frozenset",
"test_schema.py::test_strictly",
"test_schema.py::test_dict",
"test_schema.py::test_dict_keys",
"test_schema.py::test_dict_optional_keys",
"test_schema.py::test_dict_optional_defaults",
"test_schema.py::test_dict_subtypes",
"test_schema.py::test_complex",
"test_schema.py::test_nice_errors",
"test_schema.py::test_use_error_handling",
"test_schema.py::test_or_error_handling",
"test_schema.py::test_and_error_handling",
"test_schema.py::test_schema_error_handling",
"test_schema.py::test_use_json",
"test_schema.py::test_error_reporting",
"test_schema.py::test_schema_repr",
"test_schema.py::test_validate_object",
"test_schema.py::test_issue_9_prioritized_key_comparison",
"test_schema.py::test_issue_9_prioritized_key_comparison_in_dicts",
"test_schema.py::test_missing_keys_exception_with_non_str_dict_keys",
"test_schema.py::test_issue_56_cant_rely_on_callables_to_have_name",
"test_schema.py::test_exception_handling_with_bad_validators"
]
| []
| MIT License | 257 | [
"schema.py"
]
| [
"schema.py"
]
|
|
Stranger6667__pyanyapi-22 | e5b7aadff6d02eac82e27cb4ad1f8bc9a4fb2eb0 | 2015-10-01 09:10:15 | fef649317fa4d2717c35a697bf10941691c4b3fe | diff --git a/README.rst b/README.rst
index b01d832..35babdf 100644
--- a/README.rst
+++ b/README.rst
@@ -31,7 +31,7 @@ Usage
-----
Library provides an ability to create API over various content.
-Currently there are bundled tools to work with HTML, XML, JSON.
+Currently there are bundled tools to work with HTML, XML, JSON and YAML.
Initially it was created to work with ``requests`` library.
Basic setup
@@ -139,6 +139,22 @@ Settings attribute is merged from all ancestors of current parser.
>>> SecondChildParser({'child': '//more'}).settings['child']
//more
+Results stripping
+~~~~~~~~~~~~~~~~~
+
+Parsers can automagically strip surrounding whitespace with the ``strip=True`` option.
+
+.. code:: python
+
+ from pyanyapi import XMLParser
+
+
+ >>> settings = {'p': 'string(//p)'}
+ >>> XMLParser(settings).parse('<p> Pcontent </p>').p
+ ' Pcontent '
+ >>> XMLParser(settings, strip=True).parse('<p> Pcontent </p>').p
+ 'Pcontent'
+
HTML & XML
~~~~~~~~~~
diff --git a/pyanyapi/_compat.py b/pyanyapi/_compat.py
index 6ef744e..6322456 100644
--- a/pyanyapi/_compat.py
+++ b/pyanyapi/_compat.py
@@ -16,3 +16,9 @@ try:
import ujson as json
except ImportError:
import json
+
+
+try:
+ string_types = (str, unicode)
+except NameError:
+ string_types = (str, )
diff --git a/pyanyapi/interfaces.py b/pyanyapi/interfaces.py
index 88122a4..7dfb67b 100644
--- a/pyanyapi/interfaces.py
+++ b/pyanyapi/interfaces.py
@@ -6,7 +6,7 @@ import re
import yaml
-from ._compat import json, etree, objectify, XMLParser, HTMLParser
+from ._compat import json, etree, objectify, XMLParser, HTMLParser, string_types
from .exceptions import ResponseParseError
from .helpers import memoize
@@ -21,8 +21,9 @@ class BaseInterface(object):
content = None
empty_result = None
- def __init__(self, content):
+ def __init__(self, content, strip=False):
self.content = content
+ self.strip = strip
self.parse = memoize(self.parse)
@classmethod
@@ -58,6 +59,11 @@ class BaseInterface(object):
if hasattr(attr, '_attached') and type(attr).__name__ == 'cached_property'
)
+ def maybe_strip(self, value):
+ if self.strip and isinstance(value, string_types):
+ return value.strip()
+ return value
+
# Uses as fallback. None - can be obtained from JSON's null, any string also can be, so unique object is a best choice
EMPTY_RESULT = object()
@@ -132,15 +138,13 @@ class XPathInterface(BaseInterface):
result = self.parse(settings['base'])
child_query = settings.get('children')
if child_query:
- return [''.join(element.xpath(child_query)).strip() for element in result]
- elif isinstance(result, list):
- return result
- return result.strip()
+ return [self.maybe_strip(''.join(element.xpath(child_query))) for element in result]
+ return result
return self.parse(settings)
def parse(self, query):
- return self.parsed_content.xpath(query)
+ return self.maybe_strip(self.parsed_content.xpath(query))
class XMLInterface(XPathInterface):
@@ -209,7 +213,7 @@ class DictInterface(BaseInterface):
return self.empty_result
else:
return target
- return target
+ return self.maybe_strip(target)
def execute_method(self, settings):
if isinstance(settings, dict):
@@ -266,7 +270,7 @@ class AJAXInterface(JSONInterface):
def get_inner_interface(self, text, json_part):
if json_part not in self._inner_cache:
inner_content = super(AJAXInterface, self).get_from_dict(text, json_part)
- self._inner_cache[json_part] = self.inner_interface_class(inner_content)
+ self._inner_cache[json_part] = self.inner_interface_class(inner_content, self.strip)
return self._inner_cache[json_part]
def get_from_dict(self, target, query):
@@ -292,14 +296,14 @@ class RegExpInterface(BaseInterface):
So, response will be like 'ok' or 'Error 100'.
"""
- def __init__(self, content, flags=0):
+ def __init__(self, content, strip=False, flags=0):
self.flags = flags
- super(RegExpInterface, self).__init__(content)
+ super(RegExpInterface, self).__init__(content, strip)
def execute_method(self, settings):
matches = re.findall(settings, self.content, self.flags)
if matches:
- return matches[0]
+ return self.maybe_strip(matches[0])
return self.empty_result
def parse(self, query):
diff --git a/pyanyapi/parsers.py b/pyanyapi/parsers.py
index 05fceef..25c009f 100644
--- a/pyanyapi/parsers.py
+++ b/pyanyapi/parsers.py
@@ -23,7 +23,8 @@ class BaseParser(object):
"""
interface_class = None
- def __init__(self, settings=None):
+ def __init__(self, settings=None, strip=False):
+ self.strip = strip
parents_settings = self.get_parents_settings()
if settings:
parents_settings.update(settings)
@@ -68,7 +69,7 @@ class BaseParser(object):
return self.parse(content).parse_all()
def get_interface_kwargs(self):
- return {'content': self.content}
+ return {'content': self.content, 'strip': self.strip}
def prepare_content(self, content):
"""
@@ -165,9 +166,9 @@ class AJAXParser(LXMLParser):
class RegExpParser(BaseParser):
interface_class = RegExpInterface
- def __init__(self, settings=None, flags=0):
+ def __init__(self, settings=None, strip=False, flags=0):
self.flags = flags
- super(RegExpParser, self).__init__(settings)
+ super(RegExpParser, self).__init__(settings, strip)
def get_interface_kwargs(self):
kwargs = super(RegExpParser, self).get_interface_kwargs()
| Add an option to strip results
At the parser level, `False` by default (see the usage sketch below). | Stranger6667/pyanyapi | diff --git a/tests/test_parsers.py b/tests/test_parsers.py
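A minimal usage sketch of the requested flag, matching the interface the patch above introduces (this assumes `lxml` is available so `XMLParser` can be used):
```python
from pyanyapi import XMLParser

settings = {'p': 'string(//p)'}
content = '<p> Pcontent </p>'

# strip defaults to False, so surrounding whitespace is preserved...
print(repr(XMLParser(settings).parse(content).p))              # ' Pcontent '
# ...while strip=True applies str.strip() to string results only.
print(repr(XMLParser(settings, strip=True).parse(content).p))  # 'Pcontent'
```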
index 25e54b3..85f4471 100644
--- a/tests/test_parsers.py
+++ b/tests/test_parsers.py
@@ -179,7 +179,7 @@ def test_complex_config():
parsed = XMLParser({'test': {'base': '//test', 'children': 'text()|*//text()'}}).parse(
'<xml><test>123 </test><test><inside> 234</inside></test></xml>'
)
- assert parsed.test == ['123', '234']
+ assert parsed.test == ['123 ', ' 234']
def test_json_parse():
diff --git a/tests/test_strip.py b/tests/test_strip.py
new file mode 100644
index 0000000..d649312
--- /dev/null
+++ b/tests/test_strip.py
@@ -0,0 +1,34 @@
+# coding: utf-8
+from .conftest import lxml_is_supported
+from pyanyapi import RegExpParser, JSONParser, AJAXParser, XMLParser
+
+
+JSON_CONTENT = '{"container":" 1 "}'
+AJAX_CONTENT = '{"content": "<p> Pcontent </p>"}'
+XML_CONTENT = '<p> Pcontent </p>'
+
+
+def test_strip_regexp_parser():
+ settings = {'all': '.+'}
+ assert RegExpParser(settings).parse(' 1 ').all == ' 1 '
+ assert RegExpParser(settings, strip=True).parse(' 1 ').all == '1'
+
+
+def test_strip_json_parser():
+ settings = {'all': 'container'}
+ assert JSONParser(settings).parse(JSON_CONTENT).all == ' 1 '
+ assert JSONParser(settings, strip=True).parse(JSON_CONTENT).all == '1'
+
+
+@lxml_is_supported
+def test_strip_ajax_parser():
+ settings = {'all': 'content > string(//p)'}
+ assert AJAXParser(settings).parse(AJAX_CONTENT).all == ' Pcontent '
+ assert AJAXParser(settings, strip=True).parse(AJAX_CONTENT).all == 'Pcontent'
+
+
+@lxml_is_supported
+def test_strip_xml_parser():
+ settings = {'all': 'string(//p)'}
+ assert XMLParser(settings).parse(XML_CONTENT).all == ' Pcontent '
+ assert XMLParser(settings, strip=True).parse(XML_CONTENT).all == 'Pcontent'
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 4
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": null,
"python": "3.5",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
coverage==6.2
importlib-metadata==4.8.3
iniconfig==1.1.1
lxml==5.3.1
packaging==21.3
pluggy==1.0.0
py==1.11.0
-e git+https://github.com/Stranger6667/pyanyapi.git@e5b7aadff6d02eac82e27cb4ad1f8bc9a4fb2eb0#egg=pyanyapi
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
PyYAML==3.11
tomli==1.2.3
typing_extensions==4.1.1
ujson==4.3.0
zipp==3.6.0
| name: pyanyapi
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- lxml==5.3.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- pyyaml==3.11
- tomli==1.2.3
- typing-extensions==4.1.1
- ujson==4.3.0
- zipp==3.6.0
prefix: /opt/conda/envs/pyanyapi
| [
"tests/test_parsers.py::test_complex_config",
"tests/test_strip.py::test_strip_regexp_parser",
"tests/test_strip.py::test_strip_json_parser",
"tests/test_strip.py::test_strip_ajax_parser",
"tests/test_strip.py::test_strip_xml_parser"
]
| []
| [
"tests/test_parsers.py::test_xml_objectify_parser",
"tests/test_parsers.py::test_xml_objectify_parser_error",
"tests/test_parsers.py::test_xml_parser_error",
"tests/test_parsers.py::test_yaml_parser_error",
"tests/test_parsers.py::test_xml_parsed[settings0]",
"tests/test_parsers.py::test_xml_parsed[settings1]",
"tests/test_parsers.py::test_xml_simple_settings",
"tests/test_parsers.py::test_json_parsed",
"tests/test_parsers.py::test_multiple_parser_join",
"tests/test_parsers.py::test_multiply_parsers_declaration",
"tests/test_parsers.py::test_empty_values[{\"container\":{\"test\":\"value\"}}-test-value]",
"tests/test_parsers.py::test_empty_values[{\"container\":{\"test\":\"value\"}}-second-None]",
"tests/test_parsers.py::test_empty_values[{\"container\":{\"fail\":[1]}}-second-None]",
"tests/test_parsers.py::test_empty_values[{\"container\":[[1],[],[3]]}-third-expected3]",
"tests/test_parsers.py::test_empty_values[{\"container\":null}-null-None]",
"tests/test_parsers.py::test_empty_values[{\"container\":[1,2]}-test-1,2]",
"tests/test_parsers.py::test_attributes",
"tests/test_parsers.py::test_efficient_parsing",
"tests/test_parsers.py::test_simple_config_xml_parser",
"tests/test_parsers.py::test_simple_config_json_parser",
"tests/test_parsers.py::test_settings_inheritance",
"tests/test_parsers.py::test_json_parse",
"tests/test_parsers.py::test_json_value_error_parse",
"tests/test_parsers.py::test_regexp_parse",
"tests/test_parsers.py::test_yaml_parse",
"tests/test_parsers.py::test_ajax_parser",
"tests/test_parsers.py::test_ajax_parser_cache",
"tests/test_parsers.py::test_ajax_parser_invalid_settings",
"tests/test_parsers.py::test_parse_memoization",
"tests/test_parsers.py::test_regexp_settings",
"tests/test_parsers.py::test_parse_all",
"tests/test_parsers.py::test_parse_all_combined_parser"
]
| []
| MIT License | 258 | [
"README.rst",
"pyanyapi/parsers.py",
"pyanyapi/interfaces.py",
"pyanyapi/_compat.py"
]
| [
"README.rst",
"pyanyapi/parsers.py",
"pyanyapi/interfaces.py",
"pyanyapi/_compat.py"
]
|
|
pypa__twine-138 | 5c06ed2049b633fe7f0b0e27df32d2715614ab3a | 2015-10-01 16:31:50 | f487b7da9c42e4932bc33bf10d70cdc59fd16fd5 | diff --git a/twine/repository.py b/twine/repository.py
index dc473aa..ae57821 100644
--- a/twine/repository.py
+++ b/twine/repository.py
@@ -17,6 +17,9 @@ import requests
from requests_toolbelt.multipart import MultipartEncoder
+KEYWORDS_TO_NOT_FLATTEN = set(["gpg_signature", "content"])
+
+
class Repository(object):
def __init__(self, repository_url, username, password):
self.url = repository_url
@@ -30,11 +33,12 @@ class Repository(object):
def _convert_data_to_list_of_tuples(data):
data_to_send = []
for key, value in data.items():
- if isinstance(value, (list, tuple)):
+ if (key in KEYWORDS_TO_NOT_FLATTEN or
+ not isinstance(value, (list, tuple))):
+ data_to_send.append((key, value))
+ else:
for item in value:
data_to_send.append((key, item))
- else:
- data_to_send.append((key, value))
return data_to_send
def register(self, package):
| Bug in uploading signatures (again)
This time the bug originates from https://github.com/a-tal/twine/commit/13c83237246fe00c67b2e4ce72e8a935f89f9ee0 and the refactor.
Because that flattens all values that are sequences, it happens to flatten the `gpg_signature` field. That causes two parts to be sent for `gpg_signature`, which in turn translates to a list when parsed by PyPI and causes a 500, because PyPI tries to call a string operation on a list.
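To make the failure mode concrete, a minimal sketch of the patched helper; the inputs mirror the regression tests added below, and the expected output assumes the `KEYWORDS_TO_NOT_FLATTEN` guard from the diff above is in place:
```python
from twine.repository import Repository

data = {
    'gpg_signature': ('filename.asc', 'filecontent'),  # file tuple: keep whole
    'platform': ['UNKNOWN', 'ANOTHERPLATFORM'],        # metadata list: flatten
}

tuples = Repository._convert_data_to_list_of_tuples(data)
# The signature stays a single multipart part, while ordinary sequence
# metadata is still expanded into repeated fields:
#   [('gpg_signature', ('filename.asc', 'filecontent')),
#    ('platform', 'UNKNOWN'), ('platform', 'ANOTHERPLATFORM')]
print(tuples)
```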
new file mode 100644
index 0000000..3b8d84b
--- /dev/null
+++ b/tests/test_repository.py
@@ -0,0 +1,49 @@
+# Copyright 2015 Ian Cordasco
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from twine import repository
+
+
+def test_gpg_signature_structure_is_preserved():
+ data = {
+ 'gpg_signature': ('filename.asc', 'filecontent'),
+ }
+
+ tuples = repository.Repository._convert_data_to_list_of_tuples(data)
+ assert tuples == [('gpg_signature', ('filename.asc', 'filecontent'))]
+
+
+def test_content_structure_is_preserved():
+ data = {
+ 'content': ('filename', 'filecontent'),
+ }
+
+ tuples = repository.Repository._convert_data_to_list_of_tuples(data)
+ assert tuples == [('content', ('filename', 'filecontent'))]
+
+
+def test_iterables_are_flattened():
+ data = {
+ 'platform': ['UNKNOWN'],
+ }
+
+ tuples = repository.Repository._convert_data_to_list_of_tuples(data)
+ assert tuples == [('platform', 'UNKNOWN')]
+
+ data = {
+ 'platform': ['UNKNOWN', 'ANOTHERPLATFORM'],
+ }
+
+ tuples = repository.Repository._convert_data_to_list_of_tuples(data)
+ assert tuples == [('platform', 'UNKNOWN'),
+ ('platform', 'ANOTHERPLATFORM')]
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 1.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
coverage==7.8.0
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
execnet==2.1.1
idna==3.10
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
packaging @ file:///croot/packaging_1734472117206/work
pkginfo==1.12.1.2
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
requests==2.32.3
requests-toolbelt==1.0.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
-e git+https://github.com/pypa/twine.git@5c06ed2049b633fe7f0b0e27df32d2715614ab3a#egg=twine
typing_extensions==4.13.0
urllib3==2.3.0
| name: twine
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- coverage==7.8.0
- execnet==2.1.1
- idna==3.10
- pkginfo==1.12.1.2
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- requests==2.32.3
- requests-toolbelt==1.0.0
- typing-extensions==4.13.0
- urllib3==2.3.0
prefix: /opt/conda/envs/twine
| [
"tests/test_repository.py::test_gpg_signature_structure_is_preserved",
"tests/test_repository.py::test_content_structure_is_preserved"
]
| []
| [
"tests/test_repository.py::test_iterables_are_flattened"
]
| []
| Apache License 2.0 | 259 | [
"twine/repository.py"
]
| [
"twine/repository.py"
]
|