Dataset schema (each record below is one row with these columns):

| column | dtype | range / cardinality |
|---|---|---|
| instance_id | string | length 10–57 |
| base_commit | string | length 40 |
| created_at | date | 2014-04-30 14:58:36 to 2025-04-30 20:14:11 |
| environment_setup_commit | string | length 40 |
| hints_text | string | length 0–273k |
| patch | string | length 251–7.06M |
| problem_statement | string | length 11–52.5k |
| repo | string | length 7–53 |
| test_patch | string | length 231–997k |
| meta | dict | n/a |
| version | string | 851 distinct values |
| install_config | dict | n/a |
| requirements | string (nullable) | length 93–34.2k |
| environment | string (nullable) | length 760–20.5k |
| FAIL_TO_PASS | list | length 1–9.39k |
| FAIL_TO_FAIL | list | length 0–2.69k |
| PASS_TO_PASS | list | length 0–7.87k |
| PASS_TO_FAIL | list | length 0–192 |
| license_name | string | 55 distinct values |
| __index_level_0__ | int64 | 0–21.4k |
| before_filepaths | list | length 1–105 |
| after_filepaths | list | length 1–105 |
instance_id: joblib__joblib-350
base_commit: 6393c7ee293980963541384dc9573e09116f4734
created_at: 2016-05-11 08:06:46
environment_setup_commit: 40341615cc2600675ce7457d9128fb030f6f89fa

hints_text:
aabadie: @lesteve, I addressed your previous comments and squashed the commits.
lesteve: Thanks a lot, merging!

patch:
diff --git a/joblib/hashing.py b/joblib/hashing.py
index a6e5337..ced817b 100644
--- a/joblib/hashing.py
+++ b/joblib/hashing.py
@@ -189,7 +189,11 @@ class NumpyHasher(Hasher):
if isinstance(obj, self.np.ndarray) and not obj.dtype.hasobject:
# Compute a hash of the object
# The update function of the hash requires a c_contiguous buffer.
- if obj.flags.c_contiguous:
+ if obj.shape == ():
+ # 0d arrays need to be flattened because viewing them as bytes
+ # raises a ValueError exception.
+ obj_c_contiguous = obj.flatten()
+ elif obj.flags.c_contiguous:
obj_c_contiguous = obj
elif obj.flags.f_contiguous:
obj_c_contiguous = obj.T

problem_statement:
Regression: joblib.hash fails on 0d array
```python
import numpy as np
import joblib
arr = np.array(0)
joblib.hash(arr)
```
Works with joblib 0.9.4 and fails on master with the following error:
```
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-4-c182cb63f185> in <module>()
----> 1 joblib.hash(arr)
/home/le243287/dev/alt-scikit-learn/sklearn/externals/joblib/hashing.py in hash(obj, hash_name, coerce_mmap)
256 else:
257 hasher = Hasher(hash_name=hash_name)
--> 258 return hasher.hash(obj)
/home/le243287/dev/alt-scikit-learn/sklearn/externals/joblib/hashing.py in hash(self, obj, return_digest)
66 def hash(self, obj, return_digest=True):
67 try:
---> 68 self.dump(obj)
69 except pickle.PicklingError as e:
70 e.args += ('PicklingError while hashing %r: %r' % (obj, e),)
/volatile/le243287/miniconda3/lib/python3.5/pickle.py in dump(self, obj)
406 if self.proto >= 4:
407 self.framer.start_framing()
--> 408 self.save(obj)
409 self.write(STOP)
410 self.framer.end_framing()
/home/le243287/dev/alt-scikit-learn/sklearn/externals/joblib/hashing.py in save(self, obj)
205 # taking the memoryview.
206 self._hash.update(
--> 207 self._getbuffer(obj_c_contiguous.view(self.np.uint8)))
208
209 # We store the class, to be able to distinguish between
ValueError: new type not compatible with array.
```
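For illustration, the incompatibility is reproducible with NumPy alone; the fix above flattens first, which yields a byte-viewable 1d array (a sketch, and the exact error text may vary across NumPy versions):

```python
import numpy as np

arr = np.array(0)           # 0d array: arr.shape == ()

try:
    arr.view(np.uint8)      # what the hasher does to feed raw bytes
except ValueError as exc:
    print(exc)              # 0d arrays refuse an itemsize-changing view

flat = arr.flatten()        # 1d, C-contiguous copy with shape (1,)
print(flat.view(np.uint8))  # viewing as bytes now works
```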
Full disclosure: I discovered this when running the nilearn tests using joblib master.

repo: joblib/joblib

test_patch:
diff --git a/joblib/test/test_hashing.py b/joblib/test/test_hashing.py
index b197167..d36e4d8 100644
--- a/joblib/test/test_hashing.py
+++ b/joblib/test/test_hashing.py
@@ -402,6 +402,16 @@ def test_hashes_are_different_between_c_and_fortran_contiguous_arrays():
assert_not_equal(hash(arr_c), hash(arr_f))
+@with_numpy
+def test_0d_array():
+ hash(np.array(0))
+
+
+@with_numpy
+def test_0d_and_1d_array_hashing_is_different():
+ assert_not_equal(hash(np.array(0)), hash(np.array([0])))
+
+
@with_numpy
def test_hashes_stay_the_same_with_numpy_objects():
# We want to make sure that hashes don't change with joblib

meta:
{
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 1
}

version: 0.9

install_config:
{
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "",
"pip_packages": [
"nose",
"coverage",
"numpy>=1.6.1",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
}

requirements:
attrs==22.2.0
certifi==2021.5.30
coverage==6.2
importlib-metadata==4.8.3
iniconfig==1.1.1
-e git+https://github.com/joblib/joblib.git@6393c7ee293980963541384dc9573e09116f4734#egg=joblib
nose==1.3.7
numpy==1.19.5
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0

environment:
name: joblib
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- nose==1.3.7
- numpy==1.19.5
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/joblib

FAIL_TO_PASS:
[
"joblib/test/test_hashing.py::test_0d_array",
"joblib/test/test_hashing.py::test_0d_and_1d_array_hashing_is_different"
]
| []

PASS_TO_PASS:
[
"joblib/test/test_hashing.py::test_memory_setup_func",
"joblib/test/test_hashing.py::test_memory_teardown_func",
"joblib/test/test_hashing.py::test_hash_methods",
"joblib/test/test_hashing.py::test_numpy_datetime_array",
"joblib/test/test_hashing.py::test_hash_numpy_noncontiguous",
"joblib/test/test_hashing.py::test_hash_numpy_performance",
"joblib/test/test_hashing.py::test_bound_methods_hash",
"joblib/test/test_hashing.py::test_bound_cached_methods_hash",
"joblib/test/test_hashing.py::test_hash_object_dtype",
"joblib/test/test_hashing.py::test_numpy_scalar",
"joblib/test/test_hashing.py::test_dict_hash",
"joblib/test/test_hashing.py::test_set_hash",
"joblib/test/test_hashing.py::test_string",
"joblib/test/test_hashing.py::test_dtype",
"joblib/test/test_hashing.py::test_hashes_are_different_between_c_and_fortran_contiguous_arrays",
"joblib/test/test_hashing.py::test_hashing_pickling_error"
]
| []

license_name: BSD 3-Clause "New" or "Revised" License

__index_level_0__: 531

before_filepaths:
[
"joblib/hashing.py"
]
| [
"joblib/hashing.py"
]
|

instance_id: joblib__joblib-351
base_commit: 63efbf0263922f9603f9ad9f86a2cc344a627d20
created_at: 2016-05-11 13:56:43
environment_setup_commit: 40341615cc2600675ce7457d9128fb030f6f89fa

hints_text:
aabadie: I think this is in pretty good shape now, so changing the status to MRG.
A few comments though:
* the implementation is not perfect (e.g. the new `_COMPRESSOR_OBJS` list, etc.), so I'd like to have someone else's opinion,
* forcing the decompressor to a wrong one (example: use gzip compression with dump but try to decompress with bz2) will raise an exception. Should we handle this case? My answer is 'no', as the message is quite meaningful, but maybe other people won't agree,
* online documentation and CHANGES.rst still need to be updated but this can be done after a preliminary review.
aabadie: @lesteve, I addressed what we discussed IRL. Feel free to have a look when you have time.
aabadie: @lesteve, @ogrisel, any comments?
aabadie: Just for the record, here are the kinds of things now possible with this PR: https://gist.github.com/aabadie/074587354d97d872aff6abb65510f618
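For readers without access to the gist, usage along these lines is what the PR enables (a sketch; the paths are illustrative):

```python
import io
import gzip
import joblib

to_persist = {'a': [1, 2, 3], 'b': 'some text'}

# Plain file handles instead of filenames:
with open('/tmp/data.pkl', 'wb') as f:
    joblib.dump(to_persist, f)
with open('/tmp/data.pkl', 'rb') as f:
    print(joblib.load(f))

# Standard-library compressor objects:
with gzip.GzipFile('/tmp/data.pkl.gz', 'wb', compresslevel=3) as f:
    joblib.dump(to_persist, f)

# In-memory persistence:
buf = io.BytesIO()
joblib.dump(to_persist, buf)
print(joblib.load(buf))  # the new tests call load() on the buffer directly
```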
lesteve: Looks like you need to rebase on master.
lesteve: > Looks like you need to rebase on master.
Other than this LGTM.
aabadie: > Looks like you need to rebase on master.
Rebased, waiting for AppVeyor.
lesteve: Can you add an entry in CHANGES.rst? Maybe we want to update the docs and add a small section saying that we can use file objects.
aabadie: > Can you add an entry in CHANGES.rst?
Sure
> Maybe we want to update the docs and add a small section saying that we can use file objects.
Good point. Something I forgot to add.
aabadie: @lesteve, I added some lines about persistence in file objects in the documentation and updated the CHANGES.rst file. Comments welcome :wink:
lesteve: Can you rebase on master so we quickly double-check that flake8_diff.sh works fine on your PR?
aabadie: > Can you rebase on master so we quickly double-check that flake8_diff.sh works fine on your PR?
Travis is broken because of the new code in the doc. I'm on it.
aabadie: I had issues with doctest and the usage of file objects in a context manager. I skipped the lines in the doc but that may not be a solution. Any better idea?
Otherwise, I rebased on the latest master (including the flake8_diff.sh recent changes) and it works just fine.
lesteve: > I had issues with doctest and the usage of file objects in a context manager. I skipped the lines in the doc but that may not be a solution. Any better idea?
What was the error? It would be great to understand where it comes from and fix it properly ...
> Otherwise, I rebased on the latest master (including the flake8_diff.sh recent changes) and it works just fine.
Great, thanks a lot!
aabadie: > What was the error? It would be great to understand where it comes from and fix it properly ...
There were 2 things:
* `with open() as fo` returns the file object (`[<io.blabla>]`): I fixed it using the ELLIPSIS flag and by adding `[<...>]` after the `with` block
* in python 2.6, using gzip.GzipFile or bz2.BZ2File doesn't work if you don't use `contextlib.closing`. So I had to skip the file object compression example.
lesteve: > with open() as fo returns the file object ([]) : I fixed it using an ellipsis flags and by adding [<...>] after the with block
Hmmm I am not sure what we should do in this case:
```py
with gzip.GzipFile('/tmp/test.gz', 'wb') as f:
res = joblib.dump({'a': [1, 2, 3], 'b': 'asadf'}, f)
```
Should `res` be:
* a single-element list containing `f`
* a single-element list containing `f.name`, to be more consistent with when dump is called with a filename
* since this is a new feature, can we do what we want? In particular, returning a list may not make sense: the list was there for historical reasons (main pickle + companion files for each numpy array)
> in python 2.6, using gzip.GzipFile or bz2.BZ2File doesn't work if you don't use contextlib.closing. So I had to skip the file object compression example.
Pfff :sob: is there a way we can just skip doctests in Python 2.6?
lesteve: > Pfff :sob: is there a way we can just skip doctests in Python 2.6?
Seems like scikit-learn has some "*fixture*" files in doc to skip the doctests. You can probably have a look there. I believe there is something in setup.cfg as well to work with nose and doctest-fixtures.
aabadie: > Should res be:
> * a single-element list containing f
> * a single-element list containing f.name, to be more consistent with when dump is called with a filename
> * since this is a new feature, can we do what we want? In particular, returning a list may not make sense: the list was there for historical reasons (main pickle + companion files for each numpy array)
I think returning nothing when a file object is given as input is reasonable: it means the user already knows the destination of the dump, and the returned list exists, as you said, for historical reasons.
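The patch implements exactly this contract; a short usage sketch:

```python
import joblib

print(joblib.dump({'a': 1}, '/tmp/obj.pkl'))  # ['/tmp/obj.pkl'] -- list kept
                                              # for backward compatibility
with open('/tmp/obj.pkl', 'wb') as f:
    print(joblib.dump({'a': 1}, f))           # None -- nothing is returned
                                              # for file objects
```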
aabadie: > Seems like scikit-learn has some *fixture* files in the doc folder to skip the doctests
Nice, it works like a charm. The `setup.cfg` file in joblib was already correctly configured.

patch:
diff --git a/CHANGES.rst b/CHANGES.rst
index 082f60c..eed5f3d 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -4,6 +4,11 @@ Latest changes
Release 0.10.0
--------------
+Alexandre Abadie
+
+ ENH: joblib.dump/load now accept file-like objects besides filenames.
+ https://github.com/joblib/joblib/pull/351 for more details.
+
Niels Zeilemaker and Olivier Grisel
Refactored joblib.Parallel to enable the registration of custom
diff --git a/benchmarks/bench_auto_batching.py b/benchmarks/bench_auto_batching.py
index 6046099..d94bc95 100644
--- a/benchmarks/bench_auto_batching.py
+++ b/benchmarks/bench_auto_batching.py
@@ -53,11 +53,10 @@ def bench_short_tasks(task_times, n_jobs=2, batch_size="auto",
p(delayed(sleep_noop)(max(t, 0), input_data, output_data_size)
for t in task_times)
duration = time.time() - t0
- effective_batch_size = getattr(p, '_effective_batch_size',
+ effective_batch_size = getattr(p._backend, '_effective_batch_size',
p.batch_size)
-
- print('Completed %d tasks in %0.3fs, final batch_size=%d\n'
- % (len(task_times), duration, effective_batch_size))
+ print('Completed {} tasks in {:3f}s, final batch_size={}\n'.format(
+ len(task_times), duration, effective_batch_size))
return duration, effective_batch_size
diff --git a/doc/persistence.rst b/doc/persistence.rst
index d261795..f040428 100644
--- a/doc/persistence.rst
+++ b/doc/persistence.rst
@@ -46,6 +46,18 @@ We can then load the object from the file::
[('a', [1, 2, 3]), ('b', array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))]
+Persistence in file objects
+===========================
+
+Instead of filenames, `dump` and `load` functions also accept file objects:
+
+ >>> with open(filename, 'wb') as fo: # doctest: +ELLIPSIS
+ ... joblib.dump(to_persist, fo)
+ >>> with open(filename, 'rb') as fo: # doctest: +ELLIPSIS
+ ... joblib.load(fo)
+ [('a', [1, 2, 3]), ('b', array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))]
+
+
Compressed joblib pickles
=========================
@@ -79,6 +91,15 @@ are 'gzip', 'bz2', 'lzma' and 'xz':
Lzma and Xz compression methods are only available for python versions >= 3.3.
+Compressor files provided by the python standard library can also be used to
+compress pickle, e.g ``gzip.GzipFile``, ``bz2.BZ2File``, ``lzma.LZMAFile``:
+ >>> # Dumping in a gzip.GzipFile object using a compression level of 3.
+ >>> import gzip
+ >>> with gzip.GzipFile(filename + '.gz', 'wb', compresslevel=3) as fo: # doctest: +ELLIPSIS
+ ... joblib.dump(to_persist, fo)
+ >>> with gzip.GzipFile(filename + '.gz', 'rb') as fo: # doctest: +ELLIPSIS
+ ... joblib.load(fo)
+ [('a', [1, 2, 3]), ('b', array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))]
More details can be found in the :func:`joblib.dump` and
:func:`joblib.load` documentation.
diff --git a/doc/persistence_fixture.py b/doc/persistence_fixture.py
new file mode 100644
index 0000000..503d0bc
--- /dev/null
+++ b/doc/persistence_fixture.py
@@ -0,0 +1,13 @@
+"""Fixture module to skip the persistence doctest with python 2.6."""
+
+from nose import SkipTest
+from joblib import _compat
+
+
+def setup_module(module):
+ """Setup module."""
+ if _compat.PY26:
+ # gzip.GZipFile and bz2.BZ2File compressor classes cannot be used
+ # within a context manager (e.g in a `with` block) in python 2.6 so
+ # we skip doctesting of persistence documentation in this case.
+ raise SkipTest("Skipping persistence doctest in Python 2.6")
diff --git a/joblib/numpy_pickle.py b/joblib/numpy_pickle.py
index f029582..0cf88a2 100644
--- a/joblib/numpy_pickle.py
+++ b/joblib/numpy_pickle.py
@@ -397,6 +397,9 @@ def dump(value, filename, compress=0, protocol=None, cache_size=None):
if Path is not None and isinstance(filename, Path):
filename = str(filename)
+ is_filename = isinstance(filename, _basestring)
+ is_fileobj = hasattr(filename, "write")
+
compress_method = 'zlib' # zlib is the default compression method.
if compress is True:
# By default, if compress is enabled, we want to be using 3 by default
@@ -424,14 +427,16 @@ def dump(value, filename, compress=0, protocol=None, cache_size=None):
'Non valid compression method given: "{0}". Possible values are '
'{1}.'.format(compress_method, _COMPRESSORS))
- if not isinstance(filename, _basestring):
+ if not is_filename and not is_fileobj:
# People keep inverting arguments, and the resulting error is
# incomprehensible
raise ValueError(
- 'Second argument should be a filename, %s (type %s) was given'
+ 'Second argument should be a filename or a file-like object, '
+ '%s (type %s) was given.'
% (filename, type(filename))
)
- elif not isinstance(compress, tuple):
+
+ if is_filename and not isinstance(compress, tuple):
# In case no explicit compression was requested using both compression
# method and level in a tuple and the filename has an explicit
# extension, we select the corresponding compressor.
@@ -473,14 +478,54 @@ def dump(value, filename, compress=0, protocol=None, cache_size=None):
with _write_fileobject(filename, compress=(compress_method,
compress_level)) as f:
NumpyPickler(f, protocol=protocol).dump(value)
-
- else:
+ elif is_filename:
with open(filename, 'wb') as f:
NumpyPickler(f, protocol=protocol).dump(value)
+ else:
+ NumpyPickler(filename, protocol=protocol).dump(value)
+
+ # If the target container is a file object, nothing is returned.
+ if is_fileobj:
+ return
+ # For compatibility, the list of created filenames (e.g with one element
+ # after 0.10.0) is returned by default.
return [filename]
+def _unpickle(fobj, filename="", mmap_mode=None):
+ """Internal unpickling function."""
+ # We are careful to open the file handle early and keep it open to
+ # avoid race-conditions on renames.
+ # That said, if data is stored in companion files, which can be
+ # the case with the old persistence format, moving the directory
+ # will create a race when joblib tries to access the companion
+ # files.
+ unpickler = NumpyUnpickler(filename, fobj, mmap_mode=mmap_mode)
+ obj = None
+ try:
+ obj = unpickler.load()
+ if unpickler.compat_mode:
+ warnings.warn("The file '%s' has been generated with a "
+ "joblib version less than 0.10. "
+ "Please regenerate this pickle file."
+ % filename,
+ DeprecationWarning, stacklevel=3)
+ except UnicodeDecodeError as exc:
+ # More user-friendly error message
+ if PY3_OR_LATER:
+ new_exc = ValueError(
+ 'You may be trying to read with '
+ 'python 3 a joblib pickle generated with python 2. '
+ 'This feature is not supported by joblib.')
+ new_exc.__cause__ = exc
+ raise new_exc
+ # Reraise exception with Python 2
+ raise
+
+ return obj
+
+
def load(filename, mmap_mode=None):
"""Reconstruct a Python object from a file persisted with joblib.dump.
@@ -514,37 +559,19 @@ def load(filename, mmap_mode=None):
"""
if Path is not None and isinstance(filename, Path):
filename = str(filename)
- with open(filename, 'rb') as f:
- with _read_fileobject(f, filename, mmap_mode) as f:
- if isinstance(f, _basestring):
- # if the returned file object is a string, this means we try
- # to load a pickle file generated with an version of Joblib
- # so we load it with joblib compatibility function.
- return load_compatibility(f)
-
- # We are careful to open the file handle early and keep it open to
- # avoid race-conditions on renames.
- # That said, if data is stored in companion files, which can be
- # the case with the old persistence format, moving the directory
- # will create a race when joblib tries to access the companion
- # files.
- unpickler = NumpyUnpickler(filename, f, mmap_mode=mmap_mode)
- try:
- obj = unpickler.load()
- if unpickler.compat_mode:
- warnings.warn("The file '%s' has been generated with a "
- "joblib version less than 0.10. "
- "Please regenerate this pickle file."
- % filename,
- DeprecationWarning, stacklevel=2)
- except UnicodeDecodeError as exc:
- # More user-friendly error message
- if PY3_OR_LATER:
- new_exc = ValueError(
- 'You may be trying to read with '
- 'python 3 a joblib pickle generated with python 2. '
- 'This feature is not supported by joblib.')
- new_exc.__cause__ = exc
- raise new_exc
+
+ if hasattr(filename, "read") and hasattr(filename, "seek"):
+ with _read_fileobject(filename, "", mmap_mode) as fobj:
+ obj = _unpickle(fobj)
+ else:
+ with open(filename, 'rb') as f:
+ with _read_fileobject(f, filename, mmap_mode) as fobj:
+ if isinstance(fobj, _basestring):
+ # if the returned file object is a string, this means we
+ # try to load a pickle file generated with an version of
+ # Joblib so we load it with joblib compatibility function.
+ return load_compatibility(fobj)
+
+ obj = _unpickle(fobj, filename, mmap_mode)
return obj
diff --git a/joblib/numpy_pickle_utils.py b/joblib/numpy_pickle_utils.py
index 1755b28..ee879a6 100644
--- a/joblib/numpy_pickle_utils.py
+++ b/joblib/numpy_pickle_utils.py
@@ -8,6 +8,7 @@ import pickle
import sys
import io
import zlib
+import gzip
import bz2
import warnings
import contextlib
@@ -49,6 +50,9 @@ _LZMA_PREFIX = b'\x5d\x00'
# Supported compressors
_COMPRESSORS = ('zlib', 'bz2', 'lzma', 'xz', 'gzip')
+_COMPRESSOR_CLASSES = [gzip.GzipFile, bz2.BZ2File]
+if lzma is not None:
+ _COMPRESSOR_CLASSES.append(lzma.LZMAFile)
# The max magic number length of supported compression file types.
_MAX_PREFIX_LEN = max(len(prefix)
@@ -147,20 +151,42 @@ def _read_fileobject(fileobj, filename, mmap_mode=None):
"""
# Detect if the fileobj contains compressed data.
compressor = _detect_compressor(fileobj)
+ if isinstance(fileobj, tuple(_COMPRESSOR_CLASSES)):
+ compressor = fileobj.__class__.__name__
if compressor == 'compat':
+ # Compatibility with old pickle mode: simply return the input
+ # filename "as-is" and let the compatibility function be called by the
+ # caller.
warnings.warn("The file '%s' has been generated with a joblib "
"version less than 0.10. "
"Please regenerate this pickle file." % filename,
DeprecationWarning, stacklevel=2)
yield filename
else:
- if compressor in _COMPRESSORS and mmap_mode is not None:
+ # Checking if incompatible load parameters with the type of file:
+ # mmap_mode cannot be used with compressed file or in memory buffers
+ # such as io.BytesIO.
+ if ((compressor in _COMPRESSORS or
+ isinstance(fileobj, tuple(_COMPRESSOR_CLASSES))) and
+ mmap_mode is not None):
warnings.warn('File "%(filename)s" is compressed using '
'"%(compressor)s" which is not compatible with '
- 'mmap_mode "%(mmap_mode)s" flag passed.'
- % locals(), DeprecationWarning, stacklevel=2)
-
- if compressor == 'zlib':
+ 'mmap_mode "%(mmap_mode)s" flag passed. mmap_mode '
+ 'option will be ignored.'
+ % locals(), stacklevel=2)
+ if isinstance(fileobj, io.BytesIO) and mmap_mode is not None:
+ warnings.warn('In memory persistence is not compatible with '
+ 'mmap_mode "%(mmap_mode)s" flag passed. mmap_mode '
+ 'option will be ignored.'
+ % locals(), stacklevel=2)
+
+ # if the passed fileobj is in the supported list of decompressor
+ # objects (GzipFile, BZ2File, LzmaFile), we simply return it.
+ if isinstance(fileobj, tuple(_COMPRESSOR_CLASSES)):
+ yield fileobj
+ # otherwise, based on the compressor detected in the file, we open the
+ # correct decompressor file object, wrapped in a buffer.
+ elif compressor == 'zlib':
yield _buffered_read_file(BinaryZlibFile(fileobj, 'rb'))
elif compressor == 'gzip':
yield _buffered_read_file(BinaryGzipFile(fileobj, 'rb'))
@@ -180,6 +206,7 @@ def _read_fileobject(fileobj, filename, mmap_mode=None):
"python ({0}.{1})"
.format(sys.version_info[0],
sys.version_info[1]))
+ # No compression detected => returning the input file object (open)
else:
yield fileobj

problem_statement:
In-memory caching
Sometimes reloading custom objects from the cache can be expensive (though still much faster than recomputing the whole object from scratch). In that case a two-level cache can be useful: results could first be kept in memory and, after some timeout (LRU/etc.), pushed to the hard drive for longer-term persistence.
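For illustration, a crude two-level cache can already be composed from the standard library and `joblib.Memory`; a minimal sketch (the names are illustrative, and the in-memory layer below is size-bounded rather than timeout-based):

```python
import functools
import joblib

memory = joblib.Memory('/tmp/joblib_cache', verbose=0)

@memory.cache                         # level 2: on-disk persistence
def expensive_compute(x):
    return x ** 2                     # stand-in for a costly computation

# level 1: in-process LRU cache in front of the disk cache
cached_compute = functools.lru_cache(maxsize=128)(expensive_compute)

cached_compute(3)   # computed once, stored on disk and in memory
cached_compute(3)   # served from memory, no disk round-trip
```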
Is this something that joblib's architecture may easily allow?

repo: joblib/joblib

test_patch:
diff --git a/joblib/test/test_numpy_pickle.py b/joblib/test/test_numpy_pickle.py
index 19a5e95..f3708fb 100644
--- a/joblib/test/test_numpy_pickle.py
+++ b/joblib/test/test_numpy_pickle.py
@@ -13,6 +13,7 @@ import warnings
import nose
import gzip
import zlib
+import bz2
import pickle
from contextlib import closing
@@ -25,7 +26,7 @@ from joblib.testing import assert_raises_regex
from joblib import numpy_pickle
from joblib.test import data
-from joblib._compat import PY3_OR_LATER
+from joblib._compat import PY3_OR_LATER, PY26
from joblib.numpy_pickle_utils import _IO_BUFFER_SIZE, BinaryZlibFile
from joblib.numpy_pickle_utils import _detect_compressor, _COMPRESSORS
@@ -334,14 +335,15 @@ def test_compress_mmap_mode_warning():
numpy_pickle.load(this_filename, mmap_mode='r+')
nose.tools.assert_equal(len(caught_warnings), 1)
for warn in caught_warnings:
- nose.tools.assert_equal(warn.category, DeprecationWarning)
+ nose.tools.assert_equal(warn.category, UserWarning)
nose.tools.assert_equal(warn.message.args[0],
'File "%(filename)s" is compressed using '
'"%(compressor)s" which is not compatible '
'with mmap_mode "%(mmap_mode)s" flag '
- 'passed.' % {'filename': this_filename,
- 'mmap_mode': 'r+',
- 'compressor': 'zlib'})
+ 'passed. mmap_mode option will be '
+ 'ignored.' % {'filename': this_filename,
+ 'mmap_mode': 'r+',
+ 'compressor': 'zlib'})
@with_numpy
@@ -681,6 +683,120 @@ def test_compression_using_file_extension():
os.remove(dump_fname)
+@with_numpy
+def test_file_handle_persistence():
+ objs = [np.random.random((10, 10)),
+ "some data",
+ np.matrix([0, 1, 2])]
+ fobjs = [open]
+ if not PY26:
+ fobjs += [bz2.BZ2File, gzip.GzipFile]
+ if PY3_OR_LATER:
+ import lzma
+ fobjs += [lzma.LZMAFile]
+ filename = env['filename'] + str(random.randint(0, 1000))
+
+ for obj in objs:
+ for fobj in fobjs:
+ with fobj(filename, 'wb') as f:
+ numpy_pickle.dump(obj, f)
+
+ # using the same decompressor prevents from internally
+ # decompress again.
+ with fobj(filename, 'rb') as f:
+ obj_reloaded = numpy_pickle.load(f)
+
+ # when needed, the correct decompressor should be used when
+ # passing a raw file handle.
+ with open(filename, 'rb') as f:
+ obj_reloaded_2 = numpy_pickle.load(f)
+
+ if isinstance(obj, np.ndarray):
+ np.testing.assert_array_equal(obj_reloaded, obj)
+ np.testing.assert_array_equal(obj_reloaded_2, obj)
+ else:
+ nose.tools.assert_equal(obj_reloaded, obj)
+ nose.tools.assert_equal(obj_reloaded_2, obj)
+
+ os.remove(filename)
+
+
+@with_numpy
+def test_in_memory_persistence():
+ objs = [np.random.random((10, 10)),
+ "some data",
+ np.matrix([0, 1, 2])]
+ for obj in objs:
+ f = io.BytesIO()
+ numpy_pickle.dump(obj, f)
+ obj_reloaded = numpy_pickle.load(f)
+ if isinstance(obj, np.ndarray):
+ np.testing.assert_array_equal(obj_reloaded, obj)
+ else:
+ nose.tools.assert_equal(obj_reloaded, obj)
+
+
+@with_numpy
+def test_file_handle_persistence_mmap():
+ obj = np.random.random((10, 10))
+ filename = env['filename'] + str(random.randint(0, 1000))
+
+ with open(filename, 'wb') as f:
+ numpy_pickle.dump(obj, f)
+
+ with open(filename, 'rb') as f:
+ obj_reloaded = numpy_pickle.load(f, mmap_mode='r+')
+
+ np.testing.assert_array_equal(obj_reloaded, obj)
+
+
+@with_numpy
+def test_file_handle_persistence_compressed_mmap():
+ obj = np.random.random((10, 10))
+ filename = env['filename'] + str(random.randint(0, 1000))
+
+ with open(filename, 'wb') as f:
+ numpy_pickle.dump(obj, f, compress=('gzip', 3))
+
+ with closing(gzip.GzipFile(filename, 'rb')) as f:
+ with warnings.catch_warnings(record=True) as caught_warnings:
+ warnings.simplefilter("always")
+ numpy_pickle.load(f, mmap_mode='r+')
+ nose.tools.assert_equal(len(caught_warnings), 1)
+ for warn in caught_warnings:
+ nose.tools.assert_equal(warn.category, UserWarning)
+ nose.tools.assert_equal(warn.message.args[0],
+ 'File "%(filename)s" is compressed '
+ 'using "%(compressor)s" which is not '
+ 'compatible with mmap_mode '
+ '"%(mmap_mode)s" flag '
+ 'passed. mmap_mode option will be '
+ 'ignored.' %
+ {'filename': "",
+ 'mmap_mode': 'r+',
+ 'compressor': 'GzipFile'})
+
+
+@with_numpy
+def test_file_handle_persistence_in_memory_mmap():
+ obj = np.random.random((10, 10))
+ buf = io.BytesIO()
+
+ numpy_pickle.dump(obj, buf)
+
+ with warnings.catch_warnings(record=True) as caught_warnings:
+ warnings.simplefilter("always")
+ numpy_pickle.load(buf, mmap_mode='r+')
+ nose.tools.assert_equal(len(caught_warnings), 1)
+ for warn in caught_warnings:
+ nose.tools.assert_equal(warn.category, UserWarning)
+ nose.tools.assert_equal(warn.message.args[0],
+ 'In memory persistence is not compatible '
+ 'with mmap_mode "%(mmap_mode)s" '
+ 'flag passed. mmap_mode option will be '
+ 'ignored.' % {'mmap_mode': 'r+'})
+
+
def test_binary_zlibfile():
filename = env['filename'] + str(random.randint(0, 1000))

meta:
{
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 5
}

version: 0.9

install_config:
{
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "",
"pip_packages": [
"nose",
"coverage",
"numpy>=1.6.1",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
}

requirements:
attrs==22.2.0
certifi==2021.5.30
coverage==6.2
importlib-metadata==4.8.3
iniconfig==1.1.1
-e git+https://github.com/joblib/joblib.git@63efbf0263922f9603f9ad9f86a2cc344a627d20#egg=joblib
nose==1.3.7
numpy==1.19.5
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0

environment:
name: joblib
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- nose==1.3.7
- numpy==1.19.5
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/joblib

FAIL_TO_PASS:
[
"joblib/test/test_numpy_pickle.py::test_compress_mmap_mode_warning",
"joblib/test/test_numpy_pickle.py::test_file_handle_persistence",
"joblib/test/test_numpy_pickle.py::test_in_memory_persistence",
"joblib/test/test_numpy_pickle.py::test_file_handle_persistence_mmap",
"joblib/test/test_numpy_pickle.py::test_file_handle_persistence_compressed_mmap",
"joblib/test/test_numpy_pickle.py::test_file_handle_persistence_in_memory_mmap"
]
| [
"joblib/test/test_numpy_pickle.py::test_cache_size_warning",
"joblib/test/test_numpy_pickle.py::test_joblib_pickle_across_python_versions"
]
| [
"joblib/test/test_numpy_pickle.py::test_value_error",
"joblib/test/test_numpy_pickle.py::test_compress_level_error",
"joblib/test/test_numpy_pickle.py::test_numpy_persistence",
"joblib/test/test_numpy_pickle.py::test_numpy_persistence_bufferred_array_compression",
"joblib/test/test_numpy_pickle.py::test_memmap_persistence",
"joblib/test/test_numpy_pickle.py::test_memmap_persistence_mixed_dtypes",
"joblib/test/test_numpy_pickle.py::test_masked_array_persistence",
"joblib/test/test_numpy_pickle.py::test_compressed_pickle_dump_and_load",
"joblib/test/test_numpy_pickle.py::test_compress_tuple_argument",
"joblib/test/test_numpy_pickle.py::test_joblib_compression_formats",
"joblib/test/test_numpy_pickle.py::test_load_externally_decompressed_files",
"joblib/test/test_numpy_pickle.py::test_compression_using_file_extension",
"joblib/test/test_numpy_pickle.py::test_binary_zlibfile",
"joblib/test/test_numpy_pickle.py::test_numpy_subclass",
"joblib/test/test_numpy_pickle.py::test_pathlib",
"joblib/test/test_numpy_pickle.py::test_non_contiguous_array_pickling",
"joblib/test/test_numpy_pickle.py::test_pickle_highest_protocol"
]
| []

license_name: BSD 3-Clause "New" or "Revised" License

__index_level_0__: 532

before_filepaths:
[
"joblib/numpy_pickle_utils.py",
"doc/persistence_fixture.py",
"benchmarks/bench_auto_batching.py",
"CHANGES.rst",
"joblib/numpy_pickle.py",
"doc/persistence.rst"
]
| [
"joblib/numpy_pickle_utils.py",
"doc/persistence_fixture.py",
"benchmarks/bench_auto_batching.py",
"CHANGES.rst",
"joblib/numpy_pickle.py",
"doc/persistence.rst"
]
|

instance_id: dask__dask-1152
base_commit: 472ce70b9c19a075764f7614e884b2afc14c44cb
created_at: 2016-05-11 14:35:49
environment_setup_commit: 8f8435a08bd54f0763f75ce61ec55adea9a2c08b
hints_text:

patch:
diff --git a/dask/array/core.py b/dask/array/core.py
index a82f73979..2cf8f9573 100644
--- a/dask/array/core.py
+++ b/dask/array/core.py
@@ -2062,7 +2062,7 @@ def asarray(array):
"""
if not isinstance(array, Array):
name = 'asarray-' + tokenize(array)
- if not hasattr(array, 'shape'):
+ if isinstance(getattr(array, 'shape', None), Iterable):
array = np.asarray(array)
array = from_array(array, chunks=array.shape, name=name)
return array
@@ -2103,7 +2103,7 @@ def is_scalar_for_elemwise(arg):
True
"""
return (np.isscalar(arg)
- or not hasattr(arg, 'shape')
+ or not isinstance(getattr(arg, 'shape', None), Iterable)
or isinstance(arg, np.dtype)
or (isinstance(arg, np.ndarray) and arg.ndim == 0))
@@ -2156,6 +2156,7 @@ def elemwise(op, *args, **kwargs):
(op.__name__, str(sorted(set(kwargs) - set(['name', 'dtype'])))))
shapes = [getattr(arg, 'shape', ()) for arg in args]
+ shapes = [s if isinstance(s, Iterable) else () for s in shapes]
out_ndim = len(broadcast_shapes(*shapes)) # Raises ValueError if dimensions mismatch
expr_inds = tuple(range(out_ndim))[::-1]

problem_statement:
Array astype breaks when given a type instead of a dtype object
The code below fails in dask 0.9.0, numpy 1.11.0, toolz 0.7.4, Python 2.7:
```py
import numpy as np
import dask.array as da
a = np.arange(5).astype(np.int32)
d = da.from_array(a, (1,))
print(d.dtype)
e = d.astype(np.int16)
print(e.dtype)
print(e.compute())
```
with the exception
```
Traceback (most recent call last):
File "./dask-astype.py", line 8, in <module>
e = d.astype(np.int16)
File "/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/dask/array/core.py", line 1091, in astype
return elemwise(_astype, self, dtype, name=name)
File "/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/dask/array/core.py", line 2159, in elemwise
out_ndim = len(broadcast_shapes(*shapes)) # Raises ValueError if dimensions mismatch
File "/home/bmerry/work/sdp/env/local/lib/python2.7/site-packages/dask/array/core.py", line 2131, in broadcast_shapes
for sizes in zip_longest(*map(reversed, shapes), fillvalue=1):
TypeError: type object argument after * must be a sequence, not itertools.imap
```
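For illustration, the root cause and the check the fix settles on can both be seen in isolation (a sketch; `looks_like_array` is an illustrative name for the test the patch inlines):

```python
import numpy as np
from collections.abc import Iterable  # modern location of Iterable

print(hasattr(np.int16, 'shape'))   # True -- the attribute exists...
print(type(np.int16.shape))         # ...but it is a getset_descriptor on
                                    # the type, not a tuple
print(np.dtype(np.int16).shape)     # () -- the dtype object has a real tuple

def looks_like_array(arg):
    # the patch only trusts 'shape' when it is actually iterable
    return isinstance(getattr(arg, 'shape', None), Iterable)

print(looks_like_array(np.int16))          # False
print(looks_like_array(np.ones((2, 2))))   # True
```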
If one passes `np.dtype(np.int16)` instead of just `np.int16`, then it works. The problem seems to be that the `np.int16.shape` attribute exists but isn't a sequence. Possibly this can be fixed by adding `dtype = np.dtype(dtype)` to the start of `Array.astype`.

repo: dask/dask

test_patch:
diff --git a/dask/array/tests/test_array_core.py b/dask/array/tests/test_array_core.py
index 555ca3bfc..989a91ad9 100644
--- a/dask/array/tests/test_array_core.py
+++ b/dask/array/tests/test_array_core.py
@@ -2010,3 +2010,9 @@ def test_copy():
def test_npartitions():
assert da.ones(5, chunks=(2,)).npartitions == 3
assert da.ones((5, 5), chunks=(2, 3)).npartitions == 6
+
+
+def test_astype_gh1151():
+ a = np.arange(5).astype(np.int32)
+ b = da.from_array(a, (1,))
+ assert_eq(a.astype(np.int16), b.astype(np.int16))

meta:
{
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
}

version: 1.10

install_config:
{
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[complete]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "numpy>=1.16.0 pandas>=1.0.0 cloudpickle partd distributed s3fs toolz psutil pytables bokeh bcolz scipy h5py ipython",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y graphviz liblzma-dev"
],
"python": "3.5",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
}

requirements:
aiobotocore @ file:///opt/conda/conda-bld/aiobotocore_1643638228694/work
aiohttp @ file:///tmp/build/80754af9/aiohttp_1632748060317/work
aioitertools @ file:///tmp/build/80754af9/aioitertools_1607109665762/work
async-timeout==3.0.1
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
backcall @ file:///home/ktietz/src/ci/backcall_1611930011877/work
bcolz==1.2.1
bokeh @ file:///tmp/build/80754af9/bokeh_1620710048147/work
boto3==1.23.10
botocore==1.26.10
brotlipy==0.7.0
certifi==2021.5.30
cffi @ file:///tmp/build/80754af9/cffi_1625814693874/work
chardet @ file:///tmp/build/80754af9/chardet_1607706739153/work
click==8.0.3
cloudpickle @ file:///tmp/build/80754af9/cloudpickle_1632508026186/work
contextvars==2.4
cryptography @ file:///tmp/build/80754af9/cryptography_1635366128178/work
cytoolz==0.11.0
-e git+https://github.com/dask/dask.git@472ce70b9c19a075764f7614e884b2afc14c44cb#egg=dask
decorator @ file:///opt/conda/conda-bld/decorator_1643638310831/work
distributed==1.10.2
fsspec @ file:///opt/conda/conda-bld/fsspec_1642510437511/work
h5py==2.10.0
HeapDict @ file:///Users/ktietz/demo/mc3/conda-bld/heapdict_1630598515714/work
idna @ file:///tmp/build/80754af9/idna_1637925883363/work
idna-ssl @ file:///tmp/build/80754af9/idna_ssl_1611752490495/work
immutables @ file:///tmp/build/80754af9/immutables_1628888996840/work
importlib-metadata==4.8.3
iniconfig==1.1.1
ipython @ file:///tmp/build/80754af9/ipython_1593447367857/work
ipython-genutils @ file:///tmp/build/80754af9/ipython_genutils_1606773439826/work
jedi @ file:///tmp/build/80754af9/jedi_1606932572482/work
Jinja2 @ file:///opt/conda/conda-bld/jinja2_1647436528585/work
jmespath @ file:///Users/ktietz/demo/mc3/conda-bld/jmespath_1630583964805/work
locket==0.2.1
MarkupSafe @ file:///tmp/build/80754af9/markupsafe_1621528150516/work
mock @ file:///tmp/build/80754af9/mock_1607622725907/work
msgpack @ file:///tmp/build/80754af9/msgpack-python_1612287171716/work
msgpack-python==0.5.6
multidict @ file:///tmp/build/80754af9/multidict_1607367768400/work
numexpr @ file:///tmp/build/80754af9/numexpr_1618853194344/work
numpy @ file:///tmp/build/80754af9/numpy_and_numpy_base_1603483703303/work
olefile @ file:///Users/ktietz/demo/mc3/conda-bld/olefile_1629805411829/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pandas==1.1.5
parso==0.7.0
partd @ file:///opt/conda/conda-bld/partd_1647245470509/work
pexpect @ file:///tmp/build/80754af9/pexpect_1605563209008/work
pickleshare @ file:///tmp/build/80754af9/pickleshare_1606932040724/work
Pillow @ file:///tmp/build/80754af9/pillow_1625670622947/work
pluggy==1.0.0
prompt-toolkit @ file:///tmp/build/80754af9/prompt-toolkit_1633440160888/work
psutil @ file:///tmp/build/80754af9/psutil_1612297621795/work
ptyprocess @ file:///tmp/build/80754af9/ptyprocess_1609355006118/work/dist/ptyprocess-0.7.0-py2.py3-none-any.whl
py==1.11.0
pycparser @ file:///tmp/build/80754af9/pycparser_1636541352034/work
Pygments @ file:///opt/conda/conda-bld/pygments_1644249106324/work
pyOpenSSL @ file:///opt/conda/conda-bld/pyopenssl_1643788558760/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
PySocks @ file:///tmp/build/80754af9/pysocks_1605305763431/work
pytest==7.0.1
python-dateutil @ file:///tmp/build/80754af9/python-dateutil_1626374649649/work
pytz==2021.3
PyYAML==5.4.1
s3fs==0.4.2
s3transfer==0.5.2
scipy @ file:///tmp/build/80754af9/scipy_1597686635649/work
six @ file:///tmp/build/80754af9/six_1644875935023/work
sortedcontainers @ file:///tmp/build/80754af9/sortedcontainers_1623949099177/work
tables==3.6.1
tblib @ file:///Users/ktietz/demo/mc3/conda-bld/tblib_1629402031467/work
tomli==1.2.3
toolz @ file:///tmp/build/80754af9/toolz_1636545406491/work
tornado @ file:///tmp/build/80754af9/tornado_1606942266872/work
traitlets @ file:///tmp/build/80754af9/traitlets_1632746497744/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3 @ file:///opt/conda/conda-bld/urllib3_1643638302206/work
wcwidth @ file:///Users/ktietz/demo/mc3/conda-bld/wcwidth_1629357192024/work
wrapt==1.12.1
yarl @ file:///tmp/build/80754af9/yarl_1606939915466/work
zict==2.0.0
zipp==3.6.0

environment:
name: dask
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- aiobotocore=2.1.0=pyhd3eb1b0_0
- aiohttp=3.7.4.post0=py36h7f8727e_2
- aioitertools=0.7.1=pyhd3eb1b0_0
- async-timeout=3.0.1=py36h06a4308_0
- attrs=21.4.0=pyhd3eb1b0_0
- backcall=0.2.0=pyhd3eb1b0_0
- bcolz=1.2.1=py36h04863e7_0
- blas=1.0=openblas
- blosc=1.21.3=h6a678d5_0
- bokeh=2.3.2=py36h06a4308_0
- brotlipy=0.7.0=py36h27cfd23_1003
- bzip2=1.0.8=h5eee18b_6
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- cffi=1.14.6=py36h400218f_0
- chardet=4.0.0=py36h06a4308_1003
- click=8.0.3=pyhd3eb1b0_0
- cloudpickle=2.0.0=pyhd3eb1b0_0
- contextvars=2.4=py_0
- cryptography=35.0.0=py36hd23ed53_0
- cytoolz=0.11.0=py36h7b6447c_0
- decorator=5.1.1=pyhd3eb1b0_0
- freetype=2.12.1=h4a9f257_0
- fsspec=2022.1.0=pyhd3eb1b0_0
- giflib=5.2.2=h5eee18b_0
- h5py=2.10.0=py36h7918eee_0
- hdf5=1.10.4=hb1b8bf9_0
- heapdict=1.0.1=pyhd3eb1b0_0
- idna=3.3=pyhd3eb1b0_0
- idna_ssl=1.1.0=py36h06a4308_0
- immutables=0.16=py36h7f8727e_0
- ipython=7.16.1=py36h5ca1d4c_0
- ipython_genutils=0.2.0=pyhd3eb1b0_1
- jedi=0.17.2=py36h06a4308_1
- jinja2=3.0.3=pyhd3eb1b0_0
- jmespath=0.10.0=pyhd3eb1b0_0
- jpeg=9e=h5eee18b_3
- lcms2=2.16=hb9589c4_0
- ld_impl_linux-64=2.40=h12ee557_0
- lerc=4.0.0=h6a678d5_0
- libdeflate=1.22=h5eee18b_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgfortran-ng=7.5.0=ha8ba4b0_17
- libgfortran4=7.5.0=ha8ba4b0_17
- libgomp=11.2.0=h1234567_1
- libopenblas=0.3.18=hf726d26_0
- libpng=1.6.39=h5eee18b_0
- libstdcxx-ng=11.2.0=h1234567_1
- libtiff=4.5.1=hffd6297_1
- libwebp=1.2.4=h11a3e52_1
- libwebp-base=1.2.4=h5eee18b_1
- locket=0.2.1=py36h06a4308_1
- lz4-c=1.9.4=h6a678d5_1
- lzo=2.10=h7b6447c_2
- markupsafe=2.0.1=py36h27cfd23_0
- mock=4.0.3=pyhd3eb1b0_0
- multidict=5.1.0=py36h27cfd23_2
- ncurses=6.4=h6a678d5_0
- numexpr=2.7.3=py36h4be448d_1
- numpy=1.19.2=py36h6163131_0
- numpy-base=1.19.2=py36h75fe3a5_0
- olefile=0.46=pyhd3eb1b0_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pandas=1.1.5=py36ha9443f7_0
- parso=0.7.0=py_0
- partd=1.2.0=pyhd3eb1b0_1
- pexpect=4.8.0=pyhd3eb1b0_3
- pickleshare=0.7.5=pyhd3eb1b0_1003
- pillow=8.3.1=py36h5aabda8_0
- pip=21.2.2=py36h06a4308_0
- prompt-toolkit=3.0.20=pyhd3eb1b0_0
- psutil=5.8.0=py36h27cfd23_1
- ptyprocess=0.7.0=pyhd3eb1b0_2
- pycparser=2.21=pyhd3eb1b0_0
- pygments=2.11.2=pyhd3eb1b0_0
- pyopenssl=22.0.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pysocks=1.7.1=py36h06a4308_0
- pytables=3.6.1=py36h71ec239_0
- python=3.6.13=h12debd9_1
- python-dateutil=2.8.2=pyhd3eb1b0_0
- pytz=2021.3=pyhd3eb1b0_0
- pyyaml=5.4.1=py36h27cfd23_1
- readline=8.2=h5eee18b_0
- scipy=1.5.2=py36habc2bb6_0
- setuptools=58.0.4=py36h06a4308_0
- six=1.16.0=pyhd3eb1b0_1
- sortedcontainers=2.4.0=pyhd3eb1b0_0
- sqlite=3.45.3=h5eee18b_0
- tblib=1.7.0=pyhd3eb1b0_0
- tk=8.6.14=h39e8969_0
- toolz=0.11.2=pyhd3eb1b0_0
- tornado=6.1=py36h27cfd23_0
- traitlets=4.3.3=py36h06a4308_0
- typing-extensions=4.1.1=hd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- urllib3=1.26.8=pyhd3eb1b0_0
- wcwidth=0.2.5=pyhd3eb1b0_0
- wheel=0.37.1=pyhd3eb1b0_0
- wrapt=1.12.1=py36h7b6447c_1
- xz=5.6.4=h5eee18b_1
- yaml=0.2.5=h7b6447c_0
- yarl=1.6.3=py36h27cfd23_0
- zict=2.0.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- zstd=1.5.6=hc292b87_0
- pip:
- boto3==1.23.10
- botocore==1.26.10
- distributed==1.10.2
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- msgpack-python==0.5.6
- pluggy==1.0.0
- py==1.11.0
- pytest==7.0.1
- s3fs==0.4.2
- s3transfer==0.5.2
- tomli==1.2.3
- zipp==3.6.0
prefix: /opt/conda/envs/dask

FAIL_TO_PASS:
[
"dask/array/tests/test_array_core.py::test_astype_gh1151"
]

FAIL_TO_FAIL:
[
"dask/array/tests/test_array_core.py::test_field_access",
"dask/array/tests/test_array_core.py::test_coarsen",
"dask/array/tests/test_array_core.py::test_coarsen_with_excess"
]
| [
"dask/array/tests/test_array_core.py::test_getem",
"dask/array/tests/test_array_core.py::test_top",
"dask/array/tests/test_array_core.py::test_top_supports_broadcasting_rules",
"dask/array/tests/test_array_core.py::test_concatenate3_on_scalars",
"dask/array/tests/test_array_core.py::test_chunked_dot_product",
"dask/array/tests/test_array_core.py::test_chunked_transpose_plus_one",
"dask/array/tests/test_array_core.py::test_transpose",
"dask/array/tests/test_array_core.py::test_broadcast_dimensions_works_with_singleton_dimensions",
"dask/array/tests/test_array_core.py::test_broadcast_dimensions",
"dask/array/tests/test_array_core.py::test_Array",
"dask/array/tests/test_array_core.py::test_uneven_chunks",
"dask/array/tests/test_array_core.py::test_numblocks_suppoorts_singleton_block_dims",
"dask/array/tests/test_array_core.py::test_keys",
"dask/array/tests/test_array_core.py::test_Array_computation",
"dask/array/tests/test_array_core.py::test_stack",
"dask/array/tests/test_array_core.py::test_short_stack",
"dask/array/tests/test_array_core.py::test_stack_scalars",
"dask/array/tests/test_array_core.py::test_concatenate",
"dask/array/tests/test_array_core.py::test_concatenate_fixlen_strings",
"dask/array/tests/test_array_core.py::test_vstack",
"dask/array/tests/test_array_core.py::test_hstack",
"dask/array/tests/test_array_core.py::test_dstack",
"dask/array/tests/test_array_core.py::test_take",
"dask/array/tests/test_array_core.py::test_compress",
"dask/array/tests/test_array_core.py::test_binops",
"dask/array/tests/test_array_core.py::test_isnull",
"dask/array/tests/test_array_core.py::test_isclose",
"dask/array/tests/test_array_core.py::test_broadcast_shapes",
"dask/array/tests/test_array_core.py::test_elemwise_on_scalars",
"dask/array/tests/test_array_core.py::test_partial_by_order",
"dask/array/tests/test_array_core.py::test_elemwise_with_ndarrays",
"dask/array/tests/test_array_core.py::test_elemwise_differently_chunked",
"dask/array/tests/test_array_core.py::test_operators",
"dask/array/tests/test_array_core.py::test_operator_dtype_promotion",
"dask/array/tests/test_array_core.py::test_tensordot",
"dask/array/tests/test_array_core.py::test_dot_method",
"dask/array/tests/test_array_core.py::test_T",
"dask/array/tests/test_array_core.py::test_norm",
"dask/array/tests/test_array_core.py::test_choose",
"dask/array/tests/test_array_core.py::test_where",
"dask/array/tests/test_array_core.py::test_where_has_informative_error",
"dask/array/tests/test_array_core.py::test_insert",
"dask/array/tests/test_array_core.py::test_multi_insert",
"dask/array/tests/test_array_core.py::test_broadcast_to",
"dask/array/tests/test_array_core.py::test_ravel",
"dask/array/tests/test_array_core.py::test_unravel",
"dask/array/tests/test_array_core.py::test_reshape",
"dask/array/tests/test_array_core.py::test_reshape_unknown_dimensions",
"dask/array/tests/test_array_core.py::test_full",
"dask/array/tests/test_array_core.py::test_map_blocks",
"dask/array/tests/test_array_core.py::test_map_blocks2",
"dask/array/tests/test_array_core.py::test_map_blocks_with_constants",
"dask/array/tests/test_array_core.py::test_map_blocks_with_kwargs",
"dask/array/tests/test_array_core.py::test_fromfunction",
"dask/array/tests/test_array_core.py::test_from_function_requires_block_args",
"dask/array/tests/test_array_core.py::test_repr",
"dask/array/tests/test_array_core.py::test_slicing_with_ellipsis",
"dask/array/tests/test_array_core.py::test_slicing_with_ndarray",
"dask/array/tests/test_array_core.py::test_dtype",
"dask/array/tests/test_array_core.py::test_blockdims_from_blockshape",
"dask/array/tests/test_array_core.py::test_coerce",
"dask/array/tests/test_array_core.py::test_store",
"dask/array/tests/test_array_core.py::test_store_compute_false",
"dask/array/tests/test_array_core.py::test_store_locks",
"dask/array/tests/test_array_core.py::test_to_hdf5",
"dask/array/tests/test_array_core.py::test_np_array_with_zero_dimensions",
"dask/array/tests/test_array_core.py::test_unique",
"dask/array/tests/test_array_core.py::test_dtype_complex",
"dask/array/tests/test_array_core.py::test_astype",
"dask/array/tests/test_array_core.py::test_arithmetic",
"dask/array/tests/test_array_core.py::test_elemwise_consistent_names",
"dask/array/tests/test_array_core.py::test_optimize",
"dask/array/tests/test_array_core.py::test_slicing_with_non_ndarrays",
"dask/array/tests/test_array_core.py::test_getarray",
"dask/array/tests/test_array_core.py::test_squeeze",
"dask/array/tests/test_array_core.py::test_size",
"dask/array/tests/test_array_core.py::test_nbytes",
"dask/array/tests/test_array_core.py::test_Array_normalizes_dtype",
"dask/array/tests/test_array_core.py::test_args",
"dask/array/tests/test_array_core.py::test_from_array_with_lock",
"dask/array/tests/test_array_core.py::test_from_func",
"dask/array/tests/test_array_core.py::test_topk",
"dask/array/tests/test_array_core.py::test_topk_k_bigger_than_chunk",
"dask/array/tests/test_array_core.py::test_bincount",
"dask/array/tests/test_array_core.py::test_bincount_with_weights",
"dask/array/tests/test_array_core.py::test_bincount_raises_informative_error_on_missing_minlength_kwarg",
"dask/array/tests/test_array_core.py::test_histogram",
"dask/array/tests/test_array_core.py::test_histogram_alternative_bins_range",
"dask/array/tests/test_array_core.py::test_histogram_return_type",
"dask/array/tests/test_array_core.py::test_histogram_extra_args_and_shapes",
"dask/array/tests/test_array_core.py::test_concatenate3_2",
"dask/array/tests/test_array_core.py::test_map_blocks3",
"dask/array/tests/test_array_core.py::test_from_array_with_missing_chunks",
"dask/array/tests/test_array_core.py::test_cache",
"dask/array/tests/test_array_core.py::test_take_dask_from_numpy",
"dask/array/tests/test_array_core.py::test_normalize_chunks",
"dask/array/tests/test_array_core.py::test_raise_on_no_chunks",
"dask/array/tests/test_array_core.py::test_chunks_is_immutable",
"dask/array/tests/test_array_core.py::test_raise_on_bad_kwargs",
"dask/array/tests/test_array_core.py::test_long_slice",
"dask/array/tests/test_array_core.py::test_h5py_newaxis",
"dask/array/tests/test_array_core.py::test_ellipsis_slicing",
"dask/array/tests/test_array_core.py::test_point_slicing",
"dask/array/tests/test_array_core.py::test_point_slicing_with_full_slice",
"dask/array/tests/test_array_core.py::test_slice_with_floats",
"dask/array/tests/test_array_core.py::test_vindex_errors",
"dask/array/tests/test_array_core.py::test_vindex_merge",
"dask/array/tests/test_array_core.py::test_empty_array",
"dask/array/tests/test_array_core.py::test_array",
"dask/array/tests/test_array_core.py::test_cov",
"dask/array/tests/test_array_core.py::test_corrcoef",
"dask/array/tests/test_array_core.py::test_memmap",
"dask/array/tests/test_array_core.py::test_to_npy_stack",
"dask/array/tests/test_array_core.py::test_view",
"dask/array/tests/test_array_core.py::test_view_fortran",
"dask/array/tests/test_array_core.py::test_h5py_tokenize",
"dask/array/tests/test_array_core.py::test_map_blocks_with_changed_dimension",
"dask/array/tests/test_array_core.py::test_broadcast_chunks",
"dask/array/tests/test_array_core.py::test_chunks_error",
"dask/array/tests/test_array_core.py::test_array_compute_forward_kwargs",
"dask/array/tests/test_array_core.py::test_dont_fuse_outputs",
"dask/array/tests/test_array_core.py::test_dont_dealias_outputs",
"dask/array/tests/test_array_core.py::test_timedelta_op",
"dask/array/tests/test_array_core.py::test_to_delayed",
"dask/array/tests/test_array_core.py::test_cumulative",
"dask/array/tests/test_array_core.py::test_eye",
"dask/array/tests/test_array_core.py::test_diag",
"dask/array/tests/test_array_core.py::test_tril_triu",
"dask/array/tests/test_array_core.py::test_tril_triu_errors",
"dask/array/tests/test_array_core.py::test_atop_names",
"dask/array/tests/test_array_core.py::test_atop_kwargs",
"dask/array/tests/test_array_core.py::test_from_delayed",
"dask/array/tests/test_array_core.py::test_A_property",
"dask/array/tests/test_array_core.py::test_copy",
"dask/array/tests/test_array_core.py::test_npartitions"
]

PASS_TO_FAIL: []

license_name: BSD 3-Clause "New" or "Revised" License

__index_level_0__: 533

before_filepaths:
[
"dask/array/core.py"
]

after_filepaths:
[
"dask/array/core.py"
]
|
instance_id: Axelrod-Python__Axelrod-587
base_commit: 03dd1a9600965800125eeb8942b6b0a3dfacf29c
created_at: 2016-05-11 17:36:01
environment_setup_commit: 03dd1a9600965800125eeb8942b6b0a3dfacf29c
hints_text:

patch:
diff --git a/axelrod/strategies/cycler.py b/axelrod/strategies/cycler.py
index 599e97a5..e3dd9c39 100644
--- a/axelrod/strategies/cycler.py
+++ b/axelrod/strategies/cycler.py
@@ -1,5 +1,6 @@
from axelrod import Actions, Player, init_args
+import copy
class AntiCycler(Player):
"""
@@ -74,18 +75,27 @@ class Cycler(Player):
class CyclerCCD(Cycler):
+ classifier = copy.copy(Cycler.classifier)
+ classifier['memory_depth'] = 2
+
@init_args
def __init__(self, cycle="CCD"):
Cycler.__init__(self, cycle=cycle)
class CyclerCCCD(Cycler):
+ classifier = copy.copy(Cycler.classifier)
+ classifier['memory_depth'] = 3
+
@init_args
def __init__(self, cycle="CCCD"):
Cycler.__init__(self, cycle=cycle)
class CyclerCCCCCD(Cycler):
+ classifier = copy.copy(Cycler.classifier)
+ classifier['memory_depth'] = 5
+
@init_args
def __init__(self, cycle="CCCCCD"):
Cycler.__init__(self, cycle=cycle)
diff --git a/axelrod/strategies/gobymajority.py b/axelrod/strategies/gobymajority.py
index fba5f73d..efc0d525 100644
--- a/axelrod/strategies/gobymajority.py
+++ b/axelrod/strategies/gobymajority.py
@@ -1,5 +1,7 @@
from axelrod import Actions, Player, init_args
+import copy
+
C, D = Actions.C, Actions.D
@@ -77,6 +79,8 @@ class GoByMajority40(GoByMajority):
"""
GoByMajority player with a memory of 40.
"""
+ classifier = copy.copy(GoByMajority.classifier)
+ classifier['memory_depth'] = 40
@init_args
def __init__(self, memory_depth=40, soft=True):
@@ -88,6 +92,8 @@ class GoByMajority20(GoByMajority):
"""
GoByMajority player with a memory of 20.
"""
+ classifier = copy.copy(GoByMajority.classifier)
+ classifier['memory_depth'] = 20
@init_args
def __init__(self, memory_depth=20, soft=True):
@@ -99,6 +105,8 @@ class GoByMajority10(GoByMajority):
"""
GoByMajority player with a memory of 10.
"""
+ classifier = copy.copy(GoByMajority.classifier)
+ classifier['memory_depth'] = 10
@init_args
def __init__(self, memory_depth=10, soft=True):
@@ -110,6 +118,8 @@ class GoByMajority5(GoByMajority):
"""
GoByMajority player with a memory of 5.
"""
+ classifier = copy.copy(GoByMajority.classifier)
+ classifier['memory_depth'] = 5
@init_args
def __init__(self, memory_depth=5, soft=True):
@@ -136,6 +146,8 @@ class HardGoByMajority40(HardGoByMajority):
"""
HardGoByMajority player with a memory of 40.
"""
+ classifier = copy.copy(GoByMajority.classifier)
+ classifier['memory_depth'] = 40
@init_args
def __init__(self, memory_depth=40, soft=False):
@@ -147,6 +159,8 @@ class HardGoByMajority20(HardGoByMajority):
"""
HardGoByMajority player with a memory of 20.
"""
+ classifier = copy.copy(GoByMajority.classifier)
+ classifier['memory_depth'] = 20
@init_args
def __init__(self, memory_depth=20, soft=False):
@@ -158,6 +172,8 @@ class HardGoByMajority10(HardGoByMajority):
"""
HardGoByMajority player with a memory of 10.
"""
+ classifier = copy.copy(GoByMajority.classifier)
+ classifier['memory_depth'] = 10
@init_args
def __init__(self, memory_depth=10, soft=False):
@@ -169,6 +185,8 @@ class HardGoByMajority5(HardGoByMajority):
"""
HardGoByMajority player with a memory of 5.
"""
+ classifier = copy.copy(GoByMajority.classifier)
+ classifier['memory_depth'] = 5
@init_args
def __init__(self, memory_depth=5, soft=False):
diff --git a/axelrod/strategies/meta.py b/axelrod/strategies/meta.py
index 2f16d4b8..c2d5b60f 100644
--- a/axelrod/strategies/meta.py
+++ b/axelrod/strategies/meta.py
@@ -289,6 +289,14 @@ class MetaMixer(MetaPlayer):
"""
name = "Meta Mixer"
+ classifier = {
+ 'memory_depth': float('inf'), # Long memory
+ 'stochastic': True,
+ 'makes_use_of': set(),
+ 'inspects_source': False,
+ 'manipulates_source': False,
+ 'manipulates_state': False
+ }
def __init__(self, team=None, distribution=None):
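The recurring move in this patch, copying the parent classifier before overriding `memory_depth`, guards against the shared-mutable-class-attribute trap; a minimal sketch of the failure mode (`Base`, `Careless`, and `Careful` are illustrative names, not Axelrod classes):

```python
import copy

class Base(object):
    classifier = {'memory_depth': float('inf')}

class Careless(Base):
    pass

# Item assignment reaches through to the single shared dict, so the
# parent class (and every sibling) is silently changed as well:
Careless.classifier['memory_depth'] = 2
print(Base.classifier['memory_depth'])          # 2 -- Base was mutated too

Base.classifier['memory_depth'] = float('inf')  # reset for the demo

class Careful(Base):
    classifier = copy.copy(Base.classifier)     # own dict, as in the patch
    classifier['memory_depth'] = 2

Careful.classifier['memory_depth'] = 5
print(Base.classifier['memory_depth'])          # inf -- Base is unaffected
```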
| Test classification of strategy class as well as strategy player
@mojones noticed a bug in the classification of Win Stay Lose Shift: see #506.
I fixed it in #511, but really the test I added to #511 should be a test in the player class. I tried that but didn't get a failing test. Needs investigating :) | Axelrod-Python/Axelrod | diff --git a/axelrod/tests/unit/test_gambler.py b/axelrod/tests/unit/test_gambler.py
index 1448103f..c59bb8d3 100755
--- a/axelrod/tests/unit/test_gambler.py
+++ b/axelrod/tests/unit/test_gambler.py
@@ -8,6 +8,8 @@ import random
from .test_player import TestPlayer, TestHeadsUp
from axelrod import random_choice, Actions
+import copy
+
C, D = axelrod.Actions.C, axelrod.Actions.D
@@ -25,6 +27,9 @@ class TestGambler(TestPlayer):
'manipulates_state': False
}
+ expected_class_classifier = copy.copy(expected_classifier)
+ expected_class_classifier['memory_depth'] = float('inf')
+
def test_init(self):
# Test empty table
player = self.player(dict())
diff --git a/axelrod/tests/unit/test_gobymajority.py b/axelrod/tests/unit/test_gobymajority.py
index 52883322..40d3b9e2 100644
--- a/axelrod/tests/unit/test_gobymajority.py
+++ b/axelrod/tests/unit/test_gobymajority.py
@@ -126,6 +126,15 @@ def factory_TestGoByRecentMajority(L, soft=True):
name = "Hard Go By Majority: %i" % L
player = getattr(axelrod, 'HardGoByMajority%i' % L)
+ expected_classifier = {
+ 'stochastic': False,
+ 'memory_depth': L,
+ 'makes_use_of': set(),
+ 'inspects_source': False,
+ 'manipulates_source': False,
+ 'manipulates_state': False
+ }
+
def test_initial_strategy(self):
"""Starts by defecting."""
self.first_play_test(D)
diff --git a/axelrod/tests/unit/test_lookerup.py b/axelrod/tests/unit/test_lookerup.py
index 49de2ce9..ce447ae1 100755
--- a/axelrod/tests/unit/test_lookerup.py
+++ b/axelrod/tests/unit/test_lookerup.py
@@ -4,6 +4,8 @@ import axelrod
from .test_player import TestPlayer, TestHeadsUp
from axelrod.strategies.lookerup import create_lookup_table_keys
+import copy
+
C, D = axelrod.Actions.C, axelrod.Actions.D
@@ -13,7 +15,7 @@ class TestLookerUp(TestPlayer):
player = axelrod.LookerUp
expected_classifier = {
- 'memory_depth': 1, # Default TFT table
+ 'memory_depth': 1, # Default TfT
'stochastic': False,
'makes_use_of': set(),
'inspects_source': False,
@@ -21,6 +23,9 @@ class TestLookerUp(TestPlayer):
'manipulates_state': False
}
+ expected_class_classifier = copy.copy(expected_classifier)
+ expected_class_classifier['memory_depth'] = float('inf')
+
def test_init(self):
# Test empty table
player = self.player(dict())
@@ -113,6 +118,7 @@ class TestLookerUp(TestPlayer):
self.responses_test([C, C, D], [D, D, C], [D])
+
class TestEvolvedLookerUp(TestPlayer):
name = "EvolvedLookerUp"
diff --git a/axelrod/tests/unit/test_meta.py b/axelrod/tests/unit/test_meta.py
index c8355d79..25810483 100644
--- a/axelrod/tests/unit/test_meta.py
+++ b/axelrod/tests/unit/test_meta.py
@@ -3,7 +3,7 @@
import random
import axelrod
-import unittest
+import copy
from .test_player import TestPlayer
@@ -26,7 +26,7 @@ class TestMetaPlayer(TestPlayer):
'manipulates_state': False
}
- def classifier_test(self):
+ def classifier_test(self, expected_class_classifier=None):
player = self.player()
classifier = dict()
for key in ['stochastic',
@@ -47,6 +47,12 @@ class TestMetaPlayer(TestPlayer):
msg="%s - Behaviour: %s != Expected Behaviour: %s" %
(key, player.classifier[key], classifier[key]))
+ # Test that player has same classifier as its class unless otherwise
+ # specified
+ if expected_class_classifier is None:
+ expected_class_classifier = player.classifier
+ self.assertEqual(expected_class_classifier, self.player.classifier)
+
def test_reset(self):
p1 = self.player()
p2 = axelrod.Cooperator()
@@ -70,6 +76,10 @@ class TestMetaMajority(TestMetaPlayer):
'manipulates_state': False
}
+ expected_class_classifier = copy.copy(expected_classifier)
+ expected_class_classifier['stochastic'] = False
+ expected_class_classifier['makes_use_of'] = set([])
+
def test_strategy(self):
P1 = axelrod.MetaMajority()
@@ -96,6 +106,10 @@ class TestMetaMinority(TestMetaPlayer):
'manipulates_state': False
}
+ expected_class_classifier = copy.copy(expected_classifier)
+ expected_class_classifier['stochastic'] = False
+ expected_class_classifier['makes_use_of'] = set([])
+
def test_team(self):
team = [axelrod.Cooperator]
player = self.player(team=team)
@@ -127,6 +141,10 @@ class TestMetaWinner(TestMetaPlayer):
'manipulates_state': False
}
+ expected_class_classifier = copy.copy(expected_classifier)
+ expected_class_classifier['stochastic'] = False
+ expected_class_classifier['makes_use_of'] = set([])
+
def test_strategy(self):
P1 = axelrod.MetaWinner(team = [axelrod.Cooperator, axelrod.Defector])
@@ -206,6 +224,10 @@ class TestMetaMajorityMemoryOne(TestMetaPlayer):
'manipulates_state': False
}
+ expected_class_classifier = copy.copy(expected_classifier)
+ expected_class_classifier['stochastic'] = False
+ expected_class_classifier['makes_use_of'] = set([])
+
def test_strategy(self):
self.first_play_test(C)
@@ -222,6 +244,10 @@ class TestMetaWinnerMemoryOne(TestMetaPlayer):
'manipulates_state': False
}
+ expected_class_classifier = copy.copy(expected_classifier)
+ expected_class_classifier['stochastic'] = False
+ expected_class_classifier['makes_use_of'] = set([])
+
def test_strategy(self):
self.first_play_test(C)
@@ -237,6 +263,11 @@ class TestMetaMajorityFiniteMemory(TestMetaPlayer):
'manipulates_state': False
}
+ expected_class_classifier = copy.copy(expected_classifier)
+ expected_class_classifier['stochastic'] = False
+ expected_class_classifier['makes_use_of'] = set([])
+
+
def test_strategy(self):
self.first_play_test(C)
@@ -252,6 +283,11 @@ class TestMetaWinnerFiniteMemory(TestMetaPlayer):
'manipulates_state': False
}
+ expected_class_classifier = copy.copy(expected_classifier)
+ expected_class_classifier['stochastic'] = False
+ expected_class_classifier['makes_use_of'] = set([])
+
+
def test_strategy(self):
self.first_play_test(C)
@@ -267,6 +303,11 @@ class TestMetaMajorityLongMemory(TestMetaPlayer):
'manipulates_state': False
}
+ expected_class_classifier = copy.copy(expected_classifier)
+ expected_class_classifier['stochastic'] = False
+ expected_class_classifier['makes_use_of'] = set([])
+
+
def test_strategy(self):
self.first_play_test(C)
@@ -282,6 +323,10 @@ class TestMetaWinnerLongMemory(TestMetaPlayer):
'manipulates_state': False
}
+ expected_class_classifier = copy.copy(expected_classifier)
+ expected_class_classifier['stochastic'] = False
+ expected_class_classifier['makes_use_of'] = set([])
+
def test_strategy(self):
self.first_play_test(C)
@@ -298,6 +343,9 @@ class TestMetaMixer(TestMetaPlayer):
'manipulates_state': False
}
+ expected_class_classifier = copy.copy(expected_classifier)
+ expected_class_classifier['makes_use_of'] = set()
+
def test_strategy(self):
team = [axelrod.TitForTat, axelrod.Cooperator, axelrod.Grudger]
diff --git a/axelrod/tests/unit/test_player.py b/axelrod/tests/unit/test_player.py
index 11a89e9a..601fd396 100644
--- a/axelrod/tests/unit/test_player.py
+++ b/axelrod/tests/unit/test_player.py
@@ -116,6 +116,7 @@ class TestOpponent(Player):
class TestPlayer(unittest.TestCase):
"A Test class from which other player test classes are inherited"
player = TestOpponent
+ expected_class_classifier = None
def test_initialisation(self):
"""Test that the player initiates correctly."""
@@ -126,7 +127,7 @@ class TestPlayer(unittest.TestCase):
{'length': -1, 'game': DefaultGame, 'noise': 0})
self.assertEqual(player.cooperations, 0)
self.assertEqual(player.defections, 0)
- self.classifier_test()
+ self.classifier_test(self.expected_class_classifier)
def test_repr(self):
"""Test that the representation is correct."""
@@ -237,12 +238,19 @@ class TestPlayer(unittest.TestCase):
random_seed=random_seed, attrs=attrs)
- def classifier_test(self):
+ def classifier_test(self, expected_class_classifier=None):
"""Test that the keys in the expected_classifier dictionary give the
expected values in the player classifier dictionary. Also checks that
two particular keys (memory_depth and stochastic) are in the
dictionary."""
player = self.player()
+
+ # Test that player has same classifier as its class unless otherwise
+ # specified
+ if expected_class_classifier is None:
+ expected_class_classifier = player.classifier
+ self.assertEqual(expected_class_classifier, self.player.classifier)
+
self.assertTrue('memory_depth' in player.classifier,
msg="memory_depth not in classifier")
self.assertTrue('stochastic' in player.classifier,
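The pattern exercised above guards against a classic Python pitfall: a mutable `classifier` dict declared on a base class is shared by every subclass, so a subclass that only adjusts its instance copy in `__init__` leaves the class-level classifier stale — which is exactly why the patch copies the parent classifier at class-definition time with `copy.copy`. A minimal sketch of the pitfall and the fix, using simplified stand-in classes rather than the real Axelrod ones:

```python
import copy


class Player(object):
    # A mutable class attribute: shared by every subclass that does not
    # explicitly make its own copy.
    classifier = {'memory_depth': float('inf'), 'stochastic': False}

    def __init__(self):
        # Each instance works on its own copy, as Axelrod's Player does.
        self.classifier = copy.copy(self.classifier)


class BadCycler(Player):
    def __init__(self):
        Player.__init__(self)
        # Only the instance copy is updated; BadCycler.classifier still
        # reports memory_depth == inf, which the new class-level test catches.
        self.classifier['memory_depth'] = 3


class GoodCycler(Player):
    # Copy at class-definition time so the class and its instances agree.
    classifier = copy.copy(Player.classifier)
    classifier['memory_depth'] = 3


assert BadCycler().classifier != BadCycler.classifier
assert GoodCycler().classifier == GoodCycler.classifier
```

A `classifier_test` that compares `self.player.classifier` against an instance's classifier, as the test patch above does, fails for the first pattern and passes for the second.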
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 2
},
"num_modified_files": 3
} | 0.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
-e git+https://github.com/Axelrod-Python/Axelrod.git@03dd1a9600965800125eeb8942b6b0a3dfacf29c#egg=Axelrod
coverage==7.8.0
cycler==0.12.1
exceptiongroup==1.2.2
execnet==2.1.1
hypothesis==6.130.6
iniconfig==2.1.0
kiwisolver==1.4.7
matplotlib==3.3.4
numpy==2.0.2
packaging==24.2
pillow==11.1.0
pluggy==1.5.0
pyparsing==2.1.1
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
six==1.17.0
sortedcontainers==2.4.0
testfixtures==4.9.1
tomli==2.2.1
tqdm==3.4.0
typing_extensions==4.13.0
| name: Axelrod
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- coverage==7.8.0
- cycler==0.12.1
- exceptiongroup==1.2.2
- execnet==2.1.1
- hypothesis==6.130.6
- iniconfig==2.1.0
- kiwisolver==1.4.7
- matplotlib==3.3.4
- numpy==2.0.2
- packaging==24.2
- pillow==11.1.0
- pluggy==1.5.0
- pyparsing==2.1.1
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- six==1.17.0
- sortedcontainers==2.4.0
- testfixtures==4.9.1
- tomli==2.2.1
- tqdm==3.4.0
- typing-extensions==4.13.0
prefix: /opt/conda/envs/Axelrod
| [
"axelrod/tests/unit/test_gobymajority.py::TestGoByMajority5::test_initialisation",
"axelrod/tests/unit/test_gobymajority.py::TestGoByMajority10::test_initialisation",
"axelrod/tests/unit/test_gobymajority.py::TestGoByMajority20::test_initialisation",
"axelrod/tests/unit/test_gobymajority.py::TestGoByMajority40::test_initialisation",
"axelrod/tests/unit/test_gobymajority.py::TestHardGoByMajority5::test_initialisation",
"axelrod/tests/unit/test_gobymajority.py::TestHardGoByMajority10::test_initialisation",
"axelrod/tests/unit/test_gobymajority.py::TestHardGoByMajority20::test_initialisation",
"axelrod/tests/unit/test_gobymajority.py::TestHardGoByMajority40::test_initialisation",
"axelrod/tests/unit/test_meta.py::TestMetaMixer::test_initialisation"
]
| []
| [
"axelrod/tests/unit/test_gambler.py::TestPlayer::test_clone",
"axelrod/tests/unit/test_gambler.py::TestPlayer::test_initialisation",
"axelrod/tests/unit/test_gambler.py::TestPlayer::test_match_attributes",
"axelrod/tests/unit/test_gambler.py::TestPlayer::test_repr",
"axelrod/tests/unit/test_gambler.py::TestPlayer::test_reset",
"axelrod/tests/unit/test_gambler.py::TestGambler::test_clone",
"axelrod/tests/unit/test_gambler.py::TestGambler::test_defector_table",
"axelrod/tests/unit/test_gambler.py::TestGambler::test_init",
"axelrod/tests/unit/test_gambler.py::TestGambler::test_initialisation",
"axelrod/tests/unit/test_gambler.py::TestGambler::test_match_attributes",
"axelrod/tests/unit/test_gambler.py::TestGambler::test_repr",
"axelrod/tests/unit/test_gambler.py::TestGambler::test_reset",
"axelrod/tests/unit/test_gambler.py::TestGambler::test_strategy",
"axelrod/tests/unit/test_gambler.py::TestPSOGambler::test_clone",
"axelrod/tests/unit/test_gambler.py::TestPSOGambler::test_init",
"axelrod/tests/unit/test_gambler.py::TestPSOGambler::test_initialisation",
"axelrod/tests/unit/test_gambler.py::TestPSOGambler::test_match_attributes",
"axelrod/tests/unit/test_gambler.py::TestPSOGambler::test_repr",
"axelrod/tests/unit/test_gambler.py::TestPSOGambler::test_reset",
"axelrod/tests/unit/test_gambler.py::TestPSOGambler::test_strategy",
"axelrod/tests/unit/test_gambler.py::PSOGamblervsDefector::test_vs",
"axelrod/tests/unit/test_gambler.py::PSOGamblervsCooperator::test_vs",
"axelrod/tests/unit/test_gambler.py::PSOGamblervsTFT::test_vs",
"axelrod/tests/unit/test_gambler.py::PSOGamblervsAlternator::test_vs",
"axelrod/tests/unit/test_gobymajority.py::TestPlayer::test_clone",
"axelrod/tests/unit/test_gobymajority.py::TestPlayer::test_initialisation",
"axelrod/tests/unit/test_gobymajority.py::TestPlayer::test_match_attributes",
"axelrod/tests/unit/test_gobymajority.py::TestPlayer::test_repr",
"axelrod/tests/unit/test_gobymajority.py::TestPlayer::test_reset",
"axelrod/tests/unit/test_gobymajority.py::TestGoByMajority::test_clone",
"axelrod/tests/unit/test_gobymajority.py::TestGoByMajority::test_default_soft",
"axelrod/tests/unit/test_gobymajority.py::TestGoByMajority::test_initial_strategy",
"axelrod/tests/unit/test_gobymajority.py::TestGoByMajority::test_initialisation",
"axelrod/tests/unit/test_gobymajority.py::TestGoByMajority::test_match_attributes",
"axelrod/tests/unit/test_gobymajority.py::TestGoByMajority::test_name",
"axelrod/tests/unit/test_gobymajority.py::TestGoByMajority::test_repr",
"axelrod/tests/unit/test_gobymajority.py::TestGoByMajority::test_reset",
"axelrod/tests/unit/test_gobymajority.py::TestGoByMajority::test_soft",
"axelrod/tests/unit/test_gobymajority.py::TestGoByMajority::test_strategy",
"axelrod/tests/unit/test_gobymajority.py::TestHardGoByMajority::test_clone",
"axelrod/tests/unit/test_gobymajority.py::TestHardGoByMajority::test_default_soft",
"axelrod/tests/unit/test_gobymajority.py::TestHardGoByMajority::test_initial_strategy",
"axelrod/tests/unit/test_gobymajority.py::TestHardGoByMajority::test_initialisation",
"axelrod/tests/unit/test_gobymajority.py::TestHardGoByMajority::test_match_attributes",
"axelrod/tests/unit/test_gobymajority.py::TestHardGoByMajority::test_name",
"axelrod/tests/unit/test_gobymajority.py::TestHardGoByMajority::test_repr",
"axelrod/tests/unit/test_gobymajority.py::TestHardGoByMajority::test_reset",
"axelrod/tests/unit/test_gobymajority.py::TestHardGoByMajority::test_soft",
"axelrod/tests/unit/test_gobymajority.py::TestHardGoByMajority::test_strategy",
"axelrod/tests/unit/test_gobymajority.py::TestGoByMajority5::test_clone",
"axelrod/tests/unit/test_gobymajority.py::TestGoByMajority5::test_initial_strategy",
"axelrod/tests/unit/test_gobymajority.py::TestGoByMajority5::test_match_attributes",
"axelrod/tests/unit/test_gobymajority.py::TestGoByMajority5::test_repr",
"axelrod/tests/unit/test_gobymajority.py::TestGoByMajority5::test_reset",
"axelrod/tests/unit/test_gobymajority.py::TestGoByMajority5::test_strategy",
"axelrod/tests/unit/test_gobymajority.py::TestGoByMajority10::test_clone",
"axelrod/tests/unit/test_gobymajority.py::TestGoByMajority10::test_initial_strategy",
"axelrod/tests/unit/test_gobymajority.py::TestGoByMajority10::test_match_attributes",
"axelrod/tests/unit/test_gobymajority.py::TestGoByMajority10::test_repr",
"axelrod/tests/unit/test_gobymajority.py::TestGoByMajority10::test_reset",
"axelrod/tests/unit/test_gobymajority.py::TestGoByMajority10::test_strategy",
"axelrod/tests/unit/test_gobymajority.py::TestGoByMajority20::test_clone",
"axelrod/tests/unit/test_gobymajority.py::TestGoByMajority20::test_initial_strategy",
"axelrod/tests/unit/test_gobymajority.py::TestGoByMajority20::test_match_attributes",
"axelrod/tests/unit/test_gobymajority.py::TestGoByMajority20::test_repr",
"axelrod/tests/unit/test_gobymajority.py::TestGoByMajority20::test_reset",
"axelrod/tests/unit/test_gobymajority.py::TestGoByMajority20::test_strategy",
"axelrod/tests/unit/test_gobymajority.py::TestGoByMajority40::test_clone",
"axelrod/tests/unit/test_gobymajority.py::TestGoByMajority40::test_initial_strategy",
"axelrod/tests/unit/test_gobymajority.py::TestGoByMajority40::test_match_attributes",
"axelrod/tests/unit/test_gobymajority.py::TestGoByMajority40::test_repr",
"axelrod/tests/unit/test_gobymajority.py::TestGoByMajority40::test_reset",
"axelrod/tests/unit/test_gobymajority.py::TestGoByMajority40::test_strategy",
"axelrod/tests/unit/test_gobymajority.py::TestHardGoByMajority5::test_clone",
"axelrod/tests/unit/test_gobymajority.py::TestHardGoByMajority5::test_initial_strategy",
"axelrod/tests/unit/test_gobymajority.py::TestHardGoByMajority5::test_match_attributes",
"axelrod/tests/unit/test_gobymajority.py::TestHardGoByMajority5::test_repr",
"axelrod/tests/unit/test_gobymajority.py::TestHardGoByMajority5::test_reset",
"axelrod/tests/unit/test_gobymajority.py::TestHardGoByMajority5::test_strategy",
"axelrod/tests/unit/test_gobymajority.py::TestHardGoByMajority10::test_clone",
"axelrod/tests/unit/test_gobymajority.py::TestHardGoByMajority10::test_initial_strategy",
"axelrod/tests/unit/test_gobymajority.py::TestHardGoByMajority10::test_match_attributes",
"axelrod/tests/unit/test_gobymajority.py::TestHardGoByMajority10::test_repr",
"axelrod/tests/unit/test_gobymajority.py::TestHardGoByMajority10::test_reset",
"axelrod/tests/unit/test_gobymajority.py::TestHardGoByMajority10::test_strategy",
"axelrod/tests/unit/test_gobymajority.py::TestHardGoByMajority20::test_clone",
"axelrod/tests/unit/test_gobymajority.py::TestHardGoByMajority20::test_initial_strategy",
"axelrod/tests/unit/test_gobymajority.py::TestHardGoByMajority20::test_match_attributes",
"axelrod/tests/unit/test_gobymajority.py::TestHardGoByMajority20::test_repr",
"axelrod/tests/unit/test_gobymajority.py::TestHardGoByMajority20::test_reset",
"axelrod/tests/unit/test_gobymajority.py::TestHardGoByMajority20::test_strategy",
"axelrod/tests/unit/test_gobymajority.py::TestHardGoByMajority40::test_clone",
"axelrod/tests/unit/test_gobymajority.py::TestHardGoByMajority40::test_initial_strategy",
"axelrod/tests/unit/test_gobymajority.py::TestHardGoByMajority40::test_match_attributes",
"axelrod/tests/unit/test_gobymajority.py::TestHardGoByMajority40::test_repr",
"axelrod/tests/unit/test_gobymajority.py::TestHardGoByMajority40::test_reset",
"axelrod/tests/unit/test_gobymajority.py::TestHardGoByMajority40::test_strategy",
"axelrod/tests/unit/test_lookerup.py::TestPlayer::test_clone",
"axelrod/tests/unit/test_lookerup.py::TestPlayer::test_initialisation",
"axelrod/tests/unit/test_lookerup.py::TestPlayer::test_match_attributes",
"axelrod/tests/unit/test_lookerup.py::TestPlayer::test_repr",
"axelrod/tests/unit/test_lookerup.py::TestPlayer::test_reset",
"axelrod/tests/unit/test_lookerup.py::TestLookerUp::test_clone",
"axelrod/tests/unit/test_lookerup.py::TestLookerUp::test_defector_table",
"axelrod/tests/unit/test_lookerup.py::TestLookerUp::test_init",
"axelrod/tests/unit/test_lookerup.py::TestLookerUp::test_initialisation",
"axelrod/tests/unit/test_lookerup.py::TestLookerUp::test_match_attributes",
"axelrod/tests/unit/test_lookerup.py::TestLookerUp::test_repr",
"axelrod/tests/unit/test_lookerup.py::TestLookerUp::test_reset",
"axelrod/tests/unit/test_lookerup.py::TestLookerUp::test_starting_move",
"axelrod/tests/unit/test_lookerup.py::TestLookerUp::test_strategy",
"axelrod/tests/unit/test_lookerup.py::TestLookerUp::test_zero_tables",
"axelrod/tests/unit/test_lookerup.py::TestEvolvedLookerUp::test_clone",
"axelrod/tests/unit/test_lookerup.py::TestEvolvedLookerUp::test_init",
"axelrod/tests/unit/test_lookerup.py::TestEvolvedLookerUp::test_initialisation",
"axelrod/tests/unit/test_lookerup.py::TestEvolvedLookerUp::test_match_attributes",
"axelrod/tests/unit/test_lookerup.py::TestEvolvedLookerUp::test_repr",
"axelrod/tests/unit/test_lookerup.py::TestEvolvedLookerUp::test_reset",
"axelrod/tests/unit/test_lookerup.py::TestEvolvedLookerUp::test_strategy",
"axelrod/tests/unit/test_lookerup.py::EvolvedLookerUpvsDefector::test_vs",
"axelrod/tests/unit/test_lookerup.py::EvolvedLookerUpvsCooperator::test_vs",
"axelrod/tests/unit/test_lookerup.py::EvolvedLookerUpvsTFT::test_vs",
"axelrod/tests/unit/test_lookerup.py::EvolvedLookerUpvsAlternator::test_vs",
"axelrod/tests/unit/test_meta.py::TestPlayer::test_clone",
"axelrod/tests/unit/test_meta.py::TestPlayer::test_initialisation",
"axelrod/tests/unit/test_meta.py::TestPlayer::test_match_attributes",
"axelrod/tests/unit/test_meta.py::TestPlayer::test_repr",
"axelrod/tests/unit/test_meta.py::TestPlayer::test_reset",
"axelrod/tests/unit/test_meta.py::TestMetaPlayer::test_clone",
"axelrod/tests/unit/test_meta.py::TestMetaPlayer::test_initialisation",
"axelrod/tests/unit/test_meta.py::TestMetaPlayer::test_match_attributes",
"axelrod/tests/unit/test_meta.py::TestMetaPlayer::test_repr",
"axelrod/tests/unit/test_meta.py::TestMetaPlayer::test_reset",
"axelrod/tests/unit/test_meta.py::TestMetaMajority::test_clone",
"axelrod/tests/unit/test_meta.py::TestMetaMajority::test_initialisation",
"axelrod/tests/unit/test_meta.py::TestMetaMajority::test_match_attributes",
"axelrod/tests/unit/test_meta.py::TestMetaMajority::test_repr",
"axelrod/tests/unit/test_meta.py::TestMetaMajority::test_reset",
"axelrod/tests/unit/test_meta.py::TestMetaMajority::test_strategy",
"axelrod/tests/unit/test_meta.py::TestMetaMinority::test_clone",
"axelrod/tests/unit/test_meta.py::TestMetaMinority::test_initialisation",
"axelrod/tests/unit/test_meta.py::TestMetaMinority::test_match_attributes",
"axelrod/tests/unit/test_meta.py::TestMetaMinority::test_repr",
"axelrod/tests/unit/test_meta.py::TestMetaMinority::test_reset",
"axelrod/tests/unit/test_meta.py::TestMetaMinority::test_strategy",
"axelrod/tests/unit/test_meta.py::TestMetaMinority::test_team",
"axelrod/tests/unit/test_meta.py::TestMetaWinner::test_clone",
"axelrod/tests/unit/test_meta.py::TestMetaWinner::test_initialisation",
"axelrod/tests/unit/test_meta.py::TestMetaWinner::test_match_attributes",
"axelrod/tests/unit/test_meta.py::TestMetaWinner::test_repr",
"axelrod/tests/unit/test_meta.py::TestMetaWinner::test_reset",
"axelrod/tests/unit/test_meta.py::TestMetaWinner::test_strategy",
"axelrod/tests/unit/test_meta.py::TestMetaHunter::test_clone",
"axelrod/tests/unit/test_meta.py::TestMetaHunter::test_initialisation",
"axelrod/tests/unit/test_meta.py::TestMetaHunter::test_match_attributes",
"axelrod/tests/unit/test_meta.py::TestMetaHunter::test_repr",
"axelrod/tests/unit/test_meta.py::TestMetaHunter::test_reset",
"axelrod/tests/unit/test_meta.py::TestMetaHunter::test_strategy",
"axelrod/tests/unit/test_meta.py::TestMetaMajorityMemoryOne::test_clone",
"axelrod/tests/unit/test_meta.py::TestMetaMajorityMemoryOne::test_initialisation",
"axelrod/tests/unit/test_meta.py::TestMetaMajorityMemoryOne::test_match_attributes",
"axelrod/tests/unit/test_meta.py::TestMetaMajorityMemoryOne::test_repr",
"axelrod/tests/unit/test_meta.py::TestMetaMajorityMemoryOne::test_reset",
"axelrod/tests/unit/test_meta.py::TestMetaMajorityMemoryOne::test_strategy",
"axelrod/tests/unit/test_meta.py::TestMetaWinnerMemoryOne::test_clone",
"axelrod/tests/unit/test_meta.py::TestMetaWinnerMemoryOne::test_initialisation",
"axelrod/tests/unit/test_meta.py::TestMetaWinnerMemoryOne::test_match_attributes",
"axelrod/tests/unit/test_meta.py::TestMetaWinnerMemoryOne::test_repr",
"axelrod/tests/unit/test_meta.py::TestMetaWinnerMemoryOne::test_reset",
"axelrod/tests/unit/test_meta.py::TestMetaWinnerMemoryOne::test_strategy",
"axelrod/tests/unit/test_meta.py::TestMetaMajorityFiniteMemory::test_clone",
"axelrod/tests/unit/test_meta.py::TestMetaMajorityFiniteMemory::test_initialisation",
"axelrod/tests/unit/test_meta.py::TestMetaMajorityFiniteMemory::test_match_attributes",
"axelrod/tests/unit/test_meta.py::TestMetaMajorityFiniteMemory::test_repr",
"axelrod/tests/unit/test_meta.py::TestMetaMajorityFiniteMemory::test_reset",
"axelrod/tests/unit/test_meta.py::TestMetaMajorityFiniteMemory::test_strategy",
"axelrod/tests/unit/test_meta.py::TestMetaWinnerFiniteMemory::test_clone",
"axelrod/tests/unit/test_meta.py::TestMetaWinnerFiniteMemory::test_initialisation",
"axelrod/tests/unit/test_meta.py::TestMetaWinnerFiniteMemory::test_match_attributes",
"axelrod/tests/unit/test_meta.py::TestMetaWinnerFiniteMemory::test_repr",
"axelrod/tests/unit/test_meta.py::TestMetaWinnerFiniteMemory::test_reset",
"axelrod/tests/unit/test_meta.py::TestMetaWinnerFiniteMemory::test_strategy",
"axelrod/tests/unit/test_meta.py::TestMetaMajorityLongMemory::test_clone",
"axelrod/tests/unit/test_meta.py::TestMetaMajorityLongMemory::test_initialisation",
"axelrod/tests/unit/test_meta.py::TestMetaMajorityLongMemory::test_match_attributes",
"axelrod/tests/unit/test_meta.py::TestMetaMajorityLongMemory::test_repr",
"axelrod/tests/unit/test_meta.py::TestMetaMajorityLongMemory::test_reset",
"axelrod/tests/unit/test_meta.py::TestMetaMajorityLongMemory::test_strategy",
"axelrod/tests/unit/test_meta.py::TestMetaWinnerLongMemory::test_clone",
"axelrod/tests/unit/test_meta.py::TestMetaWinnerLongMemory::test_initialisation",
"axelrod/tests/unit/test_meta.py::TestMetaWinnerLongMemory::test_match_attributes",
"axelrod/tests/unit/test_meta.py::TestMetaWinnerLongMemory::test_repr",
"axelrod/tests/unit/test_meta.py::TestMetaWinnerLongMemory::test_reset",
"axelrod/tests/unit/test_meta.py::TestMetaWinnerLongMemory::test_strategy",
"axelrod/tests/unit/test_meta.py::TestMetaMixer::test_clone",
"axelrod/tests/unit/test_meta.py::TestMetaMixer::test_match_attributes",
"axelrod/tests/unit/test_meta.py::TestMetaMixer::test_raise_error_in_distribution",
"axelrod/tests/unit/test_meta.py::TestMetaMixer::test_repr",
"axelrod/tests/unit/test_meta.py::TestMetaMixer::test_reset",
"axelrod/tests/unit/test_meta.py::TestMetaMixer::test_strategy",
"axelrod/tests/unit/test_player.py::TestPlayerClass::test_add_noise",
"axelrod/tests/unit/test_player.py::TestPlayerClass::test_noisy_play",
"axelrod/tests/unit/test_player.py::TestPlayerClass::test_play",
"axelrod/tests/unit/test_player.py::TestPlayerClass::test_strategy",
"axelrod/tests/unit/test_player.py::TestPlayer::test_clone",
"axelrod/tests/unit/test_player.py::TestPlayer::test_initialisation",
"axelrod/tests/unit/test_player.py::TestPlayer::test_match_attributes",
"axelrod/tests/unit/test_player.py::TestPlayer::test_repr",
"axelrod/tests/unit/test_player.py::TestPlayer::test_reset"
]
| []
| MIT License | 534 | [
"axelrod/strategies/meta.py",
"axelrod/strategies/cycler.py",
"axelrod/strategies/gobymajority.py"
]
| [
"axelrod/strategies/meta.py",
"axelrod/strategies/cycler.py",
"axelrod/strategies/gobymajority.py"
]
|
|
refnx__refnx-35 | cd75b8c1b715bc9ae385e60e025baf1598a270a1 | 2016-05-13 02:48:29 | 568a56132fe0cd8418cff41ffedfc276bdb99af4 | diff --git a/refnx/reduce/platypusnexus.py b/refnx/reduce/platypusnexus.py
index ae28b951..f041c3ff 100644
--- a/refnx/reduce/platypusnexus.py
+++ b/refnx/reduce/platypusnexus.py
@@ -516,6 +516,9 @@ class PlatypusNexus(object):
m_spec_tof_hist[:] = TOF - toffset
flight_distance[:] = flight_distance[0]
detpositions[:] = detpositions[0]
+ domega[:] = domega[0]
+ d_cx[:] = d_cx[0]
+ phase_angle[:] = phase_angle[0]
break
else:
scanpoint += 1
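The three added lines mirror the broadcasts already done for `flight_distance` and `detpositions`: in event mode only scanpoint 0 is actually computed, so the angular-resolution terms have to be copied into every time slice as well. A minimal numpy sketch of the idiom — the array names stand in for the reducer's state and the values are made up:

```python
import numpy as np

n_slices = 3

# After the event-mode branch only index 0 holds a computed value; the
# remaining slices are still uninitialised placeholders.
domega = np.zeros(n_slices)
d_cx = np.zeros(n_slices)
phase_angle = np.zeros(n_slices)
domega[0], d_cx[0], phase_angle[0] = 0.012, 1.4, 0.53

# The fix: broadcast the scanpoint-0 resolution terms to every slice,
# just as was already done for flight distance and detector position.
domega[:] = domega[0]
d_cx[:] = d_cx[0]
phase_angle[:] = phase_angle[0]

assert np.allclose(domega, 0.012) and np.allclose(phase_angle, 0.53)
```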
| Event mode reduction is not calculating the correct resolution for each timeslice | refnx/refnx | diff --git a/refnx/reduce/test/test_reduce.py b/refnx/reduce/test/test_reduce.py
index 92f1f5f6..4b9f6990 100644
--- a/refnx/reduce/test/test_reduce.py
+++ b/refnx/reduce/test/test_reduce.py
@@ -3,7 +3,7 @@ import os.path
import numpy as np
from refnx.reduce import reduce_stitch, ReducePlatypus
from numpy.testing import (assert_almost_equal, assert_, assert_equal,
- assert_array_less)
+ assert_array_less, assert_allclose)
import xml.etree.ElementTree as ET
class TestReduce(unittest.TestCase):
@@ -44,6 +44,11 @@ class TestReduce(unittest.TestCase):
eventmode=[0, 900, 1800])
assert_equal(a.ydata.shape[0], 2)
+ # check that the resolutions are pretty much the same
+ assert_allclose(a.xdata_sd[0] / a.xdata[0],
+ a.xdata_sd[1] / a.xdata[1],
+ atol=0.001)
+
# check that the right timestamps are written into the datafile
tree = ET.parse(os.path.join(os.getcwd(), 'PLP0011641_1.xml'))
t = tree.find('.//REFentry').attrib['time']
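The key assertion is the new `assert_allclose` above: once the geometry terms are broadcast correctly, the fractional resolution dq/q should be essentially identical for both event-mode slices. The same check, sketched on made-up reduced arrays rather than a real `reduce_stitch` output:

```python
import numpy as np
from numpy.testing import assert_allclose

# Hypothetical reduced output: q values for two time slices, each with a
# constant 4% fractional resolution.
q = np.tile(np.linspace(0.01, 0.1, 10), (2, 1))
q_sd = 0.04 * q

# dq/q must agree between the slices to within 0.001, as the new test
# demands of the real reducer.
assert_allclose(q_sd[0] / q[0], q_sd[1] / q[1], atol=0.001)
```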
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 0.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "numpy>=1.16.0 scipy>=1.0.0 emcee>=2.2.1 six>=1.11.0 uncertainties>=3.0.1 pandas>=0.23.4 pytest>=3.6.0 h5py>=2.8.0 xlrd>=1.1.0 ptemcee>=1.0.0",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | asteval==0.9.26
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
emcee @ file:///home/conda/feedstock_root/build_artifacts/emcee_1713796893786/work
future==0.18.2
h5py @ file:///tmp/build/80754af9/h5py_1593454121459/work
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
lmfit==1.0.3
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
numpy @ file:///tmp/build/80754af9/numpy_and_numpy_base_1603483703303/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pandas==1.1.5
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
ptemcee==1.0.0
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
python-dateutil @ file:///tmp/build/80754af9/python-dateutil_1626374649649/work
pytz==2021.3
-e git+https://github.com/refnx/refnx.git@cd75b8c1b715bc9ae385e60e025baf1598a270a1#egg=refnx
scipy @ file:///tmp/build/80754af9/scipy_1597686635649/work
six @ file:///tmp/build/80754af9/six_1644875935023/work
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
uncertainties @ file:///home/conda/feedstock_root/build_artifacts/uncertainties_1720452225073/work
xlrd @ file:///croot/xlrd_1685030938141/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: refnx
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- blas=1.0=openblas
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- emcee=3.1.6=pyhd8ed1ab_0
- future=0.18.2=py36_1
- h5py=2.10.0=py36hd6299e0_1
- hdf5=1.10.6=hb1b8bf9_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgfortran-ng=7.5.0=ha8ba4b0_17
- libgfortran4=7.5.0=ha8ba4b0_17
- libgomp=11.2.0=h1234567_1
- libopenblas=0.3.18=hf726d26_0
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- numpy=1.19.2=py36h6163131_0
- numpy-base=1.19.2=py36h75fe3a5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pandas=1.1.5=py36ha9443f7_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- ptemcee=1.0.0=py_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- python-dateutil=2.8.2=pyhd3eb1b0_0
- pytz=2021.3=pyhd3eb1b0_0
- readline=8.2=h5eee18b_0
- scipy=1.5.2=py36habc2bb6_0
- setuptools=58.0.4=py36h06a4308_0
- six=1.16.0=pyhd3eb1b0_1
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- uncertainties=3.2.2=pyhd8ed1ab_1
- wheel=0.37.1=pyhd3eb1b0_0
- xlrd=2.0.1=pyhd3eb1b0_1
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- asteval==0.9.26
- lmfit==1.0.3
prefix: /opt/conda/envs/refnx
| [
"refnx/reduce/test/test_reduce.py::TestReduce::test_event_reduction"
]
| []
| [
"refnx/reduce/test/test_reduce.py::TestReduce::test_reduction_method",
"refnx/reduce/test/test_reduce.py::TestReduce::test_smoke"
]
| []
| BSD 3-Clause "New" or "Revised" License | 535 | [
"refnx/reduce/platypusnexus.py"
]
| [
"refnx/reduce/platypusnexus.py"
]
|
|
adamtheturtle__todo-28 | 7666d2181cdea24c963f2d99f918fd368fefafef | 2016-05-15 16:08:13 | 7666d2181cdea24c963f2d99f918fd368fefafef | diff --git a/authentication/authentication.py b/authentication/authentication.py
index 7b64ff6..f9b7cee 100644
--- a/authentication/authentication.py
+++ b/authentication/authentication.py
@@ -288,6 +288,26 @@ def create_todo():
return jsonify(create.json()), create.status_code
+
[email protected]('/todos/<id>', methods=['GET'])
+@consumes('application/json')
+def read_todo(id):
+ """
+ Get information about particular todo item.
+
+ :reqheader Content-Type: application/json
+ :resheader Content-Type: application/json
+ :resjson string id: The id of the todo item.
+ :resjson boolean completed: Whether the item is completed.
+ :resjson number completion_timestamp: The completion UNIX timestamp, or
+ ``null`` if there is none.
+ :status 200: The requested item's information is returned.
+ :status 404: There is no item with the given ``id``.
+ """
+ url = urljoin(STORAGE_URL, 'todos/{id}').format(id=id)
+ response = requests.get(url, headers={'Content-Type': 'application/json'})
+ return jsonify(response.json()), response.status_code
+
if __name__ == '__main__': # pragma: no cover
# Specifying 0.0.0.0 as the host tells the operating system to listen on
# all public IPs. This makes the server visible externally.
diff --git a/storage/storage.py b/storage/storage.py
index ae2db38..29189c9 100644
--- a/storage/storage.py
+++ b/storage/storage.py
@@ -218,6 +218,35 @@ def todos_post():
), codes.CREATED
[email protected]('/todos/<id>', methods=['GET'])
+@consumes('application/json')
+def specific_todo_get(id):
+ """
+ Get information about a particular todo item.
+
+ :reqheader Content-Type: application/json
+ :resheader Content-Type: application/json
+ :resjson string id: The id of the todo item.
+ :resjson boolean completed: Whether the item is completed.
+ :resjson number completion_timestamp: The completion UNIX timestamp, or
+ ``null`` if there is none.
+ :status 200: The requested item's information is returned.
+ :status 404: There is no item with the given ``id``.
+ """
+ todo = Todo.query.filter_by(id=id).first()
+
+ if todo is None:
+ return jsonify(
+ title='The requested todo does not exist.',
+ detail='No todo exists with the id "{id}"'.format(id=id),
+ ), codes.NOT_FOUND
+
+ return jsonify(
+ content=todo.content,
+ completed=todo.completed,
+ completion_timestamp=todo.completion_timestamp,
+ ), codes.OK
+
if __name__ == '__main__': # pragma: no cover
# Specifying 0.0.0.0 as the host tells the operating system to listen on
# all public IPs. This makes the server visible externally.
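With both hunks applied, `GET /todos/<id>` works end to end: the authentication service forwards the request to the storage service and relays the JSON body and status code. A hedged usage sketch against a locally running authentication service — the host and port are assumptions, not part of the patch:

```python
import requests

# Assumes the authentication service is listening on localhost:5000.
response = requests.get(
    'http://localhost:5000/todos/1',
    headers={'Content-Type': 'application/json'},  # required by @consumes
)

if response.status_code == 200:
    todo = response.json()
    print(todo['content'], todo['completed'], todo['completion_timestamp'])
elif response.status_code == 404:
    # e.g. {'title': 'The requested todo does not exist.', 'detail': ...}
    print(response.json()['title'])
```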
| Add ability to read a TODO item | adamtheturtle/todo | diff --git a/authentication/tests/test_authentication.py b/authentication/tests/test_authentication.py
index c0da49d..25e8481 100644
--- a/authentication/tests/test_authentication.py
+++ b/authentication/tests/test_authentication.py
@@ -579,3 +579,123 @@ class CreateTodoTests(AuthenticationTests):
"""
response = self.app.post('/todos', content_type='text/html')
self.assertEqual(response.status_code, codes.UNSUPPORTED_MEDIA_TYPE)
+
+
+class ReadTodoTests(AuthenticationTests):
+ """
+ Tests for getting a todo item at ``GET /todos/{id}``.
+ """
+
+ @responses.activate
+ def test_success(self):
+ """
+ A ``GET`` request for an existing todo returns an OK status code and
+ the todo's details.
+ """
+ create = self.app.post(
+ '/todos',
+ content_type='application/json',
+ data=json.dumps(NOT_COMPLETED_TODO_DATA),
+ )
+
+ create_data = json.loads(create.data.decode('utf8'))
+ item_id = create_data['id']
+
+ read = self.app.get(
+ '/todos/{id}'.format(id=item_id),
+ content_type='application/json',
+ data=json.dumps({}),
+ )
+
+ self.assertEqual(read.status_code, codes.OK)
+ expected = NOT_COMPLETED_TODO_DATA.copy()
+ expected['completion_timestamp'] = None
+ self.assertEqual(json.loads(read.data.decode('utf8')), expected)
+
+ @responses.activate
+ @freeze_time(datetime.datetime.fromtimestamp(5, tz=pytz.utc))
+ def test_completed(self):
+ """
+ A ``GET`` request for an existing todo returns an OK status code and
+ the todo's details, including the completion timestamp.
+ """
+ create = self.app.post(
+ '/todos',
+ content_type='application/json',
+ data=json.dumps(COMPLETED_TODO_DATA),
+ )
+
+ create_data = json.loads(create.data.decode('utf8'))
+ item_id = create_data['id']
+
+ read = self.app.get(
+ '/todos/{id}'.format(id=item_id),
+ content_type='application/json',
+ data=json.dumps({}),
+ )
+
+ self.assertEqual(read.status_code, codes.OK)
+ expected = COMPLETED_TODO_DATA.copy()
+ expected['completion_timestamp'] = 5
+ self.assertEqual(json.loads(read.data.decode('utf8')), expected)
+
+ @responses.activate
+ def test_multiple_todos(self):
+ """
+ A ``GET`` request gets the correct todo when there are multiple.
+ """
+ self.app.post(
+ '/todos',
+ content_type='application/json',
+ data=json.dumps(COMPLETED_TODO_DATA),
+ )
+
+ create = self.app.post(
+ '/todos',
+ content_type='application/json',
+ data=json.dumps(NOT_COMPLETED_TODO_DATA),
+ )
+
+ self.app.post(
+ '/todos',
+ content_type='application/json',
+ data=json.dumps(COMPLETED_TODO_DATA),
+ )
+
+ create_data = json.loads(create.data.decode('utf8'))
+ item_id = create_data['id']
+
+ read = self.app.get(
+ '/todos/{id}'.format(id=item_id),
+ content_type='application/json',
+ data=json.dumps({}),
+ )
+
+ self.assertEqual(read.status_code, codes.OK)
+ expected = NOT_COMPLETED_TODO_DATA.copy()
+ expected['completion_timestamp'] = None
+ self.assertEqual(json.loads(read.data.decode('utf8')), expected)
+
+ @responses.activate
+ def test_non_existant(self):
+ """
+ A ``GET`` request for a todo which does not exist returns a NOT_FOUND
+ status code and error details.
+ """
+ response = self.app.get('/todos/1', content_type='application/json')
+
+ self.assertEqual(response.headers['Content-Type'], 'application/json')
+ self.assertEqual(response.status_code, codes.NOT_FOUND)
+ expected = {
+ 'title': 'The requested todo does not exist.',
+ 'detail': 'No todo exists with the id "1"',
+ }
+ self.assertEqual(json.loads(response.data.decode('utf8')), expected)
+
+ def test_incorrect_content_type(self):
+ """
+ If a Content-Type header other than 'application/json' is given, an
+ UNSUPPORTED_MEDIA_TYPE status code is given.
+ """
+ response = self.app.get('/todos/1', content_type='text/html')
+ self.assertEqual(response.status_code, codes.UNSUPPORTED_MEDIA_TYPE)
diff --git a/storage/tests/test_storage.py b/storage/tests/test_storage.py
index 23feaba..e77676d 100644
--- a/storage/tests/test_storage.py
+++ b/storage/tests/test_storage.py
@@ -114,8 +114,8 @@ class GetUserTests(InMemoryStorageTests):
def test_success(self):
"""
- A ``GET`` request for an existing user an OK status code and the user's
- details.
+ A ``GET`` request for an existing user returns an OK status code and
+ the user's details.
"""
self.storage_app.post(
'/users',
@@ -304,3 +304,85 @@ class CreateTodoTests(InMemoryStorageTests):
"""
response = self.storage_app.post('/todos', content_type='text/html')
self.assertEqual(response.status_code, codes.UNSUPPORTED_MEDIA_TYPE)
+
+
+class GetTodoTests(InMemoryStorageTests):
+ """
+ Tests for getting a todo item at ``GET /todos/{id}``.
+ """
+
+ def test_success(self):
+ """
+ A ``GET`` request for an existing todo returns an OK status code and
+ the todo's details.
+ """
+ create = self.storage_app.post(
+ '/todos',
+ content_type='application/json',
+ data=json.dumps(TODO_DATA),
+ )
+
+ create_data = json.loads(create.data.decode('utf8'))
+ item_id = create_data['id']
+
+ read = self.storage_app.get(
+ '/todos/{id}'.format(id=item_id),
+ content_type='application/json',
+ data=json.dumps({}),
+ )
+
+ self.assertEqual(read.status_code, codes.OK)
+ self.assertEqual(json.loads(read.data.decode('utf8')), TODO_DATA)
+
+ def test_timestamp_null(self):
+ """
+ If the timestamp is not given, the response includes a null timestamp.
+ """
+ data = TODO_DATA.copy()
+ del data['completion_timestamp']
+
+ create = self.storage_app.post(
+ '/todos',
+ content_type='application/json',
+ data=json.dumps(data),
+ )
+
+ create_data = json.loads(create.data.decode('utf8'))
+ item_id = create_data['id']
+
+ read = self.storage_app.get(
+ '/todos/{id}'.format(id=item_id),
+ content_type='application/json',
+ data=json.dumps({}),
+ )
+
+ self.assertEqual(read.status_code, codes.OK)
+ expected = TODO_DATA.copy()
+ expected['completion_timestamp'] = None
+ self.assertEqual(json.loads(read.data.decode('utf8')), expected)
+
+ def test_non_existant(self):
+ """
+ A ``GET`` request for a todo which does not exist returns a NOT_FOUND
+ status code and error details.
+ """
+ response = self.storage_app.get(
+ '/todos/1',
+ content_type='application/json',
+ )
+
+ self.assertEqual(response.headers['Content-Type'], 'application/json')
+ self.assertEqual(response.status_code, codes.NOT_FOUND)
+ expected = {
+ 'title': 'The requested todo does not exist.',
+ 'detail': 'No todo exists with the id "1"',
+ }
+ self.assertEqual(json.loads(response.data.decode('utf8')), expected)
+
+ def test_incorrect_content_type(self):
+ """
+ If a Content-Type header other than 'application/json' is given, an
+ UNSUPPORTED_MEDIA_TYPE status code is given.
+ """
+ response = self.storage_app.get('/todos/1', content_type='text/html')
+ self.assertEqual(response.status_code, codes.UNSUPPORTED_MEDIA_TYPE)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 2
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": null,
"python": "3.5",
"reqs_path": [
"requirements.txt",
"dev-requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
bcrypt==4.0.1
certifi==2021.5.30
coverage==6.2
coveralls==3.3.1
dataclasses==0.8
doc8==0.11.2
docopt==0.6.2
docutils==0.17.1
execnet==1.9.0
flake8==5.0.4
Flask==0.10.1
Flask-Bcrypt==0.7.1
Flask-JsonSchema==0.1.1
Flask-Login==0.3.2
Flask-Negotiate==0.1.0
Flask-SQLAlchemy==2.1
freezegun==1.2.2
greenlet==2.0.2
imagesize==1.4.1
importlib-metadata==4.2.0
iniconfig==1.1.1
itsdangerous==2.0.1
Jinja2==3.0.3
jsonschema==3.2.0
MarkupSafe==2.0.1
mccabe==0.7.0
packaging==21.3
pbr==6.1.1
pluggy==1.0.0
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
Pygments==2.14.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
pytest-asyncio==0.16.0
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-xdist==3.0.2
python-dateutil==2.9.0.post0
pytz==2016.4
-e git+https://github.com/adamtheturtle/todo.git@7666d2181cdea24c963f2d99f918fd368fefafef#egg=Qlutter_TODOer
requests==2.10.0
responses==0.17.0
restructuredtext-lint==1.4.0
six==1.17.0
snowballstemmer==2.2.0
Sphinx==4.3.2
sphinx-rtd-theme==1.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-httpdomain==1.8.1
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
SQLAlchemy==1.4.54
stevedore==3.5.2
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
Werkzeug==2.0.3
zipp==3.6.0
| name: todo
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- bcrypt==4.0.1
- coverage==6.2
- coveralls==3.3.1
- dataclasses==0.8
- doc8==0.11.2
- docopt==0.6.2
- docutils==0.17.1
- execnet==1.9.0
- flake8==5.0.4
- flask==0.10.1
- flask-bcrypt==0.7.1
- flask-jsonschema==0.1.1
- flask-login==0.3.2
- flask-negotiate==0.1.0
- flask-sqlalchemy==2.1
- freezegun==1.2.2
- greenlet==2.0.2
- imagesize==1.4.1
- importlib-metadata==4.2.0
- iniconfig==1.1.1
- itsdangerous==2.0.1
- jinja2==3.0.3
- jsonschema==3.2.0
- markupsafe==2.0.1
- mccabe==0.7.0
- packaging==21.3
- pbr==6.1.1
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pygments==2.14.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pytest-asyncio==0.16.0
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-xdist==3.0.2
- python-dateutil==2.9.0.post0
- pytz==2016.4
- requests==2.10.0
- responses==0.17.0
- restructuredtext-lint==1.4.0
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==4.3.2
- sphinx-rtd-theme==1.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-httpdomain==1.8.1
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- sqlalchemy==1.4.54
- stevedore==3.5.2
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- werkzeug==2.0.3
- zipp==3.6.0
prefix: /opt/conda/envs/todo
| [
"authentication/tests/test_authentication.py::ReadTodoTests::test_incorrect_content_type",
"storage/tests/test_storage.py::GetTodoTests::test_incorrect_content_type"
]
| [
"authentication/tests/test_authentication.py::SignupTests::test_existing_user",
"authentication/tests/test_authentication.py::SignupTests::test_missing_email",
"authentication/tests/test_authentication.py::SignupTests::test_missing_password",
"authentication/tests/test_authentication.py::SignupTests::test_passwords_hashed",
"authentication/tests/test_authentication.py::SignupTests::test_signup",
"authentication/tests/test_authentication.py::LoginTests::test_login",
"authentication/tests/test_authentication.py::LoginTests::test_missing_email",
"authentication/tests/test_authentication.py::LoginTests::test_missing_password",
"authentication/tests/test_authentication.py::LoginTests::test_non_existant_user",
"authentication/tests/test_authentication.py::LoginTests::test_remember_me_cookie_set",
"authentication/tests/test_authentication.py::LoginTests::test_wrong_password",
"authentication/tests/test_authentication.py::LogoutTests::test_logout",
"authentication/tests/test_authentication.py::LoadUserTests::test_user_does_not_exist",
"authentication/tests/test_authentication.py::LoadUserTests::test_user_exists",
"authentication/tests/test_authentication.py::LoadUserFromTokenTests::test_fake_token",
"authentication/tests/test_authentication.py::LoadUserFromTokenTests::test_load_user_from_token",
"authentication/tests/test_authentication.py::CreateTodoTests::test_current_completion_time",
"authentication/tests/test_authentication.py::CreateTodoTests::test_missing_completed_flag",
"authentication/tests/test_authentication.py::CreateTodoTests::test_missing_text",
"authentication/tests/test_authentication.py::CreateTodoTests::test_success_response",
"authentication/tests/test_authentication.py::ReadTodoTests::test_completed",
"authentication/tests/test_authentication.py::ReadTodoTests::test_multiple_todos",
"authentication/tests/test_authentication.py::ReadTodoTests::test_non_existant",
"authentication/tests/test_authentication.py::ReadTodoTests::test_success",
"storage/tests/test_storage.py::CreateUserTests::test_existing_user",
"storage/tests/test_storage.py::CreateUserTests::test_missing_email",
"storage/tests/test_storage.py::CreateUserTests::test_missing_password_hash",
"storage/tests/test_storage.py::CreateUserTests::test_success_response",
"storage/tests/test_storage.py::GetUserTests::test_non_existant_user",
"storage/tests/test_storage.py::GetUserTests::test_success",
"storage/tests/test_storage.py::GetUsersTests::test_with_users",
"storage/tests/test_storage.py::CreateTodoTests::test_missing_completed_flag",
"storage/tests/test_storage.py::CreateTodoTests::test_missing_completion_time",
"storage/tests/test_storage.py::CreateTodoTests::test_missing_text",
"storage/tests/test_storage.py::CreateTodoTests::test_success_response",
"storage/tests/test_storage.py::GetTodoTests::test_non_existant",
"storage/tests/test_storage.py::GetTodoTests::test_success",
"storage/tests/test_storage.py::GetTodoTests::test_timestamp_null"
]
| [
"authentication/tests/test_authentication.py::SignupTests::test_incorrect_content_type",
"authentication/tests/test_authentication.py::LoginTests::test_incorrect_content_type",
"authentication/tests/test_authentication.py::LogoutTests::test_incorrect_content_type",
"authentication/tests/test_authentication.py::LogoutTests::test_logout_twice",
"authentication/tests/test_authentication.py::LogoutTests::test_not_logged_in",
"authentication/tests/test_authentication.py::UserTests::test_different_password_different_token",
"authentication/tests/test_authentication.py::UserTests::test_get_auth_token",
"authentication/tests/test_authentication.py::UserTests::test_get_id",
"authentication/tests/test_authentication.py::CreateTodoTests::test_incorrect_content_type",
"storage/tests/test_storage.py::CreateUserTests::test_incorrect_content_type",
"storage/tests/test_storage.py::GetUserTests::test_incorrect_content_type",
"storage/tests/test_storage.py::GetUsersTests::test_incorrect_content_type",
"storage/tests/test_storage.py::GetUsersTests::test_no_users",
"storage/tests/test_storage.py::CreateTodoTests::test_incorrect_content_type"
]
| []
| null | 536 | [
"storage/storage.py",
"authentication/authentication.py"
]
| [
"storage/storage.py",
"authentication/authentication.py"
]
|
|
adamtheturtle__todo-35 | 418d1cc9a4fea4d7332715aa8b57eb13b65130c6 | 2016-05-15 19:03:18 | 418d1cc9a4fea4d7332715aa8b57eb13b65130c6 | diff --git a/README.md b/README.md
index 6906cef..71463e4 100644
--- a/README.md
+++ b/README.md
@@ -63,6 +63,7 @@ To start developing quickly, it is recommended that you create a `virtualenv` wi
Tests are run on [Travis-CI](https://travis-ci.org/adamtheturtle/todo).
+See `.travis.yml` for details of exactly what tests are run.
### Documentation
diff --git a/authentication/authentication.py b/authentication/authentication.py
index f9b7cee..4a66953 100644
--- a/authentication/authentication.py
+++ b/authentication/authentication.py
@@ -293,7 +293,7 @@ def create_todo():
@consumes('application/json')
def read_todo(id):
"""
- Get information about particular todo item.
+ Get information about a particular todo item.
:reqheader Content-Type: application/json
:resheader Content-Type: application/json
@@ -308,6 +308,23 @@ def read_todo(id):
response = requests.get(url, headers={'Content-Type': 'application/json'})
return jsonify(response.json()), response.status_code
+
[email protected]('/todos/<id>', methods=['DELETE'])
+@consumes('application/json')
+def delete_todo(id):
+ """
+ Delete a particular todo item.
+
+ :reqheader Content-Type: application/json
+ :resheader Content-Type: application/json
+ :status 200: The requested item was deleted.
+ :status 404: There is no item with the given ``id``.
+ """
+ url = urljoin(STORAGE_URL, 'todos/{id}').format(id=id)
+ headers = {'Content-Type': 'application/json'}
+ response = requests.delete(url, headers=headers)
+ return jsonify(response.json()), response.status_code
+
if __name__ == '__main__': # pragma: no cover
# Specifying 0.0.0.0 as the host tells the operating system to listen on
# all public IPs. This makes the server visible externally.
diff --git a/storage/storage.py b/storage/storage.py
index 29189c9..c5b3224 100644
--- a/storage/storage.py
+++ b/storage/storage.py
@@ -242,11 +242,37 @@ def specific_todo_get(id):
), codes.NOT_FOUND
return jsonify(
+ # TODO needs ID
content=todo.content,
completed=todo.completed,
completion_timestamp=todo.completion_timestamp,
), codes.OK
+
[email protected]('/todos/<id>', methods=['DELETE'])
+@consumes('application/json')
+def delete_todo(id):
+ """
+ Delete a particular todo item.
+
+ :reqheader Content-Type: application/json
+ :resheader Content-Type: application/json
+ :status 200: The requested item was deleted.
+ :status 404: There is no item with the given ``id``.
+ """
+ todo = Todo.query.filter_by(id=id).first()
+
+ if todo is None:
+ return jsonify(
+ title='The requested todo does not exist.',
+ detail='No todo exists with the id "{id}"'.format(id=id),
+ ), codes.NOT_FOUND
+
+ db.session.delete(todo)
+ db.session.commit()
+
+ return jsonify(), codes.OK
+
if __name__ == '__main__': # pragma: no cover
# Specifying 0.0.0.0 as the host tells the operating system to listen on
# all public IPs. This makes the server visible externally.
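As with the read endpoint, deletion is proxied straight through: the authentication service issues a `DELETE` against the storage service, which removes the row, commits, and answers with an empty JSON body. A hedged usage sketch — host and port assumed, as before:

```python
import requests

# Assumes the authentication service is listening on localhost:5000.
response = requests.delete(
    'http://localhost:5000/todos/1',
    headers={'Content-Type': 'application/json'},  # required by @consumes
)

# 200 with an empty JSON body on success; repeating the request yields a
# 404 carrying 'title' and 'detail' fields describing the missing todo.
print(response.status_code, response.json())
```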
| Add ability to delete a TODO item | adamtheturtle/todo | diff --git a/authentication/tests/test_authentication.py b/authentication/tests/test_authentication.py
index 25e8481..85d152c 100644
--- a/authentication/tests/test_authentication.py
+++ b/authentication/tests/test_authentication.py
@@ -699,3 +699,80 @@ class ReadTodoTests(AuthenticationTests):
"""
response = self.app.get('/todos/1', content_type='text/html')
self.assertEqual(response.status_code, codes.UNSUPPORTED_MEDIA_TYPE)
+
+
+class DeleteTodoTests(AuthenticationTests):
+ """
+ Tests for deleting a todo item at ``DELETE /todos/{id}``.
+ """
+
+ @responses.activate
+ def test_success(self):
+ """
+ It is possible to delete a todo item.
+ """
+ create = self.app.post(
+ '/todos',
+ content_type='application/json',
+ data=json.dumps(COMPLETED_TODO_DATA),
+ )
+
+ create_data = json.loads(create.data.decode('utf8'))
+ item_id = create_data['id']
+
+ delete = self.app.delete(
+ '/todos/{id}'.format(id=item_id),
+ content_type='application/json',
+ data=json.dumps({}),
+ )
+
+ self.assertEqual(delete.status_code, codes.OK)
+
+ read = self.app.get(
+ '/todos/{id}'.format(id=item_id),
+ content_type='application/json',
+ data=json.dumps({}),
+ )
+
+ self.assertEqual(read.status_code, codes.NOT_FOUND)
+
+ @responses.activate
+ def test_delete_twice(self):
+ """
+ Deleting an item twice returns a 404 code and an error message.
+ """
+ create = self.app.post(
+ '/todos',
+ content_type='application/json',
+ data=json.dumps(COMPLETED_TODO_DATA),
+ )
+
+ create_data = json.loads(create.data.decode('utf8'))
+ item_id = create_data['id']
+
+ self.app.delete(
+ '/todos/{id}'.format(id=item_id),
+ content_type='application/json',
+ data=json.dumps({}),
+ )
+
+ delete = self.app.delete(
+ '/todos/{id}'.format(id=item_id),
+ content_type='application/json',
+ data=json.dumps({}),
+ )
+
+ self.assertEqual(delete.status_code, codes.NOT_FOUND)
+ expected = {
+ 'title': 'The requested todo does not exist.',
+ 'detail': 'No todo exists with the id "1"',
+ }
+ self.assertEqual(json.loads(delete.data.decode('utf8')), expected)
+
+ def test_incorrect_content_type(self):
+ """
+ If a Content-Type header other than 'application/json' is given, an
+ UNSUPPORTED_MEDIA_TYPE status code is given.
+ """
+ response = self.app.delete('/todos/1', content_type='text/html')
+ self.assertEqual(response.status_code, codes.UNSUPPORTED_MEDIA_TYPE)
diff --git a/storage/tests/test_storage.py b/storage/tests/test_storage.py
index e77676d..401fd2a 100644
--- a/storage/tests/test_storage.py
+++ b/storage/tests/test_storage.py
@@ -386,3 +386,81 @@ class GetTodoTests(InMemoryStorageTests):
"""
response = self.storage_app.get('/todos/1', content_type='text/html')
self.assertEqual(response.status_code, codes.UNSUPPORTED_MEDIA_TYPE)
+
+
+class DeleteTodoTests(InMemoryStorageTests):
+ """
+ Tests for deleting a todo item at ``DELETE /todos/{id}``.
+ """
+
+ def test_success(self):
+ """
+ It is possible to delete a todo item.
+ """
+ create = self.storage_app.post(
+ '/todos',
+ content_type='application/json',
+ data=json.dumps(TODO_DATA),
+ )
+
+ create_data = json.loads(create.data.decode('utf8'))
+ item_id = create_data['id']
+
+ delete = self.storage_app.delete(
+ '/todos/{id}'.format(id=item_id),
+ content_type='application/json',
+ data=json.dumps({}),
+ )
+
+ self.assertEqual(delete.status_code, codes.OK)
+
+ read = self.storage_app.get(
+ '/todos/{id}'.format(id=item_id),
+ content_type='application/json',
+ data=json.dumps({}),
+ )
+
+ self.assertEqual(read.status_code, codes.NOT_FOUND)
+
+ def test_delete_twice(self):
+ """
+ Deleting an item twice returns a 404 code and an error message.
+ """
+ create = self.storage_app.post(
+ '/todos',
+ content_type='application/json',
+ data=json.dumps(TODO_DATA),
+ )
+
+ create_data = json.loads(create.data.decode('utf8'))
+ item_id = create_data['id']
+
+ self.storage_app.delete(
+ '/todos/{id}'.format(id=item_id),
+ content_type='application/json',
+ data=json.dumps({}),
+ )
+
+ delete = self.storage_app.delete(
+ '/todos/{id}'.format(id=item_id),
+ content_type='application/json',
+ data=json.dumps({}),
+ )
+
+ self.assertEqual(delete.status_code, codes.NOT_FOUND)
+ expected = {
+ 'title': 'The requested todo does not exist.',
+ 'detail': 'No todo exists with the id "1"',
+ }
+ self.assertEqual(json.loads(delete.data.decode('utf8')), expected)
+
+ def test_incorrect_content_type(self):
+ """
+ If a Content-Type header other than 'application/json' is given, an
+ UNSUPPORTED_MEDIA_TYPE status code is given.
+ """
+ response = self.storage_app.delete(
+ '/todos/1',
+ content_type='text/html',
+ )
+ self.assertEqual(response.status_code, codes.UNSUPPORTED_MEDIA_TYPE)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 3
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": null,
"python": "3.5",
"reqs_path": [
"requirements.txt",
"dev-requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
bcrypt==4.0.1
certifi==2021.5.30
coverage==6.2
coveralls==3.3.1
dataclasses==0.8
doc8==0.11.2
docopt==0.6.2
docutils==0.18.1
execnet==1.9.0
flake8==3.9.2
Flask==0.10.1
Flask-Bcrypt==0.7.1
Flask-JsonSchema==0.1.1
Flask-Login==0.3.2
Flask-Negotiate==0.1.0
Flask-SQLAlchemy==2.1
freezegun==1.2.2
greenlet==2.0.2
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
itsdangerous==2.0.1
Jinja2==3.0.3
jsonschema==3.2.0
MarkupSafe==2.0.1
mccabe==0.6.1
packaging==21.3
pbr==6.1.1
pluggy==1.0.0
py==1.11.0
pycodestyle==2.7.0
pyflakes==2.3.1
Pygments==2.14.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
pytest-asyncio==0.16.0
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-xdist==3.0.2
python-dateutil==2.9.0.post0
pytz==2016.4
-e git+https://github.com/adamtheturtle/todo.git@418d1cc9a4fea4d7332715aa8b57eb13b65130c6#egg=Qlutter_TODOer
requests==2.10.0
responses==0.17.0
restructuredtext-lint==1.4.0
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-httpdomain==1.8.1
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
SQLAlchemy==1.4.54
stevedore==3.5.2
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
Werkzeug==2.0.3
zipp==3.6.0
| name: todo
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- bcrypt==4.0.1
- coverage==6.2
- coveralls==3.3.1
- dataclasses==0.8
- doc8==0.11.2
- docopt==0.6.2
- docutils==0.18.1
- execnet==1.9.0
- flake8==3.9.2
- flask==0.10.1
- flask-bcrypt==0.7.1
- flask-jsonschema==0.1.1
- flask-login==0.3.2
- flask-negotiate==0.1.0
- flask-sqlalchemy==2.1
- freezegun==1.2.2
- greenlet==2.0.2
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- itsdangerous==2.0.1
- jinja2==3.0.3
- jsonschema==3.2.0
- markupsafe==2.0.1
- mccabe==0.6.1
- packaging==21.3
- pbr==6.1.1
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.7.0
- pyflakes==2.3.1
- pygments==2.14.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pytest-asyncio==0.16.0
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-xdist==3.0.2
- python-dateutil==2.9.0.post0
- pytz==2016.4
- requests==2.10.0
- responses==0.17.0
- restructuredtext-lint==1.4.0
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-httpdomain==1.8.1
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- sqlalchemy==1.4.54
- stevedore==3.5.2
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- werkzeug==2.0.3
- zipp==3.6.0
prefix: /opt/conda/envs/todo
| [
"authentication/tests/test_authentication.py::DeleteTodoTests::test_incorrect_content_type",
"storage/tests/test_storage.py::DeleteTodoTests::test_incorrect_content_type"
]
| [
"authentication/tests/test_authentication.py::SignupTests::test_existing_user",
"authentication/tests/test_authentication.py::SignupTests::test_missing_email",
"authentication/tests/test_authentication.py::SignupTests::test_missing_password",
"authentication/tests/test_authentication.py::SignupTests::test_passwords_hashed",
"authentication/tests/test_authentication.py::SignupTests::test_signup",
"authentication/tests/test_authentication.py::LoginTests::test_login",
"authentication/tests/test_authentication.py::LoginTests::test_missing_email",
"authentication/tests/test_authentication.py::LoginTests::test_missing_password",
"authentication/tests/test_authentication.py::LoginTests::test_non_existant_user",
"authentication/tests/test_authentication.py::LoginTests::test_remember_me_cookie_set",
"authentication/tests/test_authentication.py::LoginTests::test_wrong_password",
"authentication/tests/test_authentication.py::LogoutTests::test_logout",
"authentication/tests/test_authentication.py::LoadUserTests::test_user_does_not_exist",
"authentication/tests/test_authentication.py::LoadUserTests::test_user_exists",
"authentication/tests/test_authentication.py::LoadUserFromTokenTests::test_fake_token",
"authentication/tests/test_authentication.py::LoadUserFromTokenTests::test_load_user_from_token",
"authentication/tests/test_authentication.py::CreateTodoTests::test_current_completion_time",
"authentication/tests/test_authentication.py::CreateTodoTests::test_missing_completed_flag",
"authentication/tests/test_authentication.py::CreateTodoTests::test_missing_text",
"authentication/tests/test_authentication.py::CreateTodoTests::test_success_response",
"authentication/tests/test_authentication.py::ReadTodoTests::test_completed",
"authentication/tests/test_authentication.py::ReadTodoTests::test_multiple_todos",
"authentication/tests/test_authentication.py::ReadTodoTests::test_non_existant",
"authentication/tests/test_authentication.py::ReadTodoTests::test_success",
"authentication/tests/test_authentication.py::DeleteTodoTests::test_delete_twice",
"authentication/tests/test_authentication.py::DeleteTodoTests::test_success",
"storage/tests/test_storage.py::CreateUserTests::test_existing_user",
"storage/tests/test_storage.py::CreateUserTests::test_missing_email",
"storage/tests/test_storage.py::CreateUserTests::test_missing_password_hash",
"storage/tests/test_storage.py::CreateUserTests::test_success_response",
"storage/tests/test_storage.py::GetUserTests::test_non_existant_user",
"storage/tests/test_storage.py::GetUserTests::test_success",
"storage/tests/test_storage.py::GetUsersTests::test_with_users",
"storage/tests/test_storage.py::CreateTodoTests::test_missing_completed_flag",
"storage/tests/test_storage.py::CreateTodoTests::test_missing_completion_time",
"storage/tests/test_storage.py::CreateTodoTests::test_missing_text",
"storage/tests/test_storage.py::CreateTodoTests::test_success_response",
"storage/tests/test_storage.py::GetTodoTests::test_non_existant",
"storage/tests/test_storage.py::GetTodoTests::test_success",
"storage/tests/test_storage.py::GetTodoTests::test_timestamp_null",
"storage/tests/test_storage.py::DeleteTodoTests::test_delete_twice",
"storage/tests/test_storage.py::DeleteTodoTests::test_success"
]
| [
"authentication/tests/test_authentication.py::SignupTests::test_incorrect_content_type",
"authentication/tests/test_authentication.py::LoginTests::test_incorrect_content_type",
"authentication/tests/test_authentication.py::LogoutTests::test_incorrect_content_type",
"authentication/tests/test_authentication.py::LogoutTests::test_logout_twice",
"authentication/tests/test_authentication.py::LogoutTests::test_not_logged_in",
"authentication/tests/test_authentication.py::UserTests::test_different_password_different_token",
"authentication/tests/test_authentication.py::UserTests::test_get_auth_token",
"authentication/tests/test_authentication.py::UserTests::test_get_id",
"authentication/tests/test_authentication.py::CreateTodoTests::test_incorrect_content_type",
"authentication/tests/test_authentication.py::ReadTodoTests::test_incorrect_content_type",
"storage/tests/test_storage.py::CreateUserTests::test_incorrect_content_type",
"storage/tests/test_storage.py::GetUserTests::test_incorrect_content_type",
"storage/tests/test_storage.py::GetUsersTests::test_incorrect_content_type",
"storage/tests/test_storage.py::GetUsersTests::test_no_users",
"storage/tests/test_storage.py::CreateTodoTests::test_incorrect_content_type",
"storage/tests/test_storage.py::GetTodoTests::test_incorrect_content_type"
]
| []
| null | 537 | [
"storage/storage.py",
"README.md",
"authentication/authentication.py"
]
| [
"storage/storage.py",
"README.md",
"authentication/authentication.py"
]
|
|
adamtheturtle__todo-39 | e62c632ff6b104c40fd0f4580f4e2c8a2f084026 | 2016-05-15 22:33:10 | e62c632ff6b104c40fd0f4580f4e2c8a2f084026 | diff --git a/authentication/authentication.py b/authentication/authentication.py
index bccc34e..ff1d8a1 100644
--- a/authentication/authentication.py
+++ b/authentication/authentication.py
@@ -154,10 +154,9 @@ def login():
"""
Log in a given user.
- :param email: An email address to log in as.
- :type email: string
- :param password: A password associated with the given ``email`` address.
- :type password: string
+ :reqjson string email: An email address to log in as.
+ :reqjson string password: A password associated with the given ``email``
+ address.
:reqheader Content-Type: application/json
:resheader Content-Type: application/json
:resheader Set-Cookie: A ``remember_token``.
@@ -212,10 +211,9 @@ def signup():
"""
Sign up a new user.
- :param email: The email address of the new user.
- :type email: string
- :param password: A password to associate with the given ``email`` address.
- :type password: string
+ :reqjson string email: The email address of the new user.
+ :reqjson string password: A password to associate with the given ``email``
+ address.
:reqheader Content-Type: application/json
:resheader Content-Type: application/json
:resjson string email: The email address of the new user.
@@ -256,13 +254,10 @@ def create_todo():
"""
Create a new todo item.
- :param content: The content of the new item.
- :type content: string
- :param completed: Whether the item is completed.
- :type completed: boolean
-
:reqheader Content-Type: application/json
:resheader Content-Type: application/json
+ :reqjson string content: The content of the new item.
+ :reqjson boolean completed: Whether the item is completed.
:resjson string id: The id of the todo item.
:resjson string content: The content of the new item.
:resjson boolean completed: Whether the item is completed.
@@ -298,9 +293,9 @@ def read_todo(id):
:reqheader Content-Type: application/json
:resheader Content-Type: application/json
- :resjson string id: The id of the todo item.
+ :queryparameter number id: The id of the todo item.
:resjson boolean completed: Whether the item is completed.
- :resjson number completion_time: The completion UNIX timestamp, or
+ :resjson number completion_timestamp: The completion UNIX timestamp, or
``null`` if there is none.
:status 200: The requested item's information is returned.
:status 404: There is no item with the given ``id``.
@@ -318,6 +313,7 @@ def delete_todo(id):
:reqheader Content-Type: application/json
:resheader Content-Type: application/json
+ :queryparameter number id: The id of the todo item.
:status 200: The requested item's information is returned.
:status 404: There is no item with the given ``id``.
"""
@@ -326,6 +322,26 @@ def delete_todo(id):
response = requests.delete(url, headers=headers)
return jsonify(response.json()), response.status_code
+
+@app.route('/todos', methods=['GET'])
+@consumes('application/json')
+def list_todos():
+ """
+ List todo items.
+
+ :reqheader Content-Type: application/json
+ :resheader Content-Type: application/json
+ :resjsonarr boolean completed: Whether the item is completed.
+ :resjsonarr number completion_timestamp: The completion UNIX timestamp, or
+ ``null`` if there is none.
+ :status 200: The list of todo items is returned.
+ :status 404: There is no item with the given ``id``.
+ """
+ url = urljoin(STORAGE_URL, 'todos')
+ headers = {'Content-Type': 'application/json'}
+ response = requests.get(url, headers=headers)
+ return jsonify(response.json()), response.status_code
+
if __name__ == '__main__': # pragma: no cover
# Specifying 0.0.0.0 as the host tells the operating system to listen on
# all public IPs. This makes the server visible externally.
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 526acaa..c83eeab 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -1,14 +1,5 @@
-Welcome to ``todoer``'s documentation!
-======================================
-
-Authentication Service API Endpoints
-------------------------------------
+``todoer`` API Documentation
+============================
.. autoflask:: authentication.authentication:app
:undoc-static:
-
-Storage Service API Endpoints
------------------------------
-
-.. autoflask:: storage.storage:app
- :undoc-static:
diff --git a/storage/storage.py b/storage/storage.py
index 19a4cc2..d79e4f7 100644
--- a/storage/storage.py
+++ b/storage/storage.py
@@ -33,6 +33,17 @@ class Todo(db.Model):
completed = db.Column(db.Boolean)
completion_timestamp = db.Column(db.Integer)
+ def as_dict(self):
+ """
+ Return a representation of a todo item suitable for JSON responses.
+ """
+ return dict(
+ id=self.id,
+ content=self.content,
+ completed=self.completed,
+ completion_timestamp=self.completion_timestamp,
+ )
+
def create_app(database_uri):
"""
@@ -211,12 +222,7 @@ def todos_post():
db.session.add(todo)
db.session.commit()
- return jsonify(
- id=todo.id,
- content=todo.content,
- completed=todo.completed,
- completion_timestamp=todo.completion_timestamp,
- ), codes.CREATED
+ return jsonify(todo.as_dict()), codes.CREATED
@app.route('/todos/<id>', methods=['GET'])
@@ -242,12 +248,7 @@ def specific_todo_get(id):
detail='No todo exists with the id "{id}"'.format(id=id),
), codes.NOT_FOUND
- return jsonify(
- id=todo.id,
- content=todo.content,
- completed=todo.completed,
- completion_timestamp=todo.completion_timestamp,
- ), codes.OK
+ return jsonify(todo.as_dict()), codes.OK
@app.route('/todos/<id>', methods=['DELETE'])
@@ -274,6 +275,24 @@ def delete_todo(id):
return jsonify(), codes.OK
+
+@app.route('/todos', methods=['GET'])
+@consumes('application/json')
+def list_todos():
+ """
+ List todo items.
+
+ :reqheader Content-Type: application/json
+ :resheader Content-Type: application/json
+ :resjsonarr boolean completed: Whether the item is completed.
+ :resjsonarr number completion_timestamp: The completion UNIX timestamp, or
+ ``null`` if there is none.
+ :status 200: The list of todo items is returned.
+ :status 404: There is no item with the given ``id``.
+ """
+ todos = [todo.as_dict() for todo in Todo.query.all()]
+ return jsonify(todos=todos), codes.OK
+
if __name__ == '__main__': # pragma: no cover
# Specifying 0.0.0.0 as the host tells the operating system to listen on
# all public IPs. This makes the server visible externally.
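
Illustrative only (not part of the patch): a minimal sketch of consuming the new list endpoint, again assuming a hypothetical http://localhost:5000 deployment.

import requests

BASE = 'http://localhost:5000'  # hypothetical host/port, not specified by the patch
HEADERS = {'Content-Type': 'application/json'}  # the endpoint consumes JSON even on GET

response = requests.get(BASE + '/todos', headers=HEADERS)
assert response.status_code == 200
for todo in response.json()['todos']:  # an empty list when nothing has been created
    print(todo['id'], todo['content'], todo['completed'], todo['completion_timestamp'])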
| Add ability to list all TODOs | adamtheturtle/todo | diff --git a/authentication/tests/test_authentication.py b/authentication/tests/test_authentication.py
index 124aa3b..f6bf0e5 100644
--- a/authentication/tests/test_authentication.py
+++ b/authentication/tests/test_authentication.py
@@ -583,7 +583,7 @@ class CreateTodoTests(AuthenticationTests):
class ReadTodoTests(AuthenticationTests):
"""
- Tests for getting a todo item at ``GET /todos/{id}.``.
+ Tests for getting a todo item at ``GET /todos/{id}``.
"""
@responses.activate
@@ -604,7 +604,6 @@ class ReadTodoTests(AuthenticationTests):
read = self.app.get(
'/todos/{id}'.format(id=item_id),
content_type='application/json',
- data=json.dumps({}),
)
self.assertEqual(read.status_code, codes.OK)
@@ -632,7 +631,6 @@ class ReadTodoTests(AuthenticationTests):
read = self.app.get(
'/todos/{id}'.format(id=item_id),
content_type='application/json',
- data=json.dumps({}),
)
self.assertEqual(read.status_code, codes.OK)
@@ -670,7 +668,6 @@ class ReadTodoTests(AuthenticationTests):
read = self.app.get(
'/todos/{id}'.format(id=item_id),
content_type='application/json',
- data=json.dumps({}),
)
self.assertEqual(read.status_code, codes.OK)
@@ -726,7 +723,6 @@ class DeleteTodoTests(AuthenticationTests):
delete = self.app.delete(
'/todos/{id}'.format(id=item_id),
content_type='application/json',
- data=json.dumps({}),
)
self.assertEqual(delete.status_code, codes.OK)
@@ -734,7 +730,6 @@ class DeleteTodoTests(AuthenticationTests):
read = self.app.get(
'/todos/{id}'.format(id=item_id),
content_type='application/json',
- data=json.dumps({}),
)
self.assertEqual(read.status_code, codes.NOT_FOUND)
@@ -756,13 +751,11 @@ class DeleteTodoTests(AuthenticationTests):
self.app.delete(
'/todos/{id}'.format(id=item_id),
content_type='application/json',
- data=json.dumps({}),
)
delete = self.app.delete(
'/todos/{id}'.format(id=item_id),
content_type='application/json',
- data=json.dumps({}),
)
self.assertEqual(delete.status_code, codes.NOT_FOUND)
@@ -779,3 +772,63 @@ class DeleteTodoTests(AuthenticationTests):
"""
response = self.app.delete('/todos/1', content_type='text/html')
self.assertEqual(response.status_code, codes.UNSUPPORTED_MEDIA_TYPE)
+
+
+class ListTodosTests(AuthenticationTests):
+ """
+ Tests for listing todo items at ``GET /todos``.
+ """
+
+ @responses.activate
+ def test_no_todos(self):
+ """
+ When there are no todos, an empty array is returned.
+ """
+ list_todos = self.app.get(
+ '/todos',
+ content_type='application/json',
+ )
+
+ list_todos_data = json.loads(list_todos.data.decode('utf8'))
+
+ self.assertEqual(list_todos.status_code, codes.OK)
+ self.assertEqual(list_todos_data['todos'], [])
+
+ @responses.activate
+ def test_list(self):
+ """
+ All todos are listed.
+ """
+ other_todo = NOT_COMPLETED_TODO_DATA.copy()
+ other_todo['content'] = 'Get a haircut'
+
+ todos = [NOT_COMPLETED_TODO_DATA, other_todo]
+ expected = []
+ for index, data in enumerate(todos):
+ create = self.app.post(
+ '/todos',
+ content_type='application/json',
+ data=json.dumps(data),
+ )
+ create_data = json.loads(create.data.decode('utf8'))
+ expected_data = data.copy()
+ expected_data['id'] = create_data['id']
+ expected_data['completion_timestamp'] = None
+ expected.append(expected_data)
+
+ list_todos = self.app.get(
+ '/todos',
+ content_type='application/json',
+ )
+
+ self.assertEqual(list_todos.status_code, codes.OK)
+ list_todos_data = json.loads(list_todos.data.decode('utf8'))
+ self.assertEqual(list_todos_data['todos'], expected)
+
+ def test_incorrect_content_type(self):
+ """
+ If a Content-Type header other than 'application/json' is given, an
+ UNSUPPORTED_MEDIA_TYPE status code is given.
+ """
+ response = self.app.get('/todos', content_type='text/html')
+ self.assertEqual(response.status_code, codes.UNSUPPORTED_MEDIA_TYPE)
diff --git a/storage/tests/test_storage.py b/storage/tests/test_storage.py
index 98cfa5f..8bb35a4 100644
--- a/storage/tests/test_storage.py
+++ b/storage/tests/test_storage.py
@@ -328,7 +328,6 @@ class GetTodoTests(InMemoryStorageTests):
read = self.storage_app.get(
'/todos/{id}'.format(id=item_id),
content_type='application/json',
- data=json.dumps({}),
)
self.assertEqual(read.status_code, codes.OK)
@@ -355,7 +354,6 @@ class GetTodoTests(InMemoryStorageTests):
read = self.storage_app.get(
'/todos/{id}'.format(id=item_id),
content_type='application/json',
- data=json.dumps({}),
)
self.assertEqual(read.status_code, codes.OK)
@@ -412,7 +410,6 @@ class DeleteTodoTests(InMemoryStorageTests):
delete = self.storage_app.delete(
'/todos/{id}'.format(id=item_id),
content_type='application/json',
- data=json.dumps({}),
)
self.assertEqual(delete.status_code, codes.OK)
@@ -420,7 +417,6 @@ class DeleteTodoTests(InMemoryStorageTests):
read = self.storage_app.get(
'/todos/{id}'.format(id=item_id),
content_type='application/json',
- data=json.dumps({}),
)
self.assertEqual(read.status_code, codes.NOT_FOUND)
@@ -441,13 +437,11 @@ class DeleteTodoTests(InMemoryStorageTests):
self.storage_app.delete(
'/todos/{id}'.format(id=item_id),
content_type='application/json',
- data=json.dumps({}),
)
delete = self.storage_app.delete(
'/todos/{id}'.format(id=item_id),
content_type='application/json',
- data=json.dumps({}),
)
self.assertEqual(delete.status_code, codes.NOT_FOUND)
@@ -467,3 +461,60 @@ class DeleteTodoTests(InMemoryStorageTests):
content_type='text/html',
)
self.assertEqual(response.status_code, codes.UNSUPPORTED_MEDIA_TYPE)
+
+
+class ListTodosTests(InMemoryStorageTests):
+ """
+ Tests for listing todo items at ``GET /todos``.
+ """
+
+ def test_no_todos(self):
+ """
+ When there are no todos, an empty array is returned.
+ """
+ list_todos = self.storage_app.get(
+ '/todos',
+ content_type='application/json',
+ )
+
+ list_todos_data = json.loads(list_todos.data.decode('utf8'))
+
+ self.assertEqual(list_todos.status_code, codes.OK)
+ self.assertEqual(list_todos_data['todos'], [])
+
+ def test_list(self):
+ """
+ All todos are listed.
+ """
+ other_todo = TODO_DATA.copy()
+ other_todo['content'] = 'Get a haircut'
+
+ todos = [TODO_DATA, other_todo]
+ expected = []
+ for index, data in enumerate(todos):
+ create = self.storage_app.post(
+ '/todos',
+ content_type='application/json',
+ data=json.dumps(data),
+ )
+ create_data = json.loads(create.data.decode('utf8'))
+ expected_data = data.copy()
+ expected_data['id'] = create_data['id']
+ expected.append(expected_data)
+
+ list_todos = self.storage_app.get(
+ '/todos',
+ content_type='application/json',
+ )
+
+ self.assertEqual(list_todos.status_code, codes.OK)
+ list_todos_data = json.loads(list_todos.data.decode('utf8'))
+ self.assertEqual(list_todos_data['todos'], expected)
+
+ def test_incorrect_content_type(self):
+ """
+ If a Content-Type header other than 'application/json' is given, an
+ UNSUPPORTED_MEDIA_TYPE status code is given.
+ """
+ response = self.storage_app.get('/todos', content_type='text/html')
+ self.assertEqual(response.status_code, codes.UNSUPPORTED_MEDIA_TYPE)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 3
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": null,
"python": "3.5",
"reqs_path": [
"requirements.txt",
"dev-requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
bcrypt==4.0.1
certifi==2021.5.30
coverage==6.2
coveralls==3.3.1
dataclasses==0.8
doc8==0.11.2
docopt==0.6.2
docutils==0.18.1
execnet==1.9.0
flake8==3.9.2
Flask==0.10.1
Flask-Bcrypt==0.7.1
Flask-JsonSchema==0.1.1
Flask-Login==0.3.2
Flask-Negotiate==0.1.0
Flask-SQLAlchemy==2.1
freezegun==1.2.2
greenlet==2.0.2
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
itsdangerous==2.0.1
Jinja2==3.0.3
jsonschema==3.2.0
MarkupSafe==2.0.1
mccabe==0.6.1
packaging==21.3
pbr==6.1.1
pluggy==1.0.0
py==1.11.0
pycodestyle==2.7.0
pyflakes==2.3.1
Pygments==2.14.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
pytest-asyncio==0.16.0
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-xdist==3.0.2
python-dateutil==2.9.0.post0
pytz==2016.4
-e git+https://github.com/adamtheturtle/todo.git@e62c632ff6b104c40fd0f4580f4e2c8a2f084026#egg=Qlutter_TODOer
requests==2.10.0
responses==0.17.0
restructuredtext-lint==1.4.0
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-httpdomain==1.8.1
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
SQLAlchemy==1.4.54
stevedore==3.5.2
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
Werkzeug==2.0.3
zipp==3.6.0
| name: todo
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- bcrypt==4.0.1
- coverage==6.2
- coveralls==3.3.1
- dataclasses==0.8
- doc8==0.11.2
- docopt==0.6.2
- docutils==0.18.1
- execnet==1.9.0
- flake8==3.9.2
- flask==0.10.1
- flask-bcrypt==0.7.1
- flask-jsonschema==0.1.1
- flask-login==0.3.2
- flask-negotiate==0.1.0
- flask-sqlalchemy==2.1
- freezegun==1.2.2
- greenlet==2.0.2
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- itsdangerous==2.0.1
- jinja2==3.0.3
- jsonschema==3.2.0
- markupsafe==2.0.1
- mccabe==0.6.1
- packaging==21.3
- pbr==6.1.1
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.7.0
- pyflakes==2.3.1
- pygments==2.14.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pytest-asyncio==0.16.0
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-xdist==3.0.2
- python-dateutil==2.9.0.post0
- pytz==2016.4
- requests==2.10.0
- responses==0.17.0
- restructuredtext-lint==1.4.0
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-httpdomain==1.8.1
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- sqlalchemy==1.4.54
- stevedore==3.5.2
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- werkzeug==2.0.3
- zipp==3.6.0
prefix: /opt/conda/envs/todo
| [
"authentication/tests/test_authentication.py::ListTodosTests::test_incorrect_content_type",
"storage/tests/test_storage.py::ListTodosTests::test_incorrect_content_type"
]
| [
"authentication/tests/test_authentication.py::SignupTests::test_existing_user",
"authentication/tests/test_authentication.py::SignupTests::test_missing_email",
"authentication/tests/test_authentication.py::SignupTests::test_missing_password",
"authentication/tests/test_authentication.py::SignupTests::test_passwords_hashed",
"authentication/tests/test_authentication.py::SignupTests::test_signup",
"authentication/tests/test_authentication.py::LoginTests::test_login",
"authentication/tests/test_authentication.py::LoginTests::test_missing_email",
"authentication/tests/test_authentication.py::LoginTests::test_missing_password",
"authentication/tests/test_authentication.py::LoginTests::test_non_existant_user",
"authentication/tests/test_authentication.py::LoginTests::test_remember_me_cookie_set",
"authentication/tests/test_authentication.py::LoginTests::test_wrong_password",
"authentication/tests/test_authentication.py::LogoutTests::test_logout",
"authentication/tests/test_authentication.py::LoadUserTests::test_user_does_not_exist",
"authentication/tests/test_authentication.py::LoadUserTests::test_user_exists",
"authentication/tests/test_authentication.py::LoadUserFromTokenTests::test_fake_token",
"authentication/tests/test_authentication.py::LoadUserFromTokenTests::test_load_user_from_token",
"authentication/tests/test_authentication.py::CreateTodoTests::test_current_completion_time",
"authentication/tests/test_authentication.py::CreateTodoTests::test_missing_completed_flag",
"authentication/tests/test_authentication.py::CreateTodoTests::test_missing_text",
"authentication/tests/test_authentication.py::CreateTodoTests::test_success_response",
"authentication/tests/test_authentication.py::ReadTodoTests::test_completed",
"authentication/tests/test_authentication.py::ReadTodoTests::test_multiple_todos",
"authentication/tests/test_authentication.py::ReadTodoTests::test_non_existant",
"authentication/tests/test_authentication.py::ReadTodoTests::test_success",
"authentication/tests/test_authentication.py::DeleteTodoTests::test_delete_twice",
"authentication/tests/test_authentication.py::DeleteTodoTests::test_success",
"authentication/tests/test_authentication.py::ListTodosTests::test_list",
"authentication/tests/test_authentication.py::ListTodosTests::test_no_todos",
"storage/tests/test_storage.py::CreateUserTests::test_existing_user",
"storage/tests/test_storage.py::CreateUserTests::test_missing_email",
"storage/tests/test_storage.py::CreateUserTests::test_missing_password_hash",
"storage/tests/test_storage.py::CreateUserTests::test_success_response",
"storage/tests/test_storage.py::GetUserTests::test_non_existant_user",
"storage/tests/test_storage.py::GetUserTests::test_success",
"storage/tests/test_storage.py::GetUsersTests::test_with_users",
"storage/tests/test_storage.py::CreateTodoTests::test_missing_completed_flag",
"storage/tests/test_storage.py::CreateTodoTests::test_missing_completion_time",
"storage/tests/test_storage.py::CreateTodoTests::test_missing_text",
"storage/tests/test_storage.py::CreateTodoTests::test_success_response",
"storage/tests/test_storage.py::GetTodoTests::test_non_existant",
"storage/tests/test_storage.py::GetTodoTests::test_success",
"storage/tests/test_storage.py::GetTodoTests::test_timestamp_null",
"storage/tests/test_storage.py::DeleteTodoTests::test_delete_twice",
"storage/tests/test_storage.py::DeleteTodoTests::test_success",
"storage/tests/test_storage.py::ListTodosTests::test_list",
"storage/tests/test_storage.py::ListTodosTests::test_no_todos"
]
| [
"authentication/tests/test_authentication.py::SignupTests::test_incorrect_content_type",
"authentication/tests/test_authentication.py::LoginTests::test_incorrect_content_type",
"authentication/tests/test_authentication.py::LogoutTests::test_incorrect_content_type",
"authentication/tests/test_authentication.py::LogoutTests::test_logout_twice",
"authentication/tests/test_authentication.py::LogoutTests::test_not_logged_in",
"authentication/tests/test_authentication.py::UserTests::test_different_password_different_token",
"authentication/tests/test_authentication.py::UserTests::test_get_auth_token",
"authentication/tests/test_authentication.py::UserTests::test_get_id",
"authentication/tests/test_authentication.py::CreateTodoTests::test_incorrect_content_type",
"authentication/tests/test_authentication.py::ReadTodoTests::test_incorrect_content_type",
"authentication/tests/test_authentication.py::DeleteTodoTests::test_incorrect_content_type",
"storage/tests/test_storage.py::CreateUserTests::test_incorrect_content_type",
"storage/tests/test_storage.py::GetUserTests::test_incorrect_content_type",
"storage/tests/test_storage.py::GetUsersTests::test_incorrect_content_type",
"storage/tests/test_storage.py::GetUsersTests::test_no_users",
"storage/tests/test_storage.py::CreateTodoTests::test_incorrect_content_type",
"storage/tests/test_storage.py::GetTodoTests::test_incorrect_content_type",
"storage/tests/test_storage.py::DeleteTodoTests::test_incorrect_content_type"
]
| []
| null | 538 | [
"storage/storage.py",
"docs/source/index.rst",
"authentication/authentication.py"
]
| [
"storage/storage.py",
"docs/source/index.rst",
"authentication/authentication.py"
]
|
|
adamtheturtle__todo-46 | 736849d28dacdf6112fe2bef70aec1b6ceced636 | 2016-05-16 20:47:11 | 736849d28dacdf6112fe2bef70aec1b6ceced636 | diff --git a/authentication/authentication.py b/authentication/authentication.py
index c123d4f..445bcc7 100644
--- a/authentication/authentication.py
+++ b/authentication/authentication.py
@@ -327,7 +327,7 @@ def delete_todo(id):
@consumes('application/json')
def list_todos():
"""
- List todo items.
+ List todo items, with optional filters.
:reqheader Content-Type: application/json
:resheader Content-Type: application/json
@@ -346,6 +346,52 @@ def list_todos():
)
return jsonify(response.json()), response.status_code
+
+@app.route('/todos/<id>', methods=['PATCH'])
+@consumes('application/json')
+def update_todo(id):
+ """
+ Update a todo item. If an item is changed from not-completed to completed,
+ the ``completion_timestamp`` is set to the current time.
+
+ :reqheader Content-Type: application/json
+
+ :queryparameter number id: The id of the todo item.
+
+ :reqjson string content: The new content of the item (optional).
+ :reqjson boolean completed: Whether the item is completed (optional).
+
+ :resheader Content-Type: application/json
+
+ :resjson string id: The id of the item.
+ :resjson string content: The content item.
+ :resjson boolean completed: Whether the item is completed.
+ :resjson number completion_timestamp: The completion UNIX timestamp (now),
+ or ``null`` if the item is not completed.
+
+ :status 200: The item has been updated with the given details.
+ :status 404: There is no item with the given ``id``.
+ """
+ get_response, get_status_code = read_todo(id)
+
+ if get_status_code != codes.OK:
+ return jsonify(get_response.json), get_status_code
+
+ already_completed = get_response.json['completed']
+ data = json.loads(request.data)
+ if data.get('completed') and not already_completed:
+ now = datetime.datetime.now(tz=pytz.utc)
+ data['completion_timestamp'] = now.timestamp()
+ elif data.get('completed') is False:
+ data['completion_timestamp'] = None
+
+ response = requests.patch(
+ urljoin(STORAGE_URL, 'todos/{id}').format(id=id),
+ headers={'Content-Type': 'application/json'},
+ data=json.dumps(data),
+ )
+ return jsonify(response.json()), response.status_code
+
if __name__ == '__main__': # pragma: no cover
# Specifying 0.0.0.0 as the host tells the operating system to listen on
# all public IPs. This makes the server visible externally.
diff --git a/storage/storage.py b/storage/storage.py
index e596f8d..4027848 100644
--- a/storage/storage.py
+++ b/storage/storage.py
@@ -298,6 +298,53 @@ def list_todos():
todos = Todo.query.filter_by(**todo_filter).all()
return jsonify(todos=[todo.as_dict() for todo in todos]), codes.OK
+
+@app.route('/todos/<id>', methods=['PATCH'])
+@consumes('application/json')
+def update_todo(id):
+ """
+ Update a todo item.
+
+ :reqheader Content-Type: application/json
+
+ :queryparameter number id: The id of the todo item.
+
+ :reqjson string content: The new content of the item.
+ :reqjson boolean completed: Whether the item is completed.
+ :reqjson number completion_timestamp: The completion UNIX timestamp, or
+ ``null``.
+
+ :resheader Content-Type: application/json
+
+ :resjson string id: The id of the item.
+ :resjson string content: The content item.
+ :resjson boolean completed: Whether the item is completed.
+ :resjson number completion_timestamp: The completion UNIX timestamp (now),
+ or ``null`` if the item is not completed.
+
+ :status 200: The item has been updated with the given details.
+ :status 404: There is no item with the given ``id``.
+ """
+ todo = Todo.query.filter_by(id=id).first()
+
+ if todo is None:
+ return jsonify(
+ title='The requested todo does not exist.',
+ detail='No todo exists with the id "{id}"'.format(id=id),
+ ), codes.NOT_FOUND
+
+ if 'content' in request.json:
+ todo.content = request.json['content']
+
+ if 'completed' in request.json:
+ todo.completed = request.json['completed']
+
+ if 'completion_timestamp' in request.json:
+ todo.completion_timestamp = request.json['completion_timestamp']
+
+ db.session.commit()
+ return jsonify(todo.as_dict()), codes.OK
+
if __name__ == '__main__': # pragma: no cover
# Specifying 0.0.0.0 as the host tells the operating system to listen on
# all public IPs. This makes the server visible externally.
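
Illustrative only (not part of the patch): a minimal sketch of the PATCH semantics above — flagging an item completed makes the service stamp ``completion_timestamp`` with the current time, and un-flagging clears it. The http://localhost:5000 base URL is a hypothetical assumption.

import json
import requests

BASE = 'http://localhost:5000'  # hypothetical host/port, not specified by the patch
HEADERS = {'Content-Type': 'application/json'}

created = requests.post(BASE + '/todos', headers=HEADERS,
                        data=json.dumps({'content': 'Book vacation', 'completed': False}))
item_id = created.json()['id']

# The service sets completion_timestamp server-side when the flag flips to True...
done = requests.patch(BASE + '/todos/{id}'.format(id=item_id), headers=HEADERS,
                      data=json.dumps({'completed': True}))
assert done.json()['completion_timestamp'] is not None

# ...and clears it again when the flag flips back to False.
undone = requests.patch(BASE + '/todos/{id}'.format(id=item_id), headers=HEADERS,
                        data=json.dumps({'completed': False}))
assert undone.json()['completion_timestamp'] is None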
| Add ability to update a TODO item (PATCH) | adamtheturtle/todo | diff --git a/authentication/tests/test_authentication.py b/authentication/tests/test_authentication.py
index 2a76131..6f7108a 100644
--- a/authentication/tests/test_authentication.py
+++ b/authentication/tests/test_authentication.py
@@ -884,3 +884,220 @@ class ListTodosTests(AuthenticationTests):
"""
response = self.app.get('/todos', content_type='text/html')
self.assertEqual(response.status_code, codes.UNSUPPORTED_MEDIA_TYPE)
+
+
+class UpdateTodoTests(AuthenticationTests):
+ """
+ Tests for updating a todo item at ``PATCH /todos/{id}``.
+ """
+
+ @responses.activate
+ def test_change_content(self):
+ """
+ It is possible to change the content of a todo item.
+ """
+ create = self.app.post(
+ '/todos',
+ content_type='application/json',
+ data=json.dumps(NOT_COMPLETED_TODO_DATA),
+ )
+
+ new_content = 'Book vacation'
+
+ patch = self.app.patch(
+ '/todos/{id}'.format(id=create.json['id']),
+ content_type='application/json',
+ data=json.dumps({'content': new_content}),
+ )
+
+ expected = create.json
+ expected['content'] = new_content
+
+ self.assertEqual(patch.status_code, codes.OK)
+ self.assertEqual(patch.json, expected)
+
+ read = self.app.get(
+ '/todos/{id}'.format(id=create.json['id']),
+ content_type='application/json',
+ )
+
+ self.assertEqual(read.json, expected)
+
+ @responses.activate
+ @freeze_time(datetime.datetime.fromtimestamp(5.0, tz=pytz.utc))
+ def test_flag_completed(self):
+ """
+ It is possible to flag a todo item as completed.
+ """
+ create = self.app.post(
+ '/todos',
+ content_type='application/json',
+ data=json.dumps(NOT_COMPLETED_TODO_DATA),
+ )
+
+ patch = self.app.patch(
+ '/todos/{id}'.format(id=create.json['id']),
+ content_type='application/json',
+ data=json.dumps({'completed': True}),
+ )
+
+ expected = create.json
+ expected['completed'] = True
+ # Timestamp set to now, the time it is first marked completed.
+ expected['completion_timestamp'] = 5.0
+
+ self.assertEqual(patch.status_code, codes.OK)
+ self.assertEqual(patch.json, expected)
+
+ read = self.app.get(
+ '/todos/{id}'.format(id=create.json['id']),
+ content_type='application/json',
+ )
+
+ self.assertEqual(read.json, expected)
+
+ @responses.activate
+ def test_flag_not_completed(self):
+ """
+ It is possible to flag a todo item as not completed.
+ """
+ create = self.app.post(
+ '/todos',
+ content_type='application/json',
+ data=json.dumps(COMPLETED_TODO_DATA),
+ )
+
+ patch = self.app.patch(
+ '/todos/{id}'.format(id=create.json['id']),
+ content_type='application/json',
+ data=json.dumps({'completed': False}),
+ )
+
+ expected = create.json
+ expected['completed'] = False
+ # Marking an item as not completed removes the completion timestamp.
+ expected['completion_timestamp'] = None
+
+ self.assertEqual(patch.status_code, codes.OK)
+ self.assertEqual(patch.json, expected)
+
+ read = self.app.get(
+ '/todos/{id}'.format(id=create.json['id']),
+ content_type='application/json',
+ )
+
+ self.assertEqual(read.json, expected)
+
+ @responses.activate
+ def test_change_content_and_flag(self):
+ """
+ It is possible to change the content of a todo item and to update
+ its completed flag in the same request.
+ """
+ create = self.app.post(
+ '/todos',
+ content_type='application/json',
+ data=json.dumps(NOT_COMPLETED_TODO_DATA),
+ )
+
+ new_content = 'Book vacation'
+
+ patch = self.app.patch(
+ '/todos/{id}'.format(id=create.json['id']),
+ content_type='application/json',
+ data=json.dumps({'content': new_content, 'completed': False}),
+ )
+
+ expected = create.json
+ expected['content'] = new_content
+ expected['completed'] = False
+ expected['completion_timestamp'] = None
+
+ self.assertEqual(patch.status_code, codes.OK)
+ self.assertEqual(patch.json, expected)
+
+ read = self.app.get(
+ '/todos/{id}'.format(id=create.json['id']),
+ content_type='application/json',
+ )
+
+ self.assertEqual(read.json, expected)
+
+ @responses.activate
+ def test_flag_completed_already_completed(self):
+ """
+ Flagging an already completed item as completed does not change the
+ completion timestamp.
+ """
+ create_time = datetime.datetime.fromtimestamp(5.0, tz=pytz.utc)
+ with freeze_time(create_time):
+ create = self.app.post(
+ '/todos',
+ content_type='application/json',
+ data=json.dumps(COMPLETED_TODO_DATA),
+ )
+
+ patch_time = datetime.datetime.fromtimestamp(6.0, tz=pytz.utc)
+ with freeze_time(patch_time):
+ patch = self.app.patch(
+ '/todos/{id}'.format(id=create.json['id']),
+ content_type='application/json',
+ data=json.dumps({'completed': True}),
+ )
+
+ expected = create.json
+ # Timestamp set to the time it is first marked completed.
+ expected['completion_timestamp'] = 5.0
+
+ self.assertEqual(patch.status_code, codes.OK)
+ self.assertEqual(patch.json, expected)
+
+ read = self.app.get(
+ '/todos/{id}'.format(id=create.json['id']),
+ content_type='application/json',
+ )
+
+ self.assertEqual(read.json, expected)
+
+ @responses.activate
+ def test_remain_same(self):
+ """
+ Not requesting any changes keeps the item the same.
+ """
+ create = self.app.post(
+ '/todos',
+ content_type='application/json',
+ data=json.dumps(COMPLETED_TODO_DATA),
+ )
+
+ patch = self.app.patch(
+ '/todos/{id}'.format(id=create.json['id']),
+ content_type='application/json',
+ data=json.dumps({}),
+ )
+
+ self.assertEqual(create.json, patch.json)
+
+ @responses.activate
+ def test_non_existant(self):
+ """
+ If the todo item to be updated does not exist, a ``NOT_FOUND`` error is
+ returned.
+ """
+ response = self.app.patch('/todos/1', content_type='application/json')
+
+ self.assertEqual(response.headers['Content-Type'], 'application/json')
+ self.assertEqual(response.status_code, codes.NOT_FOUND)
+ expected = {
+ 'title': 'The requested todo does not exist.',
+ 'detail': 'No todo exists with the id "1"',
+ }
+ self.assertEqual(response.json, expected)
+
+ def test_incorrect_content_type(self):
+ """
+ If a Content-Type header other than 'application/json' is given, an
+ UNSUPPORTED_MEDIA_TYPE status code is given.
+ """
+ response = self.app.patch('/todos/1', content_type='text/html')
+ self.assertEqual(response.status_code, codes.UNSUPPORTED_MEDIA_TYPE)
diff --git a/storage/tests/test_storage.py b/storage/tests/test_storage.py
index aef8af7..f8c2c90 100644
--- a/storage/tests/test_storage.py
+++ b/storage/tests/test_storage.py
@@ -574,3 +574,166 @@ class ListTodosTests(InMemoryStorageTests):
"""
response = self.storage_app.get('/todos', content_type='text/html')
self.assertEqual(response.status_code, codes.UNSUPPORTED_MEDIA_TYPE)
+
+
+class UpdateTodoTests(InMemoryStorageTests):
+ """
+ Tests for updating a todo item at ``PATCH /todos/{id}``.
+ """
+
+ def test_change_content(self):
+ """
+ It is possible to change the content of a todo item.
+ """
+ create = self.storage_app.post(
+ '/todos',
+ content_type='application/json',
+ data=json.dumps(NOT_COMPLETED_TODO_DATA),
+ )
+
+ new_content = 'Book vacation'
+
+ patch = self.storage_app.patch(
+ '/todos/{id}'.format(id=create.json['id']),
+ content_type='application/json',
+ data=json.dumps({'content': new_content}),
+ )
+
+ expected = NOT_COMPLETED_TODO_DATA.copy()
+ expected['content'] = new_content
+ expected['completion_timestamp'] = None
+ expected['id'] = create.json['id']
+
+ self.assertEqual(patch.status_code, codes.OK)
+ self.assertEqual(patch.json, expected)
+
+ read = self.storage_app.get(
+ '/todos/{id}'.format(id=create.json['id']),
+ content_type='application/json',
+ )
+
+ self.assertEqual(read.json, expected)
+
+ def test_flag_completed(self):
+ """
+ It is possible to flag a todo item as completed.
+ """
+ create = self.storage_app.post(
+ '/todos',
+ content_type='application/json',
+ data=json.dumps(NOT_COMPLETED_TODO_DATA),
+ )
+
+ patch = self.storage_app.patch(
+ '/todos/{id}'.format(id=create.json['id']),
+ content_type='application/json',
+ data=json.dumps({'completed': True, 'completion_timestamp': 2.0}),
+ )
+
+ expected = NOT_COMPLETED_TODO_DATA.copy()
+ expected['completed'] = True
+ expected['completion_timestamp'] = 2
+ expected['id'] = create.json['id']
+
+ self.assertEqual(patch.status_code, codes.OK)
+ self.assertEqual(patch.json, expected)
+
+ read = self.storage_app.get(
+ '/todos/{id}'.format(id=create.json['id']),
+ content_type='application/json',
+ )
+
+ self.assertEqual(read.json, expected)
+
+ def test_flag_not_completed(self):
+ """
+ It is possible to flag a todo item as not completed.
+ """
+ create = self.storage_app.post(
+ '/todos',
+ content_type='application/json',
+ data=json.dumps(COMPLETED_TODO_DATA),
+ )
+
+ patch = self.storage_app.patch(
+ '/todos/{id}'.format(id=create.json['id']),
+ content_type='application/json',
+ data=json.dumps(
+ {'completed': False, 'completion_timestamp': None}),
+ )
+
+ expected = COMPLETED_TODO_DATA.copy()
+ expected['completed'] = False
+ expected['completion_timestamp'] = None
+ expected['id'] = create.json['id']
+
+ self.assertEqual(patch.status_code, codes.OK)
+ self.assertEqual(patch.json, expected)
+
+ read = self.storage_app.get(
+ '/todos/{id}'.format(id=create.json['id']),
+ content_type='application/json',
+ )
+
+ self.assertEqual(read.json, expected)
+
+ def test_change_content_and_flag(self):
+ """
+ It is possible to change the content of a todo item and to update
+ its completed flag in the same request.
+ """
+ create = self.storage_app.post(
+ '/todos',
+ content_type='application/json',
+ data=json.dumps(NOT_COMPLETED_TODO_DATA),
+ )
+
+ new_content = 'Book vacation'
+
+ patch = self.storage_app.patch(
+ '/todos/{id}'.format(id=create.json['id']),
+ content_type='application/json',
+ data=json.dumps({'content': new_content, 'completed': False}),
+ )
+
+ expected = NOT_COMPLETED_TODO_DATA.copy()
+ expected['content'] = new_content
+ expected['completed'] = False
+ expected['completion_timestamp'] = None
+ expected['id'] = create.json['id']
+
+ self.assertEqual(patch.status_code, codes.OK)
+ self.assertEqual(patch.json, expected)
+
+ read = self.storage_app.get(
+ '/todos/{id}'.format(id=create.json['id']),
+ content_type='application/json',
+ )
+
+ self.assertEqual(read.json, expected)
+
+ def test_non_existant(self):
+ """
+ If the todo item to be updated does not exist, a ``NOT_FOUND`` error is
+ returned.
+ """
+ response = self.storage_app.patch(
+ '/todos/1',
+ content_type='application/json',
+ )
+
+ self.assertEqual(response.headers['Content-Type'], 'application/json')
+ self.assertEqual(response.status_code, codes.NOT_FOUND)
+ expected = {
+ 'title': 'The requested todo does not exist.',
+ 'detail': 'No todo exists with the id "1"',
+ }
+ self.assertEqual(response.json, expected)
+
+ def test_incorrect_content_type(self):
+ """
+ If a Content-Type header other than 'application/json' is given, an
+ UNSUPPORTED_MEDIA_TYPE status code is given.
+ """
+ response = self.storage_app.patch('/todos/1', content_type='text/html')
+ self.assertEqual(response.status_code, codes.UNSUPPORTED_MEDIA_TYPE)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 1
},
"num_modified_files": 2
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": null,
"python": "3.5",
"reqs_path": [
"requirements.txt",
"dev-requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
bcrypt==4.0.1
certifi==2021.5.30
coverage==6.2
coveralls==3.3.1
dataclasses==0.8
doc8==0.11.2
docopt==0.6.2
docutils==0.18.1
execnet==1.9.0
flake8==3.9.2
Flask==0.10.1
Flask-Bcrypt==0.7.1
Flask-JsonSchema==0.1.1
Flask-Login==0.3.2
Flask-Negotiate==0.1.0
Flask-SQLAlchemy==2.1
Flask-Testing==0.8.1
freezegun==1.2.2
greenlet==2.0.2
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
itsdangerous==2.0.1
Jinja2==3.0.3
jsonschema==3.2.0
MarkupSafe==2.0.1
mccabe==0.6.1
packaging==21.3
pbr==6.1.1
pluggy==1.0.0
py==1.11.0
pycodestyle==2.7.0
pyflakes==2.3.1
Pygments==2.14.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
pytest-asyncio==0.16.0
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-xdist==3.0.2
python-dateutil==2.9.0.post0
pytz==2016.4
-e git+https://github.com/adamtheturtle/todo.git@736849d28dacdf6112fe2bef70aec1b6ceced636#egg=Qlutter_TODOer
requests==2.10.0
responses==0.17.0
restructuredtext-lint==1.4.0
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-httpdomain==1.8.1
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
SQLAlchemy==1.4.54
stevedore==3.5.2
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
Werkzeug==2.0.3
zipp==3.6.0
| name: todo
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- bcrypt==4.0.1
- coverage==6.2
- coveralls==3.3.1
- dataclasses==0.8
- doc8==0.11.2
- docopt==0.6.2
- docutils==0.18.1
- execnet==1.9.0
- flake8==3.9.2
- flask==0.10.1
- flask-bcrypt==0.7.1
- flask-jsonschema==0.1.1
- flask-login==0.3.2
- flask-negotiate==0.1.0
- flask-sqlalchemy==2.1
- flask-testing==0.8.1
- freezegun==1.2.2
- greenlet==2.0.2
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- itsdangerous==2.0.1
- jinja2==3.0.3
- jsonschema==3.2.0
- markupsafe==2.0.1
- mccabe==0.6.1
- packaging==21.3
- pbr==6.1.1
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.7.0
- pyflakes==2.3.1
- pygments==2.14.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pytest-asyncio==0.16.0
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-xdist==3.0.2
- python-dateutil==2.9.0.post0
- pytz==2016.4
- requests==2.10.0
- responses==0.17.0
- restructuredtext-lint==1.4.0
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-httpdomain==1.8.1
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- sqlalchemy==1.4.54
- stevedore==3.5.2
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- werkzeug==2.0.3
- zipp==3.6.0
prefix: /opt/conda/envs/todo
| [
"authentication/tests/test_authentication.py::UpdateTodoTests::test_incorrect_content_type",
"storage/tests/test_storage.py::UpdateTodoTests::test_incorrect_content_type"
]
| [
"authentication/tests/test_authentication.py::SignupTests::test_existing_user",
"authentication/tests/test_authentication.py::SignupTests::test_missing_email",
"authentication/tests/test_authentication.py::SignupTests::test_missing_password",
"authentication/tests/test_authentication.py::SignupTests::test_passwords_hashed",
"authentication/tests/test_authentication.py::SignupTests::test_signup",
"authentication/tests/test_authentication.py::LoginTests::test_login",
"authentication/tests/test_authentication.py::LoginTests::test_missing_email",
"authentication/tests/test_authentication.py::LoginTests::test_missing_password",
"authentication/tests/test_authentication.py::LoginTests::test_non_existant_user",
"authentication/tests/test_authentication.py::LoginTests::test_remember_me_cookie_set",
"authentication/tests/test_authentication.py::LoginTests::test_wrong_password",
"authentication/tests/test_authentication.py::LogoutTests::test_logout",
"authentication/tests/test_authentication.py::LogoutTests::test_logout_twice",
"authentication/tests/test_authentication.py::LoadUserTests::test_user_does_not_exist",
"authentication/tests/test_authentication.py::LoadUserTests::test_user_exists",
"authentication/tests/test_authentication.py::LoadUserFromTokenTests::test_fake_token",
"authentication/tests/test_authentication.py::LoadUserFromTokenTests::test_load_user_from_token",
"authentication/tests/test_authentication.py::CreateTodoTests::test_current_completion_time",
"authentication/tests/test_authentication.py::CreateTodoTests::test_missing_completed_flag",
"authentication/tests/test_authentication.py::CreateTodoTests::test_missing_text",
"authentication/tests/test_authentication.py::CreateTodoTests::test_success_response",
"authentication/tests/test_authentication.py::ReadTodoTests::test_completed",
"authentication/tests/test_authentication.py::ReadTodoTests::test_multiple_todos",
"authentication/tests/test_authentication.py::ReadTodoTests::test_non_existant",
"authentication/tests/test_authentication.py::ReadTodoTests::test_success",
"authentication/tests/test_authentication.py::DeleteTodoTests::test_delete_twice",
"authentication/tests/test_authentication.py::DeleteTodoTests::test_success",
"authentication/tests/test_authentication.py::ListTodosTests::test_filter_completed",
"authentication/tests/test_authentication.py::ListTodosTests::test_filter_not_completed",
"authentication/tests/test_authentication.py::ListTodosTests::test_list",
"authentication/tests/test_authentication.py::ListTodosTests::test_no_todos",
"authentication/tests/test_authentication.py::UpdateTodoTests::test_change_content",
"authentication/tests/test_authentication.py::UpdateTodoTests::test_change_content_and_flag",
"authentication/tests/test_authentication.py::UpdateTodoTests::test_flag_completed",
"authentication/tests/test_authentication.py::UpdateTodoTests::test_flag_completed_already_completed",
"authentication/tests/test_authentication.py::UpdateTodoTests::test_flag_not_completed",
"authentication/tests/test_authentication.py::UpdateTodoTests::test_non_existant",
"authentication/tests/test_authentication.py::UpdateTodoTests::test_remain_same",
"storage/tests/test_storage.py::CreateUserTests::test_existing_user",
"storage/tests/test_storage.py::CreateUserTests::test_missing_email",
"storage/tests/test_storage.py::CreateUserTests::test_missing_password_hash",
"storage/tests/test_storage.py::CreateUserTests::test_success_response",
"storage/tests/test_storage.py::GetUserTests::test_non_existant_user",
"storage/tests/test_storage.py::GetUserTests::test_success",
"storage/tests/test_storage.py::GetUsersTests::test_with_users",
"storage/tests/test_storage.py::CreateTodoTests::test_missing_completed_flag",
"storage/tests/test_storage.py::CreateTodoTests::test_missing_completion_time",
"storage/tests/test_storage.py::CreateTodoTests::test_missing_text",
"storage/tests/test_storage.py::CreateTodoTests::test_success_response",
"storage/tests/test_storage.py::GetTodoTests::test_non_existant",
"storage/tests/test_storage.py::GetTodoTests::test_success",
"storage/tests/test_storage.py::GetTodoTests::test_timestamp_null",
"storage/tests/test_storage.py::DeleteTodoTests::test_delete_twice",
"storage/tests/test_storage.py::DeleteTodoTests::test_success",
"storage/tests/test_storage.py::ListTodosTests::test_filter_completed",
"storage/tests/test_storage.py::ListTodosTests::test_filter_not_completed",
"storage/tests/test_storage.py::ListTodosTests::test_list",
"storage/tests/test_storage.py::ListTodosTests::test_no_todos",
"storage/tests/test_storage.py::UpdateTodoTests::test_change_content",
"storage/tests/test_storage.py::UpdateTodoTests::test_change_content_and_flag",
"storage/tests/test_storage.py::UpdateTodoTests::test_flag_completed",
"storage/tests/test_storage.py::UpdateTodoTests::test_flag_not_completed",
"storage/tests/test_storage.py::UpdateTodoTests::test_non_existant"
]
| [
"authentication/tests/test_authentication.py::SignupTests::test_incorrect_content_type",
"authentication/tests/test_authentication.py::LoginTests::test_incorrect_content_type",
"authentication/tests/test_authentication.py::LogoutTests::test_incorrect_content_type",
"authentication/tests/test_authentication.py::LogoutTests::test_not_logged_in",
"authentication/tests/test_authentication.py::UserTests::test_different_password_different_token",
"authentication/tests/test_authentication.py::UserTests::test_get_auth_token",
"authentication/tests/test_authentication.py::UserTests::test_get_id",
"authentication/tests/test_authentication.py::CreateTodoTests::test_incorrect_content_type",
"authentication/tests/test_authentication.py::ReadTodoTests::test_incorrect_content_type",
"authentication/tests/test_authentication.py::DeleteTodoTests::test_incorrect_content_type",
"authentication/tests/test_authentication.py::ListTodosTests::test_incorrect_content_type",
"storage/tests/test_storage.py::CreateUserTests::test_incorrect_content_type",
"storage/tests/test_storage.py::GetUserTests::test_incorrect_content_type",
"storage/tests/test_storage.py::GetUsersTests::test_incorrect_content_type",
"storage/tests/test_storage.py::GetUsersTests::test_no_users",
"storage/tests/test_storage.py::CreateTodoTests::test_incorrect_content_type",
"storage/tests/test_storage.py::GetTodoTests::test_incorrect_content_type",
"storage/tests/test_storage.py::DeleteTodoTests::test_incorrect_content_type",
"storage/tests/test_storage.py::ListTodosTests::test_incorrect_content_type"
]
| []
| null | 539 | [
"storage/storage.py",
"authentication/authentication.py"
]
| [
"storage/storage.py",
"authentication/authentication.py"
]
|
|
adamtheturtle__todo-47 | f81fa85e3c06d931963f76f2d0772ce0b9db67b9 | 2016-05-16 22:04:31 | f81fa85e3c06d931963f76f2d0772ce0b9db67b9 | diff --git a/authentication/authentication.py b/authentication/authentication.py
index 445bcc7..6937a58 100644
--- a/authentication/authentication.py
+++ b/authentication/authentication.py
@@ -250,9 +250,10 @@ def signup():
@app.route('/todos', methods=['POST'])
@consumes('application/json')
@jsonschema.validate('todos', 'create')
+@login_required
def create_todo():
"""
- Create a new todo item.
+ Create a new todo item. Requires log in.
:reqheader Content-Type: application/json
:resheader Content-Type: application/json
@@ -287,9 +288,10 @@ def create_todo():
@app.route('/todos/<id>', methods=['GET'])
@consumes('application/json')
+@login_required
def read_todo(id):
"""
- Get information about a particular todo item.
+ Get information about a particular todo item. Requires log in.
:reqheader Content-Type: application/json
:resheader Content-Type: application/json
@@ -307,9 +309,10 @@ def read_todo(id):
@app.route('/todos/<id>', methods=['DELETE'])
@consumes('application/json')
+@login_required
def delete_todo(id):
"""
- Delete a particular todo item.
+ Delete a particular todo item. Requires log in.
:reqheader Content-Type: application/json
:resheader Content-Type: application/json
@@ -325,9 +328,10 @@ def delete_todo(id):
@app.route('/todos', methods=['GET'])
@consumes('application/json')
+@login_required
def list_todos():
"""
- List todo items, with optional filters.
+ List todo items, with optional filters. Requires log in.
:reqheader Content-Type: application/json
:resheader Content-Type: application/json
@@ -349,10 +353,11 @@ def list_todos():
@app.route('/todos/<id>', methods=['PATCH'])
@consumes('application/json')
+@login_required
def update_todo(id):
"""
Update a todo item. If an item is changed from not-completed to completed,
- the ``completion_timestamp`` is set as now.
+ the ``completion_timestamp`` is set as now. Requires log in.
:reqheader Content-Type: application/json
| Protect the TODO CRUD APIs | adamtheturtle/todo | diff --git a/authentication/tests/test_authentication.py b/authentication/tests/test_authentication.py
index 6f7108a..29fb21c 100644
--- a/authentication/tests/test_authentication.py
+++ b/authentication/tests/test_authentication.py
@@ -30,6 +30,7 @@ from storage.tests.testtools import InMemoryStorageTests
USER_DATA = {'email': '[email protected]', 'password': 'secret'}
COMPLETED_TODO_DATA = {'content': 'Buy milk', 'completed': True}
NOT_COMPLETED_TODO_DATA = {'content': 'Get haircut', 'completed': False}
+TIMESTAMP = 1463437744.335567
class AuthenticationTests(InMemoryStorageTests):
@@ -93,6 +94,19 @@ class AuthenticationTests(InMemoryStorageTests):
{key: value for (key, value) in response.headers},
response.data)
+ def log_in_as_new_user(self):
+ """
+ Create a user and log in as that user.
+ """
+ self.app.post(
+ '/signup',
+ content_type='application/json',
+ data=json.dumps(USER_DATA))
+ self.app.post(
+ '/login',
+ content_type='application/json',
+ data=json.dumps(USER_DATA))
+
class SignupTests(AuthenticationTests):
"""
@@ -503,6 +517,7 @@ class CreateTodoTests(AuthenticationTests):
returns a JSON response with the given data and a ``null``
``completion_timestamp``.
"""
+ self.log_in_as_new_user()
response = self.app.post(
'/todos',
content_type='application/json',
@@ -516,12 +531,13 @@ class CreateTodoTests(AuthenticationTests):
self.assertEqual(response.json, expected)
@responses.activate
- @freeze_time(datetime.datetime.fromtimestamp(5.01, tz=pytz.utc))
+ @freeze_time(datetime.datetime.fromtimestamp(TIMESTAMP, tz=pytz.utc))
def test_current_completion_time(self):
"""
If the completed flag is set to ``true`` then the completed time is
the number of seconds since the epoch.
"""
+ self.log_in_as_new_user()
response = self.app.post(
'/todos',
content_type='application/json',
@@ -534,7 +550,7 @@ class CreateTodoTests(AuthenticationTests):
# some accuracy).
self.assertAlmostEqual(
response.json['completion_timestamp'],
- 5.01,
+ TIMESTAMP,
places=3,
)
@@ -580,14 +596,29 @@ class CreateTodoTests(AuthenticationTests):
}
self.assertEqual(response.json, expected)
+ @responses.activate
def test_incorrect_content_type(self):
"""
If a Content-Type header other than 'application/json' is given, an
UNSUPPORTED_MEDIA_TYPE status code is given.
"""
+ self.log_in_as_new_user()
response = self.app.post('/todos', content_type='text/html')
self.assertEqual(response.status_code, codes.UNSUPPORTED_MEDIA_TYPE)
+ @responses.activate
+ def test_not_logged_in(self):
+ """
+ When no user is logged in, an UNAUTHORIZED status code is returned.
+ """
+ response = self.app.post(
+ '/todos',
+ content_type='application/json',
+ data=json.dumps(NOT_COMPLETED_TODO_DATA),
+ )
+
+ self.assertEqual(response.status_code, codes.UNAUTHORIZED)
+
class ReadTodoTests(AuthenticationTests):
"""
@@ -600,6 +631,7 @@ class ReadTodoTests(AuthenticationTests):
A ``GET`` request for an existing todo an OK status code and the todo's
details.
"""
+ self.log_in_as_new_user()
create = self.app.post(
'/todos',
content_type='application/json',
@@ -618,12 +650,13 @@ class ReadTodoTests(AuthenticationTests):
self.assertEqual(read.json, expected)
@responses.activate
- @freeze_time(datetime.datetime.fromtimestamp(5, tz=pytz.utc))
+ @freeze_time(datetime.datetime.fromtimestamp(TIMESTAMP, tz=pytz.utc))
def test_completed(self):
"""
A ``GET`` request for an existing todo an OK status code and the todo's
details, included the completion timestamp.
"""
+ self.log_in_as_new_user()
create = self.app.post(
'/todos',
content_type='application/json',
@@ -637,8 +670,12 @@ class ReadTodoTests(AuthenticationTests):
self.assertEqual(read.status_code, codes.OK)
expected = COMPLETED_TODO_DATA.copy()
- expected['completion_timestamp'] = 5
expected['id'] = create.json['id']
+ self.assertAlmostEqual(
+ read.json.pop('completion_timestamp'),
+ TIMESTAMP,
+ places=3
+ )
self.assertEqual(read.json, expected)
@responses.activate
@@ -646,6 +683,7 @@ class ReadTodoTests(AuthenticationTests):
"""
A ``GET`` request gets the correct todo when there are multiple.
"""
+ self.log_in_as_new_user()
self.app.post(
'/todos',
content_type='application/json',
@@ -681,6 +719,7 @@ class ReadTodoTests(AuthenticationTests):
A ``GET`` request for a todo which does not exist returns a NOT_FOUND
status code and error details.
"""
+ self.log_in_as_new_user()
response = self.app.get('/todos/1', content_type='application/json')
self.assertEqual(response.headers['Content-Type'], 'application/json')
@@ -699,6 +738,27 @@ class ReadTodoTests(AuthenticationTests):
response = self.app.get('/todos/1', content_type='text/html')
self.assertEqual(response.status_code, codes.UNSUPPORTED_MEDIA_TYPE)
+ @responses.activate
+ def test_not_logged_in(self):
+ """
+ When no user is logged in, an UNAUTHORIZED status code is returned.
+ """
+ self.log_in_as_new_user()
+ create = self.app.post(
+ '/todos',
+ content_type='application/json',
+ data=json.dumps(NOT_COMPLETED_TODO_DATA),
+ )
+
+ self.app.post('/logout', content_type='application/json')
+
+ read = self.app.get(
+ '/todos/{id}'.format(id=create.json['id']),
+ content_type='application/json',
+ )
+
+ self.assertEqual(read.status_code, codes.UNAUTHORIZED)
+
class DeleteTodoTests(AuthenticationTests):
"""
@@ -710,6 +770,7 @@ class DeleteTodoTests(AuthenticationTests):
"""
It is possible to delete a todo item.
"""
+ self.log_in_as_new_user()
create = self.app.post(
'/todos',
content_type='application/json',
@@ -735,6 +796,7 @@ class DeleteTodoTests(AuthenticationTests):
"""
Deleting an item twice gives returns a 404 code and error message.
"""
+ self.log_in_as_new_user()
create = self.app.post(
'/todos',
content_type='application/json',
@@ -758,14 +820,38 @@ class DeleteTodoTests(AuthenticationTests):
}
self.assertEqual(delete.json, expected)
+ @responses.activate
def test_incorrect_content_type(self):
"""
If a Content-Type header other than 'application/json' is given, an
UNSUPPORTED_MEDIA_TYPE status code is given.
"""
+ self.log_in_as_new_user()
response = self.app.delete('/todos/1', content_type='text/html')
self.assertEqual(response.status_code, codes.UNSUPPORTED_MEDIA_TYPE)
+ @responses.activate
+ def test_not_logged_in(self):
+ """
+ When no user is logged in, an UNAUTHORIZED status code is returned.
+ """
+ self.log_in_as_new_user()
+
+ create = self.app.post(
+ '/todos',
+ content_type='application/json',
+ data=json.dumps(COMPLETED_TODO_DATA),
+ )
+
+ self.app.post('/logout', content_type='application/json')
+
+ delete = self.app.delete(
+ '/todos/{id}'.format(id=create.json['id']),
+ content_type='application/json',
+ )
+
+ self.assertEqual(delete.status_code, codes.UNAUTHORIZED)
+
class ListTodosTests(AuthenticationTests):
"""
@@ -777,6 +863,7 @@ class ListTodosTests(AuthenticationTests):
"""
When there are no todos, an empty array is returned.
"""
+ self.log_in_as_new_user()
list_todos = self.app.get(
'/todos',
content_type='application/json',
@@ -785,11 +872,24 @@ class ListTodosTests(AuthenticationTests):
self.assertEqual(list_todos.status_code, codes.OK)
self.assertEqual(list_todos.json['todos'], [])
+ @responses.activate
+ def test_not_logged_in(self):
+ """
+ When no user is logged in, an UNAUTHORIZED status code is returned.
+ """
+ list_todos = self.app.get(
+ '/todos',
+ content_type='application/json',
+ )
+
+ self.assertEqual(list_todos.status_code, codes.UNAUTHORIZED)
+
@responses.activate
def test_list(self):
"""
All todos are listed.
"""
+ self.log_in_as_new_user()
other_todo = NOT_COMPLETED_TODO_DATA.copy()
other_todo['content'] = 'Get a haircut'
@@ -815,11 +915,12 @@ class ListTodosTests(AuthenticationTests):
self.assertEqual(list_todos.json['todos'], expected)
@responses.activate
- @freeze_time(datetime.datetime.fromtimestamp(5, tz=pytz.utc))
+ @freeze_time(datetime.datetime.fromtimestamp(TIMESTAMP, tz=pytz.utc))
def test_filter_completed(self):
"""
It is possible to filter by only completed items.
"""
+ self.log_in_as_new_user()
self.app.post(
'/todos',
content_type='application/json',
@@ -842,15 +943,21 @@ class ListTodosTests(AuthenticationTests):
self.assertEqual(list_todos.status_code, codes.OK)
expected = COMPLETED_TODO_DATA.copy()
- expected['completion_timestamp'] = 5.0
expected['id'] = 2
- self.assertEqual(list_todos_data['todos'], [expected])
+ [todo] = list_todos_data['todos']
+ self.assertAlmostEqual(
+ todo.pop('completion_timestamp'),
+ TIMESTAMP,
+ places=3,
+ )
+ self.assertEqual(todo, expected)
@responses.activate
def test_filter_not_completed(self):
"""
It is possible to filter by only items which are not completed.
"""
+ self.log_in_as_new_user()
self.app.post(
'/todos',
content_type='application/json',
@@ -877,6 +984,7 @@ class ListTodosTests(AuthenticationTests):
expected['id'] = 1
self.assertEqual(list_todos_data['todos'], [expected])
+ @responses.activate
def test_incorrect_content_type(self):
"""
If a Content-Type header other than 'application/json' is given, an
@@ -896,6 +1004,7 @@ class UpdateTodoTests(AuthenticationTests):
"""
It is possible to change the content of a todo item.
"""
+ self.log_in_as_new_user()
create = self.app.post(
'/todos',
content_type='application/json',
@@ -924,11 +1033,34 @@ class UpdateTodoTests(AuthenticationTests):
self.assertEqual(read.json, expected)
@responses.activate
- @freeze_time(datetime.datetime.fromtimestamp(5.0, tz=pytz.utc))
+ def test_not_logged_in(self):
+ """
+ When no user is logged in, an UNAUTHORIZED status code is returned.
+ """
+ self.log_in_as_new_user()
+ create = self.app.post(
+ '/todos',
+ content_type='application/json',
+ data=json.dumps(NOT_COMPLETED_TODO_DATA),
+ )
+
+ self.app.post('/logout', content_type='application/json')
+
+ patch = self.app.patch(
+ '/todos/{id}'.format(id=create.json['id']),
+ content_type='application/json',
+ data=json.dumps({'content': 'Book vacation'}),
+ )
+
+ self.assertEqual(patch.status_code, codes.UNAUTHORIZED)
+
+ @responses.activate
+ @freeze_time(datetime.datetime.fromtimestamp(TIMESTAMP, tz=pytz.utc))
def test_flag_completed(self):
"""
It is possible to flag a todo item as completed.
"""
+ self.log_in_as_new_user()
create = self.app.post(
'/todos',
content_type='application/json',
@@ -943,10 +1075,14 @@ class UpdateTodoTests(AuthenticationTests):
expected = create.json
expected['completed'] = True
- # Timestamp set to now, the time it is first marked completed.
- expected['completion_timestamp'] = 5.0
+ expected['completion_timestamp'] = TIMESTAMP
self.assertEqual(patch.status_code, codes.OK)
+ self.assertAlmostEqual(
+ patch.json.pop('completion_timestamp'),
+ expected.pop('completion_timestamp'),
+ places=3,
+ )
self.assertEqual(patch.json, expected)
read = self.app.get(
@@ -954,6 +1090,11 @@ class UpdateTodoTests(AuthenticationTests):
content_type='application/json',
)
+ self.assertAlmostEqual(
+ read.json.pop('completion_timestamp'),
+ TIMESTAMP,
+ places=3,
+ )
self.assertEqual(read.json, expected)
@responses.activate
@@ -961,6 +1102,7 @@ class UpdateTodoTests(AuthenticationTests):
"""
It is possible to flag a todo item as not completed.
"""
+ self.log_in_as_new_user()
create = self.app.post(
'/todos',
content_type='application/json',
@@ -994,6 +1136,7 @@ class UpdateTodoTests(AuthenticationTests):
It is possible to change the content of a todo item, as well as marking
the item as completed.
"""
+ self.log_in_as_new_user()
create = self.app.post(
'/todos',
content_type='application/json',
@@ -1029,7 +1172,8 @@ class UpdateTodoTests(AuthenticationTests):
Flagging an already completed item as completed does not change the
completion timestamp.
"""
- create_time = datetime.datetime.fromtimestamp(5.0, tz=pytz.utc)
+ self.log_in_as_new_user()
+ create_time = datetime.datetime.fromtimestamp(TIMESTAMP, tz=pytz.utc)
with freeze_time(create_time):
create = self.app.post(
'/todos',
@@ -1037,7 +1181,8 @@ class UpdateTodoTests(AuthenticationTests):
data=json.dumps(COMPLETED_TODO_DATA),
)
- patch_time = datetime.datetime.fromtimestamp(6.0, tz=pytz.utc)
+ patch_time = datetime.datetime.fromtimestamp(
+ TIMESTAMP + 1, tz=pytz.utc)
with freeze_time(patch_time):
patch = self.app.patch(
'/todos/{id}'.format(id=create.json['id']),
@@ -1045,25 +1190,34 @@ class UpdateTodoTests(AuthenticationTests):
data=json.dumps({'completed': True}),
)
- expected = create.json
- # Timestamp set to the time it is first marked completed.
- expected['completion_timestamp'] = 5.0
-
+ self.assertAlmostEqual(
+ patch.json.pop('completion_timestamp'),
+ # Timestamp set to the time it is first marked completed.
+ create.json.pop('completion_timestamp'),
+ places=3,
+ )
self.assertEqual(patch.status_code, codes.OK)
- self.assertEqual(patch.json, expected)
+ self.assertEqual(patch.json, create.json)
read = self.app.get(
'/todos/{id}'.format(id=create.json['id']),
content_type='application/json',
)
- self.assertEqual(read.json, expected)
+ self.assertAlmostEqual(
+ read.json.pop('completion_timestamp'),
+ # Timestamp set to the time it is first marked completed.
+ TIMESTAMP,
+ places=3,
+ )
+ self.assertEqual(read.json, create.json)
@responses.activate
def test_remain_same(self):
"""
Not requesting any changes keeps the item the same.
"""
+ self.log_in_as_new_user()
create = self.app.post(
'/todos',
content_type='application/json',
@@ -1084,6 +1238,7 @@ class UpdateTodoTests(AuthenticationTests):
If the todo item to be updated does not exist, a ``NOT_FOUND`` error is
returned.
"""
+ self.log_in_as_new_user()
response = self.app.patch('/todos/1', content_type='application/json')
self.assertEqual(response.headers['Content-Type'], 'application/json')
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 0
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": null,
"python": "3.5",
"reqs_path": [
"requirements.txt",
"dev-requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
bcrypt==4.0.1
certifi==2021.5.30
coverage==6.2
coveralls==3.3.1
dataclasses==0.8
doc8==0.11.2
docopt==0.6.2
docutils==0.18.1
execnet==1.9.0
flake8==3.9.2
Flask==0.10.1
Flask-Bcrypt==0.7.1
Flask-JsonSchema==0.1.1
Flask-Login==0.3.2
Flask-Negotiate==0.1.0
Flask-SQLAlchemy==2.1
Flask-Testing==0.8.1
freezegun==1.2.2
greenlet==2.0.2
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
itsdangerous==2.0.1
Jinja2==3.0.3
jsonschema==3.2.0
MarkupSafe==2.0.1
mccabe==0.6.1
packaging==21.3
pbr==6.1.1
pluggy==1.0.0
py==1.11.0
pycodestyle==2.7.0
pyflakes==2.3.1
Pygments==2.14.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
pytest-asyncio==0.16.0
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-xdist==3.0.2
python-dateutil==2.9.0.post0
pytz==2016.4
-e git+https://github.com/adamtheturtle/todo.git@f81fa85e3c06d931963f76f2d0772ce0b9db67b9#egg=Qlutter_TODOer
requests==2.10.0
responses==0.17.0
restructuredtext-lint==1.4.0
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-httpdomain==1.8.1
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
SQLAlchemy==1.4.54
stevedore==3.5.2
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
Werkzeug==2.0.3
zipp==3.6.0
| name: todo
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- bcrypt==4.0.1
- coverage==6.2
- coveralls==3.3.1
- dataclasses==0.8
- doc8==0.11.2
- docopt==0.6.2
- docutils==0.18.1
- execnet==1.9.0
- flake8==3.9.2
- flask==0.10.1
- flask-bcrypt==0.7.1
- flask-jsonschema==0.1.1
- flask-login==0.3.2
- flask-negotiate==0.1.0
- flask-sqlalchemy==2.1
- flask-testing==0.8.1
- freezegun==1.2.2
- greenlet==2.0.2
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- itsdangerous==2.0.1
- jinja2==3.0.3
- jsonschema==3.2.0
- markupsafe==2.0.1
- mccabe==0.6.1
- packaging==21.3
- pbr==6.1.1
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.7.0
- pyflakes==2.3.1
- pygments==2.14.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pytest-asyncio==0.16.0
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-xdist==3.0.2
- python-dateutil==2.9.0.post0
- pytz==2016.4
- requests==2.10.0
- responses==0.17.0
- restructuredtext-lint==1.4.0
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-httpdomain==1.8.1
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- sqlalchemy==1.4.54
- stevedore==3.5.2
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- werkzeug==2.0.3
- zipp==3.6.0
prefix: /opt/conda/envs/todo
| [
"authentication/tests/test_authentication.py::ListTodosTests::test_not_logged_in"
]
| [
"authentication/tests/test_authentication.py::SignupTests::test_existing_user",
"authentication/tests/test_authentication.py::SignupTests::test_missing_email",
"authentication/tests/test_authentication.py::SignupTests::test_missing_password",
"authentication/tests/test_authentication.py::SignupTests::test_passwords_hashed",
"authentication/tests/test_authentication.py::SignupTests::test_signup",
"authentication/tests/test_authentication.py::LoginTests::test_login",
"authentication/tests/test_authentication.py::LoginTests::test_missing_email",
"authentication/tests/test_authentication.py::LoginTests::test_missing_password",
"authentication/tests/test_authentication.py::LoginTests::test_non_existant_user",
"authentication/tests/test_authentication.py::LoginTests::test_remember_me_cookie_set",
"authentication/tests/test_authentication.py::LoginTests::test_wrong_password",
"authentication/tests/test_authentication.py::LogoutTests::test_logout",
"authentication/tests/test_authentication.py::LogoutTests::test_logout_twice",
"authentication/tests/test_authentication.py::LoadUserTests::test_user_does_not_exist",
"authentication/tests/test_authentication.py::LoadUserTests::test_user_exists",
"authentication/tests/test_authentication.py::LoadUserFromTokenTests::test_fake_token",
"authentication/tests/test_authentication.py::LoadUserFromTokenTests::test_load_user_from_token",
"authentication/tests/test_authentication.py::CreateTodoTests::test_current_completion_time",
"authentication/tests/test_authentication.py::CreateTodoTests::test_incorrect_content_type",
"authentication/tests/test_authentication.py::CreateTodoTests::test_missing_completed_flag",
"authentication/tests/test_authentication.py::CreateTodoTests::test_missing_text",
"authentication/tests/test_authentication.py::CreateTodoTests::test_not_logged_in",
"authentication/tests/test_authentication.py::CreateTodoTests::test_success_response",
"authentication/tests/test_authentication.py::ReadTodoTests::test_completed",
"authentication/tests/test_authentication.py::ReadTodoTests::test_multiple_todos",
"authentication/tests/test_authentication.py::ReadTodoTests::test_non_existant",
"authentication/tests/test_authentication.py::ReadTodoTests::test_not_logged_in",
"authentication/tests/test_authentication.py::ReadTodoTests::test_success",
"authentication/tests/test_authentication.py::DeleteTodoTests::test_delete_twice",
"authentication/tests/test_authentication.py::DeleteTodoTests::test_incorrect_content_type",
"authentication/tests/test_authentication.py::DeleteTodoTests::test_not_logged_in",
"authentication/tests/test_authentication.py::DeleteTodoTests::test_success",
"authentication/tests/test_authentication.py::ListTodosTests::test_filter_completed",
"authentication/tests/test_authentication.py::ListTodosTests::test_filter_not_completed",
"authentication/tests/test_authentication.py::ListTodosTests::test_list",
"authentication/tests/test_authentication.py::ListTodosTests::test_no_todos",
"authentication/tests/test_authentication.py::UpdateTodoTests::test_change_content",
"authentication/tests/test_authentication.py::UpdateTodoTests::test_change_content_and_flag",
"authentication/tests/test_authentication.py::UpdateTodoTests::test_flag_completed",
"authentication/tests/test_authentication.py::UpdateTodoTests::test_flag_completed_already_completed",
"authentication/tests/test_authentication.py::UpdateTodoTests::test_flag_not_completed",
"authentication/tests/test_authentication.py::UpdateTodoTests::test_non_existant",
"authentication/tests/test_authentication.py::UpdateTodoTests::test_not_logged_in",
"authentication/tests/test_authentication.py::UpdateTodoTests::test_remain_same"
]
| [
"authentication/tests/test_authentication.py::SignupTests::test_incorrect_content_type",
"authentication/tests/test_authentication.py::LoginTests::test_incorrect_content_type",
"authentication/tests/test_authentication.py::LogoutTests::test_incorrect_content_type",
"authentication/tests/test_authentication.py::LogoutTests::test_not_logged_in",
"authentication/tests/test_authentication.py::UserTests::test_different_password_different_token",
"authentication/tests/test_authentication.py::UserTests::test_get_auth_token",
"authentication/tests/test_authentication.py::UserTests::test_get_id",
"authentication/tests/test_authentication.py::ReadTodoTests::test_incorrect_content_type",
"authentication/tests/test_authentication.py::ListTodosTests::test_incorrect_content_type",
"authentication/tests/test_authentication.py::UpdateTodoTests::test_incorrect_content_type"
]
| []
| null | 540 | [
"authentication/authentication.py"
]
| [
"authentication/authentication.py"
]
|
|
kapouille__mongoquery-4 | 759677a81968ec264caa65020777cdae11d8a5e2 | 2016-05-17 11:51:11 | 759677a81968ec264caa65020777cdae11d8a5e2 | foolswood: The Travis failure is python 3.2 only (and resolved by https://www.python.org/dev/peps/pep-0414/).
There are a couple of options I can see to sort this out:
- Remove (or find another way to create) the unicode literal in the tests.
- Stop supporting python 3.2.
rajcze: > The Travis failure is python 3.2 only (and resolved by https://www.python.org/dev/peps/pep-0414/).
> There are a couple of options I can see to sort this out...
My bad for only testing on 2.7 and 3.5... It's not really my call, but I'd rather remove the unicode literal from the test suite. Even though we could go with `unicode()`, and something like:
```lang=python
try:
unicode()
except NameError:
unicode = lambda x: str(x)
```
It seems too nasty to just facilitate for what IMHO is quite a corner-case issue.
foolswood: I don't know many things using python3.2 (I don't have it locally either) so I don't blame you at all.
I was leaning towards the literal removal too.
Thought I'd put some options out, see if there was something I was missing. | diff --git a/mongoquery/__init__.py b/mongoquery/__init__.py
index 9fa9482..83dc2ff 100644
--- a/mongoquery/__init__.py
+++ b/mongoquery/__init__.py
@@ -15,6 +15,10 @@ class _Undefined(object):
pass
+def is_non_string_sequence(entry):
+ return isinstance(entry, Sequence) and not isinstance(entry, string_type)
+
+
class Query(object):
def __init__(self, definition):
self._definition = definition
@@ -29,7 +33,7 @@ class Query(object):
for sub_operator, sub_condition in condition.items()
)
else:
- if isinstance(entry, Sequence):
+ if is_non_string_sequence(entry):
return condition in entry
else:
return condition == entry
@@ -39,7 +43,7 @@ class Query(object):
return entry
if entry is None:
return entry
- if isinstance(entry, Sequence) and not isinstance(entry, string_type):
+ if is_non_string_sequence(entry):
try:
index = int(path[0])
return self._extract(entry[index], path[1:])
@@ -277,7 +281,7 @@ class Query(object):
)
)
- if isinstance(entry, Sequence) and not isinstance(entry, string_type):
+ if is_non_string_sequence(entry):
return len(entry) == condition
return False
| Checks against strings don't work properly
See the following case:
```
>>> import mongoquery
>>> a = {"a": "5", "b": 6}
>>> query = mongoquery.Query({"a": 2})
>>> query.match(a)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python2.7/site-packages/mongoquery/__init__.py", line 23, in match
return self._match(self._definition, entry)
File "/usr/local/lib/python2.7/site-packages/mongoquery/__init__.py", line 29, in _match
for sub_operator, sub_condition in condition.items()
File "/usr/local/lib/python2.7/site-packages/mongoquery/__init__.py", line 29, in <genexpr>
for sub_operator, sub_condition in condition.items()
File "/usr/local/lib/python2.7/site-packages/mongoquery/__init__.py", line 72, in _process_condition
return self._match(condition, extracted_data)
File "/usr/local/lib/python2.7/site-packages/mongoquery/__init__.py", line 33, in _match
return condition in entry
TypeError: 'in <string>' requires string as left operand, not int
```
This happens because we have:
```
def _match(self, condition, entry):
if isinstance(condition, Mapping):
return all(
self._process_condition(sub_operator, sub_condition, entry)
for sub_operator, sub_condition in condition.items()
)
else:
if isinstance(entry, Sequence): <----- HERE
return condition in entry
else:
return condition == entry
```
`str` is actually a subtype of `collections.Sequence`, and thus we try to do an `in` comparison when we really want equality. This seems like a bug even when both values are strings, because this can happen:
```
>>> b = {"a": "567", "b": 6}
>>> query2 = mongoquery.Query({"a": "6"})
>>> query2.match(b)
True
```
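To make the cause concrete, here is a minimal standalone check (standard library only; on Python 2 the import would be `from collections import Sequence`) showing that `str` passes the `Sequence` test and that `in` on a string means substring containment:
```
from collections.abc import Sequence  # Python 2: from collections import Sequence

print(isinstance("567", Sequence))  # True: str is registered as a Sequence
print("6" in "567")                 # True: 'in' on a string is a substring test
print("6" == "567")                 # False: the equality check we actually want
```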
The second query matches for the same reason - we're doing a substring `in` check when we really want a strict equality check. There are a couple of possible fixes:
1. Check against explicit types instead of `Sequence`
2. Ensure that it is not a string-like type (e.g. bytes, unicode, str, etc) | kapouille/mongoquery | diff --git a/tests/test_query.py b/tests/test_query.py
index 27d44e9..22ef7c8 100644
--- a/tests/test_query.py
+++ b/tests/test_query.py
@@ -103,6 +103,11 @@ class TestQuery(TestCase):
self._query({"qty": {"$type": 'number'}})
)
+ self.assertEqual(
+ _ALL,
+ self._query({"item": {"$type": 'string'}})
+ )
+
self.assertEqual(
[],
self._query({"qty": {"$type": 'string'}})
@@ -385,3 +390,8 @@ class TestQuery(TestCase):
collection = [{"turtles": "swim"}]
self.assertEqual(
[], self._query({"turtles": {"value": "swim"}}, collection))
+
+ def test_query_string(self):
+ collection = [{"a": "5"}, {"a": "567"}]
+ self.assertEqual([], self._query({"a": 5}, collection))
+ self.assertEqual([{"a": "5"}], self._query({"a": "5"}, collection))
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | list | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
iniconfig==2.1.0
-e git+https://github.com/kapouille/mongoquery.git@759677a81968ec264caa65020777cdae11d8a5e2#egg=mongoquery
nose==1.3.7
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
tomli==2.2.1
| name: mongoquery
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- nose==1.3.7
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- tomli==2.2.1
prefix: /opt/conda/envs/mongoquery
| [
"tests/test_query.py::TestQuery::test_query_string"
]
| []
| [
"tests/test_query.py::TestQuery::test_array",
"tests/test_query.py::TestQuery::test_bad_query_doesnt_infinitely_recurse",
"tests/test_query.py::TestQuery::test_comparison",
"tests/test_query.py::TestQuery::test_element",
"tests/test_query.py::TestQuery::test_evaluation",
"tests/test_query.py::TestQuery::test_integer_mapping_key_exists",
"tests/test_query.py::TestQuery::test_query_integer_keyed",
"tests/test_query.py::TestQuery::test_query_subfield_not_found",
"tests/test_query.py::TestQuery::test_simple_lookup"
]
| []
| The Unlicense | 542 | [
"mongoquery/__init__.py"
]
| [
"mongoquery/__init__.py"
]
|
adamtheturtle__todo-49 | 1d4c12ba785e521b5b0b8b40d54613280cf0bb34 | 2016-05-17 12:16:24 | 1d4c12ba785e521b5b0b8b40d54613280cf0bb34 | diff --git a/.travis.yml b/.travis.yml
index 087594a..dbd7728 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -12,7 +12,7 @@ script:
# Build documentation HTML.
- sphinx-build -W -b html -d build/doctrees docs/source build/html
# Run all discoverable tests, but set the source directories so that the coverage tool knows not to include coverage for all dependencies.
- - "coverage run --branch --source=authentication,storage -m unittest discover"
+ - "coverage run --branch --source=todoer,storage -m unittest discover"
after_success:
# Sends the coverage report to coveralls.io which can report to Pull Requests
# and track test coverage over time.
diff --git a/docker-compose.yml b/docker-compose.yml
index 16eb9a5..3524129 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,4 +1,4 @@
-authenticate:
+todoer:
build: .
ports:
# HOST:CONTAINER
@@ -9,7 +9,7 @@ authenticate:
environment:
# Set the environment variable SECRET_KEY else the secret will be insecure.
- SECRET_KEY
- command: python authentication/authentication.py
+ command: python todoer/todoer.py
links:
- storage
storage:
@@ -22,7 +22,7 @@ storage:
# See http://flask.pocoo.org/docs/0.10/api/#flask.Flask.run
- "5001:5001"
volumes:
- - /tmp/authentication:/data
+ - /tmp/todoer:/data
environment:
- - SQLALCHEMY_DATABASE_URI=sqlite:////data/authentication.db
+ - SQLALCHEMY_DATABASE_URI=sqlite:////data/todoer.db
command: python storage/storage.py
diff --git a/docs/source/index.rst b/docs/source/index.rst
index c83eeab..d1ea106 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -1,5 +1,5 @@
``todoer`` API Documentation
============================
-.. autoflask:: authentication.authentication:app
+.. autoflask:: todoer.todoer:app
:undoc-static:
diff --git a/authentication/authentication.py b/todoer/todoer.py
similarity index 99%
rename from authentication/authentication.py
rename to todoer/todoer.py
index 6937a58..5fb937c 100644
--- a/authentication/authentication.py
+++ b/todoer/todoer.py
@@ -1,5 +1,5 @@
"""
-An authentication service.
+An authentication service with todo capabilities.
"""
import datetime
| Rename authentication service | adamtheturtle/todo | diff --git a/authentication/__init__.py b/todoer/__init__.py
similarity index 100%
rename from authentication/__init__.py
rename to todoer/__init__.py
diff --git a/authentication/schemas/todos.json b/todoer/schemas/todos.json
similarity index 100%
rename from authentication/schemas/todos.json
rename to todoer/schemas/todos.json
diff --git a/authentication/schemas/user.json b/todoer/schemas/user.json
similarity index 100%
rename from authentication/schemas/user.json
rename to todoer/schemas/user.json
diff --git a/authentication/tests/__init__.py b/todoer/tests/__init__.py
similarity index 100%
rename from authentication/tests/__init__.py
rename to todoer/tests/__init__.py
diff --git a/authentication/tests/test_authentication.py b/todoer/tests/test_todoer.py
similarity index 99%
rename from authentication/tests/test_authentication.py
rename to todoer/tests/test_todoer.py
index 29fb21c..2c9b504 100644
--- a/authentication/tests/test_authentication.py
+++ b/todoer/tests/test_todoer.py
@@ -1,5 +1,5 @@
"""
-Tests for authentication.authentication.
+Tests for todoer.todoer.
"""
import datetime
@@ -16,7 +16,7 @@ from requests import codes
from urllib.parse import urljoin
from werkzeug.http import parse_cookie
-from authentication.authentication import (
+from todoer.todoer import (
app,
bcrypt,
load_user_from_id,
@@ -674,7 +674,7 @@ class ReadTodoTests(AuthenticationTests):
self.assertAlmostEqual(
read.json.pop('completion_timestamp'),
TIMESTAMP,
- places=3
+ places=3,
)
self.assertEqual(read.json, expected)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 4
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": null,
"python": "3.5",
"reqs_path": [
"requirements.txt",
"dev-requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
bcrypt==4.0.1
certifi==2021.5.30
coverage==6.2
coveralls==3.3.1
dataclasses==0.8
doc8==0.11.2
docopt==0.6.2
docutils==0.18.1
execnet==1.9.0
flake8==3.9.2
Flask==0.10.1
Flask-Bcrypt==0.7.1
Flask-JsonSchema==0.1.1
Flask-Login==0.3.2
Flask-Negotiate==0.1.0
Flask-SQLAlchemy==2.1
Flask-Testing==0.8.1
freezegun==1.2.2
greenlet==2.0.2
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
itsdangerous==2.0.1
Jinja2==3.0.3
jsonschema==3.2.0
MarkupSafe==2.0.1
mccabe==0.6.1
packaging==21.3
pbr==6.1.1
pluggy==1.0.0
py==1.11.0
pycodestyle==2.7.0
pyflakes==2.3.1
Pygments==2.14.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
pytest-asyncio==0.16.0
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-xdist==3.0.2
python-dateutil==2.9.0.post0
pytz==2016.4
-e git+https://github.com/adamtheturtle/todo.git@1d4c12ba785e521b5b0b8b40d54613280cf0bb34#egg=Qlutter_TODOer
requests==2.10.0
responses==0.17.0
restructuredtext-lint==1.4.0
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-httpdomain==1.8.1
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
SQLAlchemy==1.4.54
stevedore==3.5.2
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
Werkzeug==2.0.3
zipp==3.6.0
| name: todo
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- bcrypt==4.0.1
- coverage==6.2
- coveralls==3.3.1
- dataclasses==0.8
- doc8==0.11.2
- docopt==0.6.2
- docutils==0.18.1
- execnet==1.9.0
- flake8==3.9.2
- flask==0.10.1
- flask-bcrypt==0.7.1
- flask-jsonschema==0.1.1
- flask-login==0.3.2
- flask-negotiate==0.1.0
- flask-sqlalchemy==2.1
- flask-testing==0.8.1
- freezegun==1.2.2
- greenlet==2.0.2
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- itsdangerous==2.0.1
- jinja2==3.0.3
- jsonschema==3.2.0
- markupsafe==2.0.1
- mccabe==0.6.1
- packaging==21.3
- pbr==6.1.1
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.7.0
- pyflakes==2.3.1
- pygments==2.14.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pytest-asyncio==0.16.0
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-xdist==3.0.2
- python-dateutil==2.9.0.post0
- pytz==2016.4
- requests==2.10.0
- responses==0.17.0
- restructuredtext-lint==1.4.0
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-httpdomain==1.8.1
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- sqlalchemy==1.4.54
- stevedore==3.5.2
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- werkzeug==2.0.3
- zipp==3.6.0
prefix: /opt/conda/envs/todo
| [
"todoer/tests/test_todoer.py::SignupTests::test_incorrect_content_type",
"todoer/tests/test_todoer.py::LoginTests::test_incorrect_content_type",
"todoer/tests/test_todoer.py::LogoutTests::test_incorrect_content_type",
"todoer/tests/test_todoer.py::LogoutTests::test_not_logged_in",
"todoer/tests/test_todoer.py::UserTests::test_different_password_different_token",
"todoer/tests/test_todoer.py::UserTests::test_get_auth_token",
"todoer/tests/test_todoer.py::UserTests::test_get_id",
"todoer/tests/test_todoer.py::ReadTodoTests::test_incorrect_content_type",
"todoer/tests/test_todoer.py::ListTodosTests::test_incorrect_content_type",
"todoer/tests/test_todoer.py::ListTodosTests::test_not_logged_in",
"todoer/tests/test_todoer.py::UpdateTodoTests::test_incorrect_content_type"
]
| [
"todoer/tests/test_todoer.py::SignupTests::test_existing_user",
"todoer/tests/test_todoer.py::SignupTests::test_missing_email",
"todoer/tests/test_todoer.py::SignupTests::test_missing_password",
"todoer/tests/test_todoer.py::SignupTests::test_passwords_hashed",
"todoer/tests/test_todoer.py::SignupTests::test_signup",
"todoer/tests/test_todoer.py::LoginTests::test_login",
"todoer/tests/test_todoer.py::LoginTests::test_missing_email",
"todoer/tests/test_todoer.py::LoginTests::test_missing_password",
"todoer/tests/test_todoer.py::LoginTests::test_non_existant_user",
"todoer/tests/test_todoer.py::LoginTests::test_remember_me_cookie_set",
"todoer/tests/test_todoer.py::LoginTests::test_wrong_password",
"todoer/tests/test_todoer.py::LogoutTests::test_logout",
"todoer/tests/test_todoer.py::LogoutTests::test_logout_twice",
"todoer/tests/test_todoer.py::LoadUserTests::test_user_does_not_exist",
"todoer/tests/test_todoer.py::LoadUserTests::test_user_exists",
"todoer/tests/test_todoer.py::LoadUserFromTokenTests::test_fake_token",
"todoer/tests/test_todoer.py::LoadUserFromTokenTests::test_load_user_from_token",
"todoer/tests/test_todoer.py::CreateTodoTests::test_current_completion_time",
"todoer/tests/test_todoer.py::CreateTodoTests::test_incorrect_content_type",
"todoer/tests/test_todoer.py::CreateTodoTests::test_missing_completed_flag",
"todoer/tests/test_todoer.py::CreateTodoTests::test_missing_text",
"todoer/tests/test_todoer.py::CreateTodoTests::test_not_logged_in",
"todoer/tests/test_todoer.py::CreateTodoTests::test_success_response",
"todoer/tests/test_todoer.py::ReadTodoTests::test_completed",
"todoer/tests/test_todoer.py::ReadTodoTests::test_multiple_todos",
"todoer/tests/test_todoer.py::ReadTodoTests::test_non_existant",
"todoer/tests/test_todoer.py::ReadTodoTests::test_not_logged_in",
"todoer/tests/test_todoer.py::ReadTodoTests::test_success",
"todoer/tests/test_todoer.py::DeleteTodoTests::test_delete_twice",
"todoer/tests/test_todoer.py::DeleteTodoTests::test_incorrect_content_type",
"todoer/tests/test_todoer.py::DeleteTodoTests::test_not_logged_in",
"todoer/tests/test_todoer.py::DeleteTodoTests::test_success",
"todoer/tests/test_todoer.py::ListTodosTests::test_filter_completed",
"todoer/tests/test_todoer.py::ListTodosTests::test_filter_not_completed",
"todoer/tests/test_todoer.py::ListTodosTests::test_list",
"todoer/tests/test_todoer.py::ListTodosTests::test_no_todos",
"todoer/tests/test_todoer.py::UpdateTodoTests::test_change_content",
"todoer/tests/test_todoer.py::UpdateTodoTests::test_change_content_and_flag",
"todoer/tests/test_todoer.py::UpdateTodoTests::test_flag_completed",
"todoer/tests/test_todoer.py::UpdateTodoTests::test_flag_completed_already_completed",
"todoer/tests/test_todoer.py::UpdateTodoTests::test_flag_not_completed",
"todoer/tests/test_todoer.py::UpdateTodoTests::test_non_existant",
"todoer/tests/test_todoer.py::UpdateTodoTests::test_not_logged_in",
"todoer/tests/test_todoer.py::UpdateTodoTests::test_remain_same"
]
| []
| []
| null | 543 | [
".travis.yml",
"docker-compose.yml",
"docs/source/index.rst",
"authentication/authentication.py"
]
| [
".travis.yml",
"docker-compose.yml",
"docs/source/index.rst",
"todoer/todoer.py"
]
|
|
peterbe__hashin-22 | ce536a0cffac911124b9af4d14ec0ab79e32816a | 2016-05-17 12:43:56 | ce536a0cffac911124b9af4d14ec0ab79e32816a | diff --git a/hashin.py b/hashin.py
index 510e9bf..d2d1fe6 100755
--- a/hashin.py
+++ b/hashin.py
@@ -120,7 +120,6 @@ def run(spec, file, algorithm, python_versions=None, verbose=False):
def amend_requirements_content(requirements, package, new_lines):
-
# if the package wasn't already there, add it to the bottom
if '%s==' % package not in requirements:
# easy peasy
@@ -132,7 +131,7 @@ def amend_requirements_content(requirements, package, new_lines):
lines = []
padding = ' ' * 4
for line in requirements.splitlines():
- if '{0}=='.format(package) in line:
+ if line.startswith('{0}=='.format(package)):
lines.append(line)
elif lines and line.startswith(padding):
lines.append(line)
| Wrong package replaced when target name is found in existing package
For example, an attempt to add hashes for the `selenium` package replaces the `pytest-selenium` package. Another example would be `pytest-django` and `django`.
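The root cause is a substring membership test on each requirements line. A minimal sketch (the line content here is hypothetical, but the check mirrors the one in `amend_requirements_content`) shows the false positive and an anchored alternative:
```python
line = "pytest-selenium==1.2.1 \\"
package = "selenium"

print("{0}==".format(package) in line)           # True: substring hit, so the wrong line is replaced
print(line.startswith("{0}==".format(package)))  # False: anchoring at line start avoids the false positive
```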
Before:
```ini
pytest-selenium==1.2.1 \
--hash=sha256:e82f0a265b0e238ac42ac275d79313d0a7e0bef1a450633aeb3d6549cc14f517 \
--hash=sha256:bd2121022ff3255ce82faec0ef3602462ec6bce9ca627b53462986cfc9b391e9
selenium==2.52.0 \
--hash=sha256:820550a740ca1f746c399a0101986c0e6f94fbfe3c6f976e3f694db452cbe124
```
Command:
```bash
$ hashin selenium==2.53.1 requirements.txt
```
After:
```ini
selenium==2.53.1 \
--hash=sha256:b1af142650ed7025f906349ae0d7ed1f1a1e635e6ce7ac67e2b2f854f9f8fdc1 \
--hash=sha256:53929418a41295b526fbb68e43bc32fe93c3ef99c030b9e705caf1de486440de
``` | peterbe/hashin | diff --git a/tests/test_cli.py b/tests/test_cli.py
index 839c27a..40c9d50 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -188,6 +188,29 @@ autocompeter==1.2.3 \\
)
self.assertEqual(result, previous + new_lines)
+ def test_amend_requirements_content_new_similar_name(self):
+ """This test came from https://github.com/peterbe/hashin/issues/15"""
+ previous_1 = """
+pytest-selenium==1.2.1 \
+ --hash=sha256:e82f0a265b0e238ac42ac275d79313d0a7e0bef1a450633aeb3d6549cc14f517 \
+ --hash=sha256:bd2121022ff3255ce82faec0ef3602462ec6bce9ca627b53462986cfc9b391e9
+ """.strip() + '\n'
+ previous_2 = """
+selenium==2.52.0 \
+ --hash=sha256:820550a740ca1f746c399a0101986c0e6f94fbfe3c6f976e3f694db452cbe124
+ """.strip() + '\n'
+ new_lines = """
+selenium==2.53.1 \
+ --hash=sha256:b1af142650ed7025f906349ae0d7ed1f1a1e635e6ce7ac67e2b2f854f9f8fdc1 \
+ --hash=sha256:53929418a41295b526fbb68e43bc32fe93c3ef99c030b9e705caf1de486440de
+ """.strip()
+ result = hashin.amend_requirements_content(
+ previous_1 + previous_2, 'selenium', new_lines
+ )
+ self.assertTrue(previous_1 in result)
+ self.assertTrue(previous_2 not in result)
+ self.assertTrue(new_lines in result)
+
@cleanup_tmpdir('hashin*')
@mock.patch('hashin.urlopen')
def test_run(self, murlopen):
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"mock",
"flake8",
"black",
"therapist",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio",
"pytest-bdd",
"pytest-benchmark",
"pytest-randomly",
"responses",
"hypothesis",
"freezegun",
"trustme",
"requests-mock",
"requests",
"tomlkit"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"test-requirements.txt",
"lint-requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
black==25.1.0
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
click==8.1.8
colorama==0.4.6
coverage==7.8.0
cryptography==44.0.2
exceptiongroup==1.2.2
execnet==2.1.1
flake8==7.2.0
freezegun==1.5.1
gherkin-official==29.0.0
-e git+https://github.com/peterbe/hashin.git@ce536a0cffac911124b9af4d14ec0ab79e32816a#egg=hashin
hypothesis==6.130.5
idna==3.10
importlib_metadata==8.6.1
iniconfig==2.1.0
Mako==1.3.9
MarkupSafe==3.0.2
mccabe==0.7.0
mock==5.2.0
mypy-extensions==1.0.0
packaging==24.2
parse==1.20.2
parse_type==0.6.4
pathspec==0.12.1
platformdirs==4.3.7
pluggy==1.5.0
py-cpuinfo==9.0.0
pycodestyle==2.13.0
pycparser==2.22
pyflakes==3.3.1
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-bdd==8.1.0
pytest-benchmark==5.1.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-randomly==3.16.0
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
PyYAML==6.0.2
requests==2.32.3
requests-mock==1.12.1
responses==0.25.7
six==1.17.0
sortedcontainers==2.4.0
therapist==2.2.0
tomli==2.2.1
tomlkit==0.13.2
trustme==1.2.1
typing_extensions==4.13.0
urllib3==2.3.0
zipp==3.21.0
| name: hashin
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- black==25.1.0
- certifi==2025.1.31
- cffi==1.17.1
- charset-normalizer==3.4.1
- click==8.1.8
- colorama==0.4.6
- coverage==7.8.0
- cryptography==44.0.2
- exceptiongroup==1.2.2
- execnet==2.1.1
- flake8==7.2.0
- freezegun==1.5.1
- gherkin-official==29.0.0
- hypothesis==6.130.5
- idna==3.10
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- mako==1.3.9
- markupsafe==3.0.2
- mccabe==0.7.0
- mock==5.2.0
- mypy-extensions==1.0.0
- packaging==24.2
- parse==1.20.2
- parse-type==0.6.4
- pathspec==0.12.1
- platformdirs==4.3.7
- pluggy==1.5.0
- py-cpuinfo==9.0.0
- pycodestyle==2.13.0
- pycparser==2.22
- pyflakes==3.3.1
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-bdd==8.1.0
- pytest-benchmark==5.1.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-randomly==3.16.0
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- pyyaml==6.0.2
- requests==2.32.3
- requests-mock==1.12.1
- responses==0.25.7
- six==1.17.0
- sortedcontainers==2.4.0
- therapist==2.2.0
- tomli==2.2.1
- tomlkit==0.13.2
- trustme==1.2.1
- typing-extensions==4.13.0
- urllib3==2.3.0
- zipp==3.21.0
prefix: /opt/conda/envs/hashin
| [
"tests/test_cli.py::Tests::test_amend_requirements_content_new_similar_name"
]
| [
"tests/test_cli.py::Tests::test_run"
]
| [
"tests/test_cli.py::Tests::test_amend_requirements_content_replacement_2",
"tests/test_cli.py::Tests::test_get_latest_version_simple",
"tests/test_cli.py::Tests::test_amend_requirements_content_replacement_amonst_others_2",
"tests/test_cli.py::Tests::test_expand_python_version",
"tests/test_cli.py::Tests::test_filter_releases",
"tests/test_cli.py::Tests::test_get_hashes_error",
"tests/test_cli.py::Tests::test_amend_requirements_content_replacement_amonst_others",
"tests/test_cli.py::Tests::test_amend_requirements_content_new",
"tests/test_cli.py::Tests::test_release_url_metadata_python",
"tests/test_cli.py::Tests::test_amend_requirements_content_replacement",
"tests/test_cli.py::Tests::test_amend_requirements_content_replacement_single_to_multi"
]
| []
| MIT License | 544 | [
"hashin.py"
]
| [
"hashin.py"
]
|
|
sigmavirus24__github3.py-613 | f6948ac9097f61dd44d8666ac1de42edbea666d5 | 2016-05-17 16:50:13 | 05ed0c6a02cffc6ddd0e82ce840c464e1c5fd8c4 | diff --git a/github3/pulls.py b/github3/pulls.py
index 10457b7d..497e01f0 100644
--- a/github3/pulls.py
+++ b/github3/pulls.py
@@ -8,7 +8,6 @@ This module contains all the classes relating to pull requests.
"""
from __future__ import unicode_literals
-from re import match
from json import dumps
from . import models
@@ -178,10 +177,8 @@ class PullRequest(models.GitHubCore):
#: GitHub.com url for review comments (not a template)
self.review_comments_url = pull.get('review_comments_url')
- m = match('https?://[\w\d\-\.\:]+/(\S+)/(\S+)/(?:issues|pull)?/\d+',
- self.issue_url)
#: Returns ('owner', 'repository') this issue was filed on.
- self.repository = m.groups()
+ self.repository = self.base.repo
#: The state of the pull
self.state = pull.get('state')
#: The title of the request
| PullRequest.repository return value on GitHub Enterprise instances
[`github3.pulls.PullRequest.repository`](https://github.com/sigmavirus24/github3.py/blob/0.9.3/github3/pulls.py#L188-L189) returns `(u'api/v3/repos/user', u'repo')` on GitHub Enterprise instances. I believe the expected return should be `(u'user', u'repo')`.
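A minimal sketch (not taken from the report) of the mis-parse; the Enterprise hostname and the owner/repo names below are placeholders:
```
import re

# The pattern the old code used to recover (owner, repository) from the
# issue URL. \S+ is greedy and happily matches '/' characters.
pattern = r'https?://[\w\d\-\.\:]+/(\S+)/(\S+)/(?:issues|pull)?/\d+'

# On github.com the groups happen to line up with owner and repository:
print(re.match(pattern, 'https://github.com/user/repo/issues/1').groups())
# -> ('user', 'repo')

# On an Enterprise host the '/api/v3/repos' prefix leaks into the first group:
print(re.match(pattern, 'https://ghe.example.com/api/v3/repos/user/repo/issues/1').groups())
# -> ('api/v3/repos/user', 'repo')
```
The merged patch sidesteps the regex entirely and reads the tuple from `self.base.repo` instead.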
| sigmavirus24/github3.py | diff --git a/tests/cassettes/PullRequest_single.json b/tests/cassettes/PullRequest_single.json
new file mode 100644
index 00000000..47a27b6a
--- /dev/null
+++ b/tests/cassettes/PullRequest_single.json
@@ -0,0 +1,1 @@
+{"http_interactions": [{"request": {"body": {"string": "", "encoding": "utf-8"}, "headers": {"Accept-Charset": "utf-8", "Content-Type": "application/json", "Accept-Encoding": "gzip, deflate", "Accept": "application/vnd.github.v3.full+json", "User-Agent": "github3.py/1.0.0"}, "method": "GET", "uri": "https://api.github.com/repos/sigmavirus24/github3.py"}, "response": {"body": {"string": "", "base64_string": "H4sIAAAAAAAAA62YTY+jOBCG/0rEddNxgKTzcZmd0+ze5jB72UtkwASrASPbJEqj/u/7GgOBrDZJt1dqRQntevy6XGWq3Hg88fbhxl9ufH/ulbRg3t47cp3VUbioLt7cS+s8P3T/UPxY0BOXtQpWZDJKnEsmvX3j5eLISzDGQ0Ex0wSr5TZczj16oprKQy1zjMu0rtSeEPtQLSy1VkzGotSs1ItYFKQm1vgbUEfZAQzTi/3VNly/JttdugvW7HUX+JttxJif7EIapxsY3ExU8W4SS8ZMityozXSR3+izulqTm8GpyHNxBuV2RY8mIoOlcXNL4eXxixRYNkTojMGxWNKHcRRX+vOiWqsGu6v0gSeGo7BbkiWfFtbZQZYJjo+GSFaJFlhHKpa80lyUnxc4sQZNyCMt+Tv9Gg3WChAj7fNSWitYsxMC9fPm1qwhleQnGl+MaySLGT/B2V9E3tiDqC+Vyem/EBTG9VyzA00Kk6MpzRX7mHvt9BqD2gdzpOSz0T89AxI27Com/HnRmShnOY8klZdZKuSMI6FlSmPE6uyMM2aGcJ394PqPOpp9//nnKYRAjHsblNzN3Nb5k2ScyjGkB3tyF4H0BACS3tjFiWPsG4LPLp9ipDqNhKRaPDo07gucgBoy/mliSTNaOAlvAQBlQrh5sgUAxJWq2VOhfX/hLUeRPn/KuojskfdM1txHWwK0UoVzvmTMyYMDpCH9qYx0KOPMDdszGmK/tbtNj05SjT0wUS4iJw5elKSFNERl1L6H9MFVnaEaxgQqWeos1TAGqJaO+93KNJABiZegxtY76ewZpOk8mtPyWNOjG3WAYNfNq/pI3x8WMfdz50oB0pRvkke1+yF35RiltnZAvru59Iq5QtuC5H6Z88ABo8KmdUFR8Ed1wX1ih5iE/f+ANXF6iza/H5cxj+UaRkOuZ7I99Du6i3e7U7/XSZrrHF2v4BQSPYM0v1VUZ+bkwlQVlcxFdIcgTURRbC0WiyZjtC2rCyYdM9gSgKIyzlA1uuhsegaqnoLqtlpPjcwE1XsuaOLk2wECoN1GF62WMI6xCk2qk8AWMCYWPGdKi9LtjL1SxuxSaJ7y+JmO5X66TUDNN8XLmM1pns8RtZrHHHGMWtvsIgpO5uYhS8AycEdgO5WcIaSdvC6ZZTTEdpqxZGhEkgPVaCCCpR+8LMMXP/zl7/br7X4d/o2V1FUyGbN6WW5egnbMao0/M6aqVTbC2CHbX8tgv16BZIbgBOxCEN9w/4BP3Hn8q78ftRTm1gCGSmVXw9+vZvv/uBzpzOIcsXQT9M/Pebp9LT02hdRMFKxCmdBdswyrDKvLAp5O0H4lIlYL9MDErIy/Y+g2CMJJQRCLusR++Ds8PlON2hWv3vHDvpAYmj4zNVUHm6beXsvadJV4cj0GRg/P/I0PHZ9t2jr66wanJJdSdJdFJZIU/X7Fyo49yMBA263tjc1oBHTjQS+7W0XCUlrn+mCLZ8hOUPXnooLukukz2r4ebGjjiqNf9vbjH8Cshcw6EwAA", "encoding": "utf-8"}, "headers": {"vary": "Accept, Accept-Encoding", "x-served-by": "03d91026ad8428f4d9966d7434f9d82e", "x-xss-protection": "1; mode=block", "x-content-type-options": "nosniff", "etag": "\"103c261a609253cc5113039f6ab21f0e\"", "access-control-allow-credentials": "true", "status": "200 OK", "x-ratelimit-remaining": "54", "x-github-media-type": "github.v3; param=full; format=json", "access-control-expose-headers": "ETag, Link, X-GitHub-OTP, X-RateLimit-Limit, X-RateLimit-Remaining, X-RateLimit-Reset, X-OAuth-Scopes, X-Accepted-OAuth-Scopes, X-Poll-Interval", "transfer-encoding": "chunked", "x-github-request-id": "48A0C4D3:7DF9:2EE6AA1:53D5BBD8", "cache-control": "public, max-age=60, s-maxage=60", "last-modified": "Wed, 23 Jul 2014 19:45:45 GMT", "date": "Mon, 28 Jul 2014 02:56:25 GMT", "access-control-allow-origin": "*", "content-security-policy": "default-src 'none'", "content-encoding": "gzip", "strict-transport-security": "max-age=31536000; includeSubdomains", "server": "GitHub.com", "x-ratelimit-limit": "60", "x-frame-options": "deny", "content-type": "application/json; charset=utf-8", "x-ratelimit-reset": "1406517556"}, "status": {"message": "OK", "code": 200}, "url": "https://api.github.com/repos/sigmavirus24/github3.py"}, "recorded_at": "2014-07-28T02:56:25"}, {"request": {"body": {"string": "", "encoding": "utf-8"}, "headers": {"Accept-Charset": "utf-8", "Content-Type": "application/json", "Accept-Encoding": "gzip, deflate", "Accept": "application/vnd.github.v3.full+json", "User-Agent": "github3.py/1.0.0"}, "method": "GET", "uri": 
"https://api.github.com/repos/sigmavirus24/github3.py/pulls/235"}, "response": {"body": {"string": "", "base64_string": "H4sIAAAAAAAAA+1aW2/ruBH+K4KfWtSxLMkX2Tg42/PUC4p20WZfFgs4lETZRGRJlShnc4T8935DSral+NixafQpgBPYEufjcIYz5FzqQVUkg+VgI2VeLm2b5WK0FnJTBaMw29oFz7PSLsV6y3aiqEp3Yuu33ih/tfMqSUrb9aaD4UBEg6Uznbjj6WQxBNw2WXWRj1DP4TVwkYjj2wFGRA6mcibDjQGMoqfFlWXFezhXSUoBtKJKq23Ai8ESghsOSskkhwLCJCt5hLmkkAk9+BZF1r95wlnJR6wsuSwtJmUhggrjh4OqJIh6kGRrkWJ4sBYJUGWjCm/mePPJcMB2TLKiz7p6WDaKJqQwSyVPpdJ5ZWvinwC1LhoAUu9gNnZmk2juLgIWMz/0He7NvTh0o/GYx8EiJLbO7SaaqbSPOD2/TY4GxlmSZC+g7q+ku127E9h7KjCmv4t0fQMCqGo7kxsOQWIJbyQYUcrrmFEUNcynlCsREQZUXxQ8uoqhhgbsvKTgpFYWqsCqoAwLkUuRpdcx1qEEUlasWSq+s+uRQFkCQDmNq1alKEDJd9iE15FqktrOC7Fj4SuJouAhFzsI9ga4Hi3Q5GtO5vgL2RvELCRfsWhLNhezpORvw0GQRa8Y8bjh1t//869/WhEnTQTYNRazCm3ClkjDpMIbCyaWJ1xyPImzYqvEbOEjyMCVnY8si4z/qWv9Twfzt17goYGdYC9ZWWw9fSO6JyJ83IjSwifccJbzwsIUFjauxfI8EaGeTG5Yam3Zs2aw5Dkr4IPA6X/ho6S1E+wwN5ZbrDRbf/jj0+i34reU/v7B2Y7/aBRWZuUJC/mQvoVwYFaZbXmWcmvDsEgLRqQYE3JowZ1ZW2idpPBuxSAnsURZWNEYxf4IWggLDo6jFZOQuzt2Jg/j6cPYfRzPls4Yn18xpsqj/pj5gzt7dLzldLL01BjtdHsw3qMzWU4cuGcasuXF+t1MJ4asoNetkKtyw8CTz2Ivjlw/DrkXBn7ku5PAC+fuxI3mXjiZu/NoFgaejwkgXLFOOfZYiuMUE4oEWoCw2gca+IJVfPCgthswZSU7wV8U25eN7hp4QnuHf94dfAS+ZdSu9flJln4P5g9HsxJOwz0dy9imRlJvMezpbBw6C2c6ZaHvcah97nhRMJ2F+MwZZ8xx3XDq0m6A2eKkxbHOAk73svYMXDZ+5EHb4gNdBZSQYww6/U7vxCvm/rxSnLgBf14pPq8U//8rBTlE7f/pfoET9NgnHIdPywiXnCTL977g8Fvb/5yHXuQtcE0f+67rBg7nkb+IZ4478ya+EzBvOlv4E4o8+vZ/PA9eUwjgTsa+N74xrNDE78OK0Jn44ILYWrhTPlu4ztwPOHeihcfCeE6sXQ4retyeDy16g68KLzrR680hxgkUkzCjF1IbhBodpPuFG13Y42AF6r065OigXRt2dIivDz065PcJP3ocdcIXiOdyCKL9Ra1s1Js747njDAcp21LkckjYACrGHXPVvOjtGUr6UFqH9EHB5XF24XjopyugJIlKZXQk+OkKPph5+HQFJ3OtJ7bULdkIlQihlKJKT9yYioWN6yyGuvzBifz8KjeI0BMRFKx41dE7UoZFzEJKI6iEBOUZ/iLkX6vA+vbz33YeuZuseN5zcvYQPxsCNkhG0RixQnHpM381wiH62sb/JosXIjXJggwplOxSevLsGhF9HgHVnZ8U60rOtkaMKwAAbbLs2QhIAdApoLLiH0mynV94E4G3R+khvDeH1gjgtc2yGC18D1Ir/09agTmkyLUZwbYYta2/KW2ztREm0RN7SRYY4eDyYCuQ2kY8oTPfcmXKHaESRgcUuQxjVgljDyoLbqYYxSaB7CHvmhaqG4kmLF1XbG3G6x4EWqdb+5p9v1guOW+WBxRAUoFI1Z6MndwBhzjVgQAKV0aqP4I5gKqjyyTveFwYUSKgBK8Rnw1EZ9vfAZb2aR/6HqnRFqO2Dz5ZO/3mjYl0G6/fztFN66pqpKGoNYZd/wlV3E2TLUapw6iOC24Jwq4pJ/M2Go1qStgSuKoTGHGsEQDFinCD+pWJcOsWQ5eYVJ0wJjYjhHZJxiIjTvcgANRqNOFVIxzvMdVXYAKpAI4R90UVI9gDyjF2mkkRN7U1I/gOUP1TiZIhHzLUhLDlpAgF9jHu2qRFVR0wmksjYBnIERBiU0IwwmwxalvXt/tVOvdh7D04qJ0tllN/OVXltdNVOjVmMsWHxuRVuemW4FDI8x/HLhXypj4NgQds9gu+oY0F/9sWlh9kOaj5AYRl2faH4PefD2TL82QoHKZ9A/34nLv+sXSZFKxuUEPNcU1ANEbdOvtVevnrCPXQCOEXCqXlCOkwm1YmvmMocr5e50IQZlWKqqmzwOMXam+ho/f4YXuR2Ad9NDUrV9pMB0tZVBRV4snBDRw9fBHPYh/xqZirRZ/NqbhZFFnTjqLLnVnO0wZ7zwYG6mhtSTRHI8A3HrRsN6uIeMyqRK705RlstynwN+TOV4lIEaygolbyBLWyerDRNTPDhidAU4b5BOBlXaq+KdXnBBQl1hMwt7YXAbI9Uu+HeqiMAr5XODadZd9EdmaS+8zx/qrRCAu3uvvMAJ22FzHSbRstmIJfX0imrU9tKSu9SQdf8q/3bk/5EmYR/9rtUfmC1ePhjztVNJFqV2nG3rFppctRp3OlmWz0xc6/ooMlhTx0C8tlmpu7Wc4J6GRjCzEHZ6v0Jvnv1Ntyb6V11fVjPSkN3VE17bwdpYxIE1oNJ9/fLPneKk8KG4LWrT3tiaZ+sYB6L5tGnPbBiuyPjt0qfU5xf96TrgL0fR1aMD+LJL1D60RG+7NI8lkkoTZYs3ZN43rp4YqEOum7Cw2uxjRAXQtcxONRJFQzLC6cPhVG0L6pf6ITIkQzJTUIxtS2h+Fv/wNzD6LGUi8AAA==", "encoding": "utf-8"}, "headers": {"vary": "Accept, Accept-Encoding", "x-served-by": "3061975e1f37121b3751604ad153c687", "x-xss-protection": "1; mode=block", "x-content-type-options": "nosniff", "etag": "\"42d9e03172ef97d5dbb406c4702c5c0e\"", "access-control-allow-credentials": "true", "status": "200 OK", "x-ratelimit-remaining": "53", "x-github-media-type": "github.v3; param=full; format=json", "access-control-expose-headers": "ETag, Link, X-GitHub-OTP, X-RateLimit-Limit, 
X-RateLimit-Remaining, X-RateLimit-Reset, X-OAuth-Scopes, X-Accepted-OAuth-Scopes, X-Poll-Interval", "transfer-encoding": "chunked", "x-github-request-id": "48A0C4D3:7DF9:2EE6AAD:53D5BBD9", "cache-control": "public, max-age=60, s-maxage=60", "last-modified": "Mon, 28 Jul 2014 01:14:14 GMT", "date": "Mon, 28 Jul 2014 02:56:25 GMT", "access-control-allow-origin": "*", "content-security-policy": "default-src 'none'", "content-encoding": "gzip", "strict-transport-security": "max-age=31536000; includeSubdomains", "server": "GitHub.com", "x-ratelimit-limit": "60", "x-frame-options": "deny", "content-type": "application/json; charset=utf-8", "x-ratelimit-reset": "1406517556"}, "status": {"message": "OK", "code": 200}, "url": "https://api.github.com/repos/sigmavirus24/github3.py/pulls/235"}, "recorded_at": "2014-07-28T02:56:25"}], "recorded_with": "betamax/0.3.2"}
diff --git a/tests/integration/test_pulls.py b/tests/integration/test_pulls.py
index b32ef019..70c00b16 100644
--- a/tests/integration/test_pulls.py
+++ b/tests/integration/test_pulls.py
@@ -130,6 +130,14 @@ class TestPullRequest(IntegrationHelper):
p = self.get_pull_request(num=241)
assert p.update(p.title) is True
+ def test_repository(self):
+ """Show that the pull request has the owner repository."""
+ self.basic_login()
+ cassette_name = self.cassette_name('single')
+ with self.recorder.use_cassette(cassette_name):
+ p = self.get_pull_request()
+ assert p.repository == ('sigmavirus24', 'github3.py')
+
class TestReviewComment(IntegrationHelper):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest>=2.3.5",
"betamax>=0.5.0",
"betamax_matchers>=0.2.0",
"mock==1.0.1"
],
"pre_install": null,
"python": "3.4",
"reqs_path": [
"dev-requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
betamax==0.8.1
betamax-matchers==0.4.0
certifi==2021.5.30
charset-normalizer==2.0.12
distlib==0.3.9
filelock==3.4.1
-e git+https://github.com/sigmavirus24/github3.py.git@f6948ac9097f61dd44d8666ac1de42edbea666d5#egg=github3.py
idna==3.10
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
mock==1.0.1
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
requests==2.27.1
requests-toolbelt==1.0.0
six==1.17.0
swebench-matterhorn @ file:///swebench_matterhorn
toml==0.10.2
tomli==1.2.3
tox==3.28.0
typing_extensions==4.1.1
uritemplate==4.1.1
uritemplate.py==3.0.2
urllib3==1.26.20
virtualenv==20.17.1
zipp==3.6.0
| name: github3.py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- betamax==0.8.1
- betamax-matchers==0.4.0
- charset-normalizer==2.0.12
- distlib==0.3.9
- filelock==3.4.1
- idna==3.10
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- mock==1.0.1
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- requests==2.27.1
- requests-toolbelt==1.0.0
- six==1.17.0
- swebench-matterhorn==0.0.0
- toml==0.10.2
- tomli==1.2.3
- tox==3.28.0
- typing-extensions==4.1.1
- uritemplate==4.1.1
- uritemplate-py==3.0.2
- urllib3==1.26.20
- virtualenv==20.17.1
- wheel==0.21.0
- zipp==3.6.0
prefix: /opt/conda/envs/github3.py
| [
"tests/integration/test_pulls.py::TestPullRequest::test_repository"
]
| []
| [
"tests/integration/test_pulls.py::TestPullRequest::test_close",
"tests/integration/test_pulls.py::TestPullRequest::test_commits",
"tests/integration/test_pulls.py::TestPullRequest::test_create_comment",
"tests/integration/test_pulls.py::TestPullRequest::test_create_review_comment",
"tests/integration/test_pulls.py::TestPullRequest::test_diff",
"tests/integration/test_pulls.py::TestPullRequest::test_files",
"tests/integration/test_pulls.py::TestPullRequest::test_is_merged",
"tests/integration/test_pulls.py::TestPullRequest::test_issue",
"tests/integration/test_pulls.py::TestPullRequest::test_issue_comments",
"tests/integration/test_pulls.py::TestPullRequest::test_patch",
"tests/integration/test_pulls.py::TestPullRequest::test_reopen",
"tests/integration/test_pulls.py::TestPullRequest::test_review_comments",
"tests/integration/test_pulls.py::TestPullRequest::test_update",
"tests/integration/test_pulls.py::TestReviewComment::test_reply",
"tests/integration/test_pulls.py::TestPullFile::test_contents"
]
| []
| BSD 3-Clause "New" or "Revised" License | 545 | [
"github3/pulls.py"
]
| [
"github3/pulls.py"
]
|
|
box__box-python-sdk-134 | 3ba6cf1fe50f2f9b3c273d96375dfdc98ea87d3c | 2016-05-18 01:31:58 | ded623f4b6de0530d8f983d3c3d2cafe646c126b | diff --git a/HISTORY.rst b/HISTORY.rst
index ec26cb8..e7ca71f 100644
--- a/HISTORY.rst
+++ b/HISTORY.rst
@@ -6,6 +6,11 @@ Release History
Upcoming
++++++++
+1.5.2
+++++++++++++++++++
+
+- Bugfix so that ``OAuth2`` always has the correct tokens after a call to ``refresh()``.
+
1.5.1 (2016-03-23)
++++++++++++++++++
diff --git a/boxsdk/auth/oauth2.py b/boxsdk/auth/oauth2.py
index a810e79..6b0e9d5 100644
--- a/boxsdk/auth/oauth2.py
+++ b/boxsdk/auth/oauth2.py
@@ -167,10 +167,17 @@ def _get_tokens(self):
"""
Get the current access and refresh tokens.
+ This is a protected method that can be overridden to look up tokens
+ from an external source (the inverse of the `store_tokens` callback).
+
+ This method does not need to update this object's private token
+ attributes. Its caller in :class:`OAuth2` is responsible for that.
+
:return:
Tuple containing the current access token and refresh token.
+ One or both of them may be `None`, if they aren't set.
:rtype:
- `tuple` of (`unicode`, `unicode`)
+ `tuple` of ((`unicode` or `None`), (`unicode` or `None`))
"""
return self._access_token, self._refresh_token
@@ -181,16 +188,24 @@ def refresh(self, access_token_to_refresh):
:param access_token_to_refresh:
The expired access token, which needs to be refreshed.
+ Pass `None` if you don't have the access token.
:type access_token_to_refresh:
- `unicode`
+ `unicode` or `None`
+ :return:
+ Tuple containing the new access token and refresh token.
+ The refresh token may be `None`, if the authentication scheme
+ doesn't use one, or keeps it hidden from this client.
+ :rtype:
+ `tuple` of (`unicode`, (`unicode` or `None`))
"""
with self._refresh_lock:
- access_token, refresh_token = self._get_tokens()
+ access_token, refresh_token = self._get_and_update_current_tokens()
# The lock here is for handling that case that multiple requests fail, due to access token expired, at the
# same time to avoid multiple session renewals.
- if access_token_to_refresh == access_token:
- # If the active access token is the same as the token needs to be refreshed, we make the request to
- # refresh the token.
+ if (access_token is None) or (access_token_to_refresh == access_token):
+ # If the active access token is the same as the token needs to
+ # be refreshed, or if we don't currently have any active access
+ # token, we make the request to refresh the token.
return self._refresh(access_token_to_refresh)
else:
# If the active access token (self._access_token) is not the same as the token needs to be refreshed,
@@ -213,11 +228,37 @@ def _get_state_csrf_token():
return 'box_csrf_token_' + ''.join(ascii_alphabet[int(system_random.random() * ascii_len)] for _ in range(16))
def _store_tokens(self, access_token, refresh_token):
- self._access_token = access_token
- self._refresh_token = refresh_token
+ self._update_current_tokens(access_token, refresh_token)
if self._store_tokens_callback is not None:
self._store_tokens_callback(access_token, refresh_token)
+ def _get_and_update_current_tokens(self):
+ """Get the current access and refresh tokens, while also storing them in this object's private attributes.
+
+ :return:
+ Same as for :meth:`_get_tokens()`.
+ """
+ tokens = self._get_tokens()
+ self._update_current_tokens(*tokens)
+ return tokens
+
+ def _update_current_tokens(self, access_token, refresh_token):
+ """Store the latest tokens in this object's private attributes.
+
+ :param access_token:
+ The latest access token.
+ May be `None`, if it hasn't been provided.
+ :type access_token:
+ `unicode` or `None`
+ :param refresh_token:
+ The latest refresh token.
+ May be `None`, if the authentication scheme doesn't use one, or if
+ it hasn't been provided.
+ :type refresh_token:
+ `unicode` or `None`
+ """
+ self._access_token, self._refresh_token = access_token, refresh_token
+
def send_token_request(self, data, access_token, expect_refresh_token=True):
"""
Send the request to acquire or refresh an access token.
@@ -262,7 +303,7 @@ def revoke(self):
Revoke the authorization for the current access/refresh token pair.
"""
with self._refresh_lock:
- access_token, refresh_token = self._get_tokens()
+ access_token, refresh_token = self._get_and_update_current_tokens()
token_to_revoke = access_token or refresh_token
if token_to_revoke is None:
return
diff --git a/boxsdk/auth/redis_managed_oauth2.py b/boxsdk/auth/redis_managed_oauth2.py
index f333849..a8d85ac 100644
--- a/boxsdk/auth/redis_managed_oauth2.py
+++ b/boxsdk/auth/redis_managed_oauth2.py
@@ -30,13 +30,7 @@ def __init__(self, unique_id=uuid4(), redis_server=None, *args, **kwargs):
refresh_lock = Lock(redis=self._redis_server, name='{0}_lock'.format(self._unique_id))
super(RedisManagedOAuth2Mixin, self).__init__(*args, refresh_lock=refresh_lock, **kwargs)
if self._access_token is None:
- self._update_current_tokens()
-
- def _update_current_tokens(self):
- """
- Get the latest tokens from redis and store them.
- """
- self._access_token, self._refresh_token = self._redis_server.hvals(self._unique_id) or (None, None)
+ self._get_and_update_current_tokens()
@property
def unique_id(self):
@@ -51,8 +45,7 @@ def _get_tokens(self):
Base class override.
Gets the latest tokens from redis before returning them.
"""
- self._update_current_tokens()
- return super(RedisManagedOAuth2Mixin, self)._get_tokens()
+ return self._redis_server.hvals(self._unique_id) or (None, None)
def _store_tokens(self, access_token, refresh_token):
"""
diff --git a/boxsdk/version.py b/boxsdk/version.py
index c64a173..9f6ca5f 100644
--- a/boxsdk/version.py
+++ b/boxsdk/version.py
@@ -3,4 +3,4 @@
from __future__ import unicode_literals, absolute_import
-__version__ = '1.5.1'
+__version__ = '1.5.2'
diff --git a/requirements-dev.txt b/requirements-dev.txt
index f962a37..66fa4cd 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -1,17 +1,10 @@
-r requirements.txt
bottle
jsonpatch
-mock<=1.0.1
+mock>=2.0.0
pep8
pylint
-
-# Temporary version exclusion of the 2.8 release line.
-# <https://github.com/pytest-dev/pytest/issues/1085> breaks pytest on Python 2, only in 2.8.1. Fixed in upcoming 2.8.2.
-# <https://github.com/pytest-dev/pytest/issues/1035> breaks pytest on Python 2.6, on all currently existing 2.8.*
-# releases. Has not yet been fixed in the master branch, so there isn't a guarantee that it will work in the upcoming
-# 2.8.2 release.
-pytest<2.8
-
+pytest>=2.8.3
pytest-cov
pytest-xdist
sphinx
| BoxSession._renew_session does not set previously refreshed tokens
I am using `CooperativelyManagedOAuth2` so that I can share the same OAuth tokens across multiple clients, but refreshed tokens do not get updated in the session if the tokens were refreshed by another client. I have two instances of the OAuth class using the same tokens, and two clients, each using one of the instances.
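A minimal sketch of the scenario, assuming a shared dict-based token store with made-up token values (none of the names below come from the report); it mirrors the worked example that follows:
```
from boxsdk.auth.cooperatively_managed_oauth2 import CooperativelyManagedOAuth2

store = {'tokens': ('old-access', 'old-refresh')}  # shared by both clients

def retrieve_tokens():
    # Both OAuth2 instances read the latest token pair from the shared store.
    return store['tokens']

def make_oauth():
    access, refresh = store['tokens']
    return CooperativelyManagedOAuth2(
        retrieve_tokens=retrieve_tokens,
        client_id='CLIENT_ID', client_secret='CLIENT_SECRET',
        access_token=access, refresh_token=refresh,
    )

oauth_a, oauth_b = make_oauth(), make_oauth()

# Suppose the first client refreshes and writes the new pair into the store.
store['tokens'] = ('new-access', 'new-refresh')

# The second client's refresh() sees newer tokens and skips the network call.
# With the fix it also updates its own state; before the fix, access_token
# still returned 'old-access' here, so the retried request failed again.
assert oauth_b.refresh('old-access') == ('new-access', 'new-refresh')
assert oauth_b.access_token == 'new-access'
```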
For example, say there are two clients initialized with the same tokens but with different `CooperativelyManagedOAuth2` instances. The first client makes a request, sees that the access token is expired, and refreshes the tokens; the tokens in the first client's OAuth instance are updated with the new pair. The second client then makes a request, sees that its access token is expired, and tries to refresh it. Because the token has already been refreshed it skips the refresh, but it also fails to replace its old tokens, so when the second client retries the request it raises a `BoxAPIException`. | box/box-python-sdk | diff --git a/test/conftest.py b/test/conftest.py
index c5e61dd..a5958da 100644
--- a/test/conftest.py
+++ b/test/conftest.py
@@ -102,6 +102,12 @@ def access_token():
return 'T9cE5asGnuyYCCqIZFoWjFHvNbvVqHjl'
[email protected](scope='session')
+def new_access_token():
+ # Must be distinct from access_token.
+ return 'ZFoWjFHvNbvVqHjlT9cE5asGnuyYCCqI'
+
+
@pytest.fixture(scope='session')
def refresh_token():
return 'J7rxTiWOHMoSC1isKZKBZWizoRXjkQzig5C6jFgCVJ9bUnsUfGMinKBDLZWP9BgRb'
diff --git a/test/unit/auth/test_jwt_auth.py b/test/unit/auth/test_jwt_auth.py
index c4d5f11..a117850 100644
--- a/test/unit/auth/test_jwt_auth.py
+++ b/test/unit/auth/test_jwt_auth.py
@@ -73,8 +73,8 @@ def jwt_auth_init_mocks(
}
mock_network_layer.request.return_value = successful_token_response
- key_file = mock_open()
- with patch('boxsdk.auth.jwt_auth.open', key_file, create=True) as jwt_auth_open:
+ key_file_read_data = 'key_file_read_data'
+ with patch('boxsdk.auth.jwt_auth.open', mock_open(read_data=key_file_read_data), create=True) as jwt_auth_open:
with patch('cryptography.hazmat.primitives.serialization.load_pem_private_key') as load_pem_private_key:
oauth = JWTAuth(
client_id=fake_client_id,
@@ -89,9 +89,9 @@ def jwt_auth_init_mocks(
)
jwt_auth_open.assert_called_once_with(sentinel.rsa_path)
- key_file.return_value.read.assert_called_once_with() # pylint:disable=no-member
+ jwt_auth_open.return_value.read.assert_called_once_with() # pylint:disable=no-member
load_pem_private_key.assert_called_once_with(
- key_file.return_value.read.return_value, # pylint:disable=no-member
+ key_file_read_data,
password=rsa_passphrase,
backend=default_backend(),
)
diff --git a/test/unit/auth/test_oauth2.py b/test/unit/auth/test_oauth2.py
index af4d6ed..8fdf7b2 100644
--- a/test/unit/auth/test_oauth2.py
+++ b/test/unit/auth/test_oauth2.py
@@ -5,9 +5,11 @@
from functools import partial
import re
from threading import Thread
+import uuid
from mock import Mock
import pytest
+from six.moves import range # pylint:disable=redefined-builtin
from six.moves.urllib import parse as urlparse # pylint:disable=import-error,no-name-in-module,wrong-import-order
from boxsdk.exception import BoxOAuthException
@@ -314,3 +316,41 @@ def test_revoke_sends_revoke_request(
access_token=access_token,
)
assert oauth.access_token is None
+
+
+def test_tokens_get_updated_after_noop_refresh(client_id, client_secret, access_token, new_access_token, refresh_token, mock_network_layer):
+ """`OAuth2` object should update its state with new tokens, after no-op refresh.
+
+ If the protected method `_get_tokens()` returns new tokens, refresh is
+ skipped, and those tokens are used.
+
+ This is a regression test for issue #128 [1]. We would return the new
+ tokens without updating the object state. Subsequent uses of the `OAuth2`
+ object would use the old tokens.
+
+ [1] <https://github.com/box/box-python-sdk/issues/128>
+ """
+ new_refresh_token = uuid.uuid4().hex
+ new_tokens = (new_access_token, new_refresh_token)
+
+ class GetTokensOAuth2(OAuth2):
+ def _get_tokens(self):
+ """Return a new set of tokens, without updating any state.
+
+ In order for the test to pass, the `OAuth2` object must be
+ correctly programmed to take this return value and use it to update
+ its state.
+ """
+ return new_tokens
+
+ oauth = GetTokensOAuth2(
+ client_id=client_id,
+ client_secret=client_secret,
+ access_token=access_token,
+ refresh_token=refresh_token,
+ network_layer=mock_network_layer,
+ )
+ assert oauth.access_token == access_token
+
+ assert oauth.refresh(access_token) == new_tokens
+ assert oauth.access_token == new_access_token
diff --git a/test/unit/auth/test_redis_managed_oauth2.py b/test/unit/auth/test_redis_managed_oauth2.py
index c03eec1..dc67097 100644
--- a/test/unit/auth/test_redis_managed_oauth2.py
+++ b/test/unit/auth/test_redis_managed_oauth2.py
@@ -2,6 +2,8 @@
from __future__ import unicode_literals, absolute_import
+import uuid
+
from mock import Mock, patch
from boxsdk.auth import redis_managed_oauth2
@@ -21,19 +23,24 @@ def test_redis_managed_oauth2_gets_tokens_from_redis_on_init(access_token, refre
assert oauth2.unique_id is unique_id
-def test_redis_managed_oauth2_gets_tokens_from_redis_during_refresh(access_token, refresh_token):
+def test_redis_managed_oauth2_gets_tokens_from_redis_during_refresh(access_token, refresh_token, new_access_token):
+ new_refresh_token = uuid.uuid4().hex
redis_server = Mock(redis_managed_oauth2.StrictRedis)
- redis_server.hvals.return_value = access_token, refresh_token
+ redis_server.hvals.return_value = new_access_token, new_refresh_token
unique_id = Mock()
- with patch.object(redis_managed_oauth2.RedisManagedOAuth2Mixin, '_update_current_tokens'):
- oauth2 = redis_managed_oauth2.RedisManagedOAuth2(
- client_id=None,
- client_secret=None,
- unique_id=unique_id,
- redis_server=redis_server,
- )
+ oauth2 = redis_managed_oauth2.RedisManagedOAuth2(
+ access_token=access_token,
+ refresh_token=refresh_token,
+ client_id=None,
+ client_secret=None,
+ unique_id=unique_id,
+ redis_server=redis_server,
+ )
+ assert oauth2.access_token == access_token
+ redis_server.hvals.assert_not_called()
- assert oauth2.refresh('bogus_access_token') == (access_token, refresh_token)
+ assert oauth2.refresh('bogus_access_token') == (new_access_token, new_refresh_token)
+ assert oauth2.access_token == new_access_token
redis_server.hvals.assert_called_once_with(unique_id)
diff --git a/test/unit/network/test_logging_network.py b/test/unit/network/test_logging_network.py
index 3bd5eb0..58a8c61 100644
--- a/test/unit/network/test_logging_network.py
+++ b/test/unit/network/test_logging_network.py
@@ -28,7 +28,7 @@ def test_logging_network_does_not_call_setup_logging_if_logger_is_not_none():
logger = Mock(Logger)
with patch.object(logging_network, 'setup_logging') as setup_logging:
network = LoggingNetwork(logger)
- setup_logging.assert_never_called()
+ setup_logging.assert_not_called()
assert network.logger is logger
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 5
} | 1.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-xdist",
"mock",
"sqlalchemy",
"bottle"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.7",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | async-timeout==4.0.3
bottle==0.13.2
-e git+https://github.com/box/box-python-sdk.git@3ba6cf1fe50f2f9b3c273d96375dfdc98ea87d3c#egg=boxsdk
certifi @ file:///croot/certifi_1671487769961/work/certifi
cffi==1.15.1
charset-normalizer==3.4.1
cryptography==44.0.2
exceptiongroup==1.2.2
execnet==2.0.2
greenlet==3.1.1
idna==3.10
importlib-metadata==6.7.0
iniconfig==2.0.0
mock==5.2.0
packaging==24.0
pluggy==1.2.0
pycparser==2.21
PyJWT==2.8.0
pytest==7.4.4
pytest-xdist==3.5.0
redis==5.0.8
requests==2.31.0
requests-toolbelt==1.0.0
six==1.17.0
SQLAlchemy==2.0.40
tomli==2.0.1
typing_extensions==4.7.1
urllib3==2.0.7
zipp==3.15.0
| name: box-python-sdk
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=22.3.1=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- async-timeout==4.0.3
- bottle==0.13.2
- cffi==1.15.1
- charset-normalizer==3.4.1
- cryptography==44.0.2
- exceptiongroup==1.2.2
- execnet==2.0.2
- greenlet==3.1.1
- idna==3.10
- importlib-metadata==6.7.0
- iniconfig==2.0.0
- mock==5.2.0
- packaging==24.0
- pluggy==1.2.0
- pycparser==2.21
- pyjwt==2.8.0
- pytest==7.4.4
- pytest-xdist==3.5.0
- redis==5.0.8
- requests==2.31.0
- requests-toolbelt==1.0.0
- six==1.17.0
- sqlalchemy==2.0.40
- tomli==2.0.1
- typing-extensions==4.7.1
- urllib3==2.0.7
- zipp==3.15.0
prefix: /opt/conda/envs/box-python-sdk
| [
"test/unit/auth/test_oauth2.py::test_tokens_get_updated_after_noop_refresh"
]
| []
| [
"test/unit/auth/test_jwt_auth.py::test_authenticate_app_user_sends_post_request_with_correct_params[16-RS256-None]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_app_user_sends_post_request_with_correct_params[16-RS256-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_app_user_sends_post_request_with_correct_params[16-RS512-None]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_app_user_sends_post_request_with_correct_params[16-RS512-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_app_user_sends_post_request_with_correct_params[32-RS256-None]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_app_user_sends_post_request_with_correct_params[32-RS256-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_app_user_sends_post_request_with_correct_params[32-RS512-None]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_app_user_sends_post_request_with_correct_params[32-RS512-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_app_user_sends_post_request_with_correct_params[128-RS256-None]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_app_user_sends_post_request_with_correct_params[128-RS256-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_app_user_sends_post_request_with_correct_params[128-RS512-None]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_app_user_sends_post_request_with_correct_params[128-RS512-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[16-RS256-None]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[16-RS256-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[16-RS512-None]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[16-RS512-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[32-RS256-None]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[32-RS256-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[32-RS512-None]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[32-RS512-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[128-RS256-None]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[128-RS256-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[128-RS512-None]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[128-RS512-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[16-RS256-None]",
"test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[16-RS256-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[16-RS512-None]",
"test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[16-RS512-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[32-RS256-None]",
"test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[32-RS256-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[32-RS512-None]",
"test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[32-RS512-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[128-RS256-None]",
"test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[128-RS256-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[128-RS512-None]",
"test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[128-RS512-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[16-RS256-None]",
"test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[16-RS256-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[16-RS512-None]",
"test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[16-RS512-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[32-RS256-None]",
"test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[32-RS256-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[32-RS512-None]",
"test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[32-RS512-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[128-RS256-None]",
"test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[128-RS256-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[128-RS512-None]",
"test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[128-RS512-strong_password]",
"test/unit/auth/test_oauth2.py::test_get_correct_authorization_url[https://url.com/foo?bar=baz]",
"test/unit/auth/test_oauth2.py::test_get_correct_authorization_url[https://\\u0215\\u0155\\u013e.com/\\u0192\\u0151\\u0151?\\u0184\\u0201\\u0155=\\u0184\\u0201\\u017c]",
"test/unit/auth/test_oauth2.py::test_get_correct_authorization_url[None]",
"test/unit/auth/test_oauth2.py::test_authenticate_send_post_request_with_correct_params",
"test/unit/auth/test_oauth2.py::test_refresh_send_post_request_with_correct_params_and_handles_multiple_requests[0]",
"test/unit/auth/test_oauth2.py::test_refresh_send_post_request_with_correct_params_and_handles_multiple_requests[1]",
"test/unit/auth/test_oauth2.py::test_refresh_send_post_request_with_correct_params_and_handles_multiple_requests[2]",
"test/unit/auth/test_oauth2.py::test_refresh_send_post_request_with_correct_params_and_handles_multiple_requests[3]",
"test/unit/auth/test_oauth2.py::test_refresh_send_post_request_with_correct_params_and_handles_multiple_requests[4]",
"test/unit/auth/test_oauth2.py::test_refresh_send_post_request_with_correct_params_and_handles_multiple_requests[5]",
"test/unit/auth/test_oauth2.py::test_refresh_send_post_request_with_correct_params_and_handles_multiple_requests[6]",
"test/unit/auth/test_oauth2.py::test_refresh_send_post_request_with_correct_params_and_handles_multiple_requests[7]",
"test/unit/auth/test_oauth2.py::test_refresh_send_post_request_with_correct_params_and_handles_multiple_requests[8]",
"test/unit/auth/test_oauth2.py::test_refresh_send_post_request_with_correct_params_and_handles_multiple_requests[9]",
"test/unit/auth/test_oauth2.py::test_authenticate_stores_tokens_correctly",
"test/unit/auth/test_oauth2.py::test_refresh_gives_back_the_correct_response_and_handles_multiple_requests[network_response_with_missing_tokens0-0]",
"test/unit/auth/test_oauth2.py::test_refresh_gives_back_the_correct_response_and_handles_multiple_requests[network_response_with_missing_tokens0-1]",
"test/unit/auth/test_oauth2.py::test_refresh_gives_back_the_correct_response_and_handles_multiple_requests[network_response_with_missing_tokens0-2]",
"test/unit/auth/test_oauth2.py::test_refresh_gives_back_the_correct_response_and_handles_multiple_requests[network_response_with_missing_tokens0-3]",
"test/unit/auth/test_oauth2.py::test_refresh_gives_back_the_correct_response_and_handles_multiple_requests[network_response_with_missing_tokens0-4]",
"test/unit/auth/test_oauth2.py::test_refresh_gives_back_the_correct_response_and_handles_multiple_requests[network_response_with_missing_tokens0-5]",
"test/unit/auth/test_oauth2.py::test_refresh_gives_back_the_correct_response_and_handles_multiple_requests[network_response_with_missing_tokens0-6]",
"test/unit/auth/test_oauth2.py::test_refresh_gives_back_the_correct_response_and_handles_multiple_requests[network_response_with_missing_tokens0-7]",
"test/unit/auth/test_oauth2.py::test_refresh_gives_back_the_correct_response_and_handles_multiple_requests[network_response_with_missing_tokens0-8]",
"test/unit/auth/test_oauth2.py::test_refresh_gives_back_the_correct_response_and_handles_multiple_requests[network_response_with_missing_tokens0-9]",
"test/unit/auth/test_oauth2.py::test_refresh_gives_back_the_correct_response_and_handles_multiple_requests[network_response_with_missing_tokens1-0]",
"test/unit/auth/test_oauth2.py::test_refresh_gives_back_the_correct_response_and_handles_multiple_requests[network_response_with_missing_tokens1-1]",
"test/unit/auth/test_oauth2.py::test_refresh_gives_back_the_correct_response_and_handles_multiple_requests[network_response_with_missing_tokens1-2]",
"test/unit/auth/test_oauth2.py::test_refresh_gives_back_the_correct_response_and_handles_multiple_requests[network_response_with_missing_tokens1-3]",
"test/unit/auth/test_oauth2.py::test_refresh_gives_back_the_correct_response_and_handles_multiple_requests[network_response_with_missing_tokens1-4]",
"test/unit/auth/test_oauth2.py::test_refresh_gives_back_the_correct_response_and_handles_multiple_requests[network_response_with_missing_tokens1-5]",
"test/unit/auth/test_oauth2.py::test_refresh_gives_back_the_correct_response_and_handles_multiple_requests[network_response_with_missing_tokens1-6]",
"test/unit/auth/test_oauth2.py::test_refresh_gives_back_the_correct_response_and_handles_multiple_requests[network_response_with_missing_tokens1-7]",
"test/unit/auth/test_oauth2.py::test_refresh_gives_back_the_correct_response_and_handles_multiple_requests[network_response_with_missing_tokens1-8]",
"test/unit/auth/test_oauth2.py::test_refresh_gives_back_the_correct_response_and_handles_multiple_requests[network_response_with_missing_tokens1-9]",
"test/unit/auth/test_oauth2.py::test_refresh_gives_back_the_correct_response_and_handles_multiple_requests[network_response_with_missing_tokens2-0]",
"test/unit/auth/test_oauth2.py::test_refresh_gives_back_the_correct_response_and_handles_multiple_requests[network_response_with_missing_tokens2-1]",
"test/unit/auth/test_oauth2.py::test_refresh_gives_back_the_correct_response_and_handles_multiple_requests[network_response_with_missing_tokens2-2]",
"test/unit/auth/test_oauth2.py::test_refresh_gives_back_the_correct_response_and_handles_multiple_requests[network_response_with_missing_tokens2-3]",
"test/unit/auth/test_oauth2.py::test_refresh_gives_back_the_correct_response_and_handles_multiple_requests[network_response_with_missing_tokens2-4]",
"test/unit/auth/test_oauth2.py::test_refresh_gives_back_the_correct_response_and_handles_multiple_requests[network_response_with_missing_tokens2-5]",
"test/unit/auth/test_oauth2.py::test_refresh_gives_back_the_correct_response_and_handles_multiple_requests[network_response_with_missing_tokens2-6]",
"test/unit/auth/test_oauth2.py::test_refresh_gives_back_the_correct_response_and_handles_multiple_requests[network_response_with_missing_tokens2-7]",
"test/unit/auth/test_oauth2.py::test_refresh_gives_back_the_correct_response_and_handles_multiple_requests[network_response_with_missing_tokens2-8]",
"test/unit/auth/test_oauth2.py::test_refresh_gives_back_the_correct_response_and_handles_multiple_requests[network_response_with_missing_tokens2-9]",
"test/unit/auth/test_oauth2.py::test_token_request_raises_box_oauth_exception_when_getting_bad_network_response[test_method0]",
"test/unit/auth/test_oauth2.py::test_token_request_raises_box_oauth_exception_when_getting_bad_network_response[test_method1]",
"test/unit/auth/test_oauth2.py::test_token_request_raises_box_oauth_exception_when_no_json_object_can_be_decoded[test_method0]",
"test/unit/auth/test_oauth2.py::test_token_request_raises_box_oauth_exception_when_no_json_object_can_be_decoded[test_method1]",
"test/unit/auth/test_oauth2.py::test_token_request_raises_box_oauth_exception_when_tokens_are_not_in_the_response[network_response_with_missing_tokens0-test_method0]",
"test/unit/auth/test_oauth2.py::test_token_request_raises_box_oauth_exception_when_tokens_are_not_in_the_response[network_response_with_missing_tokens0-test_method1]",
"test/unit/auth/test_oauth2.py::test_token_request_raises_box_oauth_exception_when_tokens_are_not_in_the_response[network_response_with_missing_tokens1-test_method0]",
"test/unit/auth/test_oauth2.py::test_token_request_raises_box_oauth_exception_when_tokens_are_not_in_the_response[network_response_with_missing_tokens1-test_method1]",
"test/unit/auth/test_oauth2.py::test_token_request_raises_box_oauth_exception_when_tokens_are_not_in_the_response[network_response_with_missing_tokens2-test_method0]",
"test/unit/auth/test_oauth2.py::test_token_request_raises_box_oauth_exception_when_tokens_are_not_in_the_response[network_response_with_missing_tokens2-test_method1]",
"test/unit/auth/test_oauth2.py::test_token_request_allows_missing_refresh_token",
"test/unit/auth/test_oauth2.py::test_revoke_sends_revoke_request[fake_access_token-fake_refresh_token-fake_access_token]",
"test/unit/auth/test_oauth2.py::test_revoke_sends_revoke_request[None-fake_refresh_token-fake_refresh_token]",
"test/unit/auth/test_redis_managed_oauth2.py::test_redis_managed_oauth2_gets_tokens_from_redis_on_init",
"test/unit/auth/test_redis_managed_oauth2.py::test_redis_managed_oauth2_gets_tokens_from_redis_during_refresh",
"test/unit/auth/test_redis_managed_oauth2.py::test_redis_managed_oauth2_stores_tokens_to_redis_during_refresh",
"test/unit/network/test_logging_network.py::test_logging_network_calls_setup_logging_if_logger_is_none",
"test/unit/network/test_logging_network.py::test_logging_network_can_be_initialized_if_logger_is_none",
"test/unit/network/test_logging_network.py::test_logging_network_does_not_call_setup_logging_if_logger_is_not_none",
"test/unit/network/test_logging_network.py::test_logging_network_logs_requests[GET]",
"test/unit/network/test_logging_network.py::test_logging_network_logs_requests[POST]",
"test/unit/network/test_logging_network.py::test_logging_network_logs_requests[PUT]",
"test/unit/network/test_logging_network.py::test_logging_network_logs_requests[DELETE]",
"test/unit/network/test_logging_network.py::test_logging_network_logs_requests[OPTIONS]",
"test/unit/network/test_logging_network.py::test_logging_network_logs_successful_responses[GET]",
"test/unit/network/test_logging_network.py::test_logging_network_logs_successful_responses[POST]",
"test/unit/network/test_logging_network.py::test_logging_network_logs_successful_responses[PUT]",
"test/unit/network/test_logging_network.py::test_logging_network_logs_successful_responses[DELETE]",
"test/unit/network/test_logging_network.py::test_logging_network_logs_successful_responses[OPTIONS]",
"test/unit/network/test_logging_network.py::test_logging_network_logs_non_successful_responses[502-GET]",
"test/unit/network/test_logging_network.py::test_logging_network_logs_non_successful_responses[502-POST]",
"test/unit/network/test_logging_network.py::test_logging_network_logs_non_successful_responses[502-PUT]",
"test/unit/network/test_logging_network.py::test_logging_network_logs_non_successful_responses[502-DELETE]",
"test/unit/network/test_logging_network.py::test_logging_network_logs_non_successful_responses[502-OPTIONS]",
"test/unit/network/test_logging_network.py::test_logging_network_logs_non_successful_responses[503-GET]",
"test/unit/network/test_logging_network.py::test_logging_network_logs_non_successful_responses[503-POST]",
"test/unit/network/test_logging_network.py::test_logging_network_logs_non_successful_responses[503-PUT]",
"test/unit/network/test_logging_network.py::test_logging_network_logs_non_successful_responses[503-DELETE]",
"test/unit/network/test_logging_network.py::test_logging_network_logs_non_successful_responses[503-OPTIONS]"
]
| []
| Apache License 2.0 | 546 | [
"HISTORY.rst",
"boxsdk/auth/oauth2.py",
"boxsdk/auth/redis_managed_oauth2.py",
"requirements-dev.txt",
"boxsdk/version.py"
]
| [
"HISTORY.rst",
"boxsdk/auth/oauth2.py",
"boxsdk/auth/redis_managed_oauth2.py",
"requirements-dev.txt",
"boxsdk/version.py"
]
|
|
Juniper__py-junos-eznc-513 | 4b7d685336e0a18d6a8a5ef96eb0cb7b87221b37 | 2016-05-18 07:20:23 | 3ca08f81e0be85394c6fa3e94675dfe03e958e28 | diff --git a/README.md b/README.md
index 61ab9f5c..0296eb7a 100644
--- a/README.md
+++ b/README.md
@@ -29,7 +29,7 @@ There is a growing interest and need to automate the network infrastructure into
For questions and general support, please visit our [Google Group](https://groups.google.com/forum/#!forum/junos-python-ez)
-For documentation and more usage examples, please visit the _Junos PyEZ_ project page, [here](https://techwiki.juniper.net/Projects/Junos_PyEZ).
+For documentation and more usage examples, please visit the _Junos PyEZ_ project page, [here](http://forums.juniper.net/t5/Automation/Where-can-I-learn-more-about-Junos-PyEZ/ta-p/280496).
Issues and bugs can be opened in the repository.
diff --git a/lib/jnpr/junos/device.py b/lib/jnpr/junos/device.py
index 3dcb71df..3438b910 100644
--- a/lib/jnpr/junos/device.py
+++ b/lib/jnpr/junos/device.py
@@ -496,7 +496,7 @@ class Device(object):
self.connected = True
self._nc_transform = self.transform
- self._norm_transform = lambda: JXML.normalize_xslt.encode('UTF-8')
+ self._norm_transform = lambda: JXML.normalize_xslt
normalize = kvargs.get('normalize', self._normalize)
if normalize is True:
diff --git a/lib/jnpr/junos/utils/start_shell.py b/lib/jnpr/junos/utils/start_shell.py
index b52c518a..61f97df4 100644
--- a/lib/jnpr/junos/utils/start_shell.py
+++ b/lib/jnpr/junos/utils/start_shell.py
@@ -3,7 +3,7 @@ from select import select
import re
_JUNOS_PROMPT = '> '
-_SHELL_PROMPT = '(%|#) '
+_SHELL_PROMPT = '% '
_SELECT_WAIT = 0.1
_RECVSZ = 1024
@@ -35,8 +35,8 @@ class StartShell(object):
:param str this: expected string/pattern.
- :returns: resulting string of data in a list
- :rtype: list
+ :returns: resulting string of data
+ :rtype: str
.. warning:: need to add a timeout safeguard
"""
@@ -46,8 +46,6 @@ class StartShell(object):
rd, wr, err = select([chan], [], [], _SELECT_WAIT)
if rd:
data = chan.recv(_RECVSZ)
- if isinstance(data, bytes):
- data = data.decode('utf-8')
got.append(data)
if re.search(r'{0}\s?$'.format(this), data):
break
@@ -84,8 +82,8 @@ class StartShell(object):
self._client = client
self._chan = chan
- got = self.wait_for(r'(%|>|#)')
- if got[-1].endswith(_JUNOS_PROMPT):
+ got = self.wait_for('(%|>)')
+ if not got[-1].endswith(_SHELL_PROMPT):
self.send('start shell')
self.wait_for(_SHELL_PROMPT)
@@ -118,7 +116,7 @@ class StartShell(object):
rc = ''.join(self.wait_for(this))
self.last_ok = True if rc.find('0') > 0 else False
- return (self.last_ok, got)
+ return (self.last_ok,got)
# -------------------------------------------------------------------------
# CONTEXT MANAGER
| Link to techwiki page is broken on README
Hi
I noticed that the link to the techwiki in the `Support` section of the README is wrong.
The link points to the techwiki homepage and not to the Junos PyEZ project page as the text suggests:
https://github.com/Juniper/py-junos-eznc/blob/master/README.md#support
```
For documentation and more usage examples, please visit the Junos PyEZ project page, here.
``` | Juniper/py-junos-eznc | diff --git a/tests/unit/utils/test_start_shell.py b/tests/unit/utils/test_start_shell.py
index ee728b02..b3812937 100644
--- a/tests/unit/utils/test_start_shell.py
+++ b/tests/unit/utils/test_start_shell.py
@@ -19,17 +19,9 @@ class TestStartShell(unittest.TestCase):
@patch('paramiko.SSHClient')
@patch('jnpr.junos.utils.start_shell.StartShell.wait_for')
- def test_startshell_open_with_shell_term(self, mock_wait, mock_connect):
- mock_wait.return_value = ["user # "]
+ def test_startshell_open(self, mock_connect, mock_wait):
self.shell.open()
- mock_wait.assert_called_with('(%|>|#)')
-
- @patch('paramiko.SSHClient')
- @patch('jnpr.junos.utils.start_shell.StartShell.wait_for')
- def test_startshell_open_with_junos_term(self, mock_wait, mock_connect):
- mock_wait.return_value = ["user > "]
- self.shell.open()
- mock_wait.assert_called_with('(%|#) ')
+ mock_connect.assert_called_with('(%|>)')
@patch('paramiko.SSHClient')
def test_startshell_close(self, mock_connect):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 3
} | 1.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"coverage",
"mock",
"nose",
"pep8",
"pyflakes",
"coveralls",
"ntc_templates",
"cryptography==3.2",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.7",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | bcrypt==4.2.1
certifi @ file:///croot/certifi_1671487769961/work/certifi
cffi==1.15.1
charset-normalizer==3.4.1
coverage==6.5.0
coveralls==3.3.1
cryptography==44.0.2
docopt==0.6.2
exceptiongroup==1.2.2
future==1.0.0
idna==3.10
importlib-metadata==6.7.0
iniconfig==2.0.0
Jinja2==3.1.6
-e git+https://github.com/Juniper/py-junos-eznc.git@4b7d685336e0a18d6a8a5ef96eb0cb7b87221b37#egg=junos_eznc
lxml==5.3.1
MarkupSafe==2.1.5
mock==5.2.0
ncclient==0.6.19
netaddr==1.3.0
nose==1.3.7
ntc_templates==4.0.1
packaging==24.0
paramiko==3.5.1
pep8==1.7.1
pluggy==1.2.0
pycparser==2.21
pyflakes==3.0.1
PyNaCl==1.5.0
pytest==7.4.4
PyYAML==6.0.1
requests==2.31.0
scp==0.15.0
six==1.17.0
textfsm==1.1.3
tomli==2.0.1
typing_extensions==4.7.1
urllib3==2.0.7
zipp==3.15.0
| name: py-junos-eznc
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=22.3.1=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- bcrypt==4.2.1
- cffi==1.15.1
- charset-normalizer==3.4.1
- coverage==6.5.0
- coveralls==3.3.1
- cryptography==44.0.2
- docopt==0.6.2
- exceptiongroup==1.2.2
- future==1.0.0
- idna==3.10
- importlib-metadata==6.7.0
- iniconfig==2.0.0
- jinja2==3.1.6
- lxml==5.3.1
- markupsafe==2.1.5
- mock==5.2.0
- ncclient==0.6.19
- netaddr==1.3.0
- nose==1.3.7
- ntc-templates==4.0.1
- packaging==24.0
- paramiko==3.5.1
- pep8==1.7.1
- pluggy==1.2.0
- pycparser==2.21
- pyflakes==3.0.1
- pynacl==1.5.0
- pytest==7.4.4
- pyyaml==6.0.1
- requests==2.31.0
- scp==0.15.0
- six==1.17.0
- textfsm==1.1.3
- tomli==2.0.1
- typing-extensions==4.7.1
- urllib3==2.0.7
- zipp==3.15.0
prefix: /opt/conda/envs/py-junos-eznc
| [
"tests/unit/utils/test_start_shell.py::TestStartShell::test_startshell_open"
]
| [
"tests/unit/utils/test_start_shell.py::TestStartShell::test_startshell_context"
]
| [
"tests/unit/utils/test_start_shell.py::TestStartShell::test_startshell_close",
"tests/unit/utils/test_start_shell.py::TestStartShell::test_startshell_run",
"tests/unit/utils/test_start_shell.py::TestStartShell::test_startshell_wait_for",
"tests/unit/utils/test_start_shell.py::TestStartShell::test_startshell_wait_for_regex"
]
| []
| Apache License 2.0 | 547 | [
"lib/jnpr/junos/utils/start_shell.py",
"lib/jnpr/junos/device.py",
"README.md"
]
| [
"lib/jnpr/junos/utils/start_shell.py",
"lib/jnpr/junos/device.py",
"README.md"
]
|
|
Juniper__py-junos-eznc-514 | 4b7d685336e0a18d6a8a5ef96eb0cb7b87221b37 | 2016-05-18 17:44:30 | 3ca08f81e0be85394c6fa3e94675dfe03e958e28 | diff --git a/lib/jnpr/junos/utils/start_shell.py b/lib/jnpr/junos/utils/start_shell.py
index b52c518a..1631ef63 100644
--- a/lib/jnpr/junos/utils/start_shell.py
+++ b/lib/jnpr/junos/utils/start_shell.py
@@ -3,7 +3,7 @@ from select import select
import re
_JUNOS_PROMPT = '> '
-_SHELL_PROMPT = '(%|#) '
+_SHELL_PROMPT = '(%|#)\s'
_SELECT_WAIT = 0.1
_RECVSZ = 1024
| Shell session does not work for root user.
I'm using the example program below.
```
#!/usr/bin/env python
# only Device and StartShell are needed; the redundant wildcard imports are dropped
from jnpr.junos import Device
from jnpr.junos.utils.start_shell import StartShell

DUT = Device(host='10.252.191.104', user="root", passwd="password!")
DUT.open()
print "Device opened!"
DUT_ss = StartShell(DUT)
DUT_ss.open()
print "got thru shell open."
print DUT_ss.run("pwd")
print DUT_ss.run("ls")
```
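For reference, a minimal sketch (the prompt strings are assumed, not from the report) of why `open()` hangs for root: `wait_for()` searches the received data with `re.search(r'{0}\s?$'.format(prompt), data)`, and root's shell prompt ends with `# `, which the old `% ` pattern never matches:
```
import re

root_prompt = 'root@device:~ # '   # hypothetical output from a root shell

old = '% '        # _SHELL_PROMPT before the fix
new = r'(%|#)\s'  # _SHELL_PROMPT after the fix

print(bool(re.search(r'{0}\s?$'.format(old), root_prompt)))  # False -> hangs
print(bool(re.search(r'{0}\s?$'.format(new), root_prompt)))  # True
```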
We never print the line "got thru shell open" because the library hangs while waiting for the "% " shell prompt to appear; as the regex sketch above shows, root's "# " prompt never matches the old pattern. I have a fix I'll be suggesting. | Juniper/py-junos-eznc | diff --git a/tests/unit/utils/test_start_shell.py b/tests/unit/utils/test_start_shell.py
index ee728b02..8a4fb59d 100644
--- a/tests/unit/utils/test_start_shell.py
+++ b/tests/unit/utils/test_start_shell.py
@@ -29,7 +29,7 @@ class TestStartShell(unittest.TestCase):
def test_startshell_open_with_junos_term(self, mock_wait, mock_connect):
mock_wait.return_value = ["user > "]
self.shell.open()
- mock_wait.assert_called_with('(%|#) ')
+ mock_wait.assert_called_with('(%|#)\s')
@patch('paramiko.SSHClient')
def test_startshell_close(self, mock_connect):
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 1
} | 1.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"coverage",
"mock",
"nose",
"pep8",
"pyflakes",
"coveralls",
"ntc_templates",
"cryptography==3.2",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.7",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | bcrypt==4.2.1
certifi @ file:///croot/certifi_1671487769961/work/certifi
cffi==1.15.1
charset-normalizer==3.4.1
coverage==6.5.0
coveralls==3.3.1
cryptography==44.0.2
docopt==0.6.2
exceptiongroup==1.2.2
future==1.0.0
idna==3.10
importlib-metadata==6.7.0
iniconfig==2.0.0
Jinja2==3.1.6
-e git+https://github.com/Juniper/py-junos-eznc.git@4b7d685336e0a18d6a8a5ef96eb0cb7b87221b37#egg=junos_eznc
lxml==5.3.1
MarkupSafe==2.1.5
mock==5.2.0
ncclient==0.6.19
netaddr==1.3.0
nose==1.3.7
ntc_templates==4.0.1
packaging==24.0
paramiko==3.5.1
pep8==1.7.1
pluggy==1.2.0
pycparser==2.21
pyflakes==3.0.1
PyNaCl==1.5.0
pytest==7.4.4
PyYAML==6.0.1
requests==2.31.0
scp==0.15.0
six==1.17.0
textfsm==1.1.3
tomli==2.0.1
typing_extensions==4.7.1
urllib3==2.0.7
zipp==3.15.0
| name: py-junos-eznc
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=22.3.1=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- bcrypt==4.2.1
- cffi==1.15.1
- charset-normalizer==3.4.1
- coverage==6.5.0
- coveralls==3.3.1
- cryptography==44.0.2
- docopt==0.6.2
- exceptiongroup==1.2.2
- future==1.0.0
- idna==3.10
- importlib-metadata==6.7.0
- iniconfig==2.0.0
- jinja2==3.1.6
- lxml==5.3.1
- markupsafe==2.1.5
- mock==5.2.0
- ncclient==0.6.19
- netaddr==1.3.0
- nose==1.3.7
- ntc-templates==4.0.1
- packaging==24.0
- paramiko==3.5.1
- pep8==1.7.1
- pluggy==1.2.0
- pycparser==2.21
- pyflakes==3.0.1
- pynacl==1.5.0
- pytest==7.4.4
- pyyaml==6.0.1
- requests==2.31.0
- scp==0.15.0
- six==1.17.0
- textfsm==1.1.3
- tomli==2.0.1
- typing-extensions==4.7.1
- urllib3==2.0.7
- zipp==3.15.0
prefix: /opt/conda/envs/py-junos-eznc
| [
"tests/unit/utils/test_start_shell.py::TestStartShell::test_startshell_open_with_junos_term"
]
| [
"tests/unit/utils/test_start_shell.py::TestStartShell::test_startshell_context"
]
| [
"tests/unit/utils/test_start_shell.py::TestStartShell::test_startshell_close",
"tests/unit/utils/test_start_shell.py::TestStartShell::test_startshell_open_with_shell_term",
"tests/unit/utils/test_start_shell.py::TestStartShell::test_startshell_run",
"tests/unit/utils/test_start_shell.py::TestStartShell::test_startshell_wait_for",
"tests/unit/utils/test_start_shell.py::TestStartShell::test_startshell_wait_for_regex"
]
| []
| Apache License 2.0 | 548 | [
"lib/jnpr/junos/utils/start_shell.py"
]
| [
"lib/jnpr/junos/utils/start_shell.py"
]
|
|
networkx__networkx-2136 | 5aefafab2f05b97b150c6bf681c21ba6465c8d10 | 2016-05-19 01:37:02 | 3f4fd85765bf2d88188cfd4c84d0707152e6cd1e | diff --git a/networkx/readwrite/gml.py b/networkx/readwrite/gml.py
index b6ab5e9eb..af8db1d00 100644
--- a/networkx/readwrite/gml.py
+++ b/networkx/readwrite/gml.py
@@ -435,10 +435,6 @@ def parse_gml_lines(lines, label, destringizer):
if label != 'id':
G = nx.relabel_nodes(G, mapping)
- if 'name' in graph:
- G.graph['name'] = graph['name']
- else:
- del G.graph['name']
return G
diff --git a/networkx/relabel.py b/networkx/relabel.py
index ca069c950..8f885432c 100644
--- a/networkx/relabel.py
+++ b/networkx/relabel.py
@@ -147,7 +147,8 @@ def _relabel_inplace(G, mapping):
def _relabel_copy(G, mapping):
H = G.__class__()
- H.name = "(%s)" % G.name
+ if G.name:
+ H.name = "(%s)" % G.name
if G.is_multigraph():
H.add_edges_from( (mapping.get(n1, n1),mapping.get(n2, n2),k,d.copy())
for (n1,n2,k,d) in G.edges(keys=True, data=True))
| relabel_nodes adds a graph attribute when copy=True
I would have expected the following to work:
```
import networkx as nx
graph_a = nx.DiGraph()
graph_b = nx.relabel_nodes(graph_a, {}, copy=True)
print "graph_a.graph", graph_a.graph
print "graph_b.graph", graph_b.graph
assert graph_a.graph == graph_b.graph
```
However, it does not since [_relabel_copy attempts to copy a non-existent graph attribute, 'name'](https://github.com/networkx/networkx/blob/1675a824d6cdb17c3144ef46ff52a0c2b53a11d1/networkx/relabel.py#L150).
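A minimal sketch of one possible guard (illustrative only; the actual fix may differ) that avoids materializing the attribute on the copy:
```
def _copy_graph_name(G, H):
    # Graph.name is backed by G.graph['name']; assigning H.name
    # unconditionally creates that key even when G never set it.
    if G.name:  # networkx returns '' for an unset name
        H.name = "(%s)" % G.name
```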
I would have expected relabel_nodes to only change the node labels, while maintaining all graph/node/edge attributes. | networkx/networkx | diff --git a/networkx/tests/test_relabel.py b/networkx/tests/test_relabel.py
index 682de98a0..65c29eeab 100644
--- a/networkx/tests/test_relabel.py
+++ b/networkx/tests/test_relabel.py
@@ -150,6 +150,17 @@ class TestRelabel():
mapping={0:'aardvark'}
G=relabel_nodes(G,mapping,copy=False)
+ def test_relabel_copy_name(self):
+ G=Graph()
+ H = relabel_nodes(G, {}, copy=True)
+ assert_equal(H.graph, G.graph)
+ H = relabel_nodes(G, {}, copy=False)
+ assert_equal(H.graph, G.graph)
+ G.name = "first"
+ H = relabel_nodes(G, {}, copy=True)
+ assert_equal(H.graph, G.graph)
+ H = relabel_nodes(G, {}, copy=False)
+ assert_equal(H.graph, G.graph)
def test_relabel_toposort(self):
K5=nx.complete_graph(4)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 2
} | help | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y libgdal-dev graphviz"
],
"python": "3.6",
"reqs_path": [
"requirements/default.txt",
"requirements/test.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
decorator==5.1.1
importlib-metadata==4.8.3
iniconfig==1.1.1
-e git+https://github.com/networkx/networkx.git@5aefafab2f05b97b150c6bf681c21ba6465c8d10#egg=networkx
nose==1.3.7
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: networkx
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- decorator==5.1.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- nose==1.3.7
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/networkx
| [
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_copy_name"
]
| [
"networkx/tests/test_relabel.py::test"
]
| [
"networkx/tests/test_relabel.py::TestRelabel::test_convert_node_labels_to_integers",
"networkx/tests/test_relabel.py::TestRelabel::test_convert_to_integers2",
"networkx/tests/test_relabel.py::TestRelabel::test_convert_to_integers_raise",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_nodes_copy",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_nodes_function",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_nodes_graph",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_nodes_digraph",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_nodes_multigraph",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_nodes_multidigraph",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_isolated_nodes_to_same",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_nodes_missing",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_toposort",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_selfloop"
]
| []
| BSD 3-Clause | 549 | [
"networkx/readwrite/gml.py",
"networkx/relabel.py"
]
| [
"networkx/readwrite/gml.py",
"networkx/relabel.py"
]
|
|
jubatus__jubatus-python-client-69 | 34f9f83ee2d230672518102e541286425c92c287 | 2016-05-19 05:04:03 | ecbdecb8eb9ee40694ee39c4bf1de7e7fd984ae5 | diff --git a/jubatus/common/client.py b/jubatus/common/client.py
index d599319..9cd91a8 100644
--- a/jubatus/common/client.py
+++ b/jubatus/common/client.py
@@ -54,6 +54,10 @@ class ClientBase(object):
(`unpack_encoding=None`)
"""
def __init__(self, host, port, name, timeout=10):
+ check_types(host, string_types)
+ check_types(port, int_types)
+ check_types(name, string_types)
+ check_types(timeout, int_types)
address = msgpackrpc.Address(host, port)
self.client = msgpackrpc.Client(address, timeout=timeout, pack_encoding='utf-8', unpack_encoding=None)
self.jubatus_client = Client(self.client, name)
@@ -65,6 +69,7 @@ class ClientBase(object):
return self.jubatus_client.name
def set_name(self, name):
+ check_types(name, string_types)
self.jubatus_client.name = name
def save(self, id):
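For reference, a hedged sketch of the kind of check `check_types` performs (the real helper lives in `jubatus.common`; this reimplementation is purely illustrative):
```
def check_types(value, types):
    # Raise early with a readable message instead of letting the server
    # report a hard-to-interpret TypeMismatch during the RPC call.
    if not isinstance(value, types):
        raise TypeError('expected %s, got %r' % (types, value))
```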
| no type validation for constructor arguments
Currently argument types are validated, but constructor arguments are not.
For example, the following code:
```
c = jubatus.Classifier("localhost", 9199, 0) # it should be ("localhost", 9199, "") to work
c.get_status()
```
raises "TypeMismatch (error 2)" on RPC call, which is difficult to understand. | jubatus/jubatus-python-client | diff --git a/test/jubatus_test/common/test_client.py b/test/jubatus_test/common/test_client.py
index 0e929f8..065b538 100644
--- a/test/jubatus_test/common/test_client.py
+++ b/test/jubatus_test/common/test_client.py
@@ -67,5 +67,22 @@ class ClientTest(unittest.TestCase):
self.assertEqual("test", c.call("test", [], AnyType(), []))
self.assertRaises(TypeError, c.call, "test", [1], AnyType(), [])
+class ClientBaseTest(unittest.TestCase):
+ def test_constructor(self):
+ self.assertIsInstance(jubatus.common.ClientBase("127.0.0.1", 9199, "cluster", 10), jubatus.common.ClientBase)
+
+ # invalid host
+ self.assertRaises(TypeError, jubatus.common.ClientBase, 127001, 9199, "cluster", 10)
+
+ # invalid port
+ self.assertRaises(TypeError, jubatus.common.ClientBase, "127.0.0.1", "9199", "cluster", 10)
+
+ # invalid name
+ self.assertRaises(TypeError, jubatus.common.ClientBase, "127.0.0.1", 9199, 10, 10)
+
+ # invalid timeout
+ self.assertRaises(TypeError, jubatus.common.ClientBase, "127.0.0.1", 9199, "cluster", "test")
+ self.assertRaises(TypeError, jubatus.common.ClientBase, "127.0.0.1", 9199, "cluster", 1.5)
+
if __name__ == '__main__':
unittest.main()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 0.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
-e git+https://github.com/jubatus/jubatus-python-client.git@34f9f83ee2d230672518102e541286425c92c287#egg=jubatus
msgpack-python==0.5.6
msgpack-rpc-python==0.4.1
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
tornado==4.5.3
| name: jubatus-python-client
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- msgpack-python==0.5.6
- msgpack-rpc-python==0.4.1
- tornado==4.5.3
prefix: /opt/conda/envs/jubatus-python-client
| [
"test/jubatus_test/common/test_client.py::ClientBaseTest::test_constructor"
]
| []
| [
"test/jubatus_test/common/test_client.py::ClientTest::test_remote_error",
"test/jubatus_test/common/test_client.py::ClientTest::test_type_mismatch",
"test/jubatus_test/common/test_client.py::ClientTest::test_unknown_method",
"test/jubatus_test/common/test_client.py::ClientTest::test_wrong_number_of_arguments"
]
| []
| MIT License | 550 | [
"jubatus/common/client.py"
]
| [
"jubatus/common/client.py"
]
|
|
html5lib__html5lib-python-256 | 66a2f7763f2dda24d3d2681c22bf799c94ee049c | 2016-05-22 03:13:22 | 563dc298ea43021f9a9306fc7da3734ea5d9d8ec | gsnedders: fixes #228
landscape-bot: [](https://landscape.io/diff/354583)
Code quality remained the same when pulling **[2f1d040](https://github.com/gsnedders/html5lib-python/commit/2f1d04073f63e7633b8fa2204e7dac221051c7fe) on gsnedders:lxml_treewalker_lxml_tree** into **[5288737](https://github.com/html5lib/html5lib-python/commit/5288737aebcae1fcf25a640c79241f6fb14475a2) on html5lib:master**.
codecov-io: ## [Current coverage][cc-pull] is **90.83%**
> Merging [#256][cc-pull] into [master][cc-base-branch] will increase coverage by **<.01%**
```diff
@@ master #256 diff @@
==========================================
Files 51 51
Lines 6821 6835 +14
Methods 0 0
Messages 0 0
Branches 1313 1312 -1
==========================================
+ Hits 6193 6208 +15
Misses 468 468
+ Partials 160 159 -1
```
> Powered by [Codecov](https://codecov.io?src=pr). Last updated by [5288737...2f1d040][cc-compare]
[cc-base-branch]: https://codecov.io/gh/html5lib/html5lib-python/branch/master?src=pr
[cc-compare]: https://codecov.io/gh/html5lib/html5lib-python/compare/5288737aebcae1fcf25a640c79241f6fb14475a2...2f1d04073f63e7633b8fa2204e7dac221051c7fe
[cc-pull]: https://codecov.io/gh/html5lib/html5lib-python/pull/256?src=pr
landscape-bot: [](https://landscape.io/diff/358204)
Code quality remained the same when pulling **[a2ee6e6](https://github.com/gsnedders/html5lib-python/commit/a2ee6e6b0ec015a3a777546192d8af80074d60b0) on gsnedders:lxml_treewalker_lxml_tree** into **[66a2f77](https://github.com/html5lib/html5lib-python/commit/66a2f7763f2dda24d3d2681c22bf799c94ee049c) on html5lib:master**. | diff --git a/html5lib/treewalkers/lxmletree.py b/html5lib/treewalkers/lxmletree.py
index 7d99adc..ff31a44 100644
--- a/html5lib/treewalkers/lxmletree.py
+++ b/html5lib/treewalkers/lxmletree.py
@@ -22,13 +22,20 @@ class Root(object):
def __init__(self, et):
self.elementtree = et
self.children = []
- if et.docinfo.internalDTD:
- self.children.append(Doctype(self,
- ensure_str(et.docinfo.root_name),
- ensure_str(et.docinfo.public_id),
- ensure_str(et.docinfo.system_url)))
- root = et.getroot()
- node = root
+
+ try:
+ if et.docinfo.internalDTD:
+ self.children.append(Doctype(self,
+ ensure_str(et.docinfo.root_name),
+ ensure_str(et.docinfo.public_id),
+ ensure_str(et.docinfo.system_url)))
+ except AttributeError:
+ pass
+
+ try:
+ node = et.getroot()
+ except AttributeError:
+ node = et
while node.getprevious() is not None:
node = node.getprevious()
@@ -118,12 +125,12 @@ class FragmentWrapper(object):
class TreeWalker(_base.NonRecursiveTreeWalker):
def __init__(self, tree):
# pylint:disable=redefined-variable-type
- if hasattr(tree, "getroot"):
- self.fragmentChildren = set()
- tree = Root(tree)
- elif isinstance(tree, list):
+ if isinstance(tree, list):
self.fragmentChildren = set(tree)
tree = FragmentRoot(tree)
+ else:
+ self.fragmentChildren = set()
+ tree = Root(tree)
_base.NonRecursiveTreeWalker.__init__(self, tree)
self.filter = ihatexml.InfosetFilter()
| AttributeError: 'TreeWalker' object has no attribute 'fragmentChildren'
For some reason my `tree` (`lxml.etree._Element`) object has no attribute getroot and is not a list so there is no `fragmentChildren` attribute on `TreeWalker`: 270a2ca14fafc989f8f1bd4f79db2f4bd9f4d1fc
lxml 3.5.0
html5lib-python f796cca5f9ddaaf1e1a8b872f68455551cd3ae2d | html5lib/html5lib-python | diff --git a/html5lib/tests/test_treewalkers.py b/html5lib/tests/test_treewalkers.py
index 332027a..81ed277 100644
--- a/html5lib/tests/test_treewalkers.py
+++ b/html5lib/tests/test_treewalkers.py
@@ -2,6 +2,11 @@ from __future__ import absolute_import, division, unicode_literals
import pytest
+try:
+ import lxml.etree
+except ImportError:
+ pass
+
from .support import treeTypes
from html5lib import html5parser, treewalkers
@@ -93,3 +98,19 @@ def test_treewalker_six_mix():
for tree in sorted(treeTypes.items()):
for intext, attrs, expected in sm_tests:
yield runTreewalkerEditTest, intext, expected, attrs, tree
+
+
[email protected](treeTypes["lxml"] is None, reason="lxml not importable")
+def test_lxml_xml():
+ expected = [
+ {'data': {}, 'name': 'div', 'namespace': None, 'type': 'StartTag'},
+ {'data': {}, 'name': 'div', 'namespace': None, 'type': 'StartTag'},
+ {'name': 'div', 'namespace': None, 'type': 'EndTag'},
+ {'name': 'div', 'namespace': None, 'type': 'EndTag'}
+ ]
+
+ lxmltree = lxml.etree.fromstring('<div><div></div></div>')
+ walker = treewalkers.getTreeWalker('lxml')
+ output = Lint(walker(lxmltree))
+
+ assert list(output) == expected
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_git_commit_hash"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 1
} | 1.08 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[all]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"mock"
],
"pre_install": [
"git submodule update --init --recursive"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | chardet==5.2.0
datrie==0.8.2
exceptiongroup==1.2.2
Genshi==0.7.9
-e git+https://github.com/html5lib/html5lib-python.git@66a2f7763f2dda24d3d2681c22bf799c94ee049c#egg=html5lib
iniconfig==2.1.0
lxml==5.3.1
mock==5.2.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
six==1.17.0
tomli==2.2.1
webencodings==0.5.1
| name: html5lib-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- chardet==5.2.0
- datrie==0.8.2
- exceptiongroup==1.2.2
- genshi==0.7.9
- iniconfig==2.1.0
- lxml==5.3.1
- mock==5.2.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- six==1.17.0
- tomli==2.2.1
- webencodings==0.5.1
prefix: /opt/conda/envs/html5lib-python
| [
"html5lib/tests/test_treewalkers.py::test_lxml_xml"
]
| []
| [
"html5lib/tests/test_treewalkers.py::test_all_tokens"
]
| []
| MIT License | 551 | [
"html5lib/treewalkers/lxmletree.py"
]
| [
"html5lib/treewalkers/lxmletree.py"
]
|
pre-commit__pre-commit-376 | 6654fee5f9c40b4483f30d44a5ccda70b238b3ce | 2016-05-25 15:45:51 | f11338ccfa612e36a6c1f2dc688080ec08fd66b0 | diff --git a/pre_commit/git.py b/pre_commit/git.py
index 796a0b8..1f16b6e 100644
--- a/pre_commit/git.py
+++ b/pre_commit/git.py
@@ -69,7 +69,11 @@ def get_conflicted_files():
@memoize_by_cwd
def get_staged_files():
- return cmd_output('git', 'diff', '--staged', '--name-only')[1].splitlines()
+ return cmd_output(
+ 'git', 'diff', '--staged', '--name-only',
+ # Everything except for D
+ '--diff-filter=ACMRTUXB'
+ )[1].splitlines()
@memoize_by_cwd
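To illustrate the flag's effect, a sketch of standard git behavior (not project code), run in a repository where the file `test` has a staged deletion:
```
import subprocess

base = ('git', 'diff', '--staged', '--name-only')
# Without the filter, the staged deletion is listed...
print(subprocess.check_output(base))                                # b'test\n'
# ...with it, 'D' entries are excluded, so nothing is reported.
print(subprocess.check_output(base + ('--diff-filter=ACMRTUXB',)))  # b''
```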
| Newly gitignored (but file still exists) files are linted
(they should not be) | pre-commit/pre-commit | diff --git a/tests/git_test.py b/tests/git_test.py
index c4e0145..701d36b 100644
--- a/tests/git_test.py
+++ b/tests/git_test.py
@@ -33,6 +33,16 @@ def test_get_root_not_git_dir(tempdir_factory):
git.get_root()
+def test_get_staged_files_deleted(tempdir_factory):
+ path = git_dir(tempdir_factory)
+ with cwd(path):
+ open('test', 'a').close()
+ cmd_output('git', 'add', 'test')
+ cmd_output('git', 'commit', '-m', 'foo', '--allow-empty')
+ cmd_output('git', 'rm', '--cached', 'test')
+ assert git.get_staged_files() == []
+
+
def test_is_not_in_merge_conflict(tempdir_factory):
path = git_dir(tempdir_factory)
with cwd(path):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 0.8 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aspy.yaml==1.3.0
attrs==25.3.0
cached-property==2.0.1
coverage==7.8.0
distlib==0.3.9
exceptiongroup==1.2.2
filelock==3.18.0
flake8==7.2.0
iniconfig==2.1.0
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
mccabe==0.7.0
mock==5.2.0
nodeenv==1.9.1
packaging==24.2
platformdirs==4.3.7
pluggy==1.5.0
-e git+https://github.com/pre-commit/pre-commit.git@6654fee5f9c40b4483f30d44a5ccda70b238b3ce#egg=pre_commit
pycodestyle==2.13.0
pyflakes==3.3.1
pyterminalsize==0.1.0
pytest==8.3.5
PyYAML==6.0.2
referencing==0.36.2
rpds-py==0.24.0
tomli==2.2.1
typing_extensions==4.13.0
virtualenv==20.29.3
| name: pre-commit
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- aspy-yaml==1.3.0
- attrs==25.3.0
- cached-property==2.0.1
- coverage==7.8.0
- distlib==0.3.9
- exceptiongroup==1.2.2
- filelock==3.18.0
- flake8==7.2.0
- iniconfig==2.1.0
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- mccabe==0.7.0
- mock==5.2.0
- nodeenv==1.9.1
- packaging==24.2
- platformdirs==4.3.7
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyflakes==3.3.1
- pyterminalsize==0.1.0
- pytest==8.3.5
- pyyaml==6.0.2
- referencing==0.36.2
- rpds-py==0.24.0
- setuptools==18.4
- tomli==2.2.1
- typing-extensions==4.13.0
- virtualenv==20.29.3
prefix: /opt/conda/envs/pre-commit
| [
"tests/git_test.py::test_get_staged_files_deleted"
]
| []
| [
"tests/git_test.py::test_get_root_at_root",
"tests/git_test.py::test_get_root_deeper",
"tests/git_test.py::test_get_root_not_git_dir",
"tests/git_test.py::test_is_not_in_merge_conflict",
"tests/git_test.py::test_get_files_matching_base",
"tests/git_test.py::test_get_files_matching_total_match",
"tests/git_test.py::test_does_search_instead_of_match",
"tests/git_test.py::test_does_not_include_deleted_fileS",
"tests/git_test.py::test_exclude_removes_files",
"tests/git_test.py::test_parse_merge_msg_for_conflicts[Merge"
]
| []
| MIT License | 553 | [
"pre_commit/git.py"
]
| [
"pre_commit/git.py"
]
|
|
Duke-GCB__DukeDSClient-61 | 81d8fc03fdbb9209d949f319bf4b4691b1615a59 | 2016-05-25 18:51:33 | d43333d5372a8115eaaba0a68991a696124bf837 | diff --git a/ddsc/core/filedownloader.py b/ddsc/core/filedownloader.py
index 5049aac..e8ab7f5 100644
--- a/ddsc/core/filedownloader.py
+++ b/ddsc/core/filedownloader.py
@@ -106,8 +106,9 @@ class FileDownloader(object):
Write out a empty file so the workers can seek to where they should write and write their data.
"""
with open(self.path, "wb") as outfile:
- outfile.seek(int(self.file_size) - 1)
- outfile.write(b'\0')
+ if self.file_size > 0:
+ outfile.seek(int(self.file_size) - 1)
+ outfile.write(b'\0')
def make_and_start_process(self, range_start, range_end, progress_queue):
"""
diff --git a/ddsc/core/fileuploader.py b/ddsc/core/fileuploader.py
index 43356c5..4b57b61 100644
--- a/ddsc/core/fileuploader.py
+++ b/ddsc/core/fileuploader.py
@@ -181,12 +181,14 @@ class ParallelChunkProcessor(object):
processes.append(self.make_and_start_process(index, num_items, progress_queue))
wait_for_processes(processes, num_chunks, progress_queue, self.watcher, self.local_file)
-
@staticmethod
def determine_num_chunks(chunk_size, file_size):
"""
Figure out how many pieces we are sending the file in.
+ NOTE: duke-data-service requires an empty chunk to be uploaded for empty files.
"""
+ if file_size == 0:
+ return 1
return int(math.ceil(float(file_size) / float(chunk_size)))
@staticmethod
diff --git a/ddsc/tests/empty_file b/ddsc/tests/empty_file
new file mode 100644
index 0000000..e69de29
diff --git a/setup.py b/setup.py
index 8fee84a..2fe9f5b 100644
--- a/setup.py
+++ b/setup.py
@@ -2,7 +2,7 @@ from setuptools import setup
setup(name='DukeDSClient',
- version='0.2.8',
+ version='0.2.9',
description='Command line tool(ddsclient) to upload/manage projects on the duke-data-service.',
url='https://github.com/Duke-GCB/DukeDSClient',
keywords='duke dds dukedataservice',
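A small sketch (not project code) of the failure mode the uploader fix addresses: a zero-byte file used to yield zero chunks, which presumably produced the zero step passed to `range()` when splitting work, while duke-data-service still expects one empty chunk:
```
import math

def determine_num_chunks(chunk_size, file_size):
    # Guard for empty files: return a single (empty) chunk instead of 0.
    if file_size == 0:
        return 1
    return int(math.ceil(float(file_size) / float(chunk_size)))

assert determine_num_chunks(100, 0) == 1
assert determine_num_chunks(122, 123) == 2
```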
| “range() step argument must not be zero”
Crash at approx. 30% completion, uploading a folder to a new project: “range() step argument must not be zero”
<img width="806" alt="screen shot 2016-05-25 at 1 31 11 pm" src="https://cloud.githubusercontent.com/assets/11540881/15551266/38d08f16-2283-11e6-9708-c5f93edb99d9.png">
Is there a verbose logging option we can use to determine where, exactly, this is failing? | Duke-GCB/DukeDSClient | diff --git a/ddsc/core/tests/test_fileuploader.py b/ddsc/core/tests/test_fileuploader.py
index f8ca72b..44bfb7e 100644
--- a/ddsc/core/tests/test_fileuploader.py
+++ b/ddsc/core/tests/test_fileuploader.py
@@ -47,6 +47,7 @@ class TestParallelChunkProcessor(TestCase):
(100, 900000, 9000),
(125, 123, 1),
(122, 123, 2),
+ (100, 0, 1)
]
for chunk_size, file_size, expected in values:
num_chunks = ParallelChunkProcessor.determine_num_chunks(chunk_size, file_size)
@@ -63,4 +64,4 @@ class TestParallelChunkProcessor(TestCase):
]
for upload_workers, num_chunks, expected in values:
result = ParallelChunkProcessor.make_work_parcels(upload_workers, num_chunks)
- self.assertEqual(expected, result)
\ No newline at end of file
+ self.assertEqual(expected, result)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_media",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 3
} | 0.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/Duke-GCB/DukeDSClient.git@81d8fc03fdbb9209d949f319bf4b4691b1615a59#egg=DukeDSClient
exceptiongroup==1.2.2
iniconfig==2.1.0
nose==1.3.7
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
PyYAML==3.11
requests==2.9.1
tomli==2.2.1
| name: DukeDSClient
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- nose==1.3.7
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pyyaml==3.11
- requests==2.9.1
- tomli==2.2.1
prefix: /opt/conda/envs/DukeDSClient
| [
"ddsc/core/tests/test_fileuploader.py::TestParallelChunkProcessor::test_determine_num_chunks"
]
| []
| [
"ddsc/core/tests/test_fileuploader.py::TestFileUploader::test_make_chunk_processor_with_none",
"ddsc/core/tests/test_fileuploader.py::TestFileUploader::test_make_chunk_processor_with_one",
"ddsc/core/tests/test_fileuploader.py::TestFileUploader::test_make_chunk_processor_with_two",
"ddsc/core/tests/test_fileuploader.py::TestParallelChunkProcessor::test_make_work_parcels"
]
| []
| MIT License | 554 | [
"setup.py",
"ddsc/core/filedownloader.py",
"ddsc/core/fileuploader.py",
"ddsc/tests/empty_file"
]
| [
"setup.py",
"ddsc/core/filedownloader.py",
"ddsc/core/fileuploader.py",
"ddsc/tests/empty_file"
]
|
|
zalando-stups__pierone-cli-33 | 903f8e27f3e084fd9116929139a1ccd7f700f42f | 2016-05-26 16:17:11 | 560cae1b4fc185c7a8aa3a1a50e0a96b2c7dd8e7 | diff --git a/.gitignore b/.gitignore
index 1e365e8..e60d986 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,3 +12,4 @@ htmlcov/
virtualenv
*.sw*
.cache/
+.tox/
diff --git a/pierone/cli.py b/pierone/cli.py
index 467ff32..1af5790 100644
--- a/pierone/cli.py
+++ b/pierone/cli.py
@@ -1,20 +1,18 @@
import datetime
import os
import re
-
-import click
-
-import requests
import tarfile
import tempfile
import time
-import zign.api
-from clickclick import error, AliasedGroup, print_table, OutputFormat, UrlType
-from .api import docker_login, request, get_latest_tag, DockerImage
+import click
import pierone
+import requests
import stups_cli.config
+import zign.api
+from clickclick import AliasedGroup, OutputFormat, UrlType, error, print_table
+from .api import DockerImage, docker_login, get_latest_tag, request
KEYRING_KEY = 'pierone'
@@ -24,6 +22,48 @@ output_option = click.option('-o', '--output', type=click.Choice(['text', 'json'
help='Use alternative output format')
url_option = click.option('--url', help='Pier One URL', metavar='URI')
+clair_url_option = click.option('--clair-url', help='Clair URL', metavar='CLAIR_URI')
+
+CVE_STYLES = {
+ 'TOO_OLD': {
+ 'bold': True,
+ 'fg': 'red'
+ },
+ 'NOT_PROCESSED_YET': {
+ 'bold': True,
+ 'fg': 'red'
+ },
+ 'COULDNT_FIGURE_OUT': {
+ 'bold': True,
+ 'fg': 'red'
+ },
+ 'CRITICAL': {
+ 'bold': True,
+ 'fg': 'red'
+ },
+ 'HIGH': {
+ 'bold': True,
+ 'fg': 'red'
+ },
+ 'MEDIUM': {
+ 'fg': 'yellow'
+ },
+ 'LOW': {
+ 'fg': 'yellow'
+ },
+ 'NEGLIGIBLE': {
+ 'fg': 'yellow'
+ },
+ 'UNKNOWN': {
+ 'fg': 'yellow'
+ },
+ 'PENDING': {
+ 'fg': 'yellow'
+ },
+ 'NO_CVES_FOUND': {
+ 'fg': 'green'
+ }
+}
TEAM_PATTERN_STR = r'[a-z][a-z0-9-]+'
TEAM_PATTERN = re.compile(r'^{}$'.format(TEAM_PATTERN_STR))
@@ -54,6 +94,19 @@ def parse_time(s: str) -> float:
return None
+def parse_severity(value, clair_id_exists):
+ '''Parse severity values to displayable values'''
+ if value is None and clair_id_exists:
+ return 'NOT_PROCESSED_YET'
+ elif value is None:
+ return 'TOO_OLD'
+
+ value = re.sub('^clair:', '', value)
+ value = re.sub('(?P<upper_letter>(?<=[a-z])[A-Z])', '_\g<upper_letter>', value)
+
+ return value.upper()
+
+
def print_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
@@ -82,6 +135,28 @@ def set_pierone_url(config: dict, url: str) -> None:
return url
+def set_clair_url(config: dict, url: str) -> None:
+ '''Read Clair URL from cli, from config file or from stdin.'''
+ url = url or config.get('clair_url')
+
+ while not url:
+ url = click.prompt('Please enter the Clair URL', type=UrlType())
+
+ try:
+ requests.get(url, timeout=5)
+ except:
+ error('Could not reach {}'.format(url))
+ url = None
+
+ if '://' not in url:
+ # issue 63: gracefully handle URLs without scheme
+ url = 'https://{}'.format(url)
+
+ config['clair_url'] = url
+ stups_cli.config.store_config(config, 'pierone')
+ return url
+
+
@click.group(cls=AliasedGroup, context_settings=CONTEXT_SETTINGS)
@click.option('-V', '--version', is_flag=True, callback=print_version, expose_value=False, is_eager=True,
help='Print the current version number and exit.')
@@ -147,6 +222,19 @@ def get_tags(url, team, art, access_token):
return r.json()
+def get_clair_features(url, layer_id, access_token):
+ if layer_id is None:
+ return []
+
+ r = request(url, '/v1/layers/{}?vulnerabilities&features'.format(layer_id), access_token)
+ if r.status_code == 404:
+ # empty list of tags (layer does not exist)
+ return []
+ else:
+ r.raise_for_status()
+ return r.json()['Layer']['Features']
+
+
@cli.command()
@click.argument('team', callback=validate_team)
@url_option
@@ -184,14 +272,69 @@ def tags(config, team: str, artifact, url, output):
'artifact': art,
'tag': row['name'],
'created_by': row['created_by'],
- 'created_time': parse_time(row['created'])}
+ 'created_time': parse_time(row['created']),
+ 'severity_fix_available': parse_severity(
+ row.get('severity_fix_available'), row.get('clair_id', False)),
+ 'severity_no_fix_available': parse_severity(
+ row.get('severity_no_fix_available'), row.get('clair_id', False))}
for row in r])
# sorts are guaranteed to be stable, i.e. tags will be sorted by time (as returned from REST service)
rows.sort(key=lambda row: (row['team'], row['artifact']))
with OutputFormat(output):
- print_table(['team', 'artifact', 'tag', 'created_time', 'created_by'], rows,
- titles={'created_time': 'Created', 'created_by': 'By'})
+ titles = {
+ 'created_time': 'Created',
+ 'created_by': 'By',
+ 'severity_fix_available': 'Fixable CVE Severity',
+ 'severity_no_fix_available': 'Unfixable CVE Severity'
+ }
+ print_table(['team', 'artifact', 'tag', 'created_time', 'created_by',
+ 'severity_fix_available', 'severity_no_fix_available'],
+ rows, titles=titles, styles=CVE_STYLES)
+
+
[email protected]()
[email protected]('team', callback=validate_team)
[email protected]('artifact')
[email protected]('tag')
+@url_option
+@clair_url_option
+@output_option
[email protected]_obj
+def cves(config, team, artifact, tag, url, clair_url, output):
+ '''List all CVE's found by Clair service for a specific artifact tag'''
+ set_pierone_url(config, url)
+ set_clair_url(config, clair_url)
+
+ rows = []
+ token = get_token()
+ for artifact_tag in get_tags(config.get('url'), team, artifact, token):
+ if artifact_tag['name'] == tag:
+ installed_software = get_clair_features(config.get('clair_url'), artifact_tag.get('clair_id'), token)
+ for software_pkg in installed_software:
+ for cve in software_pkg.get('Vulnerabilities', []):
+ rows.append({
+ 'cve': cve['Name'],
+ 'severity': cve['Severity'].upper(),
+ 'affected_feature': '{}:{}'.format(software_pkg['Name'],
+ software_pkg['Version']),
+ 'fixing_feature': cve.get(
+ 'FixedBy') and '{}:{}'.format(software_pkg['Name'],
+ cve['FixedBy']),
+ 'link': cve['Link'],
+ })
+ severity_rating = ['CRITICAL', 'HIGH', 'MEDIUM', 'LOW', 'NEGLIGIBLE', 'UNKNOWN', 'PENDING']
+ rows.sort(key=lambda row: severity_rating.index(row['severity']))
+ with OutputFormat(output):
+ titles = {
+ 'cve': 'CVE',
+ 'severity': 'Severity',
+ 'affected_feature': 'Affected Feature',
+ 'fixing_feature': 'Fixing Feature',
+ 'link': 'Link'
+ }
+ print_table(['cve', 'severity', 'affected_feature', 'fixing_feature', 'link'],
+ rows, titles=titles, styles=CVE_STYLES)
@cli.command()
diff --git a/tox.ini b/tox.ini
index aa079ec..4644fe1 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,2 +1,8 @@
[flake8]
max-line-length=120
+
+[tox]
+envlist=py34,py35
+
+[testenv]
+commands=python setup.py test
| Display Clair security information.
PierOne now supports Clair for vulnerability scanning. Pierone now exposes the following three information for each tag:
- clair_id
- severity_fix_available
- severity_no_fix_available
The PierOne CLI should now also enhance the `tags` subcommand with these information and provide a new `cves` subcommand to display full CVE reports.
$ pierone tags foo bar
Team | Artifact | Tag | Created | By | Fixable CVE Severity | Unfixable CVE Severity |
--------|-----------|-------|------------|-----|-----------------------|--------------------------
foo | bar | 1.0 | 5d ago | example | **Critical** | Medium |
foo | bar | 1.1 | 2d ago | example | **Critical** | Medium |
foo | bar | 2.0 | 1d ago | example | None | Medium |
`High` and `Critical` severities should be highlighted.
$ pierone cves foo bar 1.0
CVE | Severity | Affected Feature | Fixing Feature | Link
-------|------------|------------------------|---------------------|-------
CVE-2014-9471 | Low | coreutils:8.23-4 | coreutils:9.23-5 | https://security-tracker.debian.org/tracker/CVE-2014-9471
Again, `High` and `Critical` needs to be highlighted and the whole table should be sorted by severity. PierOne source contains an ordered list of possible values.
The information for this output can be retrieved via the [Clair API](https://github.com/coreos/clair/blob/master/api/v1/README.md#get-layersname) using the PierOne provided Clair ID. For this, the PierOne CLI will need to learn about the Clair API's endpoint. | zalando-stups/pierone-cli | diff --git a/setup.py b/setup.py
old mode 100644
new mode 100755
diff --git a/tests/fixtures/clair_response.json b/tests/fixtures/clair_response.json
new file mode 100644
index 0000000..2638daa
--- /dev/null
+++ b/tests/fixtures/clair_response.json
@@ -0,0 +1,70 @@
+{
+ "Layer": {
+ "Name": "sha256:0000000000000000000000000000000000000000000000000000000000000000",
+ "NamespaceName": "ubuntu:16.04",
+ "ParentName": "sha256:0000000000000000000000000000000000000000000000000000000000000000",
+ "IndexedByVersion": 2,
+ "Features": [
+ {
+ "Name": "python3.5",
+ "NamespaceName": "ubuntu:16.04",
+ "Version": "3.5.1-10",
+ "AddedBy": "sha256:0000000000000000000000000000000000000000000000000000000000000000"
+ },
+ {
+ "Name": "python-pip",
+ "NamespaceName": "ubuntu:16.04",
+ "Version": "8.1.1-2",
+ "Vulnerabilities": [
+ {
+ "Name": "CVE-2013-5123",
+ "NamespaceName": "ubuntu:16.04",
+ "Description": "The mirroring support (-M, --use-mirrors) was implemented without any sort of authenticity checks and is downloaded over plaintext HTTP. Further more by default it will dynamically discover the list of available mirrors by querying a DNS entry and extrapolating from that data. It does not attempt to use any sort of method of securing this querying of the DNS like DNSSEC. Software packages are downloaded over these insecure links, unpacked, and then typically the setup.py python file inside of them is executed.",
+ "Link": "http://people.ubuntu.com/~ubuntu-security/cve/CVE-2013-5123",
+ "Severity": "Medium"
+ },
+ {
+ "Name": "CVE-2014-8991",
+ "NamespaceName": "ubuntu:16.04",
+ "Description": "pip 1.3 through 1.5.6 allows local users to cause a denial of service (prevention of package installation) by creating a /tmp/pip-build-* file for another user.",
+ "Link": "http://people.ubuntu.com/~ubuntu-security/cve/CVE-2014-8991",
+ "Severity": "Low",
+ "Metadata": {
+ "NVD": {
+ "CVSSv2": {
+ "Score": 2.1,
+ "Vectors": "AV:L/AC:L/Au:N/C:N/I:N"
+ }
+ }
+ }
+ }
+ ],
+ "AddedBy": "sha256:0000000000000000000000000000000000000000000000000000000000000000"
+ },
+ {
+ "Name": "openssl",
+ "NamespaceName": "ubuntu:16.04",
+ "Version": "1.0.2g-1ubuntu4",
+ "Vulnerabilities": [
+ {
+ "Name": "CVE-2016-2108",
+ "NamespaceName": "ubuntu:16.04",
+ "Description": "The ASN.1 implementation in OpenSSL before 1.0.1o and 1.0.2 before 1.0.2c allows remote attackers to execute arbitrary code or cause a denial of service (buffer underflow and memory corruption) via an ANY field in crafted serialized data, aka the \"negative zero\" issue.",
+ "Link": "http://people.ubuntu.com/~ubuntu-security/cve/CVE-2016-2108",
+ "Severity": "High",
+ "Metadata": {
+ "NVD": {
+ "CVSSv2": {
+ "Score": 10,
+ "Vectors": "AV:N/AC:L/Au:N/C:C/I:C"
+ }
+ }
+ },
+ "FixedBy": "1.0.2g-1ubuntu4.1"
+ }
+ ],
+ "AddedBy": "sha256:0000000000000000000000000000000000000000000000000000000000000000"
+ }
+ ]
+ }
+}
diff --git a/tests/test_api.py b/tests/test_api.py
index 5cb2fc7..3548e01 100644
--- a/tests/test_api.py
+++ b/tests/test_api.py
@@ -1,9 +1,11 @@
import json
import os
from unittest.mock import MagicMock
-import yaml
-from pierone.api import docker_login, DockerImage, get_latest_tag, Unauthorized, image_exists
+
import pytest
+import yaml
+from pierone.api import (DockerImage, Unauthorized, docker_login,
+ get_latest_tag, image_exists)
def test_docker_login(monkeypatch, tmpdir):
@@ -12,22 +14,22 @@ def test_docker_login(monkeypatch, tmpdir):
response.status_code = 200
response.json.return_value = {'access_token': '12377'}
monkeypatch.setattr('requests.get', MagicMock(return_value=response))
- token = docker_login('https://pierone.example.org', 'services', 'mytok',
- 'myuser', 'mypass', 'https://token.example.org', use_keyring=False)
+ docker_login('https://pierone.example.org', 'services', 'mytok',
+ 'myuser', 'mypass', 'https://token.example.org', use_keyring=False)
path = os.path.expanduser('~/.docker/config.json')
with open(path) as fd:
data = yaml.safe_load(fd)
- assert {'auth': 'b2F1dGgyOjEyMzc3', 'email': '[email protected]'} == data.get('auths').get('https://pierone.example.org')
+ assert {'auth': 'b2F1dGgyOjEyMzc3', 'email': '[email protected]'} == data.get('auths').get('https://pierone.example.org')
def test_docker_login_service_token(monkeypatch, tmpdir):
monkeypatch.setattr('os.path.expanduser', lambda x: x.replace('~', str(tmpdir)))
monkeypatch.setattr('tokens.get', lambda x: '12377')
- token = docker_login('https://pierone.example.org', None, 'mytok', 'myuser', 'mypass', 'https://token.example.org')
+ docker_login('https://pierone.example.org', None, 'mytok', 'myuser', 'mypass', 'https://token.example.org')
path = os.path.expanduser('~/.docker/config.json')
with open(path) as fd:
data = yaml.safe_load(fd)
- assert {'auth': 'b2F1dGgyOjEyMzc3', 'email': '[email protected]'} == data.get('auths').get('https://pierone.example.org')
+ assert {'auth': 'b2F1dGgyOjEyMzc3', 'email': '[email protected]'} == data.get('auths').get('https://pierone.example.org')
def test_keep_dockercfg_entries(monkeypatch, tmpdir):
@@ -49,12 +51,12 @@ def test_keep_dockercfg_entries(monkeypatch, tmpdir):
with open(path, 'w') as fd:
json.dump(existing_data, fd)
- token = docker_login('https://pierone.example.org', 'services', 'mytok',
- 'myuser', 'mypass', 'https://token.example.org', use_keyring=False)
+ docker_login('https://pierone.example.org', 'services', 'mytok',
+ 'myuser', 'mypass', 'https://token.example.org', use_keyring=False)
with open(path) as fd:
data = yaml.safe_load(fd)
- assert {'auth': 'b2F1dGgyOjEyMzc3', 'email': '[email protected]'} == data.get('auths', {}).get('https://pierone.example.org')
- assert existing_data.get(key) == data.get(key)
+ assert {'auth': 'b2F1dGgyOjEyMzc3', 'email': '[email protected]'} == data.get('auths', {}).get('https://pierone.example.org')
+ assert existing_data.get(key) == data.get(key)
def test_get_latest_tag(monkeypatch):
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 6f58d15..6282253 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -1,9 +1,9 @@
import json
import os
-from click.testing import CliRunner
+import re
from unittest.mock import MagicMock
-import yaml
-import zign.api
+
+from click.testing import CliRunner
from pierone.cli import cli
@@ -40,6 +40,7 @@ def test_login_given_url_option(monkeypatch, tmpdir):
runner = CliRunner()
config = {}
+
def store(data, section):
config.update(**data)
@@ -50,9 +51,9 @@ def test_login_given_url_option(monkeypatch, tmpdir):
monkeypatch.setattr('requests.get', lambda x, timeout: response)
with runner.isolated_filesystem():
- result = runner.invoke(cli, ['login'], catch_exceptions=False, input='pieroneurl\n')
+ runner.invoke(cli, ['login'], catch_exceptions=False, input='pieroneurl\n')
assert config == {'url': 'https://pieroneurl'}
- result = runner.invoke(cli, ['login', '--url', 'someotherregistry'], catch_exceptions=False)
+ runner.invoke(cli, ['login', '--url', 'someotherregistry'], catch_exceptions=False)
with open(os.path.join(str(tmpdir), '.docker/config.json')) as fd:
data = json.load(fd)
assert data['auths']['https://pieroneurl']['auth'] == 'b2F1dGgyOnRvazEyMw=='
@@ -65,7 +66,7 @@ def test_scm_source(monkeypatch, tmpdir):
response.json.return_value = {'url': 'git:somerepo', 'revision': 'myrev123'}
runner = CliRunner()
- monkeypatch.setattr('stups_cli.config.load_config', lambda x: {'url':'foobar'})
+ monkeypatch.setattr('stups_cli.config.load_config', lambda x: {'url': 'foobar'})
monkeypatch.setattr('zign.api.get_token', MagicMock(return_value='tok123'))
monkeypatch.setattr('pierone.cli.get_tags', MagicMock(return_value={}))
monkeypatch.setattr('os.path.expanduser', lambda x: x.replace('~', str(tmpdir)))
@@ -75,12 +76,13 @@ def test_scm_source(monkeypatch, tmpdir):
assert 'myrev123' in result.output
assert 'git:somerepo' in result.output
+
def test_image(monkeypatch, tmpdir):
response = MagicMock()
response.json.return_value = [{'name': '1.0', 'team': 'stups', 'artifact': 'kio'}]
runner = CliRunner()
- monkeypatch.setattr('stups_cli.config.load_config', lambda x: {'url':'foobar'})
+ monkeypatch.setattr('stups_cli.config.load_config', lambda x: {'url': 'foobar'})
monkeypatch.setattr('zign.api.get_token', MagicMock(return_value='tok123'))
monkeypatch.setattr('os.path.expanduser', lambda x: x.replace('~', str(tmpdir)))
monkeypatch.setattr('pierone.api.session.get', MagicMock(return_value=response))
@@ -93,16 +95,130 @@ def test_image(monkeypatch, tmpdir):
def test_tags(monkeypatch, tmpdir):
response = MagicMock()
- response.json.return_value = [{'name': '1.0', 'created_by': 'myuser', 'created': '2015-08-20T08:14:59.432Z'}]
+ response.json.return_value = [
+ # Former pierone payload
+ {
+ 'name': '1.0',
+ 'created_by': 'myuser',
+ 'created': '2015-08-20T08:14:59.432Z'
+ },
+ # New pierone payload with clair but no information about CVEs -- old images
+ {
+ "name": "1.1",
+ "created": "2016-05-19T15:23:41.065Z",
+ "created_by": "myuser",
+ "image": "sha256:here",
+ "clair_id": None,
+ "severity_fix_available": None,
+ "severity_no_fix_available": None
+ },
+ # New pierone payload with clair but no information about CVEs -- still processing
+ {
+ "name": "1.1",
+ "created": "2016-05-19T15:23:41.065Z",
+ "created_by": "myuser",
+ "image": "sha256:here",
+ "clair_id": "sha256:here",
+ "severity_fix_available": None,
+ "severity_no_fix_available": None
+ },
+ # New pierone payload with clair but could not figure out
+ {
+ "name": "1.1",
+ "created": "2016-05-19T15:23:41.065Z",
+ "created_by": "myuser",
+ "image": "sha256:here",
+ "clair_id": "sha256:here",
+ "severity_fix_available": "clair:CouldntFigureOut",
+ "severity_no_fix_available": "clair:CouldntFigureOut"
+ },
+ # New pierone payload with clair with no CVEs found
+ {
+ "name": "1.1",
+ "created": "2016-05-19T15:23:41.065Z",
+ "created_by": "myuser",
+ "image": "sha256:here",
+ "clair_id": "sha256:here",
+ "severity_fix_available": "clair:NoCVEsFound",
+ "severity_no_fix_available": "clair:NoCVEsFound"
+ },
+ # New pierone payload with clair input and info about CVEs
+ {
+ "name": "1.2",
+ "created": "2016-05-23T13:29:17.753Z",
+ "created_by": "myuser",
+ "image": "sha256:here",
+ "clair_id": "sha256:here",
+ "severity_fix_available": "High",
+ "severity_no_fix_available": "Medium"
+ }
+ ]
runner = CliRunner()
- monkeypatch.setattr('stups_cli.config.load_config', lambda x: {'url':'foobar'})
+ monkeypatch.setattr('stups_cli.config.load_config', lambda x: {'url': 'foobar'})
monkeypatch.setattr('zign.api.get_token', MagicMock(return_value='tok123'))
monkeypatch.setattr('os.path.expanduser', lambda x: x.replace('~', str(tmpdir)))
monkeypatch.setattr('pierone.api.session.get', MagicMock(return_value=response))
with runner.isolated_filesystem():
result = runner.invoke(cli, ['tags', 'myteam', 'myart'], catch_exceptions=False)
assert '1.0' in result.output
+ assert 'Fixable CVE Severity' in result.output
+ assert 'Unfixable CVE Severity' in result.output
+ assert 'TOO_OLD' in result.output
+ assert 'NOT_PROCESSED_YET' in result.output
+ assert 'NO_CVES_FOUND' in result.output
+ assert re.search('HIGH\s+MEDIUM', result.output), 'Should how information about CVEs'
+
+
+def test_cves(monkeypatch, tmpdir):
+ pierone_service_payload = [
+ # Former pierone payload
+ {
+ 'name': '1.0',
+ 'created_by': 'myuser',
+ 'created': '2015-08-20T08:14:59.432Z'
+ },
+ # New pierone payload with clair but no information about CVEs
+ {
+ "name": "1.1",
+ "created": "2016-05-19T15:23:41.065Z",
+ "created_by": "myuser",
+ "image": "sha256:here",
+ "clair_id": None,
+ "severity_fix_available": None,
+ "severity_no_fix_available": None
+ },
+ # New pierone payload with clair input and info about CVEs
+ {
+ "name": "1.2",
+ "created": "2016-05-23T13:29:17.753Z",
+ "created_by": "myuser",
+ "image": "sha256:here",
+ "clair_id": "sha256:here",
+ "severity_fix_available": "High",
+ "severity_no_fix_available": "Medium"
+ }
+ ]
+
+ with open(os.path.join(os.path.dirname(__file__),
+ 'fixtures', 'clair_response.json'), 'r') as fixture:
+ clair_service_payload = json.loads(fixture.read())
+
+ response = MagicMock()
+ response.json.side_effect = [
+ pierone_service_payload,
+ clair_service_payload
+ ]
+
+ runner = CliRunner()
+ monkeypatch.setattr('stups_cli.config.load_config', lambda x: {'url': 'foobar', 'clair_url': 'barfoo'})
+ monkeypatch.setattr('zign.api.get_token', MagicMock(return_value='tok123'))
+ monkeypatch.setattr('os.path.expanduser', lambda x: x.replace('~', str(tmpdir)))
+ monkeypatch.setattr('pierone.api.session.get', MagicMock(return_value=response))
+ with runner.isolated_filesystem():
+ result = runner.invoke(cli, ['cves', 'myteam', 'myart', '1.2'], catch_exceptions=False)
+ assert 'CVE-2013-5123' in result.output
+ assert re.match('[^\n]+\n[^\n]+HIGH', result.output), 'Results should be ordered by highest priority'
def test_latest(monkeypatch, tmpdir):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 3
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
click==8.1.8
clickclick==20.10.2
coverage==7.8.0
dnspython==2.7.0
exceptiongroup==1.2.2
idna==3.10
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytest-cov==6.0.0
PyYAML==6.0.2
requests==2.32.3
stups-cli-support==1.1.22
-e git+https://github.com/zalando-stups/pierone-cli.git@903f8e27f3e084fd9116929139a1ccd7f700f42f#egg=stups_pierone
stups-tokens==1.1.19
stups-zign==1.2
tomli==2.2.1
urllib3==2.3.0
| name: pierone-cli
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- click==8.1.8
- clickclick==20.10.2
- coverage==7.8.0
- dnspython==2.7.0
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-cov==6.0.0
- pyyaml==6.0.2
- requests==2.32.3
- stups-cli-support==1.1.22
- stups-tokens==1.1.19
- stups-zign==1.2
- tomli==2.2.1
- urllib3==2.3.0
prefix: /opt/conda/envs/pierone-cli
| [
"tests/test_cli.py::test_tags",
"tests/test_cli.py::test_cves"
]
| [
"tests/test_api.py::test_docker_login",
"tests/test_api.py::test_keep_dockercfg_entries"
]
| [
"tests/test_api.py::test_docker_login_service_token",
"tests/test_api.py::test_get_latest_tag",
"tests/test_api.py::test_get_latest_tag_IOException",
"tests/test_api.py::test_get_latest_tag_non_json",
"tests/test_api.py::test_unauthorized",
"tests/test_api.py::test_image_exists",
"tests/test_api.py::test_image_exists_IOException",
"tests/test_api.py::test_image_exists_but_other_version",
"tests/test_api.py::test_image_not_exists",
"tests/test_cli.py::test_version",
"tests/test_cli.py::test_login",
"tests/test_cli.py::test_login_given_url_option",
"tests/test_cli.py::test_scm_source",
"tests/test_cli.py::test_image",
"tests/test_cli.py::test_latest",
"tests/test_cli.py::test_latest_not_found",
"tests/test_cli.py::test_url_without_scheme"
]
| []
| Apache License 2.0 | 555 | [
"pierone/cli.py",
".gitignore",
"tox.ini"
]
| [
"pierone/cli.py",
".gitignore",
"tox.ini"
]
|
|
craffel__mir_eval-195 | f858df347c05c83159875e8f6de84f0041dbabca | 2016-05-26 17:31:07 | a4acbfad96db3241388c818534dc2bd08b48d188 | bmcfee: @craffel @justinsalamon ready for CR I think.
craffel: One minor comment otherwise LGTM. | diff --git a/mir_eval/sonify.py b/mir_eval/sonify.py
index f614684..a40a0d0 100644
--- a/mir_eval/sonify.py
+++ b/mir_eval/sonify.py
@@ -5,6 +5,8 @@ All functions return a raw signal at the specified sampling rate.
import numpy as np
from numpy.lib.stride_tricks import as_strided
+from scipy.interpolate import interp1d
+
from . import util
from . import chord
@@ -140,6 +142,59 @@ def time_frequency(gram, frequencies, times, fs, function=np.sin, length=None):
return output
+def pitch_contour(times, frequencies, fs, function=np.sin, length=None,
+ kind='linear'):
+ '''Sonify a pitch contour.
+
+ Parameters
+ ----------
+ times : np.ndarray
+ time indices for each frequency measurement, in seconds
+
+ frequencies : np.ndarray
+ frequency measurements, in Hz.
+ Non-positive measurements will be interpreted as un-voiced samples.
+
+ fs : int
+ desired sampling rate of the output signal
+
+ function : function
+ function to use to synthesize notes, should be 2pi-periodic
+
+ length : int
+ desired number of samples in the output signal,
+ defaults to ``max(times)*fs``
+
+ kind : str
+ Interpolation mode for the frequency estimator.
+ See: ``scipy.interpolate.interp1d`` for valid settings.
+
+ Returns
+ -------
+ output : np.ndarray
+ synthesized version of the pitch contour
+ '''
+
+ fs = float(fs)
+
+ if length is None:
+ length = int(times.max() * fs)
+
+ # Squash the negative frequencies.
+ # wave(0) = 0, so clipping here will un-voice the corresponding instants
+ frequencies = np.maximum(frequencies, 0.0)
+
+ # Build a frequency interpolator
+ f_interp = interp1d(times * fs, 2 * np.pi * frequencies / fs, kind=kind,
+ fill_value=0.0, bounds_error=False, copy=False)
+
+ # Estimate frequency at sample points
+ f_est = f_interp(np.arange(length))
+
+ # Sonify the waveform
+ return function(np.cumsum(f_est))
+
+
def chroma(chromagram, times, fs, **kwargs):
"""Reverse synthesis of a chromagram (semitone matrix)
| continuous pitch sonification
As per [this discussion](https://github.com/marl/jams/pull/91), we could pull in the code from @justinsalamon's [melosynth](https://github.com/justinsalamon/melosynth) package. | craffel/mir_eval | diff --git a/tests/test_sonify.py b/tests/test_sonify.py
index a1975c9..4d0d564 100644
--- a/tests/test_sonify.py
+++ b/tests/test_sonify.py
@@ -2,6 +2,7 @@
import mir_eval
import numpy as np
+import scipy
def test_clicks():
@@ -53,3 +54,37 @@ def test_chords():
['C', 'C:maj', 'D:min7', 'E:min', 'C#', 'C', 'C', 'C', 'C', 'C'],
intervals, fs, length=fs*11)
assert len(signal) == 11*fs
+
+
+def test_pitch_contour():
+
+ # Generate some random pitch
+ fs = 8000
+ times = np.linspace(0, 5, num=5 * fs, endpoint=True)
+
+ noise = scipy.ndimage.gaussian_filter1d(np.random.randn(len(times)),
+ sigma=256)
+ freqs = 440.0 * 2.0**(16 * noise)
+
+ # negate a bunch of sequences
+ idx = np.unique(np.random.randint(0, high=len(times), size=32))
+ for start, end in zip(idx[::2], idx[1::2]):
+ freqs[start:end] *= -1
+
+ # Test with inferring duration
+ x = mir_eval.sonify.pitch_contour(times, freqs, fs)
+ assert len(x) == fs * 5
+
+ # Test with an explicit duration
+ # This forces the interpolator to go off the end of the sampling grid,
+ # which should result in a constant sequence in the output
+ x = mir_eval.sonify.pitch_contour(times, freqs, fs, length=fs * 7)
+ assert len(x) == fs * 7
+ assert np.allclose(x[-fs * 2:], x[-fs * 2])
+
+ # Test with an explicit duration and a fixed offset
+ # This forces the interpolator to go off the beginning of
+ # the sampling grid, which should result in a constant output
+ x = mir_eval.sonify.pitch_contour(times + 5.0, freqs, fs, length=fs * 7)
+ assert len(x) == fs * 7
+ assert np.allclose(x[:fs * 5], x[0])
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 3,
"test_score": 2
},
"num_modified_files": 1
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "numpy>=1.7.0 scipy>=0.9.0 future six",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
future @ file:///croot/future_1730902796226/work
iniconfig==2.1.0
-e git+https://github.com/craffel/mir_eval.git@f858df347c05c83159875e8f6de84f0041dbabca#egg=mir_eval
nose==1.3.7
numpy @ file:///croot/numpy_and_numpy_base_1736283260865/work/dist/numpy-2.0.2-cp39-cp39-linux_x86_64.whl#sha256=3387e3e62932fa288bc18e8f445ce19e998b418a65ed2064dd40a054f976a6c7
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
scipy @ file:///croot/scipy_1733756309941/work/dist/scipy-1.13.1-cp39-cp39-linux_x86_64.whl#sha256=3b247b926209f2d9f719ebae39faf3ff891b2596150ed8f8349adfc3eb19441c
six @ file:///tmp/build/80754af9/six_1644875935023/work
tomli==2.2.1
| name: mir_eval
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- blas=1.0=openblas
- ca-certificates=2025.2.25=h06a4308_0
- future=1.0.0=py39h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgfortran-ng=11.2.0=h00389a5_1
- libgfortran5=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libopenblas=0.3.21=h043d6bf_0
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- numpy=2.0.2=py39heeff2f4_0
- numpy-base=2.0.2=py39h8a23956_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- pybind11-abi=4=hd3eb1b0_1
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- scipy=1.13.1=py39heeff2f4_1
- setuptools=72.1.0=py39h06a4308_0
- six=1.16.0=pyhd3eb1b0_1
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- nose==1.3.7
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- tomli==2.2.1
prefix: /opt/conda/envs/mir_eval
| [
"tests/test_sonify.py::test_pitch_contour"
]
| [
"tests/test_sonify.py::test_clicks",
"tests/test_sonify.py::test_chords"
]
| [
"tests/test_sonify.py::test_time_frequency",
"tests/test_sonify.py::test_chroma"
]
| []
| MIT License | 556 | [
"mir_eval/sonify.py"
]
| [
"mir_eval/sonify.py"
]
|
box__box-python-sdk-137 | 481a86227d6d063f2e4e4ae880f4e12cd16dab06 | 2016-05-26 20:10:25 | ded623f4b6de0530d8f983d3c3d2cafe646c126b | boxcla: Verified that @jmoldow has signed the CLA. Thanks for the pull request!
Jeff-Meadows: 👍 | diff --git a/HISTORY.rst b/HISTORY.rst
index e7ca71f..25def3f 100644
--- a/HISTORY.rst
+++ b/HISTORY.rst
@@ -6,7 +6,12 @@ Release History
Upcoming
++++++++
-1.5.2
+1.5.3
+++++++++++++++++++
+
+- Bugfix so that ``JWTAuth`` opens the PEM private key file in ``'rb'`` mode.
+
+1.5.2 (2016-05-19)
++++++++++++++++++
- Bugfix so that ``OAuth2`` always has the correct tokens after a call to ``refresh()``.
diff --git a/boxsdk/auth/jwt_auth.py b/boxsdk/auth/jwt_auth.py
index 2d81697..266fff2 100644
--- a/boxsdk/auth/jwt_auth.py
+++ b/boxsdk/auth/jwt_auth.py
@@ -1,6 +1,6 @@
# coding: utf-8
-from __future__ import unicode_literals
+from __future__ import absolute_import, unicode_literals
from datetime import datetime, timedelta
import random
@@ -95,7 +95,7 @@ def __init__(
refresh_token=None,
network_layer=network_layer,
)
- with open(rsa_private_key_file_sys_path) as key_file:
+ with open(rsa_private_key_file_sys_path, 'rb') as key_file:
self._rsa_private_key = serialization.load_pem_private_key(
key_file.read(),
password=rsa_private_key_passphrase,
@@ -182,6 +182,7 @@ def authenticate_instance(self):
:rtype:
`unicode`
"""
+ self._user_id = None
return self._auth_with_jwt(self._enterprise_id, 'enterprise')
def _refresh(self, access_token):
diff --git a/boxsdk/version.py b/boxsdk/version.py
index 9f6ca5f..e4cd37c 100644
--- a/boxsdk/version.py
+++ b/boxsdk/version.py
@@ -3,4 +3,4 @@
from __future__ import unicode_literals, absolute_import
-__version__ = '1.5.2'
+__version__ = '1.5.3'
| Private key file throws error when trying load_pem_private_key
I'm running python-3.5.0 on OS X 10.11.5. When trying to use JWT Authorization, creating a JWTAuth object fails with:
```
Traceback (most recent call last):
File "<stdin>", line 6, in <module>
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/boxsdk/auth/jwt_auth.py", line 102, in __init__
backend=default_backend(),
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/cryptography/hazmat/primitives/serialization.py", line 20, in load_pem_private_key
return backend.load_pem_private_key(data, password)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/cryptography/hazmat/backends/multibackend.py", line 289, in load_pem_private_key
return b.load_pem_private_key(data, password)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/cryptography/hazmat/backends/openssl/backend.py", line 1069, in load_pem_private_key
password,
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/cryptography/hazmat/backends/openssl/backend.py", line 1247, in _load_key
mem_bio = self._bytes_to_bio(data)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/cryptography/hazmat/backends/openssl/backend.py", line 477, in _bytes_to_bio
data_char_p = self._ffi.new("char[]", data)
TypeError: initializer for ctype 'char[]' must be a bytes or list or tuple, not str
```
The default `openssl` backend is what seems to be the problem, but I'm not sure if I'm missing a step. [This issue](https://github.com/pyca/pyopenssl/issues/15) suggests that this is an expected result.
I have tried a workaround that seems to work, but there are some issues there, as well. | box/box-python-sdk | diff --git a/test/unit/auth/test_jwt_auth.py b/test/unit/auth/test_jwt_auth.py
index a117850..c65af57 100644
--- a/test/unit/auth/test_jwt_auth.py
+++ b/test/unit/auth/test_jwt_auth.py
@@ -73,7 +73,7 @@ def jwt_auth_init_mocks(
}
mock_network_layer.request.return_value = successful_token_response
- key_file_read_data = 'key_file_read_data'
+ key_file_read_data = b'key_file_read_data'
with patch('boxsdk.auth.jwt_auth.open', mock_open(read_data=key_file_read_data), create=True) as jwt_auth_open:
with patch('cryptography.hazmat.primitives.serialization.load_pem_private_key') as load_pem_private_key:
oauth = JWTAuth(
@@ -88,7 +88,7 @@ def jwt_auth_init_mocks(
jwt_key_id=jwt_key_id,
)
- jwt_auth_open.assert_called_once_with(sentinel.rsa_path)
+ jwt_auth_open.assert_called_once_with(sentinel.rsa_path, 'rb')
jwt_auth_open.return_value.read.assert_called_once_with() # pylint:disable=no-member
load_pem_private_key.assert_called_once_with(
key_file_read_data,
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 3
} | 1.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-xdist",
"mock",
"sqlalchemy",
"bottle"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | async-timeout==4.0.2
attrs==22.2.0
bottle==0.13.2
-e git+https://github.com/box/box-python-sdk.git@481a86227d6d063f2e4e4ae880f4e12cd16dab06#egg=boxsdk
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
cryptography==40.0.2
execnet==1.9.0
greenlet==2.0.2
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
mock==5.2.0
packaging==21.3
pluggy==1.0.0
py==1.11.0
pycparser==2.21
PyJWT==2.4.0
pyparsing==3.1.4
pytest==7.0.1
pytest-xdist==3.0.2
redis==4.3.6
requests==2.27.1
requests-toolbelt==1.0.0
six==1.17.0
SQLAlchemy==1.4.54
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: box-python-sdk
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- async-timeout==4.0.2
- attrs==22.2.0
- bottle==0.13.2
- cffi==1.15.1
- charset-normalizer==2.0.12
- cryptography==40.0.2
- execnet==1.9.0
- greenlet==2.0.2
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- mock==5.2.0
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pycparser==2.21
- pyjwt==2.4.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-xdist==3.0.2
- redis==4.3.6
- requests==2.27.1
- requests-toolbelt==1.0.0
- six==1.17.0
- sqlalchemy==1.4.54
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/box-python-sdk
| [
"test/unit/auth/test_jwt_auth.py::test_authenticate_app_user_sends_post_request_with_correct_params[16-RS256-None]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_app_user_sends_post_request_with_correct_params[16-RS256-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_app_user_sends_post_request_with_correct_params[16-RS512-None]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_app_user_sends_post_request_with_correct_params[16-RS512-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_app_user_sends_post_request_with_correct_params[32-RS256-None]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_app_user_sends_post_request_with_correct_params[32-RS256-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_app_user_sends_post_request_with_correct_params[32-RS512-None]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_app_user_sends_post_request_with_correct_params[32-RS512-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_app_user_sends_post_request_with_correct_params[128-RS256-None]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_app_user_sends_post_request_with_correct_params[128-RS256-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_app_user_sends_post_request_with_correct_params[128-RS512-None]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_app_user_sends_post_request_with_correct_params[128-RS512-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[16-RS256-None]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[16-RS256-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[16-RS512-None]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[16-RS512-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[32-RS256-None]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[32-RS256-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[32-RS512-None]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[32-RS512-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[128-RS256-None]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[128-RS256-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[128-RS512-None]",
"test/unit/auth/test_jwt_auth.py::test_authenticate_instance_sends_post_request_with_correct_params[128-RS512-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[16-RS256-None]",
"test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[16-RS256-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[16-RS512-None]",
"test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[16-RS512-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[32-RS256-None]",
"test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[32-RS256-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[32-RS512-None]",
"test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[32-RS512-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[128-RS256-None]",
"test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[128-RS256-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[128-RS512-None]",
"test/unit/auth/test_jwt_auth.py::test_refresh_app_user_sends_post_request_with_correct_params[128-RS512-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[16-RS256-None]",
"test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[16-RS256-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[16-RS512-None]",
"test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[16-RS512-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[32-RS256-None]",
"test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[32-RS256-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[32-RS512-None]",
"test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[32-RS512-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[128-RS256-None]",
"test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[128-RS256-strong_password]",
"test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[128-RS512-None]",
"test/unit/auth/test_jwt_auth.py::test_refresh_instance_sends_post_request_with_correct_params[128-RS512-strong_password]"
]
| []
| []
| []
| Apache License 2.0 | 557 | [
"HISTORY.rst",
"boxsdk/auth/jwt_auth.py",
"boxsdk/version.py"
]
| [
"HISTORY.rst",
"boxsdk/auth/jwt_auth.py",
"boxsdk/version.py"
]
|
mapbox__mapbox-sdk-py-124 | 2f24f1661c1083959c4a0cbd2c1cb33139941a65 | 2016-05-26 22:27:32 | 2c11fdee6eee83ea82398cc0756ac7f35aada801 | diff --git a/README.rst b/README.rst
index c20b789..8b6cf8a 100644
--- a/README.rst
+++ b/README.rst
@@ -75,7 +75,7 @@ To run the examples as integration tests on your own Mapbox account
.. code:: bash
- MAPBOX_ACCESS_TOKEN="MY_ACCESS_TOKEN" py.test --doctest-glob='*.md' *.md
+ MAPBOX_ACCESS_TOKEN="MY_ACCESS_TOKEN" py.test --doctest-glob='*.md' docs/*.md
See Also
========
diff --git a/docs/geocoding.md b/docs/geocoding.md
index 3586829..01d57c6 100644
--- a/docs/geocoding.md
+++ b/docs/geocoding.md
@@ -129,6 +129,24 @@ Place results may be biased toward a given longitude and latitude.
```
+## Forward geocoding with bounding box
+
+Place results may be limited to those falling within a given bounding box.
+
+```python
+
+>>> response = geocoder.forward(
+... "washington", bbox=[-78.338320,38.520792,-77.935454,38.864909])
+>>> response.status_code
+200
+>>> first = response.geojson()['features'][0]
+>>> first['place_name']
+'Washington, Virginia, United States'
+>>> first['geometry']['coordinates']
+[-78.1594, 38.7135]
+
+```
+
## Reverse geocoding
Places at a longitude, latitude point may be found using `Geocoder.reverse()`.
diff --git a/mapbox/services/geocoding.py b/mapbox/services/geocoding.py
index ef7174f..e357a04 100644
--- a/mapbox/services/geocoding.py
+++ b/mapbox/services/geocoding.py
@@ -36,7 +36,7 @@ class Geocoder(Service):
raise InvalidPlaceTypeError(pt)
return {'types': ",".join(types)}
- def forward(self, address, types=None, lon=None, lat=None, country=None):
+ def forward(self, address, types=None, lon=None, lat=None, country=None, bbox=None):
"""Returns a Requests response object that contains a GeoJSON
collection of places matching the given address.
@@ -46,7 +46,7 @@ class Geocoder(Service):
Place results may be constrained to those of one or more types
or be biased toward a given longitude and latitude.
- See: https://www.mapbox.com/developers/api/geocoding/#forward."""
+ See: https://www.mapbox.com/api-documentation/#geocoding."""
uri = URITemplate(self.baseuri + '/{dataset}/{query}.json').expand(
dataset=self.name, query=address.encode('utf-8'))
params = {}
@@ -58,6 +58,8 @@ class Geocoder(Service):
params.update(proximity='{0},{1}'.format(
round(float(lon), self.precision.get('proximity', 3)),
round(float(lat), self.precision.get('proximity', 3))))
+ if bbox is not None:
+ params.update(bbox='{0},{1},{2},{3}'.format(*bbox))
resp = self.session.get(uri, params=params)
self.handle_http_error(resp)
@@ -75,7 +77,7 @@ class Geocoder(Service):
`response.geojson()` returns the geocoding result as GeoJSON.
`response.status_code` returns the HTTP API status code.
- See: https://www.mapbox.com/developers/api/geocoding/#reverse."""
+ See: https://www.mapbox.com/api-documentation/#retrieve-places-near-a-location."""
uri = URITemplate(self.baseuri + '/{dataset}/{lon},{lat}.json').expand(
dataset=self.name,
lon=str(round(float(lon), self.precision.get('reverse', 5))),
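The new keyword takes the box as `(minlon, minlat, maxlon, maxlat)` and serializes it into the query string; the accompanying test (which mocks the HTTP call, so the token below is just a placeholder) exercises it like this:

```python
import mapbox

geocoder = mapbox.Geocoder(access_token='pk.test')  # placeholder token
response = geocoder.forward(
    'washington', bbox=(-78.3284, 38.6039, -78.0428, 38.7841))
# requests .../washington.json?bbox=-78.3284,38.6039,-78.0428,38.7841
```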
| Support geocoding bbox parameter | mapbox/mapbox-sdk-py | diff --git a/tests/test_geocoder.py b/tests/test_geocoder.py
index e30504c..da98a3d 100644
--- a/tests/test_geocoder.py
+++ b/tests/test_geocoder.py
@@ -212,6 +212,23 @@ def test_geocoder_proximity_rounding():
for coord in re.split(r'(%2C|,)', match.group(1)):
assert _check_coordinate_precision(coord, 3)
[email protected]
+def test_geocoder_forward_bbox():
+ """Bbox parameter works"""
+
+ responses.add(
+ responses.GET,
+ 'https://api.mapbox.com/geocoding/v5/mapbox.places/washington.json?bbox=-78.3284%2C38.6039%2C-78.0428%2C38.7841&access_token=pk.test',
+ match_querystring=True,
+ body='{"query": ["washington"]}', status=200,
+ content_type='application/json')
+
+ response = mapbox.Geocoder(
+ access_token='pk.test').forward(
+ 'washington', bbox=(-78.3284,38.6039,-78.0428,38.7841))
+ assert response.status_code == 200
+ assert response.json()['query'] == ["washington"]
+
@responses.activate
def test_geocoder_reverse_rounding():
"""Reverse geocoding parameters are rounded to 5 decimal places"""
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 3,
"test_score": 1
},
"num_modified_files": 3
} | 0.8 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"pip install -U pip"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | boto3==1.37.23
botocore==1.37.23
CacheControl==0.14.2
cachetools==5.5.2
certifi==2025.1.31
chardet==5.2.0
charset-normalizer==3.4.1
click==8.1.8
click-plugins==1.1.1
cligj==0.7.2
colorama==0.4.6
coverage==7.8.0
coveralls==4.0.1
distlib==0.3.9
docopt==0.6.2
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
filelock==3.18.0
idna==3.10
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
iso3166==2.1.1
jmespath==1.0.1
-e git+https://github.com/mapbox/mapbox-sdk-py.git@2f24f1661c1083959c4a0cbd2c1cb33139941a65#egg=mapbox
msgpack==1.1.0
packaging @ file:///croot/packaging_1734472117206/work
platformdirs==4.3.7
pluggy @ file:///croot/pluggy_1733169602837/work
pyproject-api==1.9.0
pytest @ file:///croot/pytest_1738938843180/work
pytest-cov==6.0.0
python-dateutil==2.9.0.post0
PyYAML==6.0.2
requests==2.32.3
responses==0.25.7
s3transfer==0.11.4
six==1.17.0
tomli==2.2.1
tox==4.25.0
typing_extensions==4.13.0
uritemplate==4.1.1
uritemplate.py==3.0.2
urllib3==1.26.20
virtualenv==20.29.3
| name: mapbox-sdk-py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- boto3==1.37.23
- botocore==1.37.23
- cachecontrol==0.14.2
- cachetools==5.5.2
- certifi==2025.1.31
- chardet==5.2.0
- charset-normalizer==3.4.1
- click==8.1.8
- click-plugins==1.1.1
- cligj==0.7.2
- colorama==0.4.6
- coverage==7.8.0
- coveralls==4.0.1
- distlib==0.3.9
- docopt==0.6.2
- filelock==3.18.0
- idna==3.10
- iso3166==2.1.1
- jmespath==1.0.1
- msgpack==1.1.0
- pip==25.0.1
- platformdirs==4.3.7
- pyproject-api==1.9.0
- pytest-cov==6.0.0
- python-dateutil==2.9.0.post0
- pyyaml==6.0.2
- requests==2.32.3
- responses==0.25.7
- s3transfer==0.11.4
- six==1.17.0
- tomli==2.2.1
- tox==4.25.0
- typing-extensions==4.13.0
- uritemplate==4.1.1
- uritemplate-py==3.0.2
- urllib3==1.26.20
- virtualenv==20.29.3
prefix: /opt/conda/envs/mapbox-sdk-py
| [
"tests/test_geocoder.py::test_geocoder_forward_bbox"
]
| []
| [
"tests/test_geocoder.py::test_geocoder_default_name",
"tests/test_geocoder.py::test_geocoder_name",
"tests/test_geocoder.py::test_geocoder_forward",
"tests/test_geocoder.py::test_geocoder_forward_geojson",
"tests/test_geocoder.py::test_geocoder_reverse",
"tests/test_geocoder.py::test_geocoder_reverse_geojson",
"tests/test_geocoder.py::test_geocoder_place_types",
"tests/test_geocoder.py::test_validate_country_codes_err",
"tests/test_geocoder.py::test_validate_country",
"tests/test_geocoder.py::test_validate_place_types_err",
"tests/test_geocoder.py::test_validate_place_types",
"tests/test_geocoder.py::test_geocoder_forward_types",
"tests/test_geocoder.py::test_geocoder_reverse_types",
"tests/test_geocoder.py::test_geocoder_forward_proximity",
"tests/test_geocoder.py::test_geocoder_proximity_rounding",
"tests/test_geocoder.py::test_geocoder_reverse_rounding",
"tests/test_geocoder.py::test_geocoder_unicode"
]
| []
| MIT License | 558 | [
"README.rst",
"mapbox/services/geocoding.py",
"docs/geocoding.md"
]
| [
"README.rst",
"mapbox/services/geocoding.py",
"docs/geocoding.md"
]
|
|
falconry__falcon-801 | 10d1b7e770045b95ff5cb0cc3b4adfcc583049e2 | 2016-05-26 23:06:38 | 67d61029847cbf59e4053c8a424df4f9f87ad36f | painterjd: This looks good.
It would be nice to see a test for this, however.
codecov-io: ## [Current coverage][cc-pull] is **100%**
> Merging [#801][cc-pull] into [master][cc-base-branch] will not change coverage
```diff
@@ master #801 diff @@
==========================================
Files 29 29
Lines 1777 1778 +1
Methods 0 0
Messages 0 0
Branches 299 299
==========================================
+ Hits 1777 1778 +1
Misses 0 0
Partials 0 0
```
> Powered by [Codecov](https://codecov.io?src=pr). Last updated by [53b198c...1b84eeb][cc-compare]
[cc-base-branch]: https://codecov.io/gh/falconry/falcon/branch/master?src=pr
[cc-compare]: https://codecov.io/gh/falconry/falcon/compare/53b198c6d83cb04f051424e9cc33b593c52e65d7...1b84eeb8f3d8941113801958b76939d9bc726841
[cc-pull]: https://codecov.io/gh/falconry/falcon/pull/801?src=pr
yashmehrotra: @painterjd Hi, I have added the test for this. Please Review.
Thanks. | diff --git a/falcon/responders.py b/falcon/responders.py
index b5f6186..34da807 100644
--- a/falcon/responders.py
+++ b/falcon/responders.py
@@ -58,5 +58,6 @@ def create_default_options(allowed_methods):
def on_options(req, resp, **kwargs):
resp.status = HTTP_204
resp.set_header('Allow', allowed)
+ resp.set_header('Content-Length', '0')
return on_options
diff --git a/falcon/response.py b/falcon/response.py
index f3e2c9b..452432d 100644
--- a/falcon/response.py
+++ b/falcon/response.py
@@ -481,7 +481,12 @@ class Response(object):
content_location = header_property(
'Content-Location',
- 'Sets the Content-Location header.',
+ """Sets the Content-Location header.
+
+ This value will be URI encoded per RFC 3986. If the value that is
+ being set is already URI encoded it should be decoded first or the
+ header should be set manually using the set_header method.
+ """,
uri_encode)
content_range = header_property(
@@ -523,7 +528,12 @@ class Response(object):
location = header_property(
'Location',
- 'Sets the Location header.',
+ """Sets the Location header.
+
+ This value will be URI encoded per RFC 3986. If the value that is
+ being set is already URI encoded it should be decoded first or the
+ header should be set manually using the set_header method.
+ """,
uri_encode)
retry_after = header_property(
diff --git a/falcon/util/misc.py b/falcon/util/misc.py
index c01c05c..5b02f05 100644
--- a/falcon/util/misc.py
+++ b/falcon/util/misc.py
@@ -19,6 +19,8 @@ import warnings
import six
+from falcon import status_codes
+
__all__ = (
'deprecated',
'http_now',
@@ -26,6 +28,7 @@ __all__ = (
'http_date_to_dt',
'to_query_str',
'get_bound_method',
+ 'get_http_status'
)
@@ -210,3 +213,36 @@ def get_bound_method(obj, method_name):
raise AttributeError(msg)
return method
+
+
+def get_http_status(status_code, default_reason='Unknown'):
+ """Gets both the http status code and description from just a code
+
+ Args:
+ status_code: integer or string that can be converted to an integer
+ default_reason: default text to be appended to the status_code
+ if the lookup does not find a result
+
+ Returns:
+ str: status code e.g. "404 Not Found"
+
+ Raises:
+ ValueError: the value entered could not be converted to an integer
+
+ """
+ # sanitize inputs
+ try:
+ code = float(status_code) # float can validate values like "401.1"
+ code = int(code) # converting to int removes the decimal places
+ if code < 100:
+ raise ValueError
+ except ValueError:
+ raise ValueError('get_http_status failed: "%s" is not a '
+ 'valid status code', status_code)
+
+ # lookup the status code
+ try:
+ return getattr(status_codes, 'HTTP_' + str(code))
+ except AttributeError:
+ # not found
+ return str(code) + ' ' + default_reason
diff --git a/tox.ini b/tox.ini
index 13a7b9d..7538965 100644
--- a/tox.ini
+++ b/tox.ini
@@ -27,7 +27,7 @@ commands = {toxinidir}/tools/clean.sh {toxinidir}/falcon
whitelist_externals = bash
mv
commands = {toxinidir}/tools/clean.sh {toxinidir}/falcon
- nosetests --with-coverage
+ nosetests --with-coverage []
bash -c "if [ ! -d .coverage_data ]; then mkdir .coverage_data; fi"
mv {toxinidir}/.coverage {toxinidir}/.coverage_data/.coverage.{envname}
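The `misc.py` hunk above also introduces `get_http_status`; its expected behaviour, lifted from the accompanying tests, is:

```python
import falcon

assert falcon.get_http_status(404) == falcon.HTTP_404
assert falcon.get_http_status('404.3') == falcon.HTTP_404  # floats are truncated
assert falcon.get_http_status(123) == '123 Unknown'        # fallback reason text
assert falcon.get_http_status(123, 'Go Away') == '123 Go Away'
```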
| Default OPTIONS responder does not set Content-Length to "0"
Per RFC 7231:
> A server MUST generate a Content-Length field with a value of "0" if no payload body is to be sent in the response.
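A minimal sketch of a responder that satisfies the requirement, mirroring the one-line fix to `falcon/responders.py` above (the `Allow` value is illustrative):

```python
import falcon

def on_options(req, resp, **kwargs):
    resp.status = falcon.HTTP_204
    resp.set_header('Allow', 'GET, OPTIONS')  # whatever the resource supports
    resp.set_header('Content-Length', '0')    # RFC 7231: MUST be "0" with no body
```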
| falconry/falcon | diff --git a/tests/test_headers.py b/tests/test_headers.py
index 8880992..838755d 100644
--- a/tests/test_headers.py
+++ b/tests/test_headers.py
@@ -534,6 +534,12 @@ class TestHeaders(testing.TestCase):
self._check_link_header(resource, expected_value)
+ def test_content_length_options(self):
+ result = self.simulate_options()
+
+ content_length = '0'
+ self.assertEqual(result.headers['Content-Length'], content_length)
+
# ----------------------------------------------------------------------
# Helpers
# ----------------------------------------------------------------------
diff --git a/tests/test_utils.py b/tests/test_utils.py
index fef8bec..957a959 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -297,6 +297,21 @@ class TestFalconUtils(testtools.TestCase):
self.assertEqual(uri.parse_host('falcon.example.com:42'),
('falcon.example.com', 42))
+ def test_get_http_status(self):
+ self.assertEqual(falcon.get_http_status(404), falcon.HTTP_404)
+ self.assertEqual(falcon.get_http_status(404.3), falcon.HTTP_404)
+ self.assertEqual(falcon.get_http_status('404.3'), falcon.HTTP_404)
+ self.assertEqual(falcon.get_http_status(404.9), falcon.HTTP_404)
+ self.assertEqual(falcon.get_http_status('404'), falcon.HTTP_404)
+ self.assertEqual(falcon.get_http_status(123), '123 Unknown')
+ self.assertRaises(ValueError, falcon.get_http_status, 'not_a_number')
+ self.assertRaises(ValueError, falcon.get_http_status, 0)
+ self.assertRaises(ValueError, falcon.get_http_status, 99)
+ self.assertRaises(ValueError, falcon.get_http_status, -404.3)
+ self.assertRaises(ValueError, falcon.get_http_status, '-404')
+ self.assertRaises(ValueError, falcon.get_http_status, '-404.3')
+ self.assertEqual(falcon.get_http_status(123, 'Go Away'), '123 Go Away')
+
class TestFalconTesting(testing.TestBase):
"""Catch some uncommon branches not covered elsewhere."""
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 4
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"ddt",
"testtools",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": [
"tools/test-requires"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
ddt==1.7.2
-e git+https://github.com/falconry/falcon.git@10d1b7e770045b95ff5cb0cc3b4adfcc583049e2#egg=falcon
fixtures==4.0.1
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
nose==1.3.7
packaging==21.3
pbr==6.1.1
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
python-mimeparse==1.6.0
PyYAML==6.0.1
requests==2.27.1
six==1.17.0
testtools==2.6.0
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: falcon
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- charset-normalizer==2.0.12
- coverage==6.2
- ddt==1.7.2
- fixtures==4.0.1
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- nose==1.3.7
- packaging==21.3
- pbr==6.1.1
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- python-mimeparse==1.6.0
- pyyaml==6.0.1
- requests==2.27.1
- six==1.17.0
- testtools==2.6.0
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/falcon
| [
"tests/test_headers.py::TestHeaders::test_content_length_options",
"tests/test_utils.py::TestFalconUtils::test_get_http_status"
]
| [
"tests/test_utils.py::TestFalconUtils::test_deprecated_decorator"
]
| [
"tests/test_headers.py::TestHeaders::test_add_link_complex",
"tests/test_headers.py::TestHeaders::test_add_link_multiple",
"tests/test_headers.py::TestHeaders::test_add_link_single",
"tests/test_headers.py::TestHeaders::test_add_link_with_anchor",
"tests/test_headers.py::TestHeaders::test_add_link_with_hreflang",
"tests/test_headers.py::TestHeaders::test_add_link_with_hreflang_multi",
"tests/test_headers.py::TestHeaders::test_add_link_with_title",
"tests/test_headers.py::TestHeaders::test_add_link_with_title_star",
"tests/test_headers.py::TestHeaders::test_add_link_with_type_hint",
"tests/test_headers.py::TestHeaders::test_content_header_missing",
"tests/test_headers.py::TestHeaders::test_content_length",
"tests/test_headers.py::TestHeaders::test_content_type_no_body",
"tests/test_headers.py::TestHeaders::test_custom_content_type",
"tests/test_headers.py::TestHeaders::test_default_media_type",
"tests/test_headers.py::TestHeaders::test_default_value",
"tests/test_headers.py::TestHeaders::test_headers_as_list",
"tests/test_headers.py::TestHeaders::test_no_content_length_1_204_No_Content",
"tests/test_headers.py::TestHeaders::test_no_content_length_2_304_Not_Modified",
"tests/test_headers.py::TestHeaders::test_no_content_type_1_204_No_Content",
"tests/test_headers.py::TestHeaders::test_no_content_type_2_304_Not_Modified",
"tests/test_headers.py::TestHeaders::test_override_default_media_type_1___text_plain__charset_UTF_8____Hello_Unicode_____",
"tests/test_headers.py::TestHeaders::test_override_default_media_type_2___text_plain____Hello_ISO_8859_1___",
"tests/test_headers.py::TestHeaders::test_override_default_media_type_missing_encoding",
"tests/test_headers.py::TestHeaders::test_passthrough_request_headers",
"tests/test_headers.py::TestHeaders::test_required_header",
"tests/test_headers.py::TestHeaders::test_response_append_header",
"tests/test_headers.py::TestHeaders::test_response_header_helpers_on_get",
"tests/test_headers.py::TestHeaders::test_response_set_and_get_header",
"tests/test_headers.py::TestHeaders::test_unicode_headers",
"tests/test_headers.py::TestHeaders::test_unicode_location_headers",
"tests/test_headers.py::TestHeaders::test_vary_header_1____accept_encoding_____accept_encoding__",
"tests/test_headers.py::TestHeaders::test_vary_header_2____accept_encoding____x_auth_token_____accept_encoding__x_auth_token__",
"tests/test_headers.py::TestHeaders::test_vary_header_3____accept_encoding____x_auth_token_____accept_encoding__x_auth_token__",
"tests/test_headers.py::TestHeaders::test_vary_star",
"tests/test_utils.py::TestFalconUtils::test_dt_to_http",
"tests/test_utils.py::TestFalconUtils::test_http_date_to_dt",
"tests/test_utils.py::TestFalconUtils::test_http_now",
"tests/test_utils.py::TestFalconUtils::test_pack_query_params_none",
"tests/test_utils.py::TestFalconUtils::test_pack_query_params_one",
"tests/test_utils.py::TestFalconUtils::test_pack_query_params_several",
"tests/test_utils.py::TestFalconUtils::test_parse_host",
"tests/test_utils.py::TestFalconUtils::test_parse_query_string",
"tests/test_utils.py::TestFalconUtils::test_prop_uri_decode_models_stdlib_unquote_plus",
"tests/test_utils.py::TestFalconUtils::test_prop_uri_encode_models_stdlib_quote",
"tests/test_utils.py::TestFalconUtils::test_prop_uri_encode_value_models_stdlib_quote_safe_tilde",
"tests/test_utils.py::TestFalconUtils::test_uri_decode",
"tests/test_utils.py::TestFalconUtils::test_uri_encode",
"tests/test_utils.py::TestFalconUtils::test_uri_encode_value",
"tests/test_utils.py::TestFalconTesting::test_decode_empty_result",
"tests/test_utils.py::TestFalconTesting::test_httpnow_alias_for_backwards_compat",
"tests/test_utils.py::TestFalconTesting::test_none_header_value_in_create_environ",
"tests/test_utils.py::TestFalconTesting::test_path_escape_chars_in_create_environ",
"tests/test_utils.py::TestFalconTestCase::test_cached_text_in_result",
"tests/test_utils.py::TestFalconTestCase::test_path_must_start_with_slash",
"tests/test_utils.py::TestFalconTestCase::test_query_string",
"tests/test_utils.py::TestFalconTestCase::test_query_string_in_path",
"tests/test_utils.py::TestFalconTestCase::test_query_string_no_question",
"tests/test_utils.py::TestFalconTestCase::test_simple_resource_body_json_xor",
"tests/test_utils.py::TestFalconTestCase::test_status",
"tests/test_utils.py::TestFalconTestCase::test_wsgi_iterable_not_closeable",
"tests/test_utils.py::FancyTestCase::test_something"
]
| []
| Apache License 2.0 | 559 | [
"falcon/responders.py",
"falcon/response.py",
"tox.ini",
"falcon/util/misc.py"
]
| [
"falcon/responders.py",
"falcon/response.py",
"tox.ini",
"falcon/util/misc.py"
]
|
geowurster__pyin-43 | 261933156ed3636b10d6ceb7439678f0b52bf3c2 | 2016-05-27 03:02:59 | 6047fc3192bfc3d337e8ea98771fa1255d46bf58 | diff --git a/.travis.yml b/.travis.yml
index 83e97c6..b2bfb42 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,9 +1,12 @@
+sudo: false
+
language: python
python:
- - "2.7"
- - "3.3"
- - "3.4"
+ - 2.7
+ - 3.3
+ - 3.4
+ - 3.5
install:
- pip install -e .\[dev\]
@@ -12,4 +15,4 @@ script:
- py.test tests --cov pyin --cov-report term-missing
after_success:
- - coveralls
+ - coveralls || echo "!! intermittent coveralls failure"
diff --git a/LICENSE.txt b/LICENSE.txt
index 9640e3c..9f6aee8 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -1,4 +1,4 @@
-Copyright (c) 2015 Kevin D. Wurster
+Copyright (c) 2015-2016 Kevin D. Wurster
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
diff --git a/pyin/__init__.py b/pyin/__init__.py
index 4cb5b31..e48091e 100644
--- a/pyin/__init__.py
+++ b/pyin/__init__.py
@@ -16,7 +16,7 @@ __source__ = 'https://github.com/geowurster/pyin'
__license__ = '''
MIT
-Copyright (c) 2015 Kevin D. Wurster
+Copyright (c) 2015-2016 Kevin D. Wurster
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
diff --git a/pyin/cli.py b/pyin/cli.py
index 39295f7..9679678 100644
--- a/pyin/cli.py
+++ b/pyin/cli.py
@@ -25,16 +25,19 @@ import pyin.core
)
@click.option(
'--block', is_flag=True,
- help="Operate on all input text as though it was a single line."
+ help="Place all input text into the `line` variable."
)
@click.option(
'--no-newline', is_flag=True,
help="Don't ensure each line ends with a newline character."
)
[email protected](
+ '--skip', 'skip_lines', type=click.IntRange(0), metavar='INTEGER', default=0,
+ help='Skip N input lines.')
@click.argument(
'expressions', required=True, nargs=-1,
)
-def main(infile, outfile, expressions, no_newline, block):
+def main(infile, outfile, expressions, no_newline, block, skip_lines):
"""
It's like sed, but Python!
@@ -88,6 +91,12 @@ def main(infile, outfile, expressions, no_newline, block):
$ python -c "help('pyin.core.pmap')"
"""
+ for _ in range(skip_lines):
+ try:
+ next(infile)
+ except StopIteration:
+ raise click.ClickException("Skipped all input")
+
if block:
iterator = [infile.read()]
else:
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..8d0d862
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,2 @@
+[bdist_wheel]
+universal: 1
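A quick sketch of the new flag in action, driven through Click's test runner the same way the new unittests are (the input text is made up):

```python
from click.testing import CliRunner
import pyin.cli

runner = CliRunner()
result = runner.invoke(
    pyin.cli.main, ['--skip', '1', 'line'], input='header\nrow1\nrow2\n')
assert result.exit_code == 0
assert result.output.strip() == 'row1\nrow2'  # header line was skipped
```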
| Add a flag to skip N input lines
Skip some number of lines before processing. Using something like `--skip-lines 2` in conjunction with `--lines 10` should skip the first 2 lines and process only the next ten lines. | geowurster/pyin | diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 0000000..32fcccd
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,12 @@
+"""
+pytest fixtures
+"""
+
+
+from click.testing import CliRunner
+import pytest
+
+
[email protected](scope='module')
+def runner():
+ return CliRunner()
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 55e3467..20ef908 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -1,8 +1,14 @@
+"""
+Unittests for $ pyin
+"""
+
+
import json
import os
from os import path
from click.testing import CliRunner
+import pytest
import pyin.cli
@@ -76,3 +82,23 @@ def test_block_mode():
expected = '{"3": null, "4": null, "0": null, "2": null, "1": null}'
assert json.loads(expected) == json.loads(result.output)
+
+
[email protected]("skip_lines", [1, 3])
+def test_skip_single_line(runner, skip_lines):
+ result = runner.invoke(pyin.cli.main, [
+ '--skip', skip_lines,
+ 'line'
+ ], input=CSV_WITH_HEADER)
+ assert result.exit_code == 0
+ expected = os.linesep.join(CSV_WITH_HEADER.splitlines()[skip_lines:])
+ assert result.output.strip() == expected.strip()
+
+
+def test_skip_all_input(runner):
+ result = runner.invoke(pyin.cli.main, [
+ '--skip', 100,
+ 'line'
+ ], input=CSV_WITH_HEADER)
+ assert result.output != 0
+ assert 'skipped' in result.output.lower()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 4
} | 0.5 | {
"env_vars": null,
"env_yml_path": [],
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
click==8.1.8
coverage==7.8.0
coveralls==4.0.1
docopt==0.6.2
exceptiongroup==1.2.2
idna==3.10
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
-e git+https://github.com/geowurster/pyin.git@261933156ed3636b10d6ceb7439678f0b52bf3c2#egg=pyin
pytest==8.3.5
pytest-cov==6.0.0
requests==2.32.3
tomli==2.2.1
urllib3==2.3.0
| name: pyin
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- click==8.1.8
- coverage==7.8.0
- coveralls==4.0.1
- docopt==0.6.2
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-cov==6.0.0
- requests==2.32.3
- tomli==2.2.1
- urllib3==2.3.0
prefix: /opt/conda/envs/pyin
| [
"tests/test_cli.py::test_skip_single_line[1]",
"tests/test_cli.py::test_skip_single_line[3]",
"tests/test_cli.py::test_skip_all_input"
]
| []
| [
"tests/test_cli.py::test_single_expr",
"tests/test_cli.py::test_multiple_expr",
"tests/test_cli.py::test_with_imports",
"tests/test_cli.py::test_with_generator",
"tests/test_cli.py::test_with_blank_lines",
"tests/test_cli.py::test_block_mode"
]
| []
| New BSD License | 560 | [
"pyin/__init__.py",
".travis.yml",
"setup.cfg",
"LICENSE.txt",
"pyin/cli.py"
]
| [
"pyin/__init__.py",
".travis.yml",
"setup.cfg",
"LICENSE.txt",
"pyin/cli.py"
]
|
|
andycasey__ads-64 | 0afd82e0f48ee4debb9047c086488d860415bce7 | 2016-05-28 20:54:36 | c039d67c2b2e9dad936758bc89df1fdd1cbd0aa1 | diff --git a/ads/search.py b/ads/search.py
index c8a0bb4..8f36421 100644
--- a/ads/search.py
+++ b/ads/search.py
@@ -40,21 +40,20 @@ class Article(object):
return self.__unicode__().encode("utf-8")
def __unicode__(self):
- author = self.first_author or "Unknown author"
- if self.author and len(self.author) > 1:
+ author = self._raw.get("first_author", "Unknown author")
+ if len(self._raw.get("author", [])) > 1:
author += " et al."
return u"<{author} {year}, {bibcode}>".format(
author=author,
- year=self.year,
- bibcode=self.bibcode,
+ year=self._raw.get("year", "Unknown year"),
+ bibcode=self._raw.get("bibcode", "Unknown bibcode")
)
def __eq__(self, other):
- if (not hasattr(self, 'bibcode') or not hasattr(other, 'bibcode') or
- self.bibcode is None or other.bibcode is None):
+ if self._raw.get("bibcode") is None or other._raw.get("bibcode") is None:
raise TypeError("Cannot compare articles without bibcodes")
- return self.bibcode == other.bibcode
+ return self._raw['bibcode'] == other._raw['bibcode']
def __ne__(self, other):
return not self.__eq__(other)
@@ -196,8 +195,8 @@ class Article(object):
return self._get_field('indexstamp')
@cached_property
- def first_author_norm(self):
- return self._get_field('first_author_norm')
+ def first_author(self):
+ return self._get_field('first_author')
@cached_property
def issue(self):
| Exception handling in Unicode representation of Articles
In the article method `__unicode__()`, the article properties `first_author`, `bibcode` and `year` are used. This can raise an exception if the fields were not included in the original search query: generally for `first_author`, since no getter exists, or when deferred loading of `bibcode` and `year` fails.
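With the patch above, both paths degrade gracefully instead of raising; in short, the behaviour pinned down by the updated tests is:

```python
from ads.search import Article

# No fields at all: every component falls back to a placeholder string
assert Article().__str__() == '<Unknown author Unknown year, Unknown bibcode>'

# Equality still works through the raw bibcode (and still raises
# TypeError when either side is missing one)
assert Article(bibcode='2013A&A...552A.143S') == Article(bibcode='2013A&A...552A.143S')
```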
Cf. pull request #55 for a more detailed discussion of the issue. | andycasey/ads | diff --git a/ads/tests/test_search.py b/ads/tests/test_search.py
index dc36eda..75d834f 100644
--- a/ads/tests/test_search.py
+++ b/ads/tests/test_search.py
@@ -49,12 +49,13 @@ class TestArticle(unittest.TestCase):
def test_equals(self):
"""
the __eq__ method should compare bibcodes, and raise if bibcode isn't
- defined
+ defined or is None
"""
self.assertNotEqual(Article(bibcode="Not the same"), self.article)
self.assertEqual(Article(bibcode="2013A&A...552A.143S"), self.article)
- with self.assertRaises(TypeError):
- # Explicitly set bibcode to None to avoid invoking the getter.
+ with self.assertRaisesRegexp(TypeError, "Cannot compare articles without bibcodes"):
+ Article() == self.article
+ with self.assertRaisesRegexp(TypeError, "Cannot compare articles without bibcodes"):
Article(bibcode=None) == self.article
def test_init(self):
@@ -79,6 +80,10 @@ class TestArticle(unittest.TestCase):
self.article.__str__()
)
self.assertEqual(self.article.__unicode__(), self.article.__str__())
+ self.assertEqual(
+ Article().__str__(),
+ "<Unknown author Unknown year, Unknown bibcode>"
+ )
@patch('ads.search.Article._get_field')
def test_cached_properties(self, patched):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"coverage",
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/andycasey/ads.git@0afd82e0f48ee4debb9047c086488d860415bce7#egg=ads
certifi==2025.1.31
charset-normalizer==3.4.1
coverage==7.8.0
exceptiongroup==1.2.2
httpretty==0.8.10
idna==3.10
iniconfig==2.1.0
MarkupSafe==3.0.2
mock==5.2.0
nose==1.3.7
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
requests==2.32.3
six==1.17.0
tomli==2.2.1
urllib3==2.3.0
Werkzeug==3.1.3
| name: ads
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- coverage==7.8.0
- exceptiongroup==1.2.2
- httpretty==0.8.10
- idna==3.10
- iniconfig==2.1.0
- markupsafe==3.0.2
- mock==5.2.0
- nose==1.3.7
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- requests==2.32.3
- six==1.17.0
- tomli==2.2.1
- urllib3==2.3.0
- werkzeug==3.1.3
prefix: /opt/conda/envs/ads
| [
"ads/tests/test_search.py::TestArticle::test_equals",
"ads/tests/test_search.py::TestArticle::test_print_methods"
]
| [
"ads/tests/test_search.py::TestArticle::test_cached_properties",
"ads/tests/test_search.py::TestArticle::test_get_field",
"ads/tests/test_search.py::TestArticle::test_init",
"ads/tests/test_search.py::TestSearchQuery::test_iter",
"ads/tests/test_search.py::TestSearchQuery::test_rows_rewrite",
"ads/tests/test_search.py::TestSolrResponse::test_articles"
]
| [
"ads/tests/test_search.py::TestSearchQuery::test_init",
"ads/tests/test_search.py::TestSolrResponse::test_default_article_fields",
"ads/tests/test_search.py::TestSolrResponse::test_init",
"ads/tests/test_search.py::TestSolrResponse::test_load_http_response",
"ads/tests/test_search.py::Testquery::test_init"
]
| []
| MIT License | 561 | [
"ads/search.py"
]
| [
"ads/search.py"
]
|
|
html5lib__html5lib-python-259 | 2d376737a6246ebb38a79600a7fe75abd923cf3e | 2016-05-28 21:05:44 | 563dc298ea43021f9a9306fc7da3734ea5d9d8ec | codecov-io: ## [Current coverage][cc-pull] is **90.83%**
> Merging [#259][cc-pull] into [master][cc-base-branch] will increase coverage by **<.01%**
```diff
@@ master #259 diff @@
==========================================
Files 51 51
Lines 6836 6840 +4
Methods 0 0
Messages 0 0
Branches 1312 1312
==========================================
+ Hits 6209 6213 +4
Misses 468 468
Partials 159 159
```
> Powered by [Codecov](https://codecov.io?src=pr). Last updated by [2d37673...cbc1b34][cc-compare]
[cc-base-branch]: https://codecov.io/gh/html5lib/html5lib-python/branch/master?src=pr
[cc-compare]: https://codecov.io/gh/html5lib/html5lib-python/compare/2d376737a6246ebb38a79600a7fe75abd923cf3e...cbc1b34806178bd5119464865c263c0e254cfa55
[cc-pull]: https://codecov.io/gh/html5lib/html5lib-python/pull/259?src=pr | diff --git a/html5lib/treebuilders/etree.py b/html5lib/treebuilders/etree.py
index d394148..4d12bd4 100644
--- a/html5lib/treebuilders/etree.py
+++ b/html5lib/treebuilders/etree.py
@@ -100,6 +100,7 @@ def getETreeBuilder(ElementTreeImplementation, fullTree=False):
node.parent = self
def removeChild(self, node):
+ self._childNodes.remove(node)
self._element.remove(node._element)
node.parent = None
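With `removeChild` also dropping the node from `_childNodes`, the fragment from the report below no longer accumulates stale clones; the new regression test asserts exactly this:

```python
import html5lib

frag = html5lib.parseFragment("<b><em><foo><foob><fooc><aside></b></em>")
assert len(frag) == 2  # stale child entries used to keep the tree walker looping
```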
| etree treewalker infinite loop
This goes into an infinite loop:
```python
import html5lib
frag = html5lib.parseFragment("<b><em><foo><foob><fooc><aside></b></em>")
walker = html5lib.getTreeWalker("etree")
print(list(walker(frag)))
``` | html5lib/html5lib-python | diff --git a/html5lib/tests/test_parser2.py b/html5lib/tests/test_parser2.py
index 0ec5b04..b7a92fd 100644
--- a/html5lib/tests/test_parser2.py
+++ b/html5lib/tests/test_parser2.py
@@ -7,7 +7,7 @@ import io
from . import support # noqa
from html5lib.constants import namespaces
-from html5lib import parse, HTMLParser
+from html5lib import parse, parseFragment, HTMLParser
# tests that aren't autogenerated from text files
@@ -88,3 +88,8 @@ def test_debug_log():
expected[i] = tuple(log)
assert parser.log == expected
+
+
+def test_no_duplicate_clone():
+ frag = parseFragment("<b><em><foo><foob><fooc><aside></b></em>")
+ assert len(frag) == 2
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 1.08 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[all]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"mock"
],
"pre_install": [
"git submodule update --init --recursive"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | chardet==5.2.0
datrie==0.8.2
exceptiongroup==1.2.2
Genshi==0.7.9
-e git+https://github.com/html5lib/html5lib-python.git@2d376737a6246ebb38a79600a7fe75abd923cf3e#egg=html5lib
iniconfig==2.1.0
lxml==5.3.1
mock==5.2.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
six==1.17.0
tomli==2.2.1
webencodings==0.5.1
| name: html5lib-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- chardet==5.2.0
- datrie==0.8.2
- exceptiongroup==1.2.2
- genshi==0.7.9
- iniconfig==2.1.0
- lxml==5.3.1
- mock==5.2.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- six==1.17.0
- tomli==2.2.1
- webencodings==0.5.1
prefix: /opt/conda/envs/html5lib-python
| [
"html5lib/tests/test_parser2.py::test_no_duplicate_clone"
]
| []
| [
"html5lib/tests/test_parser2.py::test_assertDoctypeCloneable",
"html5lib/tests/test_parser2.py::test_line_counter",
"html5lib/tests/test_parser2.py::test_namespace_html_elements_0_dom",
"html5lib/tests/test_parser2.py::test_namespace_html_elements_1_dom",
"html5lib/tests/test_parser2.py::test_namespace_html_elements_0_etree",
"html5lib/tests/test_parser2.py::test_namespace_html_elements_1_etree",
"html5lib/tests/test_parser2.py::test_unicode_file",
"html5lib/tests/test_parser2.py::test_duplicate_attribute",
"html5lib/tests/test_parser2.py::test_debug_log"
]
| []
| MIT License | 562 | [
"html5lib/treebuilders/etree.py"
]
| [
"html5lib/treebuilders/etree.py"
]
|
ifosch__accloudtant-82 | 9dd6000060b4bddfa5366ef3102fe7d42371d514 | 2016-05-29 14:55:50 | 33f90ff0bc1639c9fe793afd837eee80170caf3e | diff --git a/accloudtant/aws/instance.py b/accloudtant/aws/instance.py
index 4e19b2d..2073f28 100644
--- a/accloudtant/aws/instance.py
+++ b/accloudtant/aws/instance.py
@@ -62,7 +62,7 @@ class Instance(object):
@property
def name(self):
names = [tag for tag in self.tags if tag['Key'] == 'Name']
- if names is None:
+ if len(names) == 0:
return ''
else:
return names[0]['Value']
| Fails with Python 3
My system has Python 3 installed by default (Arch Linux). This is the output of `accloudtant report`:
```
Traceback (most recent call last):
File "/usr/bin/accloudtant", line 22, in <module>
cli()
File "/usr/lib/python3.5/site-packages/click/core.py", line 716, in __call__
return self.main(*args, **kwargs)
File "/usr/lib/python3.5/site-packages/click/core.py", line 696, in main
rv = self.invoke(ctx)
File "/usr/lib/python3.5/site-packages/click/core.py", line 1060, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/lib/python3.5/site-packages/click/core.py", line 889, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/lib/python3.5/site-packages/click/core.py", line 534, in invoke
return callback(*args, **kwargs)
File "/usr/bin/accloudtant", line 19, in report
click.echo(Reports())
File "/usr/lib/python3.5/site-packages/click/utils.py", line 221, in echo
message = text_type(message)
File "/usr/lib/python3.5/site-packages/accloudtant/aws/reports.py", line 69, in __repr__
instance.name,
File "/usr/lib/python3.5/site-packages/accloudtant/aws/instance.py", line 68, in name
return names[0]['Value']
IndexError: list index out of range
```
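The guard in `Instance.name` can never fire: a list comprehension returns an empty list, never `None`, so on any instance without a `Name` tag the indexing below it raises. A minimal sketch of the failure in plain Python (no AWS involved; `tags` stands in for the instance's tag list):
```python
tags = []  # an EC2 instance with no tags at all
names = [t for t in tags if t['Key'] == 'Name']
assert names is not None  # comprehensions yield [], never None,
                          # so the old `if names is None:` guard is dead code
try:
    names[0]['Value']     # what the property effectively does next
except IndexError as err:
    print(err)            # list index out of range
```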
I can't verify that it works well on Python 2, so feel free to close this issue and open a new one if the problem is not caused by the Python version. | ifosch/accloudtant | diff --git a/tests/aws/test_instance.py b/tests/aws/test_instance.py
index d90e2c5..a016f7b 100644
--- a/tests/aws/test_instance.py
+++ b/tests/aws/test_instance.py
@@ -73,6 +73,57 @@ def test_instance():
assert(instance.best == 0.293)
+def test_unnamed_instance():
+ az = 'us-east-1b'
+ region = 'us-east-1'
+ instance_data = {
+ 'id': 'i-1840273e',
+ 'tags': [],
+ 'instance_type': 'r2.8xlarge',
+ 'placement': {
+ 'AvailabilityZone': az,
+ },
+ 'state': {
+ 'Name': 'running',
+ },
+ 'launch_time': datetime.datetime(
+ 2015,
+ 10,
+ 22,
+ 14,
+ 15,
+ 10,
+ tzinfo=tzutc()
+ ),
+ 'console_output': {'Output': 'RHEL Linux', },
+ }
+
+
+ ec2_instance = MockEC2Instance(instance_data)
+ instance = accloudtant.aws.instance.Instance(ec2_instance)
+
+ assert(instance.id == ec2_instance.id)
+ assert(instance.reserved == 'No')
+ assert(instance.name == '')
+ assert(instance.size == ec2_instance.instance_type)
+ assert(instance.availability_zone == az)
+ assert(instance.region == region)
+ assert(instance.operating_system == 'Red Hat Enterprise Linux')
+ assert(instance.key == 'rhel')
+ assert(instance.state == ec2_instance.state['Name'])
+ assert(instance.current == 0.0)
+ assert(instance.best == 0.0)
+
+ with pytest.raises(ValueError):
+ instance.reserved = 'Maybe'
+
+ instance.current = 0.392
+ instance.best = 0.293
+
+ assert(instance.current == 0.392)
+ assert(instance.best == 0.293)
+
+
def test_guess_os():
instance_data_win = {
'id': 'i-912a4392',
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/ifosch/accloudtant.git@9dd6000060b4bddfa5366ef3102fe7d42371d514#egg=accloudtant
boto3==1.37.23
botocore==1.37.23
certifi==2025.1.31
charset-normalizer==3.4.1
click==8.1.8
exceptiongroup==1.2.2
idna==3.10
iniconfig==2.1.0
jmespath==1.0.1
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
python-dateutil==2.9.0.post0
requests==2.32.3
s3transfer==0.11.4
six==1.17.0
tabulate==0.9.0
tomli==2.2.1
urllib3==1.26.20
| name: accloudtant
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- boto3==1.37.23
- botocore==1.37.23
- certifi==2025.1.31
- charset-normalizer==3.4.1
- click==8.1.8
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- jmespath==1.0.1
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- requests==2.32.3
- s3transfer==0.11.4
- six==1.17.0
- tabulate==0.9.0
- tomli==2.2.1
- urllib3==1.26.20
prefix: /opt/conda/envs/accloudtant
| [
"tests/aws/test_instance.py::test_unnamed_instance"
]
| []
| [
"tests/aws/test_instance.py::test_instance",
"tests/aws/test_instance.py::test_guess_os",
"tests/aws/test_instance.py::test_match_reserved_instance"
]
| []
| null | 563 | [
"accloudtant/aws/instance.py"
]
| [
"accloudtant/aws/instance.py"
]
|
|
Axelrod-Python__Axelrod-603 | c919c39d58552c2db4a2719c817cfa3a3c301f92 | 2016-05-30 15:36:49 | e5b85453f0288ec9f9ea9eb91ed6042855a7b86c | diff --git a/axelrod/result_set.py b/axelrod/result_set.py
index b2bc1b94..b0a67300 100644
--- a/axelrod/result_set.py
+++ b/axelrod/result_set.py
@@ -1,5 +1,6 @@
from collections import defaultdict
import csv
+import tqdm
from numpy import mean, nanmedian, std
@@ -14,10 +15,25 @@ except ImportError:
from io import StringIO
+def update_progress_bar(method):
+ """A decorator to update a progress bar if it exists"""
+ def wrapper(*args):
+ """Run the method and update the progress bar if it exists"""
+ output = method(*args)
+
+ try:
+ args[0].progress_bar.update(1)
+ except AttributeError:
+ pass
+
+ return output
+ return wrapper
+
+
class ResultSet(object):
"""A class to hold the results of a tournament."""
- def __init__(self, players, interactions, with_morality=True):
+ def __init__(self, players, interactions, progress_bar=True):
"""
Parameters
----------
@@ -26,19 +42,24 @@ class ResultSet(object):
interactions : list
a list of dictionaries mapping tuples of player indices to
interactions (1 for each repetition)
- with_morality : bool
- a flag to determine whether morality metrics should be
- calculated.
+ progress_bar : bool
+ Whether or not to create a progress bar which will be updated
"""
self.players = players
self.nplayers = len(players)
self.interactions = interactions
self.nrepetitions = max([len(rep) for rep in list(interactions.values())])
+ if progress_bar:
+ self.progress_bar = tqdm.tqdm(total=19, desc="Analysing results")
+ else:
+ self.progress_bar = False
+
# Calculate all attributes:
- self.build_all(with_morality)
+ self.build_all()
+
- def build_all(self, with_morality):
+ def build_all(self):
"""Build all the results. In a seperate method to make inheritance more
straightforward"""
self.wins = self.build_wins()
@@ -54,15 +75,19 @@ class ResultSet(object):
self.score_diffs = self.build_score_diffs()
self.payoff_diffs_means = self.build_payoff_diffs_means()
- if with_morality:
- self.cooperation = self.build_cooperation()
- self.normalised_cooperation = self.build_normalised_cooperation()
- self.vengeful_cooperation = self.build_vengeful_cooperation()
- self.cooperating_rating = self.build_cooperating_rating()
- self.good_partner_matrix = self.build_good_partner_matrix()
- self.good_partner_rating = self.build_good_partner_rating()
- self.eigenmoses_rating = self.build_eigenmoses_rating()
- self.eigenjesus_rating = self.build_eigenjesus_rating()
+ self.cooperation = self.build_cooperation()
+ self.normalised_cooperation = self.build_normalised_cooperation()
+ self.vengeful_cooperation = self.build_vengeful_cooperation()
+ self.cooperating_rating = self.build_cooperating_rating()
+ self.good_partner_matrix = self.build_good_partner_matrix()
+ self.good_partner_rating = self.build_good_partner_rating()
+ self.eigenmoses_rating = self.build_eigenmoses_rating()
+ self.eigenjesus_rating = self.build_eigenjesus_rating()
+
+ try:
+ self.progress_bar.close()
+ except AttributeError:
+ pass
@property
def _null_results_matrix(self):
@@ -79,6 +104,7 @@ class ResultSet(object):
replist = list(range(self.nrepetitions))
return [[[0 for j in plist] for i in plist] for r in replist]
+ @update_progress_bar
def build_match_lengths(self):
"""
Returns:
@@ -110,6 +136,7 @@ class ResultSet(object):
return match_lengths
+ @update_progress_bar
def build_scores(self):
"""
Returns:
@@ -143,6 +170,7 @@ class ResultSet(object):
return scores
+ @update_progress_bar
def build_ranked_names(self):
"""
Returns:
@@ -150,8 +178,10 @@ class ResultSet(object):
Returns the ranked names. A list of names as calculated by
self.ranking.
"""
+
return [str(self.players[i]) for i in self.ranking]
+ @update_progress_bar
def build_wins(self):
"""
Returns:
@@ -187,6 +217,7 @@ class ResultSet(object):
return wins
+ @update_progress_bar
def build_normalised_scores(self):
"""
Returns:
@@ -229,6 +260,7 @@ class ResultSet(object):
return normalised_scores
+ @update_progress_bar
def build_ranking(self):
"""
Returns:
@@ -244,6 +276,7 @@ class ResultSet(object):
return sorted(range(self.nplayers),
key=lambda i: -nanmedian(self.normalised_scores[i]))
+ @update_progress_bar
def build_payoffs(self):
"""
Returns:
@@ -281,8 +314,10 @@ class ResultSet(object):
utilities.append(iu.compute_final_score_per_turn(interaction)[1])
payoffs[player][opponent] = utilities
+
return payoffs
+ @update_progress_bar
def build_payoff_matrix(self):
"""
Returns:
@@ -317,6 +352,7 @@ class ResultSet(object):
return payoff_matrix
+ @update_progress_bar
def build_payoff_stddevs(self):
"""
Returns:
@@ -353,6 +389,7 @@ class ResultSet(object):
return payoff_stddevs
+ @update_progress_bar
def build_score_diffs(self):
"""
Returns:
@@ -391,8 +428,10 @@ class ResultSet(object):
scores = iu.compute_final_score_per_turn(interaction)
diff = (scores[1] - scores[0])
score_diffs[player][opponent][repetition] = diff
+
return score_diffs
+ @update_progress_bar
def build_payoff_diffs_means(self):
"""
Returns:
@@ -429,8 +468,10 @@ class ResultSet(object):
payoff_diffs_means[player][opponent] = mean(diffs)
else:
payoff_diffs_means[player][opponent] = 0
+
return payoff_diffs_means
+ @update_progress_bar
def build_cooperation(self):
"""
Returns:
@@ -465,8 +506,10 @@ class ResultSet(object):
coop_count += iu.compute_cooperations(interaction)[1]
cooperations[player][opponent] += coop_count
+
return cooperations
+ @update_progress_bar
def build_normalised_cooperation(self):
"""
Returns:
@@ -507,8 +550,10 @@ class ResultSet(object):
# Mean over all reps:
normalised_cooperations[player][opponent] = mean(coop_counts)
+
return normalised_cooperations
+ @update_progress_bar
def build_vengeful_cooperation(self):
"""
Returns:
@@ -522,6 +567,7 @@ class ResultSet(object):
return [[2 * (element - 0.5) for element in row]
for row in self.normalised_cooperation]
+ @update_progress_bar
def build_cooperating_rating(self):
"""
Returns:
@@ -552,6 +598,7 @@ class ResultSet(object):
return [sum(cs) / max(1, float(sum(ls))) for cs, ls
in zip(self.cooperation, lengths)]
+ @update_progress_bar
def build_good_partner_matrix(self):
"""
Returns:
@@ -586,6 +633,7 @@ class ResultSet(object):
return good_partner_matrix
+ @update_progress_bar
def build_good_partner_rating(self):
"""
Returns:
@@ -607,6 +655,7 @@ class ResultSet(object):
return good_partner_rating
+ @update_progress_bar
def build_eigenjesus_rating(self):
"""
Returns:
@@ -617,8 +666,10 @@ class ResultSet(object):
"""
eigenvector, eigenvalue = eigen.principal_eigenvector(
self.normalised_cooperation)
+
return eigenvector.tolist()
+ @update_progress_bar
def build_eigenmoses_rating(self):
"""
Returns:
@@ -629,6 +680,7 @@ class ResultSet(object):
"""
eigenvector, eigenvalue = eigen.principal_eigenvector(
self.vengeful_cooperation)
+
return eigenvector.tolist()
def csv(self):
@@ -655,22 +707,26 @@ class ResultSetFromFile(ResultSet):
by the tournament class.
"""
- def __init__(self, filename, with_morality=True):
+ def __init__(self, filename, progress_bar=True):
"""
Parameters
----------
filename : string
name of a file of the correct file.
- with_morality : bool
- a flag to determine whether morality metrics should be
- calculated.
+ progress_bar : bool
+ Whether or not to create a progress bar which will be updated
"""
self.players, self.interactions = self._read_csv(filename)
self.nplayers = len(self.players)
self.nrepetitions = len(list(self.interactions.values())[0])
+ if progress_bar:
+ self.progress_bar = tqdm.tqdm(total=19, desc="Analysing results")
+ else:
+ self.progress_bar = False
+
# Calculate all attributes:
- self.build_all(with_morality)
+ self.build_all()
def _read_csv(self, filename):
"""
diff --git a/axelrod/tournament.py b/axelrod/tournament.py
index 6b638aa1..32684643 100644
--- a/axelrod/tournament.py
+++ b/axelrod/tournament.py
@@ -85,7 +85,8 @@ class Tournament(object):
axelrod.ResultSet
"""
if progress_bar:
- self.progress_bar = tqdm.tqdm(total=len(self.match_generator))
+ self.progress_bar = tqdm.tqdm(total=len(self.match_generator),
+ desc="Playing matches")
self.setup_output_file(filename)
if not build_results and not filename:
@@ -96,13 +97,16 @@ class Tournament(object):
else:
self._run_parallel(processes=processes, progress_bar=progress_bar)
+ if progress_bar:
+ self.progress_bar.close()
+
# Make sure that python has finished writing to disk
self.outputfile.flush()
if build_results:
- return self._build_result_set()
+ return self._build_result_set(progress_bar=progress_bar)
- def _build_result_set(self):
+ def _build_result_set(self, progress_bar=True):
"""
Build the result set (used by the play method)
@@ -112,7 +116,7 @@ class Tournament(object):
"""
result_set = ResultSetFromFile(
filename=self.filename,
- with_morality=self._with_morality)
+ progress_bar=progress_bar)
self.outputfile.close()
return result_set
| Result set processing shouldn't be in the progress bar | Axelrod-Python/Axelrod | diff --git a/axelrod/tests/unit/test_resultset.py b/axelrod/tests/unit/test_resultset.py
index 2df8666a..c5a084bb 100644
--- a/axelrod/tests/unit/test_resultset.py
+++ b/axelrod/tests/unit/test_resultset.py
@@ -161,7 +161,9 @@ class TestResultSet(unittest.TestCase):
'Defector,Tit For Tat,Alternator\n2.6,1.7,1.5\n2.6,1.7,1.5\n2.6,1.7,1.5\n')
def test_init(self):
- rs = axelrod.ResultSet(self.players, self.interactions)
+ rs = axelrod.ResultSet(self.players, self.interactions,
+ progress_bar=False)
+ self.assertFalse(rs.progress_bar)
self.assertEqual(rs.players, self.players)
self.assertEqual(rs.nplayers, len(self.players))
self.assertEqual(rs.interactions, self.interactions)
@@ -176,13 +178,25 @@ class TestResultSet(unittest.TestCase):
self.assertIsInstance(interaction, list)
self.assertEqual(len(interaction), self.turns)
- def test_null_results_matrix(self):
+ def test_with_progress_bar(self):
rs = axelrod.ResultSet(self.players, self.interactions)
+ self.assertTrue(rs.progress_bar)
+ self.assertEqual(rs.progress_bar.total, 19)
+
+ rs = axelrod.ResultSet(self.players, self.interactions,
+ progress_bar=True)
+ self.assertTrue(rs.progress_bar)
+ self.assertEqual(rs.progress_bar.total, 19)
+
+ def test_null_results_matrix(self):
+ rs = axelrod.ResultSet(self.players, self.interactions,
+ progress_bar=False)
self.assertEqual(
rs._null_results_matrix, self.expected_null_results_matrix)
def test_match_lengths(self):
- rs = axelrod.ResultSet(self.players, self.interactions)
+ rs = axelrod.ResultSet(self.players, self.interactions,
+ progress_bar=False)
self.assertIsInstance(rs.match_lengths, list)
self.assertEqual(len(rs.match_lengths), rs.nrepetitions)
self.assertEqual(rs.match_lengths, self.expected_match_lengths)
@@ -202,49 +216,57 @@ class TestResultSet(unittest.TestCase):
self.assertEqual(length, self.turns)
def test_scores(self):
- rs = axelrod.ResultSet(self.players, self.interactions)
+ rs = axelrod.ResultSet(self.players, self.interactions,
+ progress_bar=False)
self.assertIsInstance(rs.scores, list)
self.assertEqual(len(rs.scores), rs.nplayers)
self.assertEqual(rs.scores, self.expected_scores)
def test_ranking(self):
- rs = axelrod.ResultSet(self.players, self.interactions)
+ rs = axelrod.ResultSet(self.players, self.interactions,
+ progress_bar=False)
self.assertIsInstance(rs.ranking, list)
self.assertEqual(len(rs.ranking), rs.nplayers)
self.assertEqual(rs.ranking, self.expected_ranking)
def test_ranked_names(self):
- rs = axelrod.ResultSet(self.players, self.interactions)
+ rs = axelrod.ResultSet(self.players, self.interactions,
+ progress_bar=False)
self.assertIsInstance(rs.ranked_names, list)
self.assertEqual(len(rs.ranked_names), rs.nplayers)
self.assertEqual(rs.ranked_names, self.expected_ranked_names)
def test_wins(self):
- rs = axelrod.ResultSet(self.players, self.interactions)
+ rs = axelrod.ResultSet(self.players, self.interactions,
+ progress_bar=False)
self.assertIsInstance(rs.wins, list)
self.assertEqual(len(rs.wins), rs.nplayers)
self.assertEqual(rs.wins, self.expected_wins)
def test_normalised_scores(self):
- rs = axelrod.ResultSet(self.players, self.interactions)
+ rs = axelrod.ResultSet(self.players, self.interactions,
+ progress_bar=False)
self.assertIsInstance(rs.normalised_scores, list)
self.assertEqual(len(rs.normalised_scores), rs.nplayers)
self.assertEqual(rs.normalised_scores, self.expected_normalised_scores)
def test_payoffs(self):
- rs = axelrod.ResultSet(self.players, self.interactions)
+ rs = axelrod.ResultSet(self.players, self.interactions,
+ progress_bar=False)
self.assertIsInstance(rs.payoffs, list)
self.assertEqual(len(rs.payoffs), rs.nplayers)
self.assertEqual(rs.payoffs, self.expected_payoffs)
def test_payoff_matrix(self):
- rs = axelrod.ResultSet(self.players, self.interactions)
+ rs = axelrod.ResultSet(self.players, self.interactions,
+ progress_bar=False)
self.assertIsInstance(rs.payoff_matrix, list)
self.assertEqual(len(rs.payoff_matrix), rs.nplayers)
self.assertEqual(rs.payoff_matrix, self.expected_payoff_matrix)
def test_score_diffs(self):
- rs = axelrod.ResultSet(self.players, self.interactions)
+ rs = axelrod.ResultSet(self.players, self.interactions,
+ progress_bar=False)
self.assertIsInstance(rs.score_diffs, list)
self.assertEqual(len(rs.score_diffs), rs.nplayers)
for i, row in enumerate(rs.score_diffs):
@@ -254,7 +276,8 @@ class TestResultSet(unittest.TestCase):
self.expected_score_diffs[i][j][k])
def test_payoff_diffs_means(self):
- rs = axelrod.ResultSet(self.players, self.interactions)
+ rs = axelrod.ResultSet(self.players, self.interactions,
+ progress_bar=False)
self.assertIsInstance(rs.payoff_diffs_means, list)
self.assertEqual(len(rs.payoff_diffs_means), rs.nplayers)
for i, row in enumerate(rs.payoff_diffs_means):
@@ -263,68 +286,78 @@ class TestResultSet(unittest.TestCase):
self.expected_payoff_diffs_means[i][j])
def test_payoff_stddevs(self):
- rs = axelrod.ResultSet(self.players, self.interactions)
+ rs = axelrod.ResultSet(self.players, self.interactions,
+ progress_bar=False)
self.assertIsInstance(rs.payoff_stddevs, list)
self.assertEqual(len(rs.payoff_stddevs), rs.nplayers)
self.assertEqual(rs.payoff_stddevs, self.expected_payoff_stddevs)
def test_cooperation(self):
- rs = axelrod.ResultSet(self.players, self.interactions)
+ rs = axelrod.ResultSet(self.players, self.interactions,
+ progress_bar=False)
self.assertIsInstance(rs.cooperation, list)
self.assertEqual(len(rs.cooperation), rs.nplayers)
self.assertEqual(rs.cooperation, self.expected_cooperation)
def test_normalised_cooperation(self):
- rs = axelrod.ResultSet(self.players, self.interactions)
+ rs = axelrod.ResultSet(self.players, self.interactions,
+ progress_bar=False)
self.assertIsInstance(rs.normalised_cooperation, list)
self.assertEqual(len(rs.normalised_cooperation), rs.nplayers)
self.assertEqual(rs.normalised_cooperation,
self.expected_normalised_cooperation)
def test_vengeful_cooperation(self):
- rs = axelrod.ResultSet(self.players, self.interactions)
+ rs = axelrod.ResultSet(self.players, self.interactions,
+ progress_bar=False)
self.assertIsInstance(rs.vengeful_cooperation, list)
self.assertEqual(len(rs.vengeful_cooperation), rs.nplayers)
self.assertEqual(rs.vengeful_cooperation,
self.expected_vengeful_cooperation)
def test_cooperating_rating(self):
- rs = axelrod.ResultSet(self.players, self.interactions)
+ rs = axelrod.ResultSet(self.players, self.interactions,
+ progress_bar=False)
self.assertIsInstance(rs.cooperating_rating, list)
self.assertEqual(len(rs.cooperating_rating), rs.nplayers)
self.assertEqual(rs.cooperating_rating,
self.expected_cooperating_rating)
def test_good_partner_matrix(self):
- rs = axelrod.ResultSet(self.players, self.interactions)
+ rs = axelrod.ResultSet(self.players, self.interactions,
+ progress_bar=False)
self.assertIsInstance(rs.good_partner_matrix, list)
self.assertEqual(len(rs.good_partner_matrix), rs.nplayers)
self.assertEqual(rs.good_partner_matrix,
self.expected_good_partner_matrix)
def test_good_partner_rating(self):
- rs = axelrod.ResultSet(self.players, self.interactions)
+ rs = axelrod.ResultSet(self.players, self.interactions,
+ progress_bar=False)
self.assertIsInstance(rs.good_partner_rating, list)
self.assertEqual(len(rs.good_partner_rating), rs.nplayers)
self.assertEqual(rs.good_partner_rating,
self.expected_good_partner_rating)
def test_eigenjesus_rating(self):
- rs = axelrod.ResultSet(self.players, self.interactions)
+ rs = axelrod.ResultSet(self.players, self.interactions,
+ progress_bar=False)
self.assertIsInstance(rs.eigenjesus_rating, list)
self.assertEqual(len(rs.eigenjesus_rating), rs.nplayers)
for j, rate in enumerate(rs.eigenjesus_rating):
self.assertAlmostEqual(rate, self.expected_eigenjesus_rating[j])
def test_eigenmoses_rating(self):
- rs = axelrod.ResultSet(self.players, self.interactions)
+ rs = axelrod.ResultSet(self.players, self.interactions,
+ progress_bar=False)
self.assertIsInstance(rs.eigenmoses_rating, list)
self.assertEqual(len(rs.eigenmoses_rating), rs.nplayers)
for j, rate in enumerate(rs.eigenmoses_rating):
self.assertAlmostEqual(rate, self.expected_eigenmoses_rating[j])
def test_csv(self):
- rs = axelrod.ResultSet(self.players, self.interactions)
+ rs = axelrod.ResultSet(self.players, self.interactions,
+ progress_bar=False)
self.assertEqual(rs.csv(), self.expected_csv)
@@ -341,7 +374,7 @@ class TestResultSetFromFile(unittest.TestCase):
def test_init(self):
- rs = axelrod.ResultSetFromFile(self.tmp_file.name)
+ rs = axelrod.ResultSetFromFile(self.tmp_file.name, progress_bar=False)
players = ['Cooperator', 'Tit For Tat', 'Defector']
self.assertEqual(rs.players, players)
self.assertEqual(rs.nplayers, len(players))
@@ -354,3 +387,9 @@ class TestResultSetFromFile(unittest.TestCase):
(0, 2): [[('C', 'D'), ('C', 'D')]],
(1, 1): [[('C', 'C'), ('C', 'C')]]}
self.assertEqual(rs.interactions, expected_interactions)
+
+
+class TestDecorator(unittest.TestCase):
+ def test_update_progress_bar(self):
+ method = lambda x: None
+ self.assertEqual(axelrod.result_set.update_progress_bar(method)(1), None)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 3
},
"num_modified_files": 2
} | 1.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
-e git+https://github.com/Axelrod-Python/Axelrod.git@c919c39d58552c2db4a2719c817cfa3a3c301f92#egg=Axelrod
coverage==7.8.0
cycler==0.12.1
exceptiongroup==1.2.2
hypothesis==6.130.6
iniconfig==2.1.0
kiwisolver==1.4.7
matplotlib==3.3.4
numpy==2.0.2
packaging==24.2
pillow==11.1.0
pluggy==1.5.0
pyparsing==2.1.1
pytest==8.3.5
pytest-cov==6.0.0
python-dateutil==2.9.0.post0
six==1.17.0
sortedcontainers==2.4.0
testfixtures==4.9.1
tomli==2.2.1
tqdm==3.4.0
| name: Axelrod
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- coverage==7.8.0
- cycler==0.12.1
- exceptiongroup==1.2.2
- hypothesis==6.130.6
- iniconfig==2.1.0
- kiwisolver==1.4.7
- matplotlib==3.3.4
- numpy==2.0.2
- packaging==24.2
- pillow==11.1.0
- pluggy==1.5.0
- pyparsing==2.1.1
- pytest==8.3.5
- pytest-cov==6.0.0
- python-dateutil==2.9.0.post0
- six==1.17.0
- sortedcontainers==2.4.0
- testfixtures==4.9.1
- tomli==2.2.1
- tqdm==3.4.0
prefix: /opt/conda/envs/Axelrod
| [
"axelrod/tests/unit/test_resultset.py::TestResultSet::test_cooperating_rating",
"axelrod/tests/unit/test_resultset.py::TestResultSet::test_cooperation",
"axelrod/tests/unit/test_resultset.py::TestResultSet::test_eigenjesus_rating",
"axelrod/tests/unit/test_resultset.py::TestResultSet::test_eigenmoses_rating",
"axelrod/tests/unit/test_resultset.py::TestResultSet::test_good_partner_matrix",
"axelrod/tests/unit/test_resultset.py::TestResultSet::test_good_partner_rating",
"axelrod/tests/unit/test_resultset.py::TestResultSet::test_init",
"axelrod/tests/unit/test_resultset.py::TestResultSet::test_match_lengths",
"axelrod/tests/unit/test_resultset.py::TestResultSet::test_normalised_cooperation",
"axelrod/tests/unit/test_resultset.py::TestResultSet::test_normalised_scores",
"axelrod/tests/unit/test_resultset.py::TestResultSet::test_null_results_matrix",
"axelrod/tests/unit/test_resultset.py::TestResultSet::test_payoff_diffs_means",
"axelrod/tests/unit/test_resultset.py::TestResultSet::test_payoff_matrix",
"axelrod/tests/unit/test_resultset.py::TestResultSet::test_payoff_stddevs",
"axelrod/tests/unit/test_resultset.py::TestResultSet::test_payoffs",
"axelrod/tests/unit/test_resultset.py::TestResultSet::test_ranked_names",
"axelrod/tests/unit/test_resultset.py::TestResultSet::test_ranking",
"axelrod/tests/unit/test_resultset.py::TestResultSet::test_score_diffs",
"axelrod/tests/unit/test_resultset.py::TestResultSet::test_scores",
"axelrod/tests/unit/test_resultset.py::TestResultSet::test_vengeful_cooperation",
"axelrod/tests/unit/test_resultset.py::TestResultSet::test_wins",
"axelrod/tests/unit/test_resultset.py::TestResultSet::test_with_progress_bar",
"axelrod/tests/unit/test_resultset.py::TestResultSetFromFile::test_init",
"axelrod/tests/unit/test_resultset.py::TestDecorator::test_update_progress_bar"
]
| [
"axelrod/tests/unit/test_resultset.py::TestResultSet::test_csv"
]
| []
| []
| MIT License | 564 | [
"axelrod/result_set.py",
"axelrod/tournament.py"
]
| [
"axelrod/result_set.py",
"axelrod/tournament.py"
]
|
|
zalando-stups__pierone-cli-37 | 991c05e9c7496b2aac071d85d0a9ca6b8afcf9dd | 2016-05-31 08:47:53 | 560cae1b4fc185c7a8aa3a1a50e0a96b2c7dd8e7 | diff --git a/pierone/cli.py b/pierone/cli.py
index 1af5790..50dba86 100644
--- a/pierone/cli.py
+++ b/pierone/cli.py
@@ -232,7 +232,8 @@ def get_clair_features(url, layer_id, access_token):
return []
else:
r.raise_for_status()
- return r.json()['Layer']['Features']
+
+ return r.json()['Layer'].get('Features', [])
@cli.command()
| pierone fails with a traceback when the CVE status is COULDNT_FIGURE_OUT
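When Clair has indexed a layer but recorded no features, its response carries a `Layer` object without a `Features` key, and `get_clair_features` crashes on the direct lookup. A hedged sketch of the failing shape (payload trimmed to the relevant fields, mirroring the Clair response used in the tests):
```python
clair_payload = {
    "Layer": {
        "Name": "sha256:0000...",        # layer digest, elided here
        "NamespaceName": "ubuntu:16.04",
        "IndexedByVersion": 2,
        # no "Features" key at all in this case
    }
}
try:
    clair_payload["Layer"]["Features"]                  # old code path
except KeyError:
    pass                                                # the crash below
features = clair_payload["Layer"].get("Features", [])  # patched behaviour
assert features == []
```
Full traceback from `pierone cves`: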
```
Traceback (most recent call last):
File "/usr/local/bin/pierone", line 11, in <module>
sys.exit(main())
File "/usr/local/lib/python3.4/dist-packages/pierone/cli.py", line 485, in main
cli()
File "/usr/local/lib/python3.4/dist-packages/click/core.py", line 716, in __call__
return self.main(*args, **kwargs)
File "/usr/local/lib/python3.4/dist-packages/click/core.py", line 696, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python3.4/dist-packages/click/core.py", line 1060, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/local/lib/python3.4/dist-packages/click/core.py", line 889, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python3.4/dist-packages/click/core.py", line 534, in invoke
return callback(*args, **kwargs)
File "/usr/local/lib/python3.4/dist-packages/click/decorators.py", line 27, in new_func
return f(get_current_context().obj, *args, **kwargs)
File "/usr/local/lib/python3.4/dist-packages/pierone/cli.py", line 313, in cves
installed_software = get_clair_features(config.get('clair_url'), artifact_tag.get('clair_id'), token)
File "/usr/local/lib/python3.4/dist-packages/pierone/cli.py", line 235, in get_clair_features
return r.json()['Layer']['Features']
KeyError: 'Features'
``` | zalando-stups/pierone-cli | diff --git a/tests/test_cli.py b/tests/test_cli.py
index 6282253..087d27d 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -221,6 +221,61 @@ def test_cves(monkeypatch, tmpdir):
assert re.match('[^\n]+\n[^\n]+HIGH', result.output), 'Results should be ordered by highest priority'
+def test_no_cves_found(monkeypatch, tmpdir):
+ pierone_service_payload = [
+ # Former pierone payload
+ {
+ 'name': '1.0',
+ 'created_by': 'myuser',
+ 'created': '2015-08-20T08:14:59.432Z'
+ },
+ # New pierone payload with clair but no information about CVEs
+ {
+ "name": "1.1",
+ "created": "2016-05-19T15:23:41.065Z",
+ "created_by": "myuser",
+ "image": "sha256:here",
+ "clair_id": None,
+ "severity_fix_available": None,
+ "severity_no_fix_available": None
+ },
+ # New pierone payload with clair input and info about CVEs
+ {
+ "name": "1.2",
+ "created": "2016-05-23T13:29:17.753Z",
+ "created_by": "myuser",
+ "image": "sha256:here",
+ "clair_id": "sha256:here",
+ "severity_fix_available": "High",
+ "severity_no_fix_available": "Medium"
+ }
+ ]
+
+ no_cves_clair_payload = {
+ "Layer": {
+ "Name": "sha256:0000000000000000000000000000000000000000000000000000000000000000",
+ "NamespaceName": "ubuntu:16.04",
+ "ParentName": "sha256:0000000000000000000000000000000000000000000000000000000000000000",
+ "IndexedByVersion": 2
+ }
+ }
+
+ response = MagicMock()
+ response.json.side_effect = [
+ pierone_service_payload,
+ no_cves_clair_payload
+ ]
+
+ runner = CliRunner()
+ monkeypatch.setattr('stups_cli.config.load_config', lambda x: {'url': 'foobar', 'clair_url': 'barfoo'})
+ monkeypatch.setattr('zign.api.get_token', MagicMock(return_value='tok123'))
+ monkeypatch.setattr('os.path.expanduser', lambda x: x.replace('~', str(tmpdir)))
+ monkeypatch.setattr('pierone.api.session.get', MagicMock(return_value=response))
+ with runner.isolated_filesystem():
+ result = runner.invoke(cli, ['cves', 'myteam', 'myart', '1.2'], catch_exceptions=False)
+ assert re.match('^[^\n]+\n$', result.output), 'No results should be shown'
+
+
def test_latest(monkeypatch, tmpdir):
response = MagicMock()
response.json.return_value = [
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
click==8.1.8
clickclick==20.10.2
coverage==7.8.0
dnspython==2.7.0
exceptiongroup==1.2.2
execnet==2.1.1
idna==3.10
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
PyYAML==6.0.2
requests==2.32.3
stups-cli-support==1.1.22
-e git+https://github.com/zalando-stups/pierone-cli.git@991c05e9c7496b2aac071d85d0a9ca6b8afcf9dd#egg=stups_pierone
stups-tokens==1.1.19
stups-zign==1.2
tomli==2.2.1
typing_extensions==4.13.0
urllib3==2.3.0
| name: pierone-cli
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- click==8.1.8
- clickclick==20.10.2
- coverage==7.8.0
- dnspython==2.7.0
- exceptiongroup==1.2.2
- execnet==2.1.1
- idna==3.10
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- pyyaml==6.0.2
- requests==2.32.3
- stups-cli-support==1.1.22
- stups-tokens==1.1.19
- stups-zign==1.2
- tomli==2.2.1
- typing-extensions==4.13.0
- urllib3==2.3.0
prefix: /opt/conda/envs/pierone-cli
| [
"tests/test_cli.py::test_no_cves_found"
]
| []
| [
"tests/test_cli.py::test_version",
"tests/test_cli.py::test_login",
"tests/test_cli.py::test_login_given_url_option",
"tests/test_cli.py::test_scm_source",
"tests/test_cli.py::test_image",
"tests/test_cli.py::test_tags",
"tests/test_cli.py::test_cves",
"tests/test_cli.py::test_latest",
"tests/test_cli.py::test_latest_not_found",
"tests/test_cli.py::test_url_without_scheme"
]
| []
| Apache License 2.0 | 565 | [
"pierone/cli.py"
]
| [
"pierone/cli.py"
]
|
|
networkx__networkx-2150 | df730d96d6490079a6b6fcf3a2bea64324aef02e | 2016-05-31 19:16:42 | 3f4fd85765bf2d88188cfd4c84d0707152e6cd1e | diff --git a/doc/source/reference/classes.multidigraph.rst b/doc/source/reference/classes.multidigraph.rst
index ab613964a..ef34f4664 100644
--- a/doc/source/reference/classes.multidigraph.rst
+++ b/doc/source/reference/classes.multidigraph.rst
@@ -28,6 +28,7 @@ Adding and Removing Nodes and Edges
MultiDiGraph.add_edge
MultiDiGraph.add_edges_from
MultiDiGraph.add_weighted_edges_from
+ MultiDiGraph.new_edge_key
MultiDiGraph.remove_edge
MultiDiGraph.remove_edges_from
MultiDiGraph.clear
diff --git a/doc/source/reference/classes.multigraph.rst b/doc/source/reference/classes.multigraph.rst
index 1d598ccfd..5d3984986 100644
--- a/doc/source/reference/classes.multigraph.rst
+++ b/doc/source/reference/classes.multigraph.rst
@@ -27,6 +27,7 @@ Adding and removing nodes and edges
MultiGraph.add_edge
MultiGraph.add_edges_from
MultiGraph.add_weighted_edges_from
+ MultiGraph.new_edge_key
MultiGraph.remove_edge
MultiGraph.remove_edges_from
MultiGraph.clear
diff --git a/doc/source/tutorial/tutorial.rst b/doc/source/tutorial/tutorial.rst
index c86741f95..3875c1a65 100644
--- a/doc/source/tutorial/tutorial.rst
+++ b/doc/source/tutorial/tutorial.rst
@@ -388,8 +388,8 @@ functions such as:
>>> G.add_edges_from([(1,2),(1,3)])
>>> G.add_node("spam") # adds node "spam"
->>> list(nx.connected_components(G))
-[{1, 2, 3}, {'spam'}]
+>>> nx.connected_components(G)
+[[1, 2, 3], ['spam']]
>>> sorted(d for n, d in G.degree())
[0, 1, 1, 2]
diff --git a/networkx/algorithms/centrality/eigenvector.py b/networkx/algorithms/centrality/eigenvector.py
index a73611c37..f7fac9601 100644
--- a/networkx/algorithms/centrality/eigenvector.py
+++ b/networkx/algorithms/centrality/eigenvector.py
@@ -78,11 +78,6 @@ def eigenvector_centrality(G, max_iter=100, tol=1.0e-6, nstart=None,
NetworkXError
If each value in `nstart` is zero.
- PowerIterationFailedConvergence
- If the algorithm fails to converge to the specified tolerance
- within the specified number of iterations of the power iteration
- method.
-
See Also
--------
eigenvector_centrality_numpy
@@ -146,7 +141,8 @@ def eigenvector_centrality(G, max_iter=100, tol=1.0e-6, nstart=None,
# Check for convergence (in the L_1 norm).
if sum(abs(x[n] - xlast[n]) for n in x) < nnodes * tol:
return x
- raise nx.PowerIterationFailedConvergence(max_iter)
+ raise nx.NetworkXError('power iteration failed to converge within {}'
+ ' iterations'.format(max_iter))
def eigenvector_centrality_numpy(G, weight='weight', max_iter=50, tol=0):
diff --git a/networkx/algorithms/centrality/katz.py b/networkx/algorithms/centrality/katz.py
index 39ec75f44..537c8a3af 100644
--- a/networkx/algorithms/centrality/katz.py
+++ b/networkx/algorithms/centrality/katz.py
@@ -94,11 +94,6 @@ def katz_centrality(G, alpha=0.1, beta=1.0,
If the parameter `beta` is not a scalar but lacks a value for at least
one node
- PowerIterationFailedConvergence
- If the algorithm fails to converge to the specified tolerance
- within the specified number of iterations of the power iteration
- method.
-
Examples
--------
>>> import math
@@ -195,8 +190,9 @@ def katz_centrality(G, alpha=0.1, beta=1.0,
for n in x:
x[n] *= s
return x
- raise nx.PowerIterationFailedConvergence(max_iter)
+ raise nx.NetworkXError('Power iteration failed to converge in '
+ '%d iterations.' % max_iter)
@not_implemented_for('multigraph')
def katz_centrality_numpy(G, alpha=0.1, beta=1.0, normalized=True,
diff --git a/networkx/algorithms/community/community_generators.py b/networkx/algorithms/community/community_generators.py
index a12fe87db..3cdb948b2 100644
--- a/networkx/algorithms/community/community_generators.py
+++ b/networkx/algorithms/community/community_generators.py
@@ -77,7 +77,7 @@ def _powerlaw_sequence(gamma, low, high, condition, length, max_iters):
``max_iters`` indicates the number of times to generate a list
satisfying ``length``. If the number of iterations exceeds this
- value, :exc:`~networkx.exception.ExceededMaxIterations` is raised.
+ value, :exc:`~networkx.exception.NetworkXError` is raised.
"""
for i in range(max_iters):
@@ -86,7 +86,7 @@ def _powerlaw_sequence(gamma, low, high, condition, length, max_iters):
seq.append(_zipf_rv_below(gamma, low, high))
if condition(seq):
return seq
- raise nx.ExceededMaxIterations("Could not create power law sequence")
+ raise nx.NetworkXError("Could not create power law sequence")
# TODO Needs documentation.
@@ -100,7 +100,7 @@ def _generate_min_degree(gamma, average_degree, max_degree, tolerance,
mid_avg_deg = 0
while abs(mid_avg_deg - average_degree) > tolerance:
if itrs > max_iters:
- raise nx.ExceededMaxIterations("Could not match average_degree")
+ raise nx.NetworkXError("Could not match average_degree")
mid_avg_deg = 0
for x in range(int(min_deg_mid), max_degree + 1):
mid_avg_deg += (x ** (-gamma + 1)) / zeta(gamma, min_deg_mid,
@@ -129,11 +129,15 @@ def _generate_communities(degree_sequence, community_sizes, mu, max_iters):
``mu`` is a float in the interval [0, 1] indicating the fraction of
intra-community edges incident to each node.
+ ``max_iters`` indicates the number of times to generate a list
+ satisfying ``length``. If the number of iterations exceeds this
+ value, :exc:`~networkx.exception.NetworkXError` is raised.
+
``max_iters`` is the number of times to try to add a node to a
community. This must be greater than the length of
``degree_sequence``, otherwise this function will always fail. If
the number of iterations exceeds this value,
- :exc:`~networkx.exception.ExceededMaxIterations` is raised.
+ :exc:`~networkx.exception.NetworkXError` is raised.
The communities returned by this are sets of integers in the set {0,
..., *n* - 1}, where *n* is the length of ``degree_sequence``.
@@ -160,8 +164,8 @@ def _generate_communities(degree_sequence, community_sizes, mu, max_iters):
free.append(result[c].pop())
if not free:
return result
- msg = 'Could not assign communities; try increasing min_community'
- raise nx.ExceededMaxIterations(msg)
+ raise nx.NetworkXError('Could not assign communities; try increasing'
+ ' min_community')
def LFR_benchmark_graph(n, tau1, tau2, mu, average_degree=None,
@@ -280,7 +284,6 @@ def LFR_benchmark_graph(n, tau1, tau2, mu, average_degree=None,
If ``min_degree`` is not specified and a suitable ``min_degree``
cannot be found.
- ExceededMaxIterations
If a valid degree sequence cannot be created within
``max_iters`` number of iterations.
diff --git a/networkx/algorithms/isomorphism/isomorph.py b/networkx/algorithms/isomorphism/isomorph.py
index 8f1d4a478..42ff26496 100644
--- a/networkx/algorithms/isomorphism/isomorph.py
+++ b/networkx/algorithms/isomorphism/isomorph.py
@@ -186,7 +186,9 @@ def is_isomorphic(G1, G2, node_match=None, edge_match=None):
For multidigraphs G1 and G2, using 'weight' edge attribute (default: 7)
>>> G1.add_edge(1,2, weight=7)
+ 1
>>> G2.add_edge(10,20)
+ 1
>>> em = iso.numerical_multiedge_match('weight', 7, rtol=1e-6)
>>> nx.is_isomorphic(G1, G2, edge_match=em)
True
diff --git a/networkx/algorithms/link_analysis/hits_alg.py b/networkx/algorithms/link_analysis/hits_alg.py
index 2ca56beaf..7325352c4 100644
--- a/networkx/algorithms/link_analysis/hits_alg.py
+++ b/networkx/algorithms/link_analysis/hits_alg.py
@@ -42,13 +42,6 @@ def hits(G,max_iter=100,tol=1.0e-8,nstart=None,normalized=True):
Two dictionaries keyed by node containing the hub and authority
values.
- Raises
- ------
- PowerIterationFailedConvergence
- If the algorithm fails to converge to the specified tolerance
- within the specified number of iterations of the power iteration
- method.
-
Examples
--------
>>> G=nx.path_graph(4)
@@ -89,7 +82,8 @@ def hits(G,max_iter=100,tol=1.0e-8,nstart=None,normalized=True):
s=1.0/sum(h.values())
for k in h:
h[k]*=s
- for _ in range(max_iter): # power iteration: make up to max_iter iterations
+ i=0
+ while True: # power iteration: make up to max_iter iterations
hlast=h
h=dict.fromkeys(hlast.keys(),0)
a=dict.fromkeys(hlast.keys(),0)
@@ -112,8 +106,10 @@ def hits(G,max_iter=100,tol=1.0e-8,nstart=None,normalized=True):
err=sum([abs(h[n]-hlast[n]) for n in h])
if err < tol:
break
- else:
- raise nx.PowerIterationFailedConvergence(max_iter)
+ if i>max_iter:
+ raise NetworkXError(\
+ "HITS: power iteration failed to converge in %d iterations."%(i+1))
+ i+=1
if normalized:
s = 1.0/sum(a.values())
for n in a:
@@ -251,13 +247,6 @@ def hits_scipy(G,max_iter=100,tol=1.0e-6,normalized=True):
algorithm does not check if the input graph is directed and will
execute on undirected graphs.
- Raises
- ------
- PowerIterationFailedConvergence
- If the algorithm fails to converge to the specified tolerance
- within the specified number of iterations of the power iteration
- method.
-
References
----------
.. [1] A. Langville and C. Meyer,
@@ -292,7 +281,8 @@ def hits_scipy(G,max_iter=100,tol=1.0e-6,normalized=True):
if err < tol:
break
if i>max_iter:
- raise nx.PowerIterationFailedConvergence(max_iter)
+ raise NetworkXError(\
+ "HITS: power iteration failed to converge in %d iterations."%(i+1))
i+=1
a=np.asarray(x).flatten()
diff --git a/networkx/algorithms/link_analysis/pagerank_alg.py b/networkx/algorithms/link_analysis/pagerank_alg.py
index a105f8f28..050179075 100644
--- a/networkx/algorithms/link_analysis/pagerank_alg.py
+++ b/networkx/algorithms/link_analysis/pagerank_alg.py
@@ -35,8 +35,7 @@ def pagerank(G, alpha=0.85, personalization=None,
personalization: dict, optional
The "personalization vector" consisting of a dictionary with a
- key for every graph node and personalization value for each node.
- At least one personalization value must be non-zero.
+ key for every graph node and nonzero personalization value for each node.
By default, a uniform distribution is used.
max_iter : integer, optional
@@ -73,11 +72,9 @@ def pagerank(G, alpha=0.85, personalization=None,
Notes
-----
The eigenvector calculation is done by the power iteration method
- and has no guarantee of convergence. The iteration will stop after
- an error tolerance of ``len(G) * tol`` has been reached. If the
- number of iterations exceed `max_iter`, a
- :exc:`networkx.exception.PowerIterationFailedConvergence` exception
- is raised.
+ and has no guarantee of convergence. The iteration will stop
+ after max_iter iterations or an error tolerance of
+ number_of_nodes(G)*tol has been reached.
The PageRank algorithm was designed for directed graphs but this
algorithm does not check if the input graph is directed and will
@@ -88,13 +85,6 @@ def pagerank(G, alpha=0.85, personalization=None,
--------
pagerank_numpy, pagerank_scipy, google_matrix
- Raises
- ------
- PowerIterationFailedConvergence
- If the algorithm fails to converge to the specified tolerance
- within the specified number of iterations of the power iteration
- method.
-
References
----------
.. [1] A. Langville and C. Meyer,
@@ -103,7 +93,6 @@ def pagerank(G, alpha=0.85, personalization=None,
.. [2] Page, Lawrence; Brin, Sergey; Motwani, Rajeev and Winograd, Terry,
The PageRank citation ranking: Bringing order to the Web. 1999
http://dbpubs.stanford.edu:8090/pub/showDoc.Fulltext?lang=en&doc=1999-66&format=pdf
-
"""
if len(G) == 0:
return {}
@@ -165,7 +154,8 @@ def pagerank(G, alpha=0.85, personalization=None,
err = sum([abs(x[n] - xlast[n]) for n in x])
if err < N*tol:
return x
- raise nx.PowerIterationFailedConvergence(max_iter)
+ raise NetworkXError('pagerank: power iteration failed to converge '
+ 'in %d iterations.' % max_iter)
def google_matrix(G, alpha=0.85, personalization=None,
@@ -415,13 +405,6 @@ def pagerank_scipy(G, alpha=0.85, personalization=None,
--------
pagerank, pagerank_numpy, google_matrix
- Raises
- ------
- PowerIterationFailedConvergence
- If the algorithm fails to converge to the specified tolerance
- within the specified number of iterations of the power iteration
- method.
-
References
----------
.. [1] A. Langville and C. Meyer,
@@ -485,7 +468,8 @@ def pagerank_scipy(G, alpha=0.85, personalization=None,
err = scipy.absolute(x - xlast).sum()
if err < N * tol:
return dict(zip(nodelist, map(float, x)))
- raise nx.PowerIterationFailedConvergence(max_iter)
+ raise NetworkXError('pagerank_scipy: power iteration failed to converge '
+ 'in %d iterations.' % max_iter)
# fixture for nose tests
diff --git a/networkx/algorithms/traversal/depth_first_search.py b/networkx/algorithms/traversal/depth_first_search.py
index 73897cf1c..2b5cea636 100644
--- a/networkx/algorithms/traversal/depth_first_search.py
+++ b/networkx/algorithms/traversal/depth_first_search.py
@@ -69,7 +69,7 @@ def dfs_edges(G, source=None):
except StopIteration:
stack.pop()
-def dfs_tree(G, source=None):
+def dfs_tree(G, source):
"""Return oriented tree constructed from a depth-first-search from source.
Parameters
diff --git a/networkx/classes/multidigraph.py b/networkx/classes/multidigraph.py
index 22bfed174..2b850483c 100644
--- a/networkx/classes/multidigraph.py
+++ b/networkx/classes/multidigraph.py
@@ -83,22 +83,22 @@ class MultiDiGraph(MultiGraph,DiGraph):
Add one edge,
- >>> G.add_edge(1, 2)
+ >>> key = G.add_edge(1, 2)
a list of edges,
- >>> G.add_edges_from([(1,2),(1,3)])
+ >>> keys = G.add_edges_from([(1,2),(1,3)])
or a collection of edges,
- >>> G.add_edges_from(H.edges())
+ >>> keys = G.add_edges_from(H.edges())
If some edges connect nodes not yet in the graph, the nodes
are added automatically. If an edge already exists, an additional
edge is created and stored using a key to identify the edge.
By default the key is the lowest unused integer.
- >>> G.add_edges_from([(4,5,dict(route=282)), (4,5,dict(route=37))])
+ >>> keys = G.add_edges_from([(4,5,dict(route=282)), (4,5,dict(route=37))])
>>> G[4]
{5: {0: {}, 1: {'route': 282}, 2: {'route': 37}}}
@@ -130,9 +130,9 @@ class MultiDiGraph(MultiGraph,DiGraph):
Add edge attributes using add_edge(), add_edges_from(), subscript
notation, or G.edge.
- >>> G.add_edge(1, 2, weight=4.7 )
- >>> G.add_edges_from([(3,4),(4,5)], color='red')
- >>> G.add_edges_from([(1,2,{'color':'blue'}), (2,3,{'weight':8})])
+ >>> key = G.add_edge(1, 2, weight=4.7 )
+ >>> keys = G.add_edges_from([(3,4),(4,5)], color='red')
+ >>> keys = G.add_edges_from([(1,2,{'color':'blue'}), (2,3,{'weight':8})])
>>> G[1][2][0]['weight'] = 4.7
>>> G.edge[1][2][0]['weight'] = 4
@@ -222,7 +222,7 @@ class MultiDiGraph(MultiGraph,DiGraph):
>>> G.add_nodes_from( (2,1) )
>>> list(G.nodes())
[2, 1]
- >>> G.add_edges_from( ((2,2), (2,1), (2,1), (1,1)) )
+ >>> keys = G.add_edges_from( ((2,2), (2,1), (2,1), (1,1)) )
>>> list(G.edges())
[(2, 1), (2, 1), (2, 2), (1, 1)]
@@ -238,7 +238,8 @@ class MultiDiGraph(MultiGraph,DiGraph):
>>> G.add_nodes_from( (2,1) )
>>> list(G.nodes())
[2, 1]
- >>> G.add_edges_from( ((2,2), (2,1,2,{'weight':0.1}), (2,1,1,{'weight':0.2}), (1,1)) )
+ >>> elist = ((2,2), (2,1,2,{'weight':0.1}), (2,1,1,{'weight':0.2}), (1,1))
+ >>> keys = G.add_edges_from(elist)
>>> list(G.edges(keys=True))
[(2, 2, 0), (2, 1, 2), (2, 1, 1), (1, 1, 0)]
@@ -275,6 +276,10 @@ class MultiDiGraph(MultiGraph,DiGraph):
Edge data (or labels or objects) can be assigned using
keyword arguments.
+ Returns
+ -------
+ The edge key assigned to the edge.
+
See Also
--------
add_edges_from : add a collection of edges
@@ -289,21 +294,27 @@ class MultiDiGraph(MultiGraph,DiGraph):
multiedge weights. Convert to Graph using edge attribute
'weight' to enable weighted graph algorithms.
+ Default keys are generated using the method `new_edge_key()`.
+ This method can be overridden by subclassing the base class and
+ providing a custom `new_edge_key()` method.
+
Examples
--------
The following all add the edge e=(1,2) to graph G:
>>> G = nx.MultiDiGraph()
>>> e = (1,2)
- >>> G.add_edge(1, 2) # explicit two-node form
+ >>> key = G.add_edge(1, 2) # explicit two-node form
>>> G.add_edge(*e) # single edge as tuple of two nodes
+ 1
>>> G.add_edges_from( [(1,2)] ) # add edges from iterable container
+ [2]
Associate data to edges using keywords:
- >>> G.add_edge(1, 2, weight=3)
- >>> G.add_edge(1, 2, key=0, weight=4) # update data for key=0
- >>> G.add_edge(1, 3, weight=7, capacity=15, length=342.7)
+ >>> key = G.add_edge(1, 2, weight=3)
+ >>> key = G.add_edge(1, 2, key=0, weight=4) # update data for key=0
+ >>> key = G.add_edge(1, 3, weight=7, capacity=15, length=342.7)
For non-string associations, directly access the edge's attribute
dictionary.
@@ -317,27 +328,22 @@ class MultiDiGraph(MultiGraph,DiGraph):
self.succ[v] = self.adjlist_dict_factory()
self.pred[v] = self.adjlist_dict_factory()
self.node[v] = {}
+ if key is None:
+ key = self.new_edge_key(u, v)
if v in self.succ[u]:
keydict = self.adj[u][v]
- if key is None:
- # find a unique integer key
- # other methods might be better here?
- key = len(keydict)
- while key in keydict:
- key += 1
datadict = keydict.get(key, self.edge_key_dict_factory())
datadict.update(attr)
keydict[key] = datadict
else:
# selfloops work this way without special treatment
- if key is None:
- key = 0
datadict = self.edge_attr_dict_factory()
datadict.update(attr)
keydict = self.edge_key_dict_factory()
keydict[key] = datadict
self.succ[u][v] = keydict
self.pred[v][u] = keydict
+ return key
def remove_edge(self, u, v, key=None):
@@ -372,14 +378,17 @@ class MultiDiGraph(MultiGraph,DiGraph):
For multiple edges
>>> G = nx.MultiDiGraph()
- >>> G.add_edges_from([(1,2),(1,2),(1,2)])
+ >>> G.add_edges_from([(1,2),(1,2),(1,2)]) # key_list returned
+ [0, 1, 2]
>>> G.remove_edge(1,2) # remove a single (arbitrary) edge
For edges with keys
>>> G = nx.MultiDiGraph()
>>> G.add_edge(1,2,key='first')
+ 'first'
>>> G.add_edge(1,2,key='second')
+ 'second'
>>> G.remove_edge(1,2,key='second')
"""
@@ -437,7 +446,7 @@ class MultiDiGraph(MultiGraph,DiGraph):
--------
>>> G = nx.MultiDiGraph()
>>> nx.add_path(G, [0, 1, 2])
- >>> G.add_edge(2,3,weight=5)
+ >>> key = G.add_edge(2,3,weight=5)
>>> [e for e in G.edges()]
[(0, 1), (1, 2), (2, 3)]
>>> list(G.edges(data=True)) # default data is {} (empty dict)
@@ -795,7 +804,7 @@ class MultiDiGraph(MultiGraph,DiGraph):
If already directed, return a (deep) copy
>>> G = nx.MultiDiGraph()
- >>> G.add_edge(0, 1)
+ >>> key = G.add_edge(0, 1)
>>> H = G.to_directed()
>>> list(H.edges())
[(0, 1)]
@@ -833,7 +842,7 @@ class MultiDiGraph(MultiGraph,DiGraph):
See the Python copy module for more information on shallow
and deep copies, http://docs.python.org/library/copy.html.
- Warning: If you have subclassed MultiGraph to use dict-like objects
+ Warning: If you have subclassed MultiGraph to use dict-like objects
in the data structure, those changes do not transfer to the MultiDiGraph
created by this method.
@@ -964,10 +973,10 @@ class MultiDiGraph(MultiGraph,DiGraph):
>>> # Create a graph in which some edges are "good" and some "bad".
>>> G = nx.MultiDiGraph()
- >>> G.add_edge(0, 1, key=0, good=True)
- >>> G.add_edge(0, 1, key=1, good=False)
- >>> G.add_edge(1, 2, key=0, good=False)
- >>> G.add_edge(1, 2, key=1, good=True)
+ >>> key = G.add_edge(0, 1, key=0, good=True)
+ >>> key = G.add_edge(0, 1, key=1, good=False)
+ >>> key = G.add_edge(1, 2, key=0, good=False)
+ >>> key = G.add_edge(1, 2, key=1, good=True)
>>> # Keep only those edges that are marked as "good".
>>> edges = G.edges(keys=True, data='good')
>>> edges = ((u, v, k) for (u, v, k, good) in edges if good)
diff --git a/networkx/classes/multigraph.py b/networkx/classes/multigraph.py
index d6f925d0c..0b58059d7 100644
--- a/networkx/classes/multigraph.py
+++ b/networkx/classes/multigraph.py
@@ -82,22 +82,22 @@ class MultiGraph(Graph):
Add one edge,
- >>> G.add_edge(1, 2)
+ >>> key = G.add_edge(1, 2)
a list of edges,
- >>> G.add_edges_from([(1,2),(1,3)])
+ >>> keys = G.add_edges_from([(1,2),(1,3)])
or a collection of edges,
- >>> G.add_edges_from(list(H.edges()))
+ >>> keys = G.add_edges_from(list(H.edges()))
If some edges connect nodes not yet in the graph, the nodes
are added automatically. If an edge already exists, an additional
edge is created and stored using a key to identify the edge.
By default the key is the lowest unused integer.
- >>> G.add_edges_from([(4,5,dict(route=282)), (4,5,dict(route=37))])
+ >>> keys = G.add_edges_from([(4,5,dict(route=282)), (4,5,dict(route=37))])
>>> G[4]
{3: {0: {}}, 5: {0: {}, 1: {'route': 282}, 2: {'route': 37}}}
@@ -129,9 +129,9 @@ class MultiGraph(Graph):
Add edge attributes using add_edge(), add_edges_from(), subscript
notation, or G.edge.
- >>> G.add_edge(1, 2, weight=4.7 )
- >>> G.add_edges_from([(3,4),(4,5)], color='red')
- >>> G.add_edges_from([(1,2,{'color':'blue'}), (2,3,{'weight':8})])
+ >>> key = G.add_edge(1, 2, weight=4.7 )
+ >>> keys = G.add_edges_from([(3,4),(4,5)], color='red')
+ >>> keys = G.add_edges_from([(1,2,{'color':'blue'}), (2,3,{'weight':8})])
>>> G[1][2][0]['weight'] = 4.7
>>> G.edge[1][2][0]['weight'] = 4
@@ -223,7 +223,7 @@ class MultiGraph(Graph):
>>> G.add_nodes_from( (2,1) )
>>> list(G.nodes())
[2, 1]
- >>> G.add_edges_from( ((2,2), (2,1), (2,1), (1,1)) )
+ >>> keys = G.add_edges_from( ((2,2), (2,1), (2,1), (1,1)) )
>>> list(G.edges())
[(2, 1), (2, 1), (2, 2), (1, 1)]
@@ -239,7 +239,8 @@ class MultiGraph(Graph):
>>> G.add_nodes_from( (2,1) )
>>> list(G.nodes())
[2, 1]
- >>> G.add_edges_from( ((2,2), (2,1,2,{'weight':0.1}), (2,1,1,{'weight':0.2}), (1,1)) )
+ >>> elist = ((2,2), (2,1,2,{'weight':0.1}), (2,1,1,{'weight':0.2}), (1,1))
+ >>> keys = G.add_edges_from(elist)
>>> list(G.edges(keys=True))
[(2, 2, 0), (2, 1, 2), (2, 1, 1), (1, 1, 0)]
@@ -253,6 +254,36 @@ class MultiGraph(Graph):
self.edge_key_dict_factory = self.edge_key_dict_factory
Graph.__init__(self, data, **attr)
+ def new_edge_key(self, u, v):
+ """Return an unused key for edges between nodes `u` and `v`.
+
+ The nodes `u` and `v` do not need to be already in the graph.
+
+ Notes
+ -----
+
+ In the standard MultiGraph class the new key is the number of existing
+ edges between `u` and `v` (increased if necessary to ensure unused).
+        The first edge will have key 0, then 1, etc. If an edge is removed,
+ further new_edge_keys may not be in this order.
+
+ Parameters
+ ----------
+ u, v : nodes
+
+ Returns
+ -------
+ key : int
+ """
+ try:
+ keydict = self.adj[u][v]
+ except KeyError:
+ return 0
+ key = len(keydict)
+ while key in keydict:
+ key += 1
+ return key
+
def add_edge(self, u, v, key=None, **attr):
"""Add an edge between u and v.
@@ -273,6 +304,10 @@ class MultiGraph(Graph):
Edge data (or labels or objects) can be assigned using
keyword arguments.
+ Returns
+ -------
+ The edge key assigned to the edge.
+
See Also
--------
add_edges_from : add a collection of edges
@@ -287,6 +322,10 @@ class MultiGraph(Graph):
multiedge weights. Convert to Graph using edge attribute
'weight' to enable weighted graph algorithms.
+ Default keys are generated using the method `new_edge_key()`.
+ This method can be overridden by subclassing the base class and
+ providing a custom `new_edge_key()` method.
+
Examples
--------
The following all add the edge e=(1,2) to graph G:
@@ -310,27 +349,22 @@ class MultiGraph(Graph):
if v not in self.adj:
self.adj[v] = self.adjlist_dict_factory()
self.node[v] = {}
+ if key is None:
+ key = self.new_edge_key(u, v)
if v in self.adj[u]:
keydict = self.adj[u][v]
- if key is None:
- # find a unique integer key
- # other methods might be better here?
- key = len(keydict)
- while key in keydict:
- key += 1
datadict = keydict.get(key, self.edge_attr_dict_factory())
datadict.update(attr)
keydict[key] = datadict
else:
# selfloops work this way without special treatment
- if key is None:
- key = 0
datadict = self.edge_attr_dict_factory()
datadict.update(attr)
keydict = self.edge_key_dict_factory()
keydict[key] = datadict
self.adj[u][v] = keydict
self.adj[v][u] = keydict
+ return key
def add_edges_from(self, ebunch, **attr):
"""Add all the edges in ebunch.
@@ -349,6 +383,10 @@ class MultiGraph(Graph):
Edge data (or labels or objects) can be assigned using
keyword arguments.
+ Returns
+ -------
+ A list of edge keys assigned to the edges in `ebunch`.
+
See Also
--------
add_edge : add a single edge
@@ -359,9 +397,13 @@ class MultiGraph(Graph):
Adding the same edge twice has no effect but any edge data
will be updated when each duplicate edge is added.
- Edge attributes specified in an ebunch take precedence over
+ Edge attributes specified in an ebunch take precedence over
attributes specified via keyword arguments.
+ Default keys are generated using the method ``new_edge_key()``.
+ This method can be overridden by subclassing the base class and
+ providing a custom ``new_edge_key()`` method.
+
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
@@ -374,6 +416,7 @@ class MultiGraph(Graph):
>>> G.add_edges_from([(1,2),(2,3)], weight=3)
>>> G.add_edges_from([(3,4),(1,4)], label='WN2898')
"""
+ keylist=[]
# process ebunch
for e in ebunch:
ne = len(e)
@@ -392,15 +435,10 @@ class MultiGraph(Graph):
ddd = {}
ddd.update(attr)
ddd.update(dd)
- if key is None:
- k = 0
- if u in self and v in self[u]:
- while k in self[u][v]:
- k += 1
- else:
- k = key
- self.add_edge(u, v, k)
- self[u][v][k].update(ddd)
+ key = self.add_edge(u, v, key)
+ self[u][v][key].update(ddd)
+ keylist.append(key)
+ return keylist
def remove_edge(self, u, v, key=None):
"""Remove an edge between u and v.
@@ -434,14 +472,17 @@ class MultiGraph(Graph):
For multiple edges
>>> G = nx.MultiGraph() # or MultiDiGraph, etc
- >>> G.add_edges_from([(1,2),(1,2),(1,2)])
+ >>> G.add_edges_from([(1,2),(1,2),(1,2)]) # key_list returned
+ [0, 1, 2]
>>> G.remove_edge(1,2) # remove a single (arbitrary) edge
For edges with keys
>>> G = nx.MultiGraph() # or MultiDiGraph, etc
>>> G.add_edge(1,2,key='first')
+ 'first'
>>> G.add_edge(1,2,key='second')
+ 'second'
>>> G.remove_edge(1,2,key='second')
"""
@@ -496,7 +537,7 @@ class MultiGraph(Graph):
Removing multiple copies of edges
>>> G = nx.MultiGraph()
- >>> G.add_edges_from([(1,2),(1,2),(1,2)])
+ >>> keys = G.add_edges_from([(1,2),(1,2),(1,2)])
>>> G.remove_edges_from([(1,2),(1,2)])
>>> list(G.edges())
[(1, 2)]
@@ -540,6 +581,7 @@ class MultiGraph(Graph):
>>> G.has_edge(*e) # e is a 2-tuple (u,v)
True
>>> G.add_edge(0,1,key='a')
+ 'a'
>>> G.has_edge(0,1,key='a') # specify key
True
>>> e=(0,1,'a')
@@ -600,7 +642,7 @@ class MultiGraph(Graph):
--------
>>> G = nx.MultiGraph() # or MultiDiGraph
>>> nx.add_path(G, [0, 1, 2])
- >>> G.add_edge(2,3,weight=5)
+ >>> key = G.add_edge(2,3,weight=5)
>>> [e for e in G.edges()]
[(0, 1), (1, 2), (2, 3)]
>>> list(G.edges(data=True)) # default data is {} (empty dict)
@@ -672,7 +714,7 @@ class MultiGraph(Graph):
It is faster to use G[u][v][key].
>>> G = nx.MultiGraph() # or MultiDiGraph
- >>> G.add_edge(0,1,key='a',weight=7)
+ >>> key = G.add_edge(0,1,key='a',weight=7)
>>> G[0][1]['a'] # key='a'
{'weight': 7}
@@ -868,7 +910,9 @@ class MultiGraph(Graph):
--------
>>> G = nx.MultiGraph() # or MultiDiGraph
>>> G.add_edge(1,1)
+ 0
>>> G.add_edge(1,2)
+ 0
>>> list(G.selfloop_edges())
[(1, 1)]
>>> list(G.selfloop_edges(data=True))
@@ -1048,10 +1092,10 @@ class MultiGraph(Graph):
>>> # Create a graph in which some edges are "good" and some "bad".
>>> G = nx.MultiGraph()
- >>> G.add_edge(0, 1, key=0, good=True)
- >>> G.add_edge(0, 1, key=1, good=False)
- >>> G.add_edge(1, 2, key=0, good=False)
- >>> G.add_edge(1, 2, key=1, good=True)
+ >>> key = G.add_edge(0, 1, key=0, good=True)
+ >>> key = G.add_edge(0, 1, key=1, good=False)
+ >>> key = G.add_edge(1, 2, key=0, good=False)
+ >>> key = G.add_edge(1, 2, key=1, good=True)
>>> # Keep only those edges that are marked as "good".
>>> edges = G.edges(keys=True, data='good')
>>> edges = ((u, v, k) for (u, v, k, good) in edges if good)
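Taken together, the `add_edge`/`add_edges_from` changes above make key assignment observable by the caller. A quick usage sketch (assuming a networkx build that includes this patch):

```python
import networkx as nx

G = nx.MultiGraph()
k = G.add_edge(1, 2)                       # returns the assigned key: 0
keys = G.add_edges_from([(1, 2), (1, 2)])  # returns the key list: [1, 2]
assert (k, keys) == (0, [1, 2])
assert sorted(G[1][2]) == [0, 1, 2]        # keydict now holds all three keys
```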
diff --git a/networkx/convert_matrix.py b/networkx/convert_matrix.py
index 3aab7db79..c89704c79 100644
--- a/networkx/convert_matrix.py
+++ b/networkx/convert_matrix.py
@@ -105,9 +105,13 @@ def to_pandas_dataframe(G, nodelist=None, dtype=None, order=None,
--------
>>> G = nx.MultiDiGraph()
>>> G.add_edge(0,1,weight=2)
+ 0
>>> G.add_edge(1,0)
+ 0
>>> G.add_edge(2,2,weight=3)
+ 0
>>> G.add_edge(2,2)
+ 1
>>> nx.to_pandas_dataframe(G, nodelist=[0,1,2], dtype=int)
0 1 2
0 0 2 0
@@ -304,9 +308,13 @@ def to_numpy_matrix(G, nodelist=None, dtype=None, order=None,
--------
>>> G = nx.MultiDiGraph()
>>> G.add_edge(0,1,weight=2)
+ 0
>>> G.add_edge(1,0)
+ 0
>>> G.add_edge(2,2,weight=3)
+ 0
>>> G.add_edge(2,2)
+ 1
>>> nx.to_numpy_matrix(G, nodelist=[0,1,2])
matrix([[ 0., 2., 0.],
[ 1., 0., 0.],
@@ -680,9 +688,13 @@ def to_scipy_sparse_matrix(G, nodelist=None, dtype=None,
--------
>>> G = nx.MultiDiGraph()
>>> G.add_edge(0,1,weight=2)
+ 0
>>> G.add_edge(1,0)
+ 0
>>> G.add_edge(2,2,weight=3)
+ 0
>>> G.add_edge(2,2)
+ 1
>>> S = nx.to_scipy_sparse_matrix(G, nodelist=[0,1,2])
>>> print(S.todense())
[[0 2 0]
diff --git a/networkx/drawing/layout.py b/networkx/drawing/layout.py
index 8dbb77262..872787d04 100644
--- a/networkx/drawing/layout.py
+++ b/networkx/drawing/layout.py
@@ -139,7 +139,7 @@ def circular_layout(G, scale=1, center=None, dim=2):
if len(G) == 0:
pos = {}
elif len(G) == 1:
- pos = {nx.utils.arbitrary_element(G): center}
+ pos = {G.nodes()[0]: center}
else:
# Discard the extra angle since it matches 0 radians.
theta = np.linspace(0, 1, len(G) + 1)[:-1] * 2 * np.pi
@@ -287,7 +287,7 @@ def fruchterman_reingold_layout(G, k=None,
if pos is not None:
# Determine size of existing domain to adjust initial positions
- dom_size = max(coord for pos_tup in pos.values() for coord in pos_tup)
+ dom_size = max(coord for coord in pos_tup for pos_tup in pos.values())
shape = (len(G), dim)
pos_arr = np.random.random(shape) * dom_size + center
for i, n in enumerate(G):
diff --git a/networkx/exception.py b/networkx/exception.py
index 01aec88b1..7d0880248 100644
--- a/networkx/exception.py
+++ b/networkx/exception.py
@@ -55,29 +55,3 @@ class NetworkXNotImplemented(NetworkXException):
class NodeNotFound(NetworkXException):
"""Exception raised if requested node is not present in the graph"""
-
-
-class ExceededMaxIterations(NetworkXException):
- """Raised if a loop iterates too many times without breaking.
-
- This may occur, for example, in an algorithm that computes
- progressively better approximations to a value but exceeds an
- iteration bound specified by the user.
-
- """
-
-
-class PowerIterationFailedConvergence(ExceededMaxIterations):
- """Raised when the power iteration method fails to converge within a
- specified iteration limit.
-
- `num_iterations` is the number of iterations that have been
- completed when this exception was raised.
-
- """
-
- def __init__(self, num_iterations, *args, **kw):
- msg = 'power iteration failed to converge within {} iterations'
- msg = msg.format(num_iterations)
- superinit = super(PowerIterationFailedConvergence, self).__init__
- superinit(self, msg, *args, **kw)
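Because all default key generation now funnels through `new_edge_key()`, a subclass can swap in a different scheme — for example the UUID idea floated in the issue below. A minimal, hypothetical sketch (not part of the patch; assumes a networkx build containing it):

```python
import uuid

import networkx as nx


class UUIDMultiGraph(nx.MultiGraph):
    """MultiGraph whose parallel edges get globally unique string keys."""

    def new_edge_key(self, u, v):
        # Ignore existing keys entirely; uuid4 collisions are negligible.
        return uuid.uuid4().hex


G = UUIDMultiGraph()
k1 = G.add_edge(1, 2)   # add_edge now returns the generated key
k2 = G.add_edge(1, 2)   # a second, distinct parallel edge
assert k1 != k2 and set(G[1][2]) == {k1, k2}
```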
| Multigraph key simplification?
It seems that the way we handle multigraph edge keys is sometimes hard to maintain (see #2107 but true elsewhere too). Is there a way to handle it more simply?
Some thoughts include:
- defaulting to ```key=None``` for all add_edge actions. The first ```G.add_edge(1,2)``` gives ```(1,2,None)```. The second does nothing! To get a multiedge one must specify a key.
- defaulting to a UUID for the key of each multiedge (a key unique within the MultiGraph, and in fact across all MultiGraphs). Here calling ```G.add_edge(1,2)``` twice does add two separate edges.
Other thoughts? | networkx/networkx | diff --git a/networkx/algorithms/centrality/tests/test_eigenvector_centrality.py b/networkx/algorithms/centrality/tests/test_eigenvector_centrality.py
index babe0039c..465415f32 100644
--- a/networkx/algorithms/centrality/tests/test_eigenvector_centrality.py
+++ b/networkx/algorithms/centrality/tests/test_eigenvector_centrality.py
@@ -56,7 +56,7 @@ class TestEigenvectorCentrality(object):
- @raises(nx.PowerIterationFailedConvergence)
+ @raises(nx.NetworkXError)
def test_maxiter(self):
G=nx.path_graph(3)
b=nx.eigenvector_centrality(G,max_iter=0)
diff --git a/networkx/algorithms/centrality/tests/test_katz_centrality.py b/networkx/algorithms/centrality/tests/test_katz_centrality.py
index 8886711e3..69b015f12 100644
--- a/networkx/algorithms/centrality/tests/test_katz_centrality.py
+++ b/networkx/algorithms/centrality/tests/test_katz_centrality.py
@@ -30,7 +30,7 @@ class TestKatzCentrality(object):
for n in sorted(G):
assert_almost_equal(b[n], b_answer[n], places=4)
- @raises(nx.PowerIterationFailedConvergence)
+ @raises(nx.NetworkXError)
def test_maxiter(self):
alpha = 0.1
G = nx.path_graph(3)
diff --git a/networkx/algorithms/link_analysis/tests/test_hits.py b/networkx/algorithms/link_analysis/tests/test_hits.py
index b8849bc7a..7092d1dfd 100644
--- a/networkx/algorithms/link_analysis/tests/test_hits.py
+++ b/networkx/algorithms/link_analysis/tests/test_hits.py
@@ -91,9 +91,3 @@ class TestHITS:
raise SkipTest('scipy not available.')
G=networkx.Graph()
assert_equal(networkx.hits_scipy(G),({},{}))
-
-
- @raises(networkx.PowerIterationFailedConvergence)
- def test_hits_not_convergent(self):
- G = self.G
- networkx.hits(G, max_iter=0)
diff --git a/networkx/algorithms/link_analysis/tests/test_pagerank.py b/networkx/algorithms/link_analysis/tests/test_pagerank.py
index a64899d5f..d27ea2454 100644
--- a/networkx/algorithms/link_analysis/tests/test_pagerank.py
+++ b/networkx/algorithms/link_analysis/tests/test_pagerank.py
@@ -52,9 +52,8 @@ class TestPageRank(object):
for n in G:
assert_almost_equal(p[n], G.pagerank[n], places=4)
- @raises(networkx.PowerIterationFailedConvergence)
- def test_pagerank_max_iter(self):
- networkx.pagerank(self.G, max_iter=0)
+ assert_raises(networkx.NetworkXError, networkx.pagerank, G,
+ max_iter=0)
def test_numpy_pagerank(self):
G = self.G
@@ -81,28 +80,14 @@ class TestPageRank(object):
def test_personalization(self):
G = networkx.complete_graph(4)
personalize = {0: 1, 1: 1, 2: 4, 3: 4}
- answer = {0: 0.23246732615667579, 1: 0.23246732615667579, 2: 0.267532673843324, 3: 0.2675326738433241}
- p = networkx.pagerank(G, alpha=0.85, personalization=personalize)
+ answer = {0: 0.1, 1: 0.1, 2: 0.4, 3: 0.4}
+ p = networkx.pagerank(G, alpha=0.0, personalization=personalize)
for n in G:
assert_almost_equal(p[n], answer[n], places=4)
personalize.pop(0)
assert_raises(networkx.NetworkXError, networkx.pagerank, G,
personalization=personalize)
- def test_zero_personalization_vector(self):
- G = networkx.complete_graph(4)
- personalize = {0: 0, 1: 0, 2: 0, 3: 0}
- assert_raises(ZeroDivisionError, networkx.pagerank, G,
- personalization=personalize)
-
- def test_one_nonzero_personalization_value(self):
- G = networkx.complete_graph(4)
- personalize = {0: 0, 1: 0, 2: 0, 3: 1}
- answer = {0: 0.22077931820379187, 1: 0.22077931820379187, 2: 0.22077931820379187, 3: 0.3376620453886241}
- p = networkx.pagerank(G, alpha=0.85, personalization=personalize)
- for n in G:
- assert_almost_equal(p[n], answer[n], places=4)
-
def test_dangling_matrix(self):
"""
Tests that the google_matrix doesn't change except for the dangling
@@ -162,9 +147,8 @@ class TestPageRankScipy(TestPageRank):
p = networkx.pagerank_scipy(G, alpha=0.9, tol=1.e-08,
personalization=personalize)
- @raises(networkx.PowerIterationFailedConvergence)
- def test_scipy_pagerank_max_iter(self):
- networkx.pagerank_scipy(self.G, max_iter=0)
+ assert_raises(networkx.NetworkXError, networkx.pagerank_scipy, G,
+ max_iter=0)
def test_dangling_scipy_pagerank(self):
pr = networkx.pagerank_scipy(self.G, dangling=self.dangling_edges)
diff --git a/networkx/algorithms/traversal/tests/test_dfs.py b/networkx/algorithms/traversal/tests/test_dfs.py
index 3d0d13e4d..9fad98584 100644
--- a/networkx/algorithms/traversal/tests/test_dfs.py
+++ b/networkx/algorithms/traversal/tests/test_dfs.py
@@ -36,20 +36,9 @@ class TestDFS:
assert_equal(nx.dfs_predecessors(self.D), {1: 0, 3: 2})
def test_dfs_tree(self):
- exp_nodes = sorted(self.G.nodes())
- exp_edges = [(0, 1), (1, 2), (2, 4), (4, 3)]
- # Search from first node
T=nx.dfs_tree(self.G,source=0)
- assert_equal(sorted(T.nodes()), exp_nodes)
- assert_equal(sorted(T.edges()), exp_edges)
- # Check source=None
- T = nx.dfs_tree(self.G, source=None)
- assert_equal(sorted(T.nodes()), exp_nodes)
- assert_equal(sorted(T.edges()), exp_edges)
- # Check source=None is the default
- T = nx.dfs_tree(self.G)
- assert_equal(sorted(T.nodes()), exp_nodes)
- assert_equal(sorted(T.edges()), exp_edges)
+ assert_equal(sorted(T.nodes()),sorted(self.G.nodes()))
+ assert_equal(sorted(T.edges()),[(0, 1), (1, 2), (2, 4), (4, 3)])
def test_dfs_edges(self):
edges=nx.dfs_edges(self.G,source=0)
diff --git a/networkx/drawing/tests/test_layout.py b/networkx/drawing/tests/test_layout.py
index 12b47be70..5763eca5e 100644
--- a/networkx/drawing/tests/test_layout.py
+++ b/networkx/drawing/tests/test_layout.py
@@ -1,7 +1,7 @@
"""Unit tests for layout functions."""
import sys
from nose import SkipTest
-from nose.tools import assert_equal, assert_false, assert_raises
+from nose.tools import assert_equal
import networkx as nx
@@ -16,10 +16,10 @@ class TestLayout(object):
raise SkipTest('numpy not available.')
def setUp(self):
- self.Gi = nx.grid_2d_graph(5, 5)
+ self.Gi = nx.grid_2d_graph(5,5)
self.Gs = nx.Graph()
nx.add_path(self.Gs, 'abcdef')
- self.bigG = nx.grid_2d_graph(25, 25) #bigger than 500 nodes for sparse
+ self.bigG = nx.grid_2d_graph(25,25) #bigger than 500 nodes for sparse
def test_smoke_int(self):
G = self.Gi
@@ -27,7 +27,6 @@ class TestLayout(object):
vpos = nx.circular_layout(G)
vpos = nx.spring_layout(G)
vpos = nx.fruchterman_reingold_layout(G)
- vpos = nx.fruchterman_reingold_layout(self.bigG)
vpos = nx.spectral_layout(G)
vpos = nx.spectral_layout(self.bigG)
vpos = nx.shell_layout(G)
@@ -44,9 +43,8 @@ class TestLayout(object):
def test_adjacency_interface_numpy(self):
A = nx.to_numpy_matrix(self.Gs)
pos = nx.drawing.layout._fruchterman_reingold(A)
- assert_equal(pos.shape, (6, 2))
pos = nx.drawing.layout._fruchterman_reingold(A, dim=3)
- assert_equal(pos.shape, (6, 3))
+ assert_equal(pos.shape, (6,3))
def test_adjacency_interface_scipy(self):
try:
@@ -55,70 +53,14 @@ class TestLayout(object):
raise SkipTest('scipy not available.')
A = nx.to_scipy_sparse_matrix(self.Gs, dtype='d')
pos = nx.drawing.layout._sparse_fruchterman_reingold(A)
- assert_equal(pos.shape, (6, 2))
pos = nx.drawing.layout._sparse_spectral(A)
- assert_equal(pos.shape, (6, 2))
pos = nx.drawing.layout._sparse_fruchterman_reingold(A, dim=3)
- assert_equal(pos.shape, (6, 3))
+ assert_equal(pos.shape, (6,3))
def test_single_nodes(self):
G = nx.path_graph(1)
vpos = nx.shell_layout(G)
- assert_false(vpos[0].any())
+ assert(vpos[0].any() == False)
G = nx.path_graph(3)
- vpos = nx.shell_layout(G, [[0], [1, 2]])
- assert_false(vpos[0].any())
-
- def test_smoke_initial_pos_fruchterman_reingold(self):
- pos = nx.circular_layout(self.Gi)
- npos = nx.fruchterman_reingold_layout(self.Gi, pos=pos)
-
- def test_fixed_node_fruchterman_reingold(self):
- # Dense version (numpy based)
- pos = nx.circular_layout(self.Gi)
- npos = nx.fruchterman_reingold_layout(self.Gi, pos=pos, fixed=[(0, 0)])
- assert_equal(tuple(pos[(0, 0)]), tuple(npos[(0, 0)]))
- # Sparse version (scipy based)
- pos = nx.circular_layout(self.bigG)
- npos = nx.fruchterman_reingold_layout(self.bigG, pos=pos, fixed=[(0, 0)])
- assert_equal(tuple(pos[(0, 0)]), tuple(npos[(0, 0)]))
-
- def test_center_parameter(self):
- G = nx.path_graph(1)
- vpos = nx.random_layout(G, center=(1, 1))
- vpos = nx.circular_layout(G, center=(1, 1))
- assert_equal(tuple(vpos[0]), (1, 1))
- vpos = nx.spring_layout(G, center=(1, 1))
- assert_equal(tuple(vpos[0]), (1, 1))
- vpos = nx.fruchterman_reingold_layout(G, center=(1, 1))
- assert_equal(tuple(vpos[0]), (1, 1))
- vpos = nx.spectral_layout(G, center=(1, 1))
- assert_equal(tuple(vpos[0]), (1, 1))
- vpos = nx.shell_layout(G, center=(1, 1))
- assert_equal(tuple(vpos[0]), (1, 1))
-
- def test_center_wrong_dimensions(self):
- G = nx.path_graph(1)
- assert_raises(ValueError, nx.random_layout, G, center=(1, 1, 1))
- assert_raises(ValueError, nx.circular_layout, G, center=(1, 1, 1))
- assert_raises(ValueError, nx.spring_layout, G, center=(1, 1, 1))
- assert_raises(ValueError, nx.fruchterman_reingold_layout, G, center=(1, 1, 1))
- assert_raises(ValueError, nx.fruchterman_reingold_layout, G, dim=3, center=(1, 1))
- assert_raises(ValueError, nx.spectral_layout, G, center=(1, 1, 1))
- assert_raises(ValueError, nx.spectral_layout, G, dim=3, center=(1, 1))
- assert_raises(ValueError, nx.shell_layout, G, center=(1, 1, 1))
-
- def test_empty_graph(self):
- G = nx.empty_graph()
- vpos = nx.random_layout(G, center=(1, 1))
- assert_equal(vpos, {})
- vpos = nx.circular_layout(G, center=(1, 1))
- assert_equal(vpos, {})
- vpos = nx.spring_layout(G, center=(1, 1))
- assert_equal(vpos, {})
- vpos = nx.fruchterman_reingold_layout(G, center=(1, 1))
- assert_equal(vpos, {})
- vpos = nx.spectral_layout(G, center=(1, 1))
- assert_equal(vpos, {})
- vpos = nx.shell_layout(G, center=(1, 1))
- assert_equal(vpos, {})
+ vpos = nx.shell_layout(G, [[0], [1,2]])
+ assert(vpos[0].any() == False)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 15
} | help | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y libgdal-dev graphviz"
],
"python": "3.6",
"reqs_path": [
"requirements/default.txt",
"requirements/test.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
decorator==5.1.1
importlib-metadata==4.8.3
iniconfig==1.1.1
-e git+https://github.com/networkx/networkx.git@df730d96d6490079a6b6fcf3a2bea64324aef02e#egg=networkx
nose==1.3.7
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: networkx
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- decorator==5.1.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- nose==1.3.7
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/networkx
| [
"networkx/algorithms/centrality/tests/test_eigenvector_centrality.py::TestEigenvectorCentrality::test_maxiter",
"networkx/algorithms/centrality/tests/test_katz_centrality.py::TestKatzCentrality::test_maxiter"
]
| [
"networkx/algorithms/centrality/tests/test_eigenvector_centrality.py::TestEigenvectorCentrality::test_K5",
"networkx/algorithms/centrality/tests/test_eigenvector_centrality.py::TestEigenvectorCentrality::test_P3",
"networkx/algorithms/centrality/tests/test_eigenvector_centrality.py::TestEigenvectorCentrality::test_P3_unweighted",
"networkx/algorithms/centrality/tests/test_eigenvector_centrality.py::TestEigenvectorCentralityDirected::test_eigenvector_centrality_weighted",
"networkx/algorithms/centrality/tests/test_eigenvector_centrality.py::TestEigenvectorCentralityDirected::test_eigenvector_centrality_weighted_numpy",
"networkx/algorithms/centrality/tests/test_eigenvector_centrality.py::TestEigenvectorCentralityDirected::test_eigenvector_centrality_unweighted",
"networkx/algorithms/centrality/tests/test_eigenvector_centrality.py::TestEigenvectorCentralityDirected::test_eigenvector_centrality_unweighted_numpy",
"networkx/algorithms/centrality/tests/test_eigenvector_centrality.py::TestEigenvectorCentralityExceptions::test_multigraph_numpy",
"networkx/algorithms/centrality/tests/test_eigenvector_centrality.py::TestEigenvectorCentralityExceptions::test_empty_numpy",
"networkx/algorithms/centrality/tests/test_katz_centrality.py::TestKatzCentralityNumpy::test_K5",
"networkx/algorithms/centrality/tests/test_katz_centrality.py::TestKatzCentralityNumpy::test_P3",
"networkx/algorithms/centrality/tests/test_katz_centrality.py::TestKatzCentralityNumpy::test_beta_as_scalar",
"networkx/algorithms/centrality/tests/test_katz_centrality.py::TestKatzCentralityNumpy::test_beta_as_dict",
"networkx/algorithms/centrality/tests/test_katz_centrality.py::TestKatzCentralityNumpy::test_multiple_alpha",
"networkx/algorithms/centrality/tests/test_katz_centrality.py::TestKatzCentralityNumpy::test_bad_beta",
"networkx/algorithms/centrality/tests/test_katz_centrality.py::TestKatzCentralityNumpy::test_bad_beta_numbe",
"networkx/algorithms/centrality/tests/test_katz_centrality.py::TestKatzCentralityNumpy::test_K5_unweighted",
"networkx/algorithms/centrality/tests/test_katz_centrality.py::TestKatzCentralityNumpy::test_P3_unweighted",
"networkx/algorithms/centrality/tests/test_katz_centrality.py::TestKatzCentralityDirected::test_katz_centrality_weighted",
"networkx/algorithms/centrality/tests/test_katz_centrality.py::TestKatzCentralityDirected::test_katz_centrality_unweighted",
"networkx/algorithms/centrality/tests/test_katz_centrality.py::TestKatzCentralityDirectedNumpy::test_katz_centrality_weighted",
"networkx/algorithms/centrality/tests/test_katz_centrality.py::TestKatzCentralityDirectedNumpy::test_katz_centrality_unweighted",
"networkx/algorithms/centrality/tests/test_katz_centrality.py::TestKatzEigenvectorVKatz::test_eigenvector_v_katz_random",
"networkx/algorithms/link_analysis/tests/test_hits.py::TestHITS::test_hits",
"networkx/algorithms/link_analysis/tests/test_hits.py::TestHITS::test_hits_nstart",
"networkx/algorithms/link_analysis/tests/test_pagerank.py::TestPageRank::test_pagerank",
"networkx/algorithms/link_analysis/tests/test_pagerank.py::TestPageRank::test_numpy_pagerank",
"networkx/algorithms/link_analysis/tests/test_pagerank.py::TestPageRank::test_google_matrix",
"networkx/algorithms/link_analysis/tests/test_pagerank.py::TestPageRank::test_dangling_matrix",
"networkx/algorithms/link_analysis/tests/test_pagerank.py::TestPageRank::test_dangling_pagerank",
"networkx/algorithms/link_analysis/tests/test_pagerank.py::TestPageRank::test_dangling_numpy_pagerank",
"networkx/algorithms/link_analysis/tests/test_pagerank.py::TestPageRank::test_empty",
"networkx/algorithms/link_analysis/tests/test_pagerank.py::TestPageRankScipy::test_pagerank",
"networkx/algorithms/link_analysis/tests/test_pagerank.py::TestPageRankScipy::test_numpy_pagerank",
"networkx/algorithms/link_analysis/tests/test_pagerank.py::TestPageRankScipy::test_google_matrix",
"networkx/algorithms/link_analysis/tests/test_pagerank.py::TestPageRankScipy::test_dangling_matrix",
"networkx/algorithms/link_analysis/tests/test_pagerank.py::TestPageRankScipy::test_dangling_pagerank",
"networkx/algorithms/link_analysis/tests/test_pagerank.py::TestPageRankScipy::test_dangling_numpy_pagerank",
"networkx/algorithms/link_analysis/tests/test_pagerank.py::TestPageRankScipy::test_empty",
"networkx/algorithms/link_analysis/tests/test_pagerank.py::TestPageRankScipy::test_scipy_pagerank",
"networkx/algorithms/link_analysis/tests/test_pagerank.py::TestPageRankScipy::test_dangling_scipy_pagerank",
"networkx/algorithms/link_analysis/tests/test_pagerank.py::TestPageRankScipy::test_empty_scipy",
"networkx/algorithms/traversal/tests/test_dfs.py::TestDFS::test_preorder_nodes",
"networkx/algorithms/traversal/tests/test_dfs.py::TestDFS::test_postorder_nodes",
"networkx/algorithms/traversal/tests/test_dfs.py::TestDFS::test_successor",
"networkx/algorithms/traversal/tests/test_dfs.py::TestDFS::test_predecessor",
"networkx/algorithms/traversal/tests/test_dfs.py::TestDFS::test_dfs_tree",
"networkx/algorithms/traversal/tests/test_dfs.py::TestDFS::test_dfs_edges",
"networkx/algorithms/traversal/tests/test_dfs.py::TestDFS::test_dfs_labeled_edges",
"networkx/algorithms/traversal/tests/test_dfs.py::TestDFS::test_dfs_labeled_disconnected_edges",
"networkx/drawing/tests/test_layout.py::TestLayout::test_smoke_int",
"networkx/drawing/tests/test_layout.py::TestLayout::test_smoke_string",
"networkx/drawing/tests/test_layout.py::TestLayout::test_adjacency_interface_numpy",
"networkx/drawing/tests/test_layout.py::TestLayout::test_single_nodes"
]
| [
"networkx/algorithms/centrality/tests/test_eigenvector_centrality.py::TestEigenvectorCentralityExceptions::test_multigraph",
"networkx/algorithms/centrality/tests/test_eigenvector_centrality.py::TestEigenvectorCentralityExceptions::test_empty",
"networkx/algorithms/centrality/tests/test_katz_centrality.py::TestKatzCentrality::test_K5",
"networkx/algorithms/centrality/tests/test_katz_centrality.py::TestKatzCentrality::test_P3",
"networkx/algorithms/centrality/tests/test_katz_centrality.py::TestKatzCentrality::test_beta_as_scalar",
"networkx/algorithms/centrality/tests/test_katz_centrality.py::TestKatzCentrality::test_beta_as_dict",
"networkx/algorithms/centrality/tests/test_katz_centrality.py::TestKatzCentrality::test_multiple_alpha",
"networkx/algorithms/centrality/tests/test_katz_centrality.py::TestKatzCentrality::test_multigraph",
"networkx/algorithms/centrality/tests/test_katz_centrality.py::TestKatzCentrality::test_empty",
"networkx/algorithms/centrality/tests/test_katz_centrality.py::TestKatzCentrality::test_bad_beta",
"networkx/algorithms/centrality/tests/test_katz_centrality.py::TestKatzCentrality::test_bad_beta_numbe",
"networkx/algorithms/centrality/tests/test_katz_centrality.py::TestKatzCentralityNumpy::test_multigraph",
"networkx/algorithms/centrality/tests/test_katz_centrality.py::TestKatzCentralityNumpy::test_empty",
"networkx/algorithms/link_analysis/tests/test_pagerank.py::TestPageRank::test_personalization",
"networkx/algorithms/link_analysis/tests/test_pagerank.py::TestPageRankScipy::test_personalization",
"networkx/algorithms/traversal/tests/test_dfs.py::TestDFS::test_dfs_tree_isolates"
]
| []
| BSD 3-Clause | 566 | [
"networkx/algorithms/community/community_generators.py",
"doc/source/reference/classes.multidigraph.rst",
"networkx/exception.py",
"networkx/classes/multigraph.py",
"networkx/algorithms/isomorphism/isomorph.py",
"networkx/algorithms/centrality/katz.py",
"networkx/convert_matrix.py",
"networkx/algorithms/link_analysis/pagerank_alg.py",
"networkx/classes/multidigraph.py",
"networkx/algorithms/traversal/depth_first_search.py",
"doc/source/tutorial/tutorial.rst",
"networkx/drawing/layout.py",
"doc/source/reference/classes.multigraph.rst",
"networkx/algorithms/centrality/eigenvector.py",
"networkx/algorithms/link_analysis/hits_alg.py"
]
| [
"networkx/algorithms/community/community_generators.py",
"doc/source/reference/classes.multidigraph.rst",
"networkx/exception.py",
"networkx/classes/multigraph.py",
"networkx/algorithms/isomorphism/isomorph.py",
"networkx/algorithms/centrality/katz.py",
"networkx/convert_matrix.py",
"networkx/algorithms/link_analysis/pagerank_alg.py",
"networkx/classes/multidigraph.py",
"networkx/algorithms/traversal/depth_first_search.py",
"doc/source/tutorial/tutorial.rst",
"networkx/drawing/layout.py",
"doc/source/reference/classes.multigraph.rst",
"networkx/algorithms/centrality/eigenvector.py",
"networkx/algorithms/link_analysis/hits_alg.py"
]
|
|
cdent__gabbi-137 | e31f9b4f621f46bbd192b240f368fe915557e199 | 2016-06-01 12:36:40 | e31f9b4f621f46bbd192b240f368fe915557e199 | diff --git a/docs/source/runner.rst b/docs/source/runner.rst
index fe35932..aaa5815 100644
--- a/docs/source/runner.rst
+++ b/docs/source/runner.rst
@@ -32,5 +32,8 @@ or in the target URL::
The value of prefix will be prepended to the path portion of URLs that
are not fully qualified.
+Anywhere host is used, if it is a raw IPV6 address it should be
+wrapped in ``[`` and ``]``.
+
If a ``-x`` or ``--failfast`` argument is provided then ``gabbi-run`` will
exit after the first test failure.
diff --git a/gabbi/runner.py b/gabbi/runner.py
index 463f030..25036a4 100644
--- a/gabbi/runner.py
+++ b/gabbi/runner.py
@@ -67,7 +67,9 @@ def run():
'target',
nargs='?', default='stub',
help='A fully qualified URL (with optional path as prefix) '
- 'to the primary target or a host and port, : separated'
+ 'to the primary target or a host and port, : separated. '
+ 'If using an IPV6 address for the host in either form, '
+ 'wrap it in \'[\' and \']\'.'
)
parser.add_argument(
'prefix',
@@ -98,11 +100,14 @@ def run():
target = args.target
prefix = args.prefix
- if ':' in target:
- host, port = target.split(':')
+ if ':' in target and '[' not in target:
+ host, port = target.rsplit(':', 1)
+ elif ']:' in target:
+ host, port = target.rsplit(':', 1)
else:
host = target
port = None
+ host = host.replace('[', '').replace(']', '')
# Initialize response handlers.
custom_response_handlers = []
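To make the branches above easier to follow, here is a standalone sketch (a hypothetical helper, not gabbi code) that mirrors the patched splitting of a non-URL target into host and port:

```python
def split_hostport(target):
    """Mirror gabbi-run's handling of a bare host[:port] target."""
    if ':' in target and '[' not in target:
        # Plain host:port, e.g. 'example.com:8080'. Note rsplit, not
        # split: an unbracketed IPv6 address would still go wrong here,
        # which is why the docs require brackets.
        host, port = target.rsplit(':', 1)
    elif ']:' in target:
        # Bracketed IPv6 with a port, e.g. '[::1]:8080'.
        host, port = target.rsplit(':', 1)
    else:
        # Bare hostname, or bracketed IPv6 without a port.
        host, port = target, None
    return host.replace('[', '').replace(']', ''), port


assert split_hostport('example.com:8080') == ('example.com', '8080')
assert split_hostport('[::1]:8080') == ('::1', '8080')
assert split_hostport('[::1]') == ('::1', None)
```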
diff --git a/gabbi/utils.py b/gabbi/utils.py
index f433dd2..a0f812e 100644
--- a/gabbi/utils.py
+++ b/gabbi/utils.py
@@ -29,10 +29,17 @@ from six.moves.urllib import parse as urlparse
def create_url(base_url, host, port=None, prefix='', ssl=False):
"""Given pieces of a path-based url, return a fully qualified url."""
scheme = 'http'
- netloc = host
+
+ # A host with : in it at this stage is assumed to be an IPv6
+ # address of some kind (they come in many forms). Port should
+ # already have been stripped off.
+ if ':' in host and not (host.startswith('[') and host.endswith(']')):
+ host = '[%s]' % host
if port and not _port_follows_standard(port, ssl):
netloc = '%s:%s' % (host, port)
+ else:
+ netloc = host
if ssl:
scheme = 'https'
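With that in place, `create_url` brackets bare IPv6 hosts before assembling the netloc. Expected behaviour on a checkout containing this patch (outputs follow the accompanying tests):

```python
from gabbi import utils

utils.create_url('/foo', '::1', ssl=True)
# -> 'https://[::1]/foo'
utils.create_url('/foo', '2607:f8b0:4000:801::200e', port=8080)
# -> 'http://[2607:f8b0:4000:801::200e]:8080/foo'
utils.create_url('/foo', '[::1]', port=999)   # already bracketed: unchanged
# -> 'http://[::1]:999/foo'
```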
| gabbi can't cope with an ipv6 host argument
If an IPv6 address is passed as the host when doing `build_tests`, the built URL is incorrect. An IPv6 address in a URL is supposed to be bracketed: `[::1]`. | cdent/gabbi | diff --git a/gabbi/tests/test_parse_url.py b/gabbi/tests/test_parse_url.py
index 8879cec..43066c7 100644
--- a/gabbi/tests/test_parse_url.py
+++ b/gabbi/tests/test_parse_url.py
@@ -40,21 +40,21 @@ class UrlParseTest(unittest.TestCase):
return http_case
def test_parse_url(self):
- host = uuid.uuid4()
+ host = uuid.uuid4().hex
http_case = self.make_test_case(host)
parsed_url = http_case._parse_url('/foobar')
self.assertEqual('http://%s:8000/foobar' % host, parsed_url)
def test_parse_prefix(self):
- host = uuid.uuid4()
+ host = uuid.uuid4().hex
http_case = self.make_test_case(host, prefix='/noise')
parsed_url = http_case._parse_url('/foobar')
self.assertEqual('http://%s:8000/noise/foobar' % host, parsed_url)
def test_parse_full(self):
- host = uuid.uuid4()
+ host = uuid.uuid4().hex
http_case = self.make_test_case(host)
parsed_url = http_case._parse_url('http://example.com/house')
@@ -102,6 +102,37 @@ class UrlParseTest(unittest.TestCase):
self.assertEqual('https://%s:80/foobar' % host, parsed_url)
+ def test_ipv6_url(self):
+ host = '::1'
+ http_case = self.make_test_case(host, port='80', ssl=True)
+ parsed_url = http_case._parse_url('/foobar')
+
+ self.assertEqual('https://[%s]:80/foobar' % host, parsed_url)
+
+ def test_ipv6_full_url(self):
+ host = '::1'
+ http_case = self.make_test_case(host, port='80', ssl=True)
+ parsed_url = http_case._parse_url(
+ 'http://[2001:4860:4860::8888]/foobar')
+
+ self.assertEqual('http://[2001:4860:4860::8888]/foobar', parsed_url)
+
+ def test_ipv6_no_double_colon_wacky_ssl(self):
+ host = 'FEDC:BA98:7654:3210:FEDC:BA98:7654:3210'
+ http_case = self.make_test_case(host, port='80', ssl=True)
+ parsed_url = http_case._parse_url('/foobar')
+
+ self.assertEqual(
+ 'https://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:80/foobar',
+ parsed_url)
+
+ http_case = self.make_test_case(host, ssl=True)
+ parsed_url = http_case._parse_url('/foobar')
+
+ self.assertEqual(
+ 'https://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:8000/foobar',
+ parsed_url)
+
def test_add_query_params(self):
host = uuid.uuid4().hex
# Use a sequence of tuples to ensure order.
@@ -121,7 +152,7 @@ class UrlParseTest(unittest.TestCase):
self.assertEqual('http://%s:8000/foobar?alpha=beta&x=1&y=2'
% host, parsed_url)
- def test_extend_query_params_full_ulr(self):
+ def test_extend_query_params_full_url(self):
host = 'stub'
query = OrderedDict([('x', 1), ('y', 2)])
http_case = self.make_test_case(host, params=query)
diff --git a/gabbi/tests/test_runner.py b/gabbi/tests/test_runner.py
index c0b6925..a5c9ea0 100644
--- a/gabbi/tests/test_runner.py
+++ b/gabbi/tests/test_runner.py
@@ -13,6 +13,7 @@
"""Test that the CLI works as expected
"""
+import mock
import sys
import unittest
from uuid import uuid4
@@ -218,6 +219,72 @@ class RunnerTest(unittest.TestCase):
self._stderr.write(sys.stderr.read())
+class RunnerHostArgParse(unittest.TestCase):
+
+ @mock.patch('sys.exit')
+ @mock.patch('sys.stdin')
+ @mock.patch('gabbi.driver.test_suite_from_dict')
+ @mock.patch('yaml.safe_load', return_value={})
+ def _test_hostport(self, url_or_host, expected_host,
+                       mock_yaml, mock_test_suite, mock_read, mock_exit,
+                       provided_prefix=None, expected_port=None,
+                       expected_prefix=None):
+ sys.argv = ['gabbi-run', url_or_host]
+ if provided_prefix:
+ sys.argv.append(provided_prefix)
+ runner.run()
+
+ mock_test_suite.assert_called_with(
+ unittest.defaultTestLoader, 'input', {}, '.', expected_host,
+ expected_port, None, None, prefix=expected_prefix
+ )
+
+ def test_plain_url(self):
+ self._test_hostport('http://foobar.com:80/news',
+ 'foobar.com',
+ expected_port='80',
+ expected_prefix='/news')
+
+ def test_simple_hostport(self):
+ self._test_hostport('foobar.com:999',
+ 'foobar.com',
+ expected_port='999')
+
+ def test_simple_hostport_with_prefix(self):
+ self._test_hostport('foobar.com:999',
+ 'foobar.com',
+ provided_prefix='/news',
+ expected_port='999',
+ expected_prefix='/news')
+
+ def test_ipv6_url_long(self):
+ self._test_hostport(
+ 'http://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:999/news',
+ 'FEDC:BA98:7654:3210:FEDC:BA98:7654:3210',
+ expected_port='999',
+ expected_prefix='/news')
+
+ def test_ipv6_url_localhost(self):
+ self._test_hostport(
+ 'http://[::1]:999/news',
+ '::1',
+ expected_port='999',
+ expected_prefix='/news')
+
+ def test_ipv6_host_localhost(self):
+ # If a user wants to use the hostport form, then they need
+ # to hack it with the brackets.
+ self._test_hostport(
+ '[::1]',
+ '::1')
+
+ def test_ipv6_hostport_localhost(self):
+ self._test_hostport(
+ '[::1]:999',
+ '::1',
+ expected_port='999')
+
+
class HTMLResponseHandler(handlers.ResponseHandler):
test_key_suffix = 'html'
diff --git a/gabbi/tests/test_utils.py b/gabbi/tests/test_utils.py
index a69d72b..1754dad 100644
--- a/gabbi/tests/test_utils.py
+++ b/gabbi/tests/test_utils.py
@@ -132,3 +132,29 @@ class CreateURLTest(unittest.TestCase):
url = utils.create_url('/foo/bar?x=1&y=2', 'test.host.com', ssl=True,
port=80)
self.assertEqual('https://test.host.com:80/foo/bar?x=1&y=2', url)
+
+ def test_create_url_ipv6_ssl(self):
+ url = utils.create_url('/foo/bar?x=1&y=2', '::1', ssl=True)
+ self.assertEqual('https://[::1]/foo/bar?x=1&y=2', url)
+
+ def test_create_url_ipv6_ssl_weird_port(self):
+ url = utils.create_url('/foo/bar?x=1&y=2', '::1', ssl=True, port=80)
+ self.assertEqual('https://[::1]:80/foo/bar?x=1&y=2', url)
+
+ def test_create_url_ipv6_full(self):
+ url = utils.create_url('/foo/bar?x=1&y=2',
+ '2607:f8b0:4000:801::200e', port=8080)
+ self.assertEqual(
+ 'http://[2607:f8b0:4000:801::200e]:8080/foo/bar?x=1&y=2', url)
+
+ def test_create_url_ipv6_already_bracket(self):
+ url = utils.create_url(
+ '/foo/bar?x=1&y=2', '[2607:f8b0:4000:801::200e]', port=999)
+ self.assertEqual(
+ 'http://[2607:f8b0:4000:801::200e]:999/foo/bar?x=1&y=2', url)
+
+ def test_create_url_no_double_colon(self):
+ url = utils.create_url(
+ '/foo', 'FEDC:BA98:7654:3210:FEDC:BA98:7654:3210', port=999)
+ self.assertEqual(
+ 'http://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:999/foo', url)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 3
} | 1.19 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"mock",
"testrepository",
"coverage",
"hacking",
"sphinx",
"pytest"
],
"pre_install": null,
"python": "3.5",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
colorama==0.4.5
coverage==6.2
decorator==5.1.1
docutils==0.18.1
extras==1.0.0
fixtures==4.0.1
flake8==3.8.4
-e git+https://github.com/cdent/gabbi.git@e31f9b4f621f46bbd192b240f368fe915557e199#egg=gabbi
hacking==4.1.0
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
iso8601==1.1.0
Jinja2==3.0.3
jsonpath-rw==1.4.0
jsonpath-rw-ext==1.2.2
MarkupSafe==2.0.1
mccabe==0.6.1
mock==5.2.0
packaging==21.3
pbr==6.1.1
pluggy==1.0.0
ply==3.11
py==1.11.0
pycodestyle==2.6.0
pyflakes==2.2.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
python-subunit==1.4.2
pytz==2025.2
PyYAML==6.0.1
requests==2.27.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
testrepository==0.0.21
testtools==2.6.0
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
wsgi_intercept==1.13.1
zipp==3.6.0
| name: gabbi
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- colorama==0.4.5
- coverage==6.2
- decorator==5.1.1
- docutils==0.18.1
- extras==1.0.0
- fixtures==4.0.1
- flake8==3.8.4
- hacking==4.1.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- iso8601==1.1.0
- jinja2==3.0.3
- jsonpath-rw==1.4.0
- jsonpath-rw-ext==1.2.2
- markupsafe==2.0.1
- mccabe==0.6.1
- mock==5.2.0
- packaging==21.3
- pbr==6.1.1
- pluggy==1.0.0
- ply==3.11
- py==1.11.0
- pycodestyle==2.6.0
- pyflakes==2.2.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- python-subunit==1.4.2
- pytz==2025.2
- pyyaml==6.0.1
- requests==2.27.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- testrepository==0.0.21
- testtools==2.6.0
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- wsgi-intercept==1.13.1
- zipp==3.6.0
prefix: /opt/conda/envs/gabbi
| [
"gabbi/tests/test_parse_url.py::UrlParseTest::test_ipv6_no_double_colon_wacky_ssl",
"gabbi/tests/test_parse_url.py::UrlParseTest::test_ipv6_url",
"gabbi/tests/test_runner.py::RunnerHostArgParse::test_ipv6_host_localhost",
"gabbi/tests/test_runner.py::RunnerHostArgParse::test_ipv6_hostport_localhost",
"gabbi/tests/test_runner.py::RunnerHostArgParse::test_ipv6_url_localhost",
"gabbi/tests/test_runner.py::RunnerHostArgParse::test_ipv6_url_long",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_ipv6_full",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_ipv6_ssl",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_ipv6_ssl_weird_port",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_no_double_colon"
]
| []
| [
"gabbi/tests/test_parse_url.py::UrlParseTest::test_add_query_params",
"gabbi/tests/test_parse_url.py::UrlParseTest::test_default_port_http",
"gabbi/tests/test_parse_url.py::UrlParseTest::test_default_port_https",
"gabbi/tests/test_parse_url.py::UrlParseTest::test_default_port_https_no_ssl",
"gabbi/tests/test_parse_url.py::UrlParseTest::test_default_port_int",
"gabbi/tests/test_parse_url.py::UrlParseTest::test_extend_query_params",
"gabbi/tests/test_parse_url.py::UrlParseTest::test_extend_query_params_full_url",
"gabbi/tests/test_parse_url.py::UrlParseTest::test_https_port_80_ssl",
"gabbi/tests/test_parse_url.py::UrlParseTest::test_ipv6_full_url",
"gabbi/tests/test_parse_url.py::UrlParseTest::test_parse_full",
"gabbi/tests/test_parse_url.py::UrlParseTest::test_parse_prefix",
"gabbi/tests/test_parse_url.py::UrlParseTest::test_parse_url",
"gabbi/tests/test_parse_url.py::UrlParseTest::test_with_ssl",
"gabbi/tests/test_runner.py::RunnerTest::test_custom_response_handler",
"gabbi/tests/test_runner.py::RunnerTest::test_exit_code",
"gabbi/tests/test_runner.py::RunnerTest::test_target_url_parsing",
"gabbi/tests/test_runner.py::RunnerTest::test_target_url_parsing_standard_port",
"gabbi/tests/test_runner.py::RunnerHostArgParse::test_plain_url",
"gabbi/tests/test_runner.py::RunnerHostArgParse::test_simple_hostport",
"gabbi/tests/test_runner.py::RunnerHostArgParse::test_simple_hostport_with_prefix",
"gabbi/tests/test_utils.py::BinaryTypesTest::test_binary",
"gabbi/tests/test_utils.py::BinaryTypesTest::test_not_binary",
"gabbi/tests/test_utils.py::ExtractContentTypeTest::test_extract_content_type_bad_params",
"gabbi/tests/test_utils.py::ExtractContentTypeTest::test_extract_content_type_default_both",
"gabbi/tests/test_utils.py::ExtractContentTypeTest::test_extract_content_type_default_charset",
"gabbi/tests/test_utils.py::ExtractContentTypeTest::test_extract_content_type_multiple_params",
"gabbi/tests/test_utils.py::ExtractContentTypeTest::test_extract_content_type_with_charset",
"gabbi/tests/test_utils.py::ColorizeTest::test_colorize_missing_color",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_ipv6_already_bracket",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_not_ssl_on_443",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_port",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_port_and_ssl",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_prefix",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_preserve_query",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_simple",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_ssl",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_ssl_on_80"
]
| []
| Apache License 2.0 | 567 | [
"gabbi/runner.py",
"docs/source/runner.rst",
"gabbi/utils.py"
]
| [
"gabbi/runner.py",
"docs/source/runner.rst",
"gabbi/utils.py"
]
|
|
mattjmcnaughton__sdep-30 | be2be4bbfbdb2442fa446174055bb6cf61c4125b | 2016-06-01 21:15:26 | be2be4bbfbdb2442fa446174055bb6cf61c4125b | diff --git a/docs/cli.rst b/docs/cli.rst
index ab80b6a..62fcaee 100644
--- a/docs/cli.rst
+++ b/docs/cli.rst
@@ -61,6 +61,12 @@ options can be set in configuration files:
**Optional**
+- :command:`INDEX_SUFFIX`: When hosting with Amazon S3, it is necessary to
+ specify an index suffix, which is appended to all urls ending in :command:`/`. The
+ default value is :command:`index.html`.
+- :command:`ERROR_KEY`: The S3 key of the file Amazon should serve in case of
+ error (i.e. incorrect url). The default value is :command:`404.html`.
+
Environment Variables
~~~~~~~~~~~~~~~~~~~~~
diff --git a/sdep/app.py b/sdep/app.py
index ecf518f..ad923f8 100644
--- a/sdep/app.py
+++ b/sdep/app.py
@@ -28,12 +28,6 @@ class Sdep(object):
# Constant names of AWS objects.
BUCKET_NAME = "bucket_name"
- # Default index and error.
- # @TODO This specification is temporary, as eventually we will make these
- # configurable options with `Config`.
- DEFAULT_INDEX_SUFFIX = "index.html"
- DEFAULT_ERROR_KEY = "404.html"
-
def __init__(self, config):
self._config = config
self._s3_client = self._establish_s3_client()
@@ -199,10 +193,10 @@ class Sdep(object):
"""
website_config = {
"IndexDocument": {
- "Suffix": self.DEFAULT_INDEX_SUFFIX
+ "Suffix": self._config.get(Config.INDEX_SUFFIX_FIELD)
},
"ErrorDocument": {
- "Key": self.DEFAULT_ERROR_KEY
+ "Key": self._config.get(Config.ERROR_KEY_FIELD)
}
}
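For context, the `website_config` dict built above feeds S3's static-website API. A minimal standalone sketch of that call (real boto3 API; bucket name and values are illustrative):

```python
import boto3

s3 = boto3.client('s3')
website_config = {
    'IndexDocument': {'Suffix': 'index.html'},  # default index_suffix
    'ErrorDocument': {'Key': '404.html'},       # default error_key
}
s3.put_bucket_website(
    Bucket='my-sdep-bucket',                    # hypothetical bucket name
    WebsiteConfiguration=website_config,
)
```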
diff --git a/sdep/config.py b/sdep/config.py
index 636f97a..22298f8 100644
--- a/sdep/config.py
+++ b/sdep/config.py
@@ -65,6 +65,8 @@ class Config(object):
AWS_SECRET_ACCESS_KEY_FIELD = "aws_secret_access_key"
SITE_DIR_FIELD = "site_dir"
DOMAIN_FIELD = "domain"
+ INDEX_SUFFIX_FIELD = "index_suffix"
+ ERROR_KEY_FIELD = "error_key"
def __init__(self, config_file=None, test_mode=False):
# @TODO I wonder if it would make more sense for the `Config` class to
@@ -144,13 +146,11 @@ class Config(object):
ConfigImproperFormatError: If vital configuration data is either in
the incorrect format or nonexistent.
"""
- for field in self.required_config_fields(env=True):
- value = os.environ.get(field)
-
- if value is None:
- raise ConfigImproperFormatError
- else:
- self._config_hash[field.lower()] = value
+ self._config_hash = self._parse_from_store(
+ self.required_config_fields(env=True),
+ self.optional_config_fields(env=True),
+ os.environ
+ )
def _parse_from_config_file(self, config_file):
"""
@@ -172,15 +172,46 @@ class Config(object):
except (IOError, json.JSONDecodeError):
raise ConfigImproperFormatError
- # @TODO Should a common helper method implement this functionality
- # for both `_parse_from_config_file` and `_parse_from_env`.
- for field in self.required_config_fields(env=False):
- value = config_data.get(field)
+ self._config_hash = self._parse_from_store(
+ self.required_config_fields(env=False),
+ self.optional_config_fields(env=False),
+ config_data
+ )
+
+ @classmethod
+ def _parse_from_store(cls, required_fields, optional_fields, data_store):
+ """
+ Parse the configuration from a data store object (i.e. the json hash or
+ `os.environ`). This method is useful because the process for parsing the
+ data from either the environment or a configuration file shares many of
+ the same components. Abstracting to a single method ensures less
+ duplicate code.
+
+ Args:
+ required_fields (list): A list of the required fields.
+ optional_fields (list): A list of the optional fields.
+ data_store (dict): A dictionary containing key/value pairs with the
+ fields as a key.
+
+ Returns:
+ dict: A configuration dictionary.
+ """
+ # Start with all of the defaults filled in. We will overwrite with any
+ # specified info.
+ config_hash = cls._optional_fields_and_defaults()
+
+ fields = [(f, True) for f in required_fields] + [(f, False) for f in optional_fields]
+
+ for field, required in fields:
+ value = data_store.get(field)
if value is None:
- raise ConfigImproperFormatError
+ if required:
+ raise ConfigImproperFormatError
else:
- self._config_hash[field.lower()] = value
+ config_hash[field.lower()] = value
+
+ return config_hash
@classmethod
def required_config_fields(cls, env=False):
@@ -207,8 +238,8 @@ class Config(object):
else:
return required_fields
- @staticmethod
- def optional_config_fields(env=False):
+ @classmethod
+ def optional_config_fields(cls, env=False):
"""
        Return the optional configuration fields either in `snake_case` or in all
upper-case `snake_case`, depending on whether the `env` flag is set.
@@ -220,23 +251,40 @@ class Config(object):
Returns:
[str]: A list of optional configuration fields.
"""
- optional_fields = []
+ optional_fields = list(cls._optional_fields_and_defaults().keys())
if env:
return [field.upper() for field in optional_fields]
else:
return optional_fields
+ @classmethod
+ def _optional_fields_and_defaults(cls):
+ """
+ Return a dictionary of optional fields and their defaults.
+
+ Returns:
+ dict: Optional fields and their defaults.
+ """
+ return {
+ cls.INDEX_SUFFIX_FIELD: "index.html",
+ cls.ERROR_KEY_FIELD: "404.html"
+ }
+
def _prepopulate_config(self):
"""
Prepopulate this instance of `Config` with sensible default values which
we can use when testing.
"""
- # @TODO Determine a better method for automatically including all
- # `required` variables.
- self._config_hash = {
+ populate_hash = {
self.AWS_ACCESS_KEY_ID_FIELD: "MY_ACCESS_KEY",
self.AWS_SECRET_ACCESS_KEY_FIELD: "MY_SECRET_KEY",
self.SITE_DIR_FIELD: "./static",
self.DOMAIN_FIELD: "sdep-test.com"
}
+
+ self._config_hash = self._parse_from_store(
+ self.required_config_fields(env=False),
+ self.optional_config_fields(env=False),
+ populate_hash
+ )
| Add `index_suffix` and `error_key` as optional fields
When telling AWS to have our bucket behave like a website, with this [boto call](http://boto3.readthedocs.io/en/latest/reference/services/s3.html#S3.Client.put_bucket_website), we have the option of specifying which suffix will represent an index file (currently `index.html`) and which file to display when there is an error (currently `404.html`). Make both of these configurable options with sensible defaults. | mattjmcnaughton/sdep | diff --git a/tests/test_app.py b/tests/test_app.py
index 8dd700f..08b5da0 100644
--- a/tests/test_app.py
+++ b/tests/test_app.py
@@ -117,9 +117,8 @@ class SdepTestCase(unittest.TestCase):
# with `Config`.
resp = self._s3_client.get_bucket_website(Bucket=bucket_name)
- self.assertEqual(resp["IndexDocument"]["Suffix"],
- Sdep.DEFAULT_INDEX_SUFFIX)
- self.assertEqual(resp["ErrorDocument"]["Key"], Sdep.DEFAULT_ERROR_KEY)
+ self.assertNotEqual(resp["IndexDocument"]["Suffix"], None)
+ self.assertNotEqual(resp["ErrorDocument"]["Key"], None)
@classmethod
def _create_test_upload_dir(cls):
diff --git a/tests/test_config.py b/tests/test_config.py
index 0feb7be..a1be9d8 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -35,7 +35,7 @@ class ConfigTestCase(unittest.TestCase):
config_file = self._create_config_file()
config = Config(config_file=config_file)
- for field in Config.required_config_fields():
+ for field in self._all_fields():
self.assertNotEqual(config.get(field), None)
os.remove(config_file)
@@ -51,7 +51,7 @@ class ConfigTestCase(unittest.TestCase):
with patch.dict(os.environ, environ_dict, clear=True):
config = Config()
- for field in Config.required_config_fields():
+ for field in self._all_fields():
self.assertNotEqual(config.get(field), None)
def test_find_config_in_curr_dir(self):
@@ -71,7 +71,7 @@ class ConfigTestCase(unittest.TestCase):
config = Config()
self.assertEqual(config_in_curr, Config.locate_config_file())
- for field in Config.required_config_fields():
+ for field in self._all_fields():
self.assertNotEqual(config.get(field), None)
for temp_dir in [temp_dirs.current, temp_dirs.home]:
@@ -97,13 +97,12 @@ class ConfigTestCase(unittest.TestCase):
config = Config()
self.assertEqual(config_in_home, Config.locate_config_file())
- for field in Config.required_config_fields():
+ for field in self._all_fields():
self.assertNotEqual(config.get(field), None)
for temp_dir in [temp_dirs.current, temp_dirs.home]:
shutil.rmtree(temp_dir, ignore_errors=True)
-
def test_bad_config(self):
"""
Test loading the configuration from a file with an improperly specified
@@ -114,15 +113,23 @@ class ConfigTestCase(unittest.TestCase):
with self.assertRaises(ConfigParseError):
Config(config_file=config_file)
- @staticmethod
- def _config_dict():
+ @classmethod
+ def _config_dict(cls):
"""
A dictionary of property formatted config.
Returns:
dict: A properly formatted config.
"""
- return {field: str(uuid.uuid4()) for field in Config.required_config_fields()}
+ base_dict = {field: str(uuid.uuid4()) for field in cls._all_fields()}
+
+ # Remove one of the optional fields so that we can test the default value
+ # being filled in.
+
+ field_to_remove = Config.optional_config_fields()[0]
+ del base_dict[field_to_remove]
+
+ return base_dict
@classmethod
def _create_mock_dirs(cls):
@@ -176,3 +183,13 @@ class ConfigTestCase(unittest.TestCase):
bad_config_file.write(json.dumps({}))
return file_name
+
+ @staticmethod
+ def _all_fields():
+ """
+ Helper method to return all configuration fields.
+
+ Returns:
+ list: List of the strings for all configuration fields.
+ """
+ return Config.required_config_fields() + Config.optional_config_fields()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 3
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose2",
"pytest"
],
"pre_install": [],
"python": "3.5",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
argh==0.27.2
astroid==2.11.7
attrs==22.2.0
Babel==2.11.0
boto==2.49.0
boto3==1.23.10
botocore==1.26.10
certifi==2021.5.30
charset-normalizer==2.0.12
click==8.0.4
cookies==2.2.1
dataclasses==0.8
dicttoxml==1.7.16
dill==0.3.4
docutils==0.18.1
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
isort==5.10.1
Jinja2==3.0.3
jmespath==0.10.0
lazy-object-proxy==1.7.1
livereload==2.6.3
MarkupSafe==2.0.1
mccabe==0.7.0
mock==2.0.0
moto==1.0.0
nose2==0.13.0
packaging==21.3
pathtools==0.1.2
pbr==6.1.1
platformdirs==2.4.0
pluggy==1.0.0
pockets==0.9.1
port-for==0.3.1
py==1.11.0
pyaml==23.5.8
Pygments==2.14.0
pylint==2.13.9
pyparsing==3.1.4
pytest==7.0.1
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.1
requests==2.27.1
s3transfer==0.5.2
-e git+https://github.com/mattjmcnaughton/sdep.git@be2be4bbfbdb2442fa446174055bb6cf61c4125b#egg=sdep
simplejson==3.20.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-autobuild==0.7.1
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-napoleon==0.7
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
tomli==1.2.3
tornado==6.1
typed-ast==1.5.5
typing_extensions==4.1.1
urllib3==1.26.20
watchdog==2.3.1
Werkzeug==2.0.3
wrapt==1.16.0
xmltodict==0.14.2
zipp==3.6.0
| name: sdep
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- argh==0.27.2
- astroid==2.11.7
- attrs==22.2.0
- babel==2.11.0
- boto==2.49.0
- boto3==1.23.10
- botocore==1.26.10
- charset-normalizer==2.0.12
- click==8.0.4
- cookies==2.2.1
- dataclasses==0.8
- dicttoxml==1.7.16
- dill==0.3.4
- docutils==0.18.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- isort==5.10.1
- jinja2==3.0.3
- jmespath==0.10.0
- lazy-object-proxy==1.7.1
- livereload==2.6.3
- markupsafe==2.0.1
- mccabe==0.7.0
- mock==2.0.0
- moto==1.0.0
- nose2==0.13.0
- packaging==21.3
- pathtools==0.1.2
- pbr==6.1.1
- platformdirs==2.4.0
- pluggy==1.0.0
- pockets==0.9.1
- port-for==0.3.1
- py==1.11.0
- pyaml==23.5.8
- pygments==2.14.0
- pylint==2.13.9
- pyparsing==3.1.4
- pytest==7.0.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.1
- requests==2.27.1
- s3transfer==0.5.2
- simplejson==3.20.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-autobuild==0.7.1
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-napoleon==0.7
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- tornado==6.1
- typed-ast==1.5.5
- typing-extensions==4.1.1
- urllib3==1.26.20
- watchdog==2.3.1
- werkzeug==2.0.3
- wrapt==1.16.0
- xmltodict==0.14.2
- zipp==3.6.0
prefix: /opt/conda/envs/sdep
| [
"tests/test_config.py::ConfigTestCase::test_find_config_in_curr_dir",
"tests/test_config.py::ConfigTestCase::test_find_config_in_home_dir",
"tests/test_config.py::ConfigTestCase::test_load_config_from_env",
"tests/test_config.py::ConfigTestCase::test_load_config_from_file"
]
| [
"tests/test_app.py::SdepTestCase::test_configure_bucket_website",
"tests/test_app.py::SdepTestCase::test_create",
"tests/test_app.py::SdepTestCase::test_create_s3_buckets",
"tests/test_app.py::SdepTestCase::test_upload",
"tests/test_app.py::SdepTestCase::test_upload_files_to_s3"
]
| [
"tests/test_config.py::ConfigTestCase::test_bad_config"
]
| []
| MIT License | 568 | [
"sdep/config.py",
"sdep/app.py",
"docs/cli.rst"
]
| [
"sdep/config.py",
"sdep/app.py",
"docs/cli.rst"
]
|
|
tableau__document-api-python-15 | 07aad9550d3d36a4d74c4751832c50fe81882a01 | 2016-06-02 00:21:16 | 07aad9550d3d36a4d74c4751832c50fe81882a01 | diff --git a/tableaudocumentapi/datasource.py b/tableaudocumentapi/datasource.py
index 93ebe55..617004a 100644
--- a/tableaudocumentapi/datasource.py
+++ b/tableaudocumentapi/datasource.py
@@ -72,7 +72,7 @@ class Datasource(object):
"""
# save the file
- self._datasourceTree.write(self._filename)
+ self._datasourceTree.write(self._filename, encoding="utf-8", xml_declaration=True)
def save_as(self, new_filename):
"""
@@ -85,7 +85,7 @@ class Datasource(object):
Nothing.
"""
- self._datasourceTree.write(new_filename)
+ self._datasourceTree.write(new_filename, encoding="utf-8", xml_declaration=True)
###########
# name
diff --git a/tableaudocumentapi/workbook.py b/tableaudocumentapi/workbook.py
index 67dbc32..889f746 100644
--- a/tableaudocumentapi/workbook.py
+++ b/tableaudocumentapi/workbook.py
@@ -76,7 +76,7 @@ class Workbook(object):
"""
# save the file
- self._workbookTree.write(self._filename)
+ self._workbookTree.write(self._filename, encoding="utf-8", xml_declaration=True)
def save_as(self, new_filename):
"""
@@ -90,7 +90,7 @@ class Workbook(object):
"""
- self._workbookTree.write(new_filename)
+ self._workbookTree.write(new_filename, encoding="utf-8", xml_declaration=True)
###########################################################################
#
| Tabcmd publish with .twb created via Document API
I can successfully create a .twb file via the Document API, but attempting to publish it to my Tableau Server via Tabcmd results in an unexpected error:
**Bad request
unexpected error occurred opening the packaged workbook.**
Attached is the template workbook created in Tableau Desktop (superstore_sales.twb) and one of the workbooks created from that template via the Document API (superstore_sales_arizona.twb).
[superstore_twbs.zip](https://github.com/tableau/document-api-python/files/285303/superstore_twbs.zip)
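
A minimal sketch of what the patch above changes, using only stdlib ElementTree: `ElementTree.write()` emits no XML declaration by default, and the tabcmd failure here suggests Tableau Server requires one.

```python
import xml.etree.ElementTree as ET

tree = ET.ElementTree(ET.Element('workbook'))
tree.write('before.twb')  # ASCII body, no <?xml ...?> declaration
tree.write('after.twb', encoding='utf-8', xml_declaration=True)

with open('after.twb') as f:
    print(f.readline().strip())  # <?xml version='1.0' encoding='utf-8'?>
```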
| tableau/document-api-python | diff --git a/test.py b/test.py
index fd7d1bd..5606005 100644
--- a/test.py
+++ b/test.py
@@ -17,6 +17,7 @@ TABLEAU_10_WORKBOOK = '''<?xml version='1.0' encoding='utf-8' ?><workbook source
TABLEAU_CONNECTION_XML = ET.fromstring(
'''<connection authentication='sspi' class='sqlserver' dbname='TestV1' odbc-native-protocol='yes' one-time-sql='' server='mssql2012.test.tsi.lan' username=''></connection>''')
+
class HelperMethodTests(unittest.TestCase):
def test_is_valid_file_with_valid_inputs(self):
@@ -39,7 +40,6 @@ class ConnectionParserTests(unittest.TestCase):
self.assertIsInstance(connections[0], Connection)
self.assertEqual(connections[0].dbname, 'TestV1')
-
def test_can_extract_federated_connections(self):
parser = ConnectionParser(ET.fromstring(TABLEAU_10_TDS), '10.0')
connections = parser.get_connections()
@@ -97,6 +97,17 @@ class DatasourceModelTests(unittest.TestCase):
new_tds = Datasource.from_file(self.tds_file.name)
self.assertEqual(new_tds.connections[0].dbname, 'newdb.test.tsi.lan')
+ def test_save_has_xml_declaration(self):
+ original_tds = Datasource.from_file(self.tds_file.name)
+ original_tds.connections[0].dbname = 'newdb.test.tsi.lan'
+
+ original_tds.save()
+
+ with open(self.tds_file.name) as f:
+ first_line = f.readline().strip() # first line should be xml tag
+ self.assertEqual(
+ first_line, "<?xml version='1.0' encoding='utf-8'?>")
+
class WorkbookModelTests(unittest.TestCase):
@@ -122,7 +133,8 @@ class WorkbookModelTests(unittest.TestCase):
original_wb.save()
new_wb = Workbook(self.workbook_file.name)
- self.assertEqual(new_wb.datasources[0].connections[0].dbname, 'newdb.test.tsi.lan')
+ self.assertEqual(new_wb.datasources[0].connections[
+ 0].dbname, 'newdb.test.tsi.lan')
class WorkbookModelV10Tests(unittest.TestCase):
@@ -152,7 +164,19 @@ class WorkbookModelV10Tests(unittest.TestCase):
original_wb.save()
new_wb = Workbook(self.workbook_file.name)
- self.assertEqual(new_wb.datasources[0].connections[0].dbname, 'newdb.test.tsi.lan')
+ self.assertEqual(new_wb.datasources[0].connections[
+ 0].dbname, 'newdb.test.tsi.lan')
+
+ def test_save_has_xml_declaration(self):
+ original_wb = Workbook(self.workbook_file.name)
+ original_wb.datasources[0].connections[0].dbname = 'newdb.test.tsi.lan'
+
+ original_wb.save()
+
+ with open(self.workbook_file.name) as f:
+ first_line = f.readline().strip() # first line should be xml tag
+ self.assertEqual(
+ first_line, "<?xml version='1.0' encoding='utf-8'?>")
if __name__ == '__main__':
unittest.main()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 2
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
-e git+https://github.com/tableau/document-api-python.git@07aad9550d3d36a4d74c4751832c50fe81882a01#egg=tableaudocumentapi
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: document-api-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
prefix: /opt/conda/envs/document-api-python
| [
"test.py::DatasourceModelTests::test_save_has_xml_declaration",
"test.py::WorkbookModelV10Tests::test_save_has_xml_declaration"
]
| []
| [
"test.py::HelperMethodTests::test_is_valid_file_with_invalid_inputs",
"test.py::HelperMethodTests::test_is_valid_file_with_valid_inputs",
"test.py::ConnectionParserTests::test_can_extract_federated_connections",
"test.py::ConnectionParserTests::test_can_extract_legacy_connection",
"test.py::ConnectionModelTests::test_can_read_attributes_from_connection",
"test.py::ConnectionModelTests::test_can_write_attributes_to_connection",
"test.py::DatasourceModelTests::test_can_extract_connection",
"test.py::DatasourceModelTests::test_can_extract_datasource_from_file",
"test.py::DatasourceModelTests::test_can_save_tds",
"test.py::WorkbookModelTests::test_can_extract_datasource",
"test.py::WorkbookModelTests::test_can_update_datasource_connection_and_save",
"test.py::WorkbookModelV10Tests::test_can_extract_datasourceV10",
"test.py::WorkbookModelV10Tests::test_can_update_datasource_connection_and_saveV10"
]
| []
| MIT License | 569 | [
"tableaudocumentapi/datasource.py",
"tableaudocumentapi/workbook.py"
]
| [
"tableaudocumentapi/datasource.py",
"tableaudocumentapi/workbook.py"
]
|
|
docker__docker-py-1079 | 88811a26593e8a87e8d820d8820736fea6d8d20a | 2016-06-03 00:58:40 | 299ffadb95c90eb7134b9cee2648fb683912c303 | diff --git a/docker/client.py b/docker/client.py
index de3cb3ca..b96a78ce 100644
--- a/docker/client.py
+++ b/docker/client.py
@@ -14,7 +14,6 @@
import json
import struct
-import sys
import requests
import requests.exceptions
@@ -26,10 +25,14 @@ from . import api
from . import constants
from . import errors
from .auth import auth
-from .unixconn import unixconn
from .ssladapter import ssladapter
-from .utils import utils, check_resource, update_headers, kwargs_from_env
from .tls import TLSConfig
+from .transport import UnixAdapter
+from .utils import utils, check_resource, update_headers, kwargs_from_env
+try:
+ from .transport import NpipeAdapter
+except ImportError:
+ pass
def from_env(**kwargs):
@@ -59,11 +62,26 @@ class Client(
self._auth_configs = auth.load_config()
- base_url = utils.parse_host(base_url, sys.platform, tls=bool(tls))
+ base_url = utils.parse_host(
+ base_url, constants.IS_WINDOWS_PLATFORM, tls=bool(tls)
+ )
if base_url.startswith('http+unix://'):
- self._custom_adapter = unixconn.UnixAdapter(base_url, timeout)
+ self._custom_adapter = UnixAdapter(base_url, timeout)
self.mount('http+docker://', self._custom_adapter)
self.base_url = 'http+docker://localunixsocket'
+ elif base_url.startswith('npipe://'):
+ if not constants.IS_WINDOWS_PLATFORM:
+ raise errors.DockerException(
+ 'The npipe:// protocol is only supported on Windows'
+ )
+ try:
+ self._custom_adapter = NpipeAdapter(base_url, timeout)
+ except NameError:
+ raise errors.DockerException(
+ 'Install pypiwin32 package to enable npipe:// support'
+ )
+ self.mount('http+docker://', self._custom_adapter)
+ self.base_url = 'http+docker://localnpipe'
else:
# Use SSLAdapter for the ability to specify SSL version
if isinstance(tls, TLSConfig):
diff --git a/docker/constants.py b/docker/constants.py
index 6c381de3..0388f705 100644
--- a/docker/constants.py
+++ b/docker/constants.py
@@ -1,3 +1,5 @@
+import sys
+
DEFAULT_DOCKER_API_VERSION = '1.22'
DEFAULT_TIMEOUT_SECONDS = 60
STREAM_HEADER_SIZE_BYTES = 8
@@ -8,3 +10,5 @@ CONTAINER_LIMITS_KEYS = [
INSECURE_REGISTRY_DEPRECATION_WARNING = \
'The `insecure_registry` argument to {} ' \
'is deprecated and non-functional. Please remove it.'
+
+IS_WINDOWS_PLATFORM = (sys.platform == 'win32')
diff --git a/docker/transport/__init__.py b/docker/transport/__init__.py
new file mode 100644
index 00000000..d647483e
--- /dev/null
+++ b/docker/transport/__init__.py
@@ -0,0 +1,6 @@
+# flake8: noqa
+from .unixconn import UnixAdapter
+try:
+ from .npipeconn import NpipeAdapter
+except ImportError:
+ pass
\ No newline at end of file
diff --git a/docker/transport/npipeconn.py b/docker/transport/npipeconn.py
new file mode 100644
index 00000000..736ddf67
--- /dev/null
+++ b/docker/transport/npipeconn.py
@@ -0,0 +1,80 @@
+import six
+import requests.adapters
+
+from .npipesocket import NpipeSocket
+
+if six.PY3:
+ import http.client as httplib
+else:
+ import httplib
+
+try:
+ import requests.packages.urllib3 as urllib3
+except ImportError:
+ import urllib3
+
+
+RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
+
+
+class NpipeHTTPConnection(httplib.HTTPConnection, object):
+ def __init__(self, npipe_path, timeout=60):
+ super(NpipeHTTPConnection, self).__init__(
+ 'localhost', timeout=timeout
+ )
+ self.npipe_path = npipe_path
+ self.timeout = timeout
+
+ def connect(self):
+ sock = NpipeSocket()
+ sock.settimeout(self.timeout)
+ sock.connect(self.npipe_path)
+ self.sock = sock
+
+
+class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
+ def __init__(self, npipe_path, timeout=60):
+ super(NpipeHTTPConnectionPool, self).__init__(
+ 'localhost', timeout=timeout
+ )
+ self.npipe_path = npipe_path
+ self.timeout = timeout
+
+ def _new_conn(self):
+ return NpipeHTTPConnection(
+ self.npipe_path, self.timeout
+ )
+
+
+class NpipeAdapter(requests.adapters.HTTPAdapter):
+ def __init__(self, base_url, timeout=60):
+ self.npipe_path = base_url.replace('npipe://', '')
+ self.timeout = timeout
+ self.pools = RecentlyUsedContainer(
+ 10, dispose_func=lambda p: p.close()
+ )
+ super(NpipeAdapter, self).__init__()
+
+ def get_connection(self, url, proxies=None):
+ with self.pools.lock:
+ pool = self.pools.get(url)
+ if pool:
+ return pool
+
+ pool = NpipeHTTPConnectionPool(
+ self.npipe_path, self.timeout
+ )
+ self.pools[url] = pool
+
+ return pool
+
+ def request_url(self, request, proxies):
+ # The select_proxy utility in requests errors out when the provided URL
+ # doesn't have a hostname, like is the case when using a UNIX socket.
+ # Since proxies are an irrelevant notion in the case of UNIX sockets
+ # anyway, we simply return the path URL directly.
+ # See also: https://github.com/docker/docker-py/issues/811
+ return request.path_url
+
+ def close(self):
+ self.pools.clear()
diff --git a/docker/transport/npipesocket.py b/docker/transport/npipesocket.py
new file mode 100644
index 00000000..35418ef1
--- /dev/null
+++ b/docker/transport/npipesocket.py
@@ -0,0 +1,191 @@
+import functools
+import io
+
+import win32file
+import win32pipe
+
+cSECURITY_SQOS_PRESENT = 0x100000
+cSECURITY_ANONYMOUS = 0
+cPIPE_READMODE_MESSAGE = 2
+
+
+def check_closed(f):
+ @functools.wraps(f)
+ def wrapped(self, *args, **kwargs):
+ if self._closed:
+ raise RuntimeError(
+ 'Can not reuse socket after connection was closed.'
+ )
+ return f(self, *args, **kwargs)
+ return wrapped
+
+
+class NpipeSocket(object):
+ """ Partial implementation of the socket API over windows named pipes.
+ This implementation is only designed to be used as a client socket,
+ and server-specific methods (bind, listen, accept...) are not
+ implemented.
+ """
+ def __init__(self, handle=None):
+ self._timeout = win32pipe.NMPWAIT_USE_DEFAULT_WAIT
+ self._handle = handle
+ self._closed = False
+
+ def accept(self):
+ raise NotImplementedError()
+
+ def bind(self, address):
+ raise NotImplementedError()
+
+ def close(self):
+ self._handle.Close()
+ self._closed = True
+
+ @check_closed
+ def connect(self, address):
+ win32pipe.WaitNamedPipe(address, self._timeout)
+ handle = win32file.CreateFile(
+ address,
+ win32file.GENERIC_READ | win32file.GENERIC_WRITE,
+ 0,
+ None,
+ win32file.OPEN_EXISTING,
+ cSECURITY_ANONYMOUS | cSECURITY_SQOS_PRESENT,
+ 0
+ )
+ self.flags = win32pipe.GetNamedPipeInfo(handle)[0]
+
+ self._handle = handle
+ self._address = address
+
+ @check_closed
+ def connect_ex(self, address):
+ return self.connect(address)
+
+ @check_closed
+ def detach(self):
+ self._closed = True
+ return self._handle
+
+ @check_closed
+ def dup(self):
+ return NpipeSocket(self._handle)
+
+ @check_closed
+ def fileno(self):
+ return int(self._handle)
+
+ def getpeername(self):
+ return self._address
+
+ def getsockname(self):
+ return self._address
+
+ def getsockopt(self, level, optname, buflen=None):
+ raise NotImplementedError()
+
+ def ioctl(self, control, option):
+ raise NotImplementedError()
+
+ def listen(self, backlog):
+ raise NotImplementedError()
+
+ def makefile(self, mode=None, bufsize=None):
+ if mode.strip('b') != 'r':
+ raise NotImplementedError()
+ rawio = NpipeFileIOBase(self)
+ if bufsize is None:
+ bufsize = io.DEFAULT_BUFFER_SIZE
+ return io.BufferedReader(rawio, buffer_size=bufsize)
+
+ @check_closed
+ def recv(self, bufsize, flags=0):
+ err, data = win32file.ReadFile(self._handle, bufsize)
+ return data
+
+ @check_closed
+ def recvfrom(self, bufsize, flags=0):
+ data = self.recv(bufsize, flags)
+ return (data, self._address)
+
+ @check_closed
+ def recvfrom_into(self, buf, nbytes=0, flags=0):
+ return self.recv_into(buf, nbytes, flags), self._address
+
+ @check_closed
+ def recv_into(self, buf, nbytes=0):
+ readbuf = buf
+ if not isinstance(buf, memoryview):
+ readbuf = memoryview(buf)
+
+ err, data = win32file.ReadFile(
+ self._handle,
+ readbuf[:nbytes] if nbytes else readbuf
+ )
+ return len(data)
+
+ @check_closed
+ def send(self, string, flags=0):
+ err, nbytes = win32file.WriteFile(self._handle, string)
+ return nbytes
+
+ @check_closed
+ def sendall(self, string, flags=0):
+ return self.send(string, flags)
+
+ @check_closed
+ def sendto(self, string, address):
+ self.connect(address)
+ return self.send(string)
+
+ def setblocking(self, flag):
+ if flag:
+ return self.settimeout(None)
+ return self.settimeout(0)
+
+ def settimeout(self, value):
+ if value is None:
+ self._timeout = win32pipe.NMPWAIT_NOWAIT
+ elif not isinstance(value, (float, int)) or value < 0:
+ raise ValueError('Timeout value out of range')
+ elif value == 0:
+ self._timeout = win32pipe.NMPWAIT_USE_DEFAULT_WAIT
+ else:
+ self._timeout = value
+
+ def gettimeout(self):
+ return self._timeout
+
+ def setsockopt(self, level, optname, value):
+ raise NotImplementedError()
+
+ @check_closed
+ def shutdown(self, how):
+ return self.close()
+
+
+class NpipeFileIOBase(io.RawIOBase):
+ def __init__(self, npipe_socket):
+ self.sock = npipe_socket
+
+ def close(self):
+ super(NpipeFileIOBase, self).close()
+ self.sock = None
+
+ def fileno(self):
+ return self.sock.fileno()
+
+ def isatty(self):
+ return False
+
+ def readable(self):
+ return True
+
+ def readinto(self, buf):
+ return self.sock.recv_into(buf)
+
+ def seekable(self):
+ return False
+
+ def writable(self):
+ return False
diff --git a/docker/unixconn/unixconn.py b/docker/transport/unixconn.py
similarity index 100%
rename from docker/unixconn/unixconn.py
rename to docker/transport/unixconn.py
diff --git a/docker/unixconn/__init__.py b/docker/unixconn/__init__.py
deleted file mode 100644
index 53711fc6..00000000
--- a/docker/unixconn/__init__.py
+++ /dev/null
@@ -1,1 +0,0 @@
-from .unixconn import UnixAdapter # flake8: noqa
diff --git a/docker/utils/utils.py b/docker/utils/utils.py
index caa98314..4a56829d 100644
--- a/docker/utils/utils.py
+++ b/docker/utils/utils.py
@@ -383,13 +383,13 @@ def parse_repository_tag(repo_name):
# fd:// protocol unsupported (for obvious reasons)
# Added support for http and https
# Protocol translation: tcp -> http, unix -> http+unix
-def parse_host(addr, platform=None, tls=False):
+def parse_host(addr, is_win32=False, tls=False):
proto = "http+unix"
host = DEFAULT_HTTP_HOST
port = None
path = ''
- if not addr and platform == 'win32':
+ if not addr and is_win32:
addr = '{0}:{1}'.format(DEFAULT_HTTP_HOST, 2375)
if not addr or addr.strip() == 'unix://':
@@ -413,6 +413,9 @@ def parse_host(addr, platform=None, tls=False):
elif addr.startswith('https://'):
proto = "https"
addr = addr[8:]
+ elif addr.startswith('npipe://'):
+ proto = 'npipe'
+ addr = addr[8:]
elif addr.startswith('fd://'):
raise errors.DockerException("fd protocol is not implemented")
else:
@@ -448,7 +451,7 @@ def parse_host(addr, platform=None, tls=False):
else:
host = addr
- if proto == "http+unix":
+ if proto == "http+unix" or proto == 'npipe':
return "{0}://{1}".format(proto, host)
return "{0}://{1}:{2}{3}".format(proto, host, port, path)
diff --git a/setup.py b/setup.py
index 85427110..ac58b1f9 100644
--- a/setup.py
+++ b/setup.py
@@ -1,7 +1,10 @@
#!/usr/bin/env python
import os
+import sys
+
from setuptools import setup
+
ROOT_DIR = os.path.dirname(__file__)
SOURCE_DIR = os.path.join(ROOT_DIR)
@@ -11,6 +14,9 @@ requirements = [
'websocket-client >= 0.32.0',
]
+if sys.platform == 'win32':
+ requirements.append('pypiwin32 >= 219')
+
extras_require = {
':python_version < "3.5"': 'backports.ssl_match_hostname >= 3.5',
':python_version < "3.3"': 'ipaddress >= 1.0.16',
@@ -29,7 +35,7 @@ setup(
description="Python client for Docker.",
url='https://github.com/docker/docker-py/',
packages=[
- 'docker', 'docker.api', 'docker.auth', 'docker.unixconn',
+ 'docker', 'docker.api', 'docker.auth', 'docker.transport',
'docker.utils', 'docker.utils.ports', 'docker.ssladapter'
],
install_requires=requirements,
diff --git a/win32-requirements.txt b/win32-requirements.txt
new file mode 100644
index 00000000..e77c3d90
--- /dev/null
+++ b/win32-requirements.txt
@@ -0,0 +1,2 @@
+-r requirements.txt
+pypiwin32==219
\ No newline at end of file
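
The `parse_host` change above passes `npipe://` URLs through unmodified, the same way unix socket addresses are handled; a quick sketch mirroring the unit-test expectations below:

```python
from docker.utils.utils import parse_host

# npipe URLs keep their path form; no host:port is appended
assert parse_host('npipe:////./pipe/docker_engine') == 'npipe:////./pipe/docker_engine'
# on Windows with no address given, the TCP default still applies
assert parse_host(None, is_win32=True) == 'http://127.0.0.1:2375'
```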
| Support windows named pipes for docker api
https://github.com/docker/compose/issues/3170
Apparently the pywin32 API provides some tools for working with this (http://docs.activestate.com/activepython/3.4/pywin32/win32pipe.html). It is also available as `pypiwin32` on PyPI. Examples: http://codereview.stackexchange.com/questions/88672/python-wrapper-for-windows-pipes
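
As a rough illustration of the client side, a bare pywin32 round trip against the engine's default pipe, mirroring the `WaitNamedPipe`/`CreateFile` calls used by the patch's `NpipeSocket`; the `GET /_ping` request is just a convenient probe:

```python
import win32file
import win32pipe

PIPE = r'\\.\pipe\docker_engine'  # the engine's default named pipe on Windows

win32pipe.WaitNamedPipe(PIPE, win32pipe.NMPWAIT_USE_DEFAULT_WAIT)
handle = win32file.CreateFile(
    PIPE,
    win32file.GENERIC_READ | win32file.GENERIC_WRITE,
    0, None, win32file.OPEN_EXISTING, 0, 0,
)
win32file.WriteFile(handle, b'GET /_ping HTTP/1.0\r\n\r\n')
_, raw = win32file.ReadFile(handle, 4096)  # ReadFile returns (error_code, bytes)
print(raw.decode('ascii', 'replace'))
handle.Close()
```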
| docker/docker-py | diff --git a/tests/unit/utils_test.py b/tests/unit/utils_test.py
index ef927d36..ae821fd3 100644
--- a/tests/unit/utils_test.py
+++ b/tests/unit/utils_test.py
@@ -388,6 +388,7 @@ class ParseHostTest(base.BaseTestCase):
'somehost.net:80/service/swarm': (
'http://somehost.net:80/service/swarm'
),
+ 'npipe:////./pipe/docker_engine': 'npipe:////./pipe/docker_engine',
}
for host in invalid_hosts:
@@ -402,10 +403,8 @@ class ParseHostTest(base.BaseTestCase):
tcp_port = 'http://127.0.0.1:2375'
for val in [None, '']:
- for platform in ['darwin', 'linux2', None]:
- assert parse_host(val, platform) == unix_socket
-
- assert parse_host(val, 'win32') == tcp_port
+ assert parse_host(val, is_win32=False) == unix_socket
+ assert parse_host(val, is_win32=True) == tcp_port
def test_parse_host_tls(self):
host_value = 'myhost.docker.net:3348'
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_added_files",
"has_removed_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 4
} | 1.8 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
coverage==6.2
-e git+https://github.com/docker/docker-py.git@88811a26593e8a87e8d820d8820736fea6d8d20a#egg=docker_py
importlib-metadata==4.8.3
iniconfig==1.1.1
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
requests==2.5.3
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
websocket-client==0.32.0
zipp==3.6.0
| name: docker-py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- requests==2.5.3
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- websocket-client==0.32.0
- zipp==3.6.0
prefix: /opt/conda/envs/docker-py
| [
"tests/unit/utils_test.py::ParseHostTest::test_parse_host",
"tests/unit/utils_test.py::ParseHostTest::test_parse_host_empty_value"
]
| []
| [
"tests/unit/utils_test.py::HostConfigTest::test_create_endpoint_config_with_aliases",
"tests/unit/utils_test.py::HostConfigTest::test_create_host_config_invalid_cpu_cfs_types",
"tests/unit/utils_test.py::HostConfigTest::test_create_host_config_no_options",
"tests/unit/utils_test.py::HostConfigTest::test_create_host_config_no_options_newer_api_version",
"tests/unit/utils_test.py::HostConfigTest::test_create_host_config_with_cpu_period",
"tests/unit/utils_test.py::HostConfigTest::test_create_host_config_with_cpu_quota",
"tests/unit/utils_test.py::HostConfigTest::test_create_host_config_with_oom_kill_disable",
"tests/unit/utils_test.py::HostConfigTest::test_create_host_config_with_oom_score_adj",
"tests/unit/utils_test.py::HostConfigTest::test_create_host_config_with_shm_size",
"tests/unit/utils_test.py::HostConfigTest::test_create_host_config_with_shm_size_in_mb",
"tests/unit/utils_test.py::UlimitTest::test_create_host_config_dict_ulimit",
"tests/unit/utils_test.py::UlimitTest::test_create_host_config_dict_ulimit_capitals",
"tests/unit/utils_test.py::UlimitTest::test_create_host_config_obj_ulimit",
"tests/unit/utils_test.py::UlimitTest::test_ulimit_invalid_type",
"tests/unit/utils_test.py::LogConfigTest::test_create_host_config_dict_logconfig",
"tests/unit/utils_test.py::LogConfigTest::test_create_host_config_obj_logconfig",
"tests/unit/utils_test.py::LogConfigTest::test_logconfig_invalid_config_type",
"tests/unit/utils_test.py::KwargsFromEnvTest::test_kwargs_from_env_alternate_env",
"tests/unit/utils_test.py::KwargsFromEnvTest::test_kwargs_from_env_empty",
"tests/unit/utils_test.py::KwargsFromEnvTest::test_kwargs_from_env_no_cert_path",
"tests/unit/utils_test.py::KwargsFromEnvTest::test_kwargs_from_env_tls",
"tests/unit/utils_test.py::KwargsFromEnvTest::test_kwargs_from_env_tls_verify_false",
"tests/unit/utils_test.py::KwargsFromEnvTest::test_kwargs_from_env_tls_verify_false_no_cert",
"tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_compact",
"tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_complete",
"tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_empty",
"tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_list",
"tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_no_mode",
"tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_unicode_bytes_input",
"tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_unicode_unicode_input",
"tests/unit/utils_test.py::ParseEnvFileTest::test_parse_env_file_commented_line",
"tests/unit/utils_test.py::ParseEnvFileTest::test_parse_env_file_invalid_line",
"tests/unit/utils_test.py::ParseEnvFileTest::test_parse_env_file_proper",
"tests/unit/utils_test.py::ParseEnvFileTest::test_parse_env_file_with_equals_character",
"tests/unit/utils_test.py::ParseHostTest::test_parse_host_tls",
"tests/unit/utils_test.py::ParseHostTest::test_parse_host_tls_tcp_proto",
"tests/unit/utils_test.py::ParseRepositoryTagTest::test_index_image_no_tag",
"tests/unit/utils_test.py::ParseRepositoryTagTest::test_index_image_sha",
"tests/unit/utils_test.py::ParseRepositoryTagTest::test_index_image_tag",
"tests/unit/utils_test.py::ParseRepositoryTagTest::test_index_user_image_no_tag",
"tests/unit/utils_test.py::ParseRepositoryTagTest::test_index_user_image_tag",
"tests/unit/utils_test.py::ParseRepositoryTagTest::test_private_reg_image_no_tag",
"tests/unit/utils_test.py::ParseRepositoryTagTest::test_private_reg_image_sha",
"tests/unit/utils_test.py::ParseRepositoryTagTest::test_private_reg_image_tag",
"tests/unit/utils_test.py::ParseDeviceTest::test_dict",
"tests/unit/utils_test.py::ParseDeviceTest::test_full_string_definition",
"tests/unit/utils_test.py::ParseDeviceTest::test_hybrid_list",
"tests/unit/utils_test.py::ParseDeviceTest::test_partial_string_definition",
"tests/unit/utils_test.py::ParseDeviceTest::test_permissionless_string_definition",
"tests/unit/utils_test.py::ParseBytesTest::test_parse_bytes_float",
"tests/unit/utils_test.py::ParseBytesTest::test_parse_bytes_invalid",
"tests/unit/utils_test.py::ParseBytesTest::test_parse_bytes_maxint",
"tests/unit/utils_test.py::ParseBytesTest::test_parse_bytes_valid",
"tests/unit/utils_test.py::UtilsTest::test_convert_filters",
"tests/unit/utils_test.py::UtilsTest::test_create_ipam_config",
"tests/unit/utils_test.py::UtilsTest::test_decode_json_header",
"tests/unit/utils_test.py::SplitCommandTest::test_split_command_with_unicode",
"tests/unit/utils_test.py::PortsTest::test_build_port_bindings_with_matching_internal_port_ranges",
"tests/unit/utils_test.py::PortsTest::test_build_port_bindings_with_matching_internal_ports",
"tests/unit/utils_test.py::PortsTest::test_build_port_bindings_with_nonmatching_internal_port_ranges",
"tests/unit/utils_test.py::PortsTest::test_build_port_bindings_with_nonmatching_internal_ports",
"tests/unit/utils_test.py::PortsTest::test_build_port_bindings_with_one_port",
"tests/unit/utils_test.py::PortsTest::test_build_port_bindings_with_port_range",
"tests/unit/utils_test.py::PortsTest::test_host_only_with_colon",
"tests/unit/utils_test.py::PortsTest::test_non_matching_length_port_ranges",
"tests/unit/utils_test.py::PortsTest::test_port_and_range_invalid",
"tests/unit/utils_test.py::PortsTest::test_port_only_with_colon",
"tests/unit/utils_test.py::PortsTest::test_split_port_invalid",
"tests/unit/utils_test.py::PortsTest::test_split_port_no_host_port",
"tests/unit/utils_test.py::PortsTest::test_split_port_range_no_host_port",
"tests/unit/utils_test.py::PortsTest::test_split_port_range_with_host_ip_no_port",
"tests/unit/utils_test.py::PortsTest::test_split_port_range_with_host_port",
"tests/unit/utils_test.py::PortsTest::test_split_port_range_with_protocol",
"tests/unit/utils_test.py::PortsTest::test_split_port_with_host_ip",
"tests/unit/utils_test.py::PortsTest::test_split_port_with_host_ip_no_port",
"tests/unit/utils_test.py::PortsTest::test_split_port_with_host_port",
"tests/unit/utils_test.py::PortsTest::test_split_port_with_protocol",
"tests/unit/utils_test.py::ExcludePathsTest::test_directory",
"tests/unit/utils_test.py::ExcludePathsTest::test_directory_with_single_exception",
"tests/unit/utils_test.py::ExcludePathsTest::test_directory_with_subdir_exception",
"tests/unit/utils_test.py::ExcludePathsTest::test_directory_with_trailing_slash",
"tests/unit/utils_test.py::ExcludePathsTest::test_directory_with_wildcard_exception",
"tests/unit/utils_test.py::ExcludePathsTest::test_exclude_custom_dockerfile",
"tests/unit/utils_test.py::ExcludePathsTest::test_exclude_dockerfile_child",
"tests/unit/utils_test.py::ExcludePathsTest::test_exclude_dockerfile_dockerignore",
"tests/unit/utils_test.py::ExcludePathsTest::test_no_dupes",
"tests/unit/utils_test.py::ExcludePathsTest::test_no_excludes",
"tests/unit/utils_test.py::ExcludePathsTest::test_question_mark",
"tests/unit/utils_test.py::ExcludePathsTest::test_single_filename",
"tests/unit/utils_test.py::ExcludePathsTest::test_single_filename_leading_dot_slash",
"tests/unit/utils_test.py::ExcludePathsTest::test_single_filename_trailing_slash",
"tests/unit/utils_test.py::ExcludePathsTest::test_single_subdir_single_filename",
"tests/unit/utils_test.py::ExcludePathsTest::test_single_subdir_wildcard_filename",
"tests/unit/utils_test.py::ExcludePathsTest::test_single_subdir_with_path_traversal",
"tests/unit/utils_test.py::ExcludePathsTest::test_subdirectory",
"tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_exclude",
"tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_filename_end",
"tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_filename_start",
"tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_subdir_single_filename",
"tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_subdir_wildcard_filename",
"tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_with_exception",
"tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_with_wildcard_exception",
"tests/unit/utils_test.py::TarTest::test_tar_with_directory_symlinks",
"tests/unit/utils_test.py::TarTest::test_tar_with_empty_directory",
"tests/unit/utils_test.py::TarTest::test_tar_with_excludes",
"tests/unit/utils_test.py::TarTest::test_tar_with_file_symlinks"
]
| []
| Apache License 2.0 | 570 | [
"docker/constants.py",
"setup.py",
"win32-requirements.txt",
"docker/client.py",
"docker/unixconn/__init__.py",
"docker/utils/utils.py",
"docker/unixconn/unixconn.py",
"docker/transport/__init__.py",
"docker/transport/npipeconn.py",
"docker/transport/npipesocket.py"
]
| [
"docker/constants.py",
"setup.py",
"win32-requirements.txt",
"docker/client.py",
"docker/unixconn/__init__.py",
"docker/utils/utils.py",
"docker/transport/__init__.py",
"docker/transport/npipeconn.py",
"docker/transport/npipesocket.py",
"docker/transport/unixconn.py"
]
|
|
aio-libs__multidict-3 | ae23ed4adbfff861bd2621223a0e50bb0313cf32 | 2016-06-04 15:22:44 | ae23ed4adbfff861bd2621223a0e50bb0313cf32 | diff --git a/multidict/_multidict_py.py b/multidict/_multidict_py.py
index f9c8f38..bff8276 100644
--- a/multidict/_multidict_py.py
+++ b/multidict/_multidict_py.py
@@ -185,12 +185,13 @@ class MultiDict(_Base, abc.MutableMapping):
elif hasattr(arg, 'items'):
items = arg.items()
else:
+ items = []
for item in arg:
if not len(item) == 2:
raise TypeError(
"{} takes either dict or list of (key, value) "
"tuples".format(name))
- items = arg
+ items.append(item)
for key, value in items:
method(key, value)
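
The fix above collects validated pairs into a fresh list instead of re-iterating `arg`; a minimal reduction of the pre-patch failure mode:

```python
# The validation pass consumes the generator, so the later assignment
# binds an iterator that is already exhausted.
arg = ((str(i), i) for i in range(2))

for item in arg:        # first pass: checks each pair, consuming them all
    assert len(item) == 2

items = arg             # same, now-empty generator object
print(list(items))      # [] -- nothing left for the insertion loop
```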
| Python MultiDict constructor discards generator content
When using the pure Python implementation, if a `MultiDict` is constructed with a generator argument:
headers = CIMultiDict(
    (
        k.decode('utf-8', 'surrogateescape'),
        v.decode('utf-8', 'surrogateescape'),
    )
    for k, v in event.headers
)
then the resulting `MultiDict` will be empty, instead of containing the key/value pairs as expected. This is because the generator is iterated over twice, and the first iteration discards all of the pairs. | aio-libs/multidict | diff --git a/tests/test_multidict.py b/tests/test_multidict.py
index 8fa6e78..268a26e 100644
--- a/tests/test_multidict.py
+++ b/tests/test_multidict.py
@@ -73,6 +73,15 @@ class _BaseTest(_Root):
self.assertEqual(sorted(d.items()), [('key', 'value1'),
('key2', 'value2')])
+ def test_instantiate__from_generator(self):
+ d = self.make_dict((str(i), i) for i in range(2))
+
+ self.assertEqual(d, {'0': 0, '1': 1})
+ self.assertEqual(len(d), 2)
+ self.assertEqual(sorted(d.keys()), ['0', '1'])
+ self.assertEqual(sorted(d.values()), [0, 1])
+ self.assertEqual(sorted(d.items()), [('0', 0), ('1', 1)])
+
def test_getone(self):
d = self.make_dict([('key', 'value1')], key='value2')
self.assertEqual(d.getone('key'), 'value1')
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 1
},
"num_modified_files": 1
} | 1.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"pip install -U -r requirements-dev.txt"
],
"python": "3.5",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
backcall==0.2.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
Cython==3.0.12
decorator==5.1.1
distlib==0.3.9
docutils==0.17.1
filelock==3.4.1
flake8==5.0.4
idna==3.10
imagesize==1.4.1
importlib-metadata==4.2.0
importlib-resources==5.4.0
iniconfig==1.1.1
ipdb==0.13.13
ipython==7.16.3
ipython-genutils==0.2.0
jedi==0.17.2
Jinja2==3.0.3
MarkupSafe==2.0.1
mccabe==0.7.0
-e git+https://github.com/aio-libs/multidict.git@ae23ed4adbfff861bd2621223a0e50bb0313cf32#egg=multidict
packaging==21.3
parso==0.7.1
pexpect==4.9.0
pickleshare==0.7.5
platformdirs==2.4.0
pluggy==1.0.0
prompt-toolkit==3.0.36
ptyprocess==0.7.0
py==1.11.0
pycodestyle==2.9.1
pyenchant==3.2.2
pyflakes==2.5.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
pytest-sugar==0.9.6
pytz==2025.2
requests==2.27.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==4.3.2
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-newsfeed==0.1.4
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
sphinxcontrib-spelling==7.7.0
termcolor==1.1.0
toml==0.10.2
tomli==1.2.3
tox==3.28.0
traitlets==4.3.3
typing_extensions==4.1.1
urllib3==1.26.20
virtualenv==20.16.2
wcwidth==0.2.13
zipp==3.6.0
| name: multidict
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- backcall==0.2.0
- charset-normalizer==2.0.12
- coverage==6.2
- cython==3.0.12
- decorator==5.1.1
- distlib==0.3.9
- docutils==0.17.1
- filelock==3.4.1
- flake8==5.0.4
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.2.0
- importlib-resources==5.4.0
- iniconfig==1.1.1
- ipdb==0.13.13
- ipython==7.16.3
- ipython-genutils==0.2.0
- jedi==0.17.2
- jinja2==3.0.3
- markupsafe==2.0.1
- mccabe==0.7.0
- packaging==21.3
- parso==0.7.1
- pexpect==4.9.0
- pickleshare==0.7.5
- pip==21.3.1
- platformdirs==2.4.0
- pluggy==1.0.0
- prompt-toolkit==3.0.36
- ptyprocess==0.7.0
- py==1.11.0
- pycodestyle==2.9.1
- pyenchant==3.2.2
- pyflakes==2.5.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-sugar==0.9.6
- pytz==2025.2
- requests==2.27.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==4.3.2
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-newsfeed==0.1.4
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- sphinxcontrib-spelling==7.7.0
- termcolor==1.1.0
- toml==0.10.2
- tomli==1.2.3
- tox==3.28.0
- traitlets==4.3.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- virtualenv==20.16.2
- wcwidth==0.2.13
- zipp==3.6.0
prefix: /opt/conda/envs/multidict
| [
"tests/test_multidict.py::TestPyMultiDictProxy::test_instantiate__from_generator",
"tests/test_multidict.py::PyMutableMultiDictTests::test_instantiate__from_generator"
]
| []
| [
"tests/test_multidict.py::TestPyMultiDictProxy::test__iter__",
"tests/test_multidict.py::TestPyMultiDictProxy::test__repr__",
"tests/test_multidict.py::TestPyMultiDictProxy::test_and",
"tests/test_multidict.py::TestPyMultiDictProxy::test_and_issue_410",
"tests/test_multidict.py::TestPyMultiDictProxy::test_cannot_create_from_unaccepted",
"tests/test_multidict.py::TestPyMultiDictProxy::test_copy",
"tests/test_multidict.py::TestPyMultiDictProxy::test_eq",
"tests/test_multidict.py::TestPyMultiDictProxy::test_exposed_names",
"tests/test_multidict.py::TestPyMultiDictProxy::test_get",
"tests/test_multidict.py::TestPyMultiDictProxy::test_getall",
"tests/test_multidict.py::TestPyMultiDictProxy::test_getone",
"tests/test_multidict.py::TestPyMultiDictProxy::test_instantiate__empty",
"tests/test_multidict.py::TestPyMultiDictProxy::test_instantiate__from_arg0",
"tests/test_multidict.py::TestPyMultiDictProxy::test_instantiate__from_arg0_dict",
"tests/test_multidict.py::TestPyMultiDictProxy::test_instantiate__with_kwargs",
"tests/test_multidict.py::TestPyMultiDictProxy::test_isdisjoint",
"tests/test_multidict.py::TestPyMultiDictProxy::test_isdisjoint2",
"tests/test_multidict.py::TestPyMultiDictProxy::test_items__contains",
"tests/test_multidict.py::TestPyMultiDictProxy::test_items__repr__",
"tests/test_multidict.py::TestPyMultiDictProxy::test_keys__contains",
"tests/test_multidict.py::TestPyMultiDictProxy::test_keys__repr__",
"tests/test_multidict.py::TestPyMultiDictProxy::test_keys_is_set_equal",
"tests/test_multidict.py::TestPyMultiDictProxy::test_keys_is_set_greater",
"tests/test_multidict.py::TestPyMultiDictProxy::test_keys_is_set_greater_equal",
"tests/test_multidict.py::TestPyMultiDictProxy::test_keys_is_set_less",
"tests/test_multidict.py::TestPyMultiDictProxy::test_keys_is_set_less_equal",
"tests/test_multidict.py::TestPyMultiDictProxy::test_keys_is_set_not_equal",
"tests/test_multidict.py::TestPyMultiDictProxy::test_ne",
"tests/test_multidict.py::TestPyMultiDictProxy::test_or",
"tests/test_multidict.py::TestPyMultiDictProxy::test_or_issue_410",
"tests/test_multidict.py::TestPyMultiDictProxy::test_preserve_stable_ordering",
"tests/test_multidict.py::TestPyMultiDictProxy::test_repr_issue_410",
"tests/test_multidict.py::TestPyMultiDictProxy::test_sub",
"tests/test_multidict.py::TestPyMultiDictProxy::test_sub_issue_410",
"tests/test_multidict.py::TestPyMultiDictProxy::test_values__contains",
"tests/test_multidict.py::TestPyMultiDictProxy::test_values__repr__",
"tests/test_multidict.py::TestPyMultiDictProxy::test_xor",
"tests/test_multidict.py::TestPyMultiDictProxy::test_xor_issue_410",
"tests/test_multidict.py::TestPyCIMultiDictProxy::test_basics",
"tests/test_multidict.py::TestPyCIMultiDictProxy::test_copy",
"tests/test_multidict.py::TestPyCIMultiDictProxy::test_exposed_names",
"tests/test_multidict.py::TestPyCIMultiDictProxy::test_get",
"tests/test_multidict.py::TestPyCIMultiDictProxy::test_getall",
"tests/test_multidict.py::TestPyCIMultiDictProxy::test_items__repr__",
"tests/test_multidict.py::TestPyCIMultiDictProxy::test_keys__repr__",
"tests/test_multidict.py::TestPyCIMultiDictProxy::test_values__repr__",
"tests/test_multidict.py::PyMutableMultiDictTests::test__iter__",
"tests/test_multidict.py::PyMutableMultiDictTests::test__repr__",
"tests/test_multidict.py::PyMutableMultiDictTests::test_add",
"tests/test_multidict.py::PyMutableMultiDictTests::test_and",
"tests/test_multidict.py::PyMutableMultiDictTests::test_and_issue_410",
"tests/test_multidict.py::PyMutableMultiDictTests::test_cannot_create_from_unaccepted",
"tests/test_multidict.py::PyMutableMultiDictTests::test_clear",
"tests/test_multidict.py::PyMutableMultiDictTests::test_copy",
"tests/test_multidict.py::PyMutableMultiDictTests::test_del",
"tests/test_multidict.py::PyMutableMultiDictTests::test_eq",
"tests/test_multidict.py::PyMutableMultiDictTests::test_exposed_names",
"tests/test_multidict.py::PyMutableMultiDictTests::test_extend",
"tests/test_multidict.py::PyMutableMultiDictTests::test_extend_from_proxy",
"tests/test_multidict.py::PyMutableMultiDictTests::test_getall",
"tests/test_multidict.py::PyMutableMultiDictTests::test_getone",
"tests/test_multidict.py::PyMutableMultiDictTests::test_instantiate__empty",
"tests/test_multidict.py::PyMutableMultiDictTests::test_instantiate__from_arg0",
"tests/test_multidict.py::PyMutableMultiDictTests::test_instantiate__from_arg0_dict",
"tests/test_multidict.py::PyMutableMultiDictTests::test_instantiate__with_kwargs",
"tests/test_multidict.py::PyMutableMultiDictTests::test_isdisjoint",
"tests/test_multidict.py::PyMutableMultiDictTests::test_isdisjoint2",
"tests/test_multidict.py::PyMutableMultiDictTests::test_items__contains",
"tests/test_multidict.py::PyMutableMultiDictTests::test_keys__contains",
"tests/test_multidict.py::PyMutableMultiDictTests::test_keys_is_set_equal",
"tests/test_multidict.py::PyMutableMultiDictTests::test_keys_is_set_greater",
"tests/test_multidict.py::PyMutableMultiDictTests::test_keys_is_set_greater_equal",
"tests/test_multidict.py::PyMutableMultiDictTests::test_keys_is_set_less",
"tests/test_multidict.py::PyMutableMultiDictTests::test_keys_is_set_less_equal",
"tests/test_multidict.py::PyMutableMultiDictTests::test_keys_is_set_not_equal",
"tests/test_multidict.py::PyMutableMultiDictTests::test_ne",
"tests/test_multidict.py::PyMutableMultiDictTests::test_or",
"tests/test_multidict.py::PyMutableMultiDictTests::test_or_issue_410",
"tests/test_multidict.py::PyMutableMultiDictTests::test_pop",
"tests/test_multidict.py::PyMutableMultiDictTests::test_pop_default",
"tests/test_multidict.py::PyMutableMultiDictTests::test_pop_raises",
"tests/test_multidict.py::PyMutableMultiDictTests::test_popitem",
"tests/test_multidict.py::PyMutableMultiDictTests::test_popitem_empty_multidict",
"tests/test_multidict.py::PyMutableMultiDictTests::test_repr_issue_410",
"tests/test_multidict.py::PyMutableMultiDictTests::test_set_default",
"tests/test_multidict.py::PyMutableMultiDictTests::test_sub",
"tests/test_multidict.py::PyMutableMultiDictTests::test_sub_issue_410",
"tests/test_multidict.py::PyMutableMultiDictTests::test_update",
"tests/test_multidict.py::PyMutableMultiDictTests::test_values__contains",
"tests/test_multidict.py::PyMutableMultiDictTests::test_xor",
"tests/test_multidict.py::PyMutableMultiDictTests::test_xor_issue_410",
"tests/test_multidict.py::PyCIMutableMultiDictTests::test__repr__",
"tests/test_multidict.py::PyCIMutableMultiDictTests::test_add",
"tests/test_multidict.py::PyCIMutableMultiDictTests::test_basics",
"tests/test_multidict.py::PyCIMutableMultiDictTests::test_clear",
"tests/test_multidict.py::PyCIMutableMultiDictTests::test_copy",
"tests/test_multidict.py::PyCIMutableMultiDictTests::test_ctor",
"tests/test_multidict.py::PyCIMutableMultiDictTests::test_del",
"tests/test_multidict.py::PyCIMutableMultiDictTests::test_delitem",
"tests/test_multidict.py::PyCIMutableMultiDictTests::test_exposed_names",
"tests/test_multidict.py::PyCIMutableMultiDictTests::test_extend",
"tests/test_multidict.py::PyCIMutableMultiDictTests::test_extend_from_proxy",
"tests/test_multidict.py::PyCIMutableMultiDictTests::test_extend_with_upstr",
"tests/test_multidict.py::PyCIMutableMultiDictTests::test_get",
"tests/test_multidict.py::PyCIMutableMultiDictTests::test_getall",
"tests/test_multidict.py::PyCIMutableMultiDictTests::test_items__repr__",
"tests/test_multidict.py::PyCIMutableMultiDictTests::test_keys__repr__",
"tests/test_multidict.py::PyCIMutableMultiDictTests::test_pop",
"tests/test_multidict.py::PyCIMutableMultiDictTests::test_pop_default",
"tests/test_multidict.py::PyCIMutableMultiDictTests::test_pop_raises",
"tests/test_multidict.py::PyCIMutableMultiDictTests::test_popitem",
"tests/test_multidict.py::PyCIMutableMultiDictTests::test_popitem_empty_multidict",
"tests/test_multidict.py::PyCIMutableMultiDictTests::test_set_default",
"tests/test_multidict.py::PyCIMutableMultiDictTests::test_setitem",
"tests/test_multidict.py::PyCIMutableMultiDictTests::test_update",
"tests/test_multidict.py::PyCIMutableMultiDictTests::test_values__repr__",
"tests/test_multidict.py::TestMultiDictProxy::test__iter__",
"tests/test_multidict.py::TestMultiDictProxy::test__repr__",
"tests/test_multidict.py::TestMultiDictProxy::test_and",
"tests/test_multidict.py::TestMultiDictProxy::test_and_issue_410",
"tests/test_multidict.py::TestMultiDictProxy::test_cannot_create_from_unaccepted",
"tests/test_multidict.py::TestMultiDictProxy::test_copy",
"tests/test_multidict.py::TestMultiDictProxy::test_eq",
"tests/test_multidict.py::TestMultiDictProxy::test_exposed_names",
"tests/test_multidict.py::TestMultiDictProxy::test_get",
"tests/test_multidict.py::TestMultiDictProxy::test_getall",
"tests/test_multidict.py::TestMultiDictProxy::test_getone",
"tests/test_multidict.py::TestMultiDictProxy::test_instantiate__empty",
"tests/test_multidict.py::TestMultiDictProxy::test_instantiate__from_arg0",
"tests/test_multidict.py::TestMultiDictProxy::test_instantiate__from_arg0_dict",
"tests/test_multidict.py::TestMultiDictProxy::test_instantiate__from_generator",
"tests/test_multidict.py::TestMultiDictProxy::test_instantiate__with_kwargs",
"tests/test_multidict.py::TestMultiDictProxy::test_isdisjoint",
"tests/test_multidict.py::TestMultiDictProxy::test_isdisjoint2",
"tests/test_multidict.py::TestMultiDictProxy::test_items__contains",
"tests/test_multidict.py::TestMultiDictProxy::test_items__repr__",
"tests/test_multidict.py::TestMultiDictProxy::test_keys__contains",
"tests/test_multidict.py::TestMultiDictProxy::test_keys__repr__",
"tests/test_multidict.py::TestMultiDictProxy::test_keys_is_set_equal",
"tests/test_multidict.py::TestMultiDictProxy::test_keys_is_set_greater",
"tests/test_multidict.py::TestMultiDictProxy::test_keys_is_set_greater_equal",
"tests/test_multidict.py::TestMultiDictProxy::test_keys_is_set_less",
"tests/test_multidict.py::TestMultiDictProxy::test_keys_is_set_less_equal",
"tests/test_multidict.py::TestMultiDictProxy::test_keys_is_set_not_equal",
"tests/test_multidict.py::TestMultiDictProxy::test_ne",
"tests/test_multidict.py::TestMultiDictProxy::test_or",
"tests/test_multidict.py::TestMultiDictProxy::test_or_issue_410",
"tests/test_multidict.py::TestMultiDictProxy::test_preserve_stable_ordering",
"tests/test_multidict.py::TestMultiDictProxy::test_repr_issue_410",
"tests/test_multidict.py::TestMultiDictProxy::test_sub",
"tests/test_multidict.py::TestMultiDictProxy::test_sub_issue_410",
"tests/test_multidict.py::TestMultiDictProxy::test_values__contains",
"tests/test_multidict.py::TestMultiDictProxy::test_values__repr__",
"tests/test_multidict.py::TestMultiDictProxy::test_xor",
"tests/test_multidict.py::TestMultiDictProxy::test_xor_issue_410",
"tests/test_multidict.py::TestCIMultiDictProxy::test_basics",
"tests/test_multidict.py::TestCIMultiDictProxy::test_copy",
"tests/test_multidict.py::TestCIMultiDictProxy::test_exposed_names",
"tests/test_multidict.py::TestCIMultiDictProxy::test_get",
"tests/test_multidict.py::TestCIMultiDictProxy::test_getall",
"tests/test_multidict.py::TestCIMultiDictProxy::test_items__repr__",
"tests/test_multidict.py::TestCIMultiDictProxy::test_keys__repr__",
"tests/test_multidict.py::TestCIMultiDictProxy::test_values__repr__",
"tests/test_multidict.py::MutableMultiDictTests::test__iter__",
"tests/test_multidict.py::MutableMultiDictTests::test__repr__",
"tests/test_multidict.py::MutableMultiDictTests::test_add",
"tests/test_multidict.py::MutableMultiDictTests::test_and",
"tests/test_multidict.py::MutableMultiDictTests::test_and_issue_410",
"tests/test_multidict.py::MutableMultiDictTests::test_cannot_create_from_unaccepted",
"tests/test_multidict.py::MutableMultiDictTests::test_clear",
"tests/test_multidict.py::MutableMultiDictTests::test_copy",
"tests/test_multidict.py::MutableMultiDictTests::test_del",
"tests/test_multidict.py::MutableMultiDictTests::test_eq",
"tests/test_multidict.py::MutableMultiDictTests::test_exposed_names",
"tests/test_multidict.py::MutableMultiDictTests::test_extend",
"tests/test_multidict.py::MutableMultiDictTests::test_extend_from_proxy",
"tests/test_multidict.py::MutableMultiDictTests::test_getall",
"tests/test_multidict.py::MutableMultiDictTests::test_getone",
"tests/test_multidict.py::MutableMultiDictTests::test_instantiate__empty",
"tests/test_multidict.py::MutableMultiDictTests::test_instantiate__from_arg0",
"tests/test_multidict.py::MutableMultiDictTests::test_instantiate__from_arg0_dict",
"tests/test_multidict.py::MutableMultiDictTests::test_instantiate__from_generator",
"tests/test_multidict.py::MutableMultiDictTests::test_instantiate__with_kwargs",
"tests/test_multidict.py::MutableMultiDictTests::test_isdisjoint",
"tests/test_multidict.py::MutableMultiDictTests::test_isdisjoint2",
"tests/test_multidict.py::MutableMultiDictTests::test_items__contains",
"tests/test_multidict.py::MutableMultiDictTests::test_keys__contains",
"tests/test_multidict.py::MutableMultiDictTests::test_keys_is_set_equal",
"tests/test_multidict.py::MutableMultiDictTests::test_keys_is_set_greater",
"tests/test_multidict.py::MutableMultiDictTests::test_keys_is_set_greater_equal",
"tests/test_multidict.py::MutableMultiDictTests::test_keys_is_set_less",
"tests/test_multidict.py::MutableMultiDictTests::test_keys_is_set_less_equal",
"tests/test_multidict.py::MutableMultiDictTests::test_keys_is_set_not_equal",
"tests/test_multidict.py::MutableMultiDictTests::test_ne",
"tests/test_multidict.py::MutableMultiDictTests::test_or",
"tests/test_multidict.py::MutableMultiDictTests::test_or_issue_410",
"tests/test_multidict.py::MutableMultiDictTests::test_pop",
"tests/test_multidict.py::MutableMultiDictTests::test_pop_default",
"tests/test_multidict.py::MutableMultiDictTests::test_pop_raises",
"tests/test_multidict.py::MutableMultiDictTests::test_popitem",
"tests/test_multidict.py::MutableMultiDictTests::test_popitem_empty_multidict",
"tests/test_multidict.py::MutableMultiDictTests::test_repr_issue_410",
"tests/test_multidict.py::MutableMultiDictTests::test_set_default",
"tests/test_multidict.py::MutableMultiDictTests::test_sub",
"tests/test_multidict.py::MutableMultiDictTests::test_sub_issue_410",
"tests/test_multidict.py::MutableMultiDictTests::test_update",
"tests/test_multidict.py::MutableMultiDictTests::test_values__contains",
"tests/test_multidict.py::MutableMultiDictTests::test_xor",
"tests/test_multidict.py::MutableMultiDictTests::test_xor_issue_410",
"tests/test_multidict.py::CIMutableMultiDictTests::test__repr__",
"tests/test_multidict.py::CIMutableMultiDictTests::test_add",
"tests/test_multidict.py::CIMutableMultiDictTests::test_basics",
"tests/test_multidict.py::CIMutableMultiDictTests::test_clear",
"tests/test_multidict.py::CIMutableMultiDictTests::test_copy",
"tests/test_multidict.py::CIMutableMultiDictTests::test_ctor",
"tests/test_multidict.py::CIMutableMultiDictTests::test_del",
"tests/test_multidict.py::CIMutableMultiDictTests::test_delitem",
"tests/test_multidict.py::CIMutableMultiDictTests::test_exposed_names",
"tests/test_multidict.py::CIMutableMultiDictTests::test_extend",
"tests/test_multidict.py::CIMutableMultiDictTests::test_extend_from_proxy",
"tests/test_multidict.py::CIMutableMultiDictTests::test_extend_with_upstr",
"tests/test_multidict.py::CIMutableMultiDictTests::test_get",
"tests/test_multidict.py::CIMutableMultiDictTests::test_getall",
"tests/test_multidict.py::CIMutableMultiDictTests::test_items__repr__",
"tests/test_multidict.py::CIMutableMultiDictTests::test_keys__repr__",
"tests/test_multidict.py::CIMutableMultiDictTests::test_pop",
"tests/test_multidict.py::CIMutableMultiDictTests::test_pop_default",
"tests/test_multidict.py::CIMutableMultiDictTests::test_pop_raises",
"tests/test_multidict.py::CIMutableMultiDictTests::test_popitem",
"tests/test_multidict.py::CIMutableMultiDictTests::test_popitem_empty_multidict",
"tests/test_multidict.py::CIMutableMultiDictTests::test_set_default",
"tests/test_multidict.py::CIMutableMultiDictTests::test_setitem",
"tests/test_multidict.py::CIMutableMultiDictTests::test_update",
"tests/test_multidict.py::CIMutableMultiDictTests::test_values__repr__",
"tests/test_multidict.py::TestPyUpStr::test_ctor",
"tests/test_multidict.py::TestPyUpStr::test_ctor_buffer",
"tests/test_multidict.py::TestPyUpStr::test_ctor_repr",
"tests/test_multidict.py::TestPyUpStr::test_ctor_str",
"tests/test_multidict.py::TestPyUpStr::test_ctor_str_uppercase",
"tests/test_multidict.py::TestPyUpStr::test_upper",
"tests/test_multidict.py::TestUpStr::test_ctor",
"tests/test_multidict.py::TestUpStr::test_ctor_buffer",
"tests/test_multidict.py::TestUpStr::test_ctor_repr",
"tests/test_multidict.py::TestUpStr::test_ctor_str",
"tests/test_multidict.py::TestUpStr::test_ctor_str_uppercase",
"tests/test_multidict.py::TestUpStr::test_upper",
"tests/test_multidict.py::TestPyTypes::test_create_ci_multidict_proxy_from_multidict",
"tests/test_multidict.py::TestPyTypes::test_create_cimultidict_proxy_from_nonmultidict",
"tests/test_multidict.py::TestPyTypes::test_create_multidict_proxy_from_cimultidict",
"tests/test_multidict.py::TestPyTypes::test_create_multidict_proxy_from_nonmultidict",
"tests/test_multidict.py::TestPyTypes::test_dict_not_inherited_from_proxy",
"tests/test_multidict.py::TestPyTypes::test_dicts",
"tests/test_multidict.py::TestPyTypes::test_proxies",
"tests/test_multidict.py::TestPyTypes::test_proxy_not_inherited_from_dict",
"tests/test_multidict.py::TestTypes::test_create_ci_multidict_proxy_from_multidict",
"tests/test_multidict.py::TestTypes::test_create_cimultidict_proxy_from_nonmultidict",
"tests/test_multidict.py::TestTypes::test_create_multidict_proxy_from_cimultidict",
"tests/test_multidict.py::TestTypes::test_create_multidict_proxy_from_nonmultidict",
"tests/test_multidict.py::TestTypes::test_dict_not_inherited_from_proxy",
"tests/test_multidict.py::TestTypes::test_dicts",
"tests/test_multidict.py::TestTypes::test_proxies",
"tests/test_multidict.py::TestTypes::test_proxy_not_inherited_from_dict"
]
| []
| Apache License 2.0 | 571 | [
"multidict/_multidict_py.py"
]
| [
"multidict/_multidict_py.py"
]
|
|
cherrypy__cherrypy-1442 | adb7ff5aa1f48506e2838a22176c43c6f3aa4fb5 | 2016-06-05 23:10:54 | adb7ff5aa1f48506e2838a22176c43c6f3aa4fb5 | jaraco: This idea is a very cool one. And the implementation is so short and concise, signs of an elegant implementation. I have a couple of comments to make about the specific implementation, but I don't see why this change couldn't be rolled into an upcoming release in short order. | diff --git a/CHANGES.txt b/CHANGES.txt
index 20877f63..b06461ad 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -4,6 +4,7 @@
* Issue #1411: Fix issue where autoreload fails when
the host interpreter for CherryPy was launched using
``python -m``.
+* #1441: Added tool to automatically convert request params.
6.1.0
-----
diff --git a/cherrypy/_cptools.py b/cherrypy/_cptools.py
index 54c63734..060ca76f 100644
--- a/cherrypy/_cptools.py
+++ b/cherrypy/_cptools.py
@@ -533,5 +533,6 @@ _d.json_in = Tool('before_request_body', jsontools.json_in, priority=30)
_d.json_out = Tool('before_handler', jsontools.json_out, priority=30)
_d.auth_basic = Tool('before_handler', auth_basic.basic_auth, priority=1)
_d.auth_digest = Tool('before_handler', auth_digest.digest_auth, priority=1)
+_d.params = Tool('before_handler', cptools.convert_params)
del _d, cptools, encoding, auth, static
diff --git a/cherrypy/lib/cptools.py b/cherrypy/lib/cptools.py
index fbed0e23..073216e0 100644
--- a/cherrypy/lib/cptools.py
+++ b/cherrypy/lib/cptools.py
@@ -630,3 +630,21 @@ def autovary(ignore=None, debug=False):
v.sort()
resp_h['Vary'] = ', '.join(v)
request.hooks.attach('before_finalize', set_response_header, 95)
+
+
+def convert_params(exception=ValueError, error=400):
+ """Convert request params based on function annotations, with error handling.
+
+ exception
+ Exception class to catch.
+
+ status
+ The HTTP error code to return to the client on failure.
+ """
+ request = cherrypy.serving.request
+ types = request.handler.callable.__annotations__
+ try:
+ for key in set(types).intersection(request.params):
+ request.params[key] = types[key](request.params[key])
+ except exception as exc:
+ raise cherrypy.HTTPError(error, str(exc))
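
The registration above wires `convert_params` in as `tools.params`. Grounded in that registration and this record's test file, a per-path usage sketch might look like the following; treating `tools.params.error` as settable through path config is an assumption extrapolated from the test, which only sets it globally:

```
import cherrypy

class Root:
    @cherrypy.expose
    @cherrypy.tools.params()
    def resource(self, limit: int):
        return type(limit).__name__

conf = {'/': {'tools.params.on': True,
              'tools.params.error': 422}}  # assumed: per-path override of the error code
cherrypy.quickstart(Root(), '/', conf)
```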
| Convert request params based on function annotations.
With the increasing focus on [type hints](https://docs.python.org/3.5/library/typing.html) in Python, having a tool which automatically converted query params (with exception handling) would be a nice feature.
It's common for apps to have boilerplate code for [converting request params](http://stackoverflow.com/questions/32774024/) and returning 400-level errors on failure.
```
@cherrypy.tools.params(exception=ValueError)
def resource(self, limit: int):
assert isinstance(limit, int) # a ValueError would have already raised a 400
```
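
A minimal, framework-free sketch of the conversion step the issue asks for is below; it mirrors the `convert_params` hook in the patch above, but a plain dict stands in for `cherrypy.serving.request.params` and a `RuntimeError` stands in for `cherrypy.HTTPError`. Only annotated names that actually appear in the params are converted, so unannotated params pass through untouched.

```
# Sketch only: `params` is a plain dict standing in for request.params.
def convert(handler, params, exception=ValueError, error=400):
    types = handler.__annotations__
    try:
        for key in set(types).intersection(params):
            params[key] = types[key](params[key])
    except exception as exc:
        # Stand-in for raising cherrypy.HTTPError(error, str(exc)).
        raise RuntimeError('%d: %s' % (error, exc))
    return params

def resource(limit: int, sort: str = 'asc'):
    return limit, sort

print(convert(resource, {'limit': '10'}))  # {'limit': 10}
```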
| cherrypy/cherrypy | diff --git a/cherrypy/test/test_params.py b/cherrypy/test/test_params.py
new file mode 100644
index 00000000..2d57c279
--- /dev/null
+++ b/cherrypy/test/test_params.py
@@ -0,0 +1,55 @@
+import sys
+import cherrypy
+from cherrypy.test import helper
+
+
+class ParamsTest(helper.CPWebCase):
+ @staticmethod
+ def setup_server():
+ class Root:
+ @cherrypy.expose
+ @cherrypy.tools.params()
+ def resource(self, limit=None, sort=None):
+ return type(limit).__name__
+ # for testing on Py 2
+ resource.__annotations__ = {'limit': int}
+ conf = {'/': {'tools.params.on': True}}
+ cherrypy.tree.mount(Root(), config=conf)
+
+ def test_pass(self):
+ self.getPage('/resource')
+ self.assertStatus(200)
+ self.assertBody('NoneType')
+
+ self.getPage('/resource?limit=0')
+ self.assertStatus(200)
+ self.assertBody('int')
+
+ def test_error(self):
+ self.getPage('/resource?limit=')
+ self.assertStatus(400)
+ self.assertInBody('invalid literal for int')
+
+ cherrypy.config['tools.params.error'] = 422
+ self.getPage('/resource?limit=')
+ self.assertStatus(422)
+ self.assertInBody('invalid literal for int')
+
+ cherrypy.config['tools.params.exception'] = TypeError
+ self.getPage('/resource?limit=')
+ self.assertStatus(500)
+
+ def test_syntax(self):
+ if sys.version_info < (3,):
+ return self.skip("skipped (Python 3 only)")
+ exec("""class Root:
+ @cherrypy.expose
+ @cherrypy.tools.params()
+ def resource(self, limit: int):
+ return type(limit).__name__
+conf = {'/': {'tools.params.on': True}}
+cherrypy.tree.mount(Root(), config=conf)""")
+
+ self.getPage('/resource?limit=0')
+ self.assertStatus(200)
+ self.assertBody('int')
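
The test patch above works around Python 2's lack of annotation syntax in two ways: it assigns `resource.__annotations__` by hand, and it `exec`s the Python-3-only `def` in `test_syntax`. The hand-assignment can be wrapped in a small decorator; this `annotate` helper is a hypothetical convenience, not part of CherryPy:

```
# Hypothetical helper: attach annotations without Python-3-only syntax.
def annotate(**types):
    def decorator(func):
        func.__annotations__ = types
        return func
    return decorator

@annotate(limit=int)
def resource(limit=None):
    return type(limit).__name__

assert resource.__annotations__ == {'limit': int}
```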
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 3
} | 6.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"mock",
"pytest"
],
"pre_install": null,
"python": "3.5",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
-e git+https://github.com/cherrypy/cherrypy.git@adb7ff5aa1f48506e2838a22176c43c6f3aa4fb5#egg=CherryPy
importlib-metadata==4.8.3
iniconfig==1.1.1
mock==5.2.0
nose==1.3.7
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: cherrypy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- mock==5.2.0
- nose==1.3.7
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/cherrypy
| [
"cherrypy/test/test_params.py::ParamsTest::test_error",
"cherrypy/test/test_params.py::ParamsTest::test_pass",
"cherrypy/test/test_params.py::ParamsTest::test_syntax",
"cherrypy/test/test_params.py::ParamsTest::test_gc"
]
| []
| []
| []
| BSD 3-Clause "New" or "Revised" License | 572 | [
"CHANGES.txt",
"cherrypy/lib/cptools.py",
"cherrypy/_cptools.py"
]
| [
"CHANGES.txt",
"cherrypy/lib/cptools.py",
"cherrypy/_cptools.py"
]
|
kytos__python-openflow-50 | 9b9c0c3e86c73aaebdcb57bda00feeefbdbcfe09 | 2016-06-06 16:10:00 | 9b9c0c3e86c73aaebdcb57bda00feeefbdbcfe09 | diff --git a/pyof/v0x01/common/header.py b/pyof/v0x01/common/header.py
index 7cb2684..2521ebe 100644
--- a/pyof/v0x01/common/header.py
+++ b/pyof/v0x01/common/header.py
@@ -67,7 +67,7 @@ class Header(base.GenericStruct):
:param length: Length of the message, including the header itself
"""
version = basic_types.UBInt8()
- message_type = basic_types.UBInt8()
+ message_type = basic_types.UBInt8(enum_ref=Type)
length = basic_types.UBInt16()
xid = basic_types.UBInt32()
diff --git a/pyof/v0x01/common/phy_port.py b/pyof/v0x01/common/phy_port.py
index df242a7..bdcc6dc 100644
--- a/pyof/v0x01/common/phy_port.py
+++ b/pyof/v0x01/common/phy_port.py
@@ -3,6 +3,8 @@
# System imports
import enum
+from collections import OrderedDict as _OD
+
# Third-party imports
# Local source tree imports
@@ -11,30 +13,39 @@ from pyof.v0x01.foundation import basic_types
# Enums
-
-class PortConfig(enum.Enum):
- """Flags to indicate behavior of the physical port.
-
- These flags are used in OFPPhyPort to describe the current configuration.
- They are used in the OFPPortMod message to configure the port's behavior.
-
- Enums:
- OFPPC_PORT_DOWN # Port is administratively down.
- OFPPC_NO_STP # Disable 802.1D spanning tree on port.
- OFPPC_NO_RECV # Drop all packets except 802.1D spanning tree.
- OFPPC_NO_RECV_STP # Drop received 802.1D STP packets.
- OFPPC_NO_FLOOD # Do not include this port when flooding.
- OFPPC_NO_FWD # Drop packets forwarded to port.
- OFPPC_NO_PACKET_IN # Do not send packet-in msgs for port.
- """
-
- OFPPC_PORT_DOWN = 1 << 0
- OFPPC_NO_STP = 1 << 1
- OFPPC_NO_RECV = 1 << 2
- OFPPC_NO_RECV_STP = 1 << 3
- OFPPC_FLOOD = 1 << 4
- OFPPC_NO_FWD = 1 << 5
- OFPPC_NO_PACKET_IN = 1 << 6
+class PortConfig(base.GenericBitMask):
+ _enum = _OD(OFPC_PORT_DOWN = 1 << 0,
+ OFPPC_NO_STP = 1 << 1,
+ OFPPC_NO_RECV = 1 << 2,
+ OFPPC_NO_RECV_STP = 1 << 3,
+ OFPPC_FLOOD = 1 << 4,
+ OFPPC_NO_FWD = 1 << 5,
+ OFPPC_NO_PACKET_IN = 1 << 6)
+
+
+#class PortConfig(enum.Enum):
+# """Flags to indicate behavior of the physical port.
+#
+# These flags are used in OFPPhyPort to describe the current configuration.
+# They are used in the OFPPortMod message to configure the port's behavior.
+#
+# Enums:
+# OFPPC_PORT_DOWN # Port is administratively down.
+# OFPPC_NO_STP # Disable 802.1D spanning tree on port.
+# OFPPC_NO_RECV # Drop all packets except 802.1D spanning tree.
+# OFPPC_NO_RECV_STP # Drop received 802.1D STP packets.
+# OFPPC_NO_FLOOD # Do not include this port when flooding.
+# OFPPC_NO_FWD # Drop packets forwarded to port.
+# OFPPC_NO_PACKET_IN # Do not send packet-in msgs for port.
+# """
+#
+# OFPPC_PORT_DOWN = 1 << 0
+# OFPPC_NO_STP = 1 << 1
+# OFPPC_NO_RECV = 1 << 2
+# OFPPC_NO_RECV_STP = 1 << 3
+# OFPPC_FLOOD = 1 << 4
+# OFPPC_NO_FWD = 1 << 5
+# OFPPC_NO_PACKET_IN = 1 << 6
class PortState(enum.Enum):
@@ -130,7 +141,7 @@ class PortFeatures(enum.Enum):
# Classes
-class PhyPort(base.GenericMessage):
+class PhyPort(base.GenericStruct):
"""
Description of a physical port.
@@ -157,7 +168,7 @@ class PhyPort(base.GenericMessage):
port_no = basic_types.UBInt16()
hw_addr = basic_types.HWAddress()
name = basic_types.Char(length=base.OFP_MAX_PORT_NAME_LEN)
- config = basic_types.UBInt32()
+ config = basic_types.UBInt32(enum_ref=PortConfig)
state = basic_types.UBInt32()
curr = basic_types.UBInt32()
advertised = basic_types.UBInt32()
diff --git a/pyof/v0x01/controller2switch/flow_mod.py b/pyof/v0x01/controller2switch/flow_mod.py
index d300e80..f8467c5 100644
--- a/pyof/v0x01/controller2switch/flow_mod.py
+++ b/pyof/v0x01/controller2switch/flow_mod.py
@@ -2,12 +2,13 @@
# System imports
import enum
-
+from collections import OrderedDict as _OD
# Third-party imports
# Local source tree imports
from pyof.v0x01.common import flow_match
from pyof.v0x01.common import header as of_header
+from pyof.v0x01.common import phy_port
from pyof.v0x01.controller2switch import common
from pyof.v0x01.foundation import base
from pyof.v0x01.foundation import basic_types
@@ -27,23 +28,23 @@ class FlowModCommand(enum.Enum):
OFPFC_DELETE_STRICT # Strictly match wildcards and priority
"""
- OFPFC_ADD = 1
- OFPFC_MODIFY = 2
- OFPFC_MODIFY_STRICT = 3
- OFPFC_DELETE = 4
- OFPFC_DELETE_STRICT = 5
+ OFPFC_ADD = 0
+ OFPFC_MODIFY = 1
+ OFPFC_MODIFY_STRICT = 2
+ OFPFC_DELETE = 3
+ OFPFC_DELETE_STRICT = 4
-class FlowModFlags(enum.Enum):
+class FlowModFlags(base.GenericBitMask):
"""Types to be used in Flags field"""
-
- #: Send flow removed message when flow expires or is deleted
- OFPFF_SEND_FLOW_REM = 1 << 0
- #: Check for overlapping entries first
- OFPFF_CHECK_OVERLAP = 1 << 1
- #: Remark this is for emergency
- OFPFF_EMERG = 1 << 2
-
+ _enum = _OD(
+ #: Send flow removed message when flow expires or is deleted
+ OFPFF_SEND_FLOW_REM = 1 << 0,
+ #: Check for overlapping entries first
+ OFPFF_CHECK_OVERLAP = 1 << 1,
+ #: Remark this is for emergency
+ OFPFF_EMERG = 1 << 2
+ )
# Classes
@@ -71,13 +72,13 @@ class FlowMod(base.GenericMessage):
header = of_header.Header()
match = flow_match.Match()
cookie = basic_types.UBInt64()
- command = basic_types.UBInt16()
+ command = basic_types.UBInt16(enum_ref=FlowModCommand)
idle_timeout = basic_types.UBInt16()
hard_timeout = basic_types.UBInt16()
priority = basic_types.UBInt16()
buffer_id = basic_types.UBInt32()
- out_port = basic_types.UBInt16()
- flags = basic_types.UBInt16()
+ out_port = basic_types.UBInt16(enum_ref=phy_port.Port)
+ flags = basic_types.UBInt16(enum_ref=FlowModFlags)
actions = common.ListOfActions()
def __init__(self, xid=None, match=None, cookie=None, command=None,
diff --git a/pyof/v0x01/foundation/base.py b/pyof/v0x01/foundation/base.py
index 3c4ac01..b1b9f65 100644
--- a/pyof/v0x01/foundation/base.py
+++ b/pyof/v0x01/foundation/base.py
@@ -59,14 +59,15 @@ class GenericType(object):
Attributes like `UBInt8`, `UBInt16`, `HWAddress` amoung others uses this
class as base.
"""
- def __init__(self, val=None):
- self._value = val
+ def __init__(self, value=None, enum_ref=None):
+ self._value = value
+ self._enum_ref = enum_ref
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self._value)
def __str__(self):
- return '{}: {}'.format(self.__class__.__name__, str(self._value))
+ return '<{}: {}>'.format(self.__class__.__name__, str(self._value))
def __eq__(self, other):
return self._value == other
@@ -88,11 +89,15 @@ class GenericType(object):
def pack(self):
"""Pack the valeu as a binary representation."""
- if type(self._value.__class__) is enum.EnumMeta:
- # Gets the respective value from the Enum
- value = self._value.value
+ if self.is_enum():
+ if issubclass(type(self._value), GenericBitMask):
+ value = self._value.bitmask
+ else:
+ # Gets the respective value from the Enum
+ value = self._value.value
else:
value = self._value
+
try:
return struct.pack(self._fmt, value)
except struct.error as err:
@@ -116,9 +121,11 @@ class GenericType(object):
# the enum name/reference ?
try:
self._value = struct.unpack_from(self._fmt, buff, offset)[0]
+ if self._enum_ref:
+ self._value = self._enum_ref(self._value)
except struct.error:
- raise exceptions.Exception("Error while unpacking"
- "data from buffer")
+ raise exceptions.UnpackException("Error while unpacking"
+ "data from buffer")
def get_size(self):
"""Return the size in bytes of this attribute. """
@@ -136,8 +143,13 @@ class GenericType(object):
raise
def value(self):
- #TODO: Review this value._value.value (For enums)
- return self._value
+ if isinstance(self._value, enum.Enum):
+ return self._value.value
+ else:
+ return self._value
+
+ def is_enum(self):
+ return self._enum_ref is not None
class MetaStruct(type):
@@ -155,7 +167,7 @@ class MetaStruct(type):
return type.__new__(self, name, bases, classdict)
-class GenericStruct(object):
+class GenericStruct(object, metaclass=MetaStruct):
"""Class that will be used by all OpenFlow structs.
So, if you need insert a method that will be used for all Structs, here is
@@ -165,7 +177,6 @@ class GenericStruct(object):
has a list of attributes and theses attributes can be of struct
type too.
"""
- __metaclass__ = MetaStruct
def __init__(self, *args, **kwargs):
for _attr in self.__ordered__:
@@ -199,8 +210,7 @@ class GenericStruct(object):
return message
def _attributes(self):
- """
- turns an generator with each attribute from the current instance.
+ """Returns a generator with each attribute from the current instance.
This attributes are coherced by the expected class for that attribute.
"""
@@ -218,7 +228,7 @@ class GenericStruct(object):
# Verifications for classes derived from list type
if not isinstance(attr, _class):
attr = _class(attr)
- yield attr
+ yield (_attr, attr)
def _attr_fits_into_class(attr, _class):
if not isinstance(attr, _class):
@@ -259,8 +269,7 @@ class GenericStruct(object):
raise Exception()
else:
size = 0
- for _attr in self.__ordered__:
- _class = self.__ordered__[_attr]
+ for _attr, _class in self.__ordered__.items():
attr = getattr(self, _attr)
if _class.__name__ is 'PAD':
size += attr.get_size()
@@ -289,15 +298,20 @@ class GenericStruct(object):
raise exceptions.ValidationError(error_msg)
else:
message = b''
- for attr in self._attributes():
- try:
+ for attr_name, attr_class in self.__ordered__.items():
+ attr = getattr(self, attr_name)
+ class_attr = getattr(self.__class__, attr_name)
+ if isinstance(attr, attr_class):
message += attr.pack()
- except:
- raise exceptions.AttributeTypeError(attr, type(attr),
- type(attr))
+ elif class_attr.is_enum():
+ message += attr_class(value = attr,
+ enum_ref= class_attr._enum_ref).pack()
+ else:
+ message += attr_class(attr).pack()
+
return message
- def unpack(self, buff):
+ def unpack(self, buff, offset=0):
"""Unpack a binary struct into the object attributes.
This method updated the object attributes based on the unpacked data
@@ -308,12 +322,24 @@ class GenericStruct(object):
"""
#TODO: Remove any referency to header here, this is a struct, not a
# message.
- begin = 0
- for attr in self._attributes:
- if attr.__class__.__name__ != "Header":
+ begin = offset
+
+ # TODO: Refact, ugly code
+ for attr_name, attr_class in self.__ordered__.items():
+ if attr_class.__name__ != "PAD":
+ class_attr = getattr(self.__class__, attr_name)
+ attr = attr_class()
attr.unpack(buff, offset=begin)
- setattr(self, attr.__name__, attr)
- begin += attr.get_size()
+
+ if issubclass(attr_class, GenericType) and class_attr.is_enum():
+ #raise Exception(class_attr._enum_ref)
+ attr = class_attr._enum_ref(attr._value)
+ setattr(self, attr_name, attr)
+
+ if issubclass(attr_class, GenericType):
+ attr = attr_class()
+
+ begin += attr.get_size()
def is_valid(self):
"""Checks if all attributes on struct is valid.
@@ -341,6 +367,34 @@ class GenericMessage(GenericStruct):
.. note:: A Message on this library context is like a Struct but has a
also a `header` attribute.
"""
+ def unpack(self, buff, offset=0):
+ """Unpack a binary message.
+
+ This method updated the object attributes based on the unpacked
+ data from the buffer binary message. It is an inplace method,
+ and it receives the binary data of the message without the header.
+ There is no return on this method
+
+ :param buff: binary data package to be unpacked
+ without the first 8 bytes (header)
+ """
+ begin = offset
+
+ for attr_name, attr_class in self.__ordered__.items():
+ if attr_class.__name__ != "Header":
+ if attr_class.__name__ != "PAD":
+ class_attr = getattr(self.__class__, attr_name)
+ attr = attr_class()
+ attr.unpack(buff, offset=begin)
+ begin += attr.get_size()
+
+ if issubclass(attr_class, GenericType) and \
+ class_attr.is_enum():
+ attr = class_attr._enum_ref(attr._value)
+ setattr(self, attr_name, attr)
+ else:
+ begin += attr.get_size()
+
def _validate_message_length(self):
if not self.header.length == self.get_size():
return False
@@ -393,3 +447,33 @@ class GenericMessage(GenericStruct):
size.
"""
self.header.length = self.get_size()
+
+
+class MetaBitMask(type):
+ def __getattr__(cls, name):
+ return cls._enum[name]
+
+ def __dir__(cls):
+ res = dir(type(cls)) + list(cls.__dict__.keys())
+ if cls is not GenericBitMask:
+ res.extend(cls._enum)
+ return res
+
+
+class GenericBitMask(object, metaclass=MetaBitMask):
+ def __init__(self, bitmask=None):
+ self.bitmask = bitmask
+
+ def __str__(self):
+ return "<%s: %s>" % (self.__class__.__name__, self.names)
+
+ def __repr__(self):
+ return "<%s(%s)>" % (self.__class__.__name__, self.bitmask)
+
+ @property
+ def names(self):
+ result = []
+ for key, value in self._enum.items():
+ if value & self.bitmask:
+ result.append(key)
+ return result
diff --git a/pyof/v0x01/foundation/basic_types.py b/pyof/v0x01/foundation/basic_types.py
index 74fbd58..2e49b13 100644
--- a/pyof/v0x01/foundation/basic_types.py
+++ b/pyof/v0x01/foundation/basic_types.py
@@ -120,13 +120,62 @@ class Char(base.GenericType):
:param value: the character to be build.
:param length: the character size.
"""
- if value:
- self._value = value
+ super().__init__(value)
self.length = length
self._fmt = '!{}{}'.format(self.length, 's')
-class FixedTypeList(list):
+class HWAddress(base.GenericType):
+ """Defines a hardware address"""
+
+ def __init__(self, hw_address=b'000000'):
+ super().__init__(hw_address)
+
+ def pack(self):
+ # struct.pack('!6B', *[int(x, 16) for x in self._value.split(':')])
+ value = self._value.split(':')
+ return struct.pack('!6B', *[int(x, 16) for x in value])
+
+ def unpack(self, buff, offset=0):
+ # value = ':'.join([hex(x)[2:] for x in struct.unpack('!6B', buff)])
+ unpacked_data = struct.unpack('!6B', buff[offset:offset+6])
+ transformed_data = ':'.join([hex(x)[2:] for x in unpacked_data])
+ self._value = transformed_data
+
+ def get_size(self):
+ return 6
+
+
+class BinaryData(base.GenericType):
+ """Class to create objects that represents binary data
+
+ This will be used on the 'data' attribute from
+ packet_out and packet_in messages.
+
+ Both the 'pack' and 'unpack' methods will return the binary data itself.
+ get_size method will return the size of the instance using python 'len'
+ """
+
+ def __init__(self, value=b''):
+ super().__init__(value)
+
+ def pack(self):
+ if type(self._value) is bytes:
+ if len(self._value) > 0:
+ return self._value
+ else:
+ return b''
+ else:
+ raise exceptions.NotBinarydata()
+
+ def unpack(self, buff):
+ self._value = buff
+
+ def get_size(self):
+ return len(self._value)
+
+
+class FixedTypeList(list, base.GenericStruct):
"""Creates a List that will receive OFP Classes"""
_pyof_class = None
@@ -201,15 +250,15 @@ class FixedTypeList(list):
offset: used if we need to shift the beginning of the data
"""
item_size = self._pyof_class().get_size()
- binary_items = [buff[i:i+2] for i in range(offset, len(buff),
- item_size)]
+ binary_items = [buff[i:i+item_size] for i in range(offset, len(buff),
+ item_size)]
for binary_item in binary_items:
item = self._pyof_class()
item.unpack(binary_item)
self.append(item)
-class ConstantTypeList(list):
+class ConstantTypeList(list, base.GenericStruct):
"""Creates a List that will only allow objects of the same type (class) to
be inserted"""
def __init__(self, items=[]):
@@ -298,51 +347,3 @@ class ConstantTypeList(list):
self.append(item)
-class HWAddress(base.GenericType):
- """Defines a hardware address"""
-
- def __init__(self, hw_address=None):
- self._value = hw_address
-
- def pack(self):
- # struct.pack('!6B', *[int(x, 16) for x in self._value.split(':')])
- value = self._value.split(':')
- return struct.pack('!6B', *[int(x, 16) for x in value])
-
- def unpack(self, buff, offset=0):
- # value = ':'.join([hex(x)[2:] for x in struct.unpack('!6B', buff)])
- unpacked_data = struct.unpack('!6B', buff)
- transformed_data = ':'.join([hex(x)[2:] for x in unpacked_data])
- self._value = transformed_data
-
- def get_size(self):
- return 6
-
-
-class BinaryData(base.GenericType):
- """Class to create objects that represents binary data
-
- This will be used on the 'data' attribute from
- packet_out and packet_in messages.
-
- Both the 'pack' and 'unpack' methods will return the binary data itself.
- get_size method will return the size of the instance using python 'len'
- """
-
- def __init__(self, value=b''):
- super().__init__(value)
-
- def pack(self):
- if type(self._value) is bytes:
- if len(self._value) > 0:
- return self._value
- else:
- return b''
- else:
- raise exceptions.NotBinarydata()
-
- def unpack(self, buff):
- self._value = buff
-
- def get_size(self):
- return len(self._value)
diff --git a/pyof/v0x01/foundation/exceptions.py b/pyof/v0x01/foundation/exceptions.py
index 604f00a..8da17c0 100644
--- a/pyof/v0x01/foundation/exceptions.py
+++ b/pyof/v0x01/foundation/exceptions.py
@@ -65,3 +65,7 @@ class ValidationError(Exception):
self.msg = msg
def __str__(self):
return self.msg
+
+
+class UnpackException(Exception):
+ pass
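
The `GenericBitMask`/`MetaBitMask` pair introduced in this patch lets a class expose named flags through its metaclass while instances decode a raw bitmask back into flag names. Below is a self-contained sketch of that pattern, reusing the `FlowModFlags` values from the diff; one deliberate tweak is that `bitmask` defaults to `0` here so `names` is safe on an empty instance, whereas the patch defaults it to `None`.

```
from collections import OrderedDict

class MetaBitMask(type):
    def __getattr__(cls, name):
        # Class-level lookup: FlowModFlags.OFPFF_EMERG -> 4
        return cls._enum[name]

class GenericBitMask(metaclass=MetaBitMask):
    _enum = OrderedDict()

    def __init__(self, bitmask=0):
        self.bitmask = bitmask

    @property
    def names(self):
        # Decode the bitmask into the names of the flags that are set.
        return [k for k, v in self._enum.items() if v & self.bitmask]

class FlowModFlags(GenericBitMask):
    _enum = OrderedDict([('OFPFF_SEND_FLOW_REM', 1 << 0),
                         ('OFPFF_CHECK_OVERLAP', 1 << 1),
                         ('OFPFF_EMERG', 1 << 2)])

flags = FlowModFlags(0b101)
print(flags.names)               # ['OFPFF_SEND_FLOW_REM', 'OFPFF_EMERG']
print(FlowModFlags.OFPFF_EMERG)  # 4
```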
| Error on Descriptor utilization
The way we are using Descriptors results in incorrect behavior when instantiating two objects of the same class (a minimal illustration follows the test patch below). | kytos/python-openflow | diff --git a/tests/v0x01/test_common/test_header.py b/tests/v0x01/test_common/test_header.py
index 3827f84..8435aba 100644
--- a/tests/v0x01/test_common/test_header.py
+++ b/tests/v0x01/test_common/test_header.py
@@ -1,5 +1,6 @@
import unittest
+import os
from pyof.v0x01.common import header as of_header
@@ -28,8 +29,16 @@ class TestHeader(unittest.TestCase):
packed_header = b'\x01\x00\x00\x00\x00\x00\x00\x01'
self.assertEqual(self.message.pack(), packed_header)
- @unittest.skip('Not yet implemented')
def test_unpack(self):
"""[Common/Header] - unpacking Hello"""
- # TODO
- pass
+ filename = os.path.join(os.path.dirname(os.path.realpath('__file__')),
+ 'raw/v0x01/ofpt_hello.dat')
+ f = open(filename,'rb')
+ self.message.unpack(f.read(8))
+
+ self.assertEqual(self.message.length, 8)
+ self.assertEqual(self.message.xid, 1)
+ self.assertEqual(self.message.message_type, of_header.Type.OFPT_HELLO)
+ self.assertEqual(self.message.version, 1)
+
+ f.close()
diff --git a/tests/v0x01/test_controller2switch/test_barrier_reply.py b/tests/v0x01/test_controller2switch/test_barrier_reply.py
index bfe59da..d5679e3 100644
--- a/tests/v0x01/test_controller2switch/test_barrier_reply.py
+++ b/tests/v0x01/test_controller2switch/test_barrier_reply.py
@@ -1,25 +1,30 @@
import unittest
+import os
from pyof.v0x01.controller2switch import barrier_reply
+from pyof.v0x01.common import header as of_header
class TestBarrierReply(unittest.TestCase):
def setUp(self):
- self.message = barrier_reply.BarrierReply(xid=1)
+ self.message = barrier_reply.BarrierReply(xid=5)
+ self.head = of_header.Header()
def test_get_size(self):
"""[Controller2Switch/BarrierReply] - size 8"""
self.assertEqual(self.message.get_size(), 8)
- @unittest.skip('Not yet implemented')
+ @unittest.skip('Need to implement length update')
def test_pack(self):
"""[Controller2Switch/BarrierReply] - packing"""
- # TODO
- pass
+ packed_msg = b'\x01\x13\x00\x08\x00\x00\x00\x05'
+ self.assertEqual(self.message.pack(), packed_msg)
- @unittest.skip('Not yet implemented')
def test_unpack(self):
"""[Controller2Switch/BarrierReply] - unpacking"""
- # TODO
- pass
+ filename = os.path.join(os.path.dirname(os.path.realpath('__file__')),
+ 'raw/v0x01/ofpt_barrier_reply.dat')
+ with open(filename,'rb') as f:
+ self.head.unpack(f.read(8))
+ self.assertEqual(self.message.unpack(f.read()), None)
diff --git a/tests/v0x01/test_controller2switch/test_barrier_request.py b/tests/v0x01/test_controller2switch/test_barrier_request.py
index d92fbc5..355d71e 100644
--- a/tests/v0x01/test_controller2switch/test_barrier_request.py
+++ b/tests/v0x01/test_controller2switch/test_barrier_request.py
@@ -1,12 +1,14 @@
import unittest
+import os
from pyof.v0x01.controller2switch import barrier_request
-
+from pyof.v0x01.common import header as of_header
class TestBarrierRequest(unittest.TestCase):
def setUp(self):
self.message = barrier_request.BarrierRequest(xid=1)
+ self.head = of_header.Header(xid=5)
def test_get_size(self):
"""[Controller2Switch/BarrierRequest] - size 8"""
@@ -18,8 +20,10 @@ class TestBarrierRequest(unittest.TestCase):
# TODO
pass
- @unittest.skip('Not yet implemented')
def test_unpack(self):
"""[Controller2Switch/BarrierRequest] - unpacking"""
- # TODO
- pass
+ filename = os.path.join(os.path.dirname(os.path.realpath('__file__')),
+ 'raw/v0x01/ofpt_barrier_request.dat')
+ with open(filename, 'rb') as f:
+ self.head.unpack(f.read(8))
+ self.assertEqual(self.message.unpack(f.read()), None)
diff --git a/tests/v0x01/test_controller2switch/test_features_reply.py b/tests/v0x01/test_controller2switch/test_features_reply.py
index b59f6b1..da020c6 100644
--- a/tests/v0x01/test_controller2switch/test_features_reply.py
+++ b/tests/v0x01/test_controller2switch/test_features_reply.py
@@ -1,12 +1,15 @@
import unittest
+import os
from pyof.v0x01.common import action
from pyof.v0x01.controller2switch import features_reply
+from pyof.v0x01.common import header as of_header
class TestSwitchFeatures(unittest.TestCase):
def setUp(self):
+ self.head = of_header.Header()
self.message = features_reply.SwitchFeatures()
self.message.header.xid = 1
self.message.datapath_id = 1
@@ -26,8 +29,10 @@ class TestSwitchFeatures(unittest.TestCase):
# TODO
pass
- @unittest.skip('Not yet implemented')
def test_unpack(self):
"""[Controller2Switch/FeaturesReply] - unpacking"""
- # TODO
- pass
+ filename = os.path.join(os.path.dirname(os.path.realpath('__file__')),
+ 'raw/v0x01/ofpt_features_reply.dat')
+ with open(filename, 'rb') as f:
+ self.head.unpack(f.read(8))
+ self.assertEqual(self.message.unpack(f.read()), None)
diff --git a/tests/v0x01/test_controller2switch/test_features_request.py b/tests/v0x01/test_controller2switch/test_features_request.py
index 54d1121..5822823 100644
--- a/tests/v0x01/test_controller2switch/test_features_request.py
+++ b/tests/v0x01/test_controller2switch/test_features_request.py
@@ -1,11 +1,14 @@
import unittest
+import os
from pyof.v0x01.controller2switch import features_request
+from pyof.v0x01.common import header as of_header
class TestFeaturesRequest(unittest.TestCase):
def setUp(self):
self.message = features_request.FeaturesRequest(1)
+ self.head = of_header.Header()
def test_get_size(self):
"""[Controller2Switch/FeaturesRequest] - size 8"""
@@ -17,8 +20,11 @@ class TestFeaturesRequest(unittest.TestCase):
# TODO
pass
- @unittest.skip('Not yet implemented')
def test_unpack(self):
"""[Controller2Switch/FeaturesRequest] - unpacking"""
- # TODO
- pass
+ filename = os.path.join(os.path.dirname(os.path.realpath('__file__')),
+ 'raw/v0x01/ofpt_features_request.dat')
+ f = open(filename, 'rb')
+ self.head.unpack(f.read(8))
+ self.assertEqual(self.message.unpack(f.read()), None)
+ f.close()
diff --git a/tests/v0x01/test_controller2switch/test_flow_mod.py b/tests/v0x01/test_controller2switch/test_flow_mod.py
index 7e69470..8d6eff2 100644
--- a/tests/v0x01/test_controller2switch/test_flow_mod.py
+++ b/tests/v0x01/test_controller2switch/test_flow_mod.py
@@ -1,13 +1,17 @@
import unittest
+import os
from pyof.v0x01.common import flow_match
from pyof.v0x01.common import phy_port
from pyof.v0x01.controller2switch import flow_mod
+from pyof.v0x01.common import header as of_header
class TestFlowMod(unittest.TestCase):
def setUp(self):
+ self.head = of_header.Header()
+
self.message = flow_mod.FlowMod()
self.message.header.xid = 1
self.message.command = flow_mod.FlowModCommand.OFPFC_ADD
@@ -20,8 +24,8 @@ class TestFlowMod(unittest.TestCase):
self.message.out_port = phy_port.Port.OFPP_NONE
self.message.flags = flow_mod.FlowModFlags.OFPFF_EMERG
self.message.match.in_port = 80
- self.message.match.dl_src = [1, 2, 3, 4, 5, 6]
- self.message.match.dl_dst = [1, 2, 3, 4, 5, 6]
+ self.message.match.dl_src = '1a:2b:3c:4d:5e:6f'
+ self.message.match.dl_dst = '6a:5b:4c:43:2e:1f'
self.message.match.dl_vlan = 1
self.message.match.dl_vlan_pcp = 1
self.message.match.dl_type = 1
@@ -42,8 +46,24 @@ class TestFlowMod(unittest.TestCase):
# TODO
pass
- @unittest.skip('Not yet implemented')
- def test_unpack(self):
+ def test_unpack_add(self):
"""[Controller2Switch/FlowMod] - unpacking"""
- # TODO
- pass
+ filename = os.path.join(os.path.dirname(os.path.realpath('__file__')),
+ 'raw/v0x01/ofpt_flow_add.dat')
+ with open(filename,'rb') as f:
+ self.head.unpack(f.read(8))
+ self.assertEqual(self.message.unpack(f.read()), None)
+
+ self.assertEqual(self.message.command,
+ flow_mod.FlowModCommand.OFPFC_ADD)
+
+ def test_unpack_delete(self):
+ """[Controller2Switch/FlowMod] - unpacking"""
+ filename = os.path.join(os.path.dirname(os.path.realpath('__file__')),
+ 'raw/v0x01/ofpt_flow_delete.dat')
+ with open(filename,'rb') as f:
+ self.head.unpack(f.read(8))
+ self.assertEqual(self.message.unpack(f.read()), None)
+
+ self.assertEqual(self.message.command,
+ flow_mod.FlowModCommand.OFPFC_DELETE)
diff --git a/tests/v0x01/test_symmetric/test_echo_reply.py b/tests/v0x01/test_symmetric/test_echo_reply.py
index e9ccaa3..347bfc6 100644
--- a/tests/v0x01/test_symmetric/test_echo_reply.py
+++ b/tests/v0x01/test_symmetric/test_echo_reply.py
@@ -2,17 +2,20 @@ import os
import unittest
from pyof.v0x01.symmetric import echo_reply
+from pyof.v0x01.common import header as of_header
class TestEchoReply(unittest.TestCase):
def setUp(self):
self.message = echo_reply.EchoReply(xid=0)
+ self.header = of_header.Header()
def test_get_size(self):
"""[Symmetric/EchoReply] - size 8"""
self.assertEqual(self.message.get_size(), 8)
+ @unittest.skip('Need to implement length update')
def test_pack(self):
"""[Symmetric/EchoReply] - packing"""
filename = os.path.join(os.path.dirname(os.path.realpath('__file__')),
@@ -22,8 +25,11 @@ class TestEchoReply(unittest.TestCase):
packed_msg = b'\x01\x03\x00\x08\x00\x00\x00\x00'
self.assertEqual(self.message.pack(), packed_msg)
- @unittest.skip('Not yet implemented')
def test_unpack(self):
"""[Symmetric/Reply] - unpacking"""
- # TODO
- pass
+ filename = os.path.join(os.path.dirname(os.path.realpath('__file__')),
+ 'raw/v0x01/ofpt_echo_reply.dat')
+ with open(filename,'rb') as f:
+ self.header.unpack(f.read(8))
+ msg_size = self.header.length._value
+ self.assertEqual(self.message.unpack(f.read()), None)
diff --git a/tests/v0x01/test_symmetric/test_echo_request.py b/tests/v0x01/test_symmetric/test_echo_request.py
index a69a0ea..3259ccd 100644
--- a/tests/v0x01/test_symmetric/test_echo_request.py
+++ b/tests/v0x01/test_symmetric/test_echo_request.py
@@ -2,17 +2,20 @@ import os
import unittest
from pyof.v0x01.symmetric import echo_request
+from pyof.v0x01.common import header as of_header
class TestEchoRequest(unittest.TestCase):
def setUp(self):
self.message = echo_request.EchoRequest(xid=0)
+ self.header = of_header.Header()
def test_get_size(self):
"""[Symmetric/EchoRequest] - size 8"""
self.assertEqual(self.message.get_size(), 8)
+ @unittest.skip('Need to implement length update')
def test_pack(self):
"""[Symmetric/EchoRequest] - packing"""
filename = os.path.join(os.path.dirname(os.path.realpath('__file__')),
@@ -22,8 +25,10 @@ class TestEchoRequest(unittest.TestCase):
packed_msg = b'\x01\x02\x00\x08\x00\x00\x00\x00'
self.assertEqual(self.message.pack(), packed_msg)
- @unittest.skip('Not yet implemented')
def test_unpack(self):
"""[Symmetric/EchoRequest] - unpacking"""
- # TODO
- pass
+ filename = os.path.join(os.path.dirname(os.path.realpath('__file__')),
+ 'raw/v0x01/ofpt_echo_request.dat')
+ with open(filename,'rb') as f:
+ self.header.unpack(f.read(8))
+ self.assertEqual(self.message.unpack(f.read()), None)
diff --git a/tests/v0x01/test_symmetric/test_hello.py b/tests/v0x01/test_symmetric/test_hello.py
index 2ed022f..d4bbd1d 100644
--- a/tests/v0x01/test_symmetric/test_hello.py
+++ b/tests/v0x01/test_symmetric/test_hello.py
@@ -2,17 +2,19 @@ import os
import unittest
from pyof.v0x01.symmetric import hello
-
+from pyof.v0x01.common import header as of_header
class TestHello(unittest.TestCase):
def setUp(self):
self.message = hello.Hello(xid=1)
+ self.header = of_header.Header()
def test_get_size(self):
"""[Symmetric/Hello] - size 8"""
self.assertEqual(self.message.get_size(), 8)
+ @unittest.skip('Need to implement length update')
def test_pack(self):
"""[Symmetric/Hello] - packing"""
filename = os.path.join(os.path.dirname(os.path.realpath('__file__')),
@@ -22,8 +24,10 @@ class TestHello(unittest.TestCase):
packed_hello = b'\x01\x00\x00\x08\x00\x00\x00\x01'
self.assertEqual(self.message.pack(), packed_hello)
- @unittest.skip('Not yet implemented')
def test_unpack(self):
"""[Symmetric/Hello] - unpacking"""
- # TODO
- pass
+ filename = os.path.join(os.path.dirname(os.path.realpath('__file__')),
+ 'raw/v0x01/ofpt_hello.dat')
+ with open(filename, 'rb') as f:
+ self.header.unpack(f.read(8))
+ self.assertEqual(self.message.unpack(f.read()), None)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 3
},
"num_modified_files": 6
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"coverage"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
-e git+https://github.com/kytos/python-openflow.git@9b9c0c3e86c73aaebdcb57bda00feeefbdbcfe09#egg=Kytos_OpenFlow_Parser_library
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: python-openflow
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
prefix: /opt/conda/envs/python-openflow
| [
"tests/v0x01/test_common/test_header.py::TestHeader::test_pack",
"tests/v0x01/test_common/test_header.py::TestHeader::test_size",
"tests/v0x01/test_common/test_header.py::TestHeader::test_unpack",
"tests/v0x01/test_controller2switch/test_barrier_reply.py::TestBarrierReply::test_get_size",
"tests/v0x01/test_controller2switch/test_barrier_reply.py::TestBarrierReply::test_unpack",
"tests/v0x01/test_controller2switch/test_barrier_request.py::TestBarrierRequest::test_get_size",
"tests/v0x01/test_controller2switch/test_barrier_request.py::TestBarrierRequest::test_unpack",
"tests/v0x01/test_controller2switch/test_features_reply.py::TestSwitchFeatures::test_get_size",
"tests/v0x01/test_controller2switch/test_features_request.py::TestFeaturesRequest::test_get_size",
"tests/v0x01/test_controller2switch/test_features_request.py::TestFeaturesRequest::test_unpack",
"tests/v0x01/test_controller2switch/test_flow_mod.py::TestFlowMod::test_get_size",
"tests/v0x01/test_controller2switch/test_flow_mod.py::TestFlowMod::test_unpack_add",
"tests/v0x01/test_controller2switch/test_flow_mod.py::TestFlowMod::test_unpack_delete",
"tests/v0x01/test_symmetric/test_echo_reply.py::TestEchoReply::test_get_size",
"tests/v0x01/test_symmetric/test_echo_reply.py::TestEchoReply::test_unpack",
"tests/v0x01/test_symmetric/test_echo_request.py::TestEchoRequest::test_get_size",
"tests/v0x01/test_symmetric/test_echo_request.py::TestEchoRequest::test_unpack",
"tests/v0x01/test_symmetric/test_hello.py::TestHello::test_get_size",
"tests/v0x01/test_symmetric/test_hello.py::TestHello::test_unpack"
]
| [
"tests/v0x01/test_controller2switch/test_features_reply.py::TestSwitchFeatures::test_unpack"
]
| []
| []
| MIT License | 573 | [
"pyof/v0x01/foundation/base.py",
"pyof/v0x01/controller2switch/flow_mod.py",
"pyof/v0x01/foundation/exceptions.py",
"pyof/v0x01/foundation/basic_types.py",
"pyof/v0x01/common/header.py",
"pyof/v0x01/common/phy_port.py"
]
| [
"pyof/v0x01/foundation/base.py",
"pyof/v0x01/controller2switch/flow_mod.py",
"pyof/v0x01/foundation/exceptions.py",
"pyof/v0x01/foundation/basic_types.py",
"pyof/v0x01/common/header.py",
"pyof/v0x01/common/phy_port.py"
]
|
|
FundersClub__bai-lockbox-2 | c7e4fbc207b21953e4741ba55f787eaa108e55fb | 2016-06-06 23:19:28 | c7e4fbc207b21953e4741ba55f787eaa108e55fb | diff --git a/.gitignore b/.gitignore
index 9bec050..87bf718 100644
--- a/.gitignore
+++ b/.gitignore
@@ -11,6 +11,7 @@
__pycache__/
*.py[cod]
*$py.class
+*.egg
*.egg-info/
.eggs/
.tox/
diff --git a/LICENSE.txt b/LICENSE.txt
index 65c5ca8..11b3d94 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -1,165 +1,201 @@
- GNU LESSER GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-
- This version of the GNU Lesser General Public License incorporates
-the terms and conditions of version 3 of the GNU General Public
-License, supplemented by the additional permissions listed below.
-
- 0. Additional Definitions.
-
- As used herein, "this License" refers to version 3 of the GNU Lesser
-General Public License, and the "GNU GPL" refers to version 3 of the GNU
-General Public License.
-
- "The Library" refers to a covered work governed by this License,
-other than an Application or a Combined Work as defined below.
-
- An "Application" is any work that makes use of an interface provided
-by the Library, but which is not otherwise based on the Library.
-Defining a subclass of a class defined by the Library is deemed a mode
-of using an interface provided by the Library.
-
- A "Combined Work" is a work produced by combining or linking an
-Application with the Library. The particular version of the Library
-with which the Combined Work was made is also called the "Linked
-Version".
-
- The "Minimal Corresponding Source" for a Combined Work means the
-Corresponding Source for the Combined Work, excluding any source code
-for portions of the Combined Work that, considered in isolation, are
-based on the Application, and not on the Linked Version.
-
- The "Corresponding Application Code" for a Combined Work means the
-object code and/or source code for the Application, including any data
-and utility programs needed for reproducing the Combined Work from the
-Application, but excluding the System Libraries of the Combined Work.
-
- 1. Exception to Section 3 of the GNU GPL.
-
- You may convey a covered work under sections 3 and 4 of this License
-without being bound by section 3 of the GNU GPL.
-
- 2. Conveying Modified Versions.
-
- If you modify a copy of the Library, and, in your modifications, a
-facility refers to a function or data to be supplied by an Application
-that uses the facility (other than as an argument passed when the
-facility is invoked), then you may convey a copy of the modified
-version:
-
- a) under this License, provided that you make a good faith effort to
- ensure that, in the event an Application does not supply the
- function or data, the facility still operates, and performs
- whatever part of its purpose remains meaningful, or
-
- b) under the GNU GPL, with none of the additional permissions of
- this License applicable to that copy.
-
- 3. Object Code Incorporating Material from Library Header Files.
-
- The object code form of an Application may incorporate material from
-a header file that is part of the Library. You may convey such object
-code under terms of your choice, provided that, if the incorporated
-material is not limited to numerical parameters, data structure
-layouts and accessors, or small macros, inline functions and templates
-(ten or fewer lines in length), you do both of the following:
-
- a) Give prominent notice with each copy of the object code that the
- Library is used in it and that the Library and its use are
- covered by this License.
-
- b) Accompany the object code with a copy of the GNU GPL and this license
- document.
-
- 4. Combined Works.
-
- You may convey a Combined Work under terms of your choice that,
-taken together, effectively do not restrict modification of the
-portions of the Library contained in the Combined Work and reverse
-engineering for debugging such modifications, if you also do each of
-the following:
-
- a) Give prominent notice with each copy of the Combined Work that
- the Library is used in it and that the Library and its use are
- covered by this License.
-
- b) Accompany the Combined Work with a copy of the GNU GPL and this license
- document.
-
- c) For a Combined Work that displays copyright notices during
- execution, include the copyright notice for the Library among
- these notices, as well as a reference directing the user to the
- copies of the GNU GPL and this license document.
-
- d) Do one of the following:
-
- 0) Convey the Minimal Corresponding Source under the terms of this
- License, and the Corresponding Application Code in a form
- suitable for, and under terms that permit, the user to
- recombine or relink the Application with a modified version of
- the Linked Version to produce a modified Combined Work, in the
- manner specified by section 6 of the GNU GPL for conveying
- Corresponding Source.
-
- 1) Use a suitable shared library mechanism for linking with the
- Library. A suitable mechanism is one that (a) uses at run time
- a copy of the Library already present on the user's computer
- system, and (b) will operate properly with a modified version
- of the Library that is interface-compatible with the Linked
- Version.
-
- e) Provide Installation Information, but only if you would otherwise
- be required to provide such information under section 6 of the
- GNU GPL, and only to the extent that such information is
- necessary to install and execute a modified version of the
- Combined Work produced by recombining or relinking the
- Application with a modified version of the Linked Version. (If
- you use option 4d0, the Installation Information must accompany
- the Minimal Corresponding Source and Corresponding Application
- Code. If you use option 4d1, you must provide the Installation
- Information in the manner specified by section 6 of the GNU GPL
- for conveying Corresponding Source.)
-
- 5. Combined Libraries.
-
- You may place library facilities that are a work based on the
-Library side by side in a single library together with other library
-facilities that are not Applications and are not covered by this
-License, and convey such a combined library under terms of your
-choice, if you do both of the following:
-
- a) Accompany the combined library with a copy of the same work based
- on the Library, uncombined with any other library facilities,
- conveyed under the terms of this License.
-
- b) Give prominent notice with the combined library that part of it
- is a work based on the Library, and explaining where to find the
- accompanying uncombined form of the same work.
-
- 6. Revised Versions of the GNU Lesser General Public License.
-
- The Free Software Foundation may publish revised and/or new versions
-of the GNU Lesser General Public License from time to time. Such new
-versions will be similar in spirit to the present version, but may
-differ in detail to address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Library as you received it specifies that a certain numbered version
-of the GNU Lesser General Public License "or any later version"
-applies to it, you have the option of following the terms and
-conditions either of that published version or of any later version
-published by the Free Software Foundation. If the Library as you
-received it does not specify a version number of the GNU Lesser
-General Public License, you may choose any version of the GNU Lesser
-General Public License ever published by the Free Software Foundation.
-
- If the Library as you received it specifies that a proxy can decide
-whether future versions of the GNU Lesser General Public License shall
-apply, that proxy's public statement of acceptance of any version is
-permanent authorization for you to choose that version for the
-Library.
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016 FundersClub Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/lockbox/exceptions.py b/lockbox/exceptions.py
index c414d89..5d5c9db 100644
--- a/lockbox/exceptions.py
+++ b/lockbox/exceptions.py
@@ -1,19 +1,3 @@
-# This file is part of bai-lockbox.
-
-# bai-lockbox is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as
-# published by the Free Software Foundation, either version 3 of the
-# License, or (at your option) any later version.
-
-# bai-lockbox is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-
-# You should have received a copy of the GNU Lesser General Public
-# License along with bai-lockbox. If not, see
-# <http://www.gnu.org/licenses/>.
-
class LockboxError(Exception):
pass
@@ -29,7 +13,7 @@ class LockboxParseError(LockboxError):
'''Base exception for problems related to reading a BAI Lockbox
record.
'''
- raw_line = ''
+ pass
class LockboxConsistencyError(LockboxError):
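After this change the parse errors carry no extra state, but they stay catchable through the shared `LockboxError` base class. A minimal sketch, assuming only the names in this file and the message format raised in `parser.py` below:

```python
from lockbox.exceptions import LockboxError, LockboxParseError

try:
    raise LockboxParseError('unknown record type 3')
except LockboxError as exc:  # the base class still catches every lockbox error
    print('parse failed:', exc)
```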
diff --git a/lockbox/parser.py b/lockbox/parser.py
index 9eb6120..43407fd 100644
--- a/lockbox/parser.py
+++ b/lockbox/parser.py
@@ -1,19 +1,3 @@
-# This file is part of bai-lockbox.
-
-# bai-lockbox is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as
-# published by the Free Software Foundation, either version 3 of the
-# License, or (at your option) any later version.
-
-# bai-lockbox is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-
-# You should have received a copy of the GNU Lesser General Public
-# License along with bai-lockbox. If not, see
-# <http://www.gnu.org/licenses/>.
-
import six
import sys
@@ -42,6 +26,8 @@ class Check(object):
self.number = detail.check_number
self.amount = detail.check_amount
self.memo = detail.memo
+        self.sender_routing_number = detail.transit_routing_number
+        self.sender_account_number = detail.dd_account_number
class LockboxDetail(object):
@@ -66,8 +52,8 @@ class LockboxDetail(object):
def __getattr__(self, attr):
if attr in dir(self.record):
return getattr(self.record, attr)
-
- return self.get(attr)
+ else:
+ return super(LockboxDetail, self).__getattr__(attr)
class LockboxBatch(object):
@@ -78,13 +64,8 @@ class LockboxBatch(object):
@property
def checks(self):
- checks = []
-
- for detail in self.details:
- checks.append(Check(detail))
+ return [Check(d) for d in self.details]
- return checks
-
def validate(self):
if self.summary is None:
raise LockboxParseError(
@@ -201,7 +182,7 @@ class LockboxFile(object):
self.destination_trailer_record = None
self.cur_lockbox = None
-
+
@property
def checks(self):
checks = []
@@ -210,7 +191,7 @@ class LockboxFile(object):
checks.extend(lockbox.checks)
return checks
-
+
def validate(self):
for lockbox in self.lockboxes:
lockbox.validate()
@@ -262,51 +243,49 @@ class LockboxFile(object):
else:
self.cur_lockbox.add_record(record)
-
-def read_lockbox_lines(lines):
- lockbox_file = LockboxFile()
-
- for line, line_num in zip(lines, range(1, len(lines)+1)):
- try:
- if line[0] == str(LockboxBatchTotalRecord.RECORD_TYPE_NUM):
- rec = LockboxBatchTotalRecord(line)
- elif line[0] == str(LockboxDestinationTrailerRecord.RECORD_TYPE_NUM):
- rec = LockboxDestinationTrailerRecord(line)
- elif line[0] == str(LockboxDetailHeader.RECORD_TYPE_NUM):
- rec = LockboxDetailHeader(line)
- elif line[0] == str(LockboxDetailOverflowRecord.RECORD_TYPE_NUM):
- rec = LockboxDetailOverflowRecord(line)
- elif line[0] == str(LockboxDetailRecord.RECORD_TYPE_NUM):
- rec = LockboxDetailRecord(line)
- elif line[0] == str(LockboxImmediateAddressHeader.RECORD_TYPE_NUM):
- rec = LockboxImmediateAddressHeader(line)
- elif line[0] == str(LockboxServiceRecord.RECORD_TYPE_NUM):
- rec = LockboxServiceRecord(line)
- elif line[0] == str(LockboxServiceTotalRecord.RECORD_TYPE_NUM):
- rec = LockboxServiceTotalRecord(line)
- else:
- raise LockboxParseError(
- 'unknown record type {}'.format(line[0])
+ @classmethod
+ def from_lines(cls, lines):
+ lines = [l.strip() for l in lines]
+ lockbox_file = cls()
+
+ for line_num, line in enumerate(lines, start=1):
+ try:
+ rec_type = int(line[0])
+ record_type_to_constructor = {
+ LockboxBatchTotalRecord.RECORD_TYPE_NUM: LockboxBatchTotalRecord,
+ LockboxDestinationTrailerRecord.RECORD_TYPE_NUM: LockboxDestinationTrailerRecord,
+ LockboxDetailHeader.RECORD_TYPE_NUM: LockboxDetailHeader,
+ LockboxDetailOverflowRecord.RECORD_TYPE_NUM: LockboxDetailOverflowRecord,
+ LockboxDetailRecord.RECORD_TYPE_NUM: LockboxDetailRecord,
+ LockboxImmediateAddressHeader.RECORD_TYPE_NUM: LockboxImmediateAddressHeader,
+ LockboxServiceRecord.RECORD_TYPE_NUM: LockboxServiceRecord,
+ LockboxServiceTotalRecord.RECORD_TYPE_NUM: LockboxServiceTotalRecord,
+ }
+
+ if rec_type not in record_type_to_constructor:
+ raise LockboxParseError(
+ 'unknown record type {}'.format(rec_type)
+ )
+
+ rec = record_type_to_constructor[rec_type](line)
+ lockbox_file.add_record(rec)
+ except Exception as e:
+ if not isinstance(e, LockboxError):
+ raise
+
+            # if this is some lockbox-related exception, create a new
+            # exception of the same kind we caught, but prepend the
+ # current line number to it so we know where to look while
+ # troubleshooting
+ six.reraise(
+ type(e),
+ 'Line {}: {} ("{}")'.format(line_num, str(e), line),
+ sys.exc_info()[2]
)
- lockbox_file.add_record(rec)
- except Exception as e:
- if not isinstance(e, LockboxError):
- raise
-
- # if this is some lockbox-related exception,create a new
- # exception of the same kind we caught, bet prepend the
- # current line number to it so we know where to look while
- # troubleshooting
- six.reraise(
- type(e),
- 'Line {}: {} ("{}")'.format(line_num, str(e), line),
- sys.exc_info()[2]
- )
-
- lockbox_file.validate()
- return lockbox_file
-
+ lockbox_file.validate()
+ return lockbox_file
-def read_lockbox_file(inf):
- return read_lockbox_lines([line.strip() for line in inf.readlines()])
+ @classmethod
+ def from_file(cls, inf):
+ return LockboxFile.from_lines(inf.readlines())
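A hedged usage sketch of the classmethod API introduced just above; `lockbox.txt` is a made-up file name, and the `Check` attributes come from the `Check.__init__` hunk earlier in this patch:

```python
# Hypothetical driver code, not part of the patch itself.
from lockbox.parser import LockboxFile

with open('lockbox.txt') as inf:      # made-up input file
    lockbox_file = LockboxFile.from_file(inf)

for check in lockbox_file.checks:
    # Check carries number, amount, memo, sender_routing_number and
    # sender_account_number (see the Check.__init__ hunk above).
    print(check.number, check.amount, check.memo)
```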
diff --git a/lockbox/records.py b/lockbox/records.py
index 71fd5c9..d491c87 100644
--- a/lockbox/records.py
+++ b/lockbox/records.py
@@ -1,25 +1,16 @@
-# This file is part of bai-lockbox.
-
-# bai-lockbox is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as
-# published by the Free Software Foundation, either version 3 of the
-# License, or (at your option) any later version.
-
-# bai-lockbox is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-
-# You should have received a copy of the GNU Lesser General Public
-# License along with bai-lockbox. If not, see
-# <http://www.gnu.org/licenses/>.
-
import datetime
import re
+import six
from .exceptions import LockboxDefinitionError, LockboxParseError
+class LockboxFieldType(object):
+ Numeric = 'numeric'
+ Alphanumeric = 'alphanumeric'
+ Blank = 'blank'
+
+
class LockboxBaseRecord(object):
'''The format of the field 'fields' should be an array of dicts like
this:
@@ -30,11 +21,12 @@ class LockboxBaseRecord(object):
...
}
- Valid types: 'alphanumeric', 'numeric', 'blank'
+ Valid types are listed inside the LockboxFieldType class.
Note: The record type which is determined by first character of a
    line is defined by setting MAX_RECORD_LENGTH in a derived class
rather than by adding it to the 'fields' field.
+
'''
MAX_RECORD_LENGTH = 104
@@ -56,7 +48,7 @@ class LockboxBaseRecord(object):
# we can only parse if there are actually fields defined
self.fields['record_type'] = {
'location': (0,1),
- 'type': 'numeric',
+ 'type': LockboxFieldType.Numeric,
}
self._parse()
@@ -67,7 +59,7 @@ class LockboxBaseRecord(object):
# has already been performed by the regexps in _parse(),
# so at this point we just create any missing fields by
# doing self.my_field = self._my_field_raw
- for field_name, field_def in self.fields.items():
+ for field_name, field_def in six.iteritems(self.fields):
if hasattr(self, field_name):
continue
@@ -75,14 +67,14 @@ class LockboxBaseRecord(object):
raw_field_val = (
None
- if field_def['type'] == 'blank'
+ if field_def['type'] == LockboxFieldType.Blank
else getattr(self, raw_field_name, None)
)
setattr(self, field_name, raw_field_val)
def _parse(self):
- for field_name, field_def in self.fields.items():
+ for field_name, field_def in six.iteritems(self.fields):
raw_field_name = '_{}_raw'.format(field_name)
if hasattr(self, field_name):
raise AttributeError(
@@ -91,15 +83,14 @@ class LockboxBaseRecord(object):
)
)
- start_col = field_def['location'][0]
- end_col = field_def['location'][1]
+ start_col, end_col = field_def['location']
raw_field = self.raw_record_text[start_col:end_col]
- if field_def['type'] == 'alphanumeric':
+ if field_def['type'] == LockboxFieldType.Alphanumeric:
patt = re.compile(r'^[ A-Z0-9]+$')
- elif field_def['type'] == 'numeric':
+ elif field_def['type'] == LockboxFieldType.Numeric:
patt = re.compile(r'^[0-9]+$')
- elif field_def['type'] == 'blank':
+ elif field_def['type'] == LockboxFieldType.Blank:
patt = re.compile(r'^\s*$')
else:
raise LockboxDefinitionError(
@@ -186,12 +177,12 @@ class LockboxImmediateAddressHeader(LockboxBaseRecord):
RECORD_TYPE_NUM = 1
fields = {
- 'priority_code': { 'location': (1, 3), 'type': 'numeric' },
- 'destination_id': { 'location': (3, 13), 'type': 'alphanumeric' },
- 'originating_trn': { 'location': (13, 23), 'type': 'numeric' },
- 'processing_date': { 'location': (23, 29), 'type': 'numeric' },
- 'processing_time': { 'location': (29, 33), 'type': 'numeric' },
- 'filler': {'location': (33, 104), 'type': 'blank' },
+ 'priority_code': { 'location': (1, 3), 'type': LockboxFieldType.Numeric },
+ 'destination_id': { 'location': (3, 13), 'type': LockboxFieldType.Alphanumeric },
+ 'originating_trn': { 'location': (13, 23), 'type': LockboxFieldType.Numeric },
+ 'processing_date': { 'location': (23, 29), 'type': LockboxFieldType.Numeric },
+ 'processing_time': { 'location': (29, 33), 'type': LockboxFieldType.Numeric },
+ 'filler': {'location': (33, 104), 'type': LockboxFieldType.Blank },
}
def validate(self):
@@ -205,14 +196,14 @@ class LockboxServiceRecord(LockboxBaseRecord):
fields = {
'ultimate_dest_and_origin': {
'location': (1, 21),
- 'type': 'alphanumeric',
+ 'type': LockboxFieldType.Alphanumeric,
},
- 'ref_code': {'location': (21, 31), 'type': 'numeric'},
- 'service_type': {'location': (31, 34), 'type': 'numeric'},
- 'record_size': {'location': (34, 37), 'type': 'numeric'},
- 'blocking_factor': {'location': (37, 41), 'type': 'numeric'},
- 'format_code': {'location': (41,42), 'type': 'numeric'},
- 'filler': {'location': (42, 104), 'type': 'blank'},
+ 'ref_code': {'location': (21, 31), 'type': LockboxFieldType.Numeric},
+ 'service_type': {'location': (31, 34), 'type': LockboxFieldType.Numeric},
+ 'record_size': {'location': (34, 37), 'type': LockboxFieldType.Numeric},
+ 'blocking_factor': {'location': (37, 41), 'type': LockboxFieldType.Numeric},
+ 'format_code': {'location': (41,42), 'type': LockboxFieldType.Numeric},
+ 'filler': {'location': (42, 104), 'type': LockboxFieldType.Blank},
}
@@ -220,15 +211,15 @@ class LockboxDetailHeader(LockboxBaseRecord):
RECORD_TYPE_NUM = 5
fields = {
- 'batch_number': { 'location': (1, 4), 'type': 'numeric' },
- 'ref_code': { 'location': (4, 7, ), 'type': 'numeric' },
- 'lockbox_number': { 'location': (7, 14), 'type': 'numeric' },
- 'deposit_date': { 'location': (14, 20), 'type': 'numeric' },
+ 'batch_number': { 'location': (1, 4), 'type': LockboxFieldType.Numeric },
+ 'ref_code': { 'location': (4, 7, ), 'type': LockboxFieldType.Numeric },
+ 'lockbox_number': { 'location': (7, 14), 'type': LockboxFieldType.Numeric },
+ 'deposit_date': { 'location': (14, 20), 'type': LockboxFieldType.Numeric },
'ultimate_dest_and_origin': {
'location': (20, 40),
- 'type': 'alphanumeric',
+ 'type': LockboxFieldType.Alphanumeric,
},
- 'filler': {'location': (40, 104), 'type': 'blank' },
+ 'filler': {'location': (40, 104), 'type': LockboxFieldType.Blank },
}
def validate(self):
@@ -239,16 +230,16 @@ class LockboxDetailRecord(LockboxBaseRecord):
RECORD_TYPE_NUM = 6
fields = {
- 'batch_number': { 'location': (1, 4), 'type': 'numeric' },
- 'item_number': { 'location': (4, 7), 'type': 'numeric' },
- 'check_amount': { 'location': (7, 17), 'type': 'numeric' },
- 'transit_routing_number': { 'location': (17, 26), 'type': 'numeric' },
- 'dd_account_number': { 'location': (26, 36), 'type': 'numeric' },
- 'check_number': { 'location': (36, 46), 'type': 'numeric' },
- 'check_date': { 'location': (46, 52), 'type': 'numeric' },
- 'remitter_name': { 'location': (52, 82), 'type': 'alphanumeric' },
- 'payee_name': { 'location': (82, 102), 'type': 'alphanumeric' },
- 'filler': {'location': (102, 104), 'type': 'blank' },
+ 'batch_number': { 'location': (1, 4), 'type': LockboxFieldType.Numeric },
+ 'item_number': { 'location': (4, 7), 'type': LockboxFieldType.Numeric },
+ 'check_amount': { 'location': (7, 17), 'type': LockboxFieldType.Numeric },
+ 'transit_routing_number': { 'location': (17, 26), 'type': LockboxFieldType.Numeric },
+ 'dd_account_number': { 'location': (26, 36), 'type': LockboxFieldType.Numeric },
+ 'check_number': { 'location': (36, 46), 'type': LockboxFieldType.Numeric },
+ 'check_date': { 'location': (46, 52), 'type': LockboxFieldType.Numeric },
+ 'remitter_name': { 'location': (52, 82), 'type': LockboxFieldType.Alphanumeric },
+ 'payee_name': { 'location': (82, 102), 'type': LockboxFieldType.Alphanumeric },
+ 'filler': {'location': (102, 104), 'type': LockboxFieldType.Blank },
}
def validate(self):
@@ -269,13 +260,13 @@ class LockboxDetailOverflowRecord(LockboxBaseRecord):
RECORD_TYPE_NUM = 4
fields = {
- 'batch_number': { 'location': (1, 4), 'type': 'numeric' },
- 'item_number': { 'location': (4, 7), 'type': 'numeric' },
- 'overflow_record_type': { 'location': (7, 8), 'type': 'numeric' },
- 'overflow_sequence_number': { 'location': (8, 10), 'type': 'numeric' },
- 'overflow_indicator': { 'location': (10, 11), 'type': 'numeric' },
- 'memo_line': { 'location': (11, 41), 'type': 'alphanumeric' },
- 'filler': {'location': (41, 104), 'type': 'blank' },
+ 'batch_number': { 'location': (1, 4), 'type': LockboxFieldType.Numeric },
+ 'item_number': { 'location': (4, 7), 'type': LockboxFieldType.Numeric },
+ 'overflow_record_type': { 'location': (7, 8), 'type': LockboxFieldType.Numeric },
+ 'overflow_sequence_number': { 'location': (8, 10), 'type': LockboxFieldType.Numeric },
+ 'overflow_indicator': { 'location': (10, 11), 'type': LockboxFieldType.Numeric },
+ 'memo_line': { 'location': (11, 41), 'type': LockboxFieldType.Alphanumeric },
+ 'filler': {'location': (41, 104), 'type': LockboxFieldType.Blank },
}
def validate(self):
@@ -289,16 +280,16 @@ class LockboxBatchTotalRecord(LockboxBaseRecord):
RECORD_TYPE_NUM = 7
fields = {
- 'batch_number': { 'location': (1, 4), 'type': 'numeric' },
- 'item_number': { 'location': (4, 7), 'type': 'numeric' },
- 'lockbox_number': { 'location': (7, 14), 'type': 'numeric' },
- 'deposit_date': { 'location': (14, 20), 'type': 'numeric' },
+ 'batch_number': { 'location': (1, 4), 'type': LockboxFieldType.Numeric },
+ 'item_number': { 'location': (4, 7), 'type': LockboxFieldType.Numeric },
+ 'lockbox_number': { 'location': (7, 14), 'type': LockboxFieldType.Numeric },
+ 'deposit_date': { 'location': (14, 20), 'type': LockboxFieldType.Numeric },
'total_number_remittances': {
'location': (20, 23),
- 'type': 'numeric'
+ 'type': LockboxFieldType.Numeric
},
- 'check_dollar_total': { 'location': (23, 33), 'type': 'numeric' },
- 'filler': {'location': (33, 104), 'type': 'blank' },
+ 'check_dollar_total': { 'location': (23, 33), 'type': LockboxFieldType.Numeric },
+ 'filler': {'location': (33, 104), 'type': LockboxFieldType.Blank },
}
def validate(self):
@@ -313,13 +304,13 @@ class LockboxServiceTotalRecord(LockboxBaseRecord):
RECORD_TYPE_NUM = 8
fields = {
- 'batch_number': { 'location': (1, 4), 'type': 'numeric' },
- 'item_number': { 'location': (4, 7), 'type': 'numeric' },
- 'lockbox_number': { 'location': (7, 14), 'type': 'numeric' },
- 'deposit_date': { 'location': (14, 20), 'type': 'numeric' },
- 'total_num_checks': { 'location': (20, 24), 'type': 'numeric' },
- 'check_dollar_total': { 'location': (24, 34), 'type': 'numeric' },
- 'filler': {'location': (34, 104), 'type': 'blank' },
+ 'batch_number': { 'location': (1, 4), 'type': LockboxFieldType.Numeric },
+ 'item_number': { 'location': (4, 7), 'type': LockboxFieldType.Numeric },
+ 'lockbox_number': { 'location': (7, 14), 'type': LockboxFieldType.Numeric },
+ 'deposit_date': { 'location': (14, 20), 'type': LockboxFieldType.Numeric },
+ 'total_num_checks': { 'location': (20, 24), 'type': LockboxFieldType.Numeric },
+ 'check_dollar_total': { 'location': (24, 34), 'type': LockboxFieldType.Numeric },
+ 'filler': {'location': (34, 104), 'type': LockboxFieldType.Blank },
}
def validate(self):
@@ -333,8 +324,8 @@ class LockboxDestinationTrailerRecord(LockboxBaseRecord):
RECORD_TYPE_NUM = 9
fields = {
- 'total_num_records': { 'location': (1, 7), 'type': 'numeric' },
- 'filler': {'location': (7, 104), 'type': 'blank' },
+ 'total_num_records': { 'location': (1, 7), 'type': LockboxFieldType.Numeric },
+ 'filler': {'location': (7, 104), 'type': LockboxFieldType.Blank },
}
def validate(self):
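Each field type above maps onto one of three regular expressions in `LockboxBaseRecord._parse()`. A self-contained check of those exact patterns, with made-up sample inputs:

```python
import re

assert re.match(r'^[ A-Z0-9]+$', 'ABC 123')  # alphanumeric: upper, digits, space
assert not re.match(r'^[ A-Z0-9]+$', 'abc')  # lowercase is rejected
assert re.match(r'^[0-9]+$', '0001234')      # numeric: digits only
assert re.match(r'^\s*$', '    ')            # blank: whitespace (or empty) only
```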
diff --git a/setup.py b/setup.py
index aebdbc4..4cbeca1 100644
--- a/setup.py
+++ b/setup.py
@@ -9,15 +9,13 @@ setup(
version='0.0.1',
packages=find_packages(exclude=['docs', 'tests']),
install_requires=[
- 'django-jsonfield>=0.8.11',
'six',
- 'django-apptemplates',
],
test_suite='nose.collector',
tests_require=['nose', 'coverage'],
include_package_data=True,
- license='LGPLv3',
- description='An elegant solution for keeping a relational log of chronological events in a Django application.',
+ license='Apache License 2.0',
+ description='A library for parsing files in the BAI lockbox format.',
url='https://www.github.com/FundersClub/bai-lockbox',
author='Jon Friedman / FundersClub Inc.',
author_email='[email protected]',
@@ -27,8 +25,8 @@ setup(
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
- 'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',
+ 'License :: OSI Approved :: Apache Software License',
'Intended Audience :: Developers',
- 'Intended Audience :: Financial and Insurance Industry'
+ 'Intended Audience :: Financial and Insurance Industry'
],
)
diff --git a/tox.ini b/tox.ini
index 4c556c8..892779a 100644
--- a/tox.ini
+++ b/tox.ini
@@ -2,8 +2,7 @@
envlist =
py27
py34
- pypy
[testenv]
deps = -rrequirements.txt
-commands = python setup.py nosetests
\ No newline at end of file
+commands = python setup.py nosetests
| Code review
* [x] https://github.com/FundersClub/bai-lockbox/blob/master/setup.py#L12 - are all of these used?
* [x] https://github.com/FundersClub/bai-lockbox/blob/master/setup.py#L20 - incorrect
* [x] https://github.com/FundersClub/bai-lockbox/blob/master/lockbox/exceptions.py#L32 - what is this?
* [x] https://github.com/FundersClub/bai-lockbox/blob/master/lockbox/parser.py#L70 - Where is `.get()` defined?
* [x] https://github.com/FundersClub/bai-lockbox/blob/master/lockbox/parser.py#L81 - Why not something like `return map(Check, self.details)` (or good ol' list comprehension)
* [x] https://github.com/FundersClub/bai-lockbox/blob/master/lockbox/parser.py#L269 - Look up `enumerate()`
* [x] https://github.com/FundersClub/bai-lockbox/blob/master/lockbox/parser.py#L266 and https://github.com/FundersClub/bai-lockbox/blob/master/lockbox/parser.py#L311 - I would make these `@classmethod`s in `LockboxFile` and call them something like `from_lines` and `from_file`
* [x] https://github.com/FundersClub/bai-lockbox/blob/master/lockbox/parser.py#L312 - I would make the `read_lockbox_lines` perform the `strip()` as it is always desired
* [x] https://github.com/FundersClub/bai-lockbox/blob/master/lockbox/parser.py#L271 - Instead of all those `str()` and repeating `line[0]` why not: 1) convert `int(line[0])` and store it in a local variable and then compare. 2) Create a dictionary that maps the record number to the class. (see *1 below)
* [x] https://github.com/FundersClub/bai-lockbox/blob/master/lockbox/records.py#L70 - Please use `iteritems`, no need to create an in-memory list
* [x] https://github.com/FundersClub/bai-lockbox/blob/master/lockbox/records.py#L85 - `iteritems()`
* [x] https://github.com/FundersClub/bai-lockbox/blob/master/lockbox/records.py#L94 - `start_col, end_col = field_def['location']`
* [x] I think we should make field types a constant and then use that. Reduces chances for unnoticed typos. (see *2 below)
----
*1
```
class FieldType(object):
    Numeric = 'numeric'
    ...
```
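A fuller sketch of this suggestion, hedged: the names below mirror the `LockboxFieldType` class the patch above settles on, with the three types `records.py` actually uses.

```python
class LockboxFieldType(object):
    Numeric = 'numeric'
    Alphanumeric = 'alphanumeric'
    Blank = 'blank'

# Referencing the constant instead of a bare string means a typo raises an
# AttributeError immediately instead of silently never matching:
fields = {
    'record_type': {'location': (0, 1), 'type': LockboxFieldType.Numeric},
}
```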
----
*2
```
record_types = {
    LockboxBatchTotalRecord.RECORD_TYPE_NUM: LockboxBatchTotalRecord,
    ....
}
record_cls = record_types.get(int(line[0]))
if not record_cls: ...
rec = record_cls(line)
```
| FundersClub/bai-lockbox | diff --git a/lockbox/tests/test_parser.py b/lockbox/tests/test_parser.py
index 262f87d..1a2649a 100644
--- a/lockbox/tests/test_parser.py
+++ b/lockbox/tests/test_parser.py
@@ -1,25 +1,9 @@
-# This file is part of bai-lockbox.
-
-# bai-lockbox is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as
-# published by the Free Software Foundation, either version 3 of the
-# License, or (at your option) any later version.
-
-# bai-lockbox is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-
-# You should have received a copy of the GNU Lesser General Public
-# License along with bai-lockbox. If not, see
-# <http://www.gnu.org/licenses/>.
-
import datetime
import os
from unittest import TestCase
-from lockbox.parser import read_lockbox_file, read_lockbox_lines
+from lockbox.parser import LockboxFile
class TestLockboxParser(TestCase):
@@ -42,7 +26,7 @@ class TestLockboxParser(TestCase):
self.empty_lockbox_lines = [l.strip() for l in open(empty_lockbox_path, 'r').readlines()]
def test_parsing_valid_file(self):
- lockbox_file = read_lockbox_lines(self.valid_lockbox_lines)
+ lockbox_file = LockboxFile.from_lines(self.valid_lockbox_lines)
self.assertEqual(len(lockbox_file.checks), 1)
@@ -55,6 +39,6 @@ class TestLockboxParser(TestCase):
self.assertEqual(check.memo, 'CE554')
def test_parsing_file_with_no_checks(self):
- lockbox_file = read_lockbox_lines(self.empty_lockbox_lines)
+ lockbox_file = LockboxFile.from_lines(self.empty_lockbox_lines)
self.assertEqual(len(lockbox_file.checks), 0)
diff --git a/lockbox/tests/test_records.py b/lockbox/tests/test_records.py
index de5f1b3..19a52c5 100644
--- a/lockbox/tests/test_records.py
+++ b/lockbox/tests/test_records.py
@@ -1,19 +1,3 @@
-# This file is part of bai-lockbox.
-
-# bai-lockbox is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License as
-# published by the Free Software Foundation, either version 3 of the
-# License, or (at your option) any later version.
-
-# bai-lockbox is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-
-# You should have received a copy of the GNU Lesser General Public
-# License along with bai-lockbox. If not, see
-# <http://www.gnu.org/licenses/>.
-
import datetime
from unittest import TestCase
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 7
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | asgiref==3.8.1
-e git+https://github.com/FundersClub/bai-lockbox.git@c7e4fbc207b21953e4741ba55f787eaa108e55fb#egg=bai_lockbox
Django==4.2.20
django-apptemplates==1.5
django-jsonfield==1.4.1
exceptiongroup==1.2.2
iniconfig==2.1.0
nose==1.3.7
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
six==1.17.0
sqlparse==0.5.3
tomli==2.2.1
typing_extensions==4.13.0
| name: bai-lockbox
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- asgiref==3.8.1
- django==4.2.20
- django-apptemplates==1.5
- django-jsonfield==1.4.1
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- nose==1.3.7
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- six==1.17.0
- sqlparse==0.5.3
- tomli==2.2.1
- typing-extensions==4.13.0
prefix: /opt/conda/envs/bai-lockbox
| [
"lockbox/tests/test_parser.py::TestLockboxParser::test_parsing_file_with_no_checks",
"lockbox/tests/test_parser.py::TestLockboxParser::test_parsing_valid_file"
]
| []
| [
"lockbox/tests/test_records.py::TestRecordDefinitions::test_immediate_address_header",
"lockbox/tests/test_records.py::TestRecordDefinitions::test_invalid_alphanumeric_field",
"lockbox/tests/test_records.py::TestRecordDefinitions::test_invalid_numeric_field",
"lockbox/tests/test_records.py::TestRecordDefinitions::test_lockbox_batch_total_record",
"lockbox/tests/test_records.py::TestRecordDefinitions::test_lockbox_destination_trailer_record",
"lockbox/tests/test_records.py::TestRecordDefinitions::test_lockbox_detail_header",
"lockbox/tests/test_records.py::TestRecordDefinitions::test_lockbox_detail_overflow_record",
"lockbox/tests/test_records.py::TestRecordDefinitions::test_lockbox_detail_record",
"lockbox/tests/test_records.py::TestRecordDefinitions::test_lockbox_service_total_record",
"lockbox/tests/test_records.py::TestRecordDefinitions::test_overlong_record",
"lockbox/tests/test_records.py::TestRecordDefinitions::test_valid_lockbox_service_record"
]
| []
| Apache License 2.0 | 574 | [
"setup.py",
".gitignore",
"lockbox/exceptions.py",
"lockbox/records.py",
"tox.ini",
"LICENSE.txt",
"lockbox/parser.py"
]
| [
"setup.py",
".gitignore",
"lockbox/exceptions.py",
"lockbox/records.py",
"tox.ini",
"LICENSE.txt",
"lockbox/parser.py"
]
|
|
peterbe__premailer-167 | cc18022e334d5336e48f75bd4e0a73c98cc5942a | 2016-06-07 12:37:52 | cc18022e334d5336e48f75bd4e0a73c98cc5942a | graingert: I think it's such a trivial and unlikely to be used piece of code, that I'd rather just remove it and recommend people add their own:
```python
# now we can delete all 'class' attributes
for item in page.xpath('//@class'):
    parent = item.getparent()
    del parent.attrib['class']
```
peterbe: @graingert But I want the library to be as easy as possible. Also, as lazy as possible :)
graingert: It's your call, but imagine if this feature was not implemented, would it be worth adding?
I don't see a use-case for it.
coveralls:
[Coverage Status](https://coveralls.io/builds/6493685)
Coverage decreased (-0.7%) to 99.299% when pulling **4e1762899aca476de755ada50c95ec02072e24db on keep-classes-by-default-fixes-33** into **cc18022e334d5336e48f75bd4e0a73c98cc5942a on master**.
peterbe: One use case is that it's not hard to keep and it's nice to have it there
for people who need it.
Actually, if you don't have media queries and you have some really simple
CSS for your HTML emails, then having the `class` attributes still in means
the email gets unnecessarily larger.
I suggest we just keep the functionality around.
coveralls:
[Coverage Status](https://coveralls.io/builds/6493757)
Coverage remained the same at 100.0% when pulling **6e9df1c5587d133851f2185c63cf79998ad679ec on keep-classes-by-default-fixes-33** into **cc18022e334d5336e48f75bd4e0a73c98cc5942a on master**.
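To make the default change concrete, a minimal sketch using only names that appear in the patch and tests below (`Premailer`, `transform()`, `remove_classes`); the sample HTML is made up:

```python
from premailer import Premailer

html = ('<html><head><style>.stuff { color: red }</style></head>'
        '<body><p class="stuff">Hi</p></body></html>')

# New default (remove_classes=False): styles get inlined and class="stuff" stays.
print(Premailer(html).transform())

# The old stripping behaviour is still one keyword away:
print(Premailer(html, remove_classes=True).transform())
```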
| diff --git a/premailer/__init__.py b/premailer/__init__.py
index e831baa..570b55c 100644
--- a/premailer/__init__.py
+++ b/premailer/__init__.py
@@ -1,4 +1,4 @@
from __future__ import absolute_import, unicode_literals
from .premailer import Premailer, transform
-__version__ = '2.11.0'
+__version__ = '3.0.0'
diff --git a/premailer/premailer.py b/premailer/premailer.py
index de9839d..fc157ee 100644
--- a/premailer/premailer.py
+++ b/premailer/premailer.py
@@ -121,7 +121,7 @@ class Premailer(object):
exclude_pseudoclasses=True,
keep_style_tags=False,
include_star_selectors=False,
- remove_classes=True,
+ remove_classes=False,
capitalize_float_margin=False,
strip_important=True,
external_styles=None,
| keep class attributes
the transform() function replaces the "class" attribute with the appropriate styling, but it also removes the class from the html elements. I need the styling added inline, but the classes left within their elements, so that my @media (!important) styling can be inserted afterwards and still have classes to point to within the DOM.
I'm having a hard time changing your code to meet this need; do you have any thoughts on how I could keep the classes within the html elements but still inject the inline styling? | peterbe/premailer | diff --git a/premailer/tests/test_premailer.py b/premailer/tests/test_premailer.py
index 42213d8..5cdf6a7 100644
--- a/premailer/tests/test_premailer.py
+++ b/premailer/tests/test_premailer.py
@@ -170,6 +170,37 @@ class Tests(unittest.TestCase):
compare_html(expect_html, result_html)
+ def test_remove_classes(self):
+ """test the simplest case"""
+
+ html = """<html>
+ <head>
+ <title>Title</title>
+ <style type="text/css">
+ .stuff {
+ color: red;
+ }
+ </style>
+ </head>
+ <body>
+ <p class="stuff"><strong>Yes!</strong></p>
+ </body>
+ </html>"""
+
+ expect_html = """<html>
+ <head>
+ <title>Title</title>
+ </head>
+ <body>
+ <p style="color:red"><strong>Yes!</strong></p>
+ </body>
+ </html>"""
+
+ p = Premailer(html, remove_classes=True)
+ result_html = p.transform()
+
+ compare_html(expect_html, result_html)
+
def test_basic_html_shortcut_function(self):
"""test the plain transform function"""
html = """<html>
@@ -1088,7 +1119,7 @@ b
<head>
</head>
<body>
- <div style="color:red"></div>
+ <div class="example" style="color:red"></div>
</body>
</html>"""
@@ -1118,7 +1149,7 @@ b
<head>
</head>
<body>
- <div style="color:green"></div>
+ <div class="example" style="color:green"></div>
</body>
</html>"""
@@ -1148,7 +1179,7 @@ b
<head>
</head>
<body>
- <div style="color:green"></div>
+ <div class="example" style="color:green"></div>
</body>
</html>"""
@@ -1178,7 +1209,7 @@ b
<head>
</head>
<body>
- <div id="identified" style="color:green"></div>
+ <div class="example" id="identified" style="color:green"></div>
</body>
</html>"""
@@ -1195,7 +1226,7 @@ b
color: blue !important;
font-size: 12px;
}
- #identified {
+ #id {
color: green;
font-size: 22px;
}
@@ -1205,17 +1236,17 @@ b
</style>
</head>
<body>
- <div class="example makeblue" id="identified"></div>
+ <div class="example makeblue" id="id"></div>
</body>
</html>"""
expect_html = """<html>
- <head>
- </head>
- <body>
- <div id="identified" style="font-size:22px; color:blue"></div>
- </body>
- </html>"""
+<head>
+</head>
+<body>
+<div class="example makeblue" id="id" style="font-size:22px; color:blue"></div>
+</body>
+</html>"""
p = Premailer(html)
result_html = p.transform()
@@ -1285,7 +1316,7 @@ ration:none">Yes!</strong></p>
<title>Title</title>
</head>
<body>
- <h1 style="color:green">Hi!</h1>
+ <h1 class="foo" style="color:green">Hi!</h1>
</body>
</html>"""
@@ -2395,7 +2426,7 @@ sheet" type="text/css">
<head>
</head>
<body>
- <div style="color:green; font-size:10px"></div>
+ <div class="color example" style="color:green; font-size:10px"></div>
</body>
</html>"""
@@ -2453,22 +2484,22 @@ sheet" type="text/css">
</head>
<body>
<p><img src="/images/left.jpg" style="float: left"> text
- <img src="/images/right.png" class="floatright"> text
+ <img src="/r.png" class="floatright"> text
<img src="/images/nofloat.gif"> text
</body>
</html>"""
expect_html = """<html>
- <head>
- <title>Title</title>
- </head>
- <body>
- <p><img src="/images/left.jpg" style="float: left" align="left"> text
- <img src="/images/right.png" style="float:right" align="right"> text
- <img src="/images/nofloat.gif"> text
- </p>
- </body>
- </html>"""
+<head>
+<title>Title</title>
+</head>
+<body>
+<p><img src="/images/left.jpg" style="float: left" align="left"> text
+ <img src="/r.png" class="floatright" style="float:right" align="right"> text
+ <img src="/images/nofloat.gif"> text
+</p>
+</body>
+</html>"""
p = Premailer(html, align_floating_images=True)
result_html = p.transform()
@@ -2498,7 +2529,8 @@ sheet" type="text/css">
<head>
</head>
<body>
- <div style="color:green"><span></span></div>
+ <div class="color" style="color:green"><span class="nocolor"></span>
+ </div>
</body>
</html>"""
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 1
},
"num_modified_files": 2
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"mock",
"coverage",
"pytest"
],
"pre_install": null,
"python": "3.5",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
cssselect==1.1.0
cssutils==2.3.1
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
lxml==5.3.1
mock==5.2.0
nose==1.3.7
packaging==21.3
pluggy==1.0.0
-e git+https://github.com/peterbe/premailer.git@cc18022e334d5336e48f75bd4e0a73c98cc5942a#egg=premailer
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
requests==2.27.1
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: premailer
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- charset-normalizer==2.0.12
- coverage==6.2
- cssselect==1.1.0
- cssutils==2.3.1
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- lxml==5.3.1
- mock==5.2.0
- nose==1.3.7
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- requests==2.27.1
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/premailer
| [
"premailer/tests/test_premailer.py::Tests::test_align_float_images",
"premailer/tests/test_premailer.py::Tests::test_favour_rule_with_class_over_generic",
"premailer/tests/test_premailer.py::Tests::test_favour_rule_with_element_over_generic",
"premailer/tests/test_premailer.py::Tests::test_favour_rule_with_id_over_others",
"premailer/tests/test_premailer.py::Tests::test_favour_rule_with_important_over_others",
"premailer/tests/test_premailer.py::Tests::test_prefer_inline_to_class",
"premailer/tests/test_premailer.py::Tests::test_remove_unset_properties",
"premailer/tests/test_premailer.py::Tests::test_style_attribute_specificity",
"premailer/tests/test_premailer.py::Tests::test_turnoff_cache_works_as_expected"
]
| []
| [
"premailer/tests/test_premailer.py::Tests::test_3_digit_color_expand",
"premailer/tests/test_premailer.py::Tests::test_apple_newsletter_example",
"premailer/tests/test_premailer.py::Tests::test_base_url_fixer",
"premailer/tests/test_premailer.py::Tests::test_base_url_with_path",
"premailer/tests/test_premailer.py::Tests::test_basic_html",
"premailer/tests/test_premailer.py::Tests::test_basic_html_shortcut_function",
"premailer/tests/test_premailer.py::Tests::test_basic_html_with_pseudo_selector",
"premailer/tests/test_premailer.py::Tests::test_basic_xml",
"premailer/tests/test_premailer.py::Tests::test_broken_xml",
"premailer/tests/test_premailer.py::Tests::test_capture_cssutils_logging",
"premailer/tests/test_premailer.py::Tests::test_child_selector",
"premailer/tests/test_premailer.py::Tests::test_command_line_fileinput_from_argument",
"premailer/tests/test_premailer.py::Tests::test_command_line_fileinput_from_stdin",
"premailer/tests/test_premailer.py::Tests::test_command_line_preserve_style_tags",
"premailer/tests/test_premailer.py::Tests::test_comments_in_media_queries",
"premailer/tests/test_premailer.py::Tests::test_css_disable_basic_html_attributes",
"premailer/tests/test_premailer.py::Tests::test_css_disable_leftover_css",
"premailer/tests/test_premailer.py::Tests::test_css_text",
"premailer/tests/test_premailer.py::Tests::test_css_text_with_only_body_present",
"premailer/tests/test_premailer.py::Tests::test_css_with_html_attributes",
"premailer/tests/test_premailer.py::Tests::test_css_with_pseudoclasses_excluded",
"premailer/tests/test_premailer.py::Tests::test_css_with_pseudoclasses_included",
"premailer/tests/test_premailer.py::Tests::test_disabled_validator",
"premailer/tests/test_premailer.py::Tests::test_doctype",
"premailer/tests/test_premailer.py::Tests::test_empty_style_tag",
"premailer/tests/test_premailer.py::Tests::test_external_links",
"premailer/tests/test_premailer.py::Tests::test_external_links_unfindable",
"premailer/tests/test_premailer.py::Tests::test_external_styles_and_links",
"premailer/tests/test_premailer.py::Tests::test_external_styles_on_http",
"premailer/tests/test_premailer.py::Tests::test_external_styles_on_https",
"premailer/tests/test_premailer.py::Tests::test_external_styles_with_base_url",
"premailer/tests/test_premailer.py::Tests::test_fontface_selectors_with_no_selectortext",
"premailer/tests/test_premailer.py::Tests::test_ignore_some_external_stylesheets",
"premailer/tests/test_premailer.py::Tests::test_ignore_some_incorrectly",
"premailer/tests/test_premailer.py::Tests::test_ignore_some_inline_stylesheets",
"premailer/tests/test_premailer.py::Tests::test_ignore_style_elements_with_media_attribute",
"premailer/tests/test_premailer.py::Tests::test_include_star_selector",
"premailer/tests/test_premailer.py::Tests::test_inline_important",
"premailer/tests/test_premailer.py::Tests::test_inline_wins_over_external",
"premailer/tests/test_premailer.py::Tests::test_keyframe_selectors",
"premailer/tests/test_premailer.py::Tests::test_last_child",
"premailer/tests/test_premailer.py::Tests::test_last_child_exclude_pseudo",
"premailer/tests/test_premailer.py::Tests::test_leftover_important",
"premailer/tests/test_premailer.py::Tests::test_links_without_protocol",
"premailer/tests/test_premailer.py::Tests::test_load_external_url",
"premailer/tests/test_premailer.py::Tests::test_mailto_url",
"premailer/tests/test_premailer.py::Tests::test_mediaquery",
"premailer/tests/test_premailer.py::Tests::test_merge_styles_basic",
"premailer/tests/test_premailer.py::Tests::test_merge_styles_non_trivial",
"premailer/tests/test_premailer.py::Tests::test_merge_styles_with_class",
"premailer/tests/test_premailer.py::Tests::test_merge_styles_with_unset",
"premailer/tests/test_premailer.py::Tests::test_mixed_pseudo_selectors",
"premailer/tests/test_premailer.py::Tests::test_multiple_style_elements",
"premailer/tests/test_premailer.py::Tests::test_multithreading",
"premailer/tests/test_premailer.py::Tests::test_parse_style_rules",
"premailer/tests/test_premailer.py::Tests::test_precedence_comparison",
"premailer/tests/test_premailer.py::Tests::test_remove_classes",
"premailer/tests/test_premailer.py::Tests::test_shortcut_function",
"premailer/tests/test_premailer.py::Tests::test_six_color",
"premailer/tests/test_premailer.py::Tests::test_strip_important",
"premailer/tests/test_premailer.py::Tests::test_style_block_with_external_urls",
"premailer/tests/test_premailer.py::Tests::test_type_test",
"premailer/tests/test_premailer.py::Tests::test_uppercase_margin",
"premailer/tests/test_premailer.py::Tests::test_xml_cdata"
]
| []
| BSD 3-Clause "New" or "Revised" License | 575 | [
"premailer/__init__.py",
"premailer/premailer.py"
]
| [
"premailer/__init__.py",
"premailer/premailer.py"
]
|
kytos__python-openflow-56 | 275103dca4116b8911dc19ddad4b90121936d9f1 | 2016-06-07 17:59:27 | 275103dca4116b8911dc19ddad4b90121936d9f1 | diff --git a/pyof/v0x01/common/flow_match.py b/pyof/v0x01/common/flow_match.py
index a900b81..07d103e 100644
--- a/pyof/v0x01/common/flow_match.py
+++ b/pyof/v0x01/common/flow_match.py
@@ -1,6 +1,7 @@
"""Defines flow statistics structures and related items"""
# System imports
+import enum
# Third-party imports
@@ -8,7 +9,10 @@
from pyof.v0x01.foundation import base
from pyof.v0x01.foundation import basic_types
-class FlowWildCards(base.GenericBitMask):
+# Enums
+
+
+class FlowWildCards(enum.Enum):
"""
Wildcards used to identify flows.
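These wildcard values are OR-able bit flags, and plain `enum.Enum` members do not combine with `|`. A hedged illustration using only the standard library; `enum.IntFlag` is an assumption here (it landed in Python 3.6, after this change), and the member names are a trimmed hypothetical sample:

```python
import enum

class Wildcards(enum.IntFlag):     # hypothetical stand-in for FlowWildCards
    IN_PORT = 1 << 0
    DL_VLAN = 1 << 1

combined = Wildcards.IN_PORT | Wildcards.DL_VLAN   # IntFlag members compose
assert combined & Wildcards.DL_VLAN

class PlainWildcards(enum.Enum):   # plain Enum: same values, no composition
    IN_PORT = 1 << 0
    DL_VLAN = 1 << 1

# PlainWildcards.IN_PORT | PlainWildcards.DL_VLAN  # would raise TypeError
```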
diff --git a/pyof/v0x01/common/phy_port.py b/pyof/v0x01/common/phy_port.py
index 7395fb6..b45777f 100644
--- a/pyof/v0x01/common/phy_port.py
+++ b/pyof/v0x01/common/phy_port.py
@@ -1,6 +1,7 @@
"""Defines physical port classes and related items"""
# System imports
+import enum
# Third-party imports
@@ -8,20 +9,9 @@
from pyof.v0x01.foundation import base
from pyof.v0x01.foundation import basic_types
+# Enums
+
class PortConfig(base.GenericBitMask):
- """Flags to indicate behavior of the physical port.
-
- These flags are used in OFPPhyPort to describe the current configuration.
- They are used in the OFPPortMod message to configure the port's behavior.
-
- OFPPC_PORT_DOWN # Port is administratively down.
- OFPPC_NO_STP # Disable 802.1D spanning tree on port.
- OFPPC_NO_RECV # Drop all packets except 802.1D spanning tree.
- OFPPC_NO_RECV_STP # Drop received 802.1D STP packets.
- OFPPC_NO_FLOOD # Do not include this port when flooding.
- OFPPC_NO_FWD # Drop packets forwarded to port.
- OFPPC_NO_PACKET_IN # Do not send packet-in msgs for port.
- """
OFPC_PORT_DOWN = 1 << 0
OFPPC_NO_STP = 1 << 1
OFPPC_NO_RECV = 1 << 2
@@ -31,9 +21,32 @@ class PortConfig(base.GenericBitMask):
OFPPC_NO_PACKET_IN = 1 << 6
-
-
-class PortState(base.GenericBitMask):
+#class PortConfig(enum.Enum):
+# """Flags to indicate behavior of the physical port.
+#
+# These flags are used in OFPPhyPort to describe the current configuration.
+# They are used in the OFPPortMod message to configure the port's behavior.
+#
+# Enums:
+# OFPPC_PORT_DOWN # Port is administratively down.
+# OFPPC_NO_STP # Disable 802.1D spanning tree on port.
+# OFPPC_NO_RECV # Drop all packets except 802.1D spanning tree.
+# OFPPC_NO_RECV_STP # Drop received 802.1D STP packets.
+# OFPPC_NO_FLOOD # Do not include this port when flooding.
+# OFPPC_NO_FWD # Drop packets forwarded to port.
+# OFPPC_NO_PACKET_IN # Do not send packet-in msgs for port.
+# """
+#
+# OFPPC_PORT_DOWN = 1 << 0
+# OFPPC_NO_STP = 1 << 1
+# OFPPC_NO_RECV = 1 << 2
+# OFPPC_NO_RECV_STP = 1 << 3
+# OFPPC_FLOOD = 1 << 4
+# OFPPC_NO_FWD = 1 << 5
+# OFPPC_NO_PACKET_IN = 1 << 6
+
+
+class PortState(enum.Enum):
"""Current state of the physical port.
These are not configurable from the controller.
@@ -42,6 +55,7 @@ class PortState(base.GenericBitMask):
must adjust OFPPC_NO_RECV, OFPPC_NO_FWD, and OFPPC_NO_PACKET_IN
appropriately to fully implement an 802.1D spanning tree.
+ Enums:
OFPPS_LINK_DOWN # Not learning or relaying frames.
OFPPS_STP_LISTEN # Not learning or relaying frames.
OFPPS_STP_LEARN # Learning but not relaying frames.
@@ -58,7 +72,7 @@ class PortState(base.GenericBitMask):
# OFPPS_STP_MASK = 3 << 8 - Refer to ISSUE #7
-class Port(base.GenericBitMask):
+class Port(enum.Enum):
"""Port numbering.
Physical ports are numbered starting from 1. Port number 0 is reserved by
@@ -87,13 +101,14 @@ class Port(base.GenericBitMask):
OFPP_NONE = 0xffff
-class PortFeatures(base.GenericBitMask):
+class PortFeatures(enum.Enum):
"""Physical ports features.
The curr, advertised, supported, and peer fields indicate link modes
(10M to 10G full and half-duplex), link type (copper/fiber) and
    link features (auto-negotiation and pause).
+ Enums:
OFPPF_10MB_HD # 10 Mb half-duplex rate support.
OFPPF_10MB_FD # 10 Mb full-duplex rate support.
OFPPF_100MB_HD # 100 Mb half-duplex rate support.
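A small hedged sketch of enum lookup, using only the `OFPP_NONE = 0xffff` member visible in the `Port` class above:

```python
import enum

class Port(enum.Enum):  # trimmed to the one member quoted above
    OFPP_NONE = 0xffff

assert Port.OFPP_NONE.value == 0xffff
assert Port(0xffff) is Port.OFPP_NONE  # reverse lookup by value
```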
diff --git a/pyof/v0x01/controller2switch/aggregate_stats_reply.py b/pyof/v0x01/controller2switch/aggregate_stats_reply.py
deleted file mode 100644
index 339127c..0000000
--- a/pyof/v0x01/controller2switch/aggregate_stats_reply.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""Body of the reply message"""
-
-# System imports
-
-# Third-party imports
-
-# Local source tree imports
-from pyof.v0x01.foundation import base
-from pyof.v0x01.foundation import basic_types
-
-# Classes
-
-
-class AggregateStatsReply(base.GenericStruct):
- """Body of reply to OFPST_AGGREGATE request.
-
- :param packet_count: Number of packets in flows
- :param byte_count: Number of bytes in flows
- :param flow_count: Number of flows
- :param pad: Align to 64 bits
-
- """
- packet_count = basic_types.UBInt64()
- byte_count = basic_types.UBInt64()
- flow_count = basic_types.UBInt32()
- pad = basic_types.PAD(4)
-
- def __init__(self, packet_count=None, byte_count=None, flow_count=None):
- self.packet_count = packet_count
- self.byte_count = byte_count
- self.flow_count = flow_count
diff --git a/pyof/v0x01/controller2switch/aggregate_stats_request.py b/pyof/v0x01/controller2switch/aggregate_stats_request.py
deleted file mode 100644
index b45f923..0000000
--- a/pyof/v0x01/controller2switch/aggregate_stats_request.py
+++ /dev/null
@@ -1,38 +0,0 @@
-"""Aggregate information about multiple flows is requested with the
-OFPST_AGGREGATE stats request type"""
-
-# System imports
-
-# Third-party imports
-
-# Local source tree imports
-from pyof.v0x01.common import flow_match
-from pyof.v0x01.foundation import base
-from pyof.v0x01.foundation import basic_types
-
-# Classes
-
-
-class AggregateStatsRequest(base.GenericStruct):
- """
- Body for ofp_stats_request of type OFPST_AGGREGATE.
-
- :param match: Fields to match
- :param table_id: ID of table to read (from pyof_table_stats) 0xff
- for all tables or 0xfe for emergency.
- :param pad: Align to 32 bits
- :param out_port: Require matching entries to include this as an
- output port. A value of OFPP_NONE indicates
- no restriction
-
- """
- match = flow_match.Match()
- table_id = basic_types.UBInt8()
- pad = basic_types.PAD(1)
- out_port = basic_types.UBInt16()
-
- def __init__(self, match=None, table_id=None, out_port=None):
- super().__init__()
- self.match = match
- self.table_id = table_id
- self.out_port = out_port
diff --git a/pyof/v0x01/controller2switch/common.py b/pyof/v0x01/controller2switch/common.py
index c5490dd..bfb9cd3 100644
--- a/pyof/v0x01/controller2switch/common.py
+++ b/pyof/v0x01/controller2switch/common.py
@@ -8,6 +8,7 @@ import enum
# Local source tree imports
from pyof.v0x01.common import header as of_header
from pyof.v0x01.common import action
+from pyof.v0x01.common import flow_match
from pyof.v0x01.foundation import base
from pyof.v0x01.foundation import basic_types
@@ -65,3 +66,313 @@ class ListOfActions(basic_types.FixedTypeList):
"""
def __init__(self, items=None):
super().__init__(pyof_class=action.ActionHeader, items=items)
+
+
+class AggregateStatsReply(base.GenericStruct):
+ """Body of reply to OFPST_AGGREGATE request.
+
+ :param packet_count: Number of packets in flows
+ :param byte_count: Number of bytes in flows
+ :param flow_count: Number of flows
+ :param pad: Align to 64 bits
+
+ """
+ packet_count = basic_types.UBInt64()
+ byte_count = basic_types.UBInt64()
+ flow_count = basic_types.UBInt32()
+ pad = basic_types.PAD(4)
+
+ def __init__(self, packet_count=None, byte_count=None, flow_count=None):
+ self.packet_count = packet_count
+ self.byte_count = byte_count
+ self.flow_count = flow_count
+
+
+class AggregateStatsRequest(base.GenericStruct):
+ """
+ Body for ofp_stats_request of type OFPST_AGGREGATE.
+
+ :param match: Fields to match
+ :param table_id: ID of table to read (from pyof_table_stats) 0xff
+ for all tables or 0xfe for emergency.
+ :param pad: Align to 32 bits
+ :param out_port: Require matching entries to include this as an
+ output port. A value of OFPP_NONE indicates
+ no restriction
+
+ """
+ match = flow_match.Match()
+ table_id = basic_types.UBInt8()
+ pad = basic_types.PAD(1)
+ out_port = basic_types.UBInt16()
+
+ def __init__(self, match=None, table_id=None, out_port=None):
+ super().__init__()
+ self.match = match
+ self.table_id = table_id
+ self.out_port = out_port
+
+
+class DescStats(base.GenericStruct):
+ """
+ Information about the switch manufacturer, hardware revision, software
+    revision, serial number, and a description field is available from
+ the OFPST_DESC stats request.
+
+ :param mfr_desc: Manufacturer description
+ :param hw_desc: Hardware description
+ :param sw_desc: Software description
+ :param serial_num: Serial number
+ :param dp_desc: Human readable description of datapath
+
+ """
+ mfr_desc = basic_types.Char(length=base.DESC_STR_LEN)
+ hw_desc = basic_types.Char(length=base.DESC_STR_LEN)
+ sw_desc = basic_types.Char(length=base.DESC_STR_LEN)
+ serial_num = basic_types.Char(length=base.SERIAL_NUM_LEN)
+ dp_desc = basic_types.Char(length=base.DESC_STR_LEN)
+
+ def __init__(self, mfr_desc=None, hw_desc=None, sw_desc=None,
+ serial_num=None, dp_desc=None):
+ self.mfr_desc = mfr_desc
+ self.hw_desc = hw_desc
+ self.sw_desc = sw_desc
+ self.serial_num = serial_num
+ self.dp_desc = dp_desc
+
+
+class FlowStats(base.GenericStruct):
+ """
+ Body of reply to OFPST_FLOW request.
+
+ :param length: Length of this entry
+ :param table_id: ID of table flow came from
+ :param pad: Align to 32 bits
+ :param match: Description of fields
+ :param duration_sec: Time flow has been alive in seconds
+ :param duration_nsec: Time flow has been alive in nanoseconds beyond
+ duration_sec
+ :param priority: Priority of the entry. Only meaningful when this
+ is not an exact-match entry
+ :param idle_timeout: Number of seconds idle before expiration
+ :param hard_timeout: Number of seconds before expiration
+ :param pad2: Align to 64-bits
+ :param cookie: Opaque controller-issued identifier
+ :param packet_count: Number of packets in flow
+ :param byte_count: Number of bytes in flow
+ :param actions: Actions
+ """
+ length = basic_types.UBInt16()
+ table_id = basic_types.UBInt8()
+ pad = basic_types.PAD(1)
+ match = flow_match.Match()
+ duration_sec = basic_types.UBInt32()
+ duration_nsec = basic_types.UBInt32()
+ priority = basic_types.UBInt16()
+ idle_timeout = basic_types.UBInt16()
+ hard_timeout = basic_types.UBInt16()
+ pad2 = basic_types.PAD(6)
+ cookie = basic_types.UBInt64()
+ packet_count = basic_types.UBInt64()
+ byte_count = basic_types.UBInt64()
+ actions = ListOfActions()
+
+ def __init__(self, length=None, table_id=None, match=None,
+ duration_sec=None, duration_nsec=None, priority=None,
+ idle_timeout=None, hard_timeout=None, cookie=None,
+ packet_count=None, byte_count=None, actions=None):
+ self.length = length
+ self.table_id = table_id
+ self.match = match
+ self.duration_sec = duration_sec
+ self.duration_nsec = duration_nsec
+ self.priority = priority
+ self.idle_timeout = idle_timeout
+ self.hard_timeout = hard_timeout
+ self.cookie = cookie
+ self.packet_count = packet_count
+ self.byte_count = byte_count
+ self.actions = [] if actions is None else actions
+
+
+class FlowStatsRequest(base.GenericStruct):
+ """
+ Body for ofp_stats_request of type OFPST_FLOW.
+
+ :param match: Fields to match
+ :param table_id: ID of table to read (from pyof_table_stats)
+ 0xff for all tables or 0xfe for emergency
+ :param pad: Align to 32 bits
+ :param out_port: Require matching entries to include this as an output
+ port. A value of OFPP_NONE indicates no restriction.
+
+ """
+ match = flow_match.Match()
+ table_id = basic_types.UBInt8()
+ pad = basic_types.PAD(1)
+ out_port = basic_types.UBInt16()
+
+ def __init__(self, match=None, table_id=None, out_port=None):
+ self.match = match
+ self.table_id = table_id
+ self.out_port = out_port
+
+
+class PortStats(base.GenericStruct):
+ """Body of reply to OFPST_PORT request.
+
+ If a counter is unsupported, set the field to all ones.
+
+ :param port_no: Port number
+ :param pad: Align to 64-bits
+ :param rx_packets: Number of received packets
+ :param tx_packets: Number of transmitted packets
+ :param rx_bytes: Number of received bytes
+ :param tx_bytes: Number of transmitted bytes
+ :param rx_dropped: Number of packets dropped by RX
+ :param tx_dropped: Number of packets dropped by TX
+ :param rx_errors: Number of receive errors. This is a super-set
+ of more specific receive errors and should be
+ greater than or equal to the sum of all
+ rx_*_err values
+ :param tx_errors: Number of transmit errors. This is a super-set
+ of more specific transmit errors and should be
+ greater than or equal to the sum of all
+ tx_*_err values (none currently defined.)
+ :param rx_frame_err: Number of frame alignment errors
+ :param rx_over_err: Number of packets with RX overrun
+ :param rx_crc_err: Number of CRC errors
+ :param collisions: Number of collisions
+
+ """
+ port_no = basic_types.UBInt16()
+ pad = basic_types.PAD(6)
+ rx_packets = basic_types.UBInt64()
+ tx_packets = basic_types.UBInt64()
+ rx_bytes = basic_types.UBInt64()
+ tx_bytes = basic_types.UBInt64()
+ rx_dropped = basic_types.UBInt64()
+ tx_dropped = basic_types.UBInt64()
+ rx_errors = basic_types.UBInt64()
+ tx_errors = basic_types.UBInt64()
+ rx_frame_err = basic_types.UBInt64()
+ rx_over_err = basic_types.UBInt64()
+ rx_crc_err = basic_types.UBInt64()
+ collisions = basic_types.UBInt64()
+
+ def __init__(self, port_no=None, rx_packets=None,
+ tx_packets=None, rx_bytes=None, tx_bytes=None,
+ rx_dropped=None, tx_dropped=None, rx_errors=None,
+ tx_errors=None, rx_frame_err=None, rx_over_err=None,
+ rx_crc_err=None, collisions=None):
+ self.port_no = port_no
+ self.rx_packets = rx_packets
+ self.tx_packets = tx_packets
+ self.rx_bytes = rx_bytes
+ self.tx_bytes = tx_bytes
+ self.rx_dropped = rx_dropped
+ self.tx_dropped = tx_dropped
+ self.rx_errors = rx_errors
+ self.tx_errors = tx_errors
+ self.rx_frame_err = rx_frame_err
+ self.rx_over_err = rx_over_err
+ self.rx_crc_err = rx_crc_err
+ self.collisions = collisions
+
+
+class PortStatsRequest(base.GenericStruct):
+ """
+ Body for ofp_stats_request of type OFPST_PORT
+
+ :param port_no: OFPST_PORT message must request statistics either
+ for a single port (specified in port_no) or for
+ all ports (if port_no == OFPP_NONE).
+ :param pad: Align to 64 bits
+
+ """
+ port_no = basic_types.UBInt16()
+ pad = basic_types.PAD(6)
+
+ def __init__(self, port_no=None):
+ self.port_no = port_no
+
+
+class QueueStats(base.GenericStruct):
+ """
+ Implements the reply body of a port_no
+
+ :param port_no: Port Number
+ :param pad: Align to 32-bits
+ :param queue_id: Queue ID
+ :param tx_bytes: Number of transmitted bytes
+ :param tx_packets: Number of transmitted packets
+ :param tx_errors: Number of packets dropped due to overrun
+
+ """
+ port_no = basic_types.UBInt16()
+ pad = basic_types.PAD(2)
+ queue_id = basic_types.UBInt32()
+ tx_bytes = basic_types.UBInt64()
+ tx_packets = basic_types.UBInt64()
+ tx_errors = basic_types.UBInt64()
+
+ def __init__(self, port_no=None, queue_id=None, tx_bytes=None,
+ tx_packets=None, tx_errors=None):
+ self.port_no = port_no
+ self.queue_id = queue_id
+ self.tx_bytes = tx_bytes
+ self.tx_packets = tx_packets
+ self.tx_errors = tx_errors
+
+
+class QueueStatsRequest(base.GenericStruct):
+ """
+ Implements the request body of a port_no
+
+ :param port_no: All ports if OFPT_ALL
+ :param pad: Align to 32-bits
+ :param queue_id: All queues if OFPQ_ALL
+ """
+ port_no = basic_types.UBInt16()
+ pad = basic_types.PAD(2)
+ queue_id = basic_types.UBInt32()
+
+ def __init__(self, port_no=None, queue_id=None):
+ self.port_no = port_no
+ self.queue_id = queue_id
+
+
+class TableStats(base.GenericStruct):
+ """Body of reply to OFPST_TABLE request.
+
+ :param table_id: Identifier of table. Lower numbered tables
+ are consulted first
+ :param pad: Align to 32-bits
+ :param name: Table name
+ :param wildcards: Bitmap of OFPFW_* wildcards that are supported
+ by the table
+ :param max_entries: Max number of entries supported
+ :param active_count: Number of active entries
+ :param count_lookup: Number of packets looked up in table
+ :param count_matched: Number of packets that hit table
+
+ """
+ table_id = basic_types.UBInt8()
+ pad = basic_types.PAD(3)
+ name = basic_types.Char(length=base.OFP_MAX_TABLE_NAME_LEN)
+ wildcards = basic_types.UBInt32()
+ max_entries = basic_types.UBInt32()
+ active_count = basic_types.UBInt32()
+ count_lookup = basic_types.UBInt64()
+ count_matched = basic_types.UBInt64()
+
+ def __init__(self, table_id=None, name=None, wildcards=None,
+ max_entries=None, active_count=None, count_lookup=None,
+ count_matched=None):
+ self.table_id = table_id
+ self.name = name
+ self.wildcards = wildcards
+ self.max_entries = max_entries
+ self.active_count = active_count
+ self.count_lookup = count_lookup
+ self.count_matched = count_matched
diff --git a/pyof/v0x01/controller2switch/desc_stats.py b/pyof/v0x01/controller2switch/desc_stats.py
deleted file mode 100644
index bceffb9..0000000
--- a/pyof/v0x01/controller2switch/desc_stats.py
+++ /dev/null
@@ -1,39 +0,0 @@
-"""Information about the switch manufactures"""
-
-# System imports
-
-# Third-party imports
-
-# Local source tree imports
-from pyof.v0x01.foundation import base
-from pyof.v0x01.foundation import basic_types
-
-# Classes
-
-
-class DescStats(base.GenericStruct):
- """
- Information about the switch manufacturer, hardware revision, software
- revision, serial number, and a description field is avail- able from
- the OFPST_DESC stats request.
-
- :param mfr_desc: Manufacturer description
- :param hw_desc: Hardware description
- :param sw_desc: Software description
- :param serial_num: Serial number
- :param dp_desc: Human readable description of datapath
-
- """
- mfr_desc = basic_types.Char(length=base.DESC_STR_LEN)
- hw_desc = basic_types.Char(length=base.DESC_STR_LEN)
- sw_desc = basic_types.Char(length=base.DESC_STR_LEN)
- serial_num = basic_types.Char(length=base.SERIAL_NUM_LEN)
- dp_desc = basic_types.Char(length=base.DESC_STR_LEN)
-
- def __init__(self, mfr_desc=None, hw_desc=None, sw_desc=None,
- serial_num=None, dp_desc=None):
- self.mfr_desc = mfr_desc
- self.hw_desc = hw_desc
- self.sw_desc = sw_desc
- self.serial_num = serial_num
- self.dp_desc = dp_desc
diff --git a/pyof/v0x01/controller2switch/features_reply.py b/pyof/v0x01/controller2switch/features_reply.py
index c5f3bca..e02f012 100644
--- a/pyof/v0x01/controller2switch/features_reply.py
+++ b/pyof/v0x01/controller2switch/features_reply.py
@@ -1,6 +1,7 @@
"""Defines Features Reply classes and related items"""
# System imports
+import enum
# Third-party imports
@@ -11,9 +12,13 @@ from pyof.v0x01.foundation import base
from pyof.v0x01.foundation import basic_types
-class Capabilities(base.GenericBitMask):
- """Capabilities supported by the datapath
+# Enums
+
+class Capabilities(enum.Enum):
+ """Enumeration of Capabilities supported by the datapath
+
+ Enums:
OFPC_FLOW_STATS # Flow statistics
OFPC_TABLE_STATS # Table statistics
OFPC_PORT_STATS # Port statistics
diff --git a/pyof/v0x01/controller2switch/flow_stats.py b/pyof/v0x01/controller2switch/flow_stats.py
deleted file mode 100644
index efb5edd..0000000
--- a/pyof/v0x01/controller2switch/flow_stats.py
+++ /dev/null
@@ -1,67 +0,0 @@
-"""Body of the reply to an OFPST_FLOW"""
-
-# System imports
-
-# Third-party imports
-
-# Local source tree imports
-from pyof.v0x01.common import flow_match
-from pyof.v0x01.controller2switch import common
-from pyof.v0x01.foundation import base
-from pyof.v0x01.foundation import basic_types
-
-# Classes
-
-
-class FlowStats(base.GenericStruct):
- """
- Body of reply to OFPST_FLOW request.
-
- :param length: Length of this entry
- :param table_id: ID of table flow came from
- :param pad: Align to 32 bits
- :param match: Description of fields
- :param duration_sec: Time flow has been alive in seconds
- :param duration_nsec: Time flow has been alive in nanoseconds beyond
- duration_sec
- :param priority: Priority of the entry. Only meaningful when this
- is not an exact-match entry
- :param idle_timeout: Number of seconds idle before expiration
- :param hard_timeout: Number of seconds before expiration
- :param pad2: Align to 64-bits
- :param cookie: Opaque controller-issued identifier
- :param packet_count: Number of packets in flow
- :param byte_count: Number of bytes in flow
- :param actions: Actions
- """
- length = basic_types.UBInt16()
- table_id = basic_types.UBInt8()
- pad = basic_types.PAD(1)
- match = flow_match.Match()
- duration_sec = basic_types.UBInt32()
- duration_nsec = basic_types.UBInt32()
- priority = basic_types.UBInt16()
- idle_timeout = basic_types.UBInt16()
- hard_timeout = basic_types.UBInt16()
- pad2 = basic_types.PAD(6)
- cookie = basic_types.UBInt64()
- packet_count = basic_types.UBInt64()
- byte_count = basic_types.UBInt64()
- actions = common.ListOfActions()
-
- def __init__(self, length=None, table_id=None, match=None,
- duration_sec=None, duration_nsec=None, priority=None,
- idle_timeout=None, hard_timeout=None, cookie=None,
- packet_count=None, byte_count=None, actions=None):
- self.length = length
- self.table_id = table_id
- self.match = match
- self.duration_sec = duration_sec
- self.duration_nsec = duration_nsec
- self.prioriry = priority
- self.idle_timeout = idle_timeout
- self.hard_timeout = hard_timeout
- self.cookie = cookie
- self.packet_count = packet_count
- self.byte_count = byte_count
- self.actions = [] if actions is None else actions
diff --git a/pyof/v0x01/controller2switch/flow_stats_request.py b/pyof/v0x01/controller2switch/flow_stats_request.py
deleted file mode 100644
index 1fc5794..0000000
--- a/pyof/v0x01/controller2switch/flow_stats_request.py
+++ /dev/null
@@ -1,35 +0,0 @@
-"""Information about individual flows"""
-
-# System imports
-
-# Third-party imports
-
-# Local source tree imports
-from pyof.v0x01.common import flow_match
-from pyof.v0x01.foundation import base
-from pyof.v0x01.foundation import basic_types
-
-# Classes
-
-
-class FlowStatsRequest(base.GenericStruct):
- """
- Body for ofp_stats_request of type OFPST_FLOW.
-
- :param match: Fields to match
- :param table_id: ID of table to read (from pyof_table_stats)
- 0xff for all tables or 0xfe for emergency
- :param pad: Align to 32 bits
- :param out_port: Require matching entries to include this as an output
- port. A value of OFPP_NONE indicates no restriction.
-
- """
- match = flow_match.Match()
- table_id = basic_types.UBInt8()
- pad = basic_types.PAD(1)
- out_port = basic_types.UBInt16()
-
- def __init__(self, match=None, table_id=None, out_port=None):
- self.match = match
- self.table_id = table_id
- self.out_port = out_port
diff --git a/pyof/v0x01/controller2switch/port_stats.py b/pyof/v0x01/controller2switch/port_stats.py
deleted file mode 100644
index 474828d..0000000
--- a/pyof/v0x01/controller2switch/port_stats.py
+++ /dev/null
@@ -1,73 +0,0 @@
-"""Body of the port stats reply"""
-
-# System imports
-
-# Third-party imports
-
-# Local source tree imports
-from pyof.v0x01.foundation import base
-from pyof.v0x01.foundation import basic_types
-
-# Classes
-
-
-class PortStats(base.GenericStruct):
- """Body of reply to OFPST_PORT request.
-
- If a counter is unsupported, set the field to all ones.
-
- :param port_no: Port number
- :param pad: Align to 64-bits
- :param rx_packets: Number of received packets
- :param tx_packets: Number of transmitted packets
- :param rx_bytes: Number of received bytes
- :param tx_bytes: Number of transmitted bytes
- :param rx_dropped: Number of packets dropped by RX
- :param tx_dropped: Number of packets dropped by TX
- :param rx_errors: Number of receive errors. This is a super-set
- of more specific receive errors and should be
- greater than or equal to the sum of all
- rx_*_err values
- :param tx_errors: Number of transmit errors. This is a super-set
- of more specific transmit errors and should be
- greater than or equal to the sum of all
- tx_*_err values (none currently defined.)
- :param rx_frame_err: Number of frame alignment errors
- :param rx_over_err: Number of packets with RX overrun
- :param rx_crc_err: Number of CRC errors
- :param collisions: Number of collisions
-
- """
- port_no = basic_types.UBInt16()
- pad = basic_types.PAD(6)
- rx_packets = basic_types.UBInt64()
- tx_packets = basic_types.UBInt64()
- rx_bytes = basic_types.UBInt64()
- tx_bytes = basic_types.UBInt64()
- rx_dropped = basic_types.UBInt64()
- tx_dropped = basic_types.UBInt64()
- rx_errors = basic_types.UBInt64()
- tx_errors = basic_types.UBInt64()
- rx_frame_err = basic_types.UBInt64()
- rx_over_err = basic_types.UBInt64()
- rx_crc_err = basic_types.UBInt64()
- collisions = basic_types.UBInt64()
-
- def __init__(self, port_no=None, rx_packets=None,
- tx_packets=None, rx_bytes=None, tx_bytes=None,
- rx_dropped=None, tx_dropped=None, rx_errors=None,
- tx_errors=None, rx_frame_err=None, rx_over_err=None,
- rx_crc_err=None, collisions=None):
- self.port_no = port_no
- self.rx_packets = rx_packets
- self.tx_packets = tx_packets
- self.rx_bytes = rx_bytes
- self.tx_bytes = tx_bytes
- self.rx_dropped = rx_dropped
- self.tx_dropped = tx_dropped
- self.rx_errors = rx_errors
- self.tx_errors = tx_errors
- self.rx_frame_err = rx_frame_err
- self.rx_over_err = rx_over_err
- self.rx_crc_err = rx_crc_err
- self.collisions = collisions
diff --git a/pyof/v0x01/controller2switch/port_stats_request.py b/pyof/v0x01/controller2switch/port_stats_request.py
deleted file mode 100644
index 8fa4049..0000000
--- a/pyof/v0x01/controller2switch/port_stats_request.py
+++ /dev/null
@@ -1,26 +0,0 @@
-"""Information about physical ports is requested with OFPST_PORT"""
-
-# System imports
-
-# Third-party imports
-
-# Local source tree imports
-from pyof.v0x01.foundation import base
-from pyof.v0x01.foundation import basic_types
-
-
-class PortStatsRequest(base.GenericStruct):
- """
- Body for ofp_stats_request of type OFPST_PORT
-
- :param port_no: OFPST_PORT message must request statistics either
- for a single port (specified in port_no) or for
- all ports (if port_no == OFPP_NONE).
- :param pad:
-
- """
- port_no = basic_types.UBInt16()
- pad = basic_types.PAD(6)
-
- def __init__(self, port_no=None):
- self.port_no = port_no
diff --git a/pyof/v0x01/controller2switch/queue_stats.py b/pyof/v0x01/controller2switch/queue_stats.py
deleted file mode 100644
index d7df9b3..0000000
--- a/pyof/v0x01/controller2switch/queue_stats.py
+++ /dev/null
@@ -1,38 +0,0 @@
-"""The OFPST_QUEUE stats reply message provides queue statistics for one
-or more ports."""
-
-# System imports
-
-# Third-party imports
-
-# Local source tree imports
-from pyof.v0x01.foundation import base
-from pyof.v0x01.foundation import basic_types
-
-
-class QueueStats(base.GenericStruct):
- """
- Implements the reply body of a port_no
-
- :param port_no: Port Number
- :param pad: Align to 32-bits
- :param queue_id: Queue ID
- :param tx_bytes: Number of transmitted bytes
- :param tx_packets: Number of transmitted packets
- :param tx_errors: Number of packets dropped due to overrun
-
- """
- port_no = basic_types.UBInt16()
- pad = basic_types.PAD(2)
- queue_id = basic_types.UBInt32()
- tx_bytes = basic_types.UBInt64()
- tx_packets = basic_types.UBInt64()
- tx_errors = basic_types.UBInt64()
-
- def __init__(self, port_no=None, queue_id=None, tx_bytes=None,
- tx_packets=None, tx_errors=None):
- self.port_no = port_no
- self.queue_id = queue_id
- self.tx_bytes = tx_bytes
- self.tx_packets = tx_packets
- self.tx_errors = tx_errors
diff --git a/pyof/v0x01/controller2switch/queue_stats_request.py b/pyof/v0x01/controller2switch/queue_stats_request.py
deleted file mode 100644
index c1b431b..0000000
--- a/pyof/v0x01/controller2switch/queue_stats_request.py
+++ /dev/null
@@ -1,27 +0,0 @@
-"""The OFPST_QUEUE stats request message provides queue statistics for one
-or more ports."""
-
-# System imports
-
-# Third-party imports
-
-# Local source tree imports
-from pyof.v0x01.foundation import base
-from pyof.v0x01.foundation import basic_types
-
-
-class QueueStatsRequest(base.GenericStruct):
- """
- Implements the request body of a port_no
-
- :param port_no: All ports if OFPT_ALL
- :param pad: Align to 32-bits
- :param queue_id: All queues if OFPQ_ALL
- """
- port_no = basic_types.UBInt16()
- pad = basic_types.PAD(2)
- queue_id = basic_types.UBInt32()
-
- def __init__(self, port_no=None, queue_id=None):
- self.port_no = port_no
- self.queue_id = queue_id
diff --git a/pyof/v0x01/controller2switch/table_stats.py b/pyof/v0x01/controller2switch/table_stats.py
deleted file mode 100644
index bf2e42f..0000000
--- a/pyof/v0x01/controller2switch/table_stats.py
+++ /dev/null
@@ -1,45 +0,0 @@
-"""Information about tables is requested with OFPST_TABLE stats request type"""
-
-# System imports
-
-# Third-party imports
-
-# Local source tree imports
-from pyof.v0x01.foundation import base
-from pyof.v0x01.foundation import basic_types
-
-
-class TableStats(base.GenericStruct):
- """Body of reply to OFPST_TABLE request.
-
- :param table_id: Identifier of table. Lower numbered tables
- are consulted first
- :param pad: Align to 32-bits
- :param name: Table name
- :param wildcards: Bitmap of OFPFW_* wildcards that are supported
- by the table
- :param max_entries: Max number of entries supported
- :param active_count: Number of active entries
- :param count_lookup: Number of packets looked up in table
- :param count_matched: Number of packets that hit table
-
- """
- table_id = basic_types.UBInt8()
- pad = basic_types.PAD(3)
- name = basic_types.Char(length=base.OFP_MAX_TABLE_NAME_LEN)
- wildcards = basic_types.UBInt32()
- max_entries = basic_types.UBInt32()
- active_count = basic_types.UBInt32()
- count_lookup = basic_types.UBInt64()
- count_matched = basic_types.UBInt64()
-
- def __init__(self, table_id=None, name=None, wildcards=None,
- max_entries=None, active_count=None, count_lookup=None,
- count_matched=None):
- self.table_id = table_id
- self.name = name
- self.wildcards = wildcards
- self.max_entries = max_entries
- self.active_count = active_count
- self.count_lookup = count_lookup
- self.count_matched = count_matched
| Move all non-message classes to other files
Some classes (structs) do not define messages, but only structures that are used inside messages (such as "desc_stats").
As defined, only "messages" should have a "file" (submodule) of their own.
So, the files that do not hold a message should have their content split into other files (a sketch of the resulting imports follows the test patch below). | kytos/python-openflow | diff --git a/tests/v0x01/test_controller2switch/test_aggregate_stats_reply.py b/tests/v0x01/test_controller2switch/test_aggregate_stats_reply.py
index 3fc3326..1c699dd 100644
--- a/tests/v0x01/test_controller2switch/test_aggregate_stats_reply.py
+++ b/tests/v0x01/test_controller2switch/test_aggregate_stats_reply.py
@@ -1,12 +1,12 @@
import unittest
-from pyof.v0x01.controller2switch import aggregate_stats_reply
+from pyof.v0x01.controller2switch.common import AggregateStatsReply
class TestAggregateStatsReply(unittest.TestCase):
def setUp(self):
- self.message = aggregate_stats_reply.AggregateStatsReply()
+ self.message = AggregateStatsReply()
self.message.packet_count = 5
self.message.byte_count = 1
self.message.flow_count = 8
diff --git a/tests/v0x01/test_controller2switch/test_aggregate_stats_request.py b/tests/v0x01/test_controller2switch/test_aggregate_stats_request.py
index ca563fe..8e4be10 100644
--- a/tests/v0x01/test_controller2switch/test_aggregate_stats_request.py
+++ b/tests/v0x01/test_controller2switch/test_aggregate_stats_request.py
@@ -2,13 +2,13 @@ import unittest
from pyof.v0x01.common import flow_match
from pyof.v0x01.common import phy_port
-from pyof.v0x01.controller2switch import aggregate_stats_request
+from pyof.v0x01.controller2switch.common import AggregateStatsRequest
class TestAggregateStatsRequest(unittest.TestCase):
def setUp(self):
- self.message = aggregate_stats_request.AggregateStatsRequest()
+ self.message = AggregateStatsRequest()
self.message.match = flow_match.Match()
self.message.table_id = 1
self.message.out_port = phy_port.Port.OFPP_NONE
diff --git a/tests/v0x01/test_controller2switch/test_desc_stats.py b/tests/v0x01/test_controller2switch/test_desc_stats.py
index 5ea6422..4f13f97 100644
--- a/tests/v0x01/test_controller2switch/test_desc_stats.py
+++ b/tests/v0x01/test_controller2switch/test_desc_stats.py
@@ -1,6 +1,6 @@
import unittest
-from pyof.v0x01.controller2switch import desc_stats
+from pyof.v0x01.controller2switch.common import DescStats
from pyof.v0x01.foundation import base
@@ -8,7 +8,7 @@ class TestDescStats(unittest.TestCase):
def setUp(self):
content = bytes('A' * base.DESC_STR_LEN, 'utf-8')
- self.message = desc_stats.DescStats()
+ self.message = DescStats()
self.message.mfr_desc = content
self.message.hw_desc = content
self.message.sw_desc = content
diff --git a/tests/v0x01/test_controller2switch/test_flow_stats.py b/tests/v0x01/test_controller2switch/test_flow_stats.py
index 1de34ab..2a04c07 100644
--- a/tests/v0x01/test_controller2switch/test_flow_stats.py
+++ b/tests/v0x01/test_controller2switch/test_flow_stats.py
@@ -1,13 +1,13 @@
import unittest
from pyof.v0x01.common import flow_match
-from pyof.v0x01.controller2switch import flow_stats
+from pyof.v0x01.controller2switch.common import FlowStats
class TestFlowStats(unittest.TestCase):
def setUp(self):
- self.message = flow_stats.FlowStats()
+ self.message = FlowStats()
self.message.length = 160
self.message.table_id = 1
self.message.match = flow_match.Match()
diff --git a/tests/v0x01/test_controller2switch/test_flow_stats_request.py b/tests/v0x01/test_controller2switch/test_flow_stats_request.py
index 85a33bb..2a6f3fb 100644
--- a/tests/v0x01/test_controller2switch/test_flow_stats_request.py
+++ b/tests/v0x01/test_controller2switch/test_flow_stats_request.py
@@ -1,13 +1,13 @@
import unittest
from pyof.v0x01.common import flow_match
-from pyof.v0x01.controller2switch import flow_stats_request
+from pyof.v0x01.controller2switch.common import FlowStatsRequest
class TestFlowStatsRequest(unittest.TestCase):
def setUp(self):
- self.message = flow_stats_request.FlowStatsRequest()
+ self.message = FlowStatsRequest()
self.message.match = flow_match.Match()
self.message.table_id = 1
self.message.out_port = 80
diff --git a/tests/v0x01/test_controller2switch/test_port_stats.py b/tests/v0x01/test_controller2switch/test_port_stats.py
index 7a58485..e6f4b55 100644
--- a/tests/v0x01/test_controller2switch/test_port_stats.py
+++ b/tests/v0x01/test_controller2switch/test_port_stats.py
@@ -1,12 +1,12 @@
import unittest
-from pyof.v0x01.controller2switch import port_stats
+from pyof.v0x01.controller2switch.common import PortStats
class TestPortStats(unittest.TestCase):
def setUp(self):
- self.message = port_stats.PortStats()
+ self.message = PortStats()
self.message.port_no = 80
self.message.rx_packets = 5
self.message.tx_packets = 10
diff --git a/tests/v0x01/test_controller2switch/test_port_stats_request.py b/tests/v0x01/test_controller2switch/test_port_stats_request.py
index e0454dc..8025055 100644
--- a/tests/v0x01/test_controller2switch/test_port_stats_request.py
+++ b/tests/v0x01/test_controller2switch/test_port_stats_request.py
@@ -1,12 +1,12 @@
import unittest
-from pyof.v0x01.controller2switch import port_stats_request
+from pyof.v0x01.controller2switch.common import PortStatsRequest
class TestPortStatsRequest(unittest.TestCase):
def setUp(self):
- self.message = port_stats_request.PortStatsRequest()
+ self.message = PortStatsRequest()
self.message.port_no = 80
def test_get_size(self):
diff --git a/tests/v0x01/test_controller2switch/test_queue_stats.py b/tests/v0x01/test_controller2switch/test_queue_stats.py
index fb03ee7..b1f1219 100644
--- a/tests/v0x01/test_controller2switch/test_queue_stats.py
+++ b/tests/v0x01/test_controller2switch/test_queue_stats.py
@@ -1,12 +1,12 @@
import unittest
-from pyof.v0x01.controller2switch import queue_stats
+from pyof.v0x01.controller2switch.common import QueueStats
class TestQueueStats(unittest.TestCase):
def setUp(self):
- self.message = queue_stats.QueueStats()
+ self.message = QueueStats()
self.message.port_no = 80
self.message.queue_id = 5
self.message.tx_bytes = 1
diff --git a/tests/v0x01/test_controller2switch/test_queue_stats_request.py b/tests/v0x01/test_controller2switch/test_queue_stats_request.py
index 80cad68..2f95381 100644
--- a/tests/v0x01/test_controller2switch/test_queue_stats_request.py
+++ b/tests/v0x01/test_controller2switch/test_queue_stats_request.py
@@ -1,12 +1,12 @@
import unittest
-from pyof.v0x01.controller2switch import queue_stats_request
+from pyof.v0x01.controller2switch.common import QueueStatsRequest
class TestQueueStatsRequest(unittest.TestCase):
def setUp(self):
- self.message = queue_stats_request.QueueStatsRequest()
+ self.message = QueueStatsRequest()
self.message.port_no = 80
self.message.queue_id = 5
diff --git a/tests/v0x01/test_controller2switch/test_table_stats.py b/tests/v0x01/test_controller2switch/test_table_stats.py
index 353f6de..39888b5 100644
--- a/tests/v0x01/test_controller2switch/test_table_stats.py
+++ b/tests/v0x01/test_controller2switch/test_table_stats.py
@@ -1,14 +1,14 @@
import unittest
from pyof.v0x01.common import flow_match
-from pyof.v0x01.controller2switch import table_stats
+from pyof.v0x01.controller2switch.common import TableStats
from pyof.v0x01.foundation import base
class TestTableStats(unittest.TestCase):
def setUp(self):
- self.message = table_stats.TableStats()
+ self.message = TableStats()
self.message.table_id = 1
self.message.name = bytes('X' * base.OFP_MAX_TABLE_NAME_LEN, 'utf-8')
self.message.wildcards = flow_match.FlowWildCards.OFPFW_TP_DST
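
For reference — the test updates above all make the same change: imports move from per-struct submodules into controller2switch/common.py. Below is a minimal sketch of that change from a consumer's point of view; the module paths are the ones shown in the patch, while the field values are purely illustrative:

# Before the refactor, each non-message struct had its own submodule:
# from pyof.v0x01.controller2switch.desc_stats import DescStats
# from pyof.v0x01.controller2switch.port_stats_request import PortStatsRequest

# After it, the same structs are imported from the common module:
from pyof.v0x01.controller2switch.common import DescStats, PortStatsRequest

# Constructors are unchanged; only the import path moved.
request = PortStatsRequest(port_no=80)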
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_removed_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 4
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup==1.2.2
execnet==2.1.1
iniconfig==2.1.0
-e git+https://github.com/kytos/python-openflow.git@275103dca4116b8911dc19ddad4b90121936d9f1#egg=Kytos_OpenFlow_Parser_library
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
tomli==2.2.1
typing_extensions==4.13.0
| name: python-openflow
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- exceptiongroup==1.2.2
- execnet==2.1.1
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- tomli==2.2.1
- typing-extensions==4.13.0
prefix: /opt/conda/envs/python-openflow
| [
"tests/v0x01/test_controller2switch/test_aggregate_stats_reply.py::TestAggregateStatsReply::test_get_size",
"tests/v0x01/test_controller2switch/test_aggregate_stats_request.py::TestAggregateStatsRequest::test_get_size",
"tests/v0x01/test_controller2switch/test_desc_stats.py::TestDescStats::test_get_size",
"tests/v0x01/test_controller2switch/test_flow_stats.py::TestFlowStats::test_get_size",
"tests/v0x01/test_controller2switch/test_flow_stats_request.py::TestFlowStatsRequest::test_get_size",
"tests/v0x01/test_controller2switch/test_port_stats.py::TestPortStats::test_get_size",
"tests/v0x01/test_controller2switch/test_port_stats_request.py::TestPortStatsRequest::test_get_size",
"tests/v0x01/test_controller2switch/test_queue_stats.py::TestQueueStats::test_get_size",
"tests/v0x01/test_controller2switch/test_queue_stats_request.py::TestQueueStatsRequest::test_get_size",
"tests/v0x01/test_controller2switch/test_table_stats.py::TestTableStats::test_get_size"
]
| []
| []
| []
| MIT License | 576 | [
"pyof/v0x01/controller2switch/table_stats.py",
"pyof/v0x01/controller2switch/flow_stats.py",
"pyof/v0x01/controller2switch/queue_stats.py",
"pyof/v0x01/controller2switch/port_stats.py",
"pyof/v0x01/controller2switch/flow_stats_request.py",
"pyof/v0x01/common/flow_match.py",
"pyof/v0x01/controller2switch/aggregate_stats_reply.py",
"pyof/v0x01/controller2switch/features_reply.py",
"pyof/v0x01/controller2switch/port_stats_request.py",
"pyof/v0x01/controller2switch/desc_stats.py",
"pyof/v0x01/controller2switch/common.py",
"pyof/v0x01/controller2switch/aggregate_stats_request.py",
"pyof/v0x01/controller2switch/queue_stats_request.py",
"pyof/v0x01/common/phy_port.py"
]
| [
"pyof/v0x01/controller2switch/table_stats.py",
"pyof/v0x01/controller2switch/flow_stats.py",
"pyof/v0x01/controller2switch/queue_stats.py",
"pyof/v0x01/controller2switch/port_stats.py",
"pyof/v0x01/controller2switch/flow_stats_request.py",
"pyof/v0x01/common/flow_match.py",
"pyof/v0x01/controller2switch/aggregate_stats_reply.py",
"pyof/v0x01/controller2switch/features_reply.py",
"pyof/v0x01/controller2switch/port_stats_request.py",
"pyof/v0x01/controller2switch/desc_stats.py",
"pyof/v0x01/controller2switch/common.py",
"pyof/v0x01/controller2switch/aggregate_stats_request.py",
"pyof/v0x01/controller2switch/queue_stats_request.py",
"pyof/v0x01/common/phy_port.py"
]
|
|
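
Both records here follow pyof's declarative struct pattern, visible throughout the diffs: class attributes declare the wire layout in order (fixed-width big-endian integers plus padding), while __init__ only assigns values. A hedged sketch of that pattern — ExampleStats and its fields are hypothetical, but the base/basic_types calls are exactly the ones used in the patches:

from pyof.v0x01.foundation import base
from pyof.v0x01.foundation import basic_types


class ExampleStats(base.GenericStruct):
    """Hypothetical struct declared in the same style as the patched classes."""
    port_no = basic_types.UBInt16()     # 16-bit unsigned, network byte order
    pad = basic_types.PAD(2)            # two padding bytes for 32-bit alignment
    byte_count = basic_types.UBInt64()  # 64-bit unsigned counter

    def __init__(self, port_no=None, byte_count=None):
        super().__init__()
        self.port_no = port_no
        self.byte_count = byte_count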
kytos__python-openflow-58 | 545ae8a73dcec2e67cf854b21c44e692692f71dc | 2016-06-07 18:27:19 | 545ae8a73dcec2e67cf854b21c44e692692f71dc | diff --git a/pyof/v0x01/common/flow_match.py b/pyof/v0x01/common/flow_match.py
index a900b81..07d103e 100644
--- a/pyof/v0x01/common/flow_match.py
+++ b/pyof/v0x01/common/flow_match.py
@@ -1,6 +1,7 @@
"""Defines flow statistics structures and related items"""
# System imports
+import enum
# Third-party imports
@@ -8,7 +9,10 @@
from pyof.v0x01.foundation import base
from pyof.v0x01.foundation import basic_types
-class FlowWildCards(base.GenericBitMask):
+# Enums
+
+
+class FlowWildCards(enum.Enum):
"""
Wildcards used to identify flows.
diff --git a/pyof/v0x01/common/phy_port.py b/pyof/v0x01/common/phy_port.py
index 7395fb6..b45777f 100644
--- a/pyof/v0x01/common/phy_port.py
+++ b/pyof/v0x01/common/phy_port.py
@@ -1,6 +1,7 @@
"""Defines physical port classes and related items"""
# System imports
+import enum
# Third-party imports
@@ -8,20 +9,9 @@
from pyof.v0x01.foundation import base
from pyof.v0x01.foundation import basic_types
+# Enums
+
class PortConfig(base.GenericBitMask):
- """Flags to indicate behavior of the physical port.
-
- These flags are used in OFPPhyPort to describe the current configuration.
- They are used in the OFPPortMod message to configure the port's behavior.
-
- OFPPC_PORT_DOWN # Port is administratively down.
- OFPPC_NO_STP # Disable 802.1D spanning tree on port.
- OFPPC_NO_RECV # Drop all packets except 802.1D spanning tree.
- OFPPC_NO_RECV_STP # Drop received 802.1D STP packets.
- OFPPC_NO_FLOOD # Do not include this port when flooding.
- OFPPC_NO_FWD # Drop packets forwarded to port.
- OFPPC_NO_PACKET_IN # Do not send packet-in msgs for port.
- """
OFPC_PORT_DOWN = 1 << 0
OFPPC_NO_STP = 1 << 1
OFPPC_NO_RECV = 1 << 2
@@ -31,9 +21,32 @@ class PortConfig(base.GenericBitMask):
OFPPC_NO_PACKET_IN = 1 << 6
-
-
-class PortState(base.GenericBitMask):
+#class PortConfig(enum.Enum):
+# """Flags to indicate behavior of the physical port.
+#
+# These flags are used in OFPPhyPort to describe the current configuration.
+# They are used in the OFPPortMod message to configure the port's behavior.
+#
+# Enums:
+# OFPPC_PORT_DOWN # Port is administratively down.
+# OFPPC_NO_STP # Disable 802.1D spanning tree on port.
+# OFPPC_NO_RECV # Drop all packets except 802.1D spanning tree.
+# OFPPC_NO_RECV_STP # Drop received 802.1D STP packets.
+# OFPPC_NO_FLOOD # Do not include this port when flooding.
+# OFPPC_NO_FWD # Drop packets forwarded to port.
+# OFPPC_NO_PACKET_IN # Do not send packet-in msgs for port.
+# """
+#
+# OFPPC_PORT_DOWN = 1 << 0
+# OFPPC_NO_STP = 1 << 1
+# OFPPC_NO_RECV = 1 << 2
+# OFPPC_NO_RECV_STP = 1 << 3
+# OFPPC_FLOOD = 1 << 4
+# OFPPC_NO_FWD = 1 << 5
+# OFPPC_NO_PACKET_IN = 1 << 6
+
+
+class PortState(enum.Enum):
"""Current state of the physical port.
These are not configurable from the controller.
@@ -42,6 +55,7 @@ class PortState(base.GenericBitMask):
must adjust OFPPC_NO_RECV, OFPPC_NO_FWD, and OFPPC_NO_PACKET_IN
appropriately to fully implement an 802.1D spanning tree.
+ Enums:
OFPPS_LINK_DOWN # Not learning or relaying frames.
OFPPS_STP_LISTEN # Not learning or relaying frames.
OFPPS_STP_LEARN # Learning but not relaying frames.
@@ -58,7 +72,7 @@ class PortState(base.GenericBitMask):
# OFPPS_STP_MASK = 3 << 8 - Refer to ISSUE #7
-class Port(base.GenericBitMask):
+class Port(enum.Enum):
"""Port numbering.
Physical ports are numbered starting from 1. Port number 0 is reserved by
@@ -87,13 +101,14 @@ class Port(base.GenericBitMask):
OFPP_NONE = 0xffff
-class PortFeatures(base.GenericBitMask):
+class PortFeatures(enum.Enum):
"""Physical ports features.
The curr, advertised, supported, and peer fields indicate link modes
(10M to 10G full and half-duplex), link type (copper/fiber) and
link features (autonegotiation and pause).
+ Enums:
OFPPF_10MB_HD # 10 Mb half-duplex rate support.
OFPPF_10MB_FD # 10 Mb full-duplex rate support.
OFPPF_100MB_HD # 100 Mb half-duplex rate support.
diff --git a/pyof/v0x01/controller2switch/aggregate_stats_reply.py b/pyof/v0x01/controller2switch/aggregate_stats_reply.py
new file mode 100644
index 0000000..339127c
--- /dev/null
+++ b/pyof/v0x01/controller2switch/aggregate_stats_reply.py
@@ -0,0 +1,31 @@
+"""Body of the reply message"""
+
+# System imports
+
+# Third-party imports
+
+# Local source tree imports
+from pyof.v0x01.foundation import base
+from pyof.v0x01.foundation import basic_types
+
+# Classes
+
+
+class AggregateStatsReply(base.GenericStruct):
+ """Body of reply to OFPST_AGGREGATE request.
+
+ :param packet_count: Number of packets in flows
+ :param byte_count: Number of bytes in flows
+ :param flow_count: Number of flows
+ :param pad: Align to 64 bits
+
+ """
+ packet_count = basic_types.UBInt64()
+ byte_count = basic_types.UBInt64()
+ flow_count = basic_types.UBInt32()
+ pad = basic_types.PAD(4)
+
+ def __init__(self, packet_count=None, byte_count=None, flow_count=None):
+ self.packet_count = packet_count
+ self.byte_count = byte_count
+ self.flow_count = flow_count
diff --git a/pyof/v0x01/controller2switch/aggregate_stats_request.py b/pyof/v0x01/controller2switch/aggregate_stats_request.py
new file mode 100644
index 0000000..b45f923
--- /dev/null
+++ b/pyof/v0x01/controller2switch/aggregate_stats_request.py
@@ -0,0 +1,38 @@
+"""Aggregate information about multiple flows is requested with the
+OFPST_AGGREGATE stats request type"""
+
+# System imports
+
+# Third-party imports
+
+# Local source tree imports
+from pyof.v0x01.common import flow_match
+from pyof.v0x01.foundation import base
+from pyof.v0x01.foundation import basic_types
+
+# Classes
+
+
+class AggregateStatsRequest(base.GenericStruct):
+ """
+ Body for ofp_stats_request of type OFPST_AGGREGATE.
+
+ :param match: Fields to match
+ :param table_id: ID of table to read (from pyof_table_stats) 0xff
+ for all tables or 0xfe for emergency.
+ :param pad: Align to 32 bits
+ :param out_port: Require matching entries to include this as an
+ output port. A value of OFPP_NONE indicates
+ no restriction
+
+ """
+ match = flow_match.Match()
+ table_id = basic_types.UBInt8()
+ pad = basic_types.PAD(1)
+ out_port = basic_types.UBInt16()
+
+ def __init__(self, match=None, table_id=None, out_port=None):
+ super().__init__()
+ self.match = match
+ self.table_id = table_id
+ self.out_port = out_port
diff --git a/pyof/v0x01/controller2switch/common.py b/pyof/v0x01/controller2switch/common.py
index bfb9cd3..c5490dd 100644
--- a/pyof/v0x01/controller2switch/common.py
+++ b/pyof/v0x01/controller2switch/common.py
@@ -8,7 +8,6 @@ import enum
# Local source tree imports
from pyof.v0x01.common import header as of_header
from pyof.v0x01.common import action
-from pyof.v0x01.common import flow_match
from pyof.v0x01.foundation import base
from pyof.v0x01.foundation import basic_types
@@ -66,313 +65,3 @@ class ListOfActions(basic_types.FixedTypeList):
"""
def __init__(self, items=None):
super().__init__(pyof_class=action.ActionHeader, items=items)
-
-
-class AggregateStatsReply(base.GenericStruct):
- """Body of reply to OFPST_AGGREGATE request.
-
- :param packet_count: Number of packets in flows
- :param byte_count: Number of bytes in flows
- :param flow_count: Number of flows
- :param pad: Align to 64 bits
-
- """
- packet_count = basic_types.UBInt64()
- byte_count = basic_types.UBInt64()
- flow_count = basic_types.UBInt32()
- pad = basic_types.PAD(4)
-
- def __init__(self, packet_count=None, byte_count=None, flow_count=None):
- self.packet_count = packet_count
- self.byte_count = byte_count
- self.flow_count = flow_count
-
-
-class AggregateStatsRequest(base.GenericStruct):
- """
- Body for ofp_stats_request of type OFPST_AGGREGATE.
-
- :param match: Fields to match
- :param table_id: ID of table to read (from pyof_table_stats) 0xff
- for all tables or 0xfe for emergency.
- :param pad: Align to 32 bits
- :param out_port: Require matching entries to include this as an
- output port. A value of OFPP_NONE indicates
- no restriction
-
- """
- match = flow_match.Match()
- table_id = basic_types.UBInt8()
- pad = basic_types.PAD(1)
- out_port = basic_types.UBInt16()
-
- def __init__(self, match=None, table_id=None, out_port=None):
- super().__init__()
- self.match = match
- self.table_id = table_id
- self.out_port = out_port
-
-
-class DescStats(base.GenericStruct):
- """
- Information about the switch manufacturer, hardware revision, software
- revision, serial number, and a description field is avail- able from
- the OFPST_DESC stats request.
-
- :param mfr_desc: Manufacturer description
- :param hw_desc: Hardware description
- :param sw_desc: Software description
- :param serial_num: Serial number
- :param dp_desc: Human readable description of datapath
-
- """
- mfr_desc = basic_types.Char(length=base.DESC_STR_LEN)
- hw_desc = basic_types.Char(length=base.DESC_STR_LEN)
- sw_desc = basic_types.Char(length=base.DESC_STR_LEN)
- serial_num = basic_types.Char(length=base.SERIAL_NUM_LEN)
- dp_desc = basic_types.Char(length=base.DESC_STR_LEN)
-
- def __init__(self, mfr_desc=None, hw_desc=None, sw_desc=None,
- serial_num=None, dp_desc=None):
- self.mfr_desc = mfr_desc
- self.hw_desc = hw_desc
- self.sw_desc = sw_desc
- self.serial_num = serial_num
- self.dp_desc = dp_desc
-
-
-class FlowStats(base.GenericStruct):
- """
- Body of reply to OFPST_FLOW request.
-
- :param length: Length of this entry
- :param table_id: ID of table flow came from
- :param pad: Align to 32 bits
- :param match: Description of fields
- :param duration_sec: Time flow has been alive in seconds
- :param duration_nsec: Time flow has been alive in nanoseconds beyond
- duration_sec
- :param priority: Priority of the entry. Only meaningful when this
- is not an exact-match entry
- :param idle_timeout: Number of seconds idle before expiration
- :param hard_timeout: Number of seconds before expiration
- :param pad2: Align to 64-bits
- :param cookie: Opaque controller-issued identifier
- :param packet_count: Number of packets in flow
- :param byte_count: Number of bytes in flow
- :param actions: Actions
- """
- length = basic_types.UBInt16()
- table_id = basic_types.UBInt8()
- pad = basic_types.PAD(1)
- match = flow_match.Match()
- duration_sec = basic_types.UBInt32()
- duration_nsec = basic_types.UBInt32()
- priority = basic_types.UBInt16()
- idle_timeout = basic_types.UBInt16()
- hard_timeout = basic_types.UBInt16()
- pad2 = basic_types.PAD(6)
- cookie = basic_types.UBInt64()
- packet_count = basic_types.UBInt64()
- byte_count = basic_types.UBInt64()
- actions = ListOfActions()
-
- def __init__(self, length=None, table_id=None, match=None,
- duration_sec=None, duration_nsec=None, priority=None,
- idle_timeout=None, hard_timeout=None, cookie=None,
- packet_count=None, byte_count=None, actions=None):
- self.length = length
- self.table_id = table_id
- self.match = match
- self.duration_sec = duration_sec
- self.duration_nsec = duration_nsec
- self.prioriry = priority
- self.idle_timeout = idle_timeout
- self.hard_timeout = hard_timeout
- self.cookie = cookie
- self.packet_count = packet_count
- self.byte_count = byte_count
- self.actions = [] if actions is None else actions
-
-
-class FlowStatsRequest(base.GenericStruct):
- """
- Body for ofp_stats_request of type OFPST_FLOW.
-
- :param match: Fields to match
- :param table_id: ID of table to read (from pyof_table_stats)
- 0xff for all tables or 0xfe for emergency
- :param pad: Align to 32 bits
- :param out_port: Require matching entries to include this as an output
- port. A value of OFPP_NONE indicates no restriction.
-
- """
- match = flow_match.Match()
- table_id = basic_types.UBInt8()
- pad = basic_types.PAD(1)
- out_port = basic_types.UBInt16()
-
- def __init__(self, match=None, table_id=None, out_port=None):
- self.match = match
- self.table_id = table_id
- self.out_port = out_port
-
-
-class PortStats(base.GenericStruct):
- """Body of reply to OFPST_PORT request.
-
- If a counter is unsupported, set the field to all ones.
-
- :param port_no: Port number
- :param pad: Align to 64-bits
- :param rx_packets: Number of received packets
- :param tx_packets: Number of transmitted packets
- :param rx_bytes: Number of received bytes
- :param tx_bytes: Number of transmitted bytes
- :param rx_dropped: Number of packets dropped by RX
- :param tx_dropped: Number of packets dropped by TX
- :param rx_errors: Number of receive errors. This is a super-set
- of more specific receive errors and should be
- greater than or equal to the sum of all
- rx_*_err values
- :param tx_errors: Number of transmit errors. This is a super-set
- of more specific transmit errors and should be
- greater than or equal to the sum of all
- tx_*_err values (none currently defined.)
- :param rx_frame_err: Number of frame alignment errors
- :param rx_over_err: Number of packets with RX overrun
- :param rx_crc_err: Number of CRC errors
- :param collisions: Number of collisions
-
- """
- port_no = basic_types.UBInt16()
- pad = basic_types.PAD(6)
- rx_packets = basic_types.UBInt64()
- tx_packets = basic_types.UBInt64()
- rx_bytes = basic_types.UBInt64()
- tx_bytes = basic_types.UBInt64()
- rx_dropped = basic_types.UBInt64()
- tx_dropped = basic_types.UBInt64()
- rx_errors = basic_types.UBInt64()
- tx_errors = basic_types.UBInt64()
- rx_frame_err = basic_types.UBInt64()
- rx_over_err = basic_types.UBInt64()
- rx_crc_err = basic_types.UBInt64()
- collisions = basic_types.UBInt64()
-
- def __init__(self, port_no=None, rx_packets=None,
- tx_packets=None, rx_bytes=None, tx_bytes=None,
- rx_dropped=None, tx_dropped=None, rx_errors=None,
- tx_errors=None, rx_frame_err=None, rx_over_err=None,
- rx_crc_err=None, collisions=None):
- self.port_no = port_no
- self.rx_packets = rx_packets
- self.tx_packets = tx_packets
- self.rx_bytes = rx_bytes
- self.tx_bytes = tx_bytes
- self.rx_dropped = rx_dropped
- self.tx_dropped = tx_dropped
- self.rx_errors = rx_errors
- self.tx_errors = tx_errors
- self.rx_frame_err = rx_frame_err
- self.rx_over_err = rx_over_err
- self.rx_crc_err = rx_crc_err
- self.collisions = collisions
-
-
-class PortStatsRequest(base.GenericStruct):
- """
- Body for ofp_stats_request of type OFPST_PORT
-
- :param port_no: OFPST_PORT message must request statistics either
- for a single port (specified in port_no) or for
- all ports (if port_no == OFPP_NONE).
- :param pad:
-
- """
- port_no = basic_types.UBInt16()
- pad = basic_types.PAD(6)
-
- def __init__(self, port_no=None):
- self.port_no = port_no
-
-
-class QueueStats(base.GenericStruct):
- """
- Implements the reply body of a port_no
-
- :param port_no: Port Number
- :param pad: Align to 32-bits
- :param queue_id: Queue ID
- :param tx_bytes: Number of transmitted bytes
- :param tx_packets: Number of transmitted packets
- :param tx_errors: Number of packets dropped due to overrun
-
- """
- port_no = basic_types.UBInt16()
- pad = basic_types.PAD(2)
- queue_id = basic_types.UBInt32()
- tx_bytes = basic_types.UBInt64()
- tx_packets = basic_types.UBInt64()
- tx_errors = basic_types.UBInt64()
-
- def __init__(self, port_no=None, queue_id=None, tx_bytes=None,
- tx_packets=None, tx_errors=None):
- self.port_no = port_no
- self.queue_id = queue_id
- self.tx_bytes = tx_bytes
- self.tx_packets = tx_packets
- self.tx_errors = tx_errors
-
-
-class QueueStatsRequest(base.GenericStruct):
- """
- Implements the request body of a port_no
-
- :param port_no: All ports if OFPT_ALL
- :param pad: Align to 32-bits
- :param queue_id: All queues if OFPQ_ALL
- """
- port_no = basic_types.UBInt16()
- pad = basic_types.PAD(2)
- queue_id = basic_types.UBInt32()
-
- def __init__(self, port_no=None, queue_id=None):
- self.port_no = port_no
- self.queue_id = queue_id
-
-
-class TableStats(base.GenericStruct):
- """Body of reply to OFPST_TABLE request.
-
- :param table_id: Identifier of table. Lower numbered tables
- are consulted first
- :param pad: Align to 32-bits
- :param name: Table name
- :param wildcards: Bitmap of OFPFW_* wildcards that are supported
- by the table
- :param max_entries: Max number of entries supported
- :param active_count: Number of active entries
- :param count_lookup: Number of packets looked up in table
- :param count_matched: Number of packets that hit table
-
- """
- table_id = basic_types.UBInt8()
- pad = basic_types.PAD(3)
- name = basic_types.Char(length=base.OFP_MAX_TABLE_NAME_LEN)
- wildcards = basic_types.UBInt32()
- max_entries = basic_types.UBInt32()
- active_count = basic_types.UBInt32()
- count_lookup = basic_types.UBInt64()
- count_matched = basic_types.UBInt64()
-
- def __init__(self, table_id=None, name=None, wildcards=None,
- max_entries=None, active_count=None, count_lookup=None,
- count_matched=None):
- self.table_id = table_id
- self.name = name
- self.wildcards = wildcards
- self.max_entries = max_entries
- self.active_count = active_count
- self.count_lookup = count_lookup
- self.count_matched = count_matched
diff --git a/pyof/v0x01/controller2switch/desc_stats.py b/pyof/v0x01/controller2switch/desc_stats.py
new file mode 100644
index 0000000..bceffb9
--- /dev/null
+++ b/pyof/v0x01/controller2switch/desc_stats.py
@@ -0,0 +1,39 @@
+"""Information about the switch manufactures"""
+
+# System imports
+
+# Third-party imports
+
+# Local source tree imports
+from pyof.v0x01.foundation import base
+from pyof.v0x01.foundation import basic_types
+
+# Classes
+
+
+class DescStats(base.GenericStruct):
+ """
+ Information about the switch manufacturer, hardware revision, software
+ revision, serial number, and a description field is available from
+ the OFPST_DESC stats request.
+
+ :param mfr_desc: Manufacturer description
+ :param hw_desc: Hardware description
+ :param sw_desc: Software description
+ :param serial_num: Serial number
+ :param dp_desc: Human readable description of datapath
+
+ """
+ mfr_desc = basic_types.Char(length=base.DESC_STR_LEN)
+ hw_desc = basic_types.Char(length=base.DESC_STR_LEN)
+ sw_desc = basic_types.Char(length=base.DESC_STR_LEN)
+ serial_num = basic_types.Char(length=base.SERIAL_NUM_LEN)
+ dp_desc = basic_types.Char(length=base.DESC_STR_LEN)
+
+ def __init__(self, mfr_desc=None, hw_desc=None, sw_desc=None,
+ serial_num=None, dp_desc=None):
+ self.mfr_desc = mfr_desc
+ self.hw_desc = hw_desc
+ self.sw_desc = sw_desc
+ self.serial_num = serial_num
+ self.dp_desc = dp_desc
diff --git a/pyof/v0x01/controller2switch/features_reply.py b/pyof/v0x01/controller2switch/features_reply.py
index c5f3bca..e02f012 100644
--- a/pyof/v0x01/controller2switch/features_reply.py
+++ b/pyof/v0x01/controller2switch/features_reply.py
@@ -1,6 +1,7 @@
"""Defines Features Reply classes and related items"""
# System imports
+import enum
# Third-party imports
@@ -11,9 +12,13 @@ from pyof.v0x01.foundation import base
from pyof.v0x01.foundation import basic_types
-class Capabilities(base.GenericBitMask):
- """Capabilities supported by the datapath
+# Enums
+
+class Capabilities(enum.Enum):
+ """Enumeration of Capabilities supported by the datapath
+
+ Enums:
OFPC_FLOW_STATS # Flow statistics
OFPC_TABLE_STATS # Table statistics
OFPC_PORT_STATS # Port statistics
diff --git a/pyof/v0x01/controller2switch/flow_stats.py b/pyof/v0x01/controller2switch/flow_stats.py
new file mode 100644
index 0000000..efb5edd
--- /dev/null
+++ b/pyof/v0x01/controller2switch/flow_stats.py
@@ -0,0 +1,67 @@
+"""Body of the reply to an OFPST_FLOW"""
+
+# System imports
+
+# Third-party imports
+
+# Local source tree imports
+from pyof.v0x01.common import flow_match
+from pyof.v0x01.controller2switch import common
+from pyof.v0x01.foundation import base
+from pyof.v0x01.foundation import basic_types
+
+# Classes
+
+
+class FlowStats(base.GenericStruct):
+ """
+ Body of reply to OFPST_FLOW request.
+
+ :param length: Length of this entry
+ :param table_id: ID of table flow came from
+ :param pad: Align to 32 bits
+ :param match: Description of fields
+ :param duration_sec: Time flow has been alive in seconds
+ :param duration_nsec: Time flow has been alive in nanoseconds beyond
+ duration_sec
+ :param priority: Priority of the entry. Only meaningful when this
+ is not an exact-match entry
+ :param idle_timeout: Number of seconds idle before expiration
+ :param hard_timeout: Number of seconds before expiration
+ :param pad2: Align to 64-bits
+ :param cookie: Opaque controller-issued identifier
+ :param packet_count: Number of packets in flow
+ :param byte_count: Number of bytes in flow
+ :param actions: Actions
+ """
+ length = basic_types.UBInt16()
+ table_id = basic_types.UBInt8()
+ pad = basic_types.PAD(1)
+ match = flow_match.Match()
+ duration_sec = basic_types.UBInt32()
+ duration_nsec = basic_types.UBInt32()
+ priority = basic_types.UBInt16()
+ idle_timeout = basic_types.UBInt16()
+ hard_timeout = basic_types.UBInt16()
+ pad2 = basic_types.PAD(6)
+ cookie = basic_types.UBInt64()
+ packet_count = basic_types.UBInt64()
+ byte_count = basic_types.UBInt64()
+ actions = common.ListOfActions()
+
+ def __init__(self, length=None, table_id=None, match=None,
+ duration_sec=None, duration_nsec=None, priority=None,
+ idle_timeout=None, hard_timeout=None, cookie=None,
+ packet_count=None, byte_count=None, actions=None):
+ self.length = length
+ self.table_id = table_id
+ self.match = match
+ self.duration_sec = duration_sec
+ self.duration_nsec = duration_nsec
+ self.priority = priority
+ self.idle_timeout = idle_timeout
+ self.hard_timeout = hard_timeout
+ self.cookie = cookie
+ self.packet_count = packet_count
+ self.byte_count = byte_count
+ self.actions = [] if actions is None else actions
diff --git a/pyof/v0x01/controller2switch/flow_stats_request.py b/pyof/v0x01/controller2switch/flow_stats_request.py
new file mode 100644
index 0000000..1fc5794
--- /dev/null
+++ b/pyof/v0x01/controller2switch/flow_stats_request.py
@@ -0,0 +1,35 @@
+"""Information about individual flows"""
+
+# System imports
+
+# Third-party imports
+
+# Local source tree imports
+from pyof.v0x01.common import flow_match
+from pyof.v0x01.foundation import base
+from pyof.v0x01.foundation import basic_types
+
+# Classes
+
+
+class FlowStatsRequest(base.GenericStruct):
+ """
+ Body for ofp_stats_request of type OFPST_FLOW.
+
+ :param match: Fields to match
+ :param table_id: ID of table to read (from pyof_table_stats)
+ 0xff for all tables or 0xfe for emergency
+ :param pad: Align to 32 bits
+ :param out_port: Require matching entries to include this as an output
+ port. A value of OFPP_NONE indicates no restriction.
+
+ """
+ match = flow_match.Match()
+ table_id = basic_types.UBInt8()
+ pad = basic_types.PAD(1)
+ out_port = basic_types.UBInt16()
+
+ def __init__(self, match=None, table_id=None, out_port=None):
+ self.match = match
+ self.table_id = table_id
+ self.out_port = out_port
diff --git a/pyof/v0x01/controller2switch/port_stats.py b/pyof/v0x01/controller2switch/port_stats.py
new file mode 100644
index 0000000..474828d
--- /dev/null
+++ b/pyof/v0x01/controller2switch/port_stats.py
@@ -0,0 +1,73 @@
+"""Body of the port stats reply"""
+
+# System imports
+
+# Third-party imports
+
+# Local source tree imports
+from pyof.v0x01.foundation import base
+from pyof.v0x01.foundation import basic_types
+
+# Classes
+
+
+class PortStats(base.GenericStruct):
+ """Body of reply to OFPST_PORT request.
+
+ If a counter is unsupported, set the field to all ones.
+
+ :param port_no: Port number
+ :param pad: Align to 64-bits
+ :param rx_packets: Number of received packets
+ :param tx_packets: Number of transmitted packets
+ :param rx_bytes: Number of received bytes
+ :param tx_bytes: Number of transmitted bytes
+ :param rx_dropped: Number of packets dropped by RX
+ :param tx_dropped: Number of packets dropped by TX
+ :param rx_errors: Number of receive errors. This is a super-set
+ of more specific receive errors and should be
+ greater than or equal to the sum of all
+ rx_*_err values
+ :param tx_errors: Number of transmit errors. This is a super-set
+ of more specific transmit errors and should be
+ greater than or equal to the sum of all
+ tx_*_err values (none currently defined.)
+ :param rx_frame_err: Number of frame alignment errors
+ :param rx_over_err: Number of packets with RX overrun
+ :param rx_crc_err: Number of CRC errors
+ :param collisions: Number of collisions
+
+ """
+ port_no = basic_types.UBInt16()
+ pad = basic_types.PAD(6)
+ rx_packets = basic_types.UBInt64()
+ tx_packets = basic_types.UBInt64()
+ rx_bytes = basic_types.UBInt64()
+ tx_bytes = basic_types.UBInt64()
+ rx_dropped = basic_types.UBInt64()
+ tx_dropped = basic_types.UBInt64()
+ rx_errors = basic_types.UBInt64()
+ tx_errors = basic_types.UBInt64()
+ rx_frame_err = basic_types.UBInt64()
+ rx_over_err = basic_types.UBInt64()
+ rx_crc_err = basic_types.UBInt64()
+ collisions = basic_types.UBInt64()
+
+ def __init__(self, port_no=None, rx_packets=None,
+ tx_packets=None, rx_bytes=None, tx_bytes=None,
+ rx_dropped=None, tx_dropped=None, rx_errors=None,
+ tx_errors=None, rx_frame_err=None, rx_over_err=None,
+ rx_crc_err=None, collisions=None):
+ self.port_no = port_no
+ self.rx_packets = rx_packets
+ self.tx_packets = tx_packets
+ self.rx_bytes = rx_bytes
+ self.tx_bytes = tx_bytes
+ self.rx_dropped = rx_dropped
+ self.tx_dropped = tx_dropped
+ self.rx_errors = rx_errors
+ self.tx_errors = tx_errors
+ self.rx_frame_err = rx_frame_err
+ self.rx_over_err = rx_over_err
+ self.rx_crc_err = rx_crc_err
+ self.collisions = collisions
diff --git a/pyof/v0x01/controller2switch/port_stats_request.py b/pyof/v0x01/controller2switch/port_stats_request.py
new file mode 100644
index 0000000..8fa4049
--- /dev/null
+++ b/pyof/v0x01/controller2switch/port_stats_request.py
@@ -0,0 +1,26 @@
+"""Information about physical ports is requested with OFPST_PORT"""
+
+# System imports
+
+# Third-party imports
+
+# Local source tree imports
+from pyof.v0x01.foundation import base
+from pyof.v0x01.foundation import basic_types
+
+
+class PortStatsRequest(base.GenericStruct):
+ """
+ Body for ofp_stats_request of type OFPST_PORT
+
+ :param port_no: OFPST_PORT message must request statistics either
+ for a single port (specified in port_no) or for
+ all ports (if port_no == OFPP_NONE).
+    :param pad: Align to 64-bits
+
+ """
+ port_no = basic_types.UBInt16()
+ pad = basic_types.PAD(6)
+
+ def __init__(self, port_no=None):
+ self.port_no = port_no
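For illustration — not part of the patch — a minimal sketch of how one of these new request structs might be used, assuming the `Port` enum from `pyof.v0x01.common.phy_port` that the aggregate-stats test below also relies on:

```python
from pyof.v0x01.common.phy_port import Port
from pyof.v0x01.controller2switch.port_stats_request import PortStatsRequest

# Request statistics for all ports at once, per the docstring above.
request = PortStatsRequest(port_no=Port.OFPP_NONE)

# Wire size is UBInt16 (2 bytes) + PAD(6) = 8 bytes.
assert request.get_size() == 8

# pack() serializes the struct for the wire (a sketch; it relies on the
# GenericStruct machinery shown further down in this patch).
payload = request.pack()
```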
diff --git a/pyof/v0x01/controller2switch/queue_stats.py b/pyof/v0x01/controller2switch/queue_stats.py
new file mode 100644
index 0000000..d7df9b3
--- /dev/null
+++ b/pyof/v0x01/controller2switch/queue_stats.py
@@ -0,0 +1,38 @@
+"""The OFPST_QUEUE stats reply message provides queue statistics for one
+or more ports."""
+
+# System imports
+
+# Third-party imports
+
+# Local source tree imports
+from pyof.v0x01.foundation import base
+from pyof.v0x01.foundation import basic_types
+
+
+class QueueStats(base.GenericStruct):
+ """
+    Implements the body of a queue stats reply
+
+ :param port_no: Port Number
+ :param pad: Align to 32-bits
+ :param queue_id: Queue ID
+ :param tx_bytes: Number of transmitted bytes
+ :param tx_packets: Number of transmitted packets
+ :param tx_errors: Number of packets dropped due to overrun
+
+ """
+ port_no = basic_types.UBInt16()
+ pad = basic_types.PAD(2)
+ queue_id = basic_types.UBInt32()
+ tx_bytes = basic_types.UBInt64()
+ tx_packets = basic_types.UBInt64()
+ tx_errors = basic_types.UBInt64()
+
+ def __init__(self, port_no=None, queue_id=None, tx_bytes=None,
+ tx_packets=None, tx_errors=None):
+ self.port_no = port_no
+ self.queue_id = queue_id
+ self.tx_bytes = tx_bytes
+ self.tx_packets = tx_packets
+ self.tx_errors = tx_errors
diff --git a/pyof/v0x01/controller2switch/queue_stats_request.py b/pyof/v0x01/controller2switch/queue_stats_request.py
new file mode 100644
index 0000000..c1b431b
--- /dev/null
+++ b/pyof/v0x01/controller2switch/queue_stats_request.py
@@ -0,0 +1,27 @@
+"""The OFPST_QUEUE stats request message provides queue statistics for one
+or more ports."""
+
+# System imports
+
+# Third-party imports
+
+# Local source tree imports
+from pyof.v0x01.foundation import base
+from pyof.v0x01.foundation import basic_types
+
+
+class QueueStatsRequest(base.GenericStruct):
+ """
+    Implements the body of a queue stats request
+
+ :param port_no: All ports if OFPT_ALL
+ :param pad: Align to 32-bits
+ :param queue_id: All queues if OFPQ_ALL
+ """
+ port_no = basic_types.UBInt16()
+ pad = basic_types.PAD(2)
+ queue_id = basic_types.UBInt32()
+
+ def __init__(self, port_no=None, queue_id=None):
+ self.port_no = port_no
+ self.queue_id = queue_id
diff --git a/pyof/v0x01/controller2switch/table_stats.py b/pyof/v0x01/controller2switch/table_stats.py
new file mode 100644
index 0000000..bf2e42f
--- /dev/null
+++ b/pyof/v0x01/controller2switch/table_stats.py
@@ -0,0 +1,45 @@
+"""Information about tables is requested with OFPST_TABLE stats request type"""
+
+# System imports
+
+# Third-party imports
+
+# Local source tree imports
+from pyof.v0x01.foundation import base
+from pyof.v0x01.foundation import basic_types
+
+
+class TableStats(base.GenericStruct):
+ """Body of reply to OFPST_TABLE request.
+
+ :param table_id: Identifier of table. Lower numbered tables
+ are consulted first
+ :param pad: Align to 32-bits
+ :param name: Table name
+ :param wildcards: Bitmap of OFPFW_* wildcards that are supported
+ by the table
+ :param max_entries: Max number of entries supported
+ :param active_count: Number of active entries
+ :param count_lookup: Number of packets looked up in table
+ :param count_matched: Number of packets that hit table
+
+ """
+ table_id = basic_types.UBInt8()
+ pad = basic_types.PAD(3)
+ name = basic_types.Char(length=base.OFP_MAX_TABLE_NAME_LEN)
+ wildcards = basic_types.UBInt32()
+ max_entries = basic_types.UBInt32()
+ active_count = basic_types.UBInt32()
+ count_lookup = basic_types.UBInt64()
+ count_matched = basic_types.UBInt64()
+
+ def __init__(self, table_id=None, name=None, wildcards=None,
+ max_entries=None, active_count=None, count_lookup=None,
+ count_matched=None):
+ self.table_id = table_id
+ self.name = name
+ self.wildcards = wildcards
+ self.max_entries = max_entries
+ self.active_count = active_count
+ self.count_lookup = count_lookup
+ self.count_matched = count_matched
diff --git a/pyof/v0x01/foundation/base.py b/pyof/v0x01/foundation/base.py
index 7c07b3e..a146a48 100644
--- a/pyof/v0x01/foundation/base.py
+++ b/pyof/v0x01/foundation/base.py
@@ -60,17 +60,16 @@ class GenericType(object):
"""This is a foundation class for all custom attributes.
    Attributes like `UBInt8`, `UBInt16`, `HWAddress`, among others, use this
- class as base.
- """
+ class as base."""
def __init__(self, value=None, enum_ref=None):
self._value = value
self._enum_ref = enum_ref
def __repr__(self):
- return "{}({})".format(self.__class__.__name__, self._value)
+ return "{}({})".format(type(self).__name__, self._value)
def __str__(self):
- return '<{}: {}>'.format(self.__class__.__name__, str(self._value))
+ return '{}'.format(str(self._value))
def __eq__(self, other):
return self._value == other
@@ -105,7 +104,7 @@ class GenericType(object):
return struct.pack(self._fmt, value)
except struct.error as err:
message = "Value out of the possible range to basic type "
- message = message + self.__class__.__name__ + ". "
+ message = message + type(self).__name__ + ". "
message = message + str(err)
raise exceptions.BadValueException(message)
@@ -154,7 +153,6 @@ class GenericType(object):
class MetaStruct(type):
"""MetaClass used to force ordered attributes."""
-
@classmethod
def __prepare__(self, name, bases):
return _OD()
@@ -177,7 +175,6 @@ class GenericStruct(object, metaclass=MetaStruct):
has a list of attributes and theses attributes can be of struct
type too.
"""
-
def __init__(self, *args, **kwargs):
for _attr in self.__ordered__:
if not callable(getattr(self, _attr)):
@@ -186,29 +183,6 @@ class GenericStruct(object, metaclass=MetaStruct):
except KeyError:
pass
- def __repr__(self):
- message = self.__class__.__name__
- message += '('
- for _attr in self.__ordered__:
- message += repr(getattr(self, _attr))
- message += ", "
- # Removing a comma and a space from the end of the string
- message = message[:-2]
- message += ')'
- return message
-
- def __str__(self):
- message = "{}:\n".format(self.__class__.__name__)
- for _attr in self.__ordered__:
- attr = getattr(self, _attr)
- if not hasattr(attr, '_fmt'):
- message += " {}".format(str(attr).replace('\n', '\n '))
- else:
- message += " {}: {}\n".format(_attr, str(attr))
- message.rstrip('\r')
- message.rstrip('\n')
- return message
-
def _attributes(self):
"""Returns a generator with each attribute from the current instance.
@@ -282,7 +256,7 @@ class GenericStruct(object, metaclass=MetaStruct):
if _class.__name__ is 'PAD':
size += attr.get_size()
elif _class.__name__ is 'Char':
- size += getattr(self.__class__, _attr).get_size()
+ size += getattr(type(self), _attr).get_size()
elif issubclass(_class, GenericType):
size += _class().get_size()
elif isinstance(attr, _class):
@@ -302,13 +276,13 @@ class GenericStruct(object, metaclass=MetaStruct):
"""
if not self.is_valid():
error_msg = "Erro on validation prior to pack() on class "
- error_msg += "{}.".format(self.__class__.__name__)
+ error_msg += "{}.".format(type(self).__name__)
raise exceptions.ValidationError(error_msg)
else:
message = b''
for attr_name, attr_class in self.__ordered__.items():
attr = getattr(self, attr_name)
- class_attr = getattr(self.__class__, attr_name)
+ class_attr = getattr(type(self), attr_name)
if isinstance(attr, attr_class):
message += attr.pack()
elif class_attr.is_enum():
@@ -469,10 +443,10 @@ class GenericBitMask(object, metaclass=MetaBitMask):
self.bitmask = bitmask
def __str__(self):
- return "<%s: %s>" % (self.__class__.__name__, self.names)
+ return "{}".format(self.bitmask)
def __repr__(self):
- return "<%s(%s)>" % (self.__class__.__name__, self.bitmask)
+ return "{}({})".format(type(self).__name__, self.bitmask)
@property
def names(self):
diff --git a/pyof/v0x01/foundation/basic_types.py b/pyof/v0x01/foundation/basic_types.py
index 417d7f1..ba26316 100644
--- a/pyof/v0x01/foundation/basic_types.py
+++ b/pyof/v0x01/foundation/basic_types.py
@@ -27,10 +27,10 @@ class PAD(base.GenericType):
self._length = length
def __repr__(self):
- return "{}({})".format(self.__class__.__name__, self._length)
+ return "{}({})".format(type(self).__name__, self._length)
def __str__(self):
- return str(self._length)
+ return self.pack()
def get_size(self):
""" Return the size of type in bytes. """
@@ -131,7 +131,6 @@ class BinaryData(base.GenericType):
Both the 'pack' and 'unpack' methods will return the binary data itself.
get_size method will return the size of the instance using python 'len'
"""
-
def __init__(self, value=b''):
super().__init__(value)
@@ -144,7 +143,7 @@ class BinaryData(base.GenericType):
else:
raise exceptions.NotBinarydata()
- def unpack(self, buff):
+ def unpack(self, buff, offset):
self._value = buff
def get_size(self):
@@ -163,18 +162,9 @@ class FixedTypeList(list, base.GenericStruct):
elif items:
self.append(items)
- def __repr__(self):
- """Unique representantion of the object.
-
- This can be used to generate an object that has the
- same content of the current object"""
- return "{}({},{})".format(self.__class__.__name__,
- self._pyof_class,
- self)
-
def __str__(self):
"""Human-readable object representantion"""
- return "{}".format([item for item in self])
+ return "{}".format([str(item) for item in self])
def append(self, item):
if type(item) is list:
@@ -243,17 +233,9 @@ class ConstantTypeList(list, base.GenericStruct):
elif items:
self.append(items)
- def __repr__(self):
- """Unique representantion of the object.
-
- This can be used to generate an object that has the
- same content of the current object"""
- return "{}({})".format(self.__class__.__name__,
- self)
-
def __str__(self):
"""Human-readable object representantion"""
- return "{}".format([item for item in self])
+ return "{}".format([str(item) for item in self])
def append(self, item):
if type(item) is list:
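Taken together, the `base.py` and `basic_types.py` hunks converge on a single representation convention, sketched here in isolation (an illustrative stand-in class, not code from the patch): `__repr__` yields a constructor-like form built from `type(self).__name__`, while `__str__` yields only the value.

```python
class UBInt16Like:
    """Illustrative stand-in for a GenericType subclass."""

    def __init__(self, value=None):
        self._value = value

    def __repr__(self):
        # Constructor-like form, e.g. "UBInt16Like(8000)"
        return "{}({})".format(type(self).__name__, self._value)

    def __str__(self):
        # Only the value, e.g. "8000"
        return "{}".format(self._value)

port = UBInt16Like(8000)
assert repr(port) == "UBInt16Like(8000)"
assert str(port) == "8000"
```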
| Review all __str__ and __repr__ methods
We need to make sure that all `__str__` and `__repr__` are correct.
For instance, look how strange the default representation of `common.phy_port.PhyPort` is:
```
PhyPort(None, None, None, None, None)
``` | kytos/python-openflow | diff --git a/tests/v0x01/test_controller2switch/test_aggregate_stats_reply.py b/tests/v0x01/test_controller2switch/test_aggregate_stats_reply.py
index 1c699dd..3fc3326 100644
--- a/tests/v0x01/test_controller2switch/test_aggregate_stats_reply.py
+++ b/tests/v0x01/test_controller2switch/test_aggregate_stats_reply.py
@@ -1,12 +1,12 @@
import unittest
-from pyof.v0x01.controller2switch.common import AggregateStatsReply
+from pyof.v0x01.controller2switch import aggregate_stats_reply
class TestAggregateStatsReply(unittest.TestCase):
def setUp(self):
- self.message = AggregateStatsReply()
+ self.message = aggregate_stats_reply.AggregateStatsReply()
self.message.packet_count = 5
self.message.byte_count = 1
self.message.flow_count = 8
diff --git a/tests/v0x01/test_controller2switch/test_aggregate_stats_request.py b/tests/v0x01/test_controller2switch/test_aggregate_stats_request.py
index 8e4be10..ca563fe 100644
--- a/tests/v0x01/test_controller2switch/test_aggregate_stats_request.py
+++ b/tests/v0x01/test_controller2switch/test_aggregate_stats_request.py
@@ -2,13 +2,13 @@ import unittest
from pyof.v0x01.common import flow_match
from pyof.v0x01.common import phy_port
-from pyof.v0x01.controller2switch.common import AggregateStatsRequest
+from pyof.v0x01.controller2switch import aggregate_stats_request
class TestAggregateStatsRequest(unittest.TestCase):
def setUp(self):
- self.message = AggregateStatsRequest()
+ self.message = aggregate_stats_request.AggregateStatsRequest()
self.message.match = flow_match.Match()
self.message.table_id = 1
self.message.out_port = phy_port.Port.OFPP_NONE
diff --git a/tests/v0x01/test_controller2switch/test_desc_stats.py b/tests/v0x01/test_controller2switch/test_desc_stats.py
index 4f13f97..5ea6422 100644
--- a/tests/v0x01/test_controller2switch/test_desc_stats.py
+++ b/tests/v0x01/test_controller2switch/test_desc_stats.py
@@ -1,6 +1,6 @@
import unittest
-from pyof.v0x01.controller2switch.common import DescStats
+from pyof.v0x01.controller2switch import desc_stats
from pyof.v0x01.foundation import base
@@ -8,7 +8,7 @@ class TestDescStats(unittest.TestCase):
def setUp(self):
content = bytes('A' * base.DESC_STR_LEN, 'utf-8')
- self.message = DescStats()
+ self.message = desc_stats.DescStats()
self.message.mfr_desc = content
self.message.hw_desc = content
self.message.sw_desc = content
diff --git a/tests/v0x01/test_controller2switch/test_flow_stats.py b/tests/v0x01/test_controller2switch/test_flow_stats.py
index 2a04c07..1de34ab 100644
--- a/tests/v0x01/test_controller2switch/test_flow_stats.py
+++ b/tests/v0x01/test_controller2switch/test_flow_stats.py
@@ -1,13 +1,13 @@
import unittest
from pyof.v0x01.common import flow_match
-from pyof.v0x01.controller2switch.common import FlowStats
+from pyof.v0x01.controller2switch import flow_stats
class TestFlowStats(unittest.TestCase):
def setUp(self):
- self.message = FlowStats()
+ self.message = flow_stats.FlowStats()
self.message.length = 160
self.message.table_id = 1
self.message.match = flow_match.Match()
diff --git a/tests/v0x01/test_controller2switch/test_flow_stats_request.py b/tests/v0x01/test_controller2switch/test_flow_stats_request.py
index 2a6f3fb..85a33bb 100644
--- a/tests/v0x01/test_controller2switch/test_flow_stats_request.py
+++ b/tests/v0x01/test_controller2switch/test_flow_stats_request.py
@@ -1,13 +1,13 @@
import unittest
from pyof.v0x01.common import flow_match
-from pyof.v0x01.controller2switch.common import FlowStatsRequest
+from pyof.v0x01.controller2switch import flow_stats_request
class TestFlowStatsRequest(unittest.TestCase):
def setUp(self):
- self.message = FlowStatsRequest()
+ self.message = flow_stats_request.FlowStatsRequest()
self.message.match = flow_match.Match()
self.message.table_id = 1
self.message.out_port = 80
diff --git a/tests/v0x01/test_controller2switch/test_port_stats.py b/tests/v0x01/test_controller2switch/test_port_stats.py
index e6f4b55..7a58485 100644
--- a/tests/v0x01/test_controller2switch/test_port_stats.py
+++ b/tests/v0x01/test_controller2switch/test_port_stats.py
@@ -1,12 +1,12 @@
import unittest
-from pyof.v0x01.controller2switch.common import PortStats
+from pyof.v0x01.controller2switch import port_stats
class TestPortStats(unittest.TestCase):
def setUp(self):
- self.message = PortStats()
+ self.message = port_stats.PortStats()
self.message.port_no = 80
self.message.rx_packets = 5
self.message.tx_packets = 10
diff --git a/tests/v0x01/test_controller2switch/test_port_stats_request.py b/tests/v0x01/test_controller2switch/test_port_stats_request.py
index 8025055..e0454dc 100644
--- a/tests/v0x01/test_controller2switch/test_port_stats_request.py
+++ b/tests/v0x01/test_controller2switch/test_port_stats_request.py
@@ -1,12 +1,12 @@
import unittest
-from pyof.v0x01.controller2switch.common import PortStatsRequest
+from pyof.v0x01.controller2switch import port_stats_request
class TestPortStatsRequest(unittest.TestCase):
def setUp(self):
- self.message = PortStatsRequest()
+ self.message = port_stats_request.PortStatsRequest()
self.message.port_no = 80
def test_get_size(self):
diff --git a/tests/v0x01/test_controller2switch/test_queue_stats.py b/tests/v0x01/test_controller2switch/test_queue_stats.py
index b1f1219..fb03ee7 100644
--- a/tests/v0x01/test_controller2switch/test_queue_stats.py
+++ b/tests/v0x01/test_controller2switch/test_queue_stats.py
@@ -1,12 +1,12 @@
import unittest
-from pyof.v0x01.controller2switch.common import QueueStats
+from pyof.v0x01.controller2switch import queue_stats
class TestQueueStats(unittest.TestCase):
def setUp(self):
- self.message = QueueStats()
+ self.message = queue_stats.QueueStats()
self.message.port_no = 80
self.message.queue_id = 5
self.message.tx_bytes = 1
diff --git a/tests/v0x01/test_controller2switch/test_queue_stats_request.py b/tests/v0x01/test_controller2switch/test_queue_stats_request.py
index 2f95381..80cad68 100644
--- a/tests/v0x01/test_controller2switch/test_queue_stats_request.py
+++ b/tests/v0x01/test_controller2switch/test_queue_stats_request.py
@@ -1,12 +1,12 @@
import unittest
-from pyof.v0x01.controller2switch.common import QueueStatsRequest
+from pyof.v0x01.controller2switch import queue_stats_request
class TestQueueStatsRequest(unittest.TestCase):
def setUp(self):
- self.message = QueueStatsRequest()
+ self.message = queue_stats_request.QueueStatsRequest()
self.message.port_no = 80
self.message.queue_id = 5
diff --git a/tests/v0x01/test_controller2switch/test_table_stats.py b/tests/v0x01/test_controller2switch/test_table_stats.py
index 39888b5..353f6de 100644
--- a/tests/v0x01/test_controller2switch/test_table_stats.py
+++ b/tests/v0x01/test_controller2switch/test_table_stats.py
@@ -1,14 +1,14 @@
import unittest
from pyof.v0x01.common import flow_match
-from pyof.v0x01.controller2switch.common import TableStats
+from pyof.v0x01.controller2switch import table_stats
from pyof.v0x01.foundation import base
class TestTableStats(unittest.TestCase):
def setUp(self):
- self.message = TableStats()
+ self.message = table_stats.TableStats()
self.message.table_id = 1
self.message.name = bytes('X' * base.OFP_MAX_TABLE_NAME_LEN, 'utf-8')
self.message.wildcards = flow_match.FlowWildCards.OFPFW_TP_DST
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 6
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"coverage",
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
execnet==2.1.1
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
-e git+https://github.com/kytos/python-openflow.git@545ae8a73dcec2e67cf854b21c44e692692f71dc#egg=Kytos_OpenFlow_Parser_library
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
typing_extensions==4.13.0
| name: python-openflow
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- execnet==2.1.1
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- typing-extensions==4.13.0
prefix: /opt/conda/envs/python-openflow
| [
"tests/v0x01/test_controller2switch/test_aggregate_stats_reply.py::TestAggregateStatsReply::test_get_size",
"tests/v0x01/test_controller2switch/test_aggregate_stats_request.py::TestAggregateStatsRequest::test_get_size",
"tests/v0x01/test_controller2switch/test_desc_stats.py::TestDescStats::test_get_size",
"tests/v0x01/test_controller2switch/test_flow_stats.py::TestFlowStats::test_get_size",
"tests/v0x01/test_controller2switch/test_flow_stats_request.py::TestFlowStatsRequest::test_get_size",
"tests/v0x01/test_controller2switch/test_port_stats.py::TestPortStats::test_get_size",
"tests/v0x01/test_controller2switch/test_port_stats_request.py::TestPortStatsRequest::test_get_size",
"tests/v0x01/test_controller2switch/test_queue_stats.py::TestQueueStats::test_get_size",
"tests/v0x01/test_controller2switch/test_queue_stats_request.py::TestQueueStatsRequest::test_get_size",
"tests/v0x01/test_controller2switch/test_table_stats.py::TestTableStats::test_get_size"
]
| []
| []
| []
| MIT License | 577 | [
"pyof/v0x01/controller2switch/table_stats.py",
"pyof/v0x01/controller2switch/flow_stats.py",
"pyof/v0x01/controller2switch/queue_stats.py",
"pyof/v0x01/controller2switch/port_stats.py",
"pyof/v0x01/foundation/base.py",
"pyof/v0x01/controller2switch/flow_stats_request.py",
"pyof/v0x01/common/flow_match.py",
"pyof/v0x01/controller2switch/aggregate_stats_reply.py",
"pyof/v0x01/controller2switch/features_reply.py",
"pyof/v0x01/controller2switch/port_stats_request.py",
"pyof/v0x01/controller2switch/desc_stats.py",
"pyof/v0x01/controller2switch/common.py",
"pyof/v0x01/foundation/basic_types.py",
"pyof/v0x01/controller2switch/aggregate_stats_request.py",
"pyof/v0x01/controller2switch/queue_stats_request.py",
"pyof/v0x01/common/phy_port.py"
]
| [
"pyof/v0x01/controller2switch/table_stats.py",
"pyof/v0x01/controller2switch/flow_stats.py",
"pyof/v0x01/controller2switch/queue_stats.py",
"pyof/v0x01/controller2switch/port_stats.py",
"pyof/v0x01/foundation/base.py",
"pyof/v0x01/controller2switch/flow_stats_request.py",
"pyof/v0x01/common/flow_match.py",
"pyof/v0x01/controller2switch/aggregate_stats_reply.py",
"pyof/v0x01/controller2switch/features_reply.py",
"pyof/v0x01/controller2switch/port_stats_request.py",
"pyof/v0x01/controller2switch/desc_stats.py",
"pyof/v0x01/controller2switch/common.py",
"pyof/v0x01/foundation/basic_types.py",
"pyof/v0x01/controller2switch/aggregate_stats_request.py",
"pyof/v0x01/controller2switch/queue_stats_request.py",
"pyof/v0x01/common/phy_port.py"
]
|
|
falconry__falcon-820 | 50b1759ee7f7b54a872c01c85152f8648e350399 | 2016-06-07 21:07:32 | 67d61029847cbf59e4053c8a424df4f9f87ad36f | kgriffs: Looks like we accidentally had some overlap in effort between #729 and #811. I attempted to combine the two into a new PR with a few tweeks to param naming and docstrings. Everyone please take a look and provide feedback. Thanks!
codecov-io: ## [Current coverage][cc-pull] is **100%**
> Merging [#820][cc-pull] into [master][cc-base-branch] will not change coverage
```diff
@@ master #820 diff @@
==========================================
Files 29 29
Lines 1789 1799 +10
Methods 0 0
Messages 0 0
Branches 299 303 +4
==========================================
+ Hits 1789 1799 +10
Misses 0 0
Partials 0 0
```
> Powered by [Codecov](https://codecov.io?src=pr). Last updated by [cf3cb50...d4e630a][cc-compare]
[cc-base-branch]: https://codecov.io/gh/falconry/falcon/branch/master?src=pr
[cc-compare]: https://codecov.io/gh/falconry/falcon/compare/cf3cb5029a51d4b7c980c7851328a02046db9b3e...d4e630a70cd1774519851a78a1d1fb80a83e8b7e
[cc-pull]: https://codecov.io/gh/falconry/falcon/pull/820?src=pr
orcsly: Looks good, thanks!
jmvrbanac: :+1:
qwesda: The parameter name `csv` for the `parse_query_string` could have a more descriptive name. `parse_qs_csv` sounds logical to me, since the option general option is called `auto_parse_qs_csv`.
One evaluation in `parse_query_string` can be short-circuited:
[https://github.com/falconry/falcon/pull/820/files#diff-7d2a078ae72702ba816f18a9aa1c48b9R319](https://github.com/falconry/falcon/pull/820/files#diff-7d2a078ae72702ba816f18a9aa1c48b9R319)
Otherwise it looks good.
kgriffs: @qwesda since the method name already implies we are working with parsing query strings, is it necessary to also include that in the name of the kwarg?
qwesda: @kgriffs: I just thought something more explicit would be more in line with `keep_blank_qs_values`, which is pretty verbose. Having one verbose param and one very non-verbose seemed weird.
kgriffs: @qwesda, ah, good point! I'll switch to `parse_qs_csv` and we can see how that looks.
kgriffs: @qwesda @jmvrbanac @orcsly I think this is ready for final review.
qwesda: @kgriffs looks ok
orcsly: Yup looks good. Thanks! | diff --git a/falcon/request.py b/falcon/request.py
index 597ac80..7359991 100644
--- a/falcon/request.py
+++ b/falcon/request.py
@@ -284,6 +284,7 @@ class Request(object):
self._params = parse_query_string(
self.query_string,
keep_blank_qs_values=self.options.keep_blank_qs_values,
+ parse_qs_csv=self.options.auto_parse_qs_csv,
)
else:
@@ -1153,6 +1154,7 @@ class Request(object):
extra_params = parse_query_string(
body,
keep_blank_qs_values=self.options.keep_blank_qs_values,
+ parse_qs_csv=self.options.auto_parse_qs_csv,
)
self._params.update(extra_params)
@@ -1190,8 +1192,11 @@ class RequestOptions(object):
"""This class is a container for ``Request`` options.
Attributes:
- keep_blank_qs_values (bool): Set to ``True`` in order to retain
- blank values in query string parameters (default ``False``).
+ keep_blank_qs_values (bool): Set to ``True`` to keep query string
+ fields even if they do not have a value (default ``False``).
+ For comma-separated values, this option also determines
+ whether or not empty elements in the parsed list are
+ retained.
auto_parse_form_urlencoded: Set to ``True`` in order to
automatically consume the request stream and merge the
results into the request's query string params when the
@@ -1202,18 +1207,29 @@ class RequestOptions(object):
Note:
The character encoding for fields, before
percent-encoding non-ASCII bytes, is assumed to be
- UTF-8. The special `_charset_` field is ignored if present.
+ UTF-8. The special `_charset_` field is ignored if
+ present.
Falcon expects form-encoded request bodies to be
encoded according to the standard W3C algorithm (see
also http://goo.gl/6rlcux).
+ auto_parse_qs_csv: Set to ``False`` to treat commas in a query
+ string value as literal characters, rather than as a comma-
+ separated list (default ``True``). When this option is
+ enabled, the value will be split on any non-percent-encoded
+ commas. Disable this option when encoding lists as multiple
+ occurrences of the same parameter, and when values may be
+ encoded in alternative formats in which the comma character
+ is significant.
"""
__slots__ = (
'keep_blank_qs_values',
'auto_parse_form_urlencoded',
+ 'auto_parse_qs_csv',
)
def __init__(self):
self.keep_blank_qs_values = False
self.auto_parse_form_urlencoded = False
+ self.auto_parse_qs_csv = True
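Enabling the new option from application code might look like the following — a hypothetical sketch (the resource and route are invented for illustration):

```python
import falcon

class QueryResource(object):
    def on_get(self, req, resp):
        # With auto_parse_qs_csv disabled, 'a,b,c' stays one string
        # instead of being split into ['a', 'b', 'c'].
        resp.body = req.get_param('query')

app = falcon.API()
app.req_options.auto_parse_qs_csv = False  # treat commas as literal characters
app.add_route('/query', QueryResource())
```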
diff --git a/falcon/util/misc.py b/falcon/util/misc.py
index 5b02f05..12eb481 100644
--- a/falcon/util/misc.py
+++ b/falcon/util/misc.py
@@ -148,7 +148,7 @@ def http_date_to_dt(http_date, obs_date=False):
raise ValueError('time data %r does not match known formats' % http_date)
-def to_query_str(params):
+def to_query_str(params, comma_delimited_lists=True):
"""Converts a dictionary of params to a query string.
Args:
@@ -157,6 +157,10 @@ def to_query_str(params):
something that can be converted into a ``str``. If `params`
is a ``list``, it will be converted to a comma-delimited string
of values (e.g., 'thing=1,2,3')
+ comma_delimited_lists (bool, default ``True``):
+        If set to ``False``, encode lists by specifying multiple instances
+ of the parameter (e.g., 'thing=1&thing=2&thing=3')
+
Returns:
str: A URI query string including the '?' prefix, or an empty string
@@ -175,7 +179,20 @@ def to_query_str(params):
elif v is False:
v = 'false'
elif isinstance(v, list):
- v = ','.join(map(str, v))
+ if comma_delimited_lists:
+ v = ','.join(map(str, v))
+ else:
+ for list_value in v:
+ if list_value is True:
+ list_value = 'true'
+ elif list_value is False:
+ list_value = 'false'
+ else:
+ list_value = str(list_value)
+
+ query_str += k + '=' + list_value + '&'
+
+ continue
else:
v = str(v)
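The two list encodings side by side, as a quick interactive sketch (expected outputs taken from the accompanying test changes):

```python
>>> import falcon
>>> falcon.to_query_str({'things': ['a', 'b']})
'?things=a,b'
>>> falcon.to_query_str({'things': ['a', 'b', '', None, True, False, 0]},
...                     comma_delimited_lists=False)
'?things=a&things=b&things=&things=None&things=true&things=false&things=0'
```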
diff --git a/falcon/util/uri.py b/falcon/util/uri.py
index 2f68ec9..63ca45e 100644
--- a/falcon/util/uri.py
+++ b/falcon/util/uri.py
@@ -246,11 +246,12 @@ else:
return decoded_uri.decode('utf-8', 'replace')
-def parse_query_string(query_string, keep_blank_qs_values=False):
+def parse_query_string(query_string, keep_blank_qs_values=False,
+ parse_qs_csv=True):
"""Parse a query string into a dict.
Query string parameters are assumed to use standard form-encoding. Only
- parameters with values are parsed. for example, given 'foo=bar&flag',
+ parameters with values are returned. For example, given 'foo=bar&flag',
this function would ignore 'flag' unless the `keep_blank_qs_values` option
is set.
@@ -269,8 +270,16 @@ def parse_query_string(query_string, keep_blank_qs_values=False):
Args:
query_string (str): The query string to parse.
- keep_blank_qs_values (bool): If set to ``True``, preserves boolean
- fields and fields with no content as blank strings.
+ keep_blank_qs_values (bool): Set to ``True`` to return fields even if
+ they do not have a value (default ``False``). For comma-separated
+ values, this option also determines whether or not empty elements
+ in the parsed list are retained.
+ parse_qs_csv: Set to ``False`` in order to disable splitting query
+ parameters on ``,`` (default ``True``). Depending on the user agent,
+ encoding lists as multiple occurrences of the same parameter might
+ be preferable. In this case, setting `parse_qs_csv` to ``False``
+ will cause the framework to treat commas as literal characters in
+ each occurring parameter value.
Returns:
dict: A dictionary of (*name*, *value*) pairs, one per query
@@ -309,7 +318,7 @@ def parse_query_string(query_string, keep_blank_qs_values=False):
params[k] = [old_value, decode(v)]
else:
- if ',' in v:
+ if parse_qs_csv and ',' in v:
# NOTE(kgriffs): Falcon supports a more compact form of
# lists, in which the elements are comma-separated and
# assigned to a single param instance. If it turns out that
| Add option to opt out of comma-separated value parsing
I'm porting a project to Falcon and I stumbled upon an issue regarding its parsing of CSV values inside URIs. Let's say I have a filtering engine that accepts queries such as this:
http://great.dude/api/cars?query=added:yesterday,today+spoilers:red
I obviously want to make `req.get_param('query')` return `'added:yesterday,today spoilers:red'`, and not `['added:yesterday', 'today spoilers:red']`.
Right now this [isn't really configurable](https://github.com/falconry/falcon/blob/35987b2be85456f431bbda509e884a8b0b20ed11/falcon/util/uri.py#L312-L328) and I need to check if `get_param()` returns a `list` and then join it back if needed, which looks sort of silly. Fortunately, the ability to use custom request classes alleviates the issue to some extent.
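Roughly, that workaround looks like this (a hypothetical sketch of a responder, not code from my project):

```python
def on_get(self, req, resp):
    value = req.get_param('query')
    if isinstance(value, list):
        # Undo the unwanted CSV split.
        value = ','.join(value)
    # ... hand `value` to the filtering engine as-is ...
```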
I see a few ways to improve things upstream:
1. Offer explicit `get_param_as_string` that will possibly do `','.join(...)` under the hood.
2. Add an option to disable this mechanism as an additional option to `Api`.
3. Add an option to disable this mechanism as an additional option to `add_route()`.
4. Change `get_param` to always return string.
Option 4 makes the most sense to me, but it breaks BC. If option 4 is not feasible, I'd go with option 1. | falconry/falcon | diff --git a/tests/test_options.py b/tests/test_options.py
index b3d9812..a3b8b72 100644
--- a/tests/test_options.py
+++ b/tests/test_options.py
@@ -1,16 +1,32 @@
+import ddt
+
from falcon.request import RequestOptions
import falcon.testing as testing
[email protected]
class TestRequestOptions(testing.TestBase):
- def test_correct_options(self):
+ def test_option_defaults(self):
options = RequestOptions()
+
self.assertFalse(options.keep_blank_qs_values)
- options.keep_blank_qs_values = True
- self.assertTrue(options.keep_blank_qs_values)
- options.keep_blank_qs_values = False
- self.assertFalse(options.keep_blank_qs_values)
+ self.assertFalse(options.auto_parse_form_urlencoded)
+ self.assertTrue(options.auto_parse_qs_csv)
+
+ @ddt.data(
+ 'keep_blank_qs_values',
+ 'auto_parse_form_urlencoded',
+ 'auto_parse_qs_csv',
+ )
+ def test_options_toggle(self, option_name):
+ options = RequestOptions()
+
+ setattr(options, option_name, True)
+ self.assertTrue(getattr(options, option_name))
+
+ setattr(options, option_name, False)
+ self.assertFalse(getattr(options, option_name))
def test_incorrect_options(self):
options = RequestOptions()
diff --git a/tests/test_query_params.py b/tests/test_query_params.py
index c588f23..62c906d 100644
--- a/tests/test_query_params.py
+++ b/tests/test_query_params.py
@@ -65,6 +65,60 @@ class _TestQueryParams(testing.TestBase):
self.assertEqual(req.get_param_as_list('id', int), [23, 42])
self.assertEqual(req.get_param('q'), u'\u8c46 \u74e3')
+ def test_option_auto_parse_qs_csv_simple_false(self):
+ self.api.req_options.auto_parse_qs_csv = False
+
+ query_string = 'id=23,42,,&id=2'
+ self.simulate_request('/', query_string=query_string)
+
+ req = self.resource.req
+
+ self.assertEqual(req.params['id'], [u'23,42,,', u'2'])
+ self.assertIn(req.get_param('id'), [u'23,42,,', u'2'])
+ self.assertEqual(req.get_param_as_list('id'), [u'23,42,,', u'2'])
+
+ def test_option_auto_parse_qs_csv_simple_true(self):
+ self.api.req_options.auto_parse_qs_csv = True
+
+ query_string = 'id=23,42,,&id=2'
+ self.simulate_request('/', query_string=query_string)
+
+ req = self.resource.req
+
+ self.assertEqual(req.params['id'], [u'23', u'42', u'2'])
+ self.assertIn(req.get_param('id'), [u'23', u'42', u'2'])
+ self.assertEqual(req.get_param_as_list('id', int), [23, 42, 2])
+
+ def test_option_auto_parse_qs_csv_complex_false(self):
+ self.api.req_options.auto_parse_qs_csv = False
+
+ encoded_json = '%7B%22msg%22:%22Testing%201,2,3...%22,%22code%22:857%7D'
+ decoded_json = '{"msg":"Testing 1,2,3...","code":857}'
+
+ query_string = ('colors=red,green,blue&limit=1'
+ '&list-ish1=f,,x&list-ish2=,0&list-ish3=a,,,b'
+ '&empty1=&empty2=,&empty3=,,'
+ '&thing=' + encoded_json)
+
+ self.simulate_request('/', query_string=query_string)
+
+ req = self.resource.req
+
+ self.assertIn(req.get_param('colors'), 'red,green,blue')
+ self.assertEqual(req.get_param_as_list('colors'), [u'red,green,blue'])
+
+ self.assertEqual(req.get_param_as_list('limit'), ['1'])
+
+ self.assertEqual(req.get_param_as_list('empty1'), None)
+ self.assertEqual(req.get_param_as_list('empty2'), [u','])
+ self.assertEqual(req.get_param_as_list('empty3'), [u',,'])
+
+ self.assertEqual(req.get_param_as_list('list-ish1'), [u'f,,x'])
+ self.assertEqual(req.get_param_as_list('list-ish2'), [u',0'])
+ self.assertEqual(req.get_param_as_list('list-ish3'), [u'a,,,b'])
+
+ self.assertEqual(req.get_param('thing'), decoded_json)
+
def test_bad_percentage(self):
query_string = 'x=%%20%+%&y=peregrine&z=%a%z%zz%1%20e'
self.simulate_request('/', query_string=query_string)
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 957a959..6b5f75d 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -128,6 +128,16 @@ class TestFalconUtils(testtools.TestCase):
falcon.to_query_str({'things': ['a', 'b']}),
'?things=a,b')
+ expected = ('?things=a&things=b&things=&things=None'
+ '&things=true&things=false&things=0')
+
+ actual = falcon.to_query_str(
+ {'things': ['a', 'b', '', None, True, False, 0]},
+ comma_delimited_lists=False
+ )
+
+ self.assertEqual(actual, expected)
+
def test_pack_query_params_several(self):
garbage_in = {
'limit': 17,
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 3
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"ddt",
"testtools",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": [
"tools/test-requires"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
ddt==1.7.2
-e git+https://github.com/falconry/falcon.git@50b1759ee7f7b54a872c01c85152f8648e350399#egg=falcon
fixtures==4.0.1
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
nose==1.3.7
packaging==21.3
pbr==6.1.1
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
python-mimeparse==1.6.0
PyYAML==6.0.1
requests==2.27.1
six==1.17.0
testtools==2.6.0
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: falcon
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- charset-normalizer==2.0.12
- coverage==6.2
- ddt==1.7.2
- fixtures==4.0.1
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- nose==1.3.7
- packaging==21.3
- pbr==6.1.1
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- python-mimeparse==1.6.0
- pyyaml==6.0.1
- requests==2.27.1
- six==1.17.0
- testtools==2.6.0
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/falcon
| [
"tests/test_options.py::TestRequestOptions::test_option_defaults",
"tests/test_options.py::TestRequestOptions::test_options_toggle_3_auto_parse_qs_csv",
"tests/test_query_params.py::_TestQueryParams::test_option_auto_parse_qs_csv_complex_false",
"tests/test_query_params.py::_TestQueryParams::test_option_auto_parse_qs_csv_simple_false",
"tests/test_query_params.py::_TestQueryParams::test_option_auto_parse_qs_csv_simple_true",
"tests/test_query_params.py::PostQueryParams::test_option_auto_parse_qs_csv_complex_false",
"tests/test_query_params.py::PostQueryParams::test_option_auto_parse_qs_csv_simple_false",
"tests/test_query_params.py::PostQueryParams::test_option_auto_parse_qs_csv_simple_true",
"tests/test_query_params.py::GetQueryParams::test_option_auto_parse_qs_csv_complex_false",
"tests/test_query_params.py::GetQueryParams::test_option_auto_parse_qs_csv_simple_false",
"tests/test_query_params.py::GetQueryParams::test_option_auto_parse_qs_csv_simple_true",
"tests/test_utils.py::TestFalconUtils::test_pack_query_params_one"
]
| [
"tests/test_utils.py::TestFalconUtils::test_deprecated_decorator"
]
| [
"tests/test_options.py::TestRequestOptions::test_incorrect_options",
"tests/test_options.py::TestRequestOptions::test_options_toggle_1_keep_blank_qs_values",
"tests/test_options.py::TestRequestOptions::test_options_toggle_2_auto_parse_form_urlencoded",
"tests/test_query_params.py::_TestQueryParams::test_allowed_names",
"tests/test_query_params.py::_TestQueryParams::test_bad_percentage",
"tests/test_query_params.py::_TestQueryParams::test_blank",
"tests/test_query_params.py::_TestQueryParams::test_boolean",
"tests/test_query_params.py::_TestQueryParams::test_boolean_blank",
"tests/test_query_params.py::_TestQueryParams::test_get_date_invalid",
"tests/test_query_params.py::_TestQueryParams::test_get_date_missing_param",
"tests/test_query_params.py::_TestQueryParams::test_get_date_store",
"tests/test_query_params.py::_TestQueryParams::test_get_date_valid",
"tests/test_query_params.py::_TestQueryParams::test_get_date_valid_with_format",
"tests/test_query_params.py::_TestQueryParams::test_get_dict_invalid",
"tests/test_query_params.py::_TestQueryParams::test_get_dict_missing_param",
"tests/test_query_params.py::_TestQueryParams::test_get_dict_store",
"tests/test_query_params.py::_TestQueryParams::test_get_dict_valid",
"tests/test_query_params.py::_TestQueryParams::test_int",
"tests/test_query_params.py::_TestQueryParams::test_int_neg",
"tests/test_query_params.py::_TestQueryParams::test_list_transformer",
"tests/test_query_params.py::_TestQueryParams::test_list_type",
"tests/test_query_params.py::_TestQueryParams::test_list_type_blank",
"tests/test_query_params.py::_TestQueryParams::test_multiple_form_keys",
"tests/test_query_params.py::_TestQueryParams::test_multiple_form_keys_as_list",
"tests/test_query_params.py::_TestQueryParams::test_multiple_keys_as_bool",
"tests/test_query_params.py::_TestQueryParams::test_multiple_keys_as_int",
"tests/test_query_params.py::_TestQueryParams::test_none",
"tests/test_query_params.py::_TestQueryParams::test_param_property",
"tests/test_query_params.py::_TestQueryParams::test_percent_encoded",
"tests/test_query_params.py::_TestQueryParams::test_required_1_get_param",
"tests/test_query_params.py::_TestQueryParams::test_required_2_get_param_as_int",
"tests/test_query_params.py::_TestQueryParams::test_required_3_get_param_as_bool",
"tests/test_query_params.py::_TestQueryParams::test_required_4_get_param_as_list",
"tests/test_query_params.py::_TestQueryParams::test_simple",
"tests/test_query_params.py::PostQueryParams::test_allowed_names",
"tests/test_query_params.py::PostQueryParams::test_bad_percentage",
"tests/test_query_params.py::PostQueryParams::test_blank",
"tests/test_query_params.py::PostQueryParams::test_boolean",
"tests/test_query_params.py::PostQueryParams::test_boolean_blank",
"tests/test_query_params.py::PostQueryParams::test_explicitly_disable_auto_parse",
"tests/test_query_params.py::PostQueryParams::test_get_date_invalid",
"tests/test_query_params.py::PostQueryParams::test_get_date_missing_param",
"tests/test_query_params.py::PostQueryParams::test_get_date_store",
"tests/test_query_params.py::PostQueryParams::test_get_date_valid",
"tests/test_query_params.py::PostQueryParams::test_get_date_valid_with_format",
"tests/test_query_params.py::PostQueryParams::test_get_dict_invalid",
"tests/test_query_params.py::PostQueryParams::test_get_dict_missing_param",
"tests/test_query_params.py::PostQueryParams::test_get_dict_store",
"tests/test_query_params.py::PostQueryParams::test_get_dict_valid",
"tests/test_query_params.py::PostQueryParams::test_int",
"tests/test_query_params.py::PostQueryParams::test_int_neg",
"tests/test_query_params.py::PostQueryParams::test_list_transformer",
"tests/test_query_params.py::PostQueryParams::test_list_type",
"tests/test_query_params.py::PostQueryParams::test_list_type_blank",
"tests/test_query_params.py::PostQueryParams::test_multiple_form_keys",
"tests/test_query_params.py::PostQueryParams::test_multiple_form_keys_as_list",
"tests/test_query_params.py::PostQueryParams::test_multiple_keys_as_bool",
"tests/test_query_params.py::PostQueryParams::test_multiple_keys_as_int",
"tests/test_query_params.py::PostQueryParams::test_non_ascii",
"tests/test_query_params.py::PostQueryParams::test_none",
"tests/test_query_params.py::PostQueryParams::test_param_property",
"tests/test_query_params.py::PostQueryParams::test_percent_encoded",
"tests/test_query_params.py::PostQueryParams::test_required_1_get_param",
"tests/test_query_params.py::PostQueryParams::test_required_2_get_param_as_int",
"tests/test_query_params.py::PostQueryParams::test_required_3_get_param_as_bool",
"tests/test_query_params.py::PostQueryParams::test_required_4_get_param_as_list",
"tests/test_query_params.py::PostQueryParams::test_simple",
"tests/test_query_params.py::GetQueryParams::test_allowed_names",
"tests/test_query_params.py::GetQueryParams::test_bad_percentage",
"tests/test_query_params.py::GetQueryParams::test_blank",
"tests/test_query_params.py::GetQueryParams::test_boolean",
"tests/test_query_params.py::GetQueryParams::test_boolean_blank",
"tests/test_query_params.py::GetQueryParams::test_get_date_invalid",
"tests/test_query_params.py::GetQueryParams::test_get_date_missing_param",
"tests/test_query_params.py::GetQueryParams::test_get_date_store",
"tests/test_query_params.py::GetQueryParams::test_get_date_valid",
"tests/test_query_params.py::GetQueryParams::test_get_date_valid_with_format",
"tests/test_query_params.py::GetQueryParams::test_get_dict_invalid",
"tests/test_query_params.py::GetQueryParams::test_get_dict_missing_param",
"tests/test_query_params.py::GetQueryParams::test_get_dict_store",
"tests/test_query_params.py::GetQueryParams::test_get_dict_valid",
"tests/test_query_params.py::GetQueryParams::test_int",
"tests/test_query_params.py::GetQueryParams::test_int_neg",
"tests/test_query_params.py::GetQueryParams::test_list_transformer",
"tests/test_query_params.py::GetQueryParams::test_list_type",
"tests/test_query_params.py::GetQueryParams::test_list_type_blank",
"tests/test_query_params.py::GetQueryParams::test_multiple_form_keys",
"tests/test_query_params.py::GetQueryParams::test_multiple_form_keys_as_list",
"tests/test_query_params.py::GetQueryParams::test_multiple_keys_as_bool",
"tests/test_query_params.py::GetQueryParams::test_multiple_keys_as_int",
"tests/test_query_params.py::GetQueryParams::test_none",
"tests/test_query_params.py::GetQueryParams::test_param_property",
"tests/test_query_params.py::GetQueryParams::test_percent_encoded",
"tests/test_query_params.py::GetQueryParams::test_required_1_get_param",
"tests/test_query_params.py::GetQueryParams::test_required_2_get_param_as_int",
"tests/test_query_params.py::GetQueryParams::test_required_3_get_param_as_bool",
"tests/test_query_params.py::GetQueryParams::test_required_4_get_param_as_list",
"tests/test_query_params.py::GetQueryParams::test_simple",
"tests/test_query_params.py::PostQueryParamsDefaultBehavior::test_dont_auto_parse_by_default",
"tests/test_utils.py::TestFalconUtils::test_dt_to_http",
"tests/test_utils.py::TestFalconUtils::test_get_http_status",
"tests/test_utils.py::TestFalconUtils::test_http_date_to_dt",
"tests/test_utils.py::TestFalconUtils::test_http_now",
"tests/test_utils.py::TestFalconUtils::test_pack_query_params_none",
"tests/test_utils.py::TestFalconUtils::test_pack_query_params_several",
"tests/test_utils.py::TestFalconUtils::test_parse_host",
"tests/test_utils.py::TestFalconUtils::test_parse_query_string",
"tests/test_utils.py::TestFalconUtils::test_prop_uri_decode_models_stdlib_unquote_plus",
"tests/test_utils.py::TestFalconUtils::test_prop_uri_encode_models_stdlib_quote",
"tests/test_utils.py::TestFalconUtils::test_prop_uri_encode_value_models_stdlib_quote_safe_tilde",
"tests/test_utils.py::TestFalconUtils::test_uri_decode",
"tests/test_utils.py::TestFalconUtils::test_uri_encode",
"tests/test_utils.py::TestFalconUtils::test_uri_encode_value",
"tests/test_utils.py::TestFalconTesting::test_decode_empty_result",
"tests/test_utils.py::TestFalconTesting::test_httpnow_alias_for_backwards_compat",
"tests/test_utils.py::TestFalconTesting::test_none_header_value_in_create_environ",
"tests/test_utils.py::TestFalconTesting::test_path_escape_chars_in_create_environ",
"tests/test_utils.py::TestFalconTestCase::test_cached_text_in_result",
"tests/test_utils.py::TestFalconTestCase::test_path_must_start_with_slash",
"tests/test_utils.py::TestFalconTestCase::test_query_string",
"tests/test_utils.py::TestFalconTestCase::test_query_string_in_path",
"tests/test_utils.py::TestFalconTestCase::test_query_string_no_question",
"tests/test_utils.py::TestFalconTestCase::test_simple_resource_body_json_xor",
"tests/test_utils.py::TestFalconTestCase::test_status",
"tests/test_utils.py::TestFalconTestCase::test_wsgi_iterable_not_closeable",
"tests/test_utils.py::FancyTestCase::test_something"
]
| []
| Apache License 2.0 | 578 | [
"falcon/util/uri.py",
"falcon/request.py",
"falcon/util/misc.py"
]
| [
"falcon/util/uri.py",
"falcon/request.py",
"falcon/util/misc.py"
]
|
scrapy__scrapy-2038 | b7925e42202d79d2ba9d00b6aded3a451c92fe81 | 2016-06-08 14:57:23 | a975a50558cd78a1573bee2e957afcb419fd1bd6 | diff --git a/scrapy/utils/url.py b/scrapy/utils/url.py
index c80fc6e70..406eb5843 100644
--- a/scrapy/utils/url.py
+++ b/scrapy/utils/url.py
@@ -41,9 +41,16 @@ def url_has_any_extension(url, extensions):
def _safe_ParseResult(parts, encoding='utf8', path_encoding='utf8'):
+ # IDNA encoding can fail for too long labels (>63 characters)
+ # or missing labels (e.g. http://.example.com)
+ try:
+ netloc = parts.netloc.encode('idna')
+ except UnicodeError:
+ netloc = parts.netloc
+
return (
to_native_str(parts.scheme),
- to_native_str(parts.netloc.encode('idna')),
+ to_native_str(netloc),
# default encoding for path component SHOULD be UTF-8
quote(to_bytes(parts.path, path_encoding), _safe_chars),
| Unicode Link Extractor
When using the following to extract all of the links from a response:
```
self.link_extractor = LinkExtractor()
...
links = self.link_extractor.extract_links(response)
```
On rare occasions, the following error is thrown:
```
2016-05-25 12:13:55,432 [root] [ERROR] Error on http://detroit.curbed.com/2016/5/5/11605132/tiny-house-designer-show, traceback: Traceback (most recent call last):
File "/usr/local/lib/python2.7/site-packages/twisted/internet/base.py", line 1203, in mainLoop
self.runUntilCurrent()
File "/usr/local/lib/python2.7/site-packages/twisted/internet/base.py", line 825, in runUntilCurrent
call.func(*call.args, **call.kw)
File "/usr/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 393, in callback
self._startRunCallbacks(result)
File "/usr/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 501, in _startRunCallbacks
self._runCallbacks()
--- <exception caught here> ---
File "/usr/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 588, in _runCallbacks
current.result = callback(current.result, *args, **kw)
File "/var/www/html/DomainCrawler/DomainCrawler/spiders/hybrid_spider.py", line 223, in parse
items.extend(self._extract_requests(response))
File "/var/www/html/DomainCrawler/DomainCrawler/spiders/hybrid_spider.py", line 477, in _extract_requests
links = self.link_extractor.extract_links(response)
File "/usr/local/lib/python2.7/site-packages/scrapy/linkextractors/lxmlhtml.py", line 111, in extract_links
all_links.extend(self._process_links(links))
File "/usr/local/lib/python2.7/site-packages/scrapy/linkextractors/__init__.py", line 103, in _process_links
link.url = canonicalize_url(urlparse(link.url))
File "/usr/local/lib/python2.7/site-packages/scrapy/utils/url.py", line 85, in canonicalize_url
parse_url(url), encoding=encoding)
File "/usr/local/lib/python2.7/site-packages/scrapy/utils/url.py", line 46, in _safe_ParseResult
to_native_str(parts.netloc.encode('idna')),
File "/usr/local/lib/python2.7/encodings/idna.py", line 164, in encode
result.append(ToASCII(label))
File "/usr/local/lib/python2.7/encodings/idna.py", line 73, in ToASCII
raise UnicodeError("label empty or too long")
exceptions.UnicodeError: label empty or too long
```
I was able to find some information concerning the error [here](http://stackoverflow.com/questions/25103126/label-empty-or-too-long-python-urllib2).
My question is: What is the best way to handle this? Even if there is one bad link in the response, I'd want all of the other good links to be extracted. | scrapy/scrapy | diff --git a/tests/test_utils_url.py b/tests/test_utils_url.py
index 1fc3a3510..b4819874d 100644
--- a/tests/test_utils_url.py
+++ b/tests/test_utils_url.py
@@ -265,6 +265,20 @@ class CanonicalizeUrlTest(unittest.TestCase):
# without encoding, already canonicalized URL is canonicalized identically
self.assertEqual(canonicalize_url(canonicalized), canonicalized)
+ def test_canonicalize_url_idna_exceptions(self):
+ # missing DNS label
+ self.assertEqual(
+ canonicalize_url(u"http://.example.com/résumé?q=résumé"),
+ "http://.example.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9")
+
+ # DNS label too long
+ self.assertEqual(
+ canonicalize_url(
+ u"http://www.{label}.com/résumé?q=résumé".format(
+ label=u"example"*11)),
+ "http://www.{label}.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9".format(
+ label=u"example"*11))
+
class AddHttpIfNoScheme(unittest.TestCase):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 1.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc libxml2-dev libxslt1-dev zlib1g-dev libffi-dev libssl-dev"
],
"python": "3.5",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
Automat==22.10.0
certifi==2021.5.30
cffi==1.15.1
constantly==15.1.0
cryptography==40.0.2
cssselect==1.1.0
hyperlink==21.0.0
idna==3.10
importlib-metadata==4.8.3
incremental==22.10.0
iniconfig==1.1.1
lxml==5.3.1
packaging==21.3
parsel==1.6.0
pluggy==1.0.0
py==1.11.0
pyasn1==0.5.1
pyasn1-modules==0.3.0
pycparser==2.21
PyDispatcher==2.0.7
pyOpenSSL==23.2.0
pyparsing==3.1.4
pytest==7.0.1
queuelib==1.6.2
-e git+https://github.com/scrapy/scrapy.git@b7925e42202d79d2ba9d00b6aded3a451c92fe81#egg=Scrapy
service-identity==21.1.0
six==1.17.0
tomli==1.2.3
Twisted==22.4.0
typing_extensions==4.1.1
w3lib==2.0.1
zipp==3.6.0
zope.interface==5.5.2
| name: scrapy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- automat==22.10.0
- cffi==1.15.1
- constantly==15.1.0
- cryptography==40.0.2
- cssselect==1.1.0
- hyperlink==21.0.0
- idna==3.10
- importlib-metadata==4.8.3
- incremental==22.10.0
- iniconfig==1.1.1
- lxml==5.3.1
- packaging==21.3
- parsel==1.6.0
- pluggy==1.0.0
- py==1.11.0
- pyasn1==0.5.1
- pyasn1-modules==0.3.0
- pycparser==2.21
- pydispatcher==2.0.7
- pyopenssl==23.2.0
- pyparsing==3.1.4
- pytest==7.0.1
- queuelib==1.6.2
- service-identity==21.1.0
- six==1.17.0
- tomli==1.2.3
- twisted==22.4.0
- typing-extensions==4.1.1
- w3lib==2.0.1
- zipp==3.6.0
- zope-interface==5.5.2
prefix: /opt/conda/envs/scrapy
| [
"tests/test_utils_url.py::CanonicalizeUrlTest::test_canonicalize_url_idna_exceptions"
]
| []
| [
"tests/test_utils_url.py::UrlUtilsTest::test_url_is_from_any_domain",
"tests/test_utils_url.py::UrlUtilsTest::test_url_is_from_spider",
"tests/test_utils_url.py::UrlUtilsTest::test_url_is_from_spider_class_attributes",
"tests/test_utils_url.py::UrlUtilsTest::test_url_is_from_spider_with_allowed_domains",
"tests/test_utils_url.py::UrlUtilsTest::test_url_is_from_spider_with_allowed_domains_class_attributes",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_append_missing_path",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_canonicalize_idns",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_canonicalize_parse_url",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_canonicalize_url",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_canonicalize_url_idempotence",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_canonicalize_url_unicode_path",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_canonicalize_url_unicode_query_string",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_canonicalize_url_unicode_query_string_wrong_encoding",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_canonicalize_urlparsed",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_domains_are_case_insensitive",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_dont_convert_safe_characters",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_keep_blank_values",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_non_ascii_percent_encoding_in_paths",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_non_ascii_percent_encoding_in_query_arguments",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_normalize_percent_encoding_in_paths",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_normalize_percent_encoding_in_query_arguments",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_quoted_slash_and_question_sign",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_remove_fragments",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_return_str",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_safe_characters_unicode",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_sorting",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_spaces",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_typical_usage",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_urls_with_auth_and_ports",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_add_scheme",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_complete_url",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_fragment",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_path",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_port",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_preserve_ftp",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_preserve_http",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_preserve_http_complete_url",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_preserve_http_fragment",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_preserve_http_path",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_preserve_http_port",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_preserve_http_query",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_preserve_http_username_password",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_preserve_http_without_subdomain",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_preserve_https",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_protocol_relative",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_protocol_relative_complete_url",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_protocol_relative_fragment",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_protocol_relative_path",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_protocol_relative_port",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_protocol_relative_query",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_protocol_relative_username_password",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_protocol_relative_without_subdomain",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_query",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_username_password",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_without_subdomain",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_001",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_002",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_003",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_004",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_005",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_006",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_007",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_008",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_009",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_010",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_011",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_012",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_013",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_014",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_015",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_016",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_017",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_018",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_019",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_020"
]
| []
| BSD 3-Clause "New" or "Revised" License | 579 | [
"scrapy/utils/url.py"
]
| [
"scrapy/utils/url.py"
]
|
|
mapbox__mapbox-sdk-py-128 | 2c11fdee6eee83ea82398cc0756ac7f35aada801 | 2016-06-09 18:19:51 | 2c11fdee6eee83ea82398cc0756ac7f35aada801 | diff --git a/docs/surface.md b/docs/surface.md
index 8fcbb77..c6e9676 100644
--- a/docs/surface.md
+++ b/docs/surface.md
@@ -86,7 +86,7 @@ contours).
... polyline=True, zoom=12, interpolate=False)
>>> points = response.geojson()
>>> [f['properties']['ele'] for f in points['features']]
-[None, None, None]
+[2190, 2190, 2160]
```
diff --git a/mapbox/encoding.py b/mapbox/encoding.py
index 0674d51..6190ea6 100644
--- a/mapbox/encoding.py
+++ b/mapbox/encoding.py
@@ -68,12 +68,13 @@ def encode_waypoints(features, min_limit=None, max_limit=None, precision=6):
return ';'.join(coords)
-def encode_polyline(features, zoom_level=18):
+def encode_polyline(features):
"""Encode and iterable of features as a polyline
"""
points = list(read_points(features))
+ latlon_points = [(x[1], x[0]) for x in points]
codec = PolylineCodec()
- return codec.encode(points)
+ return codec.encode(latlon_points)
def encode_coordinates_json(features):
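The whole fix above is the (lon, lat) → (lat, lon) swap applied before handing points to the polyline codec; a standalone sketch of that step (the helper name is hypothetical):

```python
# GeoJSON positions are (lon, lat); the polyline algorithm expects (lat, lon).
def geojson_to_polyline_points(points):
    """points: iterable of (lon, lat) pairs read from GeoJSON geometries."""
    return [(lat, lon) for lon, lat in points]

assert geojson_to_polyline_points([(-122.7282, 45.5165)]) == [(45.5165, -122.7282)]
```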
| Encoded polylines in wrong coordinate order
Currently, we take the GeoJSON point array and encode the points directly in [lon, lat] order. Polylines should be encoded in [lat, lon] order. | mapbox/mapbox-sdk-py | diff --git a/tests/test_encoding.py b/tests/test_encoding.py
index fa15f14..9326ae6 100644
--- a/tests/test_encoding.py
+++ b/tests/test_encoding.py
@@ -113,7 +113,7 @@ def test_unknown_object():
def test_encode_polyline():
- expected = "vdatOwp_~EhupD{xiA"
+ expected = "wp_~EvdatO{xiAhupD"
assert expected == encode_polyline(gj_point_features)
assert expected == encode_polyline(gj_multipoint_features)
assert expected == encode_polyline(gj_line_features)
diff --git a/tests/test_surface.py b/tests/test_surface.py
index 05bd2c7..2ba08cd 100644
--- a/tests/test_surface.py
+++ b/tests/test_surface.py
@@ -55,7 +55,7 @@ def test_surface_geojson():
@responses.activate
def test_surface_params():
- params = "&encoded_polyline=~kbkTss%60%7BEQeAHu%40&zoom=16&interpolate=false"
+ params = "&encoded_polyline=ss%60%7BE~kbkTeAQu%40H&zoom=16&interpolate=false"
responses.add(
responses.GET,
'https://api.mapbox.com/v4/surface/mapbox.mapbox-terrain-v1.json?access_token=pk.test&fields=ele&layer=contour&geojson=true' + params,
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 2
} | 0.8 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"pip install -U pip"
],
"python": "3.5",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
boto3==1.23.10
botocore==1.26.10
CacheControl==0.12.14
certifi==2021.5.30
charset-normalizer==2.0.12
click==8.0.4
click-plugins==1.1.1
cligj==0.7.2
coverage==6.2
coveralls==3.3.1
distlib==0.3.9
docopt==0.6.2
filelock==3.4.1
idna==3.10
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
iso3166==2.1.1
jmespath==0.10.0
-e git+https://github.com/mapbox/mapbox-sdk-py.git@2c11fdee6eee83ea82398cc0756ac7f35aada801#egg=mapbox
msgpack==1.0.5
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
requests==2.27.1
responses==0.17.0
s3transfer==0.5.2
six==1.17.0
toml==0.10.2
tomli==1.2.3
tox==3.28.0
typing_extensions==4.1.1
uritemplate==4.1.1
uritemplate.py==3.0.2
urllib3==1.26.20
virtualenv==20.17.1
zipp==3.6.0
| name: mapbox-sdk-py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- boto3==1.23.10
- botocore==1.26.10
- cachecontrol==0.12.14
- charset-normalizer==2.0.12
- click==8.0.4
- click-plugins==1.1.1
- cligj==0.7.2
- coverage==6.2
- coveralls==3.3.1
- distlib==0.3.9
- docopt==0.6.2
- filelock==3.4.1
- idna==3.10
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- iso3166==2.1.1
- jmespath==0.10.0
- msgpack==1.0.5
- packaging==21.3
- pip==21.3.1
- platformdirs==2.4.0
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- requests==2.27.1
- responses==0.17.0
- s3transfer==0.5.2
- six==1.17.0
- toml==0.10.2
- tomli==1.2.3
- tox==3.28.0
- typing-extensions==4.1.1
- uritemplate==4.1.1
- uritemplate-py==3.0.2
- urllib3==1.26.20
- virtualenv==20.17.1
- zipp==3.6.0
prefix: /opt/conda/envs/mapbox-sdk-py
| [
"tests/test_encoding.py::test_encode_polyline",
"tests/test_surface.py::test_surface_params"
]
| []
| [
"tests/test_encoding.py::test_read_geojson_features",
"tests/test_encoding.py::test_geo_interface",
"tests/test_encoding.py::test_encode_waypoints",
"tests/test_encoding.py::test_encode_limits",
"tests/test_encoding.py::test_unsupported_geometry",
"tests/test_encoding.py::test_unknown_object",
"tests/test_encoding.py::test_encode_coordinates_json",
"tests/test_surface.py::test_surface",
"tests/test_surface.py::test_surface_geojson"
]
| []
| MIT License | 580 | [
"docs/surface.md",
"mapbox/encoding.py"
]
| [
"docs/surface.md",
"mapbox/encoding.py"
]
|
|
bigchaindb__cryptoconditions-15 | c3156a947ca32e8d1d9c5f7ec8fa0a049f2ba0a6 | 2016-06-10 12:14:28 | c3156a947ca32e8d1d9c5f7ec8fa0a049f2ba0a6 | diff --git a/README.md b/README.md
index 4387e6e..6c963c8 100644
--- a/README.md
+++ b/README.md
@@ -39,6 +39,7 @@ generic authenticated event handlers.
## Usage
```python
+import json
import binascii
import cryptoconditions as cc
@@ -64,7 +65,7 @@ parsed_fulfillment = cc.Fulfillment.from_uri(example_fulfillment_uri)
print(isinstance(parsed_fulfillment, cc.PreimageSha256Fulfillment))
# prints True
-# Retrieve the condition of the fulfillment
+# Retrieve the condition of the fulfillment
print(parsed_fulfillment.condition_uri)
# prints 'cc:0:3:47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU:0'
@@ -72,14 +73,13 @@ print(parsed_fulfillment.condition_uri)
parsed_fulfillment.validate()
# prints True
-# Export to JSON
-json_data = parsed_fulfillment.serialize_json()
+# Serialize fulfillment to JSON
+json_data = json.dumps(parsed_fulfillment.to_dict())
print(json_data)
# prints '{"bitmask": 3, "type_id": 0, "type": "fulfillment", "preimage": ""}'
# Parse fulfillment from JSON
-import json
-json_fulfillment = cc.Fulfillment.from_json(json.loads(json_data))
+json_fulfillment = cc.Fulfillment.from_dict(json.loads(json_data))
print(json_fulfillment.serialize_uri())
# prints 'cf:0:'
```
@@ -270,6 +270,7 @@ FULFILLMENT_PAYLOAD =
### Usage
```python
+import json
import cryptoconditions as cc
# Parse some fulfillments
@@ -315,7 +316,7 @@ threshold_fulfillment.threshold = 3 # AND gate
print(threshold_fulfillment.serialize_uri())
# prints 'cf:2:AQMBAwEBAwAAAAABAWMABGDsFyuTrV5WO_STLHDhJFA0w1Rn7y79TWTr-BloNGfiv7YikfrZQy-PKYucSkiV2-KT9v_aGmja3wzN719HoMchKl_qPNqXo_TAPqny6Kwc7IalHUUhJ6vboJ0bbzMcBwoAAQGBmgACgZYBAQECAQEAJwAEASAg7Bcrk61eVjv0kyxw4SRQNMNUZ-8u_U1k6_gZaDRn4r8BYAEBYwAEYOwXK5OtXlY79JMscOEkUDTDVGfvLv1NZOv4GWg0Z-K_tiKR-tlDL48pi5xKSJXb4pP2_9oaaNrfDM3vX0egxyEqX-o82pej9MA-qfLorBzshqUdRSEnq9ugnRtvMxwHCgAA'
-threshold_fulfillment.serialize_json()
+threshold_fulfillment.to_dict()
```
```python
@@ -416,4 +417,4 @@ timeout_fulfillment valid: False (0s to timeout)
timeout_fulfillment valid: False (-1s to timeout)
timeout_fulfillment valid: False (-2s to timeout)
timeout_fulfillment valid: False (-3s to timeout)
-```
\ No newline at end of file
+```
diff --git a/cryptoconditions/condition.py b/cryptoconditions/condition.py
index 4ae9d75..d00427a 100644
--- a/cryptoconditions/condition.py
+++ b/cryptoconditions/condition.py
@@ -1,4 +1,3 @@
-import json
import base58
import base64
import re
@@ -117,18 +116,18 @@ class Condition(metaclass=ABCMeta):
return condition
@staticmethod
- def from_json(json_data):
+ def from_dict(data):
"""
- Create a Condition object from a json dict.
+ Create a Condition object from a dict.
Args:
- json_data (dict): Dictionary containing the condition payload
+ data (dict): Dictionary containing the condition payload
Returns:
Condition: Resulting object
"""
condition = Condition()
- condition.parse_json(json_data)
+ condition.parse_dict(data)
return condition
@@ -262,30 +261,34 @@ class Condition(metaclass=ABCMeta):
self.hash = reader.read_var_octet_string()
self.max_fulfillment_length = reader.read_var_uint()
- def serialize_json(self):
- return json.dumps(
- {
- 'type': 'condition',
- 'type_id': self.type_id,
- 'bitmask': self.bitmask,
- 'hash': base58.b58encode(self.hash),
- 'max_fulfillment_length': self.max_fulfillment_length
- }
- )
+ def to_dict(self):
+ """
+ Generate a dict of the condition
- def parse_json(self, json_data):
+ Returns:
+ dict: representing the condition
+ """
+ return {
+ 'type': 'condition',
+ 'type_id': self.type_id,
+ 'bitmask': self.bitmask,
+ 'hash': base58.b58encode(self.hash),
+ 'max_fulfillment_length': self.max_fulfillment_length
+ }
+
+ def parse_dict(self, data):
"""
Args:
- json_data (dict):
+ data (dict):
Returns:
Condition with payload
"""
- self.type_id = json_data['type_id']
- self.bitmask = json_data['bitmask']
+ self.type_id = data['type_id']
+ self.bitmask = data['bitmask']
- self.hash = base58.b58decode(json_data['hash'])
- self.max_fulfillment_length = json_data['max_fulfillment_length']
+ self.hash = base58.b58decode(data['hash'])
+ self.max_fulfillment_length = data['max_fulfillment_length']
def validate(self):
"""
diff --git a/cryptoconditions/fulfillment.py b/cryptoconditions/fulfillment.py
index c07a281..f78e5af 100644
--- a/cryptoconditions/fulfillment.py
+++ b/cryptoconditions/fulfillment.py
@@ -78,12 +78,12 @@ class Fulfillment(metaclass=ABCMeta):
return fulfillment
@staticmethod
- def from_json(json_data):
- cls_type = json_data['type_id']
+ def from_dict(data):
+ cls_type = data['type_id']
cls = TypeRegistry.get_class_from_type_id(cls_type)
fulfillment = cls()
- fulfillment.parse_json(json_data)
+ fulfillment.parse_dict(data)
return fulfillment
@@ -249,20 +249,20 @@ class Fulfillment(metaclass=ABCMeta):
"""
@abstractmethod
- def serialize_json(self):
+ def to_dict(self):
"""
- Generate a JSON object of the fulfillment
+ Generate a dict of the fulfillment
Returns:
"""
@abstractmethod
- def parse_json(self, json_data):
+ def parse_dict(self, data):
"""
- Generate fulfillment payload from a json
+ Generate fulfillment payload from a dict
Args:
- json_data: json description of the fulfillment
+ data: dict description of the fulfillment
Returns:
Fulfillment
diff --git a/cryptoconditions/types/ed25519.py b/cryptoconditions/types/ed25519.py
index fb8fd32..ddcf8fa 100644
--- a/cryptoconditions/types/ed25519.py
+++ b/cryptoconditions/types/ed25519.py
@@ -1,5 +1,3 @@
-import json
-
import base58
from cryptoconditions.crypto import Ed25519VerifyingKey as VerifyingKey
@@ -113,34 +111,33 @@ class Ed25519Fulfillment(Fulfillment):
def calculate_max_fulfillment_length(self):
return Ed25519Fulfillment.FULFILLMENT_LENGTH
- def serialize_json(self):
+ def to_dict(self):
"""
- Generate a JSON object of the fulfillment
+ Generate a dict of the fulfillment
Returns:
+ dict: representing the fulfillment
"""
- return json.dumps(
- {
- 'type': 'fulfillment',
- 'type_id': self.TYPE_ID,
- 'bitmask': self.bitmask,
- 'public_key': self.public_key.to_ascii(encoding='base58').decode(),
- 'signature': base58.b58encode(self.signature) if self.signature else None
- }
- )
+ return {
+ 'type': 'fulfillment',
+ 'type_id': self.TYPE_ID,
+ 'bitmask': self.bitmask,
+ 'public_key': self.public_key.to_ascii(encoding='base58').decode(),
+ 'signature': base58.b58encode(self.signature) if self.signature else None
+ }
- def parse_json(self, json_data):
+ def parse_dict(self, data):
"""
- Generate fulfillment payload from a json
+ Generate fulfillment payload from a dict
Args:
- json_data: json description of the fulfillment
+ data (dict): description of the fulfillment
Returns:
Fulfillment
"""
- self.public_key = VerifyingKey(json_data['public_key'])
- self.signature = base58.b58decode(json_data['signature']) if json_data['signature'] else None
+ self.public_key = VerifyingKey(data['public_key'])
+ self.signature = base58.b58decode(data['signature']) if data['signature'] else None
def validate(self, message=None, **kwargs):
"""
diff --git a/cryptoconditions/types/sha256.py b/cryptoconditions/types/sha256.py
index daa2e00..f36e96a 100644
--- a/cryptoconditions/types/sha256.py
+++ b/cryptoconditions/types/sha256.py
@@ -1,5 +1,3 @@
-import json
-
from cryptoconditions.types.base_sha256 import BaseSha256Fulfillment
from cryptoconditions.lib import Hasher, Reader, Writer, Predictor
@@ -93,32 +91,31 @@ class PreimageSha256Fulfillment(BaseSha256Fulfillment):
writer.write(self.preimage)
return writer
- def serialize_json(self):
+ def to_dict(self):
"""
- Generate a JSON object of the fulfillment
+ Generate a dict of the fulfillment
Returns:
+ dict: representing the fulfillment
"""
- return json.dumps(
- {
- 'type': 'fulfillment',
- 'type_id': self.TYPE_ID,
- 'bitmask': self.bitmask,
- 'preimage': self.preimage.decode()
- }
- )
-
- def parse_json(self, json_data):
+ return {
+ 'type': 'fulfillment',
+ 'type_id': self.TYPE_ID,
+ 'bitmask': self.bitmask,
+ 'preimage': self.preimage.decode()
+ }
+
+ def parse_dict(self, data):
"""
- Generate fulfillment payload from a json
+ Generate fulfillment payload from a dict
Args:
- json_data: json description of the fulfillment
+ data (dict): description of the fulfillment
Returns:
Fulfillment
"""
- self.preimage = json_data['preimage'].encode()
+ self.preimage = data['preimage'].encode()
def validate(self, *args, **kwargs):
"""
diff --git a/cryptoconditions/types/threshold_sha256.py b/cryptoconditions/types/threshold_sha256.py
index 068e9b2..aae4e02 100644
--- a/cryptoconditions/types/threshold_sha256.py
+++ b/cryptoconditions/types/threshold_sha256.py
@@ -1,5 +1,3 @@
-import json
-
import copy
from cryptoconditions.condition import Condition
from cryptoconditions.fulfillment import Fulfillment
@@ -109,6 +107,7 @@ class ThresholdSha256Fulfillment(BaseSha256Fulfillment):
elif not isinstance(subfulfillment, Fulfillment):
raise TypeError('Subfulfillments must be URIs or objects of type Fulfillment')
if not isinstance(weight, int) or weight < 1:
+ # TODO: Add a more helpful error message.
raise ValueError('Invalid weight: {}'.format(weight))
self.subconditions.append(
{
@@ -277,7 +276,7 @@ class ThresholdSha256Fulfillment(BaseSha256Fulfillment):
predictor = Predictor()
predictor.write_uint16(None) # type
- predictor.write_var_octet_string(b'0'*fulfillment_len) # payload
+ predictor.write_var_octet_string(b'0' * fulfillment_len) # payload
return predictor.size
@@ -486,49 +485,48 @@ class ThresholdSha256Fulfillment(BaseSha256Fulfillment):
buffers_copy.sort(key=lambda item: (len(item), item))
return buffers_copy
- def serialize_json(self):
+ def to_dict(self):
"""
- Generate a JSON object of the fulfillment
+ Generate a dict of the fulfillment
Returns:
+ dict: representing the fulfillment
"""
- subfulfillments_json = []
+ subfulfillments = []
for c in self.subconditions:
- subcondition = json.loads(c['body'].serialize_json())
+ subcondition = c['body'].to_dict()
subcondition.update({'weight': c['weight']})
- subfulfillments_json.append(subcondition)
+ subfulfillments.append(subcondition)
- return json.dumps(
- {
- 'type': 'fulfillment',
- 'type_id': self.TYPE_ID,
- 'bitmask': self.bitmask,
- 'threshold': self.threshold,
- 'subfulfillments': subfulfillments_json
- }
- )
+ return {
+ 'type': 'fulfillment',
+ 'type_id': self.TYPE_ID,
+ 'bitmask': self.bitmask,
+ 'threshold': self.threshold,
+ 'subfulfillments': subfulfillments
+ }
- def parse_json(self, json_data):
+ def parse_dict(self, data):
"""
- Generate fulfillment payload from a json
+ Generate fulfillment payload from a dict
Args:
- json_data: json description of the fulfillment
+ data (dict): description of the fulfillment
Returns:
Fulfillment
"""
- if not isinstance(json_data, dict):
+ if not isinstance(data, dict):
raise TypeError('reader must be a dict instance')
- self.threshold = json_data['threshold']
+ self.threshold = data['threshold']
- for subfulfillments_json in json_data['subfulfillments']:
- weight = subfulfillments_json['weight']
+ for subfulfillments in data['subfulfillments']:
+ weight = subfulfillments['weight']
- if subfulfillments_json['type'] == FULFILLMENT:
- self.add_subfulfillment(Fulfillment.from_json(subfulfillments_json), weight)
- elif subfulfillments_json['type'] == CONDITION:
- self.add_subcondition(Condition.from_json(subfulfillments_json), weight)
+ if subfulfillments['type'] == FULFILLMENT:
+ self.add_subfulfillment(Fulfillment.from_dict(subfulfillments), weight)
+ elif subfulfillments['type'] == CONDITION:
+ self.add_subcondition(Condition.from_dict(subfulfillments), weight)
else:
raise TypeError('Subconditions must provide either subcondition or fulfillment.')
diff --git a/cryptoconditions/types/timeout.py b/cryptoconditions/types/timeout.py
index 6b9d2fc..0007c6f 100644
--- a/cryptoconditions/types/timeout.py
+++ b/cryptoconditions/types/timeout.py
@@ -1,4 +1,3 @@
-import json
import re
import time
@@ -32,32 +31,31 @@ class TimeoutFulfillment(PreimageSha256Fulfillment):
"""
return self.preimage
- def serialize_json(self):
+ def to_dict(self):
"""
- Generate a JSON object of the fulfillment
+ Generate a dict of the fulfillment
Returns:
+ dict: representing the fulfillment
"""
- return json.dumps(
- {
- 'type': 'fulfillment',
- 'type_id': self.TYPE_ID,
- 'bitmask': self.bitmask,
- 'expire_time': self.expire_time.decode()
- }
- )
-
- def parse_json(self, json_data):
+ return {
+ 'type': 'fulfillment',
+ 'type_id': self.TYPE_ID,
+ 'bitmask': self.bitmask,
+ 'expire_time': self.expire_time.decode()
+ }
+
+ def parse_dict(self, data):
"""
- Generate fulfillment payload from a json
+ Generate fulfillment payload from a dict
Args:
- json_data: json description of the fulfillment
+ data (dict): description of the fulfillment
Returns:
Fulfillment
"""
- self.preimage = json_data['expire_time'].encode()
+ self.preimage = data['expire_time'].encode()
def validate(self, message=None, now=None, **kwargs):
"""
diff --git a/setup.py b/setup.py
index c467ce2..d4da1b4 100644
--- a/setup.py
+++ b/setup.py
@@ -69,8 +69,7 @@ setup(
tests_require=tests_require,
extras_require={
'test': tests_require,
- 'dev': dev_require + tests_require + docs_require,
- 'docs': docs_require,
+ 'dev': dev_require + tests_require + docs_require,
+ 'docs': docs_require,
},
)
-
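The README change above boils down to this round-trip; a minimal sketch of the new dict-based API, reusing the empty-preimage fulfillment `cf:0:` from that example:

```python
import json
import cryptoconditions as cc

# to_dict()/from_dict() replace the old JSON methods; JSON encoding
# is now the caller's job.
f = cc.Fulfillment.from_uri('cf:0:')
payload = json.dumps(f.to_dict())
restored = cc.Fulfillment.from_dict(json.loads(payload))
assert restored.serialize_uri() == f.serialize_uri() == 'cf:0:'
```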
| Remove json serialization from cryptoconditions
Just provide functions for serialization to dicts.
Users should then be able to handle serialization from those dicts onward (e.g. to JSON) themselves. | bigchaindb/cryptoconditions | diff --git a/tests/test_fulfillment.py b/tests/test_fulfillment.py
index 225c85c..10d75dc 100644
--- a/tests/test_fulfillment.py
+++ b/tests/test_fulfillment.py
@@ -1,5 +1,4 @@
import binascii
-import json
from time import sleep
from math import ceil
@@ -45,20 +44,13 @@ class TestSha256Fulfillment:
assert fulfillment.condition.serialize_uri() == fulfillment_sha256['condition_uri']
assert fulfillment.validate()
- def test_serialize_json(self, fulfillment_sha256):
+ def test_fulfillment_serialize_to_dict(self, fulfillment_sha256):
fulfillment = Fulfillment.from_uri(fulfillment_sha256['fulfillment_uri'])
-
- assert json.loads(fulfillment.serialize_json()) == \
- {'bitmask': 3, 'preimage': '', 'type': 'fulfillment', 'type_id': 0}
-
- def test_deserialize_json(self, fulfillment_sha256):
- fulfillment = Fulfillment.from_uri(fulfillment_sha256['fulfillment_uri'])
- fulfillment_json = json.loads(fulfillment.serialize_json())
- parsed_fulfillment = fulfillment.from_json(fulfillment_json)
+ parsed_fulfillment = fulfillment.from_dict(fulfillment.to_dict())
assert parsed_fulfillment.serialize_uri() == fulfillment.serialize_uri()
assert parsed_fulfillment.condition.serialize_uri() == fulfillment.condition.serialize_uri()
- assert parsed_fulfillment.serialize_json() == fulfillment.serialize_json()
+ assert parsed_fulfillment.to_dict() == fulfillment.to_dict()
def test_deserialize_condition_and_validate_fulfillment(self, fulfillment_sha256):
condition = Condition.from_uri(fulfillment_sha256['condition_uri'])
@@ -123,10 +115,10 @@ class TestEd25519Sha256Fulfillment:
assert deserialized_condition.serialize_uri() == fulfillment_ed25519['condition_uri']
assert binascii.hexlify(deserialized_condition.hash) == fulfillment_ed25519['condition_hash']
- def test_serialize_json_signed(self, fulfillment_ed25519):
+ def test_serialize_signed_dict_to_fulfillment(self, fulfillment_ed25519):
fulfillment = Fulfillment.from_uri(fulfillment_ed25519['fulfillment_uri'])
- assert json.loads(fulfillment.serialize_json()) == \
+ assert fulfillment.to_dict()== \
{'bitmask': 32,
'public_key': 'Gtbi6WQDB6wUePiZm8aYs5XZ5pUqx9jMMLvRVHPESTjU',
'signature': '4eCt6SFPCzLQSAoQGW7CTu3MHdLj6FezSpjktE7tHsYGJ4pNSUnpHtV9XgdHF2XYd62M9fTJ4WYdhTVck27qNoHj',
@@ -135,10 +127,10 @@ class TestEd25519Sha256Fulfillment:
assert fulfillment.validate(MESSAGE) == True
- def test_serialize_json_unsigned(self, vk_ilp):
+ def test_serialize_unsigned_dict_to_fulfillment(self, vk_ilp):
fulfillment = Ed25519Fulfillment(public_key=vk_ilp['b58'])
- assert json.loads(fulfillment.serialize_json()) == \
+ assert fulfillment.to_dict() == \
{'bitmask': 32,
'public_key': 'Gtbi6WQDB6wUePiZm8aYs5XZ5pUqx9jMMLvRVHPESTjU',
'signature': None,
@@ -146,23 +138,20 @@ class TestEd25519Sha256Fulfillment:
'type_id': 4}
assert fulfillment.validate(MESSAGE) == False
- def test_deserialize_json_signed(self, fulfillment_ed25519):
+ def test_deserialize_signed_dict_to_fulfillment(self, fulfillment_ed25519):
fulfillment = Fulfillment.from_uri(fulfillment_ed25519['fulfillment_uri'])
- fulfillment_json = json.loads(fulfillment.serialize_json())
- parsed_fulfillment = fulfillment.from_json(fulfillment_json)
+ parsed_fulfillment = fulfillment.from_dict(fulfillment.to_dict())
assert parsed_fulfillment.serialize_uri() == fulfillment_ed25519['fulfillment_uri']
assert parsed_fulfillment.condition.serialize_uri() == fulfillment.condition.serialize_uri()
- assert parsed_fulfillment.serialize_json() == fulfillment.serialize_json()
+ assert parsed_fulfillment.to_dict() == fulfillment.to_dict()
- def test_deserialize_json_unsigned(self, vk_ilp):
+ def test_deserialize_unsigned_dict_to_fulfillment(self, vk_ilp):
fulfillment = Ed25519Fulfillment(public_key=vk_ilp['b58'])
-
- fulfillment_json = json.loads(fulfillment.serialize_json())
- parsed_fulfillment = fulfillment.from_json(fulfillment_json)
+ parsed_fulfillment = fulfillment.from_dict(fulfillment.to_dict())
assert parsed_fulfillment.condition.serialize_uri() == fulfillment.condition.serialize_uri()
- assert parsed_fulfillment.serialize_json() == fulfillment.serialize_json()
+ assert parsed_fulfillment.to_dict() == fulfillment.to_dict()
def test_serialize_deserialize_condition(self, vk_ilp):
vk = VerifyingKey(vk_ilp['b58'])
@@ -262,10 +251,10 @@ class TestThresholdSha256Fulfillment:
assert len(fulfillment.subconditions) == num_fulfillments
assert fulfillment.validate(MESSAGE)
- def test_serialize_json_signed(self, fulfillment_threshold):
+ def test_serialize_signed_dict_to_fulfillment(self, fulfillment_threshold):
fulfillment = Fulfillment.from_uri(fulfillment_threshold['fulfillment_uri'])
- assert json.loads(fulfillment.serialize_json()) == \
+ assert fulfillment.to_dict() == \
{'bitmask': 43,
'subfulfillments': [{'bitmask': 3,
'preimage': '',
@@ -282,12 +271,12 @@ class TestThresholdSha256Fulfillment:
'type': 'fulfillment',
'type_id': 2}
- def test_serialize_json_unsigned(self, vk_ilp):
+ def test_serialize_unsigned_dict_to_fulfillment(self, vk_ilp):
fulfillment = ThresholdSha256Fulfillment(threshold=1)
fulfillment.add_subfulfillment(Ed25519Fulfillment(public_key=VerifyingKey(vk_ilp['b58'])))
fulfillment.add_subfulfillment(Ed25519Fulfillment(public_key=VerifyingKey(vk_ilp['b58'])))
- assert json.loads(fulfillment.serialize_json()) == \
+ assert fulfillment.to_dict() == \
{'bitmask': 41,
'subfulfillments': [{'bitmask': 32,
'public_key': 'Gtbi6WQDB6wUePiZm8aYs5XZ5pUqx9jMMLvRVHPESTjU',
@@ -305,50 +294,45 @@ class TestThresholdSha256Fulfillment:
'type': 'fulfillment',
'type_id': 2}
- def test_deserialize_json_signed(self, fulfillment_threshold):
+ def test_deserialize_signed_dict_to_fulfillment(self, fulfillment_threshold):
fulfillment = Fulfillment.from_uri(fulfillment_threshold['fulfillment_uri'])
- fulfillment_json = json.loads(fulfillment.serialize_json())
- parsed_fulfillment = fulfillment.from_json(fulfillment_json)
+ parsed_fulfillment = fulfillment.from_dict(fulfillment.to_dict())
assert parsed_fulfillment.serialize_uri() == fulfillment_threshold['fulfillment_uri']
assert parsed_fulfillment.condition.serialize_uri() == fulfillment.condition.serialize_uri()
- assert parsed_fulfillment.serialize_json() == fulfillment.serialize_json()
+ assert parsed_fulfillment.to_dict() == fulfillment.to_dict()
- def test_deserialize_json_unsigned(self, vk_ilp):
+ def test_deserialize_unsigned_dict_to_fulfillment(self, vk_ilp):
fulfillment = ThresholdSha256Fulfillment(threshold=1)
fulfillment.add_subfulfillment(Ed25519Fulfillment(public_key=VerifyingKey(vk_ilp['b58'])))
fulfillment.add_subfulfillment(Ed25519Fulfillment(public_key=VerifyingKey(vk_ilp['b58'])))
- fulfillment_json = json.loads(fulfillment.serialize_json())
- parsed_fulfillment = fulfillment.from_json(fulfillment_json)
+ parsed_fulfillment = fulfillment.from_dict(fulfillment.to_dict())
assert parsed_fulfillment.condition.serialize_uri() == fulfillment.condition.serialize_uri()
- assert parsed_fulfillment.serialize_json() == fulfillment.serialize_json()
+ assert parsed_fulfillment.to_dict() == fulfillment.to_dict()
def test_weights(self, fulfillment_ed25519):
ilp_fulfillment = Fulfillment.from_uri(fulfillment_ed25519['fulfillment_uri'])
fulfillment1 = ThresholdSha256Fulfillment(threshold=2)
fulfillment1.add_subfulfillment(ilp_fulfillment, weight=2)
- fulfillment_json = json.loads(fulfillment1.serialize_json())
- parsed_fulfillment1 = fulfillment1.from_json(fulfillment_json)
+ parsed_fulfillment1 = fulfillment1.from_dict(fulfillment1.to_dict())
assert parsed_fulfillment1.condition.serialize_uri() == fulfillment1.condition.serialize_uri()
- assert parsed_fulfillment1.serialize_json() == fulfillment1.serialize_json()
+ assert parsed_fulfillment1.to_dict() == fulfillment1.to_dict()
assert parsed_fulfillment1.subconditions[0]['weight'] == 2
assert parsed_fulfillment1.validate(MESSAGE) is True
fulfillment2 = ThresholdSha256Fulfillment(threshold=3)
fulfillment2.add_subfulfillment(ilp_fulfillment, weight=2)
- fulfillment_json = json.loads(fulfillment2.serialize_json())
- parsed_fulfillment2 = fulfillment1.from_json(fulfillment_json)
+ parsed_fulfillment2 = fulfillment1.from_dict(fulfillment2.to_dict())
assert parsed_fulfillment2.subconditions[0]['weight'] == 2
assert parsed_fulfillment2.validate(MESSAGE) is False
fulfillment3 = ThresholdSha256Fulfillment(threshold=3)
fulfillment3.add_subfulfillment(ilp_fulfillment, weight=3)
- fulfillment_json = json.loads(fulfillment3.serialize_json())
- parsed_fulfillment3 = fulfillment1.from_json(fulfillment_json)
+ parsed_fulfillment3 = fulfillment1.from_dict(fulfillment3.to_dict())
assert parsed_fulfillment3.condition.serialize_uri() == fulfillment3.condition.serialize_uri()
assert not (fulfillment3.condition.serialize_uri() == fulfillment1.condition.serialize_uri())
@@ -506,8 +490,7 @@ class TestInvertedThresholdSha256Fulfillment:
fulfillment = InvertedThresholdSha256Fulfillment(threshold=1)
fulfillment.add_subfulfillment(ilp_fulfillment_ed)
- fulfillment_json = json.loads(fulfillment.serialize_json())
- parsed_fulfillment = fulfillment.from_json(fulfillment_json)
+ parsed_fulfillment = fulfillment.from_dict(fulfillment.to_dict())
assert parsed_fulfillment.condition_uri == fulfillment.condition_uri
assert parsed_fulfillment.serialize_uri() == fulfillment.serialize_uri()
@@ -521,16 +504,14 @@ class TestTimeoutFulfillment:
def test_serialize_condition_and_validate_fulfillment(self):
fulfillment = TimeoutFulfillment(expire_time=timestamp())
- fulfillment_json = json.loads(fulfillment.serialize_json())
- parsed_fulfillment = fulfillment.from_json(fulfillment_json)
+ parsed_fulfillment = fulfillment.from_dict(fulfillment.to_dict())
assert parsed_fulfillment.condition_uri == fulfillment.condition_uri
assert parsed_fulfillment.serialize_uri() == fulfillment.serialize_uri()
assert parsed_fulfillment.validate(now=timestamp()) is False
- fulfillment = TimeoutFulfillment(expire_time=str(float(timestamp())+1000))
- fulfillment_json = json.loads(fulfillment.serialize_json())
- parsed_fulfillment = fulfillment.from_json(fulfillment_json)
+ fulfillment = TimeoutFulfillment(expire_time=str(float(timestamp()) + 1000))
+ parsed_fulfillment = fulfillment.from_dict(fulfillment.to_dict())
assert parsed_fulfillment.condition_uri == fulfillment.condition_uri
assert parsed_fulfillment.serialize_uri() == fulfillment.serialize_uri()
@@ -573,8 +554,7 @@ class TestEscrow:
fulfillment_escrow.add_subfulfillment(fulfillment_and_execute)
fulfillment_escrow.add_subfulfillment(fulfillment_and_abort)
- fulfillment_json = json.loads(fulfillment_escrow.serialize_json())
- parsed_fulfillment = fulfillment_escrow.from_json(fulfillment_json)
+ parsed_fulfillment = fulfillment_escrow.from_dict(fulfillment_escrow.to_dict())
assert parsed_fulfillment.condition_uri == fulfillment_escrow.condition_uri
assert parsed_fulfillment.serialize_uri() == fulfillment_escrow.serialize_uri()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 8
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [],
"python": "3.5",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
astroid==2.11.7
attrs==22.2.0
Babel==2.11.0
backcall==0.2.0
base58==0.2.2
certifi==2021.5.30
charset-normalizer==2.0.12
commonmark==0.9.1
coverage==6.2
-e git+https://github.com/bigchaindb/cryptoconditions.git@c3156a947ca32e8d1d9c5f7ec8fa0a049f2ba0a6#egg=cryptoconditions
decorator==5.1.1
dill==0.3.4
docutils==0.18.1
ed25519==1.5
execnet==1.9.0
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
ipdb==0.13.13
ipython==7.16.3
ipython-genutils==0.2.0
isort==5.10.1
jedi==0.17.2
Jinja2==3.0.3
lazy-object-proxy==1.7.1
MarkupSafe==2.0.1
mccabe==0.7.0
packaging==21.3
parso==0.7.1
pep8==1.7.1
pexpect==4.9.0
pickleshare==0.7.5
platformdirs==2.4.0
pluggy==1.0.0
pockets==0.9.1
prompt-toolkit==3.0.36
ptyprocess==0.7.0
py==1.11.0
pyflakes==3.0.1
Pygments==2.14.0
pylint==2.13.9
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
pytest-xdist==3.0.2
pytz==2025.2
recommonmark==0.7.1
requests==2.27.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-napoleon==0.7
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
tomli==1.2.3
traitlets==4.3.3
typed-ast==1.5.5
typing_extensions==4.1.1
urllib3==1.26.20
wcwidth==0.2.13
wrapt==1.16.0
zipp==3.6.0
| name: cryptoconditions
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- astroid==2.11.7
- attrs==22.2.0
- babel==2.11.0
- backcall==0.2.0
- base58==0.2.2
- charset-normalizer==2.0.12
- commonmark==0.9.1
- coverage==6.2
- decorator==5.1.1
- dill==0.3.4
- docutils==0.18.1
- ed25519==1.5
- execnet==1.9.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- ipdb==0.13.13
- ipython==7.16.3
- ipython-genutils==0.2.0
- isort==5.10.1
- jedi==0.17.2
- jinja2==3.0.3
- lazy-object-proxy==1.7.1
- markupsafe==2.0.1
- mccabe==0.7.0
- packaging==21.3
- parso==0.7.1
- pep8==1.7.1
- pexpect==4.9.0
- pickleshare==0.7.5
- platformdirs==2.4.0
- pluggy==1.0.0
- pockets==0.9.1
- prompt-toolkit==3.0.36
- ptyprocess==0.7.0
- py==1.11.0
- pyflakes==3.0.1
- pygments==2.14.0
- pylint==2.13.9
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-xdist==3.0.2
- pytz==2025.2
- recommonmark==0.7.1
- requests==2.27.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-napoleon==0.7
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- traitlets==4.3.3
- typed-ast==1.5.5
- typing-extensions==4.1.1
- urllib3==1.26.20
- wcwidth==0.2.13
- wrapt==1.16.0
- zipp==3.6.0
prefix: /opt/conda/envs/cryptoconditions
| [
"tests/test_fulfillment.py::TestSha256Fulfillment::test_fulfillment_serialize_to_dict",
"tests/test_fulfillment.py::TestEd25519Sha256Fulfillment::test_serialize_signed_dict_to_fulfillment",
"tests/test_fulfillment.py::TestEd25519Sha256Fulfillment::test_serialize_unsigned_dict_to_fulfillment",
"tests/test_fulfillment.py::TestEd25519Sha256Fulfillment::test_deserialize_signed_dict_to_fulfillment",
"tests/test_fulfillment.py::TestEd25519Sha256Fulfillment::test_deserialize_unsigned_dict_to_fulfillment",
"tests/test_fulfillment.py::TestThresholdSha256Fulfillment::test_serialize_signed_dict_to_fulfillment",
"tests/test_fulfillment.py::TestThresholdSha256Fulfillment::test_serialize_unsigned_dict_to_fulfillment",
"tests/test_fulfillment.py::TestThresholdSha256Fulfillment::test_deserialize_signed_dict_to_fulfillment",
"tests/test_fulfillment.py::TestThresholdSha256Fulfillment::test_deserialize_unsigned_dict_to_fulfillment",
"tests/test_fulfillment.py::TestThresholdSha256Fulfillment::test_weights",
"tests/test_fulfillment.py::TestInvertedThresholdSha256Fulfillment::test_serialize_condition_and_validate_fulfillment",
"tests/test_fulfillment.py::TestTimeoutFulfillment::test_serialize_condition_and_validate_fulfillment",
"tests/test_fulfillment.py::TestEscrow::test_serialize_condition_and_validate_fulfillment"
]
| []
| [
"tests/test_fulfillment.py::TestSha256Condition::test_deserialize_condition",
"tests/test_fulfillment.py::TestSha256Condition::test_create_condition",
"tests/test_fulfillment.py::TestSha256Fulfillment::test_deserialize_and_validate_fulfillment",
"tests/test_fulfillment.py::TestSha256Fulfillment::test_deserialize_condition_and_validate_fulfillment",
"tests/test_fulfillment.py::TestSha256Fulfillment::test_condition_from_fulfillment",
"tests/test_fulfillment.py::TestEd25519Sha256Fulfillment::test_ilp_keys",
"tests/test_fulfillment.py::TestEd25519Sha256Fulfillment::test_create",
"tests/test_fulfillment.py::TestEd25519Sha256Fulfillment::test_serialize_condition_and_validate_fulfillment",
"tests/test_fulfillment.py::TestEd25519Sha256Fulfillment::test_deserialize_condition",
"tests/test_fulfillment.py::TestEd25519Sha256Fulfillment::test_serialize_deserialize_condition",
"tests/test_fulfillment.py::TestEd25519Sha256Fulfillment::test_deserialize_fulfillment",
"tests/test_fulfillment.py::TestEd25519Sha256Fulfillment::test_deserialize_fulfillment_2",
"tests/test_fulfillment.py::TestEd25519Sha256Fulfillment::test_serialize_deserialize_fulfillment",
"tests/test_fulfillment.py::TestThresholdSha256Fulfillment::test_serialize_condition_and_validate_fulfillment",
"tests/test_fulfillment.py::TestThresholdSha256Fulfillment::test_deserialize_fulfillment",
"tests/test_fulfillment.py::TestThresholdSha256Fulfillment::test_serialize_deserialize_fulfillment",
"tests/test_fulfillment.py::TestThresholdSha256Fulfillment::test_fulfillment_didnt_reach_threshold",
"tests/test_fulfillment.py::TestThresholdSha256Fulfillment::test_fulfillment_nested_and_or",
"tests/test_fulfillment.py::TestThresholdSha256Fulfillment::test_fulfillment_nested",
"tests/test_fulfillment.py::TestEscrow::test_escrow_execute",
"tests/test_fulfillment.py::TestEscrow::test_escrow_abort",
"tests/test_fulfillment.py::TestEscrow::test_escrow_execute_abort"
]
| []
| MIT License | 581 | [
"cryptoconditions/types/timeout.py",
"cryptoconditions/fulfillment.py",
"setup.py",
"cryptoconditions/types/threshold_sha256.py",
"README.md",
"cryptoconditions/types/ed25519.py",
"cryptoconditions/types/sha256.py",
"cryptoconditions/condition.py"
]
| [
"cryptoconditions/types/timeout.py",
"cryptoconditions/fulfillment.py",
"setup.py",
"cryptoconditions/types/threshold_sha256.py",
"README.md",
"cryptoconditions/types/ed25519.py",
"cryptoconditions/types/sha256.py",
"cryptoconditions/condition.py"
]
|
|
cdent__gabbi-153 | 0a8a3b8faf9a900fd132d9b147f67a851d52f178 | 2016-06-12 20:11:11 | 0a8a3b8faf9a900fd132d9b147f67a851d52f178 | cdent: @jd and @EmilienM, this good for you guys?
EmilienM: :+1: | diff --git a/gabbi/driver.py b/gabbi/driver.py
index 33c0a98..49088fa 100644
--- a/gabbi/driver.py
+++ b/gabbi/driver.py
@@ -39,7 +39,8 @@ from gabbi import utils
def build_tests(path, loader, host=None, port=8001, intercept=None,
test_loader_name=None, fixture_module=None,
- response_handlers=None, prefix='', require_ssl=False):
+ response_handlers=None, prefix='', require_ssl=False,
+ url=None):
"""Read YAML files from a directory to create tests.
Each YAML file represents an ordered sequence of HTTP requests.
@@ -54,6 +55,7 @@ def build_tests(path, loader, host=None, port=8001, intercept=None,
:param response_handers: ResponseHandler classes.
:type response_handlers: List of ResponseHandler classes.
:param prefix: A URL prefix for all URLs that are not fully qualified.
+ :param url: A full URL to test against. Replaces host, port and prefix.
:param require_ssl: If ``True``, make all tests default to using SSL.
:rtype: TestSuite containing multiple TestSuites (one for each YAML file).
"""
@@ -63,6 +65,12 @@ def build_tests(path, loader, host=None, port=8001, intercept=None,
if not bool(host) ^ bool(intercept):
raise AssertionError('must specify exactly one of host or intercept')
+ # If url is being used, reset host, port and prefix.
+ if url:
+ host, port, prefix, force_ssl = utils.host_info_from_target(url)
+ if force_ssl and not require_ssl:
+ require_ssl = force_ssl
+
if test_loader_name is None:
test_loader_name = inspect.stack()[1]
test_loader_name = os.path.splitext(os.path.basename(
@@ -97,7 +105,7 @@ def build_tests(path, loader, host=None, port=8001, intercept=None,
def py_test_generator(test_dir, host=None, port=8001, intercept=None,
prefix=None, test_loader_name=None,
fixture_module=None, response_handlers=None,
- require_ssl=False):
+ require_ssl=False, url=None):
"""Generate tests cases for py.test
This uses build_tests to create TestCases and then yields them in
@@ -110,7 +118,8 @@ def py_test_generator(test_dir, host=None, port=8001, intercept=None,
test_loader_name=test_loader_name,
fixture_module=fixture_module,
response_handlers=response_handlers,
- prefix=prefix, require_ssl=require_ssl)
+ prefix=prefix, require_ssl=require_ssl,
+ url=url)
for test in tests:
if hasattr(test, '_tests'):
diff --git a/gabbi/runner.py b/gabbi/runner.py
index 3411dbe..d4e79d5 100644
--- a/gabbi/runner.py
+++ b/gabbi/runner.py
@@ -17,8 +17,6 @@ from importlib import import_module
import sys
import unittest
-from six.moves.urllib import parse as urlparse
-
from gabbi import case
from gabbi import handlers
from gabbi.reporter import ConciseTestRunner
@@ -93,7 +91,7 @@ def run():
)
args = parser.parse_args()
- host, port, prefix, force_ssl = process_target_args(
+ host, port, prefix, force_ssl = utils.host_info_from_target(
args.target, args.prefix)
# Initialize response handlers.
@@ -113,31 +111,6 @@ def run():
sys.exit(not result.wasSuccessful())
-def process_target_args(target, prefix):
- """Turn the argparse args into a host, port and prefix."""
- force_ssl = False
- split_url = urlparse.urlparse(target)
-
- if split_url.scheme:
- if split_url.scheme == 'https':
- force_ssl = True
- return split_url.hostname, split_url.port, split_url.path, force_ssl
- else:
- target = target
- prefix = prefix
-
- if ':' in target and '[' not in target:
- host, port = target.rsplit(':', 1)
- elif ']:' in target:
- host, port = target.rsplit(':', 1)
- else:
- host = target
- port = None
- host = host.replace('[', '').replace(']', '')
-
- return host, port, prefix, force_ssl
-
-
def initialize_handlers(response_handlers):
custom_response_handlers = []
for import_path in response_handlers or []:
diff --git a/gabbi/utils.py b/gabbi/utils.py
index 3de040d..172b4bf 100644
--- a/gabbi/utils.py
+++ b/gabbi/utils.py
@@ -126,6 +126,31 @@ def not_binary(content_type):
content_type.startswith('application/json'))
+def host_info_from_target(target, prefix=None):
+ """Turn url or host:port and target into test destination."""
+ force_ssl = False
+ split_url = urlparse.urlparse(target)
+
+ if split_url.scheme:
+ if split_url.scheme == 'https':
+ force_ssl = True
+ return split_url.hostname, split_url.port, split_url.path, force_ssl
+ else:
+ target = target
+ prefix = prefix
+
+ if ':' in target and '[' not in target:
+ host, port = target.rsplit(':', 1)
+ elif ']:' in target:
+ host, port = target.rsplit(':', 1)
+ else:
+ host = target
+ port = None
+ host = host.replace('[', '').replace(']', '')
+
+ return host, port, prefix, force_ssl
+
+
def _colorize(color, message):
"""Add a color to the message."""
try:
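With the patch above, a live-server run needs just the URL; a sketch of a `load_tests` hook using the new keyword (the YAML directory name is hypothetical):

```python
from gabbi import driver

def load_tests(loader, tests, pattern):
    # url= supplies host, port, prefix and (via https) SSL in one go.
    # Note the host/intercept assertion still fires first, so a dummy
    # host is passed alongside url=, as the new driver tests do.
    return driver.build_tests('gabbi-tests/', loader,
                              host='localhost',
                              url='https://example.com:1024/theend')
```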
| In 'live' testing scenarios argument passing to build_tests is convoluted and SSL may not work
If you want to use `build_tests` to create real TestCases against a live server, it's likely you know the URL, and that would be the most convenient thing to pass instead of having to parse out the host, port and prefix (script_name) and then pass those separately.
In addition, if you have a URL you know whether your server uses SSL, but the tests may not have been written to do SSL (with an `ssl: true` entry). Because of the test-building process this is a bit awkward at the moment. It would be better to be able to say "yeah, this is SSL" for the whole run. | cdent/gabbi | diff --git a/gabbi/tests/test_driver.py b/gabbi/tests/test_driver.py
index 0b2ce0a..8f6bca0 100644
--- a/gabbi/tests/test_driver.py
+++ b/gabbi/tests/test_driver.py
@@ -70,3 +70,20 @@ class DriverTest(unittest.TestCase):
first_test = suite._tests[0]._tests[0]
full_url = first_test._parse_url(first_test.test_data['url'])
self.assertEqual('http://localhost:8001/', full_url)
+
+ def test_build_url_target(self):
+ suite = driver.build_tests(self.test_dir, self.loader,
+ host='localhost', port='999',
+ url='https://example.com:1024/theend')
+ first_test = suite._tests[0]._tests[0]
+ full_url = first_test._parse_url(first_test.test_data['url'])
+ self.assertEqual('https://example.com:1024/theend/', full_url)
+
+ def test_build_url_target_forced_ssl(self):
+ suite = driver.build_tests(self.test_dir, self.loader,
+ host='localhost', port='999',
+ url='http://example.com:1024/theend',
+ require_ssl=True)
+ first_test = suite._tests[0]._tests[0]
+ full_url = first_test._parse_url(first_test.test_data['url'])
+ self.assertEqual('https://example.com:1024/theend/', full_url)
diff --git a/gabbi/tests/test_runner.py b/gabbi/tests/test_runner.py
index 3c132b1..a854cf9 100644
--- a/gabbi/tests/test_runner.py
+++ b/gabbi/tests/test_runner.py
@@ -229,93 +229,6 @@ class RunnerTest(unittest.TestCase):
self._stderr.write(sys.stderr.read())
-class RunnerHostArgParse(unittest.TestCase):
-
- def _test_hostport(self, url_or_host, expected_host,
- provided_prefix=None, expected_port=None,
- expected_prefix=None, expected_ssl=False):
- host, port, prefix, ssl = runner.process_target_args(
- url_or_host, provided_prefix)
-
- # normalize hosts, they are case insensitive
- self.assertEqual(expected_host.lower(), host.lower())
- # port can be a string or int depending on the inputs
- self.assertEqual(expected_port, port)
- self.assertEqual(expected_prefix, prefix)
- self.assertEqual(expected_ssl, ssl)
-
- def test_plain_url_no_port(self):
- self._test_hostport('http://foobar.com/news',
- 'foobar.com',
- expected_port=None,
- expected_prefix='/news')
-
- def test_plain_url_with_port(self):
- self._test_hostport('http://foobar.com:80/news',
- 'foobar.com',
- expected_port=80,
- expected_prefix='/news')
-
- def test_ssl_url(self):
- self._test_hostport('https://foobar.com/news',
- 'foobar.com',
- expected_prefix='/news',
- expected_ssl=True)
-
- def test_ssl_port80_url(self):
- self._test_hostport('https://foobar.com:80/news',
- 'foobar.com',
- expected_prefix='/news',
- expected_port=80,
- expected_ssl=True)
-
- def test_ssl_port_url(self):
- self._test_hostport('https://foobar.com:999/news',
- 'foobar.com',
- expected_prefix='/news',
- expected_port=999,
- expected_ssl=True)
-
- def test_simple_hostport(self):
- self._test_hostport('foobar.com:999',
- 'foobar.com',
- expected_port='999')
-
- def test_simple_hostport_with_prefix(self):
- self._test_hostport('foobar.com:999',
- 'foobar.com',
- provided_prefix='/news',
- expected_port='999',
- expected_prefix='/news')
-
- def test_ipv6_url_long(self):
- self._test_hostport(
- 'http://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:999/news',
- 'FEDC:BA98:7654:3210:FEDC:BA98:7654:3210',
- expected_port=999,
- expected_prefix='/news')
-
- def test_ipv6_url_localhost(self):
- self._test_hostport(
- 'http://[::1]:999/news',
- '::1',
- expected_port=999,
- expected_prefix='/news')
-
- def test_ipv6_host_localhost(self):
- # If a user wants to use the hostport form, then they need
- # to hack it with the brackets.
- self._test_hostport(
- '[::1]',
- '::1')
-
- def test_ipv6_hostport_localhost(self):
- self._test_hostport(
- '[::1]:999',
- '::1',
- expected_port='999')
-
-
class HTMLResponseHandler(handlers.ResponseHandler):
test_key_suffix = 'html'
diff --git a/gabbi/tests/test_utils.py b/gabbi/tests/test_utils.py
index 1754dad..d5b8b50 100644
--- a/gabbi/tests/test_utils.py
+++ b/gabbi/tests/test_utils.py
@@ -158,3 +158,90 @@ class CreateURLTest(unittest.TestCase):
'/foo', 'FEDC:BA98:7654:3210:FEDC:BA98:7654:3210', port=999)
self.assertEqual(
'http://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:999/foo', url)
+
+
+class UtilsHostInfoFromTarget(unittest.TestCase):
+
+ def _test_hostport(self, url_or_host, expected_host,
+ provided_prefix=None, expected_port=None,
+ expected_prefix=None, expected_ssl=False):
+ host, port, prefix, ssl = utils.host_info_from_target(
+ url_or_host, provided_prefix)
+
+ # normalize hosts, they are case insensitive
+ self.assertEqual(expected_host.lower(), host.lower())
+ # port can be a string or int depending on the inputs
+ self.assertEqual(expected_port, port)
+ self.assertEqual(expected_prefix, prefix)
+ self.assertEqual(expected_ssl, ssl)
+
+ def test_plain_url_no_port(self):
+ self._test_hostport('http://foobar.com/news',
+ 'foobar.com',
+ expected_port=None,
+ expected_prefix='/news')
+
+ def test_plain_url_with_port(self):
+ self._test_hostport('http://foobar.com:80/news',
+ 'foobar.com',
+ expected_port=80,
+ expected_prefix='/news')
+
+ def test_ssl_url(self):
+ self._test_hostport('https://foobar.com/news',
+ 'foobar.com',
+ expected_prefix='/news',
+ expected_ssl=True)
+
+ def test_ssl_port80_url(self):
+ self._test_hostport('https://foobar.com:80/news',
+ 'foobar.com',
+ expected_prefix='/news',
+ expected_port=80,
+ expected_ssl=True)
+
+ def test_ssl_port_url(self):
+ self._test_hostport('https://foobar.com:999/news',
+ 'foobar.com',
+ expected_prefix='/news',
+ expected_port=999,
+ expected_ssl=True)
+
+ def test_simple_hostport(self):
+ self._test_hostport('foobar.com:999',
+ 'foobar.com',
+ expected_port='999')
+
+ def test_simple_hostport_with_prefix(self):
+ self._test_hostport('foobar.com:999',
+ 'foobar.com',
+ provided_prefix='/news',
+ expected_port='999',
+ expected_prefix='/news')
+
+ def test_ipv6_url_long(self):
+ self._test_hostport(
+ 'http://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:999/news',
+ 'FEDC:BA98:7654:3210:FEDC:BA98:7654:3210',
+ expected_port=999,
+ expected_prefix='/news')
+
+ def test_ipv6_url_localhost(self):
+ self._test_hostport(
+ 'http://[::1]:999/news',
+ '::1',
+ expected_port=999,
+ expected_prefix='/news')
+
+ def test_ipv6_host_localhost(self):
+ # If a user wants to use the hostport form, then they need
+ # to hack it with the brackets.
+ self._test_hostport(
+ '[::1]',
+ '::1')
+
+ def test_ipv6_hostport_localhost(self):
+ self._test_hostport(
+ '[::1]:999',
+ '::1',
+ expected_port='999')
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 3
} | 1.21 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.5",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
colorama==0.4.5
decorator==5.1.1
-e git+https://github.com/cdent/gabbi.git@0a8a3b8faf9a900fd132d9b147f67a851d52f178#egg=gabbi
importlib-metadata==4.8.3
iniconfig==1.1.1
jsonpath-rw==1.4.0
jsonpath-rw-ext==1.2.2
packaging==21.3
pbr==6.1.1
pluggy==1.0.0
ply==3.11
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
PyYAML==6.0.1
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
wsgi_intercept==1.13.1
zipp==3.6.0
| name: gabbi
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- colorama==0.4.5
- decorator==5.1.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jsonpath-rw==1.4.0
- jsonpath-rw-ext==1.2.2
- packaging==21.3
- pbr==6.1.1
- pluggy==1.0.0
- ply==3.11
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pyyaml==6.0.1
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- wsgi-intercept==1.13.1
- zipp==3.6.0
prefix: /opt/conda/envs/gabbi
| [
"gabbi/tests/test_driver.py::DriverTest::test_build_url_target",
"gabbi/tests/test_driver.py::DriverTest::test_build_url_target_forced_ssl",
"gabbi/tests/test_utils.py::UtilsHostInfoFromTarget::test_ipv6_host_localhost",
"gabbi/tests/test_utils.py::UtilsHostInfoFromTarget::test_ipv6_hostport_localhost",
"gabbi/tests/test_utils.py::UtilsHostInfoFromTarget::test_ipv6_url_localhost",
"gabbi/tests/test_utils.py::UtilsHostInfoFromTarget::test_ipv6_url_long",
"gabbi/tests/test_utils.py::UtilsHostInfoFromTarget::test_plain_url_no_port",
"gabbi/tests/test_utils.py::UtilsHostInfoFromTarget::test_plain_url_with_port",
"gabbi/tests/test_utils.py::UtilsHostInfoFromTarget::test_simple_hostport",
"gabbi/tests/test_utils.py::UtilsHostInfoFromTarget::test_simple_hostport_with_prefix",
"gabbi/tests/test_utils.py::UtilsHostInfoFromTarget::test_ssl_port80_url",
"gabbi/tests/test_utils.py::UtilsHostInfoFromTarget::test_ssl_port_url",
"gabbi/tests/test_utils.py::UtilsHostInfoFromTarget::test_ssl_url"
]
| []
| [
"gabbi/tests/test_driver.py::DriverTest::test_build_require_ssl",
"gabbi/tests/test_driver.py::DriverTest::test_build_requires_host_or_intercept",
"gabbi/tests/test_driver.py::DriverTest::test_driver_loads_two_tests",
"gabbi/tests/test_driver.py::DriverTest::test_driver_prefix",
"gabbi/tests/test_runner.py::RunnerTest::test_custom_response_handler",
"gabbi/tests/test_runner.py::RunnerTest::test_exit_code",
"gabbi/tests/test_runner.py::RunnerTest::test_target_url_parsing",
"gabbi/tests/test_runner.py::RunnerTest::test_target_url_parsing_standard_port",
"gabbi/tests/test_utils.py::BinaryTypesTest::test_binary",
"gabbi/tests/test_utils.py::BinaryTypesTest::test_not_binary",
"gabbi/tests/test_utils.py::ExtractContentTypeTest::test_extract_content_type_bad_params",
"gabbi/tests/test_utils.py::ExtractContentTypeTest::test_extract_content_type_default_both",
"gabbi/tests/test_utils.py::ExtractContentTypeTest::test_extract_content_type_default_charset",
"gabbi/tests/test_utils.py::ExtractContentTypeTest::test_extract_content_type_multiple_params",
"gabbi/tests/test_utils.py::ExtractContentTypeTest::test_extract_content_type_with_charset",
"gabbi/tests/test_utils.py::ColorizeTest::test_colorize_missing_color",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_ipv6_already_bracket",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_ipv6_full",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_ipv6_ssl",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_ipv6_ssl_weird_port",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_no_double_colon",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_not_ssl_on_443",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_port",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_port_and_ssl",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_prefix",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_preserve_query",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_simple",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_ssl",
"gabbi/tests/test_utils.py::CreateURLTest::test_create_url_ssl_on_80"
]
| []
| Apache License 2.0 | 582 | [
"gabbi/driver.py",
"gabbi/runner.py",
"gabbi/utils.py"
]
| [
"gabbi/driver.py",
"gabbi/runner.py",
"gabbi/utils.py"
]
|
kytos__python-openflow-99 | 1b4d191de2e51695ab94395d4f0ba93a7f0e98f8 | 2016-06-13 23:18:31 | 1b4d191de2e51695ab94395d4f0ba93a7f0e98f8 | diff --git a/pyof/v0x01/controller2switch/packet_out.py b/pyof/v0x01/controller2switch/packet_out.py
index 78cb669..e7c9965 100644
--- a/pyof/v0x01/controller2switch/packet_out.py
+++ b/pyof/v0x01/controller2switch/packet_out.py
@@ -7,12 +7,16 @@ uses the OFPT_PACKET_OUT message"""
# Local source tree imports
from pyof.v0x01.common import header as of_header
+from pyof.v0x01.common.phy_port import Port
from pyof.v0x01.controller2switch import common
from pyof.v0x01.foundation import base
from pyof.v0x01.foundation import basic_types
+from pyof.v0x01.foundation.exceptions import ValidationError
# Classes
+#: in_port valid virtual port values, for validation
+_VIRT_IN_PORTS = (Port.OFPP_LOCAL, Port.OFPP_CONTROLLER, Port.OFPP_NONE)
class PacketOut(base.GenericMessage):
"""
@@ -47,3 +51,25 @@ class PacketOut(base.GenericMessage):
self.actions_len = actions_len
self.actions = [] if actions is None else actions
self.data = data
+
+ def validate(self):
+ if not super().is_valid():
+ raise ValidationError()
+ self._validate_in_port()
+
+ def is_valid(self):
+ try:
+ self.validate()
+ return True
+ except ValidationError:
+ return False
+
+ def _validate_in_port(self):
+ port = self.in_port
+ valid = True
+ if isinstance(port, int) and (port < 1 or port >= Port.OFPP_MAX.value):
+ valid = False
+ elif isinstance(port, Port) and port not in _VIRT_IN_PORTS:
+ valid = False
+ if not valid:
+ raise ValidationError('{} is not a valid input port.'.format(port))
diff --git a/pyof/v0x01/foundation/exceptions.py b/pyof/v0x01/foundation/exceptions.py
index 0dab2cd..13e9296 100644
--- a/pyof/v0x01/foundation/exceptions.py
+++ b/pyof/v0x01/foundation/exceptions.py
@@ -1,6 +1,11 @@
"""Exceptions defined on this Library"""
+class ValidationError(Exception):
+ """Can be used directly or inherited by specific validation errors."""
+ pass
+
+
class MethodNotImplemented(Exception):
"""Exception to be raised when a method is not implemented"""
def __init__(self, message=None):
@@ -33,44 +38,11 @@ class WrongListItemType(Exception):
return message
-class PADHasNoValue(Exception):
- """Exception raised when user tries to set a value on a PAD attribute"""
- def __str__(self):
- return "You can't set a value on a PAD attribute"
-
-
-class AttributeTypeError(Exception):
- """Error raise when the attribute is not of the expected type
- defined on the class definition"""
-
- def __init__(self, item, item_class, expected_class):
- super().__init__()
- self.item = item
- self.item_class = item_class
- self.expected_class = expected_class
-
- def __str__(self):
- msg = "Unexpected value '{}' ".format(str(self.item))
- msg += "with class '{}' ".format(str(self.item_class))
- msg += "and expected class '{}'".format(str(self.expected_class))
- return msg
-
-
class NotBinaryData(Exception):
"""Error raised when the content of a BinaryData attribute is not binary"""
def __str__(self):
return "The content of this variable needs to be binary data"
-class ValidationError(Exception):
- """Error on validate message or struct"""
- def __init__(self, msg="Error on validate message"):
- super().__init__()
- self.msg = msg
-
- def __str__(self):
- return self.msg
-
-
class UnpackException(Exception):
pass
| Check 1.0.1 and 1.0.2 compliance | kytos/python-openflow | diff --git a/tests/v0x01/test_controller2switch/test_packet_out.py b/tests/v0x01/test_controller2switch/test_packet_out.py
index 38dfd5a..4130c77 100644
--- a/tests/v0x01/test_controller2switch/test_packet_out.py
+++ b/tests/v0x01/test_controller2switch/test_packet_out.py
@@ -1,11 +1,12 @@
import unittest
from pyof.v0x01.common import phy_port
+from pyof.v0x01.common.phy_port import Port
from pyof.v0x01.controller2switch import packet_out
+from pyof.v0x01.foundation.exceptions import ValidationError
class TestPacketOut(unittest.TestCase):
-
def setUp(self):
self.message = packet_out.PacketOut()
self.message.header.xid = 80
@@ -28,3 +29,35 @@ class TestPacketOut(unittest.TestCase):
"""[Controller2Switch/PacketOut] - unpacking"""
# TODO
pass
+
+ def test_valid_virtual_in_ports(self):
+ """Valid virtual ports as defined in 1.0.1 spec."""
+ valid = (Port.OFPP_LOCAL, Port.OFPP_CONTROLLER, Port.OFPP_NONE)
+ msg = packet_out.PacketOut()
+ for in_port in valid:
+ msg.in_port = in_port
+ self.assertTrue(msg.is_valid())
+
+ def test_invalid_virtual_in_ports(self):
+ """Invalid virtual ports as defined in 1.0.1 spec."""
+ invalid = (Port.OFPP_IN_PORT, Port.OFPP_TABLE, Port.OFPP_NORMAL,
+ Port.OFPP_FLOOD, Port.OFPP_ALL)
+ for in_port in invalid:
+ self.message.in_port = in_port
+ self.assertFalse(self.message.is_valid())
+ self.assertRaises(ValidationError, self.message.validate)
+
+ def test_valid_physical_in_ports(self):
+ """Physical port limits from 1.0.0 spec."""
+ max_valid = int(Port.OFPP_MAX.value) - 1
+ for in_port in (1, max_valid):
+ self.message.in_port = in_port
+ self.assertTrue(self.message.is_valid())
+
+ def test_invalid_physical_in_port(self):
+ """Physical port limits from 1.0.0 spec."""
+ max_valid = int(Port.OFPP_MAX.value) - 1
+ for in_port in (-1, 0, max_valid + 1, max_valid + 2):
+ self.message.in_port = in_port
+ self.assertFalse(self.message.is_valid())
+ self.assertRaises(ValidationError, self.message.validate)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 3,
"test_score": 3
},
"num_modified_files": 2
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"coverage"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup==1.2.2
iniconfig==2.1.0
-e git+https://github.com/kytos/python-openflow.git@1b4d191de2e51695ab94395d4f0ba93a7f0e98f8#egg=Kytos_OpenFlow_Parser_library
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytest-cov==6.0.0
tomli==2.2.1
| name: python-openflow
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-cov==6.0.0
- tomli==2.2.1
prefix: /opt/conda/envs/python-openflow
| [
"tests/v0x01/test_controller2switch/test_packet_out.py::TestPacketOut::test_invalid_physical_in_port",
"tests/v0x01/test_controller2switch/test_packet_out.py::TestPacketOut::test_invalid_virtual_in_ports"
]
| []
| [
"tests/v0x01/test_controller2switch/test_packet_out.py::TestPacketOut::test_get_size",
"tests/v0x01/test_controller2switch/test_packet_out.py::TestPacketOut::test_valid_physical_in_ports",
"tests/v0x01/test_controller2switch/test_packet_out.py::TestPacketOut::test_valid_virtual_in_ports"
]
| []
| MIT License | 583 | [
"pyof/v0x01/foundation/exceptions.py",
"pyof/v0x01/controller2switch/packet_out.py"
]
| [
"pyof/v0x01/foundation/exceptions.py",
"pyof/v0x01/controller2switch/packet_out.py"
]
|
|
mozilla__bleach-205 | 2235b8fcadc8abef3a2845bb0ce67206982f3489 | 2016-06-14 16:16:47 | edd91a00e1c50cebbc512c7db61897ad3d0ba00a | diff --git a/bleach/__init__.py b/bleach/__init__.py
index 3092cb7..ac163d1 100644
--- a/bleach/__init__.py
+++ b/bleach/__init__.py
@@ -315,7 +315,7 @@ def linkify(text, callbacks=DEFAULT_CALLBACKS, skip_pre=False,
if node.tag == ETREE_TAG('pre') and skip_pre:
linkify_nodes(node, False)
elif not (node in _seen):
- linkify_nodes(node, True)
+ linkify_nodes(node, parse_text)
current_child += 1
| Children of <pre> tags should not be linkified when skip_pre=True (patch attached)
The children of `pre` tags should not be linkified when `skip_pre` is on. As a quick illustration of the intended behavior (the expected output below is taken from the test in the attached patch):
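```python
import bleach

text = '<pre><code>http://foo.com</code></pre>http://bar.com'
print(bleach.linkify(text, skip_pre=True))
# <pre><code>http://foo.com</code></pre><a href="http://bar.com" rel="nofollow">http://bar.com</a>
```
The attached patch: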
```
diff --git a/bleach/__init__.py b/bleach/__init__.py
index 48b6512..4c2dd1b 100644
--- a/bleach/__init__.py
+++ b/bleach/__init__.py
@@ -300,7 +300,7 @@ def linkify(text, callbacks=DEFAULT_CALLBACKS, skip_pre=False,
if node.tag == ETREE_TAG('pre') and skip_pre:
linkify_nodes(node, False)
elif not (node in _seen):
- linkify_nodes(node, True)
+ linkify_nodes(node, parse_text)
current_child += 1
diff --git a/bleach/tests/test_links.py b/bleach/tests/test_links.py
index 62da8d1..ae0fba7 100644
--- a/bleach/tests/test_links.py
+++ b/bleach/tests/test_links.py
@@ -314,6 +314,13 @@ def test_skip_pre():
eq_(nofollowed, linkify(already_linked))
eq_(nofollowed, linkify(already_linked, skip_pre=True))
+def test_skip_pre_child():
+ # Don't linkify the children of pre tags.
+ intext = '<pre><code>http://foo.com</code></pre>http://bar.com'
+ expect = '<pre><code>http://foo.com</code></pre><a href="http://bar.com" rel="nofollow">http://bar.com</a>'
+ output = linkify(intext, skip_pre=True)
+ eq_(expect, output)
+
def test_libgl():
"""libgl.so.1 should not be linkified."""
``` | mozilla/bleach | diff --git a/bleach/tests/test_links.py b/bleach/tests/test_links.py
index 62da8d1..2958f5e 100644
--- a/bleach/tests/test_links.py
+++ b/bleach/tests/test_links.py
@@ -314,6 +314,13 @@ def test_skip_pre():
eq_(nofollowed, linkify(already_linked))
eq_(nofollowed, linkify(already_linked, skip_pre=True))
+ eq_(
+ linkify('<pre><code>http://example.com</code></pre>http://example.com',
+ skip_pre=True),
+ ('<pre><code>http://example.com</code></pre>'
+ '<a href="http://example.com" rel="nofollow">http://example.com</a>')
+ )
+
def test_libgl():
"""libgl.so.1 should not be linkified."""
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 1
} | 1.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"flake8",
"pytest"
],
"pre_install": null,
"python": "3.5",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
-e git+https://github.com/mozilla/bleach.git@2235b8fcadc8abef3a2845bb0ce67206982f3489#egg=bleach
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
colorama==0.4.5
cryptography==40.0.2
distlib==0.3.9
docutils==0.17.1
filelock==3.4.1
flake8==5.0.4
html5lib==0.9999999
idna==3.10
imagesize==1.4.1
importlib-metadata==4.2.0
importlib-resources==5.4.0
iniconfig==1.1.1
jeepney==0.7.1
Jinja2==3.0.3
keyring==23.4.1
MarkupSafe==2.0.1
mccabe==0.7.0
nose==1.3.7
ordereddict==1.1
packaging==21.3
pkginfo==1.10.0
platformdirs==2.4.0
pluggy==1.0.0
py==1.11.0
pycodestyle==2.9.1
pycparser==2.21
pyflakes==2.5.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytz==2025.2
readme-renderer==34.0
requests==2.27.1
requests-toolbelt==1.0.0
rfc3986==1.5.0
SecretStorage==3.3.3
six==1.17.0
snowballstemmer==2.2.0
Sphinx==4.3.2
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
toml==0.10.2
tomli==1.2.3
tox==3.28.0
tqdm==4.64.1
twine==3.8.0
typing_extensions==4.1.1
urllib3==1.26.20
virtualenv==20.16.2
webencodings==0.5.1
zipp==3.6.0
| name: bleach
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- cffi==1.15.1
- charset-normalizer==2.0.12
- colorama==0.4.5
- cryptography==40.0.2
- distlib==0.3.9
- docutils==0.17.1
- filelock==3.4.1
- flake8==5.0.4
- html5lib==0.9999999
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.2.0
- importlib-resources==5.4.0
- iniconfig==1.1.1
- jeepney==0.7.1
- jinja2==3.0.3
- keyring==23.4.1
- markupsafe==2.0.1
- mccabe==0.7.0
- nose==1.3.7
- ordereddict==1.1
- packaging==21.3
- pkginfo==1.10.0
- platformdirs==2.4.0
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pycparser==2.21
- pyflakes==2.5.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytz==2025.2
- readme-renderer==34.0
- requests==2.27.1
- requests-toolbelt==1.0.0
- rfc3986==1.5.0
- secretstorage==3.3.3
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==4.3.2
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- toml==0.10.2
- tomli==1.2.3
- tox==3.28.0
- tqdm==4.64.1
- twine==3.8.0
- typing-extensions==4.1.1
- urllib3==1.26.20
- virtualenv==20.16.2
- webencodings==0.5.1
- zipp==3.6.0
prefix: /opt/conda/envs/bleach
| [
"bleach/tests/test_links.py::test_skip_pre"
]
| []
| [
"bleach/tests/test_links.py::test_empty",
"bleach/tests/test_links.py::test_simple_link",
"bleach/tests/test_links.py::test_trailing_slash",
"bleach/tests/test_links.py::test_mangle_link",
"bleach/tests/test_links.py::test_mangle_text",
"bleach/tests/test_links.py::test_set_attrs",
"bleach/tests/test_links.py::test_only_proto_links",
"bleach/tests/test_links.py::test_stop_email",
"bleach/tests/test_links.py::test_tlds",
"bleach/tests/test_links.py::test_escaping",
"bleach/tests/test_links.py::test_nofollow_off",
"bleach/tests/test_links.py::test_link_in_html",
"bleach/tests/test_links.py::test_links_https",
"bleach/tests/test_links.py::test_add_rel_nofollow",
"bleach/tests/test_links.py::test_url_with_path",
"bleach/tests/test_links.py::test_link_ftp",
"bleach/tests/test_links.py::test_link_query",
"bleach/tests/test_links.py::test_link_fragment",
"bleach/tests/test_links.py::test_link_entities",
"bleach/tests/test_links.py::test_escaped_html",
"bleach/tests/test_links.py::test_link_http_complete",
"bleach/tests/test_links.py::test_non_url",
"bleach/tests/test_links.py::test_javascript_url",
"bleach/tests/test_links.py::test_unsafe_url",
"bleach/tests/test_links.py::test_libgl",
"bleach/tests/test_links.py::test_end_of_clause",
"bleach/tests/test_links.py::test_sarcasm",
"bleach/tests/test_links.py::test_parentheses_with_removing",
"bleach/tests/test_links.py::test_tokenizer",
"bleach/tests/test_links.py::test_ignore_bad_protocols",
"bleach/tests/test_links.py::test_max_recursion_depth",
"bleach/tests/test_links.py::test_link_emails_and_urls",
"bleach/tests/test_links.py::test_links_case_insensitive",
"bleach/tests/test_links.py::test_elements_inside_links",
"bleach/tests/test_links.py::test_remove_first_childlink"
]
| []
| Apache License 2.0 | 584 | [
"bleach/__init__.py"
]
| [
"bleach/__init__.py"
]
|
|
Backblaze__B2_Command_Line_Tool-173 | ab2b5b4e3dc2c8b52b28592c7414ebb4646034e2 | 2016-06-14 20:22:17 | 01c4e89f63f38b9efa6a6fa63f54cd556a0b5305 | diff --git a/b2/sync.py b/b2/sync.py
index c3c4ad9..cffdc81 100644
--- a/b2/sync.py
+++ b/b2/sync.py
@@ -67,12 +67,15 @@ class SyncReport(object):
self.closed = False
self.lock = threading.Lock()
self._update_progress()
+ self.warnings = []
def close(self):
with self.lock:
if not self.no_progress:
self._print_line('', False)
self.closed = True
+ for warning in self.warnings:
+ self._print_line(warning, True)
def __enter__(self):
return self
@@ -185,6 +188,9 @@ class SyncReport(object):
self.transfer_bytes += byte_delta
self._update_progress()
+ def local_access_error(self, path):
+ self.warnings.append('WARNING: %s could not be accessed (broken symlink?)' % (path,))
+
class SyncFileReporter(AbstractProgressListener):
"""
@@ -453,13 +459,17 @@ class AbstractFolder(object):
"""
@abstractmethod
- def all_files(self):
+ def all_files(self, reporter):
"""
Returns an iterator over all of the files in the folder, in
the order that B2 uses.
No matter what the folder separator on the local file system
is, "/" is used in the returned file names.
+
+ If a file is found, but does not exist (for example due to
+ a broken symlink or a race), reporter will be informed about
+ each such problem.
"""
@abstractmethod
@@ -494,9 +504,9 @@ class LocalFolder(AbstractFolder):
def folder_type(self):
return 'local'
- def all_files(self):
+ def all_files(self, reporter):
prefix_len = len(self.root) + 1 # include trailing '/' in prefix length
- for relative_path in self._walk_relative_paths(prefix_len, self.root):
+ for relative_path in self._walk_relative_paths(prefix_len, self.root, reporter):
yield self._make_file(relative_path)
def make_full_path(self, file_name):
@@ -514,7 +524,7 @@ class LocalFolder(AbstractFolder):
elif not os.path.isdir(self.root):
raise Exception('%s is not a directory' % (self.root,))
- def _walk_relative_paths(self, prefix_len, dir_path):
+ def _walk_relative_paths(self, prefix_len, dir_path, reporter):
"""
Yields all of the file names anywhere under this folder, in the
order they would appear in B2.
@@ -535,16 +545,21 @@ class LocalFolder(AbstractFolder):
)
full_path = os.path.join(dir_path, name)
relative_path = full_path[prefix_len:]
- if os.path.isdir(full_path):
- name += six.u('/')
- dirs.add(name)
- names[name] = (full_path, relative_path)
+ # Skip broken symlinks or other inaccessible files
+ if not os.path.exists(full_path):
+ if reporter is not None:
+ reporter.local_access_error(full_path)
+ else:
+ if os.path.isdir(full_path):
+ name += six.u('/')
+ dirs.add(name)
+ names[name] = (full_path, relative_path)
# Yield all of the answers
for name in sorted(names):
(full_path, relative_path) = names[name]
if name in dirs:
- for rp in self._walk_relative_paths(prefix_len, full_path):
+ for rp in self._walk_relative_paths(prefix_len, full_path, reporter):
yield rp
else:
yield relative_path
@@ -573,7 +588,7 @@ class B2Folder(AbstractFolder):
self.bucket = api.get_bucket_by_name(bucket_name)
self.prefix = '' if self.folder_name == '' else self.folder_name + '/'
- def all_files(self):
+ def all_files(self, reporter):
current_name = None
current_versions = []
for (file_version_info, folder_name) in self.bucket.ls(
@@ -625,7 +640,7 @@ def next_or_none(iterator):
return None
-def zip_folders(folder_a, folder_b, exclusions=tuple()):
+def zip_folders(folder_a, folder_b, reporter, exclusions=tuple()):
"""
An iterator over all of the files in the union of two folders,
matching file names.
@@ -637,8 +652,10 @@ def zip_folders(folder_a, folder_b, exclusions=tuple()):
:param folder_b: A Folder object.
"""
- iter_a = (f for f in folder_a.all_files() if not any(ex.match(f.name) for ex in exclusions))
- iter_b = folder_b.all_files()
+ iter_a = (
+ f for f in folder_a.all_files(reporter) if not any(ex.match(f.name) for ex in exclusions)
+ )
+ iter_b = folder_b.all_files(reporter)
current_a = next_or_none(iter_a)
current_b = next_or_none(iter_b)
@@ -810,7 +827,7 @@ def make_folder_sync_actions(source_folder, dest_folder, args, now_millis, repor
('b2', 'local'), ('local', 'b2')
]:
raise NotImplementedError("Sync support only local-to-b2 and b2-to-local")
- for (source_file, dest_file) in zip_folders(source_folder, dest_folder, exclusions):
+ for (source_file, dest_file) in zip_folders(source_folder, dest_folder, reporter, exclusions):
if source_folder.folder_type() == 'local':
if source_file is not None:
reporter.update_compare(1)
@@ -863,7 +880,9 @@ def count_files(local_folder, reporter):
"""
Counts all of the files in a local folder.
"""
- for _ in local_folder.all_files():
+ # Don't pass in a reporter to all_files. Broken symlinks will be reported
+ # during the next pass when the source and dest files are compared.
+ for _ in local_folder.all_files(None):
reporter.update_local(1)
reporter.end_local()
| Broken symlinks break sync
I had this issue where one of my symlinks was broken and the b2 tool crashed. A minimal way to reproduce (hypothetical paths; any dangling symlink under the sync source will do):
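```python
import os

os.mkdir('/tmp/b2src')
# Create a symlink whose target does not exist:
os.symlink('/tmp/b2src/missing-target', '/tmp/b2src/broken-link')
```
Running `b2 sync` over that directory then dies with this stack trace: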
```
Traceback (most recent call last):
File "/usr/local/bin/b2", line 9, in <module>
load_entry_point('b2==0.5.4', 'console_scripts', 'b2')()
File "/usr/local/lib/python2.7/dist-packages/b2/console_tool.py", line 861, in main
exit_status = ct.run_command(decoded_argv)
File "/usr/local/lib/python2.7/dist-packages/b2/console_tool.py", line 789, in run_command
return command.run(args)
File "/usr/local/lib/python2.7/dist-packages/b2/console_tool.py", line 609, in run
max_workers=max_workers
File "/usr/local/lib/python2.7/dist-packages/b2/sync.py", line 877, in sync_folders
source_folder, dest_folder, args, now_millis, reporter
File "/usr/local/lib/python2.7/dist-packages/b2/sync.py", line 777, in make_folder_sync_actions
for (source_file, dest_file) in zip_folders(source_folder, dest_folder):
File "/usr/local/lib/python2.7/dist-packages/b2/sync.py", line 646, in zip_folders
current_a = next_or_none(iter_a)
File "/usr/local/lib/python2.7/dist-packages/b2/sync.py", line 620, in next_or_none
return six.advance_iterator(iterator)
File "/usr/local/lib/python2.7/dist-packages/b2/sync.py", line 499, in all_files
yield self._make_file(relative_path)
File "/usr/local/lib/python2.7/dist-packages/b2/sync.py", line 553, in _make_file
mod_time = int(round(os.path.getmtime(full_path) * 1000))
File "/usr/lib/python2.7/genericpath.py", line 54, in getmtime
return os.stat(filename).st_mtime
OSError: [Errno 2] No such file or directory: '/media/2a9074d0-4788-45ab-bfae-fc46427c69fa/PersonalData/some-broken-symlink'
``` | Backblaze/B2_Command_Line_Tool | diff --git a/test/test_sync.py b/test/test_sync.py
index ad2b140..9102b6e 100644
--- a/test/test_sync.py
+++ b/test/test_sync.py
@@ -37,36 +37,58 @@ def write_file(path, contents):
f.write(contents)
-def create_files(root_dir, relative_paths):
- for relative_path in relative_paths:
- full_path = os.path.join(root_dir, relative_path)
- write_file(full_path, b'')
+class TestLocalFolder(unittest.TestCase):
+ NAMES = [
+ six.u('.dot_file'), six.u('hello.'), six.u('hello/a/1'), six.u('hello/a/2'),
+ six.u('hello/b'), six.u('hello0'), six.u('\u81ea\u7531')
+ ]
+ def setUp(self):
+ self.reporter = MagicMock()
+
+ @classmethod
+ def _create_files(cls, root_dir, relative_paths):
+ for relative_path in relative_paths:
+ full_path = os.path.join(root_dir, relative_path)
+ write_file(full_path, b'')
+
+ def _prepare_folder(self, root_dir, broken_symlink=False):
+ self._create_files(root_dir, self.NAMES)
+ if broken_symlink:
+ os.symlink(
+ os.path.join(root_dir, 'non_existant_file'), os.path.join(root_dir, 'bad_symlink')
+ )
+ return LocalFolder(root_dir)
-class TestLocalFolder(unittest.TestCase):
def test_slash_sorting(self):
# '/' should sort between '.' and '0'
- names = [
- six.u('.dot_file'), six.u('hello.'), six.u('hello/a/1'), six.u('hello/a/2'),
- six.u('hello/b'), six.u('hello0'), six.u('\u81ea\u7531')
- ]
with TempDir() as tmpdir:
- create_files(tmpdir, names)
- folder = LocalFolder(tmpdir)
- actual_names = list(f.name for f in folder.all_files())
- self.assertEqual(names, actual_names)
+ folder = self._prepare_folder(tmpdir)
+ actual_names = list(f.name for f in folder.all_files(self.reporter))
+ self.assertEqual(self.NAMES, actual_names)
+ self.reporter.local_access_error.assert_not_called()
+
+ def test_broken_symlink(self):
+ with TempDir() as tmpdir:
+ folder = self._prepare_folder(tmpdir, broken_symlink=True)
+ for f in folder.all_files(self.reporter):
+ pass # just generate all the files
+ self.reporter.local_access_error.assert_called_once_with(
+ os.path.join(tmpdir, 'bad_symlink')
+ )
class TestB2Folder(unittest.TestCase):
def setUp(self):
self.bucket = MagicMock()
self.api = MagicMock()
+ self.reporter = MagicMock()
self.api.get_bucket_by_name.return_value = self.bucket
self.b2_folder = B2Folder('bucket-name', 'folder', self.api)
def test_empty(self):
self.bucket.ls.return_value = []
- self.assertEqual([], list(self.b2_folder.all_files()))
+ self.assertEqual([], list(self.b2_folder.all_files(self.reporter)))
def test_multiple_versions(self):
# Test two files, to cover the yield within the loop, and
@@ -102,7 +124,7 @@ class TestB2Folder(unittest.TestCase):
[
"File(a.txt, [FileVersion('a2', 'folder/a.txt', 2000, 'upload'), FileVersion('a1', 'folder/a.txt', 1000, 'upload')])",
"File(b.txt, [FileVersion('b2', 'folder/b.txt', 2000, 'upload'), FileVersion('b1', 'folder/b.txt', 1000, 'upload')])",
- ], [str(f) for f in self.b2_folder.all_files()]
+ ], [str(f) for f in self.b2_folder.all_files(self.reporter)]
)
@@ -111,7 +133,7 @@ class FakeFolder(AbstractFolder):
self.f_type = f_type
self.files = files
- def all_files(self):
+ def all_files(self, reporter):
return iter(self.files)
def folder_type(self):
@@ -150,16 +172,19 @@ class TestParseSyncFolder(unittest.TestCase):
class TestZipFolders(unittest.TestCase):
+ def setUp(self):
+ self.reporter = MagicMock()
+
def test_empty(self):
folder_a = FakeFolder('b2', [])
folder_b = FakeFolder('b2', [])
- self.assertEqual([], list(zip_folders(folder_a, folder_b)))
+ self.assertEqual([], list(zip_folders(folder_a, folder_b, self.reporter)))
def test_one_empty(self):
file_a1 = File("a.txt", [FileVersion("a", "a", 100, "upload", 10)])
folder_a = FakeFolder('b2', [file_a1])
folder_b = FakeFolder('b2', [])
- self.assertEqual([(file_a1, None)], list(zip_folders(folder_a, folder_b)))
+ self.assertEqual([(file_a1, None)], list(zip_folders(folder_a, folder_b, self.reporter)))
def test_two(self):
file_a1 = File("a.txt", [FileVersion("a", "a", 100, "upload", 10)])
@@ -174,9 +199,22 @@ class TestZipFolders(unittest.TestCase):
[
(file_a1, None), (file_a2, file_b1), (file_a3, None), (None, file_b2),
(file_a4, None)
- ], list(zip_folders(folder_a, folder_b))
+ ], list(zip_folders(folder_a, folder_b, self.reporter))
)
+ def test_pass_reporter_to_folder(self):
+ """
+ Check that the zip_folders() function passes the reporter through
+ to both folders.
+ """
+ folder_a = MagicMock()
+ folder_b = MagicMock()
+ folder_a.all_files = MagicMock(return_value=iter([]))
+ folder_b.all_files = MagicMock(return_value=iter([]))
+ self.assertEqual([], list(zip_folders(folder_a, folder_b, self.reporter)))
+ folder_a.all_files.assert_called_once_with(self.reporter)
+ folder_b.all_files.assert_called_once_with(self.reporter)
+
class FakeArgs(object):
"""
diff --git a/test_b2_command_line.py b/test_b2_command_line.py
index 8d23678..0628248 100644
--- a/test_b2_command_line.py
+++ b/test_b2_command_line.py
@@ -200,6 +200,8 @@ class CommandLine(object):
sys.exit(1)
if expected_pattern is not None:
if re.search(expected_pattern, stdout) is None:
+ print('STDOUT:')
+ print(stdout)
error_and_exit('did not match pattern: ' + expected_pattern)
return stdout
@@ -469,8 +471,12 @@ def _sync_test_using_dir(b2_tool, bucket_name, dir_):
write_file(p('a'), b'hello')
write_file(p('b'), b'hello')
write_file(p('c'), b'hello')
+ os.symlink('broken', p('d'))
- b2_tool.should_succeed(['sync', '--noProgress', dir_path, b2_sync_point])
+ b2_tool.should_succeed(
+ ['sync', '--noProgress', dir_path, b2_sync_point],
+ expected_pattern="/d could not be accessed"
+ )
file_versions = b2_tool.list_file_versions(bucket_name)
should_equal(
[
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": null,
"python": "3.5",
"reqs_path": [
"requirements.txt",
"requirements-test.txt",
"requirements-setup.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
-e git+https://github.com/Backblaze/B2_Command_Line_Tool.git@ab2b5b4e3dc2c8b52b28592c7414ebb4646034e2#egg=b2
certifi==2021.5.30
charset-normalizer==2.0.12
idna==3.10
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
mock==5.2.0
nose==1.3.7
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyflakes==3.0.1
pyparsing==3.1.4
pytest==7.0.1
requests==2.27.1
six==1.17.0
tomli==1.2.3
tqdm==4.64.1
typing_extensions==4.1.1
urllib3==1.26.20
yapf==0.32.0
zipp==3.6.0
| name: B2_Command_Line_Tool
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- charset-normalizer==2.0.12
- idna==3.10
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- mock==5.2.0
- nose==1.3.7
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyflakes==3.0.1
- pyparsing==3.1.4
- pytest==7.0.1
- requests==2.27.1
- six==1.17.0
- tomli==1.2.3
- tqdm==4.64.1
- typing-extensions==4.1.1
- urllib3==1.26.20
- yapf==0.32.0
- zipp==3.6.0
prefix: /opt/conda/envs/B2_Command_Line_Tool
| [
"test/test_sync.py::TestLocalFolder::test_broken_symlink",
"test/test_sync.py::TestLocalFolder::test_slash_sorting",
"test/test_sync.py::TestB2Folder::test_empty",
"test/test_sync.py::TestB2Folder::test_multiple_versions",
"test/test_sync.py::TestZipFolders::test_empty",
"test/test_sync.py::TestZipFolders::test_one_empty",
"test/test_sync.py::TestZipFolders::test_pass_reporter_to_folder",
"test/test_sync.py::TestZipFolders::test_two",
"test/test_sync.py::TestMakeSyncActions::test_already_hidden_multiple_versions_delete",
"test/test_sync.py::TestMakeSyncActions::test_already_hidden_multiple_versions_keep",
"test/test_sync.py::TestMakeSyncActions::test_already_hidden_multiple_versions_keep_days",
"test/test_sync.py::TestMakeSyncActions::test_compare_b2_none_newer",
"test/test_sync.py::TestMakeSyncActions::test_compare_b2_none_older",
"test/test_sync.py::TestMakeSyncActions::test_compare_b2_size_equal",
"test/test_sync.py::TestMakeSyncActions::test_compare_b2_size_not_equal",
"test/test_sync.py::TestMakeSyncActions::test_compare_b2_size_not_equal_delete",
"test/test_sync.py::TestMakeSyncActions::test_delete_b2",
"test/test_sync.py::TestMakeSyncActions::test_delete_b2_multiple_versions",
"test/test_sync.py::TestMakeSyncActions::test_delete_hide_b2_multiple_versions",
"test/test_sync.py::TestMakeSyncActions::test_delete_local",
"test/test_sync.py::TestMakeSyncActions::test_empty_b2",
"test/test_sync.py::TestMakeSyncActions::test_empty_local",
"test/test_sync.py::TestMakeSyncActions::test_file_exclusions",
"test/test_sync.py::TestMakeSyncActions::test_file_exclusions_with_delete",
"test/test_sync.py::TestMakeSyncActions::test_keep_days_no_change_with_old_file",
"test/test_sync.py::TestMakeSyncActions::test_newer_b2",
"test/test_sync.py::TestMakeSyncActions::test_newer_b2_clean_old_versions",
"test/test_sync.py::TestMakeSyncActions::test_newer_b2_delete_old_versions",
"test/test_sync.py::TestMakeSyncActions::test_newer_local",
"test/test_sync.py::TestMakeSyncActions::test_no_delete_b2",
"test/test_sync.py::TestMakeSyncActions::test_no_delete_local",
"test/test_sync.py::TestMakeSyncActions::test_not_there_b2",
"test/test_sync.py::TestMakeSyncActions::test_not_there_local",
"test/test_sync.py::TestMakeSyncActions::test_older_b2",
"test/test_sync.py::TestMakeSyncActions::test_older_b2_replace",
"test/test_sync.py::TestMakeSyncActions::test_older_b2_replace_delete",
"test/test_sync.py::TestMakeSyncActions::test_older_b2_skip",
"test/test_sync.py::TestMakeSyncActions::test_older_local",
"test/test_sync.py::TestMakeSyncActions::test_older_local_replace",
"test/test_sync.py::TestMakeSyncActions::test_older_local_skip",
"test/test_sync.py::TestMakeSyncActions::test_same_b2",
"test/test_sync.py::TestMakeSyncActions::test_same_clean_old_versions",
"test/test_sync.py::TestMakeSyncActions::test_same_delete_old_versions",
"test/test_sync.py::TestMakeSyncActions::test_same_leave_old_versions",
"test/test_sync.py::TestMakeSyncActions::test_same_local"
]
| []
| [
"test/test_sync.py::TestParseSyncFolder::test_b2_double_slash",
"test/test_sync.py::TestParseSyncFolder::test_b2_no_double_slash",
"test/test_sync.py::TestParseSyncFolder::test_b2_no_folder",
"test/test_sync.py::TestParseSyncFolder::test_b2_trailing_slash",
"test/test_sync.py::TestParseSyncFolder::test_local",
"test/test_sync.py::TestParseSyncFolder::test_local_trailing_slash",
"test/test_sync.py::TestMakeSyncActions::test_illegal_b2_to_b2",
"test/test_sync.py::TestMakeSyncActions::test_illegal_delete_and_keep_days",
"test/test_sync.py::TestMakeSyncActions::test_illegal_local_to_local",
"test/test_sync.py::TestMakeSyncActions::test_illegal_skip_and_replace",
"test_b2_command_line.py::TestCommandLine::test_stderr_patterns"
]
| []
| MIT License | 585 | [
"b2/sync.py"
]
| [
"b2/sync.py"
]
|
|
juju-solutions__charms.reactive-71 | 04663e45f3683d4c497f43526d3ac26593ee10a2 | 2016-06-14 21:26:10 | 59b07bd9447d8a4cb027ea2515089216b8d20549 | kwmonroe: There was some chatter on #juju about not using a string representation for toggle (instead using `object()`). I'm fine with this as-in, or with that changed.
stub42: This needs a test, which I suspect is trivial. | diff --git a/charms/reactive/relations.py b/charms/reactive/relations.py
index 173ef28..a8c1499 100644
--- a/charms/reactive/relations.py
+++ b/charms/reactive/relations.py
@@ -30,7 +30,9 @@ from charms.reactive.bus import _load_module
from charms.reactive.bus import StateList
-ALL = '__ALL_SERVICES__'
+# arbitrary obj instances to use as defaults instead of None
+ALL = object()
+TOGGLE = object()
class scopes(object):
@@ -296,13 +298,13 @@ class RelationBase(with_metaclass(AutoAccessors, object)):
"""
return self.conversation(scope).is_state(state)
- def toggle_state(self, state, active=None, scope=None):
+ def toggle_state(self, state, active=TOGGLE, scope=None):
"""
Toggle the state for the :class:`Conversation` with the given scope.
In Python, this is equivalent to::
- relation.conversation(scope).toggle_state(state)
+ relation.conversation(scope).toggle_state(state, active)
See :meth:`conversation` and :meth:`Conversation.toggle_state`.
"""
@@ -549,7 +551,7 @@ class Conversation(object):
return False
return self.key in value['conversations']
- def toggle_state(self, state, active=None):
+ def toggle_state(self, state, active=TOGGLE):
"""
Toggle the given state for this conversation.
@@ -565,7 +567,7 @@ class Conversation(object):
This will set the state if ``value`` is equal to ``foo``.
"""
- if active is None:
+ if active is TOGGLE:
active = not self.is_state(state)
if active:
self.set_state(state)
| toggle_state default for active is error-prone
`toggle_state` takes an optional `active` param that lets you specify whether the state should be on or off, instead of it being switched from its previous value. Having a default value of `None` is problematic because relation data that was never set also comes back as `None`, so a caller passing such a value to mean "off" silently gets the flip-from-previous behavior instead, which leads to the state being unexpectedly set. For example, the following breaks:
```python
conv.toggle_state('{relation_name}.feature_x',
                  active=conv.get_remote('feature_x_available'))
```
This can also be more subtle because `None and <anything>` returns `None` instead of `False` as one might expect. For example, this also breaks:
```python
name = conv.get_remote('name')
version = conv.get_remote('version')
conv.toggle_state('{relation_name}.ready', name and version)
```
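For illustration, here is a minimal standalone sketch of the same pitfall (made-up names, not the charms.reactive code itself):
```python
state = set()

def toggle_state(name, active=None):
    # None is meant to mean "flip", but callers pass None meaning "off".
    if active is None:
        active = name not in state
    if active:
        state.add(name)
    else:
        state.discard(name)

# A relation key that was never set comes back as None:
feature_available = None       # e.g. conv.get_remote('feature_x_available')
toggle_state('feature_x', feature_available)
print(state)                   # {'feature_x'} -- set, not left off

# And `and` short-circuits to None rather than False:
name, version = None, '1.0'
print(name and version)        # None, so the same misfire happens again
```
A distinct sentinel default (e.g. `TOGGLE = object()`) sidesteps this, since no value coming off the wire can ever be identical to it.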
| juju-solutions/charms.reactive | diff --git a/tests/test_relations.py b/tests/test_relations.py
index c4977da..d49c3e1 100644
--- a/tests/test_relations.py
+++ b/tests/test_relations.py
@@ -171,6 +171,11 @@ class TestRelationBase(unittest.TestCase):
rb.conversation.assert_called_once_with('scope')
conv.toggle_state.assert_called_once_with('state', 'active')
+ conv.toggle_state.reset_mock()
+ rb.toggle_state('state')
+ conv.toggle_state.assert_called_once_with('state',
+ relations.TOGGLE)
+
def test_set_remote(self):
conv = mock.Mock(name='conv')
rb = relations.RelationBase('relname', 'unit')
@@ -391,12 +396,14 @@ class TestConversation(unittest.TestCase):
conv.toggle_state('foo')
self.assertEqual(conv.remove_state.call_count, 1)
+ conv.toggle_state('foo', None)
+ self.assertEqual(conv.remove_state.call_count, 2)
conv.toggle_state('foo')
self.assertEqual(conv.set_state.call_count, 1)
conv.toggle_state('foo', True)
self.assertEqual(conv.set_state.call_count, 2)
conv.toggle_state('foo', False)
- self.assertEqual(conv.remove_state.call_count, 2)
+ self.assertEqual(conv.remove_state.call_count, 3)
@mock.patch.object(relations.hookenv, 'relation_set')
@mock.patch.object(relations.Conversation, 'relation_ids', ['rel:1', 'rel:2'])
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 1
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"coverage",
"mock",
"nose",
"flake8",
"ipython",
"ipdb",
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
backcall==0.2.0
certifi==2021.5.30
charmhelpers==1.2.1
-e git+https://github.com/juju-solutions/charms.reactive.git@04663e45f3683d4c497f43526d3ac26593ee10a2#egg=charms.reactive
coverage==6.2
decorator==5.1.1
flake8==5.0.4
importlib-metadata==4.2.0
importlib-resources==5.4.0
iniconfig==1.1.1
ipdb==0.13.13
ipython==7.16.3
ipython-genutils==0.2.0
jedi==0.17.2
Jinja2==3.0.3
MarkupSafe==2.0.1
mccabe==0.7.0
mock==5.2.0
netaddr==0.10.1
nose==1.3.7
packaging==21.3
parso==0.7.1
pbr==6.1.1
pexpect==4.9.0
pickleshare==0.7.5
pluggy==1.0.0
prompt-toolkit==3.0.36
ptyprocess==0.7.0
py==1.11.0
pyaml==23.5.8
pycodestyle==2.9.1
pyflakes==2.5.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
PyYAML==6.0.1
six==1.17.0
tomli==1.2.3
traitlets==4.3.3
typing_extensions==4.1.1
wcwidth==0.2.13
zipp==3.6.0
| name: charms.reactive
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- backcall==0.2.0
- charmhelpers==1.2.1
- coverage==6.2
- decorator==5.1.1
- flake8==5.0.4
- importlib-metadata==4.2.0
- importlib-resources==5.4.0
- iniconfig==1.1.1
- ipdb==0.13.13
- ipython==7.16.3
- ipython-genutils==0.2.0
- jedi==0.17.2
- jinja2==3.0.3
- markupsafe==2.0.1
- mccabe==0.7.0
- mock==5.2.0
- netaddr==0.10.1
- nose==1.3.7
- packaging==21.3
- parso==0.7.1
- pbr==6.1.1
- pexpect==4.9.0
- pickleshare==0.7.5
- pluggy==1.0.0
- prompt-toolkit==3.0.36
- ptyprocess==0.7.0
- py==1.11.0
- pyaml==23.5.8
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pyyaml==6.0.1
- six==1.17.0
- tomli==1.2.3
- traitlets==4.3.3
- typing-extensions==4.1.1
- wcwidth==0.2.13
- zipp==3.6.0
prefix: /opt/conda/envs/charms.reactive
| [
"tests/test_relations.py::TestRelationBase::test_toggle_state",
"tests/test_relations.py::TestConversation::test_toggle_state"
]
| []
| [
"tests/test_relations.py::TestAutoAccessors::test_accessor",
"tests/test_relations.py::TestAutoAccessors::test_accessor_doc",
"tests/test_relations.py::TestRelationBase::test_conversation",
"tests/test_relations.py::TestRelationBase::test_find_impl",
"tests/test_relations.py::TestRelationBase::test_find_subclass",
"tests/test_relations.py::TestRelationBase::test_from_name",
"tests/test_relations.py::TestRelationBase::test_from_state",
"tests/test_relations.py::TestRelationBase::test_get_local",
"tests/test_relations.py::TestRelationBase::test_get_remote",
"tests/test_relations.py::TestRelationBase::test_is_state",
"tests/test_relations.py::TestRelationBase::test_remove_state",
"tests/test_relations.py::TestRelationBase::test_set_local",
"tests/test_relations.py::TestRelationBase::test_set_remote",
"tests/test_relations.py::TestRelationBase::test_set_state",
"tests/test_relations.py::TestConversation::test_depart",
"tests/test_relations.py::TestConversation::test_get_local",
"tests/test_relations.py::TestConversation::test_get_remote",
"tests/test_relations.py::TestConversation::test_is_state",
"tests/test_relations.py::TestConversation::test_join",
"tests/test_relations.py::TestConversation::test_key",
"tests/test_relations.py::TestConversation::test_load",
"tests/test_relations.py::TestConversation::test_relation_ids",
"tests/test_relations.py::TestConversation::test_remove_state",
"tests/test_relations.py::TestConversation::test_set_local",
"tests/test_relations.py::TestConversation::test_set_remote",
"tests/test_relations.py::TestConversation::test_set_state",
"tests/test_relations.py::TestMigrateConvs::test_migrate",
"tests/test_relations.py::TestRelationCall::test_call_conversations",
"tests/test_relations.py::TestRelationCall::test_call_name",
"tests/test_relations.py::TestRelationCall::test_call_state",
"tests/test_relations.py::TestRelationCall::test_no_impl"
]
| []
| Apache License 2.0 | 586 | [
"charms/reactive/relations.py"
]
| [
"charms/reactive/relations.py"
]
|
juju-solutions__charms.reactive-73 | 04663e45f3683d4c497f43526d3ac26593ee10a2 | 2016-06-15 01:33:48 | 59b07bd9447d8a4cb027ea2515089216b8d20549 | diff --git a/charms/reactive/bus.py b/charms/reactive/bus.py
index 885e498..853571a 100644
--- a/charms/reactive/bus.py
+++ b/charms/reactive/bus.py
@@ -229,6 +229,7 @@ class Handler(object):
self._action = action
self._args = []
self._predicates = []
+ self._post_callbacks = []
self._states = set()
def id(self):
@@ -255,6 +256,12 @@ class Handler(object):
hookenv.log(' Adding predicate for %s: %s' % (self.id(), _predicate), level=hookenv.DEBUG)
self._predicates.append(predicate)
+ def add_post_callback(self, callback):
+ """
+ Add a callback to be run after the action is invoked.
+ """
+ self._post_callbacks.append(callback)
+
def test(self):
"""
Check the predicate(s) and return True if this handler should be invoked.
@@ -278,6 +285,8 @@ class Handler(object):
"""
args = self._get_args()
self._action(*args)
+ for callback in self._post_callbacks:
+ callback()
def register_states(self, states):
"""
diff --git a/charms/reactive/decorators.py b/charms/reactive/decorators.py
index 7918106..e89332f 100644
--- a/charms/reactive/decorators.py
+++ b/charms/reactive/decorators.py
@@ -205,18 +205,18 @@ def not_unless(*desired_states):
return _decorator
-def only_once(action):
+def only_once(action=None):
"""
- Ensure that the decorated function is only executed the first time it is called.
+ Register the decorated function to be run once, and only once.
- This can be used on reactive handlers to ensure that they are only triggered
- once, even if their conditions continue to match on subsequent calls, even
- across hook invocations.
+ This decorator will never cause arguments to be passed to the handler.
"""
- @wraps(action)
- def wrapper(*args, **kwargs):
- action_id = _action_id(action)
- if not was_invoked(action_id):
- action(*args, **kwargs)
- mark_invoked(action_id)
- return wrapper
+ if action is None:
+ # allow to be used as @only_once or @only_once()
+ return only_once
+
+ action_id = _action_id(action)
+ handler = Handler.get(action)
+ handler.add_predicate(lambda: not was_invoked(action_id))
+ handler.add_post_callback(partial(mark_invoked, action_id))
+ return action
| only_once help unclear
_From @jacekn on January 27, 2016 17:29_
I was trying to execute a certain function only once in my code.
This does not work:
```
@only_once()
def basenode():
    print("in basenode")
```
```
TypeError: only_once() missing 1 required positional argument: 'action'
```
I tried it like this, but it also did not work:
```
@only_once("basenode")
def basenode():
    print("in basenode")
```
```
AttributeError: 'str' object has no attribute '__code__'
```
Can the documentation be clarified to show the correct use of this decorator?
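For reference, the usual pattern that lets a Python decorator work both bare and with parentheses looks roughly like this (a generic sketch, not the actual charms.reactive implementation):
```python
import functools

def only_once(action=None):
    if action is None:
        # Invoked as @only_once() -- no function yet, so hand back
        # the decorator itself to be applied on the next call.
        return only_once
    invoked = False

    @functools.wraps(action)
    def wrapper(*args, **kwargs):
        nonlocal invoked
        if not invoked:
            invoked = True
            return action(*args, **kwargs)
    return wrapper

@only_once          # bare form
def basenode():
    print("in basenode")

@only_once()        # parenthesized form works too
def othernode():
    print("in othernode")
```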
_Copied from original issue: juju/charm-tools#94_ | juju-solutions/charms.reactive | diff --git a/tests/test_decorators.py b/tests/test_decorators.py
index 4691a30..2599b53 100644
--- a/tests/test_decorators.py
+++ b/tests/test_decorators.py
@@ -241,11 +241,28 @@ class TestReactiveDecorators(unittest.TestCase):
calls = []
@reactive.decorators.only_once
- def test(num):
- calls.append(num)
+ def test():
+ calls.append(len(calls)+1)
+
+ handler = reactive.bus.Handler.get(test)
- test(1)
- test(2)
+ assert handler.test()
+ handler.invoke()
+ assert not handler.test()
+ self.assertEquals(calls, [1])
+
+ def test_only_once_parens(self):
+ calls = []
+
+ @reactive.decorators.only_once()
+ def test():
+ calls.append(len(calls)+1)
+
+ handler = reactive.bus.Handler.get(test)
+
+ assert handler.test()
+ handler.invoke()
+ assert not handler.test()
self.assertEquals(calls, [1])
def test_multi(self):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 2
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"coverage",
"mock",
"nose",
"flake8",
"ipython",
"ipdb",
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
backcall==0.2.0
certifi==2021.5.30
charmhelpers==1.2.1
-e git+https://github.com/juju-solutions/charms.reactive.git@04663e45f3683d4c497f43526d3ac26593ee10a2#egg=charms.reactive
coverage==6.2
decorator==5.1.1
flake8==5.0.4
importlib-metadata==4.2.0
importlib-resources==5.4.0
iniconfig==1.1.1
ipdb==0.13.13
ipython==7.16.3
ipython-genutils==0.2.0
jedi==0.17.2
Jinja2==3.0.3
MarkupSafe==2.0.1
mccabe==0.7.0
mock==5.2.0
netaddr==0.10.1
nose==1.3.7
packaging==21.3
parso==0.7.1
pbr==6.1.1
pexpect==4.9.0
pickleshare==0.7.5
pluggy==1.0.0
prompt-toolkit==3.0.36
ptyprocess==0.7.0
py==1.11.0
pyaml==23.5.8
pycodestyle==2.9.1
pyflakes==2.5.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
PyYAML==6.0.1
six==1.17.0
tomli==1.2.3
traitlets==4.3.3
typing_extensions==4.1.1
wcwidth==0.2.13
zipp==3.6.0
| name: charms.reactive
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- backcall==0.2.0
- charmhelpers==1.2.1
- coverage==6.2
- decorator==5.1.1
- flake8==5.0.4
- importlib-metadata==4.2.0
- importlib-resources==5.4.0
- iniconfig==1.1.1
- ipdb==0.13.13
- ipython==7.16.3
- ipython-genutils==0.2.0
- jedi==0.17.2
- jinja2==3.0.3
- markupsafe==2.0.1
- mccabe==0.7.0
- mock==5.2.0
- netaddr==0.10.1
- nose==1.3.7
- packaging==21.3
- parso==0.7.1
- pbr==6.1.1
- pexpect==4.9.0
- pickleshare==0.7.5
- pluggy==1.0.0
- prompt-toolkit==3.0.36
- ptyprocess==0.7.0
- py==1.11.0
- pyaml==23.5.8
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pyyaml==6.0.1
- six==1.17.0
- tomli==1.2.3
- traitlets==4.3.3
- typing-extensions==4.1.1
- wcwidth==0.2.13
- zipp==3.6.0
prefix: /opt/conda/envs/charms.reactive
| [
"tests/test_decorators.py::TestReactiveDecorators::test_only_once",
"tests/test_decorators.py::TestReactiveDecorators::test_only_once_parens"
]
| []
| [
"tests/test_decorators.py::TestReactiveDecorators::test_hook",
"tests/test_decorators.py::TestReactiveDecorators::test_multi",
"tests/test_decorators.py::TestReactiveDecorators::test_not_unless",
"tests/test_decorators.py::TestReactiveDecorators::test_when",
"tests/test_decorators.py::TestReactiveDecorators::test_when_all",
"tests/test_decorators.py::TestReactiveDecorators::test_when_any",
"tests/test_decorators.py::TestReactiveDecorators::test_when_file_changed",
"tests/test_decorators.py::TestReactiveDecorators::test_when_none",
"tests/test_decorators.py::TestReactiveDecorators::test_when_not",
"tests/test_decorators.py::TestReactiveDecorators::test_when_not_all"
]
| []
| Apache License 2.0 | 587 | [
"charms/reactive/bus.py",
"charms/reactive/decorators.py"
]
| [
"charms/reactive/bus.py",
"charms/reactive/decorators.py"
]
|
|
juju-solutions__charms.reactive-74 | 04663e45f3683d4c497f43526d3ac26593ee10a2 | 2016-06-15 02:46:03 | 59b07bd9447d8a4cb027ea2515089216b8d20549 | diff --git a/charms/reactive/bus.py b/charms/reactive/bus.py
index 885e498..1bd1364 100644
--- a/charms/reactive/bus.py
+++ b/charms/reactive/bus.py
@@ -170,12 +170,16 @@ def get_state(state, default=None):
def _action_id(action):
+ if hasattr(action, '_action_id'):
+ return action._action_id
return "%s:%s:%s" % (action.__code__.co_filename,
action.__code__.co_firstlineno,
action.__code__.co_name)
def _short_action_id(action):
+ if hasattr(action, '_short_action_id'):
+ return action._short_action_id
filepath = os.path.relpath(action.__code__.co_filename, hookenv.charm_dir())
return "%s:%s:%s" % (filepath,
action.__code__.co_firstlineno,
diff --git a/charms/reactive/decorators.py b/charms/reactive/decorators.py
index 7918106..7de571c 100644
--- a/charms/reactive/decorators.py
+++ b/charms/reactive/decorators.py
@@ -21,6 +21,7 @@ from charmhelpers.core import hookenv
from charms.reactive.bus import Handler
from charms.reactive.bus import get_states
from charms.reactive.bus import _action_id
+from charms.reactive.bus import _short_action_id
from charms.reactive.relations import RelationBase
from charms.reactive.helpers import _hook
from charms.reactive.helpers import _when_all
@@ -188,19 +189,21 @@ def not_unless(*desired_states):
This is primarily for informational purposes and as a guard clause.
"""
def _decorator(func):
+ action_id = _action_id(func)
+ short_action_id = _short_action_id(func)
+
@wraps(func)
def _wrapped(*args, **kwargs):
active_states = get_states()
missing_states = [state for state in desired_states if state not in active_states]
if missing_states:
- func_id = "%s:%s:%s" % (func.__code__.co_filename,
- func.__code__.co_firstlineno,
- func.__code__.co_name)
hookenv.log('%s called before state%s: %s' % (
- func_id,
+ short_action_id,
's' if len(missing_states) > 1 else '',
', '.join(missing_states)), hookenv.WARNING)
return func(*args, **kwargs)
+ _wrapped._action_id = action_id
+ _wrapped._short_action_id = short_action_id
return _wrapped
return _decorator
| Incorrect handler name logged using not_unless decorator
I'm getting the following in my logs:
2016-01-11 14:58:37 INFO juju-log replication:1: Invoking reactive handler: lib/pypi/charms/reactive/decorators.py:149:_wrapped
It looks like the name isn't being copied over from the wrapped function in the not_unless decorator. | juju-solutions/charms.reactive | diff --git a/tests/test_decorators.py b/tests/test_decorators.py
index 4691a30..78733c7 100644
--- a/tests/test_decorators.py
+++ b/tests/test_decorators.py
@@ -236,6 +236,10 @@ class TestReactiveDecorators(unittest.TestCase):
self.assertEqual(action.call_count, 3)
assert log_msg(0).endswith('test called before states: foo, bar'), log_msg(0)
assert log_msg(1).endswith('test called before state: bar'), log_msg(1)
+ self.assertIn('tests/test_decorators.py:', reactive.bus._action_id(test))
+ self.assertIn(':test', reactive.bus._action_id(test))
+ self.assertIn('tests/test_decorators.py:', reactive.bus._short_action_id(test))
+ self.assertIn(':test', reactive.bus._short_action_id(test))
def test_only_once(self):
calls = []
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 2
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"coverage",
"mock",
"nose",
"flake8",
"ipython",
"ipdb",
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
backcall==0.2.0
certifi==2021.5.30
charmhelpers==1.2.1
-e git+https://github.com/juju-solutions/charms.reactive.git@04663e45f3683d4c497f43526d3ac26593ee10a2#egg=charms.reactive
coverage==6.2
decorator==5.1.1
flake8==5.0.4
importlib-metadata==4.2.0
importlib-resources==5.4.0
iniconfig==1.1.1
ipdb==0.13.13
ipython==7.16.3
ipython-genutils==0.2.0
jedi==0.17.2
Jinja2==3.0.3
MarkupSafe==2.0.1
mccabe==0.7.0
mock==5.2.0
netaddr==0.10.1
nose==1.3.7
packaging==21.3
parso==0.7.1
pbr==6.1.1
pexpect==4.9.0
pickleshare==0.7.5
pluggy==1.0.0
prompt-toolkit==3.0.36
ptyprocess==0.7.0
py==1.11.0
pyaml==23.5.8
pycodestyle==2.9.1
pyflakes==2.5.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
PyYAML==6.0.1
six==1.17.0
tomli==1.2.3
traitlets==4.3.3
typing_extensions==4.1.1
wcwidth==0.2.13
zipp==3.6.0
| name: charms.reactive
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- backcall==0.2.0
- charmhelpers==1.2.1
- coverage==6.2
- decorator==5.1.1
- flake8==5.0.4
- importlib-metadata==4.2.0
- importlib-resources==5.4.0
- iniconfig==1.1.1
- ipdb==0.13.13
- ipython==7.16.3
- ipython-genutils==0.2.0
- jedi==0.17.2
- jinja2==3.0.3
- markupsafe==2.0.1
- mccabe==0.7.0
- mock==5.2.0
- netaddr==0.10.1
- nose==1.3.7
- packaging==21.3
- parso==0.7.1
- pbr==6.1.1
- pexpect==4.9.0
- pickleshare==0.7.5
- pluggy==1.0.0
- prompt-toolkit==3.0.36
- ptyprocess==0.7.0
- py==1.11.0
- pyaml==23.5.8
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pyyaml==6.0.1
- six==1.17.0
- tomli==1.2.3
- traitlets==4.3.3
- typing-extensions==4.1.1
- wcwidth==0.2.13
- zipp==3.6.0
prefix: /opt/conda/envs/charms.reactive
| [
"tests/test_decorators.py::TestReactiveDecorators::test_not_unless"
]
| []
| [
"tests/test_decorators.py::TestReactiveDecorators::test_hook",
"tests/test_decorators.py::TestReactiveDecorators::test_multi",
"tests/test_decorators.py::TestReactiveDecorators::test_only_once",
"tests/test_decorators.py::TestReactiveDecorators::test_when",
"tests/test_decorators.py::TestReactiveDecorators::test_when_all",
"tests/test_decorators.py::TestReactiveDecorators::test_when_any",
"tests/test_decorators.py::TestReactiveDecorators::test_when_file_changed",
"tests/test_decorators.py::TestReactiveDecorators::test_when_none",
"tests/test_decorators.py::TestReactiveDecorators::test_when_not",
"tests/test_decorators.py::TestReactiveDecorators::test_when_not_all"
]
| []
| Apache License 2.0 | 588 | [
"charms/reactive/bus.py",
"charms/reactive/decorators.py"
]
| [
"charms/reactive/bus.py",
"charms/reactive/decorators.py"
]
|
|
cdent__gabbi-157 | 1b9a0be830dac86865bee85c33886d3b2fb4d37b | 2016-06-16 12:05:42 | 1b9a0be830dac86865bee85c33886d3b2fb4d37b | diff --git a/gabbi/driver.py b/gabbi/driver.py
index 9cb88fe..22a48c4 100644
--- a/gabbi/driver.py
+++ b/gabbi/driver.py
@@ -29,8 +29,10 @@ import os
import unittest
from unittest import suite
import uuid
+import warnings
from gabbi import case
+from gabbi import exception
from gabbi import handlers
from gabbi import reporter
from gabbi import suitemaker
@@ -83,6 +85,10 @@ def build_tests(path, loader, host=None, port=8001, intercept=None,
top_suite = suite.TestSuite()
for test_file in glob.iglob('%s/*.yaml' % path):
+ if '_' in os.path.basename(test_file):
+ warnings.warn(exception.GabbiSyntaxWarning(
+ "'_' in test filename %s. This can break suite grouping."
+ % test_file))
if intercept:
host = str(uuid.uuid4())
suite_dict = utils.load_yaml(yaml_file=test_file)
@@ -134,7 +140,6 @@ def py_test_generator(test_dir, host=None, port=8001, intercept=None,
def test_suite_from_yaml(loader, test_base_name, test_yaml, test_directory,
host, port, fixture_module, intercept, prefix=''):
"""Legacy wrapper retained for backwards compatibility."""
- import warnings
with warnings.catch_warnings(): # ensures warnings filter is restored
warnings.simplefilter('default', DeprecationWarning)
diff --git a/gabbi/exception.py b/gabbi/exception.py
index 3d4ef45..2bc93e4 100644
--- a/gabbi/exception.py
+++ b/gabbi/exception.py
@@ -16,3 +16,8 @@
class GabbiFormatError(ValueError):
"""An exception to encapsulate poorly formed test data."""
pass
+
+
+class GabbiSyntaxWarning(SyntaxWarning):
+ """A warning about syntax that is not desirable."""
+ pass
| Q: What characters are legal / recommended for a test name in YAML?
I suspect plain alphanum + spaces is recommended, but I might sometimes want to use a hyphen or parens. So, just being thorough.
Thanks!
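For what it's worth, the patch below only guards the YAML *filename*: an underscore there can break suite grouping, so the driver emits a `GabbiSyntaxWarning`. A minimal sketch of that check — the message and class name come from the diff, while the stand-in class and helper name exist only for this sketch:

```python
import os
import warnings


class GabbiSyntaxWarning(SyntaxWarning):
    """Stand-in for the warning class the patch adds in gabbi/exception.py."""


def warn_on_underscore(test_file):
    # Mirrors the guard the patch adds to driver.build_tests().
    if '_' in os.path.basename(test_file):
        warnings.warn(GabbiSyntaxWarning(
            "'_' in test filename %s. This can break suite grouping."
            % test_file))
```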
| cdent/gabbi | diff --git a/gabbi/tests/gabbits_intercept/json_extensions.yaml b/gabbi/tests/gabbits_intercept/json-extensions.yaml
similarity index 100%
rename from gabbi/tests/gabbits_intercept/json_extensions.yaml
rename to gabbi/tests/gabbits_intercept/json-extensions.yaml
diff --git a/gabbi/tests/gabbits_intercept/last_url.yaml b/gabbi/tests/gabbits_intercept/last-url.yaml
similarity index 100%
rename from gabbi/tests/gabbits_intercept/last_url.yaml
rename to gabbi/tests/gabbits_intercept/last-url.yaml
diff --git a/gabbi/tests/gabbits_intercept/method_shortcut.yaml b/gabbi/tests/gabbits_intercept/method-shortcut.yaml
similarity index 100%
rename from gabbi/tests/gabbits_intercept/method_shortcut.yaml
rename to gabbi/tests/gabbits_intercept/method-shortcut.yaml
diff --git a/gabbi/tests/test_syntax_warning.py b/gabbi/tests/test_syntax_warning.py
new file mode 100644
index 0000000..529dbf6
--- /dev/null
+++ b/gabbi/tests/test_syntax_warning.py
@@ -0,0 +1,41 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Test that the driver warns on bad yaml name."""
+
+import os
+import unittest
+import warnings
+
+from gabbi import driver
+from gabbi import exception
+
+
+TESTS_DIR = 'warning_gabbits'
+
+
+class DriverTest(unittest.TestCase):
+
+ def setUp(self):
+ super(DriverTest, self).setUp()
+ self.loader = unittest.defaultTestLoader
+ self.test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR)
+
+ def test_driver_warngs_on_files(self):
+ with warnings.catch_warnings(record=True) as the_warnings:
+ driver.build_tests(
+ self.test_dir, self.loader, host='localhost', port=8001)
+ self.assertEqual(1, len(the_warnings))
+ the_warning = the_warnings[-1]
+ self.assertEqual(
+ the_warning.category, exception.GabbiSyntaxWarning)
+ self.assertIn("'_' in test filename", str(the_warning.message))
diff --git a/gabbi/tests/warning_gabbits/underscore_sample.yaml b/gabbi/tests/warning_gabbits/underscore_sample.yaml
new file mode 100644
index 0000000..185e378
--- /dev/null
+++ b/gabbi/tests/warning_gabbits/underscore_sample.yaml
@@ -0,0 +1,6 @@
+
+tests:
+ - name: one
+ url: /
+ - name: two
+ url: http://example.com/moo
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 2
},
"num_modified_files": 2
} | 1.22 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"mock",
"testrepository",
"coverage",
"hacking",
"sphinx",
"pytest"
],
"pre_install": null,
"python": "3.5",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
colorama==0.4.5
coverage==6.2
decorator==5.1.1
docutils==0.18.1
extras==1.0.0
fixtures==4.0.1
flake8==3.8.4
-e git+https://github.com/cdent/gabbi.git@1b9a0be830dac86865bee85c33886d3b2fb4d37b#egg=gabbi
hacking==4.1.0
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
iso8601==1.1.0
Jinja2==3.0.3
jsonpath-rw==1.4.0
jsonpath-rw-ext==1.2.2
MarkupSafe==2.0.1
mccabe==0.6.1
mock==5.2.0
packaging==21.3
pbr==6.1.1
pluggy==1.0.0
ply==3.11
py==1.11.0
pycodestyle==2.6.0
pyflakes==2.2.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
python-subunit==1.4.2
pytz==2025.2
PyYAML==6.0.1
requests==2.27.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
testrepository==0.0.21
testtools==2.6.0
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
wsgi_intercept==1.13.1
zipp==3.6.0
| name: gabbi
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- colorama==0.4.5
- coverage==6.2
- decorator==5.1.1
- docutils==0.18.1
- extras==1.0.0
- fixtures==4.0.1
- flake8==3.8.4
- hacking==4.1.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- iso8601==1.1.0
- jinja2==3.0.3
- jsonpath-rw==1.4.0
- jsonpath-rw-ext==1.2.2
- markupsafe==2.0.1
- mccabe==0.6.1
- mock==5.2.0
- packaging==21.3
- pbr==6.1.1
- pluggy==1.0.0
- ply==3.11
- py==1.11.0
- pycodestyle==2.6.0
- pyflakes==2.2.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- python-subunit==1.4.2
- pytz==2025.2
- pyyaml==6.0.1
- requests==2.27.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- testrepository==0.0.21
- testtools==2.6.0
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- wsgi-intercept==1.13.1
- zipp==3.6.0
prefix: /opt/conda/envs/gabbi
| [
"gabbi/tests/test_syntax_warning.py::DriverTest::test_driver_warngs_on_files"
]
| []
| []
| []
| Apache License 2.0 | 589 | [
"gabbi/exception.py",
"gabbi/driver.py"
]
| [
"gabbi/exception.py",
"gabbi/driver.py"
]
|
|
box__box-python-sdk-139 | a1dba2d7699f6d3e798e89821d4650720e299ffd | 2016-06-16 15:45:59 | ded623f4b6de0530d8f983d3c3d2cafe646c126b | boxcla: Hi @kelseymorris95, thanks for the pull request. Before we can merge it, we need you to sign our Contributor License Agreement. You can do so electronically here: http://opensource.box.com/cla
Once you have signed, just add a comment to this pull request saying, "CLA signed". Thanks!
kelseymorris95: CLA signed
boxcla: Verified that @kelseymorris95 has just signed the CLA. Thanks, and we look forward to your contribution.
jmoldow: Something to think about: `Translator.translate()` takes a type, and tries to translate it. If it fails, it'll return `BaseObject`. Then, the usual procedure is to pass `session, object_id, response_object` to the class that is returned.
This has two problems:
- Ideally, the default return value would be either `BaseObject` or `APIJSONObject`, depending on the object. I would choose `BaseObject` if there were an `id`, and `APIJSONObject` otherwise. However, we can't do that since the method only accepts a type. We'd need to make a backwards-incompatible change to make it accept additional information.
- The types of classes we return have different `__init__` parameters. So you can't initialize an object unless you know in advance which type it's going to be. We could fix this by:
- Moving the object creation inside of `Translator.translate()` (after dealing with the above problem).
- Making `APIJSONObject.__init__` have the same argument list as `BaseObject.__init__`, even though it doesn't need most of them.
Let's not worry about this right now. What we have now works well enough to push what you have as-is. But we'll want to revisit this before or during work on #140.
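A minimal sketch of the dispatch rule proposed above, assuming a plain-dict registry and local stand-in classes — this is not the SDK's actual `Translator` API, which currently accepts only a type string:

```python
class BaseObject(object):
    """Stand-in for the REST-object base class."""


class APIJSONObject(object):
    """Stand-in for the plain-JSON mapping class."""


_REGISTRY = {}  # item_type -> class, as filled by the metaclass's register()


def translate(response_object):
    """Pick a class from the full response, not just its type string."""
    cls = _REGISTRY.get(response_object.get('type'))
    if cls is None:
        # Default rule proposed above: REST objects carry an 'id'.
        cls = BaseObject if 'id' in response_object else APIJSONObject
    return cls
```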
HootyMcOwlface: :+1:
jmoldow: 👍
Thanks @kelseymorris95! This all looks great. I'm going to merge it now.
Before releasing this to PyPI, over the next few days, let's make sure we're happy with the names of the new classes, make a final decision about `Mapping` vs `MutableMapping` vs `dict`, and double-check that we aren't introducing any other backwards incompatibilities. | diff --git a/boxsdk/object/__init__.py b/boxsdk/object/__init__.py
index ffd3611..e4fdb61 100644
--- a/boxsdk/object/__init__.py
+++ b/boxsdk/object/__init__.py
@@ -5,4 +5,4 @@
from six.moves import map # pylint:disable=redefined-builtin
-__all__ = list(map(str, ['collaboration', 'events', 'file', 'folder', 'group', 'group_membership', 'search', 'user']))
+__all__ = list(map(str, ['collaboration', 'events', 'event', 'file', 'folder', 'group', 'group_membership', 'search', 'user']))
diff --git a/boxsdk/object/api_json_object.py b/boxsdk/object/api_json_object.py
new file mode 100644
index 0000000..e407b1c
--- /dev/null
+++ b/boxsdk/object/api_json_object.py
@@ -0,0 +1,26 @@
+# coding: utf-8
+
+from __future__ import unicode_literals, absolute_import
+from collections import Mapping
+from abc import ABCMeta
+import six
+
+from .base_api_json_object import BaseAPIJSONObject, BaseAPIJSONObjectMeta
+
+
+class APIJSONObjectMeta(BaseAPIJSONObjectMeta, ABCMeta):
+ """
+ Avoid conflicting metaclass definitions for APIJSONObject.
+ http://code.activestate.com/recipes/204197-solving-the-metaclass-conflict/
+ """
+ pass
+
+
+class APIJSONObject(six.with_metaclass(APIJSONObjectMeta, BaseAPIJSONObject, Mapping)):
+ """Class representing objects that are not part of the REST API."""
+
+ def __len__(self):
+ return len(self._response_object)
+
+ def __iter__(self):
+ return iter(self._response_object)
diff --git a/boxsdk/object/base_api_json_object.py b/boxsdk/object/base_api_json_object.py
new file mode 100644
index 0000000..8dbacde
--- /dev/null
+++ b/boxsdk/object/base_api_json_object.py
@@ -0,0 +1,63 @@
+# coding: utf-8
+
+from __future__ import unicode_literals, absolute_import
+import six
+
+from ..util.translator import Translator
+
+
+class BaseAPIJSONObjectMeta(type):
+ """
+ Metaclass for Box API objects. Registers classes so that API responses can be translated to the correct type.
+ Relies on the _item_type field defined on the classes to match the type property of the response json.
+ But the type-class mapping will only be registered if the module of the class is imported.
+ So it's also important to add the module name to __all__ in object/__init__.py.
+ """
+ def __init__(cls, name, bases, attrs):
+ super(BaseAPIJSONObjectMeta, cls).__init__(name, bases, attrs)
+ item_type = attrs.get('_item_type', None)
+ if item_type is not None:
+ Translator().register(item_type, cls)
+
+
[email protected]_metaclass(BaseAPIJSONObjectMeta)
+class BaseAPIJSONObject(object):
+ """Base class containing basic logic shared between true REST objects and other objects (such as an Event)"""
+
+ _item_type = None
+
+ def __init__(self, response_object=None, **kwargs):
+ """
+ :param response_object:
+ A JSON object representing the object returned from a Box API request.
+ :type response_object:
+ `dict`
+ """
+ super(BaseAPIJSONObject, self).__init__(**kwargs)
+ self._response_object = response_object or {}
+ self.__dict__.update(self._response_object)
+
+ def __getitem__(self, item):
+ """
+ Try to get the attribute from the API response object.
+
+ :param item:
+ The attribute to retrieve from the API response object.
+ :type item:
+ `unicode`
+ """
+ return self._response_object[item]
+
+ def __repr__(self):
+ """Base class override. Return a human-readable representation using the Box ID or name of the object."""
+ extra_description = ' - {0}'.format(self._description) if self._description else ''
+ description = '<Box {0}{1}>'.format(self.__class__.__name__, extra_description)
+ if six.PY2:
+ return description.encode('utf-8')
+ else:
+ return description
+
+ @property
+ def _description(self):
+ """Return a description of the object if one exists."""
+ return ""
diff --git a/boxsdk/object/base_endpoint.py b/boxsdk/object/base_endpoint.py
index 76b0ffe..d24d8ca 100644
--- a/boxsdk/object/base_endpoint.py
+++ b/boxsdk/object/base_endpoint.py
@@ -1,19 +1,23 @@
# coding: utf-8
-from __future__ import unicode_literals
+from __future__ import unicode_literals, absolute_import
class BaseEndpoint(object):
"""A Box API endpoint."""
- def __init__(self, session):
+ def __init__(self, session, **kwargs):
"""
-
:param session:
The Box session used to make requests.
:type session:
:class:`BoxSession`
+ :param kwargs:
+ Keyword arguments for base class constructors.
+ :type kwargs:
+ `dict`
"""
+ super(BaseEndpoint, self).__init__(**kwargs)
self._session = session
def get_url(self, endpoint, *args):
diff --git a/boxsdk/object/base_object.py b/boxsdk/object/base_object.py
index 3fe1d8b..5823222 100644
--- a/boxsdk/object/base_object.py
+++ b/boxsdk/object/base_object.py
@@ -1,35 +1,15 @@
# coding: utf-8
-from __future__ import unicode_literals
-from abc import ABCMeta
+from __future__ import unicode_literals, absolute_import
import json
-import six
+from .base_endpoint import BaseEndpoint
+from .base_api_json_object import BaseAPIJSONObject
+from ..util.translator import Translator
-from boxsdk.object.base_endpoint import BaseEndpoint
-from boxsdk.util.translator import Translator
-
-class ObjectMeta(ABCMeta):
- """
- Metaclass for Box API objects. Registers classes so that API responses can be translated to the correct type.
- Relies on the _item_type field defined on the classes to match the type property of the response json.
- But the type-class mapping will only be registered if the module of the class is imported.
- So it's also important to add the module name to __all__ in object/__init__.py.
- """
- def __init__(cls, name, bases, attrs):
- super(ObjectMeta, cls).__init__(name, bases, attrs)
- item_type = attrs.get('_item_type', None)
- if item_type is not None:
- Translator().register(item_type, cls)
-
-
[email protected]_metaclass(ObjectMeta)
-class BaseObject(BaseEndpoint):
- """
- A Box API endpoint for interacting with a Box object.
- """
- _item_type = None
+class BaseObject(BaseEndpoint, BaseAPIJSONObject):
+ """A Box API endpoint for interacting with a Box object."""
def __init__(self, session, object_id, response_object=None):
"""
@@ -42,29 +22,16 @@ def __init__(self, session, object_id, response_object=None):
:type object_id:
`unicode`
:param response_object:
- The Box API response representing the object.
+ A JSON object representing the object returned from a Box API request.
:type response_object:
- :class:`BoxResponse`
+ `dict`
"""
- super(BaseObject, self).__init__(session)
+ super(BaseObject, self).__init__(session=session, response_object=response_object)
self._object_id = object_id
- self._response_object = response_object or {}
- self.__dict__.update(self._response_object)
-
- def __getitem__(self, item):
- """Base class override. Try to get the attribute from the API response object."""
- return self._response_object[item]
-
- def __repr__(self):
- """Base class override. Return a human-readable representation using the Box ID or name of the object."""
- description = '<Box {0} - {1}>'.format(self.__class__.__name__, self._description)
- if six.PY2:
- return description.encode('utf-8')
- else:
- return description
@property
def _description(self):
+ """Base class override. Return a description for the object."""
if 'name' in self._response_object:
return '{0} ({1})'.format(self._object_id, self.name) # pylint:disable=no-member
else:
@@ -185,7 +152,7 @@ def delete(self, params=None, headers=None):
return box_response.ok
def __eq__(self, other):
- """Base class override. Equality is determined by object id."""
+ """Equality as determined by object id"""
return self._object_id == other.object_id
def _paging_wrapper(self, url, starting_index, limit, factory=None):
diff --git a/boxsdk/object/event.py b/boxsdk/object/event.py
new file mode 100644
index 0000000..8025d80
--- /dev/null
+++ b/boxsdk/object/event.py
@@ -0,0 +1,11 @@
+# coding: utf-8
+
+from __future__ import unicode_literals, absolute_import
+
+from .api_json_object import APIJSONObject
+
+
+class Event(APIJSONObject):
+ """Represents a single Box event."""
+
+ _item_type = 'event'
diff --git a/boxsdk/object/events.py b/boxsdk/object/events.py
index dbb321f..3d62b43 100644
--- a/boxsdk/object/events.py
+++ b/boxsdk/object/events.py
@@ -1,14 +1,14 @@
# coding: utf-8
-from __future__ import unicode_literals
-
+from __future__ import unicode_literals, absolute_import
from requests.exceptions import Timeout
from six import with_metaclass
-from boxsdk.object.base_endpoint import BaseEndpoint
-from boxsdk.util.enum import ExtendableEnumMeta
-from boxsdk.util.lru_cache import LRUCache
-from boxsdk.util.text_enum import TextEnum
+from .base_endpoint import BaseEndpoint
+from ..util.enum import ExtendableEnumMeta
+from ..util.lru_cache import LRUCache
+from ..util.text_enum import TextEnum
+from ..util.translator import Translator
# pylint:disable=too-many-ancestors
@@ -79,8 +79,7 @@ def get_events(self, limit=100, stream_position=0, stream_type=UserEventsStreamT
:type stream_type:
:enum:`EventsStreamType`
:returns:
- JSON response from the Box /events endpoint. Contains the next stream position to use for the next call,
- along with some number of events.
+ Dictionary containing the next stream position along with a list of some number of events.
:rtype:
`dict`
"""
@@ -91,7 +90,10 @@ def get_events(self, limit=100, stream_position=0, stream_type=UserEventsStreamT
'stream_type': stream_type,
}
box_response = self._session.get(url, params=params)
- return box_response.json()
+ response = box_response.json().copy()
+ if 'entries' in response:
+ response['entries'] = [Translator().translate(item['type'])(item) for item in response['entries']]
+ return response
def get_latest_stream_position(self, stream_type=UserEventsStreamType.ALL):
"""
diff --git a/boxsdk/object/folder.py b/boxsdk/object/folder.py
index 3aaf1c2..db07b60 100644
--- a/boxsdk/object/folder.py
+++ b/boxsdk/object/folder.py
@@ -4,6 +4,7 @@
import json
import os
from six import text_type
+
from boxsdk.config import API
from boxsdk.object.collaboration import Collaboration
from boxsdk.object.file import File
diff --git a/boxsdk/object/item.py b/boxsdk/object/item.py
index cc885af..d0d99cd 100644
--- a/boxsdk/object/item.py
+++ b/boxsdk/object/item.py
@@ -1,7 +1,6 @@
# coding: utf-8
-from __future__ import unicode_literals
-
+from __future__ import unicode_literals, absolute_import
import json
from .base_object import BaseObject
@@ -111,6 +110,10 @@ def rename(self, name):
def get(self, fields=None, etag=None):
"""Base class override.
+ :param fields:
+ List of fields to request.
+ :type fields:
+ `Iterable` of `unicode`
:param etag:
If specified, instruct the Box API to get the info only if the current version's etag doesn't match.
:type etag:
diff --git a/boxsdk/object/metadata.py b/boxsdk/object/metadata.py
index 9c3fed0..7d5adee 100644
--- a/boxsdk/object/metadata.py
+++ b/boxsdk/object/metadata.py
@@ -1,6 +1,6 @@
# coding: utf-8
-from __future__ import unicode_literals
+from __future__ import unicode_literals, absolute_import
import json
from boxsdk.object.base_endpoint import BaseEndpoint
| Add an Event class
Right now the `get_events()` method in the `Events` class returns a `dict`. Ideally, we should have an `Event` class, and `Event` objects should also be translatable. Then a call to `Events.get_events()` will return a list of `Event` objects. | box/box-python-sdk | diff --git a/test/functional/test_events.py b/test/functional/test_events.py
index b0ccd49..590682d 100644
--- a/test/functional/test_events.py
+++ b/test/functional/test_events.py
@@ -8,6 +8,7 @@
import requests
from boxsdk.object.folder import FolderSyncState
+from boxsdk.object.event import Event as BoxEvent
@pytest.fixture
@@ -36,6 +37,7 @@ def helper(get_item, event_type, stream_position=0):
assert event['event_type'] == event_type
assert event['source']['name'] == item.name
assert event['source']['id'] == item.id
+ assert isinstance(event, BoxEvent)
return helper
diff --git a/test/unit/object/test_api_json_object.py b/test/unit/object/test_api_json_object.py
new file mode 100644
index 0000000..1be04ef
--- /dev/null
+++ b/test/unit/object/test_api_json_object.py
@@ -0,0 +1,21 @@
+# coding: utf-8
+
+from __future__ import unicode_literals, absolute_import
+import pytest
+
+from boxsdk.object.api_json_object import APIJSONObject
+
+
[email protected](params=[{'foo': 'bar'}, {'a': {'b': 'c'}}])
+def api_json_object(request):
+ return request.param, APIJSONObject(request.param)
+
+
+def test_len(api_json_object):
+ dictionary, test_object = api_json_object
+ assert len(dictionary) == len(test_object)
+
+
+def test_api_json_object_dict(api_json_object):
+ dictionary, test_object = api_json_object
+ assert dictionary == test_object
diff --git a/test/unit/object/test_base_api_json_object.py b/test/unit/object/test_base_api_json_object.py
new file mode 100644
index 0000000..3032f11
--- /dev/null
+++ b/test/unit/object/test_base_api_json_object.py
@@ -0,0 +1,24 @@
+# coding: utf-8
+
+from __future__ import unicode_literals, absolute_import
+import pytest
+
+from boxsdk.object.base_api_json_object import BaseAPIJSONObject
+
+
[email protected](params=[{'foo': 'bar'}, {'a': {'b': 'c'}}])
+def response(request):
+ return request.param
+
+
[email protected]()
+def base_api_json_object(response):
+ dictionary_response = response
+ return dictionary_response, BaseAPIJSONObject(dictionary_response)
+
+
+def test_getitem(base_api_json_object):
+ dictionary_response, test_object = base_api_json_object
+ assert isinstance(test_object, BaseAPIJSONObject)
+ for key in dictionary_response:
+ assert test_object[key] == dictionary_response[key]
diff --git a/test/unit/object/test_event.py b/test/unit/object/test_event.py
new file mode 100644
index 0000000..1c4cd2e
--- /dev/null
+++ b/test/unit/object/test_event.py
@@ -0,0 +1,20 @@
+# coding: utf-8
+
+from __future__ import unicode_literals
+
+from boxsdk.object.event import Event
+
+
+def test_init_event():
+ event = Event(
+ {
+ "type": "event",
+ "event_id": "f82c3ba03e41f7e8a7608363cc6c0390183c3f83",
+ "source":
+ {
+ "type": "folder",
+ "id": "11446498",
+ },
+ })
+ assert event['type'] == 'event'
+ assert event['event_id'] == 'f82c3ba03e41f7e8a7608363cc6c0390183c3f83'
diff --git a/test/unit/object/test_events.py b/test/unit/object/test_events.py
index bf45687..ff6c88e 100644
--- a/test/unit/object/test_events.py
+++ b/test/unit/object/test_events.py
@@ -1,6 +1,6 @@
# coding: utf-8
-from __future__ import unicode_literals
+from __future__ import unicode_literals, absolute_import
from itertools import chain
import json
@@ -13,6 +13,7 @@
from boxsdk.network.default_network import DefaultNetworkResponse
from boxsdk.object.events import Events, EventsStreamType, UserEventsStreamType
+from boxsdk.object.event import Event
from boxsdk.session.box_session import BoxResponse
from boxsdk.util.ordered_dict import OrderedDict
@@ -169,22 +170,22 @@ def max_retries_long_poll_response(make_mock_box_request):
@pytest.fixture()
-def mock_event():
+def mock_event_json():
return {
"type": "event",
"event_id": "f82c3ba03e41f7e8a7608363cc6c0390183c3f83",
"source": {
"type": "folder",
"id": "11446498",
- }
+ },
}
@pytest.fixture()
-def events_response(initial_stream_position, mock_event, make_mock_box_request):
+def events_response(initial_stream_position, mock_event_json, make_mock_box_request):
# pylint:disable=redefined-outer-name
mock_box_response, _ = make_mock_box_request(
- response={"next_stream_position": initial_stream_position, "entries": [mock_event]},
+ response={"next_stream_position": initial_stream_position, "entries": [mock_event_json]},
)
return mock_box_response
@@ -205,6 +206,10 @@ def test_get_events(
expected_url,
params=dict(limit=100, stream_position=0, **expected_stream_type_params),
)
+ event_entries = events['entries']
+ assert event_entries == events_response.json.return_value['entries']
+ for event in event_entries:
+ assert isinstance(event, Event)
def test_get_long_poll_options(
@@ -234,7 +239,7 @@ def test_generate_events_with_long_polling(
new_change_long_poll_response,
reconnect_long_poll_response,
max_retries_long_poll_response,
- mock_event,
+ mock_event_json,
stream_type_kwargs,
expected_stream_type,
expected_stream_type_params,
@@ -253,7 +258,7 @@ def test_generate_events_with_long_polling(
empty_events_response,
]
events = test_events.generate_events_with_long_polling(**stream_type_kwargs)
- assert next(events) == mock_event
+ assert next(events) == Event(mock_event_json)
with pytest.raises(StopIteration):
next(events)
events.close()
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 7
} | 1.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt",
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.16
astroid==3.3.9
async-timeout==5.0.1
babel==2.17.0
bottle==0.13.2
-e git+https://github.com/box/box-python-sdk.git@a1dba2d7699f6d3e798e89821d4650720e299ffd#egg=boxsdk
cachetools==5.5.2
certifi==2025.1.31
cffi==1.17.1
chardet==5.2.0
charset-normalizer==3.4.1
colorama==0.4.6
coverage==7.8.0
cryptography==44.0.2
dill==0.3.9
distlib==0.3.9
docutils==0.21.2
exceptiongroup==1.2.2
execnet==2.1.1
filelock==3.18.0
greenlet==3.1.1
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
iniconfig==2.1.0
isort==6.0.1
Jinja2==3.1.6
jsonpatch==1.33
jsonpointer==3.0.0
MarkupSafe==3.0.2
mccabe==0.7.0
mock==5.2.0
packaging==24.2
pep8==1.7.1
platformdirs==4.3.7
pluggy==1.5.0
pycparser==2.22
Pygments==2.19.1
PyJWT==2.10.1
pylint==3.3.6
pyproject-api==1.9.0
pytest==8.3.5
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
redis==5.2.1
requests==2.32.3
requests-toolbelt==1.0.0
six==1.17.0
snowballstemmer==2.2.0
Sphinx==7.4.7
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
SQLAlchemy==2.0.40
tomli==2.2.1
tomlkit==0.13.2
tox==4.25.0
typing_extensions==4.13.0
urllib3==2.3.0
virtualenv==20.29.3
zipp==3.21.0
| name: box-python-sdk
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.16
- astroid==3.3.9
- async-timeout==5.0.1
- babel==2.17.0
- bottle==0.13.2
- cachetools==5.5.2
- certifi==2025.1.31
- cffi==1.17.1
- chardet==5.2.0
- charset-normalizer==3.4.1
- colorama==0.4.6
- coverage==7.8.0
- cryptography==44.0.2
- dill==0.3.9
- distlib==0.3.9
- docutils==0.21.2
- exceptiongroup==1.2.2
- execnet==2.1.1
- filelock==3.18.0
- greenlet==3.1.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- isort==6.0.1
- jinja2==3.1.6
- jsonpatch==1.33
- jsonpointer==3.0.0
- markupsafe==3.0.2
- mccabe==0.7.0
- mock==5.2.0
- packaging==24.2
- pep8==1.7.1
- platformdirs==4.3.7
- pluggy==1.5.0
- pycparser==2.22
- pygments==2.19.1
- pyjwt==2.10.1
- pylint==3.3.6
- pyproject-api==1.9.0
- pytest==8.3.5
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- redis==5.2.1
- requests==2.32.3
- requests-toolbelt==1.0.0
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==7.4.7
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- sqlalchemy==2.0.40
- tomli==2.2.1
- tomlkit==0.13.2
- tox==4.25.0
- typing-extensions==4.13.0
- urllib3==2.3.0
- virtualenv==20.29.3
- zipp==3.21.0
prefix: /opt/conda/envs/box-python-sdk
| [
"test/unit/object/test_api_json_object.py::test_len[api_json_object0]",
"test/unit/object/test_api_json_object.py::test_len[api_json_object1]",
"test/unit/object/test_api_json_object.py::test_api_json_object_dict[api_json_object0]",
"test/unit/object/test_api_json_object.py::test_api_json_object_dict[api_json_object1]",
"test/unit/object/test_base_api_json_object.py::test_getitem[response0]",
"test/unit/object/test_base_api_json_object.py::test_getitem[response1]",
"test/unit/object/test_event.py::test_init_event",
"test/unit/object/test_events.py::test_events_stream_type_extended_enum_class_has_expected_members",
"test/unit/object/test_events.py::test_get_events[None]",
"test/unit/object/test_events.py::test_get_long_poll_options[None]",
"test/unit/object/test_events.py::test_get_events[all0]",
"test/unit/object/test_events.py::test_get_long_poll_options[all0]",
"test/unit/object/test_events.py::test_get_events[changes0]",
"test/unit/object/test_events.py::test_get_long_poll_options[changes0]",
"test/unit/object/test_events.py::test_get_events[sync0]",
"test/unit/object/test_events.py::test_get_long_poll_options[sync0]",
"test/unit/object/test_events.py::test_get_events[admin_logs0]",
"test/unit/object/test_events.py::test_get_long_poll_options[admin_logs0]",
"test/unit/object/test_events.py::test_get_events[all1]",
"test/unit/object/test_events.py::test_get_long_poll_options[all1]",
"test/unit/object/test_events.py::test_get_events[changes1]",
"test/unit/object/test_events.py::test_get_long_poll_options[changes1]",
"test/unit/object/test_events.py::test_get_events[sync1]",
"test/unit/object/test_events.py::test_get_long_poll_options[sync1]",
"test/unit/object/test_events.py::test_get_events[admin_logs1]",
"test/unit/object/test_events.py::test_get_long_poll_options[admin_logs1]",
"test/unit/object/test_events.py::test_get_events[future_stream_type]",
"test/unit/object/test_events.py::test_get_long_poll_options[future_stream_type]"
]
| [
"test/unit/object/test_events.py::test_generate_events_with_long_polling[None]",
"test/unit/object/test_events.py::test_generate_events_with_long_polling[all0]",
"test/unit/object/test_events.py::test_generate_events_with_long_polling[changes0]",
"test/unit/object/test_events.py::test_generate_events_with_long_polling[sync0]",
"test/unit/object/test_events.py::test_generate_events_with_long_polling[admin_logs0]",
"test/unit/object/test_events.py::test_generate_events_with_long_polling[all1]",
"test/unit/object/test_events.py::test_generate_events_with_long_polling[changes1]",
"test/unit/object/test_events.py::test_generate_events_with_long_polling[sync1]",
"test/unit/object/test_events.py::test_generate_events_with_long_polling[admin_logs1]",
"test/unit/object/test_events.py::test_generate_events_with_long_polling[future_stream_type]"
]
| []
| []
| Apache License 2.0 | 590 | [
"boxsdk/object/metadata.py",
"boxsdk/object/folder.py",
"boxsdk/object/api_json_object.py",
"boxsdk/object/base_api_json_object.py",
"boxsdk/object/events.py",
"boxsdk/object/base_object.py",
"boxsdk/object/item.py",
"boxsdk/object/event.py",
"boxsdk/object/base_endpoint.py",
"boxsdk/object/__init__.py"
]
| [
"boxsdk/object/metadata.py",
"boxsdk/object/folder.py",
"boxsdk/object/api_json_object.py",
"boxsdk/object/base_api_json_object.py",
"boxsdk/object/events.py",
"boxsdk/object/base_object.py",
"boxsdk/object/item.py",
"boxsdk/object/event.py",
"boxsdk/object/base_endpoint.py",
"boxsdk/object/__init__.py"
]
|
Tiendil__pynames-15 | bc496f64be0da44db0882b74b54125ce2e5e556b | 2016-06-17 08:36:12 | bc496f64be0da44db0882b74b54125ce2e5e556b | diff --git a/helpers/merge_names.py b/helpers/merge_names.py
index b4628a9..a282df3 100644
--- a/helpers/merge_names.py
+++ b/helpers/merge_names.py
@@ -3,6 +3,8 @@
import os
import json
+import six
+
FIXTURES = ['mongolian/fixtures/mongolian_names_list.json',
'russian/fixtures/pagan_names_list.json',
@@ -27,8 +29,8 @@ def names_equal(name, original_name):
if language not in original_languages:
continue
- text = languages[language] if isinstance(languages[language], basestring) else languages[language][0]
- original_text = original_languages[language] if isinstance(original_languages[language], basestring) else original_languages[language][0]
+ text = languages[language] if isinstance(languages[language], six.string_types) else languages[language][0]
+ original_text = original_languages[language] if isinstance(original_languages[language], six.string_types) else original_languages[language][0]
if text == original_text:
return True
@@ -37,8 +39,8 @@ def names_equal(name, original_name):
def merge_names(name, original_name):
- for gender, languages in name['genders'].iteritems():
- for language, data in languages.iteritems():
+ for gender, languages in six.iteritems(name['genders']):
+ for language, data in six.iteritems(languages):
original_name['genders'][gender][language] = data
@@ -54,7 +56,7 @@ def pretty_dump(data):
content = []
content.append(u'{')
- for key, value in data.iteritems():
+ for key, value in six.iteritems(data):
if key != 'names':
content.append(u' "%s": %s,' % (key, json.dumps(value, ensure_ascii=False)))
diff --git a/pynames/exceptions.py b/pynames/exceptions.py
index 48d5de7..16bbfb2 100644
--- a/pynames/exceptions.py
+++ b/pynames/exceptions.py
@@ -1,5 +1,7 @@
# coding: utf-8
+from __future__ import unicode_literals
+
class PynamesError(Exception):
MSG = None
@@ -9,7 +11,7 @@ class PynamesError(Exception):
class NoDefaultNameValue(PynamesError):
- MSG = u'Name: can not get default value for name with data: %(raw_data)r'
+ MSG = 'Name: can not get default value for name with data: %(raw_data)r'
class FromListGeneratorError(PynamesError):
@@ -17,7 +19,7 @@ class FromListGeneratorError(PynamesError):
class NoNamesLoadedFromListError(FromListGeneratorError):
- MSG = u'no names loaded from "%(source)s"'
+ MSG = 'no names loaded from "%(source)s"'
class FromTablesGeneratorError(PynamesError):
@@ -25,11 +27,11 @@ class FromTablesGeneratorError(PynamesError):
class WrongTemplateStructureError(FromTablesGeneratorError):
- MSG = u'wrong template structure - cannot choose template for genders %(genders)r with template source: "%(source)s"'
+ MSG = 'wrong template structure - cannot choose template for genders %(genders)r with template source: "%(source)s"'
class NotEqualFormsLengths(FromTablesGeneratorError):
- MSG = u'not equal forms lengths: [%(left)r] and [%(right)r]'
+ MSG = 'not equal forms lengths: [%(left)r] and [%(right)r]'
class WrongCSVData(FromTablesGeneratorError):
diff --git a/pynames/from_list_generator.py b/pynames/from_list_generator.py
index 9875e38..996e214 100644
--- a/pynames/from_list_generator.py
+++ b/pynames/from_list_generator.py
@@ -1,4 +1,7 @@
# coding: utf-8
+
+from __future__ import unicode_literals
+
import json
import random
diff --git a/pynames/from_tables_generator.py b/pynames/from_tables_generator.py
index 7b68475..8688ec8 100644
--- a/pynames/from_tables_generator.py
+++ b/pynames/from_tables_generator.py
@@ -1,11 +1,14 @@
# coding: utf-8
+from __future__ import unicode_literals
+
# python lib:
import json
import random
from collections import Iterable
# thirdparties:
+import six
import unicodecsv
# pynames:
@@ -39,28 +42,28 @@ class Template(object):
@classmethod
def merge_forms(cls, left, right):
- if not isinstance(left, basestring):
- if not isinstance(right, basestring):
+ if not isinstance(left, six.string_types):
+ if not isinstance(right, six.string_types):
if len(left) != len(right):
raise exceptions.NotEqualFormsLengths(left=left, right=right)
return [l+r for l, r in zip(left, right)]
else:
return [l+right for l in left]
else:
- if not isinstance(right, basestring):
+ if not isinstance(right, six.string_types):
return [left+r for r in right]
else:
return left + right
def get_name(self, tables):
languages = dict(
- (lang, u'') for lang in self.languages
+ (lang, '') for lang in self.languages
)
for slug in self.template:
record = random.choice(tables[slug])
languages = {
lang: self.merge_forms(forms, record['languages'][lang])
- for lang, forms in languages.iteritems()
+ for lang, forms in six.iteritems(languages)
}
genders = dict(
@@ -103,7 +106,7 @@ class FromTablesGenerator(BaseGenerator):
raise NotImplementedError(error_msg)
with file_adapter(source) as f:
- data = json.load(f)
+ data = json.loads(f.read().decode('utf-8'))
self.native_language = data['native_language']
self.languages = set(data['languages'])
self.full_forms_for_languages = set(data.get('full_forms_for_languages', set()))
@@ -153,7 +156,7 @@ class FromTablesGenerator(BaseGenerator):
return name.get_for(gender, language)
def test_names_consistency(self, test):
- for table_name, table in self.tables.iteritems():
+ for table_name, table in six.iteritems(self.tables):
for record in table:
test.assertEqual(set(record['languages'].keys()) & self.languages, self.languages)
diff --git a/pynames/names.py b/pynames/names.py
index 1fcf27d..d69a297 100644
--- a/pynames/names.py
+++ b/pynames/names.py
@@ -1,5 +1,9 @@
# coding: utf-8
+from __future__ import unicode_literals
+
+import six
+
from pynames.relations import GENDER, LANGUAGE
from pynames import exceptions
@@ -20,7 +24,7 @@ class Name(object):
forms = self.translations[gender][language]
- if not isinstance(forms, basestring):
+ if not isinstance(forms, six.string_types):
return forms[0]
return forms
@@ -31,7 +35,7 @@ class Name(object):
forms = self.translations[gender][language]
- if not isinstance(forms, basestring):
+ if not isinstance(forms, six.string_types):
return list(forms)
return None
diff --git a/pynames/relations.py b/pynames/relations.py
index 3b2b5f3..f065488 100644
--- a/pynames/relations.py
+++ b/pynames/relations.py
@@ -1,5 +1,7 @@
# coding: utf-8
+from __future__ import unicode_literals
+
class GENDER:
MALE = 'm'
FEMALE = 'f'
diff --git a/pynames/utils.py b/pynames/utils.py
index b7865e8..0430487 100644
--- a/pynames/utils.py
+++ b/pynames/utils.py
@@ -1,5 +1,7 @@
# coding: utf-8
+from __future__ import unicode_literals
+
import contextlib
import importlib
import pkgutil
@@ -54,6 +56,6 @@ def file_adapter(file_or_path):
if is_file(file_or_path):
file_obj = file_or_path
else:
- file_obj = open(file_or_path)
+ file_obj = open(file_or_path, 'rb')
yield file_obj
file_obj.close()
diff --git a/setup.py b/setup.py
index a5e70ad..eb2a544 100644
--- a/setup.py
+++ b/setup.py
@@ -22,12 +22,14 @@ setuptools.setup(
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 3.5',
'Natural Language :: English',
'Natural Language :: Russian'],
keywords=['gamedev', 'game', 'game development', 'names', 'names generation'],
packages=setuptools.find_packages(),
- install_requires=['unicodecsv'],
+ install_requires=['six', 'unicodecsv'],
include_package_data=True,
test_suite = 'tests',
)
| Python 3 support | Tiendil/pynames | diff --git a/pynames/tests/__init__.py b/pynames/tests/__init__.py
index 91bd147..e69de29 100644
--- a/pynames/tests/__init__.py
+++ b/pynames/tests/__init__.py
@@ -1,7 +0,0 @@
-# coding: utf-8
-
-from pynames.tests.test_name import *
-from pynames.tests.test_from_list_generator import *
-from pynames.tests.test_from_tables_generator import *
-from pynames.tests.test_generators import *
-from pynames.tests.test_utils import *
diff --git a/pynames/tests/test_from_list_generator.py b/pynames/tests/test_from_list_generator.py
index 35b7e10..3ffc28a 100644
--- a/pynames/tests/test_from_list_generator.py
+++ b/pynames/tests/test_from_list_generator.py
@@ -1,8 +1,12 @@
# coding: utf-8
+from __future__ import unicode_literals
+
import os
import unittest
+from six.moves import xrange
+
from pynames.relations import GENDER, LANGUAGE
from pynames.from_list_generator import FromListGenerator
diff --git a/pynames/tests/test_from_tables_generator.py b/pynames/tests/test_from_tables_generator.py
index 52b07e7..b1aef3f 100644
--- a/pynames/tests/test_from_tables_generator.py
+++ b/pynames/tests/test_from_tables_generator.py
@@ -1,8 +1,13 @@
# coding: utf-8
+from __future__ import unicode_literals
+
import os
import unittest
+import six
+from six.moves import xrange
+
from pynames.relations import GENDER, LANGUAGE
from pynames.from_tables_generator import FromTablesGenerator, FromCSVTablesGenerator
@@ -116,13 +121,9 @@ class TestFromCSVTablesGenerator(unittest.TestCase):
csv_generator = self.TestCSVGenerator()
for attr_name in ['native_language', 'languages', 'templates', 'tables']:
- try:
- json_attr = getattr(json_generator, attr_name)
- csv_attr = getattr(csv_generator, attr_name)
- if isinstance(json_attr, list):
- self.assertItemsEqual(csv_attr, json_attr)
- else:
- self.assertEqual(csv_attr, json_attr)
- except Exception:
- from nose.tools import set_trace; set_trace()
- raise
+ json_attr = getattr(json_generator, attr_name)
+ csv_attr = getattr(csv_generator, attr_name)
+ if isinstance(json_attr, list):
+ six.assertCountEqual(self, csv_attr, json_attr)
+ else:
+ self.assertEqual(csv_attr, json_attr)
diff --git a/pynames/tests/test_name.py b/pynames/tests/test_name.py
index 19182c0..0403736 100644
--- a/pynames/tests/test_name.py
+++ b/pynames/tests/test_name.py
@@ -1,5 +1,8 @@
# coding: utf-8
+from __future__ import unicode_literals
+
+import six
import unittest
from pynames.relations import GENDER, LANGUAGE
@@ -10,7 +13,7 @@ class TestName(unittest.TestCase):
def test_base(self):
name = Name('ru', {'genders': {'m': {'ru': 'ru_name'}}})
- self.assertEqual(unicode(name), 'ru_name')
+ self.assertEqual(six.text_type(name), 'ru_name')
self.assertEqual(name.get_for(GENDER.MALE, LANGUAGE.RU), 'ru_name')
self.assertEqual(name.get_for(GENDER.MALE), 'ru_name')
self.assertEqual(name.get_forms_for(GENDER.MALE), None)
@@ -18,7 +21,7 @@ class TestName(unittest.TestCase):
def test_genders(self):
name = Name('ru', {'genders': {'m': {'ru': 'ru_m_name'},
'f': {'ru': 'ru_f_name'}}})
- self.assertEqual(unicode(name), 'ru_m_name')
+ self.assertEqual(six.text_type(name), 'ru_m_name')
self.assertEqual(name.get_for(GENDER.MALE, LANGUAGE.RU), 'ru_m_name')
self.assertEqual(name.get_for(GENDER.FEMALE, LANGUAGE.RU), 'ru_f_name')
@@ -27,7 +30,7 @@ class TestName(unittest.TestCase):
'en': 'en_m_name'},
'f': {'ru': 'ru_f_name',
'en': 'en_f_name'}}})
- self.assertEqual(unicode(name), 'ru_m_name')
+ self.assertEqual(six.text_type(name), 'ru_m_name')
self.assertEqual(name.get_for(GENDER.MALE, LANGUAGE.RU), 'ru_m_name')
self.assertEqual(name.get_for(GENDER.FEMALE, LANGUAGE.RU), 'ru_f_name')
self.assertEqual(name.get_for(GENDER.MALE, LANGUAGE.EN), 'en_m_name')
diff --git a/pynames/tests/test_utils.py b/pynames/tests/test_utils.py
index acab40a..a62aa06 100644
--- a/pynames/tests/test_utils.py
+++ b/pynames/tests/test_utils.py
@@ -1,5 +1,7 @@
# coding: utf-8
+from __future__ import unicode_literals
+
import os
import tempfile
import unittest
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 3,
"test_score": 3
},
"num_modified_files": 8
} | 0.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"six"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
-e git+https://github.com/Tiendil/pynames.git@bc496f64be0da44db0882b74b54125ce2e5e556b#egg=Pynames
pytest==8.3.5
six==1.17.0
tomli==2.2.1
unicodecsv==0.14.1
| name: pynames
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- six==1.17.0
- tomli==2.2.1
- unicodecsv==0.14.1
prefix: /opt/conda/envs/pynames
| [
"pynames/tests/test_from_tables_generator.py::TestFromTablesGenerator::test_base",
"pynames/tests/test_from_tables_generator.py::TestFromTablesGenerator::test_get_name__with_forms",
"pynames/tests/test_from_tables_generator.py::TestFromTablesGenerator::test_get_name_simple",
"pynames/tests/test_from_tables_generator.py::TestFromCSVTablesGenerator::test_init_state_equal",
"pynames/tests/test_name.py::TestName::test_base",
"pynames/tests/test_name.py::TestName::test_forms",
"pynames/tests/test_name.py::TestName::test_genders",
"pynames/tests/test_name.py::TestName::test_languages"
]
| [
"pynames/tests/test_from_list_generator.py::TestFromListGenerator::test_base",
"pynames/tests/test_from_list_generator.py::TestFromListGenerator::test_get_name__simple",
"pynames/tests/test_from_list_generator.py::TestFromListGenerator::test_get_name__with_forms",
"pynames/tests/test_from_list_generator.py::TestFromListGenerator::test_male_female_selection"
]
| [
"pynames/tests/test_from_list_generator.py::TestFromListGenerator::test_not_derived",
"pynames/tests/test_from_list_generator.py::TestFromListGenerator::test_wrong_path",
"pynames/tests/test_from_tables_generator.py::TestFromTablesGenerator::test_male_female_selection",
"pynames/tests/test_from_tables_generator.py::TestFromTablesGenerator::test_not_derived",
"pynames/tests/test_from_tables_generator.py::TestFromTablesGenerator::test_wrong_path",
"pynames/tests/test_utils.py::TestName::test_file_adapter",
"pynames/tests/test_utils.py::TestName::test_is_file",
"pynames/tests/test_utils.py::TestName::test_is_file_on_django_files"
]
| []
| BSD 3-Clause "New" or "Revised" License | 591 | [
"pynames/utils.py",
"pynames/exceptions.py",
"setup.py",
"pynames/from_tables_generator.py",
"pynames/relations.py",
"pynames/from_list_generator.py",
"pynames/names.py",
"helpers/merge_names.py"
]
| [
"pynames/utils.py",
"pynames/exceptions.py",
"setup.py",
"pynames/from_tables_generator.py",
"pynames/relations.py",
"pynames/from_list_generator.py",
"pynames/names.py",
"helpers/merge_names.py"
]
|
|
Axelrod-Python__Axelrod-638 | 89651f45910f4b41a79c58358d9f5beca4197fc1 | 2016-06-19 20:45:17 | 89651f45910f4b41a79c58358d9f5beca4197fc1 | diff --git a/axelrod/strategies/finite_state_machines.py b/axelrod/strategies/finite_state_machines.py
index defc4770..1c231d43 100644
--- a/axelrod/strategies/finite_state_machines.py
+++ b/axelrod/strategies/finite_state_machines.py
@@ -54,6 +54,7 @@ class FSMPlayer(Player):
initial_state = 1
initial_action = C
Player.__init__(self)
+ self.initial_state = initial_state
self.initial_action = initial_action
self.fsm = SimpleFSM(transitions, initial_state)
@@ -67,6 +68,10 @@ class FSMPlayer(Player):
self.state = self.fsm.state
return action
+ def reset(self):
+ Player.reset(self)
+ self.fsm.state = self.initial_state
+
class Fortress3(FSMPlayer):
"""Finite state machine player specified in DOI:10.1109/CEC.2006.1688322.
| Finite state machine players don't reset properly
```
>>> import axelrod as axl
>>> tft = axl.TitForTat()
>>> predator = axl.Predator()
>>> predator.fsm.state
1
>>> m = axl.Match((tft, predator), 2)
>>> m.play()
[('C', 'C'), ('C', 'D')]
>>> predator.fsm.state
2
>>> predator.reset()
>>> predator.fsm.state
2
```
I stumbled on this while working on #636 (writing a hypothesis test that Contrite TfT reduces to TfT under zero noise); the example above is reduced from noticing that playing the same match again gives a different output:
```
>>> m = axl.Match((tft, predator), 2)
>>> m.play()
[('C', 'C'), ('C', 'C')]
```
I'm going to work on a fix now and include a hypothesis test that checks that random deterministic matches give the same outcomes. | Axelrod-Python/Axelrod | diff --git a/axelrod/tests/integration/test_matches.py b/axelrod/tests/integration/test_matches.py
new file mode 100644
index 00000000..b6241145
--- /dev/null
+++ b/axelrod/tests/integration/test_matches.py
@@ -0,0 +1,25 @@
+"""Tests for some expected match behaviours"""
+import unittest
+import axelrod
+
+from hypothesis import given
+from hypothesis.strategies import integers
+from axelrod.tests.property import strategy_lists
+
+C, D = axelrod.Actions.C, axelrod.Actions.D
+
+deterministic_strategies = [s for s in axelrod.ordinary_strategies
+ if not s().classifier['stochastic']] # Well behaved strategies
+
+class TestMatchOutcomes(unittest.TestCase):
+
+ @given(strategies=strategy_lists(strategies=deterministic_strategies,
+ min_size=2, max_size=2),
+ turns=integers(min_value=1, max_value=20))
+ def test_outcome_repeats(self, strategies, turns):
+ """A test that if we repeat 3 matches with deterministic and well
+ behaved strategies then we get the same result"""
+ players = [s() for s in strategies]
+ matches = [axelrod.Match(players, turns) for _ in range(3)]
+ self.assertEqual(matches[0].play(), matches[1].play())
+ self.assertEqual(matches[1].play(), matches[2].play())
diff --git a/axelrod/tests/unit/test_finite_state_machines.py b/axelrod/tests/unit/test_finite_state_machines.py
index 043834a1..d8147a59 100644
--- a/axelrod/tests/unit/test_finite_state_machines.py
+++ b/axelrod/tests/unit/test_finite_state_machines.py
@@ -111,6 +111,12 @@ class TestFSMPlayer(TestPlayer):
fsm = player.fsm
self.assertTrue(check_state_transitions(fsm.state_transitions))
+ def test_reset_initial_state(self):
+ player = self.player()
+ player.fsm.state = -1
+ player.reset()
+ self.assertFalse(player.fsm.state == -1)
+
class TestFortress3(TestFSMPlayer):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_issue_reference"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 1.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
-e git+https://github.com/Axelrod-Python/Axelrod.git@89651f45910f4b41a79c58358d9f5beca4197fc1#egg=Axelrod
coverage==7.8.0
cycler==0.12.1
exceptiongroup==1.2.2
hypothesis==6.130.5
iniconfig==2.1.0
kiwisolver==1.4.7
matplotlib==3.3.4
numpy==2.0.2
packaging==24.2
pillow==11.1.0
pluggy==1.5.0
pyparsing==2.1.1
pytest==8.3.5
pytest-cov==6.0.0
python-dateutil==2.9.0.post0
six==1.17.0
sortedcontainers==2.4.0
tomli==2.2.1
tqdm==3.4.0
| name: Axelrod
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- coverage==7.8.0
- cycler==0.12.1
- exceptiongroup==1.2.2
- hypothesis==6.130.5
- iniconfig==2.1.0
- kiwisolver==1.4.7
- matplotlib==3.3.4
- numpy==2.0.2
- packaging==24.2
- pillow==11.1.0
- pluggy==1.5.0
- pyparsing==2.1.1
- pytest==8.3.5
- pytest-cov==6.0.0
- python-dateutil==2.9.0.post0
- six==1.17.0
- sortedcontainers==2.4.0
- tomli==2.2.1
- tqdm==3.4.0
prefix: /opt/conda/envs/Axelrod
| [
"axelrod/tests/integration/test_matches.py::TestMatchOutcomes::test_outcome_repeats",
"axelrod/tests/unit/test_finite_state_machines.py::TestFSMPlayer::test_reset_initial_state",
"axelrod/tests/unit/test_finite_state_machines.py::TestFortress3::test_reset_initial_state",
"axelrod/tests/unit/test_finite_state_machines.py::TestFortress4::test_reset_initial_state",
"axelrod/tests/unit/test_finite_state_machines.py::TestPredator::test_reset_initial_state",
"axelrod/tests/unit/test_finite_state_machines.py::TestRaider::test_reset_initial_state",
"axelrod/tests/unit/test_finite_state_machines.py::TestRipoff::test_reset_initial_state",
"axelrod/tests/unit/test_finite_state_machines.py::TestSolutionB1::test_reset_initial_state",
"axelrod/tests/unit/test_finite_state_machines.py::TestSolutionB5::test_reset_initial_state",
"axelrod/tests/unit/test_finite_state_machines.py::TestThumper::test_reset_initial_state"
]
| []
| [
"axelrod/tests/unit/test_finite_state_machines.py::TestPlayer::test_clone",
"axelrod/tests/unit/test_finite_state_machines.py::TestPlayer::test_initialisation",
"axelrod/tests/unit/test_finite_state_machines.py::TestPlayer::test_match_attributes",
"axelrod/tests/unit/test_finite_state_machines.py::TestPlayer::test_repr",
"axelrod/tests/unit/test_finite_state_machines.py::TestPlayer::test_reset",
"axelrod/tests/unit/test_finite_state_machines.py::TestFSMPlayers::test_cooperator",
"axelrod/tests/unit/test_finite_state_machines.py::TestFSMPlayers::test_defector",
"axelrod/tests/unit/test_finite_state_machines.py::TestFSMPlayers::test_malformed_tables",
"axelrod/tests/unit/test_finite_state_machines.py::TestFSMPlayers::test_tft",
"axelrod/tests/unit/test_finite_state_machines.py::TestFSMPlayers::test_wsls",
"axelrod/tests/unit/test_finite_state_machines.py::TestFSMPlayer::test_clone",
"axelrod/tests/unit/test_finite_state_machines.py::TestFSMPlayer::test_initialisation",
"axelrod/tests/unit/test_finite_state_machines.py::TestFSMPlayer::test_match_attributes",
"axelrod/tests/unit/test_finite_state_machines.py::TestFSMPlayer::test_repr",
"axelrod/tests/unit/test_finite_state_machines.py::TestFSMPlayer::test_reset",
"axelrod/tests/unit/test_finite_state_machines.py::TestFSMPlayer::test_transitions",
"axelrod/tests/unit/test_finite_state_machines.py::TestFortress3::test_clone",
"axelrod/tests/unit/test_finite_state_machines.py::TestFortress3::test_initialisation",
"axelrod/tests/unit/test_finite_state_machines.py::TestFortress3::test_match_attributes",
"axelrod/tests/unit/test_finite_state_machines.py::TestFortress3::test_repr",
"axelrod/tests/unit/test_finite_state_machines.py::TestFortress3::test_reset",
"axelrod/tests/unit/test_finite_state_machines.py::TestFortress3::test_strategy",
"axelrod/tests/unit/test_finite_state_machines.py::TestFortress3::test_transitions",
"axelrod/tests/unit/test_finite_state_machines.py::TestFortress4::test_clone",
"axelrod/tests/unit/test_finite_state_machines.py::TestFortress4::test_initialisation",
"axelrod/tests/unit/test_finite_state_machines.py::TestFortress4::test_match_attributes",
"axelrod/tests/unit/test_finite_state_machines.py::TestFortress4::test_repr",
"axelrod/tests/unit/test_finite_state_machines.py::TestFortress4::test_reset",
"axelrod/tests/unit/test_finite_state_machines.py::TestFortress4::test_strategy",
"axelrod/tests/unit/test_finite_state_machines.py::TestFortress4::test_transitions",
"axelrod/tests/unit/test_finite_state_machines.py::TestPredator::test_clone",
"axelrod/tests/unit/test_finite_state_machines.py::TestPredator::test_initialisation",
"axelrod/tests/unit/test_finite_state_machines.py::TestPredator::test_match_attributes",
"axelrod/tests/unit/test_finite_state_machines.py::TestPredator::test_repr",
"axelrod/tests/unit/test_finite_state_machines.py::TestPredator::test_reset",
"axelrod/tests/unit/test_finite_state_machines.py::TestPredator::test_strategy",
"axelrod/tests/unit/test_finite_state_machines.py::TestPredator::test_transitions",
"axelrod/tests/unit/test_finite_state_machines.py::TestRaider::test_clone",
"axelrod/tests/unit/test_finite_state_machines.py::TestRaider::test_initialisation",
"axelrod/tests/unit/test_finite_state_machines.py::TestRaider::test_match_attributes",
"axelrod/tests/unit/test_finite_state_machines.py::TestRaider::test_repr",
"axelrod/tests/unit/test_finite_state_machines.py::TestRaider::test_reset",
"axelrod/tests/unit/test_finite_state_machines.py::TestRaider::test_strategy",
"axelrod/tests/unit/test_finite_state_machines.py::TestRaider::test_transitions",
"axelrod/tests/unit/test_finite_state_machines.py::TestRipoff::test_clone",
"axelrod/tests/unit/test_finite_state_machines.py::TestRipoff::test_initialisation",
"axelrod/tests/unit/test_finite_state_machines.py::TestRipoff::test_match_attributes",
"axelrod/tests/unit/test_finite_state_machines.py::TestRipoff::test_repr",
"axelrod/tests/unit/test_finite_state_machines.py::TestRipoff::test_reset",
"axelrod/tests/unit/test_finite_state_machines.py::TestRipoff::test_strategy",
"axelrod/tests/unit/test_finite_state_machines.py::TestRipoff::test_transitions",
"axelrod/tests/unit/test_finite_state_machines.py::TestSolutionB1::test_clone",
"axelrod/tests/unit/test_finite_state_machines.py::TestSolutionB1::test_initialisation",
"axelrod/tests/unit/test_finite_state_machines.py::TestSolutionB1::test_match_attributes",
"axelrod/tests/unit/test_finite_state_machines.py::TestSolutionB1::test_repr",
"axelrod/tests/unit/test_finite_state_machines.py::TestSolutionB1::test_reset",
"axelrod/tests/unit/test_finite_state_machines.py::TestSolutionB1::test_strategy",
"axelrod/tests/unit/test_finite_state_machines.py::TestSolutionB1::test_transitions",
"axelrod/tests/unit/test_finite_state_machines.py::TestSolutionB5::test_clone",
"axelrod/tests/unit/test_finite_state_machines.py::TestSolutionB5::test_initialisation",
"axelrod/tests/unit/test_finite_state_machines.py::TestSolutionB5::test_match_attributes",
"axelrod/tests/unit/test_finite_state_machines.py::TestSolutionB5::test_repr",
"axelrod/tests/unit/test_finite_state_machines.py::TestSolutionB5::test_reset",
"axelrod/tests/unit/test_finite_state_machines.py::TestSolutionB5::test_strategy",
"axelrod/tests/unit/test_finite_state_machines.py::TestSolutionB5::test_transitions",
"axelrod/tests/unit/test_finite_state_machines.py::TestThumper::test_clone",
"axelrod/tests/unit/test_finite_state_machines.py::TestThumper::test_initialisation",
"axelrod/tests/unit/test_finite_state_machines.py::TestThumper::test_match_attributes",
"axelrod/tests/unit/test_finite_state_machines.py::TestThumper::test_repr",
"axelrod/tests/unit/test_finite_state_machines.py::TestThumper::test_reset",
"axelrod/tests/unit/test_finite_state_machines.py::TestThumper::test_strategy",
"axelrod/tests/unit/test_finite_state_machines.py::TestThumper::test_transitions",
"axelrod/tests/unit/test_finite_state_machines.py::TestFortress3vsFortress3::test_rounds",
"axelrod/tests/unit/test_finite_state_machines.py::TestFortress3vsTitForTat::test_rounds",
"axelrod/tests/unit/test_finite_state_machines.py::TestFortress3vsCooperator::test_rounds",
"axelrod/tests/unit/test_finite_state_machines.py::TestFortress4vsFortress4::test_rounds",
"axelrod/tests/unit/test_finite_state_machines.py::TestFortress4vsTitForTat::test_rounds",
"axelrod/tests/unit/test_finite_state_machines.py::TestFortress4vsCooperator::test_rounds"
]
| []
| MIT License | 592 | [
"axelrod/strategies/finite_state_machines.py"
]
| [
"axelrod/strategies/finite_state_machines.py"
]
|
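The essence of the fix: an FSM player must remember its machine's starting state so that `reset()` can restore it; resetting only the history leaves the machine mid-run, which is exactly the bug reproduced in the report. A minimal sketch of the pattern — class and attribute names follow the patch, but the simplified bodies are illustrative rather than Axelrod's actual implementation:

```python
class SimpleFSM:
    """Finite state machine keyed on (state, opponent_action) pairs."""
    def __init__(self, transitions, initial_state):
        # transitions: {(state, opponent_action): (next_state, my_action)}
        self.state_transitions = dict(transitions)
        self.state = initial_state

    def move(self, opponent_action):
        self.state, my_action = self.state_transitions[(self.state, opponent_action)]
        return my_action


class FSMPlayer:
    def __init__(self, transitions, initial_state, initial_action):
        self.initial_state = initial_state   # remembered so reset() can restore it
        self.initial_action = initial_action
        self.fsm = SimpleFSM(transitions, initial_state)

    def reset(self):
        # Without this line the player resumes mid-machine after a reset,
        # which is why Predator kept state 2 in the report above.
        self.fsm.state = self.initial_state


# Tit-for-tat as a one-state machine that echoes the opponent's last move.
tft = FSMPlayer({(1, "C"): (1, "C"), (1, "D"): (1, "D")}, 1, "C")
assert tft.fsm.move("D") == "D"
tft.reset()
assert tft.fsm.state == 1
```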
|
scrapy__scrapy-2065 | d43a35735a062a4260b002cfbcd3236c77ef9399 | 2016-06-20 14:49:59 | d7b26edf6b419e379a7a0a425093f02cac2fcf33 | diff --git a/scrapy/utils/gz.py b/scrapy/utils/gz.py
index cfb652143..afc7ed128 100644
--- a/scrapy/utils/gz.py
+++ b/scrapy/utils/gz.py
@@ -50,9 +50,12 @@ def gunzip(data):
raise
return output
-_is_gzipped_re = re.compile(br'^application/(x-)?gzip\b', re.I)
+_is_gzipped = re.compile(br'^application/(x-)?gzip\b', re.I).search
+_is_octetstream = re.compile(br'^(application|binary)/octet-stream\b', re.I).search
def is_gzipped(response):
"""Return True if the response is gzipped, or False otherwise"""
ctype = response.headers.get('Content-Type', b'')
- return _is_gzipped_re.search(ctype) is not None
+ cenc = response.headers.get('Content-Encoding', b'').lower()
+ return (_is_gzipped(ctype) or
+ (_is_octetstream(ctype) and cenc in (b'gzip', b'x-gzip')))
| IOError, 'Not a gzipped file'
While trying to access a sitemap from robots.txt, Scrapy fails with an **IOError, 'Not a gzipped file'** error.
Not sure if this issue is related to the following issue(s):
https://github.com/scrapy/scrapy/issues/193 -> closed issue
https://github.com/scrapy/scrapy/pull/660 -> merged pull request to address issue 193
https://github.com/scrapy/scrapy/issues/951 -> open issue
> Line where the code fails, in gzip.py at line 197:
```python
def _read_gzip_header(self):
magic = self.fileobj.read(2)
if magic != '\037\213':
raise IOError, 'Not a gzipped file'
```
# Response Headers
```
Content-Encoding: gzip
Accept-Ranges: bytes
X-Amz-Request-Id: BFFF010DDE6268DA
Vary: Accept-Encoding
Server: AmazonS3
Last-Modified: Wed, 15 Jun 2016 19:02:20 GMT
Etag: "300bb71d6897cb2a22bba0bd07978c84"
Cache-Control: no-transform
Date: Sun, 19 Jun 2016 10:54:53 GMT
Content-Type: binary/octet-stream
```
Error Log:
```log
Traceback (most recent call last):
File "c:\venv\scrapy1.0\lib\site-packages\scrapy\utils\defer.py", line 102, in iter_errback
yield next(it)
File "c:\venv\scrapy1.0\lib\site-packages\scrapy\spidermiddlewares\offsite.py", line 29, in process_spider_output
for x in result:
File "c:\venv\scrapy1.0\lib\site-packages\scrapy\spidermiddlewares\referer.py", line 22, in <genexpr>
return (_set_referer(r) for r in result or ())
File "c:\venv\scrapy1.0\lib\site-packages\scrapy\spidermiddlewares\urllength.py", line 37, in <genexpr>
return (r for r in result or () if _filter(r))
File "c:\venv\scrapy1.0\lib\site-packages\scrapy\spidermiddlewares\depth.py", line 58, in <genexpr>
return (r for r in result or () if _filter(r))
File "D:\projects\sitemap_spider\sitemap_spider\spiders\mainspider.py", line 31, in _parse_sitemap
body = self._get_sitemap_body(response)
File "c:\venv\scrapy1.0\lib\site-packages\scrapy\spiders\sitemap.py", line 67, in _get_sitemap_body
return gunzip(response.body)
File "c:\venv\scrapy1.0\lib\site-packages\scrapy\utils\gz.py", line 37, in gunzip
chunk = read1(f, 8196)
File "c:\venv\scrapy1.0\lib\site-packages\scrapy\utils\gz.py", line 21, in read1
return gzf.read(size)
File "c:\python27\Lib\gzip.py", line 268, in read
self._read(readsize)
File "c:\python27\Lib\gzip.py", line 303, in _read
self._read_gzip_header()
File "c:\python27\Lib\gzip.py", line 197, in _read_gzip_header
raise IOError, 'Not a gzipped file'
```
I did download the file manually and was able to extract the content, so it is not as if the file is corrupted.
As an example sitemap URL, you can follow Amazon's robots.txt. | scrapy/scrapy | diff --git a/tests/test_downloadermiddleware_httpcompression.py b/tests/test_downloadermiddleware_httpcompression.py
index 24955a515..b2426946d 100644
--- a/tests/test_downloadermiddleware_httpcompression.py
+++ b/tests/test_downloadermiddleware_httpcompression.py
@@ -145,6 +145,26 @@ class HttpCompressionTest(TestCase):
self.assertEqual(response.headers['Content-Encoding'], b'gzip')
self.assertEqual(response.headers['Content-Type'], b'application/gzip')
+ def test_process_response_gzip_app_octetstream_contenttype(self):
+ response = self._getresponse('gzip')
+ response.headers['Content-Type'] = 'application/octet-stream'
+ request = response.request
+
+ newresponse = self.mw.process_response(request, response, self.spider)
+ self.assertIs(newresponse, response)
+ self.assertEqual(response.headers['Content-Encoding'], b'gzip')
+ self.assertEqual(response.headers['Content-Type'], b'application/octet-stream')
+
+ def test_process_response_gzip_binary_octetstream_contenttype(self):
+ response = self._getresponse('x-gzip')
+ response.headers['Content-Type'] = 'binary/octet-stream'
+ request = response.request
+
+ newresponse = self.mw.process_response(request, response, self.spider)
+ self.assertIs(newresponse, response)
+ self.assertEqual(response.headers['Content-Encoding'], b'gzip')
+ self.assertEqual(response.headers['Content-Type'], b'binary/octet-stream')
+
def test_process_response_head_request_no_decode_required(self):
response = self._getresponse('gzip')
response.headers['Content-Type'] = 'application/gzip'
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 1.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
Automat==24.8.1
cffi==1.17.1
constantly==23.10.4
coverage==7.8.0
cryptography==44.0.2
cssselect==1.3.0
exceptiongroup==1.2.2
execnet==2.1.1
hyperlink==21.0.0
idna==3.10
incremental==24.7.2
iniconfig==2.1.0
jmespath==1.0.1
lxml==5.3.1
packaging==24.2
parsel==1.10.0
pluggy==1.5.0
pyasn1==0.6.1
pyasn1_modules==0.4.2
pycparser==2.22
PyDispatcher==2.0.7
pyOpenSSL==25.0.0
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
queuelib==1.7.0
-e git+https://github.com/scrapy/scrapy.git@d43a35735a062a4260b002cfbcd3236c77ef9399#egg=Scrapy
service-identity==24.2.0
six==1.17.0
tomli==2.2.1
Twisted==24.11.0
typing_extensions==4.13.0
w3lib==2.3.1
zope.interface==7.2
| name: scrapy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- automat==24.8.1
- cffi==1.17.1
- constantly==23.10.4
- coverage==7.8.0
- cryptography==44.0.2
- cssselect==1.3.0
- exceptiongroup==1.2.2
- execnet==2.1.1
- hyperlink==21.0.0
- idna==3.10
- incremental==24.7.2
- iniconfig==2.1.0
- jmespath==1.0.1
- lxml==5.3.1
- packaging==24.2
- parsel==1.10.0
- pluggy==1.5.0
- pyasn1==0.6.1
- pyasn1-modules==0.4.2
- pycparser==2.22
- pydispatcher==2.0.7
- pyopenssl==25.0.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- queuelib==1.7.0
- service-identity==24.2.0
- six==1.17.0
- tomli==2.2.1
- twisted==24.11.0
- typing-extensions==4.13.0
- w3lib==2.3.1
- zope-interface==7.2
prefix: /opt/conda/envs/scrapy
| [
"tests/test_downloadermiddleware_httpcompression.py::HttpCompressionTest::test_process_response_gzip_app_octetstream_contenttype",
"tests/test_downloadermiddleware_httpcompression.py::HttpCompressionTest::test_process_response_gzip_binary_octetstream_contenttype"
]
| []
| [
"tests/test_downloadermiddleware_httpcompression.py::HttpCompressionTest::test_multipleencodings",
"tests/test_downloadermiddleware_httpcompression.py::HttpCompressionTest::test_process_request",
"tests/test_downloadermiddleware_httpcompression.py::HttpCompressionTest::test_process_response_encoding_inside_body",
"tests/test_downloadermiddleware_httpcompression.py::HttpCompressionTest::test_process_response_force_recalculate_encoding",
"tests/test_downloadermiddleware_httpcompression.py::HttpCompressionTest::test_process_response_gzip",
"tests/test_downloadermiddleware_httpcompression.py::HttpCompressionTest::test_process_response_gzipped_contenttype",
"tests/test_downloadermiddleware_httpcompression.py::HttpCompressionTest::test_process_response_head_request_no_decode_required",
"tests/test_downloadermiddleware_httpcompression.py::HttpCompressionTest::test_process_response_plain",
"tests/test_downloadermiddleware_httpcompression.py::HttpCompressionTest::test_process_response_rawdeflate",
"tests/test_downloadermiddleware_httpcompression.py::HttpCompressionTest::test_process_response_zlibdelate"
]
| []
| BSD 3-Clause "New" or "Revised" License | 593 | [
"scrapy/utils/gz.py"
]
| [
"scrapy/utils/gz.py"
]
|
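The fix boils down to a broader "is this gzipped?" predicate: a gzip Content-Type still counts, and so does an `application/octet-stream` or `binary/octet-stream` Content-Type paired with a gzip Content-Encoding — the Amazon S3 sitemap case from the report. A standalone sketch of that check, taking raw header byte strings instead of Scrapy's response object:

```python
import re

_is_gzipped = re.compile(br'^application/(x-)?gzip\b', re.I).search
_is_octetstream = re.compile(br'^(application|binary)/octet-stream\b', re.I).search


def is_gzipped(content_type, content_encoding=b''):
    """Mirror of the patched scrapy.utils.gz.is_gzipped, on raw header values."""
    cenc = content_encoding.lower()
    return bool(_is_gzipped(content_type) or
                (_is_octetstream(content_type) and cenc in (b'gzip', b'x-gzip')))


# The S3 response from the report: octet-stream type plus gzip encoding.
assert is_gzipped(b'binary/octet-stream', b'gzip')
assert is_gzipped(b'application/x-gzip')
assert not is_gzipped(b'binary/octet-stream')   # no encoding hint, so not gzipped
```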
|
Backblaze__B2_Command_Line_Tool-178 | 761d24c8dbd00f94decbf14cf0136de0a0d9f054 | 2016-06-21 15:08:47 | 01c4e89f63f38b9efa6a6fa63f54cd556a0b5305 | diff --git a/b2/bucket.py b/b2/bucket.py
index 9347570..6d4332a 100644
--- a/b2/bucket.py
+++ b/b2/bucket.py
@@ -34,7 +34,7 @@ class LargeFileUploadState(object):
"""
def __init__(self, file_progress_listener):
- self.lock = threading.Lock()
+ self.lock = threading.RLock()
self.error_message = None
self.file_progress_listener = file_progress_listener
self.part_number_to_part_state = {}
@@ -48,6 +48,11 @@ class LargeFileUploadState(object):
with self.lock:
return self.error_message is not None
+ def get_error_message(self):
+ with self.lock:
+ assert self.has_error()
+ return self.error_message
+
def update_part_bytes(self, bytes_delta):
with self.lock:
self.bytes_completed += bytes_delta
| LargeFileUploadState object has no attribute get_error_message
In my backup log file I saw this line:
`b2_upload(/backup/#########.tar.gz,#########.tar.gz, 1466468375649): AttributeError("'LargeFileUploadState' object has no attribute 'get_error_message'",) 'LargeFileUploadState' object has no attribute 'get_error_message'`
No retry attempts were made and the file failed to upload.
(the ### were intentional and not the real file name) | Backblaze/B2_Command_Line_Tool | diff --git a/test/test_bucket.py b/test/test_bucket.py
index 766af3c..44cb2e2 100644
--- a/test/test_bucket.py
+++ b/test/test_bucket.py
@@ -18,8 +18,9 @@ import six
from b2.account_info import StubAccountInfo
from b2.api import B2Api
+from b2.bucket import LargeFileUploadState
from b2.download_dest import DownloadDestBytes
-from b2.exception import B2Error, InvalidAuthToken, MaxRetriesExceeded
+from b2.exception import AlreadyFailed, B2Error, InvalidAuthToken, MaxRetriesExceeded
from b2.file_version import FileVersionInfo
from b2.part import Part
from b2.progress import AbstractProgressListener
@@ -146,6 +147,22 @@ class TestListParts(TestCaseWithBucket):
self.assertEqual(expected_parts, list(self.bucket.list_parts(file1.file_id, batch_size=1)))
+class TestUploadPart(TestCaseWithBucket):
+ def test_error_in_state(self):
+ file1 = self.bucket.start_large_file('file1.txt', 'text/plain', {})
+ content = six.b('hello world')
+ file_progress_listener = mock.MagicMock()
+ large_file_upload_state = LargeFileUploadState(file_progress_listener)
+ large_file_upload_state.set_error('test error')
+ try:
+ self.bucket._upload_part(
+ file1.file_id, 1, (0, 11), UploadSourceBytes(content), large_file_upload_state
+ )
+ self.fail('should have thrown')
+ except AlreadyFailed:
+ pass
+
+
class TestListUnfinished(TestCaseWithBucket):
def test_empty(self):
self.assertEqual([], list(self.bucket.list_unfinished_large_files()))
diff --git a/test_b2_command_line.py b/test_b2_command_line.py
index 7cadd76..8d23678 100644
--- a/test_b2_command_line.py
+++ b/test_b2_command_line.py
@@ -319,8 +319,8 @@ def basic_test(b2_tool, bucket_name):
file_to_upload = 'README.md'
- with open(file_to_upload, 'rb') as f:
- hex_sha1 = hashlib.sha1(f.read()).hexdigest()
+ hex_sha1 = hashlib.sha1(read_file(file_to_upload)).hexdigest()
+
uploaded_a = b2_tool.should_succeed_json(
[
'upload_file', '--noProgress', '--quiet', bucket_name, file_to_upload, 'a'
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 3,
"test_score": 2
},
"num_modified_files": 1
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"yapf",
"pyflakes",
"pytest"
],
"pre_install": [],
"python": "3.5",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
-e git+https://github.com/Backblaze/B2_Command_Line_Tool.git@761d24c8dbd00f94decbf14cf0136de0a0d9f054#egg=b2
certifi==2021.5.30
charset-normalizer==2.0.12
idna==3.10
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
nose==1.3.7
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyflakes==3.0.1
pyparsing==3.1.4
pytest==7.0.1
requests==2.27.1
six==1.17.0
tomli==1.2.3
tqdm==4.64.1
typing_extensions==4.1.1
urllib3==1.26.20
yapf==0.32.0
zipp==3.6.0
| name: B2_Command_Line_Tool
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- charset-normalizer==2.0.12
- idna==3.10
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- nose==1.3.7
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyflakes==3.0.1
- pyparsing==3.1.4
- pytest==7.0.1
- requests==2.27.1
- six==1.17.0
- tomli==1.2.3
- tqdm==4.64.1
- typing-extensions==4.1.1
- urllib3==1.26.20
- yapf==0.32.0
- zipp==3.6.0
prefix: /opt/conda/envs/B2_Command_Line_Tool
| [
"test/test_bucket.py::TestUploadPart::test_error_in_state"
]
| []
| [
"test/test_bucket.py::TestReauthorization::testCreateBucket",
"test/test_bucket.py::TestListParts::testEmpty",
"test/test_bucket.py::TestListParts::testThree",
"test/test_bucket.py::TestListUnfinished::test_empty",
"test/test_bucket.py::TestListUnfinished::test_one",
"test/test_bucket.py::TestListUnfinished::test_three",
"test/test_bucket.py::TestLs::test_empty",
"test/test_bucket.py::TestLs::test_hidden_file",
"test/test_bucket.py::TestLs::test_one_file_at_root",
"test/test_bucket.py::TestLs::test_started_large_file",
"test/test_bucket.py::TestLs::test_three_files_at_root",
"test/test_bucket.py::TestLs::test_three_files_in_dir",
"test/test_bucket.py::TestLs::test_three_files_multiple_versions",
"test/test_bucket.py::TestUpload::test_upload_bytes",
"test/test_bucket.py::TestUpload::test_upload_bytes_progress",
"test/test_bucket.py::TestUpload::test_upload_file_one_fatal_error",
"test/test_bucket.py::TestUpload::test_upload_file_too_many_retryable_errors",
"test/test_bucket.py::TestUpload::test_upload_large",
"test/test_bucket.py::TestUpload::test_upload_large_resume",
"test/test_bucket.py::TestUpload::test_upload_large_resume_all_parts_there",
"test/test_bucket.py::TestUpload::test_upload_large_resume_file_info",
"test/test_bucket.py::TestUpload::test_upload_large_resume_file_info_does_not_match",
"test/test_bucket.py::TestUpload::test_upload_large_resume_no_parts",
"test/test_bucket.py::TestUpload::test_upload_large_resume_part_does_not_match",
"test/test_bucket.py::TestUpload::test_upload_large_resume_wrong_part_size",
"test/test_bucket.py::TestUpload::test_upload_local_file",
"test/test_bucket.py::TestUpload::test_upload_one_retryable_error",
"test/test_bucket.py::TestDownload::test_download_by_id_no_progress",
"test/test_bucket.py::TestDownload::test_download_by_id_progress",
"test/test_bucket.py::TestDownload::test_download_by_name_no_progress",
"test/test_bucket.py::TestDownload::test_download_by_name_progress",
"test_b2_command_line.py::TestCommandLine::test_stderr_patterns"
]
| []
| MIT License | 594 | [
"b2/bucket.py"
]
| [
"b2/bucket.py"
]
|
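Besides adding the missing `get_error_message`, the patch swaps `threading.Lock` for `threading.RLock`, most likely because the new method calls `has_error()` while already holding the lock, and `has_error()` acquires the same lock again — a plain `Lock` would deadlock on that re-entry. A minimal standalone sketch of the pattern (not B2's actual class):

```python
import threading


class UploadState:
    def __init__(self):
        self.lock = threading.RLock()   # a non-reentrant Lock would deadlock below
        self.error_message = None

    def set_error(self, message):
        with self.lock:
            self.error_message = message

    def has_error(self):
        with self.lock:
            return self.error_message is not None

    def get_error_message(self):
        with self.lock:
            # Re-acquires the lock via has_error(); only safe with an RLock.
            assert self.has_error()
            return self.error_message


state = UploadState()
state.set_error('test error')
assert state.get_error_message() == 'test error'
```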
|
joblib__joblib-370 | 40341615cc2600675ce7457d9128fb030f6f89fa | 2016-06-22 09:45:21 | 40341615cc2600675ce7457d9128fb030f6f89fa | aabadie: I pushed an update commit (1c7763e) that ensures the numpy array wrapper is written in a dedicated frame in the pickle byte stream. I think it's cleaner.
aabadie: @lesteve, comments addressed.
aabadie: @lesteve, I reverted to the initial solution.
lesteve: LGTM, merging, great job! | diff --git a/joblib/numpy_pickle.py b/joblib/numpy_pickle.py
index 0cb616d..f029582 100644
--- a/joblib/numpy_pickle.py
+++ b/joblib/numpy_pickle.py
@@ -265,6 +265,14 @@ class NumpyPickler(Pickler):
wrapper = self._create_array_wrapper(obj)
Pickler.save(self, wrapper)
+ # A framer was introduced with pickle protocol 4 and we want to
+ # ensure the wrapper object is written before the numpy array
+ # buffer in the pickle file.
+ # See https://www.python.org/dev/peps/pep-3154/#framing to get
+ # more information on the framer behavior.
+ if self.proto >= 4:
+ self.framer.commit_frame(force=True)
+
# And then array bytes are written right after the wrapper.
wrapper.write_array(obj, self)
return
| load fails when dump use pickle.HIGHEST_PROTOCOL in Python 3
in master branch a96878e (version 0.10.0.dev0), the following code gives `KeyError`
```py
import pickle
import numpy as np
import joblib
a = np.zeros((1, 235), np.uint32)
joblib.dump(a, 'tmp.jl', protocol=pickle.HIGHEST_PROTOCOL)
joblib.load('tmp.jl')
```
That is, joblib does not seem to support loading data saved with `pickle.HIGHEST_PROTOCOL`. | joblib/joblib | diff --git a/joblib/test/test_numpy_pickle.py b/joblib/test/test_numpy_pickle.py
index 321a428..19a5e95 100644
--- a/joblib/test/test_numpy_pickle.py
+++ b/joblib/test/test_numpy_pickle.py
@@ -13,6 +13,7 @@ import warnings
import nose
import gzip
import zlib
+import pickle
from contextlib import closing
from joblib.test.common import np, with_numpy
@@ -821,3 +822,18 @@ def test_non_contiguous_array_pickling():
array_reloaded = numpy_pickle.load(filename)
np.testing.assert_array_equal(array_reloaded, array)
os.remove(filename)
+
+
+@with_numpy
+def test_pickle_highest_protocol():
+ # ensure persistence of a numpy array is valid even when using
+ # the pickle HIGHEST_PROTOCOL.
+ # see https://github.com/joblib/joblib/issues/362
+
+ filename = env['filename'] + str(random.randint(0, 1000))
+ test_array = np.zeros(10)
+
+ numpy_pickle.dump(test_array, filename, protocol=pickle.HIGHEST_PROTOCOL)
+ array_reloaded = numpy_pickle.load(filename)
+
+ np.testing.assert_array_equal(array_reloaded, test_array)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 0.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "",
"pip_packages": [
"nose",
"coverage",
"numpy>=1.6.1",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
coverage==6.2
importlib-metadata==4.8.3
iniconfig==1.1.1
-e git+https://github.com/joblib/joblib.git@40341615cc2600675ce7457d9128fb030f6f89fa#egg=joblib
nose==1.3.7
numpy==1.19.5
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: joblib
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- nose==1.3.7
- numpy==1.19.5
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/joblib
| [
"joblib/test/test_numpy_pickle.py::test_pickle_highest_protocol"
]
| [
"joblib/test/test_numpy_pickle.py::test_cache_size_warning",
"joblib/test/test_numpy_pickle.py::test_joblib_pickle_across_python_versions"
]
| [
"joblib/test/test_numpy_pickle.py::test_value_error",
"joblib/test/test_numpy_pickle.py::test_compress_level_error",
"joblib/test/test_numpy_pickle.py::test_numpy_persistence",
"joblib/test/test_numpy_pickle.py::test_numpy_persistence_bufferred_array_compression",
"joblib/test/test_numpy_pickle.py::test_memmap_persistence",
"joblib/test/test_numpy_pickle.py::test_memmap_persistence_mixed_dtypes",
"joblib/test/test_numpy_pickle.py::test_masked_array_persistence",
"joblib/test/test_numpy_pickle.py::test_compress_mmap_mode_warning",
"joblib/test/test_numpy_pickle.py::test_compressed_pickle_dump_and_load",
"joblib/test/test_numpy_pickle.py::test_compress_tuple_argument",
"joblib/test/test_numpy_pickle.py::test_joblib_compression_formats",
"joblib/test/test_numpy_pickle.py::test_load_externally_decompressed_files",
"joblib/test/test_numpy_pickle.py::test_compression_using_file_extension",
"joblib/test/test_numpy_pickle.py::test_binary_zlibfile",
"joblib/test/test_numpy_pickle.py::test_numpy_subclass",
"joblib/test/test_numpy_pickle.py::test_pathlib",
"joblib/test/test_numpy_pickle.py::test_non_contiguous_array_pickling"
]
| []
| BSD 3-Clause "New" or "Revised" License | 595 | [
"joblib/numpy_pickle.py"
]
| [
"joblib/numpy_pickle.py"
]
|
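The joblib fix works around pickle protocol 4's framing layer (PEP 3154, referenced in the patch comment): from protocol 4 on, the pickler buffers opcodes into frames before flushing them to the file, while joblib writes the raw array bytes directly to the file — so without a forced flush, the array buffer can land in the stream ahead of the still-buffered wrapper object, breaking the load. Forcing `commit_frame` keeps them ordered. A hedged sketch of the same subclassing trick; `framer` and `commit_frame` are internals of the pure-Python `pickle._Pickler`, exactly the attributes the patch relies on, and `OrderedPickler` is a made-up name:

```python
import io
import pickle


class OrderedPickler(pickle._Pickler):
    """Flush the current frame after each bytes object at protocol >= 4."""

    def save(self, obj, save_persistent_id=True):
        pickle._Pickler.save(self, obj, save_persistent_id)
        if isinstance(obj, bytes) and self.proto >= 4:
            # Anything written straight to the underlying file after this
            # point can no longer jump ahead of the object just pickled.
            self.framer.commit_frame(force=True)


buf = io.BytesIO()
OrderedPickler(buf, protocol=pickle.HIGHEST_PROTOCOL).dump(b'payload')
assert pickle.loads(buf.getvalue()) == b'payload'
```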
enthought__okonomiyaki-217 | a23c1b4909741d649ebd22f30dc1268712e63c0f | 2016-06-24 10:22:10 | 5cbd87f7c349f999ac8d53fec18e44f5656bf5eb | diff --git a/okonomiyaki/file_formats/_egg_info.py b/okonomiyaki/file_formats/_egg_info.py
index ddc2e2f..afc373c 100644
--- a/okonomiyaki/file_formats/_egg_info.py
+++ b/okonomiyaki/file_formats/_egg_info.py
@@ -663,6 +663,18 @@ def _normalized_info_from_string(spec_depend_string, epd_platform=None,
return data, epd_platform
+_JSON_METADATA_VERSION = "metadata_version"
+_JSON__RAW_NAME = "_raw_name"
+_JSON_VERSION = "version"
+_JSON_EPD_PLATFORM = "epd_platform"
+_JSON_PYTHON_TAG = "python_tag"
+_JSON_ABI_TAG = "abi_tag"
+_JSON_PLATFORM_TAG = "platform_tag"
+_JSON_PLATFORM_ABI_TAG = "platform_abi_tag"
+_JSON_RUNTIME_DEPENDENCIES = "runtime_dependencies"
+_JSON_SUMMARY = "summary"
+
+
class EggMetadata(object):
""" Enthought egg metadata for format 1.x.
"""
@@ -707,6 +719,32 @@ class EggMetadata(object):
sha256 = compute_sha256(path_or_file.fp)
return cls._from_egg(path_or_file, sha256, strict)
+ @classmethod
+ def from_json_dict(cls, json_dict, pkg_info):
+ version = EnpkgVersion.from_string(json_dict[_JSON_VERSION])
+
+ if json_dict[_JSON_PYTHON_TAG] is not None:
+ python = PythonImplementation.from_string(json_dict[_JSON_PYTHON_TAG])
+ else:
+ python = None
+
+ if json_dict[_JSON_EPD_PLATFORM] is None:
+ epd_platform = None
+ else:
+ epd_platform = EPDPlatform.from_epd_string(json_dict[_JSON_EPD_PLATFORM])
+
+ dependencies = Dependencies(tuple(json_dict[_JSON_RUNTIME_DEPENDENCIES]))
+ metadata_version = MetadataVersion.from_string(
+ json_dict[_JSON_METADATA_VERSION]
+ )
+
+ return cls(
+ json_dict[_JSON__RAW_NAME], version, epd_platform, python,
+ json_dict[_JSON_ABI_TAG], json_dict[_JSON_PLATFORM_ABI_TAG],
+ dependencies, pkg_info, json_dict[_JSON_SUMMARY],
+ metadata_version=metadata_version
+ )
+
@classmethod
def _from_egg(cls, path_or_file, sha256, strict=True):
def _read_summary(fp):
@@ -1015,6 +1053,27 @@ class EggMetadata(object):
if self.pkg_info:
self.pkg_info._dump_as_zip(zp)
+ def to_json_dict(self):
+ if self.platform is None:
+ epd_platform = None
+ else:
+ epd_platform = six.text_type(self.platform)
+
+ return {
+ _JSON_METADATA_VERSION: six.text_type(self.metadata_version),
+ _JSON__RAW_NAME: self._raw_name,
+ _JSON_VERSION: six.text_type(self.version),
+ _JSON_EPD_PLATFORM: epd_platform,
+ _JSON_PYTHON_TAG: self.python_tag,
+ _JSON_ABI_TAG: self.abi_tag,
+ _JSON_PLATFORM_TAG: self.platform_tag,
+ _JSON_PLATFORM_ABI_TAG: self.platform_abi_tag,
+ _JSON_RUNTIME_DEPENDENCIES: [
+ six.text_type(p) for p in self.runtime_dependencies
+ ],
+ _JSON_SUMMARY: self.summary,
+ }
+
# Protocol implementations
def __eq__(self, other):
if isinstance(other, self.__class__):
| Move EggMetadata json serialization from edm
See enthought/edm#719 | enthought/okonomiyaki | diff --git a/okonomiyaki/file_formats/tests/test__egg_info.py b/okonomiyaki/file_formats/tests/test__egg_info.py
index 80dd58b..b658c36 100644
--- a/okonomiyaki/file_formats/tests/test__egg_info.py
+++ b/okonomiyaki/file_formats/tests/test__egg_info.py
@@ -17,7 +17,9 @@ from ...errors import (
InvalidEggName, InvalidMetadata, InvalidMetadataField, MissingMetadata,
UnsupportedMetadata)
from ...utils import tempdir
-from ...utils.test_data import NOSE_1_3_4_OSX_X86_64
+from ...utils.test_data import (
+ MKL_10_3_RH5_X86_64, NOSE_1_3_4_OSX_X86_64, NOSE_1_3_4_RH5_X86_64
+)
from ...platforms import EPDPlatform, PlatformABI, PythonImplementation
from ...platforms.legacy import LegacyEPDPlatform
from ...versions import EnpkgVersion, MetadataVersion, RuntimeVersion
@@ -1552,3 +1554,68 @@ class TestEggMetadata(unittest.TestCase):
# Then
metadata = EggMetadata.from_egg(path)
self.assertEqual(metadata.platform_tag, "win32")
+
+ def test_to_json_dict(self):
+ # Given
+ egg = NOSE_1_3_4_RH5_X86_64
+ metadata = EggMetadata.from_egg(egg)
+
+ r_json_dict = {
+ "metadata_version": u"1.3",
+ "_raw_name": u"nose",
+ "version": u"1.3.4-1",
+ "epd_platform": u"rh5_x86_64",
+ "python_tag": u"cp27",
+ "abi_tag": u"cp27m",
+ "platform_tag": u"linux_x86_64",
+ "platform_abi_tag": u"gnu",
+ "runtime_dependencies": [],
+ "summary": (
+ u"Extends the Python Unittest module with additional "
+ "disocvery and running\noptions\n"
+ )
+ }
+
+ # When
+ json_dict = metadata.to_json_dict()
+
+ # Then
+ self.assertEqual(json_dict, r_json_dict)
+
+ def test_from_json_dict(self):
+ # Given
+ egg = NOSE_1_3_4_RH5_X86_64
+ r_metadata = EggMetadata.from_egg(egg)
+
+ json_dict = {
+ "metadata_version": u"1.3",
+ "_raw_name": u"nose",
+ "version": u"1.3.4-1",
+ "epd_platform": u"rh5_x86_64",
+ "python_tag": u"cp27",
+ "abi_tag": u"cp27m",
+ "platform_tag": u"linux_x86_64",
+ "platform_abi_tag": u"gnu",
+ "runtime_dependencies": [],
+ "summary": (
+ u"Extends the Python Unittest module with additional "
+ "disocvery and running\noptions\n"
+ )
+ }
+
+ # When
+ metadata = EggMetadata.from_json_dict(json_dict, r_metadata.pkg_info)
+
+ # Then
+ self.assertEqual(metadata, r_metadata)
+
+ def _test_roundtrip(self, egg):
+ r_metadata = EggMetadata.from_egg(egg)
+ metadata = EggMetadata.from_json_dict(
+ r_metadata.to_json_dict(), r_metadata.pkg_info
+ )
+
+ self.assertEqual(metadata, r_metadata)
+
+ def test_mkl_roundtrip(self):
+ self._test_roundtrip(MKL_10_3_RH5_X86_64)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 3,
"test_score": 3
},
"num_modified_files": 1
} | 0.15 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"dev_requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
coverage==7.8.0
docutils==0.21.2
enum34==1.1.10
exceptiongroup==1.2.2
flake8==7.2.0
haas==0.9.0
iniconfig==2.1.0
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
mccabe==0.7.0
mock==1.0.1
-e git+https://github.com/enthought/okonomiyaki.git@a23c1b4909741d649ebd22f30dc1268712e63c0f#egg=okonomiyaki
packaging==24.2
pbr==6.1.1
pluggy==1.5.0
pycodestyle==2.13.0
pyflakes==3.3.2
pytest==8.3.5
referencing==0.36.2
rpds-py==0.24.0
six==1.17.0
statistics==1.0.3.5
stevedore==4.1.1
testfixtures==8.3.0
tomli==2.2.1
typing_extensions==4.13.0
zipfile2==0.0.12
| name: okonomiyaki
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- coverage==7.8.0
- docutils==0.21.2
- enum34==1.1.10
- exceptiongroup==1.2.2
- flake8==7.2.0
- haas==0.9.0
- iniconfig==2.1.0
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- mccabe==0.7.0
- mock==1.0.1
- packaging==24.2
- pbr==6.1.1
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyflakes==3.3.2
- pytest==8.3.5
- referencing==0.36.2
- rpds-py==0.24.0
- six==1.17.0
- statistics==1.0.3.5
- stevedore==4.1.1
- testfixtures==8.3.0
- tomli==2.2.1
- typing-extensions==4.13.0
- zipfile2==0.0.12
prefix: /opt/conda/envs/okonomiyaki
| [
"okonomiyaki/file_formats/tests/test__egg_info.py::TestEggMetadata::test_from_json_dict",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestEggMetadata::test_mkl_roundtrip",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestEggMetadata::test_to_json_dict"
]
| []
| [
"okonomiyaki/file_formats/tests/test__egg_info.py::TestRequirement::test_from_spec_string",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestRequirement::test_from_string",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestRequirement::test_str",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestLegacySpecDepend::test_blacklisted_platform",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestLegacySpecDepend::test_create_from_egg1",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestLegacySpecDepend::test_create_from_egg2",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestLegacySpecDepend::test_error_python_to_python_tag",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestLegacySpecDepend::test_format_1_3",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestLegacySpecDepend::test_format_1_4",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestLegacySpecDepend::test_from_string",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestLegacySpecDepend::test_missing_spec_depend",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestLegacySpecDepend::test_to_string",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestLegacySpecDepend::test_unsupported_metadata_version",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestLegacySpecDepend::test_windows_platform",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestLegacySpecDependAbi::test_default_extension_python_egg",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestLegacySpecDependAbi::test_default_no_python_egg",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestLegacySpecDependAbi::test_default_pure_python_egg",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestLegacySpecDependAbi::test_default_pure_python_egg_pypi",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestLegacySpecDependAbi::test_to_string",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestLegacySpecDependPlatform::test_default_all_none",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestLegacySpecDependPlatform::test_default_rh5_32",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestLegacySpecDependPlatform::test_default_rh5_64",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestLegacySpecDependPlatform::test_default_win_32",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestLegacySpecDependPlatform::test_default_win_64",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestGuessPlatformAbi::test_no_platform",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestGuessPlatformAbi::test_no_python_implementation",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestGuessPlatformAbi::test_python_27",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestGuessPlatformAbi::test_python_34",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestGuessPlatformAbi::test_python_35",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestEggName::test_split_egg_name",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestEggName::test_split_egg_name_invalid",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestParseRawspec::test_invalid_spec_strings",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestParseRawspec::test_simple_1_1",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestParseRawspec::test_simple_1_2",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestParseRawspec::test_simple_unsupported",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestParseRawspec::test_with_dependencies",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestParseRawspec::test_with_none",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestEggMetadata::test_blacklisted_pkg_info",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestEggMetadata::test_blacklisted_platform",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestEggMetadata::test_blacklisted_python_tag",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestEggMetadata::test_dump_blacklisted",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestEggMetadata::test_dump_blacklisted_platform",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestEggMetadata::test_dump_simple",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestEggMetadata::test_fixed_requirement",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestEggMetadata::test_from_cross_platform_egg",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestEggMetadata::test_from_platform_egg",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestEggMetadata::test_no_pkg_info",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestEggMetadata::test_platform_abi",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestEggMetadata::test_platform_abi_no_python",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestEggMetadata::test_simple",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestEggMetadata::test_simple_non_python_egg",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestEggMetadata::test_strictness",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestEggMetadata::test_support_higher_compatible_version",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestEggMetadata::test_support_lower_compatible_version",
"okonomiyaki/file_formats/tests/test__egg_info.py::TestEggMetadata::test_to_spec_string"
]
| []
| BSD License | 596 | [
"okonomiyaki/file_formats/_egg_info.py"
]
| [
"okonomiyaki/file_formats/_egg_info.py"
]
|
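The added pair of methods defines a round-trip contract that the new tests exercise directly: `to_json_dict` emits a plain dict of text, lists, and `None`, and `from_json_dict` rebuilds an equal `EggMetadata`, with the `pkg_info` object carried out-of-band rather than embedded in the dict. A usage sketch under the assumption of a local Enthought egg (the filename here is hypothetical):

```python
# Import from the module shown in the patch; the package may also re-export it.
from okonomiyaki.file_formats._egg_info import EggMetadata

egg_path = "nose-1.3.4-1.egg"           # hypothetical local artefact

metadata = EggMetadata.from_egg(egg_path)
json_dict = metadata.to_json_dict()     # JSON-serializable dict

# PKG-INFO is not part of the dict, so it is passed back explicitly.
restored = EggMetadata.from_json_dict(json_dict, metadata.pkg_info)
assert restored == metadata
```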
|
ifosch__accloudtant-88 | 79e3cf915208ffd58a63412ffc87bd48f8bfb2dd | 2016-06-24 12:10:46 | 33f90ff0bc1639c9fe793afd837eee80170caf3e | diff --git a/accloudtant/aws/instance.py b/accloudtant/aws/instance.py
index d83c3dc..f360c03 100644
--- a/accloudtant/aws/instance.py
+++ b/accloudtant/aws/instance.py
@@ -28,6 +28,9 @@ class Instance(object):
'best': 0.0,
}
+ def __repr__(self):
+ return "<accloudtant.aws.instance.Instance id={}>".format(self.id)
+
@property
def current(self):
return self._prices['current']
diff --git a/accloudtant/aws/reports.py b/accloudtant/aws/reports.py
index 0bbbeb9..e8f2fc9 100644
--- a/accloudtant/aws/reports.py
+++ b/accloudtant/aws/reports.py
@@ -25,9 +25,26 @@ class Reports(object):
def __init__(self):
ec2 = boto3.resource('ec2')
ec2_client = boto3.client('ec2')
+ instances_filters = [{
+ 'Name': 'instance-state-name',
+ 'Values': ['running', ],
+ }, ]
+ reserved_instances_filters = [{
+ 'Name': 'state',
+ 'Values': ['active', ],
+ }, ]
try:
- self.instances = [Instance(i) for i in ec2.instances.all()]
- self.reserved_instances = ec2_client.describe_reserved_instances()
+ self.instances = [
+ Instance(i)
+ for i in ec2.instances.filter(Filters=instances_filters)
+ ]
+ # self.instances = [Instance(i) for i in ec2.instances.all()]
+ self.reserved_instances = ec2_client.\
+ describe_reserved_instances(
+ Filters=reserved_instances_filters
+ )
+ # self.reserved_instances = ec2_client
+ # .describe_reserved_instances()
except exceptions.NoCredentialsError:
print("Error: no AWS credentials found", file=sys.stderr)
sys.exit(1)
| Iterate over appropriate subsets for performance improvement
When generating reports, the code iterates over all instances and all reserved instances to link them; it should iterate only over running instances and active reserved instances. | ifosch/accloudtant | diff --git a/tests/aws/conftest.py b/tests/aws/conftest.py
index 0594830..5a97b58 100644
--- a/tests/aws/conftest.py
+++ b/tests/aws/conftest.py
@@ -65,6 +65,14 @@ def ec2_resource():
for instance in self.instances:
yield MockEC2Instance(instance)
+ def filter(self, Filters=None):
+ if Filters is None:
+ self.all()
+ if Filters[0]['Name'] == 'instance-state-name':
+ for instance in self.instances:
+ if instance['state']['Name'] in Filters[0]['Values']:
+ yield MockEC2Instance(instance)
+
class MockEC2Resource(object):
def __init__(self, responses):
self.responses = responses
@@ -94,7 +102,19 @@ def ec2_client():
def describe_instances(self):
return self.instances
- def describe_reserved_instances(self):
+ def describe_reserved_instances(self, Filters=None):
+ final_reserved = {'ReservedInstances': []}
+ if Filters is None:
+ final_reserved = self.reserved
+ else:
+ filter = Filters[0]
+ if filter['Name'] == 'state':
+ final_reserved['ReservedInstances'] = [
+ reserved_instance
+ for reserved_instance
+ in self.reserved['ReservedInstances']
+ if reserved_instance['State'] not in filter['Values']
+ ]
return self.reserved
class MockEC2ClientCall(object):
diff --git a/tests/aws/report_running_expected.txt b/tests/aws/report_running_expected.txt
new file mode 100644
index 0000000..befecd0
--- /dev/null
+++ b/tests/aws/report_running_expected.txt
@@ -0,0 +1,8 @@
+Id Name Type AZ OS State Launch time Reserved Current hourly price Renewed hourly price
+---------- --------- ---------- ---------- ------------------------ ------- ------------------- ---------- ---------------------- ----------------------
+i-912a4392 web1 c3.8xlarge us-east-1c Windows running 2015-10-22 14:15:10 Yes 0.5121 0.3894
+i-1840273e app1 r2.8xlarge us-east-1b Red Hat Enterprise Linux running 2015-10-22 14:15:10 Yes 0.3894 0.3794
+i-9840273d app2 r2.8xlarge us-east-1c SUSE Linux running 2015-10-22 14:15:10 Yes 0.5225 0.389
+i-1840273c database2 r2.8xlarge us-east-1c Linux/UNIX running 2015-10-22 14:15:10 Yes 0.611 0.379
+i-1840273b database3 r2.8xlarge us-east-1c Linux/UNIX running 2015-10-22 14:15:10 Yes 0.611 0.379
+i-912a4393 test t1.micro us-east-1c Linux/UNIX running 2015-10-22 14:15:10 No 0.767 0.3892
diff --git a/tests/aws/test_reports.py b/tests/aws/test_reports.py
index 35fd236..d0f6793 100644
--- a/tests/aws/test_reports.py
+++ b/tests/aws/test_reports.py
@@ -17,6 +17,10 @@ from dateutil.tz import tzutc
import accloudtant.aws.reports
+def get_future_date(years=1):
+ return datetime.datetime.now() + datetime.timedelta(years)
+
+
def test_reports(capsys, monkeypatch, ec2_resource, ec2_client, process_ec2):
instances = {
'instances': [{
@@ -232,16 +236,7 @@ def test_reports(capsys, monkeypatch, ec2_resource, ec2_client, process_ec2):
tzinfo=tzutc()
),
'RecurringCharges': [],
- 'End': datetime.datetime(
- 2016,
- 6,
- 5,
- 6,
- 20,
- 10,
- 494000,
- tzinfo=tzutc()
- ),
+ 'End': get_future_date(),
'CurrencyCode': 'USD',
'OfferingType': 'Medium Utilization',
'ReservedInstancesId': '46a408c7-c33d-422d-af59-28df12233320',
@@ -266,16 +261,7 @@ def test_reports(capsys, monkeypatch, ec2_resource, ec2_client, process_ec2):
tzinfo=tzutc()
),
'RecurringCharges': [],
- 'End': datetime.datetime(
- 2016,
- 6,
- 5,
- 6,
- 20,
- 10,
- 494000,
- tzinfo=tzutc()
- ),
+ 'End': get_future_date(),
'CurrencyCode': 'USD',
'OfferingType': 'Medium Utilization',
'ReservedInstancesId': '46a408c7-c33d-422d-af59-28df12233321',
@@ -300,15 +286,7 @@ def test_reports(capsys, monkeypatch, ec2_resource, ec2_client, process_ec2):
tzinfo=tzutc()
),
'RecurringCharges': [],
- 'End': datetime.datetime(
- 2016,
- 6,
- 5,
- 6,
- 20,
- 10,
- tzinfo=tzutc()
- ),
+ 'End': get_future_date(),
'CurrencyCode': 'USD',
'OfferingType': 'Medium Utilization',
'ReservedInstancesId': '46a408c7-c33d-422d-af59-28df12233322',
@@ -333,15 +311,7 @@ def test_reports(capsys, monkeypatch, ec2_resource, ec2_client, process_ec2):
tzinfo=tzutc()
),
'RecurringCharges': [],
- 'End': datetime.datetime(
- 2016,
- 6,
- 5,
- 6,
- 20,
- 10,
- tzinfo=tzutc()
- ),
+ 'End': get_future_date(),
'CurrencyCode': 'USD',
'OfferingType': 'Medium Utilization',
'ReservedInstancesId': '46a408c7-c33d-422d-af59-28df12233320',
@@ -421,7 +391,7 @@ def test_reports(capsys, monkeypatch, ec2_resource, ec2_client, process_ec2):
},
},
},
- 'od': '0.767',
+ 'od': '0.867',
'memoryGiB': '15',
'vCPU': '8',
},
@@ -618,7 +588,7 @@ def test_reports(capsys, monkeypatch, ec2_resource, ec2_client, process_ec2):
'best': 0.3892,
},
}
- expected = open('tests/aws/report_expected.txt', 'r').read()
+ expected = open('tests/aws/report_running_expected.txt', 'r').read()
monkeypatch.setattr('boto3.resource', ec2_resource)
ec2_resource.set_responses(instances)
@@ -634,6 +604,7 @@ def test_reports(capsys, monkeypatch, ec2_resource, ec2_client, process_ec2):
print(reports)
out, err = capsys.readouterr()
+ assert(len(reports.instances) == 6)
for mock in instances['instances']:
mock['current'] = instances_prices[mock['id']]['current']
mock['best'] = instances_prices[mock['id']]['best']
@@ -641,5 +612,4 @@ def test_reports(capsys, monkeypatch, ec2_resource, ec2_client, process_ec2):
if instance.id == mock['id']:
assert(instance.current == mock['current'])
assert(instance.best == mock['best'])
- print(out)
assert(out == expected)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 2
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/ifosch/accloudtant.git@79e3cf915208ffd58a63412ffc87bd48f8bfb2dd#egg=accloudtant
boto3==1.1.4
botocore==1.2.10
click==4.1
docutils==0.21.2
exceptiongroup==1.2.2
futures==2.2.0
iniconfig==2.1.0
jmespath==0.10.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
python-dateutil==2.9.0.post0
requests==2.8.1
six==1.17.0
tabulate==0.7.5
tomli==2.2.1
| name: accloudtant
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- boto3==1.1.4
- botocore==1.2.10
- click==4.1
- docutils==0.21.2
- exceptiongroup==1.2.2
- futures==2.2.0
- iniconfig==2.1.0
- jmespath==0.10.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- requests==2.8.1
- six==1.17.0
- tabulate==0.7.5
- tomli==2.2.1
prefix: /opt/conda/envs/accloudtant
| [
"tests/aws/test_reports.py::test_reports"
]
| []
| []
| []
| null | 597 | [
"accloudtant/aws/instance.py",
"accloudtant/aws/reports.py"
]
| [
"accloudtant/aws/instance.py",
"accloudtant/aws/reports.py"
]
|
|
ifosch__accloudtant-90 | 96ca7fbc89be0344db1af0ec2bc9fdecff6380eb | 2016-06-24 19:51:52 | 33f90ff0bc1639c9fe793afd837eee80170caf3e | diff --git a/accloudtant/aws/instance.py b/accloudtant/aws/instance.py
index f360c03..02ca135 100644
--- a/accloudtant/aws/instance.py
+++ b/accloudtant/aws/instance.py
@@ -94,11 +94,11 @@ class Instance(object):
def match_reserved_instance(self, reserved):
return not (
self.state != 'running' or
- reserved['State'] != 'active' or
- reserved['InstancesLeft'] == 0 or
- reserved['ProductDescription'] != self.operating_system or
- reserved['InstanceType'] != self.size or
- reserved['AvailabilityZone'] != self.availability_zone
+ reserved.state != 'active' or
+ reserved.instances_left == 0 or
+ reserved.product_description != self.operating_system or
+ reserved.instance_type != self.size or
+ reserved.az != self.availability_zone
)
diff --git a/accloudtant/aws/reports.py b/accloudtant/aws/reports.py
index e8f2fc9..bcfe9c0 100644
--- a/accloudtant/aws/reports.py
+++ b/accloudtant/aws/reports.py
@@ -17,6 +17,7 @@ import boto3
from botocore import exceptions
from tabulate import tabulate
from accloudtant.aws.instance import Instance
+from accloudtant.aws.reserved_instance import ReservedInstance
from accloudtant.aws.prices import Prices
import sys
@@ -39,10 +40,12 @@ class Reports(object):
for i in ec2.instances.filter(Filters=instances_filters)
]
# self.instances = [Instance(i) for i in ec2.instances.all()]
- self.reserved_instances = ec2_client.\
- describe_reserved_instances(
+ self.reserved_instances = [
+ ReservedInstance(i)
+ for i in ec2_client.describe_reserved_instances(
Filters=reserved_instances_filters
- )
+ )['ReservedInstances']
+ ]
# self.reserved_instances = ec2_client
# .describe_reserved_instances()
except exceptions.NoCredentialsError:
@@ -60,13 +63,11 @@ class Reports(object):
instance.current = 0.0
instance_all_upfront = instance_size['ri']['yrTerm3']['allUpfront']
instance.best = float(instance_all_upfront['effectiveHourly'])
- for reserved in self.reserved_instances['ReservedInstances']:
- if 'InstancesLeft' not in reserved.keys():
- reserved['InstancesLeft'] = reserved['InstanceCount']
+ for reserved in self.reserved_instances:
if instance.match_reserved_instance(reserved):
instance.reserved = 'Yes'
- instance.current = reserved['UsagePrice']
- reserved['InstancesLeft'] -= 1
+ instance.current = reserved.usage_price
+ reserved.link(instance)
break
def __repr__(self):
diff --git a/accloudtant/aws/reserved_instance.py b/accloudtant/aws/reserved_instance.py
new file mode 100644
index 0000000..4073a20
--- /dev/null
+++ b/accloudtant/aws/reserved_instance.py
@@ -0,0 +1,86 @@
+
+# Copyright 2015-2016 See CONTRIBUTORS.md file
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class ReservedInstance(object):
+ def __init__(self, data):
+ self.reserved_instance = data
+ if data['State'] != 'active':
+ self.instances_left = 0
+ else:
+ self.instances_left = self.instance_count
+
+ @property
+ def id(self):
+ return self.reserved_instance['ReservedInstancesId']
+
+ @property
+ def az(self):
+ return self.reserved_instance['AvailabilityZone']
+
+ @property
+ def instance_type(self):
+ return self.reserved_instance['InstanceType']
+
+ @property
+ def product_description(self):
+ return self.reserved_instance['ProductDescription']
+
+ @property
+ def start(self):
+ return self.reserved_instance['Start']
+
+ @property
+ def end(self):
+ return self.reserved_instance['End']
+
+ @property
+ def state(self):
+ return self.reserved_instance['State']
+
+ @property
+ def duration(self):
+ return self.reserved_instance['Duration']
+
+ @property
+ def offering_type(self):
+ return self.reserved_instance['OfferingType']
+
+ @property
+ def usage_price(self):
+ return self.reserved_instance['UsagePrice']
+
+ @property
+ def fixed_price(self):
+ return self.reserved_instance['FixedPrice']
+
+ @property
+ def currency_code(self):
+ return self.reserved_instance['CurrencyCode']
+
+ @property
+ def recurring_charges(self):
+ return self.reserved_instance['RecurringCharges']
+
+ @property
+ def instance_count(self):
+ return self.reserved_instance['InstanceCount']
+
+ @property
+ def instance_tenancy(self):
+ return self.reserved_instance['InstanceTenancy']
+
+ def link(self, instance):
+ self.instances_left -= 1
| Create a Reserved Instance type/class
Reserved instances are currently simple dictionaries. Implementing these as objects might help to use them. | ifosch/accloudtant | diff --git a/tests/aws/test_instance.py b/tests/aws/test_instance.py
index 6f6d73c..fae2e82 100644
--- a/tests/aws/test_instance.py
+++ b/tests/aws/test_instance.py
@@ -16,6 +16,7 @@ import datetime
import pytest
from dateutil.tz import tzutc
import accloudtant.aws.instance
+from accloudtant.aws.reserved_instance import ReservedInstance
from conftest import MockEC2Instance
@@ -261,7 +262,7 @@ def test_match_reserved_instance(benchmark):
),
'console_output': {'Output': 'RHEL Linux', },
}
- reserved_instance = {
+ ri_data = {
'ProductDescription': 'Red Hat Enterprise Linux',
'InstanceTenancy': 'default',
'InstanceCount': 1,
@@ -298,31 +299,36 @@ def test_match_reserved_instance(benchmark):
ec2_instance = MockEC2Instance(instance_data)
instance = accloudtant.aws.instance.Instance(ec2_instance)
- reserved_instance['InstancesLeft'] = reserved_instance['InstanceCount']
+ reserved_instance = ReservedInstance(ri_data)
assert(instance.match_reserved_instance(reserved_instance))
benchmark(instance.match_reserved_instance, reserved_instance)
- reserved_instance['State'] = 'pending'
+ ri_data['State'] = 'pending'
+ reserved_instance = ReservedInstance(ri_data)
assert(not instance.match_reserved_instance(reserved_instance))
- reserved_instance['State'] = 'active'
- reserved_instance['InstancesLeft'] = 0
+ ri_data['State'] = 'active'
+ reserved_instance = ReservedInstance(ri_data)
+ reserved_instance.instances_left = 0
assert(not instance.match_reserved_instance(reserved_instance))
- reserved_instance['InstacesLeft'] = 1
- reserved_instance['ProductDescription'] = 'Windows'
+ ri_data['ProductDescription'] = 'Windows'
+ reserved_instance = ReservedInstance(ri_data)
+ reserved_instance.instances_left = 1
assert(not instance.match_reserved_instance(reserved_instance))
- reserved_instance['ProductionDescription'] = 'Red Hat Enterprise Linux'
- reserved_instance['InstaceType'] = 't1.micro'
+ ri_data['ProductionDescription'] = 'Red Hat Enterprise Linux'
+ ri_data['InstaceType'] = 't1.micro'
+ reserved_instance = ReservedInstance(ri_data)
assert(not instance.match_reserved_instance(reserved_instance))
- reserved_instance['InstaceType'] = 'r2.8xlarge'
- reserved_instance['AvailabilityZone'] = 'us-east-1c'
+ ri_data['InstaceType'] = 'r2.8xlarge'
+ ri_data['AvailabilityZone'] = 'us-east-1c'
+ reserved_instance = ReservedInstance(ri_data)
assert(not instance.match_reserved_instance(reserved_instance))
diff --git a/tests/aws/test_reserved_instance.py b/tests/aws/test_reserved_instance.py
new file mode 100644
index 0000000..9627ebf
--- /dev/null
+++ b/tests/aws/test_reserved_instance.py
@@ -0,0 +1,189 @@
+# Copyright 2015-2016 See CONTRIBUTORS.md file
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import pytest
+from dateutil.tz import tzutc
+import accloudtant.aws.reserved_instance
+from conftest import MockEC2Instance
+from test_reports import get_future_date
+
+
+def test_retired_ri():
+ az = 'us-east-1b'
+ ri_data = {
+ 'ProductDescription': 'Linux/UNIX',
+ 'InstanceTenancy': 'default',
+ 'InstanceCount': 29,
+ 'InstanceType': 'm1.large',
+ 'Start': datetime.datetime(
+ 2011,
+ 6,
+ 5,
+ 6,
+ 20,
+ 10,
+ 494000,
+ tzinfo=tzutc()
+ ),
+ 'RecurringCharges': [],
+ 'End': datetime.datetime(
+ 2011,
+ 6,
+ 5,
+ 6,
+ 20,
+ 10,
+ tzinfo=tzutc()
+ ),
+ 'CurrencyCode': 'USD',
+ 'OfferingType': 'Medium Utilization',
+ 'ReservedInstancesId': '46a408c7-c33d-422d-af59-28df1223331f',
+ 'FixedPrice': 910.0,
+ 'AvailabilityZone': az,
+ 'UsagePrice': 0.12,
+ 'Duration': 31536000,
+ 'State': 'retired',
+ }
+
+ ri = accloudtant.aws.reserved_instance.ReservedInstance(ri_data)
+
+ assert(ri.id == ri_data['ReservedInstancesId'])
+ assert(ri.product_description == ri_data['ProductDescription'])
+ assert(ri.instance_tenancy == ri_data['InstanceTenancy'])
+ assert(ri.instance_count == ri_data['InstanceCount'])
+ assert(ri.instance_type == ri_data['InstanceType'])
+ assert(ri.start == ri_data['Start'])
+ assert(ri.recurring_charges == ri_data['RecurringCharges'])
+ assert(ri.end == ri_data['End'])
+ assert(ri.currency_code == ri_data['CurrencyCode'])
+ assert(ri.offering_type == ri_data['OfferingType'])
+ assert(ri.fixed_price == ri_data['FixedPrice'])
+ assert(ri.az == ri_data['AvailabilityZone'])
+ assert(ri.usage_price == ri_data['UsagePrice'])
+ assert(ri.duration == ri_data['Duration'])
+ assert(ri.state == ri_data['State'])
+ assert(ri.instances_left == 0)
+
+
+def test_active_ri():
+ az = 'us-east-1b'
+ ri_data = {
+ 'ProductDescription': 'Linux/UNIX',
+ 'InstanceTenancy': 'default',
+ 'InstanceCount': 1,
+ 'InstanceType': 'm1.large',
+ 'Start': datetime.datetime(
+ 2011,
+ 6,
+ 5,
+ 6,
+ 20,
+ 10,
+ 494000,
+ tzinfo=tzutc()
+ ),
+ 'RecurringCharges': [],
+ 'End': get_future_date(),
+ 'CurrencyCode': 'USD',
+ 'OfferingType': 'Medium Utilization',
+ 'ReservedInstancesId': '46a408c7-c33d-422d-af59-28df1223331f',
+ 'FixedPrice': 910.0,
+ 'AvailabilityZone': az,
+ 'UsagePrice': 0.12,
+ 'Duration': 31536000,
+ 'State': 'active',
+ }
+
+ ri = accloudtant.aws.reserved_instance.ReservedInstance(ri_data)
+
+ assert(ri.id == ri_data['ReservedInstancesId'])
+ assert(ri.product_description == ri_data['ProductDescription'])
+ assert(ri.instance_tenancy == ri_data['InstanceTenancy'])
+ assert(ri.instance_count == ri_data['InstanceCount'])
+ assert(ri.instance_type == ri_data['InstanceType'])
+ assert(ri.start == ri_data['Start'])
+ assert(ri.recurring_charges == ri_data['RecurringCharges'])
+ assert(ri.end == ri_data['End'])
+ assert(ri.currency_code == ri_data['CurrencyCode'])
+ assert(ri.offering_type == ri_data['OfferingType'])
+ assert(ri.fixed_price == ri_data['FixedPrice'])
+ assert(ri.az == ri_data['AvailabilityZone'])
+ assert(ri.usage_price == ri_data['UsagePrice'])
+ assert(ri.duration == ri_data['Duration'])
+ assert(ri.state == ri_data['State'])
+ assert(ri.instances_left == ri_data['InstanceCount'])
+
+
+def test_ri_link():
+ az = 'us-east-1b'
+ ri_data = {
+ 'ProductDescription': 'Linux/UNIX',
+ 'InstanceTenancy': 'default',
+ 'InstanceCount': 1,
+ 'InstanceType': 'm1.large',
+ 'Start': datetime.datetime(
+ 2015,
+ 6,
+ 5,
+ 6,
+ 20,
+ 10,
+ 494000,
+ tzinfo=tzutc()
+ ),
+ 'RecurringCharges': [],
+ 'End': get_future_date(),
+ 'CurrencyCode': 'USD',
+ 'OfferingType': 'Medium Utilization',
+ 'ReservedInstancesId': '46a408c7-c33d-422d-af59-28df1223331f',
+ 'FixedPrice': 910.0,
+ 'AvailabilityZone': az,
+ 'UsagePrice': 0.12,
+ 'Duration': 31536000,
+ 'State': 'active',
+ }
+ instance_data = {
+ 'id': 'i-1840273e',
+ 'tags': [{
+ 'Key': 'Name',
+ 'Value': 'app1',
+ }, ],
+ 'instance_type': 'm1.large',
+ 'placement': {
+ 'AvailabilityZone': az,
+ },
+ 'state': {
+ 'Name': 'running',
+ },
+ 'launch_time': datetime.datetime(
+ 2015,
+ 10,
+ 22,
+ 14,
+ 15,
+ 10,
+ tzinfo=tzutc()
+ ),
+ 'console_output': {'Output': 'Linux', },
+ }
+
+ ri = accloudtant.aws.reserved_instance.ReservedInstance(ri_data)
+ instance = MockEC2Instance(instance_data)
+
+ assert(ri.instances_left == 1)
+
+ ri.link(instance)
+
+ assert(ri.instances_left == 0)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 2
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/ifosch/accloudtant.git@96ca7fbc89be0344db1af0ec2bc9fdecff6380eb#egg=accloudtant
boto3==1.1.4
botocore==1.2.10
click==4.1
docutils==0.21.2
exceptiongroup==1.2.2
futures==2.2.0
iniconfig==2.1.0
jmespath==0.10.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
python-dateutil==2.9.0.post0
requests==2.8.1
six==1.17.0
tabulate==0.7.5
tomli==2.2.1
| name: accloudtant
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- boto3==1.1.4
- botocore==1.2.10
- click==4.1
- docutils==0.21.2
- exceptiongroup==1.2.2
- futures==2.2.0
- iniconfig==2.1.0
- jmespath==0.10.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- requests==2.8.1
- six==1.17.0
- tabulate==0.7.5
- tomli==2.2.1
prefix: /opt/conda/envs/accloudtant
| [
"tests/aws/test_instance.py::test_instance",
"tests/aws/test_instance.py::test_unnamed_instance",
"tests/aws/test_instance.py::test_guess_os",
"tests/aws/test_reserved_instance.py::test_retired_ri",
"tests/aws/test_reserved_instance.py::test_active_ri",
"tests/aws/test_reserved_instance.py::test_ri_link"
]
| []
| []
| []
| null | 598 | [
"accloudtant/aws/reserved_instance.py",
"accloudtant/aws/instance.py",
"accloudtant/aws/reports.py"
]
| [
"accloudtant/aws/reserved_instance.py",
"accloudtant/aws/instance.py",
"accloudtant/aws/reports.py"
]
|
|
simphony__simphony-remote-21 | ca4d029fe1edd54e62a20809aba651401ed6f587 | 2016-06-27 10:40:39 | d7657fa7d4297b4c922a900ba9a8a337184113ef | diff --git a/remoteappmanager/docker/container.py b/remoteappmanager/docker/container.py
index f4e8677..6123205 100644
--- a/remoteappmanager/docker/container.py
+++ b/remoteappmanager/docker/container.py
@@ -23,7 +23,7 @@ class Container(HasTraits):
ip = Unicode()
#: ...and port where the container service will be listening
- port = Int()
+ port = Int(None, allow_none=True)
@property
def url(self):
@@ -38,3 +38,44 @@ class Container(HasTraits):
ip=self.ip,
port=self.port,
)
+
+ def __repr__(self):
+ return ('<Container(docker_id={0}, name={1}, image_name={2}, '
+ 'image_id={3}, ip={4}, port={5}>').format(
+ self.docker_id, self.name,
+ self.image_name, self.image_id,
+ self.ip, self.port)
+
+ @classmethod
+ def from_docker_dict(cls, docker_dict):
+ """Returns a Container object with the info given by a
+ docker Client.
+
+ Parameters
+ ----------
+ docker_dict : dict
+ One item from the result of docker.Client.containers
+
+ Returns
+ -------
+ container : Container
+
+ Examples
+ --------
+ >>> # containers is a list of dict
+ >>> containers = docker.Client().containers()
+
+ >>> Container.from_docker_dict(containers[0])
+ """
+ if docker_dict.get('Ports'):
+ ip = docker_dict['Ports'][0].get('IP', "")
+ port = docker_dict['Ports'][0].get('PublicPort')
+ else:
+ ip = ""
+ port = None
+
+ return cls(docker_id=docker_dict.get('Id', ''),
+ name=docker_dict.get('Names', ('',))[0],
+ image_name=docker_dict.get('Image', ''),
+ image_id=docker_dict.get('ImageID', ''),
+ ip=ip, port=port)
diff --git a/remoteappmanager/docker/container_manager.py b/remoteappmanager/docker/container_manager.py
index 19559cf..94b7b39 100644
--- a/remoteappmanager/docker/container_manager.py
+++ b/remoteappmanager/docker/container_manager.py
@@ -93,26 +93,44 @@ class ContainerManager(LoggingMixin):
self._stop_pending.remove(container_id)
@gen.coroutine
- def containers_for_image(self, image_id):
- """Returns the containers for a given image that are managed
- by this object.
+ def containers_for_image(self, image_id_or_name, user_name=None):
+ """Returns the currently running containers for a given image.
+
+ If `user_name` is given, only returns containers started by the
+ given user name.
It is a coroutine because we might want to run an inquire to the docker
service if not present.
Parameters
----------
- image_id: str
- The image id
+ image_id_or_name: str
+ The image id or name
+
+ Optional parameters
+ -------------------
+ user_name : str
+ Name of the user who started the container
Return
------
A list of container objects, or an empty list if not present.
"""
- try:
- return self._containers_for_image[image_id]
- except KeyError:
- return []
+ if user_name:
+ user_labels = _get_container_labels(user_name)
+ if user_labels:
+ filters = {'label': '{0}={1}'.format(*user_labels.popitem())}
+ else:
+ filters = {}
+
+ filters['ancestor'] = image_id_or_name
+
+ containers = yield self.docker_client.containers(filters=filters)
+ return [Container.from_docker_dict(container)
+ for container in containers
+ # Require further filtering as ancestor include grandparents
+ if (container.get('Image') == image_id_or_name or
+ container.get('ImageID') == image_id_or_name)]
@gen.coroutine
def all_images(self):
diff --git a/remoteappmanager/handlers/home_handler.py b/remoteappmanager/handlers/home_handler.py
index c940fe4..a9fa728 100644
--- a/remoteappmanager/handlers/home_handler.py
+++ b/remoteappmanager/handlers/home_handler.py
@@ -11,6 +11,7 @@ from tornado.httpclient import AsyncHTTPClient, HTTPError
from tornado.log import app_log
from remoteappmanager.handlers.base_handler import BaseHandler
+from remoteappmanager.docker.container import Container
# FIXME: replace these with ORM objects
@@ -31,7 +32,7 @@ class HomeHandler(BaseHandler):
for image in all_images:
containers = yield container_manager.containers_for_image(
- image.docker_id)
+ image.docker_id, self.current_user)
container = (containers[0] if len(containers) > 0 else None)
# For now we assume we have only one.
images_info.append({
@@ -65,37 +66,47 @@ class HomeHandler(BaseHandler):
exc_info=True)
return
- yield handler(self.current_user, options)
-
- # Subhandling after post
-
- @gen.coroutine
- def _actionhandler_start(self, user_name, options):
- """Sub handling. Acts in response to a "start" request from
- the user."""
- # Start the single-user server
-
try:
- image_name = options["image_name"][0]
- container = yield self._start_container(user_name, image_name)
+ yield handler(self.current_user, options)
except Exception as e:
# Create a random reference number for support
ref = str(uuid.uuid1())
- self.log.exception("Failed to spawn docker image. %s "
- "Ref: %s",
- str(e), ref)
+ self.log.exception("Failed with POST action: {0}. {1} "
+ "Ref: {2}".format(
+ action, str(e), ref))
images_info = yield self._get_images_info()
# Render the home page again with the error message
# User-facing error message (less info)
- message = ('Failed to start "{image_name}". Reason: {error_type} '
+ message = ('Failed to {action} "{image_name}". '
+ 'Reason: {error_type} '
'(Ref: {ref})')
self.render('home.html', images_info=images_info,
error_message=message.format(
- image_name=image_name,
+ action=action,
+ image_name=options["image_name"][0],
error_type=type(e).__name__,
ref=ref))
+
+ # Subhandling after post
+
+ @gen.coroutine
+ def _actionhandler_start(self, user_name, options):
+ """Sub handling. Acts in response to a "start" request from
+ the user."""
+ # Start the single-user server
+
+ try:
+ # FIXME: too many operations in one try block, should be separated
+ # for better error handling
+ image_name = options["image_name"][0]
+ container = yield self._start_container(user_name, image_name)
+ yield self._wait_for_container_ready(container)
+ except Exception as e:
+ # Clean up, if the container is running
+ yield self._stop_and_remove_container(user_name, image_name)
+ raise e
else:
# The server is up and running. Now contact the proxy and add
# the container url to it.
@@ -112,10 +123,16 @@ class HomeHandler(BaseHandler):
It is not different from pasting the appropriate URL in the
web browser, but we validate the container id first.
"""
- container = self._container_from_options(options)
+ container = yield self._container_from_options(options)
if not container:
+ self.finish("Unable to view the application")
return
+ yield self._wait_for_container_ready(container)
+
+ # in case the reverse proxy is not already set up
+ yield self.application.reverse_proxy_add_container(container)
+
url = self.application.container_url_abspath(container)
self.log.info('Redirecting to ' + url)
self.redirect(url)
@@ -127,11 +144,18 @@ class HomeHandler(BaseHandler):
app = self.application
container_manager = app.container_manager
- container = self._container_from_options(options)
+ container = yield self._container_from_options(options)
if not container:
+ self.finish("Unable to view the application")
return
- yield app.reverse_proxy_remove_container(container)
+ try:
+ yield app.reverse_proxy_remove_container(container)
+ except HTTPError as http_error:
+ # The reverse proxy may be absent to start with
+ if http_error.code != 404:
+ raise http_error
+
yield container_manager.stop_and_remove_container(container.docker_id)
# We don't have fancy stuff at the moment to change the button, so
@@ -140,31 +164,35 @@ class HomeHandler(BaseHandler):
# private
+ @gen.coroutine
def _container_from_options(self, options):
"""Support routine to reduce duplication.
Retrieves and returns the container if valid and present.
- If not present, performs the http response and returns None.
+
+ If not present, returns None
"""
container_manager = self.application.container_manager
+
try:
container_id = options["container_id"][0]
except (KeyError, IndexError):
self.log.exception(
"Failed to retrieve valid container_id from form"
)
- self.finish("Unable to retrieve valid container_id value")
return None
- try:
- container = container_manager.containers[container_id]
- except KeyError:
- self.log.error("Unable to find container_id {} in manager".format(
- container_id))
- self.finish("Unable to find specified container_id")
- return None
+ container_dict = yield container_manager.docker_client.containers(
+ filters={'id': container_id})
- return container
+ if container_dict:
+ return Container.from_docker_dict(container_dict[0])
+ else:
+ self.log.exception(
+ "Failed to retrieve valid container from container id: %s",
+ container_id
+ )
+ return None
@gen.coroutine
def _start_container(self, user_name, image_name):
@@ -212,46 +240,21 @@ class HomeHandler(BaseHandler):
e.reason = 'error'
raise e
- # Note, we use the jupyterhub ORM server, but we don't use it for
- # any database activity.
- # Note: the end / is important. We want to get a 200 from the actual
- # websockify server, not the nginx (which presents the redirection
- # page).
- server_url = "http://{}:{}{}/".format(
- container.ip,
- container.port,
- self.application.container_url_abspath(container))
+ return container
- try:
- yield _wait_for_http_server_2xx(
- server_url,
- self.application.config.network_timeout)
- except TimeoutError as e:
- # Note: Using TimeoutError instead of gen.TimeoutError as above
- # is not a mistake.
- self.log.warning(
- "{user}'s container never showed up at {url} "
- "after {http_timeout} seconds. Giving up.".format(
- user=user_name,
- url=server_url,
- http_timeout=self.application.config.network_timeout,
- )
- )
- e.reason = 'timeout'
- raise e
- except Exception as e:
- self.log.exception(
- "Unhandled error waiting for {user}'s server "
- "to show up at {url}: {error}".format(
- user=user_name,
- url=server_url,
- error=e,
- )
- )
- e.reason = 'error'
- raise e
+ @gen.coroutine
+ def _stop_and_remove_container(self, user_name, image_name):
+ """ Stop and remove the container associated with the given
+ user name and image name, if exists.
+ """
+ container_manager = self.application.container_manager
+ containers = yield container_manager.containers_for_image(
+ image_name, user_name)
- return container
+ # Assume only one container per image
+ if containers:
+ container_id = containers[0].docker_id
+ yield container_manager.stop_and_remove_container(container_id)
def _parse_form(self):
"""Extract the form options from the form and return them
@@ -262,6 +265,29 @@ class HomeHandler(BaseHandler):
return form_options
+ @gen.coroutine
+ def _wait_for_container_ready(self, container):
+ """ Wait until the container is ready to be connected
+
+ Parameters
+ ----------
+ container: Container
+ The container to be connected
+ """
+ # Note, we use the jupyterhub ORM server, but we don't use it for
+ # any database activity.
+ # Note: the end / is important. We want to get a 200 from the actual
+ # websockify server, not the nginx (which presents the redirection
+ # page).
+ server_url = "http://{}:{}{}/".format(
+ container.ip,
+ container.port,
+ self.application.container_url_abspath(container))
+
+ yield _wait_for_http_server_2xx(
+ server_url,
+ self.application.config.network_timeout)
+
@gen.coroutine
def _wait_for_http_server_2xx(url, timeout=10):
| Running containers not in cache when container is started without a public port
Start any incompatible image not exposing a port (e.g. `quay.io/travisci/travis-python:latest`), the container is up and running but we do not keep it in our cache of running containers because the port is unavailable (an exception is raised).
From the user point of view, there is no View/Stop button which makes sense.
From the admin point of view, the container has to be stopped and removed automatically.
Needed for #6
| simphony/simphony-remote | diff --git a/tests/docker/test_container.py b/tests/docker/test_container.py
index 7d1cbae..faea36f 100644
--- a/tests/docker/test_container.py
+++ b/tests/docker/test_container.py
@@ -1,6 +1,7 @@
from unittest import TestCase
from remoteappmanager.docker.container import Container
+from tests.utils import assert_containers_equal
class TestContainer(TestCase):
@@ -18,3 +19,64 @@ class TestContainer(TestCase):
)
self.assertEqual(container.host_url, "http://123.45.67.89:31337")
+
+ def test_from_docker_dict_with_public_port(self):
+ '''Test convertion from "docker ps" to Container with public port'''
+ # With public port
+ container_dict = {
+ 'Command': '/startup.sh',
+ 'Created': 1466756760,
+ 'HostConfig': {'NetworkMode': 'default'},
+ 'Id': '248e45e717cd740ae763a1c565',
+ 'Image': 'empty-ubuntu:latest',
+ 'ImageID': 'sha256:f4610c7580b8f0a9a25086b6287d0069fb8a',
+ 'Labels': {'eu.simphony-project.docker.ui_name': 'Empty Ubuntu',
+ 'eu.simphony-project.docker.user': 'user'},
+ 'Names': ['/remoteexec-user-empty-ubuntu_3Alatest'],
+ 'Ports': [{'IP': '0.0.0.0',
+ 'PrivatePort': 8888,
+ 'PublicPort': 32823,
+ 'Type': 'tcp'}],
+ 'State': 'running',
+ 'Status': 'Up 56 minutes'}
+
+ # Container with public port
+ actual = Container.from_docker_dict(container_dict)
+ expected = Container(
+ docker_id='248e45e717cd740ae763a1c565',
+ name='/remoteexec-user-empty-ubuntu_3Alatest',
+ image_name='empty-ubuntu:latest',
+ image_id='sha256:f4610c7580b8f0a9a25086b6287d0069fb8a',
+ ip='0.0.0.0', port=32823)
+
+ assert_containers_equal(self, actual, expected)
+
+ def test_from_docker_dict_without_public_port(self):
+ '''Test convertion from "docker ps" to Container with public port'''
+ # With public port
+ container_dict = {
+ 'Command': '/startup.sh',
+ 'Created': 1466756760,
+ 'HostConfig': {'NetworkMode': 'default'},
+ 'Id': '812c765d0549be0ab831ae8348',
+ 'Image': 'novnc-ubuntu:latest',
+ 'ImageID': 'sha256:f4610c75d3c0dfa25d3c0dfa25d3c0dfa2',
+ 'Labels': {'eu.simphony-project.docker.ui_name': 'Empty Ubuntu',
+ 'eu.simphony-project.docker.user': 'user'},
+ 'Names': ['/remoteexec-user-empty-ubuntu_3Alatest'],
+ 'Ports': [{'IP': '0.0.0.0',
+ 'PrivatePort': 8888,
+ 'Type': 'tcp'}],
+ 'State': 'running',
+ 'Status': 'Up 56 minutes'}
+
+ # Container without public port
+ actual = Container.from_docker_dict(container_dict)
+ expected = Container(
+ docker_id='812c765d0549be0ab831ae8348',
+ name='/remoteexec-user-empty-ubuntu_3Alatest',
+ image_name='novnc-ubuntu:latest',
+ image_id='sha256:f4610c75d3c0dfa25d3c0dfa25d3c0dfa2',
+ ip='0.0.0.0', port=None)
+
+ assert_containers_equal(self, actual, expected)
diff --git a/tests/docker/test_container_manager.py b/tests/docker/test_container_manager.py
index f878bb1..5ed4774 100644
--- a/tests/docker/test_container_manager.py
+++ b/tests/docker/test_container_manager.py
@@ -33,24 +33,63 @@ class TestContainerManager(AsyncTestCase):
self.assertTrue(mock_client.remove_container.called)
@gen_test
- def test_container_for_image(self):
- result = yield self.manager.containers_for_image("imageid")
- self.assertEqual(len(result), 0)
+ def test_containers_for_image_results(self):
+ ''' Test containers_for_image returns a list of Container '''
+ # The mock client mocks the output of docker Client.containers
+ docker_client = utils.mock_docker_client_with_running_containers()
+ self.mock_docker_client = docker_client
+ self.manager.docker_client.client = docker_client
+
+ # The output should be a list of Container
+ results = yield self.manager.containers_for_image("imageid")
+ expected = [Container(docker_id='someid',
+ name='/remoteexec-image_3Alatest_user',
+ image_name='simphony/mayavi-4.4.4:latest', # noqa
+ image_id='imageid', ip='0.0.0.0', port=None),
+ Container(docker_id='someid',
+ name='/remoteexec-image_3Alatest_user2',
+ image_name='simphony/mayavi-4.4.4:latest', # noqa
+ image_id='imageid', ip='0.0.0.0', port=None),
+ Container(docker_id='someid',
+ name='/remoteexec-image_3Alatest_user3',
+ image_name='simphony/mayavi-4.4.4:latest', # noqa
+ image_id='imageid', ip='', port=None)]
+
+ for result, expected_container in zip(results, expected):
+ utils.assert_containers_equal(self, result, expected_container)
- yield self.manager.start_container("username", "imageid")
+ @gen_test
+ def test_containers_for_image_client_api_without_user(self):
+ ''' Test containers_for_images(image_id) use of Client API'''
+ # The mock client mocks the output of docker Client.containers
+ docker_client = utils.mock_docker_client_with_running_containers()
+ self.manager.docker_client.client = docker_client
- result = yield self.manager.containers_for_image("imageid")
- self.assertEqual(len(result), 1)
+ # We assume the client.containers(filters=...) is tested by docker-py
+ # Instead we test if the correct arguments are passed to the Client API
+ yield self.manager.containers_for_image("imageid")
+ call_args = self.manager.docker_client.client.containers.call_args
- expected = {'name': 'remoteexec-username-imageid',
- 'image_id': 'imageid',
- 'image_name': 'imageid',
- 'ip': '127.0.0.1',
- 'port': 666,
- 'docker_id': 'containerid'}
+ # filters is one of the keyword argument
+ self.assertIn('filters', call_args[1])
+ self.assertEqual(call_args[1]['filters']['ancestor'], "imageid")
- for key, value in expected.items():
- self.assertEqual(getattr(result[0], key), value)
+ @gen_test
+ def test_containers_for_image_client_api_with_user(self):
+ ''' Test containers_for_images(image_id, user) use of Client API'''
+ # The mock client mocks the output of docker Client.containers
+ docker_client = utils.mock_docker_client_with_running_containers()
+ self.manager.docker_client.client = docker_client
+
+ # We assume the client.containers(filters=...) is tested by docker-py
+ # Instead we test if the correct arguments are passed to the Client API
+ yield self.manager.containers_for_image("imageid", "userABC")
+ call_args = self.manager.docker_client.client.containers.call_args
+
+ # filters is one of the keyword argument
+ self.assertIn('filters', call_args[1])
+ self.assertEqual(call_args[1]['filters']['ancestor'], "imageid")
+ self.assertIn("userABC", call_args[1]['filters']['label'])
@gen_test
def test_race_condition_spawning(self):
diff --git a/tests/utils.py b/tests/utils.py
index ed44914..f957a35 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -47,6 +47,56 @@ def mock_docker_client():
return docker_client
+def mock_docker_client_with_running_containers():
+ """Same as above, but it behaves as if one of the images have two
+ containers running for different users."""
+ client = mock_docker_client()
+ client.containers.return_value = [
+ # user
+ {'Command': '/sbin/init -D',
+ 'Created': 1466766499,
+ 'HostConfig': {'NetworkMode': 'default'},
+ 'Id': 'someid',
+ 'Image': 'simphony/mayavi-4.4.4:latest',
+ 'ImageID': 'imageid',
+ 'Labels': {'eu.simphony-project.docker.user': 'user'},
+ 'Names': ['/remoteexec-image_3Alatest_user'],
+ 'Ports': [{'IP': '0.0.0.0',
+ 'PublicIP': 34567,
+ 'PrivatePort': 22,
+ 'Type': 'tcp'}],
+ 'State': 'running',
+ 'Status': 'Up About an hour'},
+ # user2
+ {'Command': '/sbin/init -D',
+ 'Created': 1466766499,
+ 'HostConfig': {'NetworkMode': 'default'},
+ 'Id': 'someid',
+ 'Image': 'simphony/mayavi-4.4.4:latest',
+ 'ImageID': 'imageid',
+ 'Labels': {'eu.simphony-project.docker.user': 'user2'},
+ 'Names': ['/remoteexec-image_3Alatest_user2'],
+ 'Ports': [{'IP': '0.0.0.0',
+ 'PublicIP': 34567,
+ 'PrivatePort': 22,
+ 'Type': 'tcp'}],
+ 'State': 'running',
+ 'Status': 'Up About an hour'},
+ # user3 (somehow there is no port
+ {'Command': '/sbin/init -D',
+ 'Created': 1466766499,
+ 'HostConfig': {'NetworkMode': 'default'},
+ 'Id': 'someid',
+ 'Image': 'simphony/mayavi-4.4.4:latest',
+ 'ImageID': 'imageid',
+ 'Labels': {'eu.simphony-project.docker.user': 'user3'},
+ 'Names': ['/remoteexec-image_3Alatest_user3'],
+ 'State': 'running',
+ 'Status': 'Up About an hour'}]
+
+ return client
+
+
def mock_docker_client_with_existing_stopped_container():
"""Same as above, but it behaves as if one of the containers is already
started."""
@@ -167,3 +217,14 @@ def invocation_argv():
yield
sys.argv[:] = saved_argv
+
+
+def assert_containers_equal(test_case, actual, expected):
+ if (expected.docker_id != actual.docker_id or
+ expected.name != actual.name or
+ expected.image_name != actual.image_name or
+ expected.image_id != actual.image_id or
+ expected.ip != actual.ip or
+ expected.port != actual.port):
+ message = '{!r} is not identical to the expected {!r}'
+ test_case.fail(message.format(actual, expected))
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 3
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"flake8",
"sphinx",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y docker.io"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.16
alembic==1.15.2
annotated-types==0.7.0
arrow==1.3.0
async-generator==1.10
attrs==25.3.0
babel==2.17.0
certifi==2025.1.31
certipy==0.2.2
cffi==1.17.1
charset-normalizer==3.4.1
cryptography==44.0.2
docker-py==1.10.6
docker-pycreds==0.4.0
docutils==0.21.2
escapism==1.0.1
exceptiongroup==1.2.2
flake8==7.2.0
fqdn==1.5.1
greenlet==3.1.1
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
iniconfig==2.1.0
isoduration==20.11.0
Jinja2==3.1.6
jsonpointer==3.0.0
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
jupyter-events==0.12.0
jupyter_client==8.6.3
jupyter_core==5.7.2
jupyterhub==5.2.1
Mako==1.3.9
MarkupSafe==3.0.2
mccabe==0.7.0
oauthlib==3.2.2
packaging==24.2
pamela==1.2.0
platformdirs==4.3.7
pluggy==1.5.0
prometheus_client==0.21.1
pycodestyle==2.13.0
pycparser==2.22
pydantic==2.11.1
pydantic_core==2.33.0
pyflakes==3.3.1
Pygments==2.19.1
pytest==8.3.5
python-dateutil==2.9.0.post0
python-json-logger==3.3.0
PyYAML==6.0.2
pyzmq==26.3.0
referencing==0.36.2
-e git+https://github.com/simphony/simphony-remote.git@ca4d029fe1edd54e62a20809aba651401ed6f587#egg=remoteappmanager
requests==2.32.3
rfc3339-validator==0.1.4
rfc3986-validator==0.1.1
rpds-py==0.24.0
six==1.17.0
snowballstemmer==2.2.0
Sphinx==7.4.7
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
SQLAlchemy==2.0.40
tomli==2.2.1
tornado==6.4.2
traitlets==5.14.3
types-python-dateutil==2.9.0.20241206
typing-inspection==0.4.0
typing_extensions==4.13.0
uri-template==1.3.0
urllib3==2.3.0
webcolors==24.11.1
websocket-client==1.8.0
zipp==3.21.0
| name: simphony-remote
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.16
- alembic==1.15.2
- annotated-types==0.7.0
- arrow==1.3.0
- async-generator==1.10
- attrs==25.3.0
- babel==2.17.0
- certifi==2025.1.31
- certipy==0.2.2
- cffi==1.17.1
- charset-normalizer==3.4.1
- cryptography==44.0.2
- docker-py==1.10.6
- docker-pycreds==0.4.0
- docutils==0.21.2
- escapism==1.0.1
- exceptiongroup==1.2.2
- flake8==7.2.0
- fqdn==1.5.1
- greenlet==3.1.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- isoduration==20.11.0
- jinja2==3.1.6
- jsonpointer==3.0.0
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- jupyter-client==8.6.3
- jupyter-core==5.7.2
- jupyter-events==0.12.0
- jupyterhub==5.2.1
- mako==1.3.9
- markupsafe==3.0.2
- mccabe==0.7.0
- oauthlib==3.2.2
- packaging==24.2
- pamela==1.2.0
- platformdirs==4.3.7
- pluggy==1.5.0
- prometheus-client==0.21.1
- pycodestyle==2.13.0
- pycparser==2.22
- pydantic==2.11.1
- pydantic-core==2.33.0
- pyflakes==3.3.1
- pygments==2.19.1
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- python-json-logger==3.3.0
- pyyaml==6.0.2
- pyzmq==26.3.0
- referencing==0.36.2
- requests==2.32.3
- rfc3339-validator==0.1.4
- rfc3986-validator==0.1.1
- rpds-py==0.24.0
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==7.4.7
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- sqlalchemy==2.0.40
- tomli==2.2.1
- tornado==6.4.2
- traitlets==5.14.3
- types-python-dateutil==2.9.0.20241206
- typing-extensions==4.13.0
- typing-inspection==0.4.0
- uri-template==1.3.0
- urllib3==2.3.0
- webcolors==24.11.1
- websocket-client==1.8.0
- zipp==3.21.0
prefix: /opt/conda/envs/simphony-remote
| [
"tests/docker/test_container.py::TestContainer::test_from_docker_dict_with_public_port",
"tests/docker/test_container.py::TestContainer::test_from_docker_dict_without_public_port",
"tests/docker/test_container_manager.py::TestContainerManager::test_containers_for_image_client_api_with_user",
"tests/docker/test_container_manager.py::TestContainerManager::test_containers_for_image_client_api_without_user",
"tests/docker/test_container_manager.py::TestContainerManager::test_containers_for_image_results"
]
| []
| [
"tests/docker/test_container.py::TestContainer::test_host_url",
"tests/docker/test_container.py::TestContainer::test_url",
"tests/docker/test_container_manager.py::TestContainerManager::test_instantiation",
"tests/docker/test_container_manager.py::TestContainerManager::test_race_condition_spawning",
"tests/docker/test_container_manager.py::TestContainerManager::test_start_already_present_container",
"tests/docker/test_container_manager.py::TestContainerManager::test_start_container_with_nonexisting_volume_source",
"tests/docker/test_container_manager.py::TestContainerManager::test_start_stop"
]
| []
| BSD 3-Clause "New" or "Revised" License | 600 | [
"remoteappmanager/handlers/home_handler.py",
"remoteappmanager/docker/container.py",
"remoteappmanager/docker/container_manager.py"
]
| [
"remoteappmanager/handlers/home_handler.py",
"remoteappmanager/docker/container.py",
"remoteappmanager/docker/container_manager.py"
]
|
|
mkdocs__mkdocs-975 | bff2ede1229dd2a5e48d4c03bbd05e977813cfa0 | 2016-06-27 14:46:54 | e7d8879d2b53d9e50bdfcf1cf29c48dc3f6bc87f | lorengordon: I had a need for this feature and didn't see further progress on it since the earlier PRs were closed, so I took a stab at it. I did study https://github.com/mkdocs/mkdocs/pull/752 and https://github.com/mkdocs/mkdocs/pull/802, and read through the original issue https://github.com/mkdocs/mkdocs/issues/269. Hopefully I addressed the concerns and this aligns with the desired approach. Let me know if you'd like me to adjust anything.
waylan: @lorengordon overall this looks pretty good. I made a few inline comments. Also, I don't see any changes to the `mkdocs` theme. Make those changes and I think this will be good.
waylan: Oh, I almost forgot, the new template context variable needs to be documented [here][1]. Just add an entry for `page.edit_url`.
[1]: https://github.com/mkdocs/mkdocs/blob/master/docs/user-guide/custom-themes.md#page
lorengordon: @waylan, I'm on it. This is my first time contributing to mkdocs... Any preference for amending the prior commit vs adding a second? I didn't see any particular guidance in the contributing doc.
waylan: We prefer a clean commit history, so if you can amend the previous commit and do a `--force` push, that would be preferred, but if not we can work with that as well.
lorengordon: :+1: Yep, my preference as well. It's not everyone's, so I like to check first.
lorengordon: @waylan, ok, that should address the comments. Anything else?
lorengordon: @waylan, further comments addressed.
lorengordon: Alright, think the last line comment is now covered.
waylan: Thanks. This looks good to me.
The only concern is that this does add a new config setting. I think its necessary to support the feature, but I would like an okay from @d0ugal before merging it. If he's not okay with that, then we would have to hardcode the `edit_uri` value in for each supported `repo`. Of course, the current patch gives us support for most any hosting service out-of-the-box, whereas hardcoding the `edit_uri` would limit support only to the hosting services we code for.
Although, it occurs to me that we could hardcode some defaults for the new setting (if `repo_name` is `github`, default to this or if `repo_name` is `bitbucket` default to that). That way, users would only need to set the value if they want to use a non-standard URL (a different branch than master) on a known service, or an unknown (to us) service (such as GitLab).
For completeness, I should also note that now that we have #947, its possible for users to easily override the template and put anything they want in the "edit this page link" via the `repo` block. While template customization is certainly more difficult for users than altering a setting, it doesn't require us to offer support for any specific feature and gives the users much more power to do whatever they want. I would think this is the strongest argument against adding a new setting for this feature.
However, as we specifically advertise this project as a tool for documenting your coding projects, explicitly offering this feature with a config setting is a reasonable expectation for users to have.
lorengordon: Yes, one goal of mine was to support arbitrary repo hosts and branches. I personally like to use a `develop` branch, and would prefer to send users there rather than whatever branch the `repo` host likes to "default" to.
I did take note of the comment on https://github.com/mkdocs/mkdocs/pull/752 about minimizing the number of config options, which is why I combined everything into one setting. Plus, I think it is more flexible this way, anyway.
It appears the config_option classes are able to lookup the config dictionary, so it looks trivial to add a new class, `EditUri()`, similar to the approach for `RepoURL()`, that sets defaults for `GitHub` and `Bitbucket`.
Or, hmm, don't want to run into ordering issues with the config file. Perhaps just extending `RepoURL()` would work? I'll twizzle something out...
lorengordon: Also, `urljoin()` doesn't appear to work that way. Without the trailing slash on the `base` param, the last fragment gets dropped. So this:
`urljoin('https://example.com/foo', 'bar.md')`
returns this:
`'https://example.com/bar.md'`.
https://docs.python.org/3/library/urllib.parse.html#urllib.parse.urljoin
lorengordon: Anyway, I added [this bit](https://github.com/mkdocs/mkdocs/pull/975/files#diff-21a6a972ad3c1097b825235b2e9b4848R205) to set default values for GitHub and Bitbucket. Does that seem reasonable?
lorengordon: Going that approach, would it be valuable to add a few unit tests to the [RepoURLTest](https://github.com/mkdocs/mkdocs/blob/master/mkdocs/tests/config/config_options_tests.py#L102-L126) class?
```
def test_edit_uri_github(self):
option = config_options.RepoURL()
config = {'repo_url': "https://github.com/mkdocs/mkdocs"}
option.post_validation(config, 'repo_url')
self.assertEqual(config['edit_uri'], 'blob/master/docs/')
def test_edit_uri_bitbucket(self):
option = config_options.RepoURL()
config = {'repo_url': "https://bitbucket.org/gutworth/six/"}
option.post_validation(config, 'repo_url')
self.assertEqual(config['edit_uri'], 'src/default/docs/')
def test_edit_uri_custom(self):
option = config_options.RepoURL()
config = {'repo_url': "https://launchpad.net/python-tuskarclient"}
option.post_validation(config, 'repo_url')
self.assertEqual(config.get('edit_uri'), None)
```
waylan: Yes, some more tests would be a good idea. Actually, we should probably add a few tests of the new `Page.set_edit_url` method as well. Just create a few dummy `Page` objects and confirm the method returns the correct result with various input.
lorengordon: Can you give me a few more pointers for the `Page` tests? I looked at [nav_tests.py](https://github.com/mkdocs/mkdocs/blob/master/mkdocs/tests/nav_tests.py) and poked around a few others, but it wasn't clear how that part of the code was being tested. | diff --git a/docs/user-guide/configuration.md b/docs/user-guide/configuration.md
index 422e7c90..6fbebe23 100644
--- a/docs/user-guide/configuration.md
+++ b/docs/user-guide/configuration.md
@@ -50,6 +50,37 @@ When set, provides a link to your GitHub or Bitbucket repository on each page.
**default**: `'GitHub'` or `'Bitbucket'` if the `repo_url` matches those
domains, otherwise `null`
+### edit_uri
+
+Path from the base `repo_url` to the docs directory when directly viewing a
+page, accounting for specifics of the repository host (e.g. GitHub, Bitbucket,
+etc), the branch, and the docs directory itself. Mkdocs concatenates `repo_url`
+and `edit_uri`, and appends the input path of the page.
+
+When set, provides a link directly to the page in your source repository. This
+makes it easier to find and edit the source for the page. If `repo_url` is not
+set, this option is ignored.
+
+For example, for a GitHub-hosted repository, the `edit_uri` would be as follows.
+(Note the `blob` path and `master` branch...)
+
+```yaml
+edit_uri: blob/master/docs/
+```
+
+For a Bitbucket-hosted repository, the equivalent `edit_uri` would be as
+follows. (Note the `src` path and `default` branch...)
+
+```yaml
+edit_uri: src/default/docs/
+```
+
+For other repository hosts, `edit_uri` works the same way. Simply specify the
+relative path to the docs directory.
+
+**default**: `blob/master/docs/` or `src/default/docs/` for GitHub or Bitbucket
+repos, respectively, if `repo_url` matches those domains, otherwise `null`
+
### site_description
Set the site description. This will add a meta tag to the generated HTML header.
diff --git a/docs/user-guide/custom-themes.md b/docs/user-guide/custom-themes.md
index cd4d8e8a..10df837a 100644
--- a/docs/user-guide/custom-themes.md
+++ b/docs/user-guide/custom-themes.md
@@ -259,6 +259,11 @@ documentation page.
The full, canonical URL to the current page. This includes the `site_url` from
the configuration.
+##### page.edit_url
+
+The full URL to the input page in the source repository. Typically used to
+provide a link to edit the source page.
+
##### page.url
The URL to the current page not including the `site_url` from the configuration.
diff --git a/mkdocs/commands/build.py b/mkdocs/commands/build.py
index a4ce837c..83e1a1c6 100644
--- a/mkdocs/commands/build.py
+++ b/mkdocs/commands/build.py
@@ -116,6 +116,9 @@ def get_page_context(page, content, toc, meta, config):
if config['site_url']:
page.set_canonical_url(config['site_url'])
+ if config['repo_url']:
+ page.set_edit_url(config['repo_url'], config['edit_uri'])
+
page.content = content
page.toc = toc
page.meta = meta
diff --git a/mkdocs/config/config_options.py b/mkdocs/config/config_options.py
index e9065e00..25dd8b09 100644
--- a/mkdocs/config/config_options.py
+++ b/mkdocs/config/config_options.py
@@ -202,6 +202,12 @@ class RepoURL(URL):
else:
config['repo_name'] = repo_host.split('.')[0].title()
+ if config['repo_url'] is not None and config.get('edit_uri') is None:
+ if config['repo_name'].lower() == 'github':
+ config['edit_uri'] = 'blob/master/docs/'
+ elif config['repo_name'].lower() == 'bitbucket':
+ config['edit_uri'] = 'src/default/docs/'
+
class Dir(Type):
"""
diff --git a/mkdocs/config/defaults.py b/mkdocs/config/defaults.py
index 3d8c71e2..a5ff9d77 100644
--- a/mkdocs/config/defaults.py
+++ b/mkdocs/config/defaults.py
@@ -75,6 +75,12 @@ DEFAULT_SCHEMA = (
# "GitHub" or "Bitbucket" for known url or Hostname for unknown urls.
('repo_name', config_options.Type(utils.string_types)),
+ # Specify a URI to the docs dir in the project source repo, relative to the
+ # repo_url. When set, a link directly to the page in the source repo will
+ # be added to the generated HTML. If repo_url is not set also, this option
+ # is ignored.
+ ('edit_uri', config_options.Type(utils.string_types)),
+
# Specify which css or javascript files from the docs directory should be
# additionally included in the site. Default, List of all .css and .js
# files in the docs dir.
diff --git a/mkdocs/nav.py b/mkdocs/nav.py
index 4c5b08f2..3b4da601 100644
--- a/mkdocs/nav.py
+++ b/mkdocs/nav.py
@@ -151,6 +151,7 @@ class Page(object):
# Placeholders to be filled in later in the build
# process when we have access to the config.
self.canonical_url = None
+ self.edit_url = None
self.content = None
self.meta = None
self.toc = None
@@ -186,6 +187,18 @@ class Page(object):
base += '/'
self.canonical_url = utils.urljoin(base, self.abs_url.lstrip('/'))
+ def set_edit_url(self, repo_url, edit_uri):
+ if not repo_url.endswith('/'):
+ repo_url += '/'
+ if not edit_uri:
+ self.edit_url = repo_url
+ else:
+ if not edit_uri.endswith('/'):
+ edit_uri += '/'
+ self.edit_url = utils.urljoin(
+ repo_url + edit_uri,
+ self.input_path)
+
class Header(object):
def __init__(self, title, children):
diff --git a/mkdocs/themes/mkdocs/nav.html b/mkdocs/themes/mkdocs/nav.html
index bf3a8e77..7def66d6 100644
--- a/mkdocs/themes/mkdocs/nav.html
+++ b/mkdocs/themes/mkdocs/nav.html
@@ -69,9 +69,9 @@
{%- endblock %}
{%- block repo %}
- {%- if repo_url %}
+ {%- if page and page.edit_url %}
<li>
- <a href="{{ repo_url }}">
+ <a href="{{ page.edit_url }}">
{%- if repo_name == 'GitHub' %}
<i class="fa fa-github"></i>
{%- elif repo_name == 'Bitbucket' -%}
diff --git a/mkdocs/themes/readthedocs/breadcrumbs.html b/mkdocs/themes/readthedocs/breadcrumbs.html
index e1a4f3a9..c169cd6c 100644
--- a/mkdocs/themes/readthedocs/breadcrumbs.html
+++ b/mkdocs/themes/readthedocs/breadcrumbs.html
@@ -13,12 +13,13 @@
{% if page %}<li>{{ page.title }}</li>{% endif %}
<li class="wy-breadcrumbs-aside">
{%- block repo %}
- {% if repo_url %}
- {% if repo_name == 'GitHub' %}
- <a href="{{ repo_url }}" class="icon icon-github"> Edit on GitHub</a>
- {% elif repo_name == 'Bitbucket' %}
- <a href="{{ repo_url }}" class="icon icon-bitbucket"> Edit on BitBucket</a>
- {% endif %}
+ {% if page and page.edit_url %}
+ <a href="{{ page.edit_url }}"
+ {%- if repo_name|lower == 'github' %}
+ class="icon icon-github"
+ {%- elif repo_name|lower == 'bitbucket' %}
+ class="icon icon-bitbucket"
+ {% endif %}> Edit on {{ repo_name }}</a>
{% endif %}
{%- endblock %}
</li>
| 'Edit on Github' should link to the page, not the project
IMO, the Github link should be smartened so that its text is 'Edit on Github' and the hyperlink points to the current page. For example, see the link in upper right of http://read-the-docs.readthedocs.org/en/latest/builds.html
PIP docs [do this well](http://pip.readthedocs.org/en/latest/news.html) | mkdocs/mkdocs | diff --git a/mkdocs/tests/config/config_options_tests.py b/mkdocs/tests/config/config_options_tests.py
index 3c040102..05898b19 100644
--- a/mkdocs/tests/config/config_options_tests.py
+++ b/mkdocs/tests/config/config_options_tests.py
@@ -125,6 +125,27 @@ class RepoURLTest(unittest.TestCase):
self.assertEqual(config['repo_url'], config['repo_url'])
self.assertEqual(config['repo_name'], "Launchpad")
+ def test_edit_uri_github(self):
+
+ option = config_options.RepoURL()
+ config = {'repo_url': "https://github.com/mkdocs/mkdocs"}
+ option.post_validation(config, 'repo_url')
+ self.assertEqual(config['edit_uri'], 'blob/master/docs/')
+
+ def test_edit_uri_bitbucket(self):
+
+ option = config_options.RepoURL()
+ config = {'repo_url': "https://bitbucket.org/gutworth/six/"}
+ option.post_validation(config, 'repo_url')
+ self.assertEqual(config['edit_uri'], 'src/default/docs/')
+
+ def test_edit_uri_custom(self):
+
+ option = config_options.RepoURL()
+ config = {'repo_url': "https://launchpad.net/python-tuskarclient"}
+ option.post_validation(config, 'repo_url')
+ self.assertEqual(config.get('edit_uri'), None)
+
class DirTest(unittest.TestCase):
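The expected defaults in these tests imply a simple host-based derivation. A hedged sketch of that logic follows (illustrative only, not the exact MkDocs implementation; the defaults are copied from the tests above):

```python
# Derive a default edit_uri from repo_url for known hosts; unknown hosts
# get no default, matching test_edit_uri_custom above.
def default_edit_uri(repo_url):
    url = repo_url.lower()
    if 'github.com' in url:
        return 'blob/master/docs/'
    if 'bitbucket.org' in url:
        return 'src/default/docs/'
    return None


assert default_edit_uri("https://github.com/mkdocs/mkdocs") == 'blob/master/docs/'
assert default_edit_uri("https://bitbucket.org/gutworth/six/") == 'src/default/docs/'
assert default_edit_uri("https://launchpad.net/python-tuskarclient") is None
```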
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 1
},
"num_modified_files": 8
} | 0.15 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/project.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | click==8.1.8
exceptiongroup==1.2.2
importlib_metadata==8.6.1
iniconfig==2.1.0
Jinja2==3.1.6
livereload==2.7.1
Markdown==3.7
MarkupSafe==3.0.2
-e git+https://github.com/mkdocs/mkdocs.git@bff2ede1229dd2a5e48d4c03bbd05e977813cfa0#egg=mkdocs
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
PyYAML==6.0.2
tomli==2.2.1
tornado==6.4.2
zipp==3.21.0
| name: mkdocs
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- click==8.1.8
- exceptiongroup==1.2.2
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- jinja2==3.1.6
- livereload==2.7.1
- markdown==3.7
- markupsafe==3.0.2
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pyyaml==6.0.2
- tomli==2.2.1
- tornado==6.4.2
- zipp==3.21.0
prefix: /opt/conda/envs/mkdocs
| [
"mkdocs/tests/config/config_options_tests.py::RepoURLTest::test_edit_uri_bitbucket",
"mkdocs/tests/config/config_options_tests.py::RepoURLTest::test_edit_uri_github"
]
| []
| [
"mkdocs/tests/config/config_options_tests.py::OptionallyRequiredTest::test_default",
"mkdocs/tests/config/config_options_tests.py::OptionallyRequiredTest::test_empty",
"mkdocs/tests/config/config_options_tests.py::OptionallyRequiredTest::test_replace_default",
"mkdocs/tests/config/config_options_tests.py::OptionallyRequiredTest::test_required",
"mkdocs/tests/config/config_options_tests.py::OptionallyRequiredTest::test_required_no_default",
"mkdocs/tests/config/config_options_tests.py::TypeTest::test_length",
"mkdocs/tests/config/config_options_tests.py::TypeTest::test_multiple_types",
"mkdocs/tests/config/config_options_tests.py::TypeTest::test_single_type",
"mkdocs/tests/config/config_options_tests.py::URLTest::test_invalid",
"mkdocs/tests/config/config_options_tests.py::URLTest::test_invalid_url",
"mkdocs/tests/config/config_options_tests.py::URLTest::test_valid_url",
"mkdocs/tests/config/config_options_tests.py::RepoURLTest::test_edit_uri_custom",
"mkdocs/tests/config/config_options_tests.py::RepoURLTest::test_repo_name_bitbucket",
"mkdocs/tests/config/config_options_tests.py::RepoURLTest::test_repo_name_custom",
"mkdocs/tests/config/config_options_tests.py::RepoURLTest::test_repo_name_github",
"mkdocs/tests/config/config_options_tests.py::DirTest::test_file",
"mkdocs/tests/config/config_options_tests.py::DirTest::test_incorrect_type_attribute_error",
"mkdocs/tests/config/config_options_tests.py::DirTest::test_incorrect_type_type_error",
"mkdocs/tests/config/config_options_tests.py::DirTest::test_missing_dir",
"mkdocs/tests/config/config_options_tests.py::DirTest::test_missing_dir_but_required",
"mkdocs/tests/config/config_options_tests.py::DirTest::test_valid_dir",
"mkdocs/tests/config/config_options_tests.py::SiteDirTest::test_doc_dir_in_site_dir",
"mkdocs/tests/config/config_options_tests.py::SiteDirTest::test_site_dir_in_docs_dir",
"mkdocs/tests/config/config_options_tests.py::ThemeTest::test_theme",
"mkdocs/tests/config/config_options_tests.py::ThemeTest::test_theme_invalid",
"mkdocs/tests/config/config_options_tests.py::ExtrasTest::test_empty",
"mkdocs/tests/config/config_options_tests.py::ExtrasTest::test_invalid",
"mkdocs/tests/config/config_options_tests.py::ExtrasTest::test_provided",
"mkdocs/tests/config/config_options_tests.py::ExtrasTest::test_talk",
"mkdocs/tests/config/config_options_tests.py::PagesTest::test_invalid_config",
"mkdocs/tests/config/config_options_tests.py::PagesTest::test_invalid_type",
"mkdocs/tests/config/config_options_tests.py::PagesTest::test_provided",
"mkdocs/tests/config/config_options_tests.py::PagesTest::test_provided_dict",
"mkdocs/tests/config/config_options_tests.py::PagesTest::test_provided_empty",
"mkdocs/tests/config/config_options_tests.py::NumPagesTest::test_invalid_pages",
"mkdocs/tests/config/config_options_tests.py::NumPagesTest::test_many_pages",
"mkdocs/tests/config/config_options_tests.py::NumPagesTest::test_one_page",
"mkdocs/tests/config/config_options_tests.py::NumPagesTest::test_provided",
"mkdocs/tests/config/config_options_tests.py::PrivateTest::test_defined",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_builtins",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_builtins_config",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_configkey",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_duplicates",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_invalid_config_item",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_invalid_config_option",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_invalid_dict_item",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_list_dicts",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_mixed_list",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_none",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_not_list",
"mkdocs/tests/config/config_options_tests.py::MarkdownExtensionsTest::test_simple_list"
]
| []
| BSD 2-Clause "Simplified" License | 601 | [
"mkdocs/config/defaults.py",
"mkdocs/commands/build.py",
"docs/user-guide/custom-themes.md",
"mkdocs/themes/readthedocs/breadcrumbs.html",
"docs/user-guide/configuration.md",
"mkdocs/themes/mkdocs/nav.html",
"mkdocs/nav.py",
"mkdocs/config/config_options.py"
]
| [
"mkdocs/config/defaults.py",
"mkdocs/commands/build.py",
"docs/user-guide/custom-themes.md",
"mkdocs/themes/readthedocs/breadcrumbs.html",
"docs/user-guide/configuration.md",
"mkdocs/themes/mkdocs/nav.html",
"mkdocs/nav.py",
"mkdocs/config/config_options.py"
]
|
pyca__bcrypt-76 | 4e652562c639fd1dd95ce938ddf40f58c63c8b3f | 2016-06-28 19:53:45 | c9c76210fad230995a6155287e8b92c49180eae4 | reaperhulk: depends on #78 | diff --git a/README.rst b/README.rst
index 9519ff4..0883286 100644
--- a/README.rst
+++ b/README.rst
@@ -20,7 +20,7 @@ To install bcrypt, simply:
$ pip install bcrypt
-Note that bcrypt should build very easily on Linux provided you have a C compiler, headers for Python (if you’re not using pypy), and headers for the libffi libraries available on your system.
+Note that bcrypt should build very easily on Linux provided you have a C compiler, headers for Python (if you're not using pypy), and headers for the libffi libraries available on your system.
For Debian and Ubuntu, the following command will ensure that the required dependencies are installed:
@@ -37,22 +37,26 @@ For Fedora and RHEL-derivatives, the following command will ensure that the requ
Changelog
=========
+3.1.0
+-----
+* Added support for ``checkpw`` as another method of verifying a password.
+
3.0.0
-----
* Switched the C backend to code obtained from the OpenBSD project rather than
openwall.
-* Added support for `bcrypt_pbkdf` via the `kdf` function.
+* Added support for ``bcrypt_pbkdf`` via the ``kdf`` function.
2.0.0
-----
-* Added support for an adjustible prefix when calling `gensalt`.
+* Added support for an adjustible prefix when calling ``gensalt``.
* Switched to CFFI 1.0+
Usage
-----
-Hashing
-~~~~~~~
+Password Hashing
+~~~~~~~~~~~~~~~~
Hashing and then later checking that a password matches the previous hashed
password is very simple:
@@ -63,9 +67,9 @@ password is very simple:
>>> password = b"super secret password"
>>> # Hash a password for the first time, with a randomly-generated salt
>>> hashed = bcrypt.hashpw(password, bcrypt.gensalt())
- >>> # Check that a unhashed password matches one that has previously been
- >>> # hashed
- >>> if bcrypt.hashpw(password, hashed) == hashed:
+ >>> # Check that an unhashed password matches one that has previously been
+ >>> # hashed
+ >>> if bcrypt.checkpw(password, hashed):
... print("It Matches!")
... else:
... print("It Does not Match :(")
@@ -73,7 +77,7 @@ password is very simple:
KDF
~~~
-As of 3.0.0 `bcrypt` now offers a `kdf` function which does `bcrypt_pbkdf`.
+As of 3.0.0 ``bcrypt`` now offers a ``kdf`` function which does ``bcrypt_pbkdf``.
This KDF is used in OpenSSH's newer encrypted private key format.
.. code:: pycon
@@ -113,10 +117,10 @@ Another one of bcrypt's features is an adjustable prefix to let you define what
libraries you'll remain compatible with. To adjust this, pass either ``2a`` or
``2b`` (the default) to ``bcrypt.gensalt(prefix=b"2b")`` as a bytes object.
-As of 3.0.0 the `$2y$` prefix is still supported in `hashpw` but deprecated.
+As of 3.0.0 the ``$2y$`` prefix is still supported in ``hashpw`` but deprecated.
-Maxmimum Password Length
-~~~~~~~~~~~~~~~~~~~~~~~~
+Maximum Password Length
+~~~~~~~~~~~~~~~~~~~~~~~
The bcrypt algorithm only handles passwords up to 72 characters, any characters
beyond that are ignored. To work around this, a common approach is to hash a
diff --git a/src/bcrypt/__init__.py b/src/bcrypt/__init__.py
index ad44e93..c2be96d 100644
--- a/src/bcrypt/__init__.py
+++ b/src/bcrypt/__init__.py
@@ -78,6 +78,24 @@ def hashpw(password, salt):
return _bcrypt.ffi.string(hashed)
+def checkpw(password, hashed_password):
+ if (isinstance(password, six.text_type) or
+ isinstance(hashed_password, six.text_type)):
+ raise TypeError("Unicode-objects must be encoded before checking")
+
+ if b"\x00" in password or b"\x00" in hashed_password:
+ raise ValueError(
+ "password and hashed_password may not contain NUL bytes"
+ )
+
+ # If the user supplies a $2y$ prefix we normalize to $2b$
+ hashed_password = _normalize_prefix(hashed_password)
+
+ ret = hashpw(password, hashed_password)
+
+ return _bcrypt.lib.timingsafe_bcmp(ret, hashed_password, len(ret)) == 0
+
+
def kdf(password, salt, desired_key_bytes, rounds):
if isinstance(password, six.text_type) or isinstance(salt, six.text_type):
raise TypeError("Unicode-objects must be encoded before hashing")
diff --git a/src/build_bcrypt.py b/src/build_bcrypt.py
index e7aca4c..3eec35c 100644
--- a/src/build_bcrypt.py
+++ b/src/build_bcrypt.py
@@ -25,6 +25,7 @@ int bcrypt_hashpass(const char *, const char *, char *, size_t);
int encode_base64(char *, const uint8_t *, size_t);
int bcrypt_pbkdf(const char *, size_t, const uint8_t *, size_t,
uint8_t *, size_t, unsigned int);
+int timingsafe_bcmp(const void *, const void *, size_t);
""")
ffi.set_source(
diff --git a/tasks.py b/tasks.py
new file mode 100644
index 0000000..06b1492
--- /dev/null
+++ b/tasks.py
@@ -0,0 +1,120 @@
+from __future__ import absolute_import, division, print_function
+
+import getpass
+import io
+import os
+import time
+
+from clint.textui.progress import Bar as ProgressBar
+
+import invoke
+
+import requests
+
+
+JENKINS_URL = "https://jenkins.cryptography.io/job/bcrypt-wheel-builder"
+
+
+def wait_for_build_completed(session):
+ # Wait 20 seconds before actually checking if the build is complete, to
+ # ensure that it had time to really start.
+ time.sleep(20)
+ while True:
+ response = session.get(
+ "{0}/lastBuild/api/json/".format(JENKINS_URL),
+ headers={
+ "Accept": "application/json",
+ }
+ )
+ response.raise_for_status()
+ if not response.json()["building"]:
+ assert response.json()["result"] == "SUCCESS"
+ break
+ time.sleep(0.1)
+
+
+def download_artifacts(session):
+ response = session.get(
+ "{0}/lastBuild/api/json/".format(JENKINS_URL),
+ headers={
+ "Accept": "application/json"
+ }
+ )
+ response.raise_for_status()
+ assert not response.json()["building"]
+ assert response.json()["result"] == "SUCCESS"
+
+ paths = []
+
+ last_build_number = response.json()["number"]
+ for run in response.json()["runs"]:
+ if run["number"] != last_build_number:
+ print(
+ "Skipping {0} as it is not from the latest build ({1})".format(
+ run["url"], last_build_number
+ )
+ )
+ continue
+
+ response = session.get(
+ run["url"] + "api/json/",
+ headers={
+ "Accept": "application/json",
+ }
+ )
+ response.raise_for_status()
+ for artifact in response.json()["artifacts"]:
+ response = session.get(
+ "{0}artifact/{1}".format(run["url"], artifact["relativePath"]),
+ stream=True
+ )
+ assert response.headers["content-length"]
+ print("Downloading {0}".format(artifact["fileName"]))
+ bar = ProgressBar(
+ expected_size=int(response.headers["content-length"]),
+ filled_char="="
+ )
+ content = io.BytesIO()
+ for data in response.iter_content(chunk_size=8192):
+ content.write(data)
+ bar.show(content.tell())
+ assert bar.expected_size == content.tell()
+ bar.done()
+ out_path = os.path.join(
+ os.path.dirname(__file__),
+ "dist",
+ artifact["fileName"],
+ )
+ with open(out_path, "wb") as f:
+ f.write(content.getvalue())
+ paths.append(out_path)
+ return paths
+
+
[email protected]
+def release(version):
+ """
+ ``version`` should be a string like '0.4' or '1.0'.
+ """
+ invoke.run("git tag -s {0} -m '{0} release'".format(version))
+ invoke.run("git push --tags")
+
+ invoke.run("python setup.py sdist")
+
+ invoke.run(
+ "twine upload -s dist/bcrypt-{0}*".format(version)
+ )
+
+ session = requests.Session()
+
+ token = getpass.getpass("Input the Jenkins token: ")
+ response = session.post(
+ "{0}/build?token={1}".format(JENKINS_URL, token),
+ params={
+ "cause": "Building wheels for {0}".format(version)
+ }
+ )
+ response.raise_for_status()
+ wait_for_build_completed(session)
+ paths = download_artifacts(session)
+ invoke.run("twine upload {0}".format(" ".join(paths)))
| Implement checkpw?
`py-bcrypt` has a `checkpw` function -- if we're going to claim compatibility, we should have the same. cc @dstufft in case there was a reason he didn't originally add this. | pyca/bcrypt | diff --git a/tests/test_bcrypt.py b/tests/test_bcrypt.py
index b506a7a..ea5cee3 100644
--- a/tests/test_bcrypt.py
+++ b/tests/test_bcrypt.py
@@ -216,6 +216,11 @@ def test_hashpw_new(password, salt, hashed):
assert bcrypt.hashpw(password, salt) == hashed
[email protected](("password", "salt", "hashed"), _test_vectors)
+def test_checkpw(password, salt, hashed):
+ assert bcrypt.checkpw(password, hashed) is True
+
+
@pytest.mark.parametrize(("password", "salt", "hashed"), _test_vectors)
def test_hashpw_existing(password, salt, hashed):
assert bcrypt.hashpw(password, hashed) == hashed
@@ -226,11 +231,47 @@ def test_hashpw_2y_prefix(password, hashed, expected):
assert bcrypt.hashpw(password, hashed) == expected
[email protected](("password", "hashed", "expected"), _2y_test_vectors)
+def test_checkpw_2y_prefix(password, hashed, expected):
+ assert bcrypt.checkpw(password, hashed) is True
+
+
def test_hashpw_invalid():
with pytest.raises(ValueError):
bcrypt.hashpw(b"password", b"$2z$04$cVWp4XaNU8a4v1uMRum2SO")
+def test_checkpw_wrong_password():
+ assert bcrypt.checkpw(
+ b"badpass",
+ b"$2b$04$2Siw3Nv3Q/gTOIPetAyPr.GNj3aO0lb1E5E9UumYGKjP9BYqlNWJe"
+ ) is False
+
+
+def test_checkpw_bad_salt():
+ with pytest.raises(ValueError):
+ bcrypt.checkpw(
+ b"badpass",
+ b"$2b$04$?Siw3Nv3Q/gTOIPetAyPr.GNj3aO0lb1E5E9UumYGKjP9BYqlNWJe"
+ )
+
+
+def test_checkpw_str_password():
+ with pytest.raises(TypeError):
+ bcrypt.checkpw(
+ six.text_type("password"),
+ b"$2b$04$cVWp4XaNU8a4v1uMRum2SO",
+ )
+
+
+def test_checkpw_str_salt():
+ with pytest.raises(TypeError):
+ bcrypt.checkpw(
+ b"password",
+ six.text_type("$2b$04$cVWp4XaNU8a4v1uMRum2SO"),
+ )
+
+
def test_hashpw_str_password():
with pytest.raises(TypeError):
bcrypt.hashpw(
@@ -247,6 +288,20 @@ def test_hashpw_str_salt():
)
+def test_checkpw_nul_byte():
+ with pytest.raises(ValueError):
+ bcrypt.checkpw(
+ b"abc\0def",
+ b"$2b$04$2Siw3Nv3Q/gTOIPetAyPr.GNj3aO0lb1E5E9UumYGKjP9BYqlNWJe"
+ )
+
+ with pytest.raises(ValueError):
+ bcrypt.checkpw(
+ b"abcdef",
+ b"$2b$04$2S\0w3Nv3Q/gTOIPetAyPr.GNj3aO0lb1E5E9UumYGKjP9BYqlNWJe"
+ )
+
+
def test_hashpw_nul_byte():
salt = bcrypt.gensalt(4)
with pytest.raises(ValueError):
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 3
} | 3.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[tests]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/pyca/bcrypt.git@4e652562c639fd1dd95ce938ddf40f58c63c8b3f#egg=bcrypt
cffi==1.17.1
exceptiongroup==1.2.2
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pycparser==2.22
pytest==8.3.5
six==1.17.0
tomli==2.2.1
| name: bcrypt
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cffi==1.17.1
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pycparser==2.22
- pytest==8.3.5
- six==1.17.0
- tomli==2.2.1
prefix: /opt/conda/envs/bcrypt
| [
"tests/test_bcrypt.py::test_checkpw_bad_salt",
"tests/test_bcrypt.py::test_checkpw_str_password",
"tests/test_bcrypt.py::test_checkpw_str_salt",
"tests/test_bcrypt.py::test_checkpw_nul_byte"
]
| [
"tests/test_bcrypt.py::test_checkpw[Kk4DQuMMfZL9o-$2b$04$cVWp4XaNU8a4v1uMRum2SO-$2b$04$cVWp4XaNU8a4v1uMRum2SO026BWLIoQMD/TXg5uZV.0P.uO8m3YEm]",
"tests/test_bcrypt.py::test_checkpw[9IeRXmnGxMYbs-$2b$04$pQ7gRO7e6wx/936oXhNjrO-$2b$04$pQ7gRO7e6wx/936oXhNjrOUNOHL1D0h1N2IDbJZYs.1ppzSof6SPy]",
"tests/test_bcrypt.py::test_checkpw[xVQVbwa1S0M8r-$2b$04$SQe9knOzepOVKoYXo9xTte-$2b$04$SQe9knOzepOVKoYXo9xTteNYr6MBwVz4tpriJVe3PNgYufGIsgKcW]",
"tests/test_bcrypt.py::test_checkpw[Zfgr26LWd22Za-$2b$04$eH8zX.q5Q.j2hO1NkVYJQO-$2b$04$eH8zX.q5Q.j2hO1NkVYJQOM6KxntS/ow3.YzVmFrE4t//CoF4fvne]",
"tests/test_bcrypt.py::test_checkpw[Tg4daC27epFBE-$2b$04$ahiTdwRXpUG2JLRcIznxc.-$2b$04$ahiTdwRXpUG2JLRcIznxc.s1.ydaPGD372bsGs8NqyYjLY1inG5n2]",
"tests/test_bcrypt.py::test_checkpw[xhQPMmwh5ALzW-$2b$04$nQn78dV0hGHf5wUBe0zOFu-$2b$04$nQn78dV0hGHf5wUBe0zOFu8n07ZbWWOKoGasZKRspZxtt.vBRNMIy]",
"tests/test_bcrypt.py::test_checkpw[59je8h5Gj71tg-$2b$04$cvXudZ5ugTg95W.rOjMITu-$2b$04$cvXudZ5ugTg95W.rOjMITuM1jC0piCl3zF5cmGhzCibHZrNHkmckG]",
"tests/test_bcrypt.py::test_checkpw[wT4fHJa2N9WSW-$2b$04$YYjtiq4Uh88yUsExO0RNTu-$2b$04$YYjtiq4Uh88yUsExO0RNTuEJ.tZlsONac16A8OcLHleWFjVawfGvO]",
"tests/test_bcrypt.py::test_checkpw[uSgFRnQdOgm4S-$2b$04$WLTjgY/pZSyqX/fbMbJzf.-$2b$04$WLTjgY/pZSyqX/fbMbJzf.qxCeTMQOzgL.CimRjMHtMxd/VGKojMu]",
"tests/test_bcrypt.py::test_checkpw[tEPtJZXur16Vg-$2b$04$2moPs/x/wnCfeQ5pCheMcu-$2b$04$2moPs/x/wnCfeQ5pCheMcuSJQ/KYjOZG780UjA/SiR.KsYWNrC7SG]",
"tests/test_bcrypt.py::test_checkpw[vvho8C6nlVf9K-$2b$04$HrEYC/AQ2HS77G78cQDZQ.-$2b$04$HrEYC/AQ2HS77G78cQDZQ.r44WGcruKw03KHlnp71yVQEwpsi3xl2]",
"tests/test_bcrypt.py::test_checkpw[5auCCY9by0Ruf-$2b$04$vVYgSTfB8KVbmhbZE/k3R.-$2b$04$vVYgSTfB8KVbmhbZE/k3R.ux9A0lJUM4CZwCkHI9fifke2.rTF7MG]",
"tests/test_bcrypt.py::test_checkpw[GtTkR6qn2QOZW-$2b$04$JfoNrR8.doieoI8..F.C1O-$2b$04$JfoNrR8.doieoI8..F.C1OQgwE3uTeuardy6lw0AjALUzOARoyf2m]",
"tests/test_bcrypt.py::test_checkpw[zKo8vdFSnjX0f-$2b$04$HP3I0PUs7KBEzMBNFw7o3O-$2b$04$HP3I0PUs7KBEzMBNFw7o3O7f/uxaZU7aaDot1quHMgB2yrwBXsgyy]",
"tests/test_bcrypt.py::test_checkpw[I9VfYlacJiwiK-$2b$04$xnFVhJsTzsFBTeP3PpgbMe-$2b$04$xnFVhJsTzsFBTeP3PpgbMeMREb6rdKV9faW54Sx.yg9plf4jY8qT6]",
"tests/test_bcrypt.py::test_checkpw[VFPO7YXnHQbQO-$2b$04$WQp9.igoLqVr6Qk70mz6xu-$2b$04$WQp9.igoLqVr6Qk70mz6xuRxE0RttVXXdukpR9N54x17ecad34ZF6]",
"tests/test_bcrypt.py::test_checkpw[VDx5BdxfxstYk-$2b$04$xgZtlonpAHSU/njOCdKztO-$2b$04$xgZtlonpAHSU/njOCdKztOPuPFzCNVpB4LGicO4/OGgHv.uKHkwsS]",
"tests/test_bcrypt.py::test_checkpw[dEe6XfVGrrfSH-$2b$04$2Siw3Nv3Q/gTOIPetAyPr.-$2b$04$2Siw3Nv3Q/gTOIPetAyPr.GNj3aO0lb1E5E9UumYGKjP9BYqlNWJe]",
"tests/test_bcrypt.py::test_checkpw[cTT0EAFdwJiLn-$2b$04$7/Qj7Kd8BcSahPO4khB8me-$2b$04$7/Qj7Kd8BcSahPO4khB8me4ssDJCW3r4OGYqPF87jxtrSyPj5cS5m]",
"tests/test_bcrypt.py::test_checkpw[J8eHUDuxBB520-$2b$04$VvlCUKbTMjaxaYJ.k5juoe-$2b$04$VvlCUKbTMjaxaYJ.k5juoecpG/7IzcH1AkmqKi.lIZMVIOLClWAk.]",
"tests/test_bcrypt.py::test_checkpw[U*U-$2a$05$CCCCCCCCCCCCCCCCCCCCC.-$2a$05$CCCCCCCCCCCCCCCCCCCCC.E5YPO9kmyuRGyh0XouQYb4YMJKvyOeW]",
"tests/test_bcrypt.py::test_checkpw[U*U*-$2a$05$CCCCCCCCCCCCCCCCCCCCC.-$2a$05$CCCCCCCCCCCCCCCCCCCCC.VGOzA784oUp/Z0DY336zx7pLYAy0lwK]",
"tests/test_bcrypt.py::test_checkpw[U*U*U-$2a$05$XXXXXXXXXXXXXXXXXXXXXO-$2a$05$XXXXXXXXXXXXXXXXXXXXXOAcXxm9kjPGEMsLznoKqmqw7tc8WCx4a]",
"tests/test_bcrypt.py::test_checkpw[0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789chars",
"tests/test_bcrypt.py::test_checkpw[\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaachars",
"tests/test_bcrypt.py::test_checkpw[\\xa3-$2a$05$/OK.fbVrR/bpIqNJ5ianF.-$2a$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq]",
"tests/test_bcrypt.py::test_checkpw_2y_prefix[\\xa3-$2y$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq-$2b$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq]",
"tests/test_bcrypt.py::test_checkpw_2y_prefix[\\xff\\xff\\xa3-$2y$05$/OK.fbVrR/bpIqNJ5ianF.CE5elHaaO4EbggVDjb8P19RukzXSM3e-$2b$05$/OK.fbVrR/bpIqNJ5ianF.CE5elHaaO4EbggVDjb8P19RukzXSM3e]",
"tests/test_bcrypt.py::test_checkpw_wrong_password"
]
| [
"tests/test_bcrypt.py::test_gensalt_basic",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[4-$2b$04$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[5-$2b$05$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[6-$2b$06$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[7-$2b$07$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[8-$2b$08$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[9-$2b$09$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[10-$2b$10$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[11-$2b$11$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[12-$2b$12$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[13-$2b$13$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[14-$2b$14$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[15-$2b$15$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[16-$2b$16$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[17-$2b$17$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[18-$2b$18$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[19-$2b$19$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[20-$2b$20$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[21-$2b$21$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[22-$2b$22$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[23-$2b$23$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[24-$2b$24$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_invalid[1]",
"tests/test_bcrypt.py::test_gensalt_rounds_invalid[2]",
"tests/test_bcrypt.py::test_gensalt_rounds_invalid[3]",
"tests/test_bcrypt.py::test_gensalt_bad_prefix",
"tests/test_bcrypt.py::test_gensalt_2a_prefix",
"tests/test_bcrypt.py::test_hashpw_new[Kk4DQuMMfZL9o-$2b$04$cVWp4XaNU8a4v1uMRum2SO-$2b$04$cVWp4XaNU8a4v1uMRum2SO026BWLIoQMD/TXg5uZV.0P.uO8m3YEm]",
"tests/test_bcrypt.py::test_hashpw_new[9IeRXmnGxMYbs-$2b$04$pQ7gRO7e6wx/936oXhNjrO-$2b$04$pQ7gRO7e6wx/936oXhNjrOUNOHL1D0h1N2IDbJZYs.1ppzSof6SPy]",
"tests/test_bcrypt.py::test_hashpw_new[xVQVbwa1S0M8r-$2b$04$SQe9knOzepOVKoYXo9xTte-$2b$04$SQe9knOzepOVKoYXo9xTteNYr6MBwVz4tpriJVe3PNgYufGIsgKcW]",
"tests/test_bcrypt.py::test_hashpw_new[Zfgr26LWd22Za-$2b$04$eH8zX.q5Q.j2hO1NkVYJQO-$2b$04$eH8zX.q5Q.j2hO1NkVYJQOM6KxntS/ow3.YzVmFrE4t//CoF4fvne]",
"tests/test_bcrypt.py::test_hashpw_new[Tg4daC27epFBE-$2b$04$ahiTdwRXpUG2JLRcIznxc.-$2b$04$ahiTdwRXpUG2JLRcIznxc.s1.ydaPGD372bsGs8NqyYjLY1inG5n2]",
"tests/test_bcrypt.py::test_hashpw_new[xhQPMmwh5ALzW-$2b$04$nQn78dV0hGHf5wUBe0zOFu-$2b$04$nQn78dV0hGHf5wUBe0zOFu8n07ZbWWOKoGasZKRspZxtt.vBRNMIy]",
"tests/test_bcrypt.py::test_hashpw_new[59je8h5Gj71tg-$2b$04$cvXudZ5ugTg95W.rOjMITu-$2b$04$cvXudZ5ugTg95W.rOjMITuM1jC0piCl3zF5cmGhzCibHZrNHkmckG]",
"tests/test_bcrypt.py::test_hashpw_new[wT4fHJa2N9WSW-$2b$04$YYjtiq4Uh88yUsExO0RNTu-$2b$04$YYjtiq4Uh88yUsExO0RNTuEJ.tZlsONac16A8OcLHleWFjVawfGvO]",
"tests/test_bcrypt.py::test_hashpw_new[uSgFRnQdOgm4S-$2b$04$WLTjgY/pZSyqX/fbMbJzf.-$2b$04$WLTjgY/pZSyqX/fbMbJzf.qxCeTMQOzgL.CimRjMHtMxd/VGKojMu]",
"tests/test_bcrypt.py::test_hashpw_new[tEPtJZXur16Vg-$2b$04$2moPs/x/wnCfeQ5pCheMcu-$2b$04$2moPs/x/wnCfeQ5pCheMcuSJQ/KYjOZG780UjA/SiR.KsYWNrC7SG]",
"tests/test_bcrypt.py::test_hashpw_new[vvho8C6nlVf9K-$2b$04$HrEYC/AQ2HS77G78cQDZQ.-$2b$04$HrEYC/AQ2HS77G78cQDZQ.r44WGcruKw03KHlnp71yVQEwpsi3xl2]",
"tests/test_bcrypt.py::test_hashpw_new[5auCCY9by0Ruf-$2b$04$vVYgSTfB8KVbmhbZE/k3R.-$2b$04$vVYgSTfB8KVbmhbZE/k3R.ux9A0lJUM4CZwCkHI9fifke2.rTF7MG]",
"tests/test_bcrypt.py::test_hashpw_new[GtTkR6qn2QOZW-$2b$04$JfoNrR8.doieoI8..F.C1O-$2b$04$JfoNrR8.doieoI8..F.C1OQgwE3uTeuardy6lw0AjALUzOARoyf2m]",
"tests/test_bcrypt.py::test_hashpw_new[zKo8vdFSnjX0f-$2b$04$HP3I0PUs7KBEzMBNFw7o3O-$2b$04$HP3I0PUs7KBEzMBNFw7o3O7f/uxaZU7aaDot1quHMgB2yrwBXsgyy]",
"tests/test_bcrypt.py::test_hashpw_new[I9VfYlacJiwiK-$2b$04$xnFVhJsTzsFBTeP3PpgbMe-$2b$04$xnFVhJsTzsFBTeP3PpgbMeMREb6rdKV9faW54Sx.yg9plf4jY8qT6]",
"tests/test_bcrypt.py::test_hashpw_new[VFPO7YXnHQbQO-$2b$04$WQp9.igoLqVr6Qk70mz6xu-$2b$04$WQp9.igoLqVr6Qk70mz6xuRxE0RttVXXdukpR9N54x17ecad34ZF6]",
"tests/test_bcrypt.py::test_hashpw_new[VDx5BdxfxstYk-$2b$04$xgZtlonpAHSU/njOCdKztO-$2b$04$xgZtlonpAHSU/njOCdKztOPuPFzCNVpB4LGicO4/OGgHv.uKHkwsS]",
"tests/test_bcrypt.py::test_hashpw_new[dEe6XfVGrrfSH-$2b$04$2Siw3Nv3Q/gTOIPetAyPr.-$2b$04$2Siw3Nv3Q/gTOIPetAyPr.GNj3aO0lb1E5E9UumYGKjP9BYqlNWJe]",
"tests/test_bcrypt.py::test_hashpw_new[cTT0EAFdwJiLn-$2b$04$7/Qj7Kd8BcSahPO4khB8me-$2b$04$7/Qj7Kd8BcSahPO4khB8me4ssDJCW3r4OGYqPF87jxtrSyPj5cS5m]",
"tests/test_bcrypt.py::test_hashpw_new[J8eHUDuxBB520-$2b$04$VvlCUKbTMjaxaYJ.k5juoe-$2b$04$VvlCUKbTMjaxaYJ.k5juoecpG/7IzcH1AkmqKi.lIZMVIOLClWAk.]",
"tests/test_bcrypt.py::test_hashpw_new[U*U-$2a$05$CCCCCCCCCCCCCCCCCCCCC.-$2a$05$CCCCCCCCCCCCCCCCCCCCC.E5YPO9kmyuRGyh0XouQYb4YMJKvyOeW]",
"tests/test_bcrypt.py::test_hashpw_new[U*U*-$2a$05$CCCCCCCCCCCCCCCCCCCCC.-$2a$05$CCCCCCCCCCCCCCCCCCCCC.VGOzA784oUp/Z0DY336zx7pLYAy0lwK]",
"tests/test_bcrypt.py::test_hashpw_new[U*U*U-$2a$05$XXXXXXXXXXXXXXXXXXXXXO-$2a$05$XXXXXXXXXXXXXXXXXXXXXOAcXxm9kjPGEMsLznoKqmqw7tc8WCx4a]",
"tests/test_bcrypt.py::test_hashpw_new[0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789chars",
"tests/test_bcrypt.py::test_hashpw_new[\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaachars",
"tests/test_bcrypt.py::test_hashpw_new[\\xa3-$2a$05$/OK.fbVrR/bpIqNJ5ianF.-$2a$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq]",
"tests/test_bcrypt.py::test_hashpw_existing[Kk4DQuMMfZL9o-$2b$04$cVWp4XaNU8a4v1uMRum2SO-$2b$04$cVWp4XaNU8a4v1uMRum2SO026BWLIoQMD/TXg5uZV.0P.uO8m3YEm]",
"tests/test_bcrypt.py::test_hashpw_existing[9IeRXmnGxMYbs-$2b$04$pQ7gRO7e6wx/936oXhNjrO-$2b$04$pQ7gRO7e6wx/936oXhNjrOUNOHL1D0h1N2IDbJZYs.1ppzSof6SPy]",
"tests/test_bcrypt.py::test_hashpw_existing[xVQVbwa1S0M8r-$2b$04$SQe9knOzepOVKoYXo9xTte-$2b$04$SQe9knOzepOVKoYXo9xTteNYr6MBwVz4tpriJVe3PNgYufGIsgKcW]",
"tests/test_bcrypt.py::test_hashpw_existing[Zfgr26LWd22Za-$2b$04$eH8zX.q5Q.j2hO1NkVYJQO-$2b$04$eH8zX.q5Q.j2hO1NkVYJQOM6KxntS/ow3.YzVmFrE4t//CoF4fvne]",
"tests/test_bcrypt.py::test_hashpw_existing[Tg4daC27epFBE-$2b$04$ahiTdwRXpUG2JLRcIznxc.-$2b$04$ahiTdwRXpUG2JLRcIznxc.s1.ydaPGD372bsGs8NqyYjLY1inG5n2]",
"tests/test_bcrypt.py::test_hashpw_existing[xhQPMmwh5ALzW-$2b$04$nQn78dV0hGHf5wUBe0zOFu-$2b$04$nQn78dV0hGHf5wUBe0zOFu8n07ZbWWOKoGasZKRspZxtt.vBRNMIy]",
"tests/test_bcrypt.py::test_hashpw_existing[59je8h5Gj71tg-$2b$04$cvXudZ5ugTg95W.rOjMITu-$2b$04$cvXudZ5ugTg95W.rOjMITuM1jC0piCl3zF5cmGhzCibHZrNHkmckG]",
"tests/test_bcrypt.py::test_hashpw_existing[wT4fHJa2N9WSW-$2b$04$YYjtiq4Uh88yUsExO0RNTu-$2b$04$YYjtiq4Uh88yUsExO0RNTuEJ.tZlsONac16A8OcLHleWFjVawfGvO]",
"tests/test_bcrypt.py::test_hashpw_existing[uSgFRnQdOgm4S-$2b$04$WLTjgY/pZSyqX/fbMbJzf.-$2b$04$WLTjgY/pZSyqX/fbMbJzf.qxCeTMQOzgL.CimRjMHtMxd/VGKojMu]",
"tests/test_bcrypt.py::test_hashpw_existing[tEPtJZXur16Vg-$2b$04$2moPs/x/wnCfeQ5pCheMcu-$2b$04$2moPs/x/wnCfeQ5pCheMcuSJQ/KYjOZG780UjA/SiR.KsYWNrC7SG]",
"tests/test_bcrypt.py::test_hashpw_existing[vvho8C6nlVf9K-$2b$04$HrEYC/AQ2HS77G78cQDZQ.-$2b$04$HrEYC/AQ2HS77G78cQDZQ.r44WGcruKw03KHlnp71yVQEwpsi3xl2]",
"tests/test_bcrypt.py::test_hashpw_existing[5auCCY9by0Ruf-$2b$04$vVYgSTfB8KVbmhbZE/k3R.-$2b$04$vVYgSTfB8KVbmhbZE/k3R.ux9A0lJUM4CZwCkHI9fifke2.rTF7MG]",
"tests/test_bcrypt.py::test_hashpw_existing[GtTkR6qn2QOZW-$2b$04$JfoNrR8.doieoI8..F.C1O-$2b$04$JfoNrR8.doieoI8..F.C1OQgwE3uTeuardy6lw0AjALUzOARoyf2m]",
"tests/test_bcrypt.py::test_hashpw_existing[zKo8vdFSnjX0f-$2b$04$HP3I0PUs7KBEzMBNFw7o3O-$2b$04$HP3I0PUs7KBEzMBNFw7o3O7f/uxaZU7aaDot1quHMgB2yrwBXsgyy]",
"tests/test_bcrypt.py::test_hashpw_existing[I9VfYlacJiwiK-$2b$04$xnFVhJsTzsFBTeP3PpgbMe-$2b$04$xnFVhJsTzsFBTeP3PpgbMeMREb6rdKV9faW54Sx.yg9plf4jY8qT6]",
"tests/test_bcrypt.py::test_hashpw_existing[VFPO7YXnHQbQO-$2b$04$WQp9.igoLqVr6Qk70mz6xu-$2b$04$WQp9.igoLqVr6Qk70mz6xuRxE0RttVXXdukpR9N54x17ecad34ZF6]",
"tests/test_bcrypt.py::test_hashpw_existing[VDx5BdxfxstYk-$2b$04$xgZtlonpAHSU/njOCdKztO-$2b$04$xgZtlonpAHSU/njOCdKztOPuPFzCNVpB4LGicO4/OGgHv.uKHkwsS]",
"tests/test_bcrypt.py::test_hashpw_existing[dEe6XfVGrrfSH-$2b$04$2Siw3Nv3Q/gTOIPetAyPr.-$2b$04$2Siw3Nv3Q/gTOIPetAyPr.GNj3aO0lb1E5E9UumYGKjP9BYqlNWJe]",
"tests/test_bcrypt.py::test_hashpw_existing[cTT0EAFdwJiLn-$2b$04$7/Qj7Kd8BcSahPO4khB8me-$2b$04$7/Qj7Kd8BcSahPO4khB8me4ssDJCW3r4OGYqPF87jxtrSyPj5cS5m]",
"tests/test_bcrypt.py::test_hashpw_existing[J8eHUDuxBB520-$2b$04$VvlCUKbTMjaxaYJ.k5juoe-$2b$04$VvlCUKbTMjaxaYJ.k5juoecpG/7IzcH1AkmqKi.lIZMVIOLClWAk.]",
"tests/test_bcrypt.py::test_hashpw_existing[U*U-$2a$05$CCCCCCCCCCCCCCCCCCCCC.-$2a$05$CCCCCCCCCCCCCCCCCCCCC.E5YPO9kmyuRGyh0XouQYb4YMJKvyOeW]",
"tests/test_bcrypt.py::test_hashpw_existing[U*U*-$2a$05$CCCCCCCCCCCCCCCCCCCCC.-$2a$05$CCCCCCCCCCCCCCCCCCCCC.VGOzA784oUp/Z0DY336zx7pLYAy0lwK]",
"tests/test_bcrypt.py::test_hashpw_existing[U*U*U-$2a$05$XXXXXXXXXXXXXXXXXXXXXO-$2a$05$XXXXXXXXXXXXXXXXXXXXXOAcXxm9kjPGEMsLznoKqmqw7tc8WCx4a]",
"tests/test_bcrypt.py::test_hashpw_existing[0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789chars",
"tests/test_bcrypt.py::test_hashpw_existing[\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaachars",
"tests/test_bcrypt.py::test_hashpw_existing[\\xa3-$2a$05$/OK.fbVrR/bpIqNJ5ianF.-$2a$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq]",
"tests/test_bcrypt.py::test_hashpw_2y_prefix[\\xa3-$2y$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq-$2b$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq]",
"tests/test_bcrypt.py::test_hashpw_2y_prefix[\\xff\\xff\\xa3-$2y$05$/OK.fbVrR/bpIqNJ5ianF.CE5elHaaO4EbggVDjb8P19RukzXSM3e-$2b$05$/OK.fbVrR/bpIqNJ5ianF.CE5elHaaO4EbggVDjb8P19RukzXSM3e]",
"tests/test_bcrypt.py::test_hashpw_invalid",
"tests/test_bcrypt.py::test_hashpw_str_password",
"tests/test_bcrypt.py::test_hashpw_str_salt",
"tests/test_bcrypt.py::test_hashpw_nul_byte",
"tests/test_bcrypt.py::test_kdf[4-password-salt-[\\xbf\\x0c\\xc2\\x93X\\x7f\\x1c65U\\'ye\\x98\\xd4~W\\x90q\\xbfB~\\x9d\\x8f\\xbe\\x84*\\xba4\\xd9]",
"tests/test_bcrypt.py::test_kdf[4-password-\\x00-\\xc1+Vb5\\xee\\xe0L!%\\x98\\x97\\nW\\x9ag]",
"tests/test_bcrypt.py::test_kdf[4-\\x00-salt-`Q\\xbe\\x18\\xc2\\xf4\\xf8,\\xbf\\x0e\\xfe\\xe5G\\x1bK\\xb9]",
"tests/test_bcrypt.py::test_kdf[4-password\\x00-salt\\x00-t\\x10\\xe4L\\xf4\\xfa\\x07\\xbf\\xaa\\xc8\\xa9(\\xb1r\\x7f\\xac\\x00\\x13u\\xe7\\xbfs\\x847\\x0fH\\xef\\xd1!t0P]",
"tests/test_bcrypt.py::test_kdf[4-pass\\x00wor-sa\\x00l-\\xc2\\xbf\\xfd\\x9d\\xb3\\x8fei\\xef\\xefCr\\xf4\\xde\\x83\\xc0]",
"tests/test_bcrypt.py::test_kdf[4-pass\\x00word-sa\\x00lt-K\\xa4\\xac9%\\xc0\\xe8\\xd7\\xf0\\xcd\\xb6\\xbb\\x16\\x84\\xa5o]",
"tests/test_bcrypt.py::test_kdf[8-password-salt-\\xe16~\\xc5\\x15\\x1a3\\xfa\\xacL\\xc1\\xc1D\\xcd#\\xfa\\x15\\xd5T\\x84\\x93\\xec\\xc9\\x9b\\x9b]\\x9c\\r;'\\xbe\\xc7b'\\xeaf\\x08\\x8b\\x84\\x9b",
"tests/test_bcrypt.py::test_kdf[42-password-salt-\\x83<\\xf0\\xdc\\xf5m\\xb6V\\x08\\xe8\\xf0\\xdc\\x0c\\xe8\\x82\\xbd]",
"tests/test_bcrypt.py::test_kdf[8-Lorem",
"tests/test_bcrypt.py::test_kdf[8-\\r\\xb3\\xac\\x94\\xb3\\xeeS(OJ\"\\x89;<$\\xae-:b\\xf0\\xf0\\xdb\\xce\\xf8#\\xcf\\xcc\\x85HV\\xea\\x10(-",
"tests/test_bcrypt.py::test_kdf[8-\\xe1\\xbd\\x88\\xce\\xb4\\xcf\\x85\\xcf\\x83\\xcf\\x83\\xce\\xb5\\xcf\\x8d\\xcf\\x82-\\xce\\xa4\\xce\\xb7\\xce\\xbb\\xce\\xad\\xce\\xbc\\xce\\xb1\\xcf\\x87\\xce\\xbf\\xcf\\x82-Cfl\\x9b\\t\\xef3\\xed\\x8c'\\xe8\\xe8\\xf3\\xe2\\xd8\\xe6]",
"tests/test_bcrypt.py::test_kdf_str_password",
"tests/test_bcrypt.py::test_kdf_str_salt",
"tests/test_bcrypt.py::test_invalid_params[pass-$2b$04$cVWp4XaNU8a4v1uMRum2SO-10-10-TypeError]",
"tests/test_bcrypt.py::test_invalid_params[password-salt-10-10-TypeError]",
"tests/test_bcrypt.py::test_invalid_params[-$2b$04$cVWp4XaNU8a4v1uMRum2SO-10-10-ValueError]",
"tests/test_bcrypt.py::test_invalid_params[password--10-10-ValueError]",
"tests/test_bcrypt.py::test_invalid_params[password-$2b$04$cVWp4XaNU8a4v1uMRum2SO-0-10-ValueError]",
"tests/test_bcrypt.py::test_invalid_params[password-$2b$04$cVWp4XaNU8a4v1uMRum2SO--3-10-ValueError]",
"tests/test_bcrypt.py::test_invalid_params[password-$2b$04$cVWp4XaNU8a4v1uMRum2SO-513-10-ValueError]",
"tests/test_bcrypt.py::test_invalid_params[password-$2b$04$cVWp4XaNU8a4v1uMRum2SO-20-0-ValueError]",
"tests/test_bcrypt.py::test_bcrypt_assert"
]
| []
| Apache License 2.0 | 602 | [
"README.rst",
"src/bcrypt/__init__.py",
"tasks.py",
"src/build_bcrypt.py"
]
| [
"README.rst",
"src/bcrypt/__init__.py",
"tasks.py",
"src/build_bcrypt.py"
]
|
pyca__bcrypt-81 | 268b256020c716388206210c924672e3fca50909 | 2016-06-29 01:25:42 | c9c76210fad230995a6155287e8b92c49180eae4 | reaperhulk: This fix is a bit weird to me since arguably OpenBSD's "If you're on the old $2a$ hash you may want this behavior because you're checking existing hashes that were made with that behavior" is more correct, but I guess preserving compatibility with ourselves is more important.
dstufft: @reaperhulk Yea-- I think it's especially important to break those particular hashes because they're trivial to brute force (since they hash the same as ``b"0" * 72``), which is one of the first things that something like JackTheRipper will try. However, given that we had the old behavior previously, if we didn't restore it we'd also be breaking anyone who happened to use this kind of password (although the edge case this hits is quite rare).
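A short sketch of the behavior being restored (assuming the truncating ``hashpw`` from the diff below; the low cost factor is purely to keep the example fast): once inputs are cut to 72 bytes, a long password hashes identically to its 72-byte prefix, exactly as it did in 2.0.0.

```python
import bcrypt

# Cost factor 4 keeps the demonstration fast; real deployments use more.
salt = bcrypt.gensalt(4)
long_password = b"0" * 260  # the kind of over-length input the bug affected
assert bcrypt.hashpw(long_password, salt) == bcrypt.hashpw(long_password[:72], salt)
```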
Ultimately though, I think the edge case is sufficiently narrow so as not to matter *that* much, but the fact it generates a warning in passlib motivates me to ensure this is fixed :) | diff --git a/README.rst b/README.rst
index 62f189f..0883286 100644
--- a/README.rst
+++ b/README.rst
@@ -37,6 +37,10 @@ For Fedora and RHEL-derivatives, the following command will ensure that the requ
Changelog
=========
+3.1.0
+-----
+* Added support for ``checkpw`` as another method of verifying a password.
+
3.0.0
-----
* Switched the C backend to code obtained from the OpenBSD project rather than
@@ -51,8 +55,8 @@ Changelog
Usage
-----
-Hashing
-~~~~~~~
+Password Hashing
+~~~~~~~~~~~~~~~~
Hashing and then later checking that a password matches the previous hashed
password is very simple:
@@ -63,9 +67,9 @@ password is very simple:
>>> password = b"super secret password"
>>> # Hash a password for the first time, with a randomly-generated salt
>>> hashed = bcrypt.hashpw(password, bcrypt.gensalt())
- >>> # Check that a unhashed password matches one that has previously been
- >>> # hashed
- >>> if bcrypt.hashpw(password, hashed) == hashed:
+ >>> # Check that an unhashed password matches one that has previously been
+ >>> # hashed
+ >>> if bcrypt.checkpw(password, hashed):
... print("It Matches!")
... else:
... print("It Does not Match :(")
diff --git a/src/bcrypt/__init__.py b/src/bcrypt/__init__.py
index ad44e93..d6acb84 100644
--- a/src/bcrypt/__init__.py
+++ b/src/bcrypt/__init__.py
@@ -67,6 +67,14 @@ def hashpw(password, salt):
if b"\x00" in password:
raise ValueError("password may not contain NUL bytes")
+ # bcrypt originally suffered from a wraparound bug:
+ # http://www.openwall.com/lists/oss-security/2012/01/02/4
+ # This bug was corrected in the OpenBSD source by truncating inputs to 72
+ # bytes on the updated prefix $2b$, but leaving $2a$ unchanged for
+ # compatibility. However, pyca/bcrypt 2.0.0 *did* correctly truncate inputs
+ # on $2a$, so we do it here to preserve compatibility with 2.0.0
+ password = password[:72]
+
salt = _normalize_prefix(salt)
hashed = _bcrypt.ffi.new("unsigned char[]", 128)
@@ -78,6 +86,24 @@ def hashpw(password, salt):
return _bcrypt.ffi.string(hashed)
+def checkpw(password, hashed_password):
+ if (isinstance(password, six.text_type) or
+ isinstance(hashed_password, six.text_type)):
+ raise TypeError("Unicode-objects must be encoded before checking")
+
+ if b"\x00" in password or b"\x00" in hashed_password:
+ raise ValueError(
+ "password and hashed_password may not contain NUL bytes"
+ )
+
+ # If the user supplies a $2y$ prefix we normalize to $2b$
+ hashed_password = _normalize_prefix(hashed_password)
+
+ ret = hashpw(password, hashed_password)
+
+ return _bcrypt.lib.timingsafe_bcmp(ret, hashed_password, len(ret)) == 0
+
+
def kdf(password, salt, desired_key_bytes, rounds):
if isinstance(password, six.text_type) or isinstance(salt, six.text_type):
raise TypeError("Unicode-objects must be encoded before hashing")
diff --git a/src/build_bcrypt.py b/src/build_bcrypt.py
index e7aca4c..3eec35c 100644
--- a/src/build_bcrypt.py
+++ b/src/build_bcrypt.py
@@ -25,6 +25,7 @@ int bcrypt_hashpass(const char *, const char *, char *, size_t);
int encode_base64(char *, const uint8_t *, size_t);
int bcrypt_pbkdf(const char *, size_t, const uint8_t *, size_t,
uint8_t *, size_t, unsigned int);
+int timingsafe_bcmp(const void *, const void *, size_t);
""")
ffi.set_source(
diff --git a/tasks.py b/tasks.py
new file mode 100644
index 0000000..06b1492
--- /dev/null
+++ b/tasks.py
@@ -0,0 +1,120 @@
+from __future__ import absolute_import, division, print_function
+
+import getpass
+import io
+import os
+import time
+
+from clint.textui.progress import Bar as ProgressBar
+
+import invoke
+
+import requests
+
+
+JENKINS_URL = "https://jenkins.cryptography.io/job/bcrypt-wheel-builder"
+
+
+def wait_for_build_completed(session):
+ # Wait 20 seconds before actually checking if the build is complete, to
+ # ensure that it had time to really start.
+ time.sleep(20)
+ while True:
+ response = session.get(
+ "{0}/lastBuild/api/json/".format(JENKINS_URL),
+ headers={
+ "Accept": "application/json",
+ }
+ )
+ response.raise_for_status()
+ if not response.json()["building"]:
+ assert response.json()["result"] == "SUCCESS"
+ break
+ time.sleep(0.1)
+
+
+def download_artifacts(session):
+ response = session.get(
+ "{0}/lastBuild/api/json/".format(JENKINS_URL),
+ headers={
+ "Accept": "application/json"
+ }
+ )
+ response.raise_for_status()
+ assert not response.json()["building"]
+ assert response.json()["result"] == "SUCCESS"
+
+ paths = []
+
+ last_build_number = response.json()["number"]
+ for run in response.json()["runs"]:
+ if run["number"] != last_build_number:
+ print(
+ "Skipping {0} as it is not from the latest build ({1})".format(
+ run["url"], last_build_number
+ )
+ )
+ continue
+
+ response = session.get(
+ run["url"] + "api/json/",
+ headers={
+ "Accept": "application/json",
+ }
+ )
+ response.raise_for_status()
+ for artifact in response.json()["artifacts"]:
+ response = session.get(
+ "{0}artifact/{1}".format(run["url"], artifact["relativePath"]),
+ stream=True
+ )
+ assert response.headers["content-length"]
+ print("Downloading {0}".format(artifact["fileName"]))
+ bar = ProgressBar(
+ expected_size=int(response.headers["content-length"]),
+ filled_char="="
+ )
+ content = io.BytesIO()
+ for data in response.iter_content(chunk_size=8192):
+ content.write(data)
+ bar.show(content.tell())
+ assert bar.expected_size == content.tell()
+ bar.done()
+ out_path = os.path.join(
+ os.path.dirname(__file__),
+ "dist",
+ artifact["fileName"],
+ )
+ with open(out_path, "wb") as f:
+ f.write(content.getvalue())
+ paths.append(out_path)
+ return paths
+
+
[email protected]
+def release(version):
+ """
+ ``version`` should be a string like '0.4' or '1.0'.
+ """
+ invoke.run("git tag -s {0} -m '{0} release'".format(version))
+ invoke.run("git push --tags")
+
+ invoke.run("python setup.py sdist")
+
+ invoke.run(
+ "twine upload -s dist/bcrypt-{0}*".format(version)
+ )
+
+ session = requests.Session()
+
+ token = getpass.getpass("Input the Jenkins token: ")
+ response = session.post(
+ "{0}/build?token={1}".format(JENKINS_URL, token),
+ params={
+ "cause": "Building wheels for {0}".format(version)
+ }
+ )
+ response.raise_for_status()
+ wait_for_build_completed(session)
+ paths = download_artifacts(session)
+ invoke.run("twine upload {0}".format(" ".join(paths)))
| Possible regression of "BSD wraparound" bug?
[passlib implements a specific check](https://bitbucket.org/ecollins/passlib/src/934db60eea9cd9fe16fed88dcac114435256af47/passlib/handlers/bcrypt.py#bcrypt.py-318) against the ["BSD wraparound" bug](http://www.openwall.com/lists/oss-security/2012/01/02/4) that now appears to be failing with bcrypt 3.0
Although the included source from libcrypt appears to have been updated since the bug was fixed ([OpenBSD 5.5, according to passlib](http://passlib.readthedocs.io/en/stable/lib/passlib.hash.bcrypt.html#bsd-wraparound-bug)), I'm not familiar enough with the code to be able to tell whether this is a genuine regression or not.
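For what it's worth, the wraparound check can be reproduced standalone; the salt and expected digest below are copied from the regression test added in the test patch that follows, so nothing here is invented:

```python
import bcrypt

secret = (b"0123456789" * 26)[:255]  # 255 bytes trips the historic 8-bit length wrap
salt = b"$2a$04$R1lJ2gkNaoPGdafE.H.16."
expected = b"$2a$04$R1lJ2gkNaoPGdafE.H.16.1MKHPvmKwryeulRe225LKProWYwt9Oi"

# Passes with the 2.0.0-compatible truncation; a wraparound-affected $2a$
# implementation would produce a different digest here.
assert bcrypt.hashpw(secret, salt) == expected
```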
| pyca/bcrypt | diff --git a/tests/test_bcrypt.py b/tests/test_bcrypt.py
index b506a7a..47f315a 100644
--- a/tests/test_bcrypt.py
+++ b/tests/test_bcrypt.py
@@ -216,6 +216,11 @@ def test_hashpw_new(password, salt, hashed):
assert bcrypt.hashpw(password, salt) == hashed
[email protected](("password", "salt", "hashed"), _test_vectors)
+def test_checkpw(password, salt, hashed):
+ assert bcrypt.checkpw(password, hashed) is True
+
+
@pytest.mark.parametrize(("password", "salt", "hashed"), _test_vectors)
def test_hashpw_existing(password, salt, hashed):
assert bcrypt.hashpw(password, hashed) == hashed
@@ -226,11 +231,47 @@ def test_hashpw_2y_prefix(password, hashed, expected):
assert bcrypt.hashpw(password, hashed) == expected
[email protected](("password", "hashed", "expected"), _2y_test_vectors)
+def test_checkpw_2y_prefix(password, hashed, expected):
+ assert bcrypt.checkpw(password, hashed) is True
+
+
def test_hashpw_invalid():
with pytest.raises(ValueError):
bcrypt.hashpw(b"password", b"$2z$04$cVWp4XaNU8a4v1uMRum2SO")
+def test_checkpw_wrong_password():
+ assert bcrypt.checkpw(
+ b"badpass",
+ b"$2b$04$2Siw3Nv3Q/gTOIPetAyPr.GNj3aO0lb1E5E9UumYGKjP9BYqlNWJe"
+ ) is False
+
+
+def test_checkpw_bad_salt():
+ with pytest.raises(ValueError):
+ bcrypt.checkpw(
+ b"badpass",
+ b"$2b$04$?Siw3Nv3Q/gTOIPetAyPr.GNj3aO0lb1E5E9UumYGKjP9BYqlNWJe"
+ )
+
+
+def test_checkpw_str_password():
+ with pytest.raises(TypeError):
+ bcrypt.checkpw(
+ six.text_type("password"),
+ b"$2b$04$cVWp4XaNU8a4v1uMRum2SO",
+ )
+
+
+def test_checkpw_str_salt():
+ with pytest.raises(TypeError):
+ bcrypt.checkpw(
+ b"password",
+ six.text_type("$2b$04$cVWp4XaNU8a4v1uMRum2SO"),
+ )
+
+
def test_hashpw_str_password():
with pytest.raises(TypeError):
bcrypt.hashpw(
@@ -247,6 +288,20 @@ def test_hashpw_str_salt():
)
+def test_checkpw_nul_byte():
+ with pytest.raises(ValueError):
+ bcrypt.checkpw(
+ b"abc\0def",
+ b"$2b$04$2Siw3Nv3Q/gTOIPetAyPr.GNj3aO0lb1E5E9UumYGKjP9BYqlNWJe"
+ )
+
+ with pytest.raises(ValueError):
+ bcrypt.checkpw(
+ b"abcdef",
+ b"$2b$04$2S\0w3Nv3Q/gTOIPetAyPr.GNj3aO0lb1E5E9UumYGKjP9BYqlNWJe"
+ )
+
+
def test_hashpw_nul_byte():
salt = bcrypt.gensalt(4)
with pytest.raises(ValueError):
@@ -375,3 +430,9 @@ def test_invalid_params(password, salt, desired_key_bytes, rounds, error):
def test_bcrypt_assert():
with pytest.raises(SystemError):
bcrypt._bcrypt_assert(False)
+
+
+def test_2a_wraparound_bug():
+ assert bcrypt.hashpw(
+ (b"0123456789" * 26)[:255], b"$2a$04$R1lJ2gkNaoPGdafE.H.16."
+ ) == b"$2a$04$R1lJ2gkNaoPGdafE.H.16.1MKHPvmKwryeulRe225LKProWYwt9Oi"
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 3
} | 3.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[tests]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/pyca/bcrypt.git@268b256020c716388206210c924672e3fca50909#egg=bcrypt
cffi==1.17.1
exceptiongroup==1.2.2
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pycparser==2.22
pytest==8.3.5
six==1.17.0
tomli==2.2.1
| name: bcrypt
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cffi==1.17.1
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pycparser==2.22
- pytest==8.3.5
- six==1.17.0
- tomli==2.2.1
prefix: /opt/conda/envs/bcrypt
| [
"tests/test_bcrypt.py::test_checkpw_bad_salt",
"tests/test_bcrypt.py::test_checkpw_str_password",
"tests/test_bcrypt.py::test_checkpw_str_salt",
"tests/test_bcrypt.py::test_checkpw_nul_byte",
"tests/test_bcrypt.py::test_2a_wraparound_bug"
]
| [
"tests/test_bcrypt.py::test_checkpw[Kk4DQuMMfZL9o-$2b$04$cVWp4XaNU8a4v1uMRum2SO-$2b$04$cVWp4XaNU8a4v1uMRum2SO026BWLIoQMD/TXg5uZV.0P.uO8m3YEm]",
"tests/test_bcrypt.py::test_checkpw[9IeRXmnGxMYbs-$2b$04$pQ7gRO7e6wx/936oXhNjrO-$2b$04$pQ7gRO7e6wx/936oXhNjrOUNOHL1D0h1N2IDbJZYs.1ppzSof6SPy]",
"tests/test_bcrypt.py::test_checkpw[xVQVbwa1S0M8r-$2b$04$SQe9knOzepOVKoYXo9xTte-$2b$04$SQe9knOzepOVKoYXo9xTteNYr6MBwVz4tpriJVe3PNgYufGIsgKcW]",
"tests/test_bcrypt.py::test_checkpw[Zfgr26LWd22Za-$2b$04$eH8zX.q5Q.j2hO1NkVYJQO-$2b$04$eH8zX.q5Q.j2hO1NkVYJQOM6KxntS/ow3.YzVmFrE4t//CoF4fvne]",
"tests/test_bcrypt.py::test_checkpw[Tg4daC27epFBE-$2b$04$ahiTdwRXpUG2JLRcIznxc.-$2b$04$ahiTdwRXpUG2JLRcIznxc.s1.ydaPGD372bsGs8NqyYjLY1inG5n2]",
"tests/test_bcrypt.py::test_checkpw[xhQPMmwh5ALzW-$2b$04$nQn78dV0hGHf5wUBe0zOFu-$2b$04$nQn78dV0hGHf5wUBe0zOFu8n07ZbWWOKoGasZKRspZxtt.vBRNMIy]",
"tests/test_bcrypt.py::test_checkpw[59je8h5Gj71tg-$2b$04$cvXudZ5ugTg95W.rOjMITu-$2b$04$cvXudZ5ugTg95W.rOjMITuM1jC0piCl3zF5cmGhzCibHZrNHkmckG]",
"tests/test_bcrypt.py::test_checkpw[wT4fHJa2N9WSW-$2b$04$YYjtiq4Uh88yUsExO0RNTu-$2b$04$YYjtiq4Uh88yUsExO0RNTuEJ.tZlsONac16A8OcLHleWFjVawfGvO]",
"tests/test_bcrypt.py::test_checkpw[uSgFRnQdOgm4S-$2b$04$WLTjgY/pZSyqX/fbMbJzf.-$2b$04$WLTjgY/pZSyqX/fbMbJzf.qxCeTMQOzgL.CimRjMHtMxd/VGKojMu]",
"tests/test_bcrypt.py::test_checkpw[tEPtJZXur16Vg-$2b$04$2moPs/x/wnCfeQ5pCheMcu-$2b$04$2moPs/x/wnCfeQ5pCheMcuSJQ/KYjOZG780UjA/SiR.KsYWNrC7SG]",
"tests/test_bcrypt.py::test_checkpw[vvho8C6nlVf9K-$2b$04$HrEYC/AQ2HS77G78cQDZQ.-$2b$04$HrEYC/AQ2HS77G78cQDZQ.r44WGcruKw03KHlnp71yVQEwpsi3xl2]",
"tests/test_bcrypt.py::test_checkpw[5auCCY9by0Ruf-$2b$04$vVYgSTfB8KVbmhbZE/k3R.-$2b$04$vVYgSTfB8KVbmhbZE/k3R.ux9A0lJUM4CZwCkHI9fifke2.rTF7MG]",
"tests/test_bcrypt.py::test_checkpw[GtTkR6qn2QOZW-$2b$04$JfoNrR8.doieoI8..F.C1O-$2b$04$JfoNrR8.doieoI8..F.C1OQgwE3uTeuardy6lw0AjALUzOARoyf2m]",
"tests/test_bcrypt.py::test_checkpw[zKo8vdFSnjX0f-$2b$04$HP3I0PUs7KBEzMBNFw7o3O-$2b$04$HP3I0PUs7KBEzMBNFw7o3O7f/uxaZU7aaDot1quHMgB2yrwBXsgyy]",
"tests/test_bcrypt.py::test_checkpw[I9VfYlacJiwiK-$2b$04$xnFVhJsTzsFBTeP3PpgbMe-$2b$04$xnFVhJsTzsFBTeP3PpgbMeMREb6rdKV9faW54Sx.yg9plf4jY8qT6]",
"tests/test_bcrypt.py::test_checkpw[VFPO7YXnHQbQO-$2b$04$WQp9.igoLqVr6Qk70mz6xu-$2b$04$WQp9.igoLqVr6Qk70mz6xuRxE0RttVXXdukpR9N54x17ecad34ZF6]",
"tests/test_bcrypt.py::test_checkpw[VDx5BdxfxstYk-$2b$04$xgZtlonpAHSU/njOCdKztO-$2b$04$xgZtlonpAHSU/njOCdKztOPuPFzCNVpB4LGicO4/OGgHv.uKHkwsS]",
"tests/test_bcrypt.py::test_checkpw[dEe6XfVGrrfSH-$2b$04$2Siw3Nv3Q/gTOIPetAyPr.-$2b$04$2Siw3Nv3Q/gTOIPetAyPr.GNj3aO0lb1E5E9UumYGKjP9BYqlNWJe]",
"tests/test_bcrypt.py::test_checkpw[cTT0EAFdwJiLn-$2b$04$7/Qj7Kd8BcSahPO4khB8me-$2b$04$7/Qj7Kd8BcSahPO4khB8me4ssDJCW3r4OGYqPF87jxtrSyPj5cS5m]",
"tests/test_bcrypt.py::test_checkpw[J8eHUDuxBB520-$2b$04$VvlCUKbTMjaxaYJ.k5juoe-$2b$04$VvlCUKbTMjaxaYJ.k5juoecpG/7IzcH1AkmqKi.lIZMVIOLClWAk.]",
"tests/test_bcrypt.py::test_checkpw[U*U-$2a$05$CCCCCCCCCCCCCCCCCCCCC.-$2a$05$CCCCCCCCCCCCCCCCCCCCC.E5YPO9kmyuRGyh0XouQYb4YMJKvyOeW]",
"tests/test_bcrypt.py::test_checkpw[U*U*-$2a$05$CCCCCCCCCCCCCCCCCCCCC.-$2a$05$CCCCCCCCCCCCCCCCCCCCC.VGOzA784oUp/Z0DY336zx7pLYAy0lwK]",
"tests/test_bcrypt.py::test_checkpw[U*U*U-$2a$05$XXXXXXXXXXXXXXXXXXXXXO-$2a$05$XXXXXXXXXXXXXXXXXXXXXOAcXxm9kjPGEMsLznoKqmqw7tc8WCx4a]",
"tests/test_bcrypt.py::test_checkpw[0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789chars",
"tests/test_bcrypt.py::test_checkpw[\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaachars",
"tests/test_bcrypt.py::test_checkpw[\\xa3-$2a$05$/OK.fbVrR/bpIqNJ5ianF.-$2a$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq]",
"tests/test_bcrypt.py::test_checkpw_2y_prefix[\\xa3-$2y$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq-$2b$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq]",
"tests/test_bcrypt.py::test_checkpw_2y_prefix[\\xff\\xff\\xa3-$2y$05$/OK.fbVrR/bpIqNJ5ianF.CE5elHaaO4EbggVDjb8P19RukzXSM3e-$2b$05$/OK.fbVrR/bpIqNJ5ianF.CE5elHaaO4EbggVDjb8P19RukzXSM3e]",
"tests/test_bcrypt.py::test_checkpw_wrong_password"
]
| [
"tests/test_bcrypt.py::test_gensalt_basic",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[4-$2b$04$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[5-$2b$05$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[6-$2b$06$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[7-$2b$07$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[8-$2b$08$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[9-$2b$09$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[10-$2b$10$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[11-$2b$11$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[12-$2b$12$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[13-$2b$13$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[14-$2b$14$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[15-$2b$15$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[16-$2b$16$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[17-$2b$17$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[18-$2b$18$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[19-$2b$19$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[20-$2b$20$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[21-$2b$21$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[22-$2b$22$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[23-$2b$23$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[24-$2b$24$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_invalid[1]",
"tests/test_bcrypt.py::test_gensalt_rounds_invalid[2]",
"tests/test_bcrypt.py::test_gensalt_rounds_invalid[3]",
"tests/test_bcrypt.py::test_gensalt_bad_prefix",
"tests/test_bcrypt.py::test_gensalt_2a_prefix",
"tests/test_bcrypt.py::test_hashpw_new[Kk4DQuMMfZL9o-$2b$04$cVWp4XaNU8a4v1uMRum2SO-$2b$04$cVWp4XaNU8a4v1uMRum2SO026BWLIoQMD/TXg5uZV.0P.uO8m3YEm]",
"tests/test_bcrypt.py::test_hashpw_new[9IeRXmnGxMYbs-$2b$04$pQ7gRO7e6wx/936oXhNjrO-$2b$04$pQ7gRO7e6wx/936oXhNjrOUNOHL1D0h1N2IDbJZYs.1ppzSof6SPy]",
"tests/test_bcrypt.py::test_hashpw_new[xVQVbwa1S0M8r-$2b$04$SQe9knOzepOVKoYXo9xTte-$2b$04$SQe9knOzepOVKoYXo9xTteNYr6MBwVz4tpriJVe3PNgYufGIsgKcW]",
"tests/test_bcrypt.py::test_hashpw_new[Zfgr26LWd22Za-$2b$04$eH8zX.q5Q.j2hO1NkVYJQO-$2b$04$eH8zX.q5Q.j2hO1NkVYJQOM6KxntS/ow3.YzVmFrE4t//CoF4fvne]",
"tests/test_bcrypt.py::test_hashpw_new[Tg4daC27epFBE-$2b$04$ahiTdwRXpUG2JLRcIznxc.-$2b$04$ahiTdwRXpUG2JLRcIznxc.s1.ydaPGD372bsGs8NqyYjLY1inG5n2]",
"tests/test_bcrypt.py::test_hashpw_new[xhQPMmwh5ALzW-$2b$04$nQn78dV0hGHf5wUBe0zOFu-$2b$04$nQn78dV0hGHf5wUBe0zOFu8n07ZbWWOKoGasZKRspZxtt.vBRNMIy]",
"tests/test_bcrypt.py::test_hashpw_new[59je8h5Gj71tg-$2b$04$cvXudZ5ugTg95W.rOjMITu-$2b$04$cvXudZ5ugTg95W.rOjMITuM1jC0piCl3zF5cmGhzCibHZrNHkmckG]",
"tests/test_bcrypt.py::test_hashpw_new[wT4fHJa2N9WSW-$2b$04$YYjtiq4Uh88yUsExO0RNTu-$2b$04$YYjtiq4Uh88yUsExO0RNTuEJ.tZlsONac16A8OcLHleWFjVawfGvO]",
"tests/test_bcrypt.py::test_hashpw_new[uSgFRnQdOgm4S-$2b$04$WLTjgY/pZSyqX/fbMbJzf.-$2b$04$WLTjgY/pZSyqX/fbMbJzf.qxCeTMQOzgL.CimRjMHtMxd/VGKojMu]",
"tests/test_bcrypt.py::test_hashpw_new[tEPtJZXur16Vg-$2b$04$2moPs/x/wnCfeQ5pCheMcu-$2b$04$2moPs/x/wnCfeQ5pCheMcuSJQ/KYjOZG780UjA/SiR.KsYWNrC7SG]",
"tests/test_bcrypt.py::test_hashpw_new[vvho8C6nlVf9K-$2b$04$HrEYC/AQ2HS77G78cQDZQ.-$2b$04$HrEYC/AQ2HS77G78cQDZQ.r44WGcruKw03KHlnp71yVQEwpsi3xl2]",
"tests/test_bcrypt.py::test_hashpw_new[5auCCY9by0Ruf-$2b$04$vVYgSTfB8KVbmhbZE/k3R.-$2b$04$vVYgSTfB8KVbmhbZE/k3R.ux9A0lJUM4CZwCkHI9fifke2.rTF7MG]",
"tests/test_bcrypt.py::test_hashpw_new[GtTkR6qn2QOZW-$2b$04$JfoNrR8.doieoI8..F.C1O-$2b$04$JfoNrR8.doieoI8..F.C1OQgwE3uTeuardy6lw0AjALUzOARoyf2m]",
"tests/test_bcrypt.py::test_hashpw_new[zKo8vdFSnjX0f-$2b$04$HP3I0PUs7KBEzMBNFw7o3O-$2b$04$HP3I0PUs7KBEzMBNFw7o3O7f/uxaZU7aaDot1quHMgB2yrwBXsgyy]",
"tests/test_bcrypt.py::test_hashpw_new[I9VfYlacJiwiK-$2b$04$xnFVhJsTzsFBTeP3PpgbMe-$2b$04$xnFVhJsTzsFBTeP3PpgbMeMREb6rdKV9faW54Sx.yg9plf4jY8qT6]",
"tests/test_bcrypt.py::test_hashpw_new[VFPO7YXnHQbQO-$2b$04$WQp9.igoLqVr6Qk70mz6xu-$2b$04$WQp9.igoLqVr6Qk70mz6xuRxE0RttVXXdukpR9N54x17ecad34ZF6]",
"tests/test_bcrypt.py::test_hashpw_new[VDx5BdxfxstYk-$2b$04$xgZtlonpAHSU/njOCdKztO-$2b$04$xgZtlonpAHSU/njOCdKztOPuPFzCNVpB4LGicO4/OGgHv.uKHkwsS]",
"tests/test_bcrypt.py::test_hashpw_new[dEe6XfVGrrfSH-$2b$04$2Siw3Nv3Q/gTOIPetAyPr.-$2b$04$2Siw3Nv3Q/gTOIPetAyPr.GNj3aO0lb1E5E9UumYGKjP9BYqlNWJe]",
"tests/test_bcrypt.py::test_hashpw_new[cTT0EAFdwJiLn-$2b$04$7/Qj7Kd8BcSahPO4khB8me-$2b$04$7/Qj7Kd8BcSahPO4khB8me4ssDJCW3r4OGYqPF87jxtrSyPj5cS5m]",
"tests/test_bcrypt.py::test_hashpw_new[J8eHUDuxBB520-$2b$04$VvlCUKbTMjaxaYJ.k5juoe-$2b$04$VvlCUKbTMjaxaYJ.k5juoecpG/7IzcH1AkmqKi.lIZMVIOLClWAk.]",
"tests/test_bcrypt.py::test_hashpw_new[U*U-$2a$05$CCCCCCCCCCCCCCCCCCCCC.-$2a$05$CCCCCCCCCCCCCCCCCCCCC.E5YPO9kmyuRGyh0XouQYb4YMJKvyOeW]",
"tests/test_bcrypt.py::test_hashpw_new[U*U*-$2a$05$CCCCCCCCCCCCCCCCCCCCC.-$2a$05$CCCCCCCCCCCCCCCCCCCCC.VGOzA784oUp/Z0DY336zx7pLYAy0lwK]",
"tests/test_bcrypt.py::test_hashpw_new[U*U*U-$2a$05$XXXXXXXXXXXXXXXXXXXXXO-$2a$05$XXXXXXXXXXXXXXXXXXXXXOAcXxm9kjPGEMsLznoKqmqw7tc8WCx4a]",
"tests/test_bcrypt.py::test_hashpw_new[0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789chars",
"tests/test_bcrypt.py::test_hashpw_new[\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaachars",
"tests/test_bcrypt.py::test_hashpw_new[\\xa3-$2a$05$/OK.fbVrR/bpIqNJ5ianF.-$2a$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq]",
"tests/test_bcrypt.py::test_hashpw_existing[Kk4DQuMMfZL9o-$2b$04$cVWp4XaNU8a4v1uMRum2SO-$2b$04$cVWp4XaNU8a4v1uMRum2SO026BWLIoQMD/TXg5uZV.0P.uO8m3YEm]",
"tests/test_bcrypt.py::test_hashpw_existing[9IeRXmnGxMYbs-$2b$04$pQ7gRO7e6wx/936oXhNjrO-$2b$04$pQ7gRO7e6wx/936oXhNjrOUNOHL1D0h1N2IDbJZYs.1ppzSof6SPy]",
"tests/test_bcrypt.py::test_hashpw_existing[xVQVbwa1S0M8r-$2b$04$SQe9knOzepOVKoYXo9xTte-$2b$04$SQe9knOzepOVKoYXo9xTteNYr6MBwVz4tpriJVe3PNgYufGIsgKcW]",
"tests/test_bcrypt.py::test_hashpw_existing[Zfgr26LWd22Za-$2b$04$eH8zX.q5Q.j2hO1NkVYJQO-$2b$04$eH8zX.q5Q.j2hO1NkVYJQOM6KxntS/ow3.YzVmFrE4t//CoF4fvne]",
"tests/test_bcrypt.py::test_hashpw_existing[Tg4daC27epFBE-$2b$04$ahiTdwRXpUG2JLRcIznxc.-$2b$04$ahiTdwRXpUG2JLRcIznxc.s1.ydaPGD372bsGs8NqyYjLY1inG5n2]",
"tests/test_bcrypt.py::test_hashpw_existing[xhQPMmwh5ALzW-$2b$04$nQn78dV0hGHf5wUBe0zOFu-$2b$04$nQn78dV0hGHf5wUBe0zOFu8n07ZbWWOKoGasZKRspZxtt.vBRNMIy]",
"tests/test_bcrypt.py::test_hashpw_existing[59je8h5Gj71tg-$2b$04$cvXudZ5ugTg95W.rOjMITu-$2b$04$cvXudZ5ugTg95W.rOjMITuM1jC0piCl3zF5cmGhzCibHZrNHkmckG]",
"tests/test_bcrypt.py::test_hashpw_existing[wT4fHJa2N9WSW-$2b$04$YYjtiq4Uh88yUsExO0RNTu-$2b$04$YYjtiq4Uh88yUsExO0RNTuEJ.tZlsONac16A8OcLHleWFjVawfGvO]",
"tests/test_bcrypt.py::test_hashpw_existing[uSgFRnQdOgm4S-$2b$04$WLTjgY/pZSyqX/fbMbJzf.-$2b$04$WLTjgY/pZSyqX/fbMbJzf.qxCeTMQOzgL.CimRjMHtMxd/VGKojMu]",
"tests/test_bcrypt.py::test_hashpw_existing[tEPtJZXur16Vg-$2b$04$2moPs/x/wnCfeQ5pCheMcu-$2b$04$2moPs/x/wnCfeQ5pCheMcuSJQ/KYjOZG780UjA/SiR.KsYWNrC7SG]",
"tests/test_bcrypt.py::test_hashpw_existing[vvho8C6nlVf9K-$2b$04$HrEYC/AQ2HS77G78cQDZQ.-$2b$04$HrEYC/AQ2HS77G78cQDZQ.r44WGcruKw03KHlnp71yVQEwpsi3xl2]",
"tests/test_bcrypt.py::test_hashpw_existing[5auCCY9by0Ruf-$2b$04$vVYgSTfB8KVbmhbZE/k3R.-$2b$04$vVYgSTfB8KVbmhbZE/k3R.ux9A0lJUM4CZwCkHI9fifke2.rTF7MG]",
"tests/test_bcrypt.py::test_hashpw_existing[GtTkR6qn2QOZW-$2b$04$JfoNrR8.doieoI8..F.C1O-$2b$04$JfoNrR8.doieoI8..F.C1OQgwE3uTeuardy6lw0AjALUzOARoyf2m]",
"tests/test_bcrypt.py::test_hashpw_existing[zKo8vdFSnjX0f-$2b$04$HP3I0PUs7KBEzMBNFw7o3O-$2b$04$HP3I0PUs7KBEzMBNFw7o3O7f/uxaZU7aaDot1quHMgB2yrwBXsgyy]",
"tests/test_bcrypt.py::test_hashpw_existing[I9VfYlacJiwiK-$2b$04$xnFVhJsTzsFBTeP3PpgbMe-$2b$04$xnFVhJsTzsFBTeP3PpgbMeMREb6rdKV9faW54Sx.yg9plf4jY8qT6]",
"tests/test_bcrypt.py::test_hashpw_existing[VFPO7YXnHQbQO-$2b$04$WQp9.igoLqVr6Qk70mz6xu-$2b$04$WQp9.igoLqVr6Qk70mz6xuRxE0RttVXXdukpR9N54x17ecad34ZF6]",
"tests/test_bcrypt.py::test_hashpw_existing[VDx5BdxfxstYk-$2b$04$xgZtlonpAHSU/njOCdKztO-$2b$04$xgZtlonpAHSU/njOCdKztOPuPFzCNVpB4LGicO4/OGgHv.uKHkwsS]",
"tests/test_bcrypt.py::test_hashpw_existing[dEe6XfVGrrfSH-$2b$04$2Siw3Nv3Q/gTOIPetAyPr.-$2b$04$2Siw3Nv3Q/gTOIPetAyPr.GNj3aO0lb1E5E9UumYGKjP9BYqlNWJe]",
"tests/test_bcrypt.py::test_hashpw_existing[cTT0EAFdwJiLn-$2b$04$7/Qj7Kd8BcSahPO4khB8me-$2b$04$7/Qj7Kd8BcSahPO4khB8me4ssDJCW3r4OGYqPF87jxtrSyPj5cS5m]",
"tests/test_bcrypt.py::test_hashpw_existing[J8eHUDuxBB520-$2b$04$VvlCUKbTMjaxaYJ.k5juoe-$2b$04$VvlCUKbTMjaxaYJ.k5juoecpG/7IzcH1AkmqKi.lIZMVIOLClWAk.]",
"tests/test_bcrypt.py::test_hashpw_existing[U*U-$2a$05$CCCCCCCCCCCCCCCCCCCCC.-$2a$05$CCCCCCCCCCCCCCCCCCCCC.E5YPO9kmyuRGyh0XouQYb4YMJKvyOeW]",
"tests/test_bcrypt.py::test_hashpw_existing[U*U*-$2a$05$CCCCCCCCCCCCCCCCCCCCC.-$2a$05$CCCCCCCCCCCCCCCCCCCCC.VGOzA784oUp/Z0DY336zx7pLYAy0lwK]",
"tests/test_bcrypt.py::test_hashpw_existing[U*U*U-$2a$05$XXXXXXXXXXXXXXXXXXXXXO-$2a$05$XXXXXXXXXXXXXXXXXXXXXOAcXxm9kjPGEMsLznoKqmqw7tc8WCx4a]",
"tests/test_bcrypt.py::test_hashpw_existing[0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789chars",
"tests/test_bcrypt.py::test_hashpw_existing[\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaachars",
"tests/test_bcrypt.py::test_hashpw_existing[\\xa3-$2a$05$/OK.fbVrR/bpIqNJ5ianF.-$2a$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq]",
"tests/test_bcrypt.py::test_hashpw_2y_prefix[\\xa3-$2y$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq-$2b$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq]",
"tests/test_bcrypt.py::test_hashpw_2y_prefix[\\xff\\xff\\xa3-$2y$05$/OK.fbVrR/bpIqNJ5ianF.CE5elHaaO4EbggVDjb8P19RukzXSM3e-$2b$05$/OK.fbVrR/bpIqNJ5ianF.CE5elHaaO4EbggVDjb8P19RukzXSM3e]",
"tests/test_bcrypt.py::test_hashpw_invalid",
"tests/test_bcrypt.py::test_hashpw_str_password",
"tests/test_bcrypt.py::test_hashpw_str_salt",
"tests/test_bcrypt.py::test_hashpw_nul_byte",
"tests/test_bcrypt.py::test_kdf[4-password-salt-[\\xbf\\x0c\\xc2\\x93X\\x7f\\x1c65U\\'ye\\x98\\xd4~W\\x90q\\xbfB~\\x9d\\x8f\\xbe\\x84*\\xba4\\xd9]",
"tests/test_bcrypt.py::test_kdf[4-password-\\x00-\\xc1+Vb5\\xee\\xe0L!%\\x98\\x97\\nW\\x9ag]",
"tests/test_bcrypt.py::test_kdf[4-\\x00-salt-`Q\\xbe\\x18\\xc2\\xf4\\xf8,\\xbf\\x0e\\xfe\\xe5G\\x1bK\\xb9]",
"tests/test_bcrypt.py::test_kdf[4-password\\x00-salt\\x00-t\\x10\\xe4L\\xf4\\xfa\\x07\\xbf\\xaa\\xc8\\xa9(\\xb1r\\x7f\\xac\\x00\\x13u\\xe7\\xbfs\\x847\\x0fH\\xef\\xd1!t0P]",
"tests/test_bcrypt.py::test_kdf[4-pass\\x00wor-sa\\x00l-\\xc2\\xbf\\xfd\\x9d\\xb3\\x8fei\\xef\\xefCr\\xf4\\xde\\x83\\xc0]",
"tests/test_bcrypt.py::test_kdf[4-pass\\x00word-sa\\x00lt-K\\xa4\\xac9%\\xc0\\xe8\\xd7\\xf0\\xcd\\xb6\\xbb\\x16\\x84\\xa5o]",
"tests/test_bcrypt.py::test_kdf[8-password-salt-\\xe16~\\xc5\\x15\\x1a3\\xfa\\xacL\\xc1\\xc1D\\xcd#\\xfa\\x15\\xd5T\\x84\\x93\\xec\\xc9\\x9b\\x9b]\\x9c\\r;'\\xbe\\xc7b'\\xeaf\\x08\\x8b\\x84\\x9b",
"tests/test_bcrypt.py::test_kdf[42-password-salt-\\x83<\\xf0\\xdc\\xf5m\\xb6V\\x08\\xe8\\xf0\\xdc\\x0c\\xe8\\x82\\xbd]",
"tests/test_bcrypt.py::test_kdf[8-Lorem",
"tests/test_bcrypt.py::test_kdf[8-\\r\\xb3\\xac\\x94\\xb3\\xeeS(OJ\"\\x89;<$\\xae-:b\\xf0\\xf0\\xdb\\xce\\xf8#\\xcf\\xcc\\x85HV\\xea\\x10(-",
"tests/test_bcrypt.py::test_kdf[8-\\xe1\\xbd\\x88\\xce\\xb4\\xcf\\x85\\xcf\\x83\\xcf\\x83\\xce\\xb5\\xcf\\x8d\\xcf\\x82-\\xce\\xa4\\xce\\xb7\\xce\\xbb\\xce\\xad\\xce\\xbc\\xce\\xb1\\xcf\\x87\\xce\\xbf\\xcf\\x82-Cfl\\x9b\\t\\xef3\\xed\\x8c'\\xe8\\xe8\\xf3\\xe2\\xd8\\xe6]",
"tests/test_bcrypt.py::test_kdf_str_password",
"tests/test_bcrypt.py::test_kdf_str_salt",
"tests/test_bcrypt.py::test_invalid_params[pass-$2b$04$cVWp4XaNU8a4v1uMRum2SO-10-10-TypeError]",
"tests/test_bcrypt.py::test_invalid_params[password-salt-10-10-TypeError]",
"tests/test_bcrypt.py::test_invalid_params[-$2b$04$cVWp4XaNU8a4v1uMRum2SO-10-10-ValueError]",
"tests/test_bcrypt.py::test_invalid_params[password--10-10-ValueError]",
"tests/test_bcrypt.py::test_invalid_params[password-$2b$04$cVWp4XaNU8a4v1uMRum2SO-0-10-ValueError]",
"tests/test_bcrypt.py::test_invalid_params[password-$2b$04$cVWp4XaNU8a4v1uMRum2SO--3-10-ValueError]",
"tests/test_bcrypt.py::test_invalid_params[password-$2b$04$cVWp4XaNU8a4v1uMRum2SO-513-10-ValueError]",
"tests/test_bcrypt.py::test_invalid_params[password-$2b$04$cVWp4XaNU8a4v1uMRum2SO-20-0-ValueError]",
"tests/test_bcrypt.py::test_bcrypt_assert"
]
| []
| Apache License 2.0 | 603 | [
"README.rst",
"src/bcrypt/__init__.py",
"tasks.py",
"src/build_bcrypt.py"
]
| [
"README.rst",
"src/bcrypt/__init__.py",
"tasks.py",
"src/build_bcrypt.py"
]
|
druid-io__pydruid-58 | ce0982867d4bb3d809ba32c6563dadc8d8d6576f | 2016-06-29 09:06:59 | 2420109553812470ed4d91cd0f8d5e70bbf2cfd3 | xvrl: @dakra some tests would be nice
dakra: I added a test | diff --git a/pydruid/query.py b/pydruid/query.py
index af246a0..cd7c1cf 100644
--- a/pydruid/query.py
+++ b/pydruid/query.py
@@ -24,12 +24,6 @@ from pydruid.utils.dimensions import build_dimension
from pydruid.utils.postaggregator import Postaggregator
from pydruid.utils.query_utils import UnicodeWriter
-try:
- import pandas
-except ImportError:
- print('Warning: unable to import Pandas. The export_pandas method will not work.')
- pass
-
class Query(collections.MutableSequence):
"""
@@ -159,6 +153,8 @@ class Query(collections.MutableSequence):
0 7 2013-10-04T00:00:00.000Z user_1
1 6 2013-10-04T00:00:00.000Z user_2
"""
+ import pandas
+
if self.result:
if self.query_type == "timeseries":
nres = [list(v['result'].items()) + [('timestamp', v['timestamp'])]
@@ -250,11 +246,11 @@ class QueryBuilder(object):
query_dict['pagingSpec'] = val
elif key == 'limit_spec':
query_dict['limitSpec'] = val
- elif key == "filter":
+ elif key == "filter" and val is not None:
query_dict[key] = Filter.build_filter(val)
- elif key == "having":
+ elif key == "having" and val is not None:
query_dict[key] = Having.build_having(val)
- elif key == 'dimension':
+ elif key == 'dimension' and val is not None:
query_dict[key] = build_dimension(val)
elif key == 'dimensions':
query_dict[key] = [build_dimension(v) for v in val]
| Exception when `filter=None`
When building a query, if `filter=None` is passed then an exception occurs:
```
File "/Users/se7entyse7en/Envs/viralize-web/lib/python2.7/site-packages/pydruid/utils/filters.py", line 61, in build_filter
return filter_obj.filter['filter']
AttributeError: 'NoneType' object has no attribute 'filter'
```
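
For reference, a minimal reproduction sketch (hypothetical, not from the original report; the import path and dict keys mirror the test suite in this change):

```python
from pydruid.query import QueryBuilder

builder = QueryBuilder()
# Before the fix, a None filter was passed straight into
# Filter.build_filter(), which then dereferenced None.filter:
builder.build_query(None, {'datasource': 'things', 'filter': None})
# -> AttributeError: 'NoneType' object has no attribute 'filter'
```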
| druid-io/pydruid | diff --git a/tests/test_query.py b/tests/test_query.py
index 64da17b..d84f35b 100644
--- a/tests/test_query.py
+++ b/tests/test_query.py
@@ -111,6 +111,44 @@ class TestQueryBuilder:
# then
assert query.query_dict == expected_query_dict
+ def test_build_query_none_type(self):
+ # given
+ expected_query_dict = {
+ 'queryType': None,
+ 'dataSource': 'things',
+ 'aggregations': [{'fieldName': 'thing', 'name': 'count', 'type': 'count'}],
+ 'filter': {'dimension': 'one', 'type': 'selector', 'value': 1},
+ 'having': {'aggregation': 'sum', 'type': 'greaterThan', 'value': 1},
+ 'dimension': 'dim1',
+ }
+
+ builder = QueryBuilder()
+
+ # when
+ builder_dict = {
+ 'datasource': 'things',
+ 'aggregations': {
+ 'count': aggregators.count('thing'),
+ },
+ 'filter': filters.Dimension('one') == 1,
+ 'having': having.Aggregation('sum') > 1,
+ 'dimension': 'dim1',
+ }
+ query = builder.build_query(None, builder_dict)
+
+ # then
+ assert query.query_dict == expected_query_dict
+
+ # you should be able to pass `None` to dimension/having/filter
+ for v in ['dimension', 'having', 'filter']:
+ expected_query_dict[v] = None
+ builder_dict[v] = None
+
+ query = builder.build_query(None, builder_dict)
+
+ assert query.query_dict == expected_query_dict
+
+
def test_validate_query(self):
# given
builder = QueryBuilder()
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
iniconfig==2.1.0
mock==5.2.0
packaging==24.2
pluggy==1.5.0
-e git+https://github.com/druid-io/pydruid.git@ce0982867d4bb3d809ba32c6563dadc8d8d6576f#egg=pydruid
pytest==8.3.5
six==1.17.0
tomli==2.2.1
| name: pydruid
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- mock==5.2.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- six==1.17.0
- tomli==2.2.1
prefix: /opt/conda/envs/pydruid
| [
"tests/test_query.py::TestQueryBuilder::test_build_query_none_type"
]
| []
| [
"tests/test_query.py::TestQueryBuilder::test_build_query",
"tests/test_query.py::TestQueryBuilder::test_validate_query",
"tests/test_query.py::TestQuery::test_export_tsv",
"tests/test_query.py::TestQuery::test_query_acts_as_a_wrapper_for_raw_result"
]
| []
| Apache License 2.0 | 604 | [
"pydruid/query.py"
]
| [
"pydruid/query.py"
]
|
enthought__okonomiyaki-220 | 5cbd87f7c349f999ac8d53fec18e44f5656bf5eb | 2016-06-29 13:53:13 | 5cbd87f7c349f999ac8d53fec18e44f5656bf5eb | diff --git a/okonomiyaki/platforms/__init__.py b/okonomiyaki/platforms/__init__.py
index c4d0a73..bd7d12b 100644
--- a/okonomiyaki/platforms/__init__.py
+++ b/okonomiyaki/platforms/__init__.py
@@ -1,7 +1,7 @@
# flake8: noqa
from .abi import PlatformABI, default_abi
from .epd_platform import X86, X86_64, EPDPlatform, applies
-from .platform import Platform
+from .platform import Platform, OSKind, FamilyKind, NameKind
from .python_implementation import PythonABI, PythonImplementation
__all__ = [
diff --git a/okonomiyaki/platforms/epd_platform.py b/okonomiyaki/platforms/epd_platform.py
index 503062b..97f4649 100644
--- a/okonomiyaki/platforms/epd_platform.py
+++ b/okonomiyaki/platforms/epd_platform.py
@@ -1,6 +1,7 @@
from __future__ import absolute_import
import re
+import warnings
import six
@@ -128,7 +129,7 @@ class EPDPlatform(object):
return _guess_epd_platform(arch)
@classmethod
- def from_epd_string(cls, s):
+ def from_string(cls, s):
"""
Create a new instance from an epd platform string (e.g. 'win-32')
@@ -151,6 +152,19 @@ class EPDPlatform(object):
platform = Platform(os, name, family, release, arch, machine)
return cls(platform)
+ @classmethod
+ def from_epd_string(cls, s):
+ """
+ Create a new instance from an epd platform string (e.g. 'win-32')
+
+ DEPRECATED: use from_string instead.
+ """
+ warnings.warn(
+ "Deprecated: use EPDPlatform.from_string instead",
+ DeprecationWarning
+ )
+ return cls.from_string(s)
+
@classmethod
def _from_spec_depend_data(cls, platform, osdist, arch_name):
msg = ("Unrecognized platform/osdist combination: {0!r}/{1!r}"
| expose *Kind enum to `okonomiyaki.platforms` | enthought/okonomiyaki | diff --git a/okonomiyaki/platforms/tests/test_epd_platform.py b/okonomiyaki/platforms/tests/test_epd_platform.py
index a17ab89..ea9c5cb 100644
--- a/okonomiyaki/platforms/tests/test_epd_platform.py
+++ b/okonomiyaki/platforms/tests/test_epd_platform.py
@@ -41,24 +41,24 @@ class TestEPDPlatform(unittest.TestCase):
def test_epd_platform_from_string(self):
"""Ensure every epd short platform is understood by EPDPlatform."""
for epd_platform_string in EPD_PLATFORM_SHORT_NAMES:
- EPDPlatform.from_epd_string(epd_platform_string)
+ EPDPlatform.from_string(epd_platform_string)
def test_pep425_is_unicode(self):
# When/Then
for platform_string in self.platform_strings:
- platform = EPDPlatform.from_epd_string(platform_string)
+ platform = EPDPlatform.from_string(platform_string)
self.assertIsInstance(platform.pep425_tag, six.text_type)
def test_platform_name(self):
# When/Then
for platform_string in self.platform_strings:
- platform = EPDPlatform.from_epd_string(platform_string)
+ platform = EPDPlatform.from_string(platform_string)
self.assertIsInstance(platform.platform_name, six.text_type)
def test_str_is_unicode(self):
# When/Then
for platform_string in self.platform_strings:
- platform = EPDPlatform.from_epd_string(platform_string)
+ platform = EPDPlatform.from_string(platform_string)
self.assertIsInstance(six.text_type(platform), six.text_type)
def test_epd_platform_from_string_new_names_underscore(self):
@@ -67,7 +67,7 @@ class TestEPDPlatform(unittest.TestCase):
# When
epd_platforms = tuple(
- EPDPlatform.from_epd_string("rh5_" + arch)
+ EPDPlatform.from_string("rh5_" + arch)
for arch in archs
)
@@ -80,7 +80,7 @@ class TestEPDPlatform(unittest.TestCase):
# When
epd_platforms = tuple(
- EPDPlatform.from_epd_string("rh5_" + arch)
+ EPDPlatform.from_string("rh5_" + arch)
for arch in archs
)
@@ -92,19 +92,19 @@ class TestEPDPlatform(unittest.TestCase):
s = "win_x86_64"
# When
- epd_platform = EPDPlatform.from_epd_string(s)
+ epd_platform = EPDPlatform.from_string(s)
# Then
- self.assertEqual(epd_platform, EPDPlatform.from_epd_string("win-64"))
+ self.assertEqual(epd_platform, EPDPlatform.from_string("win-64"))
# Given
s = "osx_x86_64"
# When
- epd_platform = EPDPlatform.from_epd_string(s)
+ epd_platform = EPDPlatform.from_string(s)
# Then
- self.assertEqual(epd_platform, EPDPlatform.from_epd_string("osx-64"))
+ self.assertEqual(epd_platform, EPDPlatform.from_string("osx-64"))
def test_epd_platform_from_string_new_names(self):
"""Ensure every epd short platform is understood by EPDPlatform."""
@@ -113,7 +113,7 @@ class TestEPDPlatform(unittest.TestCase):
# When
epd_platforms = tuple(
- EPDPlatform.from_epd_string("rh5-" + arch)
+ EPDPlatform.from_string("rh5-" + arch)
for arch in archs
)
@@ -126,7 +126,7 @@ class TestEPDPlatform(unittest.TestCase):
# When
epd_platforms = tuple(
- EPDPlatform.from_epd_string("rh5-" + arch)
+ EPDPlatform.from_string("rh5-" + arch)
for arch in archs
)
@@ -179,7 +179,7 @@ class TestEPDPlatform(unittest.TestCase):
# When
for old_name in EPD_PLATFORM_SHORT_NAMES:
new_name = old_to_new_name(old_name)
- name_to_platform[old_name] = EPDPlatform.from_epd_string(new_name)
+ name_to_platform[old_name] = EPDPlatform.from_string(new_name)
# Then
for old_name in name_to_platform:
@@ -195,13 +195,13 @@ class TestEPDPlatform(unittest.TestCase):
def test_str(self):
# Given
- epd_platform = EPDPlatform.from_epd_string("rh5-64")
+ epd_platform = EPDPlatform.from_string("rh5-64")
# When/Then
self.assertEqual(str(epd_platform), "rh5_x86_64")
# Given
- epd_platform = EPDPlatform.from_epd_string("osx-32")
+ epd_platform = EPDPlatform.from_string("osx-32")
# When/Then
self.assertEqual(str(epd_platform), "osx_x86")
@@ -212,7 +212,7 @@ class TestEPDPlatform(unittest.TestCase):
s = "osx_x86"
# When
- epd_platform = EPDPlatform.from_epd_string(s)
+ epd_platform = EPDPlatform.from_string(s)
# Then
self.assertEqual(str(epd_platform), s)
@@ -231,7 +231,7 @@ class TestEPDPlatform(unittest.TestCase):
# When/Then
for epd_string, platform_tag in epd_string_to_pep425:
self.assertEqual(
- EPDPlatform.from_epd_string(epd_string).pep425_tag,
+ EPDPlatform.from_string(epd_string).pep425_tag,
platform_tag,
)
@@ -243,7 +243,7 @@ class TestEPDPlatformApplies(unittest.TestCase):
self.assertTrue(applies("rh5", "current"))
self.assertFalse(applies("!rh5", "current"))
- platform = EPDPlatform.from_epd_string("rh5-x86_64")
+ platform = EPDPlatform.from_string("rh5-x86_64")
self.assertTrue(applies("rh5", platform))
self.assertFalse(applies("!rh5", platform))
self.assertFalse(applies("rh5-32", platform))
@@ -254,7 +254,7 @@ class TestEPDPlatformApplies(unittest.TestCase):
self.assertTrue(applies("all", "current"))
self.assertFalse(applies("!all", "current"))
- platform = EPDPlatform.from_epd_string("rh5-x86_64")
+ platform = EPDPlatform.from_string("rh5-x86_64")
self.assertTrue(applies("all", platform))
self.assertFalse(applies("!all", platform))
@@ -411,21 +411,21 @@ class TestGuessEPDPlatform(unittest.TestCase):
# Given
examples = (
(("linux2", None, "x86"),
- EPDPlatform.from_epd_string("rh5-32"),),
+ EPDPlatform.from_string("rh5-32"),),
(("linux2", "RedHat_3", "x86"),
- EPDPlatform.from_epd_string("rh3-32"),),
+ EPDPlatform.from_string("rh3-32"),),
(("linux2", "RedHat_5", "x86"),
- EPDPlatform.from_epd_string("rh5-32"),),
+ EPDPlatform.from_string("rh5-32"),),
(("linux2", "RedHat_5", "amd64"),
- EPDPlatform.from_epd_string("rh5-64"),),
+ EPDPlatform.from_string("rh5-64"),),
(("darwin", None, "x86"),
- EPDPlatform.from_epd_string("osx-32"),),
+ EPDPlatform.from_string("osx-32"),),
(("darwin", None, "amd64"),
- EPDPlatform.from_epd_string("osx-64"),),
+ EPDPlatform.from_string("osx-64"),),
(("win32", None, "x86"),
- EPDPlatform.from_epd_string("win-32"),),
+ EPDPlatform.from_string("win-32"),),
(("win32", None, "amd64"),
- EPDPlatform.from_epd_string("win-64"),),
+ EPDPlatform.from_string("win-64"),),
)
# When/Then
@@ -438,7 +438,7 @@ class TestGuessEPDPlatform(unittest.TestCase):
epd_platform_string = "rh5-32"
# When
- epd_platform = EPDPlatform.from_epd_string(epd_platform_string)
+ epd_platform = EPDPlatform.from_string(epd_platform_string)
# Then
platform = epd_platform.platform
@@ -452,7 +452,7 @@ class TestGuessEPDPlatform(unittest.TestCase):
epd_platform_string = "win-32"
# When
- epd_platform = EPDPlatform.from_epd_string(epd_platform_string)
+ epd_platform = EPDPlatform.from_string(epd_platform_string)
# Then
platform = epd_platform.platform
@@ -466,7 +466,7 @@ class TestGuessEPDPlatform(unittest.TestCase):
epd_platform_string = "osx-64"
# When
- epd_platform = EPDPlatform.from_epd_string(epd_platform_string)
+ epd_platform = EPDPlatform.from_string(epd_platform_string)
platform = epd_platform.platform
# Then
@@ -483,7 +483,7 @@ class TestGuessEPDPlatform(unittest.TestCase):
# When/Then
with self.assertRaises(OkonomiyakiError):
- EPDPlatform.from_epd_string(epd_platform_string)
+ EPDPlatform.from_string(epd_platform_string)
# Given
# Invalid bitwidth
@@ -491,7 +491,7 @@ class TestGuessEPDPlatform(unittest.TestCase):
# When/Then
with self.assertRaises(OkonomiyakiError):
- EPDPlatform.from_epd_string(epd_platform_string)
+ EPDPlatform.from_string(epd_platform_string)
# Given
# Invalid platform basename
@@ -499,7 +499,7 @@ class TestGuessEPDPlatform(unittest.TestCase):
# When/Then
with self.assertRaises(OkonomiyakiError):
- EPDPlatform.from_epd_string(epd_platform_string)
+ EPDPlatform.from_string(epd_platform_string)
def test_from_platform_tag(self):
# Given
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 2
} | 0.15 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"dev_requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
coverage==7.8.0
docutils==0.21.2
enum34==1.1.10
exceptiongroup==1.2.2
flake8==7.2.0
haas==0.9.0
iniconfig==2.1.0
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
mccabe==0.7.0
mock==1.0.1
-e git+https://github.com/enthought/okonomiyaki.git@5cbd87f7c349f999ac8d53fec18e44f5656bf5eb#egg=okonomiyaki
packaging==24.2
pbr==6.1.1
pluggy==1.5.0
pycodestyle==2.13.0
pyflakes==3.3.2
pytest==8.3.5
referencing==0.36.2
rpds-py==0.24.0
six==1.17.0
statistics==1.0.3.5
stevedore==4.1.1
testfixtures==8.3.0
tomli==2.2.1
typing_extensions==4.13.0
zipfile2==0.0.12
| name: okonomiyaki
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- coverage==7.8.0
- docutils==0.21.2
- enum34==1.1.10
- exceptiongroup==1.2.2
- flake8==7.2.0
- haas==0.9.0
- iniconfig==2.1.0
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- mccabe==0.7.0
- mock==1.0.1
- packaging==24.2
- pbr==6.1.1
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyflakes==3.3.2
- pytest==8.3.5
- referencing==0.36.2
- rpds-py==0.24.0
- six==1.17.0
- statistics==1.0.3.5
- stevedore==4.1.1
- testfixtures==8.3.0
- tomli==2.2.1
- typing-extensions==4.13.0
- zipfile2==0.0.12
prefix: /opt/conda/envs/okonomiyaki
| [
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatform::test_epd_platform_from_string",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatform::test_epd_platform_from_string_new_arch",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatform::test_epd_platform_from_string_new_names",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatform::test_epd_platform_from_string_new_names_underscore",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatform::test_pep425_is_unicode",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatform::test_pep425_tag",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatform::test_platform_name",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatform::test_str",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatform::test_str_is_unicode",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestGuessEPDPlatform::test_from_epd_platform_string",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestGuessEPDPlatform::test_from_epd_platform_string_invalid",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestGuessEPDPlatform::test_from_spec_depend_data"
]
| [
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatform::test_guessed_epd_platform",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatformApplies::test_all",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatformApplies::test_applies_rh",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatformApplies::test_current_linux",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatformApplies::test_no_arch",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestGuessEPDPlatform::test_guess_linux2_platform",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestGuessEPDPlatform::test_guess_linux2_unsupported"
]
| [
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatform::test_from_running_python",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatform::test_from_running_system",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatform::test_short_names_consistency",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestEPDPlatformApplies::test_current_windows",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestGuessEPDPlatform::test_from_platform_tag",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestGuessEPDPlatform::test_guess_darwin_platform",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestGuessEPDPlatform::test_guess_solaris_unsupported",
"okonomiyaki/platforms/tests/test_epd_platform.py::TestGuessEPDPlatform::test_guess_win32_platform"
]
| []
| BSD License | 605 | [
"okonomiyaki/platforms/__init__.py",
"okonomiyaki/platforms/epd_platform.py"
]
| [
"okonomiyaki/platforms/__init__.py",
"okonomiyaki/platforms/epd_platform.py"
]
|
|
closeio__freezefrog-3 | c7089ad8d9b3900f2528a94b43bfc842f62023c9 | 2016-06-29 19:12:56 | c7089ad8d9b3900f2528a94b43bfc842f62023c9 | diff --git a/freezefrog/__init__.py b/freezefrog/__init__.py
index 44a232a..60463f3 100644
--- a/freezefrog/__init__.py
+++ b/freezefrog/__init__.py
@@ -47,6 +47,12 @@ class FakeDateTime(with_metaclass(FakeDateTimeMeta, real_datetime)):
cls._start = real_datetime.utcnow()
return (real_datetime.utcnow() - cls._start) + cls.dt
+ @classmethod
+ def now(cls, *args, **kwargs):
+ raise NotImplementedError(
+ '{}.now() is not implemented yet'.format(cls.__name__)
+ )
+
class FakeFixedDateTime(FakeDateTime):
@classmethod
| FreezeFrog should also mock datetime.datetime.now
FreezeFrog should raise an exception when `datetime.datetime.now()` is used without a TZ, instead of leaving the call unmocked entirely.
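
A short sketch of the intended behaviour (it mirrors the test added in this change; the frozen date is arbitrary and the import path is assumed from the test suite):

```python
import datetime
from freezefrog import FreezeTime  # import path assumed

# Outside the context manager the real clock keeps working.
assert datetime.datetime.now()

# Inside it, a naive now() call should fail loudly instead of silently
# returning the real, unfrozen time.
with FreezeTime(datetime.datetime(2014, 1, 1)):
    datetime.datetime.now()  # should raise NotImplementedError
```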
| closeio/freezefrog | diff --git a/tests/__init__.py b/tests/__init__.py
index dd74be2..093fc1c 100644
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -27,3 +27,9 @@ class FreezeFrogTestCase(unittest.TestCase):
end = datetime.datetime(2014, 1, 1, 0, 0, 1)
self.assertTrue(start < dt < end)
self.assertTrue(1388534400 < time.time() < 1388534401)
+
+ def test_now(self):
+ regular_now = datetime.datetime.now()
+ self.assertTrue(regular_now)
+ with FreezeTime(datetime.datetime(2014, 1, 1)):
+ self.assertRaises(NotImplementedError, datetime.datetime.now)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 1
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
-e git+https://github.com/closeio/freezefrog.git@c7089ad8d9b3900f2528a94b43bfc842f62023c9#egg=freezefrog
iniconfig==2.1.0
mock==1.3.0
packaging==24.2
pbr==6.1.1
pluggy==1.5.0
pytest==8.3.5
six==1.17.0
tomli==2.2.1
| name: freezefrog
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- mock==1.3.0
- packaging==24.2
- pbr==6.1.1
- pluggy==1.5.0
- pytest==8.3.5
- six==1.17.0
- tomli==2.2.1
prefix: /opt/conda/envs/freezefrog
| [
"tests/__init__.py::FreezeFrogTestCase::test_now"
]
| []
| [
"tests/__init__.py::FreezeFrogTestCase::test_freeze",
"tests/__init__.py::FreezeFrogTestCase::test_freeze_tick"
]
| []
| MIT License | 606 | [
"freezefrog/__init__.py"
]
| [
"freezefrog/__init__.py"
]
|
|
EdinburghGenomics__EGCG-Core-6 | 0309bf8fdd5d64705fc62184fe31e27c20172fbc | 2016-06-30 09:54:47 | 0309bf8fdd5d64705fc62184fe31e27c20172fbc | diff --git a/egcg_core/rest_communication.py b/egcg_core/rest_communication.py
index 942039b..39acd98 100644
--- a/egcg_core/rest_communication.py
+++ b/egcg_core/rest_communication.py
@@ -1,9 +1,10 @@
import requests
from urllib.parse import urljoin
-from egcg_core.config import default as cfg
+from egcg_core.config import default
from egcg_core.app_logging import logging_default as log_cfg
from egcg_core.exceptions import RestCommunicationError
+cfg = default['rest_api']
app_logger = log_cfg.get_logger(__name__)
table = {' ': '', '\'': '"', 'None': 'null'}
@@ -17,7 +18,7 @@ def _translate(s):
def api_url(endpoint, **query_args):
url = '{base_url}/{endpoint}/'.format(
- base_url=cfg.query('rest_api', 'url').rstrip('/'), endpoint=endpoint
+ base_url=cfg['url'].rstrip('/'), endpoint=endpoint
)
if query_args:
query = '?' + '&'.join(['%s=%s' % (k, v) for k, v in query_args.items()])
@@ -38,7 +39,11 @@ def _parse_query_string(query_string, requires=None):
def _req(method, url, quiet=False, **kwargs):
- r = requests.request(method, url, **kwargs)
+ auth = None
+ if 'username' in cfg and 'password' in cfg:
+ auth = (cfg['username'], cfg['password'])
+
+ r = requests.request(method, url, auth=auth, **kwargs)
# e.g: 'POST <url> ({"some": "args"}) -> {"some": "content"}. Status code 201. Reason: CREATED
report = '%s %s (%s) -> %s. Status code %s. Reason: %s' % (
r.request.method, r.request.path_url, kwargs, r.content.decode('utf-8'), r.status_code, r.reason
@@ -46,6 +51,8 @@ def _req(method, url, quiet=False, **kwargs):
if r.status_code in (200, 201):
if not quiet:
app_logger.debug(report)
+ elif r.status_code == 401:
+ raise RestCommunicationError('Invalid auth credentials')
else:
app_logger.error(report)
return r
@@ -58,7 +65,6 @@ def get_content(endpoint, paginate=True, quiet=False, **query_args):
page=query_args.pop('page', 1)
)
url = api_url(endpoint, **query_args)
-
return _req('GET', url, quiet=quiet).json()
@@ -170,7 +176,7 @@ def post_or_patch(endpoint, input_json, id_field=None, update_lists=None):
doc = get_document(endpoint, where={id_field: _payload[id_field]})
if doc:
_payload.pop(id_field)
- s = _patch_entry(endpoint, doc, _payload, update_lists)
+ s = _patch_entry(endpoint, doc, _payload, update_lists)
else:
s = post_entry(endpoint, _payload)
success = success and s
diff --git a/etc/example_egcg.yaml b/etc/example_egcg.yaml
index 11b9ade..c6d764e 100644
--- a/etc/example_egcg.yaml
+++ b/etc/example_egcg.yaml
@@ -4,6 +4,8 @@ default:
rest_api:
url: 'http://localhost:4999/api/0.1'
+ username: 'a_user'
+ password: 'a_password'
ncbi_cache: ':memory:'
| Authentication
Since EdinburghGenomics/Reporting-App will be adding authentication, we should add auth headers to `rest_communication` (a sketch of the approach follows the test diff below). | EdinburghGenomics/EGCG-Core | diff --git a/tests/test_rest_communication.py b/tests/test_rest_communication.py
index c43d6b7..b9905c2 100644
--- a/tests/test_rest_communication.py
+++ b/tests/test_rest_communication.py
@@ -21,6 +21,7 @@ patched_response = patch(
'requests.request',
return_value=FakeRestResponse(status_code=200, content=test_request_content)
)
+auth = ('a_user', 'a_password')
def query_args_from_url(url):
@@ -74,7 +75,7 @@ def test_req(mocked_response):
response = rest_communication._req('METHOD', rest_url(test_endpoint), json=json_content)
assert response.status_code == 200
assert json.loads(response.content.decode('utf-8')) == response.json() == test_request_content
- mocked_response.assert_called_with('METHOD', rest_url(test_endpoint), json=json_content)
+ mocked_response.assert_called_with('METHOD', rest_url(test_endpoint), auth=auth, json=json_content)
def test_get_documents_depaginate():
@@ -122,13 +123,13 @@ def test_get_document():
@patched_response
def test_post_entry(mocked_response):
rest_communication.post_entry(test_endpoint, payload=test_request_content)
- mocked_response.assert_called_with('POST', rest_url(test_endpoint), json=test_request_content)
+ mocked_response.assert_called_with('POST', rest_url(test_endpoint), auth=auth, json=test_request_content)
@patched_response
def test_put_entry(mocked_response):
rest_communication.put_entry(test_endpoint, 'an_element_id', payload=test_request_content)
- mocked_response.assert_called_with('PUT', rest_url(test_endpoint) + 'an_element_id', json=test_request_content)
+ mocked_response.assert_called_with('PUT', rest_url(test_endpoint) + 'an_element_id', auth=auth, json=test_request_content)
test_patch_document = {
@@ -138,7 +139,7 @@ test_patch_document = {
@patch('egcg_core.rest_communication.get_document', return_value=test_patch_document)
@patched_response
-def test_patch_entry(mocked_request, mocked_get_doc):
+def test_patch_entry(mocked_response, mocked_get_doc):
patching_payload = {'list_to_update': ['another']}
rest_communication.patch_entry(
test_endpoint,
@@ -149,10 +150,11 @@ def test_patch_entry(mocked_request, mocked_get_doc):
)
mocked_get_doc.assert_called_with(test_endpoint, where={'uid': 'a_unique_id'})
- mocked_request.assert_called_with(
+ mocked_response.assert_called_with(
'PATCH',
rest_url(test_endpoint) + '1337',
headers={'If-Match': 1234567},
+ auth=auth,
json={'list_to_update': ['this', 'that', 'other', 'another']}
)
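
As referenced in the problem statement above, a minimal sketch of the approach: read optional credentials from the `rest_api` config section and hand them to `requests` as an HTTP basic auth tuple. The function name here is illustrative; the real change wires this into `_req` in `egcg_core.rest_communication`.

```python
import requests

def request_with_auth(method, url, cfg, **kwargs):
    # Only use basic auth when both credentials are configured, so
    # unauthenticated deployments keep working unchanged.
    auth = None
    if 'username' in cfg and 'password' in cfg:
        auth = (cfg['username'], cfg['password'])
    return requests.request(method, url, auth=auth, **kwargs)
```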
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 2
} | 0.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": null,
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
-e git+https://github.com/EdinburghGenomics/EGCG-Core.git@0309bf8fdd5d64705fc62184fe31e27c20172fbc#egg=EGCG_Core
exceptiongroup==1.2.2
genologics==1.0.0
idna==3.10
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
PyYAML==6.0.2
requests==2.32.3
tomli==2.2.1
urllib3==2.3.0
| name: EGCG-Core
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- exceptiongroup==1.2.2
- genologics==1.0.0
- idna==3.10
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pyyaml==6.0.2
- requests==2.32.3
- tomli==2.2.1
- urllib3==2.3.0
prefix: /opt/conda/envs/EGCG-Core
| [
"tests/test_rest_communication.py::test_req",
"tests/test_rest_communication.py::test_post_entry",
"tests/test_rest_communication.py::test_put_entry",
"tests/test_rest_communication.py::test_patch_entry"
]
| []
| [
"tests/test_rest_communication.py::test_api_url_query_strings",
"tests/test_rest_communication.py::test_parse_query_string",
"tests/test_rest_communication.py::test_get_documents_depaginate",
"tests/test_rest_communication.py::test_test_content",
"tests/test_rest_communication.py::test_get_documents",
"tests/test_rest_communication.py::test_get_document",
"tests/test_rest_communication.py::test_post_or_patch"
]
| []
| MIT License | 607 | [
"egcg_core/rest_communication.py",
"etc/example_egcg.yaml"
]
| [
"egcg_core/rest_communication.py",
"etc/example_egcg.yaml"
]
|
|
pyca__bcrypt-86 | c9c76210fad230995a6155287e8b92c49180eae4 | 2016-06-30 16:21:32 | c9c76210fad230995a6155287e8b92c49180eae4 | reaperhulk: jenkins, retest this please | diff --git a/.travis.yml b/.travis.yml
index 456aba7..bbcb336 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -30,6 +30,7 @@ matrix:
env: TOXENV=pypy CC=clang
- python: 2.7
env: TOXENV=pep8
+ - env: TOXENV=packaging
- python: 3.5
env: TOXENV=py3pep8
- language: generic
diff --git a/MANIFEST.in b/MANIFEST.in
index 622d66b..93a4480 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,3 +1,11 @@
include LICENSE README.rst
+
+include tox.ini .coveragerc
include src/build_bcrypt.py
+
recursive-include src/_csrc *
+recursive-include tests *.py
+
+exclude requirements.txt tasks.py .travis.yml
+
+prune .travis
diff --git a/README.rst b/README.rst
index 0883286..3859bb8 100644
--- a/README.rst
+++ b/README.rst
@@ -40,6 +40,7 @@ Changelog
3.1.0
-----
* Added support for ``checkpw`` as another method of verifying a password.
+* Ensure that you get a ``$2y$`` hash when you input a ``$2y$`` salt.
3.0.0
-----
@@ -104,7 +105,7 @@ the work factor merely pass the desired number of rounds to
>>> hashed = bcrypt.hashpw(password, bcrypt.gensalt(14))
>>> # Check that a unhashed password matches one that has previously been
>>> # hashed
- >>> if bcrypt.hashpw(password, hashed) == hashed:
+ >>> if bcrypt.checkpw(password, hashed):
... print("It Matches!")
... else:
... print("It Does not Match :(")
diff --git a/src/bcrypt/__init__.py b/src/bcrypt/__init__.py
index d6acb84..abc9d75 100644
--- a/src/bcrypt/__init__.py
+++ b/src/bcrypt/__init__.py
@@ -39,10 +39,6 @@ __all__ = [
_normalize_re = re.compile(b"^\$2y\$")
-def _normalize_prefix(salt):
- return _normalize_re.sub(b"$2b$", salt)
-
-
def gensalt(rounds=12, prefix=b"2b"):
if prefix not in (b"2a", b"2b"):
raise ValueError("Supported prefixes are b'2a' or b'2b'")
@@ -75,7 +71,13 @@ def hashpw(password, salt):
# on $2a$, so we do it here to preserve compatibility with 2.0.0
password = password[:72]
- salt = _normalize_prefix(salt)
+ # When the original 8bit bug was found the original library we supported
+ # added a new prefix, $2y$, that fixes it. This prefix is exactly the same
+ # as the $2b$ prefix added by OpenBSD other than the name. Since the
+ # OpenBSD library does not support the $2y$ prefix, if the salt given to us
+ # is for the $2y$ prefix, we'll just mugne it so that it's a $2b$ prior to
+ # passing it into the C library.
+ original_salt, salt = salt, _normalize_re.sub(b"$2b$", salt)
hashed = _bcrypt.ffi.new("unsigned char[]", 128)
retval = _bcrypt.lib.bcrypt_hashpass(password, salt, hashed, len(hashed))
@@ -83,7 +85,13 @@ def hashpw(password, salt):
if retval != 0:
raise ValueError("Invalid salt")
- return _bcrypt.ffi.string(hashed)
+ # Now that we've gotten our hashed password, we want to ensure that the
+ # prefix we return is the one that was passed in, so we'll use the prefix
+ # from the original salt and concatenate that with the return value (minus
+ # the return value's prefix). This will ensure that if someone passed in a
+ # salt with a $2y$ prefix, that they get back a hash with a $2y$ prefix
+ # even though we munged it to $2b$.
+ return original_salt[:4] + _bcrypt.ffi.string(hashed)[4:]
def checkpw(password, hashed_password):
@@ -96,9 +104,6 @@ def checkpw(password, hashed_password):
"password and hashed_password may not contain NUL bytes"
)
- # If the user supplies a $2y$ prefix we normalize to $2b$
- hashed_password = _normalize_prefix(hashed_password)
-
ret = hashpw(password, hashed_password)
return _bcrypt.lib.timingsafe_bcmp(ret, hashed_password, len(ret)) == 0
diff --git a/tox.ini b/tox.ini
index abc6283..264d9aa 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
[tox]
-envlist = py26,py27,pypy,py33,py34,py35,pep8,py3pep8
+envlist = py26,py27,pypy,py33,py34,py35,pep8,py3pep8,packaging
[testenv]
# If you add a new dep here you probably need to add it in setup.py as well
@@ -27,6 +27,15 @@ deps =
commands =
flake8 .
+[testenv:packaging]
+deps =
+ check-manifest
+ readme_renderer
+commands =
+ check-manifest
+ python setup.py check --metadata --restructuredtext --strict
+
+
[flake8]
exclude = .tox,*.egg
select = E,W,F,N,I
| Find a way to not use == in the README
I don't think it's actually exploitable as a timing attack (in fact I'm pretty sure it's not), but I think it'd be good hygiene to offer a check_password function or similar and use that, so we don't have to expose a general-purpose constant-time comparison function.
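
The README fix is to verify with `bcrypt.checkpw` rather than comparing hashes with `==`; a short sketch of the documented usage:

```python
import bcrypt

password = b"super secret password"
hashed = bcrypt.hashpw(password, bcrypt.gensalt())

# checkpw re-hashes the candidate and compares in constant time, so
# callers never need == or a general-purpose constant-time comparison.
if bcrypt.checkpw(password, hashed):
    print("It Matches!")
```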
index 47f315a..d9bde72 100644
--- a/tests/test_bcrypt.py
+++ b/tests/test_bcrypt.py
@@ -152,12 +152,12 @@ _2y_test_vectors = [
(
b"\xa3",
b"$2y$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq",
- b"$2b$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq",
+ b"$2y$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq",
),
(
b"\xff\xff\xa3",
b"$2y$05$/OK.fbVrR/bpIqNJ5ianF.CE5elHaaO4EbggVDjb8P19RukzXSM3e",
- b"$2b$05$/OK.fbVrR/bpIqNJ5ianF.CE5elHaaO4EbggVDjb8P19RukzXSM3e",
+ b"$2y$05$/OK.fbVrR/bpIqNJ5ianF.CE5elHaaO4EbggVDjb8P19RukzXSM3e",
),
]
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 5
} | 3.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[tests]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"flake8",
"flake8-import-order",
"pep8-naming"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/pyca/bcrypt.git@c9c76210fad230995a6155287e8b92c49180eae4#egg=bcrypt
cffi==1.17.1
exceptiongroup==1.2.2
flake8==7.2.0
flake8-import-order==0.18.2
iniconfig==2.1.0
mccabe==0.7.0
packaging==24.2
pep8-naming==0.14.1
pluggy==1.5.0
pycodestyle==2.13.0
pycparser==2.22
pyflakes==3.3.1
pytest==8.3.5
six==1.17.0
tomli==2.2.1
| name: bcrypt
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cffi==1.17.1
- exceptiongroup==1.2.2
- flake8==7.2.0
- flake8-import-order==0.18.2
- iniconfig==2.1.0
- mccabe==0.7.0
- packaging==24.2
- pep8-naming==0.14.1
- pluggy==1.5.0
- pycodestyle==2.13.0
- pycparser==2.22
- pyflakes==3.3.1
- pytest==8.3.5
- six==1.17.0
- tomli==2.2.1
prefix: /opt/conda/envs/bcrypt
| [
"tests/test_bcrypt.py::test_hashpw_2y_prefix[\\xa3-$2y$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq-$2y$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq]",
"tests/test_bcrypt.py::test_hashpw_2y_prefix[\\xff\\xff\\xa3-$2y$05$/OK.fbVrR/bpIqNJ5ianF.CE5elHaaO4EbggVDjb8P19RukzXSM3e-$2y$05$/OK.fbVrR/bpIqNJ5ianF.CE5elHaaO4EbggVDjb8P19RukzXSM3e]"
]
| []
| [
"tests/test_bcrypt.py::test_gensalt_basic",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[4-$2b$04$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[5-$2b$05$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[6-$2b$06$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[7-$2b$07$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[8-$2b$08$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[9-$2b$09$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[10-$2b$10$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[11-$2b$11$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[12-$2b$12$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[13-$2b$13$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[14-$2b$14$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[15-$2b$15$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[16-$2b$16$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[17-$2b$17$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[18-$2b$18$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[19-$2b$19$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[20-$2b$20$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[21-$2b$21$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[22-$2b$22$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[23-$2b$23$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[24-$2b$24$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_invalid[1]",
"tests/test_bcrypt.py::test_gensalt_rounds_invalid[2]",
"tests/test_bcrypt.py::test_gensalt_rounds_invalid[3]",
"tests/test_bcrypt.py::test_gensalt_bad_prefix",
"tests/test_bcrypt.py::test_gensalt_2a_prefix",
"tests/test_bcrypt.py::test_hashpw_new[Kk4DQuMMfZL9o-$2b$04$cVWp4XaNU8a4v1uMRum2SO-$2b$04$cVWp4XaNU8a4v1uMRum2SO026BWLIoQMD/TXg5uZV.0P.uO8m3YEm]",
"tests/test_bcrypt.py::test_hashpw_new[9IeRXmnGxMYbs-$2b$04$pQ7gRO7e6wx/936oXhNjrO-$2b$04$pQ7gRO7e6wx/936oXhNjrOUNOHL1D0h1N2IDbJZYs.1ppzSof6SPy]",
"tests/test_bcrypt.py::test_hashpw_new[xVQVbwa1S0M8r-$2b$04$SQe9knOzepOVKoYXo9xTte-$2b$04$SQe9knOzepOVKoYXo9xTteNYr6MBwVz4tpriJVe3PNgYufGIsgKcW]",
"tests/test_bcrypt.py::test_hashpw_new[Zfgr26LWd22Za-$2b$04$eH8zX.q5Q.j2hO1NkVYJQO-$2b$04$eH8zX.q5Q.j2hO1NkVYJQOM6KxntS/ow3.YzVmFrE4t//CoF4fvne]",
"tests/test_bcrypt.py::test_hashpw_new[Tg4daC27epFBE-$2b$04$ahiTdwRXpUG2JLRcIznxc.-$2b$04$ahiTdwRXpUG2JLRcIznxc.s1.ydaPGD372bsGs8NqyYjLY1inG5n2]",
"tests/test_bcrypt.py::test_hashpw_new[xhQPMmwh5ALzW-$2b$04$nQn78dV0hGHf5wUBe0zOFu-$2b$04$nQn78dV0hGHf5wUBe0zOFu8n07ZbWWOKoGasZKRspZxtt.vBRNMIy]",
"tests/test_bcrypt.py::test_hashpw_new[59je8h5Gj71tg-$2b$04$cvXudZ5ugTg95W.rOjMITu-$2b$04$cvXudZ5ugTg95W.rOjMITuM1jC0piCl3zF5cmGhzCibHZrNHkmckG]",
"tests/test_bcrypt.py::test_hashpw_new[wT4fHJa2N9WSW-$2b$04$YYjtiq4Uh88yUsExO0RNTu-$2b$04$YYjtiq4Uh88yUsExO0RNTuEJ.tZlsONac16A8OcLHleWFjVawfGvO]",
"tests/test_bcrypt.py::test_hashpw_new[uSgFRnQdOgm4S-$2b$04$WLTjgY/pZSyqX/fbMbJzf.-$2b$04$WLTjgY/pZSyqX/fbMbJzf.qxCeTMQOzgL.CimRjMHtMxd/VGKojMu]",
"tests/test_bcrypt.py::test_hashpw_new[tEPtJZXur16Vg-$2b$04$2moPs/x/wnCfeQ5pCheMcu-$2b$04$2moPs/x/wnCfeQ5pCheMcuSJQ/KYjOZG780UjA/SiR.KsYWNrC7SG]",
"tests/test_bcrypt.py::test_hashpw_new[vvho8C6nlVf9K-$2b$04$HrEYC/AQ2HS77G78cQDZQ.-$2b$04$HrEYC/AQ2HS77G78cQDZQ.r44WGcruKw03KHlnp71yVQEwpsi3xl2]",
"tests/test_bcrypt.py::test_hashpw_new[5auCCY9by0Ruf-$2b$04$vVYgSTfB8KVbmhbZE/k3R.-$2b$04$vVYgSTfB8KVbmhbZE/k3R.ux9A0lJUM4CZwCkHI9fifke2.rTF7MG]",
"tests/test_bcrypt.py::test_hashpw_new[GtTkR6qn2QOZW-$2b$04$JfoNrR8.doieoI8..F.C1O-$2b$04$JfoNrR8.doieoI8..F.C1OQgwE3uTeuardy6lw0AjALUzOARoyf2m]",
"tests/test_bcrypt.py::test_hashpw_new[zKo8vdFSnjX0f-$2b$04$HP3I0PUs7KBEzMBNFw7o3O-$2b$04$HP3I0PUs7KBEzMBNFw7o3O7f/uxaZU7aaDot1quHMgB2yrwBXsgyy]",
"tests/test_bcrypt.py::test_hashpw_new[I9VfYlacJiwiK-$2b$04$xnFVhJsTzsFBTeP3PpgbMe-$2b$04$xnFVhJsTzsFBTeP3PpgbMeMREb6rdKV9faW54Sx.yg9plf4jY8qT6]",
"tests/test_bcrypt.py::test_hashpw_new[VFPO7YXnHQbQO-$2b$04$WQp9.igoLqVr6Qk70mz6xu-$2b$04$WQp9.igoLqVr6Qk70mz6xuRxE0RttVXXdukpR9N54x17ecad34ZF6]",
"tests/test_bcrypt.py::test_hashpw_new[VDx5BdxfxstYk-$2b$04$xgZtlonpAHSU/njOCdKztO-$2b$04$xgZtlonpAHSU/njOCdKztOPuPFzCNVpB4LGicO4/OGgHv.uKHkwsS]",
"tests/test_bcrypt.py::test_hashpw_new[dEe6XfVGrrfSH-$2b$04$2Siw3Nv3Q/gTOIPetAyPr.-$2b$04$2Siw3Nv3Q/gTOIPetAyPr.GNj3aO0lb1E5E9UumYGKjP9BYqlNWJe]",
"tests/test_bcrypt.py::test_hashpw_new[cTT0EAFdwJiLn-$2b$04$7/Qj7Kd8BcSahPO4khB8me-$2b$04$7/Qj7Kd8BcSahPO4khB8me4ssDJCW3r4OGYqPF87jxtrSyPj5cS5m]",
"tests/test_bcrypt.py::test_hashpw_new[J8eHUDuxBB520-$2b$04$VvlCUKbTMjaxaYJ.k5juoe-$2b$04$VvlCUKbTMjaxaYJ.k5juoecpG/7IzcH1AkmqKi.lIZMVIOLClWAk.]",
"tests/test_bcrypt.py::test_hashpw_new[U*U-$2a$05$CCCCCCCCCCCCCCCCCCCCC.-$2a$05$CCCCCCCCCCCCCCCCCCCCC.E5YPO9kmyuRGyh0XouQYb4YMJKvyOeW]",
"tests/test_bcrypt.py::test_hashpw_new[U*U*-$2a$05$CCCCCCCCCCCCCCCCCCCCC.-$2a$05$CCCCCCCCCCCCCCCCCCCCC.VGOzA784oUp/Z0DY336zx7pLYAy0lwK]",
"tests/test_bcrypt.py::test_hashpw_new[U*U*U-$2a$05$XXXXXXXXXXXXXXXXXXXXXO-$2a$05$XXXXXXXXXXXXXXXXXXXXXOAcXxm9kjPGEMsLznoKqmqw7tc8WCx4a]",
"tests/test_bcrypt.py::test_hashpw_new[0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789chars",
"tests/test_bcrypt.py::test_hashpw_new[\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaachars",
"tests/test_bcrypt.py::test_hashpw_new[\\xa3-$2a$05$/OK.fbVrR/bpIqNJ5ianF.-$2a$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq]",
"tests/test_bcrypt.py::test_checkpw[Kk4DQuMMfZL9o-$2b$04$cVWp4XaNU8a4v1uMRum2SO-$2b$04$cVWp4XaNU8a4v1uMRum2SO026BWLIoQMD/TXg5uZV.0P.uO8m3YEm]",
"tests/test_bcrypt.py::test_checkpw[9IeRXmnGxMYbs-$2b$04$pQ7gRO7e6wx/936oXhNjrO-$2b$04$pQ7gRO7e6wx/936oXhNjrOUNOHL1D0h1N2IDbJZYs.1ppzSof6SPy]",
"tests/test_bcrypt.py::test_checkpw[xVQVbwa1S0M8r-$2b$04$SQe9knOzepOVKoYXo9xTte-$2b$04$SQe9knOzepOVKoYXo9xTteNYr6MBwVz4tpriJVe3PNgYufGIsgKcW]",
"tests/test_bcrypt.py::test_checkpw[Zfgr26LWd22Za-$2b$04$eH8zX.q5Q.j2hO1NkVYJQO-$2b$04$eH8zX.q5Q.j2hO1NkVYJQOM6KxntS/ow3.YzVmFrE4t//CoF4fvne]",
"tests/test_bcrypt.py::test_checkpw[Tg4daC27epFBE-$2b$04$ahiTdwRXpUG2JLRcIznxc.-$2b$04$ahiTdwRXpUG2JLRcIznxc.s1.ydaPGD372bsGs8NqyYjLY1inG5n2]",
"tests/test_bcrypt.py::test_checkpw[xhQPMmwh5ALzW-$2b$04$nQn78dV0hGHf5wUBe0zOFu-$2b$04$nQn78dV0hGHf5wUBe0zOFu8n07ZbWWOKoGasZKRspZxtt.vBRNMIy]",
"tests/test_bcrypt.py::test_checkpw[59je8h5Gj71tg-$2b$04$cvXudZ5ugTg95W.rOjMITu-$2b$04$cvXudZ5ugTg95W.rOjMITuM1jC0piCl3zF5cmGhzCibHZrNHkmckG]",
"tests/test_bcrypt.py::test_checkpw[wT4fHJa2N9WSW-$2b$04$YYjtiq4Uh88yUsExO0RNTu-$2b$04$YYjtiq4Uh88yUsExO0RNTuEJ.tZlsONac16A8OcLHleWFjVawfGvO]",
"tests/test_bcrypt.py::test_checkpw[uSgFRnQdOgm4S-$2b$04$WLTjgY/pZSyqX/fbMbJzf.-$2b$04$WLTjgY/pZSyqX/fbMbJzf.qxCeTMQOzgL.CimRjMHtMxd/VGKojMu]",
"tests/test_bcrypt.py::test_checkpw[tEPtJZXur16Vg-$2b$04$2moPs/x/wnCfeQ5pCheMcu-$2b$04$2moPs/x/wnCfeQ5pCheMcuSJQ/KYjOZG780UjA/SiR.KsYWNrC7SG]",
"tests/test_bcrypt.py::test_checkpw[vvho8C6nlVf9K-$2b$04$HrEYC/AQ2HS77G78cQDZQ.-$2b$04$HrEYC/AQ2HS77G78cQDZQ.r44WGcruKw03KHlnp71yVQEwpsi3xl2]",
"tests/test_bcrypt.py::test_checkpw[5auCCY9by0Ruf-$2b$04$vVYgSTfB8KVbmhbZE/k3R.-$2b$04$vVYgSTfB8KVbmhbZE/k3R.ux9A0lJUM4CZwCkHI9fifke2.rTF7MG]",
"tests/test_bcrypt.py::test_checkpw[GtTkR6qn2QOZW-$2b$04$JfoNrR8.doieoI8..F.C1O-$2b$04$JfoNrR8.doieoI8..F.C1OQgwE3uTeuardy6lw0AjALUzOARoyf2m]",
"tests/test_bcrypt.py::test_checkpw[zKo8vdFSnjX0f-$2b$04$HP3I0PUs7KBEzMBNFw7o3O-$2b$04$HP3I0PUs7KBEzMBNFw7o3O7f/uxaZU7aaDot1quHMgB2yrwBXsgyy]",
"tests/test_bcrypt.py::test_checkpw[I9VfYlacJiwiK-$2b$04$xnFVhJsTzsFBTeP3PpgbMe-$2b$04$xnFVhJsTzsFBTeP3PpgbMeMREb6rdKV9faW54Sx.yg9plf4jY8qT6]",
"tests/test_bcrypt.py::test_checkpw[VFPO7YXnHQbQO-$2b$04$WQp9.igoLqVr6Qk70mz6xu-$2b$04$WQp9.igoLqVr6Qk70mz6xuRxE0RttVXXdukpR9N54x17ecad34ZF6]",
"tests/test_bcrypt.py::test_checkpw[VDx5BdxfxstYk-$2b$04$xgZtlonpAHSU/njOCdKztO-$2b$04$xgZtlonpAHSU/njOCdKztOPuPFzCNVpB4LGicO4/OGgHv.uKHkwsS]",
"tests/test_bcrypt.py::test_checkpw[dEe6XfVGrrfSH-$2b$04$2Siw3Nv3Q/gTOIPetAyPr.-$2b$04$2Siw3Nv3Q/gTOIPetAyPr.GNj3aO0lb1E5E9UumYGKjP9BYqlNWJe]",
"tests/test_bcrypt.py::test_checkpw[cTT0EAFdwJiLn-$2b$04$7/Qj7Kd8BcSahPO4khB8me-$2b$04$7/Qj7Kd8BcSahPO4khB8me4ssDJCW3r4OGYqPF87jxtrSyPj5cS5m]",
"tests/test_bcrypt.py::test_checkpw[J8eHUDuxBB520-$2b$04$VvlCUKbTMjaxaYJ.k5juoe-$2b$04$VvlCUKbTMjaxaYJ.k5juoecpG/7IzcH1AkmqKi.lIZMVIOLClWAk.]",
"tests/test_bcrypt.py::test_checkpw[U*U-$2a$05$CCCCCCCCCCCCCCCCCCCCC.-$2a$05$CCCCCCCCCCCCCCCCCCCCC.E5YPO9kmyuRGyh0XouQYb4YMJKvyOeW]",
"tests/test_bcrypt.py::test_checkpw[U*U*-$2a$05$CCCCCCCCCCCCCCCCCCCCC.-$2a$05$CCCCCCCCCCCCCCCCCCCCC.VGOzA784oUp/Z0DY336zx7pLYAy0lwK]",
"tests/test_bcrypt.py::test_checkpw[U*U*U-$2a$05$XXXXXXXXXXXXXXXXXXXXXO-$2a$05$XXXXXXXXXXXXXXXXXXXXXOAcXxm9kjPGEMsLznoKqmqw7tc8WCx4a]",
"tests/test_bcrypt.py::test_checkpw[0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789chars",
"tests/test_bcrypt.py::test_checkpw[\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaachars",
"tests/test_bcrypt.py::test_checkpw[\\xa3-$2a$05$/OK.fbVrR/bpIqNJ5ianF.-$2a$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq]",
"tests/test_bcrypt.py::test_hashpw_existing[Kk4DQuMMfZL9o-$2b$04$cVWp4XaNU8a4v1uMRum2SO-$2b$04$cVWp4XaNU8a4v1uMRum2SO026BWLIoQMD/TXg5uZV.0P.uO8m3YEm]",
"tests/test_bcrypt.py::test_hashpw_existing[9IeRXmnGxMYbs-$2b$04$pQ7gRO7e6wx/936oXhNjrO-$2b$04$pQ7gRO7e6wx/936oXhNjrOUNOHL1D0h1N2IDbJZYs.1ppzSof6SPy]",
"tests/test_bcrypt.py::test_hashpw_existing[xVQVbwa1S0M8r-$2b$04$SQe9knOzepOVKoYXo9xTte-$2b$04$SQe9knOzepOVKoYXo9xTteNYr6MBwVz4tpriJVe3PNgYufGIsgKcW]",
"tests/test_bcrypt.py::test_hashpw_existing[Zfgr26LWd22Za-$2b$04$eH8zX.q5Q.j2hO1NkVYJQO-$2b$04$eH8zX.q5Q.j2hO1NkVYJQOM6KxntS/ow3.YzVmFrE4t//CoF4fvne]",
"tests/test_bcrypt.py::test_hashpw_existing[Tg4daC27epFBE-$2b$04$ahiTdwRXpUG2JLRcIznxc.-$2b$04$ahiTdwRXpUG2JLRcIznxc.s1.ydaPGD372bsGs8NqyYjLY1inG5n2]",
"tests/test_bcrypt.py::test_hashpw_existing[xhQPMmwh5ALzW-$2b$04$nQn78dV0hGHf5wUBe0zOFu-$2b$04$nQn78dV0hGHf5wUBe0zOFu8n07ZbWWOKoGasZKRspZxtt.vBRNMIy]",
"tests/test_bcrypt.py::test_hashpw_existing[59je8h5Gj71tg-$2b$04$cvXudZ5ugTg95W.rOjMITu-$2b$04$cvXudZ5ugTg95W.rOjMITuM1jC0piCl3zF5cmGhzCibHZrNHkmckG]",
"tests/test_bcrypt.py::test_hashpw_existing[wT4fHJa2N9WSW-$2b$04$YYjtiq4Uh88yUsExO0RNTu-$2b$04$YYjtiq4Uh88yUsExO0RNTuEJ.tZlsONac16A8OcLHleWFjVawfGvO]",
"tests/test_bcrypt.py::test_hashpw_existing[uSgFRnQdOgm4S-$2b$04$WLTjgY/pZSyqX/fbMbJzf.-$2b$04$WLTjgY/pZSyqX/fbMbJzf.qxCeTMQOzgL.CimRjMHtMxd/VGKojMu]",
"tests/test_bcrypt.py::test_hashpw_existing[tEPtJZXur16Vg-$2b$04$2moPs/x/wnCfeQ5pCheMcu-$2b$04$2moPs/x/wnCfeQ5pCheMcuSJQ/KYjOZG780UjA/SiR.KsYWNrC7SG]",
"tests/test_bcrypt.py::test_hashpw_existing[vvho8C6nlVf9K-$2b$04$HrEYC/AQ2HS77G78cQDZQ.-$2b$04$HrEYC/AQ2HS77G78cQDZQ.r44WGcruKw03KHlnp71yVQEwpsi3xl2]",
"tests/test_bcrypt.py::test_hashpw_existing[5auCCY9by0Ruf-$2b$04$vVYgSTfB8KVbmhbZE/k3R.-$2b$04$vVYgSTfB8KVbmhbZE/k3R.ux9A0lJUM4CZwCkHI9fifke2.rTF7MG]",
"tests/test_bcrypt.py::test_hashpw_existing[GtTkR6qn2QOZW-$2b$04$JfoNrR8.doieoI8..F.C1O-$2b$04$JfoNrR8.doieoI8..F.C1OQgwE3uTeuardy6lw0AjALUzOARoyf2m]",
"tests/test_bcrypt.py::test_hashpw_existing[zKo8vdFSnjX0f-$2b$04$HP3I0PUs7KBEzMBNFw7o3O-$2b$04$HP3I0PUs7KBEzMBNFw7o3O7f/uxaZU7aaDot1quHMgB2yrwBXsgyy]",
"tests/test_bcrypt.py::test_hashpw_existing[I9VfYlacJiwiK-$2b$04$xnFVhJsTzsFBTeP3PpgbMe-$2b$04$xnFVhJsTzsFBTeP3PpgbMeMREb6rdKV9faW54Sx.yg9plf4jY8qT6]",
"tests/test_bcrypt.py::test_hashpw_existing[VFPO7YXnHQbQO-$2b$04$WQp9.igoLqVr6Qk70mz6xu-$2b$04$WQp9.igoLqVr6Qk70mz6xuRxE0RttVXXdukpR9N54x17ecad34ZF6]",
"tests/test_bcrypt.py::test_hashpw_existing[VDx5BdxfxstYk-$2b$04$xgZtlonpAHSU/njOCdKztO-$2b$04$xgZtlonpAHSU/njOCdKztOPuPFzCNVpB4LGicO4/OGgHv.uKHkwsS]",
"tests/test_bcrypt.py::test_hashpw_existing[dEe6XfVGrrfSH-$2b$04$2Siw3Nv3Q/gTOIPetAyPr.-$2b$04$2Siw3Nv3Q/gTOIPetAyPr.GNj3aO0lb1E5E9UumYGKjP9BYqlNWJe]",
"tests/test_bcrypt.py::test_hashpw_existing[cTT0EAFdwJiLn-$2b$04$7/Qj7Kd8BcSahPO4khB8me-$2b$04$7/Qj7Kd8BcSahPO4khB8me4ssDJCW3r4OGYqPF87jxtrSyPj5cS5m]",
"tests/test_bcrypt.py::test_hashpw_existing[J8eHUDuxBB520-$2b$04$VvlCUKbTMjaxaYJ.k5juoe-$2b$04$VvlCUKbTMjaxaYJ.k5juoecpG/7IzcH1AkmqKi.lIZMVIOLClWAk.]",
"tests/test_bcrypt.py::test_hashpw_existing[U*U-$2a$05$CCCCCCCCCCCCCCCCCCCCC.-$2a$05$CCCCCCCCCCCCCCCCCCCCC.E5YPO9kmyuRGyh0XouQYb4YMJKvyOeW]",
"tests/test_bcrypt.py::test_hashpw_existing[U*U*-$2a$05$CCCCCCCCCCCCCCCCCCCCC.-$2a$05$CCCCCCCCCCCCCCCCCCCCC.VGOzA784oUp/Z0DY336zx7pLYAy0lwK]",
"tests/test_bcrypt.py::test_hashpw_existing[U*U*U-$2a$05$XXXXXXXXXXXXXXXXXXXXXO-$2a$05$XXXXXXXXXXXXXXXXXXXXXOAcXxm9kjPGEMsLznoKqmqw7tc8WCx4a]",
"tests/test_bcrypt.py::test_hashpw_existing[0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789chars",
"tests/test_bcrypt.py::test_hashpw_existing[\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaachars",
"tests/test_bcrypt.py::test_hashpw_existing[\\xa3-$2a$05$/OK.fbVrR/bpIqNJ5ianF.-$2a$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq]",
"tests/test_bcrypt.py::test_checkpw_2y_prefix[\\xa3-$2y$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq-$2y$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq]",
"tests/test_bcrypt.py::test_checkpw_2y_prefix[\\xff\\xff\\xa3-$2y$05$/OK.fbVrR/bpIqNJ5ianF.CE5elHaaO4EbggVDjb8P19RukzXSM3e-$2y$05$/OK.fbVrR/bpIqNJ5ianF.CE5elHaaO4EbggVDjb8P19RukzXSM3e]",
"tests/test_bcrypt.py::test_hashpw_invalid",
"tests/test_bcrypt.py::test_checkpw_wrong_password",
"tests/test_bcrypt.py::test_checkpw_bad_salt",
"tests/test_bcrypt.py::test_checkpw_str_password",
"tests/test_bcrypt.py::test_checkpw_str_salt",
"tests/test_bcrypt.py::test_hashpw_str_password",
"tests/test_bcrypt.py::test_hashpw_str_salt",
"tests/test_bcrypt.py::test_checkpw_nul_byte",
"tests/test_bcrypt.py::test_hashpw_nul_byte",
"tests/test_bcrypt.py::test_kdf[4-password-salt-[\\xbf\\x0c\\xc2\\x93X\\x7f\\x1c65U\\'ye\\x98\\xd4~W\\x90q\\xbfB~\\x9d\\x8f\\xbe\\x84*\\xba4\\xd9]",
"tests/test_bcrypt.py::test_kdf[4-password-\\x00-\\xc1+Vb5\\xee\\xe0L!%\\x98\\x97\\nW\\x9ag]",
"tests/test_bcrypt.py::test_kdf[4-\\x00-salt-`Q\\xbe\\x18\\xc2\\xf4\\xf8,\\xbf\\x0e\\xfe\\xe5G\\x1bK\\xb9]",
"tests/test_bcrypt.py::test_kdf[4-password\\x00-salt\\x00-t\\x10\\xe4L\\xf4\\xfa\\x07\\xbf\\xaa\\xc8\\xa9(\\xb1r\\x7f\\xac\\x00\\x13u\\xe7\\xbfs\\x847\\x0fH\\xef\\xd1!t0P]",
"tests/test_bcrypt.py::test_kdf[4-pass\\x00wor-sa\\x00l-\\xc2\\xbf\\xfd\\x9d\\xb3\\x8fei\\xef\\xefCr\\xf4\\xde\\x83\\xc0]",
"tests/test_bcrypt.py::test_kdf[4-pass\\x00word-sa\\x00lt-K\\xa4\\xac9%\\xc0\\xe8\\xd7\\xf0\\xcd\\xb6\\xbb\\x16\\x84\\xa5o]",
"tests/test_bcrypt.py::test_kdf[8-password-salt-\\xe16~\\xc5\\x15\\x1a3\\xfa\\xacL\\xc1\\xc1D\\xcd#\\xfa\\x15\\xd5T\\x84\\x93\\xec\\xc9\\x9b\\x9b]\\x9c\\r;'\\xbe\\xc7b'\\xeaf\\x08\\x8b\\x84\\x9b",
"tests/test_bcrypt.py::test_kdf[42-password-salt-\\x83<\\xf0\\xdc\\xf5m\\xb6V\\x08\\xe8\\xf0\\xdc\\x0c\\xe8\\x82\\xbd]",
"tests/test_bcrypt.py::test_kdf[8-Lorem",
"tests/test_bcrypt.py::test_kdf[8-\\r\\xb3\\xac\\x94\\xb3\\xeeS(OJ\"\\x89;<$\\xae-:b\\xf0\\xf0\\xdb\\xce\\xf8#\\xcf\\xcc\\x85HV\\xea\\x10(-",
"tests/test_bcrypt.py::test_kdf[8-\\xe1\\xbd\\x88\\xce\\xb4\\xcf\\x85\\xcf\\x83\\xcf\\x83\\xce\\xb5\\xcf\\x8d\\xcf\\x82-\\xce\\xa4\\xce\\xb7\\xce\\xbb\\xce\\xad\\xce\\xbc\\xce\\xb1\\xcf\\x87\\xce\\xbf\\xcf\\x82-Cfl\\x9b\\t\\xef3\\xed\\x8c'\\xe8\\xe8\\xf3\\xe2\\xd8\\xe6]",
"tests/test_bcrypt.py::test_kdf_str_password",
"tests/test_bcrypt.py::test_kdf_str_salt",
"tests/test_bcrypt.py::test_invalid_params[pass-$2b$04$cVWp4XaNU8a4v1uMRum2SO-10-10-TypeError]",
"tests/test_bcrypt.py::test_invalid_params[password-salt-10-10-TypeError]",
"tests/test_bcrypt.py::test_invalid_params[-$2b$04$cVWp4XaNU8a4v1uMRum2SO-10-10-ValueError]",
"tests/test_bcrypt.py::test_invalid_params[password--10-10-ValueError]",
"tests/test_bcrypt.py::test_invalid_params[password-$2b$04$cVWp4XaNU8a4v1uMRum2SO-0-10-ValueError]",
"tests/test_bcrypt.py::test_invalid_params[password-$2b$04$cVWp4XaNU8a4v1uMRum2SO--3-10-ValueError]",
"tests/test_bcrypt.py::test_invalid_params[password-$2b$04$cVWp4XaNU8a4v1uMRum2SO-513-10-ValueError]",
"tests/test_bcrypt.py::test_invalid_params[password-$2b$04$cVWp4XaNU8a4v1uMRum2SO-20-0-ValueError]",
"tests/test_bcrypt.py::test_bcrypt_assert",
"tests/test_bcrypt.py::test_2a_wraparound_bug"
]
| []
| Apache License 2.0 | 608 | [
"README.rst",
"MANIFEST.in",
".travis.yml",
"tox.ini",
"src/bcrypt/__init__.py"
]
| [
"README.rst",
"MANIFEST.in",
".travis.yml",
"tox.ini",
"src/bcrypt/__init__.py"
]
|
pyca__bcrypt-87 | c9c76210fad230995a6155287e8b92c49180eae4 | 2016-06-30 16:28:30 | c9c76210fad230995a6155287e8b92c49180eae4 | diff --git a/.travis.yml b/.travis.yml
index 456aba7..bbcb336 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -30,6 +30,7 @@ matrix:
env: TOXENV=pypy CC=clang
- python: 2.7
env: TOXENV=pep8
+ - env: TOXENV=packaging
- python: 3.5
env: TOXENV=py3pep8
- language: generic
diff --git a/MANIFEST.in b/MANIFEST.in
index 622d66b..93a4480 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,3 +1,11 @@
include LICENSE README.rst
+
+include tox.ini .coveragerc
include src/build_bcrypt.py
+
recursive-include src/_csrc *
+recursive-include tests *.py
+
+exclude requirements.txt tasks.py .travis.yml
+
+prune .travis
diff --git a/README.rst b/README.rst
index 0883286..b8c75a9 100644
--- a/README.rst
+++ b/README.rst
@@ -40,6 +40,7 @@ Changelog
3.1.0
-----
* Added support for ``checkpw`` as another method of verifying a password.
+* Ensure that you get a ``$2y$`` hash when you input a ``$2y$`` salt.
3.0.0
-----
diff --git a/src/bcrypt/__init__.py b/src/bcrypt/__init__.py
index d6acb84..abc9d75 100644
--- a/src/bcrypt/__init__.py
+++ b/src/bcrypt/__init__.py
@@ -39,10 +39,6 @@ __all__ = [
_normalize_re = re.compile(b"^\$2y\$")
-def _normalize_prefix(salt):
- return _normalize_re.sub(b"$2b$", salt)
-
-
def gensalt(rounds=12, prefix=b"2b"):
if prefix not in (b"2a", b"2b"):
raise ValueError("Supported prefixes are b'2a' or b'2b'")
@@ -75,7 +71,13 @@ def hashpw(password, salt):
# on $2a$, so we do it here to preserve compatibility with 2.0.0
password = password[:72]
- salt = _normalize_prefix(salt)
+ # When the original 8bit bug was found the original library we supported
+ # added a new prefix, $2y$, that fixes it. This prefix is exactly the same
+ # as the $2b$ prefix added by OpenBSD other than the name. Since the
+ # OpenBSD library does not support the $2y$ prefix, if the salt given to us
+ # is for the $2y$ prefix, we'll just munge it so that it's a $2b$ prior to
+ # passing it into the C library.
+ original_salt, salt = salt, _normalize_re.sub(b"$2b$", salt)
hashed = _bcrypt.ffi.new("unsigned char[]", 128)
retval = _bcrypt.lib.bcrypt_hashpass(password, salt, hashed, len(hashed))
@@ -83,7 +85,13 @@ def hashpw(password, salt):
if retval != 0:
raise ValueError("Invalid salt")
- return _bcrypt.ffi.string(hashed)
+ # Now that we've gotten our hashed password, we want to ensure that the
+ # prefix we return is the one that was passed in, so we'll use the prefix
+ # from the original salt and concatenate that with the return value (minus
+ # the return value's prefix). This will ensure that if someone passed in a
+ # salt with a $2y$ prefix, that they get back a hash with a $2y$ prefix
+ # even though we munged it to $2b$.
+ return original_salt[:4] + _bcrypt.ffi.string(hashed)[4:]
def checkpw(password, hashed_password):
@@ -96,9 +104,6 @@ def checkpw(password, hashed_password):
"password and hashed_password may not contain NUL bytes"
)
- # If the user supplies a $2y$ prefix we normalize to $2b$
- hashed_password = _normalize_prefix(hashed_password)
-
ret = hashpw(password, hashed_password)
return _bcrypt.lib.timingsafe_bcmp(ret, hashed_password, len(ret)) == 0
diff --git a/tox.ini b/tox.ini
index abc6283..264d9aa 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
[tox]
-envlist = py26,py27,pypy,py33,py34,py35,pep8,py3pep8
+envlist = py26,py27,pypy,py33,py34,py35,pep8,py3pep8,packaging
[testenv]
# If you add a new dep here you probably need to add it in setup.py as well
@@ -27,6 +27,15 @@ deps =
commands =
flake8 .
+[testenv:packaging]
+deps =
+ check-manifest
+ readme_renderer
+commands =
+ check-manifest
+ python setup.py check --metadata --restructuredtext --strict
+
+
[flake8]
exclude = .tox,*.egg
select = E,W,F,N,I
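
The prefix round-trip described in the patch comments above can be sketched standalone; `c_hashpass` is a hypothetical stand-in for the C library call, not a real function in this package:

```python
import re

_normalize_re = re.compile(b"^\\$2y\\$")

def hash_with_prefix_roundtrip(salt, c_hashpass):
    # Munge a $2y$ salt to $2b$ for the C library, then restore the
    # caller's original 4-byte prefix on the returned hash.
    munged = _normalize_re.sub(b"$2b$", salt)
    hashed = c_hashpass(munged)
    return salt[:4] + hashed[4:]
```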
| testsuite absent
The setup.py is filled with references to the testsuite and designates pytest as a test runner, despite the tests folder being absent from the tarball. | pyca/bcrypt | diff --git a/tests/test_bcrypt.py b/tests/test_bcrypt.py
index 47f315a..d9bde72 100644
--- a/tests/test_bcrypt.py
+++ b/tests/test_bcrypt.py
@@ -152,12 +152,12 @@ _2y_test_vectors = [
(
b"\xa3",
b"$2y$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq",
- b"$2b$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq",
+ b"$2y$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq",
),
(
b"\xff\xff\xa3",
b"$2y$05$/OK.fbVrR/bpIqNJ5ianF.CE5elHaaO4EbggVDjb8P19RukzXSM3e",
- b"$2b$05$/OK.fbVrR/bpIqNJ5ianF.CE5elHaaO4EbggVDjb8P19RukzXSM3e",
+ b"$2y$05$/OK.fbVrR/bpIqNJ5ianF.CE5elHaaO4EbggVDjb8P19RukzXSM3e",
),
]
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 5
} | 3.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[tests]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/pyca/bcrypt.git@c9c76210fad230995a6155287e8b92c49180eae4#egg=bcrypt
cffi==1.17.1
exceptiongroup==1.2.2
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pycparser==2.22
pytest==8.3.5
six==1.17.0
tomli==2.2.1
| name: bcrypt
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cffi==1.17.1
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pycparser==2.22
- pytest==8.3.5
- six==1.17.0
- tomli==2.2.1
prefix: /opt/conda/envs/bcrypt
| [
"tests/test_bcrypt.py::test_hashpw_2y_prefix[\\xa3-$2y$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq-$2y$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq]",
"tests/test_bcrypt.py::test_hashpw_2y_prefix[\\xff\\xff\\xa3-$2y$05$/OK.fbVrR/bpIqNJ5ianF.CE5elHaaO4EbggVDjb8P19RukzXSM3e-$2y$05$/OK.fbVrR/bpIqNJ5ianF.CE5elHaaO4EbggVDjb8P19RukzXSM3e]"
]
| []
| [
"tests/test_bcrypt.py::test_gensalt_basic",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[4-$2b$04$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[5-$2b$05$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[6-$2b$06$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[7-$2b$07$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[8-$2b$08$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[9-$2b$09$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[10-$2b$10$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[11-$2b$11$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[12-$2b$12$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[13-$2b$13$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[14-$2b$14$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[15-$2b$15$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[16-$2b$16$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[17-$2b$17$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[18-$2b$18$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[19-$2b$19$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[20-$2b$20$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[21-$2b$21$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[22-$2b$22$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[23-$2b$23$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[24-$2b$24$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_invalid[1]",
"tests/test_bcrypt.py::test_gensalt_rounds_invalid[2]",
"tests/test_bcrypt.py::test_gensalt_rounds_invalid[3]",
"tests/test_bcrypt.py::test_gensalt_bad_prefix",
"tests/test_bcrypt.py::test_gensalt_2a_prefix",
"tests/test_bcrypt.py::test_hashpw_new[Kk4DQuMMfZL9o-$2b$04$cVWp4XaNU8a4v1uMRum2SO-$2b$04$cVWp4XaNU8a4v1uMRum2SO026BWLIoQMD/TXg5uZV.0P.uO8m3YEm]",
"tests/test_bcrypt.py::test_hashpw_new[9IeRXmnGxMYbs-$2b$04$pQ7gRO7e6wx/936oXhNjrO-$2b$04$pQ7gRO7e6wx/936oXhNjrOUNOHL1D0h1N2IDbJZYs.1ppzSof6SPy]",
"tests/test_bcrypt.py::test_hashpw_new[xVQVbwa1S0M8r-$2b$04$SQe9knOzepOVKoYXo9xTte-$2b$04$SQe9knOzepOVKoYXo9xTteNYr6MBwVz4tpriJVe3PNgYufGIsgKcW]",
"tests/test_bcrypt.py::test_hashpw_new[Zfgr26LWd22Za-$2b$04$eH8zX.q5Q.j2hO1NkVYJQO-$2b$04$eH8zX.q5Q.j2hO1NkVYJQOM6KxntS/ow3.YzVmFrE4t//CoF4fvne]",
"tests/test_bcrypt.py::test_hashpw_new[Tg4daC27epFBE-$2b$04$ahiTdwRXpUG2JLRcIznxc.-$2b$04$ahiTdwRXpUG2JLRcIznxc.s1.ydaPGD372bsGs8NqyYjLY1inG5n2]",
"tests/test_bcrypt.py::test_hashpw_new[xhQPMmwh5ALzW-$2b$04$nQn78dV0hGHf5wUBe0zOFu-$2b$04$nQn78dV0hGHf5wUBe0zOFu8n07ZbWWOKoGasZKRspZxtt.vBRNMIy]",
"tests/test_bcrypt.py::test_hashpw_new[59je8h5Gj71tg-$2b$04$cvXudZ5ugTg95W.rOjMITu-$2b$04$cvXudZ5ugTg95W.rOjMITuM1jC0piCl3zF5cmGhzCibHZrNHkmckG]",
"tests/test_bcrypt.py::test_hashpw_new[wT4fHJa2N9WSW-$2b$04$YYjtiq4Uh88yUsExO0RNTu-$2b$04$YYjtiq4Uh88yUsExO0RNTuEJ.tZlsONac16A8OcLHleWFjVawfGvO]",
"tests/test_bcrypt.py::test_hashpw_new[uSgFRnQdOgm4S-$2b$04$WLTjgY/pZSyqX/fbMbJzf.-$2b$04$WLTjgY/pZSyqX/fbMbJzf.qxCeTMQOzgL.CimRjMHtMxd/VGKojMu]",
"tests/test_bcrypt.py::test_hashpw_new[tEPtJZXur16Vg-$2b$04$2moPs/x/wnCfeQ5pCheMcu-$2b$04$2moPs/x/wnCfeQ5pCheMcuSJQ/KYjOZG780UjA/SiR.KsYWNrC7SG]",
"tests/test_bcrypt.py::test_hashpw_new[vvho8C6nlVf9K-$2b$04$HrEYC/AQ2HS77G78cQDZQ.-$2b$04$HrEYC/AQ2HS77G78cQDZQ.r44WGcruKw03KHlnp71yVQEwpsi3xl2]",
"tests/test_bcrypt.py::test_hashpw_new[5auCCY9by0Ruf-$2b$04$vVYgSTfB8KVbmhbZE/k3R.-$2b$04$vVYgSTfB8KVbmhbZE/k3R.ux9A0lJUM4CZwCkHI9fifke2.rTF7MG]",
"tests/test_bcrypt.py::test_hashpw_new[GtTkR6qn2QOZW-$2b$04$JfoNrR8.doieoI8..F.C1O-$2b$04$JfoNrR8.doieoI8..F.C1OQgwE3uTeuardy6lw0AjALUzOARoyf2m]",
"tests/test_bcrypt.py::test_hashpw_new[zKo8vdFSnjX0f-$2b$04$HP3I0PUs7KBEzMBNFw7o3O-$2b$04$HP3I0PUs7KBEzMBNFw7o3O7f/uxaZU7aaDot1quHMgB2yrwBXsgyy]",
"tests/test_bcrypt.py::test_hashpw_new[I9VfYlacJiwiK-$2b$04$xnFVhJsTzsFBTeP3PpgbMe-$2b$04$xnFVhJsTzsFBTeP3PpgbMeMREb6rdKV9faW54Sx.yg9plf4jY8qT6]",
"tests/test_bcrypt.py::test_hashpw_new[VFPO7YXnHQbQO-$2b$04$WQp9.igoLqVr6Qk70mz6xu-$2b$04$WQp9.igoLqVr6Qk70mz6xuRxE0RttVXXdukpR9N54x17ecad34ZF6]",
"tests/test_bcrypt.py::test_hashpw_new[VDx5BdxfxstYk-$2b$04$xgZtlonpAHSU/njOCdKztO-$2b$04$xgZtlonpAHSU/njOCdKztOPuPFzCNVpB4LGicO4/OGgHv.uKHkwsS]",
"tests/test_bcrypt.py::test_hashpw_new[dEe6XfVGrrfSH-$2b$04$2Siw3Nv3Q/gTOIPetAyPr.-$2b$04$2Siw3Nv3Q/gTOIPetAyPr.GNj3aO0lb1E5E9UumYGKjP9BYqlNWJe]",
"tests/test_bcrypt.py::test_hashpw_new[cTT0EAFdwJiLn-$2b$04$7/Qj7Kd8BcSahPO4khB8me-$2b$04$7/Qj7Kd8BcSahPO4khB8me4ssDJCW3r4OGYqPF87jxtrSyPj5cS5m]",
"tests/test_bcrypt.py::test_hashpw_new[J8eHUDuxBB520-$2b$04$VvlCUKbTMjaxaYJ.k5juoe-$2b$04$VvlCUKbTMjaxaYJ.k5juoecpG/7IzcH1AkmqKi.lIZMVIOLClWAk.]",
"tests/test_bcrypt.py::test_hashpw_new[U*U-$2a$05$CCCCCCCCCCCCCCCCCCCCC.-$2a$05$CCCCCCCCCCCCCCCCCCCCC.E5YPO9kmyuRGyh0XouQYb4YMJKvyOeW]",
"tests/test_bcrypt.py::test_hashpw_new[U*U*-$2a$05$CCCCCCCCCCCCCCCCCCCCC.-$2a$05$CCCCCCCCCCCCCCCCCCCCC.VGOzA784oUp/Z0DY336zx7pLYAy0lwK]",
"tests/test_bcrypt.py::test_hashpw_new[U*U*U-$2a$05$XXXXXXXXXXXXXXXXXXXXXO-$2a$05$XXXXXXXXXXXXXXXXXXXXXOAcXxm9kjPGEMsLznoKqmqw7tc8WCx4a]",
"tests/test_bcrypt.py::test_hashpw_new[0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789chars",
"tests/test_bcrypt.py::test_hashpw_new[\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaachars",
"tests/test_bcrypt.py::test_hashpw_new[\\xa3-$2a$05$/OK.fbVrR/bpIqNJ5ianF.-$2a$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq]",
"tests/test_bcrypt.py::test_checkpw[Kk4DQuMMfZL9o-$2b$04$cVWp4XaNU8a4v1uMRum2SO-$2b$04$cVWp4XaNU8a4v1uMRum2SO026BWLIoQMD/TXg5uZV.0P.uO8m3YEm]",
"tests/test_bcrypt.py::test_checkpw[9IeRXmnGxMYbs-$2b$04$pQ7gRO7e6wx/936oXhNjrO-$2b$04$pQ7gRO7e6wx/936oXhNjrOUNOHL1D0h1N2IDbJZYs.1ppzSof6SPy]",
"tests/test_bcrypt.py::test_checkpw[xVQVbwa1S0M8r-$2b$04$SQe9knOzepOVKoYXo9xTte-$2b$04$SQe9knOzepOVKoYXo9xTteNYr6MBwVz4tpriJVe3PNgYufGIsgKcW]",
"tests/test_bcrypt.py::test_checkpw[Zfgr26LWd22Za-$2b$04$eH8zX.q5Q.j2hO1NkVYJQO-$2b$04$eH8zX.q5Q.j2hO1NkVYJQOM6KxntS/ow3.YzVmFrE4t//CoF4fvne]",
"tests/test_bcrypt.py::test_checkpw[Tg4daC27epFBE-$2b$04$ahiTdwRXpUG2JLRcIznxc.-$2b$04$ahiTdwRXpUG2JLRcIznxc.s1.ydaPGD372bsGs8NqyYjLY1inG5n2]",
"tests/test_bcrypt.py::test_checkpw[xhQPMmwh5ALzW-$2b$04$nQn78dV0hGHf5wUBe0zOFu-$2b$04$nQn78dV0hGHf5wUBe0zOFu8n07ZbWWOKoGasZKRspZxtt.vBRNMIy]",
"tests/test_bcrypt.py::test_checkpw[59je8h5Gj71tg-$2b$04$cvXudZ5ugTg95W.rOjMITu-$2b$04$cvXudZ5ugTg95W.rOjMITuM1jC0piCl3zF5cmGhzCibHZrNHkmckG]",
"tests/test_bcrypt.py::test_checkpw[wT4fHJa2N9WSW-$2b$04$YYjtiq4Uh88yUsExO0RNTu-$2b$04$YYjtiq4Uh88yUsExO0RNTuEJ.tZlsONac16A8OcLHleWFjVawfGvO]",
"tests/test_bcrypt.py::test_checkpw[uSgFRnQdOgm4S-$2b$04$WLTjgY/pZSyqX/fbMbJzf.-$2b$04$WLTjgY/pZSyqX/fbMbJzf.qxCeTMQOzgL.CimRjMHtMxd/VGKojMu]",
"tests/test_bcrypt.py::test_checkpw[tEPtJZXur16Vg-$2b$04$2moPs/x/wnCfeQ5pCheMcu-$2b$04$2moPs/x/wnCfeQ5pCheMcuSJQ/KYjOZG780UjA/SiR.KsYWNrC7SG]",
"tests/test_bcrypt.py::test_checkpw[vvho8C6nlVf9K-$2b$04$HrEYC/AQ2HS77G78cQDZQ.-$2b$04$HrEYC/AQ2HS77G78cQDZQ.r44WGcruKw03KHlnp71yVQEwpsi3xl2]",
"tests/test_bcrypt.py::test_checkpw[5auCCY9by0Ruf-$2b$04$vVYgSTfB8KVbmhbZE/k3R.-$2b$04$vVYgSTfB8KVbmhbZE/k3R.ux9A0lJUM4CZwCkHI9fifke2.rTF7MG]",
"tests/test_bcrypt.py::test_checkpw[GtTkR6qn2QOZW-$2b$04$JfoNrR8.doieoI8..F.C1O-$2b$04$JfoNrR8.doieoI8..F.C1OQgwE3uTeuardy6lw0AjALUzOARoyf2m]",
"tests/test_bcrypt.py::test_checkpw[zKo8vdFSnjX0f-$2b$04$HP3I0PUs7KBEzMBNFw7o3O-$2b$04$HP3I0PUs7KBEzMBNFw7o3O7f/uxaZU7aaDot1quHMgB2yrwBXsgyy]",
"tests/test_bcrypt.py::test_checkpw[I9VfYlacJiwiK-$2b$04$xnFVhJsTzsFBTeP3PpgbMe-$2b$04$xnFVhJsTzsFBTeP3PpgbMeMREb6rdKV9faW54Sx.yg9plf4jY8qT6]",
"tests/test_bcrypt.py::test_checkpw[VFPO7YXnHQbQO-$2b$04$WQp9.igoLqVr6Qk70mz6xu-$2b$04$WQp9.igoLqVr6Qk70mz6xuRxE0RttVXXdukpR9N54x17ecad34ZF6]",
"tests/test_bcrypt.py::test_checkpw[VDx5BdxfxstYk-$2b$04$xgZtlonpAHSU/njOCdKztO-$2b$04$xgZtlonpAHSU/njOCdKztOPuPFzCNVpB4LGicO4/OGgHv.uKHkwsS]",
"tests/test_bcrypt.py::test_checkpw[dEe6XfVGrrfSH-$2b$04$2Siw3Nv3Q/gTOIPetAyPr.-$2b$04$2Siw3Nv3Q/gTOIPetAyPr.GNj3aO0lb1E5E9UumYGKjP9BYqlNWJe]",
"tests/test_bcrypt.py::test_checkpw[cTT0EAFdwJiLn-$2b$04$7/Qj7Kd8BcSahPO4khB8me-$2b$04$7/Qj7Kd8BcSahPO4khB8me4ssDJCW3r4OGYqPF87jxtrSyPj5cS5m]",
"tests/test_bcrypt.py::test_checkpw[J8eHUDuxBB520-$2b$04$VvlCUKbTMjaxaYJ.k5juoe-$2b$04$VvlCUKbTMjaxaYJ.k5juoecpG/7IzcH1AkmqKi.lIZMVIOLClWAk.]",
"tests/test_bcrypt.py::test_checkpw[U*U-$2a$05$CCCCCCCCCCCCCCCCCCCCC.-$2a$05$CCCCCCCCCCCCCCCCCCCCC.E5YPO9kmyuRGyh0XouQYb4YMJKvyOeW]",
"tests/test_bcrypt.py::test_checkpw[U*U*-$2a$05$CCCCCCCCCCCCCCCCCCCCC.-$2a$05$CCCCCCCCCCCCCCCCCCCCC.VGOzA784oUp/Z0DY336zx7pLYAy0lwK]",
"tests/test_bcrypt.py::test_checkpw[U*U*U-$2a$05$XXXXXXXXXXXXXXXXXXXXXO-$2a$05$XXXXXXXXXXXXXXXXXXXXXOAcXxm9kjPGEMsLznoKqmqw7tc8WCx4a]",
"tests/test_bcrypt.py::test_checkpw[0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789chars",
"tests/test_bcrypt.py::test_checkpw[\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaachars",
"tests/test_bcrypt.py::test_checkpw[\\xa3-$2a$05$/OK.fbVrR/bpIqNJ5ianF.-$2a$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq]",
"tests/test_bcrypt.py::test_hashpw_existing[Kk4DQuMMfZL9o-$2b$04$cVWp4XaNU8a4v1uMRum2SO-$2b$04$cVWp4XaNU8a4v1uMRum2SO026BWLIoQMD/TXg5uZV.0P.uO8m3YEm]",
"tests/test_bcrypt.py::test_hashpw_existing[9IeRXmnGxMYbs-$2b$04$pQ7gRO7e6wx/936oXhNjrO-$2b$04$pQ7gRO7e6wx/936oXhNjrOUNOHL1D0h1N2IDbJZYs.1ppzSof6SPy]",
"tests/test_bcrypt.py::test_hashpw_existing[xVQVbwa1S0M8r-$2b$04$SQe9knOzepOVKoYXo9xTte-$2b$04$SQe9knOzepOVKoYXo9xTteNYr6MBwVz4tpriJVe3PNgYufGIsgKcW]",
"tests/test_bcrypt.py::test_hashpw_existing[Zfgr26LWd22Za-$2b$04$eH8zX.q5Q.j2hO1NkVYJQO-$2b$04$eH8zX.q5Q.j2hO1NkVYJQOM6KxntS/ow3.YzVmFrE4t//CoF4fvne]",
"tests/test_bcrypt.py::test_hashpw_existing[Tg4daC27epFBE-$2b$04$ahiTdwRXpUG2JLRcIznxc.-$2b$04$ahiTdwRXpUG2JLRcIznxc.s1.ydaPGD372bsGs8NqyYjLY1inG5n2]",
"tests/test_bcrypt.py::test_hashpw_existing[xhQPMmwh5ALzW-$2b$04$nQn78dV0hGHf5wUBe0zOFu-$2b$04$nQn78dV0hGHf5wUBe0zOFu8n07ZbWWOKoGasZKRspZxtt.vBRNMIy]",
"tests/test_bcrypt.py::test_hashpw_existing[59je8h5Gj71tg-$2b$04$cvXudZ5ugTg95W.rOjMITu-$2b$04$cvXudZ5ugTg95W.rOjMITuM1jC0piCl3zF5cmGhzCibHZrNHkmckG]",
"tests/test_bcrypt.py::test_hashpw_existing[wT4fHJa2N9WSW-$2b$04$YYjtiq4Uh88yUsExO0RNTu-$2b$04$YYjtiq4Uh88yUsExO0RNTuEJ.tZlsONac16A8OcLHleWFjVawfGvO]",
"tests/test_bcrypt.py::test_hashpw_existing[uSgFRnQdOgm4S-$2b$04$WLTjgY/pZSyqX/fbMbJzf.-$2b$04$WLTjgY/pZSyqX/fbMbJzf.qxCeTMQOzgL.CimRjMHtMxd/VGKojMu]",
"tests/test_bcrypt.py::test_hashpw_existing[tEPtJZXur16Vg-$2b$04$2moPs/x/wnCfeQ5pCheMcu-$2b$04$2moPs/x/wnCfeQ5pCheMcuSJQ/KYjOZG780UjA/SiR.KsYWNrC7SG]",
"tests/test_bcrypt.py::test_hashpw_existing[vvho8C6nlVf9K-$2b$04$HrEYC/AQ2HS77G78cQDZQ.-$2b$04$HrEYC/AQ2HS77G78cQDZQ.r44WGcruKw03KHlnp71yVQEwpsi3xl2]",
"tests/test_bcrypt.py::test_hashpw_existing[5auCCY9by0Ruf-$2b$04$vVYgSTfB8KVbmhbZE/k3R.-$2b$04$vVYgSTfB8KVbmhbZE/k3R.ux9A0lJUM4CZwCkHI9fifke2.rTF7MG]",
"tests/test_bcrypt.py::test_hashpw_existing[GtTkR6qn2QOZW-$2b$04$JfoNrR8.doieoI8..F.C1O-$2b$04$JfoNrR8.doieoI8..F.C1OQgwE3uTeuardy6lw0AjALUzOARoyf2m]",
"tests/test_bcrypt.py::test_hashpw_existing[zKo8vdFSnjX0f-$2b$04$HP3I0PUs7KBEzMBNFw7o3O-$2b$04$HP3I0PUs7KBEzMBNFw7o3O7f/uxaZU7aaDot1quHMgB2yrwBXsgyy]",
"tests/test_bcrypt.py::test_hashpw_existing[I9VfYlacJiwiK-$2b$04$xnFVhJsTzsFBTeP3PpgbMe-$2b$04$xnFVhJsTzsFBTeP3PpgbMeMREb6rdKV9faW54Sx.yg9plf4jY8qT6]",
"tests/test_bcrypt.py::test_hashpw_existing[VFPO7YXnHQbQO-$2b$04$WQp9.igoLqVr6Qk70mz6xu-$2b$04$WQp9.igoLqVr6Qk70mz6xuRxE0RttVXXdukpR9N54x17ecad34ZF6]",
"tests/test_bcrypt.py::test_hashpw_existing[VDx5BdxfxstYk-$2b$04$xgZtlonpAHSU/njOCdKztO-$2b$04$xgZtlonpAHSU/njOCdKztOPuPFzCNVpB4LGicO4/OGgHv.uKHkwsS]",
"tests/test_bcrypt.py::test_hashpw_existing[dEe6XfVGrrfSH-$2b$04$2Siw3Nv3Q/gTOIPetAyPr.-$2b$04$2Siw3Nv3Q/gTOIPetAyPr.GNj3aO0lb1E5E9UumYGKjP9BYqlNWJe]",
"tests/test_bcrypt.py::test_hashpw_existing[cTT0EAFdwJiLn-$2b$04$7/Qj7Kd8BcSahPO4khB8me-$2b$04$7/Qj7Kd8BcSahPO4khB8me4ssDJCW3r4OGYqPF87jxtrSyPj5cS5m]",
"tests/test_bcrypt.py::test_hashpw_existing[J8eHUDuxBB520-$2b$04$VvlCUKbTMjaxaYJ.k5juoe-$2b$04$VvlCUKbTMjaxaYJ.k5juoecpG/7IzcH1AkmqKi.lIZMVIOLClWAk.]",
"tests/test_bcrypt.py::test_hashpw_existing[U*U-$2a$05$CCCCCCCCCCCCCCCCCCCCC.-$2a$05$CCCCCCCCCCCCCCCCCCCCC.E5YPO9kmyuRGyh0XouQYb4YMJKvyOeW]",
"tests/test_bcrypt.py::test_hashpw_existing[U*U*-$2a$05$CCCCCCCCCCCCCCCCCCCCC.-$2a$05$CCCCCCCCCCCCCCCCCCCCC.VGOzA784oUp/Z0DY336zx7pLYAy0lwK]",
"tests/test_bcrypt.py::test_hashpw_existing[U*U*U-$2a$05$XXXXXXXXXXXXXXXXXXXXXO-$2a$05$XXXXXXXXXXXXXXXXXXXXXOAcXxm9kjPGEMsLznoKqmqw7tc8WCx4a]",
"tests/test_bcrypt.py::test_hashpw_existing[0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789chars",
"tests/test_bcrypt.py::test_hashpw_existing[\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaachars",
"tests/test_bcrypt.py::test_hashpw_existing[\\xa3-$2a$05$/OK.fbVrR/bpIqNJ5ianF.-$2a$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq]",
"tests/test_bcrypt.py::test_checkpw_2y_prefix[\\xa3-$2y$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq-$2y$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq]",
"tests/test_bcrypt.py::test_checkpw_2y_prefix[\\xff\\xff\\xa3-$2y$05$/OK.fbVrR/bpIqNJ5ianF.CE5elHaaO4EbggVDjb8P19RukzXSM3e-$2y$05$/OK.fbVrR/bpIqNJ5ianF.CE5elHaaO4EbggVDjb8P19RukzXSM3e]",
"tests/test_bcrypt.py::test_hashpw_invalid",
"tests/test_bcrypt.py::test_checkpw_wrong_password",
"tests/test_bcrypt.py::test_checkpw_bad_salt",
"tests/test_bcrypt.py::test_checkpw_str_password",
"tests/test_bcrypt.py::test_checkpw_str_salt",
"tests/test_bcrypt.py::test_hashpw_str_password",
"tests/test_bcrypt.py::test_hashpw_str_salt",
"tests/test_bcrypt.py::test_checkpw_nul_byte",
"tests/test_bcrypt.py::test_hashpw_nul_byte",
"tests/test_bcrypt.py::test_kdf[4-password-salt-[\\xbf\\x0c\\xc2\\x93X\\x7f\\x1c65U\\'ye\\x98\\xd4~W\\x90q\\xbfB~\\x9d\\x8f\\xbe\\x84*\\xba4\\xd9]",
"tests/test_bcrypt.py::test_kdf[4-password-\\x00-\\xc1+Vb5\\xee\\xe0L!%\\x98\\x97\\nW\\x9ag]",
"tests/test_bcrypt.py::test_kdf[4-\\x00-salt-`Q\\xbe\\x18\\xc2\\xf4\\xf8,\\xbf\\x0e\\xfe\\xe5G\\x1bK\\xb9]",
"tests/test_bcrypt.py::test_kdf[4-password\\x00-salt\\x00-t\\x10\\xe4L\\xf4\\xfa\\x07\\xbf\\xaa\\xc8\\xa9(\\xb1r\\x7f\\xac\\x00\\x13u\\xe7\\xbfs\\x847\\x0fH\\xef\\xd1!t0P]",
"tests/test_bcrypt.py::test_kdf[4-pass\\x00wor-sa\\x00l-\\xc2\\xbf\\xfd\\x9d\\xb3\\x8fei\\xef\\xefCr\\xf4\\xde\\x83\\xc0]",
"tests/test_bcrypt.py::test_kdf[4-pass\\x00word-sa\\x00lt-K\\xa4\\xac9%\\xc0\\xe8\\xd7\\xf0\\xcd\\xb6\\xbb\\x16\\x84\\xa5o]",
"tests/test_bcrypt.py::test_kdf[8-password-salt-\\xe16~\\xc5\\x15\\x1a3\\xfa\\xacL\\xc1\\xc1D\\xcd#\\xfa\\x15\\xd5T\\x84\\x93\\xec\\xc9\\x9b\\x9b]\\x9c\\r;'\\xbe\\xc7b'\\xeaf\\x08\\x8b\\x84\\x9b",
"tests/test_bcrypt.py::test_kdf[42-password-salt-\\x83<\\xf0\\xdc\\xf5m\\xb6V\\x08\\xe8\\xf0\\xdc\\x0c\\xe8\\x82\\xbd]",
"tests/test_bcrypt.py::test_kdf[8-Lorem",
"tests/test_bcrypt.py::test_kdf[8-\\r\\xb3\\xac\\x94\\xb3\\xeeS(OJ\"\\x89;<$\\xae-:b\\xf0\\xf0\\xdb\\xce\\xf8#\\xcf\\xcc\\x85HV\\xea\\x10(-",
"tests/test_bcrypt.py::test_kdf[8-\\xe1\\xbd\\x88\\xce\\xb4\\xcf\\x85\\xcf\\x83\\xcf\\x83\\xce\\xb5\\xcf\\x8d\\xcf\\x82-\\xce\\xa4\\xce\\xb7\\xce\\xbb\\xce\\xad\\xce\\xbc\\xce\\xb1\\xcf\\x87\\xce\\xbf\\xcf\\x82-Cfl\\x9b\\t\\xef3\\xed\\x8c'\\xe8\\xe8\\xf3\\xe2\\xd8\\xe6]",
"tests/test_bcrypt.py::test_kdf_str_password",
"tests/test_bcrypt.py::test_kdf_str_salt",
"tests/test_bcrypt.py::test_invalid_params[pass-$2b$04$cVWp4XaNU8a4v1uMRum2SO-10-10-TypeError]",
"tests/test_bcrypt.py::test_invalid_params[password-salt-10-10-TypeError]",
"tests/test_bcrypt.py::test_invalid_params[-$2b$04$cVWp4XaNU8a4v1uMRum2SO-10-10-ValueError]",
"tests/test_bcrypt.py::test_invalid_params[password--10-10-ValueError]",
"tests/test_bcrypt.py::test_invalid_params[password-$2b$04$cVWp4XaNU8a4v1uMRum2SO-0-10-ValueError]",
"tests/test_bcrypt.py::test_invalid_params[password-$2b$04$cVWp4XaNU8a4v1uMRum2SO--3-10-ValueError]",
"tests/test_bcrypt.py::test_invalid_params[password-$2b$04$cVWp4XaNU8a4v1uMRum2SO-513-10-ValueError]",
"tests/test_bcrypt.py::test_invalid_params[password-$2b$04$cVWp4XaNU8a4v1uMRum2SO-20-0-ValueError]",
"tests/test_bcrypt.py::test_bcrypt_assert",
"tests/test_bcrypt.py::test_2a_wraparound_bug"
]
| []
| Apache License 2.0 | 609 | [
"README.rst",
"MANIFEST.in",
".travis.yml",
"tox.ini",
"src/bcrypt/__init__.py"
]
| [
"README.rst",
"MANIFEST.in",
".travis.yml",
"tox.ini",
"src/bcrypt/__init__.py"
]
|
|
falconry__falcon-839 | 2177a7a44c62f9159125049b232eb3cf668a69cd | 2016-06-30 20:36:53 | 67d61029847cbf59e4053c8a424df4f9f87ad36f | codecov-io: ## [Current coverage][cc-pull] is **100%**
> Merging [#839][cc-pull] into [master][cc-base-branch] will not change coverage
```diff
@@            master     #839   diff @@
====================================
  Files           29       29
  Lines         1790     1803    +13
  Methods          0        0
  Messages         0        0
  Branches       299      301     +2
====================================
+ Hits          1790     1803    +13
  Misses           0        0
  Partials         0        0
```
> Powered by [Codecov](https://codecov.io?src=pr). Last updated by [2177a7a...e850a04][cc-compare]
[cc-base-branch]: https://codecov.io/gh/falconry/falcon/branch/master?src=pr
[cc-compare]: https://codecov.io/gh/falconry/falcon/compare/2177a7a44c62f9159125049b232eb3cf668a69cd...e850a04544bddfb08e0679719620d59b1ecf28e1
[cc-pull]: https://codecov.io/gh/falconry/falcon/pull/839?src=pr | diff --git a/falcon/request.py b/falcon/request.py
index 53edeae..597ac80 100644
--- a/falcon/request.py
+++ b/falcon/request.py
@@ -11,6 +11,7 @@
# limitations under the License.
from datetime import datetime
+import json
try:
# NOTE(kgrifs): In Python 2.6 and 2.7, socket._fileobject is a
@@ -1030,7 +1031,7 @@ class Request(object):
try:
date = strptime(param_value, format_string).date()
except ValueError:
- msg = 'The date value does not match the required format'
+ msg = 'The date value does not match the required format.'
raise errors.HTTPInvalidParam(msg, name)
if store is not None:
@@ -1038,6 +1039,45 @@ class Request(object):
return date
+ def get_param_as_dict(self, name, required=False, store=None):
+ """Return the value of a query string parameter as a dict.
+
+ Given a JSON value, parse and return it as a dict.
+
+ Args:
+ name (str): Parameter name, case-sensitive (e.g., 'payload').
+ required (bool, optional): Set to ``True`` to raise
+ ``HTTPBadRequest`` instead of returning ``None`` when the
+ parameter is not found (default ``False``).
+ store (dict, optional): A ``dict``-like object in which to place
+ the value of the param, but only if the param is found (default
+ ``None``).
+
+ Returns:
+ dict: The value of the param if it is found. Otherwise, returns
+ ``None`` unless required is ``True``.
+
+ Raises:
+ HTTPBadRequest: A required param is missing from the request.
+ HTTPInvalidParam: The parameter's value could not be parsed as JSON.
+ """
+
+ param_value = self.get_param(name, required=required)
+
+ if param_value is None:
+ return None
+
+ try:
+ val = json.loads(param_value)
+ except ValueError:
+ msg = 'It could not be parsed as JSON.'
+ raise errors.HTTPInvalidParam(msg, name)
+
+ if store is not None:
+ store[name] = val
+
+ return val
+
def log_error(self, message):
"""Write an error message to the server's log.
| Implement get_param_as_dict()
This helper would deserialize JSON into a `dict`. For example:
`thing=%7B%22name%22%3A%20%22value%22%2C%20%22another%22%3A%20%22field%22%2C%20%22some%22%3A%20%22thing%22%7D`
...would be interpreted by `req.get_param_as_dict` as:
```json
{"name": "value", "another": "field", "some": "thing"}
``` | falconry/falcon | diff --git a/tests/test_query_params.py b/tests/test_query_params.py
index bb01847..c588f23 100644
--- a/tests/test_query_params.py
+++ b/tests/test_query_params.py
@@ -1,4 +1,5 @@
from datetime import date
+import json
import ddt
@@ -473,6 +474,39 @@ class _TestQueryParams(testing.TestBase):
self.assertRaises(HTTPInvalidParam, req.get_param_as_date,
'thedate', format_string=format_string)
+ def test_get_dict_valid(self):
+ payload_dict = {'foo': 'bar'}
+ query_string = 'payload={0}'.format(json.dumps(payload_dict))
+ self.simulate_request('/', query_string=query_string)
+ req = self.resource.req
+ self.assertEqual(req.get_param_as_dict('payload'),
+ payload_dict)
+
+ def test_get_dict_missing_param(self):
+ payload_dict = {'foo': 'bar'}
+ query_string = 'notthepayload={0}'.format(json.dumps(payload_dict))
+ self.simulate_request('/', query_string=query_string)
+ req = self.resource.req
+ self.assertEqual(req.get_param_as_dict('payload'),
+ None)
+
+ def test_get_dict_store(self):
+ payload_dict = {'foo': 'bar'}
+ query_string = 'payload={0}'.format(json.dumps(payload_dict))
+ self.simulate_request('/', query_string=query_string)
+ req = self.resource.req
+ store = {}
+ req.get_param_as_dict('payload', store=store)
+ self.assertNotEqual(len(store), 0)
+
+ def test_get_dict_invalid(self):
+ payload_dict = 'foobar'
+ query_string = 'payload={0}'.format(payload_dict)
+ self.simulate_request('/', query_string=query_string)
+ req = self.resource.req
+ self.assertRaises(HTTPInvalidParam, req.get_param_as_dict,
+ 'payload')
+
class PostQueryParams(_TestQueryParams):
def before(self):
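
A minimal usage sketch for the `get_param_as_dict` helper this PR adds; the route, resource name, and parameter name are illustrative, not part of the patch:

```python
import json

import falcon

class ThingResource(object):
    def on_get(self, req, resp):
        # e.g. GET /things?payload=%7B%22foo%22%3A%20%22bar%22%7D
        payload = req.get_param_as_dict('payload') or {}
        resp.body = json.dumps(payload)  # echo the parsed dict back

app = falcon.API()
app.add_route('/things', ThingResource())
```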
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 1
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"ddt",
"testtools",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": [
"tools/test-requires"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
ddt==1.7.2
-e git+https://github.com/falconry/falcon.git@2177a7a44c62f9159125049b232eb3cf668a69cd#egg=falcon
fixtures==4.0.1
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
nose==1.3.7
packaging==21.3
pbr==6.1.1
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
python-mimeparse==1.6.0
PyYAML==6.0.1
requests==2.27.1
six==1.17.0
testtools==2.6.0
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: falcon
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- charset-normalizer==2.0.12
- coverage==6.2
- ddt==1.7.2
- fixtures==4.0.1
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- nose==1.3.7
- packaging==21.3
- pbr==6.1.1
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- python-mimeparse==1.6.0
- pyyaml==6.0.1
- requests==2.27.1
- six==1.17.0
- testtools==2.6.0
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/falcon
| [
"tests/test_query_params.py::_TestQueryParams::test_get_dict_invalid",
"tests/test_query_params.py::_TestQueryParams::test_get_dict_missing_param",
"tests/test_query_params.py::_TestQueryParams::test_get_dict_store",
"tests/test_query_params.py::_TestQueryParams::test_get_dict_valid",
"tests/test_query_params.py::PostQueryParams::test_get_dict_invalid",
"tests/test_query_params.py::PostQueryParams::test_get_dict_missing_param",
"tests/test_query_params.py::PostQueryParams::test_get_dict_store",
"tests/test_query_params.py::PostQueryParams::test_get_dict_valid",
"tests/test_query_params.py::GetQueryParams::test_get_dict_invalid",
"tests/test_query_params.py::GetQueryParams::test_get_dict_missing_param",
"tests/test_query_params.py::GetQueryParams::test_get_dict_store",
"tests/test_query_params.py::GetQueryParams::test_get_dict_valid"
]
| []
| [
"tests/test_query_params.py::_TestQueryParams::test_allowed_names",
"tests/test_query_params.py::_TestQueryParams::test_bad_percentage",
"tests/test_query_params.py::_TestQueryParams::test_blank",
"tests/test_query_params.py::_TestQueryParams::test_boolean",
"tests/test_query_params.py::_TestQueryParams::test_boolean_blank",
"tests/test_query_params.py::_TestQueryParams::test_get_date_invalid",
"tests/test_query_params.py::_TestQueryParams::test_get_date_missing_param",
"tests/test_query_params.py::_TestQueryParams::test_get_date_store",
"tests/test_query_params.py::_TestQueryParams::test_get_date_valid",
"tests/test_query_params.py::_TestQueryParams::test_get_date_valid_with_format",
"tests/test_query_params.py::_TestQueryParams::test_int",
"tests/test_query_params.py::_TestQueryParams::test_int_neg",
"tests/test_query_params.py::_TestQueryParams::test_list_transformer",
"tests/test_query_params.py::_TestQueryParams::test_list_type",
"tests/test_query_params.py::_TestQueryParams::test_list_type_blank",
"tests/test_query_params.py::_TestQueryParams::test_multiple_form_keys",
"tests/test_query_params.py::_TestQueryParams::test_multiple_form_keys_as_list",
"tests/test_query_params.py::_TestQueryParams::test_multiple_keys_as_bool",
"tests/test_query_params.py::_TestQueryParams::test_multiple_keys_as_int",
"tests/test_query_params.py::_TestQueryParams::test_none",
"tests/test_query_params.py::_TestQueryParams::test_param_property",
"tests/test_query_params.py::_TestQueryParams::test_percent_encoded",
"tests/test_query_params.py::_TestQueryParams::test_required_1_get_param",
"tests/test_query_params.py::_TestQueryParams::test_required_2_get_param_as_int",
"tests/test_query_params.py::_TestQueryParams::test_required_3_get_param_as_bool",
"tests/test_query_params.py::_TestQueryParams::test_required_4_get_param_as_list",
"tests/test_query_params.py::_TestQueryParams::test_simple",
"tests/test_query_params.py::PostQueryParams::test_allowed_names",
"tests/test_query_params.py::PostQueryParams::test_bad_percentage",
"tests/test_query_params.py::PostQueryParams::test_blank",
"tests/test_query_params.py::PostQueryParams::test_boolean",
"tests/test_query_params.py::PostQueryParams::test_boolean_blank",
"tests/test_query_params.py::PostQueryParams::test_explicitly_disable_auto_parse",
"tests/test_query_params.py::PostQueryParams::test_get_date_invalid",
"tests/test_query_params.py::PostQueryParams::test_get_date_missing_param",
"tests/test_query_params.py::PostQueryParams::test_get_date_store",
"tests/test_query_params.py::PostQueryParams::test_get_date_valid",
"tests/test_query_params.py::PostQueryParams::test_get_date_valid_with_format",
"tests/test_query_params.py::PostQueryParams::test_int",
"tests/test_query_params.py::PostQueryParams::test_int_neg",
"tests/test_query_params.py::PostQueryParams::test_list_transformer",
"tests/test_query_params.py::PostQueryParams::test_list_type",
"tests/test_query_params.py::PostQueryParams::test_list_type_blank",
"tests/test_query_params.py::PostQueryParams::test_multiple_form_keys",
"tests/test_query_params.py::PostQueryParams::test_multiple_form_keys_as_list",
"tests/test_query_params.py::PostQueryParams::test_multiple_keys_as_bool",
"tests/test_query_params.py::PostQueryParams::test_multiple_keys_as_int",
"tests/test_query_params.py::PostQueryParams::test_non_ascii",
"tests/test_query_params.py::PostQueryParams::test_none",
"tests/test_query_params.py::PostQueryParams::test_param_property",
"tests/test_query_params.py::PostQueryParams::test_percent_encoded",
"tests/test_query_params.py::PostQueryParams::test_required_1_get_param",
"tests/test_query_params.py::PostQueryParams::test_required_2_get_param_as_int",
"tests/test_query_params.py::PostQueryParams::test_required_3_get_param_as_bool",
"tests/test_query_params.py::PostQueryParams::test_required_4_get_param_as_list",
"tests/test_query_params.py::PostQueryParams::test_simple",
"tests/test_query_params.py::GetQueryParams::test_allowed_names",
"tests/test_query_params.py::GetQueryParams::test_bad_percentage",
"tests/test_query_params.py::GetQueryParams::test_blank",
"tests/test_query_params.py::GetQueryParams::test_boolean",
"tests/test_query_params.py::GetQueryParams::test_boolean_blank",
"tests/test_query_params.py::GetQueryParams::test_get_date_invalid",
"tests/test_query_params.py::GetQueryParams::test_get_date_missing_param",
"tests/test_query_params.py::GetQueryParams::test_get_date_store",
"tests/test_query_params.py::GetQueryParams::test_get_date_valid",
"tests/test_query_params.py::GetQueryParams::test_get_date_valid_with_format",
"tests/test_query_params.py::GetQueryParams::test_int",
"tests/test_query_params.py::GetQueryParams::test_int_neg",
"tests/test_query_params.py::GetQueryParams::test_list_transformer",
"tests/test_query_params.py::GetQueryParams::test_list_type",
"tests/test_query_params.py::GetQueryParams::test_list_type_blank",
"tests/test_query_params.py::GetQueryParams::test_multiple_form_keys",
"tests/test_query_params.py::GetQueryParams::test_multiple_form_keys_as_list",
"tests/test_query_params.py::GetQueryParams::test_multiple_keys_as_bool",
"tests/test_query_params.py::GetQueryParams::test_multiple_keys_as_int",
"tests/test_query_params.py::GetQueryParams::test_none",
"tests/test_query_params.py::GetQueryParams::test_param_property",
"tests/test_query_params.py::GetQueryParams::test_percent_encoded",
"tests/test_query_params.py::GetQueryParams::test_required_1_get_param",
"tests/test_query_params.py::GetQueryParams::test_required_2_get_param_as_int",
"tests/test_query_params.py::GetQueryParams::test_required_3_get_param_as_bool",
"tests/test_query_params.py::GetQueryParams::test_required_4_get_param_as_list",
"tests/test_query_params.py::GetQueryParams::test_simple",
"tests/test_query_params.py::PostQueryParamsDefaultBehavior::test_dont_auto_parse_by_default"
]
| []
| Apache License 2.0 | 610 | [
"falcon/request.py"
]
| [
"falcon/request.py"
]
|
rthalley__dnspython-188 | 188aa701a6826c607da0624e31a8c4618d0a8017 | 2016-07-03 00:30:41 | 188aa701a6826c607da0624e31a8c4618d0a8017 | bastiak: Yup, I haven't fixed it right
sebix: As binary mode is the default for Py2, I think you have to check for the non-existence of `t` in the mode for py2. `f.mode` is exactly the parameter that was used, without default values.
bastiak: I'm not sure if using `except TypeError` is the best practice for handling binary or textual streams, but it works. Also, I don't trust the `.decode()` method used there too much. Any suggestions are welcome.
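
A minimal standalone sketch of the dual-mode write pattern under discussion; the helper name and the utf-8 default are assumptions, not taken from the patch:

```python
import os

def write_line(f, line):
    # Try the bytes write first; fall back to a textual write when the
    # file object rejects bytes with a TypeError (text mode on Py3).
    encoding = getattr(f, 'encoding', None) or 'utf-8'
    try:
        f.write(line.encode(encoding))
        f.write(os.linesep.encode(encoding))
    except TypeError:
        f.write(line)
        f.write('\n')
```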
sebix: Thanks for all your efforts and explanations! Why is writing files so hard...?
To ensure that all python versions behave as you expect you could add some test checking for this behavior. One for binary and one for textual mode and then check for (not) raised TypeError. | diff --git a/dns/zone.py b/dns/zone.py
index 4a73e1e..1b5dca2 100644
--- a/dns/zone.py
+++ b/dns/zone.py
@@ -19,6 +19,7 @@ from __future__ import generators
import sys
import re
+import os
from io import BytesIO
import dns.exception
@@ -498,18 +499,27 @@ class Zone(object):
@type nl: string or None
"""
- str_type = string_types
+ if isinstance(f, string_types):
+ f = open(f, 'wb')
+ want_close = True
+ else:
+ want_close = False
+
+ # must be in this way, f.encoding may contain None, or even attribute
+ # may not be there
+ file_enc = getattr(f, 'encoding', None)
+ if file_enc is None:
+ file_enc = 'utf-8'
if nl is None:
- opts = 'wb'
+ nl_b = os.linesep.encode(file_enc) # binary mode, '\n' is not enough
+ nl = u'\n'
+ elif isinstance(nl, string_types):
+ nl_b = nl.encode(file_enc)
else:
- opts = 'wb'
+ nl_b = nl
+ nl = nl.decode()
- if isinstance(f, str_type):
- f = open(f, opts)
- want_close = True
- else:
- want_close = False
try:
if sorted:
names = list(self.keys())
@@ -520,11 +530,15 @@ class Zone(object):
l = self[n].to_text(n, origin=self.origin,
relativize=relativize)
if isinstance(l, text_type):
- l = l.encode()
- if nl is None:
- f.write(l)
- f.write('\n')
+ l_b = l.encode(file_enc)
else:
+ l_b = l
+ l = l.decode()
+
+ try:
+ f.write(l_b)
+ f.write(nl_b)
+ except TypeError: # textual mode
f.write(l)
f.write(nl)
finally:
| py3: Zone.to_file failed
Hello,
```
#!/usr/local/bin/python3
import dns.zone
from dns.rdatatype import SOA
zone_obj = dns.zone.Zone(dns.name.from_text('test.zone'))
zone_obj.find_rdataset('@', rdtype=SOA, create=True)
zone_obj.to_file(open('/dev/null', 'w'))
Traceback (most recent call last):
File "./t.py", line 9, in <module>
zone_obj.to_file(open('/dev/null', 'w'))
File "/opt/hosting/software/python3/lib/python3.5/site-packages/dns/zone.py", line 516, in to_file
f.write(l)
TypeError: write() argument must be str, not bytes
zone_obj.to_file(open('/dev/null', 'wb'))
Traceback (most recent call last):
File "./t.py", line 10, in <module>
zone_obj.to_file(open('/dev/null', 'wb'))
File "/opt/hosting/software/python3/lib/python3.5/site-packages/dns/zone.py", line 517, in to_file
f.write('\n')
TypeError: a bytes-like object is required, not 'str'
```
Looks like a bug? | rthalley/dnspython | diff --git a/tests/test_zone.py b/tests/test_zone.py
index 712b590..3d53e93 100644
--- a/tests/test_zone.py
+++ b/tests/test_zone.py
@@ -132,6 +132,59 @@ class ZoneTestCase(unittest.TestCase):
os.unlink(here('example2.out'))
self.failUnless(ok)
+ def testToFileTextualStream(self):
+ z = dns.zone.from_text(example_text, 'example.', relativize=True)
+ f = StringIO()
+ z.to_file(f)
+ out = f.getvalue()
+ f.close()
+ self.assertEqual(out, example_text_output)
+
+ def testToFileBinaryStream(self):
+ z = dns.zone.from_text(example_text, 'example.', relativize=True)
+ f = BytesIO()
+ z.to_file(f)
+ out = f.getvalue()
+ f.close()
+ self.assertEqual(out, example_text_output.encode())
+
+ def testToFileTextual(self):
+ z = dns.zone.from_file(here('example'), 'example')
+ try:
+ f = open(here('example3-textual.out'), 'w')
+ z.to_file(f)
+ f.close()
+ ok = filecmp.cmp(here('example3-textual.out'),
+ here('example3.good'))
+ finally:
+ if not _keep_output:
+ os.unlink(here('example3-textual.out'))
+ self.failUnless(ok)
+
+ def testToFileBinary(self):
+ z = dns.zone.from_file(here('example'), 'example')
+ try:
+ f = open(here('example3-binary.out'), 'wb')
+ z.to_file(f)
+ f.close()
+ ok = filecmp.cmp(here('example3-binary.out'),
+ here('example3.good'))
+ finally:
+ if not _keep_output:
+ os.unlink(here('example3-binary.out'))
+ self.failUnless(ok)
+
+ def testToFileFilename(self):
+ z = dns.zone.from_file(here('example'), 'example')
+ try:
+ z.to_file('example3-filename.out')
+ ok = filecmp.cmp(here('example3-filename.out'),
+ here('example3.good'))
+ finally:
+ if not _keep_output:
+ os.unlink(here('example3-filename.out'))
+ self.failUnless(ok)
+
def testToText(self):
z = dns.zone.from_file(here('example'), 'example')
ok = False
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 1.14 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/rthalley/dnspython.git@188aa701a6826c607da0624e31a8c4618d0a8017#egg=dnspython
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: dnspython
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
prefix: /opt/conda/envs/dnspython
| [
"tests/test_zone.py::ZoneTestCase::testToFileBinaryStream",
"tests/test_zone.py::ZoneTestCase::testToFileTextualStream"
]
| [
"tests/test_zone.py::ZoneTestCase::testFromFile1",
"tests/test_zone.py::ZoneTestCase::testFromFile2",
"tests/test_zone.py::ZoneTestCase::testInclude",
"tests/test_zone.py::ZoneTestCase::testToFileBinary",
"tests/test_zone.py::ZoneTestCase::testToFileFilename",
"tests/test_zone.py::ZoneTestCase::testToFileTextual",
"tests/test_zone.py::ZoneTestCase::testToText",
"tests/test_zone.py::ZoneTestCase::testTorture1"
]
| [
"tests/test_zone.py::ZoneTestCase::testBadDirective",
"tests/test_zone.py::ZoneTestCase::testDeleteRdataset1",
"tests/test_zone.py::ZoneTestCase::testDeleteRdataset2",
"tests/test_zone.py::ZoneTestCase::testEqual",
"tests/test_zone.py::ZoneTestCase::testFindRRset1",
"tests/test_zone.py::ZoneTestCase::testFindRRset2",
"tests/test_zone.py::ZoneTestCase::testFindRdataset1",
"tests/test_zone.py::ZoneTestCase::testFindRdataset2",
"tests/test_zone.py::ZoneTestCase::testFirstRRStartsWithWhitespace",
"tests/test_zone.py::ZoneTestCase::testFromText",
"tests/test_zone.py::ZoneTestCase::testGetRRset1",
"tests/test_zone.py::ZoneTestCase::testGetRRset2",
"tests/test_zone.py::ZoneTestCase::testGetRdataset1",
"tests/test_zone.py::ZoneTestCase::testGetRdataset2",
"tests/test_zone.py::ZoneTestCase::testIterateAllRdatas",
"tests/test_zone.py::ZoneTestCase::testIterateAllRdatasets",
"tests/test_zone.py::ZoneTestCase::testIterateRdatas",
"tests/test_zone.py::ZoneTestCase::testIterateRdatasets",
"tests/test_zone.py::ZoneTestCase::testNoNS",
"tests/test_zone.py::ZoneTestCase::testNoSOA",
"tests/test_zone.py::ZoneTestCase::testNodeDeleteRdataset1",
"tests/test_zone.py::ZoneTestCase::testNodeDeleteRdataset2",
"tests/test_zone.py::ZoneTestCase::testNodeFindRdataset1",
"tests/test_zone.py::ZoneTestCase::testNodeFindRdataset2",
"tests/test_zone.py::ZoneTestCase::testNodeGetRdataset1",
"tests/test_zone.py::ZoneTestCase::testNodeGetRdataset2",
"tests/test_zone.py::ZoneTestCase::testNotEqual1",
"tests/test_zone.py::ZoneTestCase::testNotEqual2",
"tests/test_zone.py::ZoneTestCase::testNotEqual3",
"tests/test_zone.py::ZoneTestCase::testReplaceRdataset1",
"tests/test_zone.py::ZoneTestCase::testReplaceRdataset2",
"tests/test_zone.py::ZoneTestCase::testTTLs",
"tests/test_zone.py::ZoneTestCase::testZoneOrigin",
"tests/test_zone.py::ZoneTestCase::testZoneOriginNone"
]
| []
| ISC License | 611 | [
"dns/zone.py"
]
| [
"dns/zone.py"
]
|
zopefoundation__zope.error-4 | 37e3820202fb84ecac44f3d23e743e8f032d5577 | 2016-07-06 08:20:37 | 37e3820202fb84ecac44f3d23e743e8f032d5577 | diff --git a/CHANGES.rst b/CHANGES.rst
index 6848934..f74b382 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -8,6 +8,8 @@ Changes
- Drop support for Python 2.6.
+- bugfix: fix leak by converting ``request.URL`` to string in
+ ``ErrorReportingUtility``
4.2.0 (2014-12-27)
------------------
diff --git a/src/zope/error/error.py b/src/zope/error/error.py
index eb727f1..48d9a6a 100644
--- a/src/zope/error/error.py
+++ b/src/zope/error/error.py
@@ -188,7 +188,7 @@ class ErrorReportingUtility(Persistent):
# TODO: Temporary fix, which Steve should undo. URL is
# just too HTTPRequest-specific.
if hasattr(request, 'URL'):
- url = request.URL
+ url = str(request.URL)
username = self._getUsername(request)
req_html = self._getRequestAsHTML(request)
| ErrorReportingUtility.raising leaks requests
```
if hasattr(request, 'URL'):
url = request.URL
```
URL is a URLGetter, not a plain string.
That's a huge issue in tests: every request that runs into an exception leaves the Request object alive. | zopefoundation/zope.error
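For illustration, here is a minimal, self-contained sketch of the leak the one-line fix above removes, using stand-in classes rather than zope.publisher's real ones: storing the URLGetter keeps the whole request reachable through its back-reference, while str() snapshots just the text.
```
import gc
import weakref

class FakeRequest(object):
    # Stand-in for an HTTPRequest; only getURL() matters here.
    def getURL(self):
        return "/foobar"

class URLGetter(object):
    def __init__(self, request):
        self.__request = request              # strong back-reference
    def __str__(self):
        return self.__request.getURL()

request = FakeRequest()
request.URL = URLGetter(request)              # as zope.publisher would wire it
alive = weakref.ref(request)

log_entry = {"url": request.URL}              # leaky: stores the getter object
del request
gc.collect()
assert alive() is not None                    # request is pinned by the log

log_entry["url"] = str(log_entry["url"])      # the fix: store a plain string
gc.collect()
assert alive() is None                        # request can now be collected
```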
index 14e632c..486227a 100644
--- a/src/zope/error/tests.py
+++ b/src/zope/error/tests.py
@@ -21,7 +21,7 @@ from zope.exceptions.exceptionformatter import format_exception
from zope.testing import cleanup
from zope.error.error import ErrorReportingUtility, getFormattedException
-from zope.error._compat import _u_type, PYTHON2
+from zope.error._compat import _u_type, PYTHON2, _basestring
if PYTHON2:
from cStringIO import StringIO
@@ -56,6 +56,20 @@ class TestRequest(object):
def items(self):
return []
+ def getURL(self):
+ return self._environ['PATH_INFO']
+
+
+class URLGetter(object):
+
+ __slots__ = "__request"
+
+ def __init__(self, request):
+ self.__request = request
+
+ def __str__(self):
+ return self.__request.getURL()
+
class ErrorReportingUtilityTests(cleanup.CleanUp, unittest.TestCase):
@@ -119,6 +133,21 @@ class ErrorReportingUtilityTests(cleanup.CleanUp, unittest.TestCase):
username = getErrLog[0]['username']
self.assertEqual(username, u'unauthenticated, \u0441, \u0441, \u0441')
+ def test_ErrorLog_url(self):
+ # We want a string for the URL in the error log, nothing else
+ request = TestRequest(environ={'PATH_INFO': '/foobar'})
+ # set request.URL as zope.publisher would
+ request.URL = URLGetter(request)
+
+ errUtility = ErrorReportingUtility()
+ exc_info = getAnErrorInfo(u"Error")
+ errUtility.raising(exc_info, request=request)
+ getErrLog = errUtility.getLogEntries()
+ self.assertEqual(1, len(getErrLog))
+
+ url = getErrLog[0]['url']
+ self.assertTrue(isinstance(url, _basestring))
+
def test_ErrorLog_nonascii(self):
# Emulate a unicode url, it gets encoded to utf-8 before it's passed
# to the request. Also add some unicode field to the request's
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 1
},
"num_modified_files": 2
} | 4.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.5",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
cffi==1.15.1
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
persistent==4.9.3
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pycparser==2.21
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
six==1.17.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
-e git+https://github.com/zopefoundation/zope.error.git@37e3820202fb84ecac44f3d23e743e8f032d5577#egg=zope.error
zope.event==4.6
zope.exceptions==4.6
zope.interface==5.5.2
zope.location==4.3
zope.proxy==4.6.1
zope.schema==6.2.1
zope.testing==5.0.1
| name: zope.error
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- cffi==1.15.1
- persistent==4.9.3
- pycparser==2.21
- six==1.17.0
- zope-event==4.6
- zope-exceptions==4.6
- zope-interface==5.5.2
- zope-location==4.3
- zope-proxy==4.6.1
- zope-schema==6.2.1
- zope-testing==5.0.1
prefix: /opt/conda/envs/zope.error
| [
"src/zope/error/tests.py::ErrorReportingUtilityTests::test_ErrorLog_url"
]
| []
| [
"src/zope/error/tests.py::ErrorReportingUtilityTests::test_ErrorLog",
"src/zope/error/tests.py::ErrorReportingUtilityTests::test_ErrorLog_nonascii",
"src/zope/error/tests.py::ErrorReportingUtilityTests::test_ErrorLog_unicode",
"src/zope/error/tests.py::ErrorReportingUtilityTests::test_checkForEmptyLog",
"src/zope/error/tests.py::ErrorReportingUtilityTests::test_checkProperties",
"src/zope/error/tests.py::GetPrintableTests::test_getFormattedException",
"src/zope/error/tests.py::GetPrintableTests::test_getFormattedException_as_html",
"src/zope/error/tests.py::GetPrintableTests::test_non_str_those_conversion_fails_are_returned_specially",
"src/zope/error/tests.py::GetPrintableTests::test_non_str_those_conversion_fails_are_returned_with_escaped_name",
"src/zope/error/tests.py::GetPrintableTests::test_non_str_values_get_converted_using_a_str_call",
"src/zope/error/tests.py::GetPrintableTests::test_str_values_get_converted_to_unicode",
"src/zope/error/tests.py::GetPrintableTests::test_xml_tags_get_escaped",
"src/zope/error/tests.py::test_suite"
]
| []
| Zope Public License 2.1 | 612 | [
"src/zope/error/error.py",
"CHANGES.rst"
]
| [
"src/zope/error/error.py",
"CHANGES.rst"
]
|
|
scrapy__scrapy-2103 | 0ef490e9ce1b3678cb214755f5fd71a72274f088 | 2016-07-08 10:36:57 | d7b26edf6b419e379a7a0a425093f02cac2fcf33 | diff --git a/scrapy/utils/url.py b/scrapy/utils/url.py
index c80fc6e70..406eb5843 100644
--- a/scrapy/utils/url.py
+++ b/scrapy/utils/url.py
@@ -41,9 +41,16 @@ def url_has_any_extension(url, extensions):
def _safe_ParseResult(parts, encoding='utf8', path_encoding='utf8'):
+ # IDNA encoding can fail for too long labels (>63 characters)
+ # or missing labels (e.g. http://.example.com)
+ try:
+ netloc = parts.netloc.encode('idna')
+ except UnicodeError:
+ netloc = parts.netloc
+
return (
to_native_str(parts.scheme),
- to_native_str(parts.netloc.encode('idna')),
+ to_native_str(netloc),
# default encoding for path component SHOULD be UTF-8
quote(to_bytes(parts.path, path_encoding), _safe_chars),
| Unicode Link Extractor
When using the following to extract all of the links from a response:
```
self.link_extractor = LinkExtractor()
...
links = self.link_extractor.extract_links(response)
```
On rare occasions, the following error is thrown:
```
2016-05-25 12:13:55,432 [root] [ERROR] Error on http://detroit.curbed.com/2016/5/5/11605132/tiny-house-designer-show, traceback: Traceback (most recent call last):
File "/usr/local/lib/python2.7/site-packages/twisted/internet/base.py", line 1203, in mainLoop
self.runUntilCurrent()
File "/usr/local/lib/python2.7/site-packages/twisted/internet/base.py", line 825, in runUntilCurrent
call.func(*call.args, **call.kw)
File "/usr/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 393, in callback
self._startRunCallbacks(result)
File "/usr/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 501, in _startRunCallbacks
self._runCallbacks()
--- <exception caught here> ---
File "/usr/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 588, in _runCallbacks
current.result = callback(current.result, *args, **kw)
File "/var/www/html/DomainCrawler/DomainCrawler/spiders/hybrid_spider.py", line 223, in parse
items.extend(self._extract_requests(response))
File "/var/www/html/DomainCrawler/DomainCrawler/spiders/hybrid_spider.py", line 477, in _extract_requests
links = self.link_extractor.extract_links(response)
File "/usr/local/lib/python2.7/site-packages/scrapy/linkextractors/lxmlhtml.py", line 111, in extract_links
all_links.extend(self._process_links(links))
File "/usr/local/lib/python2.7/site-packages/scrapy/linkextractors/__init__.py", line 103, in _process_links
link.url = canonicalize_url(urlparse(link.url))
File "/usr/local/lib/python2.7/site-packages/scrapy/utils/url.py", line 85, in canonicalize_url
parse_url(url), encoding=encoding)
File "/usr/local/lib/python2.7/site-packages/scrapy/utils/url.py", line 46, in _safe_ParseResult
to_native_str(parts.netloc.encode('idna')),
File "/usr/local/lib/python2.7/encodings/idna.py", line 164, in encode
result.append(ToASCII(label))
File "/usr/local/lib/python2.7/encodings/idna.py", line 73, in ToASCII
raise UnicodeError("label empty or too long")
exceptions.UnicodeError: label empty or too long
```
I was able to find some information concerning the error from [here](http://stackoverflow.com/questions/25103126/label-empty-or-too-long-python-urllib2).
My question is: What is the best way to handle this? Even if there is one bad link in the response, I'd want all of the other good links to be extracted. | scrapy/scrapy | diff --git a/tests/test_utils_url.py b/tests/test_utils_url.py
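One hedged workaround for Scrapy versions that predate the fix above, assuming the LinkExtractor's canonicalize flag (present in Scrapy 1.x): disable canonicalization in the extractor and canonicalize each link yourself, dropping only the offending ones instead of losing the whole response.
```
from scrapy.linkextractors import LinkExtractor
from scrapy.utils.url import canonicalize_url

link_extractor = LinkExtractor(canonicalize=False)

def extract_links_safely(response):
    # Yield canonicalized links, skipping only those whose netloc
    # trips the idna codec ("label empty or too long").
    for link in link_extractor.extract_links(response):
        try:
            link.url = canonicalize_url(link.url)
        except UnicodeError:
            continue                  # drop just this one bad link
        yield link
```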
index 1fc3a3510..b4819874d 100644
--- a/tests/test_utils_url.py
+++ b/tests/test_utils_url.py
@@ -265,6 +265,20 @@ class CanonicalizeUrlTest(unittest.TestCase):
# without encoding, already canonicalized URL is canonicalized identically
self.assertEqual(canonicalize_url(canonicalized), canonicalized)
+ def test_canonicalize_url_idna_exceptions(self):
+ # missing DNS label
+ self.assertEqual(
+ canonicalize_url(u"http://.example.com/résumé?q=résumé"),
+ "http://.example.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9")
+
+ # DNS label too long
+ self.assertEqual(
+ canonicalize_url(
+ u"http://www.{label}.com/résumé?q=résumé".format(
+ label=u"example"*11)),
+ "http://www.{label}.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9".format(
+ label=u"example"*11))
+
class AddHttpIfNoScheme(unittest.TestCase):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 1.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.7",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==24.2.0
Automat==22.10.0
certifi @ file:///croot/certifi_1671487769961/work/certifi
cffi==1.15.1
constantly==15.1.0
cryptography==44.0.2
cssselect==1.2.0
exceptiongroup==1.2.2
hyperlink==21.0.0
idna==3.10
importlib-metadata==6.7.0
incremental==22.10.0
iniconfig==2.0.0
jmespath==1.0.1
lxml==5.3.1
packaging==24.0
parsel==1.8.1
pluggy==1.2.0
pyasn1==0.5.1
pyasn1-modules==0.3.0
pycparser==2.21
PyDispatcher==2.0.7
pyOpenSSL==25.0.0
pytest==7.4.4
queuelib==1.6.2
-e git+https://github.com/scrapy/scrapy.git@0ef490e9ce1b3678cb214755f5fd71a72274f088#egg=Scrapy
service-identity==21.1.0
six==1.17.0
tomli==2.0.1
Twisted==23.8.0
typing_extensions==4.7.1
w3lib==2.1.2
zipp==3.15.0
zope.interface==6.4.post2
| name: scrapy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=22.3.1=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==24.2.0
- automat==22.10.0
- cffi==1.15.1
- constantly==15.1.0
- cryptography==44.0.2
- cssselect==1.2.0
- exceptiongroup==1.2.2
- hyperlink==21.0.0
- idna==3.10
- importlib-metadata==6.7.0
- incremental==22.10.0
- iniconfig==2.0.0
- jmespath==1.0.1
- lxml==5.3.1
- packaging==24.0
- parsel==1.8.1
- pluggy==1.2.0
- pyasn1==0.5.1
- pyasn1-modules==0.3.0
- pycparser==2.21
- pydispatcher==2.0.7
- pyopenssl==25.0.0
- pytest==7.4.4
- queuelib==1.6.2
- service-identity==21.1.0
- six==1.17.0
- tomli==2.0.1
- twisted==23.8.0
- typing-extensions==4.7.1
- w3lib==2.1.2
- zipp==3.15.0
- zope-interface==6.4.post2
prefix: /opt/conda/envs/scrapy
| [
"tests/test_utils_url.py::CanonicalizeUrlTest::test_canonicalize_url_idna_exceptions"
]
| []
| [
"tests/test_utils_url.py::UrlUtilsTest::test_url_is_from_any_domain",
"tests/test_utils_url.py::UrlUtilsTest::test_url_is_from_spider",
"tests/test_utils_url.py::UrlUtilsTest::test_url_is_from_spider_class_attributes",
"tests/test_utils_url.py::UrlUtilsTest::test_url_is_from_spider_with_allowed_domains",
"tests/test_utils_url.py::UrlUtilsTest::test_url_is_from_spider_with_allowed_domains_class_attributes",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_append_missing_path",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_canonicalize_idns",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_canonicalize_parse_url",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_canonicalize_url",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_canonicalize_url_idempotence",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_canonicalize_url_unicode_path",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_canonicalize_url_unicode_query_string",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_canonicalize_url_unicode_query_string_wrong_encoding",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_canonicalize_urlparsed",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_domains_are_case_insensitive",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_dont_convert_safe_characters",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_keep_blank_values",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_non_ascii_percent_encoding_in_paths",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_non_ascii_percent_encoding_in_query_arguments",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_normalize_percent_encoding_in_paths",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_normalize_percent_encoding_in_query_arguments",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_quoted_slash_and_question_sign",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_remove_fragments",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_return_str",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_safe_characters_unicode",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_sorting",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_spaces",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_typical_usage",
"tests/test_utils_url.py::CanonicalizeUrlTest::test_urls_with_auth_and_ports",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_add_scheme",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_complete_url",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_fragment",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_path",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_port",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_preserve_ftp",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_preserve_http",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_preserve_http_complete_url",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_preserve_http_fragment",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_preserve_http_path",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_preserve_http_port",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_preserve_http_query",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_preserve_http_username_password",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_preserve_http_without_subdomain",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_preserve_https",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_protocol_relative",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_protocol_relative_complete_url",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_protocol_relative_fragment",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_protocol_relative_path",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_protocol_relative_port",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_protocol_relative_query",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_protocol_relative_username_password",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_protocol_relative_without_subdomain",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_query",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_username_password",
"tests/test_utils_url.py::AddHttpIfNoScheme::test_without_subdomain",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_001",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_002",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_003",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_004",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_005",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_006",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_007",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_008",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_009",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_010",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_011",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_012",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_013",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_014",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_015",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_016",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_017",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_018",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_019",
"tests/test_utils_url.py::GuessSchemeTest::test_uri_020"
]
| []
| BSD 3-Clause "New" or "Revised" License | 614 | [
"scrapy/utils/url.py"
]
| [
"scrapy/utils/url.py"
]
|
|
Azure__WALinuxAgent-304 | 807e22c6ca5b75b8c19fe27eefd9b8f830e8b367 | 2016-07-08 21:40:51 | 807e22c6ca5b75b8c19fe27eefd9b8f830e8b367 | msftclas: Hi __@brendandixon__, I'm your friendly neighborhood Microsoft Pull Request Bot (You can call me MSBOT). Thanks for your contribution!
You've already signed the contribution license agreement. Thanks!
The agreement was validated by Microsoft and real humans are currently evaluating your PR.
TTYL, MSBOT;
| diff --git a/azurelinuxagent/common/rdma.py b/azurelinuxagent/common/rdma.py
index 3ba332d3..c9451a21 100644
--- a/azurelinuxagent/common/rdma.py
+++ b/azurelinuxagent/common/rdma.py
@@ -21,15 +21,56 @@ Handle packages and modules to enable RDMA for IB networking
import os
import re
-import threading
import time
+import threading
+
import azurelinuxagent.common.logger as logger
import azurelinuxagent.common.utils.fileutil as fileutil
import azurelinuxagent.common.utils.shellutil as shellutil
-dapl_config_paths = ['/etc/dat.conf', '/etc/rdma/dat.conf',
- '/usr/local/etc/dat.conf']
+from azurelinuxagent.common.protocol.wire import SHARED_CONF_FILE_NAME
+dapl_config_paths = [
+ '/etc/dat.conf',
+ '/etc/rdma/dat.conf',
+ '/usr/local/etc/dat.conf'
+]
+
+def setup_rdma_device(self):
+ logger.verbose("Parsing SharedConfig XML contents for RDMA details")
+ xml_doc = parse_doc(
+ fileutil.read_file(os.path.join(conf.get_lib_dir(), SHARED_CONF_FILE_NAME)))
+ if xml_doc is None:
+ logger.error("Could not parse SharedConfig XML document")
+ return
+ instance_elem = find(xml_doc, "Instance")
+ if not instance_elem:
+ logger.error("Could not find <Instance> in SharedConfig document")
+ return
+
+ rdma_ipv4_addr = getattrib(instance_elem, "rdmaIPv4Address")
+ if not rdma_ipv4_addr:
+ logger.error(
+ "Could not find rdmaIPv4Address attribute on Instance element of SharedConfig.xml document")
+ return
+
+ rdma_mac_addr = getattrib(instance_elem, "rdmaMacAddress")
+ if not rdma_mac_addr:
+ logger.error(
+ "Could not find rdmaMacAddress attribute on Instance element of SharedConfig.xml document")
+ return
+
+ # add colons to the MAC address (e.g. 00155D33FF1D ->
+ # 00:15:5D:33:FF:1D)
+ rdma_mac_addr = ':'.join([rdma_mac_addr[i:i+2]
+ for i in range(0, len(rdma_mac_addr), 2)])
+ logger.info("Found RDMA details. IPv4={0} MAC={1}".format(
+ rdma_ipv4_addr, rdma_mac_addr))
+
+ # Set up the RDMA device with collected informatino
+ RDMADeviceHandler(rdma_ipv4_addr, rdma_mac_addr).start()
+ logger.info("RDMA: device is set up")
+ return
class RDMAHandler(object):
diff --git a/azurelinuxagent/daemon/main.py b/azurelinuxagent/daemon/main.py
index e1173ccb..d3185a1d 100644
--- a/azurelinuxagent/daemon/main.py
+++ b/azurelinuxagent/daemon/main.py
@@ -18,32 +18,31 @@
#
import os
-import time
import sys
+import time
import traceback
+
import azurelinuxagent.common.conf as conf
+import azurelinuxagent.common.event as event
+import azurelinuxagent.common.utils.fileutil as fileutil
import azurelinuxagent.common.logger as logger
+
from azurelinuxagent.common.future import ustr
from azurelinuxagent.common.event import add_event, WALAEventOperation
from azurelinuxagent.common.exception import ProtocolError
+from azurelinuxagent.common.osutil import get_osutil
+from azurelinuxagent.common.protocol import get_protocol_util
+from azurelinuxagent.common.rdma import RDMADeviceHandler, setup_rdma_device
+from azurelinuxagent.common.utils.textutil import parse_doc, find, getattrib
from azurelinuxagent.common.version import AGENT_LONG_NAME, AGENT_VERSION, \
DISTRO_NAME, DISTRO_VERSION, \
DISTRO_FULL_NAME, PY_VERSION_MAJOR, \
PY_VERSION_MINOR, PY_VERSION_MICRO
-from azurelinuxagent.common.protocol.wire import SHARED_CONF_FILE_NAME
-import azurelinuxagent.common.event as event
-import azurelinuxagent.common.utils.fileutil as fileutil
-from azurelinuxagent.common.utils.textutil import parse_doc, find, getattrib
-from azurelinuxagent.common.osutil import get_osutil
-from azurelinuxagent.common.protocol import get_protocol_util
-from azurelinuxagent.daemon.scvmm import get_scvmm_handler
from azurelinuxagent.daemon.resourcedisk import get_resourcedisk_handler
-from azurelinuxagent.daemon.monitor import get_monitor_handler
-from azurelinuxagent.daemon.env import get_env_handler
+from azurelinuxagent.daemon.scvmm import get_scvmm_handler
from azurelinuxagent.pa.provision import get_provision_handler
from azurelinuxagent.pa.rdma import get_rdma_handler
from azurelinuxagent.ga.update import get_update_handler
-from azurelinuxagent.common.rdma import RDMADeviceHandler
def get_daemon_handler():
return DaemonHandler()
@@ -95,12 +94,10 @@ class DaemonHandler(object):
self.scvmm_handler = get_scvmm_handler()
self.resourcedisk_handler = get_resourcedisk_handler()
self.rdma_handler = get_rdma_handler()
- self.monitor_handler = get_monitor_handler()
- self.env_handler = get_env_handler()
self.provision_handler = get_provision_handler()
self.update_handler = get_update_handler()
- #Create lib dir
+ # Create lib dir
if not os.path.isdir(conf.get_lib_dir()):
fileutil.mkdir(conf.get_lib_dir(), mode=0o700)
os.chdir(conf.get_lib_dir())
@@ -110,63 +107,24 @@ class DaemonHandler(object):
if conf.get_resourcedisk_format():
self.resourcedisk_handler.run()
-
+
+ # Always redetermine the protocol start (e.g., wireserver vs.
+ # on-premise) since a VHD can move between environments
self.protocol_util.clear_protocol()
self.provision_handler.run()
+ # Enable RDMA, continue in errors
if conf.enable_rdma():
self.rdma_handler.install_driver()
- self.monitor_handler.run()
-
- self.env_handler.run()
-
- # Enable RDMA, continue in errors
- if conf.enable_rdma():
- logger.info("RDMA capabilities are enabled in configuration")
- try:
- self.setup_rdma_device()
- except Exception as e:
- logger.error("Error setting up rdma device: %s" % e)
+ logger.info("RDMA capabilities are enabled in configuration")
+ try:
+ setup_rdma_device()
+ except Exception as e:
+ logger.error("Error setting up rdma device: %s" % e)
else:
logger.info("RDMA capabilities are not enabled, skipping")
while self.running:
self.update_handler.run_latest()
-
-
- def setup_rdma_device(self):
- logger.verbose("Parsing SharedConfig XML contents for RDMA details")
- xml_doc = parse_doc(
- fileutil.read_file(os.path.join(conf.get_lib_dir(), SHARED_CONF_FILE_NAME)))
- if xml_doc is None:
- logger.error("Could not parse SharedConfig XML document")
- return
- instance_elem = find(xml_doc, "Instance")
- if not instance_elem:
- logger.error("Could not find <Instance> in SharedConfig document")
- return
-
- rdma_ipv4_addr = getattrib(instance_elem, "rdmaIPv4Address")
- if not rdma_ipv4_addr:
- logger.error(
- "Could not find rdmaIPv4Address attribute on Instance element of SharedConfig.xml document")
- return
-
- rdma_mac_addr = getattrib(instance_elem, "rdmaMacAddress")
- if not rdma_mac_addr:
- logger.error(
- "Could not find rdmaMacAddress attribute on Instance element of SharedConfig.xml document")
- return
-
- # add colons to the MAC address (e.g. 00155D33FF1D ->
- # 00:15:5D:33:FF:1D)
- rdma_mac_addr = ':'.join([rdma_mac_addr[i:i+2]
- for i in range(0, len(rdma_mac_addr), 2)])
- logger.info("Found RDMA details. IPv4={0} MAC={1}".format(
- rdma_ipv4_addr, rdma_mac_addr))
-
- # Set up the RDMA device with collected informatino
- RDMADeviceHandler(rdma_ipv4_addr, rdma_mac_addr).start()
- logger.info("RDMA: device is set up")
diff --git a/azurelinuxagent/daemon/env.py b/azurelinuxagent/ga/env.py
similarity index 99%
rename from azurelinuxagent/daemon/env.py
rename to azurelinuxagent/ga/env.py
index 9d18026d..2d67d4bb 100644
--- a/azurelinuxagent/daemon/env.py
+++ b/azurelinuxagent/ga/env.py
@@ -19,12 +19,14 @@
import os
import socket
-import threading
import time
-import azurelinuxagent.common.logger as logger
+import threading
+
import azurelinuxagent.common.conf as conf
-from azurelinuxagent.common.osutil import get_osutil
+import azurelinuxagent.common.logger as logger
+
from azurelinuxagent.common.dhcp import get_dhcp_handler
+from azurelinuxagent.common.osutil import get_osutil
def get_env_handler():
return EnvHandler()
diff --git a/azurelinuxagent/daemon/monitor.py b/azurelinuxagent/ga/monitor.py
similarity index 95%
rename from azurelinuxagent/daemon/monitor.py
rename to azurelinuxagent/ga/monitor.py
index 9e1e1eb7..0ac86d4a 100644
--- a/azurelinuxagent/daemon/monitor.py
+++ b/azurelinuxagent/ga/monitor.py
@@ -15,28 +15,28 @@
# Requires Python 2.4+ and Openssl 1.0+
#
-import os
+import datetime
import json
+import os
+import platform
import time
-import datetime
import threading
-import platform
-import azurelinuxagent.common.logger as logger
+
import azurelinuxagent.common.conf as conf
+import azurelinuxagent.common.logger as logger
+
from azurelinuxagent.common.event import WALAEventOperation, add_event
-from azurelinuxagent.common.exception import EventError, ProtocolError, \
- OSUtilError
+from azurelinuxagent.common.exception import EventError, ProtocolError, OSUtilError
from azurelinuxagent.common.future import ustr
-from azurelinuxagent.common.utils.textutil import parse_doc, findall, find, \
- getattrib
-from azurelinuxagent.common.protocol.restapi import TelemetryEventParam, \
- TelemetryEventList, \
- TelemetryEvent, \
- set_properties
-from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, \
- DISTRO_CODE_NAME, AGENT_LONG_VERSION
from azurelinuxagent.common.osutil import get_osutil
from azurelinuxagent.common.protocol import get_protocol_util
+from azurelinuxagent.common.protocol.restapi import TelemetryEventParam, \
+ TelemetryEventList, \
+ TelemetryEvent, \
+ set_properties
+from azurelinuxagent.common.utils.textutil import parse_doc, findall, find, getattrib
+from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, \
+ DISTRO_CODE_NAME, AGENT_LONG_VERSION
def parse_event(data_str):
diff --git a/azurelinuxagent/ga/update.py b/azurelinuxagent/ga/update.py
index 0d69f523..635f7e98 100644
--- a/azurelinuxagent/ga/update.py
+++ b/azurelinuxagent/ga/update.py
@@ -129,7 +129,7 @@ class UpdateHandler(object):
ret = self.child_process.wait()
if ret == None:
ret = 1
- if ret != 0:
+ if ret > 0:
msg = u"Agent {0} launched with command '{1}' failed with code: {2}".format(
agent_name,
agent_cmd,
@@ -144,9 +144,10 @@ class UpdateHandler(object):
if latest_agent is not None:
latest_agent.mark_failure()
else:
- msg = u"Agent {0} launched with command '{1}' returned 0".format(
+ msg = u"Agent {0} launched with command '{1}' returned {2}".format(
agent_name,
- agent_cmd)
+ agent_cmd,
+ ret)
logger.info(msg)
add_event(
AGENT_NAME,
@@ -184,14 +185,22 @@ class UpdateHandler(object):
"""
This is the main loop which watches for agent and extension updates.
"""
- from azurelinuxagent.ga.exthandlers import get_exthandlers_handler
- exthandlers_handler = get_exthandlers_handler()
msg = u"Agent {0} is running as the current agent".format(
CURRENT_AGENT)
logger.info(msg)
add_event(AGENT_NAME, version=CURRENT_VERSION, is_success=True, message=msg)
+ # Launch monitoring threads
+ from azurelinuxagent.ga.monitor import get_monitor_handler
+ get_monitor_handler().run()
+
+ from azurelinuxagent.ga.env import get_env_handler
+ get_env_handler().run()
+
+ from azurelinuxagent.ga.exthandlers import get_exthandlers_handler
+ exthandlers_handler = get_exthandlers_handler()
+
# TODO: Add means to stop running
try:
while self.running:
@@ -224,6 +233,7 @@ class UpdateHandler(object):
is_success=False,
message=msg)
sys.exit(1)
+
sys.exit(0)
return
@@ -231,12 +241,13 @@ class UpdateHandler(object):
if self.child_process is None:
return
- if signum is signal.SIGTERM:
- self.child_process.send_signal(signal.SIGTERM)
+ self.child_process.send_signal(signum)
- if self.signal_handler is not None:
- if not self.signal_handler in (signal.SIG_IGN, signal.SIG_DFL):
+ if not self.signal_handler in (None, signal.SIG_IGN, signal.SIG_DFL):
self.signal_handler(signum, frame)
+ elif self.signal_handler is signal.SIG_DFL:
+ if signum == signal.SIGTERM:
+ sys.exit(0)
return
def get_latest_agent(self):
| `systemctl stop waagent` hangs
Repros on Ubuntu 16.04 as well as CentOS 7.
The `systemctl stop walinuxagent` command hangs for a long time, although it eventually succeeds after roughly 30-40 seconds. It used to be instant. I think it's related to the signal handling behavior that was recently changed. | Azure/WALinuxAgent
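A minimal stand-alone sketch of the forwarding pattern the fix adopts (not the agent's actual handler): relay the exact signal received to the child instead of always sending SIGTERM, and exit on SIGTERM when the parent's own disposition was the default, so systemd's stop returns immediately instead of waiting out its timeout.
```
import signal
import subprocess
import sys

child = subprocess.Popen(["sleep", "600"])   # stand-in for the launched agent

def forward_signal(signum, frame):
    child.send_signal(signum)                # relay whatever we received
    if signum == signal.SIGTERM:
        sys.exit(0)                          # honor the default SIGTERM action

signal.signal(signal.SIGTERM, forward_signal)
child.wait()
```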
similarity index 95%
rename from tests/daemon/test_monitor.py
rename to tests/ga/test_monitor.py
index e037dc0c..838d037a 100644
--- a/tests/daemon/test_monitor.py
+++ b/tests/ga/test_monitor.py
@@ -17,7 +17,7 @@
from tests.tools import *
from azurelinuxagent.common.exception import *
-from azurelinuxagent.daemon.monitor import *
+from azurelinuxagent.ga.monitor import *
class TestMonitor(AgentTestCase):
def test_parse_xml_event(self):
diff --git a/tests/ga/test_update.py b/tests/ga/test_update.py
index 1a29b3fd..3c81437c 100644
--- a/tests/ga/test_update.py
+++ b/tests/ga/test_update.py
@@ -862,18 +862,38 @@ class TestUpdate(UpdateTestCase):
self.assertEqual(1, latest_agent.error.failure_count)
return
- def _test_run(self, invocations=1, enable_updates=False):
+ def _test_run(self, invocations=1, calls=[call.run()], enable_updates=False):
conf.get_autoupdate_enabled = Mock(return_value=enable_updates)
+
+ # Note:
+ # - Python only allows mutations of objects to which a function has
+ # a reference. Incrementing an integer directly changes the
+ # reference. Incrementing an item of a list changes an item to
+ # which the code has a reference.
+ # See http://stackoverflow.com/questions/26408941/python-nested-functions-and-variable-scope
+ iterations = [0]
+ def iterator(*args, **kwargs):
+ iterations[0] += 1
+ if iterations[0] >= invocations:
+ self.update_handler.running = False
+ return
+
+ calls = calls * invocations
- mock_sleep = _IterationMock(self.update_handler, invocations=invocations)
with patch('azurelinuxagent.ga.exthandlers.get_exthandlers_handler') as mock_handler:
- with patch('time.sleep', new=mock_sleep):
- try:
- self.update_handler.run()
- except:
- pass
- self.assertEqual(invocations + 1, len(mock_handler.mock_calls))
- self.assertEqual(invocations, len(mock_sleep.mock_calls))
+ with patch('azurelinuxagent.ga.monitor.get_monitor_handler') as mock_monitor:
+ with patch('azurelinuxagent.ga.env.get_env_handler') as mock_env:
+ with patch('time.sleep', side_effect=iterator) as mock_sleep:
+ with patch('sys.exit') as mock_exit:
+
+ self.update_handler.run()
+
+ self.assertEqual(1, mock_handler.call_count)
+ self.assertEqual(mock_handler.return_value.method_calls, calls)
+ self.assertEqual(invocations, mock_sleep.call_count)
+ self.assertEqual(1, mock_monitor.call_count)
+ self.assertEqual(1, mock_env.call_count)
+ self.assertEqual(1, mock_exit.call_count)
return
def test_run(self):
@@ -886,9 +906,7 @@ class TestUpdate(UpdateTestCase):
def test_run_stops_if_update_available(self):
self.update_handler._ensure_latest_agent = Mock(return_value=True)
- with patch('sys.exit', side_effect=Exception("System Exit")) as mock_exit:
- self._test_run(invocations=0, enable_updates=True)
- self.assertEqual(1, mock_exit.call_count)
+ self._test_run(invocations=0, calls=[], enable_updates=True)
return
def test_set_agents(self):
@@ -904,20 +922,6 @@ class TestUpdate(UpdateTestCase):
return
-class _IterationMock(object):
- def __init__(self, update_handler, invocations=1):
- self.update_handler = update_handler
- self.invocations = invocations
- self.mock_calls = []
- return
-
- def __call__(self, *args, **kwargs):
- self.mock_calls.append((args, kwargs))
- if len(self.mock_calls) >= self.invocations:
- self.update_handler.running = False
- return
-
-
class ProtocolMock(object):
def __init__(self, family="TestAgent", etag=42, versions=None):
self.family = family
diff --git a/tests/test_import.py b/tests/test_import.py
index 04124118..39a48abd 100644
--- a/tests/test_import.py
+++ b/tests/test_import.py
@@ -7,9 +7,9 @@ import azurelinuxagent.pa.deprovision as deprovision
import azurelinuxagent.daemon as daemon
import azurelinuxagent.daemon.resourcedisk as resourcedisk
import azurelinuxagent.daemon.scvmm as scvmm
-import azurelinuxagent.daemon.monitor as monitor
-import azurelinuxagent.ga.update as update
import azurelinuxagent.ga.exthandlers as exthandlers
+import azurelinuxagent.ga.monitor as monitor
+import azurelinuxagent.ga.update as update
class TestImportHandler(AgentTestCase):
def test_get_handler(self):
diff --git a/tests/tools.py b/tests/tools.py
index 2d5d0316..8bf23ed5 100644
--- a/tests/tools.py
+++ b/tests/tools.py
@@ -36,9 +36,9 @@ from azurelinuxagent.common.version import PY_VERSION_MAJOR
#Import mock module for Python2 and Python3
try:
- from unittest.mock import Mock, patch, MagicMock, DEFAULT
+ from unittest.mock import Mock, patch, MagicMock, DEFAULT, call
except ImportError:
- from mock import Mock, patch, MagicMock, DEFAULT
+ from mock import Mock, patch, MagicMock, DEFAULT, call
test_dir = os.path.dirname(os.path.abspath(__file__))
data_dir = os.path.join(test_dir, "data")
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 5
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"nose",
"pyasn1",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
nose==1.3.7
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyasn1==0.5.1
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
-e git+https://github.com/Azure/WALinuxAgent.git@807e22c6ca5b75b8c19fe27eefd9b8f830e8b367#egg=WALinuxAgent
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: WALinuxAgent
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- nose==1.3.7
- pyasn1==0.5.1
prefix: /opt/conda/envs/WALinuxAgent
| [
"tests/ga/test_monitor.py::TestMonitor::test_parse_xml_event",
"tests/ga/test_update.py::TestGuestAgentError::test_clear",
"tests/ga/test_update.py::TestGuestAgentError::test_creation",
"tests/ga/test_update.py::TestGuestAgentError::test_load_preserves_error_state",
"tests/ga/test_update.py::TestGuestAgentError::test_mark_failure",
"tests/ga/test_update.py::TestGuestAgentError::test_mark_failure_permanent",
"tests/ga/test_update.py::TestGuestAgentError::test_save",
"tests/ga/test_update.py::TestGuestAgent::test_clear_error",
"tests/ga/test_update.py::TestGuestAgent::test_creation",
"tests/ga/test_update.py::TestGuestAgent::test_download",
"tests/ga/test_update.py::TestGuestAgent::test_download_fail",
"tests/ga/test_update.py::TestGuestAgent::test_ensure_download_skips_blacklisted",
"tests/ga/test_update.py::TestGuestAgent::test_ensure_downloaded",
"tests/ga/test_update.py::TestGuestAgent::test_ensure_downloaded_download_fails",
"tests/ga/test_update.py::TestGuestAgent::test_ensure_downloaded_load_manifest_fails",
"tests/ga/test_update.py::TestGuestAgent::test_ensure_downloaded_unpack_fails",
"tests/ga/test_update.py::TestGuestAgent::test_is_available",
"tests/ga/test_update.py::TestGuestAgent::test_is_blacklisted",
"tests/ga/test_update.py::TestGuestAgent::test_is_downloaded",
"tests/ga/test_update.py::TestGuestAgent::test_load_manifest",
"tests/ga/test_update.py::TestGuestAgent::test_load_manifest_is_empty",
"tests/ga/test_update.py::TestGuestAgent::test_load_manifest_is_malformed",
"tests/ga/test_update.py::TestGuestAgent::test_load_manifest_missing",
"tests/ga/test_update.py::TestGuestAgent::test_mark_failure",
"tests/ga/test_update.py::TestGuestAgent::test_unpack",
"tests/ga/test_update.py::TestGuestAgent::test_unpack_fail",
"tests/ga/test_update.py::TestUpdate::test_ensure_lastest_agent_purges_old_agents",
"tests/ga/test_update.py::TestUpdate::test_ensure_latest_agent_ignores_old_agents",
"tests/ga/test_update.py::TestUpdate::test_ensure_latest_agent_returns_true_on_first_use",
"tests/ga/test_update.py::TestUpdate::test_ensure_latest_agent_skips_if_too_frequent",
"tests/ga/test_update.py::TestUpdate::test_ensure_latest_agent_skips_when_etag_matches",
"tests/ga/test_update.py::TestUpdate::test_ensure_latest_agent_skips_when_no_new_versions",
"tests/ga/test_update.py::TestUpdate::test_ensure_latest_agent_skips_when_updates_are_disabled",
"tests/ga/test_update.py::TestUpdate::test_ensure_latest_agent_sorts",
"tests/ga/test_update.py::TestUpdate::test_filter_blacklisted_agents",
"tests/ga/test_update.py::TestUpdate::test_get_latest_agent",
"tests/ga/test_update.py::TestUpdate::test_get_latest_agent_no_updates",
"tests/ga/test_update.py::TestUpdate::test_get_latest_agent_skip_updates",
"tests/ga/test_update.py::TestUpdate::test_get_latest_agent_skips_unavailable",
"tests/ga/test_update.py::TestUpdate::test_load_agents",
"tests/ga/test_update.py::TestUpdate::test_load_agents_does_not_reload",
"tests/ga/test_update.py::TestUpdate::test_load_agents_sorts",
"tests/ga/test_update.py::TestUpdate::test_purge_agents",
"tests/ga/test_update.py::TestUpdate::test_run",
"tests/ga/test_update.py::TestUpdate::test_run_keeps_running",
"tests/ga/test_update.py::TestUpdate::test_run_latest",
"tests/ga/test_update.py::TestUpdate::test_run_latest_defaults_to_current",
"tests/ga/test_update.py::TestUpdate::test_run_latest_exception_blacklists",
"tests/ga/test_update.py::TestUpdate::test_run_latest_forwards_output",
"tests/ga/test_update.py::TestUpdate::test_run_latest_missing_code_marks_failures",
"tests/ga/test_update.py::TestUpdate::test_run_latest_nonzero_code_marks_failures",
"tests/ga/test_update.py::TestUpdate::test_run_stops_if_update_available",
"tests/ga/test_update.py::TestUpdate::test_set_agents",
"tests/test_import.py::TestImportHandler::test_get_handler"
]
| []
| []
| []
| Apache License 2.0 | 615 | [
"azurelinuxagent/daemon/env.py",
"azurelinuxagent/ga/update.py",
"azurelinuxagent/daemon/main.py",
"azurelinuxagent/daemon/monitor.py",
"azurelinuxagent/common/rdma.py"
]
| [
"azurelinuxagent/ga/update.py",
"azurelinuxagent/daemon/main.py",
"azurelinuxagent/ga/env.py",
"azurelinuxagent/common/rdma.py",
"azurelinuxagent/ga/monitor.py"
]
|
Azure__WALinuxAgent-308 | 92091140c2a7378c1a01fe8526800af912d93c49 | 2016-07-08 22:37:27 | 92091140c2a7378c1a01fe8526800af912d93c49 | diff --git a/azurelinuxagent/ga/update.py b/azurelinuxagent/ga/update.py
index 635f7e98..1c7d13a9 100644
--- a/azurelinuxagent/ga/update.py
+++ b/azurelinuxagent/ga/update.py
@@ -115,6 +115,7 @@ class UpdateHandler(object):
cmds = shlex.split(agent_cmd)
if cmds[0].lower() == "python":
cmds[0] = get_python_cmd()
+ agent_cmd = " ".join(cmds)
self.child_process = subprocess.Popen(
cmds,
@@ -264,7 +265,7 @@ class UpdateHandler(object):
available_agents = [agent for agent in self.agents if agent.is_available]
return available_agents[0] if len(available_agents) >= 1 else None
- def _ensure_latest_agent(self):
+ def _ensure_latest_agent(self, base_version=CURRENT_VERSION):
# Ignore new agents if updating is disabled
if not conf.get_autoupdate_enabled():
return False
@@ -326,15 +327,14 @@ class UpdateHandler(object):
# Note:
# The code leaves on disk available, but blacklisted, agents so as to preserve the state.
# Otherwise, those agents could be again downloaded and inappropriately retried.
- current_version = FlexibleVersion(AGENT_VERSION)
self._set_agents([GuestAgent(pkg=pkg) for pkg in
[pkg for pkg in pkg_list.versions
- if FlexibleVersion(pkg.version) > current_version]])
+ if FlexibleVersion(pkg.version) > base_version]])
self._purge_agents()
self._filter_blacklisted_agents()
# Return True if agents more recent than the current are available
- return len(self.agents) > 0 and self.agents[0].version > current_version
+ return len(self.agents) > 0 and self.agents[0].version > base_version
def _filter_blacklisted_agents(self):
self.agents = [agent for agent in self.agents if not agent.is_blacklisted]
| [2.1-selfupdate] launched .egg exits
The launched update (.egg package) discovers an update and exits with exitcode=0. This keeps going on forever
When I run it manually:
```
$ python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers
2016/07/08 21:53:58.005925 INFO Agent WALinuxAgent-2.1.5.3 is running as the current agent
2016/07/08 21:53:58.008335 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 is running as the current agent
2016/07/08 21:53:58.010850 INFO Wire server endpoint:168.63.129.16
2016/07/08 21:53:58.092766 INFO Check for agent updates
2016/07/08 21:53:58.241843 INFO Loading Agent manifest from /var/lib/waagent/WALinuxAgent-2.1.5.1/HandlerManifest.json
2016/07/08 21:53:58.243852 INFO Loading Agent manifest from /var/lib/waagent/WALinuxAgent-2.1.5.3/HandlerManifest.json
2016/07/08 21:53:58.244492 INFO Agent WALinuxAgent-2.1.5.3 discovered agent update and will exit
2016/07/08 21:53:58.244589 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 discovered agent update and will exit
```
waagent.log
```
2016/07/08 21:51:34.771176 INFO Azure Linux Agent Version:2.1.5.rc4
2016/07/08 21:51:34.778980 INFO OS: ubuntu 16.04
2016/07/08 21:51:34.781242 INFO Python: 3.5.1
2016/07/08 21:51:34.784024 INFO Run daemon
2016/07/08 21:51:34.790937 INFO No RDMA handler exists for distro='Ubuntu' version='16.04'
2016/07/08 21:51:34.793643 INFO Clean protocol
2016/07/08 21:51:34.795094 INFO run Ubuntu provision handler
2016/07/08 21:51:34.836768 INFO Detect protocol endpoints
2016/07/08 21:51:34.848651 INFO Clean protocol
2016/07/08 21:51:34.859205 INFO WireServer endpoint is not found. Rerun dhcp handler
2016/07/08 21:51:34.872036 INFO test for route to 168.63.129.16
2016/07/08 21:51:34.882312 INFO route to 168.63.129.16 exists
2016/07/08 21:51:34.891349 INFO Wire server endpoint:168.63.129.16
2016/07/08 21:51:34.916104 INFO Fabric preferred wire protocol version:2015-04-05
2016/07/08 21:51:34.928396 INFO Wire protocol version:2012-11-30
2016/07/08 21:51:34.937556 WARNING Server prefered version:2015-04-05
2016/07/08 21:51:39.307372 INFO Start env monitor service.
2016/07/08 21:51:39.307245 INFO Event: name=WALA, op=HeartBeat, message=
2016/07/08 21:51:39.321421 INFO Configure routes
2016/07/08 21:51:39.334137 INFO Gateway:None
2016/07/08 21:51:39.361754 INFO Routes:None
2016/07/08 21:51:39.381291 INFO RDMA capabilities are not enabled, skipping
2016/07/08 21:51:39.409449 INFO Agent WALinuxAgent-2.1.5.rc4 launched with command 'python -u /usr/sbin/waagent -run-exthandlers'
2016/07/08 21:51:39.412830 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.rc4 launched with command 'python -u /usr/sbin/waagent -run-exthandlers'
2016/07/08 21:51:39.804282 INFO Agent WALinuxAgent-2.1.5.rc4 is running as the current agent
2016/07/08 21:51:39.822824 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.rc4 is running as the current agent
2016/07/08 21:51:39.857494 INFO Wire server endpoint:168.63.129.16
2016/07/08 21:51:39.885288 INFO Check for agent updates
2016/07/08 21:51:39.965964 INFO Initiating download of Agent WALinuxAgent-2.1.5.1
2016/07/08 21:51:39.981689 INFO Event: name=WALinuxAgent, op=, message=Initiating download of Agent WALinuxAgent-2.1.5.1
2016/07/08 21:51:40.041793 INFO Unpacking agent package WALinuxAgent-2.1.5.1
2016/07/08 21:51:40.064324 INFO Agent WALinuxAgent-2.1.5.1 successfully unpacked
2016/07/08 21:51:40.077642 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.1 successfully unpacked
2016/07/08 21:51:40.108340 INFO Loading Agent manifest from /var/lib/waagent/WALinuxAgent-2.1.5.1/HandlerManifest.json
2016/07/08 21:51:40.139217 INFO Agent WALinuxAgent-2.1.5.1 downloaded successfully
2016/07/08 21:51:40.155397 INFO Event: name=WALinuxAgent, op=Install, message=Agent WALinuxAgent-2.1.5.1 downloaded successfully
2016/07/08 21:51:40.178144 INFO Initiating download of Agent WALinuxAgent-2.1.5.3
2016/07/08 21:51:40.195989 INFO Event: name=WALinuxAgent, op=, message=Initiating download of Agent WALinuxAgent-2.1.5.3
2016/07/08 21:51:40.277986 INFO Unpacking agent package WALinuxAgent-2.1.5.3
2016/07/08 21:51:40.294587 INFO Agent WALinuxAgent-2.1.5.3 successfully unpacked
2016/07/08 21:51:40.307226 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 successfully unpacked
2016/07/08 21:51:40.329189 INFO Loading Agent manifest from /var/lib/waagent/WALinuxAgent-2.1.5.3/HandlerManifest.json
2016/07/08 21:51:40.343945 INFO Agent WALinuxAgent-2.1.5.3 downloaded successfully
2016/07/08 21:51:40.354808 INFO Event: name=WALinuxAgent, op=Install, message=Agent WALinuxAgent-2.1.5.3 downloaded successfully
2016/07/08 21:51:40.377161 INFO Agent WALinuxAgent-2.1.5.rc4 discovered agent update and will exit
2016/07/08 21:51:40.392069 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.rc4 discovered agent update and will exit
2016/07/08 21:51:40.443552 INFO Agent WALinuxAgent-2.1.5.rc4 launched with command 'python -u /usr/sbin/waagent -run-exthandlers' returned 0
2016/07/08 21:51:40.455908 INFO Event: name=WALinuxAgent, op=Enable, message=Agent WALinuxAgent-2.1.5.rc4 launched with command 'python -u /usr/sbin/waagent -run-exthandlers' returned 0
2016/07/08 21:51:40.458716 INFO Loading Agent manifest from /var/lib/waagent/WALinuxAgent-2.1.5.3/HandlerManifest.json
2016/07/08 21:51:40.459940 INFO Loading Agent manifest from /var/lib/waagent/WALinuxAgent-2.1.5.1/HandlerManifest.json
2016/07/08 21:51:40.518290 INFO Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers'
2016/07/08 21:51:40.520979 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers'
2016/07/08 21:51:41.085353 INFO Agent WALinuxAgent-2.1.5.3 is running as the current agent
2016/07/08 21:51:41.093568 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 is running as the current agent
2016/07/08 21:51:41.095873 INFO Wire server endpoint:168.63.129.16
2016/07/08 21:51:41.144559 INFO Check for agent updates
2016/07/08 21:51:41.219800 INFO Loading Agent manifest from /var/lib/waagent/WALinuxAgent-2.1.5.1/HandlerManifest.json
2016/07/08 21:51:41.222907 INFO Loading Agent manifest from /var/lib/waagent/WALinuxAgent-2.1.5.3/HandlerManifest.json
2016/07/08 21:51:41.235737 INFO Agent WALinuxAgent-2.1.5.3 discovered agent update and will exit
2016/07/08 21:51:41.246668 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 discovered agent update and will exit
2016/07/08 21:51:41.292794 INFO Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers' returned 0
2016/07/08 21:51:41.300068 INFO Event: name=WALinuxAgent, op=Enable, message=Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers' returned 0
2016/07/08 21:51:41.341243 INFO Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers'
2016/07/08 21:51:41.362334 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers'
2016/07/08 21:51:41.858292 INFO Agent WALinuxAgent-2.1.5.3 is running as the current agent
2016/07/08 21:51:41.880601 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 is running as the current agent
2016/07/08 21:51:41.909701 INFO Wire server endpoint:168.63.129.16
2016/07/08 21:51:41.936837 INFO Check for agent updates
2016/07/08 21:51:41.979260 INFO Loading Agent manifest from /var/lib/waagent/WALinuxAgent-2.1.5.1/HandlerManifest.json
2016/07/08 21:51:41.999360 INFO Loading Agent manifest from /var/lib/waagent/WALinuxAgent-2.1.5.3/HandlerManifest.json
2016/07/08 21:51:42.027065 INFO Agent WALinuxAgent-2.1.5.3 discovered agent update and will exit
2016/07/08 21:51:42.050964 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 discovered agent update and will exit
2016/07/08 21:51:42.112336 INFO Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers' returned 0
2016/07/08 21:51:42.135428 INFO Event: name=WALinuxAgent, op=Enable, message=Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers' returned 0
[... two more identical launch / discover-update / exit cycles elided; only the timestamps differ ...]
2016/07/08 21:51:44.013181 INFO Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers'
2016/07/08 21:51:44.038561 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers'
2016/07/08 21:51:44.530643 INFO Agent WALinuxAgent-2.1.5.3 is running as the current agent
2016/07/08 21:51:44.542035 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 is running as the current agent
2016/07/08 21:51:44.544212 INFO Wire server endpoint:168.63.129.16
2016/07/08 21:51:44.572049 INFO Check for agent updates
2016/07/08 21:51:44.601699 INFO Loading Agent manifest from /var/lib/waagent/WALinuxAgent-2.1.5.1/HandlerManifest.json
2016/07/08 21:51:44.604319 INFO Loading Agent manifest from /var/lib/waagent/WALinuxAgent-2.1.5.3/HandlerManifest.json
2016/07/08 21:51:44.614998 INFO Agent WALinuxAgent-2.1.5.3 discovered agent update and will exit
2016/07/08 21:51:44.615744 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 discovered agent update and will exit
2016/07/08 21:51:44.663500 INFO Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers' returned 0
2016/07/08 21:51:44.683130 INFO Event: name=WALinuxAgent, op=Enable, message=Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers' returned 0
2016/07/08 21:51:44.717203 INFO Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers'
2016/07/08 21:51:44.717801 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers'
2016/07/08 21:51:45.327595 INFO Agent WALinuxAgent-2.1.5.3 is running as the current agent
2016/07/08 21:51:45.355741 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 is running as the current agent
2016/07/08 21:51:45.378140 INFO Wire server endpoint:168.63.129.16
2016/07/08 21:51:45.425207 INFO Check for agent updates
2016/07/08 21:51:45.511625 INFO Loading Agent manifest from /var/lib/waagent/WALinuxAgent-2.1.5.1/HandlerManifest.json
2016/07/08 21:51:45.532343 INFO Loading Agent manifest from /var/lib/waagent/WALinuxAgent-2.1.5.3/HandlerManifest.json
2016/07/08 21:51:45.551889 INFO Agent WALinuxAgent-2.1.5.3 discovered agent update and will exit
2016/07/08 21:51:45.572167 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 discovered agent update and will exit
2016/07/08 21:51:45.634632 INFO Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers' returned 0
2016/07/08 21:51:45.637357 INFO Event: name=WALinuxAgent, op=Enable, message=Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers' returned 0
2016/07/08 21:51:45.730332 INFO Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers'
2016/07/08 21:51:45.767070 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers'
2016/07/08 21:51:46.357507 INFO Agent WALinuxAgent-2.1.5.3 is running as the current agent
2016/07/08 21:51:46.365985 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 is running as the current agent
2016/07/08 21:51:46.368831 INFO Wire server endpoint:168.63.129.16
2016/07/08 21:51:46.388904 INFO Check for agent updates
2016/07/08 21:51:46.455008 INFO Loading Agent manifest from /var/lib/waagent/WALinuxAgent-2.1.5.1/HandlerManifest.json
2016/07/08 21:51:46.457944 INFO Loading Agent manifest from /var/lib/waagent/WALinuxAgent-2.1.5.3/HandlerManifest.json
2016/07/08 21:51:46.469406 INFO Agent WALinuxAgent-2.1.5.3 discovered agent update and will exit
2016/07/08 21:51:46.472261 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 discovered agent update and will exit
2016/07/08 21:51:46.533666 INFO Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers' returned 0
2016/07/08 21:51:46.574132 INFO Event: name=WALinuxAgent, op=Enable, message=Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers' returned 0
2016/07/08 21:51:46.621227 INFO Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers'
2016/07/08 21:51:46.622106 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers'
2016/07/08 21:51:47.197051 INFO Agent WALinuxAgent-2.1.5.3 is running as the current agent
2016/07/08 21:51:47.211052 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 is running as the current agent
2016/07/08 21:51:47.228764 INFO Wire server endpoint:168.63.129.16
2016/07/08 21:51:47.254180 INFO Check for agent updates
2016/07/08 21:51:47.287889 INFO Loading Agent manifest from /var/lib/waagent/WALinuxAgent-2.1.5.1/HandlerManifest.json
2016/07/08 21:51:47.307351 INFO Loading Agent manifest from /var/lib/waagent/WALinuxAgent-2.1.5.3/HandlerManifest.json
2016/07/08 21:51:47.323870 INFO Agent WALinuxAgent-2.1.5.3 discovered agent update and will exit
2016/07/08 21:51:47.336948 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 discovered agent update and will exit
2016/07/08 21:51:47.387282 INFO Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers' returned 0
2016/07/08 21:51:47.389821 INFO Event: name=WALinuxAgent, op=Enable, message=Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers' returned 0
2016/07/08 21:51:47.433157 INFO Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers'
2016/07/08 21:51:47.435597 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers'
2016/07/08 21:51:47.918501 INFO Agent WALinuxAgent-2.1.5.3 is running as the current agent
2016/07/08 21:51:47.927511 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 is running as the current agent
2016/07/08 21:51:47.930193 INFO Wire server endpoint:168.63.129.16
2016/07/08 21:51:47.955407 INFO Check for agent updates
2016/07/08 21:51:47.997328 INFO Loading Agent manifest from /var/lib/waagent/WALinuxAgent-2.1.5.1/HandlerManifest.json
2016/07/08 21:51:48.000749 INFO Loading Agent manifest from /var/lib/waagent/WALinuxAgent-2.1.5.3/HandlerManifest.json
2016/07/08 21:51:48.004041 INFO Agent WALinuxAgent-2.1.5.3 discovered agent update and will exit
2016/07/08 21:51:48.011916 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 discovered agent update and will exit
2016/07/08 21:51:48.060976 INFO Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers' returned 0
2016/07/08 21:51:48.063291 INFO Event: name=WALinuxAgent, op=Enable, message=Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers' returned 0
2016/07/08 21:51:48.117204 INFO Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers'
2016/07/08 21:51:48.139992 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers'
2016/07/08 21:51:48.603085 INFO Agent WALinuxAgent-2.1.5.3 is running as the current agent
2016/07/08 21:51:48.605763 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 is running as the current agent
2016/07/08 21:51:48.615658 INFO Wire server endpoint:168.63.129.16
2016/07/08 21:51:48.644782 INFO Check for agent updates
2016/07/08 21:51:48.684577 INFO Loading Agent manifest from /var/lib/waagent/WALinuxAgent-2.1.5.1/HandlerManifest.json
2016/07/08 21:51:48.687354 INFO Loading Agent manifest from /var/lib/waagent/WALinuxAgent-2.1.5.3/HandlerManifest.json
2016/07/08 21:51:48.689722 INFO Agent WALinuxAgent-2.1.5.3 discovered agent update and will exit
2016/07/08 21:51:48.690430 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 discovered agent update and will exit
2016/07/08 21:51:48.743235 INFO Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers' returned 0
2016/07/08 21:51:48.764309 INFO Event: name=WALinuxAgent, op=Enable, message=Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers' returned 0
2016/07/08 21:51:48.793241 INFO Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers'
2016/07/08 21:51:48.825085 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers'
2016/07/08 21:51:49.362306 INFO Agent WALinuxAgent-2.1.5.3 is running as the current agent
2016/07/08 21:51:49.374656 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 is running as the current agent
2016/07/08 21:51:49.395527 INFO Wire server endpoint:168.63.129.16
2016/07/08 21:51:49.417136 INFO Check for agent updates
2016/07/08 21:51:49.466363 INFO Loading Agent manifest from /var/lib/waagent/WALinuxAgent-2.1.5.1/HandlerManifest.json
2016/07/08 21:51:49.484546 INFO Loading Agent manifest from /var/lib/waagent/WALinuxAgent-2.1.5.3/HandlerManifest.json
2016/07/08 21:51:49.502543 INFO Agent WALinuxAgent-2.1.5.3 discovered agent update and will exit
2016/07/08 21:51:49.514883 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 discovered agent update and will exit
2016/07/08 21:51:49.562007 INFO Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers' returned 0
2016/07/08 21:51:49.565266 INFO Event: name=WALinuxAgent, op=Enable, message=Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers' returned 0
2016/07/08 21:51:49.609194 INFO Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers'
2016/07/08 21:51:49.628816 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers'
2016/07/08 21:51:50.168861 INFO Agent WALinuxAgent-2.1.5.3 is running as the current agent
2016/07/08 21:51:50.171438 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 is running as the current agent
2016/07/08 21:51:50.180856 INFO Wire server endpoint:168.63.129.16
2016/07/08 21:51:50.213828 INFO Check for agent updates
2016/07/08 21:51:50.248302 INFO Loading Agent manifest from /var/lib/waagent/WALinuxAgent-2.1.5.1/HandlerManifest.json
2016/07/08 21:51:50.252061 INFO Loading Agent manifest from /var/lib/waagent/WALinuxAgent-2.1.5.3/HandlerManifest.json
2016/07/08 21:51:50.262508 INFO Agent WALinuxAgent-2.1.5.3 discovered agent update and will exit
2016/07/08 21:51:50.263335 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 discovered agent update and will exit
2016/07/08 21:51:50.318705 INFO Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers' returned 0
2016/07/08 21:51:50.342626 INFO Event: name=WALinuxAgent, op=Enable, message=Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers' returned 0
2016/07/08 21:51:50.377173 INFO Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers'
2016/07/08 21:51:50.401987 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers'
2016/07/08 21:51:50.975030 INFO Agent WALinuxAgent-2.1.5.3 is running as the current agent
2016/07/08 21:51:50.987630 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 is running as the current agent
2016/07/08 21:51:51.018780 INFO Wire server endpoint:168.63.129.16
2016/07/08 21:51:51.051524 INFO Check for agent updates
2016/07/08 21:51:51.084308 INFO Loading Agent manifest from /var/lib/waagent/WALinuxAgent-2.1.5.1/HandlerManifest.json
2016/07/08 21:51:51.108080 INFO Loading Agent manifest from /var/lib/waagent/WALinuxAgent-2.1.5.3/HandlerManifest.json
2016/07/08 21:51:51.129931 INFO Agent WALinuxAgent-2.1.5.3 discovered agent update and will exit
2016/07/08 21:51:51.146710 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 discovered agent update and will exit
2016/07/08 21:51:51.208489 INFO Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers' returned 0
2016/07/08 21:51:51.209322 INFO Event: name=WALinuxAgent, op=Enable, message=Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers' returned 0
2016/07/08 21:51:51.256133 INFO Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers'
2016/07/08 21:51:51.258511 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers'
2016/07/08 21:51:51.824414 INFO Agent WALinuxAgent-2.1.5.3 is running as the current agent
2016/07/08 21:51:51.836174 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 is running as the current agent
2016/07/08 21:51:51.853035 INFO Wire server endpoint:168.63.129.16
2016/07/08 21:51:51.875222 INFO Check for agent updates
2016/07/08 21:51:51.908704 INFO Loading Agent manifest from /var/lib/waagent/WALinuxAgent-2.1.5.1/HandlerManifest.json
2016/07/08 21:51:51.923015 INFO Loading Agent manifest from /var/lib/waagent/WALinuxAgent-2.1.5.3/HandlerManifest.json
2016/07/08 21:51:51.938682 INFO Agent WALinuxAgent-2.1.5.3 discovered agent update and will exit
2016/07/08 21:51:51.950739 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 discovered agent update and will exit
2016/07/08 21:51:51.998146 INFO Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers' returned 0
2016/07/08 21:51:52.000581 INFO Event: name=WALinuxAgent, op=Enable, message=Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers' returned 0
2016/07/08 21:51:52.038076 INFO Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers'
2016/07/08 21:51:52.040270 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers'
2016/07/08 21:51:52.586988 INFO Agent WALinuxAgent-2.1.5.3 is running as the current agent
2016/07/08 21:51:52.594895 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 is running as the current agent
2016/07/08 21:51:52.607576 INFO Wire server endpoint:168.63.129.16
2016/07/08 21:51:52.628226 INFO Check for agent updates
2016/07/08 21:51:52.665475 INFO Loading Agent manifest from /var/lib/waagent/WALinuxAgent-2.1.5.1/HandlerManifest.json
2016/07/08 21:51:52.668298 INFO Loading Agent manifest from /var/lib/waagent/WALinuxAgent-2.1.5.3/HandlerManifest.json
2016/07/08 21:51:52.670476 INFO Agent WALinuxAgent-2.1.5.3 discovered agent update and will exit
2016/07/08 21:51:52.680786 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 discovered agent update and will exit
2016/07/08 21:51:52.735177 INFO Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers' returned 0
2016/07/08 21:51:52.742582 INFO Event: name=WALinuxAgent, op=Enable, message=Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers' returned 0
2016/07/08 21:51:52.805232 INFO Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers'
2016/07/08 21:51:52.828536 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers'
2016/07/08 21:51:53.334569 INFO Agent WALinuxAgent-2.1.5.3 is running as the current agent
2016/07/08 21:51:53.342464 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 is running as the current agent
2016/07/08 21:51:53.345938 INFO Wire server endpoint:168.63.129.16
2016/07/08 21:51:53.368600 INFO Check for agent updates
2016/07/08 21:51:53.404426 INFO Loading Agent manifest from /var/lib/waagent/WALinuxAgent-2.1.5.1/HandlerManifest.json
2016/07/08 21:51:53.407243 INFO Loading Agent manifest from /var/lib/waagent/WALinuxAgent-2.1.5.3/HandlerManifest.json
2016/07/08 21:51:53.409710 INFO Agent WALinuxAgent-2.1.5.3 discovered agent update and will exit
2016/07/08 21:51:53.411951 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 discovered agent update and will exit
2016/07/08 21:51:53.466092 INFO Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers' returned 0
2016/07/08 21:51:53.484793 INFO Event: name=WALinuxAgent, op=Enable, message=Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers' returned 0
2016/07/08 21:51:53.513155 INFO Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers'
2016/07/08 21:51:53.549833 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers'
2016/07/08 21:51:54.086244 INFO Agent WALinuxAgent-2.1.5.3 is running as the current agent
2016/07/08 21:51:54.100320 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 is running as the current agent
2016/07/08 21:51:54.118185 INFO Wire server endpoint:168.63.129.16
2016/07/08 21:51:54.141337 INFO Check for agent updates
2016/07/08 21:51:54.179400 INFO Loading Agent manifest from /var/lib/waagent/WALinuxAgent-2.1.5.1/HandlerManifest.json
2016/07/08 21:51:54.195961 INFO Loading Agent manifest from /var/lib/waagent/WALinuxAgent-2.1.5.3/HandlerManifest.json
2016/07/08 21:51:54.212276 INFO Agent WALinuxAgent-2.1.5.3 discovered agent update and will exit
2016/07/08 21:51:54.225937 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 discovered agent update and will exit
2016/07/08 21:51:54.274542 INFO Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers' returned 0
2016/07/08 21:51:54.277141 INFO Event: name=WALinuxAgent, op=Enable, message=Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers' returned 0
2016/07/08 21:51:54.321209 INFO Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers'
2016/07/08 21:51:54.339802 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers'
2016/07/08 21:51:54.820216 INFO Agent WALinuxAgent-2.1.5.3 is running as the current agent
2016/07/08 21:51:54.872161 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 is running as the current agent
2016/07/08 21:51:54.876294 INFO Wire server endpoint:168.63.129.16
2016/07/08 21:51:54.912602 INFO Check for agent updates
2016/07/08 21:51:54.947001 INFO Loading Agent manifest from /var/lib/waagent/WALinuxAgent-2.1.5.1/HandlerManifest.json
2016/07/08 21:51:54.955836 INFO Loading Agent manifest from /var/lib/waagent/WALinuxAgent-2.1.5.3/HandlerManifest.json
2016/07/08 21:51:54.970085 INFO Agent WALinuxAgent-2.1.5.3 discovered agent update and will exit
2016/07/08 21:51:54.972439 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 discovered agent update and will exit
2016/07/08 21:51:55.028333 INFO Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers' returned 0
2016/07/08 21:51:55.048616 INFO Event: name=WALinuxAgent, op=Enable, message=Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers' returned 0
2016/07/08 21:51:55.077202 INFO Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers'
2016/07/08 21:51:55.079855 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.3 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc3-py2.7.egg -run-exthandlers'
2016/07/08 21:51:55.686370 INFO Agent WALinuxAgent-2.1.5.3 is running as the current agen
``` | Azure/WALinuxAgent | diff --git a/tests/ga/test_update.py b/tests/ga/test_update.py
index 3c81437c..cfa537e7 100644
--- a/tests/ga/test_update.py
+++ b/tests/ga/test_update.py
@@ -577,6 +577,7 @@ class TestUpdate(UpdateTestCase):
def _test_ensure_latest_agent(
self,
+ base_version=FlexibleVersion(AGENT_VERSION),
protocol=None,
versions=None):
@@ -591,7 +592,7 @@ class TestUpdate(UpdateTestCase):
self.update_handler.protocol_util = protocol
conf.get_autoupdate_gafamily = Mock(return_value=protocol.family)
- return self.update_handler._ensure_latest_agent()
+ return self.update_handler._ensure_latest_agent(base_version=base_version)
def test_ensure_latest_agent_returns_true_on_first_use(self):
self.assertEqual(None, self.update_handler.last_etag)
@@ -633,7 +634,13 @@ class TestUpdate(UpdateTestCase):
self.assertFalse(self._test_ensure_latest_agent())
return
- def test_ensure_latest_agent_skips_when_no_new_versions(self):
+ def test_ensure_latest_agent_skips_if_when_no_new_versions(self):
+ self.prepare_agents()
+ base_version = self.agent_versions()[0] + 1
+ self.assertFalse(self._test_ensure_latest_agent(base_version=base_version))
+ return
+
+ def test_ensure_latest_agent_skips_when_no_versions(self):
self.assertFalse(self._test_ensure_latest_agent(protocol=ProtocolMock()))
return
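The record's source patch is not among the fields visible here, but the updated tests pin down its contract: `_ensure_latest_agent(base_version=...)` must report no update unless some discovered agent is strictly newer than the one already running, which is what breaks the launch/exit loop shown in the log above. A minimal, hypothetical sketch of that predicate (the function name and the use of `LooseVersion` as a stand-in for the agent's `FlexibleVersion` are illustrative, not the agent's actual code):

```python
# Illustrative sketch only -- not WALinuxAgent source. It mirrors the
# behavior the new tests assert: update only when a discovered version
# strictly exceeds the running base version; otherwise skip.
from distutils.version import LooseVersion  # stand-in for FlexibleVersion


def should_update(discovered_versions, base_version):
    """Return True only if some discovered version exceeds base_version."""
    base = LooseVersion(base_version)
    return any(LooseVersion(v) > base for v in discovered_versions)


assert should_update(["2.1.5.1", "2.1.5.3"], "2.1.5.1")  # newer agent exists
assert not should_update(["2.1.5.3"], "2.1.5.3")         # same version: skip
assert not should_update([], "2.1.5.1")                  # nothing found: skip
```

Under this rule the 2.1.5.3 agent in the log would no longer treat itself as an update and exit in a loop.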
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"nose",
"pyasn1",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
nose==1.3.7
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyasn1==0.5.1
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
-e git+https://github.com/Azure/WALinuxAgent.git@92091140c2a7378c1a01fe8526800af912d93c49#egg=WALinuxAgent
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: WALinuxAgent
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- nose==1.3.7
- pyasn1==0.5.1
prefix: /opt/conda/envs/WALinuxAgent
| [
"tests/ga/test_update.py::TestUpdate::test_ensure_lastest_agent_purges_old_agents",
"tests/ga/test_update.py::TestUpdate::test_ensure_latest_agent_ignores_old_agents",
"tests/ga/test_update.py::TestUpdate::test_ensure_latest_agent_returns_true_on_first_use",
"tests/ga/test_update.py::TestUpdate::test_ensure_latest_agent_skips_if_too_frequent",
"tests/ga/test_update.py::TestUpdate::test_ensure_latest_agent_skips_if_when_no_new_versions",
"tests/ga/test_update.py::TestUpdate::test_ensure_latest_agent_skips_when_etag_matches",
"tests/ga/test_update.py::TestUpdate::test_ensure_latest_agent_skips_when_no_versions",
"tests/ga/test_update.py::TestUpdate::test_ensure_latest_agent_skips_when_updates_are_disabled",
"tests/ga/test_update.py::TestUpdate::test_ensure_latest_agent_sorts"
]
| []
| [
"tests/ga/test_update.py::TestGuestAgentError::test_clear",
"tests/ga/test_update.py::TestGuestAgentError::test_creation",
"tests/ga/test_update.py::TestGuestAgentError::test_load_preserves_error_state",
"tests/ga/test_update.py::TestGuestAgentError::test_mark_failure",
"tests/ga/test_update.py::TestGuestAgentError::test_mark_failure_permanent",
"tests/ga/test_update.py::TestGuestAgentError::test_save",
"tests/ga/test_update.py::TestGuestAgent::test_clear_error",
"tests/ga/test_update.py::TestGuestAgent::test_creation",
"tests/ga/test_update.py::TestGuestAgent::test_download",
"tests/ga/test_update.py::TestGuestAgent::test_download_fail",
"tests/ga/test_update.py::TestGuestAgent::test_ensure_download_skips_blacklisted",
"tests/ga/test_update.py::TestGuestAgent::test_ensure_downloaded",
"tests/ga/test_update.py::TestGuestAgent::test_ensure_downloaded_download_fails",
"tests/ga/test_update.py::TestGuestAgent::test_ensure_downloaded_load_manifest_fails",
"tests/ga/test_update.py::TestGuestAgent::test_ensure_downloaded_unpack_fails",
"tests/ga/test_update.py::TestGuestAgent::test_is_available",
"tests/ga/test_update.py::TestGuestAgent::test_is_blacklisted",
"tests/ga/test_update.py::TestGuestAgent::test_is_downloaded",
"tests/ga/test_update.py::TestGuestAgent::test_load_manifest",
"tests/ga/test_update.py::TestGuestAgent::test_load_manifest_is_empty",
"tests/ga/test_update.py::TestGuestAgent::test_load_manifest_is_malformed",
"tests/ga/test_update.py::TestGuestAgent::test_load_manifest_missing",
"tests/ga/test_update.py::TestGuestAgent::test_mark_failure",
"tests/ga/test_update.py::TestGuestAgent::test_unpack",
"tests/ga/test_update.py::TestGuestAgent::test_unpack_fail",
"tests/ga/test_update.py::TestUpdate::test_filter_blacklisted_agents",
"tests/ga/test_update.py::TestUpdate::test_get_latest_agent",
"tests/ga/test_update.py::TestUpdate::test_get_latest_agent_no_updates",
"tests/ga/test_update.py::TestUpdate::test_get_latest_agent_skip_updates",
"tests/ga/test_update.py::TestUpdate::test_get_latest_agent_skips_unavailable",
"tests/ga/test_update.py::TestUpdate::test_load_agents",
"tests/ga/test_update.py::TestUpdate::test_load_agents_does_not_reload",
"tests/ga/test_update.py::TestUpdate::test_load_agents_sorts",
"tests/ga/test_update.py::TestUpdate::test_purge_agents",
"tests/ga/test_update.py::TestUpdate::test_run",
"tests/ga/test_update.py::TestUpdate::test_run_keeps_running",
"tests/ga/test_update.py::TestUpdate::test_run_latest",
"tests/ga/test_update.py::TestUpdate::test_run_latest_defaults_to_current",
"tests/ga/test_update.py::TestUpdate::test_run_latest_exception_blacklists",
"tests/ga/test_update.py::TestUpdate::test_run_latest_forwards_output",
"tests/ga/test_update.py::TestUpdate::test_run_latest_missing_code_marks_failures",
"tests/ga/test_update.py::TestUpdate::test_run_latest_nonzero_code_marks_failures",
"tests/ga/test_update.py::TestUpdate::test_run_stops_if_update_available",
"tests/ga/test_update.py::TestUpdate::test_set_agents"
]
| []
| Apache License 2.0 | 616 | [
"azurelinuxagent/ga/update.py"
]
| [
"azurelinuxagent/ga/update.py"
]
|
|
kako-nawao__ffconv-31 | 53a304807ccc8c488ea1e335a5d8d3f7ef45d85f | 2016-07-09 15:09:19 | 53a304807ccc8c488ea1e335a5d8d3f7ef45d85f | diff --git a/ffconv/profiles.py b/ffconv/profiles.py
index dc57c5b..6320b69 100644
--- a/ffconv/profiles.py
+++ b/ffconv/profiles.py
@@ -33,7 +33,7 @@ ROKU = {
'audio': {
'codecs': ['mp3', 'aac', 'flac'],
'container': 'mp3',
- 'channels': 2,
+ 'max_channels': 2,
'quality': 2
},
'subtitle': {
diff --git a/ffconv/stream_processors.py b/ffconv/stream_processors.py
index 9869ff7..0679793 100644
--- a/ffconv/stream_processors.py
+++ b/ffconv/stream_processors.py
@@ -173,8 +173,8 @@ class AudioProcessor(StreamProcessor):
self.channels = int(stream['channels'])
# Set target quality and channels
+ self.max_channels = int(profile[self.media_type]['max_channels'])
self.target_quality = profile[self.media_type]['quality']
- self.target_channels = int(profile[self.media_type]['channels'])
@property
def must_convert(self):
@@ -183,7 +183,7 @@ class AudioProcessor(StreamProcessor):
number of channels is acceptable.
"""
return any((super(AudioProcessor, self).must_convert,
- self.channels != self.target_channels))
+ self.channels > self.max_channels))
def clean_up(self):
"""
@@ -198,7 +198,7 @@ class AudioProcessor(StreamProcessor):
"""
cmd = ['ffmpeg', '-i', self.input, '-map', '0:{}'.format(self.index),
'-c:a', self.target_codec, '-q:a', str(self.target_quality),
- '-ac:0', str(self.target_channels), self.output]
+ '-ac:0', str(self.max_channels), self.output]
execute_cmd(cmd)
diff --git a/setup.py b/setup.py
index b3b63f3..fdfae33 100644
--- a/setup.py
+++ b/setup.py
@@ -21,7 +21,7 @@ setup(
name='ffconv',
# https://packaging.python.org/en/latest/single_source_version.html
- version='0.2.0',
+ version='0.2.1',
description='Process media files with ffmpeg',
long_description=long_description,
diff --git a/sonar-project.properties b/sonar-project.properties
index ff416df..8ff64bf 100644
--- a/sonar-project.properties
+++ b/sonar-project.properties
@@ -2,7 +2,7 @@
sonar.projectKey=utils:ffconv
# this is the name displayed in the SonarQube UI
sonar.projectName=ffconv
-sonar.projectVersion=0.0.2
+sonar.projectVersion=0.2.1
# Path is relative to the sonar-project.properties file. Replace "\" by "/" on Windows.
# Since SonarQube 4.2, this property is optional if sonar.modules is set.
 | Do not encode audio with fewer channels than the target
Encoding of audio streams is triggered whenever the number of channels does not match the target, but it should only be triggered when the input stream's channel count is greater than the target, not less. A *mono* audio stream, for instance, should not need to be converted for the *roku* profile. | kako-nawao/ffconv | diff --git a/tests/test_streams.py b/tests/test_streams.py
index 944fec5..5502627 100644
--- a/tests/test_streams.py
+++ b/tests/test_streams.py
@@ -110,7 +110,7 @@ class AudioProcessorTest(TestCase):
self.assertEqual(processor.target_codec, 'mp3')
self.assertEqual(processor.output, 'audio-3.mp3')
self.assertEqual(processor.channels, 2)
- self.assertEqual(processor.target_channels, 2)
+ self.assertEqual(processor.max_channels, 2)
@patch('ffconv.stream_processors.execute_cmd')
def test_convert(self, ecmd):
@@ -141,6 +141,17 @@ class AudioProcessorTest(TestCase):
self.assertFalse(processor.convert.called)
self.assertFalse(processor.clean_up.called)
+ # Attempt process with less channels, should do nothing
+ stream = {'index': 1, 'codec_type': 'audio', 'codec_name': 'aac',
+ 'channels': 1, 'tags': {'language': 'por'}}
+
+ processor = AudioProcessor(input, stream, profile)
+ res = processor.process()
+ self.assertEqual(res, {'input': 'some-film.mkv', 'index': 1,
+ 'language': 'por'})
+ self.assertFalse(processor.convert.called)
+ self.assertFalse(processor.clean_up.called)
+
# Attempt mp3 process, should convert
stream = {'index': 1, 'codec_type': 'audio', 'codec_name': 'ac3',
'channels': 2, 'tags': {'language': 'por'}}
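The rule these tests encode is compact enough to restate on its own. A standalone sketch (the function is illustrative; the codec list and the 2-channel cap come from the *roku* profile in the patch):

```python
# Restatement of the fixed predicate in stream_processors.py: convert only
# when the codec is unsupported or the stream carries MORE channels than
# the profile allows -- never because it carries fewer.
def must_convert(codec_name, channels, allowed_codecs, max_channels):
    return codec_name not in allowed_codecs or channels > max_channels


roku_codecs, roku_max = ['mp3', 'aac', 'flac'], 2
assert not must_convert('aac', 1, roku_codecs, roku_max)  # mono aac: keep
assert not must_convert('aac', 2, roku_codecs, roku_max)  # stereo aac: keep
assert must_convert('ac3', 2, roku_codecs, roku_max)      # ac3: re-encode
```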
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 4
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | astroid==3.3.9
coverage==7.8.0
dill==0.3.9
exceptiongroup==1.2.2
-e git+https://github.com/kako-nawao/ffconv.git@53a304807ccc8c488ea1e335a5d8d3f7ef45d85f#egg=ffconv
iniconfig==2.1.0
isort==6.0.1
mccabe==0.7.0
nose2==0.15.1
packaging==24.2
platformdirs==4.3.7
pluggy==1.5.0
pylint==3.3.6
pytest==8.3.5
tomli==2.2.1
tomlkit==0.13.2
typing_extensions==4.13.0
| name: ffconv
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- astroid==3.3.9
- coverage==7.8.0
- dill==0.3.9
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- isort==6.0.1
- mccabe==0.7.0
- nose2==0.15.1
- packaging==24.2
- platformdirs==4.3.7
- pluggy==1.5.0
- pylint==3.3.6
- pytest==8.3.5
- tomli==2.2.1
- tomlkit==0.13.2
- typing-extensions==4.13.0
prefix: /opt/conda/envs/ffconv
| [
"tests/test_streams.py::AudioProcessorTest::test_init",
"tests/test_streams.py::AudioProcessorTest::test_process"
]
| []
| [
"tests/test_streams.py::VideoProcessorTest::test_convert",
"tests/test_streams.py::VideoProcessorTest::test_init",
"tests/test_streams.py::VideoProcessorTest::test_process",
"tests/test_streams.py::AudioProcessorTest::test_convert",
"tests/test_streams.py::SubtitleProcessorTest::test_clean_up",
"tests/test_streams.py::SubtitleProcessorTest::test_convert",
"tests/test_streams.py::SubtitleProcessorTest::test_init",
"tests/test_streams.py::SubtitleProcessorTest::test_process"
]
| []
| null | 617 | [
"setup.py",
"ffconv/profiles.py",
"ffconv/stream_processors.py",
"sonar-project.properties"
]
| [
"setup.py",
"ffconv/profiles.py",
"ffconv/stream_processors.py",
"sonar-project.properties"
]
|
|
falconry__falcon-843 | 0f64e94a5ab49b58b9efa8462fe8c0369428243d | 2016-07-09 18:14:43 | 67d61029847cbf59e4053c8a424df4f9f87ad36f | codecov-io: ## [Current coverage][cc-pull] is **100%**
> Merging [#843][cc-pull] into [master][cc-base-branch] will not change coverage
```diff
@@ master #843 diff @@
====================================
Files 29 29
Lines 1813 1817 +4
Methods 0 0
Messages 0 0
Branches 305 306 +1
====================================
+ Hits 1813 1817 +4
Misses 0 0
Partials 0 0
```
> Powered by [Codecov](https://codecov.io?src=pr). Last updated by [0f64e94...130da74][cc-compare]
[cc-base-branch]: https://codecov.io/gh/falconry/falcon/branch/master?src=pr
[cc-compare]: https://codecov.io/gh/falconry/falcon/compare/0f64e94a5ab49b58b9efa8462fe8c0369428243d...130da74f772ad772b74f74190cc85510333eefef?src=pr
[cc-pull]: https://codecov.io/gh/falconry/falcon/pull/843?src=pr | diff --git a/falcon/api.py b/falcon/api.py
index 3b5539b..557de77 100644
--- a/falcon/api.py
+++ b/falcon/api.py
@@ -40,17 +40,17 @@ class API(object):
class ExampleComponent(object):
def process_request(self, req, resp):
- \"""Process the request before routing it.
+ \"\"\"Process the request before routing it.
Args:
req: Request object that will eventually be
routed to an on_* responder method.
resp: Response object that will be routed to
the on_* responder.
- \"""
+ \"\"\"
def process_resource(self, req, resp, resource, params):
- \"""Process the request and resource *after* routing.
+ \"\"\"Process the request and resource *after* routing.
Note:
This method is only called when the request matches
@@ -69,10 +69,10 @@ class API(object):
template fields, that will be passed to the
resource's responder method as keyword
arguments.
- \"""
+ \"\"\"
def process_response(self, req, resp, resource)
- \"""Post-processing of the response (after routing).
+ \"\"\"Post-processing of the response (after routing).
Args:
req: Request object.
@@ -80,7 +80,7 @@ class API(object):
resource: Resource object to which the request was
routed. May be None if no route was found
for the request.
- \"""
+ \"\"\"
See also :ref:`Middleware <middleware>`.
@@ -249,6 +249,10 @@ class API(object):
def add_route(self, uri_template, resource, *args, **kwargs):
"""Associates a templatized URI path with a resource.
+ Note:
+ The following information describes the behavior of
+ Falcon's default router.
+
A resource is an instance of a class that defines various
"responder" methods, one for each HTTP method the resource
allows. Responder names start with `on_` and are named according to
@@ -272,6 +276,10 @@ class API(object):
field names defined in the template. A field expression consists
of a bracketed field name.
+ Note:
+ Since field names correspond to argument names in responder
+ methods, they must be valid Python identifiers.
+
For example, given the following template::
/user/{name}
@@ -281,8 +289,8 @@ class API(object):
def on_put(self, req, resp, name):
pass
- Individual path segments may contain one or more field expressions.
- For example::
+ Individual path segments may contain one or more field
+ expressions::
/repos/{org}/{repo}/compare/{usr0}:{branch0}...{usr1}:{branch1}
diff --git a/falcon/routing/compiled.py b/falcon/routing/compiled.py
index 057cf6e..f4e3058 100644
--- a/falcon/routing/compiled.py
+++ b/falcon/routing/compiled.py
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import keyword
import re
@@ -41,14 +42,18 @@ class CompiledRouter(object):
def add_route(self, uri_template, method_map, resource):
"""Adds a route between URI path template and resource."""
- # Can't start with a number, since these eventually get passed as
- # args to on_* responders
- if re.search('{\d', uri_template):
- raise ValueError('Field names may not start with a digit.')
if re.search('\s', uri_template):
raise ValueError('URI templates may not include whitespace.')
+ # NOTE(kgriffs): Ensure fields are valid Python identifiers,
+ # since they will be passed as kwargs to responders.
+ fields = re.findall('{([^}]*)}', uri_template)
+ for field in fields:
+ is_identifier = re.match('[A-Za-z_][A-Za-z0-9_]+$', field)
+ if not is_identifier or field in keyword.kwlist:
+ raise ValueError('Field names must be valid identifiers.')
+
path = uri_template.strip('/').split('/')
def insert(nodes, path_index=0):
| Add test for hyphens and other non-arg-friendly chars in URI template field names
Currently we only check up-front in `add_route()` for field names that start with a digit or contain whitespace. This can lead to cryptic errors later on when requests are routed to responders. | falconry/falcon | diff --git a/tests/test_default_router.py b/tests/test_default_router.py
index dec8a8e..9f3c6d5 100644
--- a/tests/test_default_router.py
+++ b/tests/test_default_router.py
@@ -137,13 +137,28 @@ class TestComplexRouting(testing.TestBase):
)
@ddt.data(
- '/repos/{org}/{repo}/compare/{simple-vs-complex}',
+ '/repos/{org}/{repo}/compare/{simple_vs_complex}',
'/repos/{complex}.{vs}.{simple}',
'/repos/{org}/{repo}/compare/{complex}:{vs}...{complex2}/full',
)
def test_non_collision(self, template):
self.router.add_route(template, {}, ResourceWithId(-1))
+ @ddt.data(
+ '/{}',
+ '/{9v}',
+ '/{@kgriffs}',
+ '/repos/{simple-thing}/etc',
+ '/repos/{or g}/{repo}/compare/{thing}',
+ '/repos/{org}/{repo}/compare/{}',
+ '/repos/{complex}.{}.{thing}',
+ '/repos/{complex}.{9v}.{thing}/etc',
+ )
+ def test_invalid_field_name(self, template):
+ self.assertRaises(
+ ValueError,
+ self.router.add_route, template, {}, ResourceWithId(-1))
+
def test_dump(self):
print(self.router._src)
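The validation these tests exercise is easy to run standalone. This sketch reuses the exact regular expression and keyword check from the patch; as merged, the `+` quantifier incidentally also rejects single-character field names:

```python
# Same check as added to falcon/routing/compiled.py, lifted out for
# illustration: every templated field must be a valid Python identifier,
# since it becomes a keyword argument of the responder.
import keyword
import re


def validate_field_names(uri_template):
    for field in re.findall('{([^}]*)}', uri_template):
        is_identifier = re.match('[A-Za-z_][A-Za-z0-9_]+$', field)
        if not is_identifier or field in keyword.kwlist:
            raise ValueError('Field names must be valid identifiers.')


validate_field_names('/user/{name}')  # accepted
for bad in ('/{9v}', '/repos/{simple-thing}/etc', '/compare/{}'):
    try:
        validate_field_names(bad)
    except ValueError:
        pass  # rejected, as the new tests expect
```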
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 2
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"ddt",
"testtools",
"requests",
"pyyaml",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": [
"tools/test-requires"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
ddt==1.7.2
-e git+https://github.com/falconry/falcon.git@0f64e94a5ab49b58b9efa8462fe8c0369428243d#egg=falcon
fixtures==4.0.1
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
nose==1.3.7
packaging==21.3
pbr==6.1.1
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
python-mimeparse==1.6.0
PyYAML==6.0.1
requests==2.27.1
six==1.17.0
testtools==2.6.0
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: falcon
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- charset-normalizer==2.0.12
- coverage==6.2
- ddt==1.7.2
- fixtures==4.0.1
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- nose==1.3.7
- packaging==21.3
- pbr==6.1.1
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- python-mimeparse==1.6.0
- pyyaml==6.0.1
- requests==2.27.1
- six==1.17.0
- testtools==2.6.0
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/falcon
| [
"tests/test_default_router.py::TestComplexRouting::test_invalid_field_name_1____",
"tests/test_default_router.py::TestComplexRouting::test_invalid_field_name_3____kgriffs_",
"tests/test_default_router.py::TestComplexRouting::test_invalid_field_name_6__repos__org___repo__compare___",
"tests/test_default_router.py::TestComplexRouting::test_invalid_field_name_7__repos__complex______thing_"
]
| []
| [
"tests/test_default_router.py::TestRegressionCases::test_recipes",
"tests/test_default_router.py::TestRegressionCases::test_versioned_url",
"tests/test_default_router.py::TestComplexRouting::test_collision_1__teams__collision_",
"tests/test_default_router.py::TestComplexRouting::test_collision_2__emojis_signs__id_too_",
"tests/test_default_router.py::TestComplexRouting::test_collision_3__repos__org___repo__compare__complex___vs_____complex2___collision_",
"tests/test_default_router.py::TestComplexRouting::test_complex_1______5_",
"tests/test_default_router.py::TestComplexRouting::test_complex_2____full___10_",
"tests/test_default_router.py::TestComplexRouting::test_complex_3____part___15_",
"tests/test_default_router.py::TestComplexRouting::test_complex_alt_1______16_",
"tests/test_default_router.py::TestComplexRouting::test_complex_alt_2____full___17_",
"tests/test_default_router.py::TestComplexRouting::test_dead_segment_1__teams",
"tests/test_default_router.py::TestComplexRouting::test_dead_segment_2__emojis_signs",
"tests/test_default_router.py::TestComplexRouting::test_dead_segment_3__gists",
"tests/test_default_router.py::TestComplexRouting::test_dead_segment_4__gists_42",
"tests/test_default_router.py::TestComplexRouting::test_dump",
"tests/test_default_router.py::TestComplexRouting::test_invalid_field_name_2___9v_",
"tests/test_default_router.py::TestComplexRouting::test_invalid_field_name_4__repos__simple_thing__etc",
"tests/test_default_router.py::TestComplexRouting::test_invalid_field_name_5__repos__or_g___repo__compare__thing_",
"tests/test_default_router.py::TestComplexRouting::test_invalid_field_name_8__repos__complex___9v___thing__etc",
"tests/test_default_router.py::TestComplexRouting::test_literal",
"tests/test_default_router.py::TestComplexRouting::test_literal_segment",
"tests/test_default_router.py::TestComplexRouting::test_literal_vs_variable_01____teams_default___19_",
"tests/test_default_router.py::TestComplexRouting::test_literal_vs_variable_02____teams_default_members___7_",
"tests/test_default_router.py::TestComplexRouting::test_literal_vs_variable_03____teams_foo___6_",
"tests/test_default_router.py::TestComplexRouting::test_literal_vs_variable_04____teams_foo_members___7_",
"tests/test_default_router.py::TestComplexRouting::test_literal_vs_variable_05____gists_first___20_",
"tests/test_default_router.py::TestComplexRouting::test_literal_vs_variable_06____gists_first_raw___18_",
"tests/test_default_router.py::TestComplexRouting::test_literal_vs_variable_07____gists_first_pdf___21_",
"tests/test_default_router.py::TestComplexRouting::test_literal_vs_variable_08____gists_1776_pdf___21_",
"tests/test_default_router.py::TestComplexRouting::test_literal_vs_variable_09____emojis_signs_78___13_",
"tests/test_default_router.py::TestComplexRouting::test_literal_vs_variable_10____emojis_signs_78_small___22_",
"tests/test_default_router.py::TestComplexRouting::test_malformed_pattern",
"tests/test_default_router.py::TestComplexRouting::test_multivar",
"tests/test_default_router.py::TestComplexRouting::test_non_collision_1__repos__org___repo__compare__simple_vs_complex_",
"tests/test_default_router.py::TestComplexRouting::test_non_collision_2__repos__complex___vs___simple_",
"tests/test_default_router.py::TestComplexRouting::test_non_collision_3__repos__org___repo__compare__complex___vs_____complex2__full",
"tests/test_default_router.py::TestComplexRouting::test_not_found_01__this_does_not_exist",
"tests/test_default_router.py::TestComplexRouting::test_not_found_02__user_bogus",
"tests/test_default_router.py::TestComplexRouting::test_not_found_03__repos_racker_falcon_compare_johndoe_master___janedoe_dev_bogus",
"tests/test_default_router.py::TestComplexRouting::test_not_found_04__teams",
"tests/test_default_router.py::TestComplexRouting::test_not_found_05__teams_42_members_undefined",
"tests/test_default_router.py::TestComplexRouting::test_not_found_06__teams_42_undefined",
"tests/test_default_router.py::TestComplexRouting::test_not_found_07__teams_42_undefined_segments",
"tests/test_default_router.py::TestComplexRouting::test_not_found_08__teams_default_members_undefined",
"tests/test_default_router.py::TestComplexRouting::test_not_found_09__teams_default_members_thing_undefined",
"tests/test_default_router.py::TestComplexRouting::test_not_found_10__teams_default_members_thing_undefined_segments",
"tests/test_default_router.py::TestComplexRouting::test_not_found_11__teams_default_undefined",
"tests/test_default_router.py::TestComplexRouting::test_not_found_12__teams_default_undefined_segments",
"tests/test_default_router.py::TestComplexRouting::test_not_found_13__emojis_signs",
"tests/test_default_router.py::TestComplexRouting::test_not_found_14__emojis_signs_0_small",
"tests/test_default_router.py::TestComplexRouting::test_not_found_15__emojis_signs_0_undefined",
"tests/test_default_router.py::TestComplexRouting::test_not_found_16__emojis_signs_0_undefined_segments",
"tests/test_default_router.py::TestComplexRouting::test_not_found_17__emojis_signs_20_small",
"tests/test_default_router.py::TestComplexRouting::test_not_found_18__emojis_signs_20_undefined",
"tests/test_default_router.py::TestComplexRouting::test_not_found_19__emojis_signs_42_undefined",
"tests/test_default_router.py::TestComplexRouting::test_not_found_20__emojis_signs_78_undefined",
"tests/test_default_router.py::TestComplexRouting::test_override",
"tests/test_default_router.py::TestComplexRouting::test_subsegment_not_found",
"tests/test_default_router.py::TestComplexRouting::test_variable"
]
| []
| Apache License 2.0 | 618 | [
"falcon/routing/compiled.py",
"falcon/api.py"
]
| [
"falcon/routing/compiled.py",
"falcon/api.py"
]
|
craffel__mir_eval-208 | 4425853f27d9039bd48c985fe4ef0c29b53a9385 | 2016-07-10 14:00:32 | a4acbfad96db3241388c818534dc2bd08b48d188 | craffel: Awesome, thank you. I assume this is tried and true since it appeared in JAMS first. The only pedantic comment I have is that `_open` is not a very descriptive function name, I would call it something like `open_file_or_str` (or `_open_file_or_str` if you insist on the pseudo-private function/not supplying a typical docstring). Change that if you want, then feel free to squash and merge! | diff --git a/evaluators/separation_eval.py b/evaluators/separation_eval.py
index 2f6bde1..a9c8e0b 100755
--- a/evaluators/separation_eval.py
+++ b/evaluators/separation_eval.py
@@ -12,6 +12,7 @@ import argparse
import sys
import os
import glob
+import os
import numpy as np
import eval_utilities
diff --git a/mir_eval/io.py b/mir_eval/io.py
index 9aaf449..4e95b34 100644
--- a/mir_eval/io.py
+++ b/mir_eval/io.py
@@ -2,6 +2,7 @@
Functions for loading in annotations from files in different formats.
"""
+import contextlib
import numpy as np
import re
import warnings
@@ -12,6 +13,25 @@ from . import util
from . import key
[email protected]
+def _open(file_or_str, **kwargs):
+ '''Either open a file handle, or use an existing file-like object.
+
+ This will behave as the `open` function if `file_or_str` is a string.
+
+ If `file_or_str` has the `read` attribute, it will return `file_or_str`.
+
+ Otherwise, an `IOError` is raised.
+ '''
+ if hasattr(file_or_str, 'read'):
+ yield file_or_str
+ elif isinstance(file_or_str, six.string_types):
+ with open(file_or_str, **kwargs) as file_desc:
+ yield file_desc
+ else:
+ raise IOError('Invalid file-or-str object: {}'.format(file_or_str))
+
+
def load_delimited(filename, converters, delimiter=r'\s+'):
r"""Utility function for loading in data from an annotation file where columns
are delimited. The number of columns is inferred from the length of
@@ -49,51 +69,33 @@ def load_delimited(filename, converters, delimiter=r'\s+'):
# Create re object for splitting lines
splitter = re.compile(delimiter)
- # Keep track of whether we create our own file handle
- own_fh = False
- # If the filename input is a string, need to open it
- if isinstance(filename, six.string_types):
- # Remember that we need to close it later
- own_fh = True
- # Open the file for reading
- input_file = open(filename, 'r')
- # If the provided has a read attribute, we can use it as a file handle
- elif hasattr(filename, 'read'):
- input_file = filename
- # Raise error otherwise
- else:
- raise ValueError('filename must be a string or file handle')
-
# Note: we do io manually here for two reasons.
# 1. The csv module has difficulties with unicode, which may lead
# to failures on certain annotation strings
#
# 2. numpy's text loader does not handle non-numeric data
#
- for row, line in enumerate(input_file, 1):
- # Split each line using the supplied delimiter
- data = splitter.split(line.strip(), n_columns - 1)
-
- # Throw a helpful error if we got an unexpected # of columns
- if n_columns != len(data):
- raise ValueError('Expected {} columns, got {} at '
- '{}:{:d}:\n\t{}'.format(n_columns, len(data),
- filename, row, line))
-
- for value, column, converter in zip(data, columns, converters):
- # Try converting the value, throw a helpful error on failure
- try:
- converted_value = converter(value)
- except:
- raise ValueError("Couldn't convert value {} using {} "
- "found at {}:{:d}:\n\t{}".format(
- value, converter.__name__, filename, row,
- line))
- column.append(converted_value)
-
- # Close the file handle if we opened it
- if own_fh:
- input_file.close()
+ with _open(filename, mode='r') as input_file:
+ for row, line in enumerate(input_file, 1):
+ # Split each line using the supplied delimiter
+ data = splitter.split(line.strip(), n_columns - 1)
+
+ # Throw a helpful error if we got an unexpected # of columns
+ if n_columns != len(data):
+ raise ValueError('Expected {} columns, got {} at '
+ '{}:{:d}:\n\t{}'.format(n_columns, len(data),
+ filename, row, line))
+
+ for value, column, converter in zip(data, columns, converters):
+ # Try converting the value, throw a helpful error on failure
+ try:
+ converted_value = converter(value)
+ except:
+ raise ValueError("Couldn't convert value {} using {} "
+ "found at {}:{:d}:\n\t{}".format(
+ value, converter.__name__, filename,
+ row, line))
+ column.append(converted_value)
# Sane output
if n_columns == 1:
@@ -313,54 +315,36 @@ def load_patterns(filename):
"""
- # Keep track of whether we create our own file handle
- own_fh = False
- # If the filename input is a string, need to open it
- if isinstance(filename, six.string_types):
- # Remember that we need to close it later
- own_fh = True
- # Open the file for reading
- input_file = open(filename, 'r')
- # If the provided has a read attribute, we can use it as a file handle
- elif hasattr(filename, 'read'):
- input_file = filename
- # Raise error otherwise
- else:
- raise ValueError('filename must be a string or file handle')
-
# List with all the patterns
pattern_list = []
# Current pattern, which will contain all occs
pattern = []
# Current occurrence, containing (onset, midi)
occurrence = []
- for line in input_file.readlines():
- if "pattern" in line:
- if occurrence != []:
- pattern.append(occurrence)
- if pattern != []:
- pattern_list.append(pattern)
- occurrence = []
- pattern = []
- continue
- if "occurrence" in line:
- if occurrence != []:
- pattern.append(occurrence)
- occurrence = []
- continue
- string_values = line.split(",")
- onset_midi = (float(string_values[0]), float(string_values[1]))
- occurrence.append(onset_midi)
-
- # Add last occurrence and pattern to pattern_list
- if occurrence != []:
- pattern.append(occurrence)
- if pattern != []:
- pattern_list.append(pattern)
-
- # If we opened an input file, we need to close it
- if own_fh:
- input_file.close()
+ with _open(filename, mode='r') as input_file:
+ for line in input_file.readlines():
+ if "pattern" in line:
+ if occurrence != []:
+ pattern.append(occurrence)
+ if pattern != []:
+ pattern_list.append(pattern)
+ occurrence = []
+ pattern = []
+ continue
+ if "occurrence" in line:
+ if occurrence != []:
+ pattern.append(occurrence)
+ occurrence = []
+ continue
+ string_values = line.split(",")
+ onset_midi = (float(string_values[0]), float(string_values[1]))
+ occurrence.append(onset_midi)
+
+ # Add last occurrence and pattern to pattern_list
+ if occurrence != []:
+ pattern.append(occurrence)
+ if pattern != []:
+ pattern_list.append(pattern)
return pattern_list
@@ -523,49 +507,32 @@ def load_ragged_time_series(filename, dtype=float, delimiter=r'\s+',
# Create re object for splitting lines
splitter = re.compile(delimiter)
- # Keep track of whether we create our own file handle
- own_fh = False
- # If the filename input is a string, need to open it
- if isinstance(filename, six.string_types):
- # Remember that we need to close it later
- own_fh = True
- # Open the file for reading
- input_file = open(filename, 'r')
- # If the provided has a read attribute, we can use it as a file handle
- elif hasattr(filename, 'read'):
- input_file = filename
- # Raise error otherwise
- else:
- raise ValueError('filename must be a string or file handle')
if header:
start_row = 1
else:
start_row = 0
- for row, line in enumerate(input_file, start_row):
- # Split each line using the supplied delimiter
- data = splitter.split(line.strip())
- try:
- converted_time = float(data[0])
- except (TypeError, ValueError) as exe:
- six.raise_from(ValueError("Couldn't convert value {} using {} "
- "found at {}:{:d}:\n\t{}".format(
- data[0], float.__name__,
- filename, row, line)), exe)
- times.append(converted_time)
-
- # cast values to a numpy array. time stamps with no values are cast
- # to an empty array.
- try:
- converted_value = np.array(data[1:], dtype=dtype)
- except (TypeError, ValueError) as exe:
- six.raise_from(ValueError("Couldn't convert value {} using {} "
- "found at {}:{:d}:\n\t{}".format(
- data[1:], dtype.__name__,
- filename, row, line)), exe)
- values.append(converted_value)
-
- # Close the file handle if we opened it
- if own_fh:
- input_file.close()
+ with _open(filename, mode='r') as input_file:
+ for row, line in enumerate(input_file, start_row):
+ # Split each line using the supplied delimiter
+ data = splitter.split(line.strip())
+ try:
+ converted_time = float(data[0])
+ except (TypeError, ValueError) as exe:
+ six.raise_from(ValueError("Couldn't convert value {} using {} "
+ "found at {}:{:d}:\n\t{}".format(
+ data[0], float.__name__,
+ filename, row, line)), exe)
+ times.append(converted_time)
+
+ # cast values to a numpy array. time stamps with no values are cast
+ # to an empty array.
+ try:
+ converted_value = np.array(data[1:], dtype=dtype)
+ except (TypeError, ValueError) as exe:
+ six.raise_from(ValueError("Couldn't convert value {} using {} "
+ "found at {}:{:d}:\n\t{}".format(
+ data[1:], dtype.__name__,
+ filename, row, line)), exe)
+ values.append(converted_value)
return np.array(times), values
diff --git a/mir_eval/separation.py b/mir_eval/separation.py
index b52e1c2..43d1963 100644
--- a/mir_eval/separation.py
+++ b/mir_eval/separation.py
@@ -62,7 +62,7 @@ def validate(reference_sources, estimated_sources):
if reference_sources.shape != estimated_sources.shape:
raise ValueError('The shape of estimated sources and the true '
'sources should match. reference_sources.shape '
- '= {}, estimated_sources.shape '
+ '= {}, estimated_sources '
'= {}'.format(reference_sources.shape,
estimated_sources.shape))
@@ -88,21 +88,26 @@ def validate(reference_sources, estimated_sources):
'source to be non-silent, having a silent estiamted '
'source will result in an underdetermined system.')
- if (estimated_sources.shape[0] > MAX_SOURCES or
- reference_sources.shape[0] > MAX_SOURCES):
- raise ValueError('The supplied matrices should be of shape (nsrc,'
- ' nsampl) but reference_sources.shape[0] = {} and '
- 'estimated_sources.shape[0] = {} which is greater '
- 'than mir_eval.separation.MAX_SOURCES = {}. To '
- 'override this check, set '
- 'mir_eval.separation.MAX_SOURCES to a '
- 'larger value.'.format(reference_sources.shape[0],
- estimated_sources.shape[0],
- MAX_SOURCES))
-
-
-def bss_eval_sources(reference_sources, estimated_sources,
- compute_permutation=True):
+ if estimated_sources.shape[0] > MAX_SOURCES:
+ raise ValueError('The supplied matrices should be of shape (n_sources,'
+ ' n_samples) but estimated_sources.shape[0] = {} '
+ 'which is greater than '
+ 'mir_eval.separation.MAX_SOURCES = {}. To override '
+ 'this check, set mir_eval.separation.MAX_SOURCES to '
+ 'a larger value.'.format(estimated_sources.shape[0],
+ MAX_SOURCES))
+
+ if reference_sources.shape[0] > MAX_SOURCES:
+ raise ValueError('The supplied matrices should be of shape (n_sources,'
+ ' n_samples) but reference_sources.shape[0] = {} '
+ 'which is greater than '
+ 'mir_eval.separation.MAX_SOURCES = {}. To override '
+ 'this check, set mir_eval.separation.MAX_SOURCES to '
+ 'a larger value.'.format(estimated_sources.shape[0],
+ MAX_SOURCES))
+
+
+def bss_eval_sources(reference_sources, estimated_sources):
"""MATLAB translation of BSS_EVAL Toolbox
Ordering and measurement of the separation quality for estimated source
@@ -124,13 +129,9 @@ def bss_eval_sources(reference_sources, estimated_sources,
Parameters
----------
reference_sources : np.ndarray, shape=(nsrc, nsampl)
- matrix containing true sources (must have same shape as
- estimated_sources)
+ matrix containing true sources
estimated_sources : np.ndarray, shape=(nsrc, nsampl)
- matrix containing estimated sources (must have same shape as
- reference_sources)
- compute_permutation : bool, optional
- compute permutation of estimate/source combinations (True by default)
+ matrix containing estimated sources
Returns
-------
@@ -144,7 +145,6 @@ def bss_eval_sources(reference_sources, estimated_sources,
vector containing the best ordering of estimated sources in
the mean SIR sense (estimated source number perm[j] corresponds to
true source number j)
- Note: perm will be [0, 1, ..., nsrc-1] if compute_permutation is False
"""
@@ -161,143 +161,28 @@ def bss_eval_sources(reference_sources, estimated_sources,
nsrc = estimated_sources.shape[0]
- # does user desire permutations?
- if compute_permutation:
- # compute criteria for all possible pair matches
- sdr = np.empty((nsrc, nsrc))
- sir = np.empty((nsrc, nsrc))
- sar = np.empty((nsrc, nsrc))
- for jest in range(nsrc):
- for jtrue in range(nsrc):
- s_true, e_spat, e_interf, e_artif = \
- _bss_decomp_mtifilt(reference_sources,
- estimated_sources[jest],
- jtrue, 512)
- sdr[jest, jtrue], sir[jest, jtrue], sar[jest, jtrue] = \
- _bss_source_crit(s_true, e_spat, e_interf, e_artif)
-
- # select the best ordering
- perms = list(itertools.permutations(list(range(nsrc))))
- mean_sir = np.empty(len(perms))
- dum = np.arange(nsrc)
- for (i, perm) in enumerate(perms):
- mean_sir[i] = np.mean(sir[perm, dum])
- popt = perms[np.argmax(mean_sir)]
- idx = (popt, dum)
- return (sdr[idx], sir[idx], sar[idx], np.asarray(popt))
- else:
- # compute criteria for only the simple correspondence
- # (estimate 1 is estimate corresponding to reference source 1, etc.)
- sdr = np.empty(nsrc)
- sir = np.empty(nsrc)
- sar = np.empty(nsrc)
- for j in range(nsrc):
+ # compute criteria for all possible pair matches
+ sdr = np.empty((nsrc, nsrc))
+ sir = np.empty((nsrc, nsrc))
+ sar = np.empty((nsrc, nsrc))
+ for jest in range(nsrc):
+ for jtrue in range(nsrc):
s_true, e_spat, e_interf, e_artif = \
_bss_decomp_mtifilt(reference_sources,
- estimated_sources[j],
- j, 512)
- sdr[j], sir[j], sar[j] = \
+ estimated_sources[jest],
+ jtrue, 512)
+ sdr[jest, jtrue], sir[jest, jtrue], sar[jest, jtrue] = \
_bss_source_crit(s_true, e_spat, e_interf, e_artif)
- # return the default permutation for compatibility
- popt = np.arange(nsrc)
- return (sdr, sir, sar, popt)
-
-
-def bss_eval_sources_framewise(reference_sources, estimated_sources,
- window=30*44100, hop=15*44100,
- compute_permutation=False):
- """Framewise computation of bss_eval_sources
-
- NOTE: if reference_sources and estimated_sources would be evaluated using
- only a single window or are shorter than the window length, the result
- of bss_eval_sources called on reference_sources and estimated_sources (with
- the compute_permutation parameter passed to bss_eval_sources) is returned
-
- Examples
- --------
- >>> # reference_sources[n] should be an ndarray of samples of the
- >>> # n'th reference source
- >>> # estimated_sources[n] should be the same for the n'th estimated
- >>> # source
- >>> (sdr, sir, sar,
- ... perm) = mir_eval.separation.bss_eval_sources_framewise(
- reference_sources,
- ... estimated_sources)
-
- Parameters
- ----------
- reference_sources : np.ndarray, shape=(nsrc, nsampl)
- matrix containing true sources (must have the same shape as
- estimated_sources)
- estimated_sources : np.ndarray, shape=(nsrc, nsampl)
- matrix containing estimated sources (must have the same shape as
- reference_sources)
- window : int, optional
- Window length for framewise evaluation (default value is 30s at a
- sample rate of 44.1kHz)
- hop : int, optionals
- Hop size for framewise evaluation (default value is 15s at a
- sample rate of 44.1kHz)
- compute_permutation : bool, optional
- compute permutation of estimate/source combinations for all windows
- (False by default)
-
- Returns
- -------
- sdr : np.ndarray, shape=(nsrc, nframes)
- vector of Signal to Distortion Ratios (SDR)
- sir : np.ndarray, shape=(nsrc, nframes)
- vector of Source to Interference Ratios (SIR)
- sar : np.ndarray, shape=(nsrc, nframes)
- vector of Sources to Artifacts Ratios (SAR)
- perm : np.ndarray, shape=(nsrc, nframes)
- vector containing the best ordering of estimated sources in
- the mean SIR sense (estimated source number perm[j] corresponds to
- true source number j)
- Note: perm will be range(nsrc) for all windows if compute_permutation
- is False
-
- """
-
- # make sure the input is of shape (nsrc, nsampl)
- if estimated_sources.ndim == 1:
- estimated_sources = estimated_sources[np.newaxis, :]
- if reference_sources.ndim == 1:
- reference_sources = reference_sources[np.newaxis, :]
-
- validate(reference_sources, estimated_sources)
- # If empty matrices were supplied, return empty lists (special case)
- if reference_sources.size == 0 or estimated_sources.size == 0:
- return np.array([]), np.array([]), np.array([]), np.array([])
-
- nsrc = reference_sources.shape[0]
-
- nwin = int(
- np.floor((reference_sources.shape[1] - window + hop) / hop)
- )
- # if fewer than 2 windows would be evaluated, return the sources result
- if nwin < 2:
- return bss_eval_sources(reference_sources,
- estimated_sources,
- compute_permutation)
-
- # compute the criteria across all windows
- sdr = np.empty((nsrc, nwin))
- sir = np.empty((nsrc, nwin))
- sar = np.empty((nsrc, nwin))
- perm = np.empty((nsrc, nwin))
-
- # k iterates across all the windows
- for k in range(nwin):
- win_slice = slice(k * hop, k * hop + window)
- sdr[:, k], sir[:, k], sar[:, k], perm[:, k] = bss_eval_sources(
- reference_sources[:, win_slice],
- estimated_sources[:, win_slice],
- compute_permutation
- )
-
- return sdr, sir, sar, perm
+ # select the best ordering
+ perms = list(itertools.permutations(range(nsrc)))
+ mean_sir = np.empty(len(perms))
+ dum = np.arange(nsrc)
+ for (i, perm) in enumerate(perms):
+ mean_sir[i] = np.mean(sir[perm, dum])
+ popt = perms[np.argmax(mean_sir)]
+ idx = (popt, dum)
+ return (sdr[idx], sir[idx], sar[idx], popt)
def _bss_decomp_mtifilt(reference_sources, estimated_source, j, flen):
@@ -474,26 +359,13 @@ def evaluate(reference_sources, estimated_sources, **kwargs):
# Compute all the metrics
scores = collections.OrderedDict()
- sdr, sir, sar, perm = util.filter_kwargs(
- bss_eval_sources_framewise,
- reference_sources,
- estimated_sources,
- **kwargs
- )
- scores['Sources Frames - Source to Distortion'] = sdr.tolist()
- scores['Sources Frames - Source to Interference'] = sir.tolist()
- scores['Sources Frames - Source to Artifact'] = sar.tolist()
- scores['Sources Frames - Source permutation'] = perm
-
- sdr, sir, sar, perm = util.filter_kwargs(
- bss_eval_sources,
- reference_sources,
- estimated_sources,
- **kwargs
- )
- scores['Sources - Source to Distortion'] = sdr.tolist()
- scores['Sources - Source to Interference'] = sir.tolist()
- scores['Sources - Source to Artifact'] = sar.tolist()
- scores['Sources - Source permutation'] = perm
+ sdr, sir, sar, perm = util.filter_kwargs(bss_eval_sources,
+ reference_sources,
+ estimated_sources, **kwargs)
+
+ scores['Source to Distortion'] = sdr.tolist()
+ scores['Source to Interference'] = sir.tolist()
+ scores['Source to Artifact'] = sar.tolist()
+ scores['Source permutation'] = perm
return scores
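
For reference, the context-manager idiom this patch threads through `io.py` can be exercised on its own. The sketch below mirrors the `_open` helper added above (renamed `open_file_or_str` per the review comment); the demo input is made up, and `six` is assumed to be available as it already is in `mir_eval`:

```python
import contextlib
import io

import six


@contextlib.contextmanager
def open_file_or_str(file_or_str, **kwargs):
    """Yield a readable file object, opening `file_or_str` when it is a path.

    Mirrors the `_open` helper in the patch above. A file-like argument is
    yielded as-is (the caller keeps ownership of the handle); a path is
    opened, and the `with` statement guarantees the descriptor is closed
    even when the body raises.
    """
    if hasattr(file_or_str, 'read'):
        yield file_or_str
    elif isinstance(file_or_str, six.string_types):
        with open(file_or_str, **kwargs) as file_desc:
            yield file_desc
    else:
        raise IOError('Invalid file-or-str object: {}'.format(file_or_str))


# Both call styles behave identically; the made-up contents stand in for
# an annotation file.
with open_file_or_str(io.StringIO(u'0.0 1.0\n1.0 2.0\n'), mode='r') as fdesc:
    print(fdesc.read())
```

This removes the `own_fh` bookkeeping the old code needed: there is no close path to forget, because `contextlib.contextmanager` produces an object whose `__exit__` runs however the `with` body exits.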
 | use context managers for open file handles
In ```io.py```, ```load_delimited``` and ```load_ragged_time_series```:
[This line](https://github.com/craffel/mir_eval/blob/master/mir_eval/io.py#L59) could leak a file descriptor if an error is raised anywhere below it (before the call to close). Replace it with ```with my_open(buf_or_str, 'r') as fdisc: ....``` | craffel/mir_eval | diff --git a/tests/data/separation/output00.json b/tests/data/separation/output00.json
index d39c46e..e654605 100644
--- a/tests/data/separation/output00.json
+++ b/tests/data/separation/output00.json
@@ -1,123 +1,1 @@
-{"Sources":{"Source to Distortion": [8.074516496485167, 2.3238107088056337, 10.59298942747381], "Source to Interference": [10.511510621253269, 6.414675884826599, 16.689700107719524], "Source to Artifact": [12.1153186579709, 5.3626402019988815, 11.90938782345642], "Source permutation": [0, 1, 2]},
-"Framewise":
- {
- "Source to Distortion": [
- [
- 7.003379175140978,
- 6.653586221697303,
- 12.237134757057607,
- 11.833631549135184,
- 9.14533484841669,
- 9.769401126143185,
- 10.818726521923772
- ],
- [
- 5.259477271768409,
- 1.0870444743607524,
- 2.7595031859487555,
- 4.8213034562456265,
- 2.335285351280622,
- 1.287670185831756,
- 2.208336942144048
- ],
- [
- 11.021545369187734,
- 12.881535872489314,
- 16.164826936020013,
- 14.279785796366973,
- 7.8346197629189485,
- 9.130776047100174,
- 14.366434227665735
- ]
- ],
- "Source to Interference": [
- [
- 8.016968293220785,
- 7.4460177802319185,
- 14.1805514008335,
- 13.557483756968399,
- 10.957166114172953,
- 10.914280057498356,
- 11.380123870369651
- ],
- [
- 6.95226839367966,
- 2.265997677262244,
- 3.901721704192529,
- 6.001650769645548,
- 4.4166458085838025,
- 2.6699249332943262,
- 3.663576978586449
- ],
- [
- 12.806427187790558,
- 13.849668966610674,
- 17.160438398807386,
- 15.404242628795954,
- 11.517641634723681,
- 11.42593105936697,
- 15.980085734372054
- ]
- ],
- "Source to Artifact": [
- [
- 14.4561481683209,
- 15.151016426672175,
- 16.82761826923016,
- 16.867269335693113,
- 14.151589455554511,
- 16.457842011642533,
- 20.28663151827989
- ],
- [
- 10.968298151061472,
- 9.349515922226328,
- 10.602070035628559,
- 12.028760168115767,
- 7.869632319403728,
- 8.80980474820171,
- 9.218038247020408
- ],
- [
- 15.967034050213103,
- 20.0505015871559,
- 23.132788599190015,
- 20.821666264926254,
- 10.558458821112227,
- 13.299652158016828,
- 19.556283795641203
- ]
- ],
- "Source permutation": [
- [
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0
- ],
- [
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0
- ],
- [
- 2.0,
- 2.0,
- 2.0,
- 2.0,
- 2.0,
- 2.0,
- 2.0
- ]
- ],
- "win": 2000,
- "hop": 1000
- }
-}
+{"Source to Distortion": [8.074516496485167, 2.3238107088056337, 10.59298942747381], "Source to Interference": [10.511510621253269, 6.414675884826599, 16.689700107719524], "Source to Artifact": [12.1153186579709, 5.3626402019988815, 11.90938782345642], "Source permutation": [0, 1, 2]}
\ No newline at end of file
diff --git a/tests/data/separation/output01.json b/tests/data/separation/output01.json
index 37493e7..a4c9ca9 100644
--- a/tests/data/separation/output01.json
+++ b/tests/data/separation/output01.json
@@ -1,87 +1,1 @@
-{"Sources":{"Source to Distortion": [1.6357250261616694, 8.35730768374355], "Source to Interference": [2.869614536623785, 19.596668518613885], "Source to Artifact": [9.511469033244348, 8.744100747692844], "Source permutation": [0, 1]},
-"Framewise":
- {
- "Source to Distortion": [
- [
- 5.149669474914772,
- 3.1488100796540404,
- 1.6539826780575286,
- 2.8748397843157396,
- 2.6745263013094145,
- 2.5542948621892525,
- 0.41438689453916433
- ],
- [
- 8.140962974194387,
- 9.168101838553662,
- 9.20529893957309,
- 8.556308362721357,
- 18.666441030639284,
- 16.815137099504994,
- 10.585218780016422
- ]
- ],
- "Source to Interference": [
- [
- 7.904800707297028,
- 4.0570038445068395,
- 2.22137218148402,
- 3.61373944766269,
- 3.149070649891711,
- 3.1501808657513037,
- 1.8032413005929873
- ],
- [
- 15.402937023765805,
- 13.875470944863594,
- 16.175846921346125,
- 14.675806974690932,
- 22.988234803441774,
- 20.54239992419536,
- 15.217408852352577
- ]
- ],
- "Source to Artifact": [
- [
- 9.08313970096718,
- 11.83030341933345,
- 12.813737665972784,
- 12.499974926704931,
- 14.239859841858657,
- 13.189827403144259,
- 8.243205007241274
- ],
- [
- 9.167978020038662,
- 11.135668579824056,
- 10.282717737529854,
- 9.918738648986986,
- 20.69256955203501,
- 19.24839646750305,
- 12.546051958209466
- ]
- ],
- "Source permutation": [
- [
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0
- ],
- [
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0
- ]
- ],
- "win": 2000,
- "hop": 1000
- }
-}
+{"Source to Distortion": [1.6357250261616694, 8.35730768374355], "Source to Interference": [2.869614536623785, 19.596668518613885], "Source to Artifact": [9.511469033244348, 8.744100747692844], "Source permutation": [0, 1]}
\ No newline at end of file
diff --git a/tests/data/separation/output02.json b/tests/data/separation/output02.json
index d01a8dc..8e8a2fc 100644
--- a/tests/data/separation/output02.json
+++ b/tests/data/separation/output02.json
@@ -1,147 +1,1 @@
-{"Sources":{"Source to Distortion": [10.402442396290404, 11.615179897174547, 8.213820164496585], "Source to Interference": [13.688266627376017, 15.194137115835119, 10.084397177627933], "Source to Artifact": [13.335559184910213, 14.252119758894345, 13.179999062945116], "Source permutation": [2, 0, 1]},
-"Framewise":
- {
- "Source to Distortion": [
- [
- -3.3683586251724176,
- -0.4258131004730029,
- 5.737954384287263,
- 13.496549686251882,
- 11.133937944745513,
- 11.744969792989144,
- 8.783665593807779,
- 0.10586742903207497,
- -3.392353303563334
- ],
- [
- 4.114269729136045,
- 3.6731744276418876,
- 4.706638787684438,
- 4.608061014987869,
- 2.501270404894758,
- 3.7095657831177444,
- 1.3676712483128473,
- 2.956226287243273,
- 5.2440986381143295
- ],
- [
- 16.978354236989194,
- 17.741897091793426,
- 14.577608333324958,
- 12.739000711173317,
- 2.2621375012667193,
- 1.6333311475483014,
- 1.2550203261532684,
- 1.6631735614600018,
- -1.1497584991656096
- ]
- ],
- "Source to Interference": [
- [
- -3.361311383159377,
- -0.41805884675415195,
- 5.750722251019655,
- 13.873877733477302,
- 11.422785774638367,
- 11.873782070113656,
- 9.384159481188938,
- 0.14357942689956213,
- -3.3915520815178564
- ],
- [
- 4.161081265635496,
- 3.6783663733941028,
- 4.713902939683422,
- 4.6264150711234775,
- 2.5491113448998397,
- 3.8484107075454532,
- 1.4468443722471211,
- 2.981135208402449,
- 5.28057243985441
- ],
- [
- 17.03250354285748,
- 17.84514324717382,
- 14.647512686549733,
- 12.895973966261625,
- 2.298647095986888,
- 1.689623136425914,
- 1.3021291134452864,
- 1.7020151192991826,
- -1.1403297080800905
- ]
- ],
- "Source to Artifact": [
- [
- 29.54115930781021,
- 30.284864375869876,
- 32.085430463346434,
- 24.4689397612799,
- 23.350922624102438,
- 27.360805465204532,
- 18.14687428019694,
- 23.676861051371574,
- 38.977423833872535
- ],
- [
- 25.22211095634335,
- 34.44975192272571,
- 33.740050065681054,
- 29.643799086772052,
- 24.025161181124336,
- 20.230457095212245,
- 21.146116053327013,
- 27.153624667590456,
- 27.147893385072148
- ],
- [
- 36.1324813954066,
- 34.10327241414194,
- 32.691764718319675,
- 27.454243214145258,
- 25.045465569457566,
- 22.781967782806024,
- 23.333232859196787,
- 24.409599008507577,
- 29.106022947569375
- ]
- ],
- "Source permutation": [
- [
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0
- ],
- [
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0
- ],
- [
- 2.0,
- 2.0,
- 2.0,
- 2.0,
- 2.0,
- 2.0,
- 2.0,
- 2.0,
- 2.0
- ]
- ],
- "win": 1200,
- "hop": 800
- }
-}
+{"Source to Distortion": [10.402442396290404, 11.615179897174547, 8.213820164496585], "Source to Interference": [13.688266627376017, 15.194137115835119, 10.084397177627933], "Source to Artifact": [13.335559184910213, 14.252119758894345, 13.179999062945116], "Source permutation": [2, 0, 1]}
\ No newline at end of file
diff --git a/tests/data/separation/output03.json b/tests/data/separation/output03.json
index b00ad69..6b0bf8c 100644
--- a/tests/data/separation/output03.json
+++ b/tests/data/separation/output03.json
@@ -1,159 +1,1 @@
-{"Sources":{"Source to Distortion": [7.227899883748847, -4.463066010999018, 1.2821108384488062, 12.080451610955636], "Source to Interference": [9.099154153186786, -0.6395992455508694, 4.933804919293891, 18.322102002191336], "Source to Artifact": [12.29049343816683, 1.2044404039163803, 4.942972769893187, 13.322032058619765], "Source permutation": [0, 1, 2, 3]},
-"Framewise":
- {
- "Source to Distortion": [
- [
- 6.334331918339612,
- 5.669633027022265,
- 9.514464186545139,
- 10.71164392578315,
- 9.297428445800008,
- 8.771713568590616,
- 10.403935522673704
- ],
- [
- -8.513135611073594,
- -8.064798819888791,
- -8.251509304933107,
- -7.850194258047162,
- -12.458178865642155,
- -4.6179776078665995,
- -2.644615618710851
- ],
- [
- 3.9540843403165615,
- -3.0720269164432774,
- -12.10520682558961,
- 11.990534899929347,
- 4.810319300991273,
- -0.47816637025080955,
- -5.219895828058016
- ],
- [
- 12.060237250059458,
- 13.935491387386808,
- 12.426917555156134,
- 13.636511936766013,
- 15.425723921844167,
- 14.704418986550998,
- 14.483143404404505
- ]
- ],
- "Source to Interference": [
- [
- 6.517511430418773,
- 5.811584235204686,
- 9.584118567039901,
- 12.0535044371654,
- 9.757649467411412,
- 8.950021021537328,
- 10.46565243486915
- ],
- [
- -7.063426513233857,
- -6.919588115314706,
- -7.0510675813016475,
- -6.414204157657953,
- -11.905715000061399,
- -4.197909156233509,
- -2.4813855006475842
- ],
- [
- 4.141014910687602,
- -3.0662821416570836,
- -10.876089318848761,
- 13.1396178007717,
- 5.717548749645411,
- 0.2376957317518501,
- -4.954793379034611
- ],
- [
- 12.922416671673943,
- 14.906538658441146,
- 13.278126105039757,
- 14.832517357389065,
- 15.848218198376067,
- 15.42135925531434,
- 15.706812594153735
- ]
- ],
- "Source to Artifact": [
- [
- 21.048836346016834,
- 21.608583262197634,
- 27.951027125793622,
- 16.72864879954062,
- 19.710211838041786,
- 23.24729692355166,
- 29.2821572605892
- ],
- [
- 4.79964549937246,
- 6.007384762964339,
- 5.752039211870982,
- 4.961749710617883,
- 8.947021685277118,
- 11.332878323275246,
- 16.112437890835338
- ],
- [
- 19.123969289544746,
- 30.52457226127728,
- 5.194061850315177,
- 18.53256678532119,
- 13.088137800677963,
- 10.597476679719989,
- 13.214694804585587
- ],
- [
- 19.72218257905594,
- 21.055515792722215,
- 20.12256294009786,
- 19.96175152988701,
- 25.866418972581066,
- 23.003854645101143,
- 20.69699250545369
- ]
- ],
- "Source permutation": [
- [
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0
- ],
- [
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0
- ],
- [
- 2.0,
- 2.0,
- 2.0,
- 2.0,
- 2.0,
- 2.0,
- 2.0
- ],
- [
- 3.0,
- 3.0,
- 3.0,
- 3.0,
- 3.0,
- 3.0,
- 3.0
- ]
- ],
- "win": 2000,
- "hop": 1000
- }
-}
+{"Source to Distortion": [7.227899883748847, -4.463066010999018, 1.2821108384488062, 12.080451610955636], "Source to Interference": [9.099154153186786, -0.6395992455508694, 4.933804919293891, 18.322102002191336], "Source to Artifact": [12.29049343816683, 1.2044404039163803, 4.942972769893187, 13.322032058619765], "Source permutation": [0, 1, 2, 3]}
\ No newline at end of file
diff --git a/tests/data/separation/output04.json b/tests/data/separation/output04.json
index e5cfb2e..cc77c23 100644
--- a/tests/data/separation/output04.json
+++ b/tests/data/separation/output04.json
@@ -1,171 +1,1 @@
-{"Sources":{"Source to Distortion": [-2.7660575557183185, 7.422147051733475, 9.263472486410315], "Source to Interference": [-1.8344520578819126, 12.049225023575314, 12.996500884384705], "Source to Artifact": [8.4006174794115, 9.51977911283253, 11.866882959029592], "Source permutation": [0, 1, 2]},
-"Framewise":
- {
- "Source to Distortion": [
- [
- -8.860597417628203,
- -24.10090894772071,
- 7.04876196909683,
- 4.057992735632969,
- 5.6161336151044905,
- 4.116028133023433,
- 7.137264114222281,
- 6.636553746354682,
- 12.671565105493684,
- 1.9583919993494792,
- 11.069899473115337
- ],
- [
- 9.926523346344258,
- 9.987905896373597,
- 7.564523016796219,
- 7.945669353150655,
- 10.1539600741427,
- 12.082487584834462,
- 12.2727939834995,
- 15.649761269420077,
- 13.083103548628648,
- 8.845073839026895,
- 9.92613048474913
- ],
- [
- 6.406157295298719,
- 11.062787420473574,
- 11.28985547419708,
- 12.308004177812876,
- 15.624813247889296,
- 17.142738926831534,
- 16.14316407043284,
- 12.956929782952958,
- 13.371332285847053,
- 10.264216790139692,
- 7.2072926453065165
- ]
- ],
- "Source to Interference": [
- [
- -8.829303699736036,
- -23.480971584617542,
- 7.059521904992345,
- 4.133735396275103,
- 5.838892506631115,
- 4.493799198471704,
- 7.9676787726683465,
- 6.8886566706296515,
- 13.015310310122434,
- 2.008554702262554,
- 11.094100774894205
- ],
- [
- 10.756786152273978,
- 10.371661990150942,
- 7.689352089948397,
- 8.247678199534551,
- 10.274758588402717,
- 12.403974605169143,
- 12.525958494815628,
- 15.947799336792347,
- 13.191164677085126,
- 8.99559445141815,
- 9.95304241751627
- ],
- [
- 6.953158659593384,
- 11.777345856069614,
- 11.87402860507859,
- 12.696043304602817,
- 15.846527057082639,
- 17.56302335695188,
- 17.372991193012698,
- 13.29305658664679,
- 13.426441220326566,
- 10.445126096419328,
- 7.232275788138401
- ]
- ],
- "Source to Artifact": [
- [
- 21.942007741639706,
- 8.160150539271873,
- 33.89414025641505,
- 23.097987925406912,
- 19.632584539349377,
- 16.229520360723676,
- 15.37410685616707,
- 19.93287690106841,
- 24.069435281296478,
- 23.478601553224365,
- 33.946550973306174
- ],
- [
- 17.871055928174687,
- 21.09708482129742,
- 23.72422772257833,
- 20.279304037871118,
- 26.16110133402111,
- 23.7912164804018,
- 24.978800046122483,
- 27.542082756455642,
- 29.381619254014986,
- 24.037462376768236,
- 32.436190488427485
- ],
- [
- 16.472670800178136,
- 19.53189663687499,
- 20.564398946746863,
- 23.21705178956808,
- 28.766682632782498,
- 27.569054390609846,
- 22.301781261939087,
- 24.435558731439023,
- 32.55743586438283,
- 24.532885695704984,
- 30.373463076829687
- ]
- ],
- "Source permutation": [
- [
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0
- ],
- [
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0
- ],
- [
- 2.0,
- 2.0,
- 2.0,
- 2.0,
- 2.0,
- 2.0,
- 2.0,
- 2.0,
- 2.0,
- 2.0,
- 2.0
- ]
- ],
- "win": 1200,
- "hop": 650
- }
-}
+{"Source to Distortion": [-2.7660575557183185, 7.422147051733475, 9.263472486410315], "Source to Interference": [-1.8344520578819126, 12.049225023575314, 12.996500884384705], "Source to Artifact": [8.4006174794115, 9.51977911283253, 11.866882959029592], "Source permutation": [0, 1, 2]}
\ No newline at end of file
diff --git a/tests/data/separation/output05.json b/tests/data/separation/output05.json
index eae6c2a..052d99a 100644
--- a/tests/data/separation/output05.json
+++ b/tests/data/separation/output05.json
@@ -1,123 +1,1 @@
-{"Sources":{"Source to Distortion": [15.989628144732425, 2.92585277037319, 10.390551440343547], "Source to Interference": [19.310156466664875, 5.042019427791443, 13.118530603652601], "Source to Artifact": [18.76100033748606, 8.24665350507413, 13.909608717243112], "Source permutation": [0, 1, 2]},
-"Framewise":
- {
- "Source to Distortion": [
- [
- 11.440536675067579,
- 17.328375913229216,
- 19.246247909082772,
- 19.15107868850007,
- 23.354771324431663,
- 23.2109174572615,
- 18.48919767884879
- ],
- [
- 3.200499507597892,
- 2.4697810981951642,
- -3.834624099549914,
- 7.9313336264114165,
- 11.17292272933065,
- -2.3006149433583194,
- 3.30279924326439
- ],
- [
- 13.12435813742717,
- 12.993318101201163,
- 5.859129776687109,
- 3.147786090782722,
- -8.938596094423708,
- 14.239433380167759,
- 13.548815748136056
- ]
- ],
- "Source to Interference": [
- [
- 12.765247980489129,
- 17.486351217195573,
- 20.940410354949456,
- 20.026300194268384,
- 23.96794059785485,
- 25.230972473267187,
- 19.4020464311528
- ],
- [
- 3.75929678707931,
- 2.5558209152386198,
- -3.2068474186796414,
- 8.55800311839287,
- 11.583077988395162,
- -0.8357643907276845,
- 4.022718590112774
- ],
- [
- 13.999773465737002,
- 13.100223365279911,
- 6.902335250633238,
- 3.6665574675697377,
- -8.731410599686226,
- 16.160022781780587,
- 14.258939589164363
- ]
- ],
- "Source to Artifact": [
- [
- 17.466580806940193,
- 31.875865139417332,
- 24.188953472076662,
- 26.580960148056274,
- 32.17717152062089,
- 27.519079324974953,
- 25.761041518334498
- ],
- [
- 13.907516503437991,
- 21.46132816292758,
- 9.778502652957963,
- 17.2150438419717,
- 21.916364560727757,
- 6.579408542410596,
- 12.911739918453357
- ],
- [
- 20.679969040167208,
- 29.342166172612735,
- 13.370936872608572,
- 14.185660075488357,
- 13.656085168710208,
- 18.811784798091374,
- 21.9234509312971
- ]
- ],
- "Source permutation": [
- [
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0
- ],
- [
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0
- ],
- [
- 2.0,
- 2.0,
- 2.0,
- 2.0,
- 2.0,
- 2.0,
- 2.0
- ]
- ],
- "win": 1600,
- "hop": 1000
- }
-}
+{"Source to Distortion": [15.989628144732425, 2.92585277037319, 10.390551440343547], "Source to Interference": [19.310156466664875, 5.042019427791443, 13.118530603652601], "Source to Artifact": [18.76100033748606, 8.24665350507413, 13.909608717243112], "Source permutation": [0, 1, 2]}
\ No newline at end of file
diff --git a/tests/data/separation/output06.json b/tests/data/separation/output06.json
index 8ac626b..9a0b800 100644
--- a/tests/data/separation/output06.json
+++ b/tests/data/separation/output06.json
@@ -1,199 +1,1 @@
-{"Sources":{"Source to Distortion": [13.78338204929278, 7.250163706520247], "Source to Interference": [16.204235407537556, 11.658569116183275], "Source to Artifact": [17.578736233831776, 9.491272183553724], "Source permutation": [0, 1]},
-"Framewise":
- {
- "Source to Distortion": [
- [
- 15.770861118084023,
- 10.266491728120815,
- 10.616425286260341,
- 10.402612463899505,
- 11.341693557777493,
- 17.977919644661608,
- 20.899809133103066,
- 20.684958728768006,
- 19.81641283106371,
- 15.157352480870586,
- 10.918400740940763,
- 12.794284048570162,
- 16.35341492981471,
- 19.142915379163863,
- 20.356182145650912,
- 19.68432366646042,
- 21.236160511571388,
- 21.498314308873177,
- 21.411517037797086,
- 15.689348142010811,
- 14.973688010752495
- ],
- [
- 10.234148337188175,
- 13.494352305226032,
- 13.261602906653147,
- 10.270234658668533,
- 8.926526735416843,
- 0.7285082470816284,
- -1.445026004087403,
- -0.3491037224516751,
- -3.5361158771361154,
- -4.871801382705655,
- 7.024768329666901,
- 9.435832460228072,
- 10.397105843295142,
- 1.8924601837813337,
- -5.498679269679663,
- -5.707465929170647,
- -3.016408381580664,
- 0.9723598073007549,
- -8.41491576524177,
- 9.75331347871246,
- 11.817349843774206
- ]
- ],
- "Source to Interference": [
- [
- 16.983165650601777,
- 11.066620858416801,
- 11.119816633976619,
- 11.248434408988468,
- 12.128696219803377,
- 19.13596762600305,
- 22.814960093766867,
- 22.86219678218422,
- 24.300128659029884,
- 23.182897730381974,
- 13.635867354293921,
- 13.74969822569331,
- 17.406888721891345,
- 20.68435836131071,
- 23.949389057429066,
- 23.57485883876062,
- 25.266629368983104,
- 22.704300333535944,
- 28.52307118467533,
- 17.130537763017905,
- 16.54037884417558
- ],
- [
- 12.761302701100185,
- 16.725423398871758,
- 15.505315585789596,
- 12.82284038108909,
- 11.569695660595814,
- 3.5198548561320138,
- 0.3365083764479299,
- 1.1806141185060266,
- -0.9230897029461725,
- -2.5762143138651847,
- 9.106024425218664,
- 11.226290236310998,
- 12.707662470216736,
- 3.5750031043137893,
- -4.155211115828419,
- -3.156892835441405,
- 0.8942659663030604,
- 2.39831853815833,
- -5.992697570650662,
- 12.918316411158452,
- 15.40943603382166
- ]
- ],
- "Source to Artifact": [
- [
- 21.99077800512083,
- 18.333746530033405,
- 20.547726687968467,
- 18.23786659984539,
- 19.405526331340425,
- 24.337330971503963,
- 25.400719693560223,
- 24.749387110512938,
- 21.74464595790909,
- 15.92278043653074,
- 14.426776962742522,
- 20.018559739001624,
- 23.099310341295208,
- 24.426373457428518,
- 22.870091804407828,
- 21.982106951716652,
- 23.433829394223462,
- 27.675020583617965,
- 22.35677824908201,
- 21.2639494831869,
- 20.25679399600904
- ],
- [
- 14.012229445929318,
- 16.385904318487157,
- 17.32399989862944,
- 14.013214533201253,
- 12.630371248910073,
- 5.567003858183554,
- 6.130538618398581,
- 7.385045791985439,
- 3.4078156073272456,
- 3.4811516488943166,
- 11.721747364516004,
- 14.464250937066641,
- 14.468754307872985,
- 8.405501929920156,
- 5.818303254181928,
- 2.6865395454609016,
- 1.8346519846706522,
- 8.477254710078228,
- 2.2431515909826487,
- 12.83054983253958,
- 14.437888709785698
- ]
- ],
- "Source permutation": [
- [
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0
- ],
- [
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0
- ]
- ],
- "win": 1400,
- "hop": 700
- }
-}
+{"Source to Distortion": [13.78338204929278, 7.250163706520247], "Source to Interference": [16.204235407537556, 11.658569116183275], "Source to Artifact": [17.578736233831776, 9.491272183553724], "Source permutation": [0, 1]}
\ No newline at end of file
diff --git a/tests/data/separation/output07.json b/tests/data/separation/output07.json
index da11327..c51b627 100644
--- a/tests/data/separation/output07.json
+++ b/tests/data/separation/output07.json
@@ -1,111 +1,1 @@
-{"Sources":{"Source to Distortion": [31.65426748072045, 30.509880888730656], "Source to Interference": [41.534168992046155, 37.73978731132595], "Source to Artifact": [32.12569904260708, 31.421680139098243], "Source permutation": [0, 1]},
-"Framewise":
- {
- "Source to Distortion": [
- [
- 34.80070239467067,
- 26.233964206351303,
- 35.52152001095222,
- 37.491913036007325,
- 34.44692576864385,
- 30.644871381679888,
- 29.53300714715028,
- 34.512840543932896,
- 33.31009727280657,
- 34.36391651078961
- ],
- [
- 26.130863585770637,
- 30.05752044658083,
- 36.18594879129595,
- 34.537511946262526,
- 35.130012109898374,
- 34.565317116657496,
- 34.63959606031987,
- 39.223908390181435,
- 31.063860723680932,
- 29.51190339241336
- ]
- ],
- "Source to Interference": [
- [
- 37.056235077666095,
- 29.04314269432124,
- 38.35924656300556,
- 39.94487087965189,
- 39.60727483981047,
- 36.322356323417075,
- 32.809992822610944,
- 37.373695325278916,
- 34.47844491232085,
- 37.066900409631224
- ],
- [
- 27.845610482037387,
- 34.54337418412345,
- 38.54337124412963,
- 35.20532609957952,
- 37.13331392820961,
- 38.31297112963255,
- 37.57026768557177,
- 41.02765296670008,
- 32.00348725573812,
- 33.393514333858064
- ]
- ],
- "Source to Artifact": [
- [
- 38.725975834383235,
- 29.46056377159172,
- 38.71216923960858,
- 41.142198992353336,
- 36.026084877124205,
- 32.015943551221454,
- 32.29432790970611,
- 37.678707082629735,
- 39.584853266235726,
- 37.70579937932512
- ],
- [
- 31.003032704566586,
- 31.969991606105687,
- 39.96554793807786,
- 42.99973779383219,
- 39.454447341682354,
- 36.946110823960595,
- 37.73177148964179,
- 43.91104655903011,
- 38.176234225694444,
- 31.798816376280424
- ]
- ],
- "Source permutation": [
- [
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0
- ],
- [
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0
- ]
- ],
- "win": 1000,
- "hop": 750
- }
-}
+{"Source to Distortion": [31.65426748072045, 30.509880888730656], "Source to Interference": [41.534168992046155, 37.73978731132595], "Source to Artifact": [32.12569904260708, 31.421680139098243], "Source permutation": [0, 1]}
\ No newline at end of file
diff --git a/tests/data/separation/output08.json b/tests/data/separation/output08.json
index 063e082..c9a89ae 100644
--- a/tests/data/separation/output08.json
+++ b/tests/data/separation/output08.json
@@ -1,147 +1,1 @@
-{"Sources":{"Source to Distortion": [12.658621714161605, 19.353779851187323, 16.490559932201563], "Source to Interference": [16.647925846010466, 24.298418678982788, 19.534653901953313], "Source to Artifact": [14.963487364475371, 21.04662056008085, 19.51540110868908], "Source permutation": [0, 1, 2]},
-"Framewise":
- {
- "Source to Distortion": [
- [
- 16.192865169727398,
- 13.028356898467596,
- 14.893095079349159,
- 13.206184448743779,
- 11.278322194600843,
- 11.149686478954122,
- 13.481957165397043,
- 10.418114868053863,
- 13.680843762014243
- ],
- [
- 19.16928643751411,
- 22.33260979627913,
- 20.88501614523839,
- 23.17546022589123,
- 23.569714393137026,
- 25.417457455374443,
- 22.707319453703875,
- 19.755994742864043,
- 16.47491192002275
- ],
- [
- 20.149927282268454,
- 20.267695322224103,
- 18.496572664869277,
- 16.18703221162405,
- 19.024302145824258,
- 14.190596411379516,
- 16.88279138847638,
- 19.921302938784912,
- 21.2584705532771
- ]
- ],
- "Source to Interference": [
- [
- 16.288295958752727,
- 13.118430955873595,
- 14.930286338199622,
- 13.264972384336719,
- 11.365058006587974,
- 11.200327720655764,
- 13.56547042390435,
- 10.467023286202862,
- 13.865385487811974
- ],
- [
- 19.51303533008609,
- 22.42708511693099,
- 21.00741091217366,
- 23.30046863949658,
- 23.734218387200183,
- 25.514157962740356,
- 22.775316455057478,
- 19.874311587607504,
- 16.583023468025996
- ],
- [
- 20.31754190446126,
- 20.345014205362993,
- 18.562319621923248,
- 16.269550631841422,
- 19.241210680291484,
- 14.207980885183762,
- 16.920950511451522,
- 19.97320737927088,
- 21.36217552372802
- ]
- ],
- "Source to Artifact": [
- [
- 32.92235489483052,
- 30.111966640647857,
- 35.72247463881086,
- 32.12060555291033,
- 28.623592109542486,
- 30.825308450625837,
- 30.870941019057348,
- 30.30003790763802,
- 27.664459179077237
- ],
- [
- 30.403770694506083,
- 39.02918697290886,
- 36.480585812213675,
- 38.66653008995361,
- 37.88613537966818,
- 42.00145811413804,
- 40.81708087956089,
- 35.50687209189353,
- 32.66233068093355
- ],
- [
- 34.40837037041166,
- 37.84121399852104,
- 36.788544740106815,
- 33.54189410422633,
- 32.19879673409346,
- 38.33727533707226,
- 37.55107654103121,
- 39.216506650434496,
- 37.56168867998662
- ]
- ],
- "Source permutation": [
- [
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0
- ],
- [
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0,
- 1.0
- ],
- [
- 2.0,
- 2.0,
- 2.0,
- 2.0,
- 2.0,
- 2.0,
- 2.0,
- 2.0,
- 2.0
- ]
- ],
- "win": 1100,
- "hop": 850
- }
-}
+{"Source to Distortion": [12.658621714161605, 19.353779851187323, 16.490559932201563], "Source to Interference": [16.647925846010466, 24.298418678982788, 19.534653901953313], "Source to Artifact": [14.963487364475371, 21.04662056008085, 19.51540110868908], "Source permutation": [0, 1, 2]}
\ No newline at end of file
diff --git a/tests/data/separation/output09.json b/tests/data/separation/output09.json
index 999a027..09caf52 100644
--- a/tests/data/separation/output09.json
+++ b/tests/data/separation/output09.json
@@ -1,87 +1,1 @@
-{"Sources":{"Source to Distortion": [6.533836035573265], "Source to Interference": [Infinity], "Source to Artifact": [6.533836035573265], "Source permutation": [0]},
-"Framewise":
- {
- "Source to Distortion": [
- [
- 12.119934389970112,
- 9.839136413593899,
- 10.104607545126424,
- 14.83174440067323,
- 10.402492739711457,
- 9.962792788321892,
- 7.591397020480595,
- 6.953836345225294,
- 19.31013868039556,
- 14.789903496557441,
- 11.069772397512526,
- 10.153206550236124,
- 9.717806202451953,
- 7.908023275472441,
- 8.189812854799017,
- 18.577301858851083
- ]
- ],
- "Source to Interference": [
- [
- Infinity,
- Infinity,
- Infinity,
- Infinity,
- Infinity,
- Infinity,
- Infinity,
- Infinity,
- Infinity,
- Infinity,
- Infinity,
- Infinity,
- Infinity,
- Infinity,
- Infinity,
- Infinity
- ]
- ],
- "Source to Artifact": [
- [
- 12.119934389970112,
- 9.839136413593899,
- 10.104607545126424,
- 14.83174440067323,
- 10.402492739711457,
- 9.962792788321892,
- 7.591397020480595,
- 6.953836345225294,
- 19.31013868039556,
- 14.789903496557441,
- 11.069772397512526,
- 10.153206550236124,
- 9.717806202451953,
- 7.908023275472441,
- 8.189812854799017,
- 18.577301858851083
- ]
- ],
- "Source permutation": [
- [
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0,
- 0.0
- ]
- ],
- "win": 500,
- "hop": 500
- }
-}
+{"Source to Distortion": [6.533836035573265], "Source to Interference": [Infinity], "Source to Artifact": [6.533836035573265], "Source permutation": [0]}
\ No newline at end of file
diff --git a/tests/test_input_output.py b/tests/test_input_output.py
index 5d51261..87074d4 100644
--- a/tests/test_input_output.py
+++ b/tests/test_input_output.py
@@ -11,7 +11,7 @@ import tempfile
def test_load_delimited():
# Test for ValueError when a non-string or file handle is passed
nose.tools.assert_raises(
- ValueError, mir_eval.io.load_delimited, None, [int])
+ IOError, mir_eval.io.load_delimited, None, [int])
# Test for a value error when the wrong number of columns is passed
with tempfile.TemporaryFile('r+') as f:
f.write('10 20')
@@ -119,7 +119,7 @@ def test_load_valued_intervals():
def test_load_ragged_time_series():
# Test for ValueError when a non-string or file handle is passed
nose.tools.assert_raises(
- ValueError, mir_eval.io.load_ragged_time_series, None, float,
+ IOError, mir_eval.io.load_ragged_time_series, None, float,
header=False)
# Test for a value error on conversion failure
with tempfile.TemporaryFile('r+') as f:
diff --git a/tests/test_separation.py b/tests/test_separation.py
index 7db01c7..9c51205 100644
--- a/tests/test_separation.py
+++ b/tests/test_separation.py
@@ -34,15 +34,11 @@ def __load_and_stack_wavs(directory):
return np.vstack(stacked_audio_data)
-def __unit_test_empty_input(metric):
- if metric == mir_eval.separation.bss_eval_sources:
- args = [np.array([]), np.array([])]
- elif metric == mir_eval.separation.bss_eval_sources_framewise:
- args = [np.array([]), np.array([]), 40, 20]
+def __unit_test_separation_function(metric):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
# First, test for a warning on empty audio data
- metric(*args)
+ metric(np.array([]), np.array([]))
assert len(w) == 2
assert issubclass(w[-1].category, UserWarning)
assert str(w[-1].message) == ("estimated_sources is empty, "
@@ -50,77 +46,26 @@ def __unit_test_empty_input(metric):
"sdr, sir, sar, and perm will all be "
"empty np.ndarrays")
# And that the metric returns empty arrays
- assert np.allclose(metric(*args), np.array([]))
+ assert np.allclose(metric(np.array([]), np.array([])), np.array([]))
-
-def __unit_test_silent_input(metric):
# Test for error when there is a silent reference/estimated source
ref_sources = np.vstack((np.zeros(100),
np.random.random_sample((2, 100))))
est_sources = np.vstack((np.zeros(100),
np.random.random_sample((2, 100))))
- if metric == mir_eval.separation.bss_eval_sources:
- nose.tools.assert_raises(ValueError, metric, ref_sources[:2],
- est_sources[1:])
- nose.tools.assert_raises(ValueError, metric, ref_sources[1:],
- est_sources[:2])
- elif metric == mir_eval.separation.bss_eval_sources_framewise:
- nose.tools.assert_raises(ValueError, metric, ref_sources[:2],
- est_sources[1:], 40, 20)
- nose.tools.assert_raises(ValueError, metric, ref_sources[1:],
- est_sources[:2], 40, 20)
-
-
-def __unit_test_incompatible_shapes(metric):
+ nose.tools.assert_raises(ValueError, metric, ref_sources[:2],
+ est_sources[1:])
+ nose.tools.assert_raises(ValueError, metric, ref_sources[1:],
+ est_sources[:2])
+
# Test for error when shape is different
- sources_4 = np.random.random_sample((4, 100))
- sources_3 = np.random.random_sample((3, 100))
- if metric == mir_eval.separation.bss_eval_sources:
- args1 = [sources_3, sources_4]
- args2 = [sources_4, sources_3]
- elif metric == mir_eval.separation.bss_eval_sources_framewise:
- args1 = [sources_3, sources_4, 40, 20]
- args2 = [sources_4, sources_3, 40, 20]
- nose.tools.assert_raises(ValueError, metric, *args1)
- nose.tools.assert_raises(ValueError, metric, *args2)
-
-
-def __unit_test_too_many_sources(metric):
- # Test for error when too many sources or references are provided
- many_sources = np.random.random_sample((mir_eval.separation.MAX_SOURCES*2,
- 400))
- if metric == mir_eval.separation.bss_eval_sources:
- nose.tools.assert_raises(ValueError, metric, many_sources,
- many_sources)
- elif metric == mir_eval.separation.bss_eval_sources_framewise:
- nose.tools.assert_raises(ValueError, metric, many_sources,
- many_sources, 40, 20)
-
-
-def __unit_test_default_permutation(metric):
- # Test for default permutation matrix when not computing permutation
ref_sources = np.random.random_sample((4, 100))
- est_sources = np.random.random_sample((4, 100))
- results = metric(ref_sources, est_sources, compute_permutation=False)
- assert np.array_equal(results[-1], np.asarray([0, 1, 2, 3]))
+ est_sources = np.random.random_sample((3, 100))
+ nose.tools.assert_raises(ValueError, metric, ref_sources, est_sources)
-
-def __unit_test_framewise_small_window(metric):
- # Test for invalid win/hop parameter detection
- ref_sources = np.random.random_sample((4, 100))
- est_sources = np.random.random_sample((4, 100))
- # Rest with window larger than source lengths
- assert np.allclose(metric(ref_sources, est_sources, window=120, hop=20),
- mir_eval.separation.bss_eval_sources(ref_sources,
- est_sources,
- False),
- atol=A_TOL)
- # Test with hop larger than source length
- assert np.allclose(metric(ref_sources, est_sources, window=20, hop=120),
- mir_eval.separation.bss_eval_sources(ref_sources,
- est_sources,
- False),
- atol=A_TOL)
+ # Test for error when too many sources are provided
+ sources = np.random.random_sample((mir_eval.separation.MAX_SOURCES*2, 400))
+ nose.tools.assert_raises(ValueError, metric, sources, sources)
def __check_score(sco_f, metric, score, expected_score):
@@ -136,39 +81,19 @@ def test_separation_functions():
assert len(ref_files) == len(est_files) == len(sco_files) > 0
# Unit tests
- for metric in [mir_eval.separation.bss_eval_sources,
- mir_eval.separation.bss_eval_sources_framewise]:
- yield (__unit_test_empty_input, metric)
- yield (__unit_test_silent_input, metric)
- yield (__unit_test_incompatible_shapes, metric)
- yield (__unit_test_too_many_sources, metric)
for metric in [mir_eval.separation.bss_eval_sources]:
- yield (__unit_test_default_permutation, metric)
- for metric in [mir_eval.separation.bss_eval_sources_framewise]:
- yield (__unit_test_framewise_small_window, metric)
+ yield (__unit_test_separation_function, metric)
# Regression tests
for ref_f, est_f, sco_f in zip(ref_files, est_files, sco_files):
with open(sco_f, 'r') as f:
- expected_results = json.load(f)
- expected_scores = expected_results['Sources']
- expected_frames = expected_results['Framewise']
+ expected_scores = json.load(f)
# Load in example source separation data
ref_sources = __load_and_stack_wavs(ref_f)
est_sources = __load_and_stack_wavs(est_f)
# Compute scores
- scores = mir_eval.separation.evaluate(
- ref_sources, est_sources,
- window=expected_frames['win'], hop=expected_frames['hop']
- )
+ scores = mir_eval.separation.evaluate(ref_sources, est_sources)
# Compare them
for metric in scores:
- if 'Sources - ' in metric:
- test_data_name = metric.replace('Sources - ', '')
- # This is a simple hack to make nosetest's messages more useful
- yield (__check_score, sco_f, metric, scores[metric],
- expected_scores[test_data_name])
- elif 'Sources Frames - ' in metric:
- test_data_name = metric.replace('Sources Frames - ', '')
- # This is a simple hack to make nosetest's messages more useful
- yield (__check_score, sco_f, metric, scores[metric],
- expected_frames[test_data_name])
+ # This is a simple hack to make nosetest's messages more useful
+ yield (__check_score, sco_f, metric, scores[metric],
+ expected_scores[metric])
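
The comparison idiom this test patch keeps is nose's test-generator pattern: expected scores are stored as JSON next to the audio, and one check is yielded per metric so failures are reported individually. A trimmed, self-contained sketch — the tolerance, file layout, and the `scores` stand-in are assumptions for illustration:

```python
import glob
import json

import numpy as np

A_TOL = 1e-12  # placeholder; the real tolerance is defined in the test module


def __check_score(sco_f, metric, score, expected_score):
    # One assertion per metric keeps nose's failure messages specific.
    assert np.allclose(score, expected_score, atol=A_TOL), \
        'Mismatch for {} in {}'.format(metric, sco_f)


def test_separation_scores():
    for sco_f in sorted(glob.glob('tests/data/separation/output*.json')):
        with open(sco_f, 'r') as f:
            expected_scores = json.load(f)
        # Stand-in: the real test computes `scores` with
        # mir_eval.separation.evaluate(ref_sources, est_sources).
        scores = expected_scores
        for metric in scores:
            yield __check_score, sco_f, metric, scores[metric], \
                expected_scores[metric]
```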
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 3
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[display]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"nose-cov",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | contourpy==1.3.0
cov-core==1.15.0
coverage==7.8.0
cycler==0.12.1
exceptiongroup==1.2.2
fonttools==4.56.0
future==1.0.0
importlib_resources==6.5.2
iniconfig==2.1.0
kiwisolver==1.4.7
matplotlib==3.9.4
-e git+https://github.com/craffel/mir_eval.git@4425853f27d9039bd48c985fe4ef0c29b53a9385#egg=mir_eval
nose==1.3.7
nose-cov==1.6
numpy==2.0.2
packaging==24.2
pillow==11.1.0
pluggy==1.5.0
pyparsing==3.2.3
pytest==8.3.5
python-dateutil==2.9.0.post0
scipy==1.13.1
six==1.17.0
tomli==2.2.1
zipp==3.21.0
| name: mir_eval
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- contourpy==1.3.0
- cov-core==1.15.0
- coverage==7.8.0
- cycler==0.12.1
- exceptiongroup==1.2.2
- fonttools==4.56.0
- future==1.0.0
- importlib-resources==6.5.2
- iniconfig==2.1.0
- kiwisolver==1.4.7
- matplotlib==3.9.4
- nose==1.3.7
- nose-cov==1.6
- numpy==2.0.2
- packaging==24.2
- pillow==11.1.0
- pluggy==1.5.0
- pyparsing==3.2.3
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- scipy==1.13.1
- six==1.17.0
- tomli==2.2.1
- zipp==3.21.0
prefix: /opt/conda/envs/mir_eval
| [
"tests/test_input_output.py::test_load_delimited",
"tests/test_input_output.py::test_load_ragged_time_series"
]
| []
| [
"tests/test_input_output.py::test_load_events",
"tests/test_input_output.py::test_load_labeled_events",
"tests/test_input_output.py::test_load_intervals",
"tests/test_input_output.py::test_load_labeled_intervals",
"tests/test_input_output.py::test_load_valued_intervals"
]
| []
| MIT License | 619 | [
"mir_eval/io.py",
"evaluators/separation_eval.py",
"mir_eval/separation.py"
]
| [
"mir_eval/io.py",
"evaluators/separation_eval.py",
"mir_eval/separation.py"
]
|
simphony__simphony-remote-79 | d2583387a396fa2b3e4ffdff37a3308d0200e2d5 | 2016-07-11 12:05:39 | 61ec23ffe44463cbc41f6fa54b4247963093ed79 | diff --git a/remoteappmanager/docker/image.py b/remoteappmanager/docker/image.py
index 5413320..85262a6 100644
--- a/remoteappmanager/docker/image.py
+++ b/remoteappmanager/docker/image.py
@@ -24,7 +24,14 @@ class Image(HasTraits):
@classmethod
def from_docker_dict(cls, docker_dict):
"""Converts the dict response from the dockerpy library into an
- instance of this class, extracting the relevant information."""
+ instance of this class, extracting the relevant information.
+
+ Parameters
+ ----------
+ docker_dict : dict
+ Results of `docker.client.inspect_image` or an item of the
+ result of `docker.client.images`
+ """
self = cls()
self.docker_id = docker_dict["Id"]
@@ -33,7 +40,9 @@ class Image(HasTraits):
except (KeyError, IndexError):
self.name = ''
- labels = docker_dict.get("Labels")
+ labels = (docker_dict.get("Labels") or
+ docker_dict.get("Config", {}).get("Labels"))
+
if labels is not None:
self.ui_name = labels.get(docker_labels.UI_NAME, '')
self.icon_128 = labels.get(docker_labels.ICON_128, '')
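
The fix hinges on the two shapes a docker-py response can take: items from `docker.Client.images()` carry `Labels` at the top level, while `inspect_image()` nests them under `Config`. A self-contained sketch (dict values trimmed from the mocked responses in `tests/utils.py`, not real API output):

```python
images_item = {
    'Id': 'sha256:e54d...',
    'RepoTags': ['simphony/mayavi-4.4.4:latest'],
    'Labels': {'eu.simphony-project.docker.ui_name': 'Mayavi 4.4.4'},
}
inspect_result = {
    'Id': 'sha256:e54d...',
    'RepoTags': ['simphony/mayavi-4.4.4:latest'],
    'Config': {
        'Labels': {'eu.simphony-project.docker.ui_name': 'Mayavi 4.4.4'},
    },
}


def extract_labels(docker_dict):
    # Same expression the patch adds: prefer the flat "Labels" key from
    # `images()` items, fall back to the nested "Config" entry that
    # `inspect_image()` returns.
    return (docker_dict.get("Labels") or
            docker_dict.get("Config", {}).get("Labels"))


assert extract_labels(images_item) == extract_labels(inspect_result)
```

Preferring the flat key and falling back to the nested one covers both call sites without touching the rest of `from_docker_dict`.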
 | Image's ui_name is no longer shown
Docker images with a `ui_name` label no longer have their names shown on the home page of the singleuser app.
This is a regression. | simphony/simphony-remote | diff --git a/tests/docker/test_container_manager.py b/tests/docker/test_container_manager.py
index 0dc216d..7610825 100644
--- a/tests/docker/test_container_manager.py
+++ b/tests/docker/test_container_manager.py
@@ -108,7 +108,7 @@ class TestContainerManager(AsyncTestCase):
self.assertEqual(image.description,
'Ubuntu machine with mayavi preinstalled')
self.assertEqual(image.icon_128, "")
- self.assertEqual(image.ui_name, "")
+ self.assertEqual(image.ui_name, "Mayavi 4.4.4")
self.assertEqual(image.docker_id,
'sha256:e54d71dde57576e9d2a4c77ce0c98501c8aa6268de5b2987e4c80e2e157cffe4') # noqa
diff --git a/tests/docker/test_image.py b/tests/docker/test_image.py
index 29cc03d..c21733f 100644
--- a/tests/docker/test_image.py
+++ b/tests/docker/test_image.py
@@ -6,12 +6,28 @@ from tests.utils import mock_docker_client
class TestImage(TestCase):
- def test_from_docker_dict(self):
+ def test_from_docker_dict_images(self):
docker_client = mock_docker_client()
- images = docker_client.images()
- image = Image.from_docker_dict(images[0])
+ image_dict = docker_client.images()[0]
+ image = Image.from_docker_dict(image_dict)
- self.assertEqual(image.docker_id, images[0]["Id"])
- self.assertEqual(image.name, images[0]["RepoTags"][0])
+ self.assertEqual(image.docker_id, image_dict["Id"])
+ self.assertEqual(image.name, image_dict["RepoTags"][0])
self.assertEqual(image.description,
- images[0]["Labels"][docker_labels.DESCRIPTION])
+ image_dict["Labels"][docker_labels.DESCRIPTION])
+ self.assertEqual(image.ui_name,
+ image_dict["Labels"][docker_labels.UI_NAME])
+
+ def test_from_docker_dict_inspect_image(self):
+ docker_client = mock_docker_client()
+ image_dict = docker_client.inspect_image()
+ image = Image.from_docker_dict(image_dict)
+
+ self.assertEqual(image.docker_id, image_dict["Id"])
+ self.assertEqual(image.name, image_dict["RepoTags"][0])
+ self.assertEqual(
+ image.description,
+ image_dict['Config']["Labels"][docker_labels.DESCRIPTION])
+ self.assertEqual(
+ image.ui_name,
+ image_dict['Config']["Labels"][docker_labels.UI_NAME])
diff --git a/tests/utils.py b/tests/utils.py
index d35f7e9..2a6a5d7 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -13,19 +13,36 @@ def mock_docker_client():
"""Returns a mock synchronous docker client to return canned
responses."""
docker_client = mock.Mock(spec=docker.Client)
+
+ # Note that the structure of the returned dictionary is different
+ # for `inspect_image` and for `images`
+ # The return value is simplified...
docker_client.inspect_image = mock.Mock(
- return_value={'Created': 1463662803,
- 'Id':
- 'sha256:e54d71dde57576e9d2a4c77ce0c98501c8aa6268de5b2987e4c80e2e157cffe4', # noqa
- 'Labels': {
- 'eu.simphony-project.docker.description':
- 'Ubuntu machine with mayavi preinstalled'
- },
- 'ParentId': 'sha256:d2f7240076e135f6aba57185e54ff45cc158781c787897b67994f72fe668ad07', # noqa
- 'RepoDigests': None,
- 'RepoTags': ['simphony/mayavi-4.4.4:latest'],
- 'Size': 1094833658,
- 'VirtualSize': 1094833658})
+ return_value= {
+ 'Author': 'SimPhoNy Team',
+ 'Comment': '',
+ 'Config': {'Cmd': None,
+ 'Domainname': '',
+ 'Entrypoint': ['/startup.sh'],
+ 'Env': ['PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin',
+ 'DEBIAN_FRONTEND=noninteractive',
+ 'HOME=/root'],
+ 'ExposedPorts': {'8888/tcp': {}},
+ 'Hostname': 'dfc2eabdf236',
+ 'Image': 'sha256:912b31a4e4f185b918999540040bb158e208ce0123fde4be07c188b2ab5aa4bb',
+ 'Labels': {'eu.simphony-project.docker.description': 'Ubuntu machine with mayavi preinstalled', # noqa
+ 'eu.simphony-project.docker.ui_name': 'Mayavi 4.4.4'}, # noqa
+ 'OnBuild': [],
+ 'OpenStdin': False,
+ 'StdinOnce': False,
+ 'Tty': False,
+ 'User': '',
+ 'Volumes': None,
+ 'WorkingDir': '/root'},
+ 'Id': 'sha256:e54d71dde57576e9d2a4c77ce0c98501c8aa6268de5b2987e4c80e2e157cffe4',
+ 'RepoTags': ['simphony/mayavi-4.4.4:latest'],
+ 'Size': 668483801,
+ 'VirtualSize': 668483801})
docker_client.inspect_container = mock.Mock(return_value=None)
docker_client.create_host_config = mock.Mock(return_value={})
docker_client.create_container = mock.Mock(
@@ -41,7 +58,8 @@ def mock_docker_client():
{'Created': 1463662803,
'Id': 'sha256:e54d71dde57576e9d2a4c77ce0c98501c8aa6268de5b2987e4c80e2e157cffe4', # noqa
'Labels': {
- 'eu.simphony-project.docker.description': 'Ubuntu machine with mayavi preinstalled' # noqa
+ 'eu.simphony-project.docker.description': 'Ubuntu machine with mayavi preinstalled', # noqa
+ 'eu.simphony-project.docker.ui_name': 'Mayavi 4.4.4' # noqa
},
'ParentId': 'sha256:d2f7240076e135f6aba57185e54ff45cc158781c787897b67994f72fe668ad07', # noqa
'RepoDigests': None,
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 1
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alembic==1.15.2
annotated-types==0.7.0
arrow==1.3.0
async-generator==1.10
attrs==25.3.0
certifi==2025.1.31
certipy==0.2.2
cffi==1.17.1
charset-normalizer==3.4.1
click==8.1.8
cryptography==44.0.2
docker-py==1.10.6
docker-pycreds==0.4.0
escapism==1.0.1
exceptiongroup==1.2.2
fqdn==1.5.1
greenlet==3.1.1
idna==3.10
importlib_metadata==8.6.1
iniconfig==2.1.0
isoduration==20.11.0
Jinja2==3.1.6
jsonpointer==3.0.0
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
jupyter-events==0.12.0
jupyter_client==8.6.3
jupyter_core==5.7.2
jupyterhub==5.2.1
Mako==1.3.9
MarkupSafe==3.0.2
oauthlib==3.2.2
packaging==24.2
pamela==1.2.0
platformdirs==4.3.7
pluggy==1.5.0
prometheus_client==0.21.1
pycparser==2.22
pydantic==2.11.1
pydantic_core==2.33.0
pytest==8.3.5
python-dateutil==2.9.0.post0
python-json-logger==3.3.0
PyYAML==6.0.2
pyzmq==26.3.0
referencing==0.36.2
-e git+https://github.com/simphony/simphony-remote.git@d2583387a396fa2b3e4ffdff37a3308d0200e2d5#egg=remoteappmanager
requests==2.32.3
rfc3339-validator==0.1.4
rfc3986-validator==0.1.1
rpds-py==0.24.0
six==1.17.0
SQLAlchemy==2.0.40
tabulate==0.9.0
tomli==2.2.1
tornado==6.4.2
traitlets==5.14.3
types-python-dateutil==2.9.0.20241206
typing-inspection==0.4.0
typing_extensions==4.13.0
uri-template==1.3.0
urllib3==2.3.0
webcolors==24.11.1
websocket-client==1.8.0
zipp==3.21.0
| name: simphony-remote
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alembic==1.15.2
- annotated-types==0.7.0
- arrow==1.3.0
- async-generator==1.10
- attrs==25.3.0
- certifi==2025.1.31
- certipy==0.2.2
- cffi==1.17.1
- charset-normalizer==3.4.1
- click==8.1.8
- cryptography==44.0.2
- docker-py==1.10.6
- docker-pycreds==0.4.0
- escapism==1.0.1
- exceptiongroup==1.2.2
- fqdn==1.5.1
- greenlet==3.1.1
- idna==3.10
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- isoduration==20.11.0
- jinja2==3.1.6
- jsonpointer==3.0.0
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- jupyter-client==8.6.3
- jupyter-core==5.7.2
- jupyter-events==0.12.0
- jupyterhub==5.2.1
- mako==1.3.9
- markupsafe==3.0.2
- oauthlib==3.2.2
- packaging==24.2
- pamela==1.2.0
- platformdirs==4.3.7
- pluggy==1.5.0
- prometheus-client==0.21.1
- pycparser==2.22
- pydantic==2.11.1
- pydantic-core==2.33.0
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- python-json-logger==3.3.0
- pyyaml==6.0.2
- pyzmq==26.3.0
- referencing==0.36.2
- requests==2.32.3
- rfc3339-validator==0.1.4
- rfc3986-validator==0.1.1
- rpds-py==0.24.0
- six==1.17.0
- sqlalchemy==2.0.40
- tabulate==0.9.0
- tomli==2.2.1
- tornado==6.4.2
- traitlets==5.14.3
- types-python-dateutil==2.9.0.20241206
- typing-extensions==4.13.0
- typing-inspection==0.4.0
- uri-template==1.3.0
- urllib3==2.3.0
- webcolors==24.11.1
- websocket-client==1.8.0
- zipp==3.21.0
prefix: /opt/conda/envs/simphony-remote
| [
"tests/docker/test_container_manager.py::TestContainerManager::test_image",
"tests/docker/test_image.py::TestImage::test_from_docker_dict_inspect_image"
]
| []
| [
"tests/docker/test_container_manager.py::TestContainerManager::test_containers_from_mapping_id",
"tests/docker/test_container_manager.py::TestContainerManager::test_instantiation",
"tests/docker/test_container_manager.py::TestContainerManager::test_race_condition_spawning",
"tests/docker/test_container_manager.py::TestContainerManager::test_start_already_present_container",
"tests/docker/test_container_manager.py::TestContainerManager::test_start_container_exception_cleanup",
"tests/docker/test_container_manager.py::TestContainerManager::test_start_container_exception_cleanup_2",
"tests/docker/test_container_manager.py::TestContainerManager::test_start_container_with_nonexisting_volume_source",
"tests/docker/test_container_manager.py::TestContainerManager::test_start_stop",
"tests/docker/test_image.py::TestImage::test_from_docker_dict_images"
]
| []
| BSD 3-Clause "New" or "Revised" License | 620 | [
"remoteappmanager/docker/image.py"
]
| [
"remoteappmanager/docker/image.py"
]
|
|
lepture__mistune-105 | 715046fe4b5f3642b24edc7fdd62ba8915228498 | 2016-07-11 21:55:35 | 715046fe4b5f3642b24edc7fdd62ba8915228498 | nsfmc: Hello, i'm not sure what the protocol is for this.
I ran into this bug from a strangely formed embed code i got from gyfcat, but i suspect others could run into it too.
the `test_safe_links` test is currently failing in master, but the test that this adds passes.
Let me know if you have any questions!
lepture: @nsfmc a test case failed.
nsfmc: Hi @lepture, I believe that test case is failing in master as well. If there is a branch I can fork off of that is building successfully I'm happy to rebase my pull request off that one. I'm afraid I don't understand well enough the changes needed to fix the link sanitization issues :( | diff --git a/mistune.py b/mistune.py
index b341cf2..67b0880 100644
--- a/mistune.py
+++ b/mistune.py
@@ -74,7 +74,7 @@ def escape(text, quote=False, smart_amp=True):
def escape_link(url, **kwargs):
"""Remove dangerous URL schemes like javascript: and escape afterwards."""
- lower_url = url.lower()
+ lower_url = url.lower().strip('\x00\x1a \n\r\t')
for scheme in _scheme_blacklist:
if lower_url.startswith(scheme):
return ''
@@ -157,7 +157,7 @@ class BlockGrammar(object):
block_html = re.compile(
r'^ *(?:%s|%s|%s) *(?:\n{2,}|\s*$)' % (
r'<!--[\s\S]*?-->',
- r'<(%s)((?:%s)*?)>([\s\S]+?)<\/\1>' % (_block_tag, _valid_attr),
+ r'<(%s)((?:%s)*?)\s*>([\s\S]+?)<\/\1>' % (_block_tag, _valid_attr),
r'<%s(?:%s)*?\s*\/?>' % (_block_tag, _valid_attr),
)
)
@@ -447,7 +447,7 @@ class InlineGrammar(object):
inline_html = re.compile(
r'^(?:%s|%s|%s)' % (
r'<!--[\s\S]*?-->',
- r'<(\w+%s)((?:%s)*?)>([\s\S]*?)<\/\1>' % (_valid_end, _valid_attr),
+ r'<(\w+%s)((?:%s)*?)\s*>([\s\S]*?)<\/\1>' % (_valid_end, _valid_attr),
r'<\w+%s(?:%s)*?\s*\/?>' % (_valid_end, _valid_attr),
)
)
| parsing html can fail to close tag correctly
While debugging an issue in https://github.com/lektor/lektor/issues/241#issuecomment-231819694 I noticed that raw HTML handling can break if there is a space before the closing `>` of the opening tag, i.e.
```
In [38]: mistune.markdown('<iframe src="http://gfycat.com" ></iframe>', escape=False)
Out[38]: '<p><iframe src="http://gfycat.com" ></iframe></p>\n'
```
but
```
In [39]: mistune.markdown('<iframe src="http://gfycat.com"></iframe>', escape=False)
Out[39]: '<p><iframe src="http://gfycat.com"></iframe></p>\n'
```
In the case of the iframe, this causes the opening iframe tag to gobble up the rest of the page content. This seems to hold for any tag, though, e.g.
```
In [40]: mistune.markdown('<a href="http://gfycat.com" ></a>', escape=False)
Out[40]: '<p><a href="http://gfycat.com" ></a></p>\n'
```
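To make the failure mode concrete, here is a reduced sketch — a simplified stand-in, not mistune's actual grammar, which also handles comments and self-closing tags — showing that allowing optional whitespace before `>` is what lets the opening tag terminate:

```
import re

# Non-greedy attribute group, loosely modeled on the library's _valid_attr.
attrs = r'(?:\s+[\w-]+(?:="[^"]*")?)*?'
strict = re.compile(r'<(\w+)(%s)>([\s\S]*?)</\1>' % attrs)
lenient = re.compile(r'<(\w+)(%s)\s*>([\s\S]*?)</\1>' % attrs)

html = '<iframe src="http://gfycat.com" ></iframe>'
print(strict.match(html))   # None: the stray space defeats the match
print(lenient.match(html))  # matches: \s* absorbs the space before '>'
```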
| lepture/mistune | diff --git a/tests/test_extra.py b/tests/test_extra.py
index 7318444..07c1ca3 100644
--- a/tests/test_extra.py
+++ b/tests/test_extra.py
@@ -77,6 +77,12 @@ def test_parse_inline_html():
assert 'href' not in ret
+def test_block_html():
+ ret = mistune.markdown(
+ '<div ></div>', escape=False
+ )
+ assert '<div ></div>' in ret
+
def test_parse_block_html():
ret = mistune.markdown(
'<div>**foo**</div>', parse_block_html=True, escape=False
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 0.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
-e git+https://github.com/lepture/mistune.git@715046fe4b5f3642b24edc7fdd62ba8915228498#egg=mistune
nose==1.3.7
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: mistune
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- nose==1.3.7
prefix: /opt/conda/envs/mistune
| [
"tests/test_extra.py::test_safe_links",
"tests/test_extra.py::test_block_html"
]
| []
| [
"tests/test_extra.py::test_escape",
"tests/test_extra.py::test_linebreak",
"tests/test_extra.py::test_skip_style",
"tests/test_extra.py::test_use_xhtml",
"tests/test_extra.py::test_parse_inline_html",
"tests/test_extra.py::test_parse_block_html",
"tests/test_extra.py::test_trigger_more_cases",
"tests/test_extra.py::test_not_escape_block_tags",
"tests/test_extra.py::test_not_escape_inline_tags",
"tests/test_extra.py::test_hard_wrap_renderer"
]
| []
| BSD 3-Clause "New" or "Revised" License | 621 | [
"mistune.py"
]
| [
"mistune.py"
]
|
dpkp__kafka-python-756 | 7a350e5fcf33f49094c820ba88b9cee4aeae6e12 | 2016-07-12 15:39:23 | 709ee3b59aff8ab205f0e09c33f4ec8391664228 | diff --git a/kafka/coordinator/base.py b/kafka/coordinator/base.py
index 168115a..25dd000 100644
--- a/kafka/coordinator/base.py
+++ b/kafka/coordinator/base.py
@@ -50,6 +50,7 @@ class BaseCoordinator(object):
'session_timeout_ms': 30000,
'heartbeat_interval_ms': 3000,
'retry_backoff_ms': 100,
+ 'api_version': (0, 9),
}
def __init__(self, client, **configs):
@@ -194,6 +195,14 @@ class BaseCoordinator(object):
"""
while self.coordinator_unknown():
+ # Prior to 0.8.2 there was no group coordinator
+ # so we will just pick a node at random and treat
+ # it as the "coordinator"
+ if self.config['api_version'] < (0, 8, 2):
+ self.coordinator_id = self._client.least_loaded_node()
+ self._client.ready(self.coordinator_id)
+ continue
+
future = self._send_group_coordinator_request()
self._client.poll(future=future)
diff --git a/kafka/coordinator/consumer.py b/kafka/coordinator/consumer.py
index 196bcc7..083a36a 100644
--- a/kafka/coordinator/consumer.py
+++ b/kafka/coordinator/consumer.py
@@ -100,6 +100,12 @@ class ConsumerCoordinator(BaseCoordinator):
interval = self.config['auto_commit_interval_ms'] / 1000.0
self._auto_commit_task = AutoCommitTask(weakref.proxy(self), interval)
+ # When using broker-coordinated consumer groups, auto-commit will
+ # be automatically enabled on group join (see _on_join_complete)
+ # Otherwise, we should enable now b/c there will be no group join
+ if self.config['api_version'] < (0, 9):
+ self._auto_commit_task.enable()
+
self._sensors = ConsumerCoordinatorMetrics(metrics, metric_group_prefix,
self._subscription)
@@ -293,8 +299,7 @@ class ConsumerCoordinator(BaseCoordinator):
return {}
while True:
- if self.config['api_version'] >= (0, 8, 2):
- self.ensure_coordinator_known()
+ self.ensure_coordinator_known()
# contact coordinator to fetch committed offsets
future = self._send_offset_fetch_request(partitions)
@@ -356,8 +361,7 @@ class ConsumerCoordinator(BaseCoordinator):
return
while True:
- if self.config['api_version'] >= (0, 8, 2):
- self.ensure_coordinator_known()
+ self.ensure_coordinator_known()
future = self._send_offset_commit_request(offsets)
self._client.poll(future=future)
@@ -415,14 +419,10 @@ class ConsumerCoordinator(BaseCoordinator):
log.debug('No offsets to commit')
return Future().success(True)
- if self.config['api_version'] >= (0, 8, 2):
- if self.coordinator_unknown():
- return Future().failure(Errors.GroupCoordinatorNotAvailableError)
- node_id = self.coordinator_id
- else:
- node_id = self._client.least_loaded_node()
- if node_id is None:
- return Future().failure(Errors.NoBrokersAvailable)
+ elif self.coordinator_unknown():
+ return Future().failure(Errors.GroupCoordinatorNotAvailableError)
+
+ node_id = self.coordinator_id
# create the offset commit request
offset_data = collections.defaultdict(dict)
@@ -571,14 +571,10 @@ class ConsumerCoordinator(BaseCoordinator):
if not partitions:
return Future().success({})
- if self.config['api_version'] >= (0, 8, 2):
- if self.coordinator_unknown():
- return Future().failure(Errors.GroupCoordinatorNotAvailableError)
- node_id = self.coordinator_id
- else:
- node_id = self._client.least_loaded_node()
- if node_id is None:
- return Future().failure(Errors.NoBrokersAvailable)
+ elif self.coordinator_unknown():
+ return Future().failure(Errors.GroupCoordinatorNotAvailableError)
+
+ node_id = self.coordinator_id
# Verify node is ready
if not self._client.ready(node_id):
| Support KafkaConsumer auto-commit with 0.8 brokers
kafka 0.8.2 kafka-python 1.1.1
when enable auto_commit, an AutoCommitTask instance will be created
but when Enable the AutoCommitTask instance?
in the code , only find the function _on_join_complete will enable the AutoCommitTask instance
```
def _on_join_complete(self, generation, member_id, protocol,
member_assignment_bytes):
...
# restart the autocommit task if needed
if self._auto_commit_task:
self._auto_commit_task.enable()
```
but with api_version < 0.9, the function _on_join_complete is never called, so auto-commit never starts. | dpkp/kafka-python | diff --git a/test/test_coordinator.py b/test/test_coordinator.py
index 15b915d..735d278 100644
--- a/test/test_coordinator.py
+++ b/test/test_coordinator.py
@@ -425,8 +425,7 @@ def test_send_offset_commit_request_fail(patched_coord, offsets):
((0, 9), OffsetCommitRequest[2])])
def test_send_offset_commit_request_versions(patched_coord, offsets,
api_version, req_type):
- # assuming fixture sets coordinator=0, least_loaded_node=1
- expect_node = 0 if api_version >= (0, 8, 2) else 1
+ expect_node = 0
patched_coord.config['api_version'] = api_version
patched_coord._send_offset_commit_request(offsets)
@@ -522,7 +521,7 @@ def test_send_offset_fetch_request_fail(patched_coord, partitions):
def test_send_offset_fetch_request_versions(patched_coord, partitions,
api_version, req_type):
# assuming fixture sets coordinator=0, least_loaded_node=1
- expect_node = 0 if api_version >= (0, 8, 2) else 1
+ expect_node = 0
patched_coord.config['api_version'] = api_version
patched_coord._send_offset_fetch_request(partitions)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 2
} | 1.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-catchlog",
"pytest-sugar",
"pytest-mock",
"mock",
"python-snappy",
"lz4tools",
"xxhash",
"six"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc libsnappy-dev"
],
"python": "3.5",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
coverage==6.2
cramjam==2.5.0
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
-e git+https://github.com/dpkp/kafka-python.git@7a350e5fcf33f49094c820ba88b9cee4aeae6e12#egg=kafka_python
lz4tools==1.3.1.2
mock==5.2.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-catchlog==1.2.2
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-sugar==0.9.6
python-snappy==0.7.3
six==1.17.0
termcolor==1.1.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
xxhash==3.2.0
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: kafka-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==6.2
- cramjam==2.5.0
- lz4tools==1.3.1.2
- mock==5.2.0
- pytest-catchlog==1.2.2
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-sugar==0.9.6
- python-snappy==0.7.3
- six==1.17.0
- termcolor==1.1.0
- tomli==1.2.3
- xxhash==3.2.0
prefix: /opt/conda/envs/kafka-python
| [
"test/test_coordinator.py::test_send_offset_commit_request_versions[api_version0-OffsetCommitRequest_v0]",
"test/test_coordinator.py::test_send_offset_fetch_request_versions[api_version0-OffsetFetchRequest_v0]"
]
| []
| [
"test/test_coordinator.py::test_init",
"test/test_coordinator.py::test_autocommit_enable_api_version[api_version0]",
"test/test_coordinator.py::test_autocommit_enable_api_version[api_version1]",
"test/test_coordinator.py::test_autocommit_enable_api_version[api_version2]",
"test/test_coordinator.py::test_autocommit_enable_api_version[api_version3]",
"test/test_coordinator.py::test_protocol_type",
"test/test_coordinator.py::test_group_protocols",
"test/test_coordinator.py::test_pattern_subscription[api_version0]",
"test/test_coordinator.py::test_pattern_subscription[api_version1]",
"test/test_coordinator.py::test_pattern_subscription[api_version2]",
"test/test_coordinator.py::test_pattern_subscription[api_version3]",
"test/test_coordinator.py::test_lookup_assignor",
"test/test_coordinator.py::test_join_complete",
"test/test_coordinator.py::test_subscription_listener",
"test/test_coordinator.py::test_subscription_listener_failure",
"test/test_coordinator.py::test_perform_assignment",
"test/test_coordinator.py::test_on_join_prepare",
"test/test_coordinator.py::test_need_rejoin",
"test/test_coordinator.py::test_refresh_committed_offsets_if_needed",
"test/test_coordinator.py::test_fetch_committed_offsets",
"test/test_coordinator.py::test_close",
"test/test_coordinator.py::test_commit_offsets_async",
"test/test_coordinator.py::test_commit_offsets_sync",
"test/test_coordinator.py::test_maybe_auto_commit_offsets_sync[api_version0-foobar-True-None-False-False-True-False]",
"test/test_coordinator.py::test_maybe_auto_commit_offsets_sync[api_version1-foobar-False-None-False-False-False-False]",
"test/test_coordinator.py::test_maybe_auto_commit_offsets_sync[api_version2-foobar-True-error2-True-True-True-False]",
"test/test_coordinator.py::test_maybe_auto_commit_offsets_sync[api_version3-foobar-True-error3-True-True-True-False]",
"test/test_coordinator.py::test_maybe_auto_commit_offsets_sync[api_version4-foobar-True-error4-True-True-True-False]",
"test/test_coordinator.py::test_maybe_auto_commit_offsets_sync[api_version5-foobar-True-error5-True-True-False-True]",
"test/test_coordinator.py::test_maybe_auto_commit_offsets_sync[api_version6-foobar-True-None-True-True-False-False]",
"test/test_coordinator.py::test_maybe_auto_commit_offsets_sync[api_version7-None-True-None-False-False-True-False]",
"test/test_coordinator.py::test_send_offset_commit_request_fail",
"test/test_coordinator.py::test_send_offset_commit_request_versions[api_version1-OffsetCommitRequest_v1]",
"test/test_coordinator.py::test_send_offset_commit_request_versions[api_version2-OffsetCommitRequest_v2]",
"test/test_coordinator.py::test_send_offset_commit_request_failure",
"test/test_coordinator.py::test_send_offset_commit_request_success",
"test/test_coordinator.py::test_handle_offset_commit_response[response0-GroupAuthorizationFailedError-False-False]",
"test/test_coordinator.py::test_handle_offset_commit_response[response1-OffsetMetadataTooLargeError-False-False]",
"test/test_coordinator.py::test_handle_offset_commit_response[response2-InvalidCommitOffsetSizeError-False-False]",
"test/test_coordinator.py::test_handle_offset_commit_response[response3-GroupLoadInProgressError-False-False]",
"test/test_coordinator.py::test_handle_offset_commit_response[response4-GroupCoordinatorNotAvailableError-True-False]",
"test/test_coordinator.py::test_handle_offset_commit_response[response5-NotCoordinatorForGroupError-True-False]",
"test/test_coordinator.py::test_handle_offset_commit_response[response6-RequestTimedOutError-True-False]",
"test/test_coordinator.py::test_handle_offset_commit_response[response7-CommitFailedError-False-True]",
"test/test_coordinator.py::test_handle_offset_commit_response[response8-CommitFailedError-False-True]",
"test/test_coordinator.py::test_handle_offset_commit_response[response9-CommitFailedError-False-True]",
"test/test_coordinator.py::test_handle_offset_commit_response[response10-InvalidTopicError-False-False]",
"test/test_coordinator.py::test_handle_offset_commit_response[response11-TopicAuthorizationFailedError-False-False]",
"test/test_coordinator.py::test_send_offset_fetch_request_fail",
"test/test_coordinator.py::test_send_offset_fetch_request_versions[api_version1-OffsetFetchRequest_v1]",
"test/test_coordinator.py::test_send_offset_fetch_request_versions[api_version2-OffsetFetchRequest_v1]",
"test/test_coordinator.py::test_send_offset_fetch_request_failure",
"test/test_coordinator.py::test_send_offset_fetch_request_success",
"test/test_coordinator.py::test_handle_offset_fetch_response[response0-GroupLoadInProgressError-False-False]",
"test/test_coordinator.py::test_handle_offset_fetch_response[response1-NotCoordinatorForGroupError-True-False]",
"test/test_coordinator.py::test_handle_offset_fetch_response[response2-UnknownMemberIdError-False-True]",
"test/test_coordinator.py::test_handle_offset_fetch_response[response3-IllegalGenerationError-False-True]",
"test/test_coordinator.py::test_handle_offset_fetch_response[response4-TopicAuthorizationFailedError-False-False]",
"test/test_coordinator.py::test_handle_offset_fetch_response[response5-None-False-False]",
"test/test_coordinator.py::test_heartbeat"
]
| []
| Apache License 2.0 | 623 | [
"kafka/coordinator/consumer.py",
"kafka/coordinator/base.py"
]
| [
"kafka/coordinator/consumer.py",
"kafka/coordinator/base.py"
]
|
|
simphony__simphony-remote-89 | cbbed1d4e4f29aa4ccc3719d52505679abcd25c3 | 2016-07-12 16:43:10 | 61ec23ffe44463cbc41f6fa54b4247963093ed79 | diff --git a/remoteappmanager/application.py b/remoteappmanager/application.py
index 89ba765..b5131db 100644
--- a/remoteappmanager/application.py
+++ b/remoteappmanager/application.py
@@ -125,7 +125,7 @@ class Application(web.Application, LoggingMixin):
"""Initializes the user at the database level."""
user_name = self.command_line_config.user
user = User(name=user_name)
- user.orm_user = self.db.get_user_by_name(user_name)
+ user.account = self.db.get_user_by_name(user_name)
return user
# Public
diff --git a/remoteappmanager/db/csv_db.py b/remoteappmanager/db/csv_db.py
index 3c00a2e..99227dc 100644
--- a/remoteappmanager/db/csv_db.py
+++ b/remoteappmanager/db/csv_db.py
@@ -158,7 +158,7 @@ class CSVAccounting(ABCAccounting):
Parameters
----------
- user : remoteappmanager.db.csv_db.CSVUser
+ user : CSVUser
Same type as the result of `get_user_by_name`
Returns
diff --git a/remoteappmanager/db/interfaces.py b/remoteappmanager/db/interfaces.py
index e3c7485..4f09018 100644
--- a/remoteappmanager/db/interfaces.py
+++ b/remoteappmanager/db/interfaces.py
@@ -55,7 +55,7 @@ class ABCApplicationPolicy(metaclass=ABCMeta):
class ABCAccounting(metaclass=ABCMeta):
- """ Main accounting interface required by the single User application.
+ """ Main accounting interface required by the single user application.
"""
@abstractmethod
@@ -69,16 +69,17 @@ class ABCAccounting(metaclass=ABCMeta):
Returns
-------
- a User-like object that the Database understands
+ account : opaque-type
+ an object that the database understands
"""
@abstractmethod
- def get_apps_for_user(self, user):
- """ Return an iterable of ApplicationConfig for a given User
+ def get_apps_for_user(self, account):
+ """ Return an iterable of ApplicationConfig for a given account
Parameters
----------
- user : User-like
+ account : opaque-type
Same type as the result of `get_user_by_name`
Returns
diff --git a/remoteappmanager/handlers/home_handler.py b/remoteappmanager/handlers/home_handler.py
index 0ec4c4e..9ba81f6 100644
--- a/remoteappmanager/handlers/home_handler.py
+++ b/remoteappmanager/handlers/home_handler.py
@@ -78,7 +78,7 @@ class HomeHandler(BaseHandler):
mapping_id = options["mapping_id"][0]
all_apps = self.application.db.get_apps_for_user(
- self.current_user.orm_user)
+ self.current_user.account)
choice = [(m_id, app, policy)
for m_id, app, policy in all_apps
@@ -90,9 +90,9 @@ class HomeHandler(BaseHandler):
_, app, policy = choice[0]
container = None
- orm_user = self.current_user.orm_user
+ user_name = self.current_user.name
try:
- container = yield self._start_container(orm_user,
+ container = yield self._start_container(user_name,
app,
policy,
mapping_id)
@@ -172,7 +172,7 @@ class HomeHandler(BaseHandler):
container_manager = self.application.container_manager
apps = self.application.db.get_apps_for_user(
- self.current_user.orm_user)
+ self.current_user.account)
images_info = []
@@ -230,18 +230,16 @@ class HomeHandler(BaseHandler):
return Container.from_docker_containers_dict(container_dict[0])
- # FIXME: The orm_user here requires any database implementation
- # to provide a user object with a name attribute
@gen.coroutine
- def _start_container(self, orm_user, app, policy, mapping_id):
+ def _start_container(self, user_name, app, policy, mapping_id):
"""Start the container. This method is a helper method that
works with low level data and helps in issuing the request to the
data container.
Parameters
----------
- orm_user : User
- database's user object (e.g. current_user.orm_user)
+ user_name : str
+ the user name to be associated with the container
app : ABCApplication
the application to start
@@ -254,7 +252,6 @@ class HomeHandler(BaseHandler):
Container
"""
- user_name = orm_user.name
image_name = app.image
mount_home = policy.allow_home
volume_spec = (policy.volume_source,
diff --git a/remoteappmanager/user.py b/remoteappmanager/user.py
index 50e3428..4820d5b 100644
--- a/remoteappmanager/user.py
+++ b/remoteappmanager/user.py
@@ -8,7 +8,5 @@ class User(HasTraits):
# The username as passed at the config line
name = Unicode()
- # FIXME: orm_user is Any to support other database implementation
-
#: Can be none if the username cannot be found in the database.
- orm_user = Any()
+ account = Any()
| User.orm_user should be renamed as User.account with an opaque type
The `orm_user` attribute of the singleuser app is an opaque object handled by the database's accounting.
It should be renamed to `account` and be given an opaque type.
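A minimal sketch of the renamed trait, mirroring the patch to `remoteappmanager/user.py` (traitlets' `Any` keeps the accounting handle opaque to the web layer; the usage lines are illustrative):

```
from traitlets import Any, HasTraits, Unicode

class User(HasTraits):
    # The username as passed at the command line
    name = Unicode()
    # Opaque handle returned by the accounting backend; can be None if
    # the username cannot be found in the database.
    account = Any()

user = User(name="alice")
user.account = {"id": 1}  # any backend-specific record is acceptable
```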
| simphony/simphony-remote | diff --git a/tests/test_application.py b/tests/test_application.py
index eee2f8b..c51080d 100644
--- a/tests/test_application.py
+++ b/tests/test_application.py
@@ -44,7 +44,7 @@ class TestApplication(TempMixin, testing.AsyncTestCase):
self.assertIsNotNone(app.container_manager)
self.assertIsNotNone(app.hub)
self.assertEqual(app.user.name, "username")
- self.assertEqual(app.user.orm_user, None)
+ self.assertEqual(app.user.account, None)
# FIXME: Some of these tests are the same and should be refactored
@@ -89,4 +89,4 @@ class TestApplicationWithCSV(TempMixin, testing.AsyncTestCase):
self.assertIsNotNone(app.user)
self.assertEqual(app.user.name, "username")
- self.assertIsInstance(app.user.orm_user, test_csv_db.CSVUser)
+ self.assertIsInstance(app.user.account, test_csv_db.CSVUser)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 5
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"flake8",
"sphinx",
"coverage"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.16
alembic==1.15.2
annotated-types==0.7.0
arrow==1.3.0
async-generator==1.10
attrs==25.3.0
babel==2.17.0
certifi==2025.1.31
certipy==0.2.2
cffi==1.17.1
charset-normalizer==3.4.1
click==8.1.8
coverage==7.8.0
cryptography==44.0.2
docker-py==1.10.6
docker-pycreds==0.4.0
docutils==0.21.2
escapism==1.0.1
exceptiongroup==1.2.2
flake8==7.2.0
fqdn==1.5.1
greenlet==3.1.1
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
iniconfig==2.1.0
isoduration==20.11.0
Jinja2==3.1.6
jsonpointer==3.0.0
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
jupyter-events==0.12.0
jupyter_client==8.6.3
jupyter_core==5.7.2
jupyterhub==5.2.1
Mako==1.3.9
MarkupSafe==3.0.2
mccabe==0.7.0
oauthlib==3.2.2
packaging==24.2
pamela==1.2.0
platformdirs==4.3.7
pluggy==1.5.0
prometheus_client==0.21.1
pycodestyle==2.13.0
pycparser==2.22
pydantic==2.11.1
pydantic_core==2.33.0
pyflakes==3.3.1
Pygments==2.19.1
pytest==8.3.5
python-dateutil==2.9.0.post0
python-json-logger==3.3.0
PyYAML==6.0.2
pyzmq==26.3.0
referencing==0.36.2
-e git+https://github.com/simphony/simphony-remote.git@cbbed1d4e4f29aa4ccc3719d52505679abcd25c3#egg=remoteappmanager
requests==2.32.3
rfc3339-validator==0.1.4
rfc3986-validator==0.1.1
rpds-py==0.24.0
six==1.17.0
snowballstemmer==2.2.0
Sphinx==7.4.7
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
SQLAlchemy==2.0.40
tabulate==0.9.0
tomli==2.2.1
tornado==6.4.2
traitlets==5.14.3
types-python-dateutil==2.9.0.20241206
typing-inspection==0.4.0
typing_extensions==4.13.0
uri-template==1.3.0
urllib3==2.3.0
webcolors==24.11.1
websocket-client==1.8.0
zipp==3.21.0
| name: simphony-remote
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.16
- alembic==1.15.2
- annotated-types==0.7.0
- arrow==1.3.0
- async-generator==1.10
- attrs==25.3.0
- babel==2.17.0
- certifi==2025.1.31
- certipy==0.2.2
- cffi==1.17.1
- charset-normalizer==3.4.1
- click==8.1.8
- coverage==7.8.0
- cryptography==44.0.2
- docker-py==1.10.6
- docker-pycreds==0.4.0
- docutils==0.21.2
- escapism==1.0.1
- exceptiongroup==1.2.2
- flake8==7.2.0
- fqdn==1.5.1
- greenlet==3.1.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- isoduration==20.11.0
- jinja2==3.1.6
- jsonpointer==3.0.0
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- jupyter-client==8.6.3
- jupyter-core==5.7.2
- jupyter-events==0.12.0
- jupyterhub==5.2.1
- mako==1.3.9
- markupsafe==3.0.2
- mccabe==0.7.0
- oauthlib==3.2.2
- packaging==24.2
- pamela==1.2.0
- platformdirs==4.3.7
- pluggy==1.5.0
- prometheus-client==0.21.1
- pycodestyle==2.13.0
- pycparser==2.22
- pydantic==2.11.1
- pydantic-core==2.33.0
- pyflakes==3.3.1
- pygments==2.19.1
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- python-json-logger==3.3.0
- pyyaml==6.0.2
- pyzmq==26.3.0
- referencing==0.36.2
- requests==2.32.3
- rfc3339-validator==0.1.4
- rfc3986-validator==0.1.1
- rpds-py==0.24.0
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==7.4.7
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- sqlalchemy==2.0.40
- tabulate==0.9.0
- tomli==2.2.1
- tornado==6.4.2
- traitlets==5.14.3
- types-python-dateutil==2.9.0.20241206
- typing-extensions==4.13.0
- typing-inspection==0.4.0
- uri-template==1.3.0
- urllib3==2.3.0
- webcolors==24.11.1
- websocket-client==1.8.0
- zipp==3.21.0
prefix: /opt/conda/envs/simphony-remote
| [
"tests/test_application.py::TestApplicationWithCSV::test_database_initialization"
]
| [
"tests/test_application.py::TestApplication::test_initialization"
]
| [
"tests/test_application.py::TestApplicationWithCSV::test_initialization"
]
| []
| BSD 3-Clause "New" or "Revised" License | 624 | [
"remoteappmanager/db/interfaces.py",
"remoteappmanager/user.py",
"remoteappmanager/db/csv_db.py",
"remoteappmanager/application.py",
"remoteappmanager/handlers/home_handler.py"
]
| [
"remoteappmanager/db/interfaces.py",
"remoteappmanager/user.py",
"remoteappmanager/db/csv_db.py",
"remoteappmanager/application.py",
"remoteappmanager/handlers/home_handler.py"
]
|
|
Azure__WALinuxAgent-317 | 2455e86bc0dc4f5349370c58fd5a4516c83a2b2e | 2016-07-12 21:30:35 | 2455e86bc0dc4f5349370c58fd5a4516c83a2b2e | msftclas: Hi __@brendandixon__, I'm your friendly neighborhood Microsoft Pull Request Bot (You can call me MSBOT). Thanks for your contribution!
You've already signed the contribution license agreement. Thanks!
The agreement was validated by Microsoft and real humans are currently evaluating your PR.
TTYL, MSBOT;
hglkrijger: looks good. I will start a manual test
ahmetalpbalkan: LGTM
hglkrijger: manual test looks good to me:
```
2016/07/12 22:44:36.108346 INFO Checking for agent family Test updates
2016/07/12 22:44:36.117106 INFO Wire server endpoint:168.63.129.16
2016/07/12 22:44:36.150410 INFO Instantiating Agent WALinuxAgent-2.1.5.4 from package
2016/07/12 22:44:36.160788 INFO Agent WALinuxAgent-2.1.5.4 error state: Last Failure: 0.0, Total Failures: 0, Fatal: False
2016/07/12 22:44:36.178844 INFO Ensuring Agent WALinuxAgent-2.1.5.4 is downloaded
2016/07/12 22:44:36.231748 INFO Agent WALinuxAgent-2.1.5.4 downloaded from https://rdfepirv2sg1prdstr03.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent__Test__2.1.5.4
2016/07/12 22:44:36.257189 INFO Agent WALinuxAgent-2.1.5.4 unpacked successfully to /var/lib/waagent/WALinuxAgent-2.1.5.4
2016/07/12 22:44:36.282298 INFO Agent WALinuxAgent-2.1.5.4 loaded manifest from /var/lib/waagent/WALinuxAgent-2.1.5.4/HandlerManifest.json
2016/07/12 22:44:36.299637 INFO Agent WALinuxAgent-2.1.5.4 error state: Last Failure: 0.0, Total Failures: 0, Fatal: False
2016/07/12 22:44:36.312612 INFO Agent WALinuxAgent-2.1.5.4 downloaded successfully
2016/07/12 22:44:36.321573 INFO Event: name=WALinuxAgent, op=Install, message=Agent WALinuxAgent-2.1.5.4 downloaded successfully
2016/07/12 22:44:36.337051 INFO Instantiating Agent WALinuxAgent-2.1.5.5 from package
2016/07/12 22:44:36.346417 INFO Agent WALinuxAgent-2.1.5.5 error state: Last Failure: 0.0, Total Failures: 0, Fatal: False
2016/07/12 22:44:36.359618 INFO Ensuring Agent WALinuxAgent-2.1.5.5 is downloaded
2016/07/12 22:44:36.413727 INFO Agent WALinuxAgent-2.1.5.5 downloaded from https://rdfepirv2sg1prdstr03.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent__Test__2.1.5.5
2016/07/12 22:44:36.438037 INFO Agent WALinuxAgent-2.1.5.5 unpacked successfully to /var/lib/waagent/WALinuxAgent-2.1.5.5
2016/07/12 22:44:36.451359 INFO Agent WALinuxAgent-2.1.5.5 loaded manifest from /var/lib/waagent/WALinuxAgent-2.1.5.5/HandlerManifest.json
2016/07/12 22:44:36.473522 INFO Agent WALinuxAgent-2.1.5.5 error state: Last Failure: 0.0, Total Failures: 0, Fatal: False
2016/07/12 22:44:36.493318 INFO Agent WALinuxAgent-2.1.5.5 downloaded successfully
2016/07/12 22:44:36.502272 INFO Event: name=WALinuxAgent, op=Install, message=Agent WALinuxAgent-2.1.5.5 downloaded successfully
2016/07/12 22:44:36.516464 INFO Agent WALinuxAgent-2.1.5.rc5 discovered WALinuxAgent-2.1.5.5 as an update and will exit
2016/07/12 22:44:36.535733 INFO Event: name=WALinuxAgent, op=Enable, message=Agent WALinuxAgent-2.1.5.rc5 launched with command 'python -u /usr/sbin/waagent -run-exthandlers' returned code: 0
2016/07/12 22:44:36.536585 INFO Agent WALinuxAgent-2.1.5.rc5 launched with command 'python -u /usr/sbin/waagent -run-exthandlers' returned code: 0
2016/07/12 22:44:36.537991 INFO Instantiating Agent WALinuxAgent-2.1.5.4 from disk
2016/07/12 22:44:36.539193 INFO Agent WALinuxAgent-2.1.5.4 error state: Last Failure: 0.0, Total Failures: 0, Fatal: False
2016/07/12 22:44:36.540657 INFO Ensuring Agent WALinuxAgent-2.1.5.4 is downloaded
2016/07/12 22:44:36.542193 INFO Agent WALinuxAgent-2.1.5.4 was previously downloaded - skipping download
2016/07/12 22:44:36.543853 INFO Agent WALinuxAgent-2.1.5.4 loaded manifest from /var/lib/waagent/WALinuxAgent-2.1.5.4/HandlerManifest.json
2016/07/12 22:44:36.545412 INFO Instantiating Agent WALinuxAgent-2.1.5.5 from disk
2016/07/12 22:44:36.546685 INFO Agent WALinuxAgent-2.1.5.5 error state: Last Failure: 0.0, Total Failures: 0, Fatal: False
2016/07/12 22:44:36.548155 INFO Ensuring Agent WALinuxAgent-2.1.5.5 is downloaded
2016/07/12 22:44:36.549747 INFO Agent WALinuxAgent-2.1.5.5 was previously downloaded - skipping download
2016/07/12 22:44:36.551340 INFO Agent WALinuxAgent-2.1.5.5 loaded manifest from /var/lib/waagent/WALinuxAgent-2.1.5.5/HandlerManifest.json
2016/07/12 22:44:36.553260 INFO Determined Agent WALinuxAgent-2.1.5.5 to be the latest agent
2016/07/12 22:44:36.706866 INFO Agent WALinuxAgent-2.1.5.5 launched with command 'python -u bin/WALinuxAgent-2.1.5.rc5-py2.7.egg -run-exthandlers'
2016/07/12 22:44:36.801841 INFO Agent WALinuxAgent-2.1.5.5 is running as the current agent
2016/07/12 22:44:36.810830 INFO Event: name=WALinuxAgent, op=, message=Agent WALinuxAgent-2.1.5.5 is running as the current agent
``` | diff --git a/azurelinuxagent/common/version.py b/azurelinuxagent/common/version.py
index 4f3e5972..d529bb86 100644
--- a/azurelinuxagent/common/version.py
+++ b/azurelinuxagent/common/version.py
@@ -66,6 +66,9 @@ AGENT_NAME_PATTERN = re.compile(AGENT_PATTERN)
AGENT_DIR_PATTERN = re.compile(".*/{0}".format(AGENT_PATTERN))
+# Set the CURRENT_AGENT and CURRENT_VERSION to match the agent directory name
+# - This ensures the agent will "see itself" using the same name and version
+# as the code that downloads agents.
def set_current_agent():
path = os.getcwd()
lib_dir = conf.get_lib_dir()
@@ -80,6 +83,9 @@ def set_current_agent():
return agent, FlexibleVersion(version)
CURRENT_AGENT, CURRENT_VERSION = set_current_agent()
+def is_current_agent_installed():
+ return CURRENT_AGENT == AGENT_LONG_VERSION
+
__distro__ = get_distro()
DISTRO_NAME = __distro__[0]
diff --git a/azurelinuxagent/ga/exthandlers.py b/azurelinuxagent/ga/exthandlers.py
index 8ed2f65f..d3c8f329 100644
--- a/azurelinuxagent/ga/exthandlers.py
+++ b/azurelinuxagent/ga/exthandlers.py
@@ -230,7 +230,7 @@ class ExtHandlersHandler(object):
def report_ext_handlers_status(self):
"""Go thru handler_state dir, collect and report status"""
vm_status = VMStatus()
- vm_status.vmAgent.version = AGENT_VERSION
+ vm_status.vmAgent.version = str(CURRENT_VERSION)
vm_status.vmAgent.status = "Ready"
vm_status.vmAgent.message = "Guest Agent is running"
diff --git a/azurelinuxagent/ga/monitor.py b/azurelinuxagent/ga/monitor.py
index 0ac86d4a..f49cef89 100644
--- a/azurelinuxagent/ga/monitor.py
+++ b/azurelinuxagent/ga/monitor.py
@@ -36,7 +36,8 @@ from azurelinuxagent.common.protocol.restapi import TelemetryEventParam, \
set_properties
from azurelinuxagent.common.utils.textutil import parse_doc, findall, find, getattrib
from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, \
- DISTRO_CODE_NAME, AGENT_LONG_VERSION
+ DISTRO_CODE_NAME, AGENT_LONG_VERSION, \
+ CURRENT_AGENT, CURRENT_VERSION
def parse_event(data_str):
@@ -106,7 +107,7 @@ class MonitorHandler(object):
platform.release())
self.sysinfo.append(TelemetryEventParam("OSVersion", osversion))
self.sysinfo.append(
- TelemetryEventParam("GAVersion", AGENT_LONG_VERSION))
+ TelemetryEventParam("GAVersion", CURRENT_AGENT))
try:
ram = self.osutil.get_total_mem()
diff --git a/azurelinuxagent/ga/update.py b/azurelinuxagent/ga/update.py
index 749071d6..6dae7aff 100644
--- a/azurelinuxagent/ga/update.py
+++ b/azurelinuxagent/ga/update.py
@@ -44,7 +44,8 @@ from azurelinuxagent.common.utils.flexible_version import FlexibleVersion
from azurelinuxagent.common.version import AGENT_NAME, AGENT_VERSION, AGENT_LONG_VERSION, \
AGENT_DIR_GLOB, AGENT_PKG_GLOB, \
AGENT_PATTERN, AGENT_NAME_PATTERN, AGENT_DIR_PATTERN, \
- CURRENT_AGENT, CURRENT_VERSION
+ CURRENT_AGENT, CURRENT_VERSION, \
+ is_current_agent_installed
from azurelinuxagent.ga.exthandlers import HandlerManifest
@@ -87,6 +88,7 @@ class UpdateHandler(object):
self.child_launch_time = None
self.child_launch_attempts = 0
self.child_process = None
+
self.signal_handler = None
return
@@ -98,25 +100,29 @@ class UpdateHandler(object):
Note:
- Most events should be tagged to the launched agent (agent_version)
"""
+
+ if self.child_process is not None:
+ raise Exception("Illegal attempt to launch multiple goal state Agent processes")
+
+ if self.signal_handler is None:
+ self.signal_handler = signal.signal(signal.SIGTERM, self.forward_signal)
+
latest_agent = self.get_latest_agent()
- if latest_agent == None:
+ if latest_agent is None:
+ logger.info(u"Installed Agent {0} is the most current agent", CURRENT_AGENT)
agent_cmd = "python -u {0} -run-exthandlers".format(sys.argv[0])
agent_dir = os.getcwd()
agent_name = CURRENT_AGENT
agent_version = CURRENT_VERSION
else:
+ logger.info(u"Determined Agent {0} to be the latest agent", latest_agent.name)
agent_cmd = latest_agent.get_agent_cmd()
agent_dir = latest_agent.get_agent_dir()
agent_name = latest_agent.name
agent_version = latest_agent.version
- if self.child_process is not None:
- raise Exception("Illegal attempt to launch multiple child processes")
-
try:
- self.signal_handler = signal.signal(signal.SIGTERM, self.forward_signal)
-
# Launch the correct Python version for python-based agents
cmds = shlex.split(agent_cmd)
if cmds[0].lower() == "python":
@@ -131,41 +137,32 @@ class UpdateHandler(object):
stdout=sys.stdout,
stderr=sys.stderr)
- msg = u"Agent {0} launched with command '{1}'".format(agent_name, agent_cmd)
- logger.info(msg)
- add_event(AGENT_NAME, version=agent_version, message=msg)
+ logger.info(u"Agent {0} launched with command '{1}'", agent_name, agent_cmd)
ret = self.child_process.wait()
- if ret == None:
+ if ret is None:
ret = 1
+
+ msg = u"Agent {0} launched with command '{1}' returned code: {2}".format(
+ agent_name,
+ agent_cmd,
+ ret)
+ add_event(
+ AGENT_NAME,
+ version=agent_version,
+ op=WALAEventOperation.Enable,
+ is_success=(ret <= 0),
+ message=msg)
+
if ret > 0:
- msg = u"Agent {0} launched with command '{1}' failed with code: {2}".format(
- agent_name,
- agent_cmd,
- ret)
logger.warn(msg)
- add_event(
- AGENT_NAME,
- version=agent_version,
- op=WALAEventOperation.Enable,
- is_success=False,
- message=msg)
if latest_agent is not None:
latest_agent.mark_failure()
else:
- msg = u"Agent {0} launched with command '{1}' returned {2}".format(
- agent_name,
- agent_cmd,
- ret)
logger.info(msg)
- add_event(
- AGENT_NAME,
- version=agent_version,
- op=WALAEventOperation.Enable,
- is_success=True,
- message=msg)
+
except Exception as e:
- msg = u"Agent {0} launch failed with command '{1}' failed with exception: {2}".format(
+ msg = u"Agent {0} launched with command '{1}' failed with exception: {2}".format(
agent_name,
agent_cmd,
ustr(e))
@@ -178,14 +175,6 @@ class UpdateHandler(object):
message=msg)
if latest_agent is not None:
latest_agent.mark_failure(is_fatal=True)
- msg = u"Agent {0} is blacklisted".format(agent_name)
- logger.info(msg)
- add_event(
- AGENT_NAME,
- version=agent_version,
- op=WALAEventOperation.Enable,
- is_success=False,
- message=msg)
self.child_process = None
return
@@ -195,10 +184,7 @@ class UpdateHandler(object):
This is the main loop which watches for agent and extension updates.
"""
- msg = u"Agent {0} is running as the current agent".format(
- CURRENT_AGENT)
- logger.info(msg)
- add_event(AGENT_NAME, version=CURRENT_VERSION, is_success=True, message=msg)
+ logger.info(u"Agent {0} is running as the goal state agent", CURRENT_AGENT)
# Launch monitoring threads
from azurelinuxagent.ga.monitor import get_monitor_handler
@@ -213,34 +199,20 @@ class UpdateHandler(object):
# TODO: Add means to stop running
try:
while self.running:
- # Check for a new agent.
- # If a new agent exists (that is, ensure_latest_agent returns
- # true), exit to allow the daemon to respawn using that agent.
if self._ensure_latest_agent():
- msg = u"Agent {0} discovered agent update and will exit".format(
- CURRENT_AGENT)
- logger.info(msg)
- add_event(
- AGENT_NAME,
- version=CURRENT_VERSION,
- is_success=True,
- message=msg)
+ if len(self.agents) > 0:
+ logger.info(
+ u"Agent {0} discovered {1} as an update and will exit",
+ CURRENT_AGENT,
+ self.agents[0].name)
break
- # Process extensions
exthandlers_handler.run()
time.sleep(25)
except Exception as e:
- msg = u"Agent {0} failed with exception: {1}".format(CURRENT_AGENT, ustr(e))
- logger.warn(msg)
- add_event(
- AGENT_NAME,
- version=CURRENT_VERSION,
- op=WALAEventOperation.Enable,
- is_success=False,
- message=msg)
+ logger.warn(u"Agent {0} failed with exception: {1}", CURRENT_AGENT, ustr(e))
sys.exit(1)
sys.exit(0)
@@ -250,9 +222,14 @@ class UpdateHandler(object):
if self.child_process is None:
return
+ logger.info(
+ u"Agent {0} forwarding signal {1} to {2}",
+ CURRENT_AGENT,
+ signum,
+ self.child_agent.name if self.child_agent is not None else CURRENT_AGENT)
self.child_process.send_signal(signum)
- if not self.signal_handler in (None, signal.SIG_IGN, signal.SIG_DFL):
+ if self.signal_handler not in (None, signal.SIG_IGN, signal.SIG_DFL):
self.signal_handler(signum, frame)
elif self.signal_handler is signal.SIG_DFL:
if signum == signal.SIGTERM:
@@ -286,6 +263,9 @@ class UpdateHandler(object):
if next_attempt_time > now:
return False
+ family = conf.get_autoupdate_gafamily()
+ logger.info("Checking for agent family {0} updates", family)
+
self.last_attempt_time = now
try:
protocol = self.protocol_util.get_protocol()
@@ -295,43 +275,39 @@ class UpdateHandler(object):
logger.warn(msg)
add_event(
AGENT_NAME,
+ op=WALAEventOperation.Download,
version=CURRENT_VERSION,
is_success=False,
message=msg)
return False
if self.last_etag is not None and self.last_etag == etag:
- msg = u"Incarnation {0} has no agent updates".format(etag)
- logger.info(msg)
- add_event(AGENT_NAME, version=CURRENT_VERSION, message=msg)
+ logger.info(u"Incarnation {0} has no agent updates", etag)
return False
- logger.info("Check for agent updates")
-
- family = conf.get_autoupdate_gafamily()
manifests = [m for m in manifest_list.vmAgentManifests if m.family == family]
if len(manifests) == 0:
- msg = u"Incarnation {0} has no agent family {1} updates".format(etag, family)
- logger.info(msg)
- add_event(AGENT_NAME, version=CURRENT_VERSION, message=msg)
+ logger.info(u"Incarnation {0} has no agent family {1} updates", etag, family)
return False
try:
pkg_list = protocol.get_vmagent_pkgs(manifests[0])
except ProtocolError as e:
- msg= u"Incarnation {0} failed to get {1} package list: {1}".format(etag,
- family,
- ustr(e))
+ msg= u"Incarnation {0} failed to get {1} package list: {2}".format(
+ etag,
+ family,
+ ustr(e))
logger.warn(msg)
add_event(
AGENT_NAME,
+ op=WALAEventOperation.Download,
version=CURRENT_VERSION,
is_success=False,
message=msg)
return False
# Set the agents to those available for download at least as current as the existing agent
- # and remove from disk any agent no longer report to the VM.
+ # and remove from disk any agent no longer reported to the VM.
# Note:
# The code leaves on disk available, but blacklisted, agents so as to preserve the state.
# Otherwise, those agents could be again downloaded and inappropriately retried.
@@ -350,6 +326,7 @@ class UpdateHandler(object):
too frequently, raise an Exception to force blacklisting.
"""
if latest_agent is None:
+ self.child_agent = None
return
if self.child_agent is None or latest_agent.version != self.child_agent.version:
@@ -386,12 +363,7 @@ class UpdateHandler(object):
for agent_dir in glob.iglob(path) if os.path.isdir(agent_dir)])
self._filter_blacklisted_agents()
except Exception as e:
- msg = u"Exception occurred loading available agents: {0}".format(ustr(e))
- add_event(
- AGENT_NAME,
- version=CURRENT_VERSION,
- is_success=False,
- message=msg)
+ logger.warn(u"Exception occurred loading available agents: {0}", ustr(e))
return
def _purge_agents(self):
@@ -400,25 +372,27 @@ class UpdateHandler(object):
(without removing the current, running agent).
"""
path = os.path.join(conf.get_lib_dir(), "{0}-*".format(AGENT_NAME))
+
known_versions = [agent.version for agent in self.agents]
- known_versions.append(CURRENT_VERSION)
+ if not is_current_agent_installed() and CURRENT_VERSION not in known_versions:
+ logger.warn(
+ u"Running Agent {0} was not found in the agent manifest - adding to list",
+ CURRENT_VERSION)
+ known_versions.append(CURRENT_VERSION)
+
for agent_path in glob.iglob(path):
try:
name = fileutil.trim_ext(agent_path, "zip")
m = AGENT_DIR_PATTERN.match(name)
- if m is not None and not FlexibleVersion(m.group(1)) in known_versions:
+ if m is not None and FlexibleVersion(m.group(1)) not in known_versions:
if os.path.isfile(agent_path):
+ logger.info(u"Purging outdated Agent file {0}", agent_path)
os.remove(agent_path)
else:
+ logger.info(u"Purging outdated Agent directory {0}", agent_path)
shutil.rmtree(agent_path)
except Exception as e:
- msg = u"Exception purging {0}: {1}".format(agent_path, ustr(e))
- logger.warn(msg)
- add_event(
- AGENT_NAME,
- version=CURRENT_VERSION,
- is_success=False,
- message=msg)
+ logger.warn(u"Purging {0} raised exception: {1}", agent_path, ustr(e))
return
def _set_agents(self, agents=[]):
@@ -430,6 +404,7 @@ class UpdateHandler(object):
class GuestAgent(object):
def __init__(self, path=None, pkg=None):
self.pkg = pkg
+ version = None
if path is not None:
m = AGENT_DIR_PATTERN.match(path)
if m == None:
@@ -442,9 +417,13 @@ class GuestAgent(object):
raise UpdateError(u"Illegal agent version: {0}".format(version))
self.version = FlexibleVersion(version)
+ location = u"disk" if path is not None else u"package"
+ logger.info(u"Instantiating Agent {0} from {1}", self.name, location)
+
self.error = None
self._load_error()
self._ensure_downloaded()
+ return
@property
def name(self):
@@ -479,33 +458,30 @@ class GuestAgent(object):
@property
def is_downloaded(self):
- return os.path.isfile(self.get_agent_manifest_path())
+ return self.is_blacklisted or os.path.isfile(self.get_agent_manifest_path())
def mark_failure(self, is_fatal=False):
try:
if not os.path.isdir(self.get_agent_dir()):
os.makedirs(self.get_agent_dir())
- self.error.mark_failure(is_fatal)
+ self.error.mark_failure(is_fatal=is_fatal)
self.error.save()
if is_fatal:
- msg = u"Agent {0} is permanently blacklisted".format(self.name)
- logger.warn(msg)
- add_event(AGENT_NAME, version=self.version, is_success=False, message=msg)
+ logger.warn(u"Agent {0} is permanently blacklisted", self.name)
except Exception as e:
- msg = u"Agent {0} failed recording error state: {1}".format(ustr(e))
- logger.warn(msg)
- add_event(AGENT_NAME, version=self.version, is_success=False, message=msg)
+ logger.warn(u"Agent {0} failed recording error state: {1}", ustr(e))
return
def _ensure_downloaded(self):
try:
+ logger.info(u"Ensuring Agent {0} is downloaded", self.name)
+
if self.is_blacklisted:
- msg = u"Agent {0} is blacklisted - skipping download".format(self.name)
- logger.info(msg)
- add_event(AGENT_NAME, version=self.version, is_success=True, message=msg)
+ logger.info(u"Agent {0} is blacklisted - skipping download", self.name)
return
if self.is_downloaded:
+ logger.info(u"Agent {0} was previously downloaded - skipping download", self.name)
self._load_manifest()
return
@@ -544,9 +520,6 @@ class GuestAgent(object):
return
def _download(self):
- msg = u"Initiating download of Agent {0}".format(self.name)
- logger.info(msg)
- add_event(AGENT_NAME, version=self.version, message=msg)
package = None
for uri in self.pkg.uris:
@@ -555,14 +528,19 @@ class GuestAgent(object):
if resp.status == restutil.httpclient.OK:
package = resp.read()
fileutil.write_file(self.get_agent_pkg_path(), bytearray(package), asbin=True)
+ logger.info(u"Agent {0} downloaded from {1}", self.name, uri.uri)
break
except restutil.HttpError as e:
- msg = u"Agent {0} download from {1} failed".format(self.name, uri.uri)
- logger.warn(msg)
- add_event(AGENT_NAME, version=self.version, is_success=False, message=msg)
+ logger.warn(u"Agent {0} download from {1} failed", self.name, uri.uri)
if not os.path.isfile(self.get_agent_pkg_path()):
msg = u"Unable to download Agent {0} from any URI".format(self.name)
+ add_event(
+ AGENT_NAME,
+ op=WALAEventOperation.Download,
+ version=CURRENT_VERSION,
+ is_success=False,
+ message=msg)
raise UpdateError(msg)
return
@@ -571,15 +549,12 @@ class GuestAgent(object):
if self.error is None:
self.error = GuestAgentError(self.get_agent_error_file())
self.error.load()
+ logger.info(u"Agent {0} error state: {1}", self.name, ustr(self.error))
except Exception as e:
- msg = u"Agent {0} failed loading error state: {1}".format(ustr(e))
- logger.warn(msg)
- add_event(AGENT_NAME, version=self.version, is_success=False, message=msg)
+ logger.warn(u"Agent {0} failed loading error state: {1}", self.name, ustr(e))
return
def _load_manifest(self):
- logger.info(u"Loading Agent manifest from {0}", self.get_agent_manifest_path())
-
path = self.get_agent_manifest_path()
if not os.path.isfile(path):
msg = u"Agent {0} is missing the {1} file".format(self.name, AGENT_MANIFEST_FILE)
@@ -610,6 +585,10 @@ class GuestAgent(object):
ustr(e))
raise UpdateError(msg)
+ logger.info(
+ u"Agent {0} loaded manifest from {1}",
+ self.name,
+ self.get_agent_manifest_path())
logger.verbose(u"Successfully loaded Agent {0} {1}: {2}",
self.name,
AGENT_MANIFEST_FILE,
@@ -617,13 +596,12 @@ class GuestAgent(object):
return
def _unpack(self):
- logger.info(u"Unpacking agent package {0}", self.name)
-
try:
if os.path.isdir(self.get_agent_dir()):
shutil.rmtree(self.get_agent_dir())
zipfile.ZipFile(self.get_agent_pkg_path()).extractall(self.get_agent_dir())
+
except Exception as e:
msg = u"Exception unpacking Agent {0} from {1}: {2}".format(
self.name,
@@ -633,13 +611,14 @@ class GuestAgent(object):
if not os.path.isdir(self.get_agent_dir()):
msg = u"Unpacking Agent {0} failed to create directory {1}".format(
- self.name,
- self.get_agent_dir())
+ self.name,
+ self.get_agent_dir())
raise UpdateError(msg)
- msg = u"Agent {0} successfully unpacked".format(self.name)
- logger.info(msg)
- add_event(AGENT_NAME, version=self.version, message=msg)
+ logger.info(
+ u"Agent {0} unpacked successfully to {1}",
+ self.name,
+ self.get_agent_dir())
return
@@ -705,3 +684,9 @@ class GuestAgentError(object):
u"was_fatal" : self.was_fatal
}
return data
+
+ def __str__(self):
+ return "Last Failure: {0}, Total Failures: {1}, Fatal: {2}".format(
+ self.last_failure,
+ self.failure_count,
+ self.was_fatal)
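For illustration, the `__str__` added above renders the error record in the exact format asserted by the tests below. A hedged sketch (the file path and the resulting values are illustrative):

```python
from azurelinuxagent.ga.update import GuestAgentError

err = GuestAgentError("/var/lib/waagent/WALinuxAgent-2.1.5.6/error.json")
err.mark_failure(is_fatal=True)
print(str(err))
# -> "Last Failure: 1468429627.0, Total Failures: 1, Fatal: True"
```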
| `systemctl stop waagent` hangs
Repros on Ubuntu 16.04 as well as CentOS 7.
The `systemctl stop walinuxagent` command hangs for a long time, although it eventually succeeds after roughly 30-40 seconds. It used to be instant. I think it's related to the signal-handling behavior that was recently changed. | Azure/WALinuxAgent | diff --git a/tests/ga/test_update.py b/tests/ga/test_update.py
index 36241b16..49006606 100644
--- a/tests/ga/test_update.py
+++ b/tests/ga/test_update.py
@@ -134,6 +134,16 @@ class UpdateTestCase(AgentTestCase):
v.sort(reverse=True)
return v
+ def get_error_file(self, error_data=NO_ERROR):
+ fp = tempfile.NamedTemporaryFile(mode="w")
+ json.dump(error_data if error_data is not None else NO_ERROR, fp)
+ fp.seek(0)
+ return fp
+
+ def create_error(self, error_data=NO_ERROR):
+ with self.get_error_file(error_data) as path:
+ return GuestAgentError(path.name)
+
def copy_agents(self, *agents):
if len(agents) <= 0:
agents = get_agent_pkgs()
@@ -206,16 +216,6 @@ class UpdateTestCase(AgentTestCase):
class TestGuestAgentError(UpdateTestCase):
- def get_error_file(self, error_data=NO_ERROR):
- fp = tempfile.NamedTemporaryFile(mode="w")
- json.dump(error_data if error_data is not None else NO_ERROR, fp)
- fp.seek(0)
- return fp
-
- def create_error(self, error_data=NO_ERROR):
- with self.get_error_file(error_data) as path:
- return GuestAgentError(path.name)
-
def test_creation(self):
self.assertRaises(TypeError, GuestAgentError)
self.assertRaises(UpdateError, GuestAgentError, None)
@@ -297,6 +297,22 @@ class TestGuestAgentError(UpdateTestCase):
self.assertTrue(err.failure_count < MAX_FAILURE)
return
+ def test_str(self):
+ err = self.create_error(error_data=NO_ERROR)
+ s = "Last Failure: {0}, Total Failures: {1}, Fatal: {2}".format(
+ NO_ERROR["last_failure"],
+ NO_ERROR["failure_count"],
+ NO_ERROR["was_fatal"])
+ self.assertEqual(s, str(err))
+
+ err = self.create_error(error_data=WITH_ERROR)
+ s = "Last Failure: {0}, Total Failures: {1}, Fatal: {2}".format(
+ WITH_ERROR["last_failure"],
+ WITH_ERROR["failure_count"],
+ WITH_ERROR["was_fatal"])
+ self.assertEqual(s, str(err))
+ return
+
class TestGuestAgent(UpdateTestCase):
def setUp(self):
@@ -331,7 +347,9 @@ class TestGuestAgent(UpdateTestCase):
path = ".".join((os.path.join(conf.get_lib_dir(), get_agent_name()), "zip"))
self.assertEqual(path, agent.get_agent_pkg_path())
- self.assertFalse(agent.is_downloaded)
+ self.assertTrue(agent.is_downloaded)
+ # Note: Agent will get blacklisted since the package for this test is invalid
+ self.assertTrue(agent.is_blacklisted)
self.assertFalse(agent.is_available)
return
@@ -455,6 +473,14 @@ class TestGuestAgent(UpdateTestCase):
self.assertRaises(UpdateError, agent._load_manifest)
return
+ def test_load_error(self):
+ agent = GuestAgent(path=self.agent_path)
+ agent.error = None
+
+ agent._load_error()
+ self.assertTrue(agent.error is not None)
+ return
+
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded")
@patch("azurelinuxagent.ga.update.restutil.http_get")
def test_download(self, mock_http_get, mock_ensure):
@@ -575,6 +601,22 @@ class TestUpdate(UpdateTestCase):
self.update_handler = get_update_handler()
return
+ def test_creation(self):
+ self.assertTrue(self.update_handler.running)
+
+ self.assertEqual(None, self.update_handler.last_etag)
+ self.assertEqual(None, self.update_handler.last_attempt_time)
+
+ self.assertEqual(0, len(self.update_handler.agents))
+
+ self.assertEqual(None, self.update_handler.child_agent)
+ self.assertEqual(None, self.update_handler.child_launch_time)
+ self.assertEqual(0, self.update_handler.child_launch_attempts)
+ self.assertEqual(None, self.update_handler.child_process)
+
+ self.assertEqual(None, self.update_handler.signal_handler)
+ return
+
def _test_ensure_latest_agent(
self,
base_version=FlexibleVersion(AGENT_VERSION),
@@ -914,6 +956,19 @@ class TestUpdate(UpdateTestCase):
self.assertEqual(1, latest_agent.error.failure_count)
return
+ @patch('signal.signal')
+ def test_run_latest_captures_signals(self, mock_signal):
+ self._test_run_latest()
+ self.assertEqual(1, mock_signal.call_count)
+ return
+
+ @patch('signal.signal')
+ def test_run_latest_creates_only_one_signal_handler(self, mock_signal):
+ self.update_handler.signal_handler = "Not None"
+ self._test_run_latest()
+ self.assertEqual(0, mock_signal.call_count)
+ return
+
def _test_run(self, invocations=1, calls=[call.run()], enable_updates=False):
conf.get_autoupdate_enabled = Mock(return_value=enable_updates)
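The two signal tests above pin down the fix for the reported hang: the termination handler is installed exactly once per process. A minimal sketch of the implied logic (not the shipped code; `forward_signal` is a hypothetical method name):

```python
import signal

def run_latest(self):
    # Register only on the first invocation; test_run_latest_captures_signals
    # expects one signal.signal call, and none when a handler already exists.
    if self.signal_handler is None:
        self.signal_handler = signal.signal(signal.SIGTERM, self.forward_signal)
    ...
```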
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 4
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"nose",
"pyasn1",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
nose==1.3.7
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyasn1==0.5.1
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
-e git+https://github.com/Azure/WALinuxAgent.git@2455e86bc0dc4f5349370c58fd5a4516c83a2b2e#egg=WALinuxAgent
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: WALinuxAgent
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- nose==1.3.7
- pyasn1==0.5.1
prefix: /opt/conda/envs/WALinuxAgent
| [
"tests/ga/test_update.py::TestGuestAgentError::test_str",
"tests/ga/test_update.py::TestGuestAgent::test_creation",
"tests/ga/test_update.py::TestUpdate::test_run_latest_creates_only_one_signal_handler"
]
| []
| [
"tests/ga/test_update.py::TestGuestAgentError::test_clear",
"tests/ga/test_update.py::TestGuestAgentError::test_creation",
"tests/ga/test_update.py::TestGuestAgentError::test_load_preserves_error_state",
"tests/ga/test_update.py::TestGuestAgentError::test_mark_failure",
"tests/ga/test_update.py::TestGuestAgentError::test_mark_failure_permanent",
"tests/ga/test_update.py::TestGuestAgentError::test_save",
"tests/ga/test_update.py::TestGuestAgent::test_clear_error",
"tests/ga/test_update.py::TestGuestAgent::test_download",
"tests/ga/test_update.py::TestGuestAgent::test_download_fail",
"tests/ga/test_update.py::TestGuestAgent::test_ensure_download_skips_blacklisted",
"tests/ga/test_update.py::TestGuestAgent::test_ensure_downloaded",
"tests/ga/test_update.py::TestGuestAgent::test_ensure_downloaded_download_fails",
"tests/ga/test_update.py::TestGuestAgent::test_ensure_downloaded_load_manifest_fails",
"tests/ga/test_update.py::TestGuestAgent::test_ensure_downloaded_unpack_fails",
"tests/ga/test_update.py::TestGuestAgent::test_is_available",
"tests/ga/test_update.py::TestGuestAgent::test_is_blacklisted",
"tests/ga/test_update.py::TestGuestAgent::test_is_downloaded",
"tests/ga/test_update.py::TestGuestAgent::test_load_error",
"tests/ga/test_update.py::TestGuestAgent::test_load_manifest",
"tests/ga/test_update.py::TestGuestAgent::test_load_manifest_is_empty",
"tests/ga/test_update.py::TestGuestAgent::test_load_manifest_is_malformed",
"tests/ga/test_update.py::TestGuestAgent::test_load_manifest_missing",
"tests/ga/test_update.py::TestGuestAgent::test_mark_failure",
"tests/ga/test_update.py::TestGuestAgent::test_unpack",
"tests/ga/test_update.py::TestGuestAgent::test_unpack_fail",
"tests/ga/test_update.py::TestUpdate::test_creation",
"tests/ga/test_update.py::TestUpdate::test_ensure_lastest_agent_purges_old_agents",
"tests/ga/test_update.py::TestUpdate::test_ensure_latest_agent_ignores_old_agents",
"tests/ga/test_update.py::TestUpdate::test_ensure_latest_agent_returns_true_on_first_use",
"tests/ga/test_update.py::TestUpdate::test_ensure_latest_agent_skips_if_too_frequent",
"tests/ga/test_update.py::TestUpdate::test_ensure_latest_agent_skips_if_when_no_new_versions",
"tests/ga/test_update.py::TestUpdate::test_ensure_latest_agent_skips_when_etag_matches",
"tests/ga/test_update.py::TestUpdate::test_ensure_latest_agent_skips_when_no_versions",
"tests/ga/test_update.py::TestUpdate::test_ensure_latest_agent_skips_when_updates_are_disabled",
"tests/ga/test_update.py::TestUpdate::test_ensure_latest_agent_sorts",
"tests/ga/test_update.py::TestUpdate::test_evaluate_agent_health_ignores_installed_agent",
"tests/ga/test_update.py::TestUpdate::test_evaluate_agent_health_raises_exception_for_restarting_agent",
"tests/ga/test_update.py::TestUpdate::test_evaluate_agent_health_resets_with_new_agent",
"tests/ga/test_update.py::TestUpdate::test_evaluate_agent_health_will_not_raise_exception_for_long_restarts",
"tests/ga/test_update.py::TestUpdate::test_evaluate_agent_health_will_not_raise_exception_too_few_restarts",
"tests/ga/test_update.py::TestUpdate::test_filter_blacklisted_agents",
"tests/ga/test_update.py::TestUpdate::test_get_latest_agent",
"tests/ga/test_update.py::TestUpdate::test_get_latest_agent_no_updates",
"tests/ga/test_update.py::TestUpdate::test_get_latest_agent_skip_updates",
"tests/ga/test_update.py::TestUpdate::test_get_latest_agent_skips_unavailable",
"tests/ga/test_update.py::TestUpdate::test_load_agents",
"tests/ga/test_update.py::TestUpdate::test_load_agents_does_not_reload",
"tests/ga/test_update.py::TestUpdate::test_load_agents_sorts",
"tests/ga/test_update.py::TestUpdate::test_purge_agents",
"tests/ga/test_update.py::TestUpdate::test_run",
"tests/ga/test_update.py::TestUpdate::test_run_keeps_running",
"tests/ga/test_update.py::TestUpdate::test_run_latest",
"tests/ga/test_update.py::TestUpdate::test_run_latest_captures_signals",
"tests/ga/test_update.py::TestUpdate::test_run_latest_defaults_to_current",
"tests/ga/test_update.py::TestUpdate::test_run_latest_exception_blacklists",
"tests/ga/test_update.py::TestUpdate::test_run_latest_forwards_output",
"tests/ga/test_update.py::TestUpdate::test_run_latest_missing_code_marks_failures",
"tests/ga/test_update.py::TestUpdate::test_run_latest_nonzero_code_marks_failures",
"tests/ga/test_update.py::TestUpdate::test_run_stops_if_update_available",
"tests/ga/test_update.py::TestUpdate::test_set_agents"
]
| []
| Apache License 2.0 | 625 | [
"azurelinuxagent/common/version.py",
"azurelinuxagent/ga/monitor.py",
"azurelinuxagent/ga/exthandlers.py",
"azurelinuxagent/ga/update.py"
]
| [
"azurelinuxagent/common/version.py",
"azurelinuxagent/ga/monitor.py",
"azurelinuxagent/ga/exthandlers.py",
"azurelinuxagent/ga/update.py"
]
|
googlemaps__google-maps-services-python-139 | 8013de5d7c1b4867dcafb4449b97c1cebab33127 | 2016-07-13 00:34:27 | 2ccf0b2912019341aa60aeb65fc36ca6d9d02a56 | diff --git a/googlemaps/convert.py b/googlemaps/convert.py
index 1c2264e..6206cfa 100644
--- a/googlemaps/convert.py
+++ b/googlemaps/convert.py
@@ -220,9 +220,17 @@ def components(arg):
:rtype: basestring
"""
+
+ # Components may have multiple values per type, here we
+ # expand them into individual key/value items, eg:
+ # {"country": ["US", "AU"], "foo": 1} -> "country:AU", "country:US", "foo:1"
+ def expand(arg):
+ for k, v in arg.items():
+ for item in as_list(v):
+ yield "%s:%s" % (k, item)
+
if isinstance(arg, dict):
- arg = sorted(["%s:%s" % (k, arg[k]) for k in arg])
- return "|".join(arg)
+ return "|".join(sorted(expand(arg)))
raise TypeError(
"Expected a dict for components, "
| Allow user to append several values for same component filter type
It would be nice to allow several values for same component type.
Use case: You may want to filter sublocalities via locality component type ("locality matches against both locality and sublocality types") and in addition you may restrict result set by another locality.
Maybe we should extend converting components by defaultdict containing lists?
| googlemaps/google-maps-services-python | diff --git a/test/test_convert.py b/test/test_convert.py
index 851eda1..090a95f 100644
--- a/test/test_convert.py
+++ b/test/test_convert.py
@@ -91,6 +91,9 @@ class ConvertTest(unittest.TestCase):
c = {"country": "US", "foo": 1}
self.assertEqual("country:US|foo:1", convert.components(c))
+ c = {"country": ["US", "AU"], "foo": 1}
+ self.assertEqual("country:AU|country:US|foo:1", convert.components(c))
+
with self.assertRaises(TypeError):
convert.components("test")
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 2.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"responses==0.3",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"test_requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
exceptiongroup==1.2.2
-e git+https://github.com/googlemaps/google-maps-services-python.git@8013de5d7c1b4867dcafb4449b97c1cebab33127#egg=googlemaps
idna==3.10
iniconfig==2.1.0
mock==5.2.0
nose==1.3.7
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
requests==2.10.0
responses==0.3.0
six==1.17.0
tomli==2.2.1
urllib3==2.3.0
| name: google-maps-services-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- mock==5.2.0
- nose==1.3.7
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- requests==2.10.0
- responses==0.3.0
- six==1.17.0
- tomli==2.2.1
- urllib3==2.3.0
prefix: /opt/conda/envs/google-maps-services-python
| [
"test/test_convert.py::ConvertTest::test_components"
]
| []
| [
"test/test_convert.py::ConvertTest::test_as_list",
"test/test_convert.py::ConvertTest::test_bounds",
"test/test_convert.py::ConvertTest::test_join_list",
"test/test_convert.py::ConvertTest::test_latlng",
"test/test_convert.py::ConvertTest::test_location_list",
"test/test_convert.py::ConvertTest::test_polyline_decode",
"test/test_convert.py::ConvertTest::test_polyline_round_trip",
"test/test_convert.py::ConvertTest::test_time"
]
| []
| Apache License 2.0 | 626 | [
"googlemaps/convert.py"
]
| [
"googlemaps/convert.py"
]
|
|
JonathonReinhart__scuba-67 | 7e74ccde08d9943a71697b6c9210ae483fd0d2fe | 2016-07-13 01:53:39 | 7e0e786630258376013b454c75ef74e7e18711b8 | diff --git a/example/alias_multiline/.scuba.yml b/example/alias_multiline/.scuba.yml
new file mode 100644
index 0000000..90d7d1b
--- /dev/null
+++ b/example/alias_multiline/.scuba.yml
@@ -0,0 +1,9 @@
+image: !from_yaml ../common.yml image
+aliases:
+ simple: echo 'a simple alias'
+
+ complex:
+ script:
+ - echo 'this is a complex script'
+ - date
+ - uname -a
diff --git a/example/alias_multiline/run_example.sh b/example/alias_multiline/run_example.sh
new file mode 100755
index 0000000..019c32d
--- /dev/null
+++ b/example/alias_multiline/run_example.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+cd $(dirname $0)
+scuba complex
diff --git a/example/common.yml b/example/common.yml
new file mode 100644
index 0000000..cd78a52
--- /dev/null
+++ b/example/common.yml
@@ -0,0 +1,1 @@
+image: debian:8.2
diff --git a/example/run_all.sh b/example/run_all.sh
index c734038..9cabda1 100755
--- a/example/run_all.sh
+++ b/example/run_all.sh
@@ -7,3 +7,4 @@ cd $(dirname $0)
./external_yaml_simple/run_example.sh
./external_yaml_nested/run_example.sh
./scubainit_hooks/run_example.sh
+./alias_multiline/run_example.sh
diff --git a/scuba/__main__.py b/scuba/__main__.py
index a1e2c7a..a0f4ef3 100644
--- a/scuba/__main__.py
+++ b/scuba/__main__.py
@@ -230,21 +230,24 @@ class ScubaDive(object):
self.add_option('--tty')
# Process any aliases
- cmd = self.config.process_command(self.user_command)
+ try:
+ script = self.config.process_command(self.user_command)
+ except ConfigError as cfgerr:
+ raise ScubaError(str(cfgerr))
'''
Normally, if the user provides no command to "docker run", the image's
default CMD is run. Because we set the entrypiont, scuba must emulate the
default behavior itself.
'''
- if len(cmd) == 0:
+ if len(script) == 0:
# No user-provided command; we want to run the image's default command
verbose_msg('No user command; getting command from image')
try:
- cmd = get_image_command(self.config.image)
+ script = [get_image_command(self.config.image)]
except DockerError as e:
raise ScubaError(str(e))
- verbose_msg('{0} Cmd: "{1}"'.format(self.config.image, cmd))
+ verbose_msg('{0} Cmd: "{1}"'.format(self.config.image, script[0]))
# The user command is executed via a generated shell script
with self.open_scubadir_file('command.sh', 'wt') as f:
@@ -252,7 +255,9 @@ class ScubaDive(object):
writeln(f, '#!/bin/sh')
writeln(f, '# Auto-generated from scuba')
writeln(f, 'set -e')
- writeln(f, shell_quote_cmd(cmd))
+ for cmd in script:
+ writeln(f, shell_quote_cmd(cmd))
+
def open_scubadir_file(self, name, mode):
diff --git a/scuba/config.py b/scuba/config.py
index 250b92e..9a7fd41 100644
--- a/scuba/config.py
+++ b/scuba/config.py
@@ -122,10 +122,7 @@ class ScubaConfig(object):
self._image = data['image']
- self._aliases = {}
- for alias, cmdstr in data.get('aliases', {}).items():
- self._aliases[alias] = shlex_split(cmdstr)
-
+ self._load_aliases(data)
self._load_hooks(data)
@@ -169,6 +166,13 @@ class ScubaConfig(object):
raise ConfigError("{0}: must be string or dict".format(name))
+ def _load_aliases(self, data):
+ self._aliases = {}
+
+ for name, node in data.get('aliases', {}).items():
+ self._aliases[name] = [shlex_split(cmd) for cmd in self._process_script(node, name)]
+
+
def _load_hooks(self, data):
self._hooks = {}
@@ -193,13 +197,30 @@ class ScubaConfig(object):
def process_command(self, command):
- if command:
- rep = self.aliases.get(command[0])
- if rep:
- command.pop(0)
- command = rep + command
+ '''Processes a user command using aliases
+
+ Arguments:
+ command A user command list (e.g. argv)
+
+ Returns: A "script" - a list of command lists
+ '''
+ if not command:
+ return command
+
+ script = self.aliases.get(command[0])
+ if not script:
+ return [command]
+
+ if len(command) > 1:
+ # If an alias is a multiline script, then no additional
+ # arguments will be allowed in the scuba invocation.
+ if len(script) > 1:
+ raise ConfigError('Additional arguments not allowed with multi-line aliases')
+
+ command.pop(0)
+ return [script[0] + command]
- return command
+ return script
def load_config(path):
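For illustration, the return shape of the reworked `process_command`, drawn from the tests below (`apple` is a single-line alias for `banana`; `multi` is the three-line echo alias from the test `.scuba.yml`):

```python
cfg.process_command(['apple', 'arg1'])
# -> [['banana', 'arg1']]           extra args are appended to single-line aliases

cfg.process_command(['multi'])
# -> [['echo', 'multi'], ['echo', 'line'], ['echo', 'alias']]

cfg.process_command(['multi', 'extra'])
# raises ConfigError: additional arguments are not allowed with multi-line aliases
```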
| Enhanced aliases
Allow complex, multi-line aliases, using the [common script schema](https://github.com/JonathonReinhart/scuba/blob/master/doc/yaml-reference.md#common-script-schema), like this:
```yaml
image: foo
aliases:
build:
script:
- echo "build script starting"
- make -j4
- other stuff
```
When an alias is used, a script will be generated from it and injected into the container for execution.
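For the `build` alias above, the injected script would look roughly like this, reconstructed from the `command.sh` writer in the patch (exact shell quoting may differ):

```sh
#!/bin/sh
# Auto-generated from scuba
set -e
echo 'build script starting'
make -j4
other stuff
```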
*If an alias is a multiline script, then no additional arguments will be allowed in the scuba invocation.*
A somewhat important change made by this is that *all aliases will now be executed by a shell*, instead of directly, as before. This alone may warrant a v2.0 bump, and we should then probably execute everything (even non-aliases) via the shell. | JonathonReinhart/scuba | diff --git a/tests/test_config.py b/tests/test_config.py
index fb14e0d..3843dad 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -115,8 +115,8 @@ class TestConfig(TestCase):
config = scuba.config.load_config('.scuba.yml')
assert_equals(config.image, 'busybox')
assert_equals(len(config.aliases), 2)
- assert_seq_equal(config.aliases['foo'], ['bar'])
- assert_seq_equal(config.aliases['snap'], ['crackle', 'pop'])
+ assert_seq_equal(config.aliases['foo'], [['bar']])
+ assert_seq_equal(config.aliases['snap'], [['crackle', 'pop']])
@@ -182,13 +182,22 @@ class TestConfig(TestCase):
######################################################################
# process_command
+ def test_process_command_empty(self):
+ '''process_command handles no aliases and an empty command'''
+ cfg = scuba.config.ScubaConfig(
+ image = 'na',
+ )
+ result = cfg.process_command([])
+ assert_equal(result, [])
+
+
def test_process_command_no_aliases(self):
'''process_command handles no aliases'''
cfg = scuba.config.ScubaConfig(
image = 'na',
)
result = cfg.process_command(['cmd', 'arg1', 'arg2'])
- assert_equal(result, ['cmd', 'arg1', 'arg2'])
+ assert_equal(result, [['cmd', 'arg1', 'arg2']])
def test_process_command_aliases_unused(self):
'''process_command handles unused aliases'''
@@ -200,7 +209,7 @@ class TestConfig(TestCase):
),
)
result = cfg.process_command(['cmd', 'arg1', 'arg2'])
- assert_equal(result, ['cmd', 'arg1', 'arg2'])
+ assert_equal(result, [['cmd', 'arg1', 'arg2']])
def test_process_command_aliases_used_noargs(self):
'''process_command handles aliases with no args'''
@@ -212,7 +221,7 @@ class TestConfig(TestCase):
),
)
result = cfg.process_command(['apple', 'arg1', 'arg2'])
- assert_equal(result, ['banana', 'arg1', 'arg2'])
+ assert_equal(result, [['banana', 'arg1', 'arg2']])
def test_process_command_aliases_used_withargs(self):
'''process_command handles aliases with args'''
@@ -224,7 +233,40 @@ class TestConfig(TestCase):
),
)
result = cfg.process_command(['apple', 'arg1', 'arg2'])
- assert_equal(result, ['banana', 'cherry', 'pie is good', 'arg1', 'arg2'])
+ assert_equal(result, [['banana', 'cherry', 'pie is good', 'arg1', 'arg2']])
+
+ def test_process_command_multiline_aliases_used(self):
+ '''process_command handles multiline aliases'''
+ cfg = scuba.config.ScubaConfig(
+ image = 'na',
+ aliases = dict(
+ apple = dict(script=[
+ 'banana cherry "pie is good"',
+ 'so is peach',
+ ]),
+ cat = 'dog',
+ ),
+ )
+ result = cfg.process_command(['apple'])
+ assert_equal(result, [
+ ['banana', 'cherry', 'pie is good'],
+ ['so', 'is', 'peach'],
+ ])
+
+ def test_process_command_multiline_aliases_forbid_user_args(self):
+ '''process_command raises ConfigError when args are specified with multiline aliases'''
+ cfg = scuba.config.ScubaConfig(
+ image = 'na',
+ aliases = dict(
+ apple = dict(script=[
+ 'banana cherry "pie is good"',
+ 'so is peach',
+ ]),
+ cat = 'dog',
+ ),
+ )
+ assert_raises(scuba.config.ConfigError, cfg.process_command, ['apple', 'ARGS', 'NOT ALLOWED'])
+
############################################################################
# Hooks
diff --git a/tests/test_main.py b/tests/test_main.py
index 7207a98..474af21 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -160,6 +160,23 @@ class TestMain(TestCase):
# ConfigError -> exit(128)
self.run_scuba([], 128)
+ def test_multiline_alias_no_args_error(self):
+ '''Verify config errors from passing arguments to multi-line alias are caught'''
+ with open('.scuba.yml', 'w') as f:
+ f.write('''
+ image: {image}
+ aliases:
+ multi:
+ script:
+ - echo multi
+ - echo line
+ - echo alias
+ '''.format(image=DOCKER_IMAGE))
+
+ # ConfigError -> exit(128)
+ self.run_scuba(['multi', 'with', 'args'], 128)
+
+
def test_version(self):
'''Verify scuba prints its version for -v'''
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 3
} | 1.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"coverage",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==3.7.1
exceptiongroup==1.2.2
iniconfig==2.1.0
nose==1.3.7
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
PyYAML==6.0.2
-e git+https://github.com/JonathonReinhart/scuba.git@7e74ccde08d9943a71697b6c9210ae483fd0d2fe#egg=scuba
tomli==2.2.1
| name: scuba
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==3.7.1
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- nose==1.3.7
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pyyaml==6.0.2
- tomli==2.2.1
prefix: /opt/conda/envs/scuba
| [
"tests/test_config.py::TestConfig::test_load_config_with_aliases",
"tests/test_config.py::TestConfig::test_process_command_aliases_unused",
"tests/test_config.py::TestConfig::test_process_command_aliases_used_noargs",
"tests/test_config.py::TestConfig::test_process_command_aliases_used_withargs",
"tests/test_config.py::TestConfig::test_process_command_multiline_aliases_forbid_user_args",
"tests/test_config.py::TestConfig::test_process_command_multiline_aliases_used",
"tests/test_config.py::TestConfig::test_process_command_no_aliases"
]
| [
"tests/test_main.py::TestMain::test_arbitrary_docker_args",
"tests/test_main.py::TestMain::test_args",
"tests/test_main.py::TestMain::test_basic",
"tests/test_main.py::TestMain::test_created_file_ownership",
"tests/test_main.py::TestMain::test_dry_run",
"tests/test_main.py::TestMain::test_home_writable_root",
"tests/test_main.py::TestMain::test_home_writable_scubauser",
"tests/test_main.py::TestMain::test_no_cmd",
"tests/test_main.py::TestMain::test_no_docker",
"tests/test_main.py::TestMain::test_root_hook",
"tests/test_main.py::TestMain::test_user_hook",
"tests/test_main.py::TestMain::test_user_root",
"tests/test_main.py::TestMain::test_user_scubauser",
"tests/test_main.py::TestMain::test_with_tty",
"tests/test_main.py::TestMain::test_without_tty"
]
| [
"tests/test_config.py::TestConfig::test_find_config_cur_dir",
"tests/test_config.py::TestConfig::test_find_config_nonexist",
"tests/test_config.py::TestConfig::test_find_config_parent_dir",
"tests/test_config.py::TestConfig::test_find_config_way_up",
"tests/test_config.py::TestConfig::test_hooks_invalid_list",
"tests/test_config.py::TestConfig::test_hooks_invalid_script_type",
"tests/test_config.py::TestConfig::test_hooks_missing_script",
"tests/test_config.py::TestConfig::test_hooks_mixed",
"tests/test_config.py::TestConfig::test_load_config_empty",
"tests/test_config.py::TestConfig::test_load_config_image_from_yaml",
"tests/test_config.py::TestConfig::test_load_config_image_from_yaml_missing_arg",
"tests/test_config.py::TestConfig::test_load_config_image_from_yaml_missing_file",
"tests/test_config.py::TestConfig::test_load_config_image_from_yaml_nested_key_missing",
"tests/test_config.py::TestConfig::test_load_config_image_from_yaml_nested_keys",
"tests/test_config.py::TestConfig::test_load_config_image_from_yaml_unicode_args",
"tests/test_config.py::TestConfig::test_load_config_minimal",
"tests/test_config.py::TestConfig::test_load_unexpected_node",
"tests/test_config.py::TestConfig::test_process_command_empty",
"tests/test_main.py::TestMain::test_config_error",
"tests/test_main.py::TestMain::test_handle_get_image_command_error",
"tests/test_main.py::TestMain::test_multiline_alias_no_args_error",
"tests/test_main.py::TestMain::test_version"
]
| []
| MIT License | 627 | [
"example/alias_multiline/.scuba.yml",
"scuba/__main__.py",
"scuba/config.py",
"example/run_all.sh",
"example/alias_multiline/run_example.sh",
"example/common.yml"
]
| [
"example/alias_multiline/.scuba.yml",
"scuba/__main__.py",
"scuba/config.py",
"example/run_all.sh",
"example/alias_multiline/run_example.sh",
"example/common.yml"
]
|
|
simphony__simphony-remote-93 | cdd16775caca62c1447a43acc236c4fe17f9f9ba | 2016-07-13 10:41:22 | 61ec23ffe44463cbc41f6fa54b4247963093ed79 | diff --git a/jupyterhub/remoteappmanager_config.py b/jupyterhub/remoteappmanager_config.py
index 20a7b7c..299b780 100644
--- a/jupyterhub/remoteappmanager_config.py
+++ b/jupyterhub/remoteappmanager_config.py
@@ -28,11 +28,23 @@ else:
raise RuntimeError("Unknown platform {}".format(_platform))
-# Define the sqlalchemy url for the database.
-# Notes:
-# 1. this database is shared among all instances of the remoteappmanager.
-# 2. When running with system-user mode, the jupyterhub spawners spawn in
-# the user's home directory; when running in virtual-user mode,
-# the current directory is the directory where jupyterhub is started
-# 3. '~' would be evaluated as the spawned user's home directory
-db_url = "sqlite:///"+os.path.abspath("./remoteappmanager.db")
+# -----------------------------
+# Define the accounting class
+# -----------------------------
+# Notes on os.path:
+# 1. When running with system-user mode, both the current directory and '~'
+# are the system user's home directory.
+# 2. When running in virtual-user mode, the current directory is the
+# directory where jupyterhub is started, '~' would be evaluated according to
+# the spawned process's owner's home directory (not the virtual user's
+# home directory)
+
+# CSV database support
+# accounting_class = "remoteappmanager.db.csv_db.CSVAccounting"
+# accounting_kwargs = {
+# "csv_file_path": os.path.abspath("./remoteappmanager.csv")}
+
+# sqlite database support
+# accounting_class = "remoteappmanager.db.orm.AppAccounting"
+# accounting_kwargs = {
+# "url": "sqlite:///"+os.path.abspath('./remoteappmanager.db')}
diff --git a/remoteappmanager/application.py b/remoteappmanager/application.py
index b5131db..1dc4e59 100644
--- a/remoteappmanager/application.py
+++ b/remoteappmanager/application.py
@@ -1,10 +1,11 @@
+import importlib
import os
+
from traitlets import Instance, default
from tornado import web
import tornado.ioloop
from jinja2 import Environment, FileSystemLoader
-from remoteappmanager.db import csv_db, orm
from remoteappmanager.db.interfaces import ABCAccounting
from remoteappmanager.handlers.api import HomeHandler
from remoteappmanager.logging.logging_mixin import LoggingMixin
@@ -111,14 +112,11 @@ class Application(web.Application, LoggingMixin):
@default("db")
def _db_default(self):
"""Initializes the database connection."""
- if self.file_config.db_url.endswith('.db'):
- db = orm.AppAccounting(self.file_config.db_url)
- elif self.file_config.db_url.endswith('.csv'):
- db = csv_db.CSVAccounting(self.file_config.db_url)
- else:
- raise ValueError("Unsupported database format: {}".format(
- self.file_config.db_url))
- return db
+ class_path = self.file_config.accounting_class
+ module_path, _, cls_name = class_path.rpartition('.')
+ cls = getattr(importlib.import_module(module_path), cls_name)
+
+ return cls(**self.file_config.accounting_kwargs)
@default("user")
def _user_default(self):
diff --git a/remoteappmanager/file_config.py b/remoteappmanager/file_config.py
index 771ad44..d374ce4 100644
--- a/remoteappmanager/file_config.py
+++ b/remoteappmanager/file_config.py
@@ -1,5 +1,6 @@
import tornado.options
-from traitlets import HasTraits, Int, Unicode, Bool
+from traitlets import HasTraits, Int, Unicode, Bool, Dict
+from traitlets.utils.sentinel import Sentinel
from remoteappmanager import paths
from remoteappmanager.traitlets import set_traits_from_dict
@@ -30,8 +31,13 @@ class FileConfig(HasTraits):
docker_host = Unicode(default_value="",
help="The docker host to connect to")
- db_url = Unicode(default_value="sqlite:///remoteappmanager.db",
- help="The url of the database, in sqlalchemy format.")
+ accounting_class = Unicode(
+ default_value="remoteappmanager.db.orm.AppAccounting",
+ help="The import path to a subclass of ABCAccounting.")
+
+ accounting_kwargs = Dict(
+ default_value={'url': 'sqlite:///remoteappmanager.db'},
+ help="The keyword arguments for initalising the Accounting instance")
login_url = Unicode(default_value="/hub",
help=("The url to be redirected to if the user is not "
@@ -64,10 +70,19 @@ class FileConfig(HasTraits):
file_line_parser = tornado.options.OptionParser()
for traitlet_name, traitlet in self.traits().items():
+ # tornado.OptionParser defines an option with a Python type
+ # and performs type validation.
+ # traitlet.default_value may be a Sentinel value (e.g. Tuple,
+ # Dict, Instance), in which case we use the repr
+ default_value = traitlet.default_value
+
+ if type(default_value) is Sentinel:
+ default_value = eval(traitlet.default_value_repr())
+
file_line_parser.define(
traitlet_name,
- default=traitlet.default_value,
- type=type(traitlet.default_value),
+ default=default_value,
+ type=type(default_value),
help=traitlet.help)
# Let it raise the exception if the file is not there.
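Taken together, the patch lets a deployment pick the accounting backend declaratively. A sketch of a config file using the new traits (the CSV path is illustrative):

```python
# remoteappmanager_config.py: any ABCAccounting subclass can be named.
accounting_class = "remoteappmanager.db.csv_db.CSVAccounting"
accounting_kwargs = {"csv_file_path": "/path/to/remoteappmanager.csv"}
```

Internally the application resolves the class with `importlib`, as the diff above shows:

```python
import importlib

module_path, _, cls_name = class_path.rpartition('.')
cls = getattr(importlib.import_module(module_path), cls_name)
db = cls(**accounting_kwargs)
```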
| Support arbitrary database implementation via the remoteappmanager config file
We could potentially support any arbitrary database implementation compliant with `remoteappmanager.db.interfaces.ABCAccounting` by making `remoteappmanager.application.Application.db` a configurable object, e.g. by initialising an Accounting object inside the config file.
Currently the application initialises `remoteappmanager.db.orm.AppAccounting` or `remoteappmanager.db.csv_db.CSVAccounting` internally depending on the database file extension (see [`remoteappmanager.application.Application._db_default`](https://github.com/simphony/simphony-remote/blob/master/remoteappmanager/application.py#L112)). | simphony/simphony-remote | diff --git a/tests/fixtures/remoteappmanager_config.py b/tests/fixtures/remoteappmanager_config.py
index adaf874..6d6ddb0 100644
--- a/tests/fixtures/remoteappmanager_config.py
+++ b/tests/fixtures/remoteappmanager_config.py
@@ -1,9 +1,6 @@
-from tests import fixtures
-
tls = True
tls_verify = True
tls_ca = '~/.docker/machine/machines/default/ca.pem'
tls_cert = '~/.docker/machine/machines/default/cert.pem'
tls_key = '~/.docker/machine/machines/default/key.pem'
docker_host = "tcp://192.168.99.100:2376"
-db_url = "sqlite:///"+fixtures.get("remoteappmanager.db")
diff --git a/tests/handlers/test_home_handler.py b/tests/handlers/test_home_handler.py
index c532f9e..6df1653 100644
--- a/tests/handlers/test_home_handler.py
+++ b/tests/handlers/test_home_handler.py
@@ -48,7 +48,7 @@ class TestHomeHandler(TempMixin, utils.AsyncHTTPTestCase):
command_line_config = utils.basic_command_line_config()
file_config = utils.basic_file_config()
- file_config.db_url = "sqlite:///"+sqlite_file_path
+ file_config.accounting_kwargs = {'url': "sqlite:///"+sqlite_file_path}
app = Application(command_line_config, file_config)
app.reverse_proxy = mock.Mock(spec=ReverseProxy)
diff --git a/tests/test_application.py b/tests/test_application.py
index c51080d..04b0e65 100644
--- a/tests/test_application.py
+++ b/tests/test_application.py
@@ -2,12 +2,19 @@ import os
from tests.temp_mixin import TempMixin
from tornado import testing
+from traitlets.traitlets import TraitError
from remoteappmanager.application import Application
+
from tests import utils
from tests.db import test_csv_db
+class DummyAccounting:
+ def __init__(self, *args, **kwargs):
+ pass
+
+
class TestApplication(TempMixin, testing.AsyncTestCase):
def setUp(self):
super().setUp()
@@ -16,14 +23,9 @@ class TestApplication(TempMixin, testing.AsyncTestCase):
os.environ["PROXY_API_TOKEN"] = "dummy_token"
- self.sqlite_file_path = os.path.join(self.tempdir, "sqlite.db")
- utils.init_sqlite_db(self.sqlite_file_path)
-
- self.command_line_config = utils.basic_command_line_config()
+ # File config with orm.AppAccounting
self.file_config = utils.basic_file_config()
- self.file_config.db_url = "sqlite:///"+self.sqlite_file_path
-
- self.app = Application(self.command_line_config, self.file_config)
+ self.command_line_config = utils.basic_command_line_config()
def tearDown(self):
if self._old_proxy_api_token is not None:
@@ -33,8 +35,18 @@ class TestApplication(TempMixin, testing.AsyncTestCase):
super().tearDown()
- def test_initialization(self):
- app = self.app
+ def test_initialization_with_sqlite_db(self):
+ # Initialise database
+ sqlite_file_path = os.path.join(self.tempdir, "sqlite.db")
+ utils.init_sqlite_db(sqlite_file_path)
+
+ self.file_config.accounting_class = (
+ "remoteappmanager.db.orm.AppAccounting")
+ self.file_config.accounting_kwargs = {
+ "url": "sqlite:///"+sqlite_file_path}
+
+ app = Application(self.command_line_config, self.file_config)
+
self.assertIsNotNone(app.command_line_config)
self.assertIsNotNone(app.file_config)
@@ -46,6 +58,21 @@ class TestApplication(TempMixin, testing.AsyncTestCase):
self.assertEqual(app.user.name, "username")
self.assertEqual(app.user.account, None)
+ def test_error_default_value_with_unimportable_accounting(self):
+ self.file_config.accounting_class = "not.importable.Class"
+ app = Application(self.command_line_config, self.file_config)
+
+ with self.assertRaises(ImportError):
+ app.db
+
+ def test_db_default_value_with_accounting_wrong_subclass(self):
+ self.file_config.accounting_class = (
+ "tests.test_application.DummyAccounting")
+ app = Application(self.command_line_config, self.file_config)
+
+ with self.assertRaises(TraitError):
+ app.db
+
# FIXME: Some of these tests are the same and should be refactored
# Not doing it now to prevent more merge conflict with PR #52
@@ -60,15 +87,6 @@ class TestApplicationWithCSV(TempMixin, testing.AsyncTestCase):
self.command_line_config = utils.basic_command_line_config()
self.file_config = utils.basic_file_config()
- self.csv_file = os.path.join(self.tempdir, 'testing.csv')
- self.file_config.db_url = self.csv_file
-
- test_csv_db.write_csv_file(self.csv_file,
- test_csv_db.GoodTable.headers,
- test_csv_db.GoodTable.records)
-
- self.app = Application(self.command_line_config, self.file_config)
-
def tearDown(self):
if self._old_proxy_api_token is not None:
os.environ["PROXY_API_TOKEN"] = self._old_proxy_api_token
@@ -78,15 +96,21 @@ class TestApplicationWithCSV(TempMixin, testing.AsyncTestCase):
super().tearDown()
def test_initialization(self):
- app = self.app
- self.assertIsNotNone(app.command_line_config)
- self.assertIsNotNone(app.file_config)
+ self.file_config.accounting_class = (
+ "remoteappmanager.db.csv_db.CSVAccounting")
- def test_database_initialization(self):
- app = self.app
+ csv_file = os.path.join(self.tempdir, 'testing.csv')
+ self.file_config.accounting_kwargs = {"csv_file_path": csv_file}
+ test_csv_db.write_csv_file(csv_file,
+ test_csv_db.GoodTable.headers,
+ test_csv_db.GoodTable.records)
+
+ app = Application(self.command_line_config, self.file_config)
+
+ self.assertIsNotNone(app.command_line_config)
+ self.assertIsNotNone(app.file_config)
self.assertIsNotNone(app.db)
self.assertIsNotNone(app.user)
-
self.assertEqual(app.user.name, "username")
self.assertIsInstance(app.user.account, test_csv_db.CSVUser)
diff --git a/tests/test_file_config.py b/tests/test_file_config.py
index 258d347..61e530f 100644
--- a/tests/test_file_config.py
+++ b/tests/test_file_config.py
@@ -1,18 +1,53 @@
+import os
import unittest
from remoteappmanager.file_config import FileConfig
-# The arguments we pass
-from tests import fixtures
+from tests.temp_mixin import TempMixin
-class TestFileConfig(unittest.TestCase):
+DOCKER_CONFIG = '''
+tls = True
+tls_verify = True
+tls_ca = '~/.docker/machine/machines/default/ca.pem'
+tls_cert = '~/.docker/machine/machines/default/cert.pem'
+tls_key = '~/.docker/machine/machines/default/key.pem'
+docker_host = "tcp://192.168.99.100:2376"
+'''
+
+GOOD_ACCOUNTING_CONFIG = '''
+accounting_class = "remoteappmanager.db.csv_db.CSVAccounting"
+accounting_kwargs = {"csv_file_path": "file_path.csv"}
+'''
+
+
+class TestFileConfig(TempMixin, unittest.TestCase):
+
def setUp(self):
+ super().setUp()
+
# We can run the application config only once. It uses a global
# object that can't be reinitialized cleanly unless we access a private
# member.
- self.config_file = fixtures.get("remoteappmanager_config.py")
+ self.config_file = os.path.join(self.tempdir,
+ 'config.py')
+
+ def test_initialization_with_default_accounting(self):
+ with open(self.config_file, 'w') as fhandle:
+ print(DOCKER_CONFIG, file=fhandle)
- def test_initialization(self):
config = FileConfig()
config.parse_config(self.config_file)
+
+ def test_initialization_with_good_accounting(self):
+ with open(self.config_file, 'w') as fhandle:
+ print(DOCKER_CONFIG, file=fhandle)
+ print(GOOD_ACCOUNTING_CONFIG, file=fhandle)
+
+ config = FileConfig()
+ config.parse_config(self.config_file)
+
+ self.assertEqual(config.accounting_class,
+ "remoteappmanager.db.csv_db.CSVAccounting")
+ self.assertDictEqual(config.accounting_kwargs,
+ {"csv_file_path": "file_path.csv"})
diff --git a/tests/utils.py b/tests/utils.py
index 28217dc..201c7d7 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -278,10 +278,7 @@ def basic_command_line_config():
def basic_file_config():
- options = {
- "db_url": "sqlite://"
- }
- return FileConfig(**options)
+ return FileConfig()
@contextlib.contextmanager
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 1
},
"num_modified_files": 3
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"flake8",
"sphinx",
"coverage"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.16
alembic==1.15.2
annotated-types==0.7.0
arrow==1.3.0
async-generator==1.10
attrs==25.3.0
babel==2.17.0
certifi==2025.1.31
certipy==0.2.2
cffi==1.17.1
charset-normalizer==3.4.1
click==8.1.8
coverage==7.8.0
cryptography==44.0.2
docker-py==1.10.6
docker-pycreds==0.4.0
docutils==0.21.2
escapism==1.0.1
exceptiongroup==1.2.2
flake8==7.2.0
fqdn==1.5.1
greenlet==3.1.1
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
iniconfig==2.1.0
isoduration==20.11.0
Jinja2==3.1.6
jsonpointer==3.0.0
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
jupyter-events==0.12.0
jupyter_client==8.6.3
jupyter_core==5.7.2
jupyterhub==5.2.1
Mako==1.3.9
MarkupSafe==3.0.2
mccabe==0.7.0
oauthlib==3.2.2
packaging==24.2
pamela==1.2.0
platformdirs==4.3.7
pluggy==1.5.0
prometheus_client==0.21.1
pycodestyle==2.13.0
pycparser==2.22
pydantic==2.11.1
pydantic_core==2.33.0
pyflakes==3.3.1
Pygments==2.19.1
pytest==8.3.5
python-dateutil==2.9.0.post0
python-json-logger==3.3.0
PyYAML==6.0.2
pyzmq==26.3.0
referencing==0.36.2
-e git+https://github.com/simphony/simphony-remote.git@cdd16775caca62c1447a43acc236c4fe17f9f9ba#egg=remoteappmanager
requests==2.32.3
rfc3339-validator==0.1.4
rfc3986-validator==0.1.1
rpds-py==0.24.0
six==1.17.0
snowballstemmer==2.2.0
Sphinx==7.4.7
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
SQLAlchemy==2.0.40
tabulate==0.9.0
tomli==2.2.1
tornado==6.4.2
traitlets==5.14.3
types-python-dateutil==2.9.0.20241206
typing-inspection==0.4.0
typing_extensions==4.13.0
uri-template==1.3.0
urllib3==2.3.0
webcolors==24.11.1
websocket-client==1.8.0
zipp==3.21.0
| name: simphony-remote
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.16
- alembic==1.15.2
- annotated-types==0.7.0
- arrow==1.3.0
- async-generator==1.10
- attrs==25.3.0
- babel==2.17.0
- certifi==2025.1.31
- certipy==0.2.2
- cffi==1.17.1
- charset-normalizer==3.4.1
- click==8.1.8
- coverage==7.8.0
- cryptography==44.0.2
- docker-py==1.10.6
- docker-pycreds==0.4.0
- docutils==0.21.2
- escapism==1.0.1
- exceptiongroup==1.2.2
- flake8==7.2.0
- fqdn==1.5.1
- greenlet==3.1.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- isoduration==20.11.0
- jinja2==3.1.6
- jsonpointer==3.0.0
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- jupyter-client==8.6.3
- jupyter-core==5.7.2
- jupyter-events==0.12.0
- jupyterhub==5.2.1
- mako==1.3.9
- markupsafe==3.0.2
- mccabe==0.7.0
- oauthlib==3.2.2
- packaging==24.2
- pamela==1.2.0
- platformdirs==4.3.7
- pluggy==1.5.0
- prometheus-client==0.21.1
- pycodestyle==2.13.0
- pycparser==2.22
- pydantic==2.11.1
- pydantic-core==2.33.0
- pyflakes==3.3.1
- pygments==2.19.1
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- python-json-logger==3.3.0
- pyyaml==6.0.2
- pyzmq==26.3.0
- referencing==0.36.2
- requests==2.32.3
- rfc3339-validator==0.1.4
- rfc3986-validator==0.1.1
- rpds-py==0.24.0
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==7.4.7
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- sqlalchemy==2.0.40
- tabulate==0.9.0
- tomli==2.2.1
- tornado==6.4.2
- traitlets==5.14.3
- types-python-dateutil==2.9.0.20241206
- typing-extensions==4.13.0
- typing-inspection==0.4.0
- uri-template==1.3.0
- urllib3==2.3.0
- webcolors==24.11.1
- websocket-client==1.8.0
- zipp==3.21.0
prefix: /opt/conda/envs/simphony-remote
| [
"tests/test_application.py::TestApplication::test_db_default_value_with_accounting_wrong_subclass",
"tests/test_application.py::TestApplication::test_error_default_value_with_unimportable_accounting",
"tests/test_application.py::TestApplicationWithCSV::test_initialization",
"tests/test_file_config.py::TestFileConfig::test_initialization_with_good_accounting"
]
| [
"tests/test_application.py::TestApplication::test_initialization_with_sqlite_db"
]
| [
"tests/handlers/test_home_handler.py::TestHomeHandler::test_failed_auth",
"tests/handlers/test_home_handler.py::TestHomeHandler::test_home",
"tests/handlers/test_home_handler.py::TestHomeHandler::test_post_failed_auth",
"tests/handlers/test_home_handler.py::TestHomeHandler::test_post_start",
"tests/handlers/test_home_handler.py::TestHomeHandler::test_post_stop",
"tests/handlers/test_home_handler.py::TestHomeHandler::test_post_view",
"tests/test_file_config.py::TestFileConfig::test_initialization_with_default_accounting"
]
| []
| BSD 3-Clause "New" or "Revised" License | 628 | [
"remoteappmanager/application.py",
"remoteappmanager/file_config.py",
"jupyterhub/remoteappmanager_config.py"
]
| [
"remoteappmanager/application.py",
"remoteappmanager/file_config.py",
"jupyterhub/remoteappmanager_config.py"
]
|
|
Azure__WALinuxAgent-321 | 9930df6600a061bcc5618b427ffa23cff5943f46 | 2016-07-13 19:17:07 | 9930df6600a061bcc5618b427ffa23cff5943f46 | msftclas: Hi __@brendandixon__, I'm your friendly neighborhood Microsoft Pull Request Bot (You can call me MSBOT). Thanks for your contribution!
You've already signed the contribution license agreement. Thanks!
The agreement was validated by Microsoft and real humans are currently evaluating your PR.
TTYL, MSBOT;
brendandixon: @ahmetalpbalkan As we discussed, what you saw was caused by both of us actively issuing commands on the same VM.
hglkrijger: lgtm | diff --git a/azurelinuxagent/common/version.py b/azurelinuxagent/common/version.py
index 222a7208..357d30ff 100644
--- a/azurelinuxagent/common/version.py
+++ b/azurelinuxagent/common/version.py
@@ -50,7 +50,7 @@ def get_distro():
AGENT_NAME = "WALinuxAgent"
AGENT_LONG_NAME = "Azure Linux Agent"
-AGENT_VERSION = '2.1.5.rc6'
+AGENT_VERSION = '2.1.5.rc7'
AGENT_LONG_VERSION = "{0}-{1}".format(AGENT_NAME, AGENT_VERSION)
AGENT_DESCRIPTION = """\
The Azure Linux Agent supports the provisioning and running of Linux
diff --git a/azurelinuxagent/ga/update.py b/azurelinuxagent/ga/update.py
index 6dae7aff..e89608a0 100644
--- a/azurelinuxagent/ga/update.py
+++ b/azurelinuxagent/ga/update.py
@@ -53,8 +53,10 @@ from azurelinuxagent.ga.exthandlers import HandlerManifest
AGENT_ERROR_FILE = "error.json" # File name for agent error record
AGENT_MANIFEST_FILE = "HandlerManifest.json"
+CHILD_HEALTH_INTERVAL = 15 * 60
CHILD_LAUNCH_INTERVAL = 5 * 60
CHILD_LAUNCH_RESTART_MAX = 3
+CHILD_POLL_INTERVAL = 60
MAX_FAILURE = 3 # Max failure allowed for agent before blacklisted
@@ -139,27 +141,50 @@ class UpdateHandler(object):
logger.info(u"Agent {0} launched with command '{1}'", agent_name, agent_cmd)
- ret = self.child_process.wait()
- if ret is None:
- ret = 1
+ ret = None
+ start_time = time.time()
+ while (time.time() - start_time) < CHILD_HEALTH_INTERVAL:
+ time.sleep(CHILD_POLL_INTERVAL)
+ ret = self.child_process.poll()
+ if ret is not None:
+ break
- msg = u"Agent {0} launched with command '{1}' returned code: {2}".format(
- agent_name,
- agent_cmd,
- ret)
- add_event(
- AGENT_NAME,
- version=agent_version,
- op=WALAEventOperation.Enable,
- is_success=(ret <= 0),
- message=msg)
+ if ret is None or ret <= 0:
+ msg = u"Agent {0} launched with command '{1}' is successfully running".format(
+ agent_name,
+ agent_cmd)
+ logger.info(msg)
+ add_event(
+ AGENT_NAME,
+ version=agent_version,
+ op=WALAEventOperation.Enable,
+ is_success=True,
+ message=msg)
- if ret > 0:
+ if ret is None:
+ ret = self.child_process.wait()
+
+ else:
+ msg = u"Agent {0} launched with command '{1}' failed with return code: {2}".format(
+ agent_name,
+ agent_cmd,
+ ret)
+ logger.warn(msg)
+ add_event(
+ AGENT_NAME,
+ version=agent_version,
+ op=WALAEventOperation.Enable,
+ is_success=False,
+ message=msg)
+
+ if ret is not None and ret > 0:
+ msg = u"Agent {0} launched with command '{1}' returned code: {2}".format(
+ agent_name,
+ agent_cmd,
+ ret)
logger.warn(msg)
if latest_agent is not None:
latest_agent.mark_failure()
- else:
- logger.info(msg)
except Exception as e:
msg = u"Agent {0} launched with command '{1}' failed with exception: {2}".format(
@@ -311,9 +336,7 @@ class UpdateHandler(object):
# Note:
# The code leaves on disk available, but blacklisted, agents so as to preserve the state.
# Otherwise, those agents could be again downloaded and inappropriately retried.
- self._set_agents([GuestAgent(pkg=pkg) for pkg in
- [pkg for pkg in pkg_list.versions
- if FlexibleVersion(pkg.version) > base_version]])
+ self._set_agents([GuestAgent(pkg=pkg) for pkg in pkg_list.versions])
self._purge_agents()
self._filter_blacklisted_agents()
@@ -469,7 +492,7 @@ class GuestAgent(object):
if is_fatal:
logger.warn(u"Agent {0} is permanently blacklisted", self.name)
except Exception as e:
- logger.warn(u"Agent {0} failed recording error state: {1}", ustr(e))
+ logger.warn(u"Agent {0} failed recording error state: {1}", self.name, ustr(e))
return
def _ensure_downloaded(self):
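
For readers skimming the diff above, the heart of the fix is the poll-before-wait pattern: instead of blocking on `wait()` and only learning the outcome after the child dies, the parent polls the child every `CHILD_POLL_INTERVAL` seconds and, if it survives `CHILD_HEALTH_INTERVAL`, reports it healthy before blocking. The sketch below is a minimal standalone illustration of that idea — it is not the agent's code; the command, intervals, and messages are invented for the example.

```python
import subprocess
import time

# Illustrative values only; the actual patch uses 15 minutes / 60 seconds.
HEALTH_INTERVAL = 5  # seconds the child must survive to count as healthy
POLL_INTERVAL = 1    # how often to check on it

def launch_and_watch(cmd):
    """Launch cmd, poll it for HEALTH_INTERVAL seconds, then decide."""
    child = subprocess.Popen(cmd)
    ret = None
    start = time.time()
    while (time.time() - start) < HEALTH_INTERVAL:
        time.sleep(POLL_INTERVAL)
        ret = child.poll()  # None while the child is still running
        if ret is not None:
            break           # child exited (or crashed) before proving itself
    if ret is None:
        # Survived the health interval: report success, then block until exit.
        print("child looks healthy; waiting for it to finish")
        ret = child.wait()
    elif ret > 0:
        print("child failed early with return code", ret)
    else:
        print("child exited early with code", ret)  # 0, or negative = signal
    return ret

if __name__ == "__main__":
    # Requires python3 on PATH; the child sleeps past the health check.
    launch_and_watch(["python3", "-c", "import time; time.sleep(10)"])
```

The payoff, visible in the issue below, is that a child dying within the health interval is surfaced as an explicit failure event rather than being discovered only after the fact.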
| [2.1.5.rc6] recycles indefinitely when child process crashes
When I use the 2.1.5.rc6 code (9930df6600a061bcc5618b427ffa23cff5943f46) and pull in the update for 2.1.5.rc6, it starts just fine and stops just fine (so #297 is resolved).
However, when I kill the child process with a SIGSEGV signal (pid 9215 below)
```
# pstree -ap|grep py
|-python3,9211 -u /usr/sbin/waagent -daemon
| `-python3,9215 -u bin/WALinuxAgent-2.1.5.rc6-py2.7.egg -run-exthandlers
| |-{python3},9234
| `-{python3},9237
```
the parent process remains alive and blacklists the 2.1.5.6 update, then tries to bring the original (on-disk) agent back, but that agent exits with code 0 immediately. The parent then keeps relaunching the original agent roughly once per second, because every launch exits immediately.
<details>
<summary>waagent.log just after `kill -SEGV 9215`</summary>
```
2016/07/13 17:06:59.276675 INFO Agent WALinuxAgent-2.1.5.rc6 forwarding signal 15 to WALinuxAgent-2.1.5.6
2016/07/13 17:07:20.107186 INFO Azure Linux Agent Version:2.1.5.rc6
2016/07/13 17:07:20.119216 INFO OS: ubuntu 16.04
2016/07/13 17:07:20.143949 INFO Python: 3.5.1
2016/07/13 17:07:20.150224 INFO Run daemon
2016/07/13 17:07:20.155990 INFO No RDMA handler exists for distro='Ubuntu' version='16.04'
2016/07/13 17:07:20.170232 INFO Clean protocol
2016/07/13 17:07:20.176382 INFO run Ubuntu provision handler
2016/07/13 17:07:20.183917 INFO RDMA capabilities are not enabled, skipping
2016/07/13 17:07:20.196958 INFO Instantiating Agent WALinuxAgent-2.1.5.6 from disk
2016/07/13 17:07:20.207621 INFO Agent WALinuxAgent-2.1.5.6 error state: Last Failure: 0.0, Total Failures: 0, Fatal: False
2016/07/13 17:07:20.231936 INFO Ensuring Agent WALinuxAgent-2.1.5.6 is downloaded
2016/07/13 17:07:20.254939 INFO Agent WALinuxAgent-2.1.5.6 was previously downloaded - skipping download
2016/07/13 17:07:20.267972 INFO Agent WALinuxAgent-2.1.5.6 loaded manifest from /var/lib/waagent/WALinuxAgent-2.1.5.6/HandlerManifest.json
2016/07/13 17:07:20.292037 INFO Determined Agent WALinuxAgent-2.1.5.6 to be the latest agent
2016/07/13 17:07:20.312122 INFO Agent WALinuxAgent-2.1.5.6 launched with command 'python3 -u bin/WALinuxAgent-2.1.5.rc6-py2.7.egg -run-exthandlers'
2016/07/13 17:07:20.825810 INFO Agent WALinuxAgent-2.1.5.6 is running as the goal state agent
2016/07/13 17:07:20.850573 INFO Detect protocol endpoints
2016/07/13 17:07:20.860236 INFO Clean protocol
2016/07/13 17:07:20.861832 INFO WireServer endpoint is not found. Rerun dhcp handler
2016/07/13 17:07:20.863408 INFO test for route to 168.63.129.16
2016/07/13 17:07:20.865187 INFO route to 168.63.129.16 exists
2016/07/13 17:07:20.871366 INFO Wire server endpoint:168.63.129.16
2016/07/13 17:07:20.887742 INFO Fabric preferred wire protocol version:2015-04-05
2016/07/13 17:07:20.890022 INFO Wire protocol version:2012-11-30
2016/07/13 17:07:20.897740 WARNING Server prefered version:2015-04-05
2016/07/13 17:07:25.435991 INFO Event: name=WALA, op=HeartBeat, message=
2016/07/13 17:07:25.438363 INFO Start env monitor service.
2016/07/13 17:07:25.490944 INFO Configure routes
2016/07/13 17:07:25.498195 INFO Gateway:None
2016/07/13 17:07:25.512737 INFO Routes:None
2016/07/13 17:07:25.544071 INFO Checking for agent family Test updates
2016/07/13 17:07:25.554014 INFO Wire server endpoint:168.63.129.16
2016/07/13 17:07:25.627459 WARNING Running Agent 2.1.5.6 was not found in the agent manifest - adding to list
2016/07/13 17:07:25.653260 INFO Wire server endpoint:168.63.129.16
2016/07/13 17:07:25.692572 INFO Handle extensions updates for incarnation 7
2016/07/13 17:07:25.704451 INFO [Microsoft.OSTCExtensions.CustomScriptForLinux-1.5.2.0] Expected handler state: uninstall
2016/07/13 17:07:25.711403 INFO [Microsoft.OSTCExtensions.CustomScriptForLinux-1.5.2.0] Current handler state is: NotInstalled
2016/07/13 17:08:07.532646 INFO Event: name=WALinuxAgent, op=Enable, message=Agent WALinuxAgent-2.1.5.6 launched with command 'python3 -u bin/WALinuxAgent-2.1.5.rc6-py2.7.egg -run-exthandlers' returned code: -11
2016/07/13 17:08:07.536272 INFO Agent WALinuxAgent-2.1.5.6 launched with command 'python3 -u bin/WALinuxAgent-2.1.5.rc6-py2.7.egg -run-exthandlers' returned code: -11
2016/07/13 17:08:07.537055 INFO Determined Agent WALinuxAgent-2.1.5.6 to be the latest agent
2016/07/13 17:08:07.708001 INFO Agent WALinuxAgent-2.1.5.6 launched with command 'python3 -u bin/WALinuxAgent-2.1.5.rc6-py2.7.egg -run-exthandlers'
2016/07/13 17:08:08.279361 INFO Agent WALinuxAgent-2.1.5.6 is running as the goal state agent
2016/07/13 17:08:08.309054 INFO Wire server endpoint:168.63.129.16
2016/07/13 17:08:08.325240 INFO Event: name=WALA, op=HeartBeat, message=
2016/07/13 17:08:08.328355 INFO Start env monitor service.
2016/07/13 17:08:08.343749 INFO Configure routes
2016/07/13 17:08:08.348599 INFO Gateway:None
2016/07/13 17:08:08.350375 INFO Routes:None
2016/07/13 17:08:08.372374 INFO Checking for agent family Test updates
2016/07/13 17:08:08.376769 INFO Wire server endpoint:168.63.129.16
2016/07/13 17:08:08.434136 WARNING Running Agent 2.1.5.6 was not found in the agent manifest - adding to list
2016/07/13 17:08:08.439013 INFO Wire server endpoint:168.63.129.16
2016/07/13 17:08:08.454443 INFO Handle extensions updates for incarnation 7
2016/07/13 17:08:08.464798 INFO [Microsoft.OSTCExtensions.CustomScriptForLinux-1.5.2.0] Expected handler state: uninstall
2016/07/13 17:08:08.470012 INFO [Microsoft.OSTCExtensions.CustomScriptForLinux-1.5.2.0] Current handler state is: NotInstalled
2016/07/13 17:08:20.249774 INFO Event: name=WALinuxAgent, op=Enable, message=Agent WALinuxAgent-2.1.5.6 launched with command 'python3 -u bin/WALinuxAgent-2.1.5.rc6-py2.7.egg -run-exthandlers' returned code: -11
2016/07/13 17:08:20.260353 INFO Agent WALinuxAgent-2.1.5.6 launched with command 'python3 -u bin/WALinuxAgent-2.1.5.rc6-py2.7.egg -run-exthandlers' returned code: -11
2016/07/13 17:08:20.261798 INFO Determined Agent WALinuxAgent-2.1.5.6 to be the latest agent
2016/07/13 17:08:20.263587 WARNING Agent WALinuxAgent-2.1.5.6 launched with command 'python3 -u bin/WALinuxAgent-2.1.5.rc6-py2.7.egg -run-exthandlers' failed with exception: Agent WALinuxAgent-2.1.5.6 restarted more than 3 times in 300 seconds
2016/07/13 17:08:20.264827 ERROR Event: name=WALinuxAgent, op=Enable, message=Agent WALinuxAgent-2.1.5.6 launched with command 'python3 -u bin/WALinuxAgent-2.1.5.rc6-py2.7.egg -run-exthandlers' failed with exception: Agent WALinuxAgent-2.1.5.6 restarted more than 3 times in 300 seconds
2016/07/13 17:08:20.273802 WARNING Agent WALinuxAgent-2.1.5.6 is permanently blacklisted
2016/07/13 17:08:20.274808 INFO Installed Agent WALinuxAgent-2.1.5.rc6 is the most current agent
2016/07/13 17:08:20.380124 INFO Agent WALinuxAgent-2.1.5.rc6 launched with command 'python3 -u /usr/sbin/waagent -run-exthandlers'
2016/07/13 17:08:20.663884 INFO Agent WALinuxAgent-2.1.5.rc6 is running as the goal state agent
2016/07/13 17:08:20.680595 INFO Wire server endpoint:168.63.129.16
2016/07/13 17:08:20.695598 INFO Start env monitor service.
2016/07/13 17:08:20.695137 INFO Event: name=WALA, op=HeartBeat, message=
2016/07/13 17:08:20.702653 INFO Configure routes
2016/07/13 17:08:20.716589 INFO Gateway:None
2016/07/13 17:08:20.723530 INFO Routes:None
2016/07/13 17:08:20.733594 INFO Checking for agent family Test updates
2016/07/13 17:08:20.746173 INFO Wire server endpoint:168.63.129.16
2016/07/13 17:08:20.787213 INFO Instantiating Agent WALinuxAgent-2.1.5.4 from package
2016/07/13 17:08:20.798081 INFO Agent WALinuxAgent-2.1.5.4 error state: Last Failure: 0.0, Total Failures: 0, Fatal: False
2016/07/13 17:08:20.812881 INFO Ensuring Agent WALinuxAgent-2.1.5.4 is downloaded
2016/07/13 17:08:20.853544 INFO Agent WALinuxAgent-2.1.5.4 downloaded from https://rdfepirv2sg1prdstr03.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent__Test__2.1.5.4
2016/07/13 17:08:20.879837 INFO Agent WALinuxAgent-2.1.5.4 unpacked successfully to /var/lib/waagent/WALinuxAgent-2.1.5.4
2016/07/13 17:08:20.894670 INFO Agent WALinuxAgent-2.1.5.4 loaded manifest from /var/lib/waagent/WALinuxAgent-2.1.5.4/HandlerManifest.json
2016/07/13 17:08:20.910666 INFO Agent WALinuxAgent-2.1.5.4 error state: Last Failure: 0.0, Total Failures: 0, Fatal: False
2016/07/13 17:08:20.924276 INFO Agent WALinuxAgent-2.1.5.4 downloaded successfully
2016/07/13 17:08:20.934040 INFO Event: name=WALinuxAgent, op=Install, message=Agent WALinuxAgent-2.1.5.4 downloaded successfully
2016/07/13 17:08:20.949440 INFO Instantiating Agent WALinuxAgent-2.1.5.5 from package
2016/07/13 17:08:20.959615 INFO Agent WALinuxAgent-2.1.5.5 error state: Last Failure: 0.0, Total Failures: 0, Fatal: False
2016/07/13 17:08:20.974076 INFO Ensuring Agent WALinuxAgent-2.1.5.5 is downloaded
2016/07/13 17:08:21.063149 INFO Agent WALinuxAgent-2.1.5.5 downloaded from https://rdfepirv2sg1prdstr03.blob.core.windows.net/7d89d439b79f4452950452399add2c90/Microsoft.OSTCLinuxAgent__Test__2.1.5.5
2016/07/13 17:08:21.090715 INFO Agent WALinuxAgent-2.1.5.5 unpacked successfully to /var/lib/waagent/WALinuxAgent-2.1.5.5
2016/07/13 17:08:21.105711 INFO Agent WALinuxAgent-2.1.5.5 loaded manifest from /var/lib/waagent/WALinuxAgent-2.1.5.5/HandlerManifest.json
2016/07/13 17:08:21.121287 INFO Agent WALinuxAgent-2.1.5.5 error state: Last Failure: 0.0, Total Failures: 0, Fatal: False
2016/07/13 17:08:21.134870 INFO Agent WALinuxAgent-2.1.5.5 downloaded successfully
2016/07/13 17:08:21.144520 INFO Event: name=WALinuxAgent, op=Install, message=Agent WALinuxAgent-2.1.5.5 downloaded successfully
2016/07/13 17:08:21.159014 INFO Instantiating Agent WALinuxAgent-2.1.5.6 from package
2016/07/13 17:08:21.169173 INFO Agent WALinuxAgent-2.1.5.6 error state: Last Failure: 1468429700.2734947, Total Failures: 1, Fatal: True
2016/07/13 17:08:21.184254 INFO Ensuring Agent WALinuxAgent-2.1.5.6 is downloaded
2016/07/13 17:08:21.194376 INFO Agent WALinuxAgent-2.1.5.6 is blacklisted - skipping download
2016/07/13 17:08:21.206521 INFO Agent WALinuxAgent-2.1.5.rc6 discovered WALinuxAgent-2.1.5.5 as an update and will exit
2016/07/13 17:08:21.254857 INFO Event: name=WALinuxAgent, op=Enable, message=Agent WALinuxAgent-2.1.5.rc6 launched with command 'python3 -u /usr/sbin/waagent -run-exthandlers' returned code: 0
2016/07/13 17:08:21.259406 INFO Agent WALinuxAgent-2.1.5.rc6 launched with command 'python3 -u /usr/sbin/waagent -run-exthandlers' returned code: 0
2016/07/13 17:08:21.269854 INFO Installed Agent WALinuxAgent-2.1.5.rc6 is the most current agent
2016/07/13 17:08:21.312086 INFO Agent WALinuxAgent-2.1.5.rc6 launched with command 'python3 -u /usr/sbin/waagent -run-exthandlers'
2016/07/13 17:08:21.571364 INFO Agent WALinuxAgent-2.1.5.rc6 is running as the goal state agent
2016/07/13 17:08:21.588498 INFO Wire server endpoint:168.63.129.16
2016/07/13 17:08:21.601277 INFO Event: name=WALA, op=HeartBeat, message=
2016/07/13 17:08:21.601958 INFO Start env monitor service.
2016/07/13 17:08:21.620125 INFO Configure routes
2016/07/13 17:08:21.627243 INFO Gateway:None
2016/07/13 17:08:21.638921 INFO Routes:None
2016/07/13 17:08:21.656949 INFO Checking for agent family Test updates
2016/07/13 17:08:21.661214 INFO Wire server endpoint:168.63.129.16
2016/07/13 17:08:21.699835 INFO Instantiating Agent WALinuxAgent-2.1.5.4 from package
2016/07/13 17:08:21.704155 INFO Agent WALinuxAgent-2.1.5.4 error state: Last Failure: 0.0, Total Failures: 0, Fatal: False
2016/07/13 17:08:21.706388 INFO Ensuring Agent WALinuxAgent-2.1.5.4 is downloaded
2016/07/13 17:08:21.715997 INFO Agent WALinuxAgent-2.1.5.4 was previously downloaded - skipping download
2016/07/13 17:08:21.718021 INFO Agent WALinuxAgent-2.1.5.4 loaded manifest from /var/lib/waagent/WALinuxAgent-2.1.5.4/HandlerManifest.json
2016/07/13 17:08:21.727165 INFO Instantiating Agent WALinuxAgent-2.1.5.5 from package
2016/07/13 17:08:21.736498 INFO Agent WALinuxAgent-2.1.5.5 error state: Last Failure: 0.0, Total Failures: 0, Fatal: False
2016/07/13 17:08:21.738249 INFO Ensuring Agent WALinuxAgent-2.1.5.5 is downloaded
2016/07/13 17:08:21.747362 INFO Agent WALinuxAgent-2.1.5.5 was previously downloaded - skipping download
2016/07/13 17:08:21.756687 INFO Agent WALinuxAgent-2.1.5.5 loaded manifest from /var/lib/waagent/WALinuxAgent-2.1.5.5/HandlerManifest.json
2016/07/13 17:08:21.765777 INFO Instantiating Agent WALinuxAgent-2.1.5.6 from package
2016/07/13 17:08:21.772285 INFO Agent WALinuxAgent-2.1.5.6 error state: Last Failure: 1468429700.2734947, Total Failures: 1, Fatal: True
2016/07/13 17:08:21.774713 INFO Ensuring Agent WALinuxAgent-2.1.5.6 is downloaded
2016/07/13 17:08:21.782079 INFO Agent WALinuxAgent-2.1.5.6 is blacklisted - skipping download
2016/07/13 17:08:21.784885 INFO Agent WALinuxAgent-2.1.5.rc6 discovered WALinuxAgent-2.1.5.5 as an update and will exit
2016/07/13 17:08:21.882052 INFO Event: name=WALinuxAgent, op=Enable, message=Agent WALinuxAgent-2.1.5.rc6 launched with command 'python3 -u /usr/sbin/waagent -run-exthandlers' returned code: 0
2016/07/13 17:08:21.886683 INFO Agent WALinuxAgent-2.1.5.rc6 launched with command 'python3 -u /usr/sbin/waagent -run-exthandlers' returned code: 0
2016/07/13 17:08:21.887854 INFO Installed Agent WALinuxAgent-2.1.5.rc6 is the most current agent
[... the identical cycle — rc6 launched, "discovered WALinuxAgent-2.1.5.5 as an update and will exit", "returned code: 0", relaunch — repeats roughly once per second from here on and keeps going ...]
```
</details> | Azure/WALinuxAgent | diff --git a/tests/ga/test_update.py b/tests/ga/test_update.py
index 49006606..74804fb9 100644
--- a/tests/ga/test_update.py
+++ b/tests/ga/test_update.py
@@ -627,7 +627,6 @@ class TestUpdate(UpdateTestCase):
if versions is None or len(versions) <= 0:
versions = [latest_version]
- self.update_handler.protocol_util = Mock(return_value=ProtocolMock)
etag = self.update_handler.last_etag if self.update_handler.last_etag is not None else 42
if protocol is None:
protocol = ProtocolMock(etag=etag, versions=versions)
@@ -641,7 +640,7 @@ class TestUpdate(UpdateTestCase):
self.assertTrue(self._test_ensure_latest_agent())
return
- def test_ensure_latest_agent_ignores_old_agents(self):
+ def test_ensure_latest_agent_includes_old_agents(self):
self.prepare_agents()
old_count = FlexibleVersion(AGENT_VERSION).version[-1]
@@ -651,7 +650,7 @@ class TestUpdate(UpdateTestCase):
all_count = len(self.agent_versions())
self.assertTrue(self._test_ensure_latest_agent(versions=self.agent_versions()))
- self.assertEqual(all_count - old_count, len(self.update_handler.agents))
+ self.assertEqual(all_count, len(self.update_handler.agents))
return
def test_ensure_lastest_agent_purges_old_agents(self):
@@ -829,7 +828,7 @@ class TestUpdate(UpdateTestCase):
kept_agents = self.update_handler.agents[1::2]
purged_agents = self.update_handler.agents[::2]
- # Reload and assert only the kept agents remain on disk
+ # Reload and assert only the kept agents remain on disk
self.update_handler.agents = kept_agents
self.update_handler._purge_agents()
self.update_handler._load_agents()
@@ -850,14 +849,19 @@ class TestUpdate(UpdateTestCase):
self.assertTrue(os.path.exists(agent_path + ".zip"))
return
- def _test_run_latest(self, return_value=0, side_effect=None, child_calls=1):
- mock_child = Mock()
- mock_child.wait = Mock(return_value=return_value, side_effect=side_effect)
+ def _test_run_latest(self, mock_child=None, mock_time=None):
+ if mock_child is None:
+ mock_child = ChildMock()
+ if mock_time is None:
+ mock_time = TimeMock()
+
with patch('subprocess.Popen', return_value=mock_child) as mock_popen:
- self.update_handler.run_latest()
- self.assertEqual(child_calls, mock_popen.call_count)
+ with patch('time.time', side_effect=mock_time.time):
+ with patch('time.sleep', return_value=mock_time.sleep):
+ self.update_handler.run_latest()
+ self.assertEqual(1, mock_popen.call_count)
- return mock_popen.call_args
+ return mock_popen.call_args
def test_run_latest(self):
self.prepare_agents()
@@ -873,6 +877,31 @@ class TestUpdate(UpdateTestCase):
self.assertEqual(agent.get_agent_dir(), kwargs['cwd'])
return
+ def test_run_latest_polls_and_waits_for_success(self):
+ mock_child = ChildMock(return_value=None)
+ mock_time = TimeMock(time_increment=CHILD_HEALTH_INTERVAL/3)
+ self._test_run_latest(mock_child=mock_child, mock_time=mock_time)
+ self.assertEqual(2, mock_child.poll.call_count)
+ self.assertEqual(1, mock_child.wait.call_count)
+ return
+
+ def test_run_latest_polling_stops_at_success(self):
+ mock_child = ChildMock(return_value=0)
+ mock_time = TimeMock(time_increment=CHILD_HEALTH_INTERVAL/3)
+ self._test_run_latest(mock_child=mock_child, mock_time=mock_time)
+ self.assertEqual(1, mock_child.poll.call_count)
+ self.assertEqual(0, mock_child.wait.call_count)
+ return
+
+ def test_run_latest_polling_stops_at_failure(self):
+ mock_child = ChildMock(return_value=42)
+ mock_time = TimeMock()
+ self._test_run_latest(mock_child=mock_child, mock_time=mock_time)
+ self.assertEqual(1, mock_child.poll.call_count)
+ self.assertEqual(0, mock_child.wait.call_count)
+ self.assertEqual(2, mock_time.time_call_count)
+ return
+
def test_run_latest_defaults_to_current(self):
self.assertEqual(None, self.update_handler.get_latest_agent())
@@ -894,7 +923,7 @@ class TestUpdate(UpdateTestCase):
saved_stdout, sys.stdout = sys.stdout, stdout
saved_stderr, sys.stderr = sys.stderr, stderr
try:
- self._test_run_latest(side_effect=faux_logger)
+ self._test_run_latest(mock_child=ChildMock(side_effect=faux_logger))
finally:
sys.stdout = saved_stdout
sys.stderr = saved_stderr
@@ -916,23 +945,7 @@ class TestUpdate(UpdateTestCase):
self.assertEqual(0.0, latest_agent.error.last_failure)
self.assertEqual(0, latest_agent.error.failure_count)
- self._test_run_latest(return_value=1)
-
- self.assertTrue(latest_agent.is_available)
- self.assertNotEqual(0.0, latest_agent.error.last_failure)
- self.assertEqual(1, latest_agent.error.failure_count)
- return
-
- def test_run_latest_missing_code_marks_failures(self):
- # logger.add_logger_appender(logger.AppenderType.STDOUT)
- self.prepare_agents()
-
- latest_agent = self.update_handler.get_latest_agent()
- self.assertTrue(latest_agent.is_available)
- self.assertEqual(0.0, latest_agent.error.last_failure)
- self.assertEqual(0, latest_agent.error.failure_count)
-
- self._test_run_latest(return_value=None)
+ self._test_run_latest(mock_child=ChildMock(return_value=1))
self.assertTrue(latest_agent.is_available)
self.assertNotEqual(0.0, latest_agent.error.last_failure)
@@ -948,7 +961,7 @@ class TestUpdate(UpdateTestCase):
self.assertEqual(0.0, latest_agent.error.last_failure)
self.assertEqual(0, latest_agent.error.failure_count)
- self._test_run_latest(side_effect=Exception("Force blacklisting"))
+ self._test_run_latest(mock_child=ChildMock(side_effect=Exception("Force blacklisting")))
self.assertFalse(latest_agent.is_available)
self.assertTrue(latest_agent.error.is_blacklisted)
@@ -1016,11 +1029,18 @@ class TestUpdate(UpdateTestCase):
self._test_run(invocations=0, calls=[], enable_updates=True)
return
- def test_set_agents(self):
+ def test_set_agents_sets_agents(self):
self.prepare_agents()
self.update_handler._set_agents([GuestAgent(path=path) for path in self.agent_dirs()])
+ self.assertTrue(len(self.update_handler.agents) > 0)
self.assertEqual(len(self.agent_dirs()), len(self.update_handler.agents))
+ return
+
+ def test_set_agents_sorts_agents(self):
+ self.prepare_agents()
+
+ self.update_handler._set_agents([GuestAgent(path=path) for path in self.agent_dirs()])
v = FlexibleVersion("100000")
for a in self.update_handler.agents:
@@ -1029,6 +1049,15 @@ class TestUpdate(UpdateTestCase):
return
+class ChildMock(Mock):
+ def __init__(self, return_value=0, side_effect=None):
+ Mock.__init__(self, return_value=return_value, side_effect=side_effect)
+
+ self.poll = Mock(return_value=return_value, side_effect=side_effect)
+ self.wait = Mock(return_value=return_value, side_effect=side_effect)
+ return
+
+
class ProtocolMock(object):
def __init__(self, family="TestAgent", etag=42, versions=None):
self.family = family
@@ -1085,5 +1114,22 @@ class ResponseMock(Mock):
return self.response
+class TimeMock(Mock):
+ def __init__(self, time_increment=1):
+ Mock.__init__(self)
+ self.next_time = time.time()
+ self.time_call_count = 0
+ self.time_increment = time_increment
+
+ self.sleep = Mock(return_value=0)
+ return
+
+ def time(self):
+ self.time_call_count += 1
+ current_time = self.next_time
+ self.next_time += self.time_increment
+ return current_time
+
+
if __name__ == '__main__':
unittest.main()
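
One detail of the test patch worth noting is the `TimeMock` pattern: `time.time` is replaced with a deterministic clock that advances by a fixed increment on every call, and `time.sleep` becomes a no-op, so the polling loop can be driven through several "minutes" instantly. Below is a minimal sketch of the same technique; `FakeClock` and `wait_up_to` are invented names for the example, not code from this repo.

```python
import time
import unittest
from unittest.mock import patch

class FakeClock:
    """Deterministic stand-in for time.time(): every call advances by `step`."""
    def __init__(self, step=1.0):
        self.now = 0.0
        self.step = step
        self.calls = 0

    def time(self):
        self.calls += 1
        current = self.now
        self.now += self.step
        return current

def wait_up_to(seconds):
    """Toy poll loop under test: counts iterations until `seconds` 'elapse'."""
    polls = 0
    start = time.time()
    while (time.time() - start) < seconds:
        time.sleep(1)  # patched to a no-op below, so the test runs instantly
        polls += 1
    return polls

class FakeClockTest(unittest.TestCase):
    def test_poll_loop_without_real_sleeps(self):
        clock = FakeClock(step=5.0)
        with patch("time.time", side_effect=clock.time), \
             patch("time.sleep", return_value=None):
            polls = wait_up_to(15)
        self.assertEqual(2, polls)       # clock reads: 0 (start), 5, 10, 15
        self.assertEqual(4, clock.calls)

if __name__ == "__main__":
    unittest.main()
```

Because each read of the clock moves it forward by `step`, the number of loop iterations is fixed by arithmetic rather than wall-clock time, which is what makes assertions like `mock_child.poll.call_count` in the tests above deterministic.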
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_issue_reference",
"has_git_commit_hash",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 2
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"nose",
"pyasn1",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
nose==1.3.7
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyasn1==0.5.1
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
-e git+https://github.com/Azure/WALinuxAgent.git@9930df6600a061bcc5618b427ffa23cff5943f46#egg=WALinuxAgent
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: WALinuxAgent
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- nose==1.3.7
- pyasn1==0.5.1
prefix: /opt/conda/envs/WALinuxAgent
| [
"tests/ga/test_update.py::TestUpdate::test_ensure_latest_agent_includes_old_agents",
"tests/ga/test_update.py::TestUpdate::test_run_latest_polling_stops_at_failure",
"tests/ga/test_update.py::TestUpdate::test_run_latest_polling_stops_at_success",
"tests/ga/test_update.py::TestUpdate::test_run_latest_polls_and_waits_for_success"
]
| []
| [
"tests/ga/test_update.py::TestGuestAgentError::test_clear",
"tests/ga/test_update.py::TestGuestAgentError::test_creation",
"tests/ga/test_update.py::TestGuestAgentError::test_load_preserves_error_state",
"tests/ga/test_update.py::TestGuestAgentError::test_mark_failure",
"tests/ga/test_update.py::TestGuestAgentError::test_mark_failure_permanent",
"tests/ga/test_update.py::TestGuestAgentError::test_save",
"tests/ga/test_update.py::TestGuestAgentError::test_str",
"tests/ga/test_update.py::TestGuestAgent::test_clear_error",
"tests/ga/test_update.py::TestGuestAgent::test_creation",
"tests/ga/test_update.py::TestGuestAgent::test_download",
"tests/ga/test_update.py::TestGuestAgent::test_download_fail",
"tests/ga/test_update.py::TestGuestAgent::test_ensure_download_skips_blacklisted",
"tests/ga/test_update.py::TestGuestAgent::test_ensure_downloaded",
"tests/ga/test_update.py::TestGuestAgent::test_ensure_downloaded_download_fails",
"tests/ga/test_update.py::TestGuestAgent::test_ensure_downloaded_load_manifest_fails",
"tests/ga/test_update.py::TestGuestAgent::test_ensure_downloaded_unpack_fails",
"tests/ga/test_update.py::TestGuestAgent::test_is_available",
"tests/ga/test_update.py::TestGuestAgent::test_is_blacklisted",
"tests/ga/test_update.py::TestGuestAgent::test_is_downloaded",
"tests/ga/test_update.py::TestGuestAgent::test_load_error",
"tests/ga/test_update.py::TestGuestAgent::test_load_manifest",
"tests/ga/test_update.py::TestGuestAgent::test_load_manifest_is_empty",
"tests/ga/test_update.py::TestGuestAgent::test_load_manifest_is_malformed",
"tests/ga/test_update.py::TestGuestAgent::test_load_manifest_missing",
"tests/ga/test_update.py::TestGuestAgent::test_mark_failure",
"tests/ga/test_update.py::TestGuestAgent::test_unpack",
"tests/ga/test_update.py::TestGuestAgent::test_unpack_fail",
"tests/ga/test_update.py::TestUpdate::test_creation",
"tests/ga/test_update.py::TestUpdate::test_ensure_lastest_agent_purges_old_agents",
"tests/ga/test_update.py::TestUpdate::test_ensure_latest_agent_returns_true_on_first_use",
"tests/ga/test_update.py::TestUpdate::test_ensure_latest_agent_skips_if_too_frequent",
"tests/ga/test_update.py::TestUpdate::test_ensure_latest_agent_skips_if_when_no_new_versions",
"tests/ga/test_update.py::TestUpdate::test_ensure_latest_agent_skips_when_etag_matches",
"tests/ga/test_update.py::TestUpdate::test_ensure_latest_agent_skips_when_no_versions",
"tests/ga/test_update.py::TestUpdate::test_ensure_latest_agent_skips_when_updates_are_disabled",
"tests/ga/test_update.py::TestUpdate::test_ensure_latest_agent_sorts",
"tests/ga/test_update.py::TestUpdate::test_evaluate_agent_health_ignores_installed_agent",
"tests/ga/test_update.py::TestUpdate::test_evaluate_agent_health_raises_exception_for_restarting_agent",
"tests/ga/test_update.py::TestUpdate::test_evaluate_agent_health_resets_with_new_agent",
"tests/ga/test_update.py::TestUpdate::test_evaluate_agent_health_will_not_raise_exception_for_long_restarts",
"tests/ga/test_update.py::TestUpdate::test_evaluate_agent_health_will_not_raise_exception_too_few_restarts",
"tests/ga/test_update.py::TestUpdate::test_filter_blacklisted_agents",
"tests/ga/test_update.py::TestUpdate::test_get_latest_agent",
"tests/ga/test_update.py::TestUpdate::test_get_latest_agent_no_updates",
"tests/ga/test_update.py::TestUpdate::test_get_latest_agent_skip_updates",
"tests/ga/test_update.py::TestUpdate::test_get_latest_agent_skips_unavailable",
"tests/ga/test_update.py::TestUpdate::test_load_agents",
"tests/ga/test_update.py::TestUpdate::test_load_agents_does_not_reload",
"tests/ga/test_update.py::TestUpdate::test_load_agents_sorts",
"tests/ga/test_update.py::TestUpdate::test_purge_agents",
"tests/ga/test_update.py::TestUpdate::test_run",
"tests/ga/test_update.py::TestUpdate::test_run_keeps_running",
"tests/ga/test_update.py::TestUpdate::test_run_latest",
"tests/ga/test_update.py::TestUpdate::test_run_latest_captures_signals",
"tests/ga/test_update.py::TestUpdate::test_run_latest_creates_only_one_signal_handler",
"tests/ga/test_update.py::TestUpdate::test_run_latest_defaults_to_current",
"tests/ga/test_update.py::TestUpdate::test_run_latest_exception_blacklists",
"tests/ga/test_update.py::TestUpdate::test_run_latest_forwards_output",
"tests/ga/test_update.py::TestUpdate::test_run_latest_nonzero_code_marks_failures",
"tests/ga/test_update.py::TestUpdate::test_run_stops_if_update_available",
"tests/ga/test_update.py::TestUpdate::test_set_agents_sets_agents",
"tests/ga/test_update.py::TestUpdate::test_set_agents_sorts_agents"
]
| []
| Apache License 2.0 | 629 | [
"azurelinuxagent/common/version.py",
"azurelinuxagent/ga/update.py"
]
| [
"azurelinuxagent/common/version.py",
"azurelinuxagent/ga/update.py"
]
|
docker__docker-py-1130 | b511352bea79aff12d565b11662bebee36e362fc | 2016-07-13 21:07:00 | a44d65be370c28abd666a299456b83659dd1a1df | GordonTheTurtle: Please sign your commits following these rules:
https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work
The easiest way to do this is to amend the last commit:
~~~console
$ git clone -b "support-tcp-upgrade" [email protected]:aanand/docker-py.git somewhere
$ cd somewhere
$ git rebase -i HEAD~12
editor opens
change each 'pick' to 'edit'
save the file and quit
$ git commit --amend -s --no-edit
$ git rebase --continue # and repeat the amend for each commit
$ git push -f
~~~
Ammending updates the existing PR. You **DO NOT** need to open a new one.
GordonTheTurtle: Please sign your commits following these rules:
https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work
The easiest way to do this is to amend the last commit:
~~~console
$ git clone -b "support-tcp-upgrade" [email protected]:aanand/docker-py.git somewhere
$ cd somewhere
$ git rebase -i HEAD~12
editor opens
change each 'pick' to 'edit'
save the file and quit
$ git commit --amend -s --no-edit
$ git rebase --continue # and repeat the amend for each commit
$ git push -f
~~~
Ammending updates the existing PR. You **DO NOT** need to open a new one.
GordonTheTurtle: Please sign your commits following these rules:
https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work
The easiest way to do this is to amend the last commit:
~~~console
$ git clone -b "support-tcp-upgrade" [email protected]:aanand/docker-py.git somewhere
$ cd somewhere
$ git rebase -i HEAD~12
editor opens
change each 'pick' to 'edit'
save the file and quit
$ git commit --amend -s --no-edit
$ git rebase --continue # and repeat the amend for each commit
$ git push -f
~~~
Ammending updates the existing PR. You **DO NOT** need to open a new one.
GordonTheTurtle: Please sign your commits following these rules:
https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work
The easiest way to do this is to amend the last commit:
~~~console
$ git clone -b "support-tcp-upgrade" [email protected]:aanand/docker-py.git somewhere
$ cd somewhere
$ git rebase -i HEAD~12
editor opens
change each 'pick' to 'edit'
save the file and quit
$ git commit --amend -s --no-edit
$ git rebase --continue # and repeat the amend for each commit
$ git push -f
~~~
Ammending updates the existing PR. You **DO NOT** need to open a new one.
| diff --git a/docker/api/container.py b/docker/api/container.py
index 9cc14dbd..b8507d85 100644
--- a/docker/api/container.py
+++ b/docker/api/container.py
@@ -15,12 +15,18 @@ class ContainerApiMixin(object):
'logs': logs and 1 or 0,
'stdout': stdout and 1 or 0,
'stderr': stderr and 1 or 0,
- 'stream': stream and 1 or 0,
+ 'stream': stream and 1 or 0
}
+
+ headers = {
+ 'Connection': 'Upgrade',
+ 'Upgrade': 'tcp'
+ }
+
u = self._url("/containers/{0}/attach", container)
- response = self._post(u, params=params, stream=stream)
+ response = self._post(u, headers=headers, params=params, stream=stream)
- return self._get_result(container, stream, response)
+ return self._read_from_socket(response, stream)
@utils.check_resource
def attach_socket(self, container, params=None, ws=False):
@@ -34,9 +40,18 @@ class ContainerApiMixin(object):
if ws:
return self._attach_websocket(container, params)
+ headers = {
+ 'Connection': 'Upgrade',
+ 'Upgrade': 'tcp'
+ }
+
u = self._url("/containers/{0}/attach", container)
- return self._get_raw_response_socket(self.post(
- u, None, params=self._attach_params(params), stream=True))
+ return self._get_raw_response_socket(
+ self.post(
+ u, None, params=self._attach_params(params), stream=True,
+ headers=headers
+ )
+ )
@utils.check_resource
def commit(self, container, repository=None, tag=None, message=None,
diff --git a/docker/api/exec_api.py b/docker/api/exec_api.py
index f0e4afa6..6e499960 100644
--- a/docker/api/exec_api.py
+++ b/docker/api/exec_api.py
@@ -56,8 +56,6 @@ class ExecApiMixin(object):
def exec_start(self, exec_id, detach=False, tty=False, stream=False,
socket=False):
# we want opened socket if socket == True
- if socket:
- stream = True
if isinstance(exec_id, dict):
exec_id = exec_id.get('Id')
@@ -66,10 +64,18 @@ class ExecApiMixin(object):
'Detach': detach
}
+ headers = {} if detach else {
+ 'Connection': 'Upgrade',
+ 'Upgrade': 'tcp'
+ }
+
res = self._post_json(
- self._url('/exec/{0}/start', exec_id), data=data, stream=stream
+ self._url('/exec/{0}/start', exec_id),
+ headers=headers,
+ data=data,
+ stream=True
)
if socket:
return self._get_raw_response_socket(res)
- return self._get_result_tty(stream, res, tty)
+ return self._read_from_socket(res, stream)
diff --git a/docker/client.py b/docker/client.py
index b96a78ce..6ca9e57a 100644
--- a/docker/client.py
+++ b/docker/client.py
@@ -29,6 +29,7 @@ from .ssladapter import ssladapter
from .tls import TLSConfig
from .transport import UnixAdapter
from .utils import utils, check_resource, update_headers, kwargs_from_env
+from .utils.socket import frames_iter
try:
from .transport import NpipeAdapter
except ImportError:
@@ -305,6 +306,14 @@ class Client(
for out in response.iter_content(chunk_size=1, decode_unicode=True):
yield out
+ def _read_from_socket(self, response, stream):
+ socket = self._get_raw_response_socket(response)
+
+ if stream:
+ return frames_iter(socket)
+ else:
+ return six.binary_type().join(frames_iter(socket))
+
def _disable_socket_timeout(self, socket):
""" Depending on the combination of python version and whether we're
connecting over http or https, we might need to access _sock, which
diff --git a/docker/utils/socket.py b/docker/utils/socket.py
new file mode 100644
index 00000000..ed343507
--- /dev/null
+++ b/docker/utils/socket.py
@@ -0,0 +1,68 @@
+import errno
+import os
+import select
+import struct
+
+import six
+
+
+class SocketError(Exception):
+ pass
+
+
+def read(socket, n=4096):
+ """
+ Reads at most n bytes from socket
+ """
+ recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)
+
+ # wait for data to become available
+ select.select([socket], [], [])
+
+ try:
+ if hasattr(socket, 'recv'):
+ return socket.recv(n)
+ return os.read(socket.fileno(), n)
+ except EnvironmentError as e:
+ if e.errno not in recoverable_errors:
+ raise
+
+
+def read_exactly(socket, n):
+ """
+ Reads exactly n bytes from socket
+ Raises SocketError if there isn't enough data
+ """
+ data = six.binary_type()
+ while len(data) < n:
+ next_data = read(socket, n - len(data))
+ if not next_data:
+ raise SocketError("Unexpected EOF")
+ data += next_data
+ return data
+
+
+def next_frame_size(socket):
+ """
+ Returns the size of the next frame of data waiting to be read from socket,
+ according to the protocol defined here:
+
+ https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/attach-to-a-container
+ """
+ try:
+ data = read_exactly(socket, 8)
+ except SocketError:
+ return 0
+
+ _, actual = struct.unpack('>BxxxL', data)
+ return actual
+
+
+def frames_iter(socket):
+ """
+ Returns a generator of frames read from socket
+ """
+ n = next_frame_size(socket)
+ while n > 0:
+ yield read(socket, n)
+ n = next_frame_size(socket)
| The api client should send connection upgrade headers
To hint proxies about connection hijacking, docker clients should send connection upgrade headers like the docker cli does.
From https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/4-2-hijacking:
> In this version of the API, /attach, uses hijacking to transport stdin, stdout, and stderr on the same socket.
>
> To hint potential proxies about connection hijacking, Docker client sends connection upgrade headers similarly to websocket.
```
Upgrade: tcp
Connection: Upgrade
```
On Docker for Desktop, the proxy that sits between Docker Compose and the daemon will not be aware that the connection will be hijacked. This can lead to an issue where the proxy will install a CloseNotifier and just after that will hijack the connection, which is know to be incompatible. See https://github.com/docker/compose/issues/3685
See also https://github.com/docker/compose/issues/3700
| docker/docker-py | diff --git a/tests/helpers.py b/tests/helpers.py
index 21036ace..94ea3887 100644
--- a/tests/helpers.py
+++ b/tests/helpers.py
@@ -1,9 +1,6 @@
-import errno
import os
import os.path
-import select
import shutil
-import struct
import tarfile
import tempfile
import unittest
@@ -54,7 +51,7 @@ def exec_driver_is_native():
c = docker_client()
EXEC_DRIVER = c.info()['ExecutionDriver']
c.close()
- return EXEC_DRIVER.startswith('native')
+ return EXEC_DRIVER.startswith('native') or EXEC_DRIVER == ''
def docker_client(**kwargs):
@@ -67,49 +64,6 @@ def docker_client_kwargs(**kwargs):
return client_kwargs
-def read_socket(socket, n=4096):
- """ Code stolen from dockerpty to read the socket """
- recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)
-
- # wait for data to become available
- select.select([socket], [], [])
-
- try:
- if hasattr(socket, 'recv'):
- return socket.recv(n)
- return os.read(socket.fileno(), n)
- except EnvironmentError as e:
- if e.errno not in recoverable_errors:
- raise
-
-
-def next_packet_size(socket):
- """ Code stolen from dockerpty to get the next packet size """
- data = six.binary_type()
- while len(data) < 8:
- next_data = read_socket(socket, 8 - len(data))
- if not next_data:
- return 0
- data = data + next_data
-
- if data is None:
- return 0
-
- if len(data) == 8:
- _, actual = struct.unpack('>BxxxL', data)
- return actual
-
-
-def read_data(socket, packet_size):
- data = six.binary_type()
- while len(data) < packet_size:
- next_data = read_socket(socket, packet_size - len(data))
- if not next_data:
- assert False, "Failed trying to read in the dataz"
- data += next_data
- return data
-
-
class BaseTestCase(unittest.TestCase):
tmp_imgs = []
tmp_containers = []
diff --git a/tests/integration/container_test.py b/tests/integration/container_test.py
index 56b648a3..61b33983 100644
--- a/tests/integration/container_test.py
+++ b/tests/integration/container_test.py
@@ -3,6 +3,8 @@ import signal
import tempfile
import docker
+from docker.utils.socket import next_frame_size
+from docker.utils.socket import read_exactly
import pytest
import six
@@ -1025,9 +1027,9 @@ class AttachContainerTest(helpers.BaseTestCase):
self.client.start(ident)
- next_size = helpers.next_packet_size(pty_stdout)
+ next_size = next_frame_size(pty_stdout)
self.assertEqual(next_size, len(line))
- data = helpers.read_data(pty_stdout, next_size)
+ data = read_exactly(pty_stdout, next_size)
self.assertEqual(data.decode('utf-8'), line)
diff --git a/tests/integration/exec_test.py b/tests/integration/exec_test.py
index 9f548080..8bf2762a 100644
--- a/tests/integration/exec_test.py
+++ b/tests/integration/exec_test.py
@@ -1,5 +1,8 @@
import pytest
+from docker.utils.socket import next_frame_size
+from docker.utils.socket import read_exactly
+
from .. import helpers
BUSYBOX = helpers.BUSYBOX
@@ -107,9 +110,9 @@ class ExecTest(helpers.BaseTestCase):
socket = self.client.exec_start(exec_id, socket=True)
self.addCleanup(socket.close)
- next_size = helpers.next_packet_size(socket)
+ next_size = next_frame_size(socket)
self.assertEqual(next_size, len(line))
- data = helpers.read_data(socket, next_size)
+ data = read_exactly(socket, next_size)
self.assertEqual(data.decode('utf-8'), line)
def test_exec_inspect(self):
diff --git a/tests/unit/api_test.py b/tests/unit/api_test.py
index 23fd1913..34bf14f6 100644
--- a/tests/unit/api_test.py
+++ b/tests/unit/api_test.py
@@ -93,6 +93,10 @@ def fake_put(self, url, *args, **kwargs):
def fake_delete(self, url, *args, **kwargs):
return fake_request('DELETE', url, *args, **kwargs)
+
+def fake_read_from_socket(self, response, stream):
+ return six.binary_type()
+
url_base = 'http+docker://localunixsocket/'
url_prefix = '{0}v{1}/'.format(
url_base,
@@ -103,7 +107,8 @@ class DockerClientTest(base.Cleanup, base.BaseTestCase):
def setUp(self):
self.patcher = mock.patch.multiple(
'docker.Client', get=fake_get, post=fake_post, put=fake_put,
- delete=fake_delete
+ delete=fake_delete,
+ _read_from_socket=fake_read_from_socket
)
self.patcher.start()
self.client = docker.Client()
diff --git a/tests/unit/exec_test.py b/tests/unit/exec_test.py
index 3007799c..6ba2a3dd 100644
--- a/tests/unit/exec_test.py
+++ b/tests/unit/exec_test.py
@@ -51,8 +51,36 @@ class ExecTest(DockerClientTest):
}
)
- self.assertEqual(args[1]['headers'],
- {'Content-Type': 'application/json'})
+ self.assertEqual(
+ args[1]['headers'], {
+ 'Content-Type': 'application/json',
+ 'Connection': 'Upgrade',
+ 'Upgrade': 'tcp'
+ }
+ )
+
+ def test_exec_start_detached(self):
+ self.client.exec_start(fake_api.FAKE_EXEC_ID, detach=True)
+
+ args = fake_request.call_args
+ self.assertEqual(
+ args[0][1], url_prefix + 'exec/{0}/start'.format(
+ fake_api.FAKE_EXEC_ID
+ )
+ )
+
+ self.assertEqual(
+ json.loads(args[1]['data']), {
+ 'Tty': False,
+ 'Detach': True
+ }
+ )
+
+ self.assertEqual(
+ args[1]['headers'], {
+ 'Content-Type': 'application/json'
+ }
+ )
def test_exec_inspect(self):
self.client.exec_inspect(fake_api.FAKE_EXEC_ID)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 3
} | 1.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"flake8"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
coverage==6.2
-e git+https://github.com/docker/docker-py.git@b511352bea79aff12d565b11662bebee36e362fc#egg=docker_py
flake8==5.0.4
importlib-metadata==4.2.0
iniconfig==1.1.1
mccabe==0.7.0
packaging==21.3
pluggy==1.0.0
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
requests==2.5.3
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
websocket-client==0.32.0
zipp==3.6.0
| name: docker-py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- flake8==5.0.4
- importlib-metadata==4.2.0
- iniconfig==1.1.1
- mccabe==0.7.0
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- requests==2.5.3
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- websocket-client==0.32.0
- zipp==3.6.0
prefix: /opt/conda/envs/docker-py
| [
"tests/unit/api_test.py::DockerApiTest::test_auto_retrieve_server_version",
"tests/unit/api_test.py::DockerApiTest::test_create_host_config_secopt",
"tests/unit/api_test.py::DockerApiTest::test_ctor",
"tests/unit/api_test.py::DockerApiTest::test_events",
"tests/unit/api_test.py::DockerApiTest::test_events_with_filters",
"tests/unit/api_test.py::DockerApiTest::test_events_with_since_until",
"tests/unit/api_test.py::DockerApiTest::test_info",
"tests/unit/api_test.py::DockerApiTest::test_remove_link",
"tests/unit/api_test.py::DockerApiTest::test_retrieve_server_version",
"tests/unit/api_test.py::DockerApiTest::test_search",
"tests/unit/api_test.py::DockerApiTest::test_url_compatibility_http",
"tests/unit/api_test.py::DockerApiTest::test_url_compatibility_http_unix_triple_slash",
"tests/unit/api_test.py::DockerApiTest::test_url_compatibility_tcp",
"tests/unit/api_test.py::DockerApiTest::test_url_compatibility_unix",
"tests/unit/api_test.py::DockerApiTest::test_url_compatibility_unix_triple_slash",
"tests/unit/api_test.py::DockerApiTest::test_url_invalid_resource",
"tests/unit/api_test.py::DockerApiTest::test_url_no_resource",
"tests/unit/api_test.py::DockerApiTest::test_url_unversioned_api",
"tests/unit/api_test.py::DockerApiTest::test_url_valid_resource",
"tests/unit/api_test.py::DockerApiTest::test_version",
"tests/unit/api_test.py::DockerApiTest::test_version_no_api_version",
"tests/unit/api_test.py::StreamTest::test_early_stream_response",
"tests/unit/exec_test.py::ExecTest::test_exec_create",
"tests/unit/exec_test.py::ExecTest::test_exec_inspect",
"tests/unit/exec_test.py::ExecTest::test_exec_resize",
"tests/unit/exec_test.py::ExecTest::test_exec_start",
"tests/unit/exec_test.py::ExecTest::test_exec_start_detached"
]
| []
| []
| []
| Apache License 2.0 | 630 | [
"docker/utils/socket.py",
"docker/api/exec_api.py",
"docker/api/container.py",
"docker/client.py"
]
| [
"docker/utils/socket.py",
"docker/api/exec_api.py",
"docker/api/container.py",
"docker/client.py"
]
|
simphony__simphony-remote-103 | cdd16775caca62c1447a43acc236c4fe17f9f9ba | 2016-07-14 15:26:38 | 61ec23ffe44463cbc41f6fa54b4247963093ed79 | diff --git a/remoteappmanager/docker/container_manager.py b/remoteappmanager/docker/container_manager.py
index 16b2f12..923c9ae 100644
--- a/remoteappmanager/docker/container_manager.py
+++ b/remoteappmanager/docker/container_manager.py
@@ -120,15 +120,64 @@ class ContainerManager(LoggingMixin):
"""
labels = {
SIMPHONY_NS+"user": user_name,
- SIMPHONY_NS+"mapping_id": mapping_id,
- }
+ SIMPHONY_NS+"mapping_id": mapping_id}
+ filters = {
+ 'label': ['{0}={1}'.format(k, v) for k, v in labels.items()]}
+
+ containers = yield self.containers_from_filters(filters=filters)
+ return containers
+ @gen.coroutine
+ def container_from_url_id(self, url_id):
+ """Retrieves and returns the container by its url_id, if present.
+ If not present, returns None.
+ """
+ labels = {SIMPHONY_NS+"url_id": url_id}
filters = {
- 'label': ['{0}={1}'.format(k, v) for k, v in labels.items()]
- }
+ 'label': ['{0}={1}'.format(k, v) for k, v in labels.items()]}
+
+ containers = yield self.containers_from_filters(filters=filters)
+ return containers[0] if len(containers) else None
+
+ @gen.coroutine
+ def containers_from_filters(self, filters):
+ """Returns the currently running containers for a given filter
+
+ Parameters
+ ----------
+ filters: dict
+ A dictionary of filters as in dockerpy
+ Return
+ ------
+ A list of Container objects, or an empty list if nothing is found.
+ """
+ containers = []
infos = yield self.docker_client.containers(filters=filters)
- return [Container.from_docker_containers_dict(info) for info in infos]
+ for info in infos:
+ try:
+ container = Container.from_docker_containers_dict(info)
+ except Exception:
+ self.log.exception("Unable to parse container info.")
+ continue
+
+ # override the ip and port obtained by the docker info with the
+ # appropriate ip and port, considering that we might be using a
+ # separate docker machine
+ try:
+ ip, port = yield from self._get_ip_and_port(
+ container.docker_id)
+ except RuntimeError:
+ self.log.exception(
+ "Unable to retrieve ip/port "
+ "for container {}".format(container.docker_id))
+ continue
+
+ container.ip = ip
+ container.port = port
+ containers.append(container)
+
+ return containers
@gen.coroutine
def image(self, image_id_or_name):
@@ -301,13 +350,25 @@ class ContainerManager(LoggingMixin):
Return
------
A tuple (ip, port)
+
+ Raises
+ ------
+ RuntimeError:
+ If for some reason it cannot retrieve the information
"""
# retrieve the actual port binding
- resp = yield self.docker_client.port(container_id, self.container_port)
+ try:
+ resp = yield self.docker_client.port(container_id,
+ self.container_port)
+ except Exception as e:
+ raise RuntimeError("Failed to get port info for {}. "
+ "Exception: {}.".format(container_id,
+ str(e)))
if resp is None:
- raise RuntimeError("Failed to get port info for %s" % container_id)
+ raise RuntimeError("Failed to get port info for {}. "
+ "Port response was None.".format(container_id))
# We assume we are running on linux without any additional docker
# machine. The container will therefore be reachable at 127.0.0.1.
@@ -320,7 +381,13 @@ class ContainerManager(LoggingMixin):
if url.scheme == 'tcp':
ip = url.hostname
- port = int(resp[0]['HostPort'])
+ try:
+ port = int(resp[0]['HostPort'])
+ except (KeyError, IndexError, ValueError, TypeError) as e:
+ raise RuntimeError("Failed to get port info for {}. "
+ "Exception: {}.".format(container_id,
+ str(e)))
+
return ip, port
@gen.coroutine
diff --git a/remoteappmanager/handlers/home_handler.py b/remoteappmanager/handlers/home_handler.py
index 9ba81f6..2eb351d 100644
--- a/remoteappmanager/handlers/home_handler.py
+++ b/remoteappmanager/handlers/home_handler.py
@@ -11,7 +11,6 @@ from tornado.httpclient import AsyncHTTPClient, HTTPError
from tornado.log import app_log
from remoteappmanager.handlers.base_handler import BaseHandler
-from remoteappmanager.docker.container import Container
class HomeHandler(BaseHandler):
@@ -125,11 +124,15 @@ class HomeHandler(BaseHandler):
It is not different from pasting the appropriate URL in the
web browser, but we validate the container id first.
"""
- container = yield self._container_from_options(options)
+ url_id = options["url_id"][0]
+
+ container_manager = self.application.container_manager
+ container = yield container_manager.container_from_url_id(url_id)
if not container:
self.finish("Unable to view the application")
return
+ # make sure the container is actually running and working
yield self._wait_for_container_ready(container)
# in case the reverse proxy is not already set up
@@ -142,10 +145,12 @@ class HomeHandler(BaseHandler):
def _actionhandler_stop(self, options):
"""Stops a running container.
"""
+ url_id = options["url_id"][0]
+
app = self.application
container_manager = app.container_manager
- container = yield self._container_from_options(options)
+ container = yield container_manager.container_from_url_id(url_id)
if not container:
self.finish("Unable to view the application")
return
@@ -201,35 +206,6 @@ class HomeHandler(BaseHandler):
})
return images_info
- @gen.coroutine
- def _container_from_options(self, options):
- """Support routine to reduce duplication.
- Retrieves and returns the container if valid and present.
-
- If not present, returns None
- """
-
- container_manager = self.application.container_manager
-
- try:
- container_id = options["container_id"][0]
- except (KeyError, IndexError):
- self.log.exception(
- "Failed to retrieve valid container_id from form"
- )
- return None
-
- container_dict = yield container_manager.docker_client.containers(
- filters={'id': container_id})
-
- if not container_dict:
- self.log.exception(
- "Failed to retrieve valid "
- "container from container id: {}".format(container_id))
- return None
-
- return Container.from_docker_containers_dict(container_dict[0])
-
@gen.coroutine
def _start_container(self, user_name, app, policy, mapping_id):
"""Start the container. This method is a helper method that
diff --git a/remoteappmanager/templates/home.html b/remoteappmanager/templates/home.html
index 7eaf6cb..6b6b7a0 100644
--- a/remoteappmanager/templates/home.html
+++ b/remoteappmanager/templates/home.html
@@ -38,7 +38,7 @@
<button type="submit" name="action" value="start" class="btn btn-primary">Start</button>
</div>
{% else %}
- <input type="hidden" name="container_id" value="{{info.container.docker_id}}">
+ <input type="hidden" name="url_id" value="{{info.container.url_id}}">
<div class="col-sm-1 va">
<button type="submit" name="action" value="view" class="btn btn-success">View</button>
</div>
| View not working on mac due to incorrect IP
We found that View does not work on mac because inquiring the docker client returns a 0.0.0.0 ip. This works on linux, but on mac the container is residing on a separate virtual machine. Start works because it retrieves the information directly after starting the container, and this is handled by the container manager (which knows this) but with View, the docker client is inquired directly, so the container manager cannot do any magic.
the docker client should be considered private.
| simphony/simphony-remote | diff --git a/tests/docker/test_container_manager.py b/tests/docker/test_container_manager.py
index 7610825..5dbd238 100644
--- a/tests/docker/test_container_manager.py
+++ b/tests/docker/test_container_manager.py
@@ -53,13 +53,53 @@ class TestContainerManager(AsyncTestCase):
name='/remoteexec-image_3Alatest_user',
image_name='simphony/mayavi-4.4.4:latest', # noqa
image_id='imageid',
- ip='0.0.0.0',
- port=None,
+ ip='127.0.0.1',
+ port=666,
url_id='url_id')
self.assertEqual(len(result), 1)
utils.assert_containers_equal(self, result[0], expected)
+ @gen_test
+ def test_containers_from_url_id(self):
+ ''' Test containers_for_mapping_id returns a list of Container '''
+ # The mock client mocks the output of docker Client.containers
+ docker_client = utils.mock_docker_client_with_running_containers()
+ self.mock_docker_client = docker_client
+ self.manager.docker_client.client = docker_client
+
+ result = yield self.manager.container_from_url_id("url_id")
+ expected = Container(docker_id='someid',
+ mapping_id="mapping",
+ name='/remoteexec-image_3Alatest_user',
+ image_name='simphony/mayavi-4.4.4:latest', # noqa
+ image_id='imageid',
+ ip='127.0.0.1',
+ port=666,
+ url_id='url_id')
+
+ utils.assert_containers_equal(self, result, expected)
+
+ @gen_test
+ def test_containers_from_url_id_exceptions(self):
+ ''' Test containers_for_mapping_id returns a list of Container '''
+ # The mock client mocks the output of docker Client.containers
+ docker_client = utils.mock_docker_client_with_running_containers()
+ docker_client.port = mock.Mock(side_effect=Exception("Boom!"))
+ self.mock_docker_client = docker_client
+ self.manager.docker_client.client = docker_client
+
+ result = yield self.manager.container_from_url_id("url_id")
+ self.assertEqual(result, None)
+
+ # Making it so that no valid dictionary is returned.
+ docker_client.port = mock.Mock(return_value=1234)
+ self.mock_docker_client = docker_client
+ self.manager.docker_client.client = docker_client
+
+ result = yield self.manager.container_from_url_id("url_id")
+ self.assertEqual(result, None)
+
@gen_test
def test_race_condition_spawning(self):
# Start the operations, and retrieve the future.
diff --git a/tests/handlers/test_home_handler.py b/tests/handlers/test_home_handler.py
index c532f9e..2d32b6e 100644
--- a/tests/handlers/test_home_handler.py
+++ b/tests/handlers/test_home_handler.py
@@ -67,6 +67,9 @@ class TestHomeHandler(TempMixin, utils.AsyncHTTPTestCase):
app.container_manager.containers_from_mapping_id = mock_coro_factory(
[Container()]
)
+ app.container_manager.container_from_url_id = mock_coro_factory(
+ Container()
+ )
app.container_manager.stop_and_remove_container = mock_coro_factory()
mock_application = mock.Mock()
@@ -149,7 +152,7 @@ class TestHomeHandler(TempMixin, utils.AsyncHTTPTestCase):
def test_post_stop(self):
body = urllib.parse.urlencode(
{'action': 'stop',
- 'container_id': '12345'
+ 'url_id': '12345'
}
)
@@ -157,16 +160,7 @@ class TestHomeHandler(TempMixin, utils.AsyncHTTPTestCase):
".handlers"
".home_handler"
".HomeHandler"
- "._container_from_options",
- new_callable=mock_coro_factory
- ) as mock_container_from_options, \
- mock.patch("remoteappmanager"
- ".handlers"
- ".home_handler"
- ".HomeHandler"
- ".redirect") as redirect:
-
- mock_container_from_options.return_value = Container()
+ ".redirect") as redirect:
self.fetch("/user/username/",
method="POST",
@@ -181,7 +175,7 @@ class TestHomeHandler(TempMixin, utils.AsyncHTTPTestCase):
def test_post_view(self):
body = urllib.parse.urlencode(
{'action': 'view',
- 'container_id': '12345'
+ 'url_id': '12345'
}
)
@@ -190,21 +184,12 @@ class TestHomeHandler(TempMixin, utils.AsyncHTTPTestCase):
".home_handler"
"._wait_for_http_server_2xx",
new_callable=mock_coro_factory), \
- mock.patch("remoteappmanager"
- ".handlers"
- ".home_handler"
- ".HomeHandler"
- "._container_from_options",
- new_callable=mock_coro_factory
- ) as mock_container_from_options, \
mock.patch("remoteappmanager"
".handlers"
".home_handler"
".HomeHandler"
".redirect") as redirect:
- mock_container_from_options.return_value = Container()
-
self.fetch("/user/username/",
method="POST",
headers={
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 3
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alembic==1.15.2
annotated-types==0.7.0
arrow==1.3.0
async-generator==1.10
attrs==25.3.0
certifi==2025.1.31
certipy==0.2.2
cffi==1.17.1
charset-normalizer==3.4.1
click==8.1.8
cryptography==44.0.2
docker-py==1.10.6
docker-pycreds==0.4.0
escapism==1.0.1
exceptiongroup==1.2.2
fqdn==1.5.1
greenlet==3.1.1
idna==3.10
importlib_metadata==8.6.1
iniconfig==2.1.0
isoduration==20.11.0
Jinja2==3.1.6
jsonpointer==3.0.0
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
jupyter-events==0.12.0
jupyter_client==8.6.3
jupyter_core==5.7.2
jupyterhub==5.2.1
Mako==1.3.9
MarkupSafe==3.0.2
oauthlib==3.2.2
packaging==24.2
pamela==1.2.0
platformdirs==4.3.7
pluggy==1.5.0
prometheus_client==0.21.1
pycparser==2.22
pydantic==2.11.1
pydantic_core==2.33.0
pytest==8.3.5
python-dateutil==2.9.0.post0
python-json-logger==3.3.0
PyYAML==6.0.2
pyzmq==26.3.0
referencing==0.36.2
-e git+https://github.com/simphony/simphony-remote.git@cdd16775caca62c1447a43acc236c4fe17f9f9ba#egg=remoteappmanager
requests==2.32.3
rfc3339-validator==0.1.4
rfc3986-validator==0.1.1
rpds-py==0.24.0
six==1.17.0
SQLAlchemy==2.0.40
tabulate==0.9.0
tomli==2.2.1
tornado==6.4.2
traitlets==5.14.3
types-python-dateutil==2.9.0.20241206
typing-inspection==0.4.0
typing_extensions==4.13.0
uri-template==1.3.0
urllib3==2.3.0
webcolors==24.11.1
websocket-client==1.8.0
zipp==3.21.0
| name: simphony-remote
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alembic==1.15.2
- annotated-types==0.7.0
- arrow==1.3.0
- async-generator==1.10
- attrs==25.3.0
- certifi==2025.1.31
- certipy==0.2.2
- cffi==1.17.1
- charset-normalizer==3.4.1
- click==8.1.8
- cryptography==44.0.2
- docker-py==1.10.6
- docker-pycreds==0.4.0
- escapism==1.0.1
- exceptiongroup==1.2.2
- fqdn==1.5.1
- greenlet==3.1.1
- idna==3.10
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- isoduration==20.11.0
- jinja2==3.1.6
- jsonpointer==3.0.0
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- jupyter-client==8.6.3
- jupyter-core==5.7.2
- jupyter-events==0.12.0
- jupyterhub==5.2.1
- mako==1.3.9
- markupsafe==3.0.2
- oauthlib==3.2.2
- packaging==24.2
- pamela==1.2.0
- platformdirs==4.3.7
- pluggy==1.5.0
- prometheus-client==0.21.1
- pycparser==2.22
- pydantic==2.11.1
- pydantic-core==2.33.0
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- python-json-logger==3.3.0
- pyyaml==6.0.2
- pyzmq==26.3.0
- referencing==0.36.2
- requests==2.32.3
- rfc3339-validator==0.1.4
- rfc3986-validator==0.1.1
- rpds-py==0.24.0
- six==1.17.0
- sqlalchemy==2.0.40
- tabulate==0.9.0
- tomli==2.2.1
- tornado==6.4.2
- traitlets==5.14.3
- types-python-dateutil==2.9.0.20241206
- typing-extensions==4.13.0
- typing-inspection==0.4.0
- uri-template==1.3.0
- urllib3==2.3.0
- webcolors==24.11.1
- websocket-client==1.8.0
- zipp==3.21.0
prefix: /opt/conda/envs/simphony-remote
| [
"tests/docker/test_container_manager.py::TestContainerManager::test_containers_from_mapping_id",
"tests/docker/test_container_manager.py::TestContainerManager::test_containers_from_url_id",
"tests/docker/test_container_manager.py::TestContainerManager::test_containers_from_url_id_exceptions",
"tests/handlers/test_home_handler.py::TestHomeHandler::test_post_stop",
"tests/handlers/test_home_handler.py::TestHomeHandler::test_post_view"
]
| []
| [
"tests/docker/test_container_manager.py::TestContainerManager::test_image",
"tests/docker/test_container_manager.py::TestContainerManager::test_instantiation",
"tests/docker/test_container_manager.py::TestContainerManager::test_race_condition_spawning",
"tests/docker/test_container_manager.py::TestContainerManager::test_start_already_present_container",
"tests/docker/test_container_manager.py::TestContainerManager::test_start_container_exception_cleanup",
"tests/docker/test_container_manager.py::TestContainerManager::test_start_container_exception_cleanup_2",
"tests/docker/test_container_manager.py::TestContainerManager::test_start_container_with_nonexisting_volume_source",
"tests/docker/test_container_manager.py::TestContainerManager::test_start_stop",
"tests/handlers/test_home_handler.py::TestHomeHandler::test_failed_auth",
"tests/handlers/test_home_handler.py::TestHomeHandler::test_home",
"tests/handlers/test_home_handler.py::TestHomeHandler::test_post_failed_auth",
"tests/handlers/test_home_handler.py::TestHomeHandler::test_post_start"
]
| []
| BSD 3-Clause "New" or "Revised" License | 631 | [
"remoteappmanager/handlers/home_handler.py",
"remoteappmanager/docker/container_manager.py",
"remoteappmanager/templates/home.html"
]
| [
"remoteappmanager/handlers/home_handler.py",
"remoteappmanager/docker/container_manager.py",
"remoteappmanager/templates/home.html"
]
|
|
thrawn01__hubble-20 | 22a4cd51ed0a312efb3ef79e39a558fe87c970f7 | 2016-07-15 04:29:00 | 22a4cd51ed0a312efb3ef79e39a558fe87c970f7 | diff --git a/hubble/config.py b/hubble/config.py
index e3f466e..0abc6c4 100644
--- a/hubble/config.py
+++ b/hubble/config.py
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from io import StringIO
from itertools import chain
import os
@@ -101,12 +102,21 @@ class ErrorConfigParser(SafeConfigParser):
def open_fd(file):
""" Open the file if possible, else return None """
+ if not isinstance(file, string_types):
+ return file
try:
return open(file)
except IOError:
return None
+def exists(obj):
+ """Returns true if obj is a StringIO or if the path exists """
+ if isinstance(obj, StringIO):
+ return True
+ return os.path.exists(obj)
+
+
def read_configs(files=None, default_section=None):
"""Given a list of file names, return a list of handles to succesfully
opened files
@@ -114,7 +124,7 @@ def read_configs(files=None, default_section=None):
"""
files = files or [os.path.expanduser('~/.hubblerc'), '.hubblerc']
# If non of these files exist, raise an error
- if not any([os.path.exists(rc) for rc in files]):
+ if not any([exists(rc) for rc in files]):
return ErrorConfigParser("Unable to find config files in these"
" locations [%s]" % ", ".join(files))
return parse_configs([open_fd(file) for file in files], default_section)
diff --git a/hubble/shell.py b/hubble/shell.py
index 76f538f..fa81020 100644
--- a/hubble/shell.py
+++ b/hubble/shell.py
@@ -18,7 +18,7 @@ import argparse
import logging
import os
import re
-from subprocess import CalledProcessError, check_output, PIPE, Popen
+from subprocess import check_output, PIPE, Popen
import sys
import textwrap
@@ -78,7 +78,7 @@ class Env(dict):
# Delete the key from the env
self.delete(key)
# else add it
- self.set(key, value, section, key.isupper())
+ self.set(key, value, section)
def eval(self):
""" Exapand all the ${variable} directives in the collection """
@@ -146,6 +146,22 @@ def green(msg):
return "\033[92m%s\033[0m" % msg
+def get_cmd(argv, conf, env, hubble_args):
+ # If our invocation name is not 'hubble'
+ if not argv[0].endswith('hubble'):
+ return cmd_path(argv[0], conf)
+
+ if hubble_args.execute:
+ # Use the command provided
+ return hubble_args.execute
+
+ try:
+ return env['cmd'].value
+ except KeyError:
+ raise RuntimeError("Please specify a 'cmd' somewhere in "
+ "your config")
+
+
def get_environments(args, choice, config):
""" Get the environment collection requested from args.env """
sections = [choice]
@@ -172,8 +188,20 @@ def get_environments(args, choice, config):
return "opt.%s" % i[0], str(i[1])
# Add the args to the environment as opt.'<arg_name>'
env.add(dict(map(f, vars(args).items())), section)
+
+ env.eval()
+
+ # Populate environment vars by running opt-cmd
+ # if -o was passed on the commandline
+ if 'opt-cmd' in env:
+ env.add(run(env['opt-cmd'].value, env))
+
+ # Populate environment vars by running the env-cmd if it exists
+ if 'env-cmd' in env:
+ env.add(run(env['env-cmd'].value, env))
+
# Apply var expansion
- results.append(env.eval())
+ results.append(env)
return results
@@ -223,7 +251,7 @@ def cmd_path(cmd, conf):
return "/usr/bin/%s" % basename
-def eval_args(conf, parser):
+def eval_args(argv, conf, parser):
env = conf.safe_get(conf.default_section, 'default-env')
# If no default environment set, look for an
# environment choice on the command line
@@ -231,14 +259,46 @@ def eval_args(conf, parser):
help = "The environment defined in ~/.hubblerc to use"
parser.add_argument('env', nargs='?', metavar='<ENV>',
help=help)
- (arg1, arg2) = parser.parse_known_args()
- return (arg1, arg2, arg1.env)
+ (arg1, arg2) = parser.parse_known_args(args=argv[1:])
+ return arg1, arg2, arg1.env
# Return the args with the default environment choice
- (arg1, arg2) = parser.parse_known_args()
- return (arg1, arg2, env)
-
+ (arg1, arg2) = parser.parse_known_args(args=argv[1:])
+ return arg1, arg2, env
+
+
+def execute_environment(cmd, env, hubble_args, other_args):
+ # If --debug; print out our env config and pass along the
+ # --debug arg
+ if hubble_args.debug:
+ # For cinder client debug
+ if cmd.endswith('cinder'):
+ env.add({'CINDERCLIENT_DEBUG': '1'})
+ print("%r\n" % env)
+ other_args.insert(0, '--debug')
+
+ # Grab a copy of the local environment and inject it into
+ # our environment
+ environ = os.environ.copy()
+ environ.update(env.to_dict())
-def main():
+ try:
+ # Run the requested command
+ p = Popen([cmd] + other_args,
+ stdout=PIPE,
+ stderr=PIPE,
+ env=environ)
+ except OSError as e:
+ if e.errno == 2:
+ print("-- No such executable '%s', you must specify the "
+ "executable in the [hubble-commands] section of the "
+ "config (See README)" % cmd.value)
+ message = "exec failed '%s' - %s" % (cmd.value, e)
+ raise RuntimeError(message)
+
+ return p
+
+
+def main(argv=sys.argv, stdout=sys.stdout, stderr=sys.stderr, files=None):
logging.basicConfig(format='-- %(message)s')
log.setLevel(logging.CRITICAL)
formatter_class = argparse.RawDescriptionHelpFormatter
@@ -250,7 +310,7 @@ def main():
on environment variables for configuration.
Use ~/.hubblerc for user wide environments then place a
- .hubblerc in a local directory to overide ~/.hubblerc
+ .hubblerc in a local directory to override ~/.hubblerc
"""))
parser.add_argument('-o', '--option',
help="an argument to pass to the opt-cmd")
@@ -262,109 +322,47 @@ def main():
help="Adds CINDERCLIENT_DEBUG=1 to the environment "
"and passes --debug to selected command")
- try:
- # Read the configs
- conf = read_configs(default_section='hubble')
- # Evaluate the command line arguments and return our args
- # the commands args and the environment choice the user made
- hubble_args, other_args, choice = eval_args(conf, parser)
- # Do this so we pass along the -h to the command
- # if we are using invocation discovery
- if hubble_args.help and (choice is None):
- return parser.print_help()
-
- # If there was an error
- if conf.get_error():
- print(conf.get_error())
- return 1
-
- if choice is None:
- print("Environments Configured: %s" % ",".join(conf.sections()))
- print("See --help for usage")
- return 1
- if hubble_args.help:
- other_args.append('--help')
-
- # Set our log level
- if hubble_args.debug:
- log.setLevel(logging.DEBUG)
-
- # Read environment values from config files
- environments = get_environments(hubble_args, choice, conf)
- processes = []
- for env in environments:
- # Populate environment vars by running opt-cmd
- # if -o was passed on the commandline
- if hubble_args.option:
- if 'opt-cmd' not in env:
- message = "provided -o|--option, but 'opt-cmd' is not " \
- "defined in '%s' section" % env['section'].value
- raise RuntimeError(message)
- env.add(run(env['opt-cmd'].value, env))
-
- # Populate environment vars by running the env-cmd if it exists
- if 'env-cmd' in env:
- env.add(run(env['env-cmd'].value, env))
-
- # If querying multiple environments, display the env name
- if hubble_args.debug:
- print("-- [%s] --" % green(env['section'].value))
-
- # If our invocation name is not 'hubble'
- if not sys.argv[0].endswith('hubble'):
- # Use the invocation name as our 'cmd'
- env.add({'cmd': cmd_path(sys.argv[0], conf)})
-
- if hubble_args.execute:
- # Use the command provided
- env.add({'cmd': hubble_args.execute})
-
- # At this point we should know our 'cmd' to run
- if 'cmd' not in env:
- raise RuntimeError("Please specify a 'cmd' somewhere in "
- "your config")
-
- # If --debug; print out our env config and pass along the
- # --debug arg
- if hubble_args.debug:
- # For cinder client debug
- if env['cmd'].endswith('cinder'):
- env.add({'CINDERCLIENT_DEBUG': '1'})
- print("%r\n" % env)
- other_args.insert(0, '--debug')
-
- # Grab a copy of the local environment and inject it into
- # our environment
- environ = os.environ.copy()
- environ.update(env.to_dict())
-
- try:
- # Run the requested command
- p = Popen([env['cmd'].value] + other_args,
- stdout=PIPE,
- stderr=PIPE,
- env=environ)
- processes.append((p, env['section'].value))
- except OSError as e:
- if e.errno == 2:
- print("-- No such executable '%s', you must specify the "
- "executable in the [hubble-commands] section of the "
- "config (See README)" % env['cmd'].value)
- message = "exec failed '%s' - %s" % (env['cmd'].value, e)
- raise RuntimeError(message)
-
- for p, env in processes:
- if len(environments) != 1:
- print("-- [%s] --" % green(env))
- # Wait for the command to complete
- stdout, stderr = p.communicate()
- sys.stdout.write(stdout.decode('utf-8'))
- sys.stderr.write(stderr.decode('utf-8'))
-
- except (RuntimeError, NoSectionError) as e:
- log.critical(e)
+ # Read the configs
+ conf = read_configs(files=files, default_section='hubble')
+ # Evaluate the command line arguments and return our args
+ # the commands args and the environment choice the user made
+ hubble_args, other_args, choice = eval_args(argv, conf, parser)
+ # Do this so we pass along the -h to the command
+ # if we are using invocation discovery
+ if hubble_args.help and (choice is None):
+ return parser.print_help()
+
+ # If there was an error
+ if conf.get_error():
+ print(conf.get_error())
return 1
- except CalledProcessError as e:
- log.critical(e.output)
- log.critical(e)
+
+ if choice is None:
+ print("Environments Configured: %s" % ",".join(conf.sections()))
+ print("See --help for usage")
return 1
+ if hubble_args.help:
+ other_args.append('--help')
+
+ # Set our log level
+ if hubble_args.debug:
+ log.setLevel(logging.DEBUG)
+
+ # Collect all environments from our config file
+ environments = get_environments(hubble_args, choice, conf)
+ processes = []
+ for env in environments:
+ # Get the command to execute
+ cmd = get_cmd(argv, conf, env, hubble_args)
+ # Create the selected environment to execute our command in
+ p = execute_environment(cmd, env, hubble_args, other_args)
+ processes.append((p, env['section'].value))
+
+ for p, env in processes:
+ if len(environments) != 1:
+ print("-- [%s] --" % green(env))
+ # Wait for the command to complete
+ out, err = p.communicate()
+ stdout.write(out.decode('utf-8'))
+ stderr.write(err.decode('utf-8'))
+ return 0
diff --git a/tox.ini b/tox.ini
index b5fc229..c204552 100644
--- a/tox.ini
+++ b/tox.ini
@@ -30,6 +30,6 @@ commands =
[flake8]
ignore = E731
-#max-complexity = 10
+max-complexity = 10
import-order-style = google
application-import-names = hubble
| enable max-complexity gate
We currently have flake8 `max-complexity` disabled because it would break the flake8 gate. This is because the `main` function in `hubble.shell` is absolutely absurdly complicated:
```
./hubble/shell.py:240:1: C901 'main' is too complex (24)
```
We should refactor that function in to something less stupid, and enable the `max-complexity` gate. | thrawn01/hubble | diff --git a/hubble/tests/test_shell.py b/hubble/tests/test_shell.py
new file mode 100644
index 0000000..a432967
--- /dev/null
+++ b/hubble/tests/test_shell.py
@@ -0,0 +1,73 @@
+# Copyright 2012 Derrick J. Wippler
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from io import StringIO
+import os
+import tempfile
+import unittest
+
+from hubble.shell import main
+
+
+def safe_eval(io_out):
+ str_out = io_out.getvalue()
+ print(str_out)
+ if len(str_out) != 0:
+ return eval(str_out)
+ return {}
+
+
+def run_main(config, argv):
+ stdout = StringIO()
+ stderr = StringIO()
+ ret = main(argv, stdout=stdout, stderr=stderr, files=[config])
+ return ret, safe_eval(stdout), safe_eval(stderr)
+
+
+def generate_test_cmd(print_stmt):
+ with tempfile.NamedTemporaryFile(delete=False) as fd:
+ fd.write("#! /usr/bin/env python\n"
+ "from __future__ import print_function\n"
+ "import os\n"
+ "{0}\n".format(print_stmt).encode())
+ fd.flush()
+ os.chmod(fd.name, 0o755)
+ return fd.name
+
+
+class TestHubble(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.opt_cmd = generate_test_cmd("print('opt-injected-key=value\\n')")
+ cls.cmd = generate_test_cmd("print(dict(os.environ))")
+
+ @classmethod
+ def tearDownClass(cls):
+ os.unlink(cls.opt_cmd)
+ os.unlink(cls.cmd)
+
+ def test_opt_cmd(self):
+ config = StringIO(u"""[hubble]
+ name=Test
+ opt-cmd={0} ${{opt.option}} ${{name}}
+ [dfw]
+ cmd={1}
+ SOME=Thing""".format(self.opt_cmd, self.cmd))
+ config.name = "test.conf"
+
+ ret, out, err = run_main(config, ["hubble", "-o", "my-option",
+ "dfw", "--debug"])
+ self.assertEqual(ret, 0)
+ self.assertEqual(out['SOME'], "Thing")
+ self.assertEqual(out['opt-injected-key'], "value")
diff --git a/hubble/tests/unit/test_hubble.py b/hubble/tests/unit/test_hubble.py
index 2de761f..0091d82 100644
--- a/hubble/tests/unit/test_hubble.py
+++ b/hubble/tests/unit/test_hubble.py
@@ -13,10 +13,9 @@
# limitations under the License.
import argparse
+from io import StringIO
import unittest
-from six.moves import StringIO
-
from hubble.config import parse_configs
from hubble.shell import empty, Env, get_environments, run, to_dict
@@ -43,9 +42,10 @@ class TestEnv(unittest.TestCase):
parser = argparse.ArgumentParser()
parser.add_argument('env')
parser.add_argument('--user', default='', required=False)
+ parser.add_argument('--option')
args = parser.parse_args(['blah'])
- file = StringIO("[hubble]\n"
+ file = StringIO(u"[hubble]\n"
"name=My name is ${FIRST} ${last}\n"
"[prod-meta]\n"
"meta=['name', 'place']\n"
diff --git a/test-requirements.txt b/test-requirements.txt
index 8f9608c..fcd3c2f 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -7,3 +7,7 @@ pytest-cov>=2.3.0
# flake8 plugins
pep8-naming>=0.4.1
flake8-import-order>=0.8
+
+# Tox
+tox>=2.3.1
+tox-pyenv>=1.0.3
\ No newline at end of file
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 3
} | 1.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest>=2.9.2",
"pytest-flake8>=0.5",
"pytest-cov>=2.3.0",
"pep8-naming>=0.4.1",
"flake8-import-order>=0.8",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
cffi==1.15.1
configparser==5.2.0
coverage==6.2
cryptography==40.0.2
flake8==5.0.4
flake8-import-order==0.18.2
-e git+https://github.com/thrawn01/hubble.git@22a4cd51ed0a312efb3ef79e39a558fe87c970f7#egg=hubble
importlib-metadata==4.2.0
iniconfig==1.1.1
jeepney==0.7.1
keyring==23.4.1
mccabe==0.7.0
packaging==21.3
pep8-naming==0.13.1
pluggy==1.0.0
py==1.11.0
pycodestyle==2.9.1
pycparser==2.21
pyflakes==2.5.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
pytest-flake8==1.1.1
SecretStorage==3.3.3
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: hubble
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- cffi==1.15.1
- configparser==5.2.0
- coverage==6.2
- cryptography==40.0.2
- flake8==5.0.4
- flake8-import-order==0.18.2
- importlib-metadata==4.2.0
- iniconfig==1.1.1
- jeepney==0.7.1
- keyring==23.4.1
- mccabe==0.7.0
- packaging==21.3
- pep8-naming==0.13.1
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pycparser==2.21
- pyflakes==2.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-flake8==1.1.1
- secretstorage==3.3.3
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/hubble
| [
"hubble/tests/test_shell.py::TestHubble::test_opt_cmd"
]
| []
| [
"hubble/tests/unit/test_hubble.py::TestEnv::test_env",
"hubble/tests/unit/test_hubble.py::TestEnv::test_env_to_dict",
"hubble/tests/unit/test_hubble.py::TestEnv::test_get_environments",
"hubble/tests/unit/test_hubble.py::TestHubble::test_empty",
"hubble/tests/unit/test_hubble.py::TestHubble::test_run",
"hubble/tests/unit/test_hubble.py::TestHubble::test_to_dict"
]
| []
| null | 632 | [
"tox.ini",
"hubble/config.py",
"hubble/shell.py"
]
| [
"tox.ini",
"hubble/config.py",
"hubble/shell.py"
]
|
|
ovh__python-ovh-33 | ae2981b26fce2641a9bae5af68a3d5043fdd8b46 | 2016-07-15 09:31:15 | 6d4e840baecd36a568a0bf7049f99fe9fee97db0 | ncrocfer: Yep, it's ok for me:
```python
In [1]: import ovh
In [2]: ovh.Client().get('/hosting/web/ncrocfer.ovh/ovhConfig', historical=False)
Out[2]: [1779827]
```
Thanks :+1:
| diff --git a/ovh/client.py b/ovh/client.py
index fe989cf..82db162 100644
--- a/ovh/client.py
+++ b/ovh/client.py
@@ -289,14 +289,24 @@ class Client(object):
can be prefixed with an underscore. For example, ``from`` argument of
``POST /email/domain/{domain}/redirection`` may be replaced by ``_from``
+        This function also handles Python booleans, which must be serialized
+        in lowercase ('true'/'false') to be recognized by the API.
+
:param dict kwargs: input kwargs
        :return dict: filtered kwargs
"""
arguments = {}
for k, v in kwargs.items():
+            # Handle Python keyword collision
if k[0] == '_' and k[1:] in keyword.kwlist:
k = k[1:]
+
+ # Handle Booleans
+ if isinstance(v, bool):
+ v = str(v).lower()
+
+ # Commit
arguments[k] = v
return arguments
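
For reference, a minimal self-contained sketch of the canonicalization introduced above. `canonicalize_kwargs` is a hypothetical free function mirroring the patched helper; in the library itself this logic lives on `Client`:

```python
import keyword

def canonicalize_kwargs(kwargs):
    """Sketch of the filtering above: strip the leading underscore from
    Python-keyword collisions and lowercase booleans for the API."""
    arguments = {}
    for k, v in kwargs.items():
        if k.startswith('_') and k[1:] in keyword.kwlist:
            k = k[1:]           # e.g. '_from' -> 'from'
        if isinstance(v, bool):
            v = str(v).lower()  # True -> 'true', False -> 'false'
        arguments[k] = v
    return arguments

assert canonicalize_kwargs({'historical': False, '_from': 'me'}) == \
    {'historical': 'false', 'from': 'me'}
```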
| API raises BadParameterError for boolean arguments
Hello,
When an API call requires a boolean argument, we send it as `True` or `False` (valid Python booleans), but the API raises `BadParametersError` because it expects a string (`'true'` or `'false'`):
```python
In [1]: import ovh
In [2]: client = ovh.Client()
In [3]: client.get('/hosting/web/ncrocfer.ovh/ovhConfig', historical=False)
---------------------------------------------------------------------------
BadParametersError Traceback (most recent call last)
<ipython-input-3-b9140291291d> in <module>()
----> 1 client.get('/hosting/web/ncrocfer.ovh/ovhConfig', historical=False)
/home/ncrocfer/.virtualenvs/ovh/lib/python3.4/site-packages/ovh/client.py in get(self, _target, _need_auth, **kwargs)
317 _target = '%s?%s' % (_target, query_string)
318
--> 319 return self.call('GET', _target, None, _need_auth)
320
321 def put(self, _target, _need_auth=True, **kwargs):
/home/ncrocfer/.virtualenvs/ovh/lib/python3.4/site-packages/ovh/client.py in call(self, method, path, data, need_auth)
446 raise ResourceNotFoundError(json_result.get('message'))
447 elif status == 400:
--> 448 raise BadParametersError(json_result.get('message'))
449 elif status == 409:
450 raise ResourceConflictError(json_result.get('message'))
BadParametersError: [historical] Given data (False) is not valid for type boolean
In [4]: client.get('/hosting/web/ncrocfer.ovh/ovhConfig', historical='false')
Out[4]: [1779827]
```
Is it possible to automatically translate boolean arguments so they are API-compatible?
Thanks. | ovh/python-ovh | diff --git a/tests/test_client.py b/tests/test_client.py
index ddea110..66b300a 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -185,6 +185,12 @@ class testClient(unittest.TestCase):
self.assertEqual(m_call.return_value, api.get(FAKE_URL+'?query=string', param="test"))
m_call.assert_called_once_with('GET', FAKE_URL+'?query=string¶m=test', None, True)
+ # boolean arguments
+ m_call.reset_mock()
+ api = Client(ENDPOINT, APPLICATION_KEY, APPLICATION_SECRET, CONSUMER_KEY)
+ self.assertEqual(m_call.return_value, api.get(FAKE_URL+'?query=string', checkbox=True))
+ m_call.assert_called_once_with('GET', FAKE_URL+'?query=string&checkbox=true', None, True)
+
# keyword calling convention
m_call.reset_mock()
api = Client(ENDPOINT, APPLICATION_KEY, APPLICATION_SECRET, CONSUMER_KEY)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
coverage==3.7.1
coveralls==0.4.2
docopt==0.6.2
docutils==0.21.2
exceptiongroup==1.2.2
idna==3.10
iniconfig==2.1.0
Jinja2==3.1.6
MarkupSafe==3.0.2
mock==1.0.1
nose==1.3.3
ordereddict==1.0
-e git+https://github.com/ovh/python-ovh.git@ae2981b26fce2641a9bae5af68a3d5043fdd8b46#egg=ovh
packaging==24.2
pluggy==1.5.0
Pygments==2.19.1
pytest==8.3.5
PyYAML==6.0.2
requests==2.32.3
Sphinx==1.2.2
tomli==2.2.1
urllib3==2.3.0
yanc==0.2.4
| name: python-ovh
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- coverage==3.7.1
- coveralls==0.4.2
- docopt==0.6.2
- docutils==0.21.2
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- jinja2==3.1.6
- markupsafe==3.0.2
- mock==1.0.1
- nose==1.3.3
- ordereddict==1.0
- packaging==24.2
- pluggy==1.5.0
- pygments==2.19.1
- pytest==8.3.5
- pyyaml==6.0.2
- requests==2.32.3
- sphinx==1.2.2
- tomli==2.2.1
- urllib3==2.3.0
- yanc==0.2.4
prefix: /opt/conda/envs/python-ovh
| [
"tests/test_client.py::testClient::test_get"
]
| [
"tests/test_client.py::testClient::test_endpoints",
"tests/test_client.py::testClient::test_init_from_custom_config"
]
| [
"tests/test_client.py::testClient::test__canonicalize_kwargs",
"tests/test_client.py::testClient::test_call_no_sign",
"tests/test_client.py::testClient::test_call_signature",
"tests/test_client.py::testClient::test_delete",
"tests/test_client.py::testClient::test_init",
"tests/test_client.py::testClient::test_init_from_config",
"tests/test_client.py::testClient::test_new_consumer_key_request",
"tests/test_client.py::testClient::test_post",
"tests/test_client.py::testClient::test_put",
"tests/test_client.py::testClient::test_request_consumerkey",
"tests/test_client.py::testClient::test_time_delta"
]
| []
| BSD License | 633 | [
"ovh/client.py"
]
| [
"ovh/client.py"
]
|
EdinburghGenomics__EGCG-Core-11 | a835cffab69da2193d4653f31230f0d487596b78 | 2016-07-15 13:03:14 | 43f124d6f77db73cff13117003295ad715d9aabc | diff --git a/egcg_core/ncbi.py b/egcg_core/ncbi.py
index 13f74d6..69cc44c 100644
--- a/egcg_core/ncbi.py
+++ b/egcg_core/ncbi.py
@@ -76,7 +76,7 @@ def _fetch_from_eutils(species):
rank = None
if match:
rank = match.group(1)
- if rank == 'species':
+ if rank in ['species', 'subspecies']:
scientific_name = common_name = None
match = re.search('<ScientificName>(.+?)</ScientificName>', r.text, re.MULTILINE)
if match:
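
The one-line fix above simply broadens the rank check. As a hedged alternative, here is a sketch of the "more robust" XML handling the issue below asks for, parsing the efetch payload with the standard library's ElementTree instead of regex-scanning it (the `TaxaSet`/`Taxon` document shape is assumed from NCBI's response format; `parse_taxon` is an illustrative name, not the module's API):

```python
import xml.etree.ElementTree as ElementTree

def parse_taxon(xml_text):
    # Parse the efetch payload as XML rather than scanning it with regexes.
    taxon = ElementTree.fromstring(xml_text).find('.//Taxon')
    if taxon is None or taxon.findtext('Rank') not in ('species', 'subspecies'):
        return None, None
    return taxon.findtext('ScientificName'), taxon.findtext('.//CommonName')

sample = '''<TaxaSet><Taxon>
  <ScientificName>Canis lupus familiaris</ScientificName>
  <OtherNames><CommonName>dog</CommonName></OtherNames>
  <Rank>subspecies</Rank>
</Taxon></TaxaSet>'''
assert parse_taxon(sample) == ('Canis lupus familiaris', 'dog')
```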
| Canis lupus familiaris NCBI query returns None
Because _C. l. familiaris_ is ranked `subspecies` in its efetch record, the `rank == 'species'` check in `fetch_from_eutils` rejects it and the lookup returns None. We should query the XML more robustly. | EdinburghGenomics/EGCG-Core | diff --git a/tests/test_ncbi.py b/tests/test_ncbi.py
index ef931bb..af10556 100644
--- a/tests/test_ncbi.py
+++ b/tests/test_ncbi.py
@@ -24,6 +24,11 @@ def test_fetch_from_eutils():
<OtherNames><CommonName>a common name</CommonName></OtherNames>
<Rank>species</Rank>
'''
+ ncbi_fetch_data_sub_spe = '''
+ <ScientificName>Genus species</ScientificName>
+ <OtherNames><CommonName>a common name</CommonName></OtherNames>
+ <Rank>subspecies</Rank>
+ '''
patched_get = patch(
'egcg_core.ncbi.requests.get',
@@ -34,7 +39,15 @@ def test_fetch_from_eutils():
FakeRestResponse(content=ncbi_fetch_data)
)
)
-
+ patched_get2 = patch(
+ 'egcg_core.ncbi.requests.get',
+ side_effect=(
+ FakeRestResponse(content=ncbi_search_data),
+ FakeRestResponse(content=ncbi_fetch_data_sub_spe),
+ FakeRestResponse(content=ncbi_fetch_data_sub_spe),
+ FakeRestResponse(content=ncbi_fetch_data_sub_spe)
+ )
+ )
with patched_get as mocked_get:
obs = fetch_from_eutils('a_species')
assert obs == ('1337', 'Genus species', 'a common name')
@@ -46,6 +59,10 @@ def test_fetch_from_eutils():
'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi',
params={'db': 'Taxonomy', 'id': '1337'}
)
+ with patched_get2:
+ obs = fetch_from_eutils('a_species')
+ assert obs == ('1337', 'Genus species', 'a common name')
+
def test_cache():
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
-e git+https://github.com/EdinburghGenomics/EGCG-Core.git@a835cffab69da2193d4653f31230f0d487596b78#egg=EGCG_Core
exceptiongroup==1.2.2
genologics==1.0.0
idna==3.10
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
PyYAML==6.0.2
requests==2.32.3
tomli==2.2.1
urllib3==2.3.0
| name: EGCG-Core
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- exceptiongroup==1.2.2
- genologics==1.0.0
- idna==3.10
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pyyaml==6.0.2
- requests==2.32.3
- tomli==2.2.1
- urllib3==2.3.0
prefix: /opt/conda/envs/EGCG-Core
| [
"tests/test_ncbi.py::test_fetch_from_eutils"
]
| []
| [
"tests/test_ncbi.py::test_cache",
"tests/test_ncbi.py::test_get_species_name"
]
| []
| MIT License | 634 | [
"egcg_core/ncbi.py"
]
| [
"egcg_core/ncbi.py"
]
|
|
EdinburghGenomics__EGCG-Core-12 | 43f124d6f77db73cff13117003295ad715d9aabc | 2016-07-15 14:25:56 | 43f124d6f77db73cff13117003295ad715d9aabc | diff --git a/egcg_core/__init__.py b/egcg_core/__init__.py
index e1424ed..58d168b 100644
--- a/egcg_core/__init__.py
+++ b/egcg_core/__init__.py
@@ -1,1 +1,1 @@
-__version__ = '0.3.1'
+__version__ = '0.4'
diff --git a/egcg_core/clarity.py b/egcg_core/clarity.py
index 4691f7c..e896d04 100644
--- a/egcg_core/clarity.py
+++ b/egcg_core/clarity.py
@@ -2,13 +2,16 @@ import re
from genologics.lims import Lims
from egcg_core.config import cfg
from egcg_core.app_logging import logging_default as log_cfg
-from egcg_core.exceptions import LimsCommunicationError
+from egcg_core.exceptions import EGCGError
app_logger = log_cfg.get_logger('clarity')
try:
from egcg_core.ncbi import get_species_name
except ImportError:
- app_logger.error('Could not import egcg_core.ncbi. Is sqlite3 available?')
+ app_logger.warning('Could not import egcg_core.ncbi. Is sqlite3 available?')
+
+ def get_species_name(query_species):
+ raise EGCGError('Could not import egcg_core.ncbi.get_species_name - sqlite3 seems to be unavailable.')
_lims = None
@@ -91,8 +94,8 @@ def sanitize_user_id(user_id):
substitutions = (
(None, None),
- (re.compile('_(\d{2})$'), ':\g<1>'),
- (re.compile('__(\w):(\d{2})'), ' _\g<1>:\g<2>')
+ (re.compile('_(\d{2})$'), ':\g<1>'), # '_01' -> ':01'
+ (re.compile('__(\w):(\d{2})'), ' _\g<1>:\g<2>') # '__L:01' -> ' _L:01'
)
@@ -115,11 +118,12 @@ def _get_list_of_samples(sample_names, sub=0):
lims.get_batch(samples)
if len(samples) != len(sample_names): # haven't got all the samples because some had _01/__L:01
+ sub += 1
+ remainder = sorted(set(_sample_names).difference(set([s.name for s in samples])))
if sub < len(substitutions):
- remainder = sorted(set(_sample_names).difference(set([s.name for s in samples])))
- samples.extend(_get_list_of_samples(remainder, sub + 1))
- else:
- raise LimsCommunicationError('Expected %s back, got %s' % (_sample_names, len(samples)))
+ samples.extend(_get_list_of_samples(remainder, sub))
+ else: # end recursion
+ app_logger.warning('Could not find %s in Lims' % remainder)
return samples
diff --git a/egcg_core/executor/cluster_executor.py b/egcg_core/executor/cluster_executor.py
index ce66a44..f9b29d1 100644
--- a/egcg_core/executor/cluster_executor.py
+++ b/egcg_core/executor/cluster_executor.py
@@ -38,9 +38,8 @@ class ClusterExecutor(AppLogger):
sleep(30)
return self._job_exit_code()
- @classmethod
- def _get_writer(cls, job_name, working_dir, walltime=None, cpus=1, mem=2, jobs=1, log_commands=True):
- return cls.script_writer(job_name, working_dir, cfg['job_queue'], cpus, mem, walltime, jobs, log_commands)
+ def _get_writer(self, job_name, working_dir, walltime=None, cpus=1, mem=2, jobs=1, log_commands=True):
+ return self.script_writer(job_name, working_dir, self.job_queue, cpus, mem, walltime, jobs, log_commands)
def _job_status(self):
raise NotImplementedError
diff --git a/egcg_core/ncbi.py b/egcg_core/ncbi.py
index 69cc44c..13f74d6 100644
--- a/egcg_core/ncbi.py
+++ b/egcg_core/ncbi.py
@@ -76,7 +76,7 @@ def _fetch_from_eutils(species):
rank = None
if match:
rank = match.group(1)
- if rank in ['species', 'subspecies']:
+ if rank == 'species':
scientific_name = common_name = None
match = re.search('<ScientificName>(.+?)</ScientificName>', r.text, re.MULTILINE)
if match:
diff --git a/egcg_core/rest_communication.py b/egcg_core/rest_communication.py
index 1b511aa..bf960cb 100644
--- a/egcg_core/rest_communication.py
+++ b/egcg_core/rest_communication.py
@@ -1,200 +1,182 @@
import requests
from urllib.parse import urljoin
-from base64 import b64encode
from egcg_core.config import cfg
-from egcg_core.app_logging import AppLogger
+from egcg_core.app_logging import logging_default as log_cfg
from egcg_core.exceptions import RestCommunicationError
+app_logger = log_cfg.get_logger(__name__)
+
+table = {' ': '', '\'': '"', 'None': 'null'}
+
+
+def _translate(s):
+ for k, v in table.items():
+ s = s.replace(k, v)
+ return s
+
+
+def api_url(endpoint, **query_args):
+ url = '{base_url}/{endpoint}/'.format(
+ base_url=cfg['rest_api']['url'].rstrip('/'), endpoint=endpoint
+ )
+ if query_args:
+ query = '?' + '&'.join(['%s=%s' % (k, v) for k, v in query_args.items()])
+ url += _translate(query)
+ return url
+
+
+def _parse_query_string(query_string, requires=None):
+ if '?' not in query_string:
+ return {}
+ if query_string.count('?') != 1:
+ raise RestCommunicationError('Bad query string: ' + query_string)
+ href, query = query_string.split('?')
+ query = dict([x.split('=') for x in query.split('&')])
+ if requires and not all([r in query for r in requires]):
+ raise RestCommunicationError('%s did not contain all required fields: %s' % (query_string, requires))
+ return query
+
+
+def _req(method, url, quiet=False, **kwargs):
+ auth = None
+ if 'username' in cfg['rest_api'] and 'password' in cfg['rest_api']:
+ auth = (cfg['rest_api']['username'], cfg['rest_api']['password'])
+
+ r = requests.request(method, url, auth=auth, **kwargs)
+ # e.g: 'POST <url> ({"some": "args"}) -> {"some": "content"}. Status code 201. Reason: CREATED
+ report = '%s %s (%s) -> %s. Status code %s. Reason: %s' % (
+ r.request.method, r.request.path_url, kwargs, r.content.decode('utf-8'), r.status_code, r.reason
+ )
+ if r.status_code in (200, 201):
+ if not quiet:
+ app_logger.debug(report)
+ elif r.status_code == 401:
+ raise RestCommunicationError('Invalid auth credentials')
+ else:
+ app_logger.error(report)
+ return r
+
+
+def get_content(endpoint, paginate=True, quiet=False, **query_args):
+ if paginate:
+ query_args.update(
+ max_results=query_args.pop('max_results', 100), # default to page size of 100
+ page=query_args.pop('page', 1)
+ )
+ url = api_url(endpoint, **query_args)
+ return _req('GET', url, quiet=quiet).json()
-class Communicator(AppLogger):
- table = {' ': '', '\'': '"', 'None': 'null'}
- successful_statuses = (200, 201)
- def __init__(self, auth=None, baseurl=None):
- self._baseurl = baseurl
- self._auth = auth
+def get_documents(endpoint, paginate=True, all_pages=False, quiet=False, **query_args):
+ content = get_content(endpoint, paginate, quiet, **query_args)
+ elements = content['data']
- @property
- def baseurl(self):
- if self._baseurl is None:
- self._baseurl = cfg['rest_api']['url'].rstrip('/')
- return self._baseurl
+ if all_pages and 'next' in content['_links']:
+ next_query = _parse_query_string(content['_links']['next']['href'], requires=('max_results', 'page'))
+ query_args.update(next_query)
+ elements.extend(get_documents(endpoint, all_pages=True, quiet=quiet, **query_args))
- @property
- def auth(self):
- if self._auth is None and 'username' in cfg['rest_api'] and 'password' in cfg['rest_api']:
- self._auth = (cfg['rest_api']['username'], cfg['rest_api']['password'])
- return self._auth
+ return elements
- @staticmethod
- def _hash_auth_token(token):
- return b64encode(token.encode()).decode('utf-8')
- @classmethod
- def _translate(cls, s):
- for k, v in cls.table.items():
- s = s.replace(k, v)
- return s
+def get_document(endpoint, idx=0, **query_args):
+ documents = get_documents(endpoint, **query_args)
+ if documents:
+ return documents[idx]
+ else:
+ app_logger.warning('No document found in endpoint %s for %s', endpoint, query_args)
- def _api_url(self, endpoint, **query_args):
- url = '{base_url}/{endpoint}/'.format(
- base_url=self.baseurl, endpoint=endpoint
- )
- if query_args:
- query = '?' + '&'.join(['%s=%s' % (k, v) for k, v in query_args.items()])
- url += self._translate(query)
- return url
-
- @staticmethod
- def _parse_query_string(query_string, requires=None):
- if '?' not in query_string:
- return {}
- if query_string.count('?') != 1:
- raise RestCommunicationError('Bad query string: ' + query_string)
- href, query = query_string.split('?')
- query = dict([x.split('=') for x in query.split('&')])
- if requires and not all([r in query for r in requires]):
- raise RestCommunicationError('%s did not contain all required fields: %s' % (query_string, requires))
- return query
-
- def _req(self, method, url, quiet=False, **kwargs):
- if type(self.auth) is tuple:
- kwargs.update(auth=self.auth)
- elif type(self.auth) is str:
- # noinspection PyTypeChecker
- kwargs.update(headers={'Authorization': 'Token ' + self._hash_auth_token(self.auth)})
-
- r = requests.request(method, url, **kwargs)
- # e.g: 'POST <url> ({"some": "args"}) -> {"some": "content"}. Status code 201. Reason: CREATED
- report = '%s %s (%s) -> %s. Status code %s. Reason: %s' % (
- r.request.method, r.request.path_url, kwargs, r.content.decode('utf-8'), r.status_code, r.reason
- )
- if r.status_code in self.successful_statuses:
- if not quiet:
- self.debug(report)
- elif r.status_code == 401:
- raise RestCommunicationError('Invalid auth credentials')
- else:
- self.error(report)
- return r
-
- def get_content(self, endpoint, paginate=True, quiet=False, **query_args):
- if paginate:
- query_args.update(
- max_results=query_args.pop('max_results', 100), # default to page size of 100
- page=query_args.pop('page', 1)
- )
- url = self._api_url(endpoint, **query_args)
- return self._req('GET', url, quiet=quiet).json()
-
- def get_documents(self, endpoint, paginate=True, all_pages=False, quiet=False, **query_args):
- content = self.get_content(endpoint, paginate, quiet, **query_args)
- elements = content['data']
-
- if all_pages and 'next' in content['_links']:
- next_query = self._parse_query_string(content['_links']['next']['href'], requires=('max_results', 'page'))
- query_args.update(next_query)
- elements.extend(self.get_documents(endpoint, all_pages=True, quiet=quiet, **query_args))
-
- return elements
-
- def get_document(self, endpoint, idx=0, **query_args):
- documents = self.get_documents(endpoint, **query_args)
- if documents:
- return documents[idx]
- else:
- self.warning('No document found in endpoint %s for %s', endpoint, query_args)
-
- def post_entry(self, endpoint, payload):
- r = self._req('POST', self._api_url(endpoint), json=payload)
- return r.status_code in self.successful_statuses
-
- def put_entry(self, endpoint, element_id, payload):
- r = self._req('PUT', urljoin(self._api_url(endpoint), element_id), json=payload)
- return r.status_code in self.successful_statuses
-
- def _patch_entry(self, endpoint, doc, payload, update_lists=None):
- """
- Patch a specific database item (specified by doc) with the given data payload.
- :param str endpoint:
- :param dict doc: The entry in the database to patch (contains the relevant _id and _etag)
- :param dict payload: Data with which to patch doc
- :param list update_lists: Doc items listed here will be appended rather than replaced by the patch
- """
- url = urljoin(self._api_url(endpoint), doc['_id'])
- _payload = dict(payload)
- headers = {'If-Match': doc.get('_etag')}
- if update_lists:
- for l in update_lists:
- content = doc.get(l, [])
- new_content = [x for x in _payload.get(l, []) if x not in content]
- _payload[l] = content + new_content
- r = self._req('PATCH', url, headers=headers, json=_payload)
- return r.status_code in self.successful_statuses
-
- def patch_entry(self, endpoint, payload, id_field, element_id, update_lists=None):
- """
- Retrieve a document at the given endpoint with the given unique ID, and patch it with some data.
- :param str endpoint:
- :param dict payload:
- :param str id_field: The name of the unique identifier (e.g. 'run_element_id', 'proc_id', etc.)
- :param element_id: The value of id_field to retrieve (e.g. '160301_2_ATGCATGC')
- :param list update_lists:
- """
- doc = self.get_document(endpoint, where={id_field: element_id})
- if doc:
- return self._patch_entry(endpoint, doc, payload, update_lists)
- return False
- def patch_entries(self, endpoint, payload, update_lists=None, **query_args):
- """
- Retrieve many documents and patch them all with the same data.
- :param str endpoint:
- :param dict payload:
- :param list update_lists:
- :param query_args: Database query args to pass to get_documents
- """
- docs = self.get_documents(endpoint, **query_args)
- if docs:
- success = True
- nb_docs = 0
- for doc in docs:
- if self._patch_entry(endpoint, doc, payload, update_lists):
- nb_docs += 1
- else:
- success = False
- self.info('Updated %s documents matching %s', nb_docs, query_args)
- return success
+def post_entry(endpoint, payload):
+ r = _req('POST', api_url(endpoint), json=payload)
+ if r.status_code != 200:
return False
+ return True
- def post_or_patch(self, endpoint, input_json, id_field=None, update_lists=None):
- """
- For each document supplied, either post to the endpoint if the unique id doesn't yet exist there, or
- patch if it does.
- :param str endpoint:
- :param input_json: A single document or list of documents to post or patch to the endpoint.
- :param str id_field: The field to use as the unique ID for the endpoint.
- :param list update_lists:
- """
+
+def put_entry(endpoint, element_id, payload):
+ r = _req('PUT', urljoin(api_url(endpoint), element_id), json=payload)
+ if r.status_code != 200:
+ return False
+ return True
+
+
+def _patch_entry(endpoint, doc, payload, update_lists=None):
+ """
+ Patch a specific database item (specified by doc) with the given data payload.
+ :param str endpoint:
+ :param dict doc: The entry in the database to patch (contains the relevant _id and _etag)
+ :param dict payload: Data with which to patch doc
+ :param list update_lists: Doc items listed here will be appended rather than replaced by the patch
+ """
+ url = urljoin(api_url(endpoint), doc['_id'])
+ _payload = dict(payload)
+ headers = {'If-Match': doc.get('_etag')}
+ if update_lists:
+ for l in update_lists:
+ content = doc.get(l, [])
+ new_content = [x for x in _payload.get(l, []) if x not in content]
+ _payload[l] = content + new_content
+ r = _req('PATCH', url, headers=headers, json=_payload)
+ if r.status_code == 200:
+ return True
+ return False
+
+
+def patch_entry(endpoint, payload, id_field, element_id, update_lists=None):
+ """
+ Retrieve a document at the given endpoint with the given unique ID, and patch it with some data.
+ :param str endpoint:
+ :param dict payload:
+ :param str id_field: The name of the unique identifier (e.g. 'run_element_id', 'proc_id', etc.)
+ :param element_id: The value of id_field to retrieve (e.g. '160301_2_ATGCATGC')
+ :param list update_lists:
+ """
+ doc = get_document(endpoint, where={id_field: element_id})
+ if doc:
+ return _patch_entry(endpoint, doc, payload, update_lists)
+ return False
+
+
+def patch_entries(endpoint, payload, update_lists=None, **query_args):
+ """
+ Retrieve many documents and patch them all with the same data.
+ :param str endpoint:
+ :param dict payload:
+ :param list update_lists:
+ :param query_args: Database query args to pass to get_documents
+ """
+ docs = get_documents(endpoint, **query_args)
+ if docs:
success = True
- for payload in input_json:
- _payload = dict(payload)
- doc = self.get_document(endpoint, where={id_field: _payload[id_field]})
- if doc:
- _payload.pop(id_field)
- s = self._patch_entry(endpoint, doc, _payload, update_lists)
+ nb_docs = 0
+ for doc in docs:
+ if _patch_entry(endpoint, doc, payload, update_lists):
+ nb_docs += 1
else:
- s = self.post_entry(endpoint, _payload)
- success = success and s
+ success = False
+ app_logger.info('Updated %s documents matching %s', nb_docs, query_args)
return success
-
-
-default = Communicator()
-get_content = default.get_content
-get_documents = default.get_documents
-get_document = default.get_document
-post_entry = default.post_entry
-put_entry = default.put_entry
-patch_entry = default.patch_entry
-patch_entries = default.patch_entries
-post_or_patch = default.post_or_patch
+ return False
+
+
+def post_or_patch(endpoint, input_json, id_field=None, update_lists=None):
+ """
+ For each document supplied, either post to the endpoint if the unique id doesn't yet exist there, or
+ patch if it does.
+ :param str endpoint:
+ :param input_json: A single document or list of documents to post or patch to the endpoint.
+ :param str id_field: The field to use as the unique ID for the endpoint.
+ :param list update_lists:
+ """
+ success = True
+ for payload in input_json:
+ _payload = dict(payload)
+ doc = get_document(endpoint, where={id_field: _payload[id_field]})
+ if doc:
+ _payload.pop(id_field)
+ s = _patch_entry(endpoint, doc, _payload, update_lists)
+ else:
+ s = post_entry(endpoint, _payload)
+ success = success and s
+ return success
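
A simplified, self-contained sketch of the termination logic in the `_get_list_of_samples` change above; `lookup` stands in for the Lims batch query, and the substitution table is trimmed to one real rule:

```python
import re

def find_with_substitutions(names, lookup, subs, sub=0):
    found = lookup(names)
    missing = sorted(set(names) - set(found))
    if missing:
        sub += 1
        if sub < len(subs):       # try the next name substitution
            pattern, repl = subs[sub]
            renamed = [pattern.sub(repl, n) for n in missing]
            found.extend(find_with_substitutions(renamed, lookup, subs, sub))
        else:                     # end recursion with a warning, not an IndexError
            print('Could not find %s in Lims' % missing)
    return found

substitutions = ((None, None), (re.compile(r'_(\d{2})$'), r':\g<1>'))
known = {'this', 'that:01'}
results = find_with_substitutions(
    ['this', 'that_01', 'ghost'],
    lambda names: [n for n in names if n in known],
    substitutions,
)
assert results == ['this', 'that:01']  # 'ghost' is reported, not raised
```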
| clarity.get_list_of_samples crashes if a sample doesn't exist in the Lims
The error comes from the way we process sample names: the recursion increments the substitution index past the end of the `substitutions` tuple. The stacktrace is misleading:
```
File "clarity.py", line 98, in get_list_of_samples
results.extend(_get_list_of_samples(sample_names[start:start+max_query]))
File "clarity.py", line 114, in _get_list_of_samples
samples.extend(_get_list_of_samples(remainder, sub + 1))
File "clarity.py", line 114, in _get_list_of_samples
samples.extend(_get_list_of_samples(remainder, sub + 1))
File "clarity.py", line 114, in _get_list_of_samples
samples.extend(_get_list_of_samples(remainder, sub + 1))
File "clarity.py", line 102, in _get_list_of_samples
pattern, repl = substitutions[sub]
```
We should return None for non-existent samples, or leave them out of the returned list. | EdinburghGenomics/EGCG-Core | diff --git a/tests/test_clarity.py b/tests/test_clarity.py
index b4c3684..6e963a2 100644
--- a/tests/test_clarity.py
+++ b/tests/test_clarity.py
@@ -104,10 +104,9 @@ def test_get_list_of_samples():
exp_lims_sample_ids = ['this', 'that:01', 'other _L:01']
calling_sample_ids = ['this', 'that_01', 'other__L_01']
fake_list_samples = [[FakeEntity(n)] for n in exp_lims_sample_ids]
- pbatch = patched_lims('get_batch')
psamples = patched_lims('get_samples', side_effect=fake_list_samples)
- with pbatch, psamples as mocked_get_samples:
+ with patched_lims('get_batch'), psamples as mocked_get_samples:
samples = clarity.get_list_of_samples(calling_sample_ids)
assert [s.name for s in samples] == exp_lims_sample_ids
mocked_get_samples.assert_any_call(name=['this', 'that_01', 'other__L_01'])
@@ -115,6 +114,23 @@ def test_get_list_of_samples():
mocked_get_samples.assert_any_call(name=['other _L:01'])
+def test_get_list_of_samples_broken():
+ exp_lims_sample_ids = ['this', 'that:01', 'other _L:01']
+ calling_sample_ids = ['this', 'that_01', 'other__L_01']
+ fake_list_samples = [[FakeEntity(n)] for n in exp_lims_sample_ids]
+ psamples = patched_lims('get_samples', side_effect=fake_list_samples)
+ log_msgs = []
+ pwarn = patched('app_logger.warning', new=log_msgs.append)
+
+ with patched_lims('get_batch'), psamples as mocked_get_samples, pwarn:
+ samples = clarity.get_list_of_samples(calling_sample_ids + ['sample_not_in_lims'])
+ assert [s.name for s in samples] == exp_lims_sample_ids
+ mocked_get_samples.assert_any_call(name=['this', 'that_01', 'other__L_01', 'sample_not_in_lims'])
+ mocked_get_samples.assert_any_call(name=['other__L:01', 'sample_not_in_lims', 'that:01'])
+ mocked_get_samples.assert_any_call(name=['other _L:01', 'sample_not_in_lims'])
+ assert log_msgs == ["Could not find ['sample_not_in_lims'] in Lims"]
+
+
@patched_lims('get_samples', side_effect=[[], [], [None]])
def test_get_samples(mocked_lims):
assert clarity.get_samples('a_sample_name__L_01') == [None]
@@ -206,8 +222,7 @@ def test_get_output_containers_from_sample_and_step_name(mocked_get_sample, mock
@patched_clarity('get_sample_names_from_plate', ['this', 'that', 'other'])
-@patched_clarity('get_sample',
- Mock(artifact=Mock(container=FakeEntity('a_container', type=FakeEntity('96 well plate')))))
+@patched_clarity('get_sample', Mock(artifact=Mock(container=FakeEntity('a_container', type=FakeEntity('96 well plate')))))
def test_get_samples_arrived_with(mocked_get_sample, mocked_names_from_plate):
assert clarity.get_samples_arrived_with('a_sample_name') == ['this', 'that', 'other']
mocked_get_sample.assert_called_with('a_sample_name')
diff --git a/tests/test_ncbi.py b/tests/test_ncbi.py
index af10556..ef931bb 100644
--- a/tests/test_ncbi.py
+++ b/tests/test_ncbi.py
@@ -24,11 +24,6 @@ def test_fetch_from_eutils():
<OtherNames><CommonName>a common name</CommonName></OtherNames>
<Rank>species</Rank>
'''
- ncbi_fetch_data_sub_spe = '''
- <ScientificName>Genus species</ScientificName>
- <OtherNames><CommonName>a common name</CommonName></OtherNames>
- <Rank>subspecies</Rank>
- '''
patched_get = patch(
'egcg_core.ncbi.requests.get',
@@ -39,15 +34,7 @@ def test_fetch_from_eutils():
FakeRestResponse(content=ncbi_fetch_data)
)
)
- patched_get2 = patch(
- 'egcg_core.ncbi.requests.get',
- side_effect=(
- FakeRestResponse(content=ncbi_search_data),
- FakeRestResponse(content=ncbi_fetch_data_sub_spe),
- FakeRestResponse(content=ncbi_fetch_data_sub_spe),
- FakeRestResponse(content=ncbi_fetch_data_sub_spe)
- )
- )
+
with patched_get as mocked_get:
obs = fetch_from_eutils('a_species')
assert obs == ('1337', 'Genus species', 'a common name')
@@ -59,10 +46,6 @@ def test_fetch_from_eutils():
'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi',
params={'db': 'Taxonomy', 'id': '1337'}
)
- with patched_get2:
- obs = fetch_from_eutils('a_species')
- assert obs == ('1337', 'Genus species', 'a common name')
-
def test_cache():
diff --git a/tests/test_rest_communication.py b/tests/test_rest_communication.py
index 948c111..86b94b9 100644
--- a/tests/test_rest_communication.py
+++ b/tests/test_rest_communication.py
@@ -13,14 +13,12 @@ def rest_url(endpoint):
def ppath(extension):
- return 'egcg_core.rest_communication.Communicator.' + extension
+ return 'egcg_core.rest_communication.' + extension
test_endpoint = 'an_endpoint'
test_request_content = {'data': ['some', {'test': 'content'}]}
-test_patch_document = {
- '_id': '1337', '_etag': 1234567, 'uid': 'a_unique_id', 'list_to_update': ['this', 'that', 'other']
-}
+
patched_response = patch(
'requests.request',
return_value=FakeRestResponse(status_code=200, content=test_request_content)
@@ -40,173 +38,162 @@ def query_args_from_url(url):
return json.loads(json.dumps(d))
-class TestRestCommunication(TestEGCG):
- def setUp(self):
- self.comm = rest_communication.Communicator()
+def test_api_url_query_strings():
+ assert rest_communication.api_url('an_endpoint') == rest_url('an_endpoint')
+ exp = '?where={"this":"that"}&embedded={"things":1}&aggregate=True&sort=-_created'
+ obs = rest_communication.api_url(
+ 'an_endpoint',
+ where={'this': 'that'},
+ embedded={'things': 1},
+ aggregate=True,
+ sort='-_created'
+ ).replace(rest_url('an_endpoint'), '')
+ assert sorted(obs.lstrip('?').split('&')) == sorted(exp.lstrip('?').split('&'))
+
+
+def test_parse_query_string():
+ query_string = 'http://a_url?this=that&other={"another":"more"}'
+ no_query_string = 'http://a_url'
+ dodgy_query_string = 'http://a_url?this=that?other=another'
+
+ p = rest_communication._parse_query_string
+
+ assert p(query_string) == {'this': 'that', 'other': '{"another":"more"}'}
+ assert p(no_query_string) == {}
+
+ with pytest.raises(RestCommunicationError) as e:
+ p(dodgy_query_string)
+ assert str(e) == 'Bad query string: ' + dodgy_query_string
+
+ with pytest.raises(RestCommunicationError) as e2:
+ p(query_string, requires=['things'])
+ assert str(e2) == query_string + ' did not contain all required fields: ' + str(['things'])
+
+
+@patched_response
+def test_req(mocked_response):
+ json_content = ['some', {'test': 'json'}]
+
+ response = rest_communication._req('METHOD', rest_url(test_endpoint), json=json_content)
+ assert response.status_code == 200
+ assert json.loads(response.content.decode('utf-8')) == response.json() == test_request_content
+ mocked_response.assert_called_with('METHOD', rest_url(test_endpoint), auth=auth, json=json_content)
+
+
+def test_get_documents_depaginate():
+ docs = (
+ FakeRestResponse(content={'data': ['this', 'that'], '_links': {'next': {'href': 'an_endpoint?max_results=101&page=2'}}}),
+ FakeRestResponse(content={'data': ['other', 'another'], '_links': {'next': {'href': 'an_endpoint?max_results=101&page=3'}}}),
+ FakeRestResponse(content={'data': ['more', 'things'], '_links': {}})
+ )
+ patched_req = patch(ppath('_req'), side_effect=docs)
+ with patched_req as mocked_req:
+ assert rest_communication.get_documents('an_endpoint', all_pages=True, max_results=101) == [
+ 'this', 'that', 'other', 'another', 'more', 'things'
+ ]
+ assert all([a[0][1].startswith(rest_url('an_endpoint')) for a in mocked_req.call_args_list])
+ assert [query_args_from_url(a[0][1]) for a in mocked_req.call_args_list] == [
+ {'page': '1', 'max_results': '101'},
+ {'page': '2', 'max_results': '101'},
+ {'page': '3', 'max_results': '101'}
+ ]
+
+
+@patched_response
+def test_test_content(mocked_response):
+ data = rest_communication.get_content(test_endpoint, max_results=100, where={'a_field': 'thing'})
+ assert data == test_request_content
+ assert mocked_response.call_args[0][1].startswith(rest_url(test_endpoint))
+ assert query_args_from_url(mocked_response.call_args[0][1]) == {
+ 'max_results': '100', 'where': {'a_field': 'thing'}, 'page': '1'
+ }
+
+
+def test_get_documents():
+ with patched_response:
+ data = rest_communication.get_documents(test_endpoint, max_results=100, where={'a_field': 'thing'})
+ assert data == test_request_content['data']
- def test_hash_auth_token(self):
- assert self.comm._hash_auth_token('a_token') == 'YV90b2tlbg=='
- def test_translate(self):
- assert self.comm._translate(" '' None") == '""null'
+def test_get_document():
+ expected = test_request_content['data'][0]
+ with patched_response:
+ observed = rest_communication.get_document(test_endpoint, max_results=100, where={'a_field': 'thing'})
+ assert observed == expected
- def test_api_url(self):
- assert self.comm._api_url('an_endpoint') == rest_url('an_endpoint')
- exp = '?where={"this":"that"}&embedded={"things":1}&aggregate=True&sort=-_created'
- obs = self.comm._api_url(
+
+@patched_response
+def test_post_entry(mocked_response):
+ rest_communication.post_entry(test_endpoint, payload=test_request_content)
+ mocked_response.assert_called_with('POST', rest_url(test_endpoint), auth=auth, json=test_request_content)
+
+
+@patched_response
+def test_put_entry(mocked_response):
+ rest_communication.put_entry(test_endpoint, 'an_element_id', payload=test_request_content)
+ mocked_response.assert_called_with('PUT', rest_url(test_endpoint) + 'an_element_id', auth=auth, json=test_request_content)
+
+
+test_patch_document = {
+ '_id': '1337', '_etag': 1234567, 'uid': 'a_unique_id', 'list_to_update': ['this', 'that', 'other']
+}
+
+
+@patch('egcg_core.rest_communication.get_document', return_value=test_patch_document)
+@patched_response
+def test_patch_entry(mocked_response, mocked_get_doc):
+ patching_payload = {'list_to_update': ['another']}
+ rest_communication.patch_entry(
+ test_endpoint,
+ payload=patching_payload,
+ id_field='uid',
+ element_id='a_unique_id',
+ update_lists=['list_to_update']
+ )
+
+ mocked_get_doc.assert_called_with(test_endpoint, where={'uid': 'a_unique_id'})
+ mocked_response.assert_called_with(
+ 'PATCH',
+ rest_url(test_endpoint) + '1337',
+ headers={'If-Match': 1234567},
+ auth=auth,
+ json={'list_to_update': ['this', 'that', 'other', 'another']}
+ )
+
+
+test_post_or_patch_payload = {'uid': '1337', 'list_to_update': ['more'], 'another_field': 'that'}
+test_post_or_patch_payload_no_uid = {'list_to_update': ['more'], 'another_field': 'that'}
+test_post_or_patch_doc = {
+ 'uid': 'a_uid', '_id': '1337', '_etag': 1234567, 'list_to_update': ['things'], 'another_field': 'this'
+}
+
+
+def test_post_or_patch():
+ patched_post = patch(ppath('post_entry'), return_value=True)
+ patched_patch = patch(ppath('_patch_entry'), return_value=True)
+ patched_get = patch(ppath('get_document'), return_value=test_post_or_patch_doc)
+ patched_get_none = patch(ppath('get_document'), return_value=None)
+
+ with patched_get as mget, patched_patch as mpatch:
+ success = rest_communication.post_or_patch(
'an_endpoint',
- where={'this': 'that'},
- embedded={'things': 1},
- aggregate=True,
- sort='-_created'
- ).replace(rest_url('an_endpoint'), '')
- assert sorted(obs.lstrip('?').split('&')) == sorted(exp.lstrip('?').split('&'))
-
- def test_parse_query_string(self):
- query_string = 'http://a_url?this=that&other={"another":"more"}'
- no_query_string = 'http://a_url'
- dodgy_query_string = 'http://a_url?this=that?other=another'
-
- p = self.comm._parse_query_string
-
- assert p(query_string) == {'this': 'that', 'other': '{"another":"more"}'}
- assert p(no_query_string) == {}
-
- with pytest.raises(RestCommunicationError) as e:
- p(dodgy_query_string)
- assert str(e) == 'Bad query string: ' + dodgy_query_string
-
- with pytest.raises(RestCommunicationError) as e2:
- p(query_string, requires=['things'])
- assert str(e2) == query_string + ' did not contain all required fields: ' + str(['things'])
-
- @patched_response
- def test_req(self, mocked_response):
- json_content = ['some', {'test': 'json'}]
-
- response = self.comm._req('METHOD', rest_url(test_endpoint), json=json_content)
- assert response.status_code == 200
- assert json.loads(response.content.decode('utf-8')) == response.json() == test_request_content
- mocked_response.assert_called_with('METHOD', rest_url(test_endpoint), auth=auth, json=json_content)
-
- def test_get_documents_depaginate(self):
- docs = (
- FakeRestResponse(content={'data': ['this', 'that'], '_links': {'next': {'href': 'an_endpoint?max_results=101&page=2'}}}),
- FakeRestResponse(content={'data': ['other', 'another'], '_links': {'next': {'href': 'an_endpoint?max_results=101&page=3'}}}),
- FakeRestResponse(content={'data': ['more', 'things'], '_links': {}})
- )
- patched_req = patch(ppath('_req'), side_effect=docs)
- with patched_req as mocked_req:
- assert self.comm.get_documents('an_endpoint', all_pages=True, max_results=101) == [
- 'this', 'that', 'other', 'another', 'more', 'things'
- ]
- assert all([a[0][1].startswith(rest_url('an_endpoint')) for a in mocked_req.call_args_list])
- assert [query_args_from_url(a[0][1]) for a in mocked_req.call_args_list] == [
- {'page': '1', 'max_results': '101'},
- {'page': '2', 'max_results': '101'},
- {'page': '3', 'max_results': '101'}
- ]
-
- @patched_response
- def test_get_content(self, mocked_response):
- data = self.comm.get_content(test_endpoint, max_results=100, where={'a_field': 'thing'})
- assert data == test_request_content
- assert mocked_response.call_args[0][1].startswith(rest_url(test_endpoint))
- assert query_args_from_url(mocked_response.call_args[0][1]) == {
- 'max_results': '100', 'where': {'a_field': 'thing'}, 'page': '1'
- }
-
- def test_get_documents(self):
- with patched_response:
- data = self.comm.get_documents(test_endpoint, max_results=100, where={'a_field': 'thing'})
- assert data == test_request_content['data']
-
- def test_get_document(self):
- expected = test_request_content['data'][0]
- with patched_response:
- observed = self.comm.get_document(test_endpoint, max_results=100, where={'a_field': 'thing'})
- assert observed == expected
-
- @patched_response
- def test_post_entry(self, mocked_response):
- self.comm.post_entry(test_endpoint, payload=test_request_content)
- mocked_response.assert_called_with('POST', rest_url(test_endpoint), auth=auth, json=test_request_content)
-
- @patched_response
- def test_put_entry(self, mocked_response):
- self.comm.put_entry(test_endpoint, 'an_element_id', payload=test_request_content)
- mocked_response.assert_called_with('PUT', rest_url(test_endpoint) + 'an_element_id', auth=auth, json=test_request_content)
-
- @patch(ppath('get_document'), return_value=test_patch_document)
- @patched_response
- def test_patch_entry(self, mocked_response, mocked_get_doc):
- patching_payload = {'list_to_update': ['another']}
- self.comm.patch_entry(
- test_endpoint,
- payload=patching_payload,
+ [test_post_or_patch_payload],
id_field='uid',
- element_id='a_unique_id',
update_lists=['list_to_update']
)
-
- mocked_get_doc.assert_called_with(test_endpoint, where={'uid': 'a_unique_id'})
- mocked_response.assert_called_with(
- 'PATCH',
- rest_url(test_endpoint) + '1337',
- headers={'If-Match': 1234567},
- auth=auth,
- json={'list_to_update': ['this', 'that', 'other', 'another']}
+ mget.assert_called_with('an_endpoint', where={'uid': '1337'})
+ mpatch.assert_called_with(
+ 'an_endpoint',
+ test_post_or_patch_doc,
+ test_post_or_patch_payload_no_uid,
+ ['list_to_update']
)
+ assert success is True
- def test_post_or_patch(self):
- test_post_or_patch_payload = {'uid': '1337', 'list_to_update': ['more'], 'another_field': 'that'}
- test_post_or_patch_payload_no_uid = {'list_to_update': ['more'], 'another_field': 'that'}
- test_post_or_patch_doc = {
- 'uid': 'a_uid', '_id': '1337', '_etag': 1234567, 'list_to_update': ['things'], 'another_field': 'this'
- }
- patched_post = patch(ppath('post_entry'), return_value=True)
- patched_patch = patch(ppath('_patch_entry'), return_value=True)
- patched_get = patch(ppath('get_document'), return_value=test_post_or_patch_doc)
- patched_get_none = patch(ppath('get_document'), return_value=None)
-
- with patched_get as mget, patched_patch as mpatch:
- success = self.comm.post_or_patch(
- 'an_endpoint',
- [test_post_or_patch_payload],
- id_field='uid',
- update_lists=['list_to_update']
- )
- mget.assert_called_with('an_endpoint', where={'uid': '1337'})
- mpatch.assert_called_with(
- 'an_endpoint',
- test_post_or_patch_doc,
- test_post_or_patch_payload_no_uid,
- ['list_to_update']
- )
- assert success is True
-
- with patched_get_none as mget, patched_post as mpost:
- success = self.comm.post_or_patch(
- 'an_endpoint', [test_post_or_patch_payload], id_field='uid', update_lists=['list_to_update']
- )
- mget.assert_called_with('an_endpoint', where={'uid': '1337'})
- mpost.assert_called_with('an_endpoint', test_post_or_patch_payload)
- assert success is True
-
- def test_token_auth(self):
- token = '{"token": "some"}.tokenauthentication'
- hashed_token = 'eyJ0b2tlbiI6ICJzb21lIn0udG9rZW5hdXRoZW50aWNhdGlvbg=='
- self.comm._auth = token
- with patched_response as p:
- self.comm._req('GET', self.comm.baseurl + 'an_endpoint')
- p.assert_called_with(
- 'GET',
- self.comm.baseurl + 'an_endpoint',
- headers={'Authorization': 'Token ' + hashed_token}
- )
-
-
-def test_default():
- d = rest_communication.default
- assert d.baseurl == 'http://localhost:4999/api/0.1'
- assert d.auth == ('a_user', 'a_password')
+ with patched_get_none as mget, patched_post as mpost:
+ success = rest_communication.post_or_patch(
+ 'an_endpoint', [test_post_or_patch_payload], id_field='uid', update_lists=['list_to_update']
+ )
+ mget.assert_called_with('an_endpoint', where={'uid': '1337'})
+ mpost.assert_called_with('an_endpoint', test_post_or_patch_payload)
+ assert success is True
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 5
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"six"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
-e git+https://github.com/EdinburghGenomics/EGCG-Core.git@43f124d6f77db73cff13117003295ad715d9aabc#egg=EGCG_Core
exceptiongroup==1.2.2
genologics==1.0.0
idna==3.10
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
PyYAML==6.0.2
requests==2.32.3
six==1.17.0
tomli==2.2.1
urllib3==2.3.0
| name: EGCG-Core
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- exceptiongroup==1.2.2
- genologics==1.0.0
- idna==3.10
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pyyaml==6.0.2
- requests==2.32.3
- six==1.17.0
- tomli==2.2.1
- urllib3==2.3.0
prefix: /opt/conda/envs/EGCG-Core
| [
"tests/test_clarity.py::test_get_list_of_samples_broken",
"tests/test_rest_communication.py::test_api_url_query_strings",
"tests/test_rest_communication.py::test_parse_query_string",
"tests/test_rest_communication.py::test_req",
"tests/test_rest_communication.py::test_get_documents_depaginate",
"tests/test_rest_communication.py::test_patch_entry",
"tests/test_rest_communication.py::test_post_or_patch"
]
| []
| [
"tests/test_clarity.py::test_get_valid_lanes",
"tests/test_clarity.py::test_find_project_from_sample",
"tests/test_clarity.py::test_find_run_elements_from_sample",
"tests/test_clarity.py::test_get_species_from_sample",
"tests/test_clarity.py::test_sanitize_user_id",
"tests/test_clarity.py::test_get_list_of_samples",
"tests/test_clarity.py::test_get_samples",
"tests/test_clarity.py::test_get_sample",
"tests/test_clarity.py::test_get_user_sample_name",
"tests/test_clarity.py::test_get_sample_gender",
"tests/test_clarity.py::test_get_genotype_information_from_lims",
"tests/test_clarity.py::test_get_expected_yield_for_sample",
"tests/test_clarity.py::test_get_run",
"tests/test_clarity.py::test_route_samples_to_delivery_workflow",
"tests/test_clarity.py::test_get_plate_id_and_well_from_lims",
"tests/test_clarity.py::test_get_sample_names_from_plate_from_lims",
"tests/test_clarity.py::test_get_sample_names_from_project_from_lims",
"tests/test_clarity.py::test_get_output_containers_from_sample_and_step_name",
"tests/test_clarity.py::test_get_samples_arrived_with",
"tests/test_clarity.py::test_get_samples_genotyped_with",
"tests/test_clarity.py::test_get_samples_sequenced_with",
"tests/test_clarity.py::test_get_released_samples",
"tests/test_clarity.py::test_get_sample_release_date",
"tests/test_ncbi.py::test_fetch_from_eutils",
"tests/test_ncbi.py::test_cache",
"tests/test_ncbi.py::test_get_species_name",
"tests/test_rest_communication.py::test_test_content",
"tests/test_rest_communication.py::test_get_documents",
"tests/test_rest_communication.py::test_get_document",
"tests/test_rest_communication.py::test_post_entry",
"tests/test_rest_communication.py::test_put_entry"
]
| []
| MIT License | 635 | [
"egcg_core/__init__.py",
"egcg_core/executor/cluster_executor.py",
"egcg_core/rest_communication.py",
"egcg_core/clarity.py",
"egcg_core/ncbi.py"
]
| [
"egcg_core/__init__.py",
"egcg_core/executor/cluster_executor.py",
"egcg_core/rest_communication.py",
"egcg_core/clarity.py",
"egcg_core/ncbi.py"
]
|