instance_id (string, 10-57 chars) | patch (string, 261-19.8k chars) | repo (string, 7-53 chars) | base_commit (string, 40 chars) | hints_text (string, 1 distinct value) | test_patch (string, 276-2.22M chars) | problem_statement (string, 40-9.59k chars) | version (string, 1 distinct value) | FAIL_TO_PASS (sequence, 1-1.02k items) | PASS_TO_PASS (sequence, 0-7.82k items) | created_at (string, 25 chars) | __index_level_0__ (int64, 0-1.89k) |
---|---|---|---|---|---|---|---|---|---|---|---|
dapr__python-sdk-692 | diff --git a/dapr/clients/health.py b/dapr/clients/health.py
index 120b559..e3daec7 100644
--- a/dapr/clients/health.py
+++ b/dapr/clients/health.py
@@ -28,7 +28,7 @@ class DaprHealth:
headers = {USER_AGENT_HEADER: DAPR_USER_AGENT}
if settings.DAPR_API_TOKEN is not None:
headers[DAPR_API_TOKEN_HEADER] = settings.DAPR_API_TOKEN
- timeout = settings.DAPR_HEALTH_TIMEOUT
+ timeout = float(settings.DAPR_HEALTH_TIMEOUT)
start = time.time()
while True:
| dapr/python-sdk | 9021b952c686dec424cda07023e8df4e5feb978b | diff --git a/tests/clients/test_heatlhcheck.py b/tests/clients/test_heatlhcheck.py
index 5c67c2c..f3be8a4 100644
--- a/tests/clients/test_heatlhcheck.py
+++ b/tests/clients/test_heatlhcheck.py
@@ -62,7 +62,7 @@ class DaprHealthCheckTests(unittest.TestCase):
self.assertIn('Dapr-api-token', headers)
self.assertEqual(headers['Dapr-api-token'], 'mytoken')
- @patch.object(settings, 'DAPR_HEALTH_TIMEOUT', 2)
+ @patch.object(settings, 'DAPR_HEALTH_TIMEOUT', '2.5')
@patch('urllib.request.urlopen')
def test_wait_until_ready_timeout(self, mock_urlopen):
mock_urlopen.return_value.__enter__.return_value = MagicMock(status=500)
@@ -72,5 +72,5 @@ class DaprHealthCheckTests(unittest.TestCase):
with self.assertRaises(TimeoutError):
DaprHealth.wait_until_ready()
- self.assertGreaterEqual(time.time() - start, 2)
+ self.assertGreaterEqual(time.time() - start, 2.5)
self.assertGreater(mock_urlopen.call_count, 1)
| [BUG] DAPR_HEALTH_TIMEOUT as environment variable is not converted to numerical value
## Expected Behavior
When passing `DAPR_HEALTH_TIMEOUT` as an environment variable, it should be converted to a numerical value.
## Actual Behavior
docker container with env `DAPR_HEALTH_TIMEOUT=30`
```timestamp="2024-03-22T11:03:26" level=ERROR message="Traceback (most recent call last):
File "/usr/local/lib/python3.11/site-packages/starlette/routing.py", line 705, in lifespan
async with self.lifespan_context(app) as maybe_state:
File "/usr/local/lib/python3.11/contextlib.py", line 210, in __aenter__
return await anext(self.gen)
^^^^^^^^^^^^^^^^^^^^^
File "/app/proxy/proxy_app.py", line 35, in lifespan
DaprClient()
File "/usr/local/lib/python3.11/site-packages/dapr/clients/__init__.py", line 81, in __init__
super().__init__(address, interceptors, max_grpc_message_length)
File "/usr/local/lib/python3.11/site-packages/dapr/clients/grpc/client.py", line 131, in __init__
DaprHealth.wait_until_ready()
File "/usr/local/lib/python3.11/site-packages/dapr/clients/health.py", line 45, in wait_until_ready
remaining = (start + timeout) - time.time()
~~~~~~^~~~~~~~~
TypeError: unsupported operand type(s) for +: 'float' and 'str'
```
## Steps to Reproduce the Problem
Start a docker container with an example env variable value `DAPR_HEALTH_TIMEOUT=30` and initialize `DaprClient()`
| 0.0 | [
"tests/clients/test_heatlhcheck.py::DaprHealthCheckTests::test_wait_until_ready_timeout"
] | [
"tests/clients/test_heatlhcheck.py::DaprHealthCheckTests::test_wait_until_ready_success",
"tests/clients/test_heatlhcheck.py::DaprHealthCheckTests::test_wait_until_ready_success_with_api_token"
] | 2024-04-09 10:04:12+00:00 | 1,800 |
|
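The fix in the row above comes down to a general rule: values read from the environment always arrive as strings and must be converted before arithmetic. A minimal standalone sketch of that pattern follows; the variable name comes from the issue, while the 60-second default is an illustrative assumption, not dapr's actual default.

```python
import os
import time

# Environment variables are always strings: without an explicit conversion,
# `start + timeout` below would raise "unsupported operand type(s) for +".
timeout = float(os.environ.get("DAPR_HEALTH_TIMEOUT", "60"))

start = time.time()
remaining = (start + timeout) - time.time()
print(f"{remaining:.2f} seconds left in the health-check window")
```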
darosior__python-bip32-10 | diff --git a/bip32/bip32.py b/bip32/bip32.py
index e66103d..b48af98 100644
--- a/bip32/bip32.py
+++ b/bip32/bip32.py
@@ -84,6 +84,7 @@ class BIP32:
if isinstance(path, str):
path = _deriv_path_str_to_list(path)
chaincode, key = self.master_chaincode, self.master_privkey
+ pubkey = self.master_pubkey
# We'll need the private key at some point anyway, so let's derive
# everything from private keys.
if _hardened_index_in_path(path):
@@ -98,15 +99,13 @@ class BIP32:
# We won't need private keys for the whole path, so let's only use
# public key derivation.
else:
- key = self.master_pubkey
for index in path:
- key, chaincode = \
- _derive_public_child(key, chaincode, index)
- pubkey = key
+ pubkey, chaincode = \
+ _derive_public_child(pubkey, chaincode, index)
return chaincode, pubkey
def get_pubkey_from_path(self, path):
- """Get a privkey from a derivation path.
+ """Get a pubkey from a derivation path.
:param path: A list of integers (index of each depth) or a string with
m/x/x'/x notation. (e.g. m/0'/1/2'/2 or m/0H/1/2H/2).
| darosior/python-bip32 | b3f6696229bada1089f06ec095b3d650276c0d7b | diff --git a/tests/test_bip32.py b/tests/test_bip32.py
index d111f3a..80303cb 100644
--- a/tests/test_bip32.py
+++ b/tests/test_bip32.py
@@ -1,4 +1,6 @@
+import coincurve
import os
+import pytest
from bip32 import BIP32, HARDENED_INDEX
@@ -85,7 +87,7 @@ def test_vector_3():
assert (bip32.get_xpriv_from_path("m/0H") == bip32.get_xpriv_from_path([HARDENED_INDEX]))
-def test_sanity_tests():
+def test_sanity_checks():
seed = bytes.fromhex("1077a46dc8545d372f22d9e110ae6c5c2bf7620fe9c4c911f5404d112233e1aa270567dd3554092e051ba3ba86c303590b0309116ac89964ff284db2219d7511")
first_bip32 = BIP32.from_seed(seed)
sec_bip32 = BIP32.from_xpriv("xprv9s21ZrQH143K3o4KUs47P2x9afhH31ekMo2foNTYwrU9wwZ8g5EatR9bn6YmCacdvnHWMnPFUqieQrnunrzuF5UfgGbhbEW43zRnhpPDBUL")
@@ -105,6 +107,7 @@ def test_sanity_tests():
assert first_bip32.get_xpub_from_path(h_path) == sec_bip32.get_xpub_from_path(h_path)
assert first_bip32.get_xpriv_from_path(mixed_path) == sec_bip32.get_xpriv_from_path(mixed_path)
assert first_bip32.get_xpub_from_path(mixed_path) == sec_bip32.get_xpub_from_path(mixed_path)
+
# Taken from iancoleman's website
bip32 = BIP32.from_seed(bytes.fromhex("ac8c2377e5cde867d7e420fbe04d8906309b70d51b8fe58d6844930621a9bc223929155dcfebb4da9d62c86ec0d15adf936a663f4f0cf39cbb0352e7dac073d6"))
assert bip32.get_master_xpriv() == bip32.get_xpriv_from_path([]) == "xprv9s21ZrQH143K2GzaKJsW7DQsxeDpY3zqgusaSx6owWGC19k4mhwnVAsm4qPsCw43NkY2h1BzVLyxWHt9NKF86QRyBj53vModdGcNxtpD6KX"
@@ -124,6 +127,7 @@ def test_sanity_tests():
assert bip32.get_master_xpriv() == xpriv2
assert bip32.get_xpriv_from_path([HARDENED_INDEX, 18]) == xpriv
assert bip32.get_xpub_from_path([HARDENED_INDEX, 18]) == xpub
+
# We should recognize the networks..
# .. for xprivs:
bip32 = BIP32.from_xpriv("xprv9wHokC2KXdTSpEepFcu53hMDUHYfAtTaLEJEMyxBPAMf78hJg17WhL5FyeDUQH5KWmGjGgEb2j74gsZqgupWpPbZgP6uFmP8MYEy5BNbyET")
@@ -135,6 +139,7 @@ def test_sanity_tests():
assert bip32.network == "main"
bip32 = BIP32.from_xpub("tpubD6NzVbkrYhZ4WN3WiKRjeo2eGyYNiKNg8vcQ1UjLNJJaDvoFhmR1XwJsbo5S4vicSPoWQBThR3Rt8grXtP47c1AnoiXMrEmFdRZupxJzH1j")
assert bip32.network == "test"
+
# We should create valid network encoding..
assert BIP32.from_seed(os.urandom(32),
"test").get_master_xpub().startswith("tpub")
@@ -144,3 +149,16 @@ def test_sanity_tests():
"main").get_master_xpub().startswith("xpub")
assert BIP32.from_seed(os.urandom(32),
"main").get_master_xpriv().startswith("xprv")
+
+ # We can get the keys from "m" or []
+ bip32 = BIP32.from_seed(os.urandom(32))
+ assert (bip32.get_master_xpub() == bip32.get_xpub_from_path("m") ==
+ bip32.get_xpub_from_path([]))
+ assert (bip32.get_master_xpriv() == bip32.get_xpriv_from_path("m") ==
+ bip32.get_xpriv_from_path([]))
+ master_non_extended_pubkey = bip32.get_privkey_from_path("m")
+ pubkey = coincurve.PublicKey.from_secret(master_non_extended_pubkey)
+ assert pubkey.format() == bip32.get_pubkey_from_path("m")
+ # But getting from "m'" does not make sense
+ with pytest.raises(ValueError, match="invalid format"):
+ bip32.get_pubkey_from_path("m'")
| Can't derive master xpubs
```python
if _hardened_index_in_path(path):
for index in path:
if index & HARDENED_INDEX:
key, chaincode = \
_derive_hardened_private_child(key, chaincode, index)
else:
key, chaincode = \
_derive_unhardened_private_child(key, chaincode, index)
pubkey = _privkey_to_pubkey(key)
# We won't need private keys for the whole path, so let's only use
# public key derivation.
else:
key = self.master_pubkey
for index in path:
key, chaincode = \
_derive_public_child(key, chaincode, index)
pubkey = key
return chaincode, pubkey
```
Will not set `pubkey` if path is `[]`, but it's a valid path! | 0.0 | [
"tests/test_bip32.py::test_sanity_checks"
] | [
"tests/test_bip32.py::test_vector_1",
"tests/test_bip32.py::test_vector_2",
"tests/test_bip32.py::test_vector_3"
] | 2021-02-20 13:34:24+00:00 | 1,801 |
|
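The bug in the row above is a common loop-initialization mistake: a variable assigned only inside a `for` loop is never bound when the iterable is empty, even though the empty path (`[]` or `"m"`, meaning the master key) is valid input. The patch fixes it by initializing `pubkey` from the master public key before branching. Below is a minimal sketch of the same pattern, deliberately independent of the real BIP32 derivation code.

```python
def derive(master_key, path):
    # Initialize the result before the loop, so that an empty path,
    # a perfectly valid request for the master key itself, still works.
    result = master_key
    for index in path:
        result = f"{result}/{index}"  # stand-in for one child-derivation step
    return result

assert derive("m", []) == "m"        # master key is returned for the empty path
assert derive("m", [0, 1]) == "m/0/1"
```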
darosior__python-bip32-11 | diff --git a/bip32/__init__.py b/bip32/__init__.py
index cc491f9..3a35711 100644
--- a/bip32/__init__.py
+++ b/bip32/__init__.py
@@ -1,4 +1,4 @@
-from .bip32 import BIP32
+from .bip32 import BIP32, PrivateDerivationError, InvalidInputError
from .utils import BIP32DerivationError, HARDENED_INDEX
__version__ = "0.0.8"
@@ -6,5 +6,7 @@ __version__ = "0.0.8"
__all__ = [
"BIP32",
"BIP32DerivationError",
+ "PrivateDerivationError",
+ "InvalidInputError",
"HARDENED_INDEX",
]
diff --git a/bip32/bip32.py b/bip32/bip32.py
index b48af98..7dc8299 100644
--- a/bip32/bip32.py
+++ b/bip32/bip32.py
@@ -10,6 +10,18 @@ from .utils import (
)
+class PrivateDerivationError(ValueError):
+ """
+ Tried to use a derivation requiring private keys, without private keys.
+ """
+ pass
+
+
+class InvalidInputError(ValueError):
+ def __init__(self, message):
+ self.message = message
+
+
class BIP32:
def __init__(self, chaincode, privkey=None, pubkey=None, fingerprint=None,
depth=0, index=0, network="main"):
@@ -30,14 +42,21 @@ class BIP32:
need this for serialization.
:param network: Either "main" or "test".
"""
- assert isinstance(chaincode, bytes)
- assert privkey is not None or pubkey is not None
+ if network not in ["main", "test"]:
+ raise InvalidInputError("'network' must be one of 'main' or 'test'")
+ if not isinstance(chaincode, bytes):
+ raise InvalidInputError("'chaincode' must be bytes")
+ if privkey is None and pubkey is None:
+ raise InvalidInputError("Need at least a 'pubkey' or a 'privkey'")
if privkey is not None:
- assert isinstance(privkey, bytes)
+ if not isinstance(privkey, bytes):
+ raise InvalidInputError("'privkey' must be bytes")
if pubkey is not None:
- assert isinstance(pubkey, bytes)
+ if not isinstance(pubkey, bytes):
+ raise InvalidInputError("'pubkey' must be bytes")
else:
pubkey = _privkey_to_pubkey(privkey)
+
self.master_chaincode = chaincode
self.master_privkey = privkey
self.master_pubkey = pubkey
@@ -53,8 +72,12 @@ class BIP32:
m/x/x'/x notation. (e.g. m/0'/1/2'/2 or m/0H/1/2H/2).
:return: chaincode (bytes), privkey (bytes)
"""
+ if self.master_privkey is None:
+ raise PrivateDerivationError
+
if isinstance(path, str):
path = _deriv_path_str_to_list(path)
+
chaincode, privkey = self.master_chaincode, self.master_privkey
for index in path:
if index & HARDENED_INDEX:
@@ -63,6 +86,7 @@ class BIP32:
else:
privkey, chaincode = \
_derive_unhardened_private_child(privkey, chaincode, index)
+
return chaincode, privkey
def get_privkey_from_path(self, path):
@@ -72,6 +96,9 @@ class BIP32:
m/x/x'/x notation. (e.g. m/0'/1/2'/2 or m/0H/1/2H/2).
:return: privkey (bytes)
"""
+ if self.master_privkey is None:
+ raise PrivateDerivationError
+
return self.get_extended_privkey_from_path(path)[1]
def get_extended_pubkey_from_path(self, path):
@@ -83,6 +110,10 @@ class BIP32:
"""
if isinstance(path, str):
path = _deriv_path_str_to_list(path)
+
+ if _hardened_index_in_path(path) and self.master_privkey is None:
+ raise PrivateDerivationError
+
chaincode, key = self.master_chaincode, self.master_privkey
pubkey = self.master_pubkey
# We'll need the private key at some point anyway, so let's derive
@@ -102,6 +133,7 @@ class BIP32:
for index in path:
pubkey, chaincode = \
_derive_public_child(pubkey, chaincode, index)
+
return chaincode, pubkey
def get_pubkey_from_path(self, path):
@@ -120,8 +152,12 @@ class BIP32:
m/x/x'/x notation. (e.g. m/0'/1/2'/2 or m/0H/1/2H/2).
:return: The encoded extended pubkey as str.
"""
+ if self.master_privkey is None:
+ raise PrivateDerivationError
+
if isinstance(path, str):
path = _deriv_path_str_to_list(path)
+
if len(path) == 0:
return self.get_master_xpriv()
elif len(path) == 1:
@@ -133,6 +169,7 @@ class BIP32:
parent_pubkey,
path[-1], chaincode,
self.network)
+
return base58.b58encode_check(extended_key).decode()
def get_xpub_from_path(self, path):
@@ -144,6 +181,10 @@ class BIP32:
"""
if isinstance(path, str):
path = _deriv_path_str_to_list(path)
+
+ if _hardened_index_in_path(path) and self.master_privkey is None:
+ raise PrivateDerivationError
+
if len(path) == 0:
return self.get_master_xpub()
elif len(path) == 1:
@@ -155,10 +196,13 @@ class BIP32:
parent_pubkey,
path[-1], chaincode,
self.network)
+
return base58.b58encode_check(extended_key).decode()
def get_master_xpriv(self):
"""Get the encoded extended private key of the master private key"""
+ if self.master_privkey is None:
+ raise PrivateDerivationError
extended_key = _serialize_extended_key(self.master_privkey, self.depth,
self.parent_fingerprint,
self.index,
@@ -181,6 +225,9 @@ class BIP32:
:param xpriv: (str) The encoded serialized extended private key.
"""
+ if not isinstance(xpriv, str):
+ raise InvalidInputError("'xpriv' must be a string")
+
extended_key = base58.b58decode_check(xpriv)
(network, depth, fingerprint,
index, chaincode, key) = _unserialize_extended_key(extended_key)
@@ -194,6 +241,9 @@ class BIP32:
:param xpub: (str) The encoded serialized extended public key.
"""
+ if not isinstance(xpub, str):
+ raise InvalidInputError("'xpub' must be a string")
+
extended_key = base58.b58decode_check(xpub)
(network, depth, fingerprint,
index, chaincode, key) = _unserialize_extended_key(extended_key)
| darosior/python-bip32 | 689b212003acc25b7f558262255027b06c6923ce | diff --git a/tests/test_bip32.py b/tests/test_bip32.py
index 80303cb..e3b1d66 100644
--- a/tests/test_bip32.py
+++ b/tests/test_bip32.py
@@ -2,7 +2,9 @@ import coincurve
import os
import pytest
-from bip32 import BIP32, HARDENED_INDEX
+from bip32 import (
+ BIP32, HARDENED_INDEX, PrivateDerivationError, InvalidInputError
+)
def test_vector_1():
@@ -162,3 +164,19 @@ def test_sanity_checks():
# But getting from "m'" does not make sense
with pytest.raises(ValueError, match="invalid format"):
bip32.get_pubkey_from_path("m'")
+
+ # We raise if we attempt to use a privkey without privkey access
+ bip32 = BIP32.from_xpub("xpub6C6zm7YgrLrnd7gXkyYDjQihT6F2ei9EYbNuSiDAjok7Ht56D5zbnv8WDoAJGg1RzKzK4i9U2FUwXG7TFGETFc35vpQ4sZBuYKntKMLshiq")
+ bip32.get_master_xpub()
+ bip32.get_pubkey_from_path("m/0/1")
+ bip32.get_xpub_from_path("m/10000/18")
+ with pytest.raises(PrivateDerivationError):
+ bip32.get_master_xpriv()
+ bip32.get_extended_privkey_from_path("m/0/1/2")
+ bip32.get_privkey_from_path([9, 8])
+ bip32.get_pubkey_from_path("m/0'/1")
+ bip32.get_xpub_from_path("m/10000'/18")
+
+ # We can't create a BIP32 for an unknown network (to test InvalidInputError)
+ with pytest.raises(InvalidInputError, match="'network' must be one of"):
+ BIP32.from_seed(os.urandom(32), network="invalid_net")
| Trying to xpub-derive hardened key could use better error message
Obviously this is impossible, but the current `AssertionError` is not very clear.
```python
>>> bipp = BIP32.from_xpub("xpub6AFtDPwErcEhAY71AQoj83J9E63eLrRwqDar7zi2Ds9GWa3PqjwvPGLs4KrKvaZCojRzKAHSvQ9QEat4NPjvwc3D1ZFYcStbQJki2gxnd62")
>>> bipp.get_xpub_from_path("m/0'")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/store/orion/projects/bitcoin/experiment/pbip32/venv/lib/python3.8/site-packages/bip32/bip32.py", line 154, in get_xpub_from_path
chaincode, pubkey = self.get_extended_pubkey_from_path(path)
File "/store/orion/projects/bitcoin/experiment/pbip32/venv/lib/python3.8/site-packages/bip32/bip32.py", line 93, in get_extended_pubkey_from_path
_derive_hardened_private_child(key, chaincode, index)
File "/store/orion/projects/bitcoin/experiment/pbip32/venv/lib/python3.8/site-packages/bip32/utils.py", line 62, in _derive_hardened_private_child
assert isinstance(privkey, bytes) and isinstance(chaincode, bytes)
AssertionError
``` | 0.0 | [
"tests/test_bip32.py::test_vector_1",
"tests/test_bip32.py::test_vector_2",
"tests/test_bip32.py::test_vector_3",
"tests/test_bip32.py::test_sanity_checks"
] | [] | 2021-02-20 14:16:54+00:00 | 1,802 |
|
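The patch above replaces bare `assert` statements with dedicated `ValueError` subclasses (`InvalidInputError`, `PrivateDerivationError`) so callers get a meaningful message, and so validation is not silently stripped when Python runs with `-O`. The sketch below shows that validation pattern with a hypothetical `Wallet` class standing in for the real `BIP32` implementation.

```python
class PrivateDerivationError(ValueError):
    """Tried to use a derivation requiring private keys, without private keys."""

class InvalidInputError(ValueError):
    """A constructor argument had the wrong type or value."""

class Wallet:
    def __init__(self, chaincode, privkey=None, pubkey=None, network="main"):
        if network not in ("main", "test"):
            raise InvalidInputError("'network' must be one of 'main' or 'test'")
        if not isinstance(chaincode, bytes):
            raise InvalidInputError("'chaincode' must be bytes")
        if privkey is None and pubkey is None:
            raise InvalidInputError("need at least a 'pubkey' or a 'privkey'")
        self.chaincode, self.privkey, self.pubkey = chaincode, privkey, pubkey

    def get_master_xpriv(self):
        if self.privkey is None:
            raise PrivateDerivationError("no private key available")
        return self.privkey  # placeholder for real xpriv serialization

# Public-only wallet: private-key operations fail loudly and clearly.
w = Wallet(chaincode=b"\x00" * 32, pubkey=b"\x02" + b"\x11" * 32)
try:
    w.get_master_xpriv()
except PrivateDerivationError as exc:
    print("expected:", exc)
```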
dask__dask-image-170 | diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 05e334d..1c53012 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -125,9 +125,71 @@ Before you submit a pull request, check that it meets these guidelines:
and make sure that the tests pass for all supported Python versions
and platforms.
-Tips
-----
+Running tests locally
+---------------------
-To run a subset of tests::
+To setup a local testing environment that matches the test environments we use
+for our continuous integration services, you can use the ``.yml``
+conda environment files included in the dask-image repository.
- $ py.test tests/test_dask_image.py
+The test environment ``.yml`` files are included in hidden folders:
+
+- Linux test environment files are found in ``.circleci/environments``
+- MacOS test environment files are found in ``.travis_support/environments``
+- Windows test environment files are found in ``.appveyor_support\environments``
+
+There is a separate environment file for each supported Python version.
+
+.. note::
+ If you do not have Anaconda/miniconda installed, please follow
+ `these instructions <https://docs.conda.io/projects/conda/en/latest/user-guide/install/>`_.
+
+
+We will use conda to
+`create an environment from a file
+<https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html#creating-an-environment-from-an-environment-yml-file>`_
+(``conda env create -f name-of-environment-file.yml``).
+
+
+For example, to make a Python 3.8 test environment on Linux, MacOS, or Windows,
+we would use the command shown in the table below:
+
+.. list-table:: Creating a test environment for dask-image with Python 3.8
+ :widths: 20 50
+ :header-rows: 1
+
+ * - OS
+ - conda command
+ * - Linux
+ - ``conda env create -f .circleci/environments/tst_py38.yml``
+ * - MacOS
+ - ``conda env create -f .travis_support/environment/tst_py38.yml``
+ * - Windows
+ - ``conda env create -f .appveyor_support\environments\tst_py38.yml``
+
+
+This command will create a new conda test environment for Python 3.8,
+called ``dask_image_py38_env`` with all the dependencies.
+
+Now you can activate your new testing environment with::
+
+.. code-block:: console
+
+ $ conda activate dask_image_py38_env
+
+Finally, install the development version of dask-image::
+
+.. code-block:: console
+
+ $ pip install -e .
+
+For local testing, please run ``pytest`` in the test environment::
+
+.. code-block:: console
+
+ $ pytest
+
+
+To run a subset of tests, for example all the tests for ndfourier::
+
+ $ pytest tests/test_dask_image/test_ndfourier
diff --git a/dask_image/ndmeasure/__init__.py b/dask_image/ndmeasure/__init__.py
index c3c109d..ded8bc8 100644
--- a/dask_image/ndmeasure/__init__.py
+++ b/dask_image/ndmeasure/__init__.py
@@ -30,6 +30,7 @@ __all__ = [
"minimum_position",
"standard_deviation",
"sum",
+ "sum_labels",
"variance",
]
@@ -678,9 +679,9 @@ def standard_deviation(image, label_image=None, index=None):
return std_lbl
-def sum(image, label_image=None, index=None):
+def sum_labels(image, label_image=None, index=None):
"""
- Find the sum over an image at specified subregions.
+ Find the sum of all pixels over specified subregions of an image.
Parameters
----------
@@ -696,7 +697,7 @@ def sum(image, label_image=None, index=None):
Returns
-------
- sum : ndarray
+ sum_lbl : ndarray
Sum of ``image`` over the ``index`` selected regions from
``label_image``.
"""
@@ -712,6 +713,12 @@ def sum(image, label_image=None, index=None):
return sum_lbl
+def sum(image, label_image=None, index=None):
+ """DEPRECATED FUNCTION. Use `sum_labels` instead."""
+ warnings.warn("DEPRECATED FUNCTION. Use `sum_labels` instead.", DeprecationWarning)
+ return sum_labels(image, label_image=label_image, index=index)
+
+
def variance(image, label_image=None, index=None):
"""
Find the variance over an image at specified subregions.
diff --git a/docs/coverage.rst b/docs/coverage.rst
index bea2893..6e3fe96 100644
--- a/docs/coverage.rst
+++ b/docs/coverage.rst
@@ -212,7 +212,7 @@ This table shows which SciPy ndimage functions are supported by dask-image.
* - ``standard_deviation``
- ✓
- ✓
- * - ``sum``
+ * - ``sum_labels``
- ✓
- ✓
* - ``uniform_filter``
diff --git a/docs/quickstart.rst b/docs/quickstart.rst
index 83ab611..e047ad6 100644
--- a/docs/quickstart.rst
+++ b/docs/quickstart.rst
@@ -83,7 +83,8 @@ Here are some talks and slides that you can watch to learn dask-image:
- 2020, Genevieve Buckley's talk at PyConAU and SciPy Japan
- - `Watch the talk <https://www.youtube.com/watch?v=MpjgzNeISeI&list=PLs4CJRBY5F1IEFq-wumrBDRCu2EqkpY-R&index=2>`_
+ - `Watch the talk in PyConAU <https://www.youtube.com/watch?v=MpjgzNeISeI&list=PLs4CJRBY5F1IEFq-wumrBDRCu2EqkpY-R&index=2>`_
+ - `Scipy Japanのトークを見る(プレゼンテーション:英語, 字幕:日本語) <https://www.youtube.com/watch?v=dP0m2iZX0PU>`_ Watch the talk at SciPy Japan (presentation in English, captions in Japanese)
- `See the slides <https://genevievebuckley.github.io/dask-image-talk-2020>`_
- 2019, John Kirkham's SciPy talk
| dask/dask-image | 78fabbfe6d97daaefee4747258ae278b1300b604 | diff --git a/tests/test_dask_image/test_ndmeasure/test_core.py b/tests/test_dask_image/test_ndmeasure/test_core.py
index b3eaa17..3d66cac 100644
--- a/tests/test_dask_image/test_ndmeasure/test_core.py
+++ b/tests/test_dask_image/test_ndmeasure/test_core.py
@@ -2,7 +2,8 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
-
+from distutils.version import LooseVersion
+import scipy
import itertools as it
import warnings as wrn
@@ -10,6 +11,7 @@ import pytest
import numpy as np
+import scipy
import scipy.ndimage as spnd
import dask.array as da
@@ -28,7 +30,7 @@ import dask_image.ndmeasure
"minimum",
"minimum_position",
"standard_deviation",
- "sum",
+ "sum_labels",
"variance",
]
)
@@ -88,7 +90,7 @@ def test_center_of_mass(datatype):
"minimum",
"minimum_position",
"standard_deviation",
- "sum",
+ "sum_labels",
"variance",
]
)
@@ -110,7 +112,13 @@ def test_center_of_mass(datatype):
]
)
def test_measure_props(funcname, shape, chunks, has_lbls, ind):
- sp_func = getattr(spnd, funcname)
+ # early scipy version uses a different name sum insted of sum_labels.
+ if funcname == 'sum_labels' and scipy.__version__ < LooseVersion('1.5.0'):
+ scipy_funcname = 'sum'
+ else:
+ scipy_funcname = funcname
+
+ sp_func = getattr(spnd, scipy_funcname)
da_func = getattr(dask_image.ndmeasure, funcname)
a = np.random.random(shape)
| DOC: How to create testing environments
Add a note to CONTRIBUTING.rst under the headings 'Fix bugs' and 'Implement features' to say how to create the development conda environment (they're hiding in the hidden folders for travis, appveyor and circleCI). As an example, PR ( https://github.com/dask/dask-image/pull/90 ) shows the text added for testing the docs. | 0.0 | [
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props_err[sum_labels]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape0-chunks0-False-None-sum_labels]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape1-chunks1-False-None-sum_labels]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape2-chunks2-True-None-sum_labels]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape3-chunks3-True-0-sum_labels]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape4-chunks4-True-1-sum_labels]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape5-chunks5-True-ind5-sum_labels]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape6-chunks6-True-ind6-sum_labels]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape7-chunks7-True-ind7-sum_labels]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape8-chunks8-True-ind8-sum_labels]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape9-chunks9-True-ind9-sum_labels]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape10-chunks10-True-ind10-sum_labels]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape11-chunks11-True-ind11-sum_labels]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape12-chunks12-True-ind12-sum_labels]"
] | [
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props_err[center_of_mass]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props_err[extrema]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props_err[maximum]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props_err[maximum_position]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props_err[mean]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props_err[median]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props_err[minimum]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props_err[minimum_position]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props_err[standard_deviation]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props_err[variance]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_center_of_mass[int]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_center_of_mass[float]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_center_of_mass[bool]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_center_of_mass[uint8]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_center_of_mass[uint16]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_center_of_mass[uint32]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_center_of_mass[uint64]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_center_of_mass[int16]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_center_of_mass[int32]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_center_of_mass[int64]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_center_of_mass[float32]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_center_of_mass[float64]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape0-chunks0-False-None-center_of_mass]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape0-chunks0-False-None-maximum]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape0-chunks0-False-None-maximum_position]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape0-chunks0-False-None-mean]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape0-chunks0-False-None-median]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape0-chunks0-False-None-minimum]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape0-chunks0-False-None-minimum_position]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape0-chunks0-False-None-standard_deviation]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape0-chunks0-False-None-variance]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape1-chunks1-False-None-center_of_mass]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape1-chunks1-False-None-maximum]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape1-chunks1-False-None-maximum_position]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape1-chunks1-False-None-mean]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape1-chunks1-False-None-median]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape1-chunks1-False-None-minimum]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape1-chunks1-False-None-minimum_position]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape1-chunks1-False-None-standard_deviation]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape1-chunks1-False-None-variance]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape2-chunks2-True-None-center_of_mass]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape2-chunks2-True-None-maximum]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape2-chunks2-True-None-maximum_position]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape2-chunks2-True-None-mean]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape2-chunks2-True-None-median]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape2-chunks2-True-None-minimum]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape2-chunks2-True-None-minimum_position]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape2-chunks2-True-None-standard_deviation]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape2-chunks2-True-None-variance]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape3-chunks3-True-0-center_of_mass]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape3-chunks3-True-0-maximum]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape3-chunks3-True-0-maximum_position]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape3-chunks3-True-0-mean]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape3-chunks3-True-0-median]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape3-chunks3-True-0-minimum]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape3-chunks3-True-0-minimum_position]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape3-chunks3-True-0-standard_deviation]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape3-chunks3-True-0-variance]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape4-chunks4-True-1-center_of_mass]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape4-chunks4-True-1-maximum]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape4-chunks4-True-1-maximum_position]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape4-chunks4-True-1-mean]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape4-chunks4-True-1-median]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape4-chunks4-True-1-minimum]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape4-chunks4-True-1-minimum_position]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape4-chunks4-True-1-standard_deviation]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape4-chunks4-True-1-variance]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape5-chunks5-True-ind5-center_of_mass]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape5-chunks5-True-ind5-maximum]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape5-chunks5-True-ind5-maximum_position]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape5-chunks5-True-ind5-mean]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape5-chunks5-True-ind5-median]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape5-chunks5-True-ind5-minimum]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape5-chunks5-True-ind5-minimum_position]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape5-chunks5-True-ind5-standard_deviation]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape5-chunks5-True-ind5-variance]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape6-chunks6-True-ind6-center_of_mass]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape6-chunks6-True-ind6-maximum]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape6-chunks6-True-ind6-maximum_position]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape6-chunks6-True-ind6-mean]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape6-chunks6-True-ind6-median]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape6-chunks6-True-ind6-minimum]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape6-chunks6-True-ind6-minimum_position]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape6-chunks6-True-ind6-standard_deviation]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape6-chunks6-True-ind6-variance]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape7-chunks7-True-ind7-center_of_mass]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape7-chunks7-True-ind7-maximum]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape7-chunks7-True-ind7-maximum_position]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape7-chunks7-True-ind7-mean]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape7-chunks7-True-ind7-median]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape7-chunks7-True-ind7-minimum]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape7-chunks7-True-ind7-minimum_position]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape7-chunks7-True-ind7-standard_deviation]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape7-chunks7-True-ind7-variance]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape8-chunks8-True-ind8-center_of_mass]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape8-chunks8-True-ind8-maximum]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape8-chunks8-True-ind8-maximum_position]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape8-chunks8-True-ind8-mean]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape8-chunks8-True-ind8-minimum]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape8-chunks8-True-ind8-minimum_position]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape8-chunks8-True-ind8-standard_deviation]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape8-chunks8-True-ind8-variance]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape9-chunks9-True-ind9-center_of_mass]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape9-chunks9-True-ind9-maximum]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape9-chunks9-True-ind9-maximum_position]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape9-chunks9-True-ind9-mean]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape9-chunks9-True-ind9-minimum]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape9-chunks9-True-ind9-minimum_position]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape9-chunks9-True-ind9-standard_deviation]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape9-chunks9-True-ind9-variance]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape10-chunks10-True-ind10-center_of_mass]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape10-chunks10-True-ind10-maximum]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape10-chunks10-True-ind10-maximum_position]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape10-chunks10-True-ind10-mean]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape10-chunks10-True-ind10-median]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape10-chunks10-True-ind10-minimum]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape10-chunks10-True-ind10-minimum_position]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape10-chunks10-True-ind10-standard_deviation]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape10-chunks10-True-ind10-variance]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape11-chunks11-True-ind11-center_of_mass]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape11-chunks11-True-ind11-maximum]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape11-chunks11-True-ind11-maximum_position]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape11-chunks11-True-ind11-mean]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape11-chunks11-True-ind11-median]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape11-chunks11-True-ind11-minimum]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape11-chunks11-True-ind11-minimum_position]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape11-chunks11-True-ind11-standard_deviation]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape11-chunks11-True-ind11-variance]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape12-chunks12-True-ind12-center_of_mass]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape12-chunks12-True-ind12-maximum]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape12-chunks12-True-ind12-maximum_position]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape12-chunks12-True-ind12-mean]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape12-chunks12-True-ind12-median]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape12-chunks12-True-ind12-minimum]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape12-chunks12-True-ind12-minimum_position]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape12-chunks12-True-ind12-standard_deviation]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_measure_props[shape12-chunks12-True-ind12-variance]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_area[shape0-chunks0-False-None]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_area[shape1-chunks1-False-None]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_area[shape2-chunks2-True-None]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_area[shape3-chunks3-True-0]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_area[shape4-chunks4-True-1]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_area[shape5-chunks5-True-ind5]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_area[shape6-chunks6-True-ind6]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_area[shape7-chunks7-True-ind7]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_area[shape8-chunks8-True-ind8]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_area[shape9-chunks9-True-ind9]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_area[shape10-chunks10-True-ind10]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_area[shape11-chunks11-True-ind11]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_area[shape12-chunks12-True-ind12]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_histogram[0-1-5-shape0-chunks0-False-None]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_histogram[0-1-5-shape1-chunks1-False-None]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_histogram[0-1-5-shape2-chunks2-True-None]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_histogram[0-1-5-shape3-chunks3-True-0]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_histogram[0-1-5-shape4-chunks4-True-1]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_histogram[0-1-5-shape5-chunks5-True-100]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_histogram[0-1-5-shape6-chunks6-True-ind6]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_histogram[0-1-5-shape7-chunks7-True-ind7]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_histogram[0-1-5-shape8-chunks8-True-ind8]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_histogram[0-1-5-shape9-chunks9-True-ind9]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_histogram[0-1-5-shape10-chunks10-True-ind10]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_label[42-0.4-shape0-chunks0-1]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_label[42-0.4-shape1-chunks1-1]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_label[42-0.4-shape2-chunks2-2]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_label[42-0.4-shape3-chunks3-1]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_label[42-0.4-shape4-chunks4-2]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_label[42-0.3-shape5-chunks5-1]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_label[42-0.3-shape6-chunks6-2]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_label[42-0.3-shape7-chunks7-3]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[False-None-shape0-chunks0-None]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[False-None-shape1-chunks1-None]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[False-None-shape2-chunks2-0]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[False-None-shape3-chunks3-1]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[False-None-shape4-chunks4-ind4]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[False-None-shape5-chunks5-ind5]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[False-None-shape6-chunks6-ind6]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[False-None-shape7-chunks7-ind7]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[False-None-shape8-chunks8-ind8]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[False-0-shape0-chunks0-None]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[False-0-shape1-chunks1-None]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[False-0-shape2-chunks2-0]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[False-0-shape3-chunks3-1]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[False-0-shape4-chunks4-ind4]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[False-0-shape5-chunks5-ind5]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[False-0-shape6-chunks6-ind6]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[False-0-shape7-chunks7-ind7]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[False-0-shape8-chunks8-ind8]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[False-1.5-shape0-chunks0-None]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[False-1.5-shape1-chunks1-None]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[False-1.5-shape2-chunks2-0]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[False-1.5-shape3-chunks3-1]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[False-1.5-shape4-chunks4-ind4]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[False-1.5-shape5-chunks5-ind5]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[False-1.5-shape6-chunks6-ind6]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[False-1.5-shape7-chunks7-ind7]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[False-1.5-shape8-chunks8-ind8]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[True-None-shape0-chunks0-None]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[True-None-shape1-chunks1-None]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[True-None-shape2-chunks2-0]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[True-None-shape3-chunks3-1]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[True-None-shape4-chunks4-ind4]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[True-None-shape5-chunks5-ind5]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[True-None-shape6-chunks6-ind6]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[True-None-shape7-chunks7-ind7]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[True-None-shape8-chunks8-ind8]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[True-0-shape0-chunks0-None]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[True-0-shape1-chunks1-None]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[True-0-shape2-chunks2-0]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[True-0-shape3-chunks3-1]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[True-0-shape4-chunks4-ind4]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[True-0-shape5-chunks5-ind5]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[True-0-shape6-chunks6-ind6]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[True-0-shape7-chunks7-ind7]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[True-0-shape8-chunks8-ind8]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[True-1.5-shape0-chunks0-None]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[True-1.5-shape1-chunks1-None]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[True-1.5-shape2-chunks2-0]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[True-1.5-shape3-chunks3-1]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[True-1.5-shape4-chunks4-ind4]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[True-1.5-shape5-chunks5-ind5]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[True-1.5-shape6-chunks6-ind6]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[True-1.5-shape7-chunks7-ind7]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension[True-1.5-shape8-chunks8-ind8]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension_object[shape0-chunks0-None]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension_object[shape1-chunks1-None]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension_object[shape2-chunks2-0]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension_object[shape3-chunks3-1]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension_object[shape4-chunks4-ind4]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension_object[shape5-chunks5-ind5]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension_object[shape6-chunks6-ind6]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension_object[shape7-chunks7-ind7]",
"tests/test_dask_image/test_ndmeasure/test_core.py::test_labeled_comprehension_object[shape8-chunks8-ind8]"
] | 2020-11-01 02:24:17+00:00 | 1,803 |
|
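The dask-image patch above renames `sum` to `sum_labels` (the name SciPy adopted in 1.5) while keeping the old name as a thin wrapper that warns. A minimal sketch of that rename-with-deprecation pattern is shown below, with a stub body standing in for the real labelled-sum computation.

```python
import warnings

def sum_labels(image, label_image=None, index=None):
    """Find the sum of all pixels over specified subregions of an image."""
    # Stub: the real implementation reduces `image` over the labelled regions.
    return float(len(image))

def sum(image, label_image=None, index=None):
    """DEPRECATED FUNCTION. Use `sum_labels` instead."""
    warnings.warn("DEPRECATED FUNCTION. Use `sum_labels` instead.",
                  DeprecationWarning, stacklevel=2)
    return sum_labels(image, label_image=label_image, index=index)
```

Code that must run against both old and new SciPy can pick the name at import time, which is what the updated test does with its `LooseVersion` check.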
dask__dask-jobqueue-563 | diff --git a/dask_jobqueue/htcondor.py b/dask_jobqueue/htcondor.py
index fb7b0be..18fc74e 100644
--- a/dask_jobqueue/htcondor.py
+++ b/dask_jobqueue/htcondor.py
@@ -16,7 +16,6 @@ class HTCondorJob(Job):
%(job_header)s
-Environment = "%(quoted_environment)s"
Arguments = "%(quoted_arguments)s"
Executable = %(executable)s
@@ -67,7 +66,14 @@ Queue
env_extra = dask.config.get(
"jobqueue.%s.env-extra" % self.config_name, default=[]
)
- self.env_dict = self.env_lines_to_dict(env_extra)
+
+ if env_extra is not None:
+ # Overwrite command template: prepend commands from env_extra separated by semicolon.
+ # This is special for HTCondor, because lines to execute on the worker node cannot be
+ # simply added to the submit script like for other batch systems.
+ self._command_template = (
+ "; ".join(env_extra) + "; " + self._command_template
+ )
self.job_header_dict = {
"MY.DaskWorkerName": '"htcondor--$F(MY.JobId)--"',
@@ -118,31 +124,15 @@ Queue
+ " ".join(shlex.quote(arg) for arg in cancel_command_extra)
)
- def env_lines_to_dict(self, env_lines):
- """Convert an array of export statements (what we get from env-extra
- in the config) into a dict"""
- env_dict = {}
- for env_line in env_lines:
- split_env_line = shlex.split(env_line)
- if split_env_line[0] == "export":
- split_env_line = split_env_line[1:]
- for item in split_env_line:
- if "=" in item:
- k, v = item.split("=", 1)
- env_dict[k] = v
- return env_dict
-
def job_script(self):
"""Construct a job submission script"""
quoted_arguments = quote_arguments(["-c", self._command_template])
- quoted_environment = quote_environment(self.env_dict)
job_header_lines = "\n".join(
"%s = %s" % (k, v) for k, v in self.job_header_dict.items()
)
return self._script_template % {
"shebang": self.shebang,
"job_header": job_header_lines,
- "quoted_environment": quoted_environment,
"quoted_arguments": quoted_arguments,
"executable": self.executable,
}
@@ -260,6 +250,17 @@ class HTCondorCluster(JobQueueCluster):
This also works with adaptive clusters. This automatically launches and kill workers based on load.
>>> cluster.adapt(maximum_jobs=20)
+
+ If setup commands need to be run before starting the worker on the worker node, ``env_extra`` can be used,
+ e.g., to activate a virtual environment:
+
+ >>> from dask_jobqueue.htcondor import HTCondorCluster
+ >>> cluster = HTCondorCluster(cores=1, memory="2GB", disk="4GB",
+ env_extra=['cd /some/path/', 'source venv/bin/activate'])
+
+ Note that environment variables are no longer passed via the ``Environment`` parameter in the submit
+ description file. If you explictly want to set that, you need to use ``job_extra``.
+
""".format(
job=job_parameters, cluster=cluster_parameters
)
diff --git a/docs/source/advanced-tips-and-tricks.rst b/docs/source/advanced-tips-and-tricks.rst
index bd79810..237adc6 100644
--- a/docs/source/advanced-tips-and-tricks.rst
+++ b/docs/source/advanced-tips-and-tricks.rst
@@ -68,6 +68,36 @@ accepted option on some SLURM clusters. The error was something like this:
sbatch: error: Memory specification can not be satisfied
sbatch: error: Batch job submission failed: Requested node configuration is not available
+Run setup commands before starting the worker with ``env_extra``
+----------------------------------------------------------------
+
+Sometimes you need to run some setup commands before the actual worker can be started. This includes
+setting environment variables, loading environment modules, sourcing/activating a virtual environment,
+or activating conda/mamba environments.
+
+This can be achieved using the ``env_extra`` parameter. Example for setting up a virtual environment:
+
+.. code-block:: python
+
+ from dask_jobqueue.htcondor import HTCondorCluster
+ env_extra = ['cd /some/path', 'source venv/bin/activate']
+ cluster = HTCondorCluster(cores=1, memory="2GB", disk="4GB", log_directory = 'logs', python='python3',
+ env_extra=env_extra)
+ print(cluster.job_script())
+
+For ``HTCondorCluster``, the commands will be prepended to the actual python call in the ``Arguments``
+parameter in the submit description file. The relevant lines will look like this:
+
+.. code-block:: text
+
+ ...
+ Arguments = "-c 'cd /some/path; source venv/bin/activate; python3 -m distributed.cli.dask_worker tcp://<IP>:<PORT> --nthreads 1 --memory-limit 2.00GB --name dummy-name --nanny --death-timeout 60'"
+ Executable = /bin/sh
+ ...
+
+For other batch systems (``*Cluster`` classes) the additional commands will be inserted as separate lines
+in the submission script.
+
How to handle job queueing system walltime killing workers
----------------------------------------------------------
diff --git a/docs/source/examples.rst b/docs/source/examples.rst
index ad02971..4f9a382 100644
--- a/docs/source/examples.rst
+++ b/docs/source/examples.rst
@@ -32,7 +32,7 @@ PBS Deployments
interface='ib0')
Moab Deployments
-~~~~~~~~~~~~~~~~
+----------------
On systems which use the Moab Workload Manager, a subclass of ``PBSCluster``
can be used, called ``MoabCluster``:
| dask/dask-jobqueue | 066f69c0994d2a87b7b0a54d7de6e2d296d04575 | diff --git a/dask_jobqueue/tests/test_htcondor.py b/dask_jobqueue/tests/test_htcondor.py
index 99a5573..1664da8 100644
--- a/dask_jobqueue/tests/test_htcondor.py
+++ b/dask_jobqueue/tests/test_htcondor.py
@@ -27,7 +27,12 @@ def test_job_script():
processes=2,
memory="100MB",
disk="100MB",
- env_extra=['export LANG="en_US.utf8"', 'export LC_ALL="en_US.utf8"'],
+ env_extra=[
+ 'export LANG="en_US.utf8"',
+ 'export LC_ALL="en_US.utf8"',
+ "cd /some/path/",
+ "source venv/bin/activate",
+ ],
job_extra={"+Extra": "True"},
submit_command_extra=["-verbose"],
cancel_command_extra=["-forcex"],
@@ -40,9 +45,10 @@ def test_job_script():
assert "MY.DaskWorkerDisk = 100000000" in job_script
assert "MY.DaskWorkerMemory = 100000000" in job_script
assert 'MY.JobId = "$(ClusterId).$(ProcId)"' in job_script
- assert "LANG=en_US.utf8" in job_script
- assert "LC_ALL=en_US.utf8" in job_script
- assert "export" not in job_script
+ assert 'export LANG=""en_US.utf8""' in job_script
+ assert 'export LC_ALL=""en_US.utf8""' in job_script
+ assert "cd /some/path/" in job_script
+ assert "source venv/bin/activate" in job_script
assert "+Extra = True" in job_script
assert re.search(
r"condor_submit\s.*-verbose", cluster._dummy_job.submit_command
| Incorrect description for env_extra for HTCondorCluster
Hi,
The description for env_extra in [HTCondorCluster](https://jobqueue.dask.org/en/latest/generated/dask_jobqueue.HTCondorCluster.html#dask_jobqueue.HTCondorCluster) is not correct: the job that HTCondorCluster creates calls dask-worker directly instead of through a bash wrapper script, so you cannot put arbitrary shell commands into env_extra.
The interface supports environment variables as `key=value` pairs, which will be inserted into dask-worker's environment (via the "Environment" attribute in the submit file). (For consistency, you can write `export foo=bar` but the word "export" will be ignored.)
This is also important to keep in mind with regards to #323; renaming env_extra to job_script_extra or similar would be even more inaccurate (for the HTCondor case anyway). | 0.0 | [
"dask_jobqueue/tests/test_htcondor.py::test_job_script"
] | [
"dask_jobqueue/tests/test_htcondor.py::test_header",
"dask_jobqueue/tests/test_htcondor.py::test_config_name_htcondor_takes_custom_config"
] | 2022-07-26 13:35:37+00:00 | 1,804 |
|
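Because HTCondor hands the worker command directly to `/bin/sh -c` rather than executing a submit script line by line, the fix above folds the `env_extra` setup commands into the command template itself, joined by semicolons. A minimal sketch of that string assembly follows; the scheduler address and worker options are illustrative placeholders.

```python
env_extra = ["cd /some/path/", "source venv/bin/activate"]
command_template = (
    "python3 -m distributed.cli.dask_worker tcp://127.0.0.1:8786 "
    "--nthreads 1 --memory-limit 2.00GB"
)

# Prepend the setup commands, separated by semicolons, so that everything
# runs inside the single shell invocation HTCondor launches for the job.
if env_extra:
    command_template = "; ".join(env_extra) + "; " + command_template

print(command_template)
# -> cd /some/path/; source venv/bin/activate; python3 -m distributed.cli.dask_worker ...
```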
dask__dask-jobqueue-606 | diff --git a/dask_jobqueue/core.py b/dask_jobqueue/core.py
index 01c1756..2883c08 100644
--- a/dask_jobqueue/core.py
+++ b/dask_jobqueue/core.py
@@ -48,6 +48,8 @@ job_parameters = """
Seconds to wait for a scheduler before closing workers
extra : list
Deprecated: use ``worker_extra_args`` instead. This parameter will be removed in a future version.
+ worker_command : list
+ Command to run when launching a worker. Defaults to "distributed.cli.dask_worker"
worker_extra_args : list
Additional arguments to pass to `dask-worker`
env_extra : list
@@ -166,6 +168,7 @@ class Job(ProcessInterface, abc.ABC):
death_timeout=None,
local_directory=None,
extra=None,
+ worker_command=None,
worker_extra_args=None,
job_extra=None,
job_extra_directives=None,
@@ -222,6 +225,10 @@ class Job(ProcessInterface, abc.ABC):
)
if extra is None:
extra = dask.config.get("jobqueue.%s.extra" % self.config_name)
+ if worker_command is None:
+ worker_command = dask.config.get(
+ "jobqueue.%s.worker-command" % self.config_name
+ )
if worker_extra_args is None:
worker_extra_args = dask.config.get(
"jobqueue.%s.worker-extra-args" % self.config_name
@@ -332,17 +339,23 @@ class Job(ProcessInterface, abc.ABC):
self._job_script_prologue = job_script_prologue
# dask-worker command line build
- dask_worker_command = "%(python)s -m distributed.cli.dask_worker" % dict(
- python=python
+ dask_worker_command = "%(python)s -m %(worker_command)s" % dict(
+ python=python,
+ worker_command=worker_command
)
+
command_args = [dask_worker_command, self.scheduler]
- command_args += ["--nthreads", self.worker_process_threads]
- if processes is not None and processes > 1:
- command_args += ["--nworkers", processes]
- command_args += ["--memory-limit", self.worker_process_memory]
+ # common
command_args += ["--name", str(name)]
- command_args += ["--nanny" if nanny else "--no-nanny"]
+ command_args += ["--nthreads", self.worker_process_threads]
+ command_args += ["--memory-limit", self.worker_process_memory]
+
+ # distributed.cli.dask_worker specific
+ if worker_command == "distributed.cli.dask_worker":
+ if processes is not None and processes > 1:
+ command_args += ["--nworkers", processes]
+ command_args += ["--nanny" if nanny else "--no-nanny"]
if death_timeout is not None:
command_args += ["--death-timeout", death_timeout]
diff --git a/dask_jobqueue/jobqueue.yaml b/dask_jobqueue/jobqueue.yaml
index bd7b9c5..3bcb8c5 100644
--- a/dask_jobqueue/jobqueue.yaml
+++ b/dask_jobqueue/jobqueue.yaml
@@ -12,6 +12,7 @@ jobqueue:
local-directory: null # Location of fast local storage like /scratch or $TMPDIR
shared-temp-directory: null # Shared directory currently used to dump temporary security objects for workers
extra: null # deprecated: use worker-extra-args
+ worker-command: "distributed.cli.dask_worker" # Command to launch a worker
worker-extra-args: [] # Additional arguments to pass to `dask-worker`
# OAR resource manager options
@@ -44,6 +45,7 @@ jobqueue:
local-directory: null # Location of fast local storage like /scratch or $TMPDIR
shared-temp-directory: null # Shared directory currently used to dump temporary security objects for workers
extra: null # deprecated: use worker-extra-args
+ worker-command: "distributed.cli.dask_worker" # Command to launch a worker
worker-extra-args: [] # Additional arguments to pass to `dask-worker`
# PBS resource manager options
@@ -75,6 +77,7 @@ jobqueue:
local-directory: null # Location of fast local storage like /scratch or $TMPDIR
shared-temp-directory: null # Shared directory currently used to dump temporary security objects for workers
extra: null # deprecated: use worker-extra-args
+ worker-command: "distributed.cli.dask_worker" # Command to launch a worker
worker-extra-args: [] # Additional arguments to pass to `dask-worker`
# SGE resource manager options
@@ -106,6 +109,7 @@ jobqueue:
local-directory: null # Location of fast local storage like /scratch or $TMPDIR
shared-temp-directory: null # Shared directory currently used to dump temporary security objects for workers
extra: null # deprecated: use worker-extra-args
+ worker-command: "distributed.cli.dask_worker" # Command to launch a worker
worker-extra-args: [] # Additional arguments to pass to `dask-worker`
# SLURM resource manager options
@@ -138,6 +142,7 @@ jobqueue:
local-directory: null # Location of fast local storage like /scratch or $TMPDIR
shared-temp-directory: null # Shared directory currently used to dump temporary security objects for workers
extra: null # deprecated: use worker-extra-args
+ worker-command: "distributed.cli.dask_worker" # Command to launch a worker
worker-extra-args: [] # Additional arguments to pass to `dask-worker`
# PBS resource manager options
@@ -169,6 +174,7 @@ jobqueue:
local-directory: null # Location of fast local storage like /scratch or $TMPDIR
shared-temp-directory: null # Shared directory currently used to dump temporary security objects for workers
extra: null # deprecated: use worker-extra-args
+ worker-command: "distributed.cli.dask_worker" # Command to launch a worker
worker-extra-args: [] # Additional arguments to pass to `dask-worker`
# LSF resource manager options
@@ -203,6 +209,7 @@ jobqueue:
local-directory: null # Location of fast local storage like /scratch or $TMPDIR
shared-temp-directory: null # Shared directory currently used to dump temporary security objects for workers
extra: null # deprecated: use worker-extra-args
+ worker-command: "distributed.cli.dask_worker" # Command to launch a worker
worker-extra-args: [] # Additional arguments to pass to `dask-worker`
# HTCondor Resource Manager options
@@ -232,6 +239,7 @@ jobqueue:
local-directory: null # Location of fast local storage like /scratch or $TMPDIR
shared-temp-directory: null # Shared directory currently used to dump temporary security objects for workers
extra: null # deprecated: use worker-extra-args
+ worker-command: "distributed.cli.dask_worker" # Command to launch a worker
worker-extra-args: [] # Additional arguments to pass to `dask-worker`
env-extra: null
diff --git a/docs/source/changelog.rst b/docs/source/changelog.rst
index 6df2f64..40ccb4c 100644
--- a/docs/source/changelog.rst
+++ b/docs/source/changelog.rst
@@ -4,6 +4,9 @@ Changelog
Development version
-------------------
+0.8.2 / 2023-06-15
+------------------
+
- Extend OARCluster implementation to let OAR take into account the memory parameter (:pr:`598`, :pr:`595`)
0.8.1 / 2022-10-04
| dask/dask-jobqueue | af044b4f151d0f749af1958e2e61636f62545a3e | diff --git a/dask_jobqueue/tests/test_htcondor.py b/dask_jobqueue/tests/test_htcondor.py
index c5c5bf6..653b72d 100644
--- a/dask_jobqueue/tests/test_htcondor.py
+++ b/dask_jobqueue/tests/test_htcondor.py
@@ -141,6 +141,7 @@ def test_config_name_htcondor_takes_custom_config():
"interface": None,
"death-timeout": None,
"extra": None,
+ "worker-command": None,
"worker-extra-args": [],
"env-extra": None,
"job-script-prologue": [],
diff --git a/dask_jobqueue/tests/test_jobqueue_core.py b/dask_jobqueue/tests/test_jobqueue_core.py
index 8f743f7..23d2059 100644
--- a/dask_jobqueue/tests/test_jobqueue_core.py
+++ b/dask_jobqueue/tests/test_jobqueue_core.py
@@ -49,6 +49,9 @@ def test_command_template(Cluster):
assert " --local-directory /scratch" in cluster._dummy_job._command_template
assert " --preload mymodule" in cluster._dummy_job._command_template
+ with Cluster(cores=2, memory="4GB", worker_command="dask_cuda.cli") as cluster:
+ assert "dask_cuda.cli" in cluster._dummy_job._command_template
+
def test_shebang_settings(Cluster, request):
if Cluster is HTCondorCluster or Cluster is LocalCluster:
diff --git a/dask_jobqueue/tests/test_lsf.py b/dask_jobqueue/tests/test_lsf.py
index 40c826c..134599b 100644
--- a/dask_jobqueue/tests/test_lsf.py
+++ b/dask_jobqueue/tests/test_lsf.py
@@ -101,9 +101,9 @@ def test_job_script():
in job_script
)
formatted_bytes = format_bytes(parse_bytes("7GB")).replace(" ", "")
- assert (
- f"--nthreads 2 --nworkers 4 --memory-limit {formatted_bytes}" in job_script
- )
+ assert ("--nthreads 2" in job_script)
+ assert ("--nworkers 4" in job_script)
+ assert (f"--memory-limit {formatted_bytes}" in job_script)
with LSFCluster(
queue="general",
@@ -130,9 +130,9 @@ def test_job_script():
in job_script
)
formatted_bytes = format_bytes(parse_bytes("7GB")).replace(" ", "")
- assert (
- f"--nthreads 2 --nworkers 4 --memory-limit {formatted_bytes}" in job_script
- )
+ assert ("--nthreads 2" in job_script)
+ assert ("--nworkers 4" in job_script)
+ assert (f"--memory-limit {formatted_bytes}" in job_script)
with LSFCluster(
walltime="1:00",
@@ -322,6 +322,7 @@ def test_config_name_lsf_takes_custom_config():
"local-directory": "/foo",
"shared-temp-directory": None,
"extra": None,
+ "worker-command": None,
"worker-extra-args": [],
"env-extra": None,
"job-script-prologue": [],
diff --git a/dask_jobqueue/tests/test_oar.py b/dask_jobqueue/tests/test_oar.py
index 52fd51b..c938488 100644
--- a/dask_jobqueue/tests/test_oar.py
+++ b/dask_jobqueue/tests/test_oar.py
@@ -82,9 +82,9 @@ def test_job_script():
in job_script
)
formatted_bytes = format_bytes(parse_bytes("7GB")).replace(" ", "")
- assert (
- f"--nthreads 2 --nworkers 4 --memory-limit {formatted_bytes}" in job_script
- )
+ assert ("--nthreads 2" in job_script)
+ assert ("--nworkers 4" in job_script)
+ assert (f"--memory-limit {formatted_bytes}" in job_script)
with OARCluster(
walltime="00:02:00",
@@ -115,9 +115,9 @@ def test_job_script():
in job_script
)
formatted_bytes = format_bytes(parse_bytes("7GB")).replace(" ", "")
- assert (
- f"--nthreads 2 --nworkers 4 --memory-limit {formatted_bytes}" in job_script
- )
+ assert ("--nthreads 2" in job_script)
+ assert ("--nworkers 4" in job_script)
+ assert (f"--memory-limit {formatted_bytes}" in job_script)
def test_config_name_oar_takes_custom_config():
@@ -137,6 +137,7 @@ def test_config_name_oar_takes_custom_config():
"local-directory": "/foo",
"shared-temp-directory": None,
"extra": None,
+ "worker-command": None,
"worker-extra-args": [],
"env-extra": None,
"job-script-prologue": [],
diff --git a/dask_jobqueue/tests/test_pbs.py b/dask_jobqueue/tests/test_pbs.py
index 3959c86..565d955 100644
--- a/dask_jobqueue/tests/test_pbs.py
+++ b/dask_jobqueue/tests/test_pbs.py
@@ -76,9 +76,9 @@ def test_job_script(Cluster):
in job_script
)
formatted_bytes = format_bytes(parse_bytes("7GB")).replace(" ", "")
- assert (
- f"--nthreads 2 --nworkers 4 --memory-limit {formatted_bytes}" in job_script
- )
+ assert ("--nthreads 2" in job_script)
+ assert ("--nworkers 4" in job_script)
+ assert (f"--memory-limit {formatted_bytes}" in job_script)
with Cluster(
queue="regular",
@@ -102,9 +102,9 @@ def test_job_script(Cluster):
in job_script
)
formatted_bytes = format_bytes(parse_bytes("7GB")).replace(" ", "")
- assert (
- f"--nthreads 2 --nworkers 4 --memory-limit {formatted_bytes}" in job_script
- )
+ assert ("--nthreads 2" in job_script)
+ assert ("--nworkers 4" in job_script)
+ assert (f"--memory-limit {formatted_bytes}" in job_script)
@pytest.mark.env("pbs")
@@ -361,6 +361,7 @@ def test_config_name_pbs_takes_custom_config():
"local-directory": "/foo",
"shared-temp-directory": None,
"extra": None,
+ "worker-command": None,
"worker-extra-args": [],
"env-extra": None,
"job-script-prologue": [],
diff --git a/dask_jobqueue/tests/test_sge.py b/dask_jobqueue/tests/test_sge.py
index 10e2da2..d1b5d2f 100644
--- a/dask_jobqueue/tests/test_sge.py
+++ b/dask_jobqueue/tests/test_sge.py
@@ -58,6 +58,7 @@ def test_config_name_sge_takes_custom_config():
"local-directory": "/foo",
"shared-temp-directory": None,
"extra": None,
+ "worker-command": None,
"worker-extra-args": [],
"env-extra": None,
"job-script-prologue": [],
diff --git a/dask_jobqueue/tests/test_slurm.py b/dask_jobqueue/tests/test_slurm.py
index 80769d5..c2aaed3 100644
--- a/dask_jobqueue/tests/test_slurm.py
+++ b/dask_jobqueue/tests/test_slurm.py
@@ -77,9 +77,9 @@ def test_job_script():
in job_script
)
formatted_bytes = format_bytes(parse_bytes("7GB")).replace(" ", "")
- assert (
- f"--nthreads 2 --nworkers 4 --memory-limit {formatted_bytes}" in job_script
- )
+ assert ("--nthreads 2" in job_script)
+ assert ("--nworkers 4" in job_script)
+ assert (f"--memory-limit {formatted_bytes}" in job_script)
with SLURMCluster(
walltime="00:02:00",
@@ -111,9 +111,9 @@ def test_job_script():
in job_script
)
formatted_bytes = format_bytes(parse_bytes("7GB")).replace(" ", "")
- assert (
- f"--nthreads 2 --nworkers 4 --memory-limit {formatted_bytes}" in job_script
- )
+ assert ("--nthreads 2" in job_script)
+ assert ("--nworkers 4" in job_script)
+ assert (f"--memory-limit {formatted_bytes}" in job_script)
@pytest.mark.env("slurm")
@@ -193,6 +193,7 @@ def test_config_name_slurm_takes_custom_config():
"local-directory": "/foo",
"shared-temp-directory": None,
"extra": None,
+ "worker-command": None,
"worker-extra-args": [],
"env-extra": None,
"job-script-prologue": [],
| Provide parameter for "dask-worker"
Issue https://github.com/rapidsai/dask-cuda/issues/3 proposes creating a `dask-cuda-worker` alternative to `dask-worker` which starts one process per GPU with appropriate environment variables. If we go ahead with that, then it would be convenient to compose that approach with dask-jobqueue. This would be particularly easy if the following line could be made configurable to allow other executables:
https://github.com/dask/dask-jobqueue/blob/5a5585dffea0237e452792c7ed0b13d5d1e3f8bb/dask_jobqueue/core.py#L254
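For reference, the patch above wires this up as a `worker_command` keyword; a hedged sketch of the usage it enables (the cluster class and sizes are arbitrary, and the `dask_cuda.cli` value mirrors the added test):

```python
from dask_jobqueue import SLURMCluster

cluster = SLURMCluster(
    cores=8, memory="32GB",
    worker_command="dask_cuda.cli",  # workers launch via `python -m dask_cuda.cli ...`
)
print(cluster.job_script())
```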
Thoughts? | 0.0 | [
"dask_jobqueue/tests/test_jobqueue_core.py::test_command_template[LocalCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_command_template[PBSCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_command_template[MoabCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_command_template[SLURMCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_command_template[SGECluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_command_template[LSFCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_command_template[OARCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_command_template[HTCondorCluster]"
] | [
"dask_jobqueue/tests/test_htcondor.py::test_header",
"dask_jobqueue/tests/test_htcondor.py::test_job_script",
"dask_jobqueue/tests/test_htcondor.py::test_config_name_htcondor_takes_custom_config",
"dask_jobqueue/tests/test_jobqueue_core.py::test_errors",
"dask_jobqueue/tests/test_jobqueue_core.py::test_shebang_settings[PBSCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_shebang_settings[MoabCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_shebang_settings[SLURMCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_shebang_settings[SGECluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_shebang_settings[LSFCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_shebang_settings[OARCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_dashboard_link[LocalCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_dashboard_link[PBSCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_dashboard_link[MoabCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_dashboard_link[SLURMCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_dashboard_link[SGECluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_dashboard_link[LSFCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_dashboard_link[OARCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_dashboard_link[HTCondorCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_forward_ip[LocalCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_forward_ip[PBSCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_forward_ip[MoabCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_forward_ip[SLURMCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_forward_ip[SGECluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_forward_ip[LSFCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_forward_ip[OARCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_forward_ip[HTCondorCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_job_id_from_qsub[{job_id}.admin01-SGEJob]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_job_id_from_qsub[Request",
"dask_jobqueue/tests/test_jobqueue_core.py::test_job_id_from_qsub[sbatch:",
"dask_jobqueue/tests/test_jobqueue_core.py::test_job_id_from_qsub[{job_id};cluster-SGEJob]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_job_id_from_qsub[Job",
"dask_jobqueue/tests/test_jobqueue_core.py::test_job_id_from_qsub[{job_id}-SGEJob]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_job_id_error_handling[SGEJob]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_log_directory[LocalCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_log_directory[PBSCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_log_directory[MoabCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_log_directory[SLURMCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_log_directory[SGECluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_log_directory[LSFCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_log_directory[OARCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_log_directory[HTCondorCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_cluster_has_cores_and_memory[LocalCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_cluster_has_cores_and_memory[PBSCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_cluster_has_cores_and_memory[MoabCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_cluster_has_cores_and_memory[SLURMCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_cluster_has_cores_and_memory[SGECluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_cluster_has_cores_and_memory[LSFCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_cluster_has_cores_and_memory[OARCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_cluster_has_cores_and_memory[HTCondorCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_config_interface",
"dask_jobqueue/tests/test_jobqueue_core.py::test_job_without_config_name",
"dask_jobqueue/tests/test_jobqueue_core.py::test_cluster_without_job_cls",
"dask_jobqueue/tests/test_jobqueue_core.py::test_default_number_of_worker_processes[LocalCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_default_number_of_worker_processes[PBSCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_default_number_of_worker_processes[MoabCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_default_number_of_worker_processes[SLURMCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_default_number_of_worker_processes[SGECluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_default_number_of_worker_processes[LSFCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_default_number_of_worker_processes[OARCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_default_number_of_worker_processes[HTCondorCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_scheduler_options[LocalCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_scheduler_options[PBSCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_scheduler_options[MoabCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_scheduler_options[SLURMCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_scheduler_options[SGECluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_scheduler_options[LSFCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_scheduler_options[OARCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_scheduler_options[HTCondorCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_scheduler_options_interface[LocalCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_scheduler_options_interface[PBSCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_scheduler_options_interface[MoabCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_scheduler_options_interface[SLURMCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_scheduler_options_interface[SGECluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_scheduler_options_interface[LSFCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_scheduler_options_interface[OARCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_scheduler_options_interface[HTCondorCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_cluster_error_scheduler_arguments_should_use_scheduler_options[LocalCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_cluster_error_scheduler_arguments_should_use_scheduler_options[PBSCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_cluster_error_scheduler_arguments_should_use_scheduler_options[MoabCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_cluster_error_scheduler_arguments_should_use_scheduler_options[SLURMCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_cluster_error_scheduler_arguments_should_use_scheduler_options[SGECluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_cluster_error_scheduler_arguments_should_use_scheduler_options[LSFCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_cluster_error_scheduler_arguments_should_use_scheduler_options[OARCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_cluster_error_scheduler_arguments_should_use_scheduler_options[HTCondorCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_import_scheduler_options_from_config[LocalCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_import_scheduler_options_from_config[PBSCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_import_scheduler_options_from_config[MoabCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_import_scheduler_options_from_config[SLURMCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_import_scheduler_options_from_config[SGECluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_import_scheduler_options_from_config[LSFCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_import_scheduler_options_from_config[OARCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_import_scheduler_options_from_config[HTCondorCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_wrong_parameter_error[LocalCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_wrong_parameter_error[PBSCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_wrong_parameter_error[MoabCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_wrong_parameter_error[SLURMCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_wrong_parameter_error[SGECluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_wrong_parameter_error[LSFCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_wrong_parameter_error[OARCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_wrong_parameter_error[HTCondorCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_security[LocalCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_security_temporary[LocalCluster]",
"dask_jobqueue/tests/test_jobqueue_core.py::test_security_temporary_defaults[LocalCluster]",
"dask_jobqueue/tests/test_lsf.py::test_header",
"dask_jobqueue/tests/test_lsf.py::test_job_script",
"dask_jobqueue/tests/test_lsf.py::test_config",
"dask_jobqueue/tests/test_lsf.py::test_use_stdin[None-False]",
"dask_jobqueue/tests/test_lsf.py::test_use_stdin[None-True]",
"dask_jobqueue/tests/test_lsf.py::test_use_stdin[True-None]",
"dask_jobqueue/tests/test_lsf.py::test_use_stdin[False-None]",
"dask_jobqueue/tests/test_lsf.py::test_use_stdin[True-False]",
"dask_jobqueue/tests/test_lsf.py::test_config_name_lsf_takes_custom_config",
"dask_jobqueue/tests/test_lsf.py::test_informative_errors",
"dask_jobqueue/tests/test_lsf.py::test_lsf_unit_detection[LSF_UNIT_FOR_LIMITS=MB-mb]",
"dask_jobqueue/tests/test_lsf.py::test_lsf_unit_detection[LSF_UNIT_FOR_LIMITS=G",
"dask_jobqueue/tests/test_lsf.py::test_lsf_unit_detection[#LSF_UNIT_FOR_LIMITS=NotDetected-kb]",
"dask_jobqueue/tests/test_lsf.py::test_lsf_unit_detection_without_file",
"dask_jobqueue/tests/test_oar.py::test_header",
"dask_jobqueue/tests/test_oar.py::test_job_script",
"dask_jobqueue/tests/test_oar.py::test_config_name_oar_takes_custom_config",
"dask_jobqueue/tests/test_oar.py::test_memory_per_core_property_name_none_warning",
"dask_jobqueue/tests/test_pbs.py::test_header[PBSCluster]",
"dask_jobqueue/tests/test_pbs.py::test_header[MoabCluster]",
"dask_jobqueue/tests/test_pbs.py::test_job_script[PBSCluster]",
"dask_jobqueue/tests/test_pbs.py::test_job_script[MoabCluster]",
"dask_jobqueue/tests/test_pbs.py::test_config",
"dask_jobqueue/tests/test_pbs.py::test_config_name_pbs_takes_custom_config",
"dask_jobqueue/tests/test_pbs.py::test_informative_errors",
"dask_jobqueue/tests/test_pbs.py::test_adapt",
"dask_jobqueue/tests/test_pbs.py::test_deprecation_project",
"dask_jobqueue/tests/test_sge.py::test_config_name_sge_takes_custom_config",
"dask_jobqueue/tests/test_sge.py::test_job_script",
"dask_jobqueue/tests/test_slurm.py::test_header",
"dask_jobqueue/tests/test_slurm.py::test_job_script",
"dask_jobqueue/tests/test_slurm.py::test_config_name_slurm_takes_custom_config",
"dask_jobqueue/tests/test_slurm.py::test_deprecation_project"
] | 2023-05-26 01:34:18+00:00 | 1,805 |
|
dask__zict-13 | diff --git a/zict/file.py b/zict/file.py
index c561471..0b45752 100644
--- a/zict/file.py
+++ b/zict/file.py
@@ -3,9 +3,9 @@ from __future__ import absolute_import, division, print_function
import errno
import os
try:
- from urllib.parse import quote
+ from urllib.parse import quote, unquote
except ImportError:
- from urllib import quote
+ from urllib import quote, unquote
from .common import ZictBase
@@ -18,11 +18,21 @@ def _safe_key(key):
return quote(key, safe='')
+def _unsafe_key(key):
+ """
+ Undo the escaping done by _safe_key().
+ """
+ return unquote(key)
+
+
class File(ZictBase):
""" Mutable Mapping interface to a directory
Keys must be strings, values must be bytes
+ Note this shouldn't be used for interprocess persistence, as keys
+ are cached in memory.
+
Parameters
----------
directory: string
@@ -38,44 +48,42 @@ class File(ZictBase):
def __init__(self, directory, mode='a'):
self.directory = directory
self.mode = mode
+ self._keys = set()
if not os.path.exists(self.directory):
os.mkdir(self.directory)
+ else:
+ for n in os.listdir(self.directory):
+ self._keys.add(_unsafe_key(n))
def __str__(self):
- return '<File: %s, mode="%s">' % (self.directory, self.mode)
+ return '<File: %s, mode="%s", %d elements>' % (self.directory, self.mode, len(self))
__repr__ = __str__
def __getitem__(self, key):
- try:
- with open(os.path.join(self.directory, _safe_key(key)), 'rb') as f:
- result = f.read()
- except EnvironmentError as e:
- if e.args[0] != errno.ENOENT:
- raise
+ if key not in self._keys:
raise KeyError(key)
- return result
+ with open(os.path.join(self.directory, _safe_key(key)), 'rb') as f:
+ return f.read()
def __setitem__(self, key, value):
with open(os.path.join(self.directory, _safe_key(key)), 'wb') as f:
f.write(value)
+ self._keys.add(key)
def __contains__(self, key):
- return os.path.exists(os.path.join(self.directory, _safe_key(key)))
+ return key in self._keys
def keys(self):
- return iter(os.listdir(self.directory))
+ return iter(self._keys)
- def __iter__(self):
- return self.keys()
+ __iter__ = keys
def __delitem__(self, key):
- try:
- os.remove(os.path.join(self.directory, _safe_key(key)))
- except EnvironmentError as e:
- if e.args[0] != errno.ENOENT:
- raise
+ if key not in self._keys:
raise KeyError(key)
+ os.remove(os.path.join(self.directory, _safe_key(key)))
+ self._keys.remove(key)
def __len__(self):
- return sum(1 for _ in self.keys())
+ return len(self._keys)
| dask/zict | 4621b4c40456b3dd00eab9ce8e9d3742b080833c | diff --git a/zict/tests/test_file.py b/zict/tests/test_file.py
index d88d90b..62fe887 100644
--- a/zict/tests/test_file.py
+++ b/zict/tests/test_file.py
@@ -90,6 +90,19 @@ def test_arbitrary_chars(fn):
z[key]
z[key] = b'foo'
assert z[key] == b'foo'
+ assert list(z) == [key]
+ assert list(z.keys()) == [key]
+ assert list(z.items()) == [(key, b'foo')]
+ assert list(z.values()) == [b'foo']
+
+ zz = File(fn)
+ assert zz[key] == b'foo'
+ assert list(zz) == [key]
+ assert list(zz.keys()) == [key]
+ assert list(zz.items()) == [(key, b'foo')]
+ assert list(zz.values()) == [b'foo']
+ del zz
+
del z[key]
with pytest.raises(KeyError):
z[key]
| File.__contains__ is slow
It is convenient in Dask to frequently check if a key is present in the `.data` dictionary. Unfortunately this is slow, due to calls to both `os.path.exists` and `_safe_key`. | 0.0 | [
"zict/tests/test_file.py::test_arbitrary_chars"
] | [
"zict/tests/test_file.py::test_mapping",
"zict/tests/test_file.py::test_implementation",
"zict/tests/test_file.py::test_str",
"zict/tests/test_file.py::test_setitem_typeerror",
"zict/tests/test_file.py::test_contextmanager",
"zict/tests/test_file.py::test_delitem",
"zict/tests/test_file.py::test_missing_key"
] | 2016-11-28 13:35:08+00:00 | 1,806 |
|
dask__zict-64 | diff --git a/doc/source/changelog.rst b/doc/source/changelog.rst
index 0375754..e493d3f 100644
--- a/doc/source/changelog.rst
+++ b/doc/source/changelog.rst
@@ -4,6 +4,8 @@ Changelog
2.2.0 - Unreleased
------------------
- Added type annotations (:pr:`62`) `Guido Imperiale`_
+- If you call Func.update() and Func wraps around File, do not store all dump outputs in
+ memory (:pr:`64`) `Guido Imperiale`_
2.1.0 - 2022-02-25
diff --git a/zict/common.py b/zict/common.py
index 6ed9e94..3d94a2e 100644
--- a/zict/common.py
+++ b/zict/common.py
@@ -1,6 +1,7 @@
from __future__ import annotations
from collections.abc import Iterable, Mapping
+from itertools import chain
from typing import MutableMapping # TODO move to collections.abc (needs Python >=3.9)
from typing import Any, TypeVar, overload
@@ -39,12 +40,12 @@ class ZictBase(MutableMapping[KT, VT]):
if args:
other = args[0]
if isinstance(other, Mapping) or hasattr(other, "items"):
- items += other.items()
+ items = other.items()
else:
# Assuming (key, value) pairs
- items += other
+ items = other
if kwds:
- items += kwds.items()
+ items = chain(items, kwds.items())
self._do_update(items)
def _do_update(self, items: Iterable[tuple[KT, VT]]) -> None:
| dask/zict | 6850845b645aea71bac342db9cafc8ed9546db4d | diff --git a/zict/tests/test_func.py b/zict/tests/test_func.py
index d2fbd34..5345045 100644
--- a/zict/tests/test_func.py
+++ b/zict/tests/test_func.py
@@ -1,4 +1,10 @@
+import gc
+from collections.abc import MutableMapping
+
+import pytest
+
from zict import Func
+from zict.common import ZictBase
from . import utils_test
@@ -46,3 +52,40 @@ def test_mapping():
z = Func(rotl, rotr, d)
utils_test.check_mapping(z)
utils_test.check_closing(z)
+
+
[email protected]("wrapped_cls", [MutableMapping, ZictBase])
+def test_update_descopes_early(wrapped_cls):
+ """Test that Func.update() descopes the output of self.dump as soon as it can, if
+ the wrapped mapping allows, and doesn't store everything into a list.
+ """
+
+ class Dumped:
+ n = 0
+
+ def __init__(self):
+ gc.collect() # Only necessary on pypy
+ Dumped.n += 1
+ assert Dumped.n < 3
+
+ def __del__(self):
+ Dumped.n -= 1
+
+ class Dummy(wrapped_cls):
+ def __setitem__(self, key, value):
+ pass
+
+ def __getitem__(self, key, value):
+ raise KeyError(key)
+
+ def __delitem__(self, key):
+ raise KeyError(key)
+
+ def __iter__(self):
+ return iter(())
+
+ def __len__(self):
+ return 0
+
+ d = Func(lambda v: Dumped(), lambda w: None, Dummy())
+ d.update(dict.fromkeys(range(10)))
| Memory flare on Func.update() with File backend
Consider:
```python
d = Func(pickle.dumps, pickle.loads, File(somedir))
d.update(mydata)
```
### Current behaviour
1. call ``pickle.dumps`` on every element of mydata and store all output in memory
2. call ``File.__setitem__`` on each pickled element
3. descope the pickled data all at once
### Expected behaviour
File does not have an optimized update method, unlike LMDB or Sieve, as it does not benefit from updating everything in a single call.
Therefore, it should be possible to create a pickle buffer for every value, write it to disk, and then release it straight away.
The issue is not in Func, but actually in File.update and to be precise in ZictBase.update, which converts iterables into lists.
### Mitigation
- Instead of ``pickle.dumps``, write a function that returns a tuple of (pickle5 output, *buffers).
- If you wrap Func in a zict.Buffer, Func.update will never be called.
dask.distributed does both of the above.
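A rough sketch of the first mitigation bullet (the function name is made up; requires pickle protocol 5):

```python
import pickle

def dumps_with_buffers(value):
    """Return (pickle5 header, *out-of-band buffers) rather than one flat bytes blob."""
    buffers = []
    header = pickle.dumps(value, protocol=5, buffer_callback=buffers.append)
    return (header, *buffers)
```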
| 0.0 | [
"zict/tests/test_func.py::test_update_descopes_early[ZictBase]"
] | [
"zict/tests/test_func.py::test_simple",
"zict/tests/test_func.py::test_mapping",
"zict/tests/test_func.py::test_update_descopes_early[MutableMapping]"
] | 2022-03-07 14:38:44+00:00 | 1,807 |
|
data61__blocklib-75 | diff --git a/blocklib/blocks_generator.py b/blocklib/blocks_generator.py
index a62e044..21a91cd 100644
--- a/blocklib/blocks_generator.py
+++ b/blocklib/blocks_generator.py
@@ -1,7 +1,7 @@
"""Module that implement final block generations."""
from collections import defaultdict
from typing import Any, Dict, Sequence, Set, List, cast
-
+import ast
import numpy as np
from blocklib import PPRLIndex
@@ -89,6 +89,7 @@ def generate_blocks_psig(reversed_indices: Sequence[Dict], block_states: Sequenc
for reversed_index, state in zip(reversed_indices, block_states):
cbf = set() # type: Set[int]
for bf_set in reversed_index:
+ bf_set = ast.literal_eval(bf_set)
cbf = cbf.union(bf_set)
bf_len = int(block_states[0].blocking_config.get("bf-len", None))
@@ -102,10 +103,11 @@ def generate_blocks_psig(reversed_indices: Sequence[Dict], block_states: Sequenc
# filter reversed_indices with block filter
for reversed_index in reversed_indices:
- has_matches = {bf_set: all(block_filter[i] for i in bf_set) for bf_set in reversed_index}
+ has_matches = {bf_set: all(block_filter[i] for i in ast.literal_eval(bf_set)) for bf_set in reversed_index}
for bf_set in has_matches:
if not has_matches[bf_set]:
del reversed_index[bf_set]
+
# because of collisions in counting bloom filter, there are blocks only unique to one filtered index
# only keep blocks that exist in at least threshold many reversed indices
keys = defaultdict(int) # type: Dict[Set, int]
@@ -116,4 +118,5 @@ def generate_blocks_psig(reversed_indices: Sequence[Dict], block_states: Sequenc
clean_reversed_indices = [] # type: List[Dict[Set, List]]
for reversed_index in reversed_indices:
clean_reversed_indices.append(dict((k, reversed_index[k]) for k in common_keys if k in reversed_index))
+
return clean_reversed_indices
diff --git a/blocklib/pprlpsig.py b/blocklib/pprlpsig.py
index 6ba617d..d0eabb3 100644
--- a/blocklib/pprlpsig.py
+++ b/blocklib/pprlpsig.py
@@ -94,10 +94,10 @@ class PPRLIndexPSignature(PPRLIndex):
num_hash_func = int(self.blocking_config.get("number-hash-functions", None))
bf_len = int(self.blocking_config.get("bf-len", None))
- reversed_index = {} # type: Dict[Any, List[Any]]
+ reversed_index = {} # type: Dict[str, List[Any]]
for signature, rec_ids in filtered_reversed_index.items():
- bf_set = tuple(flip_bloom_filter(signature, bf_len, num_hash_func))
+ bf_set = str(tuple(flip_bloom_filter(signature, bf_len, num_hash_func)))
if bf_set in reversed_index:
reversed_index[bf_set].extend(rec_ids)
else:
| data61/blocklib | 5a32d194fcefae240d60604c8eb2bbe64266fee4 | diff --git a/tests/test_blocks_generators.py b/tests/test_blocks_generators.py
index 3021379..931556d 100644
--- a/tests/test_blocks_generators.py
+++ b/tests/test_blocks_generators.py
@@ -107,7 +107,7 @@ class TestBlocksGenerator:
for string in ['1_Fr', '0_Fred', '1_Li']:
bf_set = flip_bloom_filter(string, config['blocking-filter']['bf-len'],
config['blocking-filter']['number-hash-functions'])
- expected_bf_sets[tuple(bf_set)] = True
+ expected_bf_sets[str(tuple(bf_set))] = True
assert all(key in expected_bf_sets for key in filtered_alice)
assert filtered_alice.keys() == filtered_bob.keys()
@@ -177,7 +177,7 @@ class TestBlocksGenerator:
for string in ['1_Fr', '1_Jo']:
bf_set = flip_bloom_filter(string, config['blocking-filter']['bf-len'],
config['blocking-filter']['number-hash-functions'])
- expected_bf_sets[string] = tuple(bf_set)
+ expected_bf_sets[string] = str(tuple(bf_set))
expected_m1 = {expected_bf_sets['1_Fr']: ['m1-2'], expected_bf_sets['1_Jo']: ['m1-1']}
expected_m2 = {expected_bf_sets['1_Fr']: ['m2-1'], expected_bf_sets['1_Jo']: ['m2-2']}
diff --git a/tests/test_candidate_block_generator.py b/tests/test_candidate_block_generator.py
index 9b65e82..e7755cb 100644
--- a/tests/test_candidate_block_generator.py
+++ b/tests/test_candidate_block_generator.py
@@ -61,6 +61,6 @@ class TestCandidateBlockGenerator:
'version': 1,
'config': config}
candidate_block_obj = generate_candidate_blocks(data, block_config)
- bf_set_fred = tuple(flip_bloom_filter('0_Fred', bf_len, num_hash_funcs))
- bf_set_lindsay = tuple(flip_bloom_filter('0_Lindsay', bf_len, num_hash_funcs))
+ bf_set_fred = str(tuple(flip_bloom_filter('0_Fred', bf_len, num_hash_funcs)))
+ bf_set_lindsay = str(tuple(flip_bloom_filter('0_Lindsay', bf_len, num_hash_funcs)))
assert candidate_block_obj.blocks == {bf_set_fred: ['id4', 'id5'], bf_set_lindsay: ['id6']}
diff --git a/tests/test_pprlpsig.py b/tests/test_pprlpsig.py
index c285172..258450a 100644
--- a/tests/test_pprlpsig.py
+++ b/tests/test_pprlpsig.py
@@ -84,4 +84,4 @@ class TestPSig(unittest.TestCase):
reversed_index = psig.build_reversed_index(data, verbose=True)
bf_set = tuple(flip_bloom_filter("0_Fred", config['blocking-filter']['bf-len'],
config['blocking-filter']['number-hash-functions']))
- assert reversed_index == {bf_set: ['id4', 'id5']}
+ assert reversed_index == {str(bf_set): ['id4', 'id5']}
| Convert block key into string
Currently P-sig generates block keys of type `set` while Lambda-fold generates block keys of type `string`. For entity service deployment, we prefer string block keys. This issue is to convert block keys into strings for P-Sig. | 0.0 | [
"tests/test_blocks_generators.py::TestBlocksGenerator::test_psig",
"tests/test_blocks_generators.py::TestBlocksGenerator::test_psig_multiparty",
"tests/test_candidate_block_generator.py::TestCandidateBlockGenerator::test_generate_candidate_blocks_psig",
"tests/test_pprlpsig.py::TestPSig::test_build_reversed_index"
] | [
"tests/test_blocks_generators.py::TestBlocksGenerator::test_candidate_block_type",
"tests/test_blocks_generators.py::TestBlocksGenerator::test_generate_reverse_block",
"tests/test_blocks_generators.py::TestBlocksGenerator::test_lambdafold",
"tests/test_candidate_block_generator.py::TestCandidateBlockGenerator::test_generate_candidate_blocks_assertion",
"tests/test_pprlpsig.py::TestPSig::test_combine_blocks_in_blocking_filter",
"tests/test_pprlpsig.py::TestPSig::test_config"
] | 2020-06-11 01:37:05+00:00 | 1,808 |
|
datadriventests__ddt-98 | diff --git a/ddt.py b/ddt.py
index b34ab79..2b7c670 100644
--- a/ddt.py
+++ b/ddt.py
@@ -90,20 +90,30 @@ def data(*values):
Should be added to methods of instances of ``unittest.TestCase``.
"""
- return idata(values, len(str(len(values))))
+ return idata(values)
-def idata(iterable, index_len):
+def idata(iterable, index_len=None):
"""
Method decorator to add to your test methods.
Should be added to methods of instances of ``unittest.TestCase``.
+ :param iterable: iterable of the values to provide to the test function.
+ :param index_len: an optional integer specifying the width to zero-pad the
+ test identifier indices to. If not provided, this will add the fewest
+ zeros necessary to make all identifiers the same length.
"""
+ if index_len is None:
+ # Avoid consuming a one-time-use generator.
+ iterable = tuple(iterable)
+ index_len = len(str(len(iterable)))
+
def wrapper(func):
setattr(func, DATA_ATTR, iterable)
setattr(func, INDEX_LEN, index_len)
return func
+
return wrapper
@@ -371,4 +381,4 @@ def ddt(arg=None, **kwargs):
# ``arg`` is the unittest's test class when decorating with ``@ddt`` while
# it is ``None`` when decorating a test class with ``@ddt(k=v)``.
- return wrapper(arg) if inspect.isclass(arg) else wrapper
\ No newline at end of file
+ return wrapper(arg) if inspect.isclass(arg) else wrapper
| datadriventests/ddt | b0683d29b6aa2f24c8c5595070f81295df85fe0e | diff --git a/test/test_example.py b/test/test_example.py
index 1d27043..dfc0454 100644
--- a/test/test_example.py
+++ b/test/test_example.py
@@ -1,6 +1,7 @@
+import itertools
import unittest
-from ddt import ddt, data, file_data, unpack
+from ddt import ddt, data, file_data, idata, unpack
from test.mycode import larger_than_two, has_three_elements, is_a_greeting
try:
@@ -64,6 +65,12 @@ class FooTestCase(unittest.TestCase):
a, b = value
self.assertGreater(a, b)
+ @idata(itertools.product([0, 1, 2], [3, 4, 5]))
+ def test_iterable_argument(self, value):
+ first_value, second_value = value
+ self.assertLessEqual(first_value, 2)
+ self.assertGreaterEqual(second_value, 3)
+
@data(annotated2([2, 1], 'Test_case_1', """Test docstring 1"""),
annotated2([10, 5], 'Test_case_2', """Test docstring 2"""))
def test_greater_with_name_docstring(self, value):
diff --git a/test/test_functional.py b/test/test_functional.py
index e860e34..a930dc3 100644
--- a/test/test_functional.py
+++ b/test/test_functional.py
@@ -9,7 +9,7 @@ try:
except ImportError:
import mock
-from ddt import ddt, data, file_data, TestNameFormat
+from ddt import ddt, data, file_data, idata, TestNameFormat
from test.mycode import has_three_elements
@@ -185,6 +185,97 @@ def test_ddt_format_test_name_default():
assert ("test_something_{}_{}".format(i, d) in tests)
+def test_idata_single_argument():
+ """Test that the single-argument form of ``idata`` works."""
+ payload = [5, 12, 13]
+
+ @ddt
+ class Dummy(object):
+ """Dummy class to test that the ``idata(iterable)`` decorator works."""
+ @idata(payload)
+ def test_something(self, value):
+ return value
+
+ tests = list(filter(_is_test, Dummy.__dict__))
+ assert len(tests) == len(payload)
+
+ expected_tests = [
+ "test_something_{:1d}_{}".format(i + 1, v) for i, v in enumerate(payload)
+ ]
+ assert sorted(tests) == sorted(expected_tests)
+
+
+def test_idata_automatic_zero_padding():
+ """
+ Test that the single-argument form of ``idata`` zero-pads its keys so the
+ lengths all match
+ """
+ payload = range(15)
+
+ @ddt
+ class Dummy(object):
+ """Dummy class to test that the ``idata(iterable)`` decorator works."""
+ @idata(payload)
+ def test_something(self, value):
+ return value
+
+ tests = list(filter(_is_test, Dummy.__dict__))
+ assert len(tests) == len(payload)
+
+ expected_tests = [
+ "test_something_{:02d}_{}".format(i + 1, v) for i, v in enumerate(payload)
+ ]
+ assert sorted(tests) == sorted(expected_tests)
+
+
+def test_idata_override_index_len():
+ """
+ Test that overriding ``index_len`` in ``idata`` can allow additional
+ zero-padding to be added.
+ """
+ payload = [4, 2, 1]
+
+ @ddt
+ class Dummy(object):
+ @idata(payload, index_len=2)
+ def test_something(self, value):
+ return value
+
+ tests = list(filter(_is_test, Dummy.__dict__))
+ assert len(tests) == len(payload)
+
+ expected_tests = [
+ "test_something_{:02d}_{}".format(i + 1, v) for i, v in enumerate(payload)
+ ]
+ assert sorted(tests) == sorted(expected_tests)
+
+
+def test_idata_consumable_iterator():
+ """
+ Test that using ``idata`` with a consumable iterator still generates the
+ expected tests.
+ """
+ payload = [51, 78, 2]
+
+ def consumable_iterator():
+ # Not using `yield from` for Python 2.7.
+ for i in payload:
+ yield i
+
+ @ddt
+ class Dummy(object):
+ @idata(consumable_iterator())
+ def test_something(self, value):
+ return value
+
+ tests = list(filter(_is_test, Dummy.__dict__))
+
+ expected_tests = [
+ "test_something_{:1d}_{}".format(i + 1, v) for i, v in enumerate(payload)
+ ]
+ assert sorted(tests) == sorted(expected_tests)
+
+
def test_file_data_test_creation():
"""
Test that the ``file_data`` decorator creates two tests
| Signature change of `ddt.idata` in 1.4.3 breaks downstream uses
In #92, the signature of `idata` was changed from `idata(values)` to `idata(values, index_len)`, with no default handling for the latter argument. This means that all current uses of `idata(values)` are now broken on upgrade to 1.4.3, and there's no single calling convention compatible with both 1.4.2 and 1.4.3.
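For example, a typical single-argument call that 1.4.3 rejects (the test class and data here are illustrative, mirroring the test added in the patch above):

```python
import itertools
import unittest

from ddt import ddt, idata

@ddt
class ProductTest(unittest.TestCase):
    @idata(itertools.product([0, 1, 2], [3, 4, 5]))  # no index_len -> TypeError on 1.4.3
    def test_pairs(self, value):
        first, second = value
        self.assertLessEqual(first, 2)
```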
I'm not too familiar with your internals, but glancing through PR #92, it looks like it could be handled safely without affecting that PR by changing the signature to something like:
```python
def idata(values, index_len=None):
    if index_len is None:
        # Avoid accidentally consuming a one-time-use iterable.
        values = tuple(values)
        index_len = len(str(len(values)))
    # ... continue as normal ...
```
If so, I'd be happy to make the PR. | 0.0 | [
"test/test_example.py::FooTestCase::test_dicts_extracted_into_kwargs_1",
"test/test_example.py::FooTestCase::test_dicts_extracted_into_kwargs_2",
"test/test_example.py::FooTestCase::test_doc_missing_args_1_3",
"test/test_example.py::FooTestCase::test_doc_missing_args_2_4",
"test/test_example.py::FooTestCase::test_doc_missing_args_3_12",
"test/test_example.py::FooTestCase::test_doc_missing_args_4_23",
"test/test_example.py::FooTestCase::test_doc_missing_kargs_1_3",
"test/test_example.py::FooTestCase::test_doc_missing_kargs_2_4",
"test/test_example.py::FooTestCase::test_doc_missing_kargs_3_12",
"test/test_example.py::FooTestCase::test_doc_missing_kargs_4_23",
"test/test_example.py::FooTestCase::test_file_data_json_dict_1_unsorted_list",
"test/test_example.py::FooTestCase::test_file_data_json_dict_2_sorted_list",
"test/test_example.py::FooTestCase::test_file_data_json_dict_dict_1_positive_integer_range",
"test/test_example.py::FooTestCase::test_file_data_json_dict_dict_2_negative_integer_range",
"test/test_example.py::FooTestCase::test_file_data_json_dict_dict_3_positive_real_range",
"test/test_example.py::FooTestCase::test_file_data_json_dict_dict_4_negative_real_range",
"test/test_example.py::FooTestCase::test_file_data_json_list_1_Hello",
"test/test_example.py::FooTestCase::test_file_data_json_list_2_Goodbye",
"test/test_example.py::FooTestCase::test_greater_1_test_2_greater_than_1",
"test/test_example.py::FooTestCase::test_greater_2_test_10_greater_than_5",
"test/test_example.py::FooTestCase::test_greater_with_name_docstring_1_Test_case_1",
"test/test_example.py::FooTestCase::test_greater_with_name_docstring_2_Test_case_2",
"test/test_example.py::FooTestCase::test_iterable_argument_1__0__3_",
"test/test_example.py::FooTestCase::test_iterable_argument_2__0__4_",
"test/test_example.py::FooTestCase::test_iterable_argument_3__0__5_",
"test/test_example.py::FooTestCase::test_iterable_argument_4__1__3_",
"test/test_example.py::FooTestCase::test_iterable_argument_5__1__4_",
"test/test_example.py::FooTestCase::test_iterable_argument_6__1__5_",
"test/test_example.py::FooTestCase::test_iterable_argument_7__2__3_",
"test/test_example.py::FooTestCase::test_iterable_argument_8__2__4_",
"test/test_example.py::FooTestCase::test_iterable_argument_9__2__5_",
"test/test_example.py::FooTestCase::test_larger_than_two_1_3",
"test/test_example.py::FooTestCase::test_larger_than_two_2_4",
"test/test_example.py::FooTestCase::test_larger_than_two_3_12",
"test/test_example.py::FooTestCase::test_larger_than_two_4_23",
"test/test_example.py::FooTestCase::test_larger_than_two_with_doc_1_3",
"test/test_example.py::FooTestCase::test_larger_than_two_with_doc_2_4",
"test/test_example.py::FooTestCase::test_larger_than_two_with_doc_3_12",
"test/test_example.py::FooTestCase::test_larger_than_two_with_doc_4_23",
"test/test_example.py::FooTestCase::test_list_extracted_into_arguments_1__3__2_",
"test/test_example.py::FooTestCase::test_list_extracted_into_arguments_2__4__3_",
"test/test_example.py::FooTestCase::test_list_extracted_into_arguments_3__5__3_",
"test/test_example.py::FooTestCase::test_list_extracted_with_doc_1__3__2_",
"test/test_example.py::FooTestCase::test_list_extracted_with_doc_2__4__3_",
"test/test_example.py::FooTestCase::test_list_extracted_with_doc_3__5__3_",
"test/test_example.py::FooTestCase::test_not_larger_than_two_1_1",
"test/test_example.py::FooTestCase::test_not_larger_than_two_2__3",
"test/test_example.py::FooTestCase::test_not_larger_than_two_3_2",
"test/test_example.py::FooTestCase::test_not_larger_than_two_4_0",
"test/test_example.py::FooTestCase::test_tuples_extracted_into_arguments_1__3__2_",
"test/test_example.py::FooTestCase::test_tuples_extracted_into_arguments_2__4__3_",
"test/test_example.py::FooTestCase::test_tuples_extracted_into_arguments_3__5__3_",
"test/test_example.py::FooTestCase::test_undecorated",
"test/test_example.py::FooTestCase::test_unicode_1_ascii",
"test/test_example.py::FooTestCase::test_unicode_2_non_ascii__",
"test/test_functional.py::test_data_decorator",
"test/test_functional.py::test_file_data_decorator_with_dict",
"test/test_functional.py::test_ddt",
"test/test_functional.py::test_ddt_format_test_name_index_only",
"test/test_functional.py::test_ddt_format_test_name_default",
"test/test_functional.py::test_idata_single_argument",
"test/test_functional.py::test_idata_automatic_zero_padding",
"test/test_functional.py::test_idata_override_index_len",
"test/test_functional.py::test_idata_consumable_iterator",
"test/test_functional.py::test_file_data_test_creation",
"test/test_functional.py::test_file_data_test_names_dict",
"test/test_functional.py::test_feed_data_data",
"test/test_functional.py::test_feed_data_file_data",
"test/test_functional.py::test_feed_data_file_data_missing_json",
"test/test_functional.py::test_feed_data_file_data_missing_yaml",
"test/test_functional.py::test_ddt_data_name_attribute",
"test/test_functional.py::test_ddt_data_doc_attribute",
"test/test_functional.py::test_ddt_data_unicode",
"test/test_functional.py::test_ddt_data_object",
"test/test_functional.py::test_feed_data_with_invalid_identifier",
"test/test_functional.py::test_load_yaml_without_yaml_support"
] | [] | 2021-09-27 22:06:14+00:00 | 1,809 |
|
datafolklabs__cement-559 | diff --git a/cement/ext/ext_configparser.py b/cement/ext/ext_configparser.py
index 587e89e..0aa8f0d 100644
--- a/cement/ext/ext_configparser.py
+++ b/cement/ext/ext_configparser.py
@@ -152,12 +152,12 @@ class ConfigParserConfigHandler(config.ConfigHandler, RawConfigParser):
env_var = re.sub('[^0-9a-zA-Z]+', '_', env_var)
return env_var
- def get(self, section, key):
+ def get(self, section, key, **kwargs):
env_var = self._get_env_var(section, key)
if env_var in os.environ.keys():
return os.environ[env_var]
else:
- return RawConfigParser.get(self, section, key)
+ return RawConfigParser.get(self, section, key, **kwargs)
def has_section(self, section):
return RawConfigParser.has_section(self, section)
| datafolklabs/cement | 775fc4d933a4674f131418671c87f79944778e13 | diff --git a/tests/ext/test_ext_configparser.py b/tests/ext/test_ext_configparser.py
index b0ed755..e5bf5fa 100644
--- a/tests/ext/test_ext_configparser.py
+++ b/tests/ext/test_ext_configparser.py
@@ -54,3 +54,15 @@ def test_env_var_override():
assert app.config.get('dummy', 'foo') == 'dummy-not-bar'
section_dict = app.config.get_section_dict('dummy')
assert section_dict['foo'] == 'dummy-not-bar'
+
+
+def test_get_boolean():
+ with TestApp(config_section='testapp') as app:
+ app.config.set('testapp', 'foobool', 'true')
+ assert app.config['testapp'].getboolean('foobool') is True
+
+ app.config.set('testapp', 'foobool', 'no')
+ assert app.config['testapp'].getboolean('foobool') is False
+
+ os.environ['TESTAPP_FOOBOOL'] = '1'
+ assert app.config['testapp'].getboolean('foobool') is True
| Configparser 'getboolean' exception
**System Information**
- Cement Version: 3.0.0
- Python Version: 3.6.8
- Operating System and Version: Linux Mint 19.1
**Steps to Reproduce (Bugs Only)**
- Create a boolean setting: configparser only supports string values so this has to be a string representation of a boolean. [According to configparser](https://docs.python.org/3/library/configparser.html#supported-datatypes), this includes
> 'yes'/'no', 'on'/'off', 'true'/'false' and '1'/'0'
- Access this value using the `getboolean` method recommended by configparser. This should automatically convert the string value to a bool. Example usage:
`if self.app.config['hydra'].getboolean('validator_metrics'):`
- Exception:
```
Traceback (most recent call last):
File "/home/adam/.pyenv/versions/hydra/bin/hydra", line 11, in <module>
load_entry_point('hydra', 'console_scripts', 'hydra')()
File "/home/adam/PycharmProjects/hydra/hydra/main.py", line 152, in main
app.run()
File "/home/adam/.pyenv/versions/hydra/lib/python3.6/site-packages/cement-3.0.0-py3.6.egg/cement/core/foundation.py", line 916, in run
return_val = self.controller._dispatch()
File "/home/adam/.pyenv/versions/hydra/lib/python3.6/site-packages/cement-3.0.0-py3.6.egg/cement/ext/ext_argparse.py", line 806, in _dispatch
return func()
File "/home/adam/PycharmProjects/hydra/hydra/controllers/client.py", line 621, in enable_metrics
self.app.client.configure_metrics()
File "/home/adam/PycharmProjects/hydra/hydra/helpers/client.py", line 275, in configure_metrics
if self.app.config['hydra'].getboolean('validator_metrics'):
File "/home/adam/.pyenv/versions/3.6.8/lib/python3.6/configparser.py", line 1283, in get
fallback=fallback, **kwargs)
File "/home/adam/.pyenv/versions/3.6.8/lib/python3.6/configparser.py", line 829, in getboolean
raw=raw, vars=vars, fallback=fallback, **kwargs)
File "/home/adam/.pyenv/versions/3.6.8/lib/python3.6/configparser.py", line 809, in _get_conv
**kwargs)
File "/home/adam/.pyenv/versions/3.6.8/lib/python3.6/configparser.py", line 803, in _get
return conv(self.get(section, option, **kwargs))
TypeError: get() got an unexpected keyword argument 'raw'
```
I am thinking this is due to the fact that cement does not pass kwargs to configparser's wrapped `get` method. I am going to attempt making a PR to address this issue.
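The mechanism can be reproduced with plain `configparser`, independent of cement (the class and option names below are made up for illustration):

```python
from configparser import RawConfigParser

class NoKwargsGet(RawConfigParser):
    # Mirrors the pre-fix override: get() does not accept **kwargs
    def get(self, section, key):
        return RawConfigParser.get(self, section, key)

cp = NoKwargsGet()
cp.read_string("[app]\nflag = true\n")
cp["app"].getboolean("flag")  # TypeError: get() got an unexpected keyword argument 'raw'
```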
| 0.0 | [
"tests/ext/test_ext_configparser.py::test_get_boolean"
] | [
"tests/ext/test_ext_configparser.py::TestConfigParserConfigHandler::test_subclassing",
"tests/ext/test_ext_configparser.py::test_get_dict",
"tests/ext/test_ext_configparser.py::test_env_var_override"
] | 2019-05-07 15:16:20+00:00 | 1,810 |
|
datahq__dataflows-101 | diff --git a/.gitignore b/.gitignore
index 1f4ba80..bafb34c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -85,6 +85,5 @@ celerybeat-schedule
# Resources created by our tests
.coverage.*
-
todo/
-out/
+.checkpoints/
diff --git a/PROCESSORS.md b/PROCESSORS.md
index 736a336..bfa8a9a 100644
--- a/PROCESSORS.md
+++ b/PROCESSORS.md
@@ -42,6 +42,7 @@ Loads data from various source types (local files, remote URLS, Google Spreadshe
def load(source, name=None, resources=None, strip=True, limit_rows=None,
infer_strategy=None, cast_strategy=None,
override_schema=None, override_fields=None,
+ deduplicate_headers=False,
on_error=raise_exception,
**options)
pass
@@ -76,6 +77,7 @@ Relevant only when _not_ loading data from a datapackage:
- `load.CAST_WITH_SCHEMA` - Data will be parsed and casted using the schema and will error in case of faulty data
- `override_schema` - Provided dictionary will be merged into the inferred schema. If `fields` key is set its contents will fully replace the inferred fields array. The same behavior will be applied for all other nested structures.
- `override_fields` - Provided mapping will patch the inferred `schema.fields` array. In the mapping keys must be field names and values must be dictionaries intended to be merged into the corresponding field's metadata.
+- `deduplicate_headers` - (default `False`) If there are duplicate headers and the flag is set to `True` it will rename them using a `header (1), header (2), etc` approach. If there are duplicate headers and the flag is set to `False` it will raise an error.
- `on_error` - Dictates how `load` will behave in case of a validation error.
Options are identical to `on_error` in `set_type` and `validate`
diff --git a/data/duplicate_headers.csv b/data/duplicate_headers.csv
new file mode 100644
index 0000000..1f72e28
--- /dev/null
+++ b/data/duplicate_headers.csv
@@ -0,0 +1,2 @@
+header1,header2,header2
+value1,value2,value3
diff --git a/dataflows/processors/load.py b/dataflows/processors/load.py
index 6a88b55..a674882 100644
--- a/dataflows/processors/load.py
+++ b/dataflows/processors/load.py
@@ -106,8 +106,10 @@ class load(DataStreamProcessor):
ERRORS_RAISE = raise_exception
def __init__(self, load_source, name=None, resources=None, strip=True, limit_rows=None,
- infer_strategy=None, cast_strategy=None, on_error=raise_exception,
+ infer_strategy=None, cast_strategy=None,
override_schema=None, override_fields=None,
+ deduplicate_headers=False,
+ on_error=raise_exception,
**options):
super(load, self).__init__()
self.load_source = load_source
@@ -119,6 +121,7 @@ class load(DataStreamProcessor):
self.resources = resources
self.override_schema = override_schema
self.override_fields = override_fields
+ self.deduplicate_headers = deduplicate_headers
self.load_dp = None
self.resource_descriptors = []
@@ -194,6 +197,11 @@ class load(DataStreamProcessor):
self.options.setdefault('ignore_blank_headers', True)
self.options.setdefault('headers', 1)
stream: Stream = Stream(self.load_source, **self.options).open()
+ if len(stream.headers) != len(set(stream.headers)):
+ if not self.deduplicate_headers:
+ raise ValueError(
+ 'Found duplicate headers. Use the `deduplicate_headers` flag')
+ stream.headers = self.rename_duplicate_headers(stream.headers)
schema = Schema().infer(
stream.sample, headers=stream.headers,
confidence=1, guesser_cls=self.guesser)
@@ -241,3 +249,16 @@ class load(DataStreamProcessor):
if self.limit_rows:
it = self.limiter(it)
yield it
+
+ def rename_duplicate_headers(self, duplicate_headers):
+ counter = {}
+ headers = []
+ for header in duplicate_headers:
+ counter.setdefault(header, 0)
+ counter[header] += 1
+ if counter[header] > 1:
+ if counter[header] == 2:
+ headers[headers.index(header)] = '%s (%s)' % (header, 1)
+ header = '%s (%s)' % (header, counter[header])
+ headers.append(header)
+ return headers
diff --git a/out/.gitignore b/out/.gitignore
new file mode 100644
index 0000000..d6b7ef3
--- /dev/null
+++ b/out/.gitignore
@@ -0,0 +1,2 @@
+*
+!.gitignore
diff --git a/setup.py b/setup.py
index e6b2477..e920cf6 100644
--- a/setup.py
+++ b/setup.py
@@ -21,6 +21,7 @@ def read(*paths):
PACKAGE = 'dataflows'
NAME = PACKAGE.replace('_', '-')
INSTALL_REQUIRES = [
+ 'tabulator>=1.23.0',
'datapackage>=1.5.0',
'tableschema>=1.5',
'kvfile>=0.0.6',
diff --git a/tox.ini b/tox.ini
index 739870d..22f82fe 100644
--- a/tox.ini
+++ b/tox.ini
@@ -22,6 +22,12 @@ commands=
--cov-config tox.ini \
--cov-report term-missing \
{posargs}
+
[pytest]
# pytest.ini configuration here
testpaths = tests
+
+[coverage:run]
+# .coveragerc configuration here
+omit =
+ */.tox/*
| datahq/dataflows | b1e795ce56eb3ae19e9a3d91db0fa7665290fb98 | diff --git a/tests/test_cli.py b/tests/test_cli.py
index f6d20ae..ad10b07 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -1,5 +1,5 @@
import subprocess
def test_init_remote():
- subprocess.check_output('dataflows init https://raw.githubusercontent.com/datahq/dataflows/master/data/academy.csv',
- shell=True)
\ No newline at end of file
+ subprocess.check_output('cd ./out && dataflows init https://raw.githubusercontent.com/datahq/dataflows/master/data/academy.csv',
+ shell=True)
diff --git a/tests/test_lib.py b/tests/test_lib.py
index 036fa6d..d082723 100644
--- a/tests/test_lib.py
+++ b/tests/test_lib.py
@@ -1,3 +1,4 @@
+import pytest
from dataflows import Flow
data = [
@@ -19,12 +20,12 @@ def test_dump_to_sql():
'resource-name': 'res_1'
}
),
- engine='sqlite:///test.db')
+ engine='sqlite:///out/test.db')
)
f.process()
# Check validity
- engine = create_engine('sqlite:///test.db')
+ engine = create_engine('sqlite:///out/test.db')
result = list(dict(x) for x in engine.execute('select * from output_table'))
assert result == data
@@ -438,11 +439,11 @@ def test_load_from_package():
Flow(
[{'foo': 'bar', 'moo': 12}],
- dump_to_path('data/load_from_package')
+ dump_to_path('out/load_from_package')
).process()
ds = Flow(
- load('data/load_from_package/datapackage.json')
+ load('out/load_from_package/datapackage.json')
).datastream()
assert len(ds.dp.resources) == 1
@@ -455,10 +456,10 @@ def test_load_from_env_var():
Flow(
[{'foo': 'bar'}],
- dump_to_path('data/load_from_env_var')
+ dump_to_path('out/load_from_env_var')
).process()
- os.environ['MY_DATAPACKAGE'] = 'data/load_from_env_var/datapackage.json'
+ os.environ['MY_DATAPACKAGE'] = 'out/load_from_env_var/datapackage.json'
results, dp, _ = Flow(
load('env://MY_DATAPACKAGE')
).results()
@@ -473,11 +474,11 @@ def test_load_from_package_resource_matching():
Flow(
[{'foo': 'bar'}],
[{'foo': 'baz'}],
- dump_to_path('data/load_from_package_resource_matching(')
+ dump_to_path('out/load_from_package_resource_matching(')
).process()
ds = Flow(
- load('data/load_from_package_resource_matching(/datapackage.json', resources=['res_2'])
+ load('out/load_from_package_resource_matching(/datapackage.json', resources=['res_2'])
).datastream()
assert len(ds.dp.resources) == 1
@@ -705,10 +706,10 @@ def test_dump_to_path_use_titles():
[{'hello': 'world', 'hola': 'mundo'}, {'hello': 'עולם', 'hola': 'عالم'}],
*(set_type(name, resources=['res_1'], title=title) for name, title
in (('hello', 'שלום'), ('hola', 'aloha'))),
- dump_to_path('data/dump_with_titles', use_titles=True)
+ dump_to_path('out/dump_with_titles', use_titles=True)
).process()
- with tabulator.Stream('data/dump_with_titles/res_1.csv') as stream:
+ with tabulator.Stream('out/dump_with_titles/res_1.csv') as stream:
assert stream.read() == [['שלום', 'aloha'],
['world', 'mundo'],
['עולם', 'عالم']]
@@ -727,7 +728,7 @@ def test_load_dates():
[{'today': str(_today), 'now': str(_now)}],
set_type('today', type='date'),
set_type('now', type='datetime', format=datetime_format),
- dump_to_path('data/dump_dates')
+ dump_to_path('out/dump_dates')
).process()
try:
@@ -748,7 +749,7 @@ def test_load_dates():
out_now = datetime.datetime(_now.year, _now.month, _now.day, _now.hour, _now.minute, _now.second)
assert Flow(
- load('data/dump_dates/datapackage.json'),
+ load('out/dump_dates/datapackage.json'),
).results()[0] == [[{'today': _today, 'now': out_now}]]
@@ -900,11 +901,11 @@ def test_save_load_dates():
[{'id': 1, 'ts': datetime.datetime.now()},
{'id': 2, 'ts': datetime.datetime.now()}],
set_type('ts', type='datetime', format='%Y-%m-%d/%H:%M:%S'),
- dump_to_path('data/test_save_load_dates')
+ dump_to_path('out/test_save_load_dates')
).process()
res, _, _ = Flow(
- load('data/test_save_load_dates/datapackage.json'),
+ load('out/test_save_load_dates/datapackage.json'),
printer()
).results()
@@ -1236,7 +1237,7 @@ def test_load_override_schema_and_fields():
{'name': 'george', 'age': '17'},
{'name': None, 'age': '22'},
]]
-
+
def test_delete_fields_regex():
from dataflows import load, delete_fields
flow = Flow(
@@ -1271,3 +1272,29 @@ def test_join_full_outer():
{'id': 3, 'city': 'rome', 'population': None},
{'id': 4, 'city': None, 'population': 3},
]]
+
+
+def test_load_duplicate_headers():
+ from dataflows import load
+ flow = Flow(
+ load('data/duplicate_headers.csv'),
+ )
+ with pytest.raises(ValueError) as excinfo:
+ flow.results()
+ assert 'duplicate headers' in str(excinfo.value)
+
+
+def test_load_duplicate_headers_with_deduplicate_headers_flag():
+ from dataflows import load
+ flow = Flow(
+ load('data/duplicate_headers.csv', deduplicate_headers=True),
+ )
+ data, package, stats = flow.results()
+ assert package.descriptor['resources'][0]['schema']['fields'] == [
+ {'name': 'header1', 'type': 'string', 'format': 'default'},
+ {'name': 'header2 (1)', 'type': 'string', 'format': 'default'},
+ {'name': 'header2 (2)', 'type': 'string', 'format': 'default'},
+ ]
+ assert data == [[
+ {'header1': 'value1', 'header2 (1)': 'value2', 'header2 (2)': 'value3'},
+ ]]
| Local test runs create many files not present in codebase or gitignore
# Overview
After `py.test` we end up with a lot of files that are neither checked in nor ignored. | 0.0 | [
"tests/test_cli.py::test_init_remote",
"tests/test_lib.py::test_load_duplicate_headers",
"tests/test_lib.py::test_load_duplicate_headers_with_deduplicate_headers_flag"
] | [
"tests/test_lib.py::test_add_computed_field",
"tests/test_lib.py::test_add_computed_field_func",
"tests/test_lib.py::test_add_metadata",
"tests/test_lib.py::test_select_field",
"tests/test_lib.py::test_find_replace",
"tests/test_lib.py::test_unpivot",
"tests/test_lib.py::test_unpivot_any_resources",
"tests/test_lib.py::test_concatenate",
"tests/test_lib.py::test_filter_rows",
"tests/test_lib.py::test_filter_rows_callable",
"tests/test_lib.py::test_sort_rows",
"tests/test_lib.py::test_sort_reverse_many_rows",
"tests/test_lib.py::test_duplicate",
"tests/test_lib.py::test_duplicate_many_rows",
"tests/test_lib.py::test_flow_as_step",
"tests/test_lib.py::test_load_from_package",
"tests/test_lib.py::test_load_from_env_var",
"tests/test_lib.py::test_load_from_package_resource_matching",
"tests/test_lib.py::test_load_strategies",
"tests/test_lib.py::test_load_name_path",
"tests/test_lib.py::test_load_from_package_resources",
"tests/test_lib.py::test_checkpoint",
"tests/test_lib.py::test_load_from_checkpoint",
"tests/test_lib.py::test_update_resource",
"tests/test_lib.py::test_set_type_resources",
"tests/test_lib.py::test_set_type_errors",
"tests/test_lib.py::test_dump_to_path_use_titles",
"tests/test_lib.py::test_load_dates",
"tests/test_lib.py::test_load_dates_timezones",
"tests/test_lib.py::test_add_field",
"tests/test_lib.py::test_load_empty_headers",
"tests/test_lib.py::test_load_xml",
"tests/test_lib.py::test_save_load_dates",
"tests/test_lib.py::test_validate",
"tests/test_lib.py::test_join",
"tests/test_lib.py::test_load_limit_rows",
"tests/test_lib.py::test_set_type_regex",
"tests/test_lib.py::test_load_override_schema",
"tests/test_lib.py::test_load_override_schema_and_fields",
"tests/test_lib.py::test_delete_fields_regex",
"tests/test_lib.py::test_join_full_outer"
] | 2019-07-05 11:58:16+00:00 | 1,811 |
|
datalad__datalad-5580 | diff --git a/datalad/interface/__init__.py b/datalad/interface/__init__.py
index f5a4c5b55..5ac873a5d 100644
--- a/datalad/interface/__init__.py
+++ b/datalad/interface/__init__.py
@@ -73,6 +73,7 @@ _group_misc = (
('datalad.interface.add_archive_content', 'AddArchiveContent',
'add-archive-content'),
('datalad.interface.download_url', 'DownloadURL', 'download-url'),
+ ('datalad.interface.shell_completion', 'ShellCompletion', 'shell-completion'),
('datalad.core.local.run', 'Run', 'run'),
('datalad.interface.rerun', 'Rerun', 'rerun'),
('datalad.interface.run_procedure', 'RunProcedure', 'run-procedure'),
diff --git a/datalad/interface/results.py b/datalad/interface/results.py
index ae98b228a..4769d1a46 100644
--- a/datalad/interface/results.py
+++ b/datalad/interface/results.py
@@ -224,7 +224,7 @@ def annexjson2result(d, ds, **kwargs):
res['status'] = 'ok' if d.get('success', False) is True else 'error'
# we cannot rely on any of these to be available as the feed from
# git annex (or its wrapper) is not always homogeneous
- if 'file' in d:
+ if d.get('file'):
res['path'] = str(ds.pathobj / PurePosixPath(d['file']))
if 'command' in d:
res['action'] = d['command']
diff --git a/datalad/interface/shell_completion.py b/datalad/interface/shell_completion.py
new file mode 100644
index 000000000..4c165fe69
--- /dev/null
+++ b/datalad/interface/shell_completion.py
@@ -0,0 +1,78 @@
+# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
+# ex: set sts=4 ts=4 sw=4 noet:
+# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
+#
+# See COPYING file distributed along with the datalad package for the
+# copyright and license terms.
+#
+# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
+"""A helper command to enable shell (bash, zsh) completion for DataLad
+
+"""
+__docformat__ = 'restructuredtext'
+
+
+from .base import Interface
+from datalad.interface.base import build_doc
+from datalad.interface.utils import eval_results
+from datalad.interface.results import get_status_dict
+
+
+@build_doc
+class ShellCompletion(Interface):
+ """Display shell script for enabling shell completion for DataLad.
+
+ Output of this command should be "sourced" by the bash or zsh to enable
+ shell completions provided by argcomplete.
+
+ Example:
+
+ $ source <(datalad shell-completion)
+ $ datalad --<PRESS TAB to display available option>
+
+ """
+ # XXX prevent common args from being added to the docstring
+ _no_eval_results = True
+ result_renderer = 'tailored'
+ _params_ = {}
+
+ @staticmethod
+ @eval_results
+ def __call__():
+ """
+ """
+ content = """\
+# Universal completion script for DataLad with the core autogenerated by
+# python-argcomplete and only slightly improved to work for ZSH if sourced under ZSH.
+#
+# Instead of just running this command and seeing this output, do
+#
+# source <(datalad shell-completion)
+#
+# in your bash or zsh session.
+
+if [ "${ZSH_VERSION:-}" != "" ]; then
+ autoload -U compinit && compinit
+ autoload -U bashcompinit && bashcompinit
+fi
+
+_python_argcomplete() {
+ local IFS=''
+ COMPREPLY=( $(IFS="$IFS" COMP_LINE="$COMP_LINE" COMP_POINT="$COMP_POINT" _ARGCOMPLETE_COMP_WORDBREAKS="$COMP_WORDBREAKS" _ARGCOMPLETE=1 "$1" 8>&1 9>&2 1>/dev/null 2>/dev/null) )
+ if [[ $? != 0 ]]; then
+ unset COMPREPLY
+ fi
+}
+
+complete -o nospace -o default -F _python_argcomplete "datalad"
+"""
+ yield get_status_dict(
+ action='shell_completion',
+ status='ok',
+ content=content
+ )
+
+ @staticmethod
+ def custom_result_renderer(res, **kwargs):
+ from datalad.ui import ui
+ ui.message(res['content'])
diff --git a/docs/source/cmdline.rst b/docs/source/cmdline.rst
index b428b28b5..cc7c5b89b 100644
--- a/docs/source/cmdline.rst
+++ b/docs/source/cmdline.rst
@@ -67,6 +67,7 @@ Miscellaneous commands
generated/man/datalad-clean
generated/man/datalad-download-url
generated/man/datalad-ls
+ generated/man/datalad-shell-completion
generated/man/datalad-test
generated/man/datalad-export-archive-ora
| datalad/datalad | b16632f41cb13765a4e539788b853954cd016543 | diff --git a/datalad/interface/tests/test_results.py b/datalad/interface/tests/test_results.py
new file mode 100644
index 000000000..d573f34e0
--- /dev/null
+++ b/datalad/interface/tests/test_results.py
@@ -0,0 +1,46 @@
+# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
+# ex: set sts=4 ts=4 sw=4 noet:
+# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
+#
+# See COPYING file distributed along with the datalad package for the
+# copyright and license terms.
+#
+# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
+"""Result utility tests
+
+"""
+
+from datalad.interface.results import (
+ annexjson2result,
+)
+from datalad.distribution.dataset import Dataset
+
+from datalad.tests.utils import (
+ eq_,
+ with_tempfile,
+)
+
+
+@ with_tempfile
+def test_annexjson2result(dspath):
+ # no explicit success means 'error'
+ eq_(annexjson2result(dict(), None),
+ dict(status='error'))
+ # unrecognized -> error
+ eq_(annexjson2result(dict(success='random'), None),
+ dict(status='error'))
+ # success is possible ;-)
+ eq_(annexjson2result(dict(success=True), None),
+ dict(status='ok'))
+
+ # path handling
+ # needs a dataset
+ ds = Dataset(dspath)
+ eq_(annexjson2result(dict(file='file1'), ds),
+ dict(status='error',
+ path=str(ds.pathobj / 'file1')))
+ # on all platforms, paths are reported in platform conventions
+ # although git-annex reports in posix
+ eq_(annexjson2result(dict(file='dir1/file1'), ds),
+ dict(status='error',
+ path=str(ds.pathobj / 'dir1' / 'file1')))
diff --git a/datalad/interface/tests/test_shell_completion.py b/datalad/interface/tests/test_shell_completion.py
new file mode 100644
index 000000000..4ece7833c
--- /dev/null
+++ b/datalad/interface/tests/test_shell_completion.py
@@ -0,0 +1,45 @@
+# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
+# ex: set sts=4 ts=4 sw=4 noet:
+# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
+#
+# See COPYING file distributed along with the datalad package for the
+# copyright and license terms.
+#
+# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
+"""test command datalad shell_completion
+
+"""
+
+__docformat__ = 'restructuredtext'
+
+# Not really worth to be there but it is ATM, so let's use that
+from datalad.api import (
+ shell_completion
+)
+from datalad.cmd import WitlessRunner
+from datalad.tests.utils import (
+ assert_cwd_unchanged,
+ eq_,
+ skip_if_on_windows,
+ swallow_outputs,
+)
+
+
+@assert_cwd_unchanged
+def test_shell_completion_python():
+ # largely a smoke test for our print("hello world")
+ with swallow_outputs() as cmo:
+ res = shell_completion()
+ out = cmo.out.rstrip()
+ # we get it printed and returned for double-pleasure
+ eq_(out, res[0]['content'].rstrip())
+
+
+@skip_if_on_windows # TODO: make it more specific since might work if bash is available
+def test_shell_completion_source():
+ # just smoke test that produced shell script sources nicely without error
+ WitlessRunner().run(['bash', '-c', 'source <(datalad shell-completion)'])
+ # ideally we should feed that shell with TAB to see the result of completion but
+ # yoh sees no easy way ATM, and googled up
+ # https://stackoverflow.com/questions/9137245/unit-test-for-bash-completion-script
+ # requires too much enthusiasm toward this goal.
\ No newline at end of file
diff --git a/datalad/plugin/tests/test_plugins.py b/datalad/plugin/tests/test_plugins.py
index 6a042b089..11f33fe6c 100644
--- a/datalad/plugin/tests/test_plugins.py
+++ b/datalad/plugin/tests/test_plugins.py
@@ -191,13 +191,30 @@ def test_no_annex(path):
# add inannex pre configuration
ds.save(opj('code', 'inannex'))
no_annex(pattern=['code/**', 'README'], dataset=ds.path)
+
+ inannex = (ds.pathobj / 'code' / 'inannex')
+ # Ensure that notinannex's mtime is as recent or newer than .git/index's so
+ # that, on an adjusted branch, the clean filter runs on the next save. This
+ # avoids a racy failure of the managed-branch assert_repo_status check
+ # below.
+ inannex.touch()
+
# add inannex and README post configuration
ds.save([opj('code', 'notinannex'), 'README'])
- assert_repo_status(ds.path)
- # one is annex'ed, the other is not, despite no change in add call
- # importantly, also .gitattribute is not annexed
- eq_([opj('code', 'inannex')],
- [str(Path(p)) for p in ds.repo.get_annexed_files()])
+
+ if ds.repo.is_managed_branch():
+ # For unlocked files, if .gitattributes is configured so that a file
+ # should go to git, an annexed file will switch to be tracked by git
+ # the next time the clean filter runs on it.
+ #
+ # https://git-annex.branchable.com/forum/one-off_unlocked_annex_files_that_go_against_large/
+ assert_repo_status(ds.path, modified=[inannex])
+ else:
+ assert_repo_status(ds.path)
+ # one is annex'ed, the other is not, despite no change in add call
+ # importantly, also .gitattribute is not annexed
+ eq_([opj('code', 'inannex')],
+ [str(Path(p)) for p in ds.repo.get_annexed_files()])
_ds_template = {
| annexjson2result cannot handle file=null results
These results are produced by `git annex export` -- although they likely should not be emitted in the first place.
```
File "/home/mih/hacking/datalad/git/datalad/interface/results.py", line 228, in annexjson2result
res['path'] = str(ds.pathobj / PurePosixPath(d['file']))
File "/usr/lib/python3.9/pathlib.py", line 664, in __new__
return cls._from_parts(args)
File "/usr/lib/python3.9/pathlib.py", line 696, in _from_parts
drv, root, parts = self._parse_args(args)
File "/usr/lib/python3.9/pathlib.py", line 680, in _parse_args
a = os.fspath(a)
TypeError: expected str, bytes or os.PathLike object, not NoneType
```
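For reference, the one-line guard change in the patch above hinges on the difference between key membership and value truthiness; a tiny self-contained illustration:

```python
from pathlib import PurePosixPath

d = {"file": None}           # what `git annex export` can emit

assert "file" in d           # the old guard passes, and then...
# PurePosixPath(d["file"])   # ...this is the TypeError from the traceback

assert not d.get("file")     # the new guard treats None (and "") as "no path"
assert str(PurePosixPath("sub") / "file1") == "sub/file1"   # normal case unaffected
```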
Related: https://git-annex.branchable.com/bugs/JSON_results_for___96__export__96___have_file__58__null_property | 0.0 | [
"datalad/interface/tests/test_shell_completion.py::test_shell_completion_python",
"datalad/interface/tests/test_shell_completion.py::test_shell_completion_source"
] | [] | 2021-04-16 13:33:55+00:00 | 1,812 |
|
datalad__datalad-5603 | diff --git a/datalad/support/github_.py b/datalad/support/github_.py
index 6848a6785..10c3023d7 100644
--- a/datalad/support/github_.py
+++ b/datalad/support/github_.py
@@ -79,6 +79,17 @@ def _get_tokens_for_login(login, tokens):
return selected_tokens
+def _gh_exception(exc_cls, status, data):
+ """Compatibility wrapper for instantiating a GithubException.
+ """
+ try:
+ exc = exc_cls(status, data, None)
+ except TypeError:
+ # Before PyGithub 1.5, GithubException had only two required arguments.
+ exc = exc_cls(status, data)
+ return exc
+
+
def _gen_github_ses(github_login):
"""Generate viable Github sessions
@@ -96,7 +107,8 @@ def _gen_github_ses(github_login):
"""
if github_login == 'disabledloginfortesting':
- raise gh.BadCredentialsException(403, 'no login specified')
+ raise _gh_exception(gh.BadCredentialsException,
+ 403, 'no login specified')
# see if we have tokens - might be many. Doesn't cost us much so get at once
tokens = unique(
| datalad/datalad | d55866d55d20a0e54730dbb354bea04bba8a7973 | diff --git a/datalad/support/tests/test_github_.py b/datalad/support/tests/test_github_.py
index a8eaa212b..acbc22cda 100644
--- a/datalad/support/tests/test_github_.py
+++ b/datalad/support/tests/test_github_.py
@@ -33,6 +33,7 @@ from .. import github_
from ..github_ import (
_gen_github_entity,
_get_github_cred,
+ _gh_exception,
_token_str,
get_repo_url,
)
@@ -105,7 +106,8 @@ def test__make_github_repos():
def _make_github_repo(github_login, entity, reponame, *args):
if entity == 'entity1':
- raise gh.BadCredentialsException("very bad status", "some data")
+ raise _gh_exception(gh.BadCredentialsException,
+ "very bad status", "some data")
return reponame
with mock.patch.object(github_, '_gen_github_entity', _gen_github_entity), \
@@ -117,7 +119,8 @@ def test__make_github_repos():
def _make_github_repo(github_login, entity, reponame, *args):
# Always throw an exception
- raise gh.BadCredentialsException("very bad status", "some data")
+ raise _gh_exception(gh.BadCredentialsException,
+ "very bad status", "some data")
with mock.patch.object(github_, '_gen_github_entity', _gen_github_entity), \
mock.patch.object(github_, '_make_github_repo', _make_github_repo):
| Incompatible with PyGithub v1.55
```
======================================================================
ERROR: datalad.support.tests.test_github_.test__make_github_repos
----------------------------------------------------------------------
Traceback (most recent call last):
File "/tmp/dl-miniconda-z1552950/lib/python3.8/site-packages/nose/case.py", line 198, in runTest
self.test(*self.arg)
File "/tmp/dl-miniconda-z1552950/lib/python3.8/site-packages/datalad/support/tests/test_github_.py", line 110, in test__make_github_repos
res = list(github_._make_github_repos_(*args))
File "/tmp/dl-miniconda-z1552950/lib/python3.8/site-packages/datalad/support/github_.py", line 212, in _make_github_repos_
res_ = _make_github_repo(
File "/tmp/dl-miniconda-z1552950/lib/python3.8/site-packages/datalad/support/tests/test_github_.py", line 104, in _make_github_repo
raise gh.BadCredentialsException("very bad status", "some data")
TypeError: __init__() missing 1 required positional argument: 'headers'
```
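The traceback shows that under PyGithub 1.55 `GithubException.__init__` requires a third positional `headers` argument. A version-agnostic way to construct the exception, along the lines of the `_gh_exception` shim added in the patch above (the helper name here is illustrative):

```python
import github as gh

def make_gh_exception(exc_cls, status, data):
    """Instantiate a GithubException subclass across PyGithub versions."""
    try:
        # newer PyGithub (e.g. 1.55) expects (status, data, headers)
        return exc_cls(status, data, None)
    except TypeError:
        # older releases accept only (status, data)
        return exc_cls(status, data)

exc = make_gh_exception(gh.BadCredentialsException, 403, "no login specified")
```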
https://travis-ci.com/github/datalad/datalad/jobs/501116612#L2417 | 0.0 | [
"datalad/support/tests/test_github_.py::test_get_repo_url",
"datalad/support/tests/test_github_.py::test__make_github_repos"
] | [] | 2021-04-26 15:35:00+00:00 | 1,813 |
|
datalad__datalad-5749 | diff --git a/datalad/core/distributed/clone.py b/datalad/core/distributed/clone.py
index dfed3f13f..6f1ff6778 100644
--- a/datalad/core/distributed/clone.py
+++ b/datalad/core/distributed/clone.py
@@ -11,7 +11,6 @@
import logging
import re
-import requests
from os.path import expanduser
from collections import OrderedDict
from urllib.parse import unquote as urlunquote
@@ -32,7 +31,6 @@ from datalad.cmd import (
CommandError,
GitWitlessRunner,
StdOutCapture,
- StdOutErrCapture,
)
from datalad.distributed.ora_remote import (
LocalIO,
@@ -47,6 +45,7 @@ from datalad.support.constraints import (
)
from datalad.support.exceptions import DownloadError
from datalad.support.param import Parameter
+from datalad.support.strings import get_replacement_dict
from datalad.support.network import (
get_local_file_url,
download_url,
@@ -62,6 +61,7 @@ from datalad.dochelpers import (
)
from datalad.utils import (
ensure_bool,
+ ensure_list,
knows_annex,
make_tempfile,
Path,
@@ -122,6 +122,26 @@ class Clone(Interface):
result), or found already installed from the specified source ('notneeded'
result).<< PYTHON ||
+ URL mapping configuration
+
+ 'clone' supports the transformation of URLs via (multi-part) substitution
+ specifications. A substitution specification is defined as a configuration
+ setting 'datalad.clone.url-substition.<seriesID>' with a string containing
+ a match and substitution expression, each following Python's regular
+ expression syntax. Both expressions are concatenated to a single string
+ with an arbitrary delimiter character. The delimiter is defined by
+ prefixing the string with the delimiter. Prefix and delimiter are stripped
+ from the expressions (Example: ",^http://(.*)$,https://\\1"). This setting
+ can be defined multiple times, using the same '<seriesID>'. Substitutions
+ in a series will be applied incrementally, in order of their definition.
+ The first substitution in such a series must match, otherwise no further
+ substitutions in a series will be considered. However, following the first
+ match all further substitutions in a series are processed, regardless
+ whether intermediate expressions match or not. Substitution series themselves
+ have no particular order, each matching series will result in a candidate
+ clone URL. Consequently, the initial match specification in a series should
+ be as precise as possible to prevent inflation of candidate URLs.
+
.. seealso::
:ref:`handbook:3-001`
@@ -351,6 +371,74 @@ class Clone(Interface):
ds.subdatasets(path, set_property=[("datalad-url", source)])
+def _get_url_mappings(cfg):
+ cfg_prefix = 'datalad.clone.url-substitute.'
+ # figure out which keys we should be looking for
+ # in the active config
+ subst_keys = set(k for k in cfg.keys() if k.startswith(cfg_prefix))
+ # and in the common config specs
+ from datalad.interface.common_cfg import definitions
+ subst_keys.update(k for k in definitions if k.startswith(cfg_prefix))
+ # TODO a potential sorting of substitution series could be implemented
+ # here
+ return [
+ # decode the rule specifications
+ get_replacement_dict(
+ # one or more could come out
+ ensure_list(
+ cfg.get(
+ k,
+ # make sure to pull the default from the common config
+ default=cfg.obtain(k),
+ # we specifically support declaration of multiple
+ # settings to build replacement chains
+ get_all=True)))
+ for k in subst_keys
+ ]
+
+
+def _map_urls(cfg, urls):
+ mapping_specs = _get_url_mappings(cfg)
+ if not mapping_specs:
+ return urls
+
+ mapped = []
+ # we process the candidate in order to maintain any prioritization
+ # encoded in it (e.g. _get_flexible_source_candidates_for_submodule)
+ # if we have a matching mapping replace the URL in its position
+ for u in urls:
+ # we only permit a single match
+ # TODO we likely want to RF this to pick the longest match
+ mapping_applied = False
+ # try one mapping set at a time
+ for mapping_spec in mapping_specs:
+ # process all substitution patterns in the specification
+ # always operate on strings (could be a Path instance too)
+ mu = str(u)
+ matched = False
+ for match_ex, subst_ex in mapping_spec.items():
+ if not matched:
+ matched = re.match(match_ex, mu) is not None
+ if not matched:
+ break
+ # try to map, would return unchanged, if there is no match
+ mu = re.sub(match_ex, subst_ex, mu)
+ if mu != u:
+ lgr.debug("URL substitution: '%s' -> '%s'", u, mu)
+ mapped.append(mu)
+ # we could consider breaking after the for effective mapping
+ # specification. however, that would mean any generic
+ # definition of a broadly matching substitution would derail
+ # the entroe system. moreover, suddently order would matter
+ # substantially
+ mapping_applied = True
+ if not mapping_applied:
+ # none of the mappings matches, go with the original URL
+ # (really original, not the stringified one)
+ mapped.append(u)
+ return mapped
+
+
def clone_dataset(
srcs,
destds,
@@ -410,6 +498,11 @@ def clone_dataset(
dest_path = destds.pathobj
+ # check for configured URL mappings, either in the given config manager
+ # or in the one of the destination dataset, which is typically not existent
+ # yet and the process config manager is then used effectively
+ srcs = _map_urls(cfg or destds.config, srcs)
+
# decode all source candidate specifications
candidate_sources = [decode_source_spec(s, cfg=cfg) for s in srcs]
diff --git a/datalad/interface/common_cfg.py b/datalad/interface/common_cfg.py
index 319d6c0be..7485cf577 100644
--- a/datalad/interface/common_cfg.py
+++ b/datalad/interface/common_cfg.py
@@ -25,8 +25,53 @@ from datalad.utils import on_windows
dirs = AppDirs("datalad", "datalad.org")
+subst_rule_docs = """\
+A substitution specification is a string with a match and substitution
+expression, each following Python's regular expression syntax. Both expressions
+are concatenated to a single string with an arbitrary delimiter character. The
+delimiter is defined by prefixing the string with the delimiter. Prefix and
+delimiter are stripped from the expressions (Example:
+",^http://(.*)$,https://\\1"). This setting can be defined multiple times.
+Substitutions will be applied incrementally, in order of their definition. The
+first substitution in such a series must match, otherwise no further
+substitutions in a series will be considered. However, following the first
+match all further substitutions in a series are processed, regardless whether
+intermediate expressions match or not."""
definitions = {
+ 'datalad.clone.url-substitute.github': {
+ 'ui': ('question', {
+ 'title': 'GitHub URL substitution rule',
+ 'text': 'Mangling for GitHub-related URL. ' + subst_rule_docs
+ }),
+ 'destination': 'global',
+ 'default': (
+ # take any github project URL apart into <org>###<identifier>
+ r',https?://github.com/([^/]+)/(.*)$,\1###\2',
+ # replace any (back)slashes with a single dash
+ r',[/\\]+,-',
+ # replace any whitespace (include urlquoted variant)
+ # with a single underscore
+ r',\s+|(%2520)+|(%20)+,_',
+ # rebuild functional project URL
+ r',([^#]+)###(.*),https://github.com/\1/\2',
+ )
+ },
+ # TODO this one should migrate to the datalad-osf extension. however, right
+ # now extensions cannot provide default configuration
+ # https://github.com/datalad/datalad/issues/5769
+ 'datalad.clone.url-substitute.osf': {
+ 'ui': ('question', {
+ 'title': 'Open Science Framework URL substitution rule',
+ 'text': 'Mangling for OSF-related URLs. ' + subst_rule_docs
+ }),
+ 'destination': 'global',
+ 'default': (
+ # accept browser-provided URL and convert to those accepted by
+ # the datalad-osf extension
+ r',^https://osf.io/([^/]+)[/]*$,osf://\1',
+ )
+ },
# this is actually used in downloaders, but kept cfg name original
'datalad.crawl.cache': {
'ui': ('yesno', {
| datalad/datalad | 78a7ca4b5b3521a0db29a41dc878597a733b524c | diff --git a/datalad/core/distributed/tests/test_clone.py b/datalad/core/distributed/tests/test_clone.py
index 7c7bd2f55..30c30ebbf 100644
--- a/datalad/core/distributed/tests/test_clone.py
+++ b/datalad/core/distributed/tests/test_clone.py
@@ -1564,3 +1564,85 @@ def test_clone_recorded_subds_reset(path):
eq_(ds_b.subdatasets()[0]["gitshasum"],
sub_repo.get_hexsha(
sub_repo.get_corresponding_branch(branch) or branch))
+
+
+@with_tempfile
+@with_tempfile
+def test_clone_url_mapping(src_path, dest_path):
+ src = create(src_path)
+ dest = Dataset(dest_path)
+ # check that the impossible doesn't work
+ assert_raises(IncompleteResultsError, clone, 'rambo', dest_path)
+ # rather than adding test URL mapping here, consider
+ # test_url_mapping_specs(), it is cheaper there
+
+ # anticipate windows test paths and escape them
+ escaped_subst = (r',rambo,%s' % src_path).replace('\\', '\\\\')
+ for specs in (
+ # we can clone with a simple substitution
+ {'datalad.clone.url-substitute.mike': escaped_subst},
+ # a prior match to a dysfunctional URL doesn't impact success
+ {
+ 'datalad.clone.url-substitute.no': ',rambo,picknick',
+ 'datalad.clone.url-substitute.mike': escaped_subst,
+ }):
+ try:
+ with patch.dict(dest.config._merged_store, specs):
+ clone('rambo', dest_path)
+ finally:
+ dest.remove(check=False)
+
+ # check submodule config impact
+ dest.create()
+ with patch.dict(dest.config._merged_store,
+ {'datalad.clone.url-substitute.mike': escaped_subst}):
+ dest.clone('rambo', 'subds')
+ submod_rec = dest.repo.get_submodules()[0]
+ # we record the original-original URL
+ eq_(submod_rec['gitmodule_datalad-url'], 'rambo')
+ # and put the effective one as the primary URL
+ eq_(submod_rec['gitmodule_url'], src_path)
+
+
+_nomatch_map = {
+ 'datalad.clone.url-substitute.nomatch': (
+ ',nomatch,NULL',
+ )
+}
+_windows_map = {
+ 'datalad.clone.url-substitute.win': (
+ r',C:\\Users\\datalad\\from,D:\\to',
+ )
+}
+
+
+def test_url_mapping_specs():
+ from datalad.core.distributed.clone import _map_urls
+ cfg = ConfigManager()
+ for m, i, o in (
+ # path redirect on windows
+ (_windows_map,
+ r'C:\Users\datalad\from',
+ r'D:\to'),
+ # test standard github mapping, no pathc needed
+ ({},
+ 'https://github.com/datalad/testrepo_gh/sub _1',
+ 'https://github.com/datalad/testrepo_gh-sub__1'),
+ # and on deep subdataset too
+ ({},
+ 'https://github.com/datalad/testrepo_gh/sub _1/d/sub_- 1',
+ 'https://github.com/datalad/testrepo_gh-sub__1-d-sub_-_1'),
+ # test that the presence of another mapping spec doesn't ruin
+ # the outcome
+ (_nomatch_map,
+ 'https://github.com/datalad/testrepo_gh/sub _1',
+ 'https://github.com/datalad/testrepo_gh-sub__1'),
+ # verify OSF mapping, but see
+ # https://github.com/datalad/datalad/issues/5769 for future
+ # implications
+ ({},
+ 'https://osf.io/q8xnk/',
+ 'osf://q8xnk'),
+ ):
+ with patch.dict(cfg._merged_store, m):
+ eq_(_map_urls(cfg, [i]), [o])
diff --git a/datalad/distribution/tests/test_install.py b/datalad/distribution/tests/test_install.py
index a86addfb8..769682e2a 100644
--- a/datalad/distribution/tests/test_install.py
+++ b/datalad/distribution/tests/test_install.py
@@ -919,3 +919,41 @@ def test_relpath_semantics(path):
sub = install(
dataset='super', source='subsrc', path=op.join('super', 'sub'))
eq_(sub.path, op.join(super.path, 'sub'))
+
+
+def _create_test_install_recursive_github(path): # pragma: no cover
+ # to be ran once to populate a hierarchy of test datasets on github
+ # Making it a full round-trip would require github credentials on CI etc
+ ds = create(opj(path, "testrepo gh"))
+ # making them with spaces and - to ensure that we consistently use the mapping
+ # for create and for get/clone/install
+ ds.create("sub _1")
+ ds.create("sub _1/d/sub_- 1")
+ import datalad.distribution.create_sibling_github # to bind API
+ ds.create_sibling_github(
+ "testrepo gh",
+ github_organization='datalad',
+ recursive=True,
+ # yarik forgot to push first, "replace" is not working in non-interactive IIRC
+ # existing='reconfigure'
+ )
+ return ds.push(recursive=True, to='github')
+
+
+@skip_if_no_network
+@with_tempfile(mkdir=True)
+def test_install_recursive_github(path):
+ # test recursive installation of a hierarchy of datasets created on github
+ # using datalad create-sibling-github. Following invocation was used to poplate it
+ #
+ # out = _create_test_install_recursive_github(path)
+
+ # "testrepo gh" was mapped by our sanitization in create_sibling_github to testrepo_gh, thus
+ for i, url in enumerate([
+ 'https://github.com/datalad/testrepo_gh',
+ # optionally made available to please paranoids, but with all takes too long (22sec)
+ #'https://github.com/datalad/testrepo_gh.git',
+ #'[email protected]:datalad/testrepo_gh.git',
+ ]):
+ ds = install(source=url, path=opj(path, "clone%i" % i), recursive=True)
+ eq_(len(ds.subdatasets(recursive=True, fulfilled=True)), 2)
| clone/install: introduce url mapping convenience?
#### What is the problem?
ATM, for a user visiting some datalad-osf published dataset on osf.io, cloning requires either prior user knowledge or every dataset carrying a description such as "this component can be git or datalad cloned from an 'osf://ID' URL, where 'ID' is the OSF node ID shown in the OSF HTTP URL, e.g. https://osf.io/q8xnk/ can be cloned from osf://q8xnk", and then manually tuning up the URL to get `datalad` `clone` or `install` working.
### Suggested solution:
I think datalad should make it easier and allow for a simple "copy paste URL I see in the browser bar", thus making `datalad clone https://osf.io/q8xnk/` work as well, while internally mapping it into `osf://q8xnk` for `git clone` operation.
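For illustration only, a rough sketch of such a regex-based mapping — the helper and table below are hypothetical, not an actual datalad API:

```python
import re

# hypothetical mapping table: (match expression, substitution expression)
URL_MAPPERS = [
    (r"^https?://osf\.io/([^/]+)/?$", r"osf://\1"),
    (r"^///(.*)$", r"http://datasets.datalad.org/\1"),
]

def map_clone_url(url):
    for match_ex, subst_ex in URL_MAPPERS:
        if re.match(match_ex, url):
            return re.sub(match_ex, subst_ex, url)
    return url

assert map_clone_url("https://osf.io/q8xnk/") == "osf://q8xnk"
assert map_clone_url("ssh://example.com/ds") == "ssh://example.com/ds"  # left alone
```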
Possible cons:
- if OSF changes their url schema -- that would break our mapping. But given the historical stability and the presence of those URLs in the wild already, it is very unlikely to happen
Possible implementation:
- allow datalad internally, and also extensions, to register new "URL mappers"
- such "mappers" possibly should allow for different/absent context/purpose behavior
- e.g. for OSF it would only matter for `clone`
- the somewhat related #4816 (waiting for merge) introduces, within datalad itself, a "mapping" of `shub://` URLs into http urls (minted by singularity hub with timeouts) so that those images can be downloaded from the singularity hub; there the context would be `download`
- we already hardcode `///` to be a mapping to `http://datasets.datalad.org` -- implementation could generalize it
- some mappers could be as simple as regex based transformation (like in the case of OSF, `///`) | 0.0 | [
"datalad/core/distributed/tests/test_clone.py::test_url_mapping_specs"
] | [
"datalad/core/distributed/tests/test_clone.py::test_installationpath_from_url",
"datalad/core/distributed/tests/test_clone.py::test_decode_source_spec",
"datalad/distribution/tests/test_install.py::test_insufficient_args"
] | 2021-06-16 15:58:55+00:00 | 1,814 |
|
datalad__datalad-5823 | diff --git a/datalad/distributed/ora_remote.py b/datalad/distributed/ora_remote.py
index a397806dd..dc80befb2 100644
--- a/datalad/distributed/ora_remote.py
+++ b/datalad/distributed/ora_remote.py
@@ -188,6 +188,12 @@ class LocalIO(IOBase):
)
def get_from_archive(self, archive, src, dst, progress_cb):
+ # Upfront check to avoid cryptic error output
+ # https://github.com/datalad/datalad/issues/4336
+ if not self.exists(archive):
+ raise RIARemoteError("archive {arc} does not exist."
+ "".format(arc=archive))
+
# this requires python 3.5
with open(dst, 'wb') as target_file:
subprocess.run([
@@ -1197,6 +1203,9 @@ class RIARemote(SpecialRemote):
def transfer_store(self, key, filename):
self._ensure_writeable()
+ # we need a file-system compatible name for the key
+ key = _sanitize_key(key)
+
dsobj_dir, archive_path, key_path = self._get_obj_location(key)
key_path = dsobj_dir / key_path
@@ -1234,6 +1243,8 @@ class RIARemote(SpecialRemote):
@handle_errors
def transfer_retrieve(self, key, filename):
+ # we need a file-system compatible name for the key
+ key = _sanitize_key(key)
if isinstance(self.io, HTTPRemoteIO):
self.io.get(PurePosixPath(self.annex.dirhash(key)) / key / key,
@@ -1259,6 +1270,8 @@ class RIARemote(SpecialRemote):
@handle_errors
def checkpresent(self, key):
+ # we need a file-system compatible name for the key
+ key = _sanitize_key(key)
if isinstance(self.io, HTTPRemoteIO):
return self.io.checkpresent(
@@ -1278,6 +1291,9 @@ class RIARemote(SpecialRemote):
@handle_errors
def remove(self, key):
+ # we need a file-system compatible name for the key
+ key = _sanitize_key(key)
+
self._ensure_writeable()
dsobj_dir, archive_path, key_path = self._get_obj_location(key)
@@ -1304,6 +1320,8 @@ class RIARemote(SpecialRemote):
@handle_errors
def whereis(self, key):
+ # we need a file-system compatible name for the key
+ key = _sanitize_key(key)
if isinstance(self.io, HTTPRemoteIO):
# display the URL for a request
@@ -1352,6 +1370,35 @@ class RIARemote(SpecialRemote):
# TODO: implement method 'error'
+def _sanitize_key(key):
+ """Returns a sanitized key that is a suitable directory/file name
+
+ Documentation from the analog implementation in git-annex
+ Annex/Locations.hs
+
+ Converts a key into a filename fragment without any directory.
+
+ Escape "/" in the key name, to keep a flat tree of files and avoid
+ issues with keys containing "/../" or ending with "/" etc.
+
+ "/" is escaped to "%" because it's short and rarely used, and resembles
+ a slash
+ "%" is escaped to "&s", and "&" to "&a"; this ensures that the mapping
+ is one to one.
+ ":" is escaped to "&c", because it seemed like a good idea at the time.
+
+ Changing what this function escapes and how is not a good idea, as it
+ can cause existing objects to get lost.
+ """
+ esc = {
+ '/': '%',
+ '%': '&s',
+ '&': '&a',
+ ':': '&c',
+ }
+ return ''.join(esc.get(c, c) for c in key)
+
+
def main():
"""cmdline entry point"""
from annexremote import Master
| datalad/datalad | 66fd8be2cadeda3b85665ae63d407e2241e0bb1a | diff --git a/datalad/distributed/tests/test_ria_basics.py b/datalad/distributed/tests/test_ria_basics.py
index 885d75558..e470e057e 100644
--- a/datalad/distributed/tests/test_ria_basics.py
+++ b/datalad/distributed/tests/test_ria_basics.py
@@ -11,6 +11,7 @@ import logging
from datalad.api import (
Dataset,
clone,
+ create_sibling_ria,
)
from datalad.utils import Path
from datalad.tests.utils import (
@@ -35,7 +36,8 @@ from datalad.tests.utils import (
)
from datalad.distributed.ora_remote import (
LocalIO,
- SSHRemoteIO
+ SSHRemoteIO,
+ _sanitize_key,
)
from datalad.support.exceptions import (
CommandError,
@@ -624,3 +626,51 @@ def test_push_url(storepath, dspath, blockfile):
known_sources = ds.repo.whereis('one.txt')
assert_in(here_uuid, known_sources)
assert_in(store_uuid, known_sources)
+
+
+@known_failure_windows
+@with_tempfile
+@with_tempfile
+def test_url_keys(dspath, storepath):
+ ds = Dataset(dspath).create()
+ repo = ds.repo
+ filename = 'url_no_size.html'
+ # URL-type key without size
+ repo.call_annex([
+ 'addurl', '--relaxed', '--raw', '--file', filename, 'http://example.com',
+ ])
+ ds.save()
+ # copy target
+ ds.create_sibling_ria(
+ name='ria',
+ url='ria+file://{}'.format(storepath),
+ storage_sibling='only',
+ )
+ ds.get(filename)
+ repo.call_annex(['copy', '--to', 'ria', filename])
+ ds.drop(filename)
+ # in the store and on the web
+ assert_equal(len(ds.repo.whereis(filename)), 2)
+ # try download, but needs special permissions to even be attempted
+ ds.config.set('annex.security.allow-unverified-downloads', 'ACKTHPPT', where='local')
+ repo.call_annex(['copy', '--from', 'ria', filename])
+ assert_equal(len(ds.repo.whereis(filename)), 3)
+ # smoke tests that execute the remaining pieces with the URL key
+ repo.call_annex(['fsck', '-f', 'ria'])
+ assert_equal(len(ds.repo.whereis(filename)), 3)
+ # mapped key in whereis output
+ assert_in('%%example', repo.call_annex(['whereis', filename]))
+
+ repo.call_annex(['move', '-f', 'ria', filename])
+ # check that it does not magically reappear, because it actually
+ # did not drop the file
+ repo.call_annex(['fsck', '-f', 'ria'])
+ assert_equal(len(ds.repo.whereis(filename)), 2)
+
+
+def test_sanitize_key():
+ for i, o in (
+ ('http://example.com/', 'http&c%%example.com%'),
+ ('/%&:', '%&s&a&c'),
+ ):
+ assert_equal(_sanitize_key(i), o)
| `datalad get` error message suboptimal
@mih
#### What is the problem?
Confusing error from a failed `datalad get`.
#### What steps will reproduce the problem?
```
$ datalad clone ria+file:///p/fastdata/inm7/data_store/tmp/store#cf8bf616-6cee-11ea-8199-7cd30ae51ae8 test4
[INFO ] Configure additional publication dependency on "store-storage"
configure-sibling(ok): . (sibling)
install(ok): /p/fastdata/inm7/data_store/tmp/test4 (dataset)
action summary:
configure-sibling (ok: 1)
install (ok: 1)
$ cd test4/
$ ls
file1
$ datalad get file1
[WARNING] Running get resulted in stderr output:
ERROR: No more files
/p/fastdata/inm7/data_store/tmp/alt_store/cf8/bf616-6cee-11ea-8199-7cd30ae51ae8/archives/archive.7z
System ERROR:
Unknown error -2147024872
git-annex: get: 1 failed
[ERROR ] verification of content failed [get(/p/fastdata/inm7/data_store/tmp/test4/file1)]
get(error): file1 (file) [verification of content failed]
```
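The "No more files" output is most likely 7z's cryptic way of reporting that the `archive.7z` it was asked to extract from is not present in the store; the patch above adds an upfront existence check to `LocalIO.get_from_archive` so this gets reported in plain terms. A minimal sketch of that idea (the extraction call shown is illustrative):

```python
from pathlib import Path
import subprocess

def get_from_archive(archive, src, dst):
    """Extract `src` from a 7z archive into `dst`, failing early if the archive is gone."""
    archive = Path(archive)
    if not archive.exists():
        # readable error instead of 7z's "No more files ... Unknown error"
        raise RuntimeError(f"archive {archive} does not exist")
    with open(dst, "wb") as target:
        # flags are illustrative; the ORA remote streams the member to stdout
        subprocess.run(["7z", "x", "-so", str(archive), str(src)],
                       stdout=target, check=True)
```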
#### What version of DataLad are you using?
<details><summary>datalad wtf</summary>
<p>
```
$ datalad wtf
# WTF
## configuration <SENSITIVE, report disabled by configuration>
## datalad
- full_version: 0.12.4.dev537-g7bb9
- version: 0.12.4.dev537
## dependencies
- appdirs: 1.4.3
- boto: 2.49.0
- cmd:7z: 16.02
- cmd:annex: 8.20200309-g07fcace
- cmd:bundled-git: 2.23.0
- cmd:git: 2.23.0
- cmd:system-git: 2.23.0
- cmd:system-ssh: 7.9p1
- exifread: 2.1.2
- git: 3.1.0
- gitdb: 4.0.2
- humanize: 2.1.0
- iso8601: 0.1.12
- keyring: 21.1.1
- keyrings.alt: 3.1
- msgpack: 1.0.0
- mutagen: 1.41.1
- requests: 2.22.0
- wrapt: 1.12.1
## environment
- LANG: en_US.UTF-8
- PATH: /p/home/jusers/waite1/judac/shared/miniconda3/bin:/p/home/jusers/waite1/judac/shared/miniconda3/condabin:/opt/lenovo/onecli:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/opt/ibutils/bin:/usr/lpp/mmfs/bin
## extensions
- ria:
- description: Helper for the remote indexed archive (RIA) special remote
- entrypoints:
- ria_remote.export_archive.ExportArchive:
- class: ExportArchive
- load_error: None
- module: ria_remote.export_archive
- names:
- ria-export-archive
- ria_export_archive
- ria_remote.install.Install:
- class: Install
- load_error: function has argument 'alt_sources' not described as a parameter [base.py:update_docstring_with_parameters:374]
- module: ria_remote.install
- names:
- ria-install
- ria_install
- load_error: None
- module: ria_remote
- version: 0.7
## git-annex
- build flags:
- Assistant
- Webapp
- Pairing
- S3
- WebDAV
- Inotify
- DBus
- DesktopNotify
- TorrentParser
- MagicMime
- Feeds
- Testsuite
- dependency versions:
- aws-0.21.1
- bloomfilter-2.0.1.0
- cryptonite-0.25
- DAV-1.3.4
- feed-1.2.0.1
- ghc-8.6.5
- http-client-0.6.4
- persistent-sqlite-2.9.3
- torrent-10000.1.1
- uuid-1.3.13
- yesod-1.6.0.1
- key/value backends:
- SHA256E
- SHA256
- SHA512E
- SHA512
- SHA224E
- SHA224
- SHA384E
- SHA384
- SHA3_256E
- SHA3_256
- SHA3_512E
- SHA3_512
- SHA3_224E
- SHA3_224
- SHA3_384E
- SHA3_384
- SKEIN256E
- SKEIN256
- SKEIN512E
- SKEIN512
- BLAKE2B256E
- BLAKE2B256
- BLAKE2B512E
- BLAKE2B512
- BLAKE2B160E
- BLAKE2B160
- BLAKE2B224E
- BLAKE2B224
- BLAKE2B384E
- BLAKE2B384
- BLAKE2BP512E
- BLAKE2BP512
- BLAKE2S256E
- BLAKE2S256
- BLAKE2S160E
- BLAKE2S160
- BLAKE2S224E
- BLAKE2S224
- BLAKE2SP256E
- BLAKE2SP256
- BLAKE2SP224E
- BLAKE2SP224
- SHA1E
- SHA1
- MD5E
- MD5
- WORM
- URL
- operating system: linux x86_64
- remote types:
- git
- gcrypt
- p2p
- S3
- bup
- directory
- rsync
- web
- bittorrent
- webdav
- adb
- tahoe
- glacier
- ddar
- git-lfs
- hook
- external
- supported repository versions:
- 8
- upgrade supported from repository versions:
- 0
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- version: 8.20200309-g07fcace
## location
- path: /p/fastdata/inm7/data_store
- type: directory
## metadata_extractors
- annex:
- load_error: None
- module: datalad.metadata.extractors.annex
- version: None
- audio:
- load_error: None
- module: datalad.metadata.extractors.audio
- version: None
- datacite:
- load_error: None
- module: datalad.metadata.extractors.datacite
- version: None
- datalad_core:
- load_error: None
- module: datalad.metadata.extractors.datalad_core
- version: None
- datalad_rfc822:
- load_error: None
- module: datalad.metadata.extractors.datalad_rfc822
- version: None
- exif:
- load_error: None
- module: datalad.metadata.extractors.exif
- version: None
- frictionless_datapackage:
- load_error: None
- module: datalad.metadata.extractors.frictionless_datapackage
- version: None
- image:
- load_error: None
- module: datalad.metadata.extractors.image
- version: None
- xmp:
- load_error: No module named 'libxmp' [xmp.py:<module>:20]
- module: datalad.metadata.extractors.xmp
## python
- implementation: CPython
- version: 3.7.6
## system
- distribution: CentOS Linux/7.7.1908/Core
- encoding:
- default: utf-8
- filesystem: utf-8
- locale.prefered: UTF-8
- max_path_length: 283
- name: Linux
- release: 3.10.0-1062.1.2.el7.x86_64
- type: posix
- version: #1 SMP Mon Sep 30 14:19:46 UTC 2019
```
</p>
</details> | 0.0 | [
"datalad/distributed/tests/test_ria_basics.py::test_sanitize_key"
] | [] | 2021-07-26 15:10:20+00:00 | 1,815 |
|
datalad__datalad-5881 | diff --git a/datalad/core/distributed/clone.py b/datalad/core/distributed/clone.py
index 12482f09f..62888b1f6 100644
--- a/datalad/core/distributed/clone.py
+++ b/datalad/core/distributed/clone.py
@@ -48,13 +48,14 @@ from datalad.support.constraints import (
from datalad.support.exceptions import DownloadError
from datalad.support.param import Parameter
from datalad.support.network import (
- get_local_file_url,
- download_url,
- is_url,
- URL,
- RI,
DataLadRI,
PathRI,
+ RI,
+ SSHRI,
+ URL,
+ download_url,
+ get_local_file_url,
+ is_url,
)
from datalad.dochelpers import (
exc_str,
@@ -1206,7 +1207,7 @@ def _get_installationpath_from_url(url):
from a URL, analog to what `git clone` does.
"""
ri = RI(url)
- if isinstance(ri, (URL, DataLadRI)): # decode only if URL
+ if isinstance(ri, (URL, DataLadRI, SSHRI)): # decode only if URL
path = ri.path.rstrip('/')
path = urlunquote(path) if path else ri.hostname
if '/' in path:
| datalad/datalad | 8004a5ee33669b969d755b56b2eb05aa6c911eba | diff --git a/datalad/core/distributed/tests/test_clone.py b/datalad/core/distributed/tests/test_clone.py
index a087c0a6d..08753a707 100644
--- a/datalad/core/distributed/tests/test_clone.py
+++ b/datalad/core/distributed/tests/test_clone.py
@@ -532,24 +532,41 @@ def test_clone_autoenable_msg_handles_sameas(repo, clone_path):
def test_installationpath_from_url():
- cases = (
+ # cases for all OSes
+ cases = [
'http://example.com/lastbit',
'http://example.com/lastbit.git',
'http://lastbit:8000',
- ) + (
+ # SSH
+ 'hostname:lastbit',
+ 'hostname:lastbit/',
+ 'hostname:subd/lastbit',
+ 'hostname:/full/path/lastbit',
+ 'hostname:lastbit/.git',
+ 'hostname:lastbit/.git/',
+ 'hostname:/full/path/lastbit/.git',
+ 'full.hostname.com:lastbit/.git',
+ '[email protected]:lastbit/.git',
+ 'ssh://user:[email protected]/full/path/lastbit',
+ 'ssh://user:[email protected]/full/path/lastbit/',
+ 'ssh://user:[email protected]/full/path/lastbit/.git',
+ ]
+ # OS specific cases
+ cases += [
'C:\\Users\\mih\\AppData\\Local\\Temp\\lastbit',
'C:\\Users\\mih\\AppData\\Local\\Temp\\lastbit\\',
'Temp\\lastbit',
'Temp\\lastbit\\',
'lastbit.git',
'lastbit.git\\',
- ) if on_windows else (
+ ] if on_windows else [
'lastbit',
'lastbit/',
'/lastbit',
'lastbit.git',
'lastbit.git/',
- )
+ ]
+
for p in cases:
eq_(_get_installationpath_from_url(p), 'lastbit')
# we need to deal with quoted urls
| clone: over ssh - (default) target directory contains `:` causing git to error out
This could probably add to the confusion in #5829, but that one has other issues preventing it from even getting this far; it would be seen there too eventually.
`maint`:
```
$> datalad clone smaug:datalad
[ERROR ] Failed to clone from any candidate source URL. Encountered errors per each url were:
| - smaug:datalad
CommandError: 'git -c diff.ignoreSubmodules=none clone --progress smaug:datalad /tmp/smaug:datalad' failed with exitcode 128 [err: 'Cloning into '/tmp/smaug:datalad'...
done.
fatal: update_ref failed for ref 'HEAD': cannot update ref 'refs/heads/master': trying to write ref 'refs/heads/master' with nonexistent object 010db68540fe0940a3e857f1ec5640c2978d5eaa'] [install(/tmp/smaug:datalad)]
install(error): /tmp/smaug:datalad (dataset) [Failed to clone from any candidate source URL. Encountered errors per each url were:
- smaug:datalad
CommandError: 'git -c diff.ignoreSubmodules=none clone --progress smaug:datalad /tmp/smaug:datalad' failed with exitcode 128 [err: 'Cloning into '/tmp/smaug:datalad'...
done.
fatal: update_ref failed for ref 'HEAD': cannot update ref 'refs/heads/master': trying to write ref 'refs/heads/master' with nonexistent object 010db68540fe0940a3e857f1ec5640c2978d5eaa']]
```
master:
```
lena:/tmp
$> datalad clone smaug:datalad
[ERROR ] Failed to clone from any candidate source URL. Encountered errors per each url were:
| - smaug:datalad
[install(/tmp/smaug:datalad)]
install(error): /tmp/smaug:datalad (dataset) [Failed to clone from any candidate source URL. Encountered errors per each url were:
- smaug:datalad
]
```
The more informative `maint` dump of errors is really confusing, but it does give a hint: `/tmp/smaug:datalad` as the target dir, which if you try
```
$> git clone smaug:datalad /tmp/smaug:datalad
Cloning into '/tmp/smaug:datalad'...
done.
fatal: update_ref failed for ref 'HEAD': cannot update ref 'refs/heads/master': trying to write ref 'refs/heads/master' with nonexistent object 010db68540fe0940a3e857f1ec5640c2978d5eaa
(dev3) 1 26922 ->128 [1].....................................:Mon 09 Aug 2021 02:43:33 PM EDT:.
lena:/tmp
$> fatal: the remote end hung up unexpectedly
```
you get that error msg.
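(For reference, `git clone` derives its default target directory from the path component only and never includes the host; a hedged sketch of that logic — the helper below is hypothetical, the actual fix goes through datalad's RI classes:)

```python
import re

def installation_path_from_url(url):
    # strip an scp-like "user@host:" prefix so the host name never ends up
    # in the directory name, then take the last path component
    m = re.match(r"^(?:[^@/]+@)?[^:/]+:(?P<path>.+)$", url)
    path = (m.group("path") if m else url).rstrip("/")
    for suffix in ("/.git", ".git"):
        if path.endswith(suffix):
            path = path[:-len(suffix)]
    return path.rstrip("/").rsplit("/", 1)[-1]

assert installation_path_from_url("smaug:datalad") == "datalad"
assert installation_path_from_url("[email protected]:lastbit/.git") == "lastbit"
```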
I think here we should follow `git` behavior and not use any portion of the host name for the target directory... will try to work out a fix now | 0.0 | [
"datalad/core/distributed/tests/test_clone.py::test_installationpath_from_url"
] | [
"datalad/core/distributed/tests/test_clone.py::test_decode_source_spec"
] | 2021-08-09 19:22:59+00:00 | 1,816 |
|
datalad__datalad-6242 | diff --git a/datalad/customremotes/__init__.py b/datalad/customremotes/__init__.py
index 81ee915b8..a35852f75 100644
--- a/datalad/customremotes/__init__.py
+++ b/datalad/customremotes/__init__.py
@@ -11,3 +11,62 @@
"""
__docformat__ = 'restructuredtext'
+
+__all__ = ['RemoteError', 'SpecialRemote']
+
+from annexremote import (
+ ProtocolError,
+ SpecialRemote as _SpecialRemote,
+ RemoteError as _RemoteError,
+)
+from datalad.support.exceptions import format_exception_with_cause
+
+
+class RemoteError(_RemoteError):
+ def __str__(self):
+ # this is a message given to remote error, if any
+ exc_str = super().__str__()
+ # this is the cause ala `raise from`
+ exc_cause = getattr(self, '__cause__', None)
+ if exc_cause:
+ # if we have a cause, collect the cause all the way down
+ # we can do quite some chaining
+ exc_cause = format_exception_with_cause(exc_cause)
+ if exc_str and exc_cause:
+ # with have the full picture
+ msg = f'{exc_str} caused by {exc_cause}'
+ elif exc_str and not exc_cause:
+ # only a custom message
+ msg = exc_str
+ elif not exc_str and exc_cause:
+ # only the cause
+ msg = exc_cause
+ else:
+ # nothing, shame!
+ msg = 'exception with unknown cause'
+ # prevent multiline messages, they would be swallowed
+ # or kill the protocol
+ return msg.replace('\n', '\\n')
+
+
+class SpecialRemote(_SpecialRemote):
+ """Common base class for all of DataLad's special remote implementations"""
+
+ def message(self, msg, type='debug'):
+ handler = dict(
+ debug=self.annex.debug,
+ info=self.annex.info,
+ error=self.annex.error,
+ ).get(type, self.annex.debug)
+
+ # ensure that no multiline messages are sent, they would cause a
+ # protocol error
+ msg = msg.replace('\n', '\\n')
+
+ try:
+ handler(msg)
+ except ProtocolError:
+ # INFO not supported by annex version.
+ # If we can't have an actual info message, at least have a
+ # debug message.
+ self.annex.debug(msg)
diff --git a/datalad/customremotes/archives.py b/datalad/customremotes/archives.py
index c9613c04d..a17ad5854 100644
--- a/datalad/customremotes/archives.py
+++ b/datalad/customremotes/archives.py
@@ -18,10 +18,7 @@ from collections import OrderedDict
from operator import itemgetter
from urllib.parse import urlparse
-from annexremote import (
- RemoteError,
- UnsupportedRequest,
-)
+from annexremote import UnsupportedRequest
from datalad.cmdline.helpers import get_repo_instance
from datalad.consts import ARCHIVES_SPECIAL_REMOTE
@@ -38,6 +35,7 @@ from datalad.utils import (
unlink,
)
+from datalad.customremotes import RemoteError
from .base import AnnexCustomRemote
lgr = logging.getLogger('datalad.customremotes.archive')
@@ -329,7 +327,7 @@ class ArchiveAnnexCustomRemote(AnnexCustomRemote):
return True
# it is unclear to MIH why this must be UNKNOWN rather than FALSE
# but this is how I found it
- raise RemoteError()
+ raise RemoteError('Key not present')
def remove(self, key):
raise UnsupportedRequest('This special remote cannot remove content')
@@ -410,17 +408,18 @@ class ArchiveAnnexCustomRemote(AnnexCustomRemote):
apath = self.cache[akey_path].get_extracted_file(afile)
link_file_load(apath, file)
if not was_extracted and self.cache[akey_path].is_extracted:
- self.annex.info(
+ self.message(
"%s special remote is using an extraction cache "
"under %s. Remove it with DataLad's 'clean' "
"command to save disk space." %
(ARCHIVES_SPECIAL_REMOTE,
- self.cache[akey_path].path)
+ self.cache[akey_path].path),
+ type='info',
)
return
except Exception as exc:
ce = CapturedException(exc)
- self.annex.debug(
+ self.message(
"Failed to fetch {akey} containing {key}: {msg}".format(
akey=akey,
key=key,
@@ -453,10 +452,11 @@ class ArchiveAnnexCustomRemote(AnnexCustomRemote):
from datalad.support.annexrepo import AnnexJsonProtocol
akey_size = self.repo.get_size_from_key(akey)
- self.annex.info(
+ self.message(
"To obtain some keys we need to fetch an archive "
"of size %s"
- % (naturalsize(akey_size) if akey_size else "unknown")
+ % (naturalsize(akey_size) if akey_size else "unknown"),
+ type='info',
)
try:
@@ -465,7 +465,7 @@ class ArchiveAnnexCustomRemote(AnnexCustomRemote):
protocol=AnnexJsonProtocol,
)
except Exception:
- self.annex.debug(f'Failed to fetch archive with key {akey}')
+ self.message(f'Failed to fetch archive with key {akey}')
raise
diff --git a/datalad/customremotes/base.py b/datalad/customremotes/base.py
index 3df0045e5..1dc1a6aac 100644
--- a/datalad/customremotes/base.py
+++ b/datalad/customremotes/base.py
@@ -20,9 +20,9 @@ lgr = logging.getLogger('datalad.customremotes')
from annexremote import (
RemoteError,
- SpecialRemote,
UnsupportedRequest,
)
+from datalad.customremotes import SpecialRemote
from datalad.ui import ui
diff --git a/datalad/customremotes/datalad.py b/datalad/customremotes/datalad.py
index 2bc639f81..b8795d317 100644
--- a/datalad/customremotes/datalad.py
+++ b/datalad/customremotes/datalad.py
@@ -13,15 +13,15 @@ __docformat__ = 'restructuredtext'
import logging
from urllib.parse import urlparse
-from annexremote import RemoteError
-
from datalad.downloaders.providers import Providers
from datalad.support.exceptions import (
CapturedException,
TargetFileAbsent,
)
+from datalad.utils import unique
-from .base import AnnexCustomRemote
+from datalad.customremotes import RemoteError
+from datalad.customremotes.base import AnnexCustomRemote
lgr = logging.getLogger('datalad.customremotes.datalad')
@@ -39,6 +39,7 @@ class DataladAnnexCustomRemote(AnnexCustomRemote):
def transfer_retrieve(self, key, file):
urls = []
+ error_causes = []
# TODO: priorities etc depending on previous experience or settings
for url in self.gen_URLS(key):
urls.append(url)
@@ -50,10 +51,17 @@ class DataladAnnexCustomRemote(AnnexCustomRemote):
return
except Exception as exc:
ce = CapturedException(exc)
- self.annex.debug("Failed to download url %s for key %s: %s"
- % (url, key, ce))
- raise RemoteError(
- f"Failed to download from any of {len(urls)} locations")
+ cause = getattr(exc, '__cause__', None)
+ debug_msg = f"Failed to download {url} for key {key}: {ce}"
+ if cause:
+ debug_msg += f' [{cause}]'
+ self.message(debug_msg)
+ error_causes.append(cause)
+
+ error_msg = f"Failed to download from any of {len(urls)} locations"
+ if error_causes:
+ error_msg += f' {unique(error_causes)}'
+ raise RemoteError(error_msg)
def checkurl(self, url):
try:
@@ -64,7 +72,7 @@ class DataladAnnexCustomRemote(AnnexCustomRemote):
return [props]
except Exception as exc:
ce = CapturedException(exc)
- self.annex.debug("Failed to check url %s: %s" % (url, ce))
+ self.message("Failed to check url %s: %s" % (url, ce))
return False
def checkpresent(self, key):
@@ -80,7 +88,7 @@ class DataladAnnexCustomRemote(AnnexCustomRemote):
# N/A, probably check the connection etc
except TargetFileAbsent as exc:
ce = CapturedException(exc)
- self.annex.debug(
+ self.message(
"Target url %s file seems to be missing: %s" % (url, ce))
if not resp:
# if it is already marked as UNKNOWN -- let it stay that
@@ -89,7 +97,7 @@ class DataladAnnexCustomRemote(AnnexCustomRemote):
return False
except Exception as exc:
ce = CapturedException(exc)
- self.annex.debug(
+ self.message(
"Failed to check status of url %s: %s" % (url, ce))
if resp is None:
raise RemoteError(f'Could not determine presence of key {key}')
diff --git a/datalad/distributed/ora_remote.py b/datalad/distributed/ora_remote.py
index 85bf1777b..282f2ab3e 100644
--- a/datalad/distributed/ora_remote.py
+++ b/datalad/distributed/ora_remote.py
@@ -1,7 +1,3 @@
-from annexremote import SpecialRemote
-from annexremote import RemoteError
-from annexremote import ProtocolError
-
import os
from pathlib import (
Path,
@@ -13,6 +9,11 @@ from shlex import quote as sh_quote
import subprocess
import logging
from functools import wraps
+
+from datalad.customremotes import (
+ RemoteError,
+ SpecialRemote,
+)
from datalad.customremotes.ria_utils import (
get_layout_locations,
UnknownLayoutVersion,
@@ -78,9 +79,7 @@ class RemoteCommandFailedError(Exception):
class RIARemoteError(RemoteError):
-
- def __init__(self, msg):
- super().__init__(msg.replace('\n', '\\n'))
+ pass
class IOBase(object):
@@ -214,9 +213,9 @@ class LocalIO(IOBase):
try:
path.unlink()
except PermissionError as e:
- raise RIARemoteError(str(e) + os.linesep +
- "Note: Write permissions for a key's parent"
- "directory are also required to drop content.")
+ raise RIARemoteError(
+ "Write permissions for a key's parent directory are "
+ "also required to drop content.") from e
def remove_dir(self, path):
path.rmdir()
@@ -472,7 +471,7 @@ class SSHRemoteIO(IOBase):
try:
size = self._get_download_size_from_key(key)
except RemoteError as e:
- raise RemoteError("src: {}".format(str(src)) + str(e))
+ raise RemoteError(f"src: {src}") from e
if size is None:
# rely on SCP for now
@@ -501,9 +500,8 @@ class SSHRemoteIO(IOBase):
self._run('rm {}'.format(sh_quote(str(path))))
except RemoteCommandFailedError as e:
raise RIARemoteError(
- str(e) + os.linesep +
- "Note: Write permissions for a key's parent"
- "directory are also required to drop content.")
+ "Write permissions for a key's parent"
+ "directory are also required to drop content.") from e
def remove_dir(self, path):
self._run('rmdir {}'.format(sh_quote(str(path))))
@@ -575,8 +573,8 @@ class SSHRemoteIO(IOBase):
cmd = "cat {}".format(sh_quote(str(file_path)))
try:
out = self._run(cmd, no_output=False, check=True)
- except RemoteCommandFailedError:
- raise RIARemoteError("Could not read {}".format(str(file_path)))
+ except RemoteCommandFailedError as e:
+ raise RIARemoteError(f"Could not read {file_path}") from e
return out
@@ -597,8 +595,8 @@ class SSHRemoteIO(IOBase):
sh_quote(str(file_path)))
try:
self._run(cmd, check=True)
- except RemoteCommandFailedError:
- raise RIARemoteError("Could not write to {}".format(str(file_path)))
+ except RemoteCommandFailedError as e:
+ raise RIARemoteError(f"Could not write to {file_path}") from e
def get_7z(self):
# TODO: To not rely on availability in PATH we might want to use `which`
@@ -666,7 +664,7 @@ class HTTPRemoteIO(object):
try:
response = requests.head(url, allow_redirects=True)
except Exception as e:
- raise RIARemoteError(str(e))
+ raise RIARemoteError from e
return response.status_code == 200
@@ -736,7 +734,7 @@ def handle_errors(func):
pass
if not isinstance(e, RIARemoteError):
- raise RIARemoteError(str(e))
+ raise RIARemoteError from e
else:
raise e
@@ -874,7 +872,7 @@ class RIARemote(SpecialRemote):
self._get_version_config(object_tree_version_file)
if self.remote_object_tree_version not in self.known_versions_objt:
raise UnknownLayoutVersion
- except (RemoteError, FileNotFoundError):
+ except (RemoteError, FileNotFoundError) as e:
# Exception class depends on whether self.io is local or SSH.
# assume file doesn't exist
# TODO: Is there a possibility RemoteError has a different reason
@@ -882,9 +880,9 @@ class RIARemote(SpecialRemote):
# Don't think so ATM. -> Reconsider with new execution layer.
if not self.io.exists(object_tree_version_file.parent):
# unify exception
- raise FileNotFoundError
+ raise e
else:
- raise NoLayoutVersion
+ raise NoLayoutVersion from e
def _load_cfg(self, gitdir, name):
# Whether or not to force writing to the remote. Currently used to
@@ -953,7 +951,8 @@ class RIARemote(SpecialRemote):
" Use 'git annex enableremote {} "
"url=<RIA-URL-TO-STORE>' to store a ria+<scheme>:"
"//... URL in the special remote's config."
- "".format(name))
+ "".format(name),
+ type='info')
if not self.store_base_path:
raise RIARemoteError(
@@ -997,7 +996,8 @@ class RIARemote(SpecialRemote):
file_content = self.io.read_file(path).strip().split('|')
if not (1 <= len(file_content) <= 2):
- self.message("invalid version file {}".format(path))
+ self.message("invalid version file {}".format(path),
+ type='info')
return None
remote_version = file_content[0]
@@ -1097,27 +1097,13 @@ class RIARemote(SpecialRemote):
# + just isinstance(LocalIO)?
return not self.storage_host
- def debug(self, msg):
- # Annex prints just the message, so prepend with
- # a "DEBUG" on our own.
- self.annex.debug("ORA-DEBUG: " + msg)
-
- def message(self, msg):
- try:
- self.annex.info(msg)
- except ProtocolError:
- # INFO not supported by annex version.
- # If we can't have an actual info message, at least have a
- # debug message.
- self.debug(msg)
-
def _set_read_only(self, msg):
if not self.force_write:
self.read_only = True
- self.message(msg)
+ self.message(msg, type='info')
else:
- self.message("Was instructed to force write")
+ self.message("Was instructed to force write", type='info')
def _ensure_writeable(self):
if self.read_only:
@@ -1169,7 +1155,7 @@ class RIARemote(SpecialRemote):
if not self._push_io:
if self.ria_store_pushurl:
- self.debug("switching ORA to push-url")
+ self.message("switching ORA to push-url")
# Not-implemented-push-HTTP is ruled out already when reading
# push-url, so either local or SSH:
if not self.storage_host_push:
@@ -1291,7 +1277,8 @@ class RIARemote(SpecialRemote):
self.io.get_from_archive(archive_path, key_path, filename,
self.annex.progress)
except Exception as e2:
- raise RIARemoteError('Failed to key: {}'
+ # TODO properly report the causes
+ raise RIARemoteError('Failed to obtain key: {}'
''.format([str(e1), str(e2)]))
@handle_errors
diff --git a/datalad/downloaders/s3.py b/datalad/downloaders/s3.py
index 48260d547..e0dc5f4dc 100644
--- a/datalad/downloaders/s3.py
+++ b/datalad/downloaders/s3.py
@@ -351,8 +351,8 @@ class S3Downloader(BaseDownloader):
url_filepath, version_id=params.get('versionId', None)
)
except S3ResponseError as e:
- raise TargetFileAbsent("S3 refused to provide the key for %s from url %s: %s"
- % (url_filepath, url, e))
+ raise TargetFileAbsent("S3 refused to provide the key for %s from url %s"
+ % (url_filepath, url)) from e
if key is None:
raise TargetFileAbsent("No key returned for %s from url %s" % (url_filepath, url))
diff --git a/datalad/support/exceptions.py b/datalad/support/exceptions.py
index 9cd5d11a6..00867a803 100644
--- a/datalad/support/exceptions.py
+++ b/datalad/support/exceptions.py
@@ -185,6 +185,26 @@ def format_oneline_tb(exc, tb=None, limit=None, include_str=True):
return out
+def format_exception_with_cause(e):
+ """Helper to recursively format an exception with all underlying causes
+
+ For each exception in the chain either the str() of it is taken, or the
+ class name of the exception, with the aim to generate a simple and
+ comprehensible description that can be used in user-facing messages.
+ It is explicitly not aiming to provide a detailed/comprehensive source
+ of information for in-depth debugging.
+
+ '-caused by-' is used as a separator between exceptions to be human-readable
+ while being recognizably different from potential exception payload
+ messages.
+ """
+ s = str(e) or e.__class__.__name__
+ exc_cause = getattr(e, '__cause__', None)
+ if exc_cause:
+ s += f' -caused by- {format_exception_with_cause(exc_cause)}'
+ return s
+
+
class MissingExternalDependency(RuntimeError):
"""External dependency is missing error"""
| datalad/datalad | 6b4d5567eef9cef170b09895148a8cd991bea4ec | diff --git a/datalad/support/tests/test_captured_exception.py b/datalad/support/tests/test_captured_exception.py
index 4d56fa262..c8c22b77d 100644
--- a/datalad/support/tests/test_captured_exception.py
+++ b/datalad/support/tests/test_captured_exception.py
@@ -1,7 +1,13 @@
from unittest.mock import patch
-from nose.tools import assert_equal, assert_true
-from datalad.support.exceptions import CapturedException
-from datalad.tests.utils import assert_re_in
+from datalad.support.exceptions import (
+ format_exception_with_cause,
+ CapturedException,
+)
+from datalad.tests.utils import (
+ assert_equal,
+ assert_re_in,
+ assert_true,
+)
from datalad import cfg
@@ -78,3 +84,28 @@ def test_CapturedException():
# CapturedException.__repr__:
assert_re_in(r".*test_captured_exception.py:f2:[0-9]+\]$",
captured_exc.__repr__())
+
+
+def makeitraise():
+ def raise_valueerror():
+ try:
+ raise_runtimeerror()
+ except Exception as e:
+ raise ValueError from e
+
+ def raise_runtimeerror():
+ raise RuntimeError("Mike")
+
+ try:
+ raise_valueerror()
+ except Exception as e:
+ raise RuntimeError from e
+
+
+def test_format_exception_with_cause():
+ try:
+ makeitraise()
+ except Exception as e:
+ assert_equal(
+ format_exception_with_cause(e),
+ 'RuntimeError -caused by- ValueError -caused by- Mike')
| S3 credential issue not communicated and breaks special remote protocol in master
This is similar to #5469, but a different issue.
I have (intentionally) broken S3 credentials in my keyring. Now trying to download a file from the HCP dataset
```
% datalad get HCP1200/143426/T1w/T1w_acpc_dc.nii.gz
get(error): HCP1200/143426/T1w/T1w_acpc_dc.nii.gz (file) [TRANSFER failed with no reason given
TRANSFER failed with no reason given
TRANSFER failed with no reason given]
action summary:
get (error: 1, notneeded: 1)
```
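For context, a minimal sketch of the kind of cause-chain reporting that would make such a failure diagnosable (helper name, separator, and error messages are purely illustrative, not existing API):

```python
# Walk the `raise ... from ...` chain and render it as one line.
def format_with_cause(e: BaseException) -> str:
    s = str(e) or e.__class__.__name__
    if e.__cause__ is not None:
        s += f' -caused by- {format_with_cause(e.__cause__)}'
    return s

try:
    try:
        raise RuntimeError('S3 refused to provide the key (403 Forbidden)')
    except RuntimeError as e:
        raise ValueError('download failed') from e
except ValueError as err:
    print(format_with_cause(err))
    # -> download failed -caused by- S3 refused to provide the key (403 Forbidden)
```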
Because I created the invalid credentials, I know what the reason is. However, in the general case it seems unreasonable to not be able to tell that a download failed because of invalid authentication. Even turning on `-l debug` does not reveal more information. | 0.0 | [
"datalad/support/tests/test_captured_exception.py::test_CapturedException",
"datalad/support/tests/test_captured_exception.py::test_format_exception_with_cause"
] | [] | 2021-11-24 11:35:14+00:00 | 1,817 |
|
datalad__datalad-7226 | diff --git a/.appveyor.yml b/.appveyor.yml
index 86566adfc..e6e00dcab 100644
--- a/.appveyor.yml
+++ b/.appveyor.yml
@@ -95,7 +95,7 @@ environment:
- ID: MacP38core
# ~40min
DTS: datalad.core datalad.dataset datalad.runner datalad.support
- APPVEYOR_BUILD_WORKER_IMAGE: macOS
+ APPVEYOR_BUILD_WORKER_IMAGE: macos-monterey
PY: 3.8
# does not give a functional installation
# INSTALL_GITANNEX: git-annex -m snapshot
@@ -154,7 +154,7 @@ environment:
datalad.interface
datalad.tests
datalad.ui
- APPVEYOR_BUILD_WORKER_IMAGE: macOS
+ APPVEYOR_BUILD_WORKER_IMAGE: macos-monterey
PY: 3.8
INSTALL_GITANNEX: git-annex
DATALAD_LOCATIONS_SOCKETS: /Users/appveyor/DLTMP/sockets
@@ -164,7 +164,7 @@ environment:
DTS: >
datalad.local
datalad.distributed
- APPVEYOR_BUILD_WORKER_IMAGE: macOS
+ APPVEYOR_BUILD_WORKER_IMAGE: macos-monterey
PY: 3.8
INSTALL_GITANNEX: git-annex
DATALAD_LOCATIONS_SOCKETS: /Users/appveyor/DLTMP/sockets
diff --git a/changelog.d/pr-7226.md b/changelog.d/pr-7226.md
new file mode 100644
index 000000000..03f0d2171
--- /dev/null
+++ b/changelog.d/pr-7226.md
@@ -0,0 +1,6 @@
+### 🐛 Bug Fixes
+
+- Interface-specific (python vs CLI) doc generation for commands and their parameters was broken when brackets were used within the interface markups.
+ Fixes [#7225](https://github.com/datalad/datalad/issues/7225) via
+ [PR #7226](https://github.com/datalad/datalad/pull/7226)
+ (by [@bpoldrack](https://github.com/bpoldrack))
diff --git a/datalad/cli/interface.py b/datalad/cli/interface.py
index 6a5d60212..bee96588a 100644
--- a/datalad/cli/interface.py
+++ b/datalad/cli/interface.py
@@ -60,15 +60,15 @@ def alter_interface_docs_for_cmdline(docs):
flags=re.MULTILINE | re.DOTALL)
# clean cmdline in-line bits
docs = re.sub(
- r'\[PY:\s[^\[\]]*\sPY\]',
+ r'\[PY:\s.*?\sPY\]',
'',
docs,
flags=re.MULTILINE | re.DOTALL)
docs = re.sub(
- r'\[CMD:\s([^\[\]]*)\sCMD\]',
+ r'\[CMD:\s(.*?)\sCMD\]',
lambda match: match.group(1),
docs,
- flags=re.MULTILINE)
+ flags=re.MULTILINE | re.DOTALL)
docs = re.sub(
r'\|\| CMDLINE \>\>(.*?)\<\< CMDLINE \|\|',
lambda match: match.group(1),
diff --git a/datalad/interface/base.py b/datalad/interface/base.py
index 6dc512586..4bdbdd17e 100644
--- a/datalad/interface/base.py
+++ b/datalad/interface/base.py
@@ -184,15 +184,15 @@ def alter_interface_docs_for_api(docs):
flags=re.MULTILINE | re.DOTALL)
# clean cmdline in-line bits
docs = re.sub(
- r'\[CMD:\s[^\[\]]*\sCMD\]',
+ r'\[CMD:\s.*?\sCMD\]',
'',
docs,
flags=re.MULTILINE | re.DOTALL)
docs = re.sub(
- r'\[PY:\s([^\[\]]*)\sPY\]',
+ r'\[PY:\s(.*?)\sPY\]',
lambda match: match.group(1),
docs,
- flags=re.MULTILINE)
+ flags=re.MULTILINE | re.DOTALL)
# select only the python alternative from argument specifications
docs = re.sub(
r'``([a-zA-Z0-9_,.]+)\|\|([a-zA-Z0-9-,.]+)``',
| datalad/datalad | 95214312babc6d8fcff3e40b3d6540b372497bf1 | diff --git a/datalad/cli/tests/test_interface.py b/datalad/cli/tests/test_interface.py
index 7090988f6..9a00cbcb2 100644
--- a/datalad/cli/tests/test_interface.py
+++ b/datalad/cli/tests/test_interface.py
@@ -28,6 +28,12 @@ def test_alter_interface_docs_for_cmdline():
assert_in('a b', alt)
assert_in('not\n reflowed', alt)
assert_in("Something for the cmdline only Multiline!", alt)
+ assert_not_in("Some Python-only bits", alt)
+ assert_not_in("just for Python", alt)
+ assert_in("just for the command line", alt)
+ assert_in("multiline cli-only with [ brackets\n[] ]", alt)
+ assert_not_in("multiline\npython-only with [ brackets [] ]", alt)
+
# args
altarg = alter_interface_docs_for_cmdline(demo_argdoc)
# RST role markup
@@ -42,10 +48,15 @@ def test_alter_interface_docs_for_cmdline():
'one bla bla two bla')
altpd = alter_interface_docs_for_cmdline(demo_paramdoc)
+ assert_not_in("PY", altpd)
+ assert_not_in("CMD", altpd)
assert_not_in('python', altpd)
+ assert_not_in("python-only with [ some brackets []", altpd)
assert_in('in between', altpd)
assert_in('appended', altpd)
assert_in('cmdline', altpd)
+ assert_in("multiline cli-only [\n brackets included "
+ "[ can we also have || ?]", altpd)
def test_name_generation():
diff --git a/datalad/interface/tests/test_docs.py b/datalad/interface/tests/test_docs.py
index fdf0e872d..7dda2fab5 100644
--- a/datalad/interface/tests/test_docs.py
+++ b/datalad/interface/tests/test_docs.py
@@ -57,7 +57,9 @@ demo_doc = """\
<< PYTHON ||
And an example for in-line markup: [PY: just for Python PY] and
- the other one [CMD: just for the command line CMD]. End of demo.
+ the other one [CMD: just for the command line CMD]. [PY: multiline
+ python-only with [ brackets [] ] PY][CMD: multiline cli-only with [ brackets
+ [] ] CMD]. End of demo.
Generic appendix. Ding dong ding dong ding dong. Ding dong ding dong ding
dong. Ding dong ding dong ding dong. Ding dong ding dong ding dong. Ding
@@ -70,7 +72,10 @@ demo_paramdoc = """\
Parameters
----------
dataset : Dataset or None, optional
- something [PY: python only PY] in between [CMD: cmdline only CMD] appended [PY: more python PY]
+ something [PY: python only PY] in between [CMD: cmdline only CMD] appended
+ Brackets can also be within and we can deal with [PY: multiline
+ python-only with [ some brackets [] PY] [CMD: multiline cli-only [
+ brackets included [ can we also have || ?] CMD].
dataset is given, an attempt is made to identify the dataset based
Dataset (e.g. a path), or value must be `None`. [Default: None]
"""
@@ -102,9 +107,19 @@ def test_alter_interface_docs_for_api():
assert_in('a b', alt)
assert_in('not\n reflowed', alt)
assert_in("Some Python-only bits Multiline!", alt)
+ assert_in("Some Python-only bits", alt)
+ assert_in("just for Python", alt)
+ assert_not_in("just for the command line", alt)
+ assert_not_in("multiline cli-only with [ brackets\n[] ]", alt)
+ assert_in("multiline\npython-only with [ brackets [] ]", alt)
altpd = alter_interface_docs_for_api(demo_paramdoc)
+ assert_not_in("PY", altpd)
+ assert_not_in("CMD", altpd)
assert_in('python', altpd)
assert_in('in between', altpd)
assert_in('appended', altpd)
+ assert_in("multiline\n python-only with [ some brackets []", altpd)
assert_not_in('cmdline', altpd)
+ assert_not_in("multiline cli-only [\n brackets included "
+ "[ can we also have || ?]", altpd)
| dochelpers `[PY: PY]` macro handling cannot deal with `[]` in the conditional docs
The datalad-next `credentials` command shows it in the CLI for its `spec` parameter. It fails to find the macro and leaves it in | 0.0 | [
"datalad/cli/tests/test_interface.py::test_alter_interface_docs_for_cmdline",
"datalad/interface/tests/test_docs.py::test_alter_interface_docs_for_api"
] | [
"datalad/cli/tests/test_interface.py::test_name_generation",
"datalad/interface/tests/test_docs.py::test_dedent"
] | 2022-12-13 17:06:23+00:00 | 1,818 |
|
datalad__datalad-next-149 | diff --git a/datalad_next/exceptions.py b/datalad_next/exceptions.py
index 85015a3..4e99d16 100644
--- a/datalad_next/exceptions.py
+++ b/datalad_next/exceptions.py
@@ -4,8 +4,17 @@ from datalad.runner import CommandError
from datalad.support.exceptions import (
AccessDeniedError,
AccessFailedError,
- NoDatasetFound,
CapturedException,
- IncompleteResultsError,
DownloadError,
+ IncompleteResultsError,
+ NoDatasetFound,
+ TargetFileAbsent,
)
+
+# derive from TargetFileAbsent as the closest equivalent in datalad-core
+class UrlTargetNotFound(TargetFileAbsent):
+ """A connection request succeeded in principle, but target was not found
+
+ Equivalent of an HTTP404 response.
+ """
+ pass
diff --git a/datalad_next/url_operations/__init__.py b/datalad_next/url_operations/__init__.py
index 1a4f8ba..7b15a3a 100644
--- a/datalad_next/url_operations/__init__.py
+++ b/datalad_next/url_operations/__init__.py
@@ -64,6 +64,11 @@ class UrlOperations:
status code) as its `status` property. Any underlying exception must
be linked via the `__cause__` property (e.g. `raise
AccessFailedError(...) from ...`).
+ UrlTargetNotFound
+ Implementations that can distinguish a general "connection error"
+ from an absent download target raise `AccessFailedError` for
+ connection errors, and `UrlTargetNotFound` for download targets
+ found absent after a connection was established successfully.
"""
raise NotImplementedError
@@ -111,6 +116,12 @@ class UrlOperations:
a status code (e.g. HTTP status code) as its `status` property.
Any underlying exception must be linked via the `__cause__`
property (e.g. `raise DownloadError(...) from ...`).
+ AccessFailedError
+ UrlTargetNotFound
+ Implementations that can distinguish a general "connection error"
+ from an absent download target raise `AccessFailedError` for
+ connection errors, and `UrlTargetNotFound` for download targets
+ found absent after a connection was established successfully.
"""
raise NotImplementedError
diff --git a/datalad_next/url_operations/file.py b/datalad_next/url_operations/file.py
index e174d5e..39ebe9c 100644
--- a/datalad_next/url_operations/file.py
+++ b/datalad_next/url_operations/file.py
@@ -7,7 +7,7 @@ import logging
from pathlib import Path
try:
from shutil import COPY_BUFSIZE
-except ImportError:
+except ImportError: # pragma: no cover
# too old
from datalad_next.utils import on_windows
# from PY3.10
@@ -19,7 +19,7 @@ from urllib import (
parse,
)
-from datalad_next.exceptions import DownloadError
+from datalad_next.exceptions import UrlTargetNotFound
from . import UrlOperations
@@ -43,6 +43,16 @@ class FileUrlOperations(UrlOperations):
return Path(path)
def sniff(self, url: str, *, credential: str = None) -> Dict:
+ """Gather information on a URL target, without downloading it
+
+ See :meth:`datalad_next.url_operations.UrlOperations.sniff`
+ for parameter documentation.
+
+ Raises
+ ------
+ UrlTargetNotFound
+ Raises `UrlTargetNotFound` for download targets found absent.
+ """
# filter out internals
return {
k: v for k, v in self._sniff(url, credential).items()
@@ -54,7 +64,10 @@ class FileUrlOperations(UrlOperations):
from_path = self._file_url_to_path(url)
# if anything went wrong with the conversion, or we lack
# permissions: die here
- size = from_path.stat().st_size
+ try:
+ size = from_path.stat().st_size
+ except FileNotFoundError as e:
+ raise UrlTargetNotFound(url) from e
return {
'content-length': size,
'_path': from_path,
@@ -73,6 +86,11 @@ class FileUrlOperations(UrlOperations):
See :meth:`datalad_next.url_operations.UrlOperations.download`
for parameter documentation.
+
+ Raises
+ ------
+ UrlTargetNotFound
+ Raises `UrlTargetNotFound` for download targets found absent.
"""
# this is pretty much shutil.copyfileobj() with the necessary
# wrapping to perform hashing and progress reporting
@@ -107,7 +125,7 @@ class FileUrlOperations(UrlOperations):
except Exception as e:
# wrap this into the datalad-standard, but keep the
# original exception linked
- raise DownloadError(msg=str(e)) from e
+ raise UrlTargetNotFound(msg=str(e)) from e
finally:
if dst_fp and to_path is not None:
dst_fp.close()
diff --git a/datalad_next/url_operations/http.py b/datalad_next/url_operations/http.py
index a55c53b..2d73961 100644
--- a/datalad_next/url_operations/http.py
+++ b/datalad_next/url_operations/http.py
@@ -13,7 +13,11 @@ from requests_toolbelt.downloadutils.tee import tee as requests_tee
import www_authenticate
import datalad
-from datalad_next.exceptions import DownloadError
+from datalad_next.exceptions import (
+ AccessFailedError,
+ UrlTargetNotFound,
+ DownloadError,
+)
from datalad_next.requests_auth import DataladAuth
from . import UrlOperations
@@ -45,6 +49,19 @@ class HttpUrlOperations(UrlOperations):
return hdrs
def sniff(self, url: str, *, credential: str = None) -> Dict:
+ """Gather information on a URL target, without downloading it
+
+ See :meth:`datalad_next.url_operations.UrlOperations.sniff`
+ for parameter documentation.
+
+ Raises
+ ------
+ AccessFailedError
+ UrlTargetNotFound
+ Raises `AccessFailedError` for connection errors, and
+ `UrlTargetNotFound` for download targets found absent after a
+ connection was established successfully.
+ """
auth = DataladAuth(self.cfg, credential=credential)
with requests.head(
url,
@@ -61,8 +78,13 @@ class HttpUrlOperations(UrlOperations):
except requests.exceptions.RequestException as e:
# wrap this into the datalad-standard, but keep the
# original exception linked
- raise AccessFailedError(
- msg=str(e), status=e.response.status_code) from e
+ if e.response.status_code == 404:
+ # special case reporting for a 404
+ raise UrlTargetNotFound(
+ url, status=e.response.status_code) from e
+ else:
+ raise AccessFailedError(
+ msg=str(e), status=e.response.status_code) from e
props = {
# standardize on lower-case header keys.
# also prefix anything other than 'content-length' to make
@@ -75,6 +97,13 @@ class HttpUrlOperations(UrlOperations):
auth.save_entered_credential(
context=f'sniffing {url}'
)
+ if 'content-length' in props:
+ # make an effort to return size in bytes as int
+ try:
+ props['content-length'] = int(props['content-length'])
+ except (TypeError, ValueError):
+ # but be resonably robust against unexpected responses
+ pass
return props
def download(self,
@@ -87,6 +116,14 @@ class HttpUrlOperations(UrlOperations):
See :meth:`datalad_next.url_operations.UrlOperations.download`
for parameter documentation.
+
+ Raises
+ ------
+ AccessFailedError
+ UrlTargetNotFound
+ Raises `AccessFailedError` for connection errors, and
+ `UrlTargetNotFound` for download targets found absent after a
+ connection was established successfully.
"""
# a new manager per request
# TODO optimize later to cache credentials per target
@@ -104,8 +141,13 @@ class HttpUrlOperations(UrlOperations):
except requests.exceptions.RequestException as e:
# wrap this into the datalad-standard, but keep the
# original exception linked
- raise DownloadError(
- msg=str(e), status=e.response.status_code) from e
+ if e.response.status_code == 404:
+ # special case reporting for a 404
+ raise UrlTargetNotFound(
+ from_url, status=e.response.status_code) from e
+ else:
+ raise AccessFailedError(
+ msg=str(e), status=e.response.status_code) from e
download_props = self._stream_download_from_request(
r, to_path, hash=hash)
diff --git a/datalad_next/url_operations/ssh.py b/datalad_next/url_operations/ssh.py
index c6ae6f0..9b63b99 100644
--- a/datalad_next/url_operations/ssh.py
+++ b/datalad_next/url_operations/ssh.py
@@ -16,7 +16,12 @@ from urllib.parse import urlparse
from datalad.runner import StdOutCapture
from datalad.runner.protocol import GeneratorMixIn
from datalad.runner.nonasyncrunner import ThreadedRunner
-from datalad_next.exceptions import DownloadError
+from datalad_next.exceptions import (
+ AccessFailedError,
+ CommandError,
+ DownloadError,
+ UrlTargetNotFound,
+)
from . import UrlOperations
@@ -40,17 +45,42 @@ class SshUrlOperations(UrlOperations):
likely to be removed in the future, and connection multiplexing
supported where possible (non-Windows platforms).
"""
- _stat_cmd = "printf \"\1\2\3\"; ls -nl '{fpath}' | awk 'BEGIN {{ORS=\"\1\"}} {{print $5}}'"
+ # first try ls'ing the path, and catch a missing path with a dedicated 244
+ # exit code, to be able to distinguish the original exit=2 of that ls-call
+ # from a later exit=2 from awk in case of a "fatal error".
+ # When executed through ssh, only a missing file would yield 244, while
+ # a connection error or other problem unrelated to the presence of a file
+ # would yield a different error code (255 in case of a connection error)
+ _stat_cmd = "printf \"\1\2\3\"; ls '{fpath}' &> /dev/null " \
+ "&& ls -nl '{fpath}' | awk 'BEGIN {{ORS=\"\1\"}} {{print $5}}' " \
+ "|| exit 244"
_cat_cmd = "cat '{fpath}'"
def sniff(self, url: str, *, credential: str = None) -> Dict:
+ """Gather information on a URL target, without downloading it
+
+ See :meth:`datalad_next.url_operations.UrlOperations.sniff`
+ for parameter documentation.
+
+ Raises
+ ------
+ AccessFailedError
+ UrlTargetNotFound
+ Raises `AccessFailedError` for connection errors, and
+ `UrlTargetNotFound` for download targets found absent after a
+ connection was established successfully.
+ """
try:
props = self._sniff(
url,
cmd=SshUrlOperations._stat_cmd,
)
- except Exception as e:
- raise AccessFailedError(str(e)) from e
+ except CommandError as e:
+ if e.code == 244:
+ # this is the special code for a file-not-found
+ raise UrlTargetNotFound(url) from e
+ else:
+ raise AccessFailedError(str(e)) from e
return {k: v for k, v in props.items() if not k.startswith('_')}
@@ -73,7 +103,8 @@ class SshUrlOperations(UrlOperations):
incoming_magic = chunk[:len(need_magic)]
# does the incoming data have the remaining magic bytes?
if incoming_magic != expected_magic:
- raise ValueError("magic missing")
+ raise RuntimeError(
+ "Protocol error: report header not received")
# reduce (still missing) magic, if any
need_magic = need_magic[len(expected_magic):]
# strip magic from input
@@ -113,6 +144,14 @@ class SshUrlOperations(UrlOperations):
See :meth:`datalad_next.url_operations.UrlOperations.download`
for parameter documentation.
+
+ Raises
+ ------
+ AccessFailedError
+ UrlTargetNotFound
+ Raises `AccessFailedError` for connection errors, and
+ `UrlTargetNotFound` for download targets found absent after a
+ connection was established successfully.
"""
# this is pretty much shutil.copyfileobj() with the necessary
# wrapping to perform hashing and progress reporting
@@ -144,10 +183,14 @@ class SshUrlOperations(UrlOperations):
self._progress_report_update(progress_id, len(chunk))
props.update(self._get_hash_report(hash, hasher))
return props
- except Exception as e:
- # wrap this into the datalad-standard, but keep the
- # original exception linked
- raise DownloadError(msg=str(e)) from e
+ except CommandError as e:
+ if e.code == 244:
+ # this is the special code for a file-not-found
+ raise UrlTargetNotFound(from_url) from e
+ else:
+ # wrap this into the datalad-standard, but keep the
+ # original exception linked
+ raise AccessFailedError(msg=str(e)) from e
finally:
if dst_fp and to_path is not None:
dst_fp.close()
| datalad/datalad-next | cb4aca3d5eb7c13d58fb44c504a89ad3d195a743 | diff --git a/datalad_next/tests/utils.py b/datalad_next/tests/utils.py
index ccd15a1..1608a4d 100644
--- a/datalad_next/tests/utils.py
+++ b/datalad_next/tests/utils.py
@@ -29,6 +29,7 @@ from datalad.tests.utils_pytest import (
rmtree,
serve_path_via_http,
skip_if_on_windows,
+ skip_ssh,
skip_wo_symlink_capability,
swallow_logs,
with_tempfile,
diff --git a/datalad_next/url_operations/tests/__init__.py b/datalad_next/url_operations/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/datalad_next/url_operations/tests/test_any.py b/datalad_next/url_operations/tests/test_any.py
new file mode 100644
index 0000000..bd07288
--- /dev/null
+++ b/datalad_next/url_operations/tests/test_any.py
@@ -0,0 +1,39 @@
+import pytest
+from datalad_next.exceptions import (
+ AccessFailedError,
+ UrlTargetNotFound,
+)
+from ..any import AnyUrlOperations
+
+
+def test_any_url_operations(tmp_path):
+ test_path = tmp_path / 'myfile'
+ test_url = test_path.as_uri()
+ ops = AnyUrlOperations()
+ # no target file (yet), precise exception
+ with pytest.raises(UrlTargetNotFound):
+ ops.sniff(test_url)
+ # now put something at the target location
+ test_path.write_text('surprise!')
+ # and now it works
+ props = ops.sniff(test_url)
+ # we get the correct file size reported
+ assert props['content-length'] == test_path.stat().st_size
+
+ # and download
+ download_path = tmp_path / 'download'
+ props = ops.download(test_url, download_path, hash=['sha256'])
+ assert props['sha256'] == '71de4622cf536ed4aa9b65fc3701f4fc5a198ace2fa0bda234fd71924267f696'
+ assert props['content-length'] == 9 == test_path.stat().st_size
+
+ # remove source and try again
+ test_path.unlink()
+ with pytest.raises(UrlTargetNotFound):
+ ops.download(test_url, download_path)
+
+ # try some obscure URL scheme
+ with pytest.raises(ValueError):
+ ops.sniff('weird://stuff')
+
+ # and it could have been figured out before
+ assert ops.is_supported_url('weird://stuff') == False
diff --git a/datalad_next/url_operations/tests/test_file.py b/datalad_next/url_operations/tests/test_file.py
new file mode 100644
index 0000000..aa7a392
--- /dev/null
+++ b/datalad_next/url_operations/tests/test_file.py
@@ -0,0 +1,32 @@
+import pytest
+from datalad_next.exceptions import (
+ AccessFailedError,
+ UrlTargetNotFound,
+)
+from ..file import FileUrlOperations
+
+
+def test_file_url_operations(tmp_path):
+ test_path = tmp_path / 'myfile'
+ test_url = test_path.as_uri()
+ ops = FileUrlOperations()
+ # no target file (yet), precise exception
+ with pytest.raises(UrlTargetNotFound):
+ ops.sniff(test_url)
+ # now put something at the target location
+ test_path.write_text('surprise!')
+ # and now it works
+ props = ops.sniff(test_url)
+ # we get the correct file size reported
+ assert props['content-length'] == test_path.stat().st_size
+
+ # and download
+ download_path = tmp_path / 'download'
+ props = ops.download(test_url, download_path, hash=['sha256'])
+ assert props['sha256'] == '71de4622cf536ed4aa9b65fc3701f4fc5a198ace2fa0bda234fd71924267f696'
+ assert props['content-length'] == 9 == test_path.stat().st_size
+
+ # remove source and try again
+ test_path.unlink()
+ with pytest.raises(UrlTargetNotFound):
+ ops.download(test_url, download_path)
diff --git a/datalad_next/url_operations/tests/test_http.py b/datalad_next/url_operations/tests/test_http.py
new file mode 100644
index 0000000..2f75c17
--- /dev/null
+++ b/datalad_next/url_operations/tests/test_http.py
@@ -0,0 +1,44 @@
+from pathlib import Path
+import pytest
+from datalad_next.exceptions import (
+ AccessFailedError,
+ UrlTargetNotFound,
+)
+from datalad_next.tests.utils import with_credential
+from ..http import HttpUrlOperations
+
+
+hbsurl = 'https://httpbin.org'
+hbscred = (
+ 'hbscred',
+ dict(user='mike', secret='dummy', type='user_password',
+ realm=f'{hbsurl}/Fake Realm'),
+)
+
+@with_credential(hbscred[0], **hbscred[1])
+def test_http_url_operations(tmp_path):
+ ops = HttpUrlOperations()
+ # authentication after redirect
+ target_url = f'{hbsurl}/basic-auth/mike/dummy'
+ props = ops.sniff(f'{hbsurl}/redirect-to?url={target_url}')
+ # we get the resolved URL after redirect back
+ assert props['url'] == target_url
+ # same again, but credentials are wrong
+ target_url = f'{hbsurl}/basic-auth/mike/WRONG'
+ with pytest.raises(AccessFailedError):
+ ops.sniff(f'{hbsurl}/redirect-to?url={target_url}')
+ # make sure we get the size info
+ assert ops.sniff(f'{hbsurl}/bytes/63')['content-length'] == 63
+
+ # download
+ # SFRUUEJJTiBpcyBhd2Vzb21l == 'HTTPBIN is awesome'
+ props = ops.download(f'{hbsurl}/base64/SFRUUEJJTiBpcyBhd2Vzb21l',
+ tmp_path / 'mydownload',
+ hash=['md5'])
+ assert (tmp_path / 'mydownload').read_text() == 'HTTPBIN is awesome'
+
+ # 404s
+ with pytest.raises(UrlTargetNotFound):
+ ops.sniff(f'{hbsurl}/status/404')
+ with pytest.raises(UrlTargetNotFound):
+ ops.download(f'{hbsurl}/status/404', tmp_path / 'dontmatter')
diff --git a/datalad_next/url_operations/tests/test_ssh.py b/datalad_next/url_operations/tests/test_ssh.py
new file mode 100644
index 0000000..713c68e
--- /dev/null
+++ b/datalad_next/url_operations/tests/test_ssh.py
@@ -0,0 +1,55 @@
+import pytest
+from datalad_next.exceptions import (
+ AccessFailedError,
+ UrlTargetNotFound,
+)
+from datalad_next.tests.utils import (
+ skip_ssh,
+ skip_if_on_windows,
+)
+from ..ssh import SshUrlOperations
+
+
+# path magic inside the test is posix only
+@skip_if_on_windows
+# SshUrlOperations does not work against a windows server
+# and the test uses 'localhost' as target
+@skip_ssh
+def test_ssh_url_operations(tmp_path, monkeypatch):
+ test_path = tmp_path / 'myfile'
+ test_url = f'ssh://localhost{test_path}'
+ ops = SshUrlOperations()
+ # no target file (yet), precise exception
+ with pytest.raises(UrlTargetNotFound):
+ ops.sniff(test_url)
+ # this is different for a general connection error
+ with pytest.raises(AccessFailedError):
+ ops.sniff(f'ssh://localhostnotaround{test_path}')
+ # now put something at the target location
+ test_path.write_text('surprise!')
+ # and now it works
+ props = ops.sniff(test_url)
+ # we get the correct file size reported
+ assert props['content-length'] == test_path.stat().st_size
+
+ # simulate a "protocol error" where the server-side command
+ # is not reporting the magic header
+ with monkeypatch.context() as m:
+ m.setattr(SshUrlOperations, '_stat_cmd', 'echo nothing')
+ # we get a distinct exception
+ with pytest.raises(RuntimeError):
+ ops.sniff(test_url)
+
+ # and download
+ download_path = tmp_path / 'download'
+ props = ops.download(test_url, download_path, hash=['sha256'])
+ assert props['sha256'] == '71de4622cf536ed4aa9b65fc3701f4fc5a198ace2fa0bda234fd71924267f696'
+ assert props['content-length'] == 9 == test_path.stat().st_size
+
+ # remove source and try again
+ test_path.unlink()
+ with pytest.raises(UrlTargetNotFound):
+ ops.download(test_url, download_path)
+ # this is different for a general connection error
+ with pytest.raises(AccessFailedError):
+ ops.download(f'ssh://localhostnotaround{test_path}', download_path)
| `UrlOperations.sniff()` exceptions should distinguish connection error from access error
When a URL works in principle, but points to something that is just not there (e.g. HTTP404) vs we cannot tell right now (HTTP5xx, or network down).
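A minimal sketch of the distinction a caller needs to be able to make; `AccessFailedError` exists in datalad-core already, while the dedicated `UrlTargetNotFound` type used here is a proposal/assumption, not an existing API at the time of writing:

```python
from datalad_next.exceptions import AccessFailedError, UrlTargetNotFound
from datalad_next.url_operations.http import HttpUrlOperations

def key_is_present(url: str) -> bool:
    ops = HttpUrlOperations()
    try:
        ops.sniff(url)
        return True
    except UrlTargetNotFound:
        # connection worked, target is definitely not there (e.g. HTTP 404)
        return False
    except AccessFailedError:
        # cannot tell right now (network down, HTTP 5xx, ...);
        # must not be reported as "absent"
        raise
```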
This is needed for a reliable operation of CHECKPRESENT in git-annex special remotes, and for meaningful error reporting in general. | 0.0 | [
"datalad_next/url_operations/tests/test_any.py::test_any_url_operations",
"datalad_next/url_operations/tests/test_file.py::test_file_url_operations",
"datalad_next/url_operations/tests/test_http.py::test_http_url_operations"
] | [] | 2022-11-25 13:43:46+00:00 | 1,819 |
|
datalad__datalad-next-222 | diff --git a/datalad_next/annexremotes/uncurl.py b/datalad_next/annexremotes/uncurl.py
index d9222b0..ead5ea0 100644
--- a/datalad_next/annexremotes/uncurl.py
+++ b/datalad_next/annexremotes/uncurl.py
@@ -203,10 +203,8 @@ names for each match-group to avoid collisions.
from __future__ import annotations
from functools import partial
-import json
from pathlib import Path
import re
-from urllib.parse import urlparse
# we intentionally limit ourselves to the most basic interface
# and even that we only need to get a `ConfigManager` instance.
@@ -225,12 +223,10 @@ from datalad_next.utils import ensure_list
from . import (
RemoteError,
SpecialRemote,
- UnsupportedRequest,
super_main
)
-
class UncurlRemote(SpecialRemote):
""" """
def __init__(self, annex):
@@ -303,7 +299,6 @@ class UncurlRemote(SpecialRemote):
annex_remoteuuid=self.annex.getuuid(),
)
-
def claimurl(self, url):
"""Needs to check if want to handle a given URL
@@ -402,7 +397,7 @@ class UncurlRemote(SpecialRemote):
lambda to_url: self.url_handler.delete(url=to_url),
'refuses to delete',
)
- except UrlOperationsResourceUnknown as e:
+ except UrlOperationsResourceUnknown:
self.message(
'f{key} not found at the remote, skipping', type='debug')
@@ -501,7 +496,7 @@ class UncurlRemote(SpecialRemote):
def _store_delete(self, key, handler, action: str):
if not self.url_tmpl:
- raise RemoteError(
+ raise RemoteError(
f'Remote {action} content without a configured URL template')
url = self.get_key_urls(key)
# we have a rewriting template, so we expect exactly one URL
diff --git a/datalad_next/url_operations/any.py b/datalad_next/url_operations/any.py
index 7ab94ad..c91d94a 100644
--- a/datalad_next/url_operations/any.py
+++ b/datalad_next/url_operations/any.py
@@ -4,18 +4,14 @@
from __future__ import annotations
from importlib import import_module
+import json
import logging
from pathlib import Path
import re
from typing import Dict
-from urllib import (
- request,
- parse,
-)
-
-from .http import HttpUrlOperations
-from .file import FileUrlOperations
-from .ssh import SshUrlOperations
+
+from datalad_next.exceptions import CapturedException
+
from . import UrlOperations
lgr = logging.getLogger('datalad.ext.next.url_operations.any')
@@ -24,16 +20,28 @@ lgr = logging.getLogger('datalad.ext.next.url_operations.any')
__all__ = ['AnyUrlOperations']
# define handlers for each supported URL pattern
-# the key in this dict is a regex match expression.
-# the value is a tuple of containing module, and name of the
-# class providing the handler
-# extensions could patch their's in
+# FORMAT OF HANDLER REGISTRY (dict)
+# - key: regex match expression to be applied on a URL (to test whether a
+# particular handler should be used for a given URL)
+# - value: tuple (handler specification, see below)
+# FORMAT OF HANDLER SPECIFICATION
+# - tuple of min-length 1
+# - item1: str, handler class to import
+# e.g., package.module.class
+# - item2: dict, optional, kwargs to pass to the handler constructor
+
# TODO support proper entrypoint mechanism
-_url_handlers = dict(
- http=('datalad_next.url_operations.http', 'HttpUrlOperations'),
- file=('datalad_next.url_operations.file', 'FileUrlOperations'),
- ssh=('datalad_next.url_operations.ssh', 'SshUrlOperations'),
-)
+# It is best to only record handlers here for which there is no alternative,
+# because the best handler is determined based on this information
+# and only this handler is imported. If that fails, there is no fallback.
+# Handlers that may or may not work under given conditions should only
+# be added via external logic after they have been found to be "working"
+# on a given installation.
+_url_handlers = {
+ 'http': ('datalad_next.url_operations.http.HttpUrlOperations',),
+ 'file': ('datalad_next.url_operations.file.FileUrlOperations',),
+ 'ssh': ('datalad_next.url_operations.ssh.SshUrlOperations',),
+}
class AnyUrlOperations(UrlOperations):
@@ -52,13 +60,64 @@ class AnyUrlOperations(UrlOperations):
efficiently.
"""
def __init__(self, cfg=None):
+ """
+ Parameters
+ ----------
+ cfg: ConfigManager, optional
+ A config manager instance that is consulted for any configuration
+ individual handlers may support.
+ """
super().__init__(cfg=cfg)
- self._url_handlers = {
- re.compile(k): v for k, v in _url_handlers.items()
- }
+ self._load_handler_registery()
# cache of already used handlers
self._url_handler_cache = dict()
+ def _load_handler_registery(self):
+ # update with handlers from config
+ # https://github.com/datalad/datalad-next/issues/217
+ cfgh = {}
+ for citem in self.cfg.keys():
+ if not citem.startswith('datalad.url-handler.'):
+ # none of our business
+ continue
+ # the match expression is right in the item key
+ # (all but the first two and the last segment)
+ citem_l = citem.split('.')
+ match = '.'.join(citem_l[2:-1])
+ prop = citem_l[-1]
+ value = self.cfg[citem]
+ if prop != 'class':
+ try:
+ value = json.loads(value)
+ except Exception as e:
+ ce = CapturedException(e)
+ lgr.debug(
+ 'Ignoring invalid URL handler configuration '
+ 'for %r(%s): %r [%s]',
+ match, prop, value, ce)
+ continue
+ hc = cfgh.get(match, {})
+ hc[prop] = value
+ cfgh[match] = hc
+ # merge all specs
+ uh = dict(_url_handlers)
+ for match, spec in cfgh.items():
+ try:
+ uh[match] = (spec['class'], spec['kwargs'])
+ except KeyError:
+ try:
+ uh[match] = (spec['class'],)
+ except Exception as e:
+ CapturedException(e)
+ lgr.debug(
+ 'Ignoring incomplete URL handler specification '
+ 'for %r: %r', match, spec)
+ self._url_handlers = {}
+ for k, v in uh.items():
+ # compile matches to finalize
+ lgr.log(8, 'Add URL handler for %r: %r', k, v)
+ self._url_handlers[re.compile(k)] = v
+
def _get_handler(self, url: str) -> UrlOperations:
# match URL against all registered handlers and get the one with the
# longest (AKA best) match
@@ -83,10 +142,17 @@ class AnyUrlOperations(UrlOperations):
# we need to import the handler
try:
- mod, cls = self._url_handlers[best_match]
+ handler_spec = self._url_handlers[best_match]
+ # split the import declaration into units
+ toimport = handler_spec[0].split('.')
+ # the handler class is the last unit
+ cls = toimport[-1]
+ # the rest is the module
+ mod = '.'.join(toimport[:-1])
module = import_module(mod, package='datalad')
handler_cls = getattr(module, cls)
- url_handler = handler_cls(cfg=self.cfg)
+ handler_kwargs = handler_spec[1] if len(handler_spec) > 1 else {}
+ url_handler = handler_cls(cfg=self.cfg, **handler_kwargs)
except Exception as e:
raise ValueError(
'Cannot create URL handler instance for '
| datalad/datalad-next | cf24cdd44120928e23a35463981744114f0b02eb | diff --git a/datalad_next/url_operations/tests/test_any.py b/datalad_next/url_operations/tests/test_any.py
index 1ac5705..e7be8bb 100644
--- a/datalad_next/url_operations/tests/test_any.py
+++ b/datalad_next/url_operations/tests/test_any.py
@@ -5,9 +5,9 @@ from .. import (
from ..any import (
_url_handlers,
AnyUrlOperations,
- HttpUrlOperations,
- FileUrlOperations,
)
+from ..http import HttpUrlOperations
+from ..file import FileUrlOperations
def test_get_best_url_handler(monkeypatch):
@@ -19,7 +19,7 @@ def test_get_best_url_handler(monkeypatch):
m.setitem(
_url_handlers,
'https://ex.*\.co',
- ('datalad_next.url_operations.file', 'FileUrlOperations'),
+ ('datalad_next.url_operations.file.FileUrlOperations',),
)
# the handlers are sucked into the class, so we need a new instance
ops = AnyUrlOperations()
| Allow for URL handler definition in config
Presently `AnyUrlOperations` only consults a runtime/code registry. We could...
Specify URL handlers in configuration:
The items would be of syntax: `datalad.url-handler.<regex>.<property>`,
where `<regex>` is a match expression that determines when to apply a particular handler.
Putting the regex into the section name would prevent the problem of undesired shadowing, e.g. when some software package installation defines `datalad.url-handler.openneuro` and I already had that, but for a different match expression. Ultimately the match expressions are the unique identifier and the git config syntax allows for putting them into the subsection names.
There can be any number of different `<property>`.
The special property `class` is something like `package.module.class` that identifies what class to import and use as a handler. Any other property would be passed on to the handler's constructor as `kwargs` (after applying `.replace('-', '_')` to the property name).
Alternatively (or maybe in addition), we could have a `kwargs` property to provide JSON-encoded arguments.
Or we could try to JSON-decode any property-value, and only fall back on the
str-representation if that does not work.
Both of the latter approaches would make it easier to specify non-string
values. I am thinking of cases where it would be useful to provide
timeout durations, number of retries, boolean flags, etc.
Update: real-world arguments that need to be specified can become rather complex quickly. Example:
```
{'fs_kwargs': {'anon': True, 's3': {'anon': True}}}
```
anything other than a single `kwargs` property with a JSON-encoded value seems to have little applicability.
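To make the proposal concrete, a hypothetical configuration for an S3 handler could look like this (the handler class name is made up; only the key layout and the JSON-encoded `kwargs` property matter here):

```python
from datalad.config import ConfigManager
from datalad_next.url_operations.any import AnyUrlOperations

cfg = ConfigManager(overrides={
    # the match expression lives in the (sub)section name ...
    'datalad.url-handler.s3://.*.class':
        'some_extension.url_operations.S3UrlOperations',
    # ... and complex constructor arguments go into one JSON-encoded value
    'datalad.url-handler.s3://.*.kwargs':
        '{"fs_kwargs": {"anon": true, "s3": {"anon": true}}}',
})
# a handler registry built from such a config would route s3:// URLs
# to the configured class, constructed with the decoded kwargs
ops = AnyUrlOperations(cfg=cfg)
```

In practice these keys would come from regular Git config scopes rather than from runtime overrides.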
| 0.0 | [
"datalad_next/url_operations/tests/test_any.py::test_get_best_url_handler"
] | [
"datalad_next/url_operations/tests/test_any.py::test_any_url_operations"
] | 2023-01-23 12:50:18+00:00 | 1,820 |
|
datalad__datalad-next-257 | diff --git a/datalad_next/credman/manager.py b/datalad_next/credman/manager.py
index 4d4167f..167a64c 100644
--- a/datalad_next/credman/manager.py
+++ b/datalad_next/credman/manager.py
@@ -201,8 +201,9 @@ class CredentialManager(object):
# no secret, no credential
if any(not p.startswith('_') for p in cred):
lgr.debug(
- 'Not reporting on credential fragment with no secret: %r',
- cred,
+ 'Not reporting on credential fragment '
+ '(name=%r) with no secret: %r',
+ name, cred,
)
return
@@ -727,6 +728,17 @@ class CredentialManager(object):
_type_hint = cred.get('type', _type_hint)
if _type_hint:
cred['type'] = _type_hint
+ return
+
+ # if we get here, we don't know what type this is
+ # let's derive one for a few clear-cut cases where we can be
+ # reasonably sure what type a credential is
+ if set(cred) == set(('token',)):
+ # all we have is a token property -- very likely a token-type
+ # credential. Move the token to the secret property and
+ # assign the type
+ cred['type'] = 'token'
+ cred['secret'] = cred.pop('token')
def _complete_credential_props(
self, name: str,
| datalad/datalad-next | 9db27268e574f55d655d5daf245c193439d17675 | diff --git a/datalad_next/credman/tests/test_credman.py b/datalad_next/credman/tests/test_credman.py
index 07523bf..bb04db5 100644
--- a/datalad_next/credman/tests/test_credman.py
+++ b/datalad_next/credman/tests/test_credman.py
@@ -215,6 +215,21 @@ def test_credman_get():
assert 'mysecret' == res['secret']
+def test_credman_get_guess_type():
+ # define token-only-no-type credential in config override
+ credman = CredentialManager(
+ ConfigManager(overrides={
+ 'datalad.credential.mike.token': 'some',
+ })
+ )
+ # we get it reported fine, token property converted to the
+ # 'secret' and a proper 'type' assigned
+ assert credman.get('mike') == {
+ 'secret': 'some',
+ 'type': 'token',
+ }
+
+
def test_credman_obtain(memory_keyring):
credman = CredentialManager(ConfigManager())
# senseless, but valid call
| Better support for lonely token credential specification
```sh
DATALAD_CREDENTIAL_MIKE_TOKEN=some datalad credentials get mike
mike(secret ✗): [token=some]
```
So right now it considers it to be a credential, but a credential without a secret, carrying only a `token` property.
This is suboptimal. This means that tokens can end up in a config file, more or less silently. It also means that we do not know anything about the nature of the credential (although we could), and we cannot say whether it is complete or not (although we could).
I'd say we should not only refuse to store credentials without a secret, but we should also disregard any credentials that can be determined to be incomplete.
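Alternatively, such a token-only fragment could be auto-converted into a proper token-type credential; a standalone sketch of that normalization (illustration only, not tied to a particular place in the code base):

```python
def normalize_token_only(cred: dict) -> dict:
    # a fragment that carries nothing but a `token` property is almost
    # certainly a token-type credential: promote the token to the secret
    if set(cred) == {'token'}:
        cred['type'] = 'token'
        cred['secret'] = cred.pop('token')
    return cred

assert normalize_token_only({'token': 'some'}) == {'type': 'token', 'secret': 'some'}
```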
In the case above a user could just use `secret` instead of `token`, but if they do not, we should either refuse (credential with no secret) or auto-convert (credential of type token with secret as given by the token env var). | 0.0 | [
"datalad_next/credman/tests/test_credman.py::test_credman_get_guess_type"
] | [
"datalad_next/credman/tests/test_credman.py::test_credmanager",
"datalad_next/credman/tests/test_credman.py::test_query",
"datalad_next/credman/tests/test_credman.py::test_credman_get",
"datalad_next/credman/tests/test_credman.py::test_credman_obtain"
] | 2023-02-23 15:58:13+00:00 | 1,821 |
|
datalad__datalad-next-278 | diff --git a/datalad_next/constraints/compound.py b/datalad_next/constraints/compound.py
index f808c27..31b529a 100644
--- a/datalad_next/constraints/compound.py
+++ b/datalad_next/constraints/compound.py
@@ -222,15 +222,28 @@ class EnsureGeneratorFromFileLike(Constraint):
existing file to be read from.
"""
- def __init__(self, item_constraint: Callable):
+ def __init__(
+ self,
+ item_constraint: Callable,
+ exc_mode: str = 'raise',
+ ):
"""
Parameters
----------
item_constraint:
Each incoming item will be mapped through this callable
before being yielded by the generator.
+ exc_mode: {'raise', 'yield'}, optional
+ How to deal with exceptions occurring when processing
+ individual lines/items. With 'yield' the respective
+ exception instance is yielded, and processing continues.
+ A caller can then decide whether to ignore, report, or raise
+ the exception. With 'raise', an exception is raised immediately
+ and processing stops.
"""
+ assert exc_mode in ('raise', 'yield')
self._item_constraint = item_constraint
+ self._exc_mode = exc_mode
super().__init__()
def __repr__(self):
@@ -267,11 +280,17 @@ class EnsureGeneratorFromFileLike(Constraint):
def _item_yielder(self, fp, close_file):
try:
for line in fp:
- yield self._item_constraint(
- # splitlines() removes the newline at the end of the string
- # that is left in by __iter__()
- line.splitlines()[0]
- )
+ try:
+ yield self._item_constraint(
+ # splitlines() removes the newline at the end of
+ # the string that is left in by __iter__()
+ line.splitlines()[0]
+ )
+ except Exception as e:
+ if self._exc_mode == 'raise':
+ raise
+ else:
+ yield e
finally:
if close_file:
fp.close()
| datalad/datalad-next | efe9fb30713f4b9f505fa2aa27203255a52883db | diff --git a/datalad_next/constraints/tests/test_compound.py b/datalad_next/constraints/tests/test_compound.py
index 12fcb38..8244d4a 100644
--- a/datalad_next/constraints/tests/test_compound.py
+++ b/datalad_next/constraints/tests/test_compound.py
@@ -137,10 +137,24 @@ def test_EnsureGeneratorFromFileLike():
assert list(c) == [{5: True}, {1234: False}]
# item constraint violation
- c = constraint(StringIO("5::yes\n1234::BANG"))
+ invalid_input = StringIO("1234::BANG\n5::yes")
+ # immediate raise is default
with pytest.raises(ValueError) as e:
- list(c)
+ list(constraint(invalid_input))
assert 'be convertible to boolean' in str(e)
+ # but optionally it yields the exception to be able to
+ # continue and enable a caller to raise/report/ignore
+ # (must redefine `invalid_input` to read from start)
+ invalid_input = StringIO("1234::BANG\n5::yes")
+ res = list(
+ EnsureGeneratorFromFileLike(
+ item_constraint,
+ exc_mode='yield',
+ )(invalid_input)
+ )
+ # we get the result after the exception occurred
+ assert isinstance(res[0], ValueError)
+ assert res[1] == {5: True}
# read from STDIN
with patch("sys.stdin", StringIO("5::yes\n1234::no")):
| EnsureGeneratorFromFileLike should not stop on exception
Right now any invalid input will cause it to stop and raise.
However, for any batch-mode application it would be essential to be able to ignore errors and/or simply report on them.
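For instance, a batch-oriented caller would want to be able to do something like this (sketch only; the `exc_mode` keyword is just a suggested name for whatever switch gets implemented):

```python
from io import StringIO
from datalad_next.constraints.compound import EnsureGeneratorFromFileLike

# `int` serves as a stand-in item constraint: it raises ValueError for bad lines
gen = EnsureGeneratorFromFileLike(int, exc_mode='yield')
for res in gen(StringIO('not-a-number\n5\n')):
    if isinstance(res, Exception):
        # report/ignore the invalid line and keep processing the batch
        print(f'skipping invalid item: {res}')
    else:
        print(res)  # -> 5
```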
The implementation should be changed to yield (not raise) a special value, likely the internal exception itself, which the caller can then re-raise, ignore, or report on. | 0.0 | [
"datalad_next/constraints/tests/test_compound.py::test_EnsureGeneratorFromFileLike"
] | [
"datalad_next/constraints/tests/test_compound.py::test_EnsureTupleOf",
"datalad_next/constraints/tests/test_compound.py::test_EnsureListOf",
"datalad_next/constraints/tests/test_compound.py::test_EnsureIterableOf",
"datalad_next/constraints/tests/test_compound.py::test_EnsureMapping",
"datalad_next/constraints/tests/test_compound.py::test_ConstraintWithPassthrough"
] | 2023-03-03 07:12:15+00:00 | 1,822 |
|
datalad__datalad-next-292 | diff --git a/datalad_next/commands/download.py b/datalad_next/commands/download.py
index 03d464b..4f91ccd 100644
--- a/datalad_next/commands/download.py
+++ b/datalad_next/commands/download.py
@@ -8,7 +8,6 @@ from pathlib import (
Path,
PurePosixPath,
)
-from typing import Dict
from urllib.parse import urlparse
import datalad
@@ -27,6 +26,7 @@ from datalad_next.exceptions import (
)
from datalad_next.utils import ensure_list
from datalad_next.constraints import (
+ AnyOf,
EnsureChoice,
EnsureGeneratorFromFileLike,
EnsureJSON,
@@ -34,10 +34,8 @@ from datalad_next.constraints import (
EnsureMapping,
EnsurePath,
EnsureURL,
- EnsureParsedURL,
EnsureValue,
)
-from datalad_next.constraints.base import AltConstraints
from datalad_next.constraints.dataset import EnsureDataset
from datalad_next.url_operations.any import AnyUrlOperations
@@ -127,10 +125,7 @@ class Download(ValidatedInterface):
# - a single item
# - as a list of items
# - a list given in a file, or via stdin (or any file-like in Python)
- #
- # Must not OR: https://github.com/datalad/datalad/issues/7164
- #spec=spec_item_constraint | EnsureListOf(spec_item_constraint)# \
- spec_constraint = AltConstraints(
+ spec_constraint = AnyOf(
spec_item_constraint,
EnsureListOf(spec_item_constraint),
EnsureGeneratorFromFileLike(
diff --git a/datalad_next/constraints/__init__.py b/datalad_next/constraints/__init__.py
index 1791ded..e6e97e0 100644
--- a/datalad_next/constraints/__init__.py
+++ b/datalad_next/constraints/__init__.py
@@ -13,7 +13,12 @@
dataset
exceptions
"""
-
+from .base import (
+ AllOf,
+ AnyOf,
+ Constraint,
+ DatasetParameter,
+)
# expose constraints with direct applicability, but not
# base and helper classes
from .basic import (
diff --git a/datalad_next/constraints/base.py b/datalad_next/constraints/base.py
index 6603aa4..78cd888 100644
--- a/datalad_next/constraints/base.py
+++ b/datalad_next/constraints/base.py
@@ -5,8 +5,7 @@ from __future__ import annotations
__docformat__ = 'restructuredtext'
-__all__ = ['Constraint', 'Constraints', 'AltConstraints',
- 'DatasetParameter']
+__all__ = ['Constraint', 'AllOf', 'AnyOf', 'DatasetParameter']
from .exceptions import ConstraintError
@@ -53,10 +52,10 @@ class Constraint:
raise ConstraintError(self, value, msg)
def __and__(self, other):
- return Constraints(self, other)
+ return AllOf(self, other)
def __or__(self, other):
- return AltConstraints(self, other)
+ return AnyOf(self, other)
def __call__(self, value):
# do any necessary checks or conversions, potentially catch exceptions
@@ -115,7 +114,7 @@ class _MultiConstraint(Constraint):
return doc
-class AltConstraints(_MultiConstraint):
+class AnyOf(_MultiConstraint):
"""Logical OR for constraints.
An arbitrary number of constraints can be given. They are evaluated in the
@@ -134,11 +133,12 @@ class AltConstraints(_MultiConstraint):
super().__init__(*constraints)
def __or__(self, other):
- if isinstance(other, AltConstraints):
- self.constraints.extend(other.constraints)
+ constraints = list(self.constraints)
+ if isinstance(other, AnyOf):
+ constraints.extend(other.constraints)
else:
- self.constraints.append(other)
- return self
+ constraints.append(other)
+ return AnyOf(*constraints)
def __call__(self, value):
e_list = []
@@ -159,7 +159,7 @@ class AltConstraints(_MultiConstraint):
return self._get_description('short_description', 'or')
-class Constraints(_MultiConstraint):
+class AllOf(_MultiConstraint):
"""Logical AND for constraints.
An arbitrary number of constraints can be given. They are evaluated in the
@@ -179,11 +179,12 @@ class Constraints(_MultiConstraint):
super().__init__(*constraints)
def __and__(self, other):
- if isinstance(other, Constraints):
- self.constraints.extend(other.constraints)
+ constraints = list(self.constraints)
+ if isinstance(other, AllOf):
+ constraints.extend(other.constraints)
else:
- self.constraints.append(other)
- return self
+ constraints.append(other)
+ return AllOf(*constraints)
def __call__(self, value):
for c in (self.constraints):
@@ -195,3 +196,9 @@ class Constraints(_MultiConstraint):
def short_description(self):
return self._get_description('short_description', 'and')
+
+
+# keep for backward compatibility
+Constraints = AllOf
+AltConstraints = AnyOf
+
| datalad/datalad-next | 10b5d92d48c0f2d801f9ae88373bc8bbcbceb40d | diff --git a/datalad_next/constraints/tests/test_base.py b/datalad_next/constraints/tests/test_base.py
index 56be147..fb89605 100644
--- a/datalad_next/constraints/tests/test_base.py
+++ b/datalad_next/constraints/tests/test_base.py
@@ -2,15 +2,17 @@ import pytest
from ..base import (
Constraint,
- Constraints,
- AltConstraints,
+ AllOf,
+ AnyOf,
)
from ..basic import (
+ EnsureDType,
EnsureInt,
EnsureFloat,
EnsureBool,
EnsureNone,
EnsureRange,
+ EnsureStr,
)
@@ -30,9 +32,9 @@ def test_base():
def test_constraints():
# this should always work
- c = Constraints(EnsureFloat())
+ c = AllOf(EnsureFloat())
assert c(7.0) == 7.0
- c = Constraints(EnsureFloat(), EnsureRange(min=4.0))
+ c = AllOf(EnsureFloat(), EnsureRange(min=4.0))
assert c(7.0) == 7.0
# __and__ form
c = EnsureFloat() & EnsureRange(min=4.0)
@@ -41,7 +43,7 @@ def test_constraints():
assert c(7.0) == 7.0
with pytest.raises(ValueError):
c(3.9)
- c = Constraints(EnsureFloat(), EnsureRange(min=4), EnsureRange(max=9))
+ c = AllOf(EnsureFloat(), EnsureRange(min=4), EnsureRange(max=9))
assert c(7.0) == 7.0
with pytest.raises(ValueError):
c(3.9)
@@ -55,14 +57,19 @@ def test_constraints():
with pytest.raises(ValueError):
c(9.01)
# and reordering should not have any effect
- c = Constraints(EnsureRange(max=4), EnsureRange(min=9), EnsureFloat())
+ c = AllOf(EnsureRange(max=4), EnsureRange(min=9), EnsureFloat())
with pytest.raises(ValueError):
c(3.99)
with pytest.raises(ValueError):
c(9.01)
# smoke test concat AND constraints
- c = Constraints(EnsureRange(max=10), EnsureRange(min=5)) & \
- Constraints(EnsureRange(max=6), EnsureRange(min=2))
+ c1 = AllOf(EnsureRange(max=10), EnsureRange(min=5))
+ c2 = AllOf(EnsureRange(max=6), EnsureRange(min=2))
+ c = c1 & c2
+ # make sure that neither c1, nor c2 is modified
+ assert len(c1.constraints) == 2
+ assert len(c2.constraints) == 2
+ assert len(c.constraints) == 4
assert c(6) == 6
with pytest.raises(ValueError):
c(4)
@@ -70,11 +77,11 @@ def test_constraints():
def test_altconstraints():
# this should always work
- c = AltConstraints(EnsureFloat())
+ c = AnyOf(EnsureFloat())
# passes the docs through
assert c.short_description() == EnsureFloat().short_description()
assert c(7.0) == 7.0
- c = AltConstraints(EnsureFloat(), EnsureNone())
+ c = AnyOf(EnsureFloat(), EnsureNone())
# wraps docs in parenthesis to help appreciate the scope of the
# OR'ing
assert c.short_description().startswith(
@@ -86,7 +93,7 @@ def test_altconstraints():
c = c | EnsureInt()
assert c.short_description(), '(float or None or int)'
# OR with an alternative combo also extends
- c = c | AltConstraints(EnsureBool(), EnsureInt())
+ c = c | AnyOf(EnsureBool(), EnsureInt())
# yes, no de-duplication
assert c.short_description(), '(float or None or int or bool or int)'
# spot check long_description, must have some number
@@ -97,7 +104,7 @@ def test_altconstraints():
assert c(None) is None
# this should always fail
- c = Constraints(EnsureRange(min=0, max=4), EnsureRange(min=9, max=11))
+ c = AllOf(EnsureRange(min=0, max=4), EnsureRange(min=9, max=11))
with pytest.raises(ValueError):
c(7.0)
c = EnsureRange(min=0, max=4) | EnsureRange(min=9, max=11)
@@ -108,14 +115,25 @@ def test_altconstraints():
with pytest.raises(ValueError):
c(-1.0)
+ # verify no inplace modification
+ c1 = EnsureInt() | EnsureStr()
+ c2 = c1 | EnsureDType(c1)
+ # OR'ing does not "append" the new alternative to c1.
+ assert len(c1.constraints) == 2
+ # at the same time, c2 does not contain an AnyOf
+ # as an internal constraint, because this would be needless
+ # complexity re the semantics of OR
+ assert len(c2.constraints) == 3
+
def test_both():
# this should always work
- c = AltConstraints(
- Constraints(
+ c = AnyOf(
+ AllOf(
EnsureFloat(),
EnsureRange(min=7.0, max=44.0)),
- EnsureNone())
+ EnsureNone(),
+ )
assert c(7.0) == 7.0
assert c(None) is None
# this should always fail
diff --git a/datalad_next/constraints/tests/test_cmdarg_validation.py b/datalad_next/constraints/tests/test_cmdarg_validation.py
index 2e60171..b735085 100644
--- a/datalad_next/constraints/tests/test_cmdarg_validation.py
+++ b/datalad_next/constraints/tests/test_cmdarg_validation.py
@@ -25,7 +25,7 @@ from .. import (
EnsureValue,
)
from ..base import (
- AltConstraints,
+ AnyOf,
Constraint,
)
from ..dataset import EnsureDataset
@@ -49,10 +49,7 @@ class BasicCmdValidator(EnsureCommandParameterization):
spec_item_constraint = url2path_constraint | url_constraint \
| (EnsureJSON() & url2path_constraint)
- # Must not OR: https://github.com/datalad/datalad/issues/7164
- #spec_constraint = \
- # spec_item_constraint | EnsureListOf(spec_item_constraint)
- spec_constraint = AltConstraints(
+ spec_constraint = AnyOf(
EnsureListOf(spec_item_constraint),
EnsureGeneratorFromFileLike(spec_item_constraint),
spec_item_constraint,
| Implement `AltConstraints.__or__` not `__ior__`
This is the sibling of https://github.com/datalad/datalad/issues/7164.
I think the real issue is that the current implementation matches the semantics of `__ior__()` (in-place OR) and not `__or__()`, and this needs to be fixed. A sketch of the corrected, non-mutating behavior follows this record. | 0.0 | [
"datalad_next/constraints/tests/test_base.py::test_base",
"datalad_next/constraints/tests/test_base.py::test_constraints",
"datalad_next/constraints/tests/test_base.py::test_altconstraints",
"datalad_next/constraints/tests/test_base.py::test_both",
"datalad_next/constraints/tests/test_cmdarg_validation.py::test_multi_validation",
"datalad_next/constraints/tests/test_cmdarg_validation.py::test_invalid_multi_validation",
"datalad_next/constraints/tests/test_cmdarg_validation.py::test_cmd_with_validation"
] | [] | 2023-03-06 17:43:33+00:00 | 1,823 |
|
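For orientation, the corrected `__or__()` semantics from the record above boil down to the following sketch (mirroring the added test; import paths are those of the test modules):

```py
from datalad_next.constraints.base import AnyOf
from datalad_next.constraints.basic import EnsureBool, EnsureInt, EnsureStr

c1 = EnsureInt() | EnsureStr()  # an AnyOf with two alternatives
c2 = c1 | EnsureBool()          # builds a *new*, flattened AnyOf

assert isinstance(c1, AnyOf) and len(c1.constraints) == 2  # c1 is unchanged
assert len(c2.constraints) == 3                            # no nested AnyOf
```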
datalad__datalad-next-294 | diff --git a/datalad_next/constraints/__init__.py b/datalad_next/constraints/__init__.py
index e6e97e0..d910e28 100644
--- a/datalad_next/constraints/__init__.py
+++ b/datalad_next/constraints/__init__.py
@@ -41,6 +41,7 @@ from .compound import (
EnsureTupleOf,
EnsureMapping,
EnsureGeneratorFromFileLike,
+ WithDescription,
)
# this is the key type, almost all consuming code will want to
# have this for `except` clauses
diff --git a/datalad_next/constraints/base.py b/datalad_next/constraints/base.py
index 78cd888..f24be8b 100644
--- a/datalad_next/constraints/base.py
+++ b/datalad_next/constraints/base.py
@@ -62,12 +62,53 @@ class Constraint:
# and generate a meaningful error message
raise NotImplementedError("abstract class")
+ @property
+ def input_synopsis(self) -> str:
+ """Returns brief, single line summary of valid input for a constraint
+
+ This information is user-facing, and to be used in any place where
+ space is limited (tooltips, usage summaries, etc).
+
+ If possible, the synopsis should be written in a UI/API-agnostic
+ fashion. However, if this is impossible or leads to imprecisions or
+ confusion, it should focus on use within Python code and with Python
+ data types. Tailored documentation can be provided via the
+ ``WithDescription`` wrapper.
+ """
+ # return the legacy short description for now
+ return self.short_description()
+
+ @property
+ def input_description(self) -> str:
+ """Returns full description of valid input for a constraint
+
+ Like ``input_synopsis`` this information is user-facing. In contrast,
+ to the synopsis there is length/line limit. Nevertheless, the
+ information should be presented in a compact fashion that avoids
+ needless verbosity. If possible, a single paragraph is a good format.
+ If multiple paragraphs are necessary, they should be separated by
+ a single, empty line.
+
+ Rendering code may indent, or rewrap the text, so no line-by-line
+ formatting will be preserved.
+
+ If possible, the synopsis should be written in a UI/API-agnostic
+ fashion. However, if this is impossible or leads to imprecisions or
+ confusion, it should focus on use within Python code and with Python
+ data types. Tailored documentation can be provided via the
+ ``WithDescription`` wrapper.
+ """
+ # return the legacy short description for now
+ return self.long_description()
+
def long_description(self):
+ """This method is deprecated. Use ``input_description`` instead"""
# return meaningful docs or None
# used as a comprehensive description in the parameter list
return self.short_description()
def short_description(self):
+ """This method is deprecated. Use ``input_synopsis`` instead"""
# return meaningful docs or None
# used as a condensed primer for the parameter lists
raise NotImplementedError("abstract class")
diff --git a/datalad_next/constraints/compound.py b/datalad_next/constraints/compound.py
index 7ff5e10..1ee201e 100644
--- a/datalad_next/constraints/compound.py
+++ b/datalad_next/constraints/compound.py
@@ -369,3 +369,99 @@ class ConstraintWithPassthrough(Constraint):
def short_description(self) -> str:
return self._constraint.short_description()
+
+
+class WithDescription(Constraint):
+ """Contraint that wraps another constraint and replaces its description
+
+ Whenever a constraint's self-description does not fit an application
+ context, it can be wrapped with this class. The given synopsis and
+ description of valid inputs replaces those of the wrapped constraint.
+ """
+ def __init__(self,
+ constraint: Constraint,
+ *,
+ input_synopsis: str | None = None,
+ input_description: str | None = None,
+ input_synopsis_for_ds: str | None = None,
+ input_description_for_ds: str | None = None,
+ ):
+ """
+ Parameters
+ ----------
+ constraint: Constraint
+ Any ``Constraint`` subclass instance that will be used to validate
+ values.
+ input_synopsis: optional
+ If given, text to be returned as the constraint's ``input_synopsis``.
+ Otherwise the wrapped constraint's ``input_synopsis`` is returned.
+ input_description: optional
+ If given, text to be returned as the constraint's
+ ``input_description``. Otherwise the wrapped constraint's
+ ``input_description`` is returned.
+ input_synopsis_for_ds: optional
+ If either this or ``input_description_for_ds`` are given, the
+ result of tailoring a constraint for a particular dataset
+ (``for_dataset()``) will also be wrapped with this custom
+ synopsis.
+ input_description_for_ds: optional
+ If either this or ``input_synopsis_for_ds`` are given, the
+ result of tailoring a constraint for a particular dataset
+ (``for_dataset()``) will also be wrapped with this custom
+ description.
+ """
+ super().__init__()
+ self._constraint = constraint
+ self._synopsis = input_synopsis
+ self._description = input_description
+ self._synopsis_for_ds = input_synopsis_for_ds
+ self._description_for_ds = input_description_for_ds
+
+ @property
+ def constraint(self) -> Constraint:
+ """Returns the wrapped constraint instance"""
+ return self._constraint
+
+ def __call__(self, value) -> Any:
+ return self._constraint(value)
+
+ def __str__(self) -> str:
+ return \
+ f'<{self._constraint.__class__.__name__} with custom description>'
+
+ def __repr__(self) -> str:
+ return f'{self.__class__.__name__}' \
+ f'({self._constraint!r}, ' \
+ f'input_synopsis={self._synopsis!r}, ' \
+ f'input_description={self._description!r}, ' \
+ f'input_synopsis_for_ds={self._synopsis_for_ds!r}, ' \
+ f'input_description_for_ds={self._description_for_ds!r})'
+
+ def for_dataset(self, dataset: DatasetParameter) -> Constraint:
+ """Wrap the wrapped constraint again after tailoring it for the dataset
+ """
+ if self._synopsis_for_ds is not None \
+ or self._description_for_ds is not None:
+ # we also want to wrap the tailored constraint
+ return self.__class__(
+ self._constraint.for_dataset(dataset),
+ input_synopsis=self._synopsis_for_ds,
+ input_description=self._description_for_ds,
+ )
+ else:
+ return self._constraint.for_dataset(dataset)
+
+ @property
+ def input_synopsis(self):
+ return self._synopsis or self.constraint.input_synopsis
+
+ @property
+ def input_description(self):
+ return self._description or self.constraint.input_description
+
+ # legacy compatibility
+ def long_description(self) -> str:
+ return self.input_description
+
+ def short_description(self) -> str:
+ return self.input_synopsis
| datalad/datalad-next | 134e03a8fb1dcad20ebf32558a455f4babed95c9 | diff --git a/datalad_next/constraints/tests/test_compound.py b/datalad_next/constraints/tests/test_compound.py
index 351f404..ca81b1f 100644
--- a/datalad_next/constraints/tests/test_compound.py
+++ b/datalad_next/constraints/tests/test_compound.py
@@ -22,6 +22,7 @@ from ..compound import (
EnsureTupleOf,
EnsureMapping,
EnsureGeneratorFromFileLike,
+ WithDescription,
)
@@ -210,3 +211,49 @@ def test_ConstraintWithPassthrough(tmp_path):
cwp_ds = cwp.for_dataset(ds)
assert cwp_ds.passthrough == cwp.passthrough
assert cwp.constraint == wrapped.for_dataset(ds)
+
+
+def test_WithDescription(tmp_path):
+ wrapped = EnsureInt()
+ # confirm starting point
+ assert wrapped.input_synopsis == 'int'
+ assert wrapped.input_description \
+ == "value must be convertible to type 'int'"
+ # we are actually not replacing anything
+ c = WithDescription(wrapped)
+ assert c.input_synopsis == wrapped.input_synopsis
+ assert c.input_description == wrapped.input_description
+ # with no dataset docs, the wrapping is removed on tailoring
+ ds = Dataset(tmp_path)
+ assert isinstance(
+ c.for_dataset(DatasetParameter(None, ds)),
+ EnsureInt)
+ # check all replacements are working
+ c = WithDescription(
+ wrapped,
+ input_synopsis='mysynopsis',
+ input_description='mydescription',
+ input_synopsis_for_ds='dssynopsis',
+ input_description_for_ds='dsdescription',
+ )
+ # function is maintained
+ assert c('5') is 5
+ assert str(c) == '<EnsureInt with custom description>'
+ assert repr(c) == \
+ "WithDescription(EnsureInt(), " \
+ "input_synopsis='mysynopsis', " \
+ "input_description='mydescription', " \
+ "input_synopsis_for_ds='dssynopsis', " \
+ "input_description_for_ds='dsdescription')"
+ assert c.constraint is wrapped
+ assert c.input_synopsis == 'mysynopsis'
+ assert c.input_description == 'mydescription'
+ # description propagates through tailoring
+ cds = c.for_dataset(DatasetParameter(None, ds))
+ assert isinstance(cds, WithDescription)
+ assert cds.input_synopsis == 'dssynopsis'
+ assert cds.input_description == 'dsdescription'
+
+ # legacy functionality
+ c.short_description() == c.input_synopsis
+ c.long_description() == c.input_description
| Allow for `Constraint` documentation override
Right now all constraints self-document. It would be better if a user could override or amend that documentation.
Constraints are now more flexible than originally planned. That leads to more complex auto-descriptions that are not always needed or desirable.
Sometimes a user would simply know better. | 0.0 | [
"datalad_next/constraints/tests/test_compound.py::test_EnsureTupleOf",
"datalad_next/constraints/tests/test_compound.py::test_EnsureListOf",
"datalad_next/constraints/tests/test_compound.py::test_EnsureIterableOf",
"datalad_next/constraints/tests/test_compound.py::test_EnsureMapping",
"datalad_next/constraints/tests/test_compound.py::test_EnsureGeneratorFromFileLike",
"datalad_next/constraints/tests/test_compound.py::test_ConstraintWithPassthrough",
"datalad_next/constraints/tests/test_compound.py::test_WithDescription"
] | [] | 2023-03-07 10:15:36+00:00 | 1,824 |
|
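A small usage sketch of the `WithDescription` wrapper introduced in the record above, following the added test; the replacement texts are made-up examples and the import paths mirror the test modules.

```py
from datalad_next.constraints.basic import EnsureInt
from datalad_next.constraints.compound import WithDescription

c = WithDescription(
    EnsureInt(),
    input_synopsis='identifier',
    input_description='value must be a numeric identifier',
)
assert c('5') == 5                       # validation is passed through
assert c.input_synopsis == 'identifier'  # the self-description is replaced
```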
datalad__datalad-next-338 | diff --git a/changelog.d/20230505_074522_michael.hanke_httphdrs.md b/changelog.d/20230505_074522_michael.hanke_httphdrs.md
new file mode 100644
index 0000000..60f9534
--- /dev/null
+++ b/changelog.d/20230505_074522_michael.hanke_httphdrs.md
@@ -0,0 +1,7 @@
+### 💫 Enhancements and new features
+
+- The `HttpUrlOperations` handler now supports custom HTTP headers.
+ This makes it possible to define custom handlers in configuration
+ that include such header customization, for example to send
+ custom secret or session IDs.
+ Fixes https://github.com/datalad/datalad-next/issues/336 (by @mih)
diff --git a/datalad_next/url_operations/http.py b/datalad_next/url_operations/http.py
index 8700b7b..6b964aa 100644
--- a/datalad_next/url_operations/http.py
+++ b/datalad_next/url_operations/http.py
@@ -36,13 +36,28 @@ class HttpUrlOperations(UrlOperations):
authentication challenges.
"""
- _headers = {
- 'user-agent': user_agent('datalad', datalad.__version__),
- }
+ def __init__(self, cfg=None, headers: Dict | None = None):
+ """
+ Parameters
+ ----------
+ cfg: ConfigManager, optional
+ A config manager instance that is consulted for any configuration
+ filesystem configuration individual handlers may support.
+ headers: dict, optional
+ Additional or alternative headers to add to a request. The default
+ headers contain a ``user-agent`` declaration. Any headers provided
+ here override corresponding defaults.
+ """
+ super().__init__(cfg=cfg)
+ self._headers = {
+ 'user-agent': user_agent('datalad', datalad.__version__),
+ }
+ if headers:
+ self._headers.update(headers)
def get_headers(self, headers: Dict | None = None) -> Dict:
# start with the default
- hdrs = dict(HttpUrlOperations._headers)
+ hdrs = dict(self._headers)
if headers is not None:
hdrs.update(headers)
return hdrs
| datalad/datalad-next | cac92972f5fb43112a34907be6bff8305f579b20 | diff --git a/datalad_next/url_operations/tests/test_http.py b/datalad_next/url_operations/tests/test_http.py
index 62b1bc1..817d756 100644
--- a/datalad_next/url_operations/tests/test_http.py
+++ b/datalad_next/url_operations/tests/test_http.py
@@ -1,6 +1,7 @@
from pathlib import Path
import pytest
+from ..any import AnyUrlOperations
from ..http import (
HttpUrlOperations,
UrlOperationsRemoteError,
@@ -42,3 +43,17 @@ def test_http_url_operations(credman, httpbin, tmp_path):
ops.stat(f'{hbsurl}/status/404')
with pytest.raises(UrlOperationsResourceUnknown):
ops.download(f'{hbsurl}/status/404', tmp_path / 'dontmatter')
+
+
+def test_custom_http_headers_via_config(datalad_cfg):
+ for k, v in (
+ ('datalad.url-handler.http.*.class',
+ 'datalad_next.url_operations.http.HttpUrlOperations'),
+ ('datalad.url-handler.http.*.kwargs',
+ '{"headers": {"X-Funky": "Stuff"}}'),
+ ):
+ datalad_cfg.set(k, v, scope='global', reload=False)
+ datalad_cfg.reload()
+ auo = AnyUrlOperations()
+ huo = auo._get_handler(f'http://example.com')
+ assert huo._headers['X-Funky'] == 'Stuff'
| Enable HTTP header customization for URLOperations
This would be the patch:
```diff
diff --git a/datalad_next/url_operations/http.py b/datalad_next/url_operations/http.py
index 8700b7b..2d30c64 100644
--- a/datalad_next/url_operations/http.py
+++ b/datalad_next/url_operations/http.py
@@ -36,13 +36,24 @@ class HttpUrlOperations(UrlOperations):
authentication challenges.
"""
- _headers = {
- 'user-agent': user_agent('datalad', datalad.__version__),
- }
+ def __init__(self, cfg=None, headers=None):
+ """
+ Parameters
+ ----------
+ cfg: ConfigManager, optional
+ A config manager instance that is consulted for any configuration
+ filesystem configuration individual handlers may support.
+ """
+ super().__init__(cfg=cfg)
+ self._headers = {
+ 'user-agent': user_agent('datalad', datalad.__version__),
+ }
+ if headers:
+ self._headers.update(headers)
def get_headers(self, headers: Dict | None = None) -> Dict:
# start with the default
- hdrs = dict(HttpUrlOperations._headers)
+ hdrs = dict(self._headers)
if headers is not None:
hdrs.update(headers)
return hdrs
```
With the headers exposed in the constructor, we can use handler customization (that still needs to be documented #335) and supply custom headers like so:
```
datalad \
-c datalad.url-handler.https://fz-juelich.sciebo.de/public.php/webdav.class=datalad_next.url_operations.http.HttpUrlOperations \
-c 'datalad.url-handler.https://fz-juelich.sciebo.de/public.php/webdav.kwargs={"headers": {"X-Requested-With": "XMLHttpRequest"}}' \
download \
'https://fz-juelich.sciebo.de/public.php/webdav/king/hubert-neufeld-j-udI4zim2E-unsplash.jpg'
``` | 0.0 | [
"datalad_next/url_operations/tests/test_http.py::test_custom_http_headers_via_config"
] | [
"datalad_next/url_operations/tests/test_http.py::test_http_url_operations"
] | 2023-05-05 05:48:59+00:00 | 1,825 |
|
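To make the new capability from the record above concrete, here is a minimal sketch based on its patch and test; the header name and value are placeholders.

```py
from datalad_next.url_operations.http import HttpUrlOperations

# pass additional (or overriding) headers directly to a handler instance
ops = HttpUrlOperations(headers={'X-Funky': 'Stuff'})
assert ops.get_headers()['X-Funky'] == 'Stuff'

# alternatively, register a customized handler via configuration,
# as the added test does:
#   datalad.url-handler.http.*.class =
#       datalad_next.url_operations.http.HttpUrlOperations
#   datalad.url-handler.http.*.kwargs =
#       {"headers": {"X-Funky": "Stuff"}}
```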
datalad__datalad-next-340 | diff --git a/changelog.d/20230508_072605_michael.hanke_cerror.md b/changelog.d/20230508_072605_michael.hanke_cerror.md
new file mode 100644
index 0000000..3301441
--- /dev/null
+++ b/changelog.d/20230508_072605_michael.hanke_cerror.md
@@ -0,0 +1,15 @@
+### 💫 Enhancements and new features
+
+- `Constraint` implementations now raise `ConstraintError` consistently
+ on a violation. This now makes it possible to distinguish properly
+ handled violations from improper implementation of such checks.
+ Moreover, `raise_for()` is now used consistently, providing
+ uniform, structured information on such violations.
+ `ConstraintError` is derived from `ValueError` (the exception
+ that was previously (mostly) raised. Therefore, client-code should
+ continue to work without modification, unless a specific wording
+ of an exception message is relied upon. In few cases, an implicit
+ `TypeError` (e.g., `EnsureIterableof`) has been replaced by an
+ explicit `ConstraintError`, and client code needs to be adjusted.
+ The underlying exception continues to be available via
+ `ConstraintError.caused_by`. (by @mih)
diff --git a/datalad_next/constraints/__init__.py b/datalad_next/constraints/__init__.py
index d910e28..05442fd 100644
--- a/datalad_next/constraints/__init__.py
+++ b/datalad_next/constraints/__init__.py
@@ -1,5 +1,38 @@
"""Data validation, coercion, and parameter documentation
+This module provides a set of uniform classes to validate and document
+particular aspects of inputs. In a nutshell, each of these
+:class:`~datalad_next.constraints.base.Constraint` class:
+
+- focuses on a specific aspect, such as data type coercion,
+ or checking particular input properties
+- is instantiated with a set of parameters to customize
+ such an instance for a particular task
+- performs its task by receiving an input via its ``__call__()``
+ method
+- provides default auto-documentation that can be customized
+ by wrapping an instance in
+ :class:`~datalad_next.constraints.compound.WithDescription`
+
+Individual ``Constraint`` instances can be combined with logical AND
+(:class:`~datalad_next.constraints.base.AllOf`) and OR
+(:class:`~datalad_next.constraints.base.AnyOf`) operations to form arbitrarily
+complex constructs.
+
+On (validation/coercion) error, instances raise
+:class:`~datalad_next.constraints.exceptions.ConstraintError`) via their
+``raise_for()`` method. This approach to error reporting helps to communicate
+standard (yet customizable) error messages, aids structured error reporting,
+and is capable of communication the underlying causes of an error in full
+detail without the need to generate long textual descriptions.
+
+:class:`~datalad_next.constraints.parameter.EnsureCommandParameterization` is a
+particular variant of a ``Constraint`` that is capable of validating a complete
+parameterization of a command (or function), for each parameter individually,
+and for arbitrary combinations of parameters. It puts a particular emphasis on
+structured error reporting.
+
+
.. currentmodule:: datalad_next.constraints
.. autosummary::
:toctree: generated
diff --git a/datalad_next/constraints/basic.py b/datalad_next/constraints/basic.py
index 186b4f8..0d9c56b 100644
--- a/datalad_next/constraints/basic.py
+++ b/datalad_next/constraints/basic.py
@@ -43,7 +43,11 @@ class EnsureValue(Constraint):
if value == self._target_value:
return value
else:
- raise ValueError(f"value must be {self._target_value!r}")
+ self.raise_for(
+ value,
+ "must be {target_value!r}",
+ target_value=self._target_value,
+ )
def short_description(self):
return f'{self._target_value!r}'
@@ -120,9 +124,7 @@ class EnsureBool(Constraint):
return False
elif value in ('1', 'yes', 'on', 'enable', 'true'):
return True
- raise ValueError(
- "value '{}' must be convertible to boolean".format(
- value))
+ self.raise_for(value, "must be convertible to boolean")
def long_description(self):
return 'value must be convertible to type bool'
@@ -162,14 +164,17 @@ class EnsureStr(Constraint):
# do not perform a blind conversion ala str(), as almost
# anything can be converted and the result is most likely
# unintended
- raise ValueError("%s is not a string" % repr(value))
+ self.raise_for(value, "must be a string")
if len(value) < self._min_len:
- raise ValueError("%r is shorter than of minimal length %d"
- % (value, self._min_len))
+ self.raise_for(value, "must have minimum length {len}",
+ len=self._min_len)
if self._match:
if not self._match.match(value):
- raise ValueError(
- f'{value} does not match {self._match.pattern}')
+ self.raise_for(
+ value,
+ 'does not match {pattern}',
+ pattern=self._match.pattern,
+ )
return value
def long_description(self):
@@ -203,8 +208,11 @@ class EnsureStrPrefix(EnsureStr):
def __call__(self, value):
super().__call__(value)
if not value.startswith(self._prefix):
- raise ValueError("%r does not start with '%s'"
- % (value, self._prefix))
+ self.raise_for(
+ value,
+ "does not start with {prefix!r}",
+ prefix=self._prefix,
+ )
return value
def long_description(self):
@@ -226,7 +234,7 @@ class EnsureCallable(Constraint):
if hasattr(value, '__call__'):
return value
else:
- raise ValueError("value must be a callable")
+ self.raise_for(value, "must be a callable")
def short_description(self):
return 'callable'
@@ -285,7 +293,7 @@ class EnsureKeyChoice(EnsureChoice):
def __call__(self, value):
if self._key not in value:
- raise ValueError("value not dict-like")
+ self.raise_for(value, "must be dict-like")
super(EnsureKeyChoice, self).__call__(value[self._key])
return value
@@ -382,7 +390,7 @@ class EnsurePath(Constraint):
ref:
If set, defines a reference Path any given path is compared to. The
comparison operation is given by `ref_is`.
- ref_is: {'parent-or-identical'}
+ ref_is: {'parent-or-same-as', 'parent-of'}
Comparison operation to perform when `ref` is given.
dsarg: DatasetParameter, optional
If given, incoming paths are resolved in the following fashion:
@@ -399,6 +407,8 @@ class EnsurePath(Constraint):
self._ref = ref
self._ref_is = ref_is
self._dsarg = dsarg
+ assert self._ref_is in ('parent-or-same-as', 'parent-of'), \
+ 'Unrecognized `ref_is` operation label'
def __call__(self, value):
# turn it into the target type to make everything below
@@ -410,9 +420,9 @@ class EnsurePath(Constraint):
if self._is_format is not None:
is_abs = path.is_absolute()
if self._is_format == 'absolute' and not is_abs:
- raise ValueError(f'{path} is not an absolute path')
+ self.raise_for(path, 'is not an absolute path')
elif self._is_format == 'relative' and is_abs:
- raise ValueError(f'{path} is not a relative path')
+ self.raise_for(path, 'is not a relative path')
# resolve relative paths against a dataset, if given
if self._dsarg:
@@ -430,24 +440,30 @@ class EnsurePath(Constraint):
pass
if self._lexists is not None:
if self._lexists and mode is None:
- raise ValueError(f'{path} does not exist')
+ self.raise_for(path, 'does not exist')
elif not self._lexists and mode is not None:
- raise ValueError(f'{path} does (already) exist')
+ self.raise_for(path, 'does (already) exist')
if self._is_mode is not None:
if not self._is_mode(mode):
- raise ValueError(f'{path} does not match desired mode')
+ self.raise_for(path, 'does not match desired mode')
if self._ref:
ok = True
if self._ref_is == 'parent-or-same-as':
ok = (path == self._ref or self._ref in path.parents)
elif self._ref_is == 'parent-of':
ok = self._ref in path.parents
- else:
- raise ValueError('Unknown `ref_is` operation label')
+ else: # pragma: nocover
+ # this code cannot be reached with normal usage.
+ # it is prevented by an assertion in __init__()
+ raise RuntimeError('Unknown `ref_is` operation label')
if not ok:
- raise ValueError(
- f'{self._ref} is not {self._ref_is} {path}')
+ self.raise_for(
+ path,
+ '{ref} is not {ref_is} {path}',
+ ref=self._ref,
+ ref_is=self._ref_is,
+ )
return path
def for_dataset(self, dataset: DatasetParameter) -> Constraint:
diff --git a/datalad_next/constraints/compound.py b/datalad_next/constraints/compound.py
index 2b2b2ae..99fe8d6 100644
--- a/datalad_next/constraints/compound.py
+++ b/datalad_next/constraints/compound.py
@@ -73,21 +73,32 @@ class EnsureIterableOf(Constraint):
return self._item_constraint
def __call__(self, value):
- iter = self._iter_type(
- self._item_constraint(i) for i in value
- )
+ try:
+ iter = self._iter_type(
+ self._item_constraint(i) for i in value
+ )
+ except TypeError as e:
+ self.raise_for(
+ value,
+ "cannot coerce to target (item) type",
+ __caused_by__=e,
+ )
if self._min_len is not None or self._max_len is not None:
# only do this if necessary, generators will not support
# __len__, for example
iter_len = len(iter)
if self._min_len is not None and iter_len < self._min_len:
- raise ValueError(
- f'Length-{iter_len} iterable is shorter than '
- f'required minimum length {self._min_len}')
+ self.raise_for(
+ iter,
+ 'must have minimum length {len}',
+ len=self._min_len,
+ )
if self._max_len is not None and iter_len > self._max_len:
- raise ValueError(
- f'Length-{iter_len} iterable is longer than '
- f'required maximum length {self._max_len}')
+ self.raise_for(
+ iter,
+ 'must not exceed maximum length {len}',
+ len=self._max_len,
+ )
return iter
def short_description(self):
diff --git a/datalad_next/constraints/dataset.py b/datalad_next/constraints/dataset.py
index 7d4bb38..1b9752b 100644
--- a/datalad_next/constraints/dataset.py
+++ b/datalad_next/constraints/dataset.py
@@ -70,21 +70,20 @@ class EnsureDataset(Constraint):
# anticipate what require_dataset() could handle and fail if we got
# something else
elif not isinstance(value, (str, PurePath, type(None))):
- raise TypeError(f"Cannot create Dataset from {type(value)}")
+ self.raise_for(
+ value, "cannot create Dataset from {type}", type=type(value)
+ )
else:
ds = self._require_dataset(value)
assert ds
if self._installed is not None:
is_installed = ds.is_installed()
if self._installed is False and is_installed:
- raise ValueError(f'{ds} already exists locally')
+ self.raise_for(ds, 'already exists locally')
if self._installed and not is_installed:
- # for uniformity with require_dataset() below, use
- # this custom exception
- raise NoDatasetFound(f'{ds} is not installed')
+ self.raise_for(ds, 'not installed')
if self._require_id and not ds.id:
- raise NoDatasetFound(f'{ds} does not have a valid '
- f'datalad-id')
+ self.raise_for(ds, 'does not have a valid datalad-id')
return DatasetParameter(value, ds)
def short_description(self) -> str:
diff --git a/datalad_next/constraints/exceptions.py b/datalad_next/constraints/exceptions.py
index b7022fa..a3e4d9a 100644
--- a/datalad_next/constraints/exceptions.py
+++ b/datalad_next/constraints/exceptions.py
@@ -100,8 +100,16 @@ class ConstraintError(ValueError):
return self.args[1]
@property
- def caused_by(self):
- return self.context.get('__caused_by__', None)
+ def caused_by(self) -> Tuple[Exception] | None:
+ """Returns a tuple of any underlying exceptions that caused a violation
+ """
+ cb = self.context.get('__caused_by__', None)
+ if cb is None:
+ return
+ elif isinstance(cb, Exception):
+ return (cb,)
+ else:
+ return tuple(cb)
@property
def value(self):
diff --git a/datalad_next/constraints/git.py b/datalad_next/constraints/git.py
index 31c773e..ead342b 100644
--- a/datalad_next/constraints/git.py
+++ b/datalad_next/constraints/git.py
@@ -35,7 +35,7 @@ class EnsureGitRefName(Constraint):
def __call__(self, value: str) -> str:
if not value:
# simple, do here
- raise ValueError('refname must not be empty')
+ self.raise_for(value, 'refname must not be empty')
from datalad.runner import GitRunner, StdOutCapture
from datalad_next.exceptions import CommandError
@@ -54,7 +54,11 @@ class EnsureGitRefName(Constraint):
try:
out = runner.run(cmd, protocol=StdOutCapture)
except CommandError as e:
- raise ValueError(f'{value} is not a valid refname') from e
+ self.raise_for(
+ value,
+ 'is not a valid refname',
+ __caused_by__=e,
+ )
if self._normalize:
return out['stdout'].strip()
| datalad/datalad-next | 26bc01241a23a20ba8ca315b842a32701396ca2c | diff --git a/datalad_next/constraints/tests/test_basic.py b/datalad_next/constraints/tests/test_basic.py
index f189783..2748a15 100644
--- a/datalad_next/constraints/tests/test_basic.py
+++ b/datalad_next/constraints/tests/test_basic.py
@@ -298,9 +298,8 @@ def test_EnsurePath(tmp_path):
with pytest.raises(ValueError):
assert c(target)
assert c.short_description() == f'path that is parent-of {target}'
- c = EnsurePath(ref=target, ref_is='stupid')
- with pytest.raises(ValueError):
- c('doesnotmatter')
+ with pytest.raises(AssertionError):
+ c = EnsurePath(ref=target, ref_is='stupid')
def test_EnsurePath_fordataset(existing_dataset):
diff --git a/datalad_next/constraints/tests/test_compound.py b/datalad_next/constraints/tests/test_compound.py
index 486b79b..59907d2 100644
--- a/datalad_next/constraints/tests/test_compound.py
+++ b/datalad_next/constraints/tests/test_compound.py
@@ -64,7 +64,7 @@ def test_EnsureIterableOf():
with pytest.raises(ValueError):
# invalid specification min>max
EnsureIterableOf(list, bool, min_len=1, max_len=0)
- with pytest.raises(TypeError):
+ with pytest.raises(ValueError):
# item_constraint fails
EnsureIterableOf(list, dict)([5.6, 3.2])
with pytest.raises(ValueError):
diff --git a/datalad_next/constraints/tests/test_special_purpose.py b/datalad_next/constraints/tests/test_special_purpose.py
index 5d35fa5..167c6b2 100644
--- a/datalad_next/constraints/tests/test_special_purpose.py
+++ b/datalad_next/constraints/tests/test_special_purpose.py
@@ -85,7 +85,7 @@ def test_EnsureParameterConstraint():
Parameter(nargs=2),
(None, None))
assert c({'some': [3, 4]}) == dict(some=[3, 4])
- with pytest.raises(TypeError):
+ with pytest.raises(ValueError):
c({'some': 3})
with pytest.raises(ValueError):
c({'some': [3, 4, 5]})
@@ -119,9 +119,9 @@ def test_EnsureParameterConstraint():
with pytest.raises(ValueError):
c({'some': [[3, 2], [1]]})
# no iterable
- with pytest.raises(TypeError):
+ with pytest.raises(ValueError):
c({'some': [3, [1, 2]]})
- with pytest.raises(TypeError):
+ with pytest.raises(ValueError):
c({'some': 3})
# overwrite an item constraint and nargs
c = EnsureParameterConstraint.from_parameter(
@@ -297,7 +297,7 @@ def test_EnsureURL_match():
def test_EnsureDataset(tmp_path):
- with pytest.raises(TypeError):
+ with pytest.raises(ValueError):
# will not return a Dataset from sensless input
EnsureDataset()(5)
# by default the installation state is not checked
@@ -367,5 +367,5 @@ def test_EnsureDataset(tmp_path):
# bring it back later in case future tests need it
id = ds.config.get('datalad.dataset.id')
ds.config.unset('datalad.dataset.id', scope='branch')
- with pytest.raises(NoDatasetFound):
+ with pytest.raises(ValueError):
EnsureDataset(require_id=True)(tmp_path)
| Double-check use of `raise_for()` in any `Constraint` implementation
This is necessary for compatibility with the auto-generation of (structured) error messages in command parameter validation, and helps "harness code" to distinguish between a `ValueError` occurring due to an improper validator implementation vs a detected validation error. A dedicated exception makes this trivially accessible. | 0.0 | [
"datalad_next/constraints/tests/test_basic.py::test_EnsurePath",
"datalad_next/constraints/tests/test_compound.py::test_EnsureIterableOf",
"datalad_next/constraints/tests/test_special_purpose.py::test_EnsureParameterConstraint"
] | [
"datalad_next/constraints/tests/test_basic.py::test_noconstraint",
"datalad_next/constraints/tests/test_basic.py::test_int",
"datalad_next/constraints/tests/test_basic.py::test_float",
"datalad_next/constraints/tests/test_basic.py::test_bool",
"datalad_next/constraints/tests/test_basic.py::test_str",
"datalad_next/constraints/tests/test_basic.py::test_str_min_len",
"datalad_next/constraints/tests/test_basic.py::test_EnsureStr_match",
"datalad_next/constraints/tests/test_basic.py::test_EnsureStrPrefix",
"datalad_next/constraints/tests/test_basic.py::test_EnsureValue",
"datalad_next/constraints/tests/test_basic.py::test_none",
"datalad_next/constraints/tests/test_basic.py::test_callable",
"datalad_next/constraints/tests/test_basic.py::test_choice",
"datalad_next/constraints/tests/test_basic.py::test_keychoice",
"datalad_next/constraints/tests/test_basic.py::test_range",
"datalad_next/constraints/tests/test_basic.py::test_type_str",
"datalad_next/constraints/tests/test_compound.py::test_EnsureTupleOf",
"datalad_next/constraints/tests/test_compound.py::test_EnsureListOf",
"datalad_next/constraints/tests/test_compound.py::test_EnsureMapping",
"datalad_next/constraints/tests/test_compound.py::test_EnsureGeneratorFromFileLike",
"datalad_next/constraints/tests/test_compound.py::test_ConstraintWithPassthrough",
"datalad_next/constraints/tests/test_compound.py::test_WithDescription",
"datalad_next/constraints/tests/test_special_purpose.py::test_EnsureGitRefName",
"datalad_next/constraints/tests/test_special_purpose.py::test_EnsureParameterConstraint_passthrough",
"datalad_next/constraints/tests/test_special_purpose.py::test_EnsureJSONLines",
"datalad_next/constraints/tests/test_special_purpose.py::test_EnsureURL",
"datalad_next/constraints/tests/test_special_purpose.py::test_EnsureURL_match"
] | 2023-05-07 15:09:45+00:00 | 1,826 |
|
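For reference, the `raise_for()` pattern that the changeset above establishes looks roughly like this in a custom constraint. `EnsureEven` is a hypothetical example, not part of datalad-next.

```py
from datalad_next.constraints.base import Constraint


class EnsureEven(Constraint):
    """Hypothetical constraint, only to illustrate the raise_for() pattern"""

    def __call__(self, value):
        if value % 2:
            # structured violation report instead of a hand-rolled ValueError;
            # placeholders in the message are filled from the keyword context
            self.raise_for(value, 'must be divisible by {divisor}', divisor=2)
        return value

    def short_description(self):
        return 'even number'
```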
datalad__datalad-next-362 | diff --git a/datalad_next/constraints/exceptions.py b/datalad_next/constraints/exceptions.py
index a3e4d9a..2955c50 100644
--- a/datalad_next/constraints/exceptions.py
+++ b/datalad_next/constraints/exceptions.py
@@ -273,8 +273,14 @@ class ParameterConstraintContext:
"""Like ``.label`` but each parameter will also state a value"""
# TODO truncate the values after repr() to ensure a somewhat compact
# output
+ from .parameter import NoValue
return '{param}{descr}'.format(
- param=", ".join(f'{p}={values[p]!r}' for p in self.parameters),
+ param=", ".join(
+ f'{p}=<no value>'
+ if isinstance(values[p], NoValue)
+ else f'{p}={values[p]!r}'
+ for p in self.parameters
+ ),
descr=f" ({self.description})" if self.description else '',
)
diff --git a/datalad_next/constraints/parameter.py b/datalad_next/constraints/parameter.py
index 6c40d39..1ae892e 100644
--- a/datalad_next/constraints/parameter.py
+++ b/datalad_next/constraints/parameter.py
@@ -243,6 +243,7 @@ class EnsureCommandParameterization(Constraint):
self,
kwargs,
at_default=None,
+ required=None,
on_error='raise-early',
) -> Dict:
"""
@@ -256,6 +257,8 @@ class EnsureCommandParameterization(Constraint):
match their respective defaults. This is used for deciding whether
or not to process them with an associated value constraint (see the
``validate_defaults`` constructor argument).
+ required: set or None
+ Set of parameter names that are known to be required.
on_error: {'raise-early', 'raise-at-end'}
Flag how to handle constraint violation. By default, validation is
stopped at the first error and an exception is raised. When an
@@ -273,6 +276,18 @@ class EnsureCommandParameterization(Constraint):
"""
assert on_error in ('raise-early', 'raise-at-end')
+ exceptions = {}
+ missing_args = tuple(a for a in (required or []) if a not in kwargs)
+ if missing_args:
+ exceptions[ParameterConstraintContext(missing_args)] = \
+ ConstraintError(
+ self,
+ dict(zip(missing_args, [NoValue()] * len(missing_args))),
+ 'missing required arguments',
+ )
+ if on_error == 'raise-early':
+ raise CommandParametrizationError(exceptions)
+
# validators to work with. make a copy of the dict to be able to tailor
# them for this run only
# TODO copy likely not needed
@@ -290,7 +305,6 @@ class EnsureCommandParameterization(Constraint):
# strip all args provider args that have not been provided
ds_provider_params.intersection_update(kwargs)
- exceptions = {}
validated = {}
# process all parameters. starts with those that are needed as
# dependencies for others.
diff --git a/datalad_next/patches/interface_utils.py b/datalad_next/patches/interface_utils.py
index fa16d65..4bc5179 100644
--- a/datalad_next/patches/interface_utils.py
+++ b/datalad_next/patches/interface_utils.py
@@ -50,12 +50,14 @@ def get_allargs_as_kwargs(call, args, kwargs):
Returns
-------
- (dict, set)
+ (dict, set, set)
The first return value is a mapping of argument names to their respective
values.
The second return value in the tuple is a set of argument names for
which the effective value is identical to the default declared in the
signature of the callable.
+ The third value is a set with names of all mandatory arguments, whether
+ or not they are included in the returned mapping.
"""
from datalad_next.utils import getargspec
argspec = getargspec(call, include_kwonlyargs=True)
@@ -83,7 +85,14 @@ def get_allargs_as_kwargs(call, args, kwargs):
# API commands support more kwargs than what is discoverable
# from their signature...
#assert (nargs == len(kwargs_))
- return kwargs_, at_default
+ return (
+ # argument name/value mapping
+ kwargs_,
+ # names of arguments that are at their default
+ at_default,
+ # names of mandatory arguments (set for uniformity)
+ set(argspec.args),
+ )
# This function interface is taken from
@@ -116,7 +125,7 @@ def _execute_command_(
# for result filters and validation
# we need to produce a dict with argname/argvalue pairs for all args
# incl. defaults and args given as positionals
- allkwargs, at_default = get_allargs_as_kwargs(
+ allkwargs, at_default, required_args = get_allargs_as_kwargs(
cmd,
cmd_args,
{**cmd_kwargs, **exec_kwargs},
@@ -133,6 +142,7 @@ def _execute_command_(
lgr.debug('Command parameter validation for %s', interface)
validator_kwargs = dict(
at_default=at_default,
+ required=required_args or None,
)
# make immediate vs exhaustive parameter validation
# configurable
| datalad/datalad-next | 46db8c6669b8a57eebf85bf001043ac1b4851f2d | diff --git a/datalad_next/constraints/tests/test_cmdarg_validation.py b/datalad_next/constraints/tests/test_cmdarg_validation.py
index b551fb6..d06111c 100644
--- a/datalad_next/constraints/tests/test_cmdarg_validation.py
+++ b/datalad_next/constraints/tests/test_cmdarg_validation.py
@@ -262,6 +262,15 @@ def test_cmd_with_validation():
return_type='item-or-list', result_renderer='disabled',
)
+ # no call with a required argument missing
+ with pytest.raises(ValueError) as e:
+ CmdWithValidation.__call__()
+ exc_rendering = str(e.value)
+ # must label the issue correctly
+ assert 'missing required argument' in exc_rendering
+ # must identify the missing argument
+ assert 'spec=<no value>' in exc_rendering
+
#
# test dataset tailoring
| Insufficient arguments not handled well in `EnsureCommandParameterization`
Not giving all mandatory arguments via the Python API results in a plain
```
CommandParametrizationError: 0 command parameter constraint violation
```
while the CLI (where argparse intercepts this immediately) provides a usage synopsis and a clear error message like
```
error: the following arguments are required: TYPE, LOCATION
``` | 0.0 | [
"datalad_next/constraints/tests/test_cmdarg_validation.py::test_cmd_with_validation"
] | [
"datalad_next/constraints/tests/test_cmdarg_validation.py::test_multi_validation",
"datalad_next/constraints/tests/test_cmdarg_validation.py::test_invalid_multi_validation"
] | 2023-05-17 09:02:43+00:00 | 1,827 |
|
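A compact sketch of the new behavior, derived from the patch and test in the record above. The constructor call and import paths are assumptions about the surrounding API and may need adjustment.

```py
from datalad_next.constraints.exceptions import CommandParametrizationError
from datalad_next.constraints.parameter import EnsureCommandParameterization

validator = EnsureCommandParameterization(dict())
try:
    # simulate a call in which the mandatory `spec` argument was not provided
    validator({}, required={'spec'})
except CommandParametrizationError as e:
    # the rendering now labels the problem ('missing required arguments')
    # and names the affected argument as 'spec=<no value>'
    print(e)
```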
datalad__datalad-next-365 | diff --git a/datalad_next/url_operations/http.py b/datalad_next/url_operations/http.py
index 8d31c03..63ddb89 100644
--- a/datalad_next/url_operations/http.py
+++ b/datalad_next/url_operations/http.py
@@ -9,7 +9,6 @@ import sys
from typing import Dict
import requests
from requests_toolbelt import user_agent
-from requests_toolbelt.downloadutils.tee import tee as requests_tee
import www_authenticate
import datalad
@@ -247,24 +246,50 @@ class HttpUrlOperations(UrlOperations):
progress_id = self._get_progress_id(from_url, to_path)
# get download size, but not every server provides it
try:
+ # for compressed downloads the content length refers to the
+ # compressed content
expected_size = int(r.headers.get('content-length'))
except (ValueError, TypeError):
+ # some responses do not have a `content-length` header,
+ # even though they HTTP200 and deliver the content.
+ # example:
+ # https://github.com/datalad/datalad-next/pull/365#issuecomment-1557114109
expected_size = None
self._progress_report_start(
progress_id,
('Download %s to %s', from_url, to_path),
'downloading',
+ # can be None, and that is OK
expected_size,
)
fp = None
props = {}
try:
+ # we can only write to file-likes opened in bytes mode
fp = sys.stdout.buffer if to_path is None else open(to_path, 'wb')
- # TODO make chunksize a config item
- for chunk in requests_tee(r, fp):
+ # we need to track how much came down the pipe for progress
+ # reporting
+ downloaded_bytes = 0
+ # TODO make chunksize a config item, 65536 is the default in
+ # requests_toolbelt
+ for chunk in r.raw.stream(amt=65536, decode_content=True):
+ # update how much data was transferred from the remote server,
+ # but we cannot use the size of the chunk for that,
+ # because content might be downloaded with transparent
+ # (de)compression. ask the download stream itself for its
+ # "position"
+ if expected_size:
+ tell = r.raw.tell()
+ else:
+ tell = downloaded_bytes + len(chunk)
self._progress_report_update(
- progress_id, ('Downloaded chunk',), len(chunk))
+ progress_id,
+ ('Downloaded chunk',),
+ tell - downloaded_bytes,
+ )
+ fp.write(chunk)
+ downloaded_bytes = tell
# compute hash simultaneously
hasher.update(chunk)
props.update(hasher.get_hexdigest())
| datalad/datalad-next | 944ecdb003b550f1a0da9162fcc66481d5d306fe | diff --git a/datalad_next/url_operations/tests/test_http.py b/datalad_next/url_operations/tests/test_http.py
index 817d756..9850b00 100644
--- a/datalad_next/url_operations/tests/test_http.py
+++ b/datalad_next/url_operations/tests/test_http.py
@@ -1,4 +1,4 @@
-from pathlib import Path
+import gzip
import pytest
from ..any import AnyUrlOperations
@@ -57,3 +57,42 @@ def test_custom_http_headers_via_config(datalad_cfg):
auo = AnyUrlOperations()
huo = auo._get_handler(f'http://example.com')
assert huo._headers['X-Funky'] == 'Stuff'
+
+
+def test_transparent_decompression(tmp_path):
+ # this file is offered with transparent compression/decompression
+ # by the github webserver
+ url = 'https://raw.githubusercontent.com/datalad/datalad-next/' \
+ 'd0c4746425a48ef20e3b1c218e68954db9412bee/pyproject.toml'
+ dpath = tmp_path / 'test.txt'
+ ops = HttpUrlOperations()
+ ops.download(from_url=url, to_path=dpath)
+
+ # make sure it ends up on disk uncompressed
+ assert dpath.read_text() == \
+ '[build-system]\nrequires = ["setuptools >= 43.0.0", "wheel"]\n'
+
+
+def test_compressed_file_stay_compressed(tmp_path):
+ # this file is offered with transparent compression/decompression
+ # by the github webserver, but is also actually gzip'ed
+ url = \
+ 'https://github.com/datalad/datalad-neuroimaging/raw/' \
+ '05b45c8c15d24b6b894eb59544daa17159a88945/' \
+ 'datalad_neuroimaging/tests/data/files/nifti1.nii.gz'
+
+ # first confirm validity of the test approach, opening an
+ # uncompressed file should raise an exception
+ with pytest.raises(gzip.BadGzipFile):
+ testpath = tmp_path / 'uncompressed'
+ testpath.write_text('some')
+ with gzip.open(testpath, 'rb') as f:
+ f.read(1000)
+
+ # and now with a compressed file
+ dpath = tmp_path / 'test.nii.gz'
+ ops = HttpUrlOperations()
+ ops.download(from_url=url, to_path=dpath)
+ # make sure it ends up on disk compressed!
+ with gzip.open(dpath, 'rb') as f:
+ f.read(1000)
| http-downloads via `download` (`HttpUrlOperations`) result in gzipped files on disk | 0.0 | [
"datalad_next/url_operations/tests/test_http.py::test_transparent_decompression"
] | [
"datalad_next/url_operations/tests/test_http.py::test_http_url_operations",
"datalad_next/url_operations/tests/test_http.py::test_custom_http_headers_via_config",
"datalad_next/url_operations/tests/test_http.py::test_compressed_file_stay_compressed"
] | 2023-05-17 11:53:40+00:00 | 1,828 |
|
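A short usage sketch of the fixed behavior from the record above, mirroring the added test (the URL is taken from that test):

```py
from pathlib import Path

from datalad_next.url_operations.http import HttpUrlOperations

ops = HttpUrlOperations()
# content served with `Content-Encoding: gzip` is now decoded on the fly,
# so the file ends up on disk uncompressed
ops.download(
    from_url='https://raw.githubusercontent.com/datalad/datalad-next/'
             'd0c4746425a48ef20e3b1c218e68954db9412bee/pyproject.toml',
    to_path=Path('pyproject.toml'),
)
```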
datalad__datalad-next-371 | diff --git a/datalad_next/config/__init__.py b/datalad_next/config/__init__.py
new file mode 100644
index 0000000..cd5bc53
--- /dev/null
+++ b/datalad_next/config/__init__.py
@@ -0,0 +1,1 @@
+from datalad.config import ConfigManager
diff --git a/datalad_next/credman/manager.py b/datalad_next/credman/manager.py
index c5a6bf7..60124e2 100644
--- a/datalad_next/credman/manager.py
+++ b/datalad_next/credman/manager.py
@@ -26,6 +26,7 @@ from typing import (
)
import datalad
+from datalad_next.config import ConfigManager
from datalad_next.exceptions import (
CapturedException,
CommandError,
@@ -78,7 +79,7 @@ class CredentialManager(object):
'user_password': 'password',
}
- def __init__(self, cfg=None):
+ def __init__(self, cfg: ConfigManager | None = None):
"""
Parameters
diff --git a/datalad_next/url_operations/__init__.py b/datalad_next/url_operations/__init__.py
index d6f1edf..d4c218e 100644
--- a/datalad_next/url_operations/__init__.py
+++ b/datalad_next/url_operations/__init__.py
@@ -11,6 +11,7 @@ from typing import (
)
import datalad
+from datalad_next.config import ConfigManager
from datalad_next.utils import log_progress
from datalad_next.utils.multihash import (
MultiHash,
@@ -35,7 +36,7 @@ class UrlOperations:
This class provides a range of helper methods to aid computation of
hashes and progress reporting.
"""
- def __init__(self, *, cfg=None):
+ def __init__(self, *, cfg: ConfigManager | None = None):
"""
Parameters
----------
@@ -46,7 +47,7 @@ class UrlOperations:
self._cfg = cfg
@property
- def cfg(self):
+ def cfg(self) -> ConfigManager:
if self._cfg is None:
self._cfg = datalad.cfg
diff --git a/datalad_next/url_operations/any.py b/datalad_next/url_operations/any.py
index 258141d..9b2cd75 100644
--- a/datalad_next/url_operations/any.py
+++ b/datalad_next/url_operations/any.py
@@ -10,6 +10,7 @@ from pathlib import Path
import re
from typing import Dict
+from datalad_next.config import ConfigManager
from datalad_next.exceptions import CapturedException
from . import UrlOperations
@@ -59,7 +60,7 @@ class AnyUrlOperations(UrlOperations):
operations, such that held connections or cached credentials can be reused
efficiently.
"""
- def __init__(self, cfg=None):
+ def __init__(self, cfg: ConfigManager | None = None):
"""
Parameters
----------
diff --git a/docs/source/pyutils.rst b/docs/source/pyutils.rst
index ead2205..ef8e24c 100644
--- a/docs/source/pyutils.rst
+++ b/docs/source/pyutils.rst
@@ -8,6 +8,7 @@ Python utilities
:toctree: generated
commands.ValidatedInterface
+ config.ConfigManager
constraints
credman.manager
exceptions
| datalad/datalad-next | 57c4eebdcc6880a5c25c99b74dbacf865db3d27a | diff --git a/datalad_next/credman/tests/test_credman.py b/datalad_next/credman/tests/test_credman.py
index aafa36c..08b17eb 100644
--- a/datalad_next/credman/tests/test_credman.py
+++ b/datalad_next/credman/tests/test_credman.py
@@ -11,7 +11,7 @@
"""
import pytest
-from datalad.config import ConfigManager
+from datalad_next.config import ConfigManager
from ..manager import (
CredentialManager,
_get_cred_cfg_var,
 | Add canonical import for `ConfigManager` class
This is an essential component, and we need the import, at minimum, for type annotation. | 0.0 | [
"datalad_next/credman/tests/test_credman.py::test_credmanager",
"datalad_next/credman/tests/test_credman.py::test_query",
"datalad_next/credman/tests/test_credman.py::test_credman_get",
"datalad_next/credman/tests/test_credman.py::test_credman_get_guess_type",
"datalad_next/credman/tests/test_credman.py::test_credman_obtain"
] | [] | 2023-05-24 08:12:37+00:00 | 1,829 |
|
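The effect of the record above is small but handy for type annotations, e.g. in a hypothetical helper that only serves to show the canonical import in use:

```py
from __future__ import annotations

from datalad_next.config import ConfigManager


def my_helper(cfg: ConfigManager | None = None) -> None:
    """Hypothetical function using the canonical import in an annotation"""
```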
datalad__datalad-next-377 | diff --git a/datalad_next/types/__init__.py b/datalad_next/types/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/datalad_next/types/annexkey.py b/datalad_next/types/annexkey.py
new file mode 100644
index 0000000..74bbcb6
--- /dev/null
+++ b/datalad_next/types/annexkey.py
@@ -0,0 +1,48 @@
+from __future__ import annotations
+
+from dataclasses import dataclass
+import re
+
+
+# BACKEND[-sNNNN][-mNNNN][-SNNNN-CNNNN]--NAME
+_annexkey_regex = re.compile(
+ '(?P<backend>[A-Z0-9]+)'
+ '(|-s(?P<size>[0-9]+))'
+ '(|-m(?P<mtime>[0-9]+))'
+ '(|-S(?P<chunksize>[0-9]+)-C(?P<chunknumber>[0-9]+))'
+ '--(?P<name>.*)$'
+)
+
+
+@dataclass
+class AnnexKey:
+ """Representation of a git-annex key
+
+ https://git-annex.branchable.com/internals/key_format/
+ """
+ name: str
+ backend: str
+ size: int | None = None
+ mtime: int | None = None
+ chunksize: int | None = None
+ chunknumber: int | None = None
+
+ @classmethod
+ def from_str(cls, key: str):
+ key_matched = _annexkey_regex.match(key)
+ if not key_matched:
+ # without a sensible key there is no hope
+ raise ValueError(f'{key!r} is not a valid git-annex key')
+ return cls(**key_matched.groupdict())
+
+ def __str__(self) -> str:
+ return '{backend}{size}{mtime}{chunk}--{name}'.format(
+ name=self.name,
+ backend=self.backend,
+ size=f'-s{self.size}' if self.size else '',
+ mtime=f'-m{self.mtime}' if self.mtime else '',
+ # if me reading of the spec is correct, the two chunk props
+ # can only occur together
+ chunk=f'-S{self.chunksize}-C{self.chunknumber}'
+ if self.chunknumber else '',
+ )
diff --git a/docs/source/pyutils.rst b/docs/source/pyutils.rst
index ef8e24c..cff773d 100644
--- a/docs/source/pyutils.rst
+++ b/docs/source/pyutils.rst
@@ -23,3 +23,4 @@ Python utilities
utils.multihash
utils.requests_auth
tests.fixtures
+ types.annexkey
| datalad/datalad-next | aa90e77d1124c56ac31382e48ef71a50a1475fd3 | diff --git a/datalad_next/types/tests/__init__.py b/datalad_next/types/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/datalad_next/types/tests/test_annexkey.py b/datalad_next/types/tests/test_annexkey.py
new file mode 100644
index 0000000..80c68c0
--- /dev/null
+++ b/datalad_next/types/tests/test_annexkey.py
@@ -0,0 +1,9 @@
+from ..annexkey import AnnexKey
+
+
+def test_annexkey():
+ for key in (
+ 'MD5E-s792207360--985e680a221e47db05063a12b91d7d89.tar',
+ ):
+ # round-tripping for any key must give same outcome
+ assert key == str(AnnexKey.from_str(key))
| Establish `AnnexKey` class for any and all syntactic operations on keys
Something like this
```py
# BACKEND[-sNNNN][-mNNNN][-SNNNN-CNNNN]--NAME
_annexkey_regex = re.compile(
'(?P<backend>[A-Z0-9]+)'
'(|-s(?P<size>[0-9]+))'
'(|-m(?P<mtime>[0-9]+))'
'(|-S(?P<chunksize>[0-9]+)-C(?P<chunknumber>[0-9]+))'
'--(?P<name>.*)$'
)
@dataclass
class AnnexKey:
name: str
backend: str
size: int | None = None
mtime: int | None = None
chunksize: int | None = None
chunknumber: int | None = None
@classmethod
def from_str(cls, key):
key_matched = _annexkey_regex.match(key)
if not key_matched:
# without a sensible key there is no hope
raise ValueError(f'{key!r} is not a valid git-annex key')
return cls(**key_matched.groupdict())
def __str__(self):
return '{backend}{size}{mtime}{chunk}--{name}'.format(
name=self.name,
backend=self.backend,
size=f'-s{self.size}' if self.size else '',
mtime=f'-m{self.mtime}' if self.mtime else '',
# if me reading of the spec is correct, the two chunk props
# can only occur together
chunk=f'-S{self.chunksize}-C{self.chunknumber}'
if self.chunknumber else '',
)
```
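For illustration, a minimal round-trip sketch of how this would be used (continuing the snippet above; the key value is just an example):
```py
# hypothetical usage of the AnnexKey class sketched above
key_str = 'MD5E-s792207360--985e680a221e47db05063a12b91d7d89.tar'
key = AnnexKey.from_str(key_str)
assert key.backend == 'MD5E'
assert key.name == '985e680a221e47db05063a12b91d7d89.tar'
assert str(key) == key_str  # round-tripping reproduces the original key
```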
I am not sure where to put this. Maybe something like `datalad_next.models.annexkey`? | 0.0 | [
"datalad_next/types/tests/test_annexkey.py::test_annexkey"
] | [] | 2023-05-24 13:51:10+00:00 | 1,830 |
|
datalad__datalad-next-381 | diff --git a/datalad_next/types/annexkey.py b/datalad_next/types/annexkey.py
index 74bbcb6..037280b 100644
--- a/datalad_next/types/annexkey.py
+++ b/datalad_next/types/annexkey.py
@@ -1,3 +1,5 @@
+"""git-annex key representation"""
+
from __future__ import annotations
from dataclasses import dataclass
@@ -14,7 +16,7 @@ _annexkey_regex = re.compile(
)
-@dataclass
+@dataclass(frozen=True)
class AnnexKey:
"""Representation of a git-annex key
@@ -29,6 +31,7 @@ class AnnexKey:
@classmethod
def from_str(cls, key: str):
+ """Return an ``AnnexKey`` instance from a key string"""
key_matched = _annexkey_regex.match(key)
if not key_matched:
# without a sensible key there is no hope
diff --git a/datalad_next/types/archivist.py b/datalad_next/types/archivist.py
new file mode 100644
index 0000000..12e9b2b
--- /dev/null
+++ b/datalad_next/types/archivist.py
@@ -0,0 +1,143 @@
+"""``dl+archive:`` archive member locator"""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from pathlib import PurePosixPath
+import re
+
+from .annexkey import AnnexKey
+from .enums import ArchiveType
+
+
+# be relatively permissive
+_recognized_urls = re.compile(r'^dl\+archive:(?P<key>.*)#(?P<props>.*)')
+# each archive member is identified by a (relative) path inside
+# the archive.
+_archive_member_props = re.compile(
+ # a path may contain any char but '&'
+ # TODO check that something in the machinery ensures proper
+ # quoting
+ 'path=(?P<path>[^&]+)'
+ # size info (in bytes) is optional
+ '(&size=(?P<size>[0-9]+)|)'
+ # archive type label is optional
+ '(&atype=(?P<atype>[a-z0-9]+)|)'
+)
+
+
+@dataclass
+class ArchivistLocator:
+ """Representation of a ``dl+archive:`` archive member locator
+
+ These locators are used by the ``datalad-archives`` and ``archivist``
+ git-annex special remotes. They identify a member of a archive that is
+ itself identified by an annex key.
+
+ Each member is annotated with its size (in bytes). Optionally,
+ the file format type of the archive can be annotated too.
+
+ Syntax of ``dl+archives:`` locators
+ -----------------------------------
+
+ The locators the following minimal form::
+
+ dl+archive:<archive-key>#path=<path-in-archive>
+
+ where ``<archive-key>`` is a regular git-annex key of an archive file,
+ and ``<path-in-archive>`` is a POSIX-style relative path pointing to
+ a member within the archive.
+
+ Two optional, additional attributes ``size`` and ``atype`` are recognized
+ (only ``size`` is also understood by the ``datalad-archives``
+ special remote).
+
+ ``size`` declares the size of the (extracted) archive member in bytes::
+
+ dl+archive:<archive-key>#path=<path-in-archive>&size=<size-in-bytes>
+
+ ``atype`` declares the type of the containing archive using a label.
+ Currently recognized labels are ``tar`` (a TAR archive, compressed or not),
+ and ``zip`` (a ZIP archive). See
+ :class:`~datalad_next.types.enums.ArchiveType` for all recognized labels.
+
+ If no type information is given, :func:`ArchivistLocator.from_str()` will
+ try to determine the archive type from the archive key (via ``*E``-type
+ git-annex backends, such as DataLad's default ``MD5E``).
+
+ The order in the fragment part of the URL (after ``#``) is significant.
+ ``path`` must come first, followed by ``size`` or ``atype``. If both
+ ``size`` and ``atype`` are present, ``size`` must be declared first. A
+ complete example of a URL is::
+
+ dl+archive:MD5-s389--e9f624eb778e6f945771c543b6e9c7b2#path=dir/file.csv&size=234&atype=tar
+ """
+ akey: AnnexKey
+ member: PurePosixPath
+ size: int
+ # datalad-archives did not have the type info, we want to be
+ # able to handle those too, make optional
+ atype: ArchiveType | None = None
+
+ def __str__(self) -> str:
+ return 'dl+archive:{akey}#path={member}&size={size}{atype}'.format(
+ akey=self.akey,
+ # TODO needs quoting?
+ member=self.member,
+ size=self.size,
+ atype=f'&atype={self.atype.value}' if self.atype else '',
+ )
+
+ @classmethod
+ def from_str(cls, url: str):
+ """Return ``ArchivistLocator`` from ``str`` form"""
+ url_matched = _recognized_urls.match(url)
+ if not url_matched:
+ raise ValueError('Unrecognized dl+archives locator syntax')
+ url_matched = url_matched.groupdict()
+ # convert to desired type
+ akey = AnnexKey.from_str(url_matched['key'])
+
+ # archive member properties
+ props_matched = _archive_member_props.match(url_matched['props'])
+ if not props_matched:
+ # without at least a 'path' there is nothing we can do here
+ raise ValueError(
+ 'dl+archives locator contains invalid archive member '
+ f'specification: {url_matched["props"]!r}')
+ props_matched = props_matched.groupdict()
+ amember_path = PurePosixPath(props_matched['path'])
+ if amember_path.is_absolute():
+ raise ValueError(
+ 'dl+archives locator contains absolute archive member path')
+ if '..' in amember_path.parts:
+ raise ValueError(
+ 'dl+archives locator archive member path contains ".."')
+
+ # size is optional, regex ensure that it is an int
+ size = props_matched.get('size')
+
+ # archive type, could be None
+ atype = props_matched.get('atype')
+ if atype is not None:
+ # if given, most be known type
+ try:
+ atype = getattr(ArchiveType, atype)
+ except AttributeError as e:
+ raise ValueError(
+ 'dl+archives locator archive type unrecognized') from e
+
+ if atype is None and akey.backend.endswith('E'):
+ # try by key name extension
+ suf = PurePosixPath(akey.name).suffixes
+ if '.zip' == suf[-1]:
+ atype = ArchiveType.zip
+ elif '.tar' in suf:
+ atype = ArchiveType.tar
+
+ return cls(
+ akey=akey,
+ member=amember_path,
+ size=size,
+ atype=atype,
+ )
diff --git a/datalad_next/types/enums.py b/datalad_next/types/enums.py
new file mode 100644
index 0000000..ad26721
--- /dev/null
+++ b/datalad_next/types/enums.py
@@ -0,0 +1,13 @@
+"""Type ENUMs"""
+
+from enum import Enum
+
+
+class ArchiveType(Enum):
+ """Enumeration of archive types
+
+ Each one should have an associated ArchiveOperations handler.
+ """
+ # TODO the values could also be handler classes ...
+ tar = 'tar'
+ zip = 'zip'
diff --git a/docs/source/pyutils.rst b/docs/source/pyutils.rst
index cff773d..d93505e 100644
--- a/docs/source/pyutils.rst
+++ b/docs/source/pyutils.rst
@@ -24,3 +24,5 @@ Python utilities
utils.requests_auth
tests.fixtures
types.annexkey
+ types.archivist
+ types.enums
| datalad/datalad-next | 85250d5fce519e9ef54ecaff2a970958807197f0 | diff --git a/datalad_next/types/tests/test_annexkey.py b/datalad_next/types/tests/test_annexkey.py
index 80c68c0..a4e3878 100644
--- a/datalad_next/types/tests/test_annexkey.py
+++ b/datalad_next/types/tests/test_annexkey.py
@@ -1,9 +1,26 @@
+import pytest
+
from ..annexkey import AnnexKey
def test_annexkey():
for key in (
'MD5E-s792207360--985e680a221e47db05063a12b91d7d89.tar',
+ 'SHA256E-s31390--f50d7ac4c6b9031379986bc362fcefb65f1e52621ce1708d537e740fefc59cc0.mp3',
+ 'URL-s1899248--http&c%%ai.stanford.edu%,126nilsson%MLBOOK.pdf/URL-s1899248--http&c%%ai.stanford.edu%,126nilsson%MLBOOK.pdf',
):
# round-tripping for any key must give same outcome
assert key == str(AnnexKey.from_str(key))
+
+ # check that it can be used as a dict-key, i.e. is hashable
+ key = AnnexKey.from_str('MD5-s9--985e680a221e47db05063a12b91d7d89')
+ d = {key: 'some'}
+
+
+def test_annexkey_errors():
+ for wrong in (
+ 'MD5E-985e680a221e47db05063a12b91d7d89.tar',
+ 'MD5E-SUPRISE--985e680a221e47db05063a12b91d7d89.tar',
+ ):
+ with pytest.raises(ValueError):
+ AnnexKey.from_str(wrong)
diff --git a/datalad_next/types/tests/test_archivist.py b/datalad_next/types/tests/test_archivist.py
new file mode 100644
index 0000000..8f78163
--- /dev/null
+++ b/datalad_next/types/tests/test_archivist.py
@@ -0,0 +1,53 @@
+import pytest
+
+from ..annexkey import AnnexKey
+from ..archivist import ArchivistLocator
+from ..enums import ArchiveType
+
+some_key = 'MD5-s389--e9f624eb778e6f945771c543b6e9c7b2'
+
+
+def test_archivistlocator():
+ test_locator = \
+ f'dl+archive:{some_key}#path=dir/file.csv&size=234&atype=tar'
+
+ al = ArchivistLocator.from_str(test_locator)
+
+ assert al.akey == AnnexKey.from_str(some_key)
+ assert al.atype == ArchiveType.tar
+
+ # round trip
+ assert str(al) == test_locator
+
+ # type determination from key
+ assert ArchivistLocator.from_str(
+ 'dl+archive:MD5E-s1--e9f624eb778e6f945771c543b6e9c7b2.tar#path=f.txt'
+ ).atype == ArchiveType.tar
+ assert ArchivistLocator.from_str(
+ 'dl+archive:MD5E-s1--e9f624eb778e6f945771c543b6e9c7b2.zip#path=f.txt'
+ ).atype == ArchiveType.zip
+
+
+def test_archivistlocatori_errors():
+ for wrong in (
+ # no chance without prefix
+ 'bogus',
+ # not just a prefix or some bogus properties
+ 'dl+archive:',
+ 'dl+archive:#',
+ 'dl+archive:keything',
+ 'dl+archive:#props',
+ 'dl+archive:keything#props',
+ # a real key is required, but not sufficient
+ f'dl+archive:{some_key}#props',
+ # we require a member path, the rest is optional
+ f'dl+archive:{some_key}#size=123',
+ f'dl+archive:{some_key}#size=123&atype=tar',
+ # must be a proper POSIX path, relative, no ..
+ f'dl+archive:{some_key}#path=/dummy',
+ f'dl+archive:{some_key}#path=../dd',
+ # cannot work with unknown archive type
+ f'dl+archive:{some_key}#path=good&size=123&atype=eh!',
+ ):
+ with pytest.raises(ValueError):
+ ArchivistLocator.from_str(wrong)
| `AnnexKey` should be hashable
I would think that using this type as a key in a mapping would be a standard use case. It would require the following change:
```diff
diff --git a/datalad_next/types/annexkey.py b/datalad_next/types/annexkey.py
index 74bbcb6..5a85342 100644
--- a/datalad_next/types/annexkey.py
+++ b/datalad_next/types/annexkey.py
@@ -14,7 +14,7 @@ _annexkey_regex = re.compile(
)
-@dataclass
+@dataclass(frozen=True)
class AnnexKey:
"""Representation of a git-annex key
```
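With `frozen=True`, equal instances hash equally and the type can serve as a mapping key; a minimal sketch (mirroring the accompanying test):
```py
key = AnnexKey.from_str('MD5-s9--985e680a221e47db05063a12b91d7d89')
seen = {key: 'some'}  # now usable as a dict key
assert AnnexKey.from_str('MD5-s9--985e680a221e47db05063a12b91d7d89') in seen
```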
This would mean that post-init assignment of properties is not possible. I tend to think that this is OK. | 0.0 | [
"datalad_next/types/tests/test_annexkey.py::test_annexkey",
"datalad_next/types/tests/test_annexkey.py::test_annexkey_errors",
"datalad_next/types/tests/test_archivist.py::test_archivistlocator",
"datalad_next/types/tests/test_archivist.py::test_archivistlocatori_errors"
] | [] | 2023-05-26 06:44:51+00:00 | 1,831 |
|
datalad__datalad-next-399 | diff --git a/.appveyor.yml b/.appveyor.yml
index 4f3e923..97005d7 100644
--- a/.appveyor.yml
+++ b/.appveyor.yml
@@ -94,7 +94,10 @@ environment:
datalad.core
# do not run tests that ensure behavior we intentionally changed
# - test_gh1811: is included in next in an alternative implementation
- KEYWORDS: not test_gh1811
+ # - test_librarymode: assumes that CLI config overrides end up in the
+ # session `datalad.cfg.overrides`, but -next changes that behavior
+ # to have `.overrides` be uniformly limited to instance overrides
+ KEYWORDS: not test_gh1811 and not test_librarymode
APPVEYOR_BUILD_WORKER_IMAGE: Ubuntu2004
PY: 3.7
INSTALL_SYSPKGS: python3-virtualenv
diff --git a/datalad_next/config/utils.py b/datalad_next/config/utils.py
new file mode 100644
index 0000000..053aa68
--- /dev/null
+++ b/datalad_next/config/utils.py
@@ -0,0 +1,104 @@
+from __future__ import annotations
+
+from os import environ
+from typing import (
+ Dict,
+ Mapping,
+ Tuple,
+)
+
+
+def get_gitconfig_items_from_env() -> Mapping[str, str | Tuple[str, ...]]:
+ """Parse git-config ENV (``GIT_CONFIG_COUNT|KEY|VALUE``) and return as dict
+
+ This implementation does not use ``git-config`` directly, but aims to
+ mimic its behavior with respect to parsing the environment as much
+ as possible.
+
+ Raises
+ ------
+ ValueError
+ Whenever ``git-config`` would also error out, and includes an
+ message in the respective exception that resembles ``git-config``'s
+ for that specific case.
+
+ Returns
+ -------
+ dict
+ Configuration key-value mappings. When a key is declared multiple
+ times, the respective values are aggregated in reported as a tuple
+ for that specific key.
+ """
+ items: Dict[str, str | Tuple[str, ...]] = {}
+ for k, v in ((_get_gitconfig_var_from_env(i, 'key'),
+ _get_gitconfig_var_from_env(i, 'value'))
+ for i in range(_get_gitconfig_itemcount())):
+ val = items.get(k)
+ if val is None:
+ items[k] = v
+ elif isinstance(val, tuple):
+ items[k] = val + (v,)
+ else:
+ items[k] = (val, v)
+ return items
+
+
+def _get_gitconfig_itemcount() -> int:
+ try:
+ return int(environ.get('GIT_CONFIG_COUNT', '0'))
+ except (TypeError, ValueError) as e:
+ raise ValueError("bogus count in GIT_CONFIG_COUNT") from e
+
+
+def _get_gitconfig_var_from_env(nid: int, kind: str) -> str:
+ envname = f'GIT_CONFIG_{kind.upper()}_{nid}'
+ var = environ.get(envname)
+ if var is None:
+ raise ValueError(f"missing config {kind} {envname}")
+ if kind != 'key':
+ return var
+ if not var:
+ raise ValueError(f"empty config key {envname}")
+ if '.' not in var:
+ raise ValueError(f"key {envname} does not contain a section: {var}")
+ return var
+
+
+def set_gitconfig_items_in_env(items: Mapping[str, str | Tuple[str, ...]]):
+ """Set git-config ENV (``GIT_CONFIG_COUNT|KEY|VALUE``) from a mapping
+
+ Any existing declaration of configuration items in the environment is
+ replaced. Any ENV variable of a *valid* existing declaration is removed,
+ before the set configuration items are posted in the ENV.
+
+ Multi-value configuration keys are supported (values provided as a tuple).
+
+ No verification (e.g., of syntax compliance) is performed.
+ """
+ _clean_env_from_gitconfig_items()
+
+ count = 0
+ for key, value in items.items():
+ # homogeneous processing of multiple value items, and single values
+ values = value if isinstance(value, tuple) else (value,)
+ for v in values:
+ environ[f'GIT_CONFIG_KEY_{count}'] = key
+ environ[f'GIT_CONFIG_VALUE_{count}'] = v
+ count += 1
+ if count:
+ environ['GIT_CONFIG_COUNT'] = str(count)
+
+
+def _clean_env_from_gitconfig_items():
+ # we only care about intact specifications here, if there was cruft
+ # to start with, we have no responsibilities
+ try:
+ count = _get_gitconfig_itemcount()
+ except ValueError:
+ return
+
+ for i in range(count):
+ environ.pop(f'GIT_CONFIG_KEY_{i}', None)
+ environ.pop(f'GIT_CONFIG_VALUE_{i}', None)
+
+ environ.pop('GIT_CONFIG_COUNT', None)
diff --git a/datalad_next/patches/cli_configoverrides.py b/datalad_next/patches/cli_configoverrides.py
new file mode 100644
index 0000000..59276b5
--- /dev/null
+++ b/datalad_next/patches/cli_configoverrides.py
@@ -0,0 +1,50 @@
+from datalad.config import _update_from_env as _update_from_datalad_env
+from datalad.cli.helpers import _parse_overrides_from_cmdline
+
+from datalad_next.config.utils import (
+ get_gitconfig_items_from_env,
+ set_gitconfig_items_in_env,
+)
+
+from . import apply_patch
+
+
+def parse_overrides_from_cmdline(cmdlineargs):
+ # read from cmdlineargs first to error on any syntax issues
+ # before any other processing
+ cli_overrides = _parse_overrides_from_cmdline(cmdlineargs)
+
+ # reuse datalad-core implementation of datalad-specific ENV parsing
+ # for config items
+ overrides = {}
+ _update_from_datalad_env(overrides)
+
+ # let CLI settings override any ENV -- in-line with the behavior of Git
+ overrides.update(cli_overrides)
+
+ # read any existing GIT_CONFIG ENV vars and superimpose our
+ # overrides on them, repost in ENV using git-native approach.
+ # This will apply the overrides to any git(-config) calls
+ # in this process and any subprocess
+ gc_overrides = get_gitconfig_items_from_env()
+ gc_overrides.update(overrides)
+ set_gitconfig_items_in_env(gc_overrides)
+
+ # we do not actually disclose any of these overrides.
+ # the CLI runs a `datalad.cfg.reload(force=True)`
+ # immediately after executing this function and thereby
+ # pulls in the overrides we just posted into the ENV
+ # here. This change reduced the scope of
+ # `datalad.cfg.overrides` to be mere instance overrides
+ # and no longer process overrides. This rectifies the mismatch
+ # between appearance and actual impact of this information
+ # in the ConfigManager
+ return {}
+
+
+apply_patch(
+ 'datalad.cli.helpers', None, '_parse_overrides_from_cmdline',
+ parse_overrides_from_cmdline,
+ msg='Enable posting DataLad config overrides CLI/ENV as '
+ 'GIT_CONFIG items in process ENV',
+)
diff --git a/datalad_next/patches/enabled.py b/datalad_next/patches/enabled.py
index 5b82ed4..84822c9 100644
--- a/datalad_next/patches/enabled.py
+++ b/datalad_next/patches/enabled.py
@@ -1,4 +1,5 @@
from . import (
+ cli_configoverrides,
commanderror,
common_cfg,
annexrepo,
| datalad/datalad-next | 73ec970bc038b55999bf6ba240314cc7adb7aecb | diff --git a/datalad_next/config/tests/__init__.py b/datalad_next/config/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/datalad_next/config/tests/test_utils.py b/datalad_next/config/tests/test_utils.py
new file mode 100644
index 0000000..f084f82
--- /dev/null
+++ b/datalad_next/config/tests/test_utils.py
@@ -0,0 +1,146 @@
+
+import pytest
+
+from .. import utils # for patching environ
+
+from ..utils import (
+ get_gitconfig_items_from_env,
+ set_gitconfig_items_in_env,
+)
+
+
+def test_get_gitconfig_items_from_env(monkeypatch):
+ with monkeypatch.context() as m:
+ # without the COUNT the rest does not matter and we always
+ # get an empty dict
+ m.delenv('GIT_CONFIG_COUNT', raising=False)
+ assert get_gitconfig_items_from_env() == {}
+
+ with monkeypatch.context() as m:
+ # setting zero items, also makes everything else irrelevant
+ m.setenv('GIT_CONFIG_COUNT', '0')
+ assert get_gitconfig_items_from_env() == {}
+
+ with monkeypatch.context() as m:
+ # predictable error for botched count
+ m.setenv('GIT_CONFIG_COUNT', 'rubbish')
+ with pytest.raises(ValueError) as e:
+ get_gitconfig_items_from_env()
+ assert 'bogus count in GIT_CONFIG_COUNT' in str(e)
+
+ # bunch of std error conditions
+ for env, excstr in (
+ ({'GIT_CONFIG_COUNT': 1,
+ 'GIT_CONFIG_KEY_0': 'section.name'},
+ 'missing config value'),
+ ({'GIT_CONFIG_COUNT': 1,
+ 'GIT_CONFIG_VALUE_0': 'value'},
+ 'missing config key'),
+ ({'GIT_CONFIG_COUNT': 1,
+ 'GIT_CONFIG_KEY_0': '',
+ 'GIT_CONFIG_VALUE_0': 'value'},
+ 'empty config key'),
+ ({'GIT_CONFIG_COUNT': 1,
+ 'GIT_CONFIG_KEY_0': 'nosection',
+ 'GIT_CONFIG_VALUE_0': 'value'},
+ 'does not contain a section'),
+ ):
+ with monkeypatch.context() as m:
+ m.setattr(utils, 'environ', env)
+ with pytest.raises(ValueError) as e:
+ get_gitconfig_items_from_env()
+ assert excstr in str(e)
+
+ # proper functioning
+ for env, target in (
+ ({'GIT_CONFIG_COUNT': 1,
+ 'GIT_CONFIG_KEY_0': 'section.name',
+ 'GIT_CONFIG_VALUE_0': 'value'},
+ {'section.name': 'value'}),
+ ({'GIT_CONFIG_COUNT': 2,
+ 'GIT_CONFIG_KEY_0': 'section.name1',
+ 'GIT_CONFIG_VALUE_0': 'value1',
+ 'GIT_CONFIG_KEY_1': 'section.name2',
+ 'GIT_CONFIG_VALUE_1': 'value2'},
+ {'section.name1': 'value1', 'section.name2': 'value2'}),
+ # double-specification appends
+ # ❯ GIT_CONFIG_COUNT=2 \
+ # GIT_CONFIG_KEY_0=section.name \
+ # GIT_CONFIG_VALUE_0=val1 \
+ # GIT_CONFIG_KEY_1=section.name \
+ # GIT_CONFIG_VALUE_1=val2 \
+ # git config --list --show-origin | grep 'command line:'
+ # command line: section.name=val1
+ # command line: section.name=val2
+ ({'GIT_CONFIG_COUNT': 3,
+ 'GIT_CONFIG_KEY_0': 'section.name',
+ 'GIT_CONFIG_VALUE_0': 'value0',
+ 'GIT_CONFIG_KEY_1': 'section.name',
+ 'GIT_CONFIG_VALUE_1': 'value1',
+ 'GIT_CONFIG_KEY_2': 'section.name',
+ 'GIT_CONFIG_VALUE_2': 'value2'},
+ {'section.name': ('value0', 'value1', 'value2')}),
+ ):
+ with monkeypatch.context() as m:
+ m.setattr(utils, 'environ', env)
+ assert get_gitconfig_items_from_env() == target
+
+
+def test_set_gitconfig_items_in_env(monkeypatch):
+ for start, items, target in (
+ # giving nothing preserves statusquo
+ ({}, {}, {}),
+ ({'DUMMY': 'value'}, {}, {'DUMMY': 'value'}),
+ # fixable specification is cleaned up
+ ({'GIT_CONFIG_COUNT': '526'}, {}, {}),
+ # but it has limits
+ ({'GIT_CONFIG_COUNT': 'nochance'}, {},
+ {'GIT_CONFIG_COUNT': 'nochance'}),
+ # and there is no exhaustive search
+ ({'GIT_CONFIG_KEY_3': 'dummy'}, {}, {'GIT_CONFIG_KEY_3': 'dummy'}),
+ # virgin territory
+ ({}, {'section.name': 'value'},
+ {'GIT_CONFIG_COUNT': '1',
+ 'GIT_CONFIG_KEY_0': 'section.name',
+ 'GIT_CONFIG_VALUE_0': 'value'}),
+ # "set" means "replace, not amend
+ ({'GIT_CONFIG_COUNT': '1',
+ 'GIT_CONFIG_KEY_0': 'section.name',
+ 'GIT_CONFIG_VALUE_0': 'value'},
+ {'altsection.name2': 'value2'},
+ {'GIT_CONFIG_COUNT': '1',
+ 'GIT_CONFIG_KEY_0': 'altsection.name2',
+ 'GIT_CONFIG_VALUE_0': 'value2'}),
+ # full cleanupage
+ ({'GIT_CONFIG_COUNT': '2',
+ 'GIT_CONFIG_KEY_0': 'section.name',
+ 'GIT_CONFIG_VALUE_0': 'value',
+ 'GIT_CONFIG_KEY_1': 'altsection.name2',
+ 'GIT_CONFIG_VALUE_1': 'value2'},
+ {}, {}),
+ # multi-value support, order preserved
+ ({}, {'section.name': ('c', 'a', 'b')},
+ {'GIT_CONFIG_COUNT': '3',
+ 'GIT_CONFIG_KEY_0': 'section.name',
+ 'GIT_CONFIG_VALUE_0': 'c',
+ 'GIT_CONFIG_KEY_1': 'section.name',
+ 'GIT_CONFIG_VALUE_1': 'a',
+ 'GIT_CONFIG_KEY_2': 'section.name',
+ 'GIT_CONFIG_VALUE_2': 'b'}),
+ ):
+ with monkeypatch.context() as m:
+ env = dict(start)
+ m.setattr(utils, 'environ', env)
+ set_gitconfig_items_in_env(items)
+ assert env == target
+
+
+def test_get_set_gitconfig_env_roundtrip(monkeypatch):
+ items = {'section.name': ('c', 'a', 'b'),
+ 'space section.na me.so me': 'v al'}
+ with monkeypatch.context() as m:
+ env = {}
+ m.setattr(utils, 'environ', env)
+ # feed in copy to ensure validity of the test
+ set_gitconfig_items_in_env(dict(items))
+ assert get_gitconfig_items_from_env() == items
diff --git a/datalad_next/patches/tests/test_cli_configoverrides.py b/datalad_next/patches/tests/test_cli_configoverrides.py
new file mode 100644
index 0000000..b1656e3
--- /dev/null
+++ b/datalad_next/patches/tests/test_cli_configoverrides.py
@@ -0,0 +1,18 @@
+from datalad_next.utils import chpwd
+from datalad_next.tests.utils import run_main
+
+
+def test_cli_configoverrides(existing_dataset):
+ # test whether a `datalad -c ...` is effective within the
+ # execution environment of a subprocess (for a non-datalad
+ # configuration item
+ with chpwd(existing_dataset.path):
+ out, err = run_main(
+ [
+ '-c', 'bogusdataladtestsec.subsec=unique',
+ 'run',
+ 'git config bogusdataladtestsec.subsec',
+ ],
+ # git-config would fail, if the config item is unknown
+ exit_code=0,
+ )
| Adjust `ConfigManager` to post overrides into the ENV
Overrides can come in via the CLI, or be directly set in a manager instance. Given that overrides are somewhat global, it makes sense to post them in the process ENV too.
Here is a concrete example of where that would be useful https://github.com/psychoinformatics-de/knowledge-base/issues/15#issuecomment-1522907177 (instead of having to go git-native, a declaration via `datalad -c ...` would be sufficient).
This would also make a runner-specific change to pass on configuration, as proposed in https://github.com/datalad/datalad/pull/7344 (see issue https://github.com/datalad/datalad/issues/7352) unnecessary. While this would work as a solution for the clone issue linked above too, it would be limited to execution via the datalad runner. Simply posting overrides in the process ENV via `GIT_CONFIG_COUNT= GIT_CONFIG_KEY_ GIT_CONFIG_VALUE_` would be applicable to any means of executing subprocesses (e.g., plain `subprocess.run` -- something that is even done in the datalad codebase).
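As a minimal illustration of that git-native mechanism (the config item name is only a placeholder), such a declaration is honored by every `git` invocation in the process and its children, regardless of how they are spawned:
```py
import os
import subprocess

# declare one config item in Git's native environment format
env = dict(
    os.environ,
    GIT_CONFIG_COUNT='1',
    GIT_CONFIG_KEY_0='bogussec.subsec',
    GIT_CONFIG_VALUE_0='unique',
)
# any git call that inherits this environment sees the override
out = subprocess.run(
    ['git', 'config', 'bogussec.subsec'],
    env=env, capture_output=True, text=True, check=True,
)
assert out.stdout.strip() == 'unique'
```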
Issues this would address
- https://github.com/datalad/datalad/issues/3456 (closed but unresolved)
- https://github.com/datalad/datalad/issues/4119
### Challenges:
#### Interference with Git
If the Git-native variables are already present in the environment (see above), we need to ensure that any DataLad modification amends the existing setup and does not strip settings.
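The helper pair introduced in the accompanying patch (`datalad_next.config.utils`) is meant to address exactly this; a minimal sketch of the read-superimpose-repost flow (the override value is a placeholder):
```py
from datalad_next.config.utils import (
    get_gitconfig_items_from_env,
    set_gitconfig_items_in_env,
)

overrides = {'bogussec.subsec': 'unique'}  # e.g. collected from `datalad -c`
gc_items = dict(get_gitconfig_items_from_env())  # keep any pre-existing declarations
gc_items.update(overrides)  # superimpose, do not strip
set_gitconfig_items_in_env(gc_items)
# from here on, this process and all its subprocesses see the merged declaration
```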
#### Override semantics
The nature of "overrides" in the datalad configuration is underspecified. Relevant statements from the documentation are:
- environment variables can be used to override any datalad configuration, and have precedence over any other configuration scope (any further elaboration is specific to `DATALAD_*` type variables).
- re `DATALAD_CONFIG_OVERRIDES_JSON`: if both individual configuration variables *and* JSON-overrides are used, the former take precedence over the latter, overriding the respective *individual* settings from configurations declared in the JSON-overrides
- the `configuration` command is not able to query overrides separately
- `ConfigManager` docs say: 'override' limits the modification to the `ConfigManager` instance, and the assigned value overrides any setting from any other source [remark: the use of "instance" is a concern here]
- `ConfigManager` declares: Each instance carries a public `overrides` attribute. This dictionary contains variables that override any setting read from a file. The overrides are persistent across reloads [remark: limitation to "from a file" is relevant here]
- CLI config overrides (`datalad -c`) become `ConfigManager` instance overrides
#### Potential inconsistencies
- on `ConfigManager.reload()` instance overrides are applied unconditionally to the "merged" config, but overrides from the ENV are applied only when `scope` is not `branch`. `ConfigManager.get_from_source()` implements the same logic.
- `ConfigManager` runs `git config`. If `GIT_CONFIG_COUNT= GIT_CONFIG_KEY_ GIT_CONFIG_VALUE_` are declared, this should apply these git-native overrides, but datalad "sees" them. It needs to be checked whether scope associations are inferred properly. Possibly, `ConfigManager` could afterwards simply replace these settings, because it did already "read them in".
  - No, that is not possible. The settings are "seen" (git-config reports them as origin='command line'), and the config manager lumps them all into a `git` category. At this point they are no longer detectable as overrides. If the git-native declaration is then replaced, any sub-process will no longer get to see them, because they only live in the parent process's `ConfigManager`
- Overrides from ENV are not reported by `ConfigManager.repr()` (only instance overrides)
#### Threats
If the implementation is changed to post config overrides in git-native format into the ENV, we blur the distinction between ENV-overrides (cannot be manipulated via the `ConfigManager`, are applied differently than instance overrides), and instance/CLI overrides. Essentially, any instance override would affect the ENV. However, posting in Git-native format does not simply remove the distinction of instance and ENV overrides (although that may be desirable, given the complexity increase).
It remains to be evaluated how git-native overrides interact with the attribution of origin of a configuration. Importantly, Git distinguishes a dedicated scope `command` for this (separate from `system`, `global`, `local`, `worktree` (which would be most similar to our `branch`)). For this `command` scope, git-config says:
> These environment variables will override values in configuration files, but will be overridden by any explicit options passed via `git -c`
This is both similar to and different from our notion of overrides. Yes, they are overrides, but the CLI setting has the last word, whereas in datalad the ENV overrides the CLI.
The latter could be aligned by the datalad CLI resorting to posting its `-c` setting via the Git-native mechanism into the ENV. This would give them the highest precedence for datalad-internal processing too. It would also allow for a declaration of any `datalad -c X`, in exactly the same way and with the same semantics as `git -c X`, and also result in any internal git call to behave as if called in that fashion.
| 0.0 | [
"datalad_next/config/tests/test_utils.py::test_get_gitconfig_items_from_env",
"datalad_next/config/tests/test_utils.py::test_set_gitconfig_items_in_env",
"datalad_next/config/tests/test_utils.py::test_get_set_gitconfig_env_roundtrip"
] | [] | 2023-06-02 07:52:56+00:00 | 1,832 |
|
datalad__datalad-next-464 | diff --git a/changelog.d/20231003_111547_michael.hanke_bf_462.md b/changelog.d/20231003_111547_michael.hanke_bf_462.md
new file mode 100644
index 0000000..b03c6af
--- /dev/null
+++ b/changelog.d/20231003_111547_michael.hanke_bf_462.md
@@ -0,0 +1,7 @@
+### 🐛 Bug Fixes
+
+- `FileSystemItem.from_path()` now honors its `link_target` parameter, and
+ resolves a target for any symlink item conditional on this setting.
+ Previously, a symlink target was always resolved.
+ Fixes https://github.com/datalad/datalad-next/issues/462 via
+ https://github.com/datalad/datalad-next/pull/464 (by @mih)
diff --git a/datalad_next/iter_collections/utils.py b/datalad_next/iter_collections/utils.py
index 0f00a2e..91fcdc4 100644
--- a/datalad_next/iter_collections/utils.py
+++ b/datalad_next/iter_collections/utils.py
@@ -96,7 +96,7 @@ class FileSystemItem(PathBasedItem, TypedItem):
uid=cstat.st_uid,
gid=cstat.st_gid,
)
- if ctype == FileSystemItemType.symlink:
+ if link_target and ctype == FileSystemItemType.symlink:
# could be p.readlink() from PY3.9+
item.link_target = PurePath(os.readlink(path))
return item
| datalad/datalad-next | e5a2e403fbe669f638b101e5f65494c6596fff97 | diff --git a/datalad_next/iter_collections/tests/test_utils.py b/datalad_next/iter_collections/tests/test_utils.py
new file mode 100644
index 0000000..1393431
--- /dev/null
+++ b/datalad_next/iter_collections/tests/test_utils.py
@@ -0,0 +1,32 @@
+from datalad_next.tests.utils import skip_wo_symlink_capability
+
+from ..utils import FileSystemItem
+
+
+def test_FileSystemItem(tmp_path):
+ testfile = tmp_path / 'file1.txt'
+ testfile_content = 'content'
+ testfile.write_text(testfile_content)
+
+ item = FileSystemItem.from_path(testfile)
+ assert item.size == len(testfile_content)
+ assert item.link_target is None
+
+
+@skip_wo_symlink_capability
+def test_FileSystemItem_linktarget(tmp_path):
+ testfile = tmp_path / 'file1.txt'
+ testfile_content = 'short'
+ testfile.write_text(testfile_content)
+ testlink = tmp_path / 'link'
+ testlink.symlink_to(testfile)
+
+ item = FileSystemItem.from_path(testlink)
+ assert testfile.samefile(item.link_target)
+ # size of the link file does not anyhow propagate the size of the
+ # link target
+ assert item.size != len(testfile_content)
+
+ # we can disable link resolution
+ item = FileSystemItem.from_path(testlink, link_target=False)
+ assert item.link_target is None
| `FileSystemItem.from_path()` ignores `link_target=False` parameter
It will call `readlink` unconditionally for any symlink.
The following patch should fix it. A corresponding test is TODO.
```diff
diff --git a/datalad_next/iter_collections/utils.py b/datalad_next/iter_collections/utils.py
index 0f00a2e..91fcdc4 100644
--- a/datalad_next/iter_collections/utils.py
+++ b/datalad_next/iter_collections/utils.py
@@ -96,7 +96,7 @@ class FileSystemItem(PathBasedItem, TypedItem):
uid=cstat.st_uid,
gid=cstat.st_gid,
)
- if ctype == FileSystemItemType.symlink:
+ if link_target and ctype == FileSystemItemType.symlink:
# could be p.readlink() from PY3.9+
item.link_target = PurePath(os.readlink(path))
return item
``` | 0.0 | [
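A minimal usage sketch of the intended behavior (the path is a placeholder for an existing symlink):
```py
from pathlib import Path
from datalad_next.iter_collections.utils import FileSystemItem

link = Path('somelink')  # assumed to point at an existing symlink
item = FileSystemItem.from_path(link, link_target=False)
assert item.link_target is None  # no readlink() is performed in this case
```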
"datalad_next/iter_collections/tests/test_utils.py::test_FileSystemItem_linktarget"
] | [
"datalad_next/iter_collections/tests/test_utils.py::test_FileSystemItem"
] | 2023-10-03 09:20:10+00:00 | 1,833 |
|
datalad__datalad-next-495 | diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml
new file mode 100644
index 0000000..e1e2d36
--- /dev/null
+++ b/.github/workflows/mypy.yml
@@ -0,0 +1,26 @@
+name: Type annotation
+
+on:
+ push:
+ paths:
+ - '*.py'
+
+jobs:
+ mypy:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Setup Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: 3.8
+ architecture: x64
+ - name: Checkout
+ uses: actions/checkout@v3
+ - name: Install mypy
+ run: pip install mypy
+ - name: Run mypy
+ uses: sasanquaneuf/mypy-github-action@releases/v1
+ with:
+ checkName: 'mypy' # NOTE: this needs to be the same as the job name
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/changelog.d/20231023_064405_michael.hanke_www_auth.md b/changelog.d/20231023_064405_michael.hanke_www_auth.md
new file mode 100644
index 0000000..f4752d5
--- /dev/null
+++ b/changelog.d/20231023_064405_michael.hanke_www_auth.md
@@ -0,0 +1,8 @@
+### 🏠 Internal
+
+- The `www-authenticate` dependencies is dropped. The functionality is
+ replaced by a `requests`-based implementation of an alternative parser.
+ This trims the dependency footprint and facilitates Debian-packaging.
+ The previous test cases are kept and further extended.
+ Fixes https://github.com/datalad/datalad-next/issues/493 via
+ https://github.com/datalad/datalad-next/pull/495 (by @mih)
diff --git a/datalad_next/url_operations/http.py b/datalad_next/url_operations/http.py
index 854677c..5d660e0 100644
--- a/datalad_next/url_operations/http.py
+++ b/datalad_next/url_operations/http.py
@@ -9,11 +9,13 @@ import sys
from typing import Dict
import requests
from requests_toolbelt import user_agent
-import www_authenticate
import datalad
-from datalad_next.utils.requests_auth import DataladAuth
+from datalad_next.utils.requests_auth import (
+ DataladAuth,
+ parse_www_authenticate,
+)
from . import (
UrlOperations,
UrlOperationsRemoteError,
@@ -233,7 +235,7 @@ class HttpUrlOperations(UrlOperations):
headers=headers,
)
if 'www-authenticate' in req.headers:
- props['auth'] = www_authenticate.parse(
+ props['auth'] = parse_www_authenticate(
req.headers['www-authenticate'])
props['is_redirect'] = True if req.history else False
props['status_code'] = req.status_code
diff --git a/datalad_next/utils/requests_auth.py b/datalad_next/utils/requests_auth.py
index 62cb5a4..fb4f3ce 100644
--- a/datalad_next/utils/requests_auth.py
+++ b/datalad_next/utils/requests_auth.py
@@ -7,7 +7,6 @@ import logging
from typing import Dict
from urllib.parse import urlparse
import requests
-import www_authenticate
from datalad_next.config import ConfigManager
from datalad_next.utils import CredentialManager
@@ -16,7 +15,77 @@ from datalad_next.utils.http_helpers import get_auth_realm
lgr = logging.getLogger('datalad.ext.next.utils.requests_auth')
-__all__ = ['DataladAuth', 'HTTPBearerTokenAuth']
+__all__ = ['DataladAuth', 'HTTPBearerTokenAuth', 'parse_www_authenticate']
+
+
+def parse_www_authenticate(hdr: str) -> dict:
+ """Parse HTTP www-authenticate header
+
+ This helper uses ``requests`` utilities to parse the ``www-authenticate``
+ header as represented in a ``requests.Response`` instance. The header may
+ contain any number of challenge specifications.
+
+ The implementation follows RFC7235, where a challenge parameters set is
+ specified as: either a comma-separated list of parameters, or a single
+ sequence of characters capable of holding base64-encoded information,
+ and parameters are name=value pairs, where the name token is matched
+ case-insensitively, and each parameter name MUST only occur once
+ per challenge.
+
+ Returns
+ -------
+ dict
+ Keys are casefolded challenge labels (e.g., 'basic', 'digest').
+ Values are: ``None`` (no parameter), ``str`` (a token68), or
+ ``dict`` (name/value mapping of challenge parameters)
+ """
+ plh = requests.utils.parse_list_header
+ pdh = requests.utils.parse_dict_header
+ challenges = {}
+ challenge = None
+ # challenges as well as their properties are in a single
+ # comma-separated list
+ for item in plh(hdr):
+ # parse the item into a key/value set
+ # the value will be `None` if this item was no mapping
+ k, v = pdh(item).popitem()
+ # split the key to check for a challenge spec start
+ key_split = k.split(' ', maxsplit=1)
+ if len(key_split) > 1 or v is None:
+ item_suffix = item[len(key_split[0]) + 1:]
+ challenge = [item[len(key_split[0]) + 1:]] if item_suffix else None
+ challenges[key_split[0].casefold()] = challenge
+ else:
+ # implementation logic assumes that the above conditional
+ # was triggered before we ever get here
+ assert challenge
+ challenge.append(item)
+
+ return {
+ challenge: _convert_www_authenticate_items(items)
+ for challenge, items in challenges.items()
+ }
+
+
+def _convert_www_authenticate_items(items: list) -> None | str | dict:
+ pdh = requests.utils.parse_dict_header
+ # according to RFC7235, items can be:
+ # either a comma-separated list of parameters
+ # or a single sequence of characters capable of holding base64-encoded
+ # information.
+ # parameters are name=value pairs, where the name token is matched
+ # case-insensitively, and each parameter name MUST only occur once
+ # per challenge.
+ if items is None:
+ return None
+ elif len(items) == 1 and pdh(items[0].rstrip('=')).popitem()[1] is None:
+ # this items matches the token68 appearance (no name value
+ # pair after potential base64 padding its removed
+ return items[0]
+ else:
+ return {
+ k.casefold(): v for i in items for k, v in pdh(i).items()
+ }
class DataladAuth(requests.auth.AuthBase):
@@ -201,7 +270,7 @@ class DataladAuth(requests.auth.AuthBase):
# www-authenticate with e.g. 403s
return r
# which auth schemes does the server support?
- auth_schemes = www_authenticate.parse(r.headers['www-authenticate'])
+ auth_schemes = parse_www_authenticate(r.headers['www-authenticate'])
ascheme, credname, cred = self._get_credential(r.url, auth_schemes)
if cred is None or 'secret' not in cred:
diff --git a/setup.cfg b/setup.cfg
index 8e31daa..3f6897a 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -16,7 +16,6 @@ python_requires = >= 3.8
install_requires =
annexremote
datalad >= 0.18.4
- www-authenticate
humanize
packages = find_namespace:
include_package_data = True
| datalad/datalad-next | 6a2d65eaec84a7e380a7077c4642c938f590ce88 | diff --git a/datalad_next/utils/tests/test_parse_www_authenticate.py b/datalad_next/utils/tests/test_parse_www_authenticate.py
new file mode 100644
index 0000000..d69fcd6
--- /dev/null
+++ b/datalad_next/utils/tests/test_parse_www_authenticate.py
@@ -0,0 +1,45 @@
+
+from ..requests_auth import parse_www_authenticate
+
+
+challenges = (
+ # just challenge type
+ ('Negotiate',
+ [('negotiate', None)]),
+ # challenge and just a token, tolerate any base64 padding
+ ('Negotiate abcdef',
+ [('negotiate', 'abcdef')]),
+ ('Negotiate abcdef=',
+ [('negotiate', 'abcdef=')]),
+ ('Negotiate abcdef==',
+ [('negotiate', 'abcdef==')]),
+ # standard bearer
+ ('Bearer realm=example.com',
+ [('bearer', {'realm': 'example.com'})]),
+ # standard digest
+ ('Digest realm="example.com", qop="auth,auth-int", nonce="abcdef", '
+ 'opaque="ghijkl"',
+ [('digest', {'realm': 'example.com', 'qop': 'auth,auth-int',
+ 'nonce': 'abcdef', 'opaque': 'ghijkl'})]),
+ # multi challenge
+ ('Basic speCial="paf ram", realm="basIC", '
+ 'Bearer, '
+ 'Digest realm="[email protected]", qop="auth, auth-int", '
+ 'algorithm=MD5',
+ [('basic', {'special': 'paf ram', 'realm': 'basIC'}),
+ ('bearer', None),
+ ('digest', {'realm': "[email protected]", 'qop': "auth, auth-int",
+ 'algorithm': 'MD5'})]),
+ # same challenge, multiple times, last one wins
+ ('Basic realm="basIC", '
+ 'Basic realm="complex"',
+ [('basic', {'realm': 'complex'})]),
+)
+
+
+def test_parse_www_authenticate():
+ for hdr, targets in challenges:
+ res = parse_www_authenticate(hdr)
+ for ctype, props in targets:
+ assert ctype in res
+ assert res[ctype] == props
| Replace `www-authenticate`
We need this as a dependency for #490. However, packaging it for Debian seems like overkill. The whole thing is ~80 lines of code in a single file.
The last update was 8 years ago.
I would consider adopting a copy, unless @basilgello has other preferences. | 0.0 | [
"datalad_next/utils/tests/test_parse_www_authenticate.py::test_parse_www_authenticate"
] | [] | 2023-10-22 20:17:25+00:00 | 1,834 |
|
datalad__datalad-next-518 | diff --git a/changelog.d/20231026_185357_michael.hanke_archivist_tgz.md b/changelog.d/20231026_185357_michael.hanke_archivist_tgz.md
new file mode 100644
index 0000000..f41dadc
--- /dev/null
+++ b/changelog.d/20231026_185357_michael.hanke_archivist_tgz.md
@@ -0,0 +1,6 @@
+### 💫 Enhancements and new features
+
+- The `archivist` remote now supports archive type detection
+ from `*E`-type annex keys for `.tgz` archives too.
+ Fixes https://github.com/datalad/datalad-next/issues/517 via
+ https://github.com/datalad/datalad-next/pull/518 (by @mih)
diff --git a/datalad_next/types/archivist.py b/datalad_next/types/archivist.py
index 12e9b2b..17c538d 100644
--- a/datalad_next/types/archivist.py
+++ b/datalad_next/types/archivist.py
@@ -74,7 +74,7 @@ class ArchivistLocator:
"""
akey: AnnexKey
member: PurePosixPath
- size: int
+ size: int | None = None
# datalad-archives did not have the type info, we want to be
# able to handle those too, make optional
atype: ArchiveType | None = None
@@ -91,21 +91,21 @@ class ArchivistLocator:
@classmethod
def from_str(cls, url: str):
"""Return ``ArchivistLocator`` from ``str`` form"""
- url_matched = _recognized_urls.match(url)
- if not url_matched:
+ url_match = _recognized_urls.match(url)
+ if not url_match:
raise ValueError('Unrecognized dl+archives locator syntax')
- url_matched = url_matched.groupdict()
+ url_matched = url_match.groupdict()
# convert to desired type
akey = AnnexKey.from_str(url_matched['key'])
# archive member properties
- props_matched = _archive_member_props.match(url_matched['props'])
- if not props_matched:
+ props_match = _archive_member_props.match(url_matched['props'])
+ if not props_match:
# without at least a 'path' there is nothing we can do here
raise ValueError(
'dl+archives locator contains invalid archive member '
f'specification: {url_matched["props"]!r}')
- props_matched = props_matched.groupdict()
+ props_matched = props_match.groupdict()
amember_path = PurePosixPath(props_matched['path'])
if amember_path.is_absolute():
raise ValueError(
@@ -116,6 +116,8 @@ class ArchivistLocator:
# size is optional, regex ensure that it is an int
size = props_matched.get('size')
+ if size is not None:
+ size = int(size)
# archive type, could be None
atype = props_matched.get('atype')
@@ -134,6 +136,8 @@ class ArchivistLocator:
atype = ArchiveType.zip
elif '.tar' in suf:
atype = ArchiveType.tar
+ elif '.tgz' in suf:
+ atype = ArchiveType.tar
return cls(
akey=akey,
| datalad/datalad-next | 1f7d9f5fc2874078600fc64009428550c720823f | diff --git a/datalad_next/types/tests/test_archivist.py b/datalad_next/types/tests/test_archivist.py
index 8f78163..b3d03ac 100644
--- a/datalad_next/types/tests/test_archivist.py
+++ b/datalad_next/types/tests/test_archivist.py
@@ -23,6 +23,12 @@ def test_archivistlocator():
assert ArchivistLocator.from_str(
'dl+archive:MD5E-s1--e9f624eb778e6f945771c543b6e9c7b2.tar#path=f.txt'
).atype == ArchiveType.tar
+ assert ArchivistLocator.from_str(
+ 'dl+archive:MD5E-s1--e9f624eb778e6f945771c543b6e9c7b2.tgz#path=f.txt'
+ ).atype == ArchiveType.tar
+ assert ArchivistLocator.from_str(
+ 'dl+archive:MD5E-s1--e9f624eb778e6f945771c543b6e9c7b2.tar.gz#path=f.txt'
+ ).atype == ArchiveType.tar
assert ArchivistLocator.from_str(
'dl+archive:MD5E-s1--e9f624eb778e6f945771c543b6e9c7b2.zip#path=f.txt'
).atype == ArchiveType.zip
| archivist special remote: add support for tar archives with `.tgz` extension
I'm working on building a dataset from `.tgz` archives using the replacement for `add-archive-content` demonstrated [here](https://github.com/datalad/datalad-next/issues/183#issuecomment-1539943754) in combination with the archivist special remote. The demo below works if the archive is a `.tar.gz` extension but not with `.tgz`. With `.tgz`, I need to configure the `archivist.legacy-mode` for a successful `datalad get`. Here's a quick demo:
```shell
% mkdir project
% touch project/file1.txt project/file2.txt project/file3.txt
% tar -czvf project.tgz project
```
```shell
% datalad create tmp && cd tmp
% cp ../project.tgz ./
% datalad save -m "add archive" project.tgz
% git annex initremote archivist type=external externaltype=archivist encryption=none autoenable=true
% archivekey=$(git annex lookupkey project.tgz)
% datalad -f json ls-file-collection tarfile project.tgz --hash md5 | jq '. | select(.type == "file")' | jq --slurp . | datalad addurls --key 'et:MD5-s{size}--{hash-md5}' - "dl+archive:${archivekey}#path={item}&size={size}" '{item}'
% filekey=$(git annex lookupkey project/file1.txt)
% archivist_uuid=$(git annex info archivist | grep 'uuid' | cut -d ' ' -f 2)
% git annex setpresentkey $filekey $archivist_uuid 1
% datalad get project/file1.txt
get(error): project/file1.txt (file) [Could not obtain 'MD5E-s0--d41d8cd98f00b204e9800998ecf8427e.txt' -caused by- NotImplementedError]
```
```
```shell
% datalad configuration --scope local set datalad.archivist.legacy-mode=yes 1 !
set_configuration(ok): . [datalad.archivist.legacy-mode=yes]
% datalad get project/file1.txt
[INFO ] datalad-archives special remote is using an extraction cache under /playground/loj/abcd/tmp3/.git/datalad/tmp/archives/8bc4249de3. Remove it with DataLad's 'clean' command to save disk space.
get(ok): project/file1.txt (file) [from archivist...]
```
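For reference, the accompanying patch extends the locator's archive type detection to `.tgz` keys (the key below is just an example); a minimal sketch of the expected outcome:
```py
from datalad_next.types.archivist import ArchivistLocator
from datalad_next.types.enums import ArchiveType

loc = ArchivistLocator.from_str(
    'dl+archive:MD5E-s1--e9f624eb778e6f945771c543b6e9c7b2.tgz#path=f.txt')
assert loc.atype == ArchiveType.tar  # a .tgz key now maps to the TAR handler too
```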
<details><summary>datalad wtf</summary>
```
# WTF
## configuration <SENSITIVE, report disabled by configuration>
## credentials
- keyring:
- active_backends:
- PlaintextKeyring with no encyption v.1.0 at /home/loj/.local/share/python_keyring/keyring_pass.cfg
- config_file: /home/loj/.config/python_keyring/keyringrc.cfg
- data_root: /home/loj/.local/share/python_keyring
## datalad
- version: 0.19.3
## dependencies
- annexremote: 1.6.0
- boto: 2.49.0
- cmd:7z: 16.02
- cmd:annex: 10.20221003
- cmd:bundled-git: UNKNOWN
- cmd:git: 2.39.2
- cmd:ssh: 8.4p1
- cmd:system-git: 2.39.2
- cmd:system-ssh: 8.4p1
- humanize: 4.8.0
- iso8601: 2.1.0
- keyring: 24.2.0
- keyrings.alt: 5.0.0
- msgpack: 1.0.7
- platformdirs: 3.11.0
- requests: 2.31.0
## environment
- LANG: en_US.UTF-8
- LANGUAGE: en_US.UTF-8
- LC_ALL: en_US.UTF-8
- LC_CTYPE: en_US.UTF-8
- PATH: /home/loj/.venvs/abcd-long/bin:/home/loj/.dotfiles/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/X11R6/bin:/usr/local/games:/usr/games
## extensions
- container:
- description: Containerized environments
- entrypoints:
- datalad_container.containers_add.ContainersAdd:
- class: ContainersAdd
- module: datalad_container.containers_add
- names:
- containers-add
- containers_add
- datalad_container.containers_list.ContainersList:
- class: ContainersList
- module: datalad_container.containers_list
- names:
- containers-list
- containers_list
- datalad_container.containers_remove.ContainersRemove:
- class: ContainersRemove
- module: datalad_container.containers_remove
- names:
- containers-remove
- containers_remove
- datalad_container.containers_run.ContainersRun:
- class: ContainersRun
- module: datalad_container.containers_run
- names:
- containers-run
- containers_run
- module: datalad_container
- version: 1.2.3
- next:
- description: What is next in DataLad
- entrypoints:
- datalad_next.commands.create_sibling_webdav.CreateSiblingWebDAV:
- class: CreateSiblingWebDAV
- module: datalad_next.commands.create_sibling_webdav
- names:
- create-sibling-webdav
- datalad_next.commands.credentials.Credentials:
- class: Credentials
- module: datalad_next.commands.credentials
- names:
- datalad_next.commands.download.Download:
- class: Download
- module: datalad_next.commands.download
- names:
- download
- datalad_next.commands.ls_file_collection.LsFileCollection:
- class: LsFileCollection
- module: datalad_next.commands.ls_file_collection
- names:
- ls-file-collection
- datalad_next.commands.tree.TreeCommand:
- class: TreeCommand
- module: datalad_next.commands.tree
- names:
- tree
- module: datalad_next
- version: 1.0.1
## git-annex
- build flags:
- Assistant
- Webapp
- Pairing
- Inotify
- DBus
- DesktopNotify
- TorrentParser
- MagicMime
- Benchmark
- Feeds
- Testsuite
- S3
- WebDAV
- dependency versions:
- aws-0.22
- bloomfilter-2.0.1.0
- cryptonite-0.26
- DAV-1.3.4
- feed-1.3.0.1
- ghc-8.8.4
- http-client-0.6.4.1
- persistent-sqlite-2.10.6.2
- torrent-10000.1.1
- uuid-1.3.13
- yesod-1.6.1.0
- key/value backends:
- SHA256E
- SHA256
- SHA512E
- SHA512
- SHA224E
- SHA224
- SHA384E
- SHA384
- SHA3_256E
- SHA3_256
- SHA3_512E
- SHA3_512
- SHA3_224E
- SHA3_224
- SHA3_384E
- SHA3_384
- SKEIN256E
- SKEIN256
- SKEIN512E
- SKEIN512
- BLAKE2B256E
- BLAKE2B256
- BLAKE2B512E
- BLAKE2B512
- BLAKE2B160E
- BLAKE2B160
- BLAKE2B224E
- BLAKE2B224
- BLAKE2B384E
- BLAKE2B384
- BLAKE2BP512E
- BLAKE2BP512
- BLAKE2S256E
- BLAKE2S256
- BLAKE2S160E
- BLAKE2S160
- BLAKE2S224E
- BLAKE2S224
- BLAKE2SP256E
- BLAKE2SP256
- BLAKE2SP224E
- BLAKE2SP224
- SHA1E
- SHA1
- MD5E
- MD5
- WORM
- URL
- X*
- operating system: linux x86_64
- remote types:
- git
- gcrypt
- p2p
- S3
- bup
- directory
- rsync
- web
- bittorrent
- webdav
- adb
- tahoe
- glacier
- ddar
- git-lfs
- httpalso
- borg
- hook
- external
- supported repository versions:
- 8
- 9
- 10
- upgrade supported from repository versions:
- 0
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- version: 10.20221003
## location
- path: /playground/loj/abcd
- type: directory
## metadata.extractors
- container_inspect:
- distribution: datalad-container 1.2.3
- load_error: ModuleNotFoundError(No module named 'datalad_metalad')
- module: datalad_container.extractors.metalad_container
## metadata.filters
## metadata.indexers
## python
- implementation: CPython
- version: 3.9.2
## system
- distribution: debian/11/bullseye
- encoding:
- default: utf-8
- filesystem: utf-8
- locale.prefered: UTF-8
- filesystem:
- CWD:
- path: /playground/loj/abcd
- HOME:
- path: /home/loj
- TMP:
- path: /tmp
- max_path_length: 276
- name: Linux
- release: 5.10.0-23-amd64
- type: posix
- version: #1 SMP Debian 5.10.179-1 (2023-05-12)
```
</details> | 0.0 | [
"datalad_next/types/tests/test_archivist.py::test_archivistlocator"
] | [
"datalad_next/types/tests/test_archivist.py::test_archivistlocatori_errors"
] | 2023-10-26 16:56:54+00:00 | 1,835 |
|
datalad__datalad-next-577 | diff --git a/datalad_next/commands/create_sibling_webdav.py b/datalad_next/commands/create_sibling_webdav.py
index 1b286ed..ba808a9 100644
--- a/datalad_next/commands/create_sibling_webdav.py
+++ b/datalad_next/commands/create_sibling_webdav.py
@@ -9,9 +9,6 @@
"""High-level interface for creating a combi-target on a WebDAV capable server
"""
import logging
-from typing import (
- Dict,
-)
from unittest.mock import patch
from urllib.parse import (
quote as urlquote,
@@ -38,12 +35,19 @@ from datalad_next.constraints import (
EnsureInt,
EnsureParsedURL,
EnsureRange,
+ EnsureRemoteName,
EnsureStr,
)
-from datalad_next.constraints.dataset import EnsureDataset
+from datalad_next.constraints.dataset import (
+ DatasetParameter,
+ EnsureDataset,
+)
+from datalad_next.constraints.exceptions import (
+ ConstraintError,
+ ParameterConstraintContext,
+)
from datalad_next.utils import CredentialManager
from datalad_next.utils import (
- ParamDictator,
get_specialremote_credential_properties,
update_specialremote_credential,
_yield_ds_w_matching_siblings,
@@ -56,37 +60,119 @@ lgr = logging.getLogger('datalad.distributed.create_sibling_webdav')
class CreateSiblingWebDAVParamValidator(EnsureCommandParameterization):
- def joint_validation(self, params: Dict, on_error: str) -> Dict:
- p = ParamDictator(params)
- if p.url.scheme == "http":
+ def __init__(self):
+ super().__init__(
+ param_constraints=dict(
+ url=EnsureParsedURL(
+ required=['scheme', 'netloc'],
+ forbidden=['query', 'fragment'],
+ match='^(http|https)://',
+ ),
+ dataset=EnsureDataset(
+ installed=True, purpose='create WebDAV sibling(s)'),
+ name=EnsureRemoteName(),
+ storage_name=EnsureRemoteName(),
+ mode=EnsureChoice(
+ 'annex', 'filetree', 'annex-only', 'filetree-only',
+ 'git-only',
+ ),
+ # TODO https://github.com/datalad/datalad-next/issues/131
+ credential=EnsureStr(),
+ existing=EnsureChoice('skip', 'error', 'reconfigure'),
+ recursive=EnsureBool(),
+ recursion_limit=EnsureInt() & EnsureRange(min=0),
+ ),
+ validate_defaults=('dataset',),
+ joint_constraints={
+ ParameterConstraintContext(('url',), 'url'):
+ self._validate_param_url,
+ ParameterConstraintContext(
+ ('url', 'name'), 'default name'):
+ self._validate_default_name,
+ ParameterConstraintContext(
+ ('mode', 'name', 'storage_name'), 'default storage name'):
+ self._validate_default_storage_name,
+ ParameterConstraintContext(
+ ('mode', 'name', 'storage_name'), 'default storage name'):
+ self._validate_default_storage_name,
+ ParameterConstraintContext(
+ ('existing', 'recursive', 'name', 'storage_name',
+ 'dataset', 'mode')):
+ self._validate_existing_names,
+ },
+ )
+
+ def _validate_param_url(self, url):
+ if url.scheme == "http":
lgr.warning(
- f"Using 'http:' ({p.url.geturl()!r}) means that WebDAV "
+ f"Using 'http:' ({url.geturl()!r}) means that WebDAV "
"credentials are sent unencrypted over network links. "
"Consider using 'https:'.")
- if not params['name']:
+ def _validate_default_name(self, url, name):
+ if not name:
# not using .netloc to avoid ports to show up in the name
- params['name'] = p.url.hostname
+ return {'name': url.hostname}
- if p.mode in ('annex-only', 'filetree-only') and p.storage_name:
+ def _validate_default_storage_name(self, mode, name, storage_name):
+ if mode in ('annex-only', 'filetree-only') and storage_name:
lgr.warning(
"Sibling name will be used for storage sibling in "
"storage-sibling-only mode, but a storage sibling name "
"was provided"
)
- if p.mode == 'git-only' and p.storage_name:
+ if mode == 'git-only' and storage_name:
lgr.warning(
"Storage sibling setup disabled, but a storage sibling name "
"was provided"
)
- if p.mode != 'git-only' and not p.storage_name:
- p.storage_name = f"{p.name}-storage"
+ if mode != 'git-only' and not storage_name:
+ storage_name = f"{name}-storage"
- if p.mode != 'git-only' and p.name == p.storage_name:
+ if mode != 'git-only' and name == storage_name:
# leads to unresolvable, circular dependency with publish-depends
- raise ValueError("sibling names must not be equal")
+ self.raise_for(
+ dict(mode=mode, name=name, storage_name=storage_name),
+ "sibling names must not be equal",
+ )
+ return dict(mode=mode, name=name, storage_name=storage_name)
+
+ def _validate_existing_names(
+ self, existing, recursive, name, storage_name, dataset,
+ mode):
+ if recursive:
+ # we don't do additional validation for recursive processing,
+ # this has to be done when things are running, because an
+ # upfront validation would require an expensive traversal
+ return
- return params
+ if existing != 'error':
+ # nothing to check here
+ return
+
+ if not isinstance(dataset, DatasetParameter):
+ # we did not get a proper dataset parameter,
+ # hence cannot tailor to a dataset to check a remote
+ # name against
+ return
+
+ validator = EnsureRemoteName(known=False, dsarg=dataset)
+ try:
+ if mode != 'annex-only':
+ validator(name)
+ if mode != 'git-only':
+ validator(storage_name)
+ except ConstraintError as e:
+ self.raise_for(
+ dict(existing=existing,
+ recursive=recursive,
+ name=name,
+ storage_name=storage_name,
+ dataset=dataset,
+ mode=mode),
+ e.msg,
+ )
+ return
@build_doc
@@ -251,29 +337,7 @@ class CreateSiblingWebDAV(ValidatedInterface):
"""),
)
- _validators = dict(
- url=EnsureParsedURL(
- required=['scheme', 'netloc'],
- forbidden=['query', 'fragment'],
- match='^(http|https)://',
- ),
- dataset=EnsureDataset(
- installed=True, purpose='create WebDAV sibling(s)'),
- name=EnsureStr(),
- storage_name=EnsureStr(),
- mode=EnsureChoice(
- 'annex', 'filetree', 'annex-only', 'filetree-only', 'git-only'
- ),
- # TODO https://github.com/datalad/datalad-next/issues/131
- credential=EnsureStr(),
- existing=EnsureChoice('skip', 'error', 'reconfigure'),
- recursive=EnsureBool(),
- recursion_limit=EnsureInt() & EnsureRange(min=0),
- )
- _validator_ = CreateSiblingWebDAVParamValidator(
- _validators,
- validate_defaults=('dataset',),
- )
+ _validator_ = CreateSiblingWebDAVParamValidator()
@staticmethod
@datasetmethod(name='create_sibling_webdav')
diff --git a/datalad_next/constraints/__init__.py b/datalad_next/constraints/__init__.py
index e6f0139..42fea3f 100644
--- a/datalad_next/constraints/__init__.py
+++ b/datalad_next/constraints/__init__.py
@@ -85,3 +85,8 @@ from .formats import (
EnsureURL,
EnsureParsedURL,
)
+
+from .git import (
+ EnsureGitRefName,
+ EnsureRemoteName
+)
\ No newline at end of file
diff --git a/datalad_next/constraints/git.py b/datalad_next/constraints/git.py
index 25f8363..073c131 100644
--- a/datalad_next/constraints/git.py
+++ b/datalad_next/constraints/git.py
@@ -1,8 +1,12 @@
"""Constraints for Git-related concepts and parameters"""
+from __future__ import annotations
import subprocess
-from .base import Constraint
+from .base import (
+ Constraint,
+ DatasetParameter,
+)
class EnsureGitRefName(Constraint):
@@ -74,3 +78,101 @@ class EnsureGitRefName(Constraint):
'(single-level) ' if self._allow_onelevel else '',
' or refspec pattern' if self._refspec_pattern else '',
)
+
+
+class EnsureRemoteName(Constraint):
+ """Ensures a valid remote name, and optionally if such a remote is known
+ """
+ _label = 'remote'
+
+ def __init__(self,
+ known: bool | None = None,
+ dsarg: DatasetParameter | None = None):
+ """
+ Parameters
+ ----------
+ known: bool, optional
+ By default, a given value is only checked if it is a syntactically
+ correct remote name.
+ If ``True``, also checks that the given name corresponds to a
+ known remote in the dataset given by ``dsarg``. If ``False``,
+ checks that the given remote does not match any known remote
+ in that dataset.
+ dsarg: DatasetParameter, optional
+ Identifies a dataset for testing remote existence, if requested.
+ """
+ self._label = 'remote'
+ self._known = known
+ self._dsarg = dsarg
+
+ def __call__(self, value: str) -> str:
+ if not value:
+ # simple, do here
+ self.raise_for(
+ value,
+ f'missing {self._label} name',
+ )
+
+ if self._known is not None:
+ assert self._dsarg, \
+ f"Existence check for {self._label} requires dataset " \
+ "specification"
+
+ if self._known:
+ # we don't need to check much, only if a remote of this name
+ # already exists -- no need to check for syntax compliance
+ # again
+ if not any(
+ k.startswith(f"remote.{value}.")
+ for k in self._dsarg.ds.config.keys()
+ ):
+ self.raise_for(
+ value,
+ f'is not a known {self._label}',
+ )
+ else:
+ # whether or not the remote must not exist, or we would not care,
+ # in all cases we need to check for syntax compliance
+ EnsureGitRefName(
+ allow_onelevel=True,
+ refspec_pattern=False,
+ )(value)
+
+ if self._known is None:
+ # we only need to know that something was provided,
+ # no further check
+ return value
+
+ if self._known is False and any(
+ k.startswith(f"remote.{value}.")
+ for k in self._dsarg.ds.config.keys()
+ ):
+ self.raise_for(
+ value,
+ f'name conflicts with a known {self._label}',
+ )
+
+ return value
+
+ def short_description(self):
+ return f"Name of a{{desc}} {self._label}".format(
+ desc=' known' if self._known
+ else ' not-yet-known' if self._known is False else ''
+ )
+
+ def for_dataset(self, dataset: DatasetParameter) -> Constraint:
+ """Return an similarly parametrized variant that checks remote names
+ against a given dataset (argument)"""
+ return self.__class__(
+ known=self._known,
+ dsarg=dataset,
+ )
+
+
+class EnsureSiblingName(EnsureRemoteName):
+ """Identical to ``EnsureRemoteName``, but used the term "sibling"
+
+ Only error messages and documentation differ, with "remote" being
+ replaced with "sibling".
+ """
+ _label = 'sibling'
| datalad/datalad-next | 2138efef0cf69a9b6a57505c5a48b9b13ad27088 | diff --git a/datalad_next/commands/tests/test_create_sibling_webdav.py b/datalad_next/commands/tests/test_create_sibling_webdav.py
index 5fda1fd..cd22232 100644
--- a/datalad_next/commands/tests/test_create_sibling_webdav.py
+++ b/datalad_next/commands/tests/test_create_sibling_webdav.py
@@ -74,6 +74,26 @@ def check_common_workflow(
if declare_credential else None,
mode=mode,
)
+ # Ensure that remote name constraint check works
+ # second time should raise because the sibling exists already
+ with pytest.raises(ValueError) as e:
+ create_sibling_webdav(
+ url,
+ credential=webdav_credential['name']
+ if declare_credential else None,
+ mode=mode,
+ name='127.0.0.1',
+ )
+ with pytest.raises(ValueError) as e:
+ create_sibling_webdav(
+ url,
+ credential=webdav_credential['name']
+ if declare_credential else None,
+ mode=mode,
+ name='other',
+ storage_name='127.0.0.1-storage',
+ )
+
assert_in_results(
res,
action='create_sibling_webdav.storage',
diff --git a/datalad_next/constraints/tests/test_special_purpose.py b/datalad_next/constraints/tests/test_special_purpose.py
index fb3d508..c69b3be 100644
--- a/datalad_next/constraints/tests/test_special_purpose.py
+++ b/datalad_next/constraints/tests/test_special_purpose.py
@@ -4,6 +4,7 @@ import pytest
from datalad_next.commands import Parameter
from datalad_next.utils import chpwd
+from ..base import DatasetParameter
from ..basic import (
EnsureInt,
EnsureStr,
@@ -22,6 +23,7 @@ from ..formats import (
)
from ..git import (
EnsureGitRefName,
+ EnsureRemoteName
)
from ..parameter_legacy import EnsureParameterConstraint
@@ -52,6 +54,36 @@ def test_EnsureGitRefName():
'refs/heads/*') == 'refs/heads/*'
+def test_EnsureRemoteName(existing_dataset):
+ # empty sibling name must raise
+ with pytest.raises(ValueError):
+ EnsureRemoteName()('')
+ assert EnsureRemoteName().short_description() == 'Name of a remote'
+ assert EnsureRemoteName(
+ known=True).short_description() == 'Name of a known remote'
+ assert EnsureRemoteName(
+ known=False).short_description() == 'Name of a not-yet-known remote'
+ ds = existing_dataset
+ c = EnsureRemoteName(known=False)
+ tc = c.for_dataset(DatasetParameter(None, ds))
+ assert tc('newremotename') == 'newremotename'
+ # add a remote
+ ds._repo.add_remote('my-remote', 'here')
+ # check should fail when it shouldn't exist
+ with pytest.raises(ValueError):
+ tc('my-remote')
+ # should work when it should exist
+ c = EnsureRemoteName(known=True)
+ tc = c.for_dataset(DatasetParameter(None, ds))
+ assert tc('my-remote') == 'my-remote'
+ # but fail with non-existing remote
+ with pytest.raises(ValueError) as e:
+ tc('not-my-remote')
+ assert str(e.value) == "is not a known remote"
+ # return sibling name with no existence checks
+ assert EnsureRemoteName()('anything') == 'anything'
+
+
def test_EnsureParameterConstraint():
# most basic case, no value constraint
c = EnsureParameterConstraint(NoConstraint())
| Provide `EnsureRemoteName`
Many commands need sibling names, existing or prospective.
Checking for syntax compliance is essentially
`EnsureGitRefName(allow_onelevel=True, refspec_pattern=False)`.
For any given dataset, it can be turned into an `EnsureChoice()`, but it would also need some kind of `EnsureNotIn()` (for setting a non-existing remote name). So the best option is likely to implement the dataset interaction directly in `EnsureRemoteName`, and make it a subclass of `EnsureGitRefName`. (A usage sketch follows this record.) | 0.0 | [
"datalad_next/constraints/tests/test_special_purpose.py::test_EnsureGitRefName",
"datalad_next/constraints/tests/test_special_purpose.py::test_EnsureParameterConstraint",
"datalad_next/constraints/tests/test_special_purpose.py::test_EnsureParameterConstraint_passthrough",
"datalad_next/constraints/tests/test_special_purpose.py::test_EnsureJSONLines",
"datalad_next/constraints/tests/test_special_purpose.py::test_EnsureURL",
"datalad_next/constraints/tests/test_special_purpose.py::test_EnsureURL_match"
] | [] | 2023-12-17 19:39:48+00:00 | 1,836 |
|
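The record above ships a full implementation; for orientation, here is a minimal usage sketch of the resulting `EnsureRemoteName` constraint, modelled on the test patch shown. The dataset object `ds` and the remote names are illustrative assumptions, not part of the original record.

```python
# Sketch only: assumes a datalad-next checkout that includes the patch above,
# and that `ds` is an existing datalad Dataset with a remote named 'my-remote'.
from datalad_next.constraints import EnsureRemoteName
from datalad_next.constraints.base import DatasetParameter


def check_remote_names(ds):
    # tailor the constraints to a concrete dataset, as the tests do
    must_not_exist = EnsureRemoteName(known=False).for_dataset(
        DatasetParameter(None, ds))
    must_exist = EnsureRemoteName(known=True).for_dataset(
        DatasetParameter(None, ds))

    must_not_exist('brand-new-name')   # passes: no such remote configured yet
    must_exist('my-remote')            # passes: remote is configured
    try:
        must_exist('not-my-remote')    # raises: not a known remote
    except ValueError as e:
        print(e)
```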
datalad__datalad-next-632 | diff --git a/datalad_next/gitremotes/datalad_annex.py b/datalad_next/gitremotes/datalad_annex.py
index a39fa8d..561d4c4 100755
--- a/datalad_next/gitremotes/datalad_annex.py
+++ b/datalad_next/gitremotes/datalad_annex.py
@@ -210,8 +210,9 @@ from datalad_next.datasets import (
from datalad_next.exceptions import CapturedException
from datalad_next.runners import (
CommandError,
- NoCapture,
- StdOutCapture,
+ call_git,
+ call_git_oneline,
+ call_git_success,
)
from datalad_next.uis import ui_switcher as ui
from datalad_next.utils import (
@@ -224,6 +225,7 @@ from datalad_next.utils import (
get_specialremote_credential_envpatch,
get_specialremote_credential_properties,
needs_specialremote_credential_envpatch,
+ patched_env,
specialremote_credential_envmap,
update_specialremote_credential,
)
@@ -494,8 +496,9 @@ class RepoAnnexGitRemote(object):
try:
# send annex into private mode, if supported
# this repo will never ever be shared
- ra.call_git(['config', 'annex.private', 'true'])
- ra.call_git(['annex', 'init'])
+ call_git_success(['config', 'annex.private', 'true'],
+ cwd=ra.pathobj, capture_output=True)
+ call_git_success(['annex', 'init'], capture_output=True)
ra = AnnexRepo(self._repoannexdir)
if 'type=web' in self.initremote_params:
self._init_repoannex_type_web(ra)
@@ -620,8 +623,15 @@ class RepoAnnexGitRemote(object):
# otherwise we can end up in a conflict situation where the mirror
# points to 'master' (or something else) and the source actually
# has 'main' (or something different)
- src_head_ref = self.repo.call_git(['symbolic-ref', 'HEAD']).strip()
- mr.call_git(['symbolic-ref', 'HEAD', src_head_ref])
+ src_head_ref = call_git_oneline(
+ ['symbolic-ref', 'HEAD'],
+ cwd=self.repo.pathobj,
+ ).strip()
+ call_git_success(
+ ['symbolic-ref', 'HEAD', src_head_ref],
+ cwd=mr.pathobj,
+ capture_output=True,
+ )
self.log('Established mirror')
self._mirrorrepo = mr
@@ -669,9 +679,9 @@ class RepoAnnexGitRemote(object):
pre_refs = sorted(self.mirrorrepo.for_each_ref_(),
key=lambda x: x['refname'])
# must not capture -- git is talking to it directly from here
- self.mirrorrepo._git_runner.run(
- ['git', 'receive-pack', self.mirrorrepo.path],
- protocol=NoCapture,
+ call_git(
+ ['receive-pack', self.mirrorrepo.path],
+ cwd=self.mirrorrepo.pathobj,
)
post_refs = sorted(self.mirrorrepo.for_each_ref_(),
key=lambda x: x['refname'])
@@ -698,12 +708,15 @@ class RepoAnnexGitRemote(object):
for ref in post_refs:
# best MIH can think of is to leave behind another
# ref to indicate the unsuccessful upload
- self.repo.call_git([
+ call_git_success([
'update-ref',
# strip 'refs/heads/' from refname
f'refs/dlra-upload-failed/{self.remote_name}/'
f'{ref["refname"][11:]}',
- ref['objectname']])
+ ref['objectname']],
+ cwd=self.repo.pathobj,
+ capture_output=True,
+ )
raise
# clean-up potential upload failure markers for this particular
@@ -712,7 +725,11 @@ class RepoAnnexGitRemote(object):
for ref in self.repo.for_each_ref_(
fields=('refname',),
pattern=f'refs/dlra-upload-failed/{self.remote_name}'):
- self.repo.call_git(['update-ref', '-d', ref['refname']])
+ call_git_success(
+ ['update-ref', '-d', ref['refname']],
+ cwd=self.repo.pathobj,
+ capture_output=True,
+ )
# we do not need to update `self._cached_remote_refs`,
# because we end the remote-helper process here
# everything has worked, if we used a credential, update it
@@ -724,9 +741,9 @@ class RepoAnnexGitRemote(object):
# must not capture -- git is talking to it directly from here.
# the `self.mirrorrepo` access will ensure that the mirror
# is up-to-date
- self.mirrorrepo._git_runner.run(
- ['git', 'upload-pack', self.mirrorrepo.path],
- protocol=NoCapture,
+ call_git(
+ ['upload-pack', self.mirrorrepo.path],
+ cwd=self.mirrorrepo.pathobj,
)
# everything has worked, if we used a credential, update it
self._store_credential()
@@ -766,7 +783,7 @@ class RepoAnnexGitRemote(object):
repoannex = self.repoannex
# trim it down, as much as possible
- mirrorrepo.call_git(['gc'])
+ call_git(['gc'], cwd=mirrorrepo.pathobj)
# update the repo state keys
# it is critical to drop the local keys first, otherwise
@@ -1047,7 +1064,10 @@ def _format_refs(repo, refs=None):
if refstr:
refstr += '\n'
refstr += '@{} HEAD\n'.format(
- repo.call_git(['symbolic-ref', 'HEAD']).strip()
+ call_git_oneline(
+ ['symbolic-ref', 'HEAD'],
+ cwd=repo.pathobj,
+ ).strip()
)
return refstr
@@ -1156,69 +1176,67 @@ def make_export_tree(repo):
# we need to force Git to use a throwaway index file to maintain
# the bare nature of the repoannex, git-annex would stop functioning
# properly otherwise
- env = os.environ.copy()
index_file = repo.pathobj / 'datalad_tmp_index'
- env['GIT_INDEX_FILE'] = str(index_file)
- try:
- for key, kinfo in RepoAnnexGitRemote.xdlra_key_locations.items():
- # create a blob for the annex link
- out = repo._git_runner.run(
- ['git', 'hash-object', '-w', '--stdin'],
- stdin=bytes(
- f'../../.git/annex/objects/{kinfo["prefix"]}/{key}/{key}',
- 'utf-8'),
- protocol=StdOutCapture)
- linkhash = out['stdout'].strip()
- # place link into a tree
- out = repo._git_runner.run(
- ['git', 'update-index', '--add', '--cacheinfo', '120000',
- linkhash, kinfo["loc"]],
- protocol=StdOutCapture,
- env=env)
- # write the complete tree, and return ID
- out = repo._git_runner.run(
- ['git', 'write-tree'],
- protocol=StdOutCapture,
- env=env)
- exporttree = out['stdout'].strip()
- # this should always come out identically
- # unless we made changes in the composition of the export tree
- assert exporttree == '7f0e7953e93b4c9920c2bff9534773394f3a5762'
-
- # clean slate
- if index_file.exists():
- index_file.unlink()
- # fake export.log record
- # <unixepoch>s <here>:<origin> <exporttree>
- now_ts = datetime.datetime.now().timestamp()
- out = repo._git_runner.run(
- ['git', 'hash-object', '-w', '--stdin'],
- stdin=bytes(
- f'{now_ts}s {here}:{origin} {exporttree}\n', 'utf-8'),
- protocol=StdOutCapture)
- exportlog = out['stdout'].strip()
- repo._git_runner.run(
- ['git', 'read-tree', 'git-annex'],
- env=env)
- out = repo._git_runner.run(
- ['git', 'update-index', '--add', '--cacheinfo', '100644',
- exportlog, 'export.log'],
- protocol=StdOutCapture,
- env=env)
- out = repo._git_runner.run(
- ['git', 'write-tree'],
- protocol=StdOutCapture,
- env=env)
- gaupdate = out['stdout'].strip()
- out = repo._git_runner.run(
- ['git', 'commit-tree', '-m', 'Fake export', '-p', 'git-annex',
- gaupdate],
- protocol=StdOutCapture,
- env=env)
- gacommit = out['stdout'].strip()
- repo.call_git(['update-ref', 'refs/heads/git-annex', gacommit])
- finally:
- index_file.unlink()
+ with patched_env(GIT_INDEX_FILE=index_file):
+ try:
+ for key, kinfo in RepoAnnexGitRemote.xdlra_key_locations.items():
+ # create a blob for the annex link
+ linkhash = call_git_oneline(
+ ['hash-object', '-w', '--stdin'],
+ cwd=repo.pathobj,
+ input=f'../../.git/annex/objects/{kinfo["prefix"]}/{key}/{key}',
+ ).strip()
+ # place link into a tree
+ call_git_success(
+ ['update-index', '--add', '--cacheinfo', '120000',
+ linkhash, kinfo["loc"]],
+ cwd=repo.pathobj,
+ capture_output=True,
+ )
+ # write the complete tree, and return ID
+ exporttree = call_git_oneline(
+ ['write-tree'], cwd=repo.pathobj
+ ).strip()
+ # this should always come out identically
+ # unless we made changes in the composition of the export tree
+ assert exporttree == '7f0e7953e93b4c9920c2bff9534773394f3a5762'
+
+ # clean slate
+ if index_file.exists():
+ index_file.unlink()
+ # fake export.log record
+ # <unixepoch>s <here>:<origin> <exporttree>
+ now_ts = datetime.datetime.now().timestamp()
+ exportlog = call_git_oneline(
+ ['hash-object', '-w', '--stdin'],
+ input=f'{now_ts}s {here}:{origin} {exporttree}\n',
+ cwd=repo.pathobj,
+ ).strip()
+ call_git_success(
+ ['read-tree', 'git-annex'],
+ cwd=repo.pathobj,
+ )
+ call_git_success(
+ ['update-index', '--add', '--cacheinfo', '100644',
+ exportlog, 'export.log'],
+ cwd=repo.pathobj,
+ capture_output=True,
+ )
+ gaupdate = call_git_oneline(
+ ['write-tree'], cwd=repo.pathobj,
+ ).strip()
+ gacommit = call_git_oneline(
+ ['commit-tree', '-m', 'Fake export', '-p', 'git-annex',
+ gaupdate],
+ cwd=repo.pathobj,
+ ).strip()
+ call_git_success(
+ ['update-ref', 'refs/heads/git-annex', gacommit],
+ cwd=repo.pathobj,
+ )
+ finally:
+ if index_file.exists():
+ index_file.unlink()
return exporttree
diff --git a/datalad_next/runners/git.py b/datalad_next/runners/git.py
index ec158f2..bc6b74b 100644
--- a/datalad_next/runners/git.py
+++ b/datalad_next/runners/git.py
@@ -18,6 +18,7 @@ def _call_git(
cwd: Path | None = None,
check: bool = False,
text: bool | None = None,
+ input: str | bytes | None = None,
# TODO
#patch_env: dict[str, str] | None = None,
) -> subprocess.CompletedProcess:
@@ -39,6 +40,7 @@ def _call_git(
cwd=cwd,
check=check,
text=text,
+ input=input,
)
except subprocess.CalledProcessError as e:
# TODO we could support post-error forensics, but some client
@@ -77,6 +79,7 @@ def call_git_success(
args: list[str],
*,
cwd: Path | None = None,
+ capture_output: bool = False,
) -> bool:
"""Call Git for a single line of output.
@@ -86,11 +89,14 @@ def call_git_success(
If ``cwd`` is not None, the function changes the working directory to
``cwd`` before executing the command.
+
+ If ``capture_output`` is ``True``, process output is captured, but not
+ returned. By default process output is not captured.
"""
try:
_call_git(
args,
- capture_output=False,
+ capture_output=capture_output,
cwd=cwd,
check=True,
)
@@ -104,7 +110,8 @@ def call_git_lines(
args: list[str],
*,
cwd: Path | None = None,
-) -> bool:
+ input: str | None = None,
+) -> list[str]:
"""Call Git for any (small) number of lines of output.
``args`` is a list of arguments for the Git command. This list must not
@@ -114,6 +121,10 @@ def call_git_lines(
If ``cwd`` is not None, the function changes the working directory to
``cwd`` before executing the command.
+ If ``input`` is not None, the argument becomes the subprocess’s stdin.
+ This is intended for small-scale inputs. For call that require processing
+ large inputs, ``iter_git_subproc()`` is to be preferred.
+
Raises
------
CommandError if the call exits with a non-zero status.
@@ -124,6 +135,7 @@ def call_git_lines(
cwd=cwd,
check=True,
text=True,
+ input=input,
)
return res.stdout.splitlines()
@@ -132,6 +144,7 @@ def call_git_oneline(
args: list[str],
*,
cwd: Path | None = None,
+ input: str | None = None,
) -> str:
"""Call git for a single line of output.
@@ -143,7 +156,7 @@ def call_git_oneline(
CommandError if the call exits with a non-zero status.
AssertionError if there is more than one line of output.
"""
- lines = call_git_lines(args, cwd=cwd)
+ lines = call_git_lines(args, cwd=cwd, input=input)
if len(lines) > 1:
raise AssertionError(
f"Expected Git {args} to return a single line, but got {lines}"
diff --git a/datalad_next/utils/__init__.py b/datalad_next/utils/__init__.py
index 6729d0c..faed7c2 100644
--- a/datalad_next/utils/__init__.py
+++ b/datalad_next/utils/__init__.py
@@ -12,6 +12,7 @@
external_versions
log_progress
parse_www_authenticate
+ patched_env
rmtree
get_specialremote_param_dict
get_specialremote_credential_properties
@@ -72,7 +73,7 @@ from .specialremote import (
needs_specialremote_credential_envpatch,
get_specialremote_credential_envpatch,
)
-
+from .patch import patched_env
# TODO REMOVE EVERYTHING BELOW FOR V2.0
# https://github.com/datalad/datalad-next/issues/611
diff --git a/datalad_next/utils/patch.py b/datalad_next/utils/patch.py
index 299b321..3ea1cdb 100644
--- a/datalad_next/utils/patch.py
+++ b/datalad_next/utils/patch.py
@@ -1,2 +1,31 @@
+import contextlib
+from os import environ
+
# legacy import
from datalad_next.patches import apply_patch
+
+
[email protected]
+def patched_env(**env):
+ """Context manager for patching the process environment
+
+ Any number of kwargs can be given. Keys represent environment variable
+ names, and values their values. A value of ``None`` indicates that
+ the respective variable should be unset, i.e., removed from the
+ environment.
+ """
+ preserve = {}
+ for name, val in env.items():
+ preserve[name] = environ.get(name, None)
+ if val is None:
+ del environ[name]
+ else:
+ environ[name] = str(val)
+ try:
+ yield
+ finally:
+ for name, val in preserve.items():
+ if val is None:
+ del environ[name]
+ else:
+ environ[name] = val
| datalad/datalad-next | 7a08a58eb3110b491640c6628f74baf7ecc4faea | diff --git a/datalad_next/utils/tests/test_patch.py b/datalad_next/utils/tests/test_patch.py
new file mode 100644
index 0000000..c281cce
--- /dev/null
+++ b/datalad_next/utils/tests/test_patch.py
@@ -0,0 +1,15 @@
+from ..patch import patched_env
+from os import environ
+
+
+def test_patched_env():
+ if 'HOME' in environ:
+ home = environ['HOME']
+ with patched_env(HOME=None):
+ assert 'HOME' not in environ
+ assert environ['HOME'] == home
+ unusual_name = 'DATALADPATCHENVTESTVAR'
+ if unusual_name not in environ:
+ with patched_env(**{unusual_name: 'dummy'}):
+ assert environ[unusual_name] == 'dummy'
+ assert unusual_name not in environ
| Discontinue (direct) usage of any runner implementations
With #538 we made the start of a new paradigm: iterator-based interaction with (concurrent) subprocesses.
It is our (@mih and @christian-monch) understanding that this basic approach would work for most (if not all) use cases that involve the execution of subprocesses. Respective experiments have been made and the results look promising. The "ultimate" challenge will be the implementation of a queue-based remote shell.
In order to bring down the complexity of implementations after the introduction of this new paradigm, all subprocess execution in datalad-next should be switched to it.
Afterwards all runner-related imports from datalad-core should be discontinued (which involves updating any extensions that make use of it).
This plan answers the questions of #519. `GitRunner` has no future. (A usage sketch of the replacement helpers follows this record.) | 0.0 | [
"datalad_next/utils/tests/test_patch.py::test_patched_env"
] | [] | 2024-02-07 11:52:00+00:00 | 1,837 |
|
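For context, a brief sketch of how the helpers this record converges on are used: `call_git_oneline()` with the new `input=` argument and the `patched_env()` context manager. The repository path and the commands run are illustrative assumptions.

```python
# Sketch only: assumes datalad-next with the patch above, and that `repo`
# points at an existing Git repository.
from pathlib import Path

from datalad_next.runners import call_git_oneline, call_git_success
from datalad_next.utils import patched_env

repo = Path('/tmp/some-git-repo')  # illustrative path

# hash a blob from in-memory content -- no temporary file needed
blob_id = call_git_oneline(
    ['hash-object', '-w', '--stdin'], cwd=repo, input='hello\n')

# run a command under a temporarily patched environment
with patched_env(GIT_INDEX_FILE=repo / 'throwaway_index'):
    ok = call_git_success(['read-tree', 'HEAD'], cwd=repo, capture_output=True)

print(blob_id, ok)
```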
datalad__datalad-next-93 | diff --git a/changelog.d/20220812_190404_benjaminpoldrack_fix_push_patch.md b/changelog.d/20220812_190404_benjaminpoldrack_fix_push_patch.md
new file mode 100644
index 0000000..b092504
--- /dev/null
+++ b/changelog.d/20220812_190404_benjaminpoldrack_fix_push_patch.md
@@ -0,0 +1,7 @@
+### 🐛 Bug Fixes
+
+- Fixed datalad-push always reporting success when pushing to
+ an export remote.
+ Fixes https://github.com/datalad/datalad-next/issues/88 via
+ https://github.com/datalad/datalad-next/pull/93 (by @bpoldrack)
+
diff --git a/datalad_next/patches/push_to_export_remote.py b/datalad_next/patches/push_to_export_remote.py
index d6b82ed..432f209 100644
--- a/datalad_next/patches/push_to_export_remote.py
+++ b/datalad_next/patches/push_to_export_remote.py
@@ -167,6 +167,8 @@ def _transfer_data(repo: AnnexRepo,
)
return
+ from datalad.interface.results import annexjson2result
+
# TODO:
# - check for configuration entries, e.g. what to export
@@ -221,12 +223,13 @@ def _transfer_data(repo: AnnexRepo,
],
progress=True
):
- yield {
- **res_kwargs,
- "action": "copy",
- "status": "ok",
- "path": str(Path(res_kwargs["path"]) / result["file"])
- }
+ result_adjusted = \
+ annexjson2result(result, ds, **res_kwargs)
+ # annexjson2result overwrites 'action' with annex' 'command',
+ # even if we provided our 'action' within res_kwargs. Therefore,
+ # change afterwards instead:
+ result_adjusted['action'] = "copy"
+ yield result_adjusted
except CommandError as cmd_error:
ce = CapturedException(cmd_error)
diff --git a/docs/policy/release-management.md b/docs/policy/release-management.md
new file mode 100644
index 0000000..fdf9313
--- /dev/null
+++ b/docs/policy/release-management.md
@@ -0,0 +1,15 @@
+# Release team
+
+The release team (RT) is an charge reviewing merge requests, and issuing new releases.
+
+The members of the RT are defined in `docs/CODEOWNERS` in the `main` branch of the repository.
+
+The RT itself adds or removes RT members.
+
+It is the RT's duty to act on any merge request in a timely manner.
+
+A code review of at least one RT member is required for any changeset to be merged into the `main` branch.
+
+When all technical checks pass (e.g., CI success, resolved pull-request conversations), any RT member approval is a sufficient condition for an (automatic) merge of a changeset into the `main` branch.
+
+RT members are not expected to be an expert in all techniques, features, and parts of the code base. Consequently, a team member should seek feedback prior to approving merge requests whenever necessary.
| datalad/datalad-next | 35af20542c46c523f4d635ffc187fcc3d50ade6a | diff --git a/datalad_next/conftest.py b/datalad_next/conftest.py
index 7c727c3..1b42ad0 100644
--- a/datalad_next/conftest.py
+++ b/datalad_next/conftest.py
@@ -1,16 +1,1 @@
-try:
- from datalad.conftest import setup_package
-except ImportError:
- # assume old datalad without pytest support introduced in
- # https://github.com/datalad/datalad/pull/6273
- import pytest
- from datalad import setup_package as _setup_package
- from datalad import teardown_package as _teardown_package
-
-
- @pytest.fixture(autouse=True, scope="session")
- def setup_package():
- _setup_package()
- yield
- _teardown_package()
-
+from datalad.conftest import setup_package
diff --git a/datalad_next/patches/tests/test_push_to_export_remote.py b/datalad_next/patches/tests/test_push_to_export_remote.py
index 3b0ef55..bb51c0d 100644
--- a/datalad_next/patches/tests/test_push_to_export_remote.py
+++ b/datalad_next/patches/tests/test_push_to_export_remote.py
@@ -11,6 +11,7 @@ from datalad.tests.utils_pytest import (
SkipTest,
assert_false,
assert_in,
+ assert_in_results,
assert_true,
eq_,
)
@@ -55,11 +56,19 @@ class MockRepo:
def _call_annex_records_items_(self, *args, **kwargs):
yield {
- 'target': args[0][3],
- 'action': 'copy',
- 'status': 'ok',
+ "command": f"export {args[0][3]}",
"file": "file.txt",
+ "success": True,
+ "input": [],
+ "error-messages": []
}
+ yield {
+ "command": f"export {args[0][3]}",
+ "success": False,
+ "input": [],
+ "error-messages":
+ ["external special remote error: WHATEVER WENT WRONG"],
+ "file": "somefile"}
def _call_transfer(target: str,
@@ -67,6 +76,7 @@ def _call_transfer(target: str,
return_special_remotes: bool = True) -> Generator:
ds_mock = MagicMock()
ds_mock.config.getbool.return_value = config_result
+ ds_mock.pathobj = Path("/root")
return _transfer_data(
repo=MockRepo(return_special_remotes),
ds=ds_mock,
@@ -107,14 +117,16 @@ def test_patch_execute_export():
gele_mock.return_value = None
results = tuple(_call_transfer("yes-target", False))
eq_(pd_mock.call_count, 0)
- assert_in(
- {
- "path": str(Path("/root/file.txt")),
- "target": "yes-target",
- "action": "copy",
- "status": "ok",
- },
- results)
+ assert_in_results(results,
+ path=str(Path("/root/file.txt")),
+ target="yes-target",
+ action="copy",
+ status="ok")
+ assert_in_results(results,
+ path=str(Path("/root/somefile")),
+ target="yes-target",
+ action="copy",
+ status="error")
def test_patch_skip_ignore_targets_export():
@@ -144,14 +156,16 @@ def test_patch_check_envpatch():
gc_mock.return_value = {"secret": "abc", "user": "hans"}
results = tuple(_call_transfer("yes-target", False))
eq_(pd_mock.call_count, 0)
- assert_in(
- {
- "path": str(Path("/root/file.txt")),
- "target": "yes-target",
- "action": "copy",
- "status": "ok",
- },
- results)
+ assert_in_results(results,
+ path=str(Path("/root/file.txt")),
+ target="yes-target",
+ action="copy",
+ status="ok")
+ assert_in_results(results,
+ path=str(Path("/root/somefile")),
+ target="yes-target",
+ action="copy",
+ status="error")
def test_no_special_remotes():
| push to export remote patch swallows errors
This `try ... except`: https://github.com/datalad/datalad-next/blob/864896e26af7361b739b05df4fcd302b0b407db3/datalad_next/patches/push_to_export_remote.py#L216
is wrong. If `annex export` fails, we get an error result from `_call_annex_records_items_` rather than an exception, and this error result is unconditionally turned into a `status='ok'` record here. (A sketch of the corrected conversion follows this record.) | 0.0 | [
"datalad_next/patches/tests/test_push_to_export_remote.py::test_patch_execute_export",
"datalad_next/patches/tests/test_push_to_export_remote.py::test_patch_check_envpatch"
] | [
"datalad_next/patches/tests/test_push_to_export_remote.py::test_is_export_remote",
"datalad_next/patches/tests/test_push_to_export_remote.py::test_patch_pass_through",
"datalad_next/patches/tests/test_push_to_export_remote.py::test_patch_skip_ignore_targets_export",
"datalad_next/patches/tests/test_push_to_export_remote.py::test_no_special_remotes",
"datalad_next/patches/tests/test_push_to_export_remote.py::test_get_export_records_no_exports",
"datalad_next/patches/tests/test_push_to_export_remote.py::test_get_export_records",
"datalad_next/patches/tests/test_push_to_export_remote.py::test_get_export_log_entry"
] | 2022-08-12 16:59:17+00:00 | 1,838 |
|
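To make the reported behaviour concrete: the fix in this record stops hard-coding `status='ok'` and instead converts each git-annex JSON record with datalad's `annexjson2result`, so `success: false` records surface as errors. A rough sketch of that conversion, with `ds` assumed to be a datalad `Dataset` and the record shape taken from the test patch above:

```python
# Sketch only: `ds` is assumed to be a datalad Dataset for the repository
# being pushed; records mirror git-annex's JSON protocol output.
from datalad.interface.results import annexjson2result


def annex_record_to_result(annex_record, ds):
    # success=True maps to status='ok', success=False to status='error'
    res = annexjson2result(annex_record, ds)
    # annexjson2result reports annex's 'command' as the action, so the
    # patch overrides it afterwards
    res['action'] = 'copy'
    return res
```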
datapythonista__mnist-12 | diff --git a/README.md b/README.md
index b164037..80a9d4e 100644
--- a/README.md
+++ b/README.md
@@ -63,4 +63,18 @@ train_images = mnist.train_images()
x = images.reshape((images.shape[0], images.shape[1] * images.shape[2]))
```
+Both the url where the files can be found, and the temporary directory where
+they will be cached locally can be modified in the next way:
+```
+import mnist
+
+mnist.datasets_url = 'http://url-to-the/datasets'
+
+# temporary_dir is a function, so it can be dinamically created
+# like Python stdlib `tempfile.gettempdir` (which is the default)
+mnist.temporary_dir = lambda: '/tmp/mnist'
+
+train_images = mnist.train_images()
+```
+
It supports Python 2.7 and Python >= 3.5.
diff --git a/mnist/__init__.py b/mnist/__init__.py
index 2a41398..a567875 100644
--- a/mnist/__init__.py
+++ b/mnist/__init__.py
@@ -16,8 +16,11 @@ except ImportError:
import numpy
-# the url can be changed by the users of the library (not a constant)
+# `datasets_url` and `temporary_dir` can be set by the user using:
+# >>> mnist.datasets_url = 'http://my.mnist.url'
+# >>> mnist.temporary_dir = lambda: '/tmp/mnist'
datasets_url = 'http://yann.lecun.com/exdb/mnist/'
+temporary_dir = tempfile.gettempdir
class IdxDecodeError(ValueError):
@@ -45,8 +48,7 @@ def download_file(fname, target_dir=None, force=False):
fname : str
Full path of the downloaded file
"""
- if not target_dir:
- target_dir = tempfile.gettempdir()
+ target_dir = target_dir or temporary_dir()
target_fname = os.path.join(target_dir, fname)
if force or not os.path.isfile(target_fname):
| datapythonista/mnist | aeae1406afea11c1c23788f23856053edcfc536b | diff --git a/tests/test_download_mnist.py b/tests/test_download_mnist.py
index 631104d..e71aabe 100644
--- a/tests/test_download_mnist.py
+++ b/tests/test_download_mnist.py
@@ -97,8 +97,20 @@ class TestDownloadMNIST(unittest.TestCase):
@mock.patch('mnist.urlretrieve')
def test_datasets_url_is_used(self, urlretrieve):
+ original_url = mnist.datasets_url
mnist.datasets_url = 'http://aaa.com/'
mnist.download_file('mnist_datasets_url.gz')
fname = os.path.join(tempfile.gettempdir(), 'mnist_datasets_url.gz')
urlretrieve.assert_called_once_with(
'http://aaa.com/mnist_datasets_url.gz', fname)
+ mnist.datasets_url = original_url
+
+ @mock.patch('mnist.urlretrieve')
+ def test_temporary_dir_is_used(self, urlretrieve):
+ original_temp_dir = mnist.temporary_dir
+ mnist.temporary_dir = lambda: '/another/tmp/dir/'
+ fname = mnist.download_file('test')
+ urlretrieve.assert_called_once_with(mnist.datasets_url + 'test',
+ '/another/tmp/dir/test')
+ self.assertEqual(fname, '/another/tmp/dir/test')
+ mnist.temporary_dir = original_temp_dir
| Allowing specification of the download directory
Right now the functions listed in the README.md don't allow specifying the target directory. This functionality should be possible just by propagating extra parameters to the download function. (A usage sketch follows this record.) | 0.0 | [
"tests/test_download_mnist.py::TestDownloadMNIST::test_temporary_dir_is_used"
] | [
"tests/test_download_mnist.py::TestDownloadMNIST::test_datasets_url_is_used",
"tests/test_download_mnist.py::TestDownloadMNIST::test_file_is_downloaded_to_target_dir",
"tests/test_download_mnist.py::TestDownloadMNIST::test_file_is_downloaded_when_exists_and_force_is_true",
"tests/test_download_mnist.py::TestDownloadMNIST::test_file_is_not_downloaded_when_force_is_false"
] | 2018-09-24 13:52:46+00:00 | 1,839 |
|
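The merged change (see the README hunk in the patch above) addresses this with module-level settings rather than per-call parameters. A usage sketch; the cache directory is an illustrative choice:

```python
import mnist

# cache the downloaded IDX files somewhere other than the default temp dir;
# temporary_dir is a function so the location can be computed lazily
mnist.temporary_dir = lambda: '/tmp/mnist-cache'

train_images = mnist.train_images()
print(train_images.shape)  # expected to be (60000, 28, 28)
```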
datosgobar__pydatajson-125 | diff --git a/docs/MANUAL.md b/docs/MANUAL.md
index 2191b01..39d9678 100644
--- a/docs/MANUAL.md
+++ b/docs/MANUAL.md
@@ -94,8 +94,11 @@ Toma los siguientes parámetros:
- **portal_url**: URL del portal de CKAN de destino.
- **apikey**: La apikey de un usuario del portal de destino con los permisos para crear el dataset bajo la
organización pasada como parámetro.
+ - **demote_superThemes** (opcional, default: True):Si está en true, los ids de los themes del dataset, se escriben
+ como groups de CKAN.
- **demote_themes** (opcional, default: True): Si está en true, los labels de los themes del dataset, se escriben como
tags de CKAN; sino,se pasan como grupo.
+
Retorna el id en el nodo de destino del dataset federado.
diff --git a/pydatajson/ckan_utils.py b/pydatajson/ckan_utils.py
index 31f6737..b915bde 100644
--- a/pydatajson/ckan_utils.py
+++ b/pydatajson/ckan_utils.py
@@ -14,7 +14,8 @@ def append_attribute_to_extra(package, dataset, attribute, serialize=False):
package['extras'].append({'key': attribute, 'value': value})
-def map_dataset_to_package(dataset, catalog_id, owner_org, theme_taxonomy, demote_themes=True):
+def map_dataset_to_package(dataset, catalog_id, owner_org, theme_taxonomy,
+ demote_superThemes=True, demote_themes=True):
package = dict()
package['extras'] = []
# Obligatorios
@@ -33,8 +34,10 @@ def map_dataset_to_package(dataset, catalog_id, owner_org, theme_taxonomy, demot
package['resources'] = map_distributions_to_resources(distributions, catalog_id)
super_themes = dataset['superTheme']
- package['groups'] = [{'name': title_to_name(super_theme, decode=False)} for super_theme in super_themes]
append_attribute_to_extra(package, dataset, 'superTheme', serialize=True)
+ if demote_superThemes:
+ package['groups'] = [{'name': title_to_name(super_theme, decode=False)} for super_theme in super_themes]
+
# Recomendados y opcionales
package['url'] = dataset.get('landingPage')
@@ -66,7 +69,8 @@ def map_dataset_to_package(dataset, catalog_id, owner_org, theme_taxonomy, demot
label = next(x['label'] for x in theme_taxonomy if x['id'] == theme)
package['tags'].append({'name': label})
else:
- package['groups'] += [{'name': title_to_name(theme, decode=False)} for theme in themes]
+ package['groups'] = package.get('groups', []) + [{'name': title_to_name(theme, decode=False)}
+ for theme in themes]
return package
diff --git a/pydatajson/federation.py b/pydatajson/federation.py
index f9a4f6b..7807dfe 100644
--- a/pydatajson/federation.py
+++ b/pydatajson/federation.py
@@ -10,7 +10,7 @@ from .search import get_datasets
def push_dataset_to_ckan(catalog, catalog_id, owner_org, dataset_origin_identifier, portal_url, apikey,
- demote_themes=True):
+ demote_superThemes=True, demote_themes=True):
"""Escribe la metadata de un dataset en el portal pasado por parámetro.
Args:
@@ -20,6 +20,7 @@ def push_dataset_to_ckan(catalog, catalog_id, owner_org, dataset_origin_identifi
dataset_origin_identifier (str): El id del dataset que se va a federar.
portal_url (str): La URL del portal CKAN de destino.
apikey (str): La apikey de un usuario con los permisos que le permitan crear o actualizar el dataset.
+ demote_superThemes(bool): Si está en true, los ids de los super themes del dataset, se propagan como grupo.
demote_themes(bool): Si está en true, los labels de los themes del dataset, pasan a ser tags. Sino,
se pasan como grupo.
@@ -30,7 +31,8 @@ def push_dataset_to_ckan(catalog, catalog_id, owner_org, dataset_origin_identifi
ckan_portal = RemoteCKAN(portal_url, apikey=apikey)
theme_taxonomy = catalog.themes
- package = map_dataset_to_package(dataset, catalog_id, owner_org, theme_taxonomy, demote_themes=demote_themes)
+ package = map_dataset_to_package(dataset, catalog_id, owner_org, theme_taxonomy,
+ demote_superThemes, demote_themes)
# Get license id
if dataset.get('license'):
| datosgobar/pydatajson | 3c428354f3f1b48b9b70815ba370e8cd1b11b07b | diff --git a/tests/test_ckan_utils.py b/tests/test_ckan_utils.py
index 3884251..dca112c 100644
--- a/tests/test_ckan_utils.py
+++ b/tests/test_ckan_utils.py
@@ -67,8 +67,8 @@ class DatasetConversionTestCase(unittest.TestCase):
self.assertCountEqual(keywords + theme_labels, tags)
def test_themes_are_preserved_if_not_demoted(self):
- package = map_dataset_to_package(self.dataset, self.catalog_id, 'owner', self.catalog.themes,
- demote_themes=False)
+ package = map_dataset_to_package(self.dataset, self.catalog_id, 'owner',
+ self.catalog.themes, demote_themes=False)
groups = [group['name'] for group in package.get('groups', [])]
super_themes = [title_to_name(s_theme.lower()) for s_theme in self.dataset.get('superTheme')]
themes = self.dataset.get('theme', [])
@@ -84,15 +84,48 @@ class DatasetConversionTestCase(unittest.TestCase):
except AttributeError:
self.assertCountEqual(keywords, tags)
+ def test_superThemes_dont_impact_groups_if_not_demoted(self):
+ package = map_dataset_to_package(self.dataset, self.catalog_id, 'owner',
+ self.catalog.themes, demote_superThemes=False)
+ groups = [group['name'] for group in package.get('groups', [])]
+ tags = [tag['name'] for tag in package['tags']]
+ keywords = self.dataset.get('keyword', [])
+ themes = self.dataset.get('theme', [])
+ theme_labels = []
+ for theme in themes:
+ label = next(x['label'] for x in self.catalog.themes if x['id'] == theme)
+ theme_labels.append(label)
+ try:
+ self.assertItemsEqual([], groups)
+ except AttributeError:
+ self.assertCountEqual([], groups)
+ try:
+ self.assertItemsEqual(keywords + theme_labels, tags)
+ except AttributeError:
+ self.assertCountEqual(keywords + theme_labels, tags)
+
+ def test_preserve_themes_and_superThemes(self):
+ package = map_dataset_to_package(self.dataset, self.catalog_id, 'owner',
+ self.catalog.themes, False, False)
+ groups = [group['name'] for group in package.get('groups', [])]
+ tags = [tag['name'] for tag in package['tags']]
+ keywords = self.dataset.get('keyword', [])
+ themes = self.dataset.get('theme', [])
+ try:
+ self.assertItemsEqual(themes, groups)
+ except AttributeError:
+ self.assertCountEqual(themes, groups)
+ try:
+ self.assertItemsEqual(keywords, tags)
+ except AttributeError:
+ self.assertCountEqual(keywords, tags)
+
def test_dataset_extra_attributes_are_correct(self):
package = map_dataset_to_package(self.dataset, self.catalog_id, 'owner', self.catalog.themes)
# extras are included in dataset
if package['extras']:
for extra in package['extras']:
- if extra['key'] == 'super_theme':
- dataset_value = self.dataset['superTheme']
- else:
- dataset_value = self.dataset[extra['key']]
+ dataset_value = self.dataset[extra['key']]
if type(dataset_value) is list:
extra_value = json.loads(extra['value'])
try:
@@ -106,7 +139,7 @@ class DatasetConversionTestCase(unittest.TestCase):
def test_dataset_extra_attributes_are_complete(self):
package = map_dataset_to_package(self.dataset, self.catalog_id, 'owner', self.catalog.themes)
# dataset attributes are included in extras
- extra_attrs = ['issued', 'modified', 'accrualPeriodicity', 'temporal', 'language', 'spatial']
+ extra_attrs = ['issued', 'modified', 'accrualPeriodicity', 'temporal', 'language', 'spatial', 'superTheme']
for key in extra_attrs:
value = self.dataset.get(key)
if value:
@@ -115,8 +148,6 @@ class DatasetConversionTestCase(unittest.TestCase):
resulting_dict = {'key': key, 'value': value}
self.assertTrue(resulting_dict in package['extras'])
- self.assertTrue({'key': 'super_theme', 'value': json.dumps(self.dataset['superTheme'])})
-
def test_resources_replicated_attributes_stay_the_same(self):
resources = map_distributions_to_resources(self.distributions, self.catalog_id+'_'+self.dataset_id)
for resource in resources:
 | Add an option to copy `superTheme` into `theme` in `push_dataset_to_ckan`
**Context**
Global themes (`superTheme`) are cross-cutting themes under which original nodes classify their datasets so they can be indexed in an aggregating node.
A dataset's `superTheme` in its original node is that dataset's `theme` in the aggregating node (these are the themes CKAN uses for display on the Portal landing page).
**To implement**
Add an optional argument `superTheme_to_theme`, defaulting to `True`, to the `push_dataset_to_ckan` function.
This _flag_ copies every element of the `superTheme` array in a dataset's original metadata and appends them to the `theme` array of the dataset that is going to be pushed to a CKAN instance.
**Notes**
This is a transformation of the original metadata whose purpose is to federate a dataset's metadata correctly in the context of a different portal (one with a _cross-cutting_ audience and thematic scope, while the original node has a _specific_ audience and thematic scope).
**Deliverable**
The new option in `push_dataset_to_ckan`, together with its associated tests and the corresponding changes to the RTD documentation. (A usage sketch follows this record.) | 0.0 | [
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_preserve_themes_and_superThemes",
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_superThemes_dont_impact_groups_if_not_demoted"
] | [
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_dataset_array_attributes_are_correct",
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_dataset_extra_attributes_are_complete",
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_dataset_extra_attributes_are_correct",
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_dataset_nested_replicated_attributes_stay_the_same",
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_replicated_plain_attributes_are_corrext",
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_resources_extra_attributes_are_created_correctly",
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_resources_replicated_attributes_stay_the_same",
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_resources_transformed_attributes_are_correct",
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_themes_are_preserved_if_not_demoted",
"tests/test_ckan_utils.py::DatetimeConversionTests::test_dates_change_correctly",
"tests/test_ckan_utils.py::DatetimeConversionTests::test_dates_stay_the_same",
"tests/test_ckan_utils.py::DatetimeConversionTests::test_datetimes_without_microseconds_are_handled_correctly",
"tests/test_ckan_utils.py::DatetimeConversionTests::test_datetimes_without_seconds_are_handled_correctly",
"tests/test_ckan_utils.py::DatetimeConversionTests::test_datetimes_without_timezones_stay_the_same",
"tests/test_ckan_utils.py::DatetimeConversionTests::test_timezones_are_handled_correctly"
] | 2018-03-12 20:48:48+00:00 | 1,840 |
|
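A hedged usage sketch of the flag introduced by this record (the merged patch names it `demote_superThemes` rather than the `superTheme_to_theme` proposed in the issue; setting it to `False` keeps the superThemes out of the CKAN groups). Portal URL, API key, and identifiers are placeholders, and the argument order follows the signature as of this record's patch:

```python
from pydatajson import DataJson
from pydatajson.federation import push_dataset_to_ckan

catalog = DataJson('data.json')       # origin catalog (placeholder path)

push_dataset_to_ckan(
    catalog,
    'origin-catalog-id',              # catalog_id (placeholder)
    'target-organization',            # owner_org in the destination portal
    'dataset-identifier',             # dataset to federate (placeholder)
    'http://ckan.example.org',        # destination CKAN portal (placeholder)
    'my-api-key',                     # placeholder API key
    demote_superThemes=False,         # keep superThemes as an extra, not as groups
)
```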
datosgobar__pydatajson-137 | diff --git a/docs/MANUAL.md b/docs/MANUAL.md
index cd72809..914f87c 100644
--- a/docs/MANUAL.md
+++ b/docs/MANUAL.md
@@ -107,7 +107,7 @@ Toma los siguientes parámetros:
mantener una consistencia más estricta dentro del catálogo a federar, es necesario validar los datos antes de pasarlos
a la función.
-- **pydatajson.DataJson.remove_dataset_from_ckan()**: Hace un borrado físico de un dataset en un portal de CKAN.
+- **pydatajson.federation.remove_dataset_from_ckan()**: Hace un borrado físico de un dataset en un portal de CKAN.
Toma los siguientes parámetros:
- **portal_url**: La URL del portal CKAN. Debe implementar la funcionalidad de `/data.json`.
- **apikey**: La apikey de un usuario con los permisos que le permitan borrar el dataset.
@@ -121,6 +121,16 @@ Toma los siguientes parámetros:
En caso de pasar más de un parámetro opcional, la función `remove_dataset_from_ckan()` borra aquellos datasets que
cumplan con todas las condiciones.
+- **pydatajson.DataJson.push_theme_to_ckan()**: Crea un tema en el portal de destino
+Toma los siguientes parámetros:
+ - **portal_url**: La URL del portal CKAN. Debe implementar la funcionalidad de `/data.json`.
+ - **apikey**: La apikey de un usuario con los permisos que le permitan borrar el dataset.
+ - **identifier** (opcional, default: None): Id del `theme` que se quiere federar, en el catálogo de origen.
+ - **label** (opcional, default: None): label del `theme` que se quiere federar, en el catálogo de origen.
+
+ Debe pasarse por lo menos uno de los 2 parámetros opcionales. En caso de que se provean los 2, se prioriza el
+ identifier sobre el label.
+
## Uso
### Setup
diff --git a/pydatajson/ckan_utils.py b/pydatajson/ckan_utils.py
index 9724f44..9f68692 100644
--- a/pydatajson/ckan_utils.py
+++ b/pydatajson/ckan_utils.py
@@ -2,7 +2,6 @@
# -*- coding: utf-8 -*-
import json
import re
-import sys
from datetime import time
from dateutil import parser, tz
from .helpers import title_to_name
@@ -109,10 +108,21 @@ def map_distributions_to_resources(distributions, catalog_id=None):
resource['mimetype'] = distribution.get('mediaType')
resource['size'] = distribution.get('byteSize')
resource['accessURL'] = distribution.get('accessURL')
- resource['fileName'] = distribution.get('fileName')
+ fileName = distribution.get('fileName')
+ if fileName:
+ resource['fileName'] = fileName
dist_fields = distribution.get('field')
if dist_fields:
resource['attributesDescription'] = json.dumps(dist_fields)
resources.append(resource)
return resources
+
+
+def map_theme_to_group(theme):
+
+ return {
+ "name": title_to_name(theme.get('id') or theme['label']),
+ "title": theme.get('label'),
+ "description": theme.get('description'),
+ }
diff --git a/pydatajson/federation.py b/pydatajson/federation.py
index 9573040..9f132cd 100644
--- a/pydatajson/federation.py
+++ b/pydatajson/federation.py
@@ -5,7 +5,7 @@
from __future__ import print_function
from ckanapi import RemoteCKAN
from ckanapi.errors import NotFound
-from .ckan_utils import map_dataset_to_package
+from .ckan_utils import map_dataset_to_package, map_theme_to_group
from .search import get_datasets
@@ -23,7 +23,6 @@ def push_dataset_to_ckan(catalog, owner_org, dataset_origin_identifier, portal_u
demote_superThemes(bool): Si está en true, los ids de los super themes del dataset, se propagan como grupo.
demote_themes(bool): Si está en true, los labels de los themes del dataset, pasan a ser tags. Sino,
se pasan como grupo.
-
Returns:
str: El id del dataset en el catálogo de destino.
"""
@@ -103,3 +102,22 @@ def remove_datasets_from_ckan(portal_url, apikey, filter_in=None, filter_out=Non
for identifier in identifiers:
ckan_portal.call_action('dataset_purge', data_dict={'id': identifier})
+
+
+def push_theme_to_ckan(catalog, portal_url, apikey, identifier=None, label=None):
+ """Escribe la metadata de un theme en el portal pasado por parámetro.
+
+ Args:
+ catalog (DataJson): El catálogo de origen que contiene el theme.
+ portal_url (str): La URL del portal CKAN de destino.
+ apikey (str): La apikey de un usuario con los permisos que le permitan crear o actualizar el dataset.
+ identifier (str): El identificador para buscar el theme en la taxonomia.
+ label (str): El label para buscar el theme en la taxonomia.
+ Returns:
+ str: El name del theme en el catálogo de destino.
+ """
+ ckan_portal = RemoteCKAN(portal_url, apikey=apikey)
+ theme = catalog.get_theme(identifier=identifier, label=label)
+ group = map_theme_to_group(theme)
+ pushed_group = ckan_portal.call_action('group_create', data_dict=group)
+ return pushed_group['name']
| datosgobar/pydatajson | afc2856312e6ebea0e76396c2b1f663193b962e0 | diff --git a/tests/test_ckan_utils.py b/tests/test_ckan_utils.py
index f90406e..83a7697 100644
--- a/tests/test_ckan_utils.py
+++ b/tests/test_ckan_utils.py
@@ -2,12 +2,8 @@
import unittest
import os
-import json
-import re
-import sys
-from dateutil import parser, tz
from .context import pydatajson
-from pydatajson.ckan_utils import map_dataset_to_package, map_distributions_to_resources, convert_iso_string_to_utc
+from pydatajson.ckan_utils import *
from pydatajson.helpers import title_to_name
SAMPLES_DIR = os.path.join("tests", "samples")
@@ -216,6 +212,57 @@ class DatasetConversionTestCase(unittest.TestCase):
self.assertIsNone(resource.get('attributesDescription'))
+class ThemeConversionTests(unittest.TestCase):
+
+ @classmethod
+ def get_sample(cls, sample_filename):
+ return os.path.join(SAMPLES_DIR, sample_filename)
+
+ @classmethod
+ def setUpClass(cls):
+ catalog = pydatajson.DataJson(cls.get_sample('full_data.json'))
+ cls.theme = catalog.get_theme(identifier='adjudicaciones')
+
+ def test_all_attributes_are_replicated_if_present(self):
+ group = map_theme_to_group(self.theme)
+ self.assertEqual('adjudicaciones', group['name'])
+ self.assertEqual('Adjudicaciones', group['title'])
+ self.assertEqual('Datasets sobre licitaciones adjudicadas.', group['description'])
+
+ def test_label_is_used_as_name_if_id_not_present(self):
+ missing_id = dict(self.theme)
+ missing_id['label'] = u'#Will be used as name#'
+ missing_id.pop('id')
+ group = map_theme_to_group(missing_id)
+ self.assertEqual('will-be-used-as-name', group['name'])
+ self.assertEqual('#Will be used as name#', group['title'])
+ self.assertEqual('Datasets sobre licitaciones adjudicadas.', group['description'])
+
+ def test_theme_missing_label(self):
+ missing_label = dict(self.theme)
+ missing_label.pop('label')
+ group = map_theme_to_group(missing_label)
+ self.assertEqual('adjudicaciones', group['name'])
+ self.assertIsNone(group.get('title'))
+ self.assertEqual('Datasets sobre licitaciones adjudicadas.', group['description'])
+
+ def test_theme_missing_description(self):
+ missing_description = dict(self.theme)
+ missing_description.pop('description')
+ group = map_theme_to_group(missing_description)
+ self.assertEqual('adjudicaciones', group['name'])
+ self.assertEqual('Adjudicaciones', group['title'])
+ self.assertIsNone(group['description'])
+
+ def test_id_special_characters_are_removed(self):
+ special_char_id = dict(self.theme)
+ special_char_id['id'] = u'#Théme& $id?'
+ group = map_theme_to_group(special_char_id)
+ self.assertEqual('theme-id', group['name'])
+ self.assertEqual('Adjudicaciones', group['title'])
+ self.assertEqual('Datasets sobre licitaciones adjudicadas.', group['description'])
+
+
class DatetimeConversionTests(unittest.TestCase):
def test_timezones_are_handled_correctly(self):
diff --git a/tests/test_federation.py b/tests/test_federation.py
index e6804b9..fd8284e 100644
--- a/tests/test_federation.py
+++ b/tests/test_federation.py
@@ -3,14 +3,14 @@
import unittest
import os
import re
-import sys
+
try:
from mock import patch, MagicMock
except ImportError:
from unittest.mock import patch, MagicMock
from .context import pydatajson
-from pydatajson.federation import push_dataset_to_ckan, remove_datasets_from_ckan
+from pydatajson.federation import push_dataset_to_ckan, remove_datasets_from_ckan, push_theme_to_ckan
from ckanapi.errors import NotFound
SAMPLES_DIR = os.path.join("tests", "samples")
@@ -215,3 +215,40 @@ class RemoveDatasetTestCase(unittest.TestCase):
'portal', 'key', only_time_series=True, organization='some_org')
mock_portal.return_value.call_action.assert_called_with(
'dataset_purge', data_dict={'id': 'id_2'})
+
+
+class PushThemeTestCase(unittest.TestCase):
+
+ @classmethod
+ def get_sample(cls, sample_filename):
+ return os.path.join(SAMPLES_DIR, sample_filename)
+
+ @classmethod
+ def setUpClass(cls):
+ cls.catalog = pydatajson.DataJson(cls.get_sample('full_data.json'))
+
+ @patch('pydatajson.federation.RemoteCKAN', autospec=True)
+ def test_empty_theme_search_raises_exception(self, mock_portal):
+ with self.assertRaises(AssertionError):
+ push_theme_to_ckan(self.catalog, 'portal_url', 'apikey')
+
+ @patch('pydatajson.federation.RemoteCKAN', autospec=True)
+ def test_function_pushes_theme_by_identifier(self, mock_portal):
+ mock_portal.return_value.call_action = MagicMock(return_value={'name': 'group_name'})
+ result = push_theme_to_ckan(self.catalog, 'portal_url', 'apikey', identifier='compras')
+ self.assertEqual('group_name', result)
+
+ @patch('pydatajson.federation.RemoteCKAN', autospec=True)
+ def test_function_pushes_theme_by_label(self, mock_portal):
+ mock_portal.return_value.call_action = MagicMock(return_value={'name': 'other_name'})
+ result = push_theme_to_ckan(self.catalog, 'portal_url', 'apikey', label='Adjudicaciones')
+ self.assertEqual('other_name', result)
+
+ @patch('pydatajson.federation.RemoteCKAN', autospec=True)
+ def test_ckan_portal_is_called_with_correct_parametres(self, mock_portal):
+ mock_portal.return_value.call_action = MagicMock(return_value={'name': u'contrataciones'})
+ group = {'name': u'contrataciones',
+ 'title': u'Contrataciones',
+ 'description': u'Datasets sobre contrataciones.'}
+ push_theme_to_ckan(self.catalog, 'portal_url', 'apikey', identifier='contrataciones')
+ mock_portal.return_value.call_action.assert_called_once_with('group_create', data_dict=group)
| Función push theme_to_ckan()
**Contexto**
Por el momento, la función `push_dataset_to_ckan()` federa los datasets sin tener en cuenta los `themes` presentes en el catálogo de destino. La operación se concreta, pero cualquier `theme` del dataset que no esté presente en el catálogo es ignorado.
El objetivo es implementar una función `push_theme_to_ckan()`, que mediante la API, cree los grupos de CKAN indicados. Esta función será luego utilizada por si sola o en conjunto con los wrappers para generar los temas pertinentes de un catalogo. | 0.0 | [
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_catalog_id_is_prefixed_in_resource_id_if_passed",
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_catalog_id_is_prepended_to_dataset_id_if_passed",
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_dataset_array_attributes_are_correct",
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_dataset_extra_attributes_are_complete",
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_dataset_extra_attributes_are_correct",
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_dataset_id_is_preserved_if_catlog_id_is_not_passed",
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_dataset_nested_replicated_attributes_stay_the_same",
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_preserve_themes_and_superThemes",
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_replicated_plain_attributes_are_corrext",
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_resource_id_is_preserved_if_catalog_id_is_not_passed",
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_resources_extra_attributes_are_created_correctly",
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_resources_replicated_attributes_stay_the_same",
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_resources_transformed_attributes_are_correct",
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_superThemes_dont_impact_groups_if_not_demoted",
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_themes_are_preserved_if_not_demoted",
"tests/test_ckan_utils.py::ThemeConversionTests::test_all_attributes_are_replicated_if_present",
"tests/test_ckan_utils.py::ThemeConversionTests::test_id_special_characters_are_removed",
"tests/test_ckan_utils.py::ThemeConversionTests::test_label_is_used_as_name_if_id_not_present",
"tests/test_ckan_utils.py::ThemeConversionTests::test_theme_missing_description",
"tests/test_ckan_utils.py::ThemeConversionTests::test_theme_missing_label",
"tests/test_ckan_utils.py::DatetimeConversionTests::test_dates_change_correctly",
"tests/test_ckan_utils.py::DatetimeConversionTests::test_dates_stay_the_same",
"tests/test_ckan_utils.py::DatetimeConversionTests::test_datetimes_without_microseconds_are_handled_correctly",
"tests/test_ckan_utils.py::DatetimeConversionTests::test_datetimes_without_seconds_are_handled_correctly",
"tests/test_ckan_utils.py::DatetimeConversionTests::test_datetimes_without_timezones_stay_the_same",
"tests/test_ckan_utils.py::DatetimeConversionTests::test_timezones_are_handled_correctly",
"tests/test_federation.py::PushDatasetTestCase::test_dataset_id_is_preserved_if_catalog_id_is_not_passed",
"tests/test_federation.py::PushDatasetTestCase::test_dataset_without_license_sets_notspecified",
"tests/test_federation.py::PushDatasetTestCase::test_id_is_created_correctly",
"tests/test_federation.py::PushDatasetTestCase::test_id_is_updated_correctly",
"tests/test_federation.py::PushDatasetTestCase::test_licenses_are_interpreted_correctly",
"tests/test_federation.py::PushDatasetTestCase::test_tags_are_passed_correctly",
"tests/test_federation.py::RemoveDatasetTestCase::test_empty_search_doesnt_call_purge",
"tests/test_federation.py::RemoveDatasetTestCase::test_filter_in_datasets",
"tests/test_federation.py::RemoveDatasetTestCase::test_filter_in_out_datasets",
"tests/test_federation.py::RemoveDatasetTestCase::test_query_one_dataset",
"tests/test_federation.py::RemoveDatasetTestCase::test_query_over_500_datasets",
"tests/test_federation.py::RemoveDatasetTestCase::test_remove_through_filters_and_organization",
"tests/test_federation.py::PushThemeTestCase::test_ckan_portal_is_called_with_correct_parametres",
"tests/test_federation.py::PushThemeTestCase::test_empty_theme_search_raises_exception",
"tests/test_federation.py::PushThemeTestCase::test_function_pushes_theme_by_identifier",
"tests/test_federation.py::PushThemeTestCase::test_function_pushes_theme_by_label"
] | [] | 2018-04-06 14:20:19+00:00 | 1,841 |
|
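For orientation, here is a minimal usage sketch of the `push_theme_to_ckan()` function requested in the record above, based only on the call pattern exercised by the project's test suite (`push_theme_to_ckan(catalog, portal_url, apikey, identifier=...)` against a catalog such as `tests/samples/full_data.json`). The portal URL and API key are placeholders, not real endpoints or credentials.

```python
from pydatajson import DataJson
from pydatajson.federation import push_theme_to_ckan

# Placeholder portal and credentials; a real call needs a reachable CKAN.
PORTAL_URL = "http://my-andino.example.org"
APIKEY = "my-apikey"

# Load a catalog that declares a themeTaxonomy (as full_data.json does).
catalog = DataJson("tests/samples/full_data.json")

# Create the CKAN group for the theme identified as "compras";
# the function returns the name of the created group.
group_name = push_theme_to_ckan(catalog, PORTAL_URL, APIKEY,
                                identifier="compras")
print(group_name)
```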
datosgobar__pydatajson-153 | diff --git a/pydatajson/ckan_utils.py b/pydatajson/ckan_utils.py
index 6df8eaa..c67d3dc 100644
--- a/pydatajson/ckan_utils.py
+++ b/pydatajson/ckan_utils.py
@@ -1,5 +1,7 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
+from __future__ import print_function
+
import json
import re
from datetime import time
@@ -76,12 +78,13 @@ def map_dataset_to_package(catalog, dataset, owner_org, catalog_id=None,
if themes and demote_themes:
package['tags'] = package.get('tags', [])
for theme in themes:
+ # si falla continúa sin agregar ese theme a los tags del dataset
try:
- label = catalog.get_theme(identifier=theme)['label']
- except:
- label = catalog.get_theme(label=theme)['label']
- label = re.sub(r'[^\wá-úÁ-ÚñÑ .-]+', '', label, flags=re.UNICODE)
- package['tags'].append({'name': label})
+ label = _get_theme_label(catalog, theme)
+ package['tags'].append({'name': label})
+ except Exception as e:
+ print(e)
+ continue
else:
package['groups'] = package.get('groups', []) + [
{'name': title_to_name(theme, decode=False)}
@@ -91,6 +94,18 @@ def map_dataset_to_package(catalog, dataset, owner_org, catalog_id=None,
return package
+def _get_theme_label(catalog, theme):
+ """Intenta conseguir el theme por id o por label."""
+ try:
+ label = catalog.get_theme(identifier=theme)['label']
+ except:
+ label = catalog.get_theme(label=theme)['label']
+
+ label = re.sub(r'[^\wá-úÁ-ÚñÑ .-]+', '',
+ label, flags=re.UNICODE)
+ return label
+
+
def convert_iso_string_to_utc(date_string):
date_time = parser.parse(date_string)
if date_time.time() == time(0):
diff --git a/pydatajson/federation.py b/pydatajson/federation.py
index 43e932e..b503d95 100644
--- a/pydatajson/federation.py
+++ b/pydatajson/federation.py
@@ -5,11 +5,13 @@ de la API de CKAN.
"""
from __future__ import print_function
+import logging
from ckanapi import RemoteCKAN
-from ckanapi.errors import NotFound
+from ckanapi.errors import NotFound, NotAuthorized
from .ckan_utils import map_dataset_to_package, map_theme_to_group
from .search import get_datasets
+logger = logging.getLogger(__name__)
def push_dataset_to_ckan(catalog, owner_org, dataset_origin_identifier,
portal_url, apikey, catalog_id=None,
@@ -250,14 +252,20 @@ def harvest_catalog_to_ckan(catalog, portal_url, apikey, catalog_id,
Returns:
str: El id del dataset en el catálogo de destino.
"""
- dataset_list = dataset_list or [ds['identifier']
- for ds in catalog.datasets]
+ # Evitar entrar con valor falsy
+ if dataset_list is None:
+ dataset_list = [ds['identifier'] for ds in catalog.datasets]
owner_org = owner_org or catalog_id
harvested = []
for dataset_id in dataset_list:
- harvested_id = harvest_dataset_to_ckan(
- catalog, owner_org, dataset_id, portal_url, apikey, catalog_id)
- harvested.append(harvested_id)
+ try:
+ harvested_id = harvest_dataset_to_ckan(
+ catalog, owner_org, dataset_id, portal_url, apikey, catalog_id)
+ harvested.append(harvested_id)
+ except (NotAuthorized, NotFound, KeyError, TypeError) as e:
+ logger.error("Error federando catalogo:"+catalog_id+", dataset:"+dataset_id + "al portal: "+portal_url)
+ logger.error(str(e))
+
return harvested
| datosgobar/pydatajson | dae546a739eb2aab1c34b3d8bbb5896fe804e0aa | diff --git a/tests/test_federation.py b/tests/test_federation.py
index fe95079..9d0515f 100644
--- a/tests/test_federation.py
+++ b/tests/test_federation.py
@@ -223,6 +223,13 @@ class PushDatasetTestCase(unittest.TestCase):
self.assertCountEqual([self.catalog_id+'_'+ds['identifier'] for ds in self.catalog.datasets],
harvested_ids)
+ @patch('pydatajson.federation.RemoteCKAN', autospec=True)
+ def test_harvest_catalog_with_empty_list(self, mock_portal):
+ harvested_ids = harvest_catalog_to_ckan(self.catalog, 'portal', 'key', self.catalog_id,
+ owner_org='owner', dataset_list=[])
+ mock_portal.assert_not_called()
+ self.assertEqual([], harvested_ids)
+
class RemoveDatasetTestCase(unittest.TestCase):
| Make harvest_catalog_to_ckan() handling more robust
Two problems need to be fixed:
- If an empty list is passed as the dataset list, no dataset should be federated. Currently all of them are federated.
- If any of the calls to `harvest_dataset_to_ckan()` fails, log it and continue with the rest. Currently the whole catalog federation raises the exception. (A sketch of this pattern follows this record.) | 0.0 | [
"tests/test_federation.py::PushDatasetTestCase::test_harvest_catalog_with_empty_list"
] | [
"tests/test_federation.py::PushDatasetTestCase::test_dataset_id_is_preserved_if_catalog_id_is_not_passed",
"tests/test_federation.py::PushDatasetTestCase::test_dataset_level_wrappers",
"tests/test_federation.py::PushDatasetTestCase::test_dataset_without_license_sets_notspecified",
"tests/test_federation.py::PushDatasetTestCase::test_harvest_catalog_with_dataset_list",
"tests/test_federation.py::PushDatasetTestCase::test_harvest_catalog_with_no_optional_parametres",
"tests/test_federation.py::PushDatasetTestCase::test_harvest_catalog_with_owner_org",
"tests/test_federation.py::PushDatasetTestCase::test_id_is_created_correctly",
"tests/test_federation.py::PushDatasetTestCase::test_id_is_updated_correctly",
"tests/test_federation.py::PushDatasetTestCase::test_licenses_are_interpreted_correctly",
"tests/test_federation.py::PushDatasetTestCase::test_tags_are_passed_correctly",
"tests/test_federation.py::RemoveDatasetTestCase::test_empty_search_doesnt_call_purge",
"tests/test_federation.py::RemoveDatasetTestCase::test_filter_in_datasets",
"tests/test_federation.py::RemoveDatasetTestCase::test_filter_in_out_datasets",
"tests/test_federation.py::RemoveDatasetTestCase::test_query_one_dataset",
"tests/test_federation.py::RemoveDatasetTestCase::test_query_over_500_datasets",
"tests/test_federation.py::RemoveDatasetTestCase::test_remove_through_filters_and_organization",
"tests/test_federation.py::PushThemeTestCase::test_ckan_portal_is_called_with_correct_parametres",
"tests/test_federation.py::PushThemeTestCase::test_empty_theme_search_raises_exception",
"tests/test_federation.py::PushThemeTestCase::test_function_pushes_theme_by_identifier",
"tests/test_federation.py::PushThemeTestCase::test_function_pushes_theme_by_label",
"tests/test_federation.py::PushCatalogThemesTestCase::test_empty_portal_pushes_every_theme",
"tests/test_federation.py::PushCatalogThemesTestCase::test_full_portal_pushes_nothing",
"tests/test_federation.py::PushCatalogThemesTestCase::test_non_empty_intersection_pushes_missing_themes"
] | 2018-04-24 17:27:32+00:00 | 1,842 |
|
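The fix recorded above reduces to two guards: treat an explicitly empty `dataset_list` as "harvest nothing", and log-and-continue when a single dataset fails instead of aborting the whole catalog. The following is a generic sketch of that pattern, not the pydatajson implementation itself; `harvest_one` is a hypothetical stand-in for a per-dataset harvest call.

```python
import logging

logger = logging.getLogger(__name__)


def harvest_all(catalog_datasets, dataset_list, harvest_one):
    """Harvest each dataset id, skipping failures instead of aborting."""
    # Only fall back to "every dataset" when no list was given at all;
    # an explicit empty list must harvest nothing.
    if dataset_list is None:
        dataset_list = [ds["identifier"] for ds in catalog_datasets]

    harvested = []
    for dataset_id in dataset_list:
        try:
            harvested.append(harvest_one(dataset_id))
        except Exception:
            # Log the failure and keep going with the remaining datasets.
            logger.exception("Error harvesting dataset %s", dataset_id)
    return harvested
```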
datosgobar__pydatajson-181 | diff --git a/pydatajson/time_series.py b/pydatajson/time_series.py
index 683020d..182986c 100644
--- a/pydatajson/time_series.py
+++ b/pydatajson/time_series.py
@@ -10,7 +10,8 @@ definidas según la extensión del perfil de metadatos para series de tiempo.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import with_statement
-import os
+
+from . import custom_exceptions as ce
def field_is_time_series(field, distribution=None):
@@ -42,10 +43,10 @@ def get_distribution_time_index(distribution):
def distribution_has_time_index(distribution):
- for field in distribution.get('field', []):
- if field.get('specialType') == 'time_index':
- return True
- return False
+ try:
+ return any([field.get('specialType') == 'time_index' for field in distribution.get('field', [])])
+ except AttributeError:
+ return False
def dataset_has_time_series(dataset):
| datosgobar/pydatajson | 0a8a32e5c1def9f73f2f98b03c8c7d72d4c0ad54 | diff --git a/tests/test_time_series.py b/tests/test_time_series.py
new file mode 100644
index 0000000..716314c
--- /dev/null
+++ b/tests/test_time_series.py
@@ -0,0 +1,41 @@
+from __future__ import unicode_literals
+from __future__ import print_function
+from __future__ import with_statement
+
+import os.path
+import unittest
+from pydatajson.core import DataJson
+from pydatajson.time_series import get_distribution_time_index, distribution_has_time_index, dataset_has_time_series
+from pydatajson.custom_exceptions import DistributionTimeIndexNonExistentError
+
+SAMPLES_DIR = os.path.join("tests", "samples")
+
+
+class TimeSeriesTestCase(unittest.TestCase):
+
+ @classmethod
+ def get_sample(cls, sample_filename):
+ return os.path.join(SAMPLES_DIR, sample_filename)
+
+ def setUp(self):
+ ts_catalog = DataJson(self.get_sample('time_series_data.json'))
+ full_catalog = DataJson(self.get_sample('full_data.json'))
+ self.ts_dataset = ts_catalog.datasets[0]
+ self.non_ts_datasets = full_catalog.datasets[0]
+ self.ts_distribution = ts_catalog.distributions[1]
+ self.non_ts_distribution = full_catalog.distributions[0]
+
+ def test_get_distribution_time_index(self):
+ self.assertEqual('indice_tiempo', get_distribution_time_index(self.ts_distribution))
+ with self.assertRaises(DistributionTimeIndexNonExistentError):
+ get_distribution_time_index(self.non_ts_distribution)
+
+ def test_distribution_has_time_index(self):
+ self.assertTrue(distribution_has_time_index(self.ts_distribution))
+ self.assertFalse(distribution_has_time_index(self.non_ts_distribution))
+ self.ts_distribution['field'] = ['p', 'r', 'o', 'b', 'l', 'e', 'm']
+ self.assertFalse(distribution_has_time_index(self.ts_distribution))
+
+ def test_dataset_has_time_series(self):
+ self.assertTrue(dataset_has_time_series(self.ts_dataset))
+ self.assertFalse(dataset_has_time_series(self.non_ts_datasets))
| Make the `distribution_has_time_index` function robust so it does not break
**Context**
Currently, if the `field` attribute of a distribution does not have the expected structure, the function assumes it can run operations against it to check whether or not it has a time index, and those operations may fail (for example, assuming there is a `dict` on which to call `get`).
**Proposal**
Refactor the function with a `try`/`except` (or any other strategy) so that if it fails for any reason it concludes that the distribution *does not have a time index*.
The rationale is that the function *only returns True when it manages to find a distribution that complies with the time-series specification for having a time index; if that is not the case - for whatever reason - the distribution is considered to _not have a time index_*, but it does not break. (A usage sketch follows this record.) | 0.0 | [
"tests/test_time_series.py::TimeSeriesTestCase::test_distribution_has_time_index",
"tests/test_time_series.py::TimeSeriesTestCase::test_get_distribution_time_index"
] | [
"tests/test_time_series.py::TimeSeriesTestCase::test_dataset_has_time_series"
] | 2018-07-27 19:24:18+00:00 | 1,843 |
|
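A small usage sketch of the hardened `distribution_has_time_index()` described above; the two dictionaries are minimal stand-ins for real distribution metadata (the malformed one mirrors the regression test, which replaces `field` with a list of strings).

```python
from pydatajson.time_series import distribution_has_time_index

# Well-formed metadata: one field is marked as the time index.
ts_distribution = {
    "identifier": "1.1",
    "field": [{"title": "indice_tiempo", "specialType": "time_index"}],
}
assert distribution_has_time_index(ts_distribution)

# Malformed metadata (a list of strings instead of field dicts):
# the hardened function returns False instead of raising AttributeError.
broken_distribution = {"identifier": "1.2", "field": ["p", "r", "o"]}
assert not distribution_has_time_index(broken_distribution)
```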
datosgobar__pydatajson-210 | diff --git a/docs/MANUAL.md b/docs/MANUAL.md
index 207d9fe..e03b74f 100644
--- a/docs/MANUAL.md
+++ b/docs/MANUAL.md
@@ -428,6 +428,15 @@ Toma los siguientes parámetros:
portal de destino. Si no se pasa, se toma como organización el catalog_id
Retorna el id en el nodo de destino de los datasets federados.
+
+### Métodos para manejo de organizaciones
+
+- **pydatajson.federation.get_organizations_from_ckan()**: Devuelve el árbol de organizaciones del portal pasado por parámetro.
+Toma los siguientes parámetros:
+ - **portal_url**: URL del portal de CKAN. Debe implementar el endpoint `/group_tree`.
+
+ Retorna una lista de diccionarios con la información de las organizaciones. Recursivamente, dentro del campo `children`,
+ se encuentran las organizaciones dependientes en la jerarquía.
## Anexo I: Estructura de respuestas
diff --git a/pydatajson/federation.py b/pydatajson/federation.py
index 6e57ec1..1138121 100644
--- a/pydatajson/federation.py
+++ b/pydatajson/federation.py
@@ -300,3 +300,17 @@ def push_new_themes(catalog, portal_url, apikey):
catalog, portal_url, apikey, identifier=new_theme)
pushed_names.append(name)
return pushed_names
+
+
+def get_organizations_from_ckan(portal_url):
+ """Toma la url de un portal y devuelve su árbol de organizaciones.
+
+ Args:
+ portal_url (str): La URL del portal CKAN de origen.
+ Returns:
+ dict: Diccionarios anidados con la información de
+ las organizaciones.
+ """
+ ckan_portal = RemoteCKAN(portal_url)
+ return ckan_portal.call_action('group_tree',
+ data_dict={'type': 'organization'})
| datosgobar/pydatajson | 3b9447bbe8f4250cc1b3def1e014f67c4fca0eb0 | diff --git a/tests/test_federation.py b/tests/test_federation.py
index 746ae23..58449e4 100644
--- a/tests/test_federation.py
+++ b/tests/test_federation.py
@@ -18,12 +18,15 @@ from ckanapi.errors import NotFound
SAMPLES_DIR = os.path.join("tests", "samples")
-class PushDatasetTestCase(unittest.TestCase):
-
+class FederationSuite(unittest.TestCase):
@classmethod
def get_sample(cls, sample_filename):
return os.path.join(SAMPLES_DIR, sample_filename)
+
+@patch('pydatajson.federation.RemoteCKAN', autospec=True)
+class PushDatasetTestCase(FederationSuite):
+
@classmethod
def setUpClass(cls):
cls.catalog = pydatajson.DataJson(cls.get_sample('full_data.json'))
@@ -43,7 +46,6 @@ class PushDatasetTestCase(unittest.TestCase):
cls.minimum_dataset['distribution'][0][
'identifier'] = cls.dataset['distribution'][0]['identifier']
- @patch('pydatajson.federation.RemoteCKAN', autospec=True)
def test_id_is_created_correctly(self, mock_portal):
def mock_call_action(action, data_dict=None):
if action == 'package_update':
@@ -62,7 +64,6 @@ class PushDatasetTestCase(unittest.TestCase):
catalog_id=self.catalog_id)
self.assertEqual(self.catalog_id + '_' + self.dataset_id, res_id)
- @patch('pydatajson.federation.RemoteCKAN', autospec=True)
def test_id_is_updated_correctly(self, mock_portal):
def mock_call_action(action, data_dict=None):
if action == 'package_update':
@@ -81,7 +82,6 @@ class PushDatasetTestCase(unittest.TestCase):
catalog_id=self.catalog_id)
self.assertEqual(self.catalog_id + '_' + self.dataset_id, res_id)
- @patch('pydatajson.federation.RemoteCKAN', autospec=True)
def test_dataset_id_is_preserved_if_catalog_id_is_not_passed(
self, mock_portal):
def mock_call_action(action, data_dict=None):
@@ -97,7 +97,6 @@ class PushDatasetTestCase(unittest.TestCase):
'portal', 'key')
self.assertEqual(self.dataset_id, res_id)
- @patch('pydatajson.federation.RemoteCKAN', autospec=True)
def test_tags_are_passed_correctly(self, mock_portal):
themes = self.dataset['theme']
keywords = [kw for kw in self.dataset['keyword']]
@@ -132,7 +131,6 @@ class PushDatasetTestCase(unittest.TestCase):
catalog_id=self.catalog_id)
self.assertEqual(self.catalog_id + '_' + self.dataset_id, res_id)
- @patch('pydatajson.federation.RemoteCKAN', autospec=True)
def test_licenses_are_interpreted_correctly(self, mock_portal):
def mock_call_action(action, data_dict=None):
if action == 'license_list':
@@ -149,7 +147,6 @@ class PushDatasetTestCase(unittest.TestCase):
push_dataset_to_ckan(self.catalog, 'owner', self.dataset_id,
'portal', 'key', catalog_id=self.catalog_id)
- @patch('pydatajson.federation.RemoteCKAN', autospec=True)
def test_dataset_without_license_sets_notspecified(self, mock_portal):
def mock_call_action(action, data_dict=None):
if action == 'license_list':
@@ -172,7 +169,6 @@ class PushDatasetTestCase(unittest.TestCase):
'key',
catalog_id=self.minimum_catalog_id)
- @patch('pydatajson.federation.RemoteCKAN', autospec=True)
def test_dataset_level_wrappers(self, mock_portal):
def mock_call_action(action, data_dict=None):
if action == 'package_update':
@@ -192,7 +188,6 @@ class PushDatasetTestCase(unittest.TestCase):
self.assertEqual(self.dataset_id, restored_id)
self.assertEqual(self.catalog_id + '_' + self.dataset_id, harvested_id)
- @patch('pydatajson.federation.RemoteCKAN', autospec=True)
def test_harvest_catalog_with_no_optional_parametres(self, mock_portal):
def mock_call_action(action, data_dict=None):
if action == 'package_update':
@@ -218,7 +213,6 @@ class PushDatasetTestCase(unittest.TestCase):
for ds in self.catalog.datasets],
harvested_ids)
- @patch('pydatajson.federation.RemoteCKAN', autospec=True)
def test_harvest_catalog_with_dataset_list(self, mock_portal):
def mock_call_action(action, data_dict=None):
if action == 'package_update':
@@ -254,7 +248,6 @@ class PushDatasetTestCase(unittest.TestCase):
[self.catalog_id + '_' + ds_id for ds_id in dataset_list],
harvested_ids)
- @patch('pydatajson.federation.RemoteCKAN', autospec=True)
def test_harvest_catalog_with_owner_org(self, mock_portal):
def mock_call_action(action, data_dict=None):
if action == 'package_update':
@@ -275,7 +268,6 @@ class PushDatasetTestCase(unittest.TestCase):
for ds in self.catalog.datasets],
harvested_ids)
- @patch('pydatajson.federation.RemoteCKAN', autospec=True)
def test_harvest_catalog_with_errors(self, mock_portal):
def mock_call_action(action, data_dict=None):
if action == 'package_update':
@@ -292,7 +284,6 @@ class PushDatasetTestCase(unittest.TestCase):
self.assertDictEqual(
{self.catalog.datasets[1]['identifier']: "some message"}, errors)
- @patch('pydatajson.federation.RemoteCKAN', autospec=True)
def test_harvest_catalog_with_empty_list(self, mock_portal):
harvested_ids, _ = harvest_catalog_to_ckan(
self.catalog, 'portal', 'key', self.catalog_id,
@@ -301,7 +292,7 @@ class PushDatasetTestCase(unittest.TestCase):
self.assertEqual([], harvested_ids)
-class RemoveDatasetTestCase(unittest.TestCase):
+class RemoveDatasetTestCase(FederationSuite):
@patch('pydatajson.federation.RemoteCKAN', autospec=True)
def test_empty_search_doesnt_call_purge(self, mock_portal):
@@ -378,22 +369,17 @@ class RemoveDatasetTestCase(unittest.TestCase):
'dataset_purge', data_dict={'id': 'id_2'})
-class PushThemeTestCase(unittest.TestCase):
-
- @classmethod
- def get_sample(cls, sample_filename):
- return os.path.join(SAMPLES_DIR, sample_filename)
+@patch('pydatajson.federation.RemoteCKAN', autospec=True)
+class PushThemeTestCase(FederationSuite):
@classmethod
def setUpClass(cls):
cls.catalog = pydatajson.DataJson(cls.get_sample('full_data.json'))
- @patch('pydatajson.federation.RemoteCKAN', autospec=True)
def test_empty_theme_search_raises_exception(self, mock_portal):
with self.assertRaises(AssertionError):
push_theme_to_ckan(self.catalog, 'portal_url', 'apikey')
- @patch('pydatajson.federation.RemoteCKAN', autospec=True)
def test_function_pushes_theme_by_identifier(self, mock_portal):
mock_portal.return_value.call_action = MagicMock(
return_value={'name': 'group_name'})
@@ -404,7 +390,6 @@ class PushThemeTestCase(unittest.TestCase):
identifier='compras')
self.assertEqual('group_name', result)
- @patch('pydatajson.federation.RemoteCKAN', autospec=True)
def test_function_pushes_theme_by_label(self, mock_portal):
mock_portal.return_value.call_action = MagicMock(
return_value={'name': 'other_name'})
@@ -415,7 +400,6 @@ class PushThemeTestCase(unittest.TestCase):
label='Adjudicaciones')
self.assertEqual('other_name', result)
- @patch('pydatajson.federation.RemoteCKAN', autospec=True)
def test_ckan_portal_is_called_with_correct_parametres(self, mock_portal):
mock_portal.return_value.call_action = MagicMock(
return_value={'name': u'contrataciones'})
@@ -431,16 +415,13 @@ class PushThemeTestCase(unittest.TestCase):
'group_create', data_dict=group)
-class PushCatalogThemesTestCase(unittest.TestCase):
- @classmethod
- def get_sample(cls, sample_filename):
- return os.path.join(SAMPLES_DIR, sample_filename)
+@patch('pydatajson.federation.RemoteCKAN', autospec=True)
+class PushCatalogThemesTestCase(FederationSuite):
@classmethod
def setUpClass(cls):
cls.catalog = pydatajson.DataJson(cls.get_sample('full_data.json'))
- @patch('pydatajson.federation.RemoteCKAN', autospec=True)
def test_empty_portal_pushes_every_theme(self, mock_portal):
def mock_call_action(action, data_dict=None):
if action == 'group_list':
@@ -461,7 +442,6 @@ class PushCatalogThemesTestCase(unittest.TestCase):
[theme['id'] for theme in self.catalog['themeTaxonomy']],
res_names)
- @patch('pydatajson.federation.RemoteCKAN', autospec=True)
def test_full_portal_pushes_nothing(self, mock_portal):
def mock_call_action(action, data_dict=None):
if action == 'group_list':
@@ -476,7 +456,6 @@ class PushCatalogThemesTestCase(unittest.TestCase):
except AttributeError:
self.assertCountEqual([], res_names)
- @patch('pydatajson.federation.RemoteCKAN', autospec=True)
def test_non_empty_intersection_pushes_missing_themes(self, mock_portal):
def mock_call_action(action, data_dict=None):
if action == 'group_list':
@@ -497,3 +476,12 @@ class PushCatalogThemesTestCase(unittest.TestCase):
self.assertCountEqual(
[theme['id'] for theme in self.catalog['themeTaxonomy']][2:],
res_names)
+
+
+@patch('pydatajson.federation.RemoteCKAN', autospec=True)
+class OrganizationsTestCase(FederationSuite):
+
+ def test_get_organization_calls_api_correctly(self, mock_portal):
+ get_organizations_from_ckan('portal_url')
+ mock_portal.return_value.call_action.assert_called_with(
+ 'group_tree', data_dict={'type': 'organization'})
| Get organizations from ckan
The goal is to implement a method that, given a portal URL, returns the complete set of organizations present, along with their hierarchy. (A usage sketch follows this record.) | 0.0 | [
"tests/test_federation.py::OrganizationsTestCase::test_get_organization_calls_api_correctly"
] | [
"tests/test_federation.py::PushDatasetTestCase::test_dataset_id_is_preserved_if_catalog_id_is_not_passed",
"tests/test_federation.py::PushDatasetTestCase::test_dataset_level_wrappers",
"tests/test_federation.py::PushDatasetTestCase::test_dataset_without_license_sets_notspecified",
"tests/test_federation.py::PushDatasetTestCase::test_harvest_catalog_with_dataset_list",
"tests/test_federation.py::PushDatasetTestCase::test_harvest_catalog_with_empty_list",
"tests/test_federation.py::PushDatasetTestCase::test_harvest_catalog_with_errors",
"tests/test_federation.py::PushDatasetTestCase::test_harvest_catalog_with_no_optional_parametres",
"tests/test_federation.py::PushDatasetTestCase::test_harvest_catalog_with_owner_org",
"tests/test_federation.py::PushDatasetTestCase::test_id_is_created_correctly",
"tests/test_federation.py::PushDatasetTestCase::test_id_is_updated_correctly",
"tests/test_federation.py::PushDatasetTestCase::test_licenses_are_interpreted_correctly",
"tests/test_federation.py::PushDatasetTestCase::test_tags_are_passed_correctly",
"tests/test_federation.py::RemoveDatasetTestCase::test_empty_search_doesnt_call_purge",
"tests/test_federation.py::RemoveDatasetTestCase::test_filter_in_datasets",
"tests/test_federation.py::RemoveDatasetTestCase::test_filter_in_out_datasets",
"tests/test_federation.py::RemoveDatasetTestCase::test_query_one_dataset",
"tests/test_federation.py::RemoveDatasetTestCase::test_query_over_500_datasets",
"tests/test_federation.py::RemoveDatasetTestCase::test_remove_through_filters_and_organization",
"tests/test_federation.py::PushThemeTestCase::test_ckan_portal_is_called_with_correct_parametres",
"tests/test_federation.py::PushThemeTestCase::test_empty_theme_search_raises_exception",
"tests/test_federation.py::PushThemeTestCase::test_function_pushes_theme_by_identifier",
"tests/test_federation.py::PushThemeTestCase::test_function_pushes_theme_by_label",
"tests/test_federation.py::PushCatalogThemesTestCase::test_empty_portal_pushes_every_theme",
"tests/test_federation.py::PushCatalogThemesTestCase::test_full_portal_pushes_nothing",
"tests/test_federation.py::PushCatalogThemesTestCase::test_non_empty_intersection_pushes_missing_themes"
] | 2018-10-19 14:09:54+00:00 | 1,844 |
|
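A minimal usage sketch of `get_organizations_from_ckan()` as added above; the portal URL is a placeholder and the helper just walks the `children` lists returned by CKAN's `group_tree` endpoint.

```python
from pydatajson.federation import get_organizations_from_ckan


def print_tree(organizations, depth=0):
    """Print the organization hierarchy, one indented line per node."""
    for org in organizations:
        print("  " * depth + org["title"])
        print_tree(org.get("children", []), depth + 1)


# Placeholder portal URL; it must expose the /group_tree endpoint.
org_tree = get_organizations_from_ckan("http://datos.example.gob.ar")
print_tree(org_tree)
```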
datosgobar__pydatajson-211 | diff --git a/docs/MANUAL.md b/docs/MANUAL.md
index e03b74f..8a0c2f6 100644
--- a/docs/MANUAL.md
+++ b/docs/MANUAL.md
@@ -438,6 +438,19 @@ Toma los siguientes parámetros:
Retorna una lista de diccionarios con la información de las organizaciones. Recursivamente, dentro del campo `children`,
se encuentran las organizaciones dependientes en la jerarquía.
+- **pydatajson.federation.push_organization_tree_to_ckan()**: Tomando un árbol de organizaciones como el creado por
+`get_organizations_from_ckan()` crea en el portal de destino las organizaciones dentro de su jerarquía. Toma los siguientes
+parámetros:
+ - **portal_url**: La URL del portal CKAN de destino.
+ - **apikey**: La apikey de un usuario con los permisos que le permitan crear las organizaciones.
+ - **org_tree**: lista de diccionarios con la data de organizaciones a crear.
+ - **parent** (opcional, default: None): Si se pasa, el árbol de organizaciones pasado en `org_tree` se
+ crea bajo la organización con `name` pasado en `parent`. Si no se pasa un parámetro, las organizaciones son creadas
+ como primer nivel.
+
+ Retorna el árbol de organizaciones creadas. Cada nodo tiene un campo `success` que indica si fue creado exitosamente o
+ no. En caso de que `success` sea False, los hijos de esa organización no son creados.
+
## Anexo I: Estructura de respuestas
### validate_catalog()
diff --git a/pydatajson/documentation.py b/pydatajson/documentation.py
index 48d8b99..51745d1 100644
--- a/pydatajson/documentation.py
+++ b/pydatajson/documentation.py
@@ -47,10 +47,12 @@ def dataset_to_markdown(dataset):
def distribution_to_markdown(distribution):
- """Genera texto en markdown a partir de los metadatos de una `distribution`.
+ """Genera texto en markdown a partir de los metadatos de una
+ `distribution`.
Args:
- distribution (dict): Diccionario con metadatos de una `distribution`.
+ distribution (dict): Diccionario con metadatos de una
+ `distribution`.
Returns:
str: Texto que describe una `distribution`.
diff --git a/pydatajson/federation.py b/pydatajson/federation.py
index 1138121..fa0b31b 100644
--- a/pydatajson/federation.py
+++ b/pydatajson/federation.py
@@ -286,7 +286,7 @@ def push_new_themes(catalog, portal_url, apikey):
taxonomía.
portal_url (str): La URL del portal CKAN de destino.
apikey (str): La apikey de un usuario con los permisos que le
- permitan crear o actualizar el dataset.
+ permitan crear o actualizar los temas.
Returns:
str: Los ids de los temas creados.
"""
@@ -314,3 +314,42 @@ def get_organizations_from_ckan(portal_url):
ckan_portal = RemoteCKAN(portal_url)
return ckan_portal.call_action('group_tree',
data_dict={'type': 'organization'})
+
+
+def push_organization_tree_to_ckan(portal_url, apikey, org_tree, parent=None):
+ """Toma un árbol de organizaciones y lo replica en el portal de
+ destino.
+
+ Args:
+ portal_url (str): La URL del portal CKAN de destino.
+ apikey (str): La apikey de un usuario con los permisos que le
+ permitan crear las organizaciones.
+ org_tree(list): lista de diccionarios con la data de las
+ organizaciones a crear.
+ parent(str): campo name de la organizacion padre.
+ Returns:
+ (list): Devuelve el arbol de organizaciones recorridas,
+ junto con el status detallando si la creación fue
+ exitosa o no.
+
+ """
+ portal = RemoteCKAN(portal_url, apikey=apikey)
+ created = []
+ for node in org_tree:
+ if parent:
+ node['groups'] = [{'name': parent}]
+ try:
+ pushed_org = portal.call_action('organization_create',
+ data_dict=node)
+ pushed_org['success'] = True
+ except Exception as e:
+ logger.exception('Ocurrió un error creando la organización {}: {}'
+ .format(node['title'], str(e)))
+ pushed_org = {'name': node, 'success': False}
+
+ if pushed_org['success']:
+ pushed_org['children'] = push_organization_tree_to_ckan(
+ portal_url, apikey, node['children'], parent=node['name'])
+
+ created.append(pushed_org)
+ return created
| datosgobar/pydatajson | 185d79d95555dd3041404b208a09583cb95a9a19 | diff --git a/tests/samples/organization_tree.json b/tests/samples/organization_tree.json
new file mode 100644
index 0000000..0e0ff6d
--- /dev/null
+++ b/tests/samples/organization_tree.json
@@ -0,0 +1,235 @@
+[
+ {
+ "highlighted": false,
+ "title": " Jefatura de Gabinete de Ministros",
+ "children": [
+ {
+ "highlighted": false,
+ "title": "Agencia de Acceso a la Informaci\u00f3n P\u00fablica",
+ "children": [],
+ "name": "aaip",
+ "id": "daa8b40c-fa37-478c-a7ef-081305aeadd8"
+ },
+ {
+ "highlighted": false,
+ "title": "Secretar\u00eda de Gobierno de Ambiente y Desarrollo Sustentable",
+ "children": [
+ {
+ "highlighted": false,
+ "title": "Autoridad de Cuenca Matanza Riachuelo",
+ "children": [],
+ "name": "acumar",
+ "id": "1b6b28cd-098f-41d7-b43f-d5a01ffa8759"
+ }
+ ],
+ "name": "ambiente",
+ "id": "0e5aa328-825e-4509-851d-5421e866635e"
+ },
+ {
+ "highlighted": false,
+ "title": "Secretar\u00eda de Gobierno de Modernizaci\u00f3n",
+ "children": [
+ {
+ "highlighted": false,
+ "title": "Empresa Argentina de Soluciones Satelitales",
+ "children": [],
+ "name": "arsat",
+ "id": "b2509ac0-3af6-4f66-9ffa-8c6fb4206791"
+ },
+ {
+ "highlighted": false,
+ "title": "Ente Nacional de Comunicaciones",
+ "children": [],
+ "name": "enacom",
+ "id": "6eb7d19b-2d42-494f-8e57-d67c501d23eb"
+ }
+ ],
+ "name": "modernizacion",
+ "id": "4c7a6f6b-5caf-42a1-aae5-0a07e609a235"
+ },
+ {
+ "highlighted": false,
+ "title": "Secretar\u00eda de Gobierno de Turismo",
+ "children": [],
+ "name": "turismo",
+ "id": "3a751de6-128e-4f9a-a479-4672ef79a0e8"
+ }
+ ],
+ "name": "jgm",
+ "id": "f917ad65-28ea-42a9-81ae-61a2bb8f58d0"
+ },
+ {
+ "highlighted": false,
+ "title": " Ministerio de Defensa",
+ "children": [
+ {
+ "highlighted": false,
+ "title": "Instituto Geogr\u00e1fico Nacional",
+ "children": [],
+ "name": "ign",
+ "id": "9b47c8eb-bb5c-40df-bbaa-589374d14da8"
+ },
+ {
+ "highlighted": false,
+ "title": "Servicio Meteorol\u00f3gico Nacional",
+ "children": [],
+ "name": "smn",
+ "id": "dba2a17e-e2ea-4e0b-b0ef-bf4ffe9f9ad9"
+ }
+ ],
+ "name": "defensa",
+ "id": "5e80ed4f-8bfb-4451-8240-e8f39e695ee1"
+ },
+ {
+ "highlighted": false,
+ "title": "Ministerio de Educaci\u00f3n, Cultura, Ciencia y Tecnolog\u00eda",
+ "children": [
+ {
+ "highlighted": false,
+ "title": "Secretar\u00eda de Gobierno de Ciencia y Tecnolog\u00eda",
+ "children": [],
+ "name": "mincyt",
+ "id": "772ab9b7-056d-4ae4-b7a2-a1329e979690"
+ },
+ {
+ "highlighted": false,
+ "title": "Secretar\u00eda de Gobierno de Cultura",
+ "children": [],
+ "name": "cultura",
+ "id": "9fcc8ffc-3dcc-4437-acc8-72c5b08d8d51"
+ }
+ ],
+ "name": "educacion",
+ "id": "27d39ff7-7110-4c6d-b7e8-1b8eba392d7e"
+ },
+ {
+ "highlighted": false,
+ "title": " Ministerio de Hacienda",
+ "children": [
+ {
+ "highlighted": false,
+ "title": "Secretar\u00eda de Gobierno de Energ\u00eda",
+ "children": [
+ {
+ "highlighted": false,
+ "title": "Ente Nacional Regulador del Gas",
+ "children": [],
+ "name": "enargas",
+ "id": "dddda9d3-644d-4e3d-974b-302fd8945a86"
+ }
+ ],
+ "name": "energia",
+ "id": "cd1b32a2-2d3a-44ac-8c98-a425a7b62d42"
+ },
+ {
+ "highlighted": false,
+ "title": "Subsecretar\u00eda de Presupuesto",
+ "children": [],
+ "name": "sspre",
+ "id": "a753aeb8-52eb-4cfc-a6ee-6d930094b0f2"
+ },
+ {
+ "highlighted": false,
+ "title": "Subsecretar\u00eda de Programaci\u00f3n Macroecon\u00f3mica",
+ "children": [],
+ "name": "sspm",
+ "id": "68f9ee22-5ec3-44cf-a32b-b87d9db46b93"
+ },
+ {
+ "highlighted": false,
+ "title": "Subsecretar\u00eda de Programaci\u00f3n Microecon\u00f3mica",
+ "children": [],
+ "name": "sspmi",
+ "id": "b4baa84a-16a7-4db0-af9c-bd925971d26a"
+ }
+ ],
+ "name": "hacienda",
+ "id": "b2a6e77e-a3c8-4e1a-bcba-7134a9436051"
+ },
+ {
+ "highlighted": false,
+ "title": " Ministerio de Justicia y Derechos Humanos",
+ "children": [],
+ "name": "justicia",
+ "id": "06b380f8-888f-43c0-8367-16a7ddf47d4f"
+ },
+ {
+ "highlighted": false,
+ "title": " Ministerio del Interior, Obras P\u00fablicas y Vivienda ",
+ "children": [],
+ "name": "interior",
+ "id": "c88b1ad1-f78d-4cf9-848d-d73ccae6cd8e"
+ },
+ {
+ "highlighted": false,
+ "title": "Ministerio de Producci\u00f3n y Trabajo",
+ "children": [
+ {
+ "highlighted": false,
+ "title": "Secretar\u00eda de Gobierno de Agroindustria",
+ "children": [],
+ "name": "agroindustria",
+ "id": "b10c94e2-ade1-4a97-8c4c-b1ada7878d60"
+ },
+ {
+ "highlighted": false,
+ "title": "Secretar\u00eda de Gobierno de Trabajo y Empleo",
+ "children": [],
+ "name": "trabajo",
+ "id": "f31cb5dc-79ab-442b-ac7f-87ef6ad7749d"
+ },
+ {
+ "highlighted": false,
+ "title": "Secretar\u00eda de Transformaci\u00f3n Productiva",
+ "children": [],
+ "name": "siep",
+ "id": "83a04686-747a-4ba3-a0b1-2f05c3196bc4"
+ }
+ ],
+ "name": "produccion",
+ "id": "1ca8d15b-31fc-4f09-ba09-53edd3d4cce6"
+ },
+ {
+ "highlighted": false,
+ "title": "Ministerio de Relaciones Exteriores y Culto",
+ "children": [],
+ "name": "exterior",
+ "id": "89cd05f2-11c1-4f1a-875f-b4faf97eb4d4"
+ },
+ {
+ "highlighted": false,
+ "title": "Ministerio de Salud y Desarrollo Social",
+ "children": [
+ {
+ "highlighted": false,
+ "title": "Secretar\u00eda de Gobierno de Salud",
+ "children": [],
+ "name": "salud",
+ "id": "dd1f2018-587f-43af-9b07-fc32f5ab588b"
+ }
+ ],
+ "name": "desarrollo-social",
+ "id": "ef25c735-2abb-4165-94a6-a6798a103603"
+ },
+ {
+ "highlighted": false,
+ "title": " Ministerio de Seguridad",
+ "children": [],
+ "name": "seguridad",
+ "id": "908fd413-5a22-40e7-9204-38ef380ae232"
+ },
+ {
+ "highlighted": false,
+ "title": " Ministerio de Transporte",
+ "children": [],
+ "name": "transporte",
+ "id": "71418928-10c4-4625-aeaf-69a4d73d00ed"
+ },
+ {
+ "highlighted": false,
+ "title": "Sin organizaci\u00f3n asignada",
+ "children": [],
+ "name": "otros",
+ "id": "109d53ef-3ed3-498e-97d0-a456698969f7"
+ }
+]
\ No newline at end of file
diff --git a/tests/test_federation.py b/tests/test_federation.py
index 58449e4..d2a0af6 100644
--- a/tests/test_federation.py
+++ b/tests/test_federation.py
@@ -5,6 +5,7 @@ from __future__ import unicode_literals
import unittest
import os
import re
+import json
try:
from mock import patch, MagicMock
@@ -481,7 +482,49 @@ class PushCatalogThemesTestCase(FederationSuite):
@patch('pydatajson.federation.RemoteCKAN', autospec=True)
class OrganizationsTestCase(FederationSuite):
+ def setUp(self):
+ self.portal_url = 'portal_url'
+ self.apikey = 'apikey'
+ self.org_tree = json.load(open(
+ self.get_sample('organization_tree.json')))
+
+ def check_hierarchy(self, node, parent=None):
+ if not node['success']:
+ self.assertTrue('children' not in node)
+ return
+ if parent is None:
+ self.assertTrue('groups' not in node)
+ else:
+ self.assertDictEqual(node['groups'][0],
+ {'name': parent})
+
+ for child in node['children']:
+ self.check_hierarchy(child, parent=node['name'])
+
def test_get_organization_calls_api_correctly(self, mock_portal):
- get_organizations_from_ckan('portal_url')
+ get_organizations_from_ckan(self.portal_url)
mock_portal.return_value.call_action.assert_called_with(
'group_tree', data_dict={'type': 'organization'})
+
+ def test_push_organizations_sends_correct_hierarchy(self, mock_portal):
+ mock_portal.return_value.call_action = (lambda _, data_dict: data_dict)
+ pushed_tree = push_organization_tree_to_ckan(self.portal_url,
+ self.apikey,
+ self.org_tree)
+ for node in pushed_tree:
+ self.check_hierarchy(node)
+
+ def test_push_organizations_cuts_trees_on_failures(self, mock_portal):
+ def mock_org_create(_action, data_dict):
+ broken_orgs = ('acumar', 'modernizacion', 'hacienda')
+ if data_dict['name'] in broken_orgs:
+ raise Exception('broken org on each level')
+ else:
+ return data_dict
+
+ mock_portal.return_value.call_action = mock_org_create
+ pushed_tree = push_organization_tree_to_ckan(self.portal_url,
+ self.apikey,
+ self.org_tree)
+ for node in pushed_tree:
+ self.check_hierarchy(node)
| Push organizations to ckan
Dado un árbol jerárquico de organizaciones, el objetivo es crear las organizaciones allí descriptas, manteniendo la jerarquía original | 0.0 | [
"tests/test_federation.py::OrganizationsTestCase::test_push_organizations_cuts_trees_on_failures",
"tests/test_federation.py::OrganizationsTestCase::test_push_organizations_sends_correct_hierarchy"
] | [
"tests/test_federation.py::PushDatasetTestCase::test_dataset_id_is_preserved_if_catalog_id_is_not_passed",
"tests/test_federation.py::PushDatasetTestCase::test_dataset_level_wrappers",
"tests/test_federation.py::PushDatasetTestCase::test_dataset_without_license_sets_notspecified",
"tests/test_federation.py::PushDatasetTestCase::test_harvest_catalog_with_dataset_list",
"tests/test_federation.py::PushDatasetTestCase::test_harvest_catalog_with_empty_list",
"tests/test_federation.py::PushDatasetTestCase::test_harvest_catalog_with_errors",
"tests/test_federation.py::PushDatasetTestCase::test_harvest_catalog_with_no_optional_parametres",
"tests/test_federation.py::PushDatasetTestCase::test_harvest_catalog_with_owner_org",
"tests/test_federation.py::PushDatasetTestCase::test_id_is_created_correctly",
"tests/test_federation.py::PushDatasetTestCase::test_id_is_updated_correctly",
"tests/test_federation.py::PushDatasetTestCase::test_licenses_are_interpreted_correctly",
"tests/test_federation.py::PushDatasetTestCase::test_tags_are_passed_correctly",
"tests/test_federation.py::RemoveDatasetTestCase::test_empty_search_doesnt_call_purge",
"tests/test_federation.py::RemoveDatasetTestCase::test_filter_in_datasets",
"tests/test_federation.py::RemoveDatasetTestCase::test_filter_in_out_datasets",
"tests/test_federation.py::RemoveDatasetTestCase::test_query_one_dataset",
"tests/test_federation.py::RemoveDatasetTestCase::test_query_over_500_datasets",
"tests/test_federation.py::RemoveDatasetTestCase::test_remove_through_filters_and_organization",
"tests/test_federation.py::PushThemeTestCase::test_ckan_portal_is_called_with_correct_parametres",
"tests/test_federation.py::PushThemeTestCase::test_empty_theme_search_raises_exception",
"tests/test_federation.py::PushThemeTestCase::test_function_pushes_theme_by_identifier",
"tests/test_federation.py::PushThemeTestCase::test_function_pushes_theme_by_label",
"tests/test_federation.py::PushCatalogThemesTestCase::test_empty_portal_pushes_every_theme",
"tests/test_federation.py::PushCatalogThemesTestCase::test_full_portal_pushes_nothing",
"tests/test_federation.py::PushCatalogThemesTestCase::test_non_empty_intersection_pushes_missing_themes",
"tests/test_federation.py::OrganizationsTestCase::test_get_organization_calls_api_correctly"
] | 2018-10-23 12:34:37+00:00 | 1,845 |
|
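A sketch of how the two organization helpers from the records above might be combined to replicate a hierarchy from one portal to another. Both portal URLs and the API key are placeholders; each node returned by `push_organization_tree_to_ckan()` carries a `success` flag, and the children of a failed node are not created.

```python
from pydatajson.federation import (
    get_organizations_from_ckan,
    push_organization_tree_to_ckan,
)

# Placeholder portals and credentials.
ORIGIN_PORTAL = "http://datos.example.gob.ar"
DESTINATION_PORTAL = "http://my-andino.example.org"
APIKEY = "my-apikey"

# Read the hierarchy from the origin and replicate it on the destination.
org_tree = get_organizations_from_ckan(ORIGIN_PORTAL)
pushed_tree = push_organization_tree_to_ckan(DESTINATION_PORTAL, APIKEY,
                                             org_tree)

# Report how many top-level organizations could not be created.
failed = [node for node in pushed_tree if not node.get("success")]
print("{} top-level organizations failed".format(len(failed)))
```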
datosgobar__pydatajson-212 | diff --git a/docs/MANUAL.md b/docs/MANUAL.md
index 8a0c2f6..a84878b 100644
--- a/docs/MANUAL.md
+++ b/docs/MANUAL.md
@@ -438,6 +438,16 @@ Toma los siguientes parámetros:
Retorna una lista de diccionarios con la información de las organizaciones. Recursivamente, dentro del campo `children`,
se encuentran las organizaciones dependientes en la jerarquía.
+- **pydatajson.federation.get_organization_from_ckan()**: Devuelve un diccionario con la información de una
+organización en base a un id y un portal pasados por parámetro.
+Toma los siguientes parámetros:
+ - **portal_url**: URL del portal de CKAN. Debe implementar el endpoint `/organization_show`.
+ - **org_id**: Identificador o name de la organización a buscar.
+
+ Retorna un diccionario con la información de la organización correspondiente al identificador obtenido.
+ _No_ incluye su jerarquía, por lo cual ésta deberá ser conseguida mediante el uso de la función
+ `get_organizations_from_ckan`.
+
- **pydatajson.federation.push_organization_tree_to_ckan()**: Tomando un árbol de organizaciones como el creado por
`get_organizations_from_ckan()` crea en el portal de destino las organizaciones dentro de su jerarquía. Toma los siguientes
parámetros:
diff --git a/pydatajson/federation.py b/pydatajson/federation.py
index fa0b31b..eb5564b 100644
--- a/pydatajson/federation.py
+++ b/pydatajson/federation.py
@@ -308,7 +308,7 @@ def get_organizations_from_ckan(portal_url):
Args:
portal_url (str): La URL del portal CKAN de origen.
Returns:
- dict: Diccionarios anidados con la información de
+ list: Lista de diccionarios anidados con la información de
las organizaciones.
"""
ckan_portal = RemoteCKAN(portal_url)
@@ -316,6 +316,20 @@ def get_organizations_from_ckan(portal_url):
data_dict={'type': 'organization'})
+def get_organization_from_ckan(portal_url, org_id):
+ """Toma la url de un portal y un id, y devuelve la organización a buscar.
+
+ Args:
+ portal_url (str): La URL del portal CKAN de origen.
+ org_id (str): El id de la organización a buscar.
+ Returns:
+ dict: Diccionario con la información de la organización.
+ """
+ ckan_portal = RemoteCKAN(portal_url)
+ return ckan_portal.call_action('organization_show',
+ data_dict={'id': org_id})
+
+
def push_organization_tree_to_ckan(portal_url, apikey, org_tree, parent=None):
"""Toma un árbol de organizaciones y lo replica en el portal de
destino.
| datosgobar/pydatajson | bcc401111ada65362839f1f92490198ddc5d94ea | diff --git a/tests/test_federation.py b/tests/test_federation.py
index d2a0af6..daf9df4 100644
--- a/tests/test_federation.py
+++ b/tests/test_federation.py
@@ -501,11 +501,16 @@ class OrganizationsTestCase(FederationSuite):
for child in node['children']:
self.check_hierarchy(child, parent=node['name'])
- def test_get_organization_calls_api_correctly(self, mock_portal):
+ def test_get_organizations_calls_api_correctly(self, mock_portal):
get_organizations_from_ckan(self.portal_url)
mock_portal.return_value.call_action.assert_called_with(
'group_tree', data_dict={'type': 'organization'})
+ def test_get_organization_calls_api_correctly(self, mock_portal):
+ get_organization_from_ckan(self.portal_url, 'test_id')
+ mock_portal.return_value.call_action.assert_called_with(
+ 'organization_show', data_dict={'id': 'test_id'})
+
def test_push_organizations_sends_correct_hierarchy(self, mock_portal):
mock_portal.return_value.call_action = (lambda _, data_dict: data_dict)
pushed_tree = push_organization_tree_to_ckan(self.portal_url,
| Get organization from ckan
El objetivo es implementar un método que devuelva la información de una organizacion a partir de su nombre o id y la url del portal pasado por parametro. | 0.0 | [
"tests/test_federation.py::OrganizationsTestCase::test_get_organization_calls_api_correctly"
] | [
"tests/test_federation.py::PushDatasetTestCase::test_dataset_id_is_preserved_if_catalog_id_is_not_passed",
"tests/test_federation.py::PushDatasetTestCase::test_dataset_level_wrappers",
"tests/test_federation.py::PushDatasetTestCase::test_dataset_without_license_sets_notspecified",
"tests/test_federation.py::PushDatasetTestCase::test_harvest_catalog_with_dataset_list",
"tests/test_federation.py::PushDatasetTestCase::test_harvest_catalog_with_empty_list",
"tests/test_federation.py::PushDatasetTestCase::test_harvest_catalog_with_errors",
"tests/test_federation.py::PushDatasetTestCase::test_harvest_catalog_with_no_optional_parametres",
"tests/test_federation.py::PushDatasetTestCase::test_harvest_catalog_with_owner_org",
"tests/test_federation.py::PushDatasetTestCase::test_id_is_created_correctly",
"tests/test_federation.py::PushDatasetTestCase::test_id_is_updated_correctly",
"tests/test_federation.py::PushDatasetTestCase::test_licenses_are_interpreted_correctly",
"tests/test_federation.py::PushDatasetTestCase::test_tags_are_passed_correctly",
"tests/test_federation.py::RemoveDatasetTestCase::test_empty_search_doesnt_call_purge",
"tests/test_federation.py::RemoveDatasetTestCase::test_filter_in_datasets",
"tests/test_federation.py::RemoveDatasetTestCase::test_filter_in_out_datasets",
"tests/test_federation.py::RemoveDatasetTestCase::test_query_one_dataset",
"tests/test_federation.py::RemoveDatasetTestCase::test_query_over_500_datasets",
"tests/test_federation.py::RemoveDatasetTestCase::test_remove_through_filters_and_organization",
"tests/test_federation.py::PushThemeTestCase::test_ckan_portal_is_called_with_correct_parametres",
"tests/test_federation.py::PushThemeTestCase::test_empty_theme_search_raises_exception",
"tests/test_federation.py::PushThemeTestCase::test_function_pushes_theme_by_identifier",
"tests/test_federation.py::PushThemeTestCase::test_function_pushes_theme_by_label",
"tests/test_federation.py::PushCatalogThemesTestCase::test_empty_portal_pushes_every_theme",
"tests/test_federation.py::PushCatalogThemesTestCase::test_full_portal_pushes_nothing",
"tests/test_federation.py::PushCatalogThemesTestCase::test_non_empty_intersection_pushes_missing_themes",
"tests/test_federation.py::OrganizationsTestCase::test_get_organizations_calls_api_correctly",
"tests/test_federation.py::OrganizationsTestCase::test_push_organizations_cuts_trees_on_failures",
"tests/test_federation.py::OrganizationsTestCase::test_push_organizations_sends_correct_hierarchy"
] | 2018-10-24 13:57:03+00:00 | 1,846 |
|
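A brief usage sketch of the single-organization lookup added above. As the manual entry notes, the result does not include the hierarchy, which still requires `get_organizations_from_ckan()`. The portal URL is a placeholder and `modernizacion` is just an example organization name.

```python
from pydatajson.federation import get_organization_from_ckan

# Placeholder portal URL and an example organization name/id.
organization = get_organization_from_ckan(
    "http://datos.example.gob.ar", "modernizacion")

# The returned dict mirrors CKAN's organization_show response.
print(organization.get("title"))
print(organization.get("description"))
```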
datosgobar__pydatajson-219 | diff --git a/docs/MANUAL.md b/docs/MANUAL.md
index 6ab393f..d68ab0a 100644
--- a/docs/MANUAL.md
+++ b/docs/MANUAL.md
@@ -350,6 +350,10 @@ Toma los siguientes parámetros:
como groups de CKAN.
- **demote_themes** (opcional, default: True): Si está en true, los labels de los themes del dataset, se escriben como
tags de CKAN; sino,se pasan como grupo.
+ - **download_strategy** (opcional, default None): La referencia a una función que toma (catalog, distribution) de
+ entrada y devuelve un booleano. Esta función se aplica sobre todas las distribuciones del dataset. Si devuelve `True`,
+ se descarga el archivo indicado en el `downloadURL` de la distribución y se lo sube al portal de destino. Si es None,
+ se omite esta operación.
Retorna el id en el nodo de destino del dataset federado.
@@ -400,6 +404,10 @@ Toma los siguientes parámetros:
organización pasada como parámetro.
- **catalog_id**: El prefijo que va a preceder el id y name del dataset en el portal
destino, separado por un guión.
+ - **download_strategy** (opcional, default None): La referencia a una función que toma (catalog, distribution) de
+ entrada y devuelve un booleano. Esta función se aplica sobre todas las distribuciones del dataset. Si devuelve `True`,
+ se descarga el archivo indicado en el `downloadURL` de la distribución y se lo sube al portal de destino. Si es None,
+ se omite esta operación.
Retorna el id en el nodo de destino del dataset federado.
@@ -411,6 +419,10 @@ Toma los siguientes parámetros:
- **portal_url**: URL del portal de CKAN de destino.
- **apikey**: La apikey de un usuario del portal de destino con los permisos para crear el dataset bajo la
organización pasada como parámetro.
+ - **download_strategy** (opcional, default None): La referencia a una función que toma (catalog, distribution) de
+ entrada y devuelve un booleano. Esta función se aplica sobre todas las distribuciones del dataset. Si devuelve `True`,
+ se descarga el archivo indicado en el `downloadURL` de la distribución y se lo sube al portal de destino. Si es None,
+ se omite esta operación.
Retorna el id del dataset restaurado.
@@ -424,7 +436,11 @@ Toma los siguientes parámetros:
- **dataset_list** (opcional, default: None): Lista de ids de los datasets a federar. Si no se pasa, se federan todos
los datasets del catálogo.
- **owner_org** (opcional, default: None): La organización a la que pertence el dataset. Debe encontrarse en el
- portal de destino. Si no se pasa, se toma como organización el catalog_id
+ portal de destino. Si no se pasa, se toma como organización el catalog_id.
+ - **download_strategy** (opcional, default None): La referencia a una función que toma (catalog, distribution) de
+ entrada y devuelve un booleano. Esta función se aplica sobre todas las distribuciones del catálogo. Si devuelve
+ `True`, se descarga el archivo indicado en el `downloadURL` de la distribución y se lo sube al portal de destino. Si
+ es None, se omite esta operación.
Retorna el id en el nodo de destino de los datasets federados.
diff --git a/pydatajson/federation.py b/pydatajson/federation.py
index 0cd1e5e..646f470 100644
--- a/pydatajson/federation.py
+++ b/pydatajson/federation.py
@@ -10,13 +10,15 @@ from ckanapi import RemoteCKAN
from ckanapi.errors import NotFound
from .ckan_utils import map_dataset_to_package, map_theme_to_group
from .search import get_datasets
+from .helpers import resource_files_download
logger = logging.getLogger('pydatajson.federation')
def push_dataset_to_ckan(catalog, owner_org, dataset_origin_identifier,
portal_url, apikey, catalog_id=None,
- demote_superThemes=True, demote_themes=True):
+ demote_superThemes=True, demote_themes=True,
+ download_strategy=None):
"""Escribe la metadata de un dataset en el portal pasado por parámetro.
Args:
@@ -33,6 +35,10 @@ def push_dataset_to_ckan(catalog, owner_org, dataset_origin_identifier,
themes del dataset, se propagan como grupo.
demote_themes(bool): Si está en true, los labels de los themes
del dataset, pasan a ser tags. Sino, se pasan como grupo.
+ download_strategy(callable): Una función (catálogo, distribución)->
+ bool. Sobre las distribuciones que evalúa True, descarga el
+ recurso en el downloadURL y lo sube al portal de destino.
+ Por default no sube ninguna distribución.
Returns:
str: El id del dataset en el catálogo de destino.
"""
@@ -64,11 +70,17 @@ def push_dataset_to_ckan(catalog, owner_org, dataset_origin_identifier,
pushed_package = ckan_portal.call_action(
'package_create', data_dict=package)
+ if download_strategy:
+ with resource_files_download(catalog, dataset.get('distribution', []),
+ download_strategy) as resource_files:
+ resources_upload(portal_url, apikey, resource_files,
+ catalog_id=catalog_id)
+
ckan_portal.close()
return pushed_package['id']
-def resources_upload(portal_url, apikey, resource_files):
+def resources_upload(portal_url, apikey, resource_files, catalog_id=None):
"""Sube archivos locales a sus distribuciones correspondientes en el portal
pasado por parámetro.
@@ -78,15 +90,18 @@ def resources_upload(portal_url, apikey, resource_files):
permitan crear o actualizar el dataset.
resource_files(dict): Diccionario con entradas
id_de_distribucion:path_al_recurso a subir
+ catalog_id(str): prependea el id al id del recurso para
+ encontrarlo antes de subirlo
Returns:
list: los ids de los recursos modificados
"""
ckan_portal = RemoteCKAN(portal_url, apikey=apikey)
res = []
for resource in resource_files:
+ resource_id = catalog_id + '_' + resource if catalog_id else resource
try:
pushed = ckan_portal.action.resource_patch(
- id=resource,
+ id=resource_id,
resource_type='file.upload',
upload=open(resource_files[resource], 'rb'))
res.append(pushed['id'])
@@ -199,7 +214,7 @@ def push_theme_to_ckan(catalog, portal_url, apikey,
def restore_dataset_to_ckan(catalog, owner_org, dataset_origin_identifier,
- portal_url, apikey):
+ portal_url, apikey, download_strategy=None):
"""Restaura la metadata de un dataset en el portal pasado por parámetro.
Args:
@@ -210,15 +225,22 @@ def restore_dataset_to_ckan(catalog, owner_org, dataset_origin_identifier,
portal_url (str): La URL del portal CKAN de destino.
apikey (str): La apikey de un usuario con los permisos que le
permitan crear o actualizar el dataset.
+ download_strategy(callable): Una función (catálogo, distribución)->
+ bool. Sobre las distribuciones que evalúa True, descarga el
+ recurso en el downloadURL y lo sube al portal de destino.
+ Por default no sube ninguna distribución.
Returns:
str: El id del dataset restaurado.
"""
- return push_dataset_to_ckan(catalog, owner_org, dataset_origin_identifier,
- portal_url, apikey, None, False, False)
+
+ return push_dataset_to_ckan(catalog, owner_org,
+ dataset_origin_identifier, portal_url,
+ apikey, None, False, False, download_strategy)
def harvest_dataset_to_ckan(catalog, owner_org, dataset_origin_identifier,
- portal_url, apikey, catalog_id):
+ portal_url, apikey, catalog_id,
+ download_strategy=None):
"""Federa la metadata de un dataset en el portal pasado por parámetro.
Args:
@@ -229,17 +251,22 @@ def harvest_dataset_to_ckan(catalog, owner_org, dataset_origin_identifier,
portal_url (str): La URL del portal CKAN de destino.
apikey (str): La apikey de un usuario con los permisos que le
permitan crear o actualizar el dataset.
- catalog_id(str): El id que prep
+ catalog_id(str): El id que prependea al dataset y recursos
+ download_strategy(callable): Una función (catálogo, distribución)->
+ bool. Sobre las distribuciones que evalúa True, descarga el
+ recurso en el downloadURL y lo sube al portal de destino.
+ Por default no sube ninguna distribución.
Returns:
str: El id del dataset restaurado.
"""
return push_dataset_to_ckan(catalog, owner_org, dataset_origin_identifier,
- portal_url, apikey, catalog_id=catalog_id)
+ portal_url, apikey, catalog_id=catalog_id,
+ download_strategy=download_strategy)
def restore_catalog_to_ckan(catalog, owner_org, portal_url, apikey,
- dataset_list=None):
+ dataset_list=None, download_strategy=None):
"""Restaura los datasets de un catálogo al portal pasado por parámetro.
Si hay temas presentes en el DataJson que no están en el portal de
CKAN, los genera.
@@ -253,6 +280,10 @@ def restore_catalog_to_ckan(catalog, owner_org, portal_url, apikey,
se pasa una lista, todos los datasests se restauran.
owner_org (str): La organización a la cual pertencen los datasets.
Si no se pasa, se utiliza el catalog_id.
+ download_strategy(callable): Una función (catálogo, distribución)->
+ bool. Sobre las distribuciones que evalúa True, descarga el
+ recurso en el downloadURL y lo sube al portal de destino.
+ Por default no sube ninguna distribución.
Returns:
str: El id del dataset en el catálogo de destino.
"""
@@ -261,14 +292,16 @@ def restore_catalog_to_ckan(catalog, owner_org, portal_url, apikey,
for ds in catalog.datasets]
restored = []
for dataset_id in dataset_list:
- restored_id = restore_dataset_to_ckan(
- catalog, owner_org, dataset_id, portal_url, apikey)
+ restored_id = restore_dataset_to_ckan(catalog, owner_org, dataset_id,
+ portal_url, apikey,
+ download_strategy)
restored.append(restored_id)
return restored
def harvest_catalog_to_ckan(catalog, portal_url, apikey, catalog_id,
- dataset_list=None, owner_org=None):
+ dataset_list=None, owner_org=None,
+ download_strategy=None):
"""Federa los datasets de un catálogo al portal pasado por parámetro.
Args:
@@ -282,6 +315,10 @@ def harvest_catalog_to_ckan(catalog, portal_url, apikey, catalog_id,
se pasa una lista, todos los datasests se federan.
owner_org (str): La organización a la cual pertencen los datasets.
Si no se pasa, se utiliza el catalog_id.
+ download_strategy(callable): Una función (catálogo, distribución)->
+ bool. Sobre las distribuciones que evalúa True, descarga el
+ recurso en el downloadURL y lo sube al portal de destino.
+ Por default no sube ninguna distribución.
Returns:
str: El id del dataset en el catálogo de destino.
"""
@@ -293,8 +330,10 @@ def harvest_catalog_to_ckan(catalog, portal_url, apikey, catalog_id,
errors = {}
for dataset_id in dataset_list:
try:
- harvested_id = harvest_dataset_to_ckan(
- catalog, owner_org, dataset_id, portal_url, apikey, catalog_id)
+ harvested_id = harvest_dataset_to_ckan(catalog, owner_org,
+ dataset_id, portal_url,
+ apikey, catalog_id,
+ download_strategy)
harvested.append(harvested_id)
except Exception as e:
msg = "Error federando catalogo: %s, dataset: %s al portal: %s\n"\
diff --git a/pydatajson/helpers.py b/pydatajson/helpers.py
index 1e0b48c..8331c83 100644
--- a/pydatajson/helpers.py
+++ b/pydatajson/helpers.py
@@ -11,13 +11,20 @@ from datetime import datetime
import os
import json
import re
+import logging
+import tempfile
+from contextlib import contextmanager
from openpyxl import load_workbook
from six.moves.urllib_parse import urlparse
from six import string_types, iteritems
from unidecode import unidecode
+from pydatajson.download import download_to_file
+
+logger = logging.getLogger('pydatajson.helpers')
+
ABSOLUTE_PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
ABSOLUTE_SCHEMA_DIR = os.path.join(ABSOLUTE_PROJECT_DIR, "schemas")
STOP_WORDS = [
@@ -392,3 +399,39 @@ def pprint(result):
result, indent=4, separators=(",", ": "),
ensure_ascii=False
)))
+
+
+@contextmanager
+def resource_files_download(catalog, distributions, download_strategy):
+ resource_files = {}
+ distributions = [dist for dist in distributions if
+ download_strategy(catalog, dist)]
+ for dist in distributions:
+ try:
+ tmpfile = tempfile.NamedTemporaryFile(delete=False)
+ tmpfile.close()
+ download_to_file(dist['downloadURL'], tmpfile.name)
+ resource_files[dist['identifier']] = tmpfile.name
+ except Exception as e:
+ logger.exception(
+ "Error descargando el recurso {} de la distribución {}: {}"
+ .format(dist.get('downloadURL'),
+ dist.get('identifier'), str(e))
+ )
+ continue
+ try:
+ yield resource_files
+
+ finally:
+ for resource in resource_files:
+ os.remove(resource_files[resource])
+
+
+def is_local_andino_resource(catalog, distribution):
+ dist_type = distribution.get('type')
+ if dist_type is not None:
+ return dist_type == 'file.upload'
+ homepage = catalog.get('homepage')
+ if homepage is not None:
+ return distribution.get('downloadURL', '').startswith(homepage)
+ return False
| datosgobar/pydatajson | bdc7e8f4d6e1128c657ffa5b8e66215311da9e9c | diff --git a/tests/test_federation.py b/tests/test_federation.py
index 21c1c5e..900cf42 100644
--- a/tests/test_federation.py
+++ b/tests/test_federation.py
@@ -14,6 +14,7 @@ except ImportError:
from .context import pydatajson
from pydatajson.federation import *
+from pydatajson.helpers import is_local_andino_resource
from ckanapi.errors import NotFound
SAMPLES_DIR = os.path.join("tests", "samples")
@@ -317,6 +318,43 @@ class PushDatasetTestCase(FederationSuite):
)
self.assertEqual([], res)
+ @patch('pydatajson.helpers.download_to_file')
+ def test_push_dataset_upload_strategy(self, mock_download, mock_portal):
+ def mock_call_action(action, data_dict=None):
+ if action == 'package_update':
+ return data_dict
+ else:
+ return []
+ mock_portal.return_value.call_action = mock_call_action
+ push_dataset_to_ckan(
+ self.catalog,
+ 'owner',
+ self.dataset_id,
+ 'portal',
+ 'key',
+ download_strategy=(lambda _, x: x['identifier'] == '1.1'))
+ mock_portal.return_value.action.resource_patch.assert_called_with(
+ id='1.1',
+ resource_type='file.upload',
+ upload=ANY
+ )
+
+ def test_push_dataset_upload_empty_strategy(self, mock_portal):
+ def mock_call_action(action, data_dict=None):
+ if action == 'package_update':
+ return data_dict
+ else:
+ return []
+ mock_portal.return_value.call_action = mock_call_action
+ push_dataset_to_ckan(
+ self.minimum_catalog,
+ 'owner',
+ self.dataset_id,
+ 'portal',
+ 'key',
+ download_strategy=is_local_andino_resource)
+ mock_portal.return_value.action.resource_patch.not_called()
+
class RemoveDatasetTestCase(FederationSuite):
| Add the ability to download/upload distributions in the `restore_catalog_to_ckan()` method
The method would take two flags to distinguish between these situations:
+ I want to restore a catalog transferring only the metadata (without downloading or re-uploading any distributions).
+ I want to restore a catalog downloading every distribution and uploading them through the API to the target CKAN.
+ I want to restore a catalog downloading only the distributions whose download URL matches a given pattern (i.e. the downloadURL "pattern" a CKAN uses) and uploading those to the target CKAN; for the remaining distributions, only the downloadURL metadata is transferred as-is.
Note: implement the regular pattern that identifies "what I have to download" as a variable whose default is built for CKAN URLs. | 0.0 | [
"tests/test_federation.py::PushDatasetTestCase::test_dataset_id_is_preserved_if_catalog_id_is_not_passed",
"tests/test_federation.py::PushDatasetTestCase::test_dataset_level_wrappers",
"tests/test_federation.py::PushDatasetTestCase::test_dataset_without_license_sets_notspecified",
"tests/test_federation.py::PushDatasetTestCase::test_harvest_catalog_with_dataset_list",
"tests/test_federation.py::PushDatasetTestCase::test_harvest_catalog_with_empty_list",
"tests/test_federation.py::PushDatasetTestCase::test_harvest_catalog_with_errors",
"tests/test_federation.py::PushDatasetTestCase::test_harvest_catalog_with_no_optional_parametres",
"tests/test_federation.py::PushDatasetTestCase::test_harvest_catalog_with_owner_org",
"tests/test_federation.py::PushDatasetTestCase::test_id_is_created_correctly",
"tests/test_federation.py::PushDatasetTestCase::test_id_is_updated_correctly",
"tests/test_federation.py::PushDatasetTestCase::test_licenses_are_interpreted_correctly",
"tests/test_federation.py::PushDatasetTestCase::test_push_dataset_upload_empty_strategy",
"tests/test_federation.py::PushDatasetTestCase::test_push_dataset_upload_strategy",
"tests/test_federation.py::PushDatasetTestCase::test_resource_upload_error",
"tests/test_federation.py::PushDatasetTestCase::test_resource_upload_succesfully",
"tests/test_federation.py::PushDatasetTestCase::test_tags_are_passed_correctly",
"tests/test_federation.py::RemoveDatasetTestCase::test_empty_search_doesnt_call_purge",
"tests/test_federation.py::RemoveDatasetTestCase::test_filter_in_datasets",
"tests/test_federation.py::RemoveDatasetTestCase::test_filter_in_out_datasets",
"tests/test_federation.py::RemoveDatasetTestCase::test_query_one_dataset",
"tests/test_federation.py::RemoveDatasetTestCase::test_query_over_500_datasets",
"tests/test_federation.py::RemoveDatasetTestCase::test_remove_through_filters_and_organization",
"tests/test_federation.py::PushThemeTestCase::test_ckan_portal_is_called_with_correct_parametres",
"tests/test_federation.py::PushThemeTestCase::test_empty_theme_search_raises_exception",
"tests/test_federation.py::PushThemeTestCase::test_function_pushes_theme_by_identifier",
"tests/test_federation.py::PushThemeTestCase::test_function_pushes_theme_by_label",
"tests/test_federation.py::PushCatalogThemesTestCase::test_empty_portal_pushes_every_theme",
"tests/test_federation.py::PushCatalogThemesTestCase::test_full_portal_pushes_nothing",
"tests/test_federation.py::PushCatalogThemesTestCase::test_non_empty_intersection_pushes_missing_themes",
"tests/test_federation.py::OrganizationsTestCase::test_get_organization_calls_api_correctly",
"tests/test_federation.py::OrganizationsTestCase::test_get_organizations_calls_api_correctly",
"tests/test_federation.py::OrganizationsTestCase::test_push_organization_assigns_parent_correctly",
"tests/test_federation.py::OrganizationsTestCase::test_push_organization_sets_correct_attributes_on_failures",
"tests/test_federation.py::OrganizationsTestCase::test_push_organization_sets_correct_attributes_on_success",
"tests/test_federation.py::OrganizationsTestCase::test_push_organizations_cuts_trees_on_failures",
"tests/test_federation.py::OrganizationsTestCase::test_push_organizations_sends_correct_hierarchy"
] | [] | 2018-11-06 18:10:59+00:00 | 1,847 |
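The patch for this entry threads a `download_strategy` callable through `push_dataset_to_ckan`, `restore_dataset_to_ckan`, `restore_catalog_to_ckan` and `harvest_catalog_to_ckan`, and adds `is_local_andino_resource` as a ready-made strategy. A minimal usage sketch follows; the catalog path, portal URL, organization and apikey are placeholders rather than values from the source.

```python
from pydatajson import DataJson
from pydatajson.federation import restore_catalog_to_ckan
from pydatajson.helpers import is_local_andino_resource

catalog = DataJson("data.json")  # placeholder catalog file

# Re-upload only the distributions hosted on the source portal itself;
# every other distribution keeps its downloadURL metadata untouched.
restore_catalog_to_ckan(
    catalog,
    owner_org="my-org",                    # placeholder organization
    portal_url="http://ckan.example.org",  # placeholder portal
    apikey="my-apikey",                    # placeholder apikey
    download_strategy=is_local_andino_resource,
)

# Any (catalog, distribution) -> bool callable works as a strategy, e.g.:
#   download_strategy=lambda _catalog, dist: dist["identifier"] == "1.1"
```

Per the new `resource_files_download` helper, a distribution whose download fails is logged and skipped instead of aborting the restore, and the temporary files are removed afterwards.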
|
datosgobar__pydatajson-220 | diff --git a/docs/MANUAL.md b/docs/MANUAL.md
index 6ab393f..0e27a6b 100644
--- a/docs/MANUAL.md
+++ b/docs/MANUAL.md
@@ -350,6 +350,10 @@ Toma los siguientes parámetros:
como groups de CKAN.
- **demote_themes** (opcional, default: True): Si está en true, los labels de los themes del dataset, se escriben como
tags de CKAN; sino,se pasan como grupo.
+ - **download_strategy** (opcional, default None): La referencia a una función que toma (catalog, distribution) de
+ entrada y devuelve un booleano. Esta función se aplica sobre todas las distribuciones del dataset. Si devuelve `True`,
+ se descarga el archivo indicado en el `downloadURL` de la distribución y se lo sube al portal de destino. Si es None,
+ se omite esta operación.
Retorna el id en el nodo de destino del dataset federado.
@@ -400,6 +404,10 @@ Toma los siguientes parámetros:
organización pasada como parámetro.
- **catalog_id**: El prefijo que va a preceder el id y name del dataset en el portal
destino, separado por un guión.
+ - **download_strategy** (opcional, default None): La referencia a una función que toma (catalog, distribution) de
+ entrada y devuelve un booleano. Esta función se aplica sobre todas las distribuciones del dataset. Si devuelve `True`,
+ se descarga el archivo indicado en el `downloadURL` de la distribución y se lo sube al portal de destino. Si es None,
+ se omite esta operación.
Retorna el id en el nodo de destino del dataset federado.
@@ -411,6 +419,10 @@ Toma los siguientes parámetros:
- **portal_url**: URL del portal de CKAN de destino.
- **apikey**: La apikey de un usuario del portal de destino con los permisos para crear el dataset bajo la
organización pasada como parámetro.
+ - **download_strategy** (opcional, default None): La referencia a una función que toma (catalog, distribution) de
+ entrada y devuelve un booleano. Esta función se aplica sobre todas las distribuciones del dataset. Si devuelve `True`,
+ se descarga el archivo indicado en el `downloadURL` de la distribución y se lo sube al portal de destino. Si es None,
+ se omite esta operación.
Retorna el id del dataset restaurado.
@@ -424,7 +436,11 @@ Toma los siguientes parámetros:
- **dataset_list** (opcional, default: None): Lista de ids de los datasets a federar. Si no se pasa, se federan todos
los datasets del catálogo.
- **owner_org** (opcional, default: None): La organización a la que pertence el dataset. Debe encontrarse en el
- portal de destino. Si no se pasa, se toma como organización el catalog_id
+ portal de destino. Si no se pasa, se toma como organización el catalog_id.
+ - **download_strategy** (opcional, default None): La referencia a una función que toma (catalog, distribution) de
+ entrada y devuelve un booleano. Esta función se aplica sobre todas las distribuciones del catálogo. Si devuelve
+ `True`, se descarga el archivo indicado en el `downloadURL` de la distribución y se lo sube al portal de destino. Si
+ es None, se omite esta operación.
Retorna el id en el nodo de destino de los datasets federados.
@@ -483,6 +499,16 @@ en el portal de destino. Toma los siguientes parámetros:
Retorna el diccionario de la organización creada. El resultado tiene un campo `success` que indica si fue creado
exitosamente o no.
+- **pydatajson.federation.remove_organization_from_ckan()**: Tomando el id o name de una organización; la borra en el
+portal de destino. Toma los siguientes parámetros:
+ - **portal_url**: La URL del portal CKAN de destino.
+ - **apikey**: La apikey de un usuario con los permisos que le permitan borrar la organización.
+ - **organization_id**: Id o name de la organización a borrar.
+
+ Retorna None.
+
+ **Advertencia**: En caso de que la organización tenga hijos en la jerarquía, estos pasan a ser de primer nivel.
+
## Anexo I: Estructura de respuestas
diff --git a/pydatajson/federation.py b/pydatajson/federation.py
index 0cd1e5e..1b4465c 100644
--- a/pydatajson/federation.py
+++ b/pydatajson/federation.py
@@ -10,13 +10,15 @@ from ckanapi import RemoteCKAN
from ckanapi.errors import NotFound
from .ckan_utils import map_dataset_to_package, map_theme_to_group
from .search import get_datasets
+from .helpers import resource_files_download
logger = logging.getLogger('pydatajson.federation')
def push_dataset_to_ckan(catalog, owner_org, dataset_origin_identifier,
portal_url, apikey, catalog_id=None,
- demote_superThemes=True, demote_themes=True):
+ demote_superThemes=True, demote_themes=True,
+ download_strategy=None):
"""Escribe la metadata de un dataset en el portal pasado por parámetro.
Args:
@@ -33,6 +35,10 @@ def push_dataset_to_ckan(catalog, owner_org, dataset_origin_identifier,
themes del dataset, se propagan como grupo.
demote_themes(bool): Si está en true, los labels de los themes
del dataset, pasan a ser tags. Sino, se pasan como grupo.
+ download_strategy(callable): Una función (catálogo, distribución)->
+ bool. Sobre las distribuciones que evalúa True, descarga el
+ recurso en el downloadURL y lo sube al portal de destino.
+ Por default no sube ninguna distribución.
Returns:
str: El id del dataset en el catálogo de destino.
"""
@@ -64,11 +70,17 @@ def push_dataset_to_ckan(catalog, owner_org, dataset_origin_identifier,
pushed_package = ckan_portal.call_action(
'package_create', data_dict=package)
+ if download_strategy:
+ with resource_files_download(catalog, dataset.get('distribution', []),
+ download_strategy) as resource_files:
+ resources_upload(portal_url, apikey, resource_files,
+ catalog_id=catalog_id)
+
ckan_portal.close()
return pushed_package['id']
-def resources_upload(portal_url, apikey, resource_files):
+def resources_upload(portal_url, apikey, resource_files, catalog_id=None):
"""Sube archivos locales a sus distribuciones correspondientes en el portal
pasado por parámetro.
@@ -78,15 +90,18 @@ def resources_upload(portal_url, apikey, resource_files):
permitan crear o actualizar el dataset.
resource_files(dict): Diccionario con entradas
id_de_distribucion:path_al_recurso a subir
+ catalog_id(str): prependea el id al id del recurso para
+ encontrarlo antes de subirlo
Returns:
list: los ids de los recursos modificados
"""
ckan_portal = RemoteCKAN(portal_url, apikey=apikey)
res = []
for resource in resource_files:
+ resource_id = catalog_id + '_' + resource if catalog_id else resource
try:
pushed = ckan_portal.action.resource_patch(
- id=resource,
+ id=resource_id,
resource_type='file.upload',
upload=open(resource_files[resource], 'rb'))
res.append(pushed['id'])
@@ -199,7 +214,7 @@ def push_theme_to_ckan(catalog, portal_url, apikey,
def restore_dataset_to_ckan(catalog, owner_org, dataset_origin_identifier,
- portal_url, apikey):
+ portal_url, apikey, download_strategy=None):
"""Restaura la metadata de un dataset en el portal pasado por parámetro.
Args:
@@ -210,15 +225,22 @@ def restore_dataset_to_ckan(catalog, owner_org, dataset_origin_identifier,
portal_url (str): La URL del portal CKAN de destino.
apikey (str): La apikey de un usuario con los permisos que le
permitan crear o actualizar el dataset.
+ download_strategy(callable): Una función (catálogo, distribución)->
+ bool. Sobre las distribuciones que evalúa True, descarga el
+ recurso en el downloadURL y lo sube al portal de destino.
+ Por default no sube ninguna distribución.
Returns:
str: El id del dataset restaurado.
"""
- return push_dataset_to_ckan(catalog, owner_org, dataset_origin_identifier,
- portal_url, apikey, None, False, False)
+
+ return push_dataset_to_ckan(catalog, owner_org,
+ dataset_origin_identifier, portal_url,
+ apikey, None, False, False, download_strategy)
def harvest_dataset_to_ckan(catalog, owner_org, dataset_origin_identifier,
- portal_url, apikey, catalog_id):
+ portal_url, apikey, catalog_id,
+ download_strategy=None):
"""Federa la metadata de un dataset en el portal pasado por parámetro.
Args:
@@ -229,17 +251,22 @@ def harvest_dataset_to_ckan(catalog, owner_org, dataset_origin_identifier,
portal_url (str): La URL del portal CKAN de destino.
apikey (str): La apikey de un usuario con los permisos que le
permitan crear o actualizar el dataset.
- catalog_id(str): El id que prep
+ catalog_id(str): El id que prependea al dataset y recursos
+ download_strategy(callable): Una función (catálogo, distribución)->
+ bool. Sobre las distribuciones que evalúa True, descarga el
+ recurso en el downloadURL y lo sube al portal de destino.
+ Por default no sube ninguna distribución.
Returns:
str: El id del dataset restaurado.
"""
return push_dataset_to_ckan(catalog, owner_org, dataset_origin_identifier,
- portal_url, apikey, catalog_id=catalog_id)
+ portal_url, apikey, catalog_id=catalog_id,
+ download_strategy=download_strategy)
def restore_catalog_to_ckan(catalog, owner_org, portal_url, apikey,
- dataset_list=None):
+ dataset_list=None, download_strategy=None):
"""Restaura los datasets de un catálogo al portal pasado por parámetro.
Si hay temas presentes en el DataJson que no están en el portal de
CKAN, los genera.
@@ -253,6 +280,10 @@ def restore_catalog_to_ckan(catalog, owner_org, portal_url, apikey,
se pasa una lista, todos los datasests se restauran.
owner_org (str): La organización a la cual pertencen los datasets.
Si no se pasa, se utiliza el catalog_id.
+ download_strategy(callable): Una función (catálogo, distribución)->
+ bool. Sobre las distribuciones que evalúa True, descarga el
+ recurso en el downloadURL y lo sube al portal de destino.
+ Por default no sube ninguna distribución.
Returns:
str: El id del dataset en el catálogo de destino.
"""
@@ -261,14 +292,16 @@ def restore_catalog_to_ckan(catalog, owner_org, portal_url, apikey,
for ds in catalog.datasets]
restored = []
for dataset_id in dataset_list:
- restored_id = restore_dataset_to_ckan(
- catalog, owner_org, dataset_id, portal_url, apikey)
+ restored_id = restore_dataset_to_ckan(catalog, owner_org, dataset_id,
+ portal_url, apikey,
+ download_strategy)
restored.append(restored_id)
return restored
def harvest_catalog_to_ckan(catalog, portal_url, apikey, catalog_id,
- dataset_list=None, owner_org=None):
+ dataset_list=None, owner_org=None,
+ download_strategy=None):
"""Federa los datasets de un catálogo al portal pasado por parámetro.
Args:
@@ -282,6 +315,10 @@ def harvest_catalog_to_ckan(catalog, portal_url, apikey, catalog_id,
se pasa una lista, todos los datasests se federan.
owner_org (str): La organización a la cual pertencen los datasets.
Si no se pasa, se utiliza el catalog_id.
+ download_strategy(callable): Una función (catálogo, distribución)->
+ bool. Sobre las distribuciones que evalúa True, descarga el
+ recurso en el downloadURL y lo sube al portal de destino.
+ Por default no sube ninguna distribución.
Returns:
str: El id del dataset en el catálogo de destino.
"""
@@ -293,8 +330,10 @@ def harvest_catalog_to_ckan(catalog, portal_url, apikey, catalog_id,
errors = {}
for dataset_id in dataset_list:
try:
- harvested_id = harvest_dataset_to_ckan(
- catalog, owner_org, dataset_id, portal_url, apikey, catalog_id)
+ harvested_id = harvest_dataset_to_ckan(catalog, owner_org,
+ dataset_id, portal_url,
+ apikey, catalog_id,
+ download_strategy)
harvested.append(harvested_id)
except Exception as e:
msg = "Error federando catalogo: %s, dataset: %s al portal: %s\n"\
@@ -417,3 +456,24 @@ def push_organization_to_ckan(portal_url, apikey, organization, parent=None):
pushed_org = {'name': organization, 'success': False}
return pushed_org
+
+
+def remove_organization_from_ckan(portal_url, apikey, organization_id):
+ """Toma un id de organización y la purga del portal de destino.
+ Args:
+ portal_url (str): La URL del portal CKAN de destino.
+ apikey (str): La apikey de un usuario con los permisos que le
+ permitan borrar la organización.
+ organization_id(str): Id o name de la organización a borrar.
+ Returns:
+ None.
+
+ """
+ portal = RemoteCKAN(portal_url, apikey=apikey)
+ try:
+ portal.call_action('organization_purge',
+ data_dict={'id': organization_id})
+
+ except Exception as e:
+ logger.exception('Ocurrió un error borrando la organización {}: {}'
+ .format(organization_id, str(e)))
diff --git a/pydatajson/helpers.py b/pydatajson/helpers.py
index 1e0b48c..8331c83 100644
--- a/pydatajson/helpers.py
+++ b/pydatajson/helpers.py
@@ -11,13 +11,20 @@ from datetime import datetime
import os
import json
import re
+import logging
+import tempfile
+from contextlib import contextmanager
from openpyxl import load_workbook
from six.moves.urllib_parse import urlparse
from six import string_types, iteritems
from unidecode import unidecode
+from pydatajson.download import download_to_file
+
+logger = logging.getLogger('pydatajson.helpers')
+
ABSOLUTE_PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
ABSOLUTE_SCHEMA_DIR = os.path.join(ABSOLUTE_PROJECT_DIR, "schemas")
STOP_WORDS = [
@@ -392,3 +399,39 @@ def pprint(result):
result, indent=4, separators=(",", ": "),
ensure_ascii=False
)))
+
+
+@contextmanager
+def resource_files_download(catalog, distributions, download_strategy):
+ resource_files = {}
+ distributions = [dist for dist in distributions if
+ download_strategy(catalog, dist)]
+ for dist in distributions:
+ try:
+ tmpfile = tempfile.NamedTemporaryFile(delete=False)
+ tmpfile.close()
+ download_to_file(dist['downloadURL'], tmpfile.name)
+ resource_files[dist['identifier']] = tmpfile.name
+ except Exception as e:
+ logger.exception(
+ "Error descargando el recurso {} de la distribución {}: {}"
+ .format(dist.get('downloadURL'),
+ dist.get('identifier'), str(e))
+ )
+ continue
+ try:
+ yield resource_files
+
+ finally:
+ for resource in resource_files:
+ os.remove(resource_files[resource])
+
+
+def is_local_andino_resource(catalog, distribution):
+ dist_type = distribution.get('type')
+ if dist_type is not None:
+ return dist_type == 'file.upload'
+ homepage = catalog.get('homepage')
+ if homepage is not None:
+ return distribution.get('downloadURL', '').startswith(homepage)
+ return False
| datosgobar/pydatajson | bdc7e8f4d6e1128c657ffa5b8e66215311da9e9c | diff --git a/tests/test_federation.py b/tests/test_federation.py
index 21c1c5e..14fac16 100644
--- a/tests/test_federation.py
+++ b/tests/test_federation.py
@@ -14,6 +14,7 @@ except ImportError:
from .context import pydatajson
from pydatajson.federation import *
+from pydatajson.helpers import is_local_andino_resource
from ckanapi.errors import NotFound
SAMPLES_DIR = os.path.join("tests", "samples")
@@ -317,6 +318,43 @@ class PushDatasetTestCase(FederationSuite):
)
self.assertEqual([], res)
+ @patch('pydatajson.helpers.download_to_file')
+ def test_push_dataset_upload_strategy(self, mock_download, mock_portal):
+ def mock_call_action(action, data_dict=None):
+ if action == 'package_update':
+ return data_dict
+ else:
+ return []
+ mock_portal.return_value.call_action = mock_call_action
+ push_dataset_to_ckan(
+ self.catalog,
+ 'owner',
+ self.dataset_id,
+ 'portal',
+ 'key',
+ download_strategy=(lambda _, x: x['identifier'] == '1.1'))
+ mock_portal.return_value.action.resource_patch.assert_called_with(
+ id='1.1',
+ resource_type='file.upload',
+ upload=ANY
+ )
+
+ def test_push_dataset_upload_empty_strategy(self, mock_portal):
+ def mock_call_action(action, data_dict=None):
+ if action == 'package_update':
+ return data_dict
+ else:
+ return []
+ mock_portal.return_value.call_action = mock_call_action
+ push_dataset_to_ckan(
+ self.minimum_catalog,
+ 'owner',
+ self.dataset_id,
+ 'portal',
+ 'key',
+ download_strategy=is_local_andino_resource)
+ mock_portal.return_value.action.resource_patch.not_called()
+
class RemoveDatasetTestCase(FederationSuite):
@@ -586,3 +624,17 @@ class OrganizationsTestCase(FederationSuite):
self.org_tree)
for node in pushed_tree:
self.check_hierarchy(node)
+
+ def test_remove_organization_sends_correct_parameters(self, mock_portal):
+ remove_organization_from_ckan(self.portal_url, self.apikey, 'test_id')
+ mock_portal.return_value.call_action.assert_called_with(
+ 'organization_purge', data_dict={'id': 'test_id'})
+
+ @patch('logging.Logger.exception')
+ def test_remove_organization_logs_failures(self, mock_logger, mock_portal):
+ mock_portal.return_value.call_action.side_effect = Exception('test')
+ remove_organization_from_ckan(self.portal_url, self.apikey, 'test_id')
+ mock_portal.return_value.call_action.assert_called_with(
+ 'organization_purge', data_dict={'id': 'test_id'})
+ mock_logger.assert_called_with(
+ 'Ocurrió un error borrando la organización test_id: test')
| remove_organization_from_ckan()
Implement the `remove_organization_from_ckan()` method, which deletes an organization given its id. | 0.0 | [
"tests/test_federation.py::PushDatasetTestCase::test_dataset_id_is_preserved_if_catalog_id_is_not_passed",
"tests/test_federation.py::PushDatasetTestCase::test_dataset_level_wrappers",
"tests/test_federation.py::PushDatasetTestCase::test_dataset_without_license_sets_notspecified",
"tests/test_federation.py::PushDatasetTestCase::test_harvest_catalog_with_dataset_list",
"tests/test_federation.py::PushDatasetTestCase::test_harvest_catalog_with_empty_list",
"tests/test_federation.py::PushDatasetTestCase::test_harvest_catalog_with_errors",
"tests/test_federation.py::PushDatasetTestCase::test_harvest_catalog_with_no_optional_parametres",
"tests/test_federation.py::PushDatasetTestCase::test_harvest_catalog_with_owner_org",
"tests/test_federation.py::PushDatasetTestCase::test_id_is_created_correctly",
"tests/test_federation.py::PushDatasetTestCase::test_id_is_updated_correctly",
"tests/test_federation.py::PushDatasetTestCase::test_licenses_are_interpreted_correctly",
"tests/test_federation.py::PushDatasetTestCase::test_push_dataset_upload_empty_strategy",
"tests/test_federation.py::PushDatasetTestCase::test_push_dataset_upload_strategy",
"tests/test_federation.py::PushDatasetTestCase::test_resource_upload_error",
"tests/test_federation.py::PushDatasetTestCase::test_resource_upload_succesfully",
"tests/test_federation.py::PushDatasetTestCase::test_tags_are_passed_correctly",
"tests/test_federation.py::RemoveDatasetTestCase::test_empty_search_doesnt_call_purge",
"tests/test_federation.py::RemoveDatasetTestCase::test_filter_in_datasets",
"tests/test_federation.py::RemoveDatasetTestCase::test_filter_in_out_datasets",
"tests/test_federation.py::RemoveDatasetTestCase::test_query_one_dataset",
"tests/test_federation.py::RemoveDatasetTestCase::test_query_over_500_datasets",
"tests/test_federation.py::RemoveDatasetTestCase::test_remove_through_filters_and_organization",
"tests/test_federation.py::PushThemeTestCase::test_ckan_portal_is_called_with_correct_parametres",
"tests/test_federation.py::PushThemeTestCase::test_empty_theme_search_raises_exception",
"tests/test_federation.py::PushThemeTestCase::test_function_pushes_theme_by_identifier",
"tests/test_federation.py::PushThemeTestCase::test_function_pushes_theme_by_label",
"tests/test_federation.py::PushCatalogThemesTestCase::test_empty_portal_pushes_every_theme",
"tests/test_federation.py::PushCatalogThemesTestCase::test_full_portal_pushes_nothing",
"tests/test_federation.py::PushCatalogThemesTestCase::test_non_empty_intersection_pushes_missing_themes",
"tests/test_federation.py::OrganizationsTestCase::test_get_organization_calls_api_correctly",
"tests/test_federation.py::OrganizationsTestCase::test_get_organizations_calls_api_correctly",
"tests/test_federation.py::OrganizationsTestCase::test_push_organization_assigns_parent_correctly",
"tests/test_federation.py::OrganizationsTestCase::test_push_organization_sets_correct_attributes_on_failures",
"tests/test_federation.py::OrganizationsTestCase::test_push_organization_sets_correct_attributes_on_success",
"tests/test_federation.py::OrganizationsTestCase::test_push_organizations_cuts_trees_on_failures",
"tests/test_federation.py::OrganizationsTestCase::test_push_organizations_sends_correct_hierarchy",
"tests/test_federation.py::OrganizationsTestCase::test_remove_organization_logs_failures",
"tests/test_federation.py::OrganizationsTestCase::test_remove_organization_sends_correct_parameters"
] | [] | 2018-11-06 20:01:09+00:00 | 1,848 |
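The `remove_organization_from_ckan` helper added above wraps CKAN's `organization_purge` action and logs failures instead of raising. A minimal call sketch, with placeholder portal, apikey and id:

```python
from pydatajson.federation import remove_organization_from_ckan

# Purges the organization on the target portal; as the manual note added in
# this patch warns, any child organizations are promoted to the top level.
remove_organization_from_ckan(
    portal_url="http://ckan.example.org",  # placeholder portal
    apikey="my-apikey",                    # placeholder apikey
    organization_id="test_id",             # id or name of the organization
)
```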
|
dave-shawley__setupext-janitor-14 | diff --git a/README.rst b/README.rst
index 6a767de..3b7d103 100644
--- a/README.rst
+++ b/README.rst
@@ -38,7 +38,7 @@ imported into *setup.py* so that it can be passed as a keyword parameter
import setuptools
try:
- from setupext import janitor
+ from setupext_janitor import janitor
CleanCommand = janitor.CleanCommand
except ImportError:
CleanCommand = None
@@ -49,8 +49,13 @@ imported into *setup.py* so that it can be passed as a keyword parameter
setup(
# normal parameters
- setup_requires=['setupext.janitor'],
+ setup_requires=['setupext_janitor'],
cmdclass=cmd_classes,
+ entry_points={
+ # normal parameters, ie. console_scripts[]
+ 'distutils.commands': [
+ ' clean = setupext_janitor.janitor:CleanCommand']
+ }
)
You can use a different approach if you are simply a developer that wants
@@ -67,7 +72,7 @@ few new command line parameters.
``setup.py clean --dist``
Removes directories that the various *dist* commands produce.
-``setup.py clean --egg``
+``setup.py clean --eggs``
Removes *.egg* and *.egg-info* directories.
``setup.py clean --environment``
diff --git a/docs/conf.py b/docs/conf.py
index 718339e..f1efef7 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -3,7 +3,7 @@
import sphinx_rtd_theme
-from setupext import janitor
+from setupext_janitor import janitor
project = 'Setupext: janitor'
diff --git a/setup.py b/setup.py
index d564df8..a267c85 100755
--- a/setup.py
+++ b/setup.py
@@ -3,7 +3,7 @@ import codecs
import setuptools
import sys
-from setupext import janitor
+from setupext_janitor import janitor
with codecs.open('README.rst', 'rb', encoding='utf-8') as file_obj:
@@ -26,7 +26,6 @@ setuptools.setup(
description='Making setup.py clean more useful.',
long_description=long_description,
packages=setuptools.find_packages(exclude=['tests', 'tests.*']),
- namespace_packages=['setupext'],
zip_safe=True,
platforms='any',
install_requires=install_requirements,
@@ -41,7 +40,7 @@ setuptools.setup(
],
entry_points={
'distutils.commands': [
- 'clean = setupext.janitor:CleanCommand',
+ 'clean = setupext_janitor.janitor:CleanCommand',
],
},
cmdclass={
diff --git a/setupext/__init__.py b/setupext/__init__.py
deleted file mode 100644
index de40ea7..0000000
--- a/setupext/__init__.py
+++ /dev/null
@@ -1,1 +0,0 @@
-__import__('pkg_resources').declare_namespace(__name__)
diff --git a/setupext_janitor/__init__.py b/setupext_janitor/__init__.py
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/setupext_janitor/__init__.py
@@ -0,0 +1,1 @@
+
diff --git a/setupext/janitor.py b/setupext_janitor/janitor.py
similarity index 83%
rename from setupext/janitor.py
rename to setupext_janitor/janitor.py
index 1a014e0..13b08c1 100644
--- a/setupext/janitor.py
+++ b/setupext_janitor/janitor.py
@@ -1,11 +1,11 @@
from distutils import dir_util, errors
from distutils.command.clean import clean as _CleanCommand
import os.path
+import traceback
-
-version_info = (1, 0, 0)
+version_info = (1, 0, 1)
__version__ = '.'.join(str(v) for v in version_info)
-
+debug = False
class CleanCommand(_CleanCommand):
"""
@@ -71,17 +71,32 @@ class CleanCommand(_CleanCommand):
for cmd_name, _ in self.distribution.get_command_list():
if 'dist' in cmd_name:
command = self.distribution.get_command_obj(cmd_name)
- command.ensure_finalized()
+ #command.ensure_finalized()
+ # Stop premature exit for RPM-on-NT err
+ # https://github.com/dave-shawley/setupext-janitor/issues/12
+ try:
+ command.ensure_finalized()
+ except Exception as err:
+ skip = "don't know how to create RPM distributions on platform nt"
+ if skip in err.args:
+ print('-'*50,'\nException encountered and ignored:')
+ print('{} {}'.format(err.__class__.__name__, err.args[0]))
+ if debug: traceback.print_exc()
+ print('-'*50)
+ else:
+ raise err
+
if getattr(command, 'dist_dir', None):
dir_names.add(command.dist_dir)
-
+
if self.eggs:
for name in os.listdir(self.egg_base):
if name.endswith('.egg-info'):
dir_names.add(os.path.join(self.egg_base, name))
for name in os.listdir(os.curdir):
- if name.endswith('.egg'):
- dir_names.add(name)
+ for e in ['.egg', '.eggs']:
+ if name.endswith(e):
+ dir_names.add(name)
if self.environment and self.virtualenv_dir:
dir_names.add(self.virtualenv_dir)
| dave-shawley/setupext-janitor | 801d4e51b10c8880be16c99fd6316051808141fa | diff --git a/tests.py b/tests.py
index 39af958..53f0d20 100644
--- a/tests.py
+++ b/tests.py
@@ -12,7 +12,7 @@ if sys.version_info >= (2, 7):
else: # noinspection PyPackageRequirements,PyUnresolvedReferences
import unittest2 as unittest
-from setupext import janitor
+from setupext_janitor import janitor
def run_setup(*command_line):
@@ -171,6 +171,16 @@ class EggDirectoryCleanupTests(DirectoryCleanupMixin, unittest.TestCase):
os.rmdir(dir_name)
raise
+ def test_that_eggs_directories_are_removed(self):
+ dir_name = uuid.uuid4().hex + '.eggs'
+ os.mkdir(dir_name)
+ try:
+ run_setup('clean', '--eggs')
+ self.assert_path_does_not_exist(dir_name)
+ except:
+ os.rmdir(dir_name)
+ raise
+
def test_that_directories_are_not_removed_in_dry_run_mode(self):
egg_root = self.create_directory('egg-info-root')
os.mkdir(os.path.join(egg_root, 'package.egg-info'))
| clean --egg misses .eggs (plural)
Eggs downloaded via `setup_requires=...` are put in `.eggs`, not `.egg`, and `python setup.py clean --eggs` misses them.
(Sidebar: `setup_requires` isn't necessary post PEP 518 and pip > v10.) | 0.0 | [
"tests.py::CommandOptionTests::test_that_distutils_options_are_present",
"tests.py::CommandOptionTests::test_that_janitor_defines_dist_command",
"tests.py::CommandOptionTests::test_that_janitor_user_options_are_not_clean_options",
"tests.py::PycacheCleanupTests::test_that_janitor_does_not_fail_when_cache_parent_is_removed",
"tests.py::RemoveAllTests::test_that_distdir_is_removed",
"tests.py::RemoveAllTests::test_that_eggdir_is_removed",
"tests.py::RemoveAllTests::test_that_envdir_is_removed",
"tests.py::RemoveAllTests::test_that_pycache_is_removed"
] | [] | 2019-02-07 23:36:43+00:00 | 1,849 |
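The fix above widens the directory scan in the clean command so that both `.egg` and `.eggs` directories are collected (setuptools drops eggs fetched via `setup_requires` into `.eggs`). A stripped-down illustration of the check, not the library's exact code:

```python
import os

def egg_like_directories(path=os.curdir):
    """Yield the directories that `setup.py clean --eggs` should now remove."""
    for name in os.listdir(path):
        # The patched loop tests both suffixes instead of only '.egg'.
        if name.endswith(".egg") or name.endswith(".eggs"):
            yield name
```

`.egg-info` directories are still handled separately relative to `egg_base`, exactly as in the untouched part of the loop.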
|
dave-shawley__setupext-janitor-8 | diff --git a/README.rst b/README.rst
index 6a767de..c6a8596 100644
--- a/README.rst
+++ b/README.rst
@@ -38,7 +38,7 @@ imported into *setup.py* so that it can be passed as a keyword parameter
import setuptools
try:
- from setupext import janitor
+ from setupext_janitor import janitor
CleanCommand = janitor.CleanCommand
except ImportError:
CleanCommand = None
@@ -49,8 +49,13 @@ imported into *setup.py* so that it can be passed as a keyword parameter
setup(
# normal parameters
- setup_requires=['setupext.janitor'],
+ setup_requires=['setupext_janitor'],
cmdclass=cmd_classes,
+ entry_points={
+ # normal parameters, ie. console_scripts[]
+ 'distutils.commands': [
+ ' clean = setupext_janitor.janitor:CleanCommand']
+ }
)
You can use a different approach if you are simply a developer that wants
diff --git a/docs/conf.py b/docs/conf.py
index 718339e..f1efef7 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -3,7 +3,7 @@
import sphinx_rtd_theme
-from setupext import janitor
+from setupext_janitor import janitor
project = 'Setupext: janitor'
diff --git a/setup.py b/setup.py
index d564df8..a267c85 100755
--- a/setup.py
+++ b/setup.py
@@ -3,7 +3,7 @@ import codecs
import setuptools
import sys
-from setupext import janitor
+from setupext_janitor import janitor
with codecs.open('README.rst', 'rb', encoding='utf-8') as file_obj:
@@ -26,7 +26,6 @@ setuptools.setup(
description='Making setup.py clean more useful.',
long_description=long_description,
packages=setuptools.find_packages(exclude=['tests', 'tests.*']),
- namespace_packages=['setupext'],
zip_safe=True,
platforms='any',
install_requires=install_requirements,
@@ -41,7 +40,7 @@ setuptools.setup(
],
entry_points={
'distutils.commands': [
- 'clean = setupext.janitor:CleanCommand',
+ 'clean = setupext_janitor.janitor:CleanCommand',
],
},
cmdclass={
diff --git a/setupext/__init__.py b/setupext/__init__.py
deleted file mode 100644
index de40ea7..0000000
--- a/setupext/__init__.py
+++ /dev/null
@@ -1,1 +0,0 @@
-__import__('pkg_resources').declare_namespace(__name__)
diff --git a/setupext_janitor/__init__.py b/setupext_janitor/__init__.py
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/setupext_janitor/__init__.py
@@ -0,0 +1,1 @@
+
| dave-shawley/setupext-janitor | 801d4e51b10c8880be16c99fd6316051808141fa | diff --git a/setupext/janitor.py b/setupext_janitor/janitor.py
similarity index 100%
rename from setupext/janitor.py
rename to setupext_janitor/janitor.py
diff --git a/tests.py b/tests.py
index 39af958..a1d6b72 100644
--- a/tests.py
+++ b/tests.py
@@ -12,7 +12,7 @@ if sys.version_info >= (2, 7):
else: # noinspection PyPackageRequirements,PyUnresolvedReferences
import unittest2 as unittest
-from setupext import janitor
+from setupext_janitor import janitor
def run_setup(*command_line):
| Breaks matplotlib's setup.py
It appears that matplotlib has a setupext.py file, for whatever reason, and that breaks if setupext-janitor is installed.
Is the setupext nspackage thing really necessary?
| 0.0 | [
"tests.py::DistDirectoryCleanupTests::test_that_multiple_dist_directories_with_be_removed",
"tests.py::DistDirectoryCleanupTests::test_that_dist_directory_is_removed_for_bdist_dumb",
"tests.py::DistDirectoryCleanupTests::test_that_directories_are_not_removed_without_parameter",
"tests.py::DistDirectoryCleanupTests::test_that_dist_directory_is_removed_for_sdist",
"tests.py::DistDirectoryCleanupTests::test_that_directories_are_not_removed_in_dry_run_mode"
] | [
"tests.py::CommandOptionTests::test_that_distutils_options_are_present",
"tests.py::CommandOptionTests::test_that_janitor_user_options_are_not_clean_options",
"tests.py::CommandOptionTests::test_that_janitor_defines_dist_command",
"tests.py::RemoveAllTests::test_that_envdir_is_removed",
"tests.py::RemoveAllTests::test_that_distdir_is_removed",
"tests.py::RemoveAllTests::test_that_eggdir_is_removed",
"tests.py::RemoveAllTests::test_that_pycache_is_removed",
"tests.py::EggDirectoryCleanupTests::test_that_directories_are_not_removed_in_dry_run_mode",
"tests.py::EggDirectoryCleanupTests::test_that_egg_directories_are_removed",
"tests.py::EggDirectoryCleanupTests::test_that_egg_info_directories_are_removed",
"tests.py::PycacheCleanupTests::test_that_janitor_does_not_fail_when_cache_parent_is_removed"
] | 2015-11-16 22:59:02+00:00 | 1,850 |
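The patch resolves the clash by turning the `setupext` namespace package into a plain `setupext_janitor` package, so a project-local `setupext.py` such as matplotlib's no longer collides with the installed extension. Condensing the README example from the diff, a consumer's `setup.py` would now look roughly like this (the project metadata is a placeholder):

```python
import setuptools

try:
    # Renamed package: no collision with a local setupext.py module.
    from setupext_janitor import janitor
    CleanCommand = janitor.CleanCommand
except ImportError:
    CleanCommand = None

cmd_classes = {}
if CleanCommand is not None:
    cmd_classes["clean"] = CleanCommand

setuptools.setup(
    name="example-project",   # placeholder metadata
    version="0.0.0",
    setup_requires=["setupext_janitor"],
    cmdclass=cmd_classes,
)
```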
|
davidchall__topas2numpy-13 | diff --git a/topas2numpy/binned.py b/topas2numpy/binned.py
index dac1dfe..2b65a6d 100644
--- a/topas2numpy/binned.py
+++ b/topas2numpy/binned.py
@@ -47,15 +47,15 @@ class BinnedResult(object):
dimensions: list of BinnedDimension objects
data: dict of scored data
"""
- def __init__(self, filepath):
+ def __init__(self, filepath, dtype=float):
self.path = filepath
_, ext = os.path.splitext(self.path)
if ext == '.bin':
- self._read_binary()
+ self._read_binary(dtype)
elif ext == '.csv':
- self._read_ascii()
+ self._read_ascii(dtype)
- def _read_binary(self):
+ def _read_binary(self, dtype):
"""Reads data and metadata from binary format."""
# NOTE: binary files store binned data using Fortran-like ordering.
# Dimensions are iterated like z, y, x (so x changes fastest)
@@ -64,7 +64,7 @@ class BinnedResult(object):
with open(header_path) as f_header:
self._read_header(f_header.read())
- data = np.fromfile(self.path)
+ data = np.fromfile(self.path, dtype=dtype)
# separate data by statistic
data = data.reshape((len(self.statistics), -1), order='F')
@@ -76,7 +76,7 @@ class BinnedResult(object):
self.data = data
- def _read_ascii(self):
+ def _read_ascii(self, dtype):
"""Reads data and metadata from ASCII format."""
# NOTE: ascii files store binned data using C-like ordering.
# Dimensions are iterated like x, y, z (so z changes fastest)
@@ -88,7 +88,7 @@ class BinnedResult(object):
header_str += line
self._read_header(header_str)
- data = np.loadtxt(self.path, delimiter=',', unpack=True, ndmin=1)
+ data = np.loadtxt(self.path, dtype=dtype, delimiter=',', unpack=True, ndmin=1)
# separate data by statistic (neglecting bin columns when necessary)
n_dim = len(self.dimensions)
| davidchall/topas2numpy | f20177d6930798e317033ab0e66117bb65ee08d6 | diff --git a/tests/test_binned.py b/tests/test_binned.py
index 19e68b3..04e7a37 100644
--- a/tests/test_binned.py
+++ b/tests/test_binned.py
@@ -12,6 +12,9 @@ Tests for TOPAS binned reading.
import unittest
import os.path
+# third-party imports
+import numpy as np
+
# project imports
from topas2numpy import BinnedResult
@@ -55,6 +58,7 @@ class TestAscii1D(unittest.TestCase):
assert self.result.statistics[0] == 'Sum'
assert len(self.result.data) == 1
data = self.result.data['Sum']
+ assert data.dtype == np.float64
assert data.shape[0] == self.result.dimensions[0].n_bins
assert data.shape[1] == self.result.dimensions[1].n_bins
assert data.shape[2] == self.result.dimensions[2].n_bins
@@ -62,7 +66,7 @@ class TestAscii1D(unittest.TestCase):
class TestAscii2D(unittest.TestCase):
def setUp(self):
- self.result = BinnedResult(ascii_2d_path)
+ self.result = BinnedResult(ascii_2d_path, dtype=np.uint32)
def test_quantity(self):
assert self.result.quantity == 'SurfaceTrackCount'
@@ -88,6 +92,7 @@ class TestAscii2D(unittest.TestCase):
assert self.result.statistics[0] == 'Sum'
assert len(self.result.data) == 1
data = self.result.data['Sum']
+ assert data.dtype == np.uint32
assert data.shape[0] == self.result.dimensions[0].n_bins
assert data.shape[1] == self.result.dimensions[1].n_bins
assert data.shape[2] == self.result.dimensions[2].n_bins
| Detect best NumPy dtype (use unsigned int for SurfaceTrackCount)
When the TOPAS scorer mode is set to 'SurfaceTrackCount', the result is an integer. It would be best if the numpy dtype of the loaded data were set to an unsigned integer type in this case.
It seems this library loads such arrays as 'float64', which uses only 53 of the 64 bits to store the mantissa, meaning 11/64 bits are wasted in the 'SurfaceTrackCount' case, which unnecessarily increases file size.
It's also unexpected to receive a NumPy array of type 'float64' when the data consists of unsigned integers, so this may have a usability impact as well.
Is it possible to change this library so that it bases the data type on whether the scorer is 'SurfaceTrackCount' or something else? | 0.0 | [
"tests/test_binned.py::TestAscii2D::test_data",
"tests/test_binned.py::TestAscii2D::test_dimensions",
"tests/test_binned.py::TestAscii2D::test_quantity"
] | [
"tests/test_binned.py::TestAscii1D::test_data",
"tests/test_binned.py::TestAscii1D::test_dimensions",
"tests/test_binned.py::TestAscii1D::test_quantity",
"tests/test_binned.py::TestBinary1D::test_data",
"tests/test_binned.py::TestBinary1D::test_dimensions",
"tests/test_binned.py::TestBinary1D::test_quantity"
] | 2022-06-22 05:16:22+00:00 | 1,851 |
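With the `dtype` parameter added to `BinnedResult` above, a caller scoring `SurfaceTrackCount` can request an unsigned integer array directly while the default stays `float`. The file name below is a placeholder for a TOPAS scorer output:

```python
import numpy as np
from topas2numpy import BinnedResult

# SurfaceTrackCount results are integer counts, so ask for an unsigned dtype.
result = BinnedResult("SurfaceTrackCount.csv", dtype=np.uint32)  # placeholder path
counts = result.data["Sum"]
assert counts.dtype == np.uint32
```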
|
daviddrysdale__python-phonenumbers-182 | diff --git a/python/phonenumbers/phonenumberutil.py b/python/phonenumbers/phonenumberutil.py
index 8ff04afd..0948934d 100644
--- a/python/phonenumbers/phonenumberutil.py
+++ b/python/phonenumbers/phonenumberutil.py
@@ -3223,6 +3223,9 @@ class NumberParseException(UnicodeMixin, Exception):
self.error_type = error_type
self._msg = msg
+ def __reduce__(self):
+ return (type(self), (self.error_type, self._msg))
+
def __unicode__(self):
return unicod("(%s) %s") % (self.error_type, self._msg)
| daviddrysdale/python-phonenumbers | 5bcfeb70b6ae8d4ebe525999910f3180f6870807 | diff --git a/python/tests/phonenumberutiltest.py b/python/tests/phonenumberutiltest.py
index 592fec80..c9d2cedf 100755
--- a/python/tests/phonenumberutiltest.py
+++ b/python/tests/phonenumberutiltest.py
@@ -17,6 +17,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
+import pickle
import phonenumbers
from phonenumbers import PhoneNumber, PhoneMetadata
@@ -26,6 +27,7 @@ from phonenumbers import ValidationResult, NumberFormat, CountryCodeSource
from phonenumbers import region_code_for_country_code
# Access internal functions of phonenumberutil.py
from phonenumbers import phonenumberutil, shortnumberinfo
+from phonenumbers.phonenumberutil import NumberParseException
from phonenumbers.util import u, to_long
from .testmetadatatest import TestMetadataTestCase
@@ -3165,6 +3167,12 @@ class PhoneNumberUtilTest(TestMetadataTestCase):
'register': True})
self.assertTrue(phonenumbers.example_number_for_type('XY', PhoneNumberType.PERSONAL_NUMBER) is None)
+ def testPickledException(self):
+ err = NumberParseException(NumberParseException.TOO_SHORT_AFTER_IDD, 'hello world')
+ pickled = pickle.dumps(err)
+ recovered = pickle.loads(pickled)
+ self.assertEqual("%r" % err, "%r" % recovered)
+
def testCoverage(self):
# Python version extra tests
self.assertTrue(phonenumberutil._region_code_for_number_from_list(GB_NUMBER, ("XX",)) is None)
| NumberParseException is not picklable
Hi,
I noticed that `NumberParseException` is not picklable. It deviates from the `__init__` signature of `Exception`, which itself implements `__reduce__` such that only the `msg` argument is pickled. This program:
```python
import pickle
from phonenumbers.phonenumberutil import NumberParseException
error = NumberParseException(NumberParseException.TOO_SHORT_AFTER_IDD, 'hello world')
s = pickle.dumps(error)
again = pickle.loads(s)
```
produces the output:
```
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: __init__() missing 1 required positional argument: 'msg'
```
Adding a `__reduce__` method to `NumberParseException` seems to fix it:
```python
def __reduce__(self):
return (type(self), (self.error_type, self._msg))
``` | 0.0 | [
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testPickledException"
] | [
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testCanBeInternationallyDialled",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testConvertAlphaCharactersInNumber",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testCountryWithNoNumberDesc",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testCoverage",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testExtractPossibleNumber",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testFailedParseOnInvalidNumbers",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testFormatARNumber",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testFormatAUNumber",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testFormatBSNumber",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testFormatByPattern",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testFormatDENumber",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testFormatE164Number",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testFormatGBNumber",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testFormatITNumber",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testFormatInOriginalFormat",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testFormatMXNumber",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testFormatNumberForMobileDialing",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testFormatNumberWithExtension",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testFormatOutOfCountryCallingNumber",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testFormatOutOfCountryKeepingAlphaChars",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testFormatOutOfCountryWithInvalidRegion",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testFormatOutOfCountryWithPreferredIntlPrefix",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testFormatUSNumber",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testFormatWithCarrierCode",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testFormatWithPreferredCarrierCode",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testFrozenPhoneNumberImmutable",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testGetCountryCodeForRegion",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testGetCountryMobileToken",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testGetExampleNumber",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testGetExampleNumberForNonGeoEntity",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testGetExampleNumberWithoutRegion",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testGetInstanceLoadARMetadata",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testGetInstanceLoadDEMetadata",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testGetInstanceLoadInternationalTollFreeMetadata",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testGetInstanceLoadUSMetadata",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testGetInvalidExampleNumber",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testGetLengthOfGeographicalAreaCode",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testGetLengthOfNationalDestinationCode",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testGetNationalDiallingPrefixForRegion",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testGetNationalSignificantNumber",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testGetNationalSignificantNumber_ManyLeadingZeros",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testGetRegionCodeForCountryCode",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testGetRegionCodeForNumber",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testGetRegionCodesForCountryCode",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testGetSupportedCallingCodes",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testGetSupportedGlobalNetworkCallingCodes",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testGetSupportedTypesForNonGeoEntity",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testGetSupportedTypesForRegion",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testIsAlphaNumber",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testIsFixedLine",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testIsFixedLineAndMobile",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testIsMobile",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testIsMobileNumberPortableRegion",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testIsNANPACountry",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testIsNotPossibleNumber",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testIsNotValidNumber",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testIsNumberGeographical",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testIsNumberMatchAcceptsProtoDefaultsAsMatch",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testIsNumberMatchIgnoresSomeFields",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testIsNumberMatchMatches",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testIsNumberMatchMatchesDiffLeadingZerosIfItalianLeadingZeroFalse",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testIsNumberMatchNonMatches",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testIsNumberMatchNsnMatches",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testIsNumberMatchShortMatchIfDiffNumLeadingZeros",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testIsNumberMatchShortNsnMatches",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testIsPersonalNumber",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testIsPossibleNumber",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testIsPossibleNumberForTypeWithReason_DataMissingForSizeReasons",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testIsPossibleNumberForTypeWithReason_DifferentTypeLengths",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testIsPossibleNumberForTypeWithReason_FixedLineOrMobile",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testIsPossibleNumberForTypeWithReason_LocalOnly",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testIsPossibleNumberForTypeWithReason_NumberTypeNotSupportedForRegion",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testIsPossibleNumberForType_DataMissingForSizeReasons",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testIsPossibleNumberForType_DifferentTypeLengths",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testIsPossibleNumberForType_LocalOnly",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testIsPossibleNumberForType_NumberTypeNotSupportedForRegion",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testIsPossibleNumberWithReason",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testIsPremiumRate",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testIsSharedCost",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testIsTollFree",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testIsUnknown",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testIsValidForRegion",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testIsValidNumber",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testIsViablePhoneNumber",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testIsViablePhoneNumberNonAscii",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testIsVoip",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testMaybeExtractCountryCode",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testMaybeStripInternationalPrefix",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testMaybeStripNationalPrefix",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testMetadataAsString",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testMetadataEquality",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testMetadataEval",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testMetadataImmutable",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testNormaliseOtherDigits",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testNormaliseRemovePunctuation",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testNormaliseReplaceAlphaCharacters",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testNormaliseStripAlphaCharacters",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testNormaliseStripNonDiallableCharacters",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testParseAndKeepRaw",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testParseExtensions",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testParseHandlesLongExtensionsWithAutoDiallingLabels",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testParseHandlesLongExtensionsWithExplicitLabels",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testParseHandlesShortExtensionsWhenNotSureOfLabel",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testParseHandlesShortExtensionsWithAmbiguousChar",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testParseItalianLeadingZeros",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testParseMaliciousInput",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testParseNationalNumber",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testParseNationalNumberArgentina",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testParseNonAscii",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testParseNumberTooShortIfNationalPrefixStripped",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testParseNumberWithAlphaCharacters",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testParseNumbersMexico",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testParseNumbersWithPlusWithNoRegion",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testParseWithInternationalPrefixes",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testParseWithLeadingZero",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testParseWithXInNumber",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testSupportedRegions",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testTruncateTooLongNumber",
"python/tests/phonenumberutiltest.py::PhoneNumberUtilTest::testUnknownCountryCallingCode"
] | 2020-12-15 19:03:52+00:00 | 1,852 |
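The `__reduce__` added above lets the exception survive a pickle round trip, mirroring the check in the new test:

```python
import pickle

from phonenumbers.phonenumberutil import NumberParseException

err = NumberParseException(NumberParseException.TOO_SHORT_AFTER_IDD, "hello world")
restored = pickle.loads(pickle.dumps(err))  # raised TypeError before the fix
assert repr(restored) == repr(err)
```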
|
davidesarra__jupyter_spaces-15 | diff --git a/README.md b/README.md
index 398cbb9..222a688 100644
--- a/README.md
+++ b/README.md
@@ -37,7 +37,7 @@ Reloading the extension will remove all spaces.
```python
%%space <space-name>
alpha = 0.50
-print(alpha)
+alpha
```
When you execute a cell within a space, all references are firstly searched in
@@ -51,33 +51,6 @@ execution equivalent to not using such keyword.
Mutable objects in the user namespace can be altered (e.g. appending an item
to a list).
-#### Console output
-
-Conversely to the standard usage of the Python console, you need to print
-objects explicitly (e.g. by using `print`).
-
-- No output to console
- ```python
- %%space <space-name>
- 100
- ```
-- Output to console
- ```python
- %%space <space-name>
- print(100)
- ```
-
-If you want IPython to use more advanced representations, you can do so via
-IPython's display library (e.g. display a Pandas dataframe as a HTML table).
-
-```python
-%%space <space-name>
-from IPython.display import display
-from pandas import DataFrame
-dataframe = DataFrame(data=[[1, 2]])
-display(dataframe)
-```
-
### Remove a space
```python
diff --git a/jupyter_spaces/space.py b/jupyter_spaces/space.py
index 476a7a0..7c3b89e 100644
--- a/jupyter_spaces/space.py
+++ b/jupyter_spaces/space.py
@@ -1,3 +1,6 @@
+import ast
+import sys
+
from jupyter_spaces.errors import RegistryError
@@ -101,7 +104,16 @@ class Space:
Args:
source (str): Source code.
"""
- exec(source, self._execution_namespace)
+ tree = ast.parse(source=source)
+
+ interactive_tree = ast.Interactive(body=tree.body)
+ if sys.version_info > (3, 8):
+ interactive_tree.type_ignores = tree.type_ignores
+
+ compiled_interactive_tree = compile(
+ source=interactive_tree, filename="<string>", mode="single"
+ )
+ exec(compiled_interactive_tree, self._execution_namespace)
class ExecutionNamespace(dict):
| davidesarra/jupyter_spaces | 035e984dab3d6b29e46db982619a376c3c293b6a | diff --git a/tests/test_magics.py b/tests/test_magics.py
index e24cc1b..22da064 100644
--- a/tests/test_magics.py
+++ b/tests/test_magics.py
@@ -152,6 +152,11 @@ def test_get_spaces_reflects_extension_reload(ip):
assert not ip.user_global_ns["spaces"]
+def test_space_outputs_to_console(ip, capsys):
+ ip.run_cell_magic(magic_name="space", line="tomato", cell="100")
+ assert capsys.readouterr().out == "100\n"
+
+
def test_space_can_print_to_console(ip):
with capture_output() as captured:
ip.run_cell_magic(magic_name="space", line="tomato", cell="print(100)")
| Automatic console output
It would be great if space cells automatically printed their return result, just as a normal cell does. Having to use the print statement for output is cumbersome. | 0.0 | [
"tests/test_magics.py::test_space_outputs_to_console"
] | [
"tests/test_magics.py::test_space_can_access_user_namespace_references",
"tests/test_magics.py::test_space_references_prioritized_over_user_namespace_references",
"tests/test_magics.py::test_space_cannot_alter_user_namespace_immutable_references",
"tests/test_magics.py::test_space_can_alter_user_namespace_mutable_references",
"tests/test_magics.py::test_space_cannot_alter_user_namespace_references_using_global",
"tests/test_magics.py::test_space_cannot_remove_user_namespace_references",
"tests/test_magics.py::test_space_cannot_remove_user_namespace_references_using_global",
"tests/test_magics.py::test_space_cannot_add_user_namespace_references",
"tests/test_magics.py::test_space_cannot_add_user_namespace_references_using_global",
"tests/test_magics.py::test_space_reference_assignments_persist_in_new_magic_call",
"tests/test_magics.py::test_space_reference_deletions_persist_in_new_magic_call",
"tests/test_magics.py::test_space_references_assignments_are_confined_in_one_space_only",
"tests/test_magics.py::test_space_references_deletions_are_confined_in_one_space_only",
"tests/test_magics.py::test_space_can_execute_newly_defined_lambda_functions",
"tests/test_magics.py::test_space_can_execute_newly_defined_functions",
"tests/test_magics.py::test_space_can_execute_top_level_non_closure_functions",
"tests/test_magics.py::test_get_spaces_can_access_space_references",
"tests/test_magics.py::test_get_spaces_can_alter_space_references",
"tests/test_magics.py::test_get_spaces_can_remove_space_references",
"tests/test_magics.py::test_get_spaces_reflects_space_references_changes",
"tests/test_magics.py::test_get_spaces_reflects_space_removal",
"tests/test_magics.py::test_get_spaces_reflects_extension_reload",
"tests/test_magics.py::test_space_can_print_to_console"
] | 2020-11-22 00:38:28+00:00 | 1,853 |
|
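
The jupyter_spaces patch above gets REPL-style echoing by re-compiling the cell body as an `ast.Interactive` node in `'single'` mode, which routes the value of each bare expression through `sys.displayhook`. A minimal sketch of that technique follows; the helper name, the `ns` dict, and the sample source are placeholders, not part of the project.

```python
import ast
import sys


def exec_with_echo(source, namespace):
    """Execute source so bare expressions are echoed, as in an interactive shell."""
    tree = ast.parse(source)  # normal parse, so multiple statements are allowed
    interactive = ast.Interactive(body=tree.body)
    if sys.version_info > (3, 8):
        # Mirrors the patch: carry type_ignores over on newer Pythons.
        interactive.type_ignores = tree.type_ignores
    # 'single' mode emits bytecode that calls sys.displayhook on expression values.
    code = compile(interactive, filename="<string>", mode="single")
    exec(code, namespace)


ns = {}
exec_with_echo("alpha = 0.50\nalpha", ns)  # prints: 0.5
```

This is why the README example can drop `print(alpha)` in favour of a bare `alpha`.
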
dbcli__athenacli-42 | diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
new file mode 100644
index 0000000..4f20546
--- /dev/null
+++ b/.github/pull_request_template.md
@@ -0,0 +1,7 @@
+## Description
+<!--- Describe your changes in detail. -->
+
+## Checklist
+<!--- We appreciate your help and want to give you credit. Please take a moment to put an `x` in the boxes below as you complete them. -->
+- [ ] I've added this contribution to the `changelog.md`.
+- [ ] I've added my name to the `AUTHORS` file (or it's already there).
diff --git a/AUTHORS.rst b/AUTHORS.rst
index 93b5168..58ee8a4 100644
--- a/AUTHORS.rst
+++ b/AUTHORS.rst
@@ -14,6 +14,7 @@ Contributors:
* Joe Block
* Jash Gala
* Hourann
+ * Paul Gross
Creator:
--------
diff --git a/athenacli/packages/completion_engine.py b/athenacli/packages/completion_engine.py
index d4e7281..86b9abb 100644
--- a/athenacli/packages/completion_engine.py
+++ b/athenacli/packages/completion_engine.py
@@ -290,7 +290,7 @@ def suggest_based_on_last_token(token, text_before_cursor, full_text, identifier
if parent:
# "ON parent.<suggestion>"
# parent can be either a schema name or table alias
- tables = tuple(t for t in tables if identifies(parent, t))
+ tables = tuple(t for t in tables if identifies(parent, *t))
return (
Column(tables=tables),
Table(schema=parent),
diff --git a/changelog.md b/changelog.md
index b0eb065..fe2b505 100644
--- a/changelog.md
+++ b/changelog.md
@@ -1,6 +1,10 @@
(Unreleased; add upcoming change notes here)
==============================================
+Bugfix
+----------
+* Fix bug when completing `ON parent.` clauses. (Thanks @pgr0ss)
+
1.1.2
========
diff --git a/docs/develop.rst b/docs/develop.rst
index 6c5d66d..084a312 100644
--- a/docs/develop.rst
+++ b/docs/develop.rst
@@ -20,12 +20,22 @@ The installation instructions in the README file are intended for users of athen
It is highly recommended to use virtualenv for development. If you don't know what a virtualenv is, `this guide <https://docs.python-guide.org/dev/virtualenvs/#virtual-environments>`_ will help you get started.
-Create a virtualenv (let's call it athenacli-dev). Activate it:
+Create a virtualenv (let's call it athenacli-dev):
+
+.. code-block:: bash
+
+ $ virtualenv athenacli-dev
+
+Activate it:
+
+.. code-block:: bash
$ source ./athenacli-dev/bin/activate
+
Once the virtualenv is activated, cd into the local clone of athenacli folder and install athenacli using pip as follows:
.. code-block:: bash
+
$ pip install -e .
This will install the necessary dependencies as well as install athenacli from the working folder into a virtualenv. Athenacli is installed in an editable way, so any changes made to the code is immediately available in the installed version of athenacli. This makes it easy to change something in the code, launch athenacli and check the effects of your change.
@@ -38,6 +48,7 @@ Currently we don't have enough tests for athenacli, because we haven't found an
First, install the requirements for testing:
.. code-block:: bash
+
$ pip install -r requirements-dev.txt
After that, tests can be run with:
| dbcli/athenacli | f413a24662cca6923b13563dedb7565e1e2b31a9 | diff --git a/test/test_completion_engine.py b/test/test_completion_engine.py
index 587fa6b..3a3c24a 100644
--- a/test/test_completion_engine.py
+++ b/test/test_completion_engine.py
@@ -2,7 +2,7 @@ import os
import pytest
from athenacli.packages.completion_engine import (
- suggest_type, Column, Function, Alias, Keyword
+ suggest_type, Column, Function, Alias, Keyword, Table, View
)
@@ -28,6 +28,15 @@ def test_select_suggests_cols_with_qualified_table_scope():
Alias(aliases=['tabl']),
Keyword(last_token='SELECT'))
+def test_join_suggests_cols_with_qualified_table_scope():
+ expression = 'SELECT * FROM tabl a JOIN tabl b on a.'
+ suggestions = suggest_type(expression, expression)
+
+ assert suggestions == (
+ Column(tables=((None, 'tabl', 'a'),), drop_unique=None),
+ Table(schema='a'),
+ View(schema='a'),
+ Function(schema='a', filter=None))
@pytest.mark.parametrize('expression', [
'SELECT * FROM tabl WHERE ',
| Error when typing a query
I started writing this query:
```
SELECT d.* FROM doodads d JOIN trinkets t on d.
```
## `JOIN` doesn't appear
`JOIN` did not auto-complete as an option (I only saw `JSON`)
## Unhandled Exception
When I type the `.` at the end (I'm trying to join on `d.trinket_id = doodad.id`) I get the following output:
```
Unhandled exception in event loop:
File "/home/admin/.local/lib/python3.7/site-packages/prompt_toolkit/eventloop/coroutine.py", line 92, in step_next
new_f = coroutine.throw(exc)
File "/home/admin/.local/lib/python3.7/site-packages/prompt_toolkit/buffer.py", line 1654, in new_coroutine
yield From(coroutine(*a, **kw))
File "/home/admin/.local/lib/python3.7/site-packages/prompt_toolkit/eventloop/coroutine.py", line 92, in step_next
new_f = coroutine.throw(exc)
File "/home/admin/.local/lib/python3.7/site-packages/prompt_toolkit/buffer.py", line 1506, in async_completer
cancel=lambda: not proceed()))
File "/home/admin/.local/lib/python3.7/site-packages/prompt_toolkit/eventloop/coroutine.py", line 88, in step_next
new_f = coroutine.send(None)
File "/home/admin/.local/lib/python3.7/site-packages/prompt_toolkit/eventloop/async_generator.py", line 117, in consume_async_generator
item = iterator.send(send)
File "/home/admin/.local/lib/python3.7/site-packages/prompt_toolkit/completion/base.py", line 176, in get_completions_async
for item in self.get_completions(document, complete_event):
File "/home/admin/.local/lib/python3.7/site-packages/athenacli/completer.py", line 210, in get_completions
suggestions = suggest_type(document.text, document.text_before_cursor)
File "/home/admin/.local/lib/python3.7/site-packages/athenacli/packages/completion_engine.py", line 121, in suggest_type
full_text, identifier)
File "/home/admin/.local/lib/python3.7/site-packages/athenacli/packages/completion_engine.py", line 293, in suggest_based_on_last_token
tables = tuple(t for t in tables if identifies(parent, t))
File "/home/admin/.local/lib/python3.7/site-packages/athenacli/packages/completion_engine.py", line 293, in <genexpr>
tables = tuple(t for t in tables if identifies(parent, t))
Exception identifies() missing 2 required positional arguments: 'table' and 'alias'
```
I also tried this without the table aliases, e.g.:
```
SELECT doodads.* FROM doodads JOIN trinkets on doodads.
```
and got the same "unhandled exception" error when typing the `.` at the end. | 0.0 | [
"test/test_completion_engine.py::test_join_suggests_cols_with_qualified_table_scope"
] | [
"test/test_completion_engine.py::test_select_suggests_cols_with_visible_table_scope",
"test/test_completion_engine.py::test_select_suggests_cols_with_qualified_table_scope",
"test/test_completion_engine.py::test_where_suggests_columns_functions[SELECT"
] | 2020-02-14 21:42:59+00:00 | 1,854 |
|
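
The one-line fix in the completion-engine patch above is a tuple-unpacking issue: `tables` holds `(schema, table, alias)` triples (e.g. `(None, 'tabl', 'a')` in the new test), while `identifies()` expects them as separate arguments. A stripped-down illustration is below; only the signature of `identifies()` is confirmed by the traceback, its body here is an assumed simplification.

```python
def identifies(id_, schema, table, alias):
    # Simplified stand-in for the real helper: does `id_` name this table?
    return id_ == alias or id_ == table or (
        schema is not None and id_ == "{}.{}".format(schema, table)
    )


tables = [(None, "tabl", "a"), (None, "tabl", "b")]
parent = "a"  # the alias typed before the dot, as in "... ON a."

# Buggy call: identifies(parent, t) passes the whole triple as `schema`,
# leaving `table` and `alias` missing -> the TypeError in the traceback.
# Fixed call: unpack the triple into the remaining positional arguments.
matching = tuple(t for t in tables if identifies(parent, *t))
print(matching)  # ((None, 'tabl', 'a'),)
```
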
dbcli__athenacli-50 | diff --git a/athenacli/packages/format_utils.py b/athenacli/packages/format_utils.py
new file mode 100644
index 0000000..8450913
--- /dev/null
+++ b/athenacli/packages/format_utils.py
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+
+
+def format_status(rows_length=None, cursor=None):
+ return rows_status(rows_length) + statistics(cursor)
+
+def rows_status(rows_length):
+ if rows_length:
+ return '%d row%s in set' % (rows_length, '' if rows_length == 1 else 's')
+ else:
+ return 'Query OK'
+
+def statistics(cursor):
+ if cursor:
+ # Most regions are $5 per TB: https://aws.amazon.com/athena/pricing/
+ approx_cost = cursor.data_scanned_in_bytes / (1024 ** 4) * 5
+
+ return '\nExecution time: %d ms, Data scanned: %s, Approximate cost: $%.2f' % (
+ cursor.execution_time_in_millis,
+ humanize_size(cursor.data_scanned_in_bytes),
+ approx_cost)
+ else:
+ return ''
+
+def humanize_size(num_bytes):
+ suffixes = ['B', 'KB', 'MB', 'GB', 'TB']
+
+ suffix_index = 0
+ while num_bytes >= 1024 and suffix_index < len(suffixes) - 1:
+ num_bytes /= 1024.0
+ suffix_index += 1
+
+ num = ('%.2f' % num_bytes).rstrip('0').rstrip('.')
+ return '%s %s' % (num, suffixes[suffix_index])
diff --git a/athenacli/sqlexecute.py b/athenacli/sqlexecute.py
index 3a0d8c5..d560ecd 100644
--- a/athenacli/sqlexecute.py
+++ b/athenacli/sqlexecute.py
@@ -5,6 +5,7 @@ import sqlparse
import pyathena
from athenacli.packages import special
+from athenacli.packages.format_utils import format_status
logger = logging.getLogger(__name__)
@@ -92,11 +93,11 @@ class SQLExecute(object):
if cursor.description is not None:
headers = [x[0] for x in cursor.description]
rows = cursor.fetchall()
- status = '%d row%s in set' % (len(rows), '' if len(rows) == 1 else 's')
+ status = format_status(rows_length=len(rows), cursor=cursor)
else:
logger.debug('No rows in result.')
rows = None
- status = 'Query OK'
+ status = format_status(rows_length=None, cursor=cursor)
return (title, rows, headers, status)
def tables(self):
diff --git a/changelog.md b/changelog.md
index 22d8273..cafd121 100644
--- a/changelog.md
+++ b/changelog.md
@@ -1,6 +1,13 @@
(Unreleased; add upcoming change notes here)
==============================================
+1.3.0
+========
+
+Features
+----------
+* Show query execution statistics, such as the amount of data scanned and the approximate cost. (Thanks: @pgr0ss)
+
1.2.0
========
| dbcli/athenacli | c0e6869d7c023c3c2a2bda7f40f09286f5d0c0c0 | diff --git a/test/test_format_utils.py b/test/test_format_utils.py
new file mode 100644
index 0000000..abe8d75
--- /dev/null
+++ b/test/test_format_utils.py
@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+
+
+from collections import namedtuple
+from athenacli.packages.format_utils import format_status, humanize_size
+
+
+def test_format_status_plural():
+ assert format_status(rows_length=1) == "1 row in set"
+ assert format_status(rows_length=2) == "2 rows in set"
+
+def test_format_status_no_results():
+ assert format_status(rows_length=None) == "Query OK"
+
+def test_format_status_with_stats():
+ FakeCursor = namedtuple("FakeCursor", ["execution_time_in_millis", "data_scanned_in_bytes"])
+
+ assert format_status(rows_length=1, cursor=FakeCursor(10, 12345678900)) == "1 row in set\nExecution time: 10 ms, Data scanned: 11.5 GB, Approximate cost: $0.06"
+ assert format_status(rows_length=2, cursor=FakeCursor(1000, 1234)) == "2 rows in set\nExecution time: 1000 ms, Data scanned: 1.21 KB, Approximate cost: $0.00"
+
+def test_humanize_size():
+ assert humanize_size(20) == "20 B"
+ assert humanize_size(2000) == "1.95 KB"
+ assert humanize_size(200000) == "195.31 KB"
+ assert humanize_size(20000000) == "19.07 MB"
+ assert humanize_size(200000000000) == "186.26 GB"
| Feature request: output cost and size of query
Something like:
```python
submission_date = stats['QueryExecution']['Status']['SubmissionDateTime']
completion_date = stats['QueryExecution']['Status']['CompletionDateTime']
execution_time = stats['QueryExecution']['Statistics']['EngineExecutionTimeInMillis']
data_scanned = stats['QueryExecution']['Statistics']['DataScannedInBytes']
query_cost = data_scanned / 1000000000000.0 * 5.0

print('Time: {}, CPU Time: {}ms total, Data Scanned: {}, Cost: ${:,.2f}\n'.format(
    str(completion_date - submission_date).split('.')[0],
    execution_time,
    human_readable(data_scanned),
    query_cost
))
```
from https://github.com/guardian/athena-cli/blob/0d5cc98a1cac21094077ed29cf168686ca61f10d/athena_cli.py#L217-L228
(Currently we only print `Time`.) | 0.0 | [
"test/test_format_utils.py::test_format_status_plural",
"test/test_format_utils.py::test_format_status_no_results",
"test/test_format_utils.py::test_format_status_with_stats",
"test/test_format_utils.py::test_humanize_size"
] | [] | 2020-04-18 00:03:20+00:00 | 1,855 |
|
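
To make the figures asserted in the new tests concrete, here is the cost arithmetic from `statistics()` applied to the 12,345,678,900-byte example, together with `humanize_size()` copied verbatim from the patch; the $5/TB rate and the `1024**4` divisor also come straight from the patch.

```python
def humanize_size(num_bytes):
    # Helper from the patch: scale bytes down by 1024 and trim trailing zeros.
    suffixes = ['B', 'KB', 'MB', 'GB', 'TB']
    suffix_index = 0
    while num_bytes >= 1024 and suffix_index < len(suffixes) - 1:
        num_bytes /= 1024.0
        suffix_index += 1
    num = ('%.2f' % num_bytes).rstrip('0').rstrip('.')
    return '%s %s' % (num, suffixes[suffix_index])


data_scanned_in_bytes = 12345678900
approx_cost = data_scanned_in_bytes / (1024 ** 4) * 5  # ~0.0561

print(humanize_size(data_scanned_in_bytes))     # 11.5 GB
print('Approximate cost: $%.2f' % approx_cost)  # Approximate cost: $0.06
```
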
dbcli__litecli-113 | diff --git a/CHANGELOG.md b/CHANGELOG.md
index b8c8122..0ccb53a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,7 +1,5 @@
## Unreleased - TBD
-<!-- add upcoming change notes here -->
-
### Features
- Add verbose feature to `favorite_query` command. (Thanks: [Zhaolong Zhu])
@@ -12,6 +10,7 @@
### Bug Fixes
- Fix compatibility with sqlparse >= 0.4.0. (Thanks: [chocolateboy])
+- Fix invalid utf-8 exception. (Thanks: [Amjith])
## 1.4.1 - 2020-07-27
diff --git a/litecli/sqlexecute.py b/litecli/sqlexecute.py
index 7ef103c..93acd91 100644
--- a/litecli/sqlexecute.py
+++ b/litecli/sqlexecute.py
@@ -17,6 +17,13 @@ _logger = logging.getLogger(__name__)
# })
+def utf8_resilient_decoder(s):
+ try:
+ return s.decode("utf-8")
+ except UnicodeDecodeError:
+ return s.decode("latin-1")
+
+
class SQLExecute(object):
databases_query = """
@@ -61,6 +68,7 @@ class SQLExecute(object):
raise Exception("Path does not exist: {}".format(db_dir_name))
conn = sqlite3.connect(database=db_name, isolation_level=None)
+ conn.text_factory = utf8_resilient_decoder
if self.conn:
self.conn.close()
| dbcli/litecli | a1a01c11d6154b6f841b81fbdeb6b8b887b697d3 | diff --git a/tests/test_sqlexecute.py b/tests/test_sqlexecute.py
index 03e7f19..0ddfb8f 100644
--- a/tests/test_sqlexecute.py
+++ b/tests/test_sqlexecute.py
@@ -101,6 +101,17 @@ def test_unicode_support_in_output(executor):
assert_result_equal(results, headers=["t"], rows=[(u"é",)])
+@dbtest
+def test_invalid_unicode_values_dont_choke(executor):
+ run(executor, "create table unicodechars(t text)")
+ # \xc3 is not a valid utf-8 char. But we can insert it into the database
+ # which can break querying if not handled correctly.
+ run(executor, u"insert into unicodechars (t) values (cast(x'c3' as text))")
+
+ results = run(executor, u"select * from unicodechars")
+ assert_result_equal(results, headers=["t"], rows=[(u"Ã",)])
+
+
@dbtest
def test_multiple_queries_same_line(executor):
results = run(executor, "select 'foo'; select 'bar'")
@@ -199,11 +210,7 @@ def test_verbose_feature_of_favorite_query(executor):
results = run(executor, "\\f sh_param 1")
assert_result_equal(
- results,
- title=None,
- headers=["a", "id"],
- rows=[("abc", 1)],
- auto_status=False,
+ results, title=None, headers=["a", "id"], rows=[("abc", 1)], auto_status=False,
)
results = run(executor, "\\f+ sh_param 1")
| TabularOutputFormatter chokes on values that can't be converted to UTF-8 if it's a text column
I don't know how to solve this. If there is a single record with a non-convertible column, it won't print anything.
`Could not decode to UTF-8 column 'verifier' with text '`'���U'`
sqlite3 prints
```
2bho15FMrSQhKAYnjBqRQ1x4LS3zcHANsRjKMJyiSwT9|GnyZktv2SaCehfNCGjoYdAgAirxpCjvBCUXH6MiEHEH7
`'ŜU|`'ŜU
```
Seeing this was actually helpful because it notified me that I had garbage data, but I still would've thought the other rows would print. | 0.0 | [
"tests/test_sqlexecute.py::test_invalid_unicode_values_dont_choke"
] | [
"tests/test_sqlexecute.py::test_conn",
"tests/test_sqlexecute.py::test_bools",
"tests/test_sqlexecute.py::test_binary",
"tests/test_sqlexecute.py::test_database_list",
"tests/test_sqlexecute.py::test_invalid_syntax",
"tests/test_sqlexecute.py::test_invalid_column_name",
"tests/test_sqlexecute.py::test_unicode_support_in_output",
"tests/test_sqlexecute.py::test_multiple_queries_same_line",
"tests/test_sqlexecute.py::test_multiple_queries_same_line_syntaxerror",
"tests/test_sqlexecute.py::test_favorite_query",
"tests/test_sqlexecute.py::test_bind_parameterized_favorite_query",
"tests/test_sqlexecute.py::test_verbose_feature_of_favorite_query",
"tests/test_sqlexecute.py::test_shell_parameterized_favorite_query",
"tests/test_sqlexecute.py::test_favorite_query_multiple_statement",
"tests/test_sqlexecute.py::test_favorite_query_expanded_output",
"tests/test_sqlexecute.py::test_special_command",
"tests/test_sqlexecute.py::test_cd_command_without_a_folder_name",
"tests/test_sqlexecute.py::test_system_command_not_found",
"tests/test_sqlexecute.py::test_system_command_output",
"tests/test_sqlexecute.py::test_cd_command_current_dir",
"tests/test_sqlexecute.py::test_unicode_support",
"tests/test_sqlexecute.py::test_timestamp_null",
"tests/test_sqlexecute.py::test_datetime_null",
"tests/test_sqlexecute.py::test_date_null",
"tests/test_sqlexecute.py::test_time_null"
] | 2021-03-14 20:14:14+00:00 | 1,856 |
|
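
The fix above swaps sqlite3's default TEXT decoding for a fallback `text_factory`: when a stored value is not valid UTF-8, decoding it as latin-1 (which accepts every byte) keeps the row printable instead of aborting the whole result set. A minimal standalone reproduction of the `x'c3'` case from the new test is below; the in-memory connection is an assumption for the sketch, litecli itself opens a database file.

```python
import sqlite3


def utf8_resilient_decoder(s):
    try:
        return s.decode("utf-8")
    except UnicodeDecodeError:
        # latin-1 maps every byte to a character, so this never fails.
        return s.decode("latin-1")


conn = sqlite3.connect(":memory:")
conn.text_factory = utf8_resilient_decoder

conn.execute("CREATE TABLE unicodechars (t TEXT)")
# \xc3 on its own is not valid UTF-8, but SQLite happily stores it as TEXT.
conn.execute("INSERT INTO unicodechars (t) VALUES (CAST(x'c3' AS TEXT))")

print(conn.execute("SELECT t FROM unicodechars").fetchall())  # [('Ã',)]
```
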
dbcli__mssql-cli-396 | diff --git a/build.py b/build.py
index d8c7ce3..8a82eca 100644
--- a/build.py
+++ b/build.py
@@ -173,6 +173,7 @@ def get_active_test_filepaths():
'tests/test_config.py '
'tests/test_naive_completion.py '
'tests/test_main.py '
+ 'tests/test_multiline.py '
'tests/test_fuzzy_completion.py '
'tests/test_rowlimit.py '
'tests/test_sqlcompletion.py '
diff --git a/mssqlcli/mssqlbuffer.py b/mssqlcli/mssqlbuffer.py
index 581010c..d0fdced 100644
--- a/mssqlcli/mssqlbuffer.py
+++ b/mssqlcli/mssqlbuffer.py
@@ -1,5 +1,6 @@
from __future__ import unicode_literals
-
+import re
+import sqlparse
from prompt_toolkit.enums import DEFAULT_BUFFER
from prompt_toolkit.filters import Condition
from prompt_toolkit.application import get_app
@@ -21,10 +22,31 @@ def mssql_is_multiline(mssql_cli):
def _is_complete(sql):
- # A complete command is an sql statement that ends with a semicolon, unless
+ # A complete command is an sql statement that ends with a 'GO', unless
# there's an open quote surrounding it, as is common when writing a
# CREATE FUNCTION command
- return sql.endswith(';') and not is_open_quote(sql)
+ if sql is not None and sql != "":
+ # remove comments
+ sql = sqlparse.format(sql, strip_comments=True)
+
+ # check for open comments
+ # remove all closed quotes to isolate instances of open comments
+ sql_no_quotes = re.sub(r'".*?"|\'.*?\'', '', sql)
+ is_open_comment = len(re.findall(r'\/\*', sql_no_quotes)) > 0
+
+ # check that 'go' is only token on newline
+ lines = sql.split('\n')
+ lastline = lines[len(lines) - 1].lower().strip()
+ is_valid_go_on_lastline = lastline == 'go'
+
+ # check that 'go' is on last line, not in open quotes, and there's no open
+ # comment with closed comments and quotes removed.
+ # NOTE: this method fails when GO follows a closing '*/' block comment on the same line,
+ # we've taken a dependency with sqlparse
+ # (https://github.com/andialbrecht/sqlparse/issues/484)
+ return not is_open_quote(sql) and not is_open_comment and is_valid_go_on_lastline
+
+ return False
def _multiline_exception(text):
diff --git a/mssqlcli/mssqlcliclient.py b/mssqlcli/mssqlcliclient.py
index d7019cb..f4e26b0 100644
--- a/mssqlcli/mssqlcliclient.py
+++ b/mssqlcli/mssqlcliclient.py
@@ -230,7 +230,8 @@ class MssqlCliClient:
query_has_exception = query_response.exception_message
query_has_error_messages = query_messages[0].is_error if query_messages else False
query_has_batch_error = query_response.batch_summaries[0].has_error \
- if hasattr(query_response, 'batch_summaries') else False
+ if hasattr(query_response, 'batch_summaries') \
+ and len(query_response.batch_summaries) > 0 else False
query_failed = query_has_exception or query_has_batch_error or query_has_error_messages
@@ -277,7 +278,8 @@ class MssqlCliClient:
@staticmethod
def _no_results_found_in(query_response):
- return not query_response.batch_summaries[0].result_set_summaries
+ return not query_response.batch_summaries \
+ or not query_response.batch_summaries[0].result_set_summaries
@staticmethod
def _no_rows_found_in(query_response):
diff --git a/mssqlcli/mssqlclirc b/mssqlcli/mssqlclirc
index 75e8b38..fc8deeb 100644
--- a/mssqlcli/mssqlclirc
+++ b/mssqlcli/mssqlclirc
@@ -10,13 +10,13 @@ smart_completion = True
wider_completion_menu = False
# Multi-line mode allows breaking up the sql statements into multiple lines. If
-# this is set to True, then the end of the statements must have a semi-colon.
+# this is set to True, then the end of the statements must have 'GO'.
# If this is set to False then sql statements can't be split into multiple
# lines. End of line (return) is considered as the end of the statement.
multi_line = False
# If multi_line_mode is set to "tsql", in multi-line mode, [Enter] will execute
-# the current input if the input ends in a semicolon.
+# the current input if the input ends in 'GO'.
# If multi_line_mode is set to "safe", in multi-line mode, [Enter] will always
# insert a newline, and [Esc] [Enter] or [Alt]-[Enter] must be used to execute
# a command.
diff --git a/mssqlcli/mssqltoolbar.py b/mssqlcli/mssqltoolbar.py
index 38cca71..7ee5e2d 100644
--- a/mssqlcli/mssqltoolbar.py
+++ b/mssqlcli/mssqltoolbar.py
@@ -39,7 +39,7 @@ def create_toolbar_tokens_func(mssql_cli):
if mssql_cli.multiline_mode == 'safe':
result.append((token, ' ([Esc] [Enter] to execute]) '))
else:
- result.append((token, ' (Semi-colon [;] will end the line) '))
+ result.append((token, ' ([GO] statement will end the line) '))
if mssql_cli.vi_mode:
result.append(
diff --git a/mssqlcli/packages/parseutils/utils.py b/mssqlcli/packages/parseutils/utils.py
index 1376019..1f14772 100644
--- a/mssqlcli/packages/parseutils/utils.py
+++ b/mssqlcli/packages/parseutils/utils.py
@@ -113,7 +113,7 @@ def is_open_quote(sql):
def _parsed_is_open_quote(parsed):
# Look for unmatched single quotes, or unmatched dollar sign quotes
- return any(tok.match(Token.Error, ("'", "$")) for tok in parsed.flatten())
+ return any(tok.match(Token.Error, ("'", '"', "$")) for tok in parsed.flatten())
def parse_partial_identifier(word):
| dbcli/mssql-cli | 341fead174a009474af31fd2e7849ea07b66b251 | diff --git a/tests/test_multiline.py b/tests/test_multiline.py
new file mode 100644
index 0000000..81577d0
--- /dev/null
+++ b/tests/test_multiline.py
@@ -0,0 +1,37 @@
+import pytest
+from mssqlcli.mssqlbuffer import _is_complete
+
+
+class TestMssqlCliMultiline:
+ testdata = [
+ (None, False),
+ ('', False),
+ ('select 1 /* open comment!\ngo', False),
+ ('select 1\ngo -- another comment', True),
+ ('select 1; select 2, "open quote: go', False),
+ ('select 1\n"go"', False),
+ ('select 1; GO', False),
+ ('SELECT 4;\nGO', True),
+ ('select 1\n select 2;\ngo', True),
+ ('select 1;', False),
+ ('select 1 go', False),
+ ('select 1\ngo go go', False),
+ ('GO select 1', False),
+ ('GO', True)
+ # tests below to be enabled when sqlparse supports retaining newlines
+ # when stripping comments (tracking here:
+ # https://github.com/andialbrecht/sqlparse/issues/484):
+ # ('select 3 /* another open comment\n*/ GO', True),
+ # ('select 1\n*/go', False),
+ # ('select 1 /*\nmultiple lines!\n*/go', True)
+ ]
+
+ @staticmethod
+ @pytest.mark.parametrize("query_str, is_complete", testdata)
+ def test_multiline_completeness(query_str, is_complete):
+ """
+ Tests the _is_complete helper method, which parses a T-SQL multiline
+ statement on each newline and determines whether the script should
+ execute.
+ """
+ assert _is_complete(query_str) == is_complete
| Do not use a semicolon to end a multi-line query
Currently when editing a multiline query, we use a semicolon+<enter> to indicate that the query should be executed.
Feedback from the MVP summit: in multiline query mode, users like to enter more than one statement, separated by semicolons, so using a semicolon is a bad choice. We should consider using the 'GO' keyword to execute the query, which aligns with sqlcmd. | 0.0 | [
"tests/test_multiline.py::TestMssqlCliMultiline::test_multiline_completeness[None-False]",
"tests/test_multiline.py::TestMssqlCliMultiline::test_multiline_completeness[select",
"tests/test_multiline.py::TestMssqlCliMultiline::test_multiline_completeness[SELECT",
"tests/test_multiline.py::TestMssqlCliMultiline::test_multiline_completeness[GO-True]"
] | [
"tests/test_multiline.py::TestMssqlCliMultiline::test_multiline_completeness[-False]",
"tests/test_multiline.py::TestMssqlCliMultiline::test_multiline_completeness[GO"
] | 2019-12-17 18:17:23+00:00 | 1,857 |
|
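
The heart of the change above is the rule that a batch is complete only when `GO` is the sole token on the last line. The sketch below covers just that last-line rule; it is not the project's `_is_complete()`, which additionally strips comments with sqlparse and rejects input with open quotes or open block comments. The sample inputs come from the parametrized test data.

```python
def last_line_is_go(sql):
    """True when the final line of the buffer is exactly 'GO' (case-insensitive)."""
    if not sql:
        return False
    return sql.splitlines()[-1].strip().lower() == "go"


print(last_line_is_go("SELECT 4;\nGO"))       # True
print(last_line_is_go("select 1; GO"))        # False: GO shares the line
print(last_line_is_go("select 1\ngo go go"))  # False: extra tokens
print(last_line_is_go("GO select 1"))         # False
```
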
dbekaert__RAiDER-90 | diff --git a/tools/RAiDER/cli/parser.py b/tools/RAiDER/cli/parser.py
index 1d9c063..c540fed 100644
--- a/tools/RAiDER/cli/parser.py
+++ b/tools/RAiDER/cli/parser.py
@@ -37,5 +37,5 @@ def add_bbox(parser):
nargs=4,
type=float,
action=BBoxAction,
- metavar=('N', 'W', 'S', 'E')
+ metavar=('S', 'N', 'W', 'E')
)
diff --git a/tools/RAiDER/cli/validators.py b/tools/RAiDER/cli/validators.py
index c4bec15..5bec610 100644
--- a/tools/RAiDER/cli/validators.py
+++ b/tools/RAiDER/cli/validators.py
@@ -1,5 +1,5 @@
import itertools
-from argparse import Action, ArgumentTypeError
+from argparse import Action, ArgumentError, ArgumentTypeError
from datetime import date, time, timedelta
from time import strptime
@@ -128,7 +128,7 @@ class DateListAction(Action):
def __call__(self, parser, namespace, values, option_string=None):
if len(values) > 2 or not values:
- raise TypeError("Only 1 or 2 dates may be supplied")
+ raise ArgumentError(self, "Only 1 or 2 dates may be supplied")
if len(values) == 2:
start, end = values
@@ -171,19 +171,18 @@ class BBoxAction(Action):
)
def __call__(self, parser, namespace, values, option_string=None):
- N, W, S, E = values
+ S, N, W, E = values
- lat = [N, S]
- lon = [W, E]
+ if N <= S or E <= W:
+ raise ArgumentError(self, 'Bounding box must have a size!')
- if N == S or E == W:
- raise ValueError('Bounding box must have a size!')
+ for sn in (S, N):
+ if sn < -90 or sn > 90:
+ raise ArgumentError(self, 'Lats are out of S/N bounds')
- if min(lat) < -90 or max(lat) > 90:
- raise ValueError('Lats are out of N/S bounds')
-
- if min(lon) < -180 or max(lon) > 180:
- raise ValueError('Lons are out of W/E bounds')
+ for we in (W, E):
+ if we < -180 or we > 180:
+ raise ArgumentError(self, 'Lons are out of W/E bounds')
setattr(namespace, self.dest, values)
diff --git a/tools/RAiDER/downloadGNSSDelays.py b/tools/RAiDER/downloadGNSSDelays.py
index 94b612f..7c41b54 100755
--- a/tools/RAiDER/downloadGNSSDelays.py
+++ b/tools/RAiDER/downloadGNSSDelays.py
@@ -35,7 +35,7 @@ Check for and download tropospheric zenith delays for a set of GNSS stations fro
Example call to virtually access and append zenith delay information to a CSV table in specified output
directory, across specified range of years and all available times of day, and confined to specified
geographic bounding box :
-downloadGNSSdelay.py --out products -y '2010,2014' -b '40 -79 39 -78'
+downloadGNSSdelay.py --out products -y '2010,2014' -b '39 40 -79 -78'
Example call to virtually access and append zenith delay information to a CSV table in specified output
directory, across specified range of years and specified time of day, and distributed globally :
@@ -51,7 +51,7 @@ necessary for most applications.
Example call to physically download and append zenith delay information to a CSV table in specified output
directory, across specified range of years and specified time of day, and confined to specified
geographic bounding box :
-downloadGNSSdelay.py --download --out products -y '2010,2014' --returntime '00:00:00' -b '40 -79 39 -78'
+downloadGNSSdelay.py --download --out products -y '2010,2014' --returntime '00:00:00' -b '39 40 -79 -78'
""")
# Stations to check/download
diff --git a/tools/RAiDER/runProgram.py b/tools/RAiDER/runProgram.py
index 2ec89f5..06be2b2 100644
--- a/tools/RAiDER/runProgram.py
+++ b/tools/RAiDER/runProgram.py
@@ -20,8 +20,8 @@ def create_parser():
description=dedent("""\
Calculate tropospheric delay from a weather model.
Usage examples:
- raiderDelay.py --date 20200103 --time 23:00:00 -b 40 -79 39 -78 --model ERA5 --zref 15000 -v
- raiderDelay.py --date 20200103 --time 23:00:00 -b 40 -79 39 -78 --model ERA5 --zref 15000 --heightlvs 0 100 200 -v
+ raiderDelay.py --date 20200103 --time 23:00:00 -b 39 40 -79 -78 --model ERA5 --zref 15000 -v
+ raiderDelay.py --date 20200103 --time 23:00:00 -b 39 40 -79 -78 --model ERA5 --zref 15000 --heightlvs 0 100 200 -v
raiderDelay.py --date 20200103 --time 23:00:00 --latlon test/scenario_1/geom/lat.dat test/scenario_1/geom/lon.dat --model ERA5 --zref 20000 -v --out test/scenario_1/
""")
)
| dbekaert/RAiDER | 618a97d36411b81c694e696a6c59c867cfc54c83 | diff --git a/test/cli/test_validators.py b/test/cli/test_validators.py
index 6c4d989..10bc4d7 100644
--- a/test/cli/test_validators.py
+++ b/test/cli/test_validators.py
@@ -120,7 +120,7 @@ def test_date_list_action(parser):
date(2020, 1, 1), date(2020, 1, 2), date(2020, 1, 3)
]
- with pytest.raises(TypeError):
+ with pytest.raises(SystemExit):
parser.parse_args(["--datelist", "2020-1-1", "2020-1-2", "2020-1-3"])
@@ -151,9 +151,9 @@ def test_bbox_action(parser):
args = parser.parse_args(["--bbox_int", "10", "20", "30", "40"])
assert args.bbox_int == [10, 20, 30, 40]
- with pytest.raises(ValueError):
- parser.parse_args(["--bbox_int", "10", "20", "10", "20"])
- with pytest.raises(ValueError):
- parser.parse_args(["--bbox_int", "100", "20", "30", "40"])
- with pytest.raises(ValueError):
- parser.parse_args(["--bbox_int", "10", "190", "30", "40"])
+ with pytest.raises(SystemExit):
+ parser.parse_args(["--bbox_int", "10", "10", "20", "20"])
+ with pytest.raises(SystemExit):
+ parser.parse_args(["--bbox_int", "30", "100", "20", "40"])
+ with pytest.raises(SystemExit):
+ parser.parse_args(["--bbox_int", "10", "30", "40", "190"])
| Choose a consistent bounding box format
Currently the bounding box is taken as an input in NWSE order, and printed out in various places in SNWE order. Needless to say, this is confusing; we should pick one order and stick with it everywhere. | 0.0 | [
"test/cli/test_validators.py::test_bbox_action",
"test/cli/test_validators.py::test_date_list_action"
] | [
"test/cli/test_validators.py::test_time_type[z-23:00-expected10]",
"test/cli/test_validators.py::test_time_type[+0000-2300-expected11]",
"test/cli/test_validators.py::test_time_type[-T2300-expected9]",
"test/cli/test_validators.py::test_time_type[z-23:00:01-expected5]",
"test/cli/test_validators.py::test_time_type[-T230001.000000-expected2]",
"test/cli/test_validators.py::test_time_type_error",
"test/cli/test_validators.py::test_time_type[-23:00-expected10]",
"test/cli/test_validators.py::test_time_type[z-230001-expected7]",
"test/cli/test_validators.py::test_time_type[+0000-T23:00:01.000000-expected1]",
"test/cli/test_validators.py::test_time_type[+0000-T23:00-expected8]",
"test/cli/test_validators.py::test_time_type[+0000-23:00:01-expected5]",
"test/cli/test_validators.py::test_time_type[z-T230001-expected6]",
"test/cli/test_validators.py::test_time_type[+0000-T230001.000000-expected2]",
"test/cli/test_validators.py::test_time_type[z-T23:00:01.000000-expected1]",
"test/cli/test_validators.py::test_time_type[+0000-23:00-expected10]",
"test/cli/test_validators.py::test_time_type[-230001-expected7]",
"test/cli/test_validators.py::test_mapping_type_default",
"test/cli/test_validators.py::test_time_type[-T23:00:01.000000-expected1]",
"test/cli/test_validators.py::test_time_type[-T23:00:01.000000-expected0]",
"test/cli/test_validators.py::test_time_type[+0000-T2300-expected9]",
"test/cli/test_validators.py::test_time_type[z-T23:00:01-expected4]",
"test/cli/test_validators.py::test_time_type[z-T2300-expected9]",
"test/cli/test_validators.py::test_integer_mapping_type_default",
"test/cli/test_validators.py::test_time_type[z-T23:00-expected8]",
"test/cli/test_validators.py::test_integer_mapping_type_no_default",
"test/cli/test_validators.py::test_time_type[+0000-230001.000000-expected3]",
"test/cli/test_validators.py::test_time_type[z-T230001.000000-expected2]",
"test/cli/test_validators.py::test_integer_type",
"test/cli/test_validators.py::test_mapping_type_no_default",
"test/cli/test_validators.py::test_date_type",
"test/cli/test_validators.py::test_time_type[+0000-T23:00:01-expected4]",
"test/cli/test_validators.py::test_time_type[z-230001.000000-expected3]",
"test/cli/test_validators.py::test_time_type[-T230001-expected6]",
"test/cli/test_validators.py::test_time_type[-2300-expected11]",
"test/cli/test_validators.py::test_time_type[z-T23:00:01.000000-expected0]",
"test/cli/test_validators.py::test_time_type[z-2300-expected11]",
"test/cli/test_validators.py::test_time_type[+0000-230001-expected7]",
"test/cli/test_validators.py::test_time_type[+0000-T230001-expected6]",
"test/cli/test_validators.py::test_time_type[+0000-T23:00:01.000000-expected0]",
"test/cli/test_validators.py::test_time_type[-T23:00-expected8]",
"test/cli/test_validators.py::test_time_type[-T23:00:01-expected4]",
"test/cli/test_validators.py::test_time_type[-23:00:01-expected5]",
"test/cli/test_validators.py::test_time_type[-230001.000000-expected3]"
] | 2020-07-29 19:34:56+00:00 | 1,858 |
|
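
Two things change in the RAiDER patch above: the bounding box is now given as S N W E, and validation failures are raised as `argparse.ArgumentError` rather than `ValueError`/`TypeError`, so argparse converts them into a usage message and a `SystemExit`, which is what the updated tests assert. Below is a condensed sketch of the action; the standalone parser wiring and the `--bbox` flag name are illustrative, not the project's actual CLI.

```python
import argparse


class BBoxAction(argparse.Action):
    def __call__(self, parser, namespace, values, option_string=None):
        S, N, W, E = values  # south, north, west, east
        if N <= S or E <= W:
            # ArgumentError is caught by argparse and reported via parser.error()
            raise argparse.ArgumentError(self, 'Bounding box must have a size!')
        if not all(-90 <= lat <= 90 for lat in (S, N)):
            raise argparse.ArgumentError(self, 'Lats are out of S/N bounds')
        if not all(-180 <= lon <= 180 for lon in (W, E)):
            raise argparse.ArgumentError(self, 'Lons are out of W/E bounds')
        setattr(namespace, self.dest, values)


parser = argparse.ArgumentParser()
parser.add_argument('--bbox', nargs=4, type=float, action=BBoxAction,
                    metavar=('S', 'N', 'W', 'E'))

print(parser.parse_args(['--bbox', '39', '40', '-79', '-78']).bbox)
# [39.0, 40.0, -79.0, -78.0]; swapping to '40 39 ...' exits with a usage error.
```
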
deardurham__ciprs-reader-21 | diff --git a/ciprs/parsers.py b/ciprs/parsers.py
index 9f2bfec..c648277 100644
--- a/ciprs/parsers.py
+++ b/ciprs/parsers.py
@@ -113,6 +113,25 @@ class OffenseRecordRow(Parser):
report["Offense Record"]["Records"].append(record)
+class OffenseRecordRowWithNumber(Parser):
+ """
+ Extract offense row like:
+ 54 CHARGED SPEEDING INFRACTION G.S. 20-141(B)
+ """
+
+ # pylint: disable=line-too-long
+ pattern = r"(?:\d*)?\s*(?P<action>\w+)\s+(?P<desc>[\w \-\(\)]+)[ ]{2,}(?P<severity>\w+)[ ]{2,}(?P<law>[\w. \-\(\)]+)"
+
+ def extract(self, matches, report):
+ record = {
+ "Action": matches["action"],
+ "Description": matches["desc"],
+ "Severity": matches["severity"],
+ "Law": matches["law"],
+ }
+ report["Offense Record"]["Records"].append(record)
+
+
class OffenseDisposedDate(Parser):
pattern = r".*Disposed on:\s*(?P<value>[\d/:]+)"
diff --git a/ciprs/reader.py b/ciprs/reader.py
index b084926..81f9faa 100644
--- a/ciprs/reader.py
+++ b/ciprs/reader.py
@@ -6,6 +6,7 @@ from ciprs.parsers import (
CaseDetails,
CaseStatus,
OffenseRecordRow,
+ OffenseRecordRowWithNumber,
OffenseDateTime,
OffenseDisposedDate,
CaseWasServedOnDate,
@@ -35,6 +36,7 @@ class PDFToTextReader:
CaseDetails(self.report),
CaseStatus(self.report),
OffenseRecordRow(self.report),
+ OffenseRecordRowWithNumber(self.report),
OffenseDateTime(self.report),
OffenseDisposedDate(self.report),
CaseWasServedOnDate(self.report),
| deardurham/ciprs-reader | a052cb447ca15183f6fb0eb2ac0b7cbd3d699f25 | diff --git a/tests/test_parsers.py b/tests/test_parsers.py
index ca14731..efa4bb8 100644
--- a/tests/test_parsers.py
+++ b/tests/test_parsers.py
@@ -39,6 +39,16 @@ def test_offense_record_charged():
assert matches["code"] == "4450"
+def test_offense_record_charged_with_number():
+ string = "54 CHARGED SPEEDING(80 mph in a 65 mph zone) INFRACTION G.S. 20-141(B)" # noqa
+ matches = parsers.OffenseRecordRowWithNumber().match(string)
+ assert matches is not None, "Regex match failed"
+ assert matches["action"] == "CHARGED"
+ assert matches["desc"] == "SPEEDING(80 mph in a 65 mph zone)"
+ assert matches["severity"] == "INFRACTION"
+ assert matches["law"] == "G.S. 20-141(B)"
+
+
def test_offense_record_arrainged():
string = "ARRAIGNED SPEEDING(80 mph in a 65 mph zone) INFRACTION G.S. 20-141(B) 4450" # noqa
matches = parsers.OffenseRecordRow().match(string)
| Parsers do not catch numbered offense rows
The offense row parser is designed to capture data from offense rows that look like this:
ACTION DESCRIPTION SEVERITY LAW CODE
However, on some records they look like this:
\# ACTION DESCRIPTION SEVERITY LAW
with `#` being either a zero-padded number or blank. | 0.0 | [
"tests/test_parsers.py::test_offense_record_charged_with_number"
] | [
"tests/test_parsers.py::test_case_details[expected0-",
"tests/test_parsers.py::test_case_details[expected1-",
"tests/test_parsers.py::test_case_status",
"tests/test_parsers.py::test_offense_record_charged",
"tests/test_parsers.py::test_offense_record_arrainged",
"tests/test_parsers.py::test_offense_record_convicted",
"tests/test_parsers.py::test_offense_date_time",
"tests/test_parsers.py::test_defendent_name",
"tests/test_parsers.py::test_defendent_name_no_middle",
"tests/test_parsers.py::test_defendent_race",
"tests/test_parsers.py::test_defendent_sex_male",
"tests/test_parsers.py::test_defendent_sex_female",
"tests/test_parsers.py::test_defendent_sex_bad",
"tests/test_parsers.py::test_defendent_dob",
"tests/test_parsers.py::test_offense_disposed_date[2000-01-01-",
"tests/test_parsers.py::test_offense_disposed_date[2016-07-20-",
"tests/test_parsers.py::test_case_was_served_on_date[2000-09-09-",
"tests/test_parsers.py::test_case_was_served_on_date[2015-05-17-Case",
"tests/test_parsers.py::test_known_offense_disposition_method",
"tests/test_parsers.py::test_unknown_offense_disposition_method",
"tests/test_parsers.py::test_court_type_other",
"tests/test_parsers.py::test_court_type_cr",
"tests/test_parsers.py::test_court_type_crs"
] | 2020-02-19 01:01:30+00:00 | 1,859 |
|
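
The new `OffenseRecordRowWithNumber` pattern above keys on an optional leading row number and on runs of two or more spaces between the remaining columns. The pattern below is copied from the patch; the sample row is an assumed two-space-separated rendering of the test data, since the exact column spacing in the original record is not visible here.

```python
import re

# Pattern copied from OffenseRecordRowWithNumber in the patch.
pattern = (r"(?:\d*)?\s*(?P<action>\w+)\s+(?P<desc>[\w \-\(\)]+)[ ]{2,}"
           r"(?P<severity>\w+)[ ]{2,}(?P<law>[\w. \-\(\)]+)")

row = "54 CHARGED  SPEEDING(80 mph in a 65 mph zone)  INFRACTION  G.S. 20-141(B)"
m = re.match(pattern, row)
print(m.group("action"))    # CHARGED
print(m.group("desc"))      # SPEEDING(80 mph in a 65 mph zone)
print(m.group("severity"))  # INFRACTION
print(m.group("law"))       # G.S. 20-141(B)
```
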
deardurham__ciprs-reader-38 | diff --git a/ciprs_reader/parser/section/header.py b/ciprs_reader/parser/section/header.py
index 1f18711..34edc94 100644
--- a/ciprs_reader/parser/section/header.py
+++ b/ciprs_reader/parser/section/header.py
@@ -13,7 +13,7 @@ class CaseDetails(HeaderParser):
"""Extract County and File No from header on top of first page"""
pattern = (
- r"\s*Case (Details|Summary) for Court Case[\s:]+(?P<county>\w+) (?P<fileno>\w+)"
+ r"\s*Case (Details|Summary) for Court Case[\s:]+(?P<county>(\w\s*)+) (?P<fileno>\w+)"
)
def extract(self, matches, report):
| deardurham/ciprs-reader | a39ef64b3e2ee9c14b377262c64230cce9bc2681 | diff --git a/tests/parsers/test_header.py b/tests/parsers/test_header.py
index 3610ac9..5fb3138 100644
--- a/tests/parsers/test_header.py
+++ b/tests/parsers/test_header.py
@@ -12,6 +12,14 @@ CASE_DETAIL_DATA = [
{"county": "ORANGE", "fileno": "99FN9999999"},
" Case Summary for Court Case: ORANGE 99FN9999999",
),
+ (
+ {"county": "NEW HANOVER", "fileno": "00GR000000"},
+ " Case Details for Court Case NEW HANOVER 00GR000000 ",
+ ),
+ (
+ {"county": "OLD HANOVER", "fileno": "99FN9999999"},
+ " Case Summary for Court Case: OLD HANOVER 99FN9999999",
+ ),
]
| County names with spaces are not parsed properly
The [CaseDetails](https://github.com/deardurham/ciprs-reader/blob/master/ciprs_reader/parser/section/header.py#L12) parser doesn't expect county names to contain spaces, so a line like
```
Case Summary for Court Case: NEW HANOVER 20CR000000
```
will result in a county name of ``NEW``.
AC
- [CaseDetails](https://github.com/deardurham/ciprs-reader/blob/master/ciprs_reader/parser/section/header.py#L12) parser updated to expect spaces
- A test is added with spaces in the county name | 0.0 | [
"tests/parsers/test_header.py::test_case_details[expected2-",
"tests/parsers/test_header.py::test_case_details[expected3-"
] | [
"tests/parsers/test_header.py::test_case_details[expected0-",
"tests/parsers/test_header.py::test_case_details[expected1-",
"tests/parsers/test_header.py::test_defendent_name",
"tests/parsers/test_header.py::test_defendent_name_no_middle",
"tests/parsers/test_header.py::test_defendent_name_special_character"
] | 2020-06-26 03:08:23+00:00 | 1,860 |
|
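
The regex change above (from `(?P<county>\w+)` to `(?P<county>(\w\s*)+)`) lets the county group span several space-separated words while the trailing `(?P<fileno>\w+)` still captures the file number. Exercising the patched pattern directly, with a sample line taken from the new test data:

```python
import re

# Pattern copied from the patched CaseDetails parser.
pattern = (r"\s*Case (Details|Summary) for Court Case[\s:]+"
           r"(?P<county>(\w\s*)+) (?P<fileno>\w+)")

line = " Case Summary for Court Case: OLD HANOVER 99FN9999999"
m = re.match(pattern, line)
print(m.group("county"))  # OLD HANOVER
print(m.group("fileno"))  # 99FN9999999
```
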
decargroup__pykoop-132 | diff --git a/LICENSE b/LICENSE
index e8d5947..a4df98f 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
MIT License
-Copyright (c) 2022 DECAR Systems Group
+Copyright (c) 2022 DECAR
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/pykoop/koopman_pipeline.py b/pykoop/koopman_pipeline.py
index 18f1d8b..80ad541 100644
--- a/pykoop/koopman_pipeline.py
+++ b/pykoop/koopman_pipeline.py
@@ -2821,7 +2821,9 @@ class KoopmanPipeline(metaestimators._BaseComposition, KoopmanLiftingFn):
an error has occured in estimator fitting. If set to ``'raise'``, a
:class:`ValueError` is raised. If a numerical value is given, a
:class:`sklearn.exceptions.FitFailedWarning` warning is raised and
- the specified score is returned.
+ the specified score is returned. The error score defines the worst
+ possible score. If a score is finite but lower than the error
+ score, the error score will be returned instead.
multistep : bool
If true, predict using :func:`predict_trajectory`. Otherwise,
@@ -3204,7 +3206,9 @@ def score_trajectory(
error has occured in estimator fitting. If set to ``'raise'``, a
:class:`ValueError` is raised. If a numerical value is given, a
:class:`sklearn.exceptions.FitFailedWarning` warning is raised and the
- specified score is returned.
+ specified score is returned. The error score defines the worst possible
+ score. If a score is finite but lower than the error score, the error
+ score will be returned instead.
min_samples : int
Number of samples in initial condition.
@@ -3298,6 +3302,13 @@ def score_trajectory(
# Invert losses
if regression_metric not in greater_is_better:
score *= -1
+ # If score is worse than error score, return that.
+ if np.isfinite(error_score) and (score < error_score):
+ warnings.warn(
+ f'Score `score={score}` is finite, but is lower than error '
+ f'score `error_score={error_score}`. Returning error score.',
+ sklearn.exceptions.FitFailedWarning)
+ return error_score
return score
| decargroup/pykoop | a9f1f516ca6dc273f96d2165d195c648417067c1 | diff --git a/tests/test_koopman_pipeline.py b/tests/test_koopman_pipeline.py
index 2762359..9d8c34a 100644
--- a/tests/test_koopman_pipeline.py
+++ b/tests/test_koopman_pipeline.py
@@ -560,6 +560,22 @@ class TestKoopmanPipelineScore:
False,
None,
),
+ # Finite score worse than error score should return error score.
+ (
+ np.array([
+ [1e-2, 1e-3],
+ ]).T,
+ np.array([
+ [1e5, 1e6],
+ ]).T,
+ None,
+ 1,
+ 'neg_mean_squared_error',
+ -100,
+ 1,
+ False,
+ -100,
+ ),
],
)
def test_score_trajectory(
| Change name in license file
DECAR Systems Group -> DECAR Group or just DECAR | 0.0 | [
"tests/test_koopman_pipeline.py::TestKoopmanPipelineScore::test_score_trajectory[X_predicted16-X_expected16-None-1-neg_mean_squared_error--100-1-False--100]"
] | [
"tests/test_koopman_pipeline.py::TestKoopmanPipelineTransform::test_koopman_pipeline_attrs[lf0-names_in0-X0-names_out0-Xt_exp0-1-False-attr_exp0]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineTransform::test_koopman_pipeline_attrs[lf1-names_in1-X1-names_out1-Xt_exp1-1-False-attr_exp1]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineTransform::test_koopman_pipeline_attrs[lf2-names_in2-X2-names_out2-Xt_exp2-1-False-attr_exp2]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineTransform::test_koopman_pipeline_attrs[lf3-names_in3-X3-names_out3-Xt_exp3-1-False-attr_exp3]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineTransform::test_transform[lf0-names_in0-X0-names_out0-Xt_exp0-1-False-attr_exp0]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineTransform::test_transform[lf1-names_in1-X1-names_out1-Xt_exp1-1-False-attr_exp1]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineTransform::test_transform[lf2-names_in2-X2-names_out2-Xt_exp2-1-False-attr_exp2]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineTransform::test_transform[lf3-names_in3-X3-names_out3-Xt_exp3-1-False-attr_exp3]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineTransform::test_inverse_transform[lf0-names_in0-X0-names_out0-Xt_exp0-1-False-attr_exp0]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineTransform::test_inverse_transform[lf1-names_in1-X1-names_out1-Xt_exp1-1-False-attr_exp1]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineTransform::test_inverse_transform[lf2-names_in2-X2-names_out2-Xt_exp2-1-False-attr_exp2]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineTransform::test_inverse_transform[lf3-names_in3-X3-names_out3-Xt_exp3-1-False-attr_exp3]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineTransform::test_feature_names_in[lf0-names_in0-X0-names_out0-Xt_exp0-1-False-attr_exp0]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineTransform::test_feature_names_in[lf1-names_in1-X1-names_out1-Xt_exp1-1-False-attr_exp1]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineTransform::test_feature_names_in[lf2-names_in2-X2-names_out2-Xt_exp2-1-False-attr_exp2]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineTransform::test_feature_names_in[lf3-names_in3-X3-names_out3-Xt_exp3-1-False-attr_exp3]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineTransform::test_feature_names_out[lf0-names_in0-X0-names_out0-Xt_exp0-1-False-attr_exp0]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineTransform::test_feature_names_out[lf1-names_in1-X1-names_out1-Xt_exp1-1-False-attr_exp1]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineTransform::test_feature_names_out[lf2-names_in2-X2-names_out2-Xt_exp2-1-False-attr_exp2]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineTransform::test_feature_names_out[lf3-names_in3-X3-names_out3-Xt_exp3-1-False-attr_exp3]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineFit::test_fit",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineFit::test_fit_feature_names",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineScore::test_score_trajectory[X_predicted0-X_expected0-None-1-neg_mean_squared_error-nan-1-False-0]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineScore::test_score_trajectory[X_predicted1-X_expected1-None-1-neg_mean_squared_error-nan-1-False--2.5]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineScore::test_score_trajectory[X_predicted2-X_expected2-None-1-neg_mean_squared_error-nan-1-False--1.3333333333333333]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineScore::test_score_trajectory[X_predicted3-X_expected3-None-1-neg_mean_absolute_error-nan-1-False--0.6666666666666666]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineScore::test_score_trajectory[X_predicted4-X_expected4-None-1-neg_mean_squared_error-nan-2-False--0.5]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineScore::test_score_trajectory[X_predicted5-X_expected5-2-1-neg_mean_squared_error-nan-1-False-0]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineScore::test_score_trajectory[X_predicted6-X_expected6-None-0.5-neg_mean_squared_error-nan-1-False--0.14285714285714285]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineScore::test_score_trajectory[X_predicted7-X_expected7-None-1-neg_mean_squared_error-nan-1-True--0.5]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineScore::test_score_trajectory[X_predicted8-X_expected8-1-1-neg_mean_squared_error-nan-1-True--0.5]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineScore::test_score_trajectory[X_predicted9-X_expected9-None-0.5-neg_mean_squared_error-nan-1-True--0.5]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineScore::test_score_trajectory[X_predicted10-X_expected10-1-0.5-neg_mean_squared_error-nan-1-True--0.5]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineScore::test_score_trajectory[X_predicted11-X_expected11-None-1-neg_mean_squared_error-nan-1-False-nan]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineScore::test_score_trajectory[X_predicted12-X_expected12-None-1-neg_mean_squared_error--100-1-False--100]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineScore::test_score_trajectory[X_predicted13-X_expected13-None-1-neg_mean_squared_error-raise-1-False-None]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineScore::test_score_trajectory[X_predicted14-X_expected14-None-1-neg_mean_squared_error--100-1-False--100]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineScore::test_score_trajectory[X_predicted15-X_expected15-None-1-neg_mean_squared_error-raise-1-False-None]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineScore::test_weights_from_data_matrix[X0-w_exp0-2-1-False]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineScore::test_weights_from_data_matrix[X1-w_exp1-3-0.5-False]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineScore::test_weights_from_data_matrix[X2-w_exp2-2-0.5-True]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineScore::test_weights_from_data_matrix[X3-w_exp3-10-1-True]",
"tests/test_koopman_pipeline.py::TestKoopmanPipelineScore::test_weights_from_data_matrix[X4-w_exp4-10-0.1-True]",
"tests/test_koopman_pipeline.py::TestEpisodeManipulation::test_shift_episodes[X0-X_unsh_exp0-X_sh_exp0-0-False]",
"tests/test_koopman_pipeline.py::TestEpisodeManipulation::test_shift_episodes[X1-X_unsh_exp1-X_sh_exp1-1-False]",
"tests/test_koopman_pipeline.py::TestEpisodeManipulation::test_shift_episodes[X2-X_unsh_exp2-X_sh_exp2-0-True]",
"tests/test_koopman_pipeline.py::TestEpisodeManipulation::test_shift_episodes[X3-X_unsh_exp3-X_sh_exp3-1-True]",
"tests/test_koopman_pipeline.py::TestEpisodeManipulation::test_extract_initial_conditions[X0-ic_exp0-1-0-False]",
"tests/test_koopman_pipeline.py::TestEpisodeManipulation::test_extract_initial_conditions[X1-ic_exp1-2-0-False]",
"tests/test_koopman_pipeline.py::TestEpisodeManipulation::test_extract_initial_conditions[X2-ic_exp2-1-1-False]",
"tests/test_koopman_pipeline.py::TestEpisodeManipulation::test_extract_initial_conditions[X3-ic_exp3-1-0-True]",
"tests/test_koopman_pipeline.py::TestEpisodeManipulation::test_extract_initial_conditions[X4-ic_exp4-1-1-True]",
"tests/test_koopman_pipeline.py::TestEpisodeManipulation::test_extract_initial_conditions[X5-ic_exp5-2-1-True]",
"tests/test_koopman_pipeline.py::TestEpisodeManipulation::test_extract_input[X0-u_exp0-1-False]",
"tests/test_koopman_pipeline.py::TestEpisodeManipulation::test_extract_input[X1-u_exp1-0-False]",
"tests/test_koopman_pipeline.py::TestEpisodeManipulation::test_extract_input[X2-u_exp2-1-True]",
"tests/test_koopman_pipeline.py::TestEpisodeManipulation::test_strip_initial_conditons",
"tests/test_koopman_pipeline.py::TestPrediction::test_predict_trajectory[kp0]",
"tests/test_koopman_pipeline.py::TestPrediction::test_predict_trajectory[kp1]",
"tests/test_koopman_pipeline.py::TestPrediction::test_predict_trajectory[kp2]",
"tests/test_koopman_pipeline.py::TestPrediction::test_predict_trajectory[kp3]",
"tests/test_koopman_pipeline.py::TestPrediction::test_predict_trajectory[kp4]",
"tests/test_koopman_pipeline.py::TestPrediction::test_predict_trajectory_no_U[kp0]",
"tests/test_koopman_pipeline.py::TestPrediction::test_predict_trajectory_no_U[kp1]",
"tests/test_koopman_pipeline.py::TestPrediction::test_predict_trajectory_no_U[kp2]",
"tests/test_koopman_pipeline.py::TestPrediction::test_predict_trajectory_no_U[kp3]",
"tests/test_koopman_pipeline.py::TestPrediction::test_predict_trajectory_no_U[kp4]",
"tests/test_koopman_pipeline.py::TestPrediction::test_predict_multistep[kp0]",
"tests/test_koopman_pipeline.py::TestPrediction::test_predict_multistep[kp1]",
"tests/test_koopman_pipeline.py::TestPrediction::test_predict_multistep[kp2]",
"tests/test_koopman_pipeline.py::TestPrediction::test_predict_multistep[kp3]",
"tests/test_koopman_pipeline.py::TestPrediction::test_predict_multistep[kp4]",
"tests/test_koopman_pipeline.py::TestSplitCombineEpisodes::test_split_episodes[X0-episodes0-True]",
"tests/test_koopman_pipeline.py::TestSplitCombineEpisodes::test_split_episodes[X1-episodes1-True]",
"tests/test_koopman_pipeline.py::TestSplitCombineEpisodes::test_split_episodes[X2-episodes2-False]",
"tests/test_koopman_pipeline.py::TestSplitCombineEpisodes::test_split_episodes[X3-episodes3-True]",
"tests/test_koopman_pipeline.py::TestSplitCombineEpisodes::test_combine_episodes[X0-episodes0-True]",
"tests/test_koopman_pipeline.py::TestSplitCombineEpisodes::test_combine_episodes[X1-episodes1-True]",
"tests/test_koopman_pipeline.py::TestSplitCombineEpisodes::test_combine_episodes[X2-episodes2-False]",
"tests/test_koopman_pipeline.py::TestSplitCombineEpisodes::test_combine_episodes[X3-episodes3-True]",
"tests/test_koopman_pipeline.py::TestSplitPipeline::test_split_lifting_fn_attrs[lf0-names_in0-X0-names_out0-Xt_exp0-2-False-attr_exp0]",
"tests/test_koopman_pipeline.py::TestSplitPipeline::test_split_lifting_fn_attrs[lf1-names_in1-X1-names_out1-Xt_exp1-2-True-attr_exp1]",
"tests/test_koopman_pipeline.py::TestSplitPipeline::test_split_lifting_fn_attrs[lf2-names_in2-X2-names_out2-Xt_exp2-2-False-attr_exp2]",
"tests/test_koopman_pipeline.py::TestSplitPipeline::test_split_lifting_fn_attrs[lf3-names_in3-X3-names_out3-Xt_exp3-2-False-attr_exp3]",
"tests/test_koopman_pipeline.py::TestSplitPipeline::test_split_lifting_fn_attrs[lf4-names_in4-X4-names_out4-Xt_exp4-2-False-attr_exp4]",
"tests/test_koopman_pipeline.py::TestSplitPipeline::test_split_lifting_fn_transform[lf0-names_in0-X0-names_out0-Xt_exp0-2-False-attr_exp0]",
"tests/test_koopman_pipeline.py::TestSplitPipeline::test_split_lifting_fn_transform[lf1-names_in1-X1-names_out1-Xt_exp1-2-True-attr_exp1]",
"tests/test_koopman_pipeline.py::TestSplitPipeline::test_split_lifting_fn_transform[lf2-names_in2-X2-names_out2-Xt_exp2-2-False-attr_exp2]",
"tests/test_koopman_pipeline.py::TestSplitPipeline::test_split_lifting_fn_transform[lf3-names_in3-X3-names_out3-Xt_exp3-2-False-attr_exp3]",
"tests/test_koopman_pipeline.py::TestSplitPipeline::test_split_lifting_fn_transform[lf4-names_in4-X4-names_out4-Xt_exp4-2-False-attr_exp4]",
"tests/test_koopman_pipeline.py::TestSplitPipeline::test_split_lifting_fn_inverse_transform[lf0-names_in0-X0-names_out0-Xt_exp0-2-False-attr_exp0]",
"tests/test_koopman_pipeline.py::TestSplitPipeline::test_split_lifting_fn_inverse_transform[lf1-names_in1-X1-names_out1-Xt_exp1-2-True-attr_exp1]",
"tests/test_koopman_pipeline.py::TestSplitPipeline::test_split_lifting_fn_inverse_transform[lf2-names_in2-X2-names_out2-Xt_exp2-2-False-attr_exp2]",
"tests/test_koopman_pipeline.py::TestSplitPipeline::test_split_lifting_fn_inverse_transform[lf3-names_in3-X3-names_out3-Xt_exp3-2-False-attr_exp3]",
"tests/test_koopman_pipeline.py::TestSplitPipeline::test_split_lifting_fn_inverse_transform[lf4-names_in4-X4-names_out4-Xt_exp4-2-False-attr_exp4]",
"tests/test_koopman_pipeline.py::TestSplitPipeline::test_feature_names_in[lf0-names_in0-X0-names_out0-Xt_exp0-2-False-attr_exp0]",
"tests/test_koopman_pipeline.py::TestSplitPipeline::test_feature_names_in[lf1-names_in1-X1-names_out1-Xt_exp1-2-True-attr_exp1]",
"tests/test_koopman_pipeline.py::TestSplitPipeline::test_feature_names_in[lf2-names_in2-X2-names_out2-Xt_exp2-2-False-attr_exp2]",
"tests/test_koopman_pipeline.py::TestSplitPipeline::test_feature_names_in[lf3-names_in3-X3-names_out3-Xt_exp3-2-False-attr_exp3]",
"tests/test_koopman_pipeline.py::TestSplitPipeline::test_feature_names_in[lf4-names_in4-X4-names_out4-Xt_exp4-2-False-attr_exp4]",
"tests/test_koopman_pipeline.py::TestSplitPipeline::test_feature_names_out[lf0-names_in0-X0-names_out0-Xt_exp0-2-False-attr_exp0]",
"tests/test_koopman_pipeline.py::TestSplitPipeline::test_feature_names_out[lf1-names_in1-X1-names_out1-Xt_exp1-2-True-attr_exp1]",
"tests/test_koopman_pipeline.py::TestSplitPipeline::test_feature_names_out[lf2-names_in2-X2-names_out2-Xt_exp2-2-False-attr_exp2]",
"tests/test_koopman_pipeline.py::TestSplitPipeline::test_feature_names_out[lf3-names_in3-X3-names_out3-Xt_exp3-2-False-attr_exp3]",
"tests/test_koopman_pipeline.py::TestSplitPipeline::test_feature_names_out[lf4-names_in4-X4-names_out4-Xt_exp4-2-False-attr_exp4]",
"tests/test_koopman_pipeline.py::TestLiftRetract::test_lift_retract_ff[lf0]",
"tests/test_koopman_pipeline.py::TestLiftRetract::test_lift_retract_ff[lf1]",
"tests/test_koopman_pipeline.py::TestLiftRetract::test_lift_retract_tt[lf0]",
"tests/test_koopman_pipeline.py::TestLiftRetract::test_lift_retract_tt[lf1]",
"tests/test_koopman_pipeline.py::TestLiftRetract::test_lift_retract_ft[lf0]",
"tests/test_koopman_pipeline.py::TestLiftRetract::test_lift_retract_ft[lf1]",
"tests/test_koopman_pipeline.py::TestLiftRetract::test_lift_retract_tf[lf0]",
"tests/test_koopman_pipeline.py::TestLiftRetract::test_lift_retract_tf[lf1]",
"tests/test_koopman_pipeline.py::TestFeatureNames::test_valid_names",
"tests/test_koopman_pipeline.py::TestFeatureNames::test_invalid_names",
"tests/test_koopman_pipeline.py::TestFeatureNames::test_numerical_names",
"tests/test_koopman_pipeline.py::TestFeatureNames::test_different_fit_transform[X_fit0-X_transform0]",
"tests/test_koopman_pipeline.py::TestFeatureNames::test_different_fit_transform[X_fit1-X_transform1]",
"tests/test_koopman_pipeline.py::TestFeatureNames::test_different_fit_transform[X_fit2-X_transform2]",
"tests/test_koopman_pipeline.py::TestSplitStateInputEpisodes::test_X_initial_onearg[kp0-X0-1-True]",
"tests/test_koopman_pipeline.py::TestSplitStateInputEpisodes::test_U_onearg[kp0-X0-1-True]",
"tests/test_koopman_pipeline.py::TestSplitStateInputEpisodes::test_X_initial_twoarg[kp0-X0-1-True]",
"tests/test_koopman_pipeline.py::TestSplitStateInputEpisodes::test_U_twoarg[kp0-X0-1-True]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(regressor=Edmd())-check_no_attributes_set_in_init]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(regressor=Edmd())-check_estimators_dtypes]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(regressor=Edmd())-check_fit_score_takes_y]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(regressor=Edmd())-check_estimators_fit_returns_self]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(regressor=Edmd())-check_estimators_fit_returns_self(readonly_memmap=True)]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(regressor=Edmd())-check_complex_data]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(regressor=Edmd())-check_dtype_object]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(regressor=Edmd())-check_estimators_empty_data_messages]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(regressor=Edmd())-check_pipeline_consistency]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(regressor=Edmd())-check_estimators_nan_inf]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(regressor=Edmd())-check_estimators_overwrite_params]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(regressor=Edmd())-check_estimator_sparse_array]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(regressor=Edmd())-check_estimator_sparse_matrix]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(regressor=Edmd())-check_estimators_pickle]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(regressor=Edmd())-check_estimators_pickle(readonly_memmap=True)]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(regressor=Edmd())-check_estimator_get_tags_default_keys]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(regressor=Edmd())-check_transformer_data_not_an_array]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(regressor=Edmd())-check_transformer_general]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(regressor=Edmd())-check_transformer_preserve_dtypes]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(regressor=Edmd())-check_transformer_general(readonly_memmap=True)]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(regressor=Edmd())-check_transformers_unfitted]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(regressor=Edmd())-check_transformer_n_iter]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(regressor=Edmd())-check_parameters_default_constructible]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(regressor=Edmd())-check_methods_sample_order_invariance]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(regressor=Edmd())-check_methods_subset_invariance]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(regressor=Edmd())-check_fit2d_1sample]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(regressor=Edmd())-check_fit2d_1feature]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(regressor=Edmd())-check_get_params_invariance]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(regressor=Edmd())-check_set_params]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(regressor=Edmd())-check_dict_unchanged]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(regressor=Edmd())-check_dont_overwrite_parameters]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(regressor=Edmd())-check_fit_idempotent]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(regressor=Edmd())-check_fit_check_is_fitted]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(regressor=Edmd())-check_n_features_in]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(regressor=Edmd())-check_fit1d]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(regressor=Edmd())-check_fit2d_predict1d]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(lifting_functions=[('pl',PolynomialLiftingFn())],regressor=Edmd())-check_no_attributes_set_in_init]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(lifting_functions=[('pl',PolynomialLiftingFn())],regressor=Edmd())-check_estimators_dtypes]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(lifting_functions=[('pl',PolynomialLiftingFn())],regressor=Edmd())-check_fit_score_takes_y]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(lifting_functions=[('pl',PolynomialLiftingFn())],regressor=Edmd())-check_estimators_fit_returns_self]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(lifting_functions=[('pl',PolynomialLiftingFn())],regressor=Edmd())-check_estimators_fit_returns_self(readonly_memmap=True)]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(lifting_functions=[('pl',PolynomialLiftingFn())],regressor=Edmd())-check_complex_data]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(lifting_functions=[('pl',PolynomialLiftingFn())],regressor=Edmd())-check_dtype_object]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(lifting_functions=[('pl',PolynomialLiftingFn())],regressor=Edmd())-check_estimators_empty_data_messages]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(lifting_functions=[('pl',PolynomialLiftingFn())],regressor=Edmd())-check_pipeline_consistency]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(lifting_functions=[('pl',PolynomialLiftingFn())],regressor=Edmd())-check_estimators_nan_inf]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(lifting_functions=[('pl',PolynomialLiftingFn())],regressor=Edmd())-check_estimators_overwrite_params]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(lifting_functions=[('pl',PolynomialLiftingFn())],regressor=Edmd())-check_estimator_sparse_array]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(lifting_functions=[('pl',PolynomialLiftingFn())],regressor=Edmd())-check_estimator_sparse_matrix]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(lifting_functions=[('pl',PolynomialLiftingFn())],regressor=Edmd())-check_estimators_pickle]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(lifting_functions=[('pl',PolynomialLiftingFn())],regressor=Edmd())-check_estimators_pickle(readonly_memmap=True)]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(lifting_functions=[('pl',PolynomialLiftingFn())],regressor=Edmd())-check_estimator_get_tags_default_keys]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(lifting_functions=[('pl',PolynomialLiftingFn())],regressor=Edmd())-check_transformer_data_not_an_array]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(lifting_functions=[('pl',PolynomialLiftingFn())],regressor=Edmd())-check_transformer_general]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(lifting_functions=[('pl',PolynomialLiftingFn())],regressor=Edmd())-check_transformer_preserve_dtypes]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(lifting_functions=[('pl',PolynomialLiftingFn())],regressor=Edmd())-check_transformer_general(readonly_memmap=True)]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(lifting_functions=[('pl',PolynomialLiftingFn())],regressor=Edmd())-check_transformers_unfitted]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(lifting_functions=[('pl',PolynomialLiftingFn())],regressor=Edmd())-check_transformer_n_iter]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(lifting_functions=[('pl',PolynomialLiftingFn())],regressor=Edmd())-check_parameters_default_constructible]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(lifting_functions=[('pl',PolynomialLiftingFn())],regressor=Edmd())-check_methods_sample_order_invariance]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(lifting_functions=[('pl',PolynomialLiftingFn())],regressor=Edmd())-check_methods_subset_invariance]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(lifting_functions=[('pl',PolynomialLiftingFn())],regressor=Edmd())-check_fit2d_1sample]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(lifting_functions=[('pl',PolynomialLiftingFn())],regressor=Edmd())-check_fit2d_1feature]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(lifting_functions=[('pl',PolynomialLiftingFn())],regressor=Edmd())-check_get_params_invariance]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(lifting_functions=[('pl',PolynomialLiftingFn())],regressor=Edmd())-check_set_params]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(lifting_functions=[('pl',PolynomialLiftingFn())],regressor=Edmd())-check_dict_unchanged]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(lifting_functions=[('pl',PolynomialLiftingFn())],regressor=Edmd())-check_dont_overwrite_parameters]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(lifting_functions=[('pl',PolynomialLiftingFn())],regressor=Edmd())-check_fit_idempotent]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(lifting_functions=[('pl',PolynomialLiftingFn())],regressor=Edmd())-check_fit_check_is_fitted]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(lifting_functions=[('pl',PolynomialLiftingFn())],regressor=Edmd())-check_n_features_in]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(lifting_functions=[('pl',PolynomialLiftingFn())],regressor=Edmd())-check_fit1d]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[KoopmanPipeline(lifting_functions=[('pl',PolynomialLiftingFn())],regressor=Edmd())-check_fit2d_predict1d]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline()-check_no_attributes_set_in_init]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline()-check_estimators_dtypes]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline()-check_fit_score_takes_y]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline()-check_estimators_fit_returns_self]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline()-check_estimators_fit_returns_self(readonly_memmap=True)]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline()-check_complex_data]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline()-check_dtype_object]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline()-check_estimators_empty_data_messages]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline()-check_pipeline_consistency]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline()-check_estimators_nan_inf]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline()-check_estimators_overwrite_params]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline()-check_estimator_sparse_array]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline()-check_estimator_sparse_matrix]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline()-check_estimators_pickle]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline()-check_estimators_pickle(readonly_memmap=True)]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline()-check_estimator_get_tags_default_keys]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline()-check_transformer_data_not_an_array]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline()-check_transformer_general]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline()-check_transformer_preserve_dtypes]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline()-check_transformer_general(readonly_memmap=True)]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline()-check_transformers_unfitted]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline()-check_transformer_n_iter]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline()-check_parameters_default_constructible]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline()-check_methods_sample_order_invariance]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline()-check_methods_subset_invariance]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline()-check_fit2d_1sample]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline()-check_fit2d_1feature]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline()-check_get_params_invariance]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline()-check_set_params]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline()-check_dict_unchanged]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline()-check_dont_overwrite_parameters]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline()-check_fit_idempotent]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline()-check_fit_check_is_fitted]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline()-check_n_features_in]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline()-check_fit1d]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline()-check_fit2d_predict1d]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline(lifting_functions_state=[('pl',PolynomialLiftingFn())])-check_no_attributes_set_in_init]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline(lifting_functions_state=[('pl',PolynomialLiftingFn())])-check_estimators_dtypes]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline(lifting_functions_state=[('pl',PolynomialLiftingFn())])-check_fit_score_takes_y]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline(lifting_functions_state=[('pl',PolynomialLiftingFn())])-check_estimators_fit_returns_self]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline(lifting_functions_state=[('pl',PolynomialLiftingFn())])-check_estimators_fit_returns_self(readonly_memmap=True)]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline(lifting_functions_state=[('pl',PolynomialLiftingFn())])-check_complex_data]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline(lifting_functions_state=[('pl',PolynomialLiftingFn())])-check_dtype_object]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline(lifting_functions_state=[('pl',PolynomialLiftingFn())])-check_estimators_empty_data_messages]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline(lifting_functions_state=[('pl',PolynomialLiftingFn())])-check_pipeline_consistency]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline(lifting_functions_state=[('pl',PolynomialLiftingFn())])-check_estimators_nan_inf]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline(lifting_functions_state=[('pl',PolynomialLiftingFn())])-check_estimators_overwrite_params]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline(lifting_functions_state=[('pl',PolynomialLiftingFn())])-check_estimator_sparse_array]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline(lifting_functions_state=[('pl',PolynomialLiftingFn())])-check_estimator_sparse_matrix]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline(lifting_functions_state=[('pl',PolynomialLiftingFn())])-check_estimators_pickle]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline(lifting_functions_state=[('pl',PolynomialLiftingFn())])-check_estimators_pickle(readonly_memmap=True)]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline(lifting_functions_state=[('pl',PolynomialLiftingFn())])-check_estimator_get_tags_default_keys]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline(lifting_functions_state=[('pl',PolynomialLiftingFn())])-check_transformer_data_not_an_array]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline(lifting_functions_state=[('pl',PolynomialLiftingFn())])-check_transformer_general]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline(lifting_functions_state=[('pl',PolynomialLiftingFn())])-check_transformer_preserve_dtypes]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline(lifting_functions_state=[('pl',PolynomialLiftingFn())])-check_transformer_general(readonly_memmap=True)]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline(lifting_functions_state=[('pl',PolynomialLiftingFn())])-check_transformers_unfitted]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline(lifting_functions_state=[('pl',PolynomialLiftingFn())])-check_transformer_n_iter]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline(lifting_functions_state=[('pl',PolynomialLiftingFn())])-check_parameters_default_constructible]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline(lifting_functions_state=[('pl',PolynomialLiftingFn())])-check_methods_sample_order_invariance]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline(lifting_functions_state=[('pl',PolynomialLiftingFn())])-check_methods_subset_invariance]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline(lifting_functions_state=[('pl',PolynomialLiftingFn())])-check_fit2d_1sample]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline(lifting_functions_state=[('pl',PolynomialLiftingFn())])-check_fit2d_1feature]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline(lifting_functions_state=[('pl',PolynomialLiftingFn())])-check_get_params_invariance]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline(lifting_functions_state=[('pl',PolynomialLiftingFn())])-check_set_params]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline(lifting_functions_state=[('pl',PolynomialLiftingFn())])-check_dict_unchanged]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline(lifting_functions_state=[('pl',PolynomialLiftingFn())])-check_dont_overwrite_parameters]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline(lifting_functions_state=[('pl',PolynomialLiftingFn())])-check_fit_idempotent]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline(lifting_functions_state=[('pl',PolynomialLiftingFn())])-check_fit_check_is_fitted]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline(lifting_functions_state=[('pl',PolynomialLiftingFn())])-check_n_features_in]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline(lifting_functions_state=[('pl',PolynomialLiftingFn())])-check_fit1d]",
"tests/test_koopman_pipeline.py::TestSkLearn::test_compatible_estimator[SplitPipeline(lifting_functions_state=[('pl',PolynomialLiftingFn())])-check_fit2d_predict1d]",
"tests/test_koopman_pipeline.py::TestDeepParams::test_get_set",
"tests/test_koopman_pipeline.py::TestDeepParams::test_nested_get_set",
"tests/test_koopman_pipeline.py::TestDeepParams::test_invalid_set"
] | 2023-01-18 16:51:15+00:00 | 1,861 |
|
dedupeio__dedupe-837 | diff --git a/dedupe/predicates.py b/dedupe/predicates.py
index 1934811..e796a6d 100644
--- a/dedupe/predicates.py
+++ b/dedupe/predicates.py
@@ -329,9 +329,14 @@ class CompoundPredicate(tuple):
def __call__(self, record, **kwargs):
predicate_keys = [predicate(record, **kwargs)
for predicate in self]
- return [u':'.join(block_key)
- for block_key
- in itertools.product(*predicate_keys)]
+ return [
+ u':'.join(
+ # must escape : to avoid confusion with : join separator
+ b.replace(u':', u'\\:') for b in block_key
+ )
+ for block_key
+ in itertools.product(*predicate_keys)
+ ]
def wholeFieldPredicate(field: Any) -> Tuple[str]:
| dedupeio/dedupe | 5092f2598f1f2bcb4f3b1f0ec85f88b95ce7454b | diff --git a/tests/test_predicates.py b/tests/test_predicates.py
index deca290..714ac31 100644
--- a/tests/test_predicates.py
+++ b/tests/test_predicates.py
@@ -80,5 +80,47 @@ class TestNumericPredicates(unittest.TestCase):
assert predicates.roundTo1(-22315) == (u'-20000',)
+class TestCompoundPredicate(unittest.TestCase):
+ def test_escapes_colon(self):
+ '''
+ Regression test for issue #836
+ '''
+ predicate_1 = predicates.SimplePredicate(
+ predicates.commonSetElementPredicate, 'col_1')
+ predicate_2 = predicates.SimplePredicate(
+ predicates.commonSetElementPredicate, 'col_2')
+ record = {
+ 'col_1': ['foo:', 'foo'],
+ 'col_2': [':bar', 'bar']
+ }
+
+ block_val = predicates.CompoundPredicate([
+ predicate_1,
+ predicate_2
+ ])(record)
+ assert len(set(block_val)) == 4
+ assert block_val == ['foo\\::\\:bar', 'foo\\::bar', 'foo:\\:bar', 'foo:bar']
+
+ def test_escapes_escaped_colon(self):
+ '''
+ Regression test for issue #836
+ '''
+ predicate_1 = predicates.SimplePredicate(
+ predicates.commonSetElementPredicate, 'col_1')
+ predicate_2 = predicates.SimplePredicate(
+ predicates.commonSetElementPredicate, 'col_2')
+ record = {
+ 'col_1': ['foo\\:', 'foo'],
+ 'col_2': ['\\:bar', 'bar']
+ }
+
+ block_val = predicates.CompoundPredicate([
+ predicate_1,
+ predicate_2
+ ])(record)
+ assert len(set(block_val)) == 4
+ assert block_val == ['foo\\\\::\\\\:bar', 'foo\\\\::bar', 'foo:\\\\:bar', 'foo:bar']
+
+
if __name__ == '__main__':
unittest.main()
| CompoundPredicate should escape `:` to avoid wrong block_keys
I've found an issue in `CompoundPredicate` caused by the lack of escaping of `:` characters inside field values.
This causes two problems. First, it can generate duplicate `block_key`s for the same `id`. Here's an example:
```
(SimplePredicate: (commonSetElementPredicate, name), SimplePredicate: (commonSetElementPredicate, address))
```
```
id | name | address
---------------------------------------
1 | {'foo:', 'foo'} | {':bar', 'bar'}
```
```
id | block_key
---------------
1 | foo::bar:0
1 | foo::bar:0
```
That's probably not problematic for Dedupe 2, but that breaks [this line](https://github.com/dedupeio/dedupe-examples/blob/83dbf872674a5c3f6a209c367f46c7e6ef78e9a3/pgsql_big_dedupe_example/pgsql_big_dedupe_example.py#L246-L247) of the old `smaller_id`-based big Dedupe I still use.
Second, it can group together under the same `block_key` records that shouldn't be blocked together. See:
```
(SimplePredicate: (wholeFieldPredicate, name), SimplePredicate: (wholeFieldPredicate, address))
```
```
id | name | address
---------------------------
1 | flavio: | one street
2 | flavio | :one street
```
```
id | blocking_key
-------------------------
1 | flavio::one street:0
2 | flavio::one street:0
```
The solution is to escape `:` right before performing the `join` at this line:
https://github.com/dedupeio/dedupe/blob/5092f2598f1f2bcb4f3b1f0ec85f88b95ce7454b/dedupe/predicates.py#L332
Something like `u':'.join(b.replace(u':', u'\\:') for b in block_key)` would work, because the examples above would become this:
```
id | block_key
---------------
1 | foo\::bar:0
1 | foo:\:bar:0
```
```
id | blocking_key
-------------------------
1 | flavio\::one street:0
2 | flavio:\:one street:0
```
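To make the proposed change concrete, here is a minimal sketch of the escaping inside `CompoundPredicate.__call__` (a sketch of the suggested fix, not necessarily the final implementation):
```
import itertools

class CompoundPredicate(tuple):

    def __call__(self, record, **kwargs):
        predicate_keys = [predicate(record, **kwargs) for predicate in self]
        return [
            # escape ':' in each block value so it cannot collide with the join separator
            u':'.join(b.replace(u':', u'\\:') for b in block_key)
            for block_key in itertools.product(*predicate_keys)
        ]
```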
I'll open a PR for this. | 0.0 | [
"tests/test_predicates.py::TestCompoundPredicate::test_escapes_colon",
"tests/test_predicates.py::TestCompoundPredicate::test_escapes_escaped_colon"
] | [
"tests/test_predicates.py::TestPuncStrip::test_set",
"tests/test_predicates.py::TestPuncStrip::test_sevenchar",
"tests/test_predicates.py::TestMetaphone::test_metaphone_token",
"tests/test_predicates.py::TestWholeSet::test_full_set",
"tests/test_predicates.py::TestSetElement::test_empty_set",
"tests/test_predicates.py::TestSetElement::test_first_last",
"tests/test_predicates.py::TestSetElement::test_long_set",
"tests/test_predicates.py::TestSetElement::test_magnitude",
"tests/test_predicates.py::TestLatLongGrid::test_precise_latlong",
"tests/test_predicates.py::TestNumericPredicates::test_order_of_magnitude",
"tests/test_predicates.py::TestNumericPredicates::test_round_to_1"
] | 2020-07-13 17:49:49+00:00 | 1,862 |
|
deepchem__deepchem-2664 | diff --git a/README.md b/README.md
index fec472dbe..54d1c06d7 100644
--- a/README.md
+++ b/README.md
@@ -109,6 +109,10 @@ If GPU support is required, then make sure CUDA is installed and then install th
2. pytorch - https://pytorch.org/get-started/locally/#start-locally
3. jax - https://github.com/google/jax#pip-installation-gpu-cuda
+In `zsh` square brackets are used for globbing/pattern matching. This means you
+need to escape the square brackets in the above installation. You can do so
+by including the dependencies in quotes like `pip install --pre 'deepchem[jax]'`
+
### Docker
If you want to install deepchem using a docker, you can pull two kinds of images.
diff --git a/deepchem/data/datasets.py b/deepchem/data/datasets.py
index 1abdde3e7..27427927a 100644
--- a/deepchem/data/datasets.py
+++ b/deepchem/data/datasets.py
@@ -1500,10 +1500,10 @@ class DiskDataset(Dataset):
"""Gets size of shards on disk."""
if not len(self.metadata_df):
raise ValueError("No data in dataset.")
- sample_y = load_from_disk(
+ sample_ids = load_from_disk(
os.path.join(self.data_dir,
- next(self.metadata_df.iterrows())[1]['y']))
- return len(sample_y)
+ next(self.metadata_df.iterrows())[1]['ids']))
+ return len(sample_ids)
def _get_metadata_filename(self) -> Tuple[str, str]:
"""Get standard location for metadata file."""
@@ -2369,11 +2369,11 @@ class DiskDataset(Dataset):
if y is not None:
y_sel = y[shard_inds]
else:
- y_sel = None
+ y_sel = np.array([])
if w is not None:
w_sel = w[shard_inds]
else:
- w_sel = None
+ w_sel = np.array([])
ids_sel = ids[shard_inds]
Xs.append(X_sel)
ys.append(y_sel)
@@ -2399,9 +2399,16 @@ class DiskDataset(Dataset):
np.where(sorted_indices == orig_index)[0][0]
for orig_index in select_shard_indices
])
- X, y, w, ids = X[reverted_indices], y[reverted_indices], w[
- reverted_indices], ids[reverted_indices]
- yield (X, y, w, ids)
+ if y.size == 0:
+ tup_y = y
+ else:
+ tup_y = y[reverted_indices]
+ if w.size == 0:
+ tup_w = w
+ else:
+ tup_w = w[reverted_indices]
+ X, ids = X[reverted_indices], ids[reverted_indices]
+ yield (X, tup_y, tup_w, ids)
start = end
select_shard_num += 1
diff --git a/docs/source/get_started/installation.rst b/docs/source/get_started/installation.rst
index 198d1020c..1c9a0cd68 100644
--- a/docs/source/get_started/installation.rst
+++ b/docs/source/get_started/installation.rst
@@ -66,6 +66,9 @@ If GPU support is required, then make sure CUDA is installed and then install th
2. pytorch - https://pytorch.org/get-started/locally/#start-locally
3. jax - https://github.com/google/jax#pip-installation-gpu-cuda
+In :code:`zsh` square brackets are used for globbing/pattern matching. This means
+you need to escape the square brackets in the above installation. You can do so by
+including the dependencies in quotes like :code:`pip install --pre 'deepchem[jax]'`
Google Colab
------------
| deepchem/deepchem | 22a8fbd7350540d6e0410223c0c75d744c241495 | diff --git a/deepchem/data/tests/reaction_smiles.csv b/deepchem/data/tests/reaction_smiles.csv
new file mode 100644
index 000000000..ce90ba563
--- /dev/null
+++ b/deepchem/data/tests/reaction_smiles.csv
@@ -0,0 +1,5 @@
+reactions
+CCS(=O)(=O)Cl.OCCBr>CCN(CC)CC.CCOCC>CCS(=O)(=O)OCCBr
+CC(C)CS(=O)(=O)Cl.OCCCl>CCN(CC)CC.CCOCC>CC(C)CS(=O)(=O)OCCCl
+O=[N+]([O-])c1cccc2cnc(Cl)cc12>CC(=O)O.O.[Fe].[Na+].[OH-]>Nc1cccc2cnc(Cl)cc12
+Cc1cc2c([N+](=O)[O-])cccc2c[n+]1[O-].O=P(Cl)(Cl)Cl>>Cc1cc2c([N+](=O)[O-])cccc2c(Cl)n1
\ No newline at end of file
diff --git a/deepchem/data/tests/test_shape.py b/deepchem/data/tests/test_shape.py
index 346c7da31..aca120dbe 100644
--- a/deepchem/data/tests/test_shape.py
+++ b/deepchem/data/tests/test_shape.py
@@ -106,3 +106,29 @@ def test_disk_dataset_get_legacy_shape_multishard():
assert y_shape == (num_datapoints, num_tasks)
assert w_shape == (num_datapoints, num_tasks)
assert ids_shape == (num_datapoints,)
+
+
+def test_get_shard_size():
+ """
+ Test that using ids for getting the shard size does not break the method.
+ The issue arises when attempting to load a dataset that does not have a labels
+ column. The create_dataset method of the DataLoader class sets the y to None
+ in this case, which causes the existing implementation of the get_shard_size()
+ method to fail, as it relies on the dataset having a not None y column. This
+ consequently breaks all methods depending on this, like the splitters for
+ example.
+
+ Note
+ ----
+ DiskDatasets without labels cannot be resharded!
+ """
+
+ current_dir = os.path.dirname(os.path.abspath(__file__))
+ file_path = os.path.join(current_dir, "reaction_smiles.csv")
+
+ featurizer = dc.feat.DummyFeaturizer()
+ loader = dc.data.CSVLoader(
+ tasks=[], feature_field="reactions", featurizer=featurizer)
+
+ dataset = loader.create_dataset(file_path)
+ assert dataset.get_shard_size() == 4
| Clarification for installation issues
I was working with a new source install for deepchem following the new lightweight guide and tried
```
pip install -e .[jax]
```
And got the following error
```
zsh: no matches found: [jax]
```
I googled this and found https://stackoverflow.com/questions/30539798/zsh-no-matches-found-requestssecurity. The basic issue is that `zsh` treats the square brackets as a glob pattern, so we need to do something like
```
noglob pip install -e .[jax]
```
We should modify the install directions to clarify this.
"deepchem/data/tests/test_shape.py::test_get_shard_size"
] | [
"deepchem/data/tests/test_shape.py::test_numpy_dataset_get_shape",
"deepchem/data/tests/test_shape.py::test_disk_dataset_get_shape_single_shard",
"deepchem/data/tests/test_shape.py::test_disk_dataset_get_shape_multishard",
"deepchem/data/tests/test_shape.py::test_disk_dataset_get_legacy_shape_single_shard",
"deepchem/data/tests/test_shape.py::test_disk_dataset_get_legacy_shape_multishard"
] | 2021-08-18 09:54:21+00:00 | 1,863 |
|
deepchem__deepchem-2741 | diff --git a/deepchem/dock/pose_generation.py b/deepchem/dock/pose_generation.py
index 97af3f929..59af72747 100644
--- a/deepchem/dock/pose_generation.py
+++ b/deepchem/dock/pose_generation.py
@@ -5,10 +5,8 @@ import platform
import logging
import os
import tempfile
-import tarfile
import numpy as np
-from subprocess import call, Popen, PIPE
-from subprocess import check_output
+from subprocess import Popen, PIPE
from typing import List, Optional, Tuple, Union
from deepchem.dock.binding_pocket import BindingPocketFinder
@@ -244,87 +242,50 @@ class VinaPoseGenerator(PoseGenerator):
"""Uses Autodock Vina to generate binding poses.
This class uses Autodock Vina to make make predictions of
- binding poses. It downloads the Autodock Vina executable for
- your system to your specified DEEPCHEM_DATA_DIR (remember this
- is an environment variable you set) and invokes the executable
- to perform pose generation for you.
+ binding poses.
+
+ Example
+ -------
+ >> import deepchem as dc
+ >> vpg = dc.dock.VinaPoseGenerator(pocket_finder=None)
+ >> protein_file = '1jld_protein.pdb'
+ >> ligand_file = '1jld_ligand.sdf'
+ >> poses, scores = vpg.generate_poses(
+ .. (protein_file, ligand_file),
+ .. exhaustiveness=1,
+ .. num_modes=1,
+ .. out_dir=tmp,
+ .. generate_scores=True)
Note
----
- This class requires RDKit to be installed.
+ This class requires RDKit and vina to be installed.
"""
- def __init__(self,
- sixty_four_bits: bool = True,
- pocket_finder: Optional[BindingPocketFinder] = None):
+ def __init__(self, pocket_finder: Optional[BindingPocketFinder] = None):
"""Initializes Vina Pose Generator
Parameters
----------
- sixty_four_bits: bool, optional (default True)
- Specifies whether this is a 64-bit machine. Needed to download
- the correct executable.
pocket_finder: BindingPocketFinder, optional (default None)
If specified should be an instance of
`dc.dock.BindingPocketFinder`.
"""
- data_dir = get_data_dir()
- if platform.system() == 'Linux':
- url = "http://vina.scripps.edu/download/autodock_vina_1_1_2_linux_x86.tgz"
- filename = "autodock_vina_1_1_2_linux_x86.tgz"
- dirname = "autodock_vina_1_1_2_linux_x86"
- self.vina_dir = os.path.join(data_dir, dirname)
- self.vina_cmd = os.path.join(self.vina_dir, "bin/vina")
- elif platform.system() == 'Darwin':
- if sixty_four_bits:
- url = "http://vina.scripps.edu/download/autodock_vina_1_1_2_mac_64bit.tar.gz"
- filename = "autodock_vina_1_1_2_mac_64bit.tar.gz"
- dirname = "autodock_vina_1_1_2_mac_catalina_64bit"
- else:
- url = "http://vina.scripps.edu/download/autodock_vina_1_1_2_mac.tgz"
- filename = "autodock_vina_1_1_2_mac.tgz"
- dirname = "autodock_vina_1_1_2_mac"
- self.vina_dir = os.path.join(data_dir, dirname)
- self.vina_cmd = os.path.join(self.vina_dir, "bin/vina")
- elif platform.system() == 'Windows':
- url = "http://vina.scripps.edu/download/autodock_vina_1_1_2_win32.msi"
- filename = "autodock_vina_1_1_2_win32.msi"
- self.vina_dir = "\\Program Files (x86)\\The Scripps Research Institute\\Vina"
- self.vina_cmd = os.path.join(self.vina_dir, "vina.exe")
- else:
- raise ValueError(
- "Unknown operating system. Try using a cloud platform to run this code instead."
- )
self.pocket_finder = pocket_finder
- if not os.path.exists(self.vina_dir):
- logger.info("Vina not available. Downloading")
- download_url(url, data_dir)
- downloaded_file = os.path.join(data_dir, filename)
- logger.info("Downloaded Vina. Extracting")
- if platform.system() == 'Windows':
- msi_cmd = "msiexec /i %s" % downloaded_file
- check_output(msi_cmd.split())
- else:
- with tarfile.open(downloaded_file) as tar:
- tar.extractall(data_dir)
- logger.info("Cleanup: removing downloaded vina tar.gz")
- os.remove(downloaded_file)
- def generate_poses(self,
- molecular_complex: Tuple[str, str],
- centroid: Optional[np.ndarray] = None,
- box_dims: Optional[np.ndarray] = None,
- exhaustiveness: int = 10,
- num_modes: int = 9,
- num_pockets: Optional[int] = None,
- out_dir: Optional[str] = None,
- generate_scores: Optional[bool] = False
- ) -> Union[Tuple[DOCKED_POSES, List[float]], DOCKED_POSES]:
+ def generate_poses(
+ self,
+ molecular_complex: Tuple[str, str],
+ centroid: Optional[np.ndarray] = None,
+ box_dims: Optional[np.ndarray] = None,
+ exhaustiveness: int = 10,
+ num_modes: int = 9,
+ num_pockets: Optional[int] = None,
+ out_dir: Optional[str] = None,
+ generate_scores: Optional[bool] = False,
+ **kwargs) -> Union[Tuple[DOCKED_POSES, List[float]], DOCKED_POSES]:
"""Generates the docked complex and outputs files for docked complex.
- TODO: How can this work on Windows? We need to install a .msi file and
- invoke it correctly from Python for this to work.
-
Parameters
----------
molecular_complexes: Tuple[str, str]
@@ -337,8 +298,9 @@ class VinaPoseGenerator(PoseGenerator):
A numpy array of shape `(3,)` holding the size of the box to dock. If not
specified is set to size of molecular complex plus 5 angstroms.
exhaustiveness: int, optional (default 10)
- Tells Autodock Vina how exhaustive it should be with pose
- generation.
+ Tells Autodock Vina how exhaustive it should be with pose generation. A
+ higher value of exhaustiveness implies more computation effort for the
+ docking experiment.
num_modes: int, optional (default 9)
Tells Autodock Vina how many binding modes it should generate at
each invocation.
@@ -352,6 +314,9 @@ class VinaPoseGenerator(PoseGenerator):
If `True`, the pose generator will return scores for complexes.
This is used typically when invoking external docking programs
that compute scores.
+ kwargs:
+ Any args supported by VINA as documented in
+ https://autodock-vina.readthedocs.io/en/latest/vina.html
Returns
-------
@@ -365,6 +330,28 @@ class VinaPoseGenerator(PoseGenerator):
------
`ValueError` if `num_pockets` is set but `self.pocket_finder is None`.
"""
+ if "cpu" in kwargs:
+ cpu = kwargs["cpu"]
+ else:
+ cpu = 0
+ if "min_rmsd" in kwargs:
+ min_rmsd = kwargs["min_rmsd"]
+ else:
+ min_rmsd = 1.0
+ if "max_evals" in kwargs:
+ max_evals = kwargs["max_evals"]
+ else:
+ max_evals = 0
+ if "energy_range" in kwargs:
+ energy_range = kwargs["energy_range"]
+ else:
+ energy_range = 3.0
+
+ try:
+ from vina import Vina
+ except ModuleNotFoundError:
+ raise ImportError("This function requires vina to be installed")
+
if out_dir is None:
out_dir = tempfile.mkdtemp()
@@ -435,6 +422,7 @@ class VinaPoseGenerator(PoseGenerator):
docked_complexes = []
all_scores = []
+ vpg = Vina(sf_name='vina', cpu=cpu, seed=0, no_refine=False, verbosity=1)
for i, (protein_centroid, box_dims) in enumerate(
zip(centroids, dimensions)):
logger.info("Docking in pocket %d/%d" % (i + 1, len(centroids)))
@@ -451,23 +439,25 @@ class VinaPoseGenerator(PoseGenerator):
num_modes=num_modes,
exhaustiveness=exhaustiveness)
- # Define locations of log and output files
- log_file = os.path.join(out_dir, "%s_log.txt" % ligand_name)
+ # Define locations of output files
out_pdbqt = os.path.join(out_dir, "%s_docked.pdbqt" % ligand_name)
logger.info("About to call Vina")
- if platform.system() == 'Windows':
- args = [
- self.vina_cmd, "--config", conf_file, "--log", log_file, "--out",
- out_pdbqt
- ]
- else:
- # I'm not sure why specifying the args as a list fails on other platforms,
- # but for some reason it only works if I pass it as a string.
- # FIXME: Incompatible types in assignment
- args = "%s --config %s --log %s --out %s" % ( # type: ignore
- self.vina_cmd, conf_file, log_file, out_pdbqt)
- # FIXME: We should use `subprocess.run` instead of `call`
- call(args, shell=True)
+
+ vpg.set_receptor(protein_pdbqt)
+ vpg.set_ligand_from_file(ligand_pdbqt)
+
+ vpg.compute_vina_maps(center=protein_centroid, box_size=box_dims)
+ vpg.dock(
+ exhaustiveness=exhaustiveness,
+ n_poses=num_modes,
+ min_rmsd=min_rmsd,
+ max_evals=max_evals)
+ vpg.write_poses(
+ out_pdbqt,
+ n_poses=num_modes,
+ energy_range=energy_range,
+ overwrite=True)
+
ligands, scores = load_docked_ligands(out_pdbqt)
docked_complexes += [(protein_mol[1], ligand) for ligand in ligands]
all_scores += scores
diff --git a/env.common.yml b/env.common.yml
index 46417eeb3..c94d13b49 100644
--- a/env.common.yml
+++ b/env.common.yml
@@ -30,3 +30,4 @@ dependencies:
- transformers==4.6.*
- xgboost==1.*
- git+https://github.com/samoturk/mol2vec
+ - vina
diff --git a/requirements/env_common.yml b/requirements/env_common.yml
index b1c6868df..45f78aac5 100644
--- a/requirements/env_common.yml
+++ b/requirements/env_common.yml
@@ -1,6 +1,5 @@
name: deepchem
channels:
- - omnia
- conda-forge
- defaults
dependencies:
@@ -24,3 +23,4 @@ dependencies:
- transformers==4.6.*
- xgboost==1.*
- git+https://github.com/samoturk/mol2vec
+ - vina
| deepchem/deepchem | 7740d91e185b411c65b06c5d3aea647b0b6dee66 | diff --git a/deepchem/dock/tests/test_pose_generation.py b/deepchem/dock/tests/test_pose_generation.py
index 79b21d667..66387f367 100644
--- a/deepchem/dock/tests/test_pose_generation.py
+++ b/deepchem/dock/tests/test_pose_generation.py
@@ -19,7 +19,6 @@ class TestPoseGeneration(unittest.TestCase):
Does sanity checks on pose generation.
"""
- @unittest.skipIf(IS_WINDOWS, 'Skip the test on Windows')
def test_vina_initialization(self):
"""Test that VinaPoseGenerator can be initialized."""
dc.dock.VinaPoseGenerator()
@@ -29,7 +28,6 @@ class TestPoseGeneration(unittest.TestCase):
"""Test that GninaPoseGenerator can be initialized."""
dc.dock.GninaPoseGenerator()
- @unittest.skipIf(IS_WINDOWS, 'Skip the test on Windows')
def test_pocket_vina_initialization(self):
"""Test that VinaPoseGenerator can be initialized."""
pocket_finder = dc.dock.ConvexHullPocketFinder()
| Error in initializing VinaPoseGenerator
## 🐛 Bug
## To Reproduce
Steps to reproduce the behavior:
1. Install the latest version of deepchem in Colab: `pip install --pre deepchem`
2. Run the following code sample:
```
import deepchem as dc
vpg = dc.dock.VinaPoseGenerator()
```
The above raises an `SSLCertVerificationError`, but it is expected to initialize `vpg`.
There are two points regarding this:
- The download location of the AutoDock Vina executables was changed by the Vina team. Previously, DeepChem referenced them from [here](http://vina.scripps.edu/download/autodock_vina_1_1_2_mac_64bit.tar.gz), but they are now hosted [here](https://vina.scripps.edu/wp-content/uploads/sites/55/2020/12/autodock_vina_1_1_2_mac_64bit.tar.gz)
- There is some problem on the server side that is causing the `SSLCertVerificationError`, probably an expired SSL certificate.
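For reference, AutoDock Vina 1.2+ ships pip-installable Python bindings (`pip install vina`), which would avoid downloading executables altogether. A minimal, illustrative sketch of docking through those bindings (the file names, box center and box size below are placeholders):
```
from vina import Vina

v = Vina(sf_name='vina', seed=0)
v.set_receptor('protein.pdbqt')           # receptor prepared as PDBQT
v.set_ligand_from_file('ligand.pdbqt')    # ligand prepared as PDBQT
# grid box around the binding site (center and size in angstroms)
v.compute_vina_maps(center=[15.0, 15.0, 15.0], box_size=[20.0, 20.0, 20.0])
v.dock(exhaustiveness=10, n_poses=9)
v.write_poses('ligand_docked.pdbqt', n_poses=9, overwrite=True)
```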
Some possible fixes:
- Move the executables to DeepChem's infrastructure
- Upgrade to the latest version of Autodock vina - 1.2.2 | 0.0 | [
"deepchem/dock/tests/test_pose_generation.py::TestPoseGeneration::test_gnina_initialization",
"deepchem/dock/tests/test_pose_generation.py::TestPoseGeneration::test_pocket_vina_initialization",
"deepchem/dock/tests/test_pose_generation.py::TestPoseGeneration::test_vina_initialization"
] | [] | 2021-11-04 16:57:48+00:00 | 1,864 |
|
deepchem__deepchem-2802 | diff --git a/deepchem/data/datasets.py b/deepchem/data/datasets.py
index f80a399ca..dd5ba637f 100644
--- a/deepchem/data/datasets.py
+++ b/deepchem/data/datasets.py
@@ -2286,6 +2286,7 @@ class DiskDataset(Dataset):
basename = "shard-%d" % shard_num
DiskDataset.write_data_to_disk(self.data_dir, basename, X, y, w, ids)
self._cached_shards = None
+ self.legacy_metadata = True
def select(self,
indices: Sequence[int],
| deepchem/deepchem | 9ef6c58eefd4e5f9bee40743ca14defa6f764f80 | diff --git a/deepchem/data/tests/test_setshard.py b/deepchem/data/tests/test_setshard.py
new file mode 100644
index 000000000..0fcf4b03e
--- /dev/null
+++ b/deepchem/data/tests/test_setshard.py
@@ -0,0 +1,21 @@
+import deepchem as dc
+import numpy as np
+
+
+def test_setshard_with_X_y():
+ """Test setsharding on a simple example"""
+ X = np.random.rand(10, 3)
+ y = np.random.rand(10,)
+ dataset = dc.data.DiskDataset.from_numpy(X, y)
+ X_shape, y_shape, _, _ = dataset.get_shape()
+ assert X_shape[0] == 10
+ assert y_shape[0] == 10
+ for i, (X, y, w, ids) in enumerate(dataset.itershards()):
+ X = X[1:]
+ y = y[1:]
+ w = w[1:]
+ ids = ids[1:]
+ dataset.set_shard(i, X, y, w, ids)
+ X_shape, y_shape, _, _ = dataset.get_shape()
+ assert X_shape[0] == 9
+ assert y_shape[0] == 9
| Bug in dataset.get_shape() when used after dataset.set_shard()
## 🐛 Bug
## To Reproduce
## Expected behavior
```
import deepchem as dc
import numpy as np
X = np.random.randn(10, 3)
y = np.random.randn(10)
dataset = dc.data.DiskDataset.from_numpy(X, y)
dataset.get_shape() # Output: ((10, 3), (10,), (10,), (10,))
for i, (X, y, w, ids) in enumerate(dataset.itershards()):
    X = X[1:]
    y = y[1:]
    w = w[1:]
    ids = ids[1:]
    dataset.set_shard(i, X, y, w, ids)
dataset.get_shape()
# Output: ((10, 3), (10,), (10,), (10,))
# Expected output: ((9, 3), (9, ), (9, ), (9,))
```
Edit 1:
This prints correctly:
```
for i, (X, y, w, ids) in enumerate(dataset.itershards()):
    print(X.shape)  # Output: (9, 3)
```
## Environment
* DeepChem version: 2.6.0.dev
## Additional context
The fix is probably a simple one.
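A minimal sketch of one possible fix in `DiskDataset.set_shard` (assuming the stale shapes come from shape metadata that is cached when the dataset is created and not invalidated when a shard is rewritten):
```
def set_shard(self, shard_num, X, y, w, ids):
    basename = "shard-%d" % shard_num
    DiskDataset.write_data_to_disk(self.data_dir, basename, X, y, w, ids)
    self._cached_shards = None
    # force shape information to be recomputed from the shards on disk
    self.legacy_metadata = True
```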
| 0.0 | [
"deepchem/data/tests/test_setshard.py::test_setshard_with_X_y"
] | [] | 2022-01-01 16:38:29+00:00 | 1,865 |
|
deepchem__deepchem-2979 | diff --git a/deepchem/feat/graph_data.py b/deepchem/feat/graph_data.py
index b9d037e95..000b4d750 100644
--- a/deepchem/feat/graph_data.py
+++ b/deepchem/feat/graph_data.py
@@ -69,7 +69,9 @@ class GraphData:
raise ValueError('edge_index.dtype must contains integers.')
elif edge_index.shape[0] != 2:
raise ValueError('The shape of edge_index is [2, num_edges].')
- elif np.max(edge_index) >= len(node_features):
+
+ # np.max() method works only for a non-empty array, so size of the array should be non-zero
+ elif (edge_index.size != 0) and (np.max(edge_index) >= len(node_features)):
raise ValueError('edge_index contains the invalid node number.')
if edge_features is not None:
| deepchem/deepchem | ba780d3d21013b2924fd3301913ff530939a2ccb | diff --git a/deepchem/feat/tests/test_graph_data.py b/deepchem/feat/tests/test_graph_data.py
index 4f28809c0..1d13cc76b 100644
--- a/deepchem/feat/tests/test_graph_data.py
+++ b/deepchem/feat/tests/test_graph_data.py
@@ -20,12 +20,11 @@ class TestGraph(unittest.TestCase):
# z is kwargs
z = np.random.random(5)
- graph = GraphData(
- node_features=node_features,
- edge_index=edge_index,
- edge_features=edge_features,
- node_pos_features=node_pos_features,
- z=z)
+ graph = GraphData(node_features=node_features,
+ edge_index=edge_index,
+ edge_features=edge_features,
+ node_pos_features=node_pos_features,
+ z=z)
assert graph.num_nodes == num_nodes
assert graph.num_node_features == num_node_features
@@ -97,13 +96,12 @@ class TestGraph(unittest.TestCase):
]
graph_list = [
- GraphData(
- node_features=np.random.random_sample((num_nodes_list[i],
- num_node_features)),
- edge_index=edge_index_list[i],
- edge_features=np.random.random_sample((num_edge_list[i],
- num_edge_features)),
- node_pos_features=None) for i in range(len(num_edge_list))
+ GraphData(node_features=np.random.random_sample(
+ (num_nodes_list[i], num_node_features)),
+ edge_index=edge_index_list[i],
+ edge_features=np.random.random_sample(
+ (num_edge_list[i], num_edge_features)),
+ node_pos_features=None) for i in range(len(num_edge_list))
]
batch = BatchGraphData(graph_list)
@@ -112,3 +110,22 @@ class TestGraph(unittest.TestCase):
assert batch.num_edges == sum(num_edge_list)
assert batch.num_edge_features == num_edge_features
assert batch.graph_index.shape == (sum(num_nodes_list),)
+
+ @pytest.mark.torch
+ def test_graph_data_single_atom_mol(self):
+ """
+ Test for graph data when no edges in the graph (example: single atom mol)
+ """
+ num_nodes, num_node_features = 1, 32
+ num_edges = 0
+ node_features = np.random.random_sample((num_nodes, num_node_features))
+ edge_index = np.empty((2, 0), dtype=int)
+
+ graph = GraphData(node_features=node_features, edge_index=edge_index)
+
+ assert graph.num_nodes == num_nodes
+ assert graph.num_node_features == num_node_features
+ assert graph.num_edges == num_edges
+ assert str(
+ graph
+ ) == 'GraphData(node_features=[1, 32], edge_index=[2, 0], edge_features=None)'
| GraphData class not working for single atom mols
## 🐛 Bug
The `GraphData` class checks whether `edge_index` contains an invalid node number using the condition `np.max(edge_index) >= len(node_features)`. For single-atom mols, `edge_index` is an empty array of shape `(2, 0)`, and the `np.max()` call raises an error for an empty array: `Error: zero-size array to reduction operation maximum which has no identity`.
## To Reproduce
Steps to reproduce the behaviour:
code to check the error:
```
import numpy as np
from deepchem.feat.graph_data import GraphData

num_nodes, num_node_features = 1, 32
num_edges = 0
node_features = np.random.random_sample((num_nodes, num_node_features))
edge_index = np.empty((2, 0), dtype=int)
graph = GraphData(node_features=node_features, edge_index=edge_index)
assert graph.num_nodes == num_nodes
assert graph.num_edges == num_edges
assert str(graph) == 'GraphData(node_features=[1, 32], edge_index=[2, 0], edge_features=None)'
```
error message:
`py::TestGraph::test_graph_data_single_atom_mol Failed with Error: zero-size array to reduction operation maximum which has no identity`
## Expected behaviour
No error should have been raised, and `str(graph)` should return `'GraphData(node_features=[1, 32], edge_index=[2, 0], edge_features=None)'`.
## Environment
* OS: Ubuntu 20.04.3 LTS
* Python version: Python 3.8.8
* DeepChem version: 2.6.1
* RDKit version (optional):
* TensorFlow version (optional):
* PyTorch version (optional):
* Any other relevant information: NA
## Additional context
This is how Deepchem generally computes edge index:
```
# construct edge (bond) index
src, dest = [], []
for bond in datapoint.GetBonds():
    # add edge list considering a directed graph
    start, end = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
    src += [start, end]
    dest += [end, start]
``` | 0.0 | [
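For illustration, a sketch of the guard that would make the constructor tolerate edge-less graphs (`edge_index_is_valid` is a hypothetical helper for readability; the real check lives inline in `GraphData.__init__`):
```
import numpy as np

def edge_index_is_valid(edge_index: np.ndarray, node_features: np.ndarray) -> bool:
    # np.max() only works on non-empty arrays, so a graph with no edges
    # (e.g. a single-atom mol) must skip the node-number check.
    if edge_index.size == 0:
        return True
    return np.max(edge_index) < len(node_features)
```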
"deepchem/feat/tests/test_graph_data.py::TestGraph::test_graph_data_single_atom_mol"
] | [
"deepchem/feat/tests/test_graph_data.py::TestGraph::test_batch_graph_data",
"deepchem/feat/tests/test_graph_data.py::TestGraph::test_invalid_graph_data"
] | 2022-07-06 08:15:57+00:00 | 1,866 |
|
deepset-ai__haystack-6594 | diff --git a/haystack/components/writers/document_writer.py b/haystack/components/writers/document_writer.py
index f4debb17..170ee24d 100644
--- a/haystack/components/writers/document_writer.py
+++ b/haystack/components/writers/document_writer.py
@@ -15,11 +15,15 @@ class DocumentWriter:
A component for writing documents to a DocumentStore.
"""
- def __init__(self, document_store: DocumentStore, policy: DuplicatePolicy = DuplicatePolicy.FAIL):
+ def __init__(self, document_store: DocumentStore, policy: DuplicatePolicy = DuplicatePolicy.NONE):
"""
Create a DocumentWriter component.
- :param policy: The policy to use when encountering duplicate documents (default is DuplicatePolicy.FAIL).
+ :param policy: the policy to apply when a Document with the same id already exists in the DocumentStore.
+ - `DuplicatePolicy.NONE`: Default policy, behaviour depends on the Document Store.
+ - `DuplicatePolicy.SKIP`: If a Document with the same id already exists, it is skipped and not written.
+ - `DuplicatePolicy.OVERWRITE`: If a Document with the same id already exists, it is overwritten.
+ - `DuplicatePolicy.FAIL`: If a Document with the same id already exists, an error is raised.
"""
self.document_store = document_store
self.policy = policy
diff --git a/haystack/core/component/connection.py b/haystack/core/component/connection.py
index c84c9b5b..c3118aa4 100644
--- a/haystack/core/component/connection.py
+++ b/haystack/core/component/connection.py
@@ -116,21 +116,24 @@ class Connection:
name_matches = [
(out_sock, in_sock) for out_sock, in_sock in possible_connections if in_sock.name == out_sock.name
]
- if len(name_matches) != 1:
- # TODO allow for multiple connections at once if there is no ambiguity?
- # TODO give priority to sockets that have no default values?
- connections_status_str = _connections_status(
- sender_node=sender_node,
- sender_sockets=sender_sockets,
- receiver_node=receiver_node,
- receiver_sockets=receiver_sockets,
- )
- raise PipelineConnectError(
- f"Cannot connect '{sender_node}' with '{receiver_node}': more than one connection is possible "
- "between these components. Please specify the connection name, like: "
- f"pipeline.connect('{sender_node}.{possible_connections[0][0].name}', "
- f"'{receiver_node}.{possible_connections[0][1].name}').\n{connections_status_str}"
- )
+ if len(name_matches) == 1:
+ # Sockets match by type and name, let's use this
+ return Connection(sender_node, name_matches[0][0], receiver_node, name_matches[0][1])
+
+ # TODO allow for multiple connections at once if there is no ambiguity?
+ # TODO give priority to sockets that have no default values?
+ connections_status_str = _connections_status(
+ sender_node=sender_node,
+ sender_sockets=sender_sockets,
+ receiver_node=receiver_node,
+ receiver_sockets=receiver_sockets,
+ )
+ raise PipelineConnectError(
+ f"Cannot connect '{sender_node}' with '{receiver_node}': more than one connection is possible "
+ "between these components. Please specify the connection name, like: "
+ f"pipeline.connect('{sender_node}.{possible_connections[0][0].name}', "
+ f"'{receiver_node}.{possible_connections[0][1].name}').\n{connections_status_str}"
+ )
match = possible_connections[0]
return Connection(sender_node, match[0], receiver_node, match[1])
diff --git a/releasenotes/notes/document-writer-default-policy-693027781629fc73.yaml b/releasenotes/notes/document-writer-default-policy-693027781629fc73.yaml
new file mode 100644
index 00000000..b90629fe
--- /dev/null
+++ b/releasenotes/notes/document-writer-default-policy-693027781629fc73.yaml
@@ -0,0 +1,6 @@
+---
+enhancements:
+ - |
+ Change `DocumentWriter` default `policy` from `DuplicatePolicy.FAIL` to `DuplicatePolicy.NONE`.
+ The `DocumentStore` protocol uses the same default so that different Document Stores can choose
+ the default policy that better fit.
diff --git a/releasenotes/notes/fix-connect-with-same-name-5ce470f7f0451362.yaml b/releasenotes/notes/fix-connect-with-same-name-5ce470f7f0451362.yaml
new file mode 100644
index 00000000..529cd69a
--- /dev/null
+++ b/releasenotes/notes/fix-connect-with-same-name-5ce470f7f0451362.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - |
+ Fix `Pipeline.connect()` so it connects sockets with same name if multiple sockets with compatible types are found.
| deepset-ai/haystack | f877704839dc585cf47305d97fd5011be367b907 | diff --git a/test/components/writers/test_document_writer.py b/test/components/writers/test_document_writer.py
index 9858538f..623d5ee5 100644
--- a/test/components/writers/test_document_writer.py
+++ b/test/components/writers/test_document_writer.py
@@ -16,7 +16,7 @@ class TestDocumentWriter:
"type": "haystack.components.writers.document_writer.DocumentWriter",
"init_parameters": {
"document_store": {"type": "haystack.testing.factory.MockedDocumentStore", "init_parameters": {}},
- "policy": "FAIL",
+ "policy": "NONE",
},
}
diff --git a/test/core/pipeline/test_connections.py b/test/core/pipeline/test_connections.py
index ba4d70ca..a28a038a 100644
--- a/test/core/pipeline/test_connections.py
+++ b/test/core/pipeline/test_connections.py
@@ -386,3 +386,18 @@ def test_parse_connection():
assert parse_connect_string("foobar") == ("foobar", None)
assert parse_connect_string("foo.bar") == ("foo", "bar")
assert parse_connect_string("foo.bar.baz") == ("foo", "bar.baz")
+
+
+def test_connect_with_same_socket_names():
+ SimpleComponent = factory.component_class("SimpleComponent", output_types={"documents": List})
+ ComponentWithMultipleInputs = factory.component_class(
+ "ComponentWithMultipleInputs", input_types={"question": Any, "documents": Any}
+ )
+
+ pipe = Pipeline()
+ pipe.add_component("simple", SimpleComponent())
+ pipe.add_component("multiple", ComponentWithMultipleInputs())
+
+ pipe.connect("simple", "multiple")
+
+ assert list(pipe.graph.edges) == [("simple", "multiple", "documents/documents")]
| Auto-connect between pipeline components intermittently failing
**Describe the bug**
The connections between components when using the `.connect()` method of a pipeline are non-deterministic.
We did find that *explicitly* defining the connection (as showcased in the documentation found [here](https://docs.haystack.deepset.ai/v2.0/docs/transformerssimilarityranker)) worked to resolve this issue. However, it seems possible to ensure that, if there *are* candidate connections that share a socket name, they are preferentially connected during graph construction.
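To make the name-preference idea concrete, here is a minimal sketch of the selection rule (a hypothetical helper, not the actual Haystack implementation; it assumes socket objects that expose a `.name` attribute):
```python
# Among all type-compatible (sender_socket, receiver_socket) pairs, prefer the single
# pair whose socket names match; only fail if the connection is still ambiguous.
def pick_connection(possible_connections):
    name_matches = [
        (out_sock, in_sock)
        for out_sock, in_sock in possible_connections
        if out_sock.name == in_sock.name
    ]
    if len(name_matches) == 1:
        return name_matches[0]
    if len(possible_connections) == 1:
        return possible_connections[0]
    raise ValueError(
        "Ambiguous connection, specify the sockets explicitly, e.g. "
        "pipeline.connect('tgi_ranker.documents', 'tgi_prompt_builder.documents')"
    )
```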
Hopefully all this makes sense - if not, I'm happy to follow-up!
**Error message**
There is no specific error.
**Expected behavior**
The logic should not connect two components through mismatched sockets, and it should not default to a random selection/decision.
**Additional context**
Creating connections like this will, semi-randomly, result in the pipeline graph being constructed incorrectly:
```python
pipeline.connect("tgi_fetcher", "tgi_converter")
pipeline.connect("tgi_converter", "tgi_splitter")
pipeline.connect("tgi_splitter", "tgi_ranker")
pipeline.connect("tgi_ranker", "tgi_prompt_builder")
pipeline.connect("tgi_prompt_builder", "tgi_llm")
```
*(Screenshot of the resulting pipeline graph omitted.)*
As you can hopefully see, the output of `ranker` (documents) is being connected to the `question` input of the `prompt_builder`. This is despite the fact that the `prompt_builder` has a `documents` input.
You can get around this by explicitly keying `documents` in the connection (and swapping any affected prompt templates), but needing to do so seems like undesirable behaviour.
```python
pipeline.connect("tgi_fetcher", "tgi_converter")
pipeline.connect("tgi_converter", "tgi_splitter")
pipeline.connect("tgi_splitter", "tgi_ranker")
pipeline.connect("tgi_ranker.documents", "tgi_prompt_builder.documents")
pipeline.connect("tgi_prompt_builder", "tgi_llm")
```
**To Reproduce**
```python
prompt_template = """
According to these documents:
{% for doc in documents %}
{{ doc.content }}
{% endfor %}
Answer the given question: {{question}}
Answer:
"""
prompt_builder = PromptBuilder(template=prompt_template)
fetcher = LinkContentFetcher()
converter = HTMLToDocument()
splitter = DocumentSplitter(split_length=100, split_overlap=5)
llm = GPTGenerator(api_key = openai_api_key, model_name = "gpt-4")
ranker = TransformersSimilarityRanker()
pipeline = Pipeline()
pipeline.add_component(name="fetcher", instance=fetcher)
pipeline.add_component(name="converter", instance=converter)
pipeline.add_component(name="splitter", instance=splitter)
pipeline.add_component(name="ranker", instance=ranker)
pipeline.add_component(name="prompt_builder", instance=prompt_builder)
pipeline.add_component(name="llm", instance=llm)
pipeline.connect("fetcher", "converter")
pipeline.connect("converter", "splitter")
pipeline.connect("splitter", "ranker")
pipeline.connect("ranker", "prompt_builder")
pipeline.connect("prompt_builder", "llm")
question = "What is our favorite animal?"
result = pipeline.run({"prompt_builder": {"question": question},
"ranker": {"query": question},
"fetcher": {"urls": ["https://haystack.deepset.ai/advent-of-haystack/day-1#challenge"]}})
print(result['llm']['replies'][0])
```
**FAQ Check**
- [x] Have you had a look at [our new FAQ page](https://docs.haystack.deepset.ai/docs/faq)?
**System:**
- OS: Linux
- GPU/CPU: CPU
- Haystack version (commit or version number): 2.0.0b2
- DocumentStore: N/A
- Reader: N/A
- Retriever: N/A
| 0.0 | [
"test/components/writers/test_document_writer.py::TestDocumentWriter::test_to_dict",
"test/core/pipeline/test_connections.py::test_connect_with_same_socket_names"
] | [
"[",
"test/components/writers/test_document_writer.py::TestDocumentWriter::test_to_dict_with_custom_init_parameters",
"test/components/writers/test_document_writer.py::TestDocumentWriter::test_from_dict",
"test/components/writers/test_document_writer.py::TestDocumentWriter::test_from_dict_without_docstore",
"test/components/writers/test_document_writer.py::TestDocumentWriter::test_from_dict_without_docstore_type",
"test/components/writers/test_document_writer.py::TestDocumentWriter::test_from_dict_nonexisting_docstore",
"test/components/writers/test_document_writer.py::TestDocumentWriter::test_run",
"test/components/writers/test_document_writer.py::TestDocumentWriter::test_run_skip_policy",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[same-primitives]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[receiving-primitive-is-optional]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[receiving-type-is-union-of-primitives]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[identical-unions]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[receiving-union-is-superset-of-sender]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[primitive-to-any]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[same-class]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[receiving-class-is-optional]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[class-to-any]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[subclass-to-class]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[receiving-type-is-union-of-classes]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[receiving-type-is-union-of-superclasses]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[same-lists]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[receiving-list-is-optional]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[list-of-primitive-to-list-of-any]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[list-of-same-classes]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[list-of-subclass-to-list-of-class]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[list-of-classes-to-list-of-any]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[nested-sequences-of-same-primitives]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[nested-sequences-of-primitives-to-nested-sequences-of-any]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[nested-sequences-of-same-classes]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[nested-sequences-of-subclasses-to-nested-sequences-of-classes]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[nested-sequences-of-classes-to-nested-sequences-of-any]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[same-dicts-of-primitives]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[dict-of-primitives-to-dict-of-any-keys]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[dict-of-primitives-to-dict-of-any-values]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[dict-of-primitives-to-dict-of-any-key-and-values]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[same-dicts-of-classes-values]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[dict-of-subclasses-to-dict-of-classes]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[dict-of-classes-to-dict-of-any-keys]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[dict-of-classes-to-dict-of-any-values]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[dict-of-classes-to-dict-of-any-key-and-values]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[nested-mappings-of-same-primitives]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[nested-mapping-of-primitives-to-nested-mapping-of-any-keys]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[nested-mapping-of-primitives-to-nested-mapping-of-higher-level-any-keys]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[nested-mapping-of-primitives-to-nested-mapping-of-any-values]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[nested-mapping-of-primitives-to-nested-mapping-of-any-keys-and-values]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[nested-mappings-of-same-classes]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[nested-mapping-of-subclasses-to-nested-mapping-of-classes]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[nested-mapping-of-classes-to-nested-mapping-of-any-keys]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[nested-mapping-of-classes-to-nested-mapping-of-higher-level-any-keys]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[nested-mapping-of-classes-to-nested-mapping-of-any-values]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[nested-mapping-of-classes-to-nested-mapping-of-any-keys-and-values]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[same-primitive-literal]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[same-enum-literal]",
"test/core/pipeline/test_connections.py::test_connect_compatible_types[identical-deeply-nested-complex-type]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[different-primitives]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[different-classes]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[class-to-subclass]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[any-to-primitive]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[any-to-class]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[sending-primitive-is-optional]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[sending-class-is-optional]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[sending-list-is-optional]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[sending-type-is-union]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[sending-union-is-superset-of-receiver]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[partially-overlapping-unions-with-primitives]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[partially-overlapping-unions-with-classes]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[different-lists-of-primitives]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[list-of-primitive-to-bare-list]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[list-of-primitive-to-list-object]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[different-lists-of-classes]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[lists-of-classes-to-subclasses]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[list-of-any-to-list-of-primitives]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[list-of-any-to-list-of-classes]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[nested-sequences-of-different-primitives]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[different-nested-sequences-of-same-primitives]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[nested-sequences-of-different-classes]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[nested-sequences-of-classes-to-subclasses]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[different-nested-sequences-of-same-class]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[nested-list-of-Any-to-nested-list-of-primitives]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[nested-list-of-Any-to-nested-list-of-classes]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[different-dict-of-primitive-keys]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[different-dict-of-primitive-values]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[different-dict-of-class-values]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[different-dict-of-class-to-subclass-values]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[dict-of-Any-keys-to-dict-of-primitives]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[dict-of-Any-values-to-dict-of-primitives]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[dict-of-Any-values-to-dict-of-classes]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[dict-of-Any-keys-and-values-to-dict-of-primitives]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[dict-of-Any-keys-and-values-to-dict-of-classes]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[different-nested-mappings-of-same-primitives]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[same-nested-mappings-of-different-primitive-keys]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[same-nested-mappings-of-different-higer-level-primitive-keys]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[same-nested-mappings-of-different-primitive-values]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[same-nested-mappings-of-different-class-values]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[same-nested-mappings-of-class-to-subclass-values]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[nested-mapping-of-Any-keys-to-nested-mapping-of-primitives]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[nested-mapping-of-higher-level-Any-keys-to-nested-mapping-of-primitives]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[nested-mapping-of-Any-values-to-nested-mapping-of-primitives]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[nested-mapping-of-Any-values-to-nested-mapping-of-classes]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[nested-mapping-of-Any-keys-and-values-to-nested-mapping-of-primitives]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[nested-mapping-of-Any-keys-and-values-to-nested-mapping-of-classes]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[different-literal-of-same-primitive]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[subset-literal]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[different-literal-of-same-enum]",
"test/core/pipeline/test_connections.py::test_connect_non_compatible_types[deeply-nested-complex-type-is-compatible-but-cannot-be-checked]",
"test/core/pipeline/test_connections.py::test_connect_sender_component_does_not_exist",
"test/core/pipeline/test_connections.py::test_connect_receiver_component_does_not_exist",
"test/core/pipeline/test_connections.py::test_connect_sender_socket_does_not_exist",
"test/core/pipeline/test_connections.py::test_connect_receiver_socket_does_not_exist",
"test/core/pipeline/test_connections.py::test_connect_many_outputs_to_the_same_input",
"test/core/pipeline/test_connections.py::test_connect_many_connections_possible_name_matches",
"test/core/pipeline/test_connections.py::test_connect_many_connections_possible_no_name_matches",
"test/core/pipeline/test_connections.py::test_parse_connection"
] | 2023-12-19 15:44:45+00:00 | 1,867 |
|
deepset-ai__haystack-6717 | diff --git a/haystack/core/component/component.py b/haystack/core/component/component.py
index 4a082873..7d5eb00d 100644
--- a/haystack/core/component/component.py
+++ b/haystack/core/component/component.py
@@ -139,7 +139,10 @@ class ComponentMeta(type):
instance.__canals_input__ = {}
run_signature = inspect.signature(getattr(cls, "run"))
for param in list(run_signature.parameters)[1:]: # First is 'self' and it doesn't matter.
- if run_signature.parameters[param].kind == inspect.Parameter.POSITIONAL_OR_KEYWORD: # ignore `**kwargs`
+ if run_signature.parameters[param].kind not in (
+ inspect.Parameter.VAR_POSITIONAL,
+ inspect.Parameter.VAR_KEYWORD,
+ ): # ignore variable args
socket_kwargs = {"name": param, "type": run_signature.parameters[param].annotation}
if run_signature.parameters[param].default != inspect.Parameter.empty:
socket_kwargs["default_value"] = run_signature.parameters[param].default
diff --git a/haystack/document_stores/in_memory/document_store.py b/haystack/document_stores/in_memory/document_store.py
index 027e9c4b..44b2f6d5 100644
--- a/haystack/document_stores/in_memory/document_store.py
+++ b/haystack/document_stores/in_memory/document_store.py
@@ -212,8 +212,11 @@ class InMemoryDocumentStore:
return_documents = []
for i in top_docs_positions:
doc = all_documents[i]
+ score = docs_scores[i]
+ if score <= 0.0:
+ continue
doc_fields = doc.to_dict()
- doc_fields["score"] = docs_scores[i]
+ doc_fields["score"] = score
return_document = Document.from_dict(doc_fields)
return_documents.append(return_document)
return return_documents
diff --git a/releasenotes/notes/component-kw-only-run-args-eedee8907232d2d4.yaml b/releasenotes/notes/component-kw-only-run-args-eedee8907232d2d4.yaml
new file mode 100644
index 00000000..68ef50fb
--- /dev/null
+++ b/releasenotes/notes/component-kw-only-run-args-eedee8907232d2d4.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - |
+ Fix ComponentMeta ignoring keyword-only parameters in the `run` method. ComponentMeta.__call__ handles the creation of InputSockets for the component's inputs when the latter has not explicitly called _Component.set_input_types(). This logic was not correctly handling keyword-only parameters.
diff --git a/releasenotes/notes/inmemorybm25retriever-zero-score-docs-67406062a76aa7f4.yaml b/releasenotes/notes/inmemorybm25retriever-zero-score-docs-67406062a76aa7f4.yaml
new file mode 100644
index 00000000..3ae44016
--- /dev/null
+++ b/releasenotes/notes/inmemorybm25retriever-zero-score-docs-67406062a76aa7f4.yaml
@@ -0,0 +1,3 @@
+---
+fixes:
+ - Prevent InMemoryBM25Retriever from returning documents with a score of 0.0.
| deepset-ai/haystack | 0616197b44ae95da03aad7e5ac3997b243bd735d | diff --git a/test/components/retrievers/test_in_memory_bm25_retriever.py b/test/components/retrievers/test_in_memory_bm25_retriever.py
index db5e82a3..4c1df2f2 100644
--- a/test/components/retrievers/test_in_memory_bm25_retriever.py
+++ b/test/components/retrievers/test_in_memory_bm25_retriever.py
@@ -113,15 +113,14 @@ class TestMemoryBM25Retriever:
InMemoryBM25Retriever.from_dict(data)
def test_retriever_valid_run(self, mock_docs):
- top_k = 5
ds = InMemoryDocumentStore()
ds.write_documents(mock_docs)
- retriever = InMemoryBM25Retriever(ds, top_k=top_k)
+ retriever = InMemoryBM25Retriever(ds, top_k=5)
result = retriever.run(query="PHP")
assert "documents" in result
- assert len(result["documents"]) == top_k
+ assert len(result["documents"]) == 1
assert result["documents"][0].content == "PHP is a popular programming language"
def test_invalid_run_wrong_store_type(self):
@@ -174,5 +173,5 @@ class TestMemoryBM25Retriever:
assert "retriever" in result
results_docs = result["retriever"]["documents"]
assert results_docs
- assert len(results_docs) == top_k
+ assert len(results_docs) == 1
assert results_docs[0].content == query_result
diff --git a/test/core/component/test_component.py b/test/core/component/test_component.py
index cba6fc42..f2835e62 100644
--- a/test/core/component/test_component.py
+++ b/test/core/component/test_component.py
@@ -176,3 +176,17 @@ def test_input_has_default_value():
comp = MockComponent()
assert comp.__canals_input__["value"].default_value == 42
assert not comp.__canals_input__["value"].is_mandatory
+
+
+def test_keyword_only_args():
+ @component
+ class MockComponent:
+ def __init__(self):
+ component.set_output_types(self, value=int)
+
+ def run(self, *, arg: int):
+ return {"value": arg}
+
+ comp = MockComponent()
+ component_inputs = {name: {"type": socket.type} for name, socket in comp.__canals_input__.items()}
+ assert component_inputs == {"arg": {"type": int}}
diff --git a/test/document_stores/test_in_memory.py b/test/document_stores/test_in_memory.py
index 1d3a3613..9ebcae41 100644
--- a/test/document_stores/test_in_memory.py
+++ b/test/document_stores/test_in_memory.py
@@ -5,8 +5,8 @@ import pandas as pd
import pytest
from haystack import Document
-from haystack.document_stores.in_memory import InMemoryDocumentStore
from haystack.document_stores.errors import DocumentStoreError, DuplicateDocumentError
+from haystack.document_stores.in_memory import InMemoryDocumentStore
from haystack.testing.document_store import DocumentStoreBaseTests
@@ -17,7 +17,7 @@ class TestMemoryDocumentStore(DocumentStoreBaseTests): # pylint: disable=R0904
@pytest.fixture
def document_store(self) -> InMemoryDocumentStore:
- return InMemoryDocumentStore()
+ return InMemoryDocumentStore(bm25_algorithm="BM25L")
def test_to_dict(self):
store = InMemoryDocumentStore()
@@ -73,7 +73,6 @@ class TestMemoryDocumentStore(DocumentStoreBaseTests): # pylint: disable=R0904
document_store.write_documents(docs)
def test_bm25_retrieval(self, document_store: InMemoryDocumentStore):
- document_store = InMemoryDocumentStore()
# Tests if the bm25_retrieval method returns the correct document based on the input query.
docs = [Document(content="Hello world"), Document(content="Haystack supports multiple languages")]
document_store.write_documents(docs)
@@ -106,7 +105,7 @@ class TestMemoryDocumentStore(DocumentStoreBaseTests): # pylint: disable=R0904
document_store.write_documents(docs)
# top_k = 2
- results = document_store.bm25_retrieval(query="languages", top_k=2)
+ results = document_store.bm25_retrieval(query="language", top_k=2)
assert len(results) == 2
# top_k = 3
@@ -141,7 +140,7 @@ class TestMemoryDocumentStore(DocumentStoreBaseTests): # pylint: disable=R0904
document_store.write_documents(docs)
results = document_store.bm25_retrieval(query="Python", top_k=1)
- assert len(results) == 1
+ assert len(results) == 0
document_store.write_documents([Document(content="Python is a popular programming language")])
results = document_store.bm25_retrieval(query="Python", top_k=1)
@@ -199,10 +198,10 @@ class TestMemoryDocumentStore(DocumentStoreBaseTests): # pylint: disable=R0904
docs = [Document(), Document(content="Gardening"), Document(content="Bird watching")]
document_store.write_documents(docs)
results = document_store.bm25_retrieval(query="doesn't matter, top_k is 10", top_k=10)
- assert len(results) == 2
+ assert len(results) == 0
def test_bm25_retrieval_with_filters(self, document_store: InMemoryDocumentStore):
- selected_document = Document(content="Gardening", meta={"selected": True})
+ selected_document = Document(content="Java is, well...", meta={"selected": True})
docs = [Document(), selected_document, Document(content="Bird watching")]
document_store.write_documents(docs)
results = document_store.bm25_retrieval(query="Java", top_k=10, filters={"selected": True})
@@ -224,10 +223,10 @@ class TestMemoryDocumentStore(DocumentStoreBaseTests): # pylint: disable=R0904
assert results[0].id == document.id
def test_bm25_retrieval_with_documents_with_mixed_content(self, document_store: InMemoryDocumentStore):
- double_document = Document(content="Gardening", embedding=[1.0, 2.0, 3.0])
+ double_document = Document(content="Gardening is a hobby", embedding=[1.0, 2.0, 3.0])
docs = [Document(embedding=[1.0, 2.0, 3.0]), double_document, Document(content="Bird watching")]
document_store.write_documents(docs)
- results = document_store.bm25_retrieval(query="Java", top_k=10, filters={"embedding": {"$not": None}})
+ results = document_store.bm25_retrieval(query="Gardening", top_k=10, filters={"embedding": {"$not": None}})
assert len(results) == 1
assert results[0].id == double_document.id
| MemoryBM25Retriever returns non relevant documents
**Describe the bug**
`MemoryBM25Retriever` currently always returns `top_k` `Document`s, even if they're not relevant.
The [`test_retriever_valid_run`](https://github.com/deepset-ai/haystack/blob/ccc9f010bbdc75e1c5f1d269a43a5f06f36acba1/test/preview/components/retrievers/test_memory_bm25_retriever.py#L119-L129) is a good example of the current behavior.
A simple fix would be to filter out `Document`s with `score==0.0` before scaling the score.
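Illustrative sketch of that filter (the `Document` import follows the repro below; where exactly it would be applied inside the document store is an assumption):
```python
from typing import List
from haystack.preview.dataclasses import Document  # same import as in the repro below

def keep_relevant(documents: List[Document], scores: List[float]) -> List[Document]:
    # Keep only documents with a strictly positive BM25 score, attaching the score.
    kept = []
    for doc, score in zip(documents, scores):
        if score <= 0.0:
            continue  # drop non-relevant documents instead of padding the result up to top_k
        fields = doc.to_dict()  # assumes Document exposes to_dict()/from_dict()
        fields["score"] = score
        kept.append(Document.from_dict(fields))
    return kept
```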
**Expected behavior**
Non-relevant `Document`s, i.e. those with a score of `0.0`, are not returned.
**Additional context**
@tstadel confirmed that it's unexpected we're returning `Document`s with a score of `0.0`.
**To Reproduce**
```
from haystack.preview.document_stores import MemoryDocumentStore
from haystack.preview.dataclasses import Document
from haystack.preview.components.retrievers import MemoryBM25Retriever
docs = [
Document(text="Javascript is a popular programming language"),
Document(text="Java is a popular programming language"),
Document(text="Python is a popular programming language"),
Document(text="Ruby is a popular programming language"),
Document(text="PHP is a popular programming language"),
]
ds = MemoryDocumentStore()
ds.write_documents(docs)
retriever = MemoryBM25Retriever(ds, top_k=5, scale_score=False)
result = retriever.run(query="PHP")
# This is wrong
assert len(result["documents"]) == 5
assert result["documents"][0].text == "PHP is a popular programming language"
assert result["documents"][1].score == 0.0
assert result["documents"][2].score == 0.0
assert result["documents"][3].score == 0.0
assert result["documents"][4].score == 0.0
# This is the expected behaviour
assert len(result["documents"]) == 1
assert result["documents"][0].text == "PHP is a popular programming language"
assert result["documents"][0].score > 0.0
```
| 0.0 | [
"test/components/retrievers/test_in_memory_bm25_retriever.py::TestMemoryBM25Retriever::test_retriever_valid_run",
"test/components/retrievers/test_in_memory_bm25_retriever.py::TestMemoryBM25Retriever::test_run_with_pipeline_and_top_k[Java-Java",
"test/components/retrievers/test_in_memory_bm25_retriever.py::TestMemoryBM25Retriever::test_run_with_pipeline_and_top_k[Ruby-Ruby",
"test/core/component/test_component.py::test_keyword_only_args",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_bm25_retrieval_with_updated_docs",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_bm25_retrieval_default_filter_for_text_and_dataframes"
] | [
"[",
"test/components/retrievers/test_in_memory_bm25_retriever.py::TestMemoryBM25Retriever::test_init_default",
"test/components/retrievers/test_in_memory_bm25_retriever.py::TestMemoryBM25Retriever::test_init_with_parameters",
"test/components/retrievers/test_in_memory_bm25_retriever.py::TestMemoryBM25Retriever::test_init_with_invalid_top_k_parameter",
"test/components/retrievers/test_in_memory_bm25_retriever.py::TestMemoryBM25Retriever::test_to_dict",
"test/components/retrievers/test_in_memory_bm25_retriever.py::TestMemoryBM25Retriever::test_to_dict_with_custom_init_parameters",
"test/components/retrievers/test_in_memory_bm25_retriever.py::TestMemoryBM25Retriever::test_from_dict",
"test/components/retrievers/test_in_memory_bm25_retriever.py::TestMemoryBM25Retriever::test_from_dict_without_docstore",
"test/components/retrievers/test_in_memory_bm25_retriever.py::TestMemoryBM25Retriever::test_from_dict_without_docstore_type",
"test/components/retrievers/test_in_memory_bm25_retriever.py::TestMemoryBM25Retriever::test_from_dict_nonexisting_docstore",
"test/components/retrievers/test_in_memory_bm25_retriever.py::TestMemoryBM25Retriever::test_invalid_run_wrong_store_type",
"test/components/retrievers/test_in_memory_bm25_retriever.py::TestMemoryBM25Retriever::test_run_with_pipeline[Javascript-Javascript",
"test/components/retrievers/test_in_memory_bm25_retriever.py::TestMemoryBM25Retriever::test_run_with_pipeline[Java-Java",
"test/components/retrievers/test_in_memory_bm25_retriever.py::TestMemoryBM25Retriever::test_run_with_pipeline_and_top_k[Javascript-Javascript",
"test/core/component/test_component.py::test_correct_declaration",
"test/core/component/test_component.py::test_correct_declaration_with_additional_readonly_property",
"test/core/component/test_component.py::test_correct_declaration_with_additional_writable_property",
"test/core/component/test_component.py::test_missing_run",
"test/core/component/test_component.py::test_set_input_types",
"test/core/component/test_component.py::test_set_output_types",
"test/core/component/test_component.py::test_output_types_decorator_with_compatible_type",
"test/core/component/test_component.py::test_component_decorator_set_it_as_component",
"test/core/component/test_component.py::test_input_has_default_value",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_no_filters",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_comparison_equal",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_comparison_equal_with_dataframe",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_comparison_equal_with_none",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_comparison_not_equal",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_comparison_not_equal_with_dataframe",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_comparison_not_equal_with_none",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_comparison_greater_than",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_comparison_greater_than_with_iso_date",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_comparison_greater_than_with_string",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_comparison_greater_than_with_dataframe",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_comparison_greater_than_with_list",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_comparison_greater_than_with_none",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_comparison_greater_than_equal",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_comparison_greater_than_equal_with_iso_date",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_comparison_greater_than_equal_with_string",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_comparison_greater_than_equal_with_dataframe",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_comparison_greater_than_equal_with_list",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_comparison_greater_than_equal_with_none",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_comparison_less_than",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_comparison_less_than_with_iso_date",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_comparison_less_than_with_string",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_comparison_less_than_with_dataframe",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_comparison_less_than_with_list",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_comparison_less_than_with_none",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_comparison_less_than_equal",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_comparison_less_than_equal_with_iso_date",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_comparison_less_than_equal_with_string",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_comparison_less_than_equal_with_dataframe",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_comparison_less_than_equal_with_list",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_comparison_less_than_equal_with_none",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_comparison_in",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_comparison_in_with_with_non_list",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_comparison_in_with_with_non_list_iterable",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_comparison_not_in",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_comparison_not_in_with_with_non_list",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_comparison_not_in_with_with_non_list_iterable",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_and_operator",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_or_operator",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_not_operator",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_missing_top_level_operator_key",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_missing_top_level_conditions_key",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_missing_condition_field_key",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_missing_condition_operator_key",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_missing_condition_value_key",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_delete_documents",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_delete_documents_empty_document_store",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_delete_documents_non_existing_document",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_write_documents_duplicate_fail",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_write_documents_duplicate_skip",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_write_documents_duplicate_overwrite",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_write_documents_invalid_input",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_count_empty",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_count_not_empty",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_to_dict",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_to_dict_with_custom_init_parameters",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_from_dict",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_write_documents",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_bm25_retrieval",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_bm25_retrieval_with_empty_document_store",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_bm25_retrieval_empty_query",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_bm25_retrieval_with_different_top_k",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_bm25_retrieval_with_two_queries",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_bm25_retrieval_with_scale_score",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_bm25_retrieval_with_table_content",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_bm25_retrieval_with_text_and_table_content",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_bm25_retrieval_with_filters",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_bm25_retrieval_with_filters_keeps_default_filters",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_bm25_retrieval_with_filters_on_text_or_dataframe",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_bm25_retrieval_with_documents_with_mixed_content",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_embedding_retrieval",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_embedding_retrieval_invalid_query",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_embedding_retrieval_no_embeddings",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_embedding_retrieval_some_documents_wo_embeddings",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_embedding_retrieval_documents_different_embedding_sizes",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_embedding_retrieval_query_documents_different_embedding_sizes",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_embedding_retrieval_with_different_top_k",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_embedding_retrieval_with_scale_score",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_embedding_retrieval_return_embedding",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_compute_cosine_similarity_scores",
"test/document_stores/test_in_memory.py::TestMemoryDocumentStore::test_compute_dot_product_similarity_scores"
] | 2024-01-10 10:15:54+00:00 | 1,868 |
|
deepset-ai__haystack-6941 | diff --git a/haystack/components/extractors/named_entity_extractor.py b/haystack/components/extractors/named_entity_extractor.py
index 6ca988fa..f8d8d717 100644
--- a/haystack/components/extractors/named_entity_extractor.py
+++ b/haystack/components/extractors/named_entity_extractor.py
@@ -126,6 +126,9 @@ class NamedEntityExtractor:
raise ComponentError(f"Unknown NER backend '{type(backend).__name__}' for extractor")
def warm_up(self):
+ """
+ Initialize the named entity extractor backend.
+ """
try:
self._backend.initialize()
except Exception as e:
@@ -135,6 +138,16 @@ class NamedEntityExtractor:
@component.output_types(documents=List[Document])
def run(self, documents: List[Document], batch_size: int = 1) -> Dict[str, Any]:
+ """
+ Run the named-entity extractor.
+
+ :param documents:
+ Documents to process.
+ :param batch_size:
+ Batch size used for processing the documents.
+ :returns:
+ The processed documents.
+ """
texts = [doc.content if doc.content is not None else "" for doc in documents]
annotations = self._backend.annotate(texts, batch_size=batch_size)
@@ -150,6 +163,9 @@ class NamedEntityExtractor:
return {"documents": documents}
def to_dict(self) -> Dict[str, Any]:
+ """
+ Serialize this component to a dictionary.
+ """
return default_to_dict(
self,
backend=self._backend.type,
@@ -160,6 +176,12 @@ class NamedEntityExtractor:
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "NamedEntityExtractor":
+ """
+ Deserialize the component from a dictionary.
+
+ :param data:
+ The dictionary to deserialize from.
+ """
try:
init_params = data["init_parameters"]
init_params["device"] = ComponentDevice.from_dict(init_params["device"])
diff --git a/haystack/core/component/component.py b/haystack/core/component/component.py
index 95ae87d7..c231e41d 100644
--- a/haystack/core/component/component.py
+++ b/haystack/core/component/component.py
@@ -160,6 +160,21 @@ class ComponentMeta(type):
return instance
+def _component_repr(component: Component) -> str:
+ """
+ All Components override their __repr__ method with this one.
+ It prints the component name and the input/output sockets.
+ """
+ result = object.__repr__(component)
+ if pipeline := getattr(component, "__haystack_added_to_pipeline__"):
+ # This Component has been added in a Pipeline, let's get the name from there.
+ result += f"\n{pipeline.get_component_name(component)}"
+
+ # We're explicitly ignoring the type here because we're sure that the component
+ # has the __haystack_input__ and __haystack_output__ attributes at this point
+ return f"{result}\n{component.__haystack_input__}\n{component.__haystack_output__}" # type: ignore[attr-defined]
+
+
class _Component:
"""
See module's docstring.
@@ -332,6 +347,9 @@ class _Component:
self.registry[class_path] = class_
logger.debug("Registered Component %s", class_)
+ # Override the __repr__ method with a default one
+ class_.__repr__ = _component_repr
+
return class_
def __call__(self, class_):
diff --git a/haystack/core/component/sockets.py b/haystack/core/component/sockets.py
index 25bf4fdc..374ae630 100644
--- a/haystack/core/component/sockets.py
+++ b/haystack/core/component/sockets.py
@@ -82,8 +82,9 @@ class Sockets:
return pipeline.get_component_name(self._component)
# This Component has not been added to a Pipeline yet, so we can't know its name.
- # Let's use the class name instead.
- return str(self._component)
+ # Let's use default __repr__. We don't call repr() directly as Components have a custom
+ # __repr__ method and that would lead to infinite recursion since we call Sockets.__repr__ in it.
+ return object.__repr__(self._component)
def __getattribute__(self, name):
try:
@@ -96,12 +97,10 @@ class Sockets:
return object.__getattribute__(self, name)
def __repr__(self) -> str:
- result = self._component_name()
+ result = ""
if self._sockets_io_type == InputSocket:
- result += " inputs:\n"
+ result = "Inputs:\n"
elif self._sockets_io_type == OutputSocket:
- result += " outputs:\n"
+ result = "Outputs:\n"
- result += "\n".join([f" - {n}: {_type_name(s.type)}" for n, s in self._sockets_dict.items()])
-
- return result
+ return result + "\n".join([f" - {n}: {_type_name(s.type)}" for n, s in self._sockets_dict.items()])
diff --git a/releasenotes/notes/component-repr-a6486af81530bc3b.yaml b/releasenotes/notes/component-repr-a6486af81530bc3b.yaml
new file mode 100644
index 00000000..3a7439e9
--- /dev/null
+++ b/releasenotes/notes/component-repr-a6486af81530bc3b.yaml
@@ -0,0 +1,6 @@
+---
+enhancements:
+ - |
+ Add `__repr__` to all Components to print their I/O.
+ This can also be useful in Jupyter notebooks as this will be shown as a cell output
+ if the it's the last expression in a cell.
| deepset-ai/haystack | 74683fe74d400820a442cca03bb69473824e841a | diff --git a/e2e/pipelines/test_rag_pipelines_e2e.py b/e2e/pipelines/test_rag_pipelines_e2e.py
index fa3aeb8c..d38053c9 100644
--- a/e2e/pipelines/test_rag_pipelines_e2e.py
+++ b/e2e/pipelines/test_rag_pipelines_e2e.py
@@ -30,7 +30,7 @@ def test_bm25_rag_pipeline(tmp_path):
rag_pipeline = Pipeline()
rag_pipeline.add_component(instance=InMemoryBM25Retriever(document_store=InMemoryDocumentStore()), name="retriever")
rag_pipeline.add_component(instance=PromptBuilder(template=prompt_template), name="prompt_builder")
- rag_pipeline.add_component(instance=OpenAIGenerator(api_key=os.environ.get("OPENAI_API_KEY")), name="llm")
+ rag_pipeline.add_component(instance=OpenAIGenerator(), name="llm")
rag_pipeline.add_component(instance=AnswerBuilder(), name="answer_builder")
rag_pipeline.connect("retriever", "prompt_builder.documents")
rag_pipeline.connect("prompt_builder", "llm")
@@ -101,7 +101,7 @@ def test_embedding_retrieval_rag_pipeline(tmp_path):
instance=InMemoryEmbeddingRetriever(document_store=InMemoryDocumentStore()), name="retriever"
)
rag_pipeline.add_component(instance=PromptBuilder(template=prompt_template), name="prompt_builder")
- rag_pipeline.add_component(instance=OpenAIGenerator(api_key=os.environ.get("OPENAI_API_KEY")), name="llm")
+ rag_pipeline.add_component(instance=OpenAIGenerator(), name="llm")
rag_pipeline.add_component(instance=AnswerBuilder(), name="answer_builder")
rag_pipeline.connect("text_embedder", "retriever")
rag_pipeline.connect("retriever", "prompt_builder.documents")
diff --git a/test/core/component/test_component.py b/test/core/component/test_component.py
index bbe2605f..b093c32b 100644
--- a/test/core/component/test_component.py
+++ b/test/core/component/test_component.py
@@ -4,6 +4,7 @@ import pytest
from haystack.core.component import Component, InputSocket, OutputSocket, component
from haystack.core.errors import ComponentError
+from haystack.core.pipeline import Pipeline
def test_correct_declaration():
@@ -189,3 +190,31 @@ def test_keyword_only_args():
comp = MockComponent()
component_inputs = {name: {"type": socket.type} for name, socket in comp.__haystack_input__._sockets_dict.items()}
assert component_inputs == {"arg": {"type": int}}
+
+
+def test_repr():
+ @component
+ class MockComponent:
+ def __init__(self):
+ component.set_output_types(self, value=int)
+
+ def run(self, value: int):
+ return {"value": value}
+
+ comp = MockComponent()
+ assert repr(comp) == f"{object.__repr__(comp)}\nInputs:\n - value: int\nOutputs:\n - value: int"
+
+
+def test_repr_added_to_pipeline():
+ @component
+ class MockComponent:
+ def __init__(self):
+ component.set_output_types(self, value=int)
+
+ def run(self, value: int):
+ return {"value": value}
+
+ pipe = Pipeline()
+ comp = MockComponent()
+ pipe.add_component("my_component", comp)
+ assert repr(comp) == f"{object.__repr__(comp)}\nmy_component\nInputs:\n - value: int\nOutputs:\n - value: int"
diff --git a/test/core/component/test_sockets.py b/test/core/component/test_sockets.py
index ac3b01bd..6e942b84 100644
--- a/test/core/component/test_sockets.py
+++ b/test/core/component/test_sockets.py
@@ -23,19 +23,6 @@ class TestSockets:
assert io._component == comp
assert io._sockets_dict == {}
- def test_component_name(self):
- comp = component_class("SomeComponent")()
- io = Sockets(component=comp, sockets_dict={}, sockets_io_type=InputSocket)
- assert io._component_name() == str(comp)
-
- def test_component_name_added_to_pipeline(self):
- comp = component_class("SomeComponent")()
- pipeline = Pipeline()
- pipeline.add_component("my_component", comp)
-
- io = Sockets(component=comp, sockets_dict={}, sockets_io_type=InputSocket)
- assert io._component_name() == "my_component"
-
def test_getattribute(self):
comp = component_class("SomeComponent", input_types={"input_1": int, "input_2": int})()
io = Sockets(component=comp, sockets_dict=comp.__haystack_input__._sockets_dict, sockets_io_type=InputSocket)
@@ -54,4 +41,4 @@ class TestSockets:
comp = component_class("SomeComponent", input_types={"input_1": int, "input_2": int})()
io = Sockets(component=comp, sockets_dict=comp.__haystack_input__._sockets_dict, sockets_io_type=InputSocket)
res = repr(io)
- assert res == f"{comp} inputs:\n - input_1: int\n - input_2: int"
+ assert res == "Inputs:\n - input_1: int\n - input_2: int"
| RAG pipeline e2e test fails to resolve api key
**Describe the bug**
`e2e/pipelines/test_rag_pipelines_e2e.py` fails resolving the OpenAI api key after the PR that changed the secret management. https://github.com/deepset-ai/haystack/pull/6887
Here is the failing test run: https://github.com/deepset-ai/haystack/actions/runs/7792596449/job/21250892136
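The e2e test still builds the generator with a raw string taken from the environment, while the new secret management expects a `Secret` (or no argument at all). A sketch of the adjustment, mirroring the accompanying test change (import paths assumed):
```python
from haystack.components.generators import OpenAIGenerator

# Let the component resolve OPENAI_API_KEY from the environment on its own:
llm = OpenAIGenerator()

# Or wrap an explicit secret instead of passing a plain string:
# from haystack.utils import Secret
# llm = OpenAIGenerator(api_key=Secret.from_env_var("OPENAI_API_KEY"))
```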
**Error message**
```python
self.client = OpenAI(api_key=api_key.resolve_value(), organization=organization, base_url=api_base_url)
E AttributeError: 'str' object has no attribute 'resolve_value'
``` | 0.0 | [
"test/core/component/test_component.py::test_repr",
"test/core/component/test_component.py::test_repr_added_to_pipeline",
"test/core/component/test_sockets.py::TestSockets::test_repr"
] | [
"test/core/component/test_component.py::test_correct_declaration",
"test/core/component/test_component.py::test_correct_declaration_with_additional_readonly_property",
"test/core/component/test_component.py::test_correct_declaration_with_additional_writable_property",
"test/core/component/test_component.py::test_missing_run",
"test/core/component/test_component.py::test_set_input_types",
"test/core/component/test_component.py::test_set_output_types",
"test/core/component/test_component.py::test_output_types_decorator_with_compatible_type",
"test/core/component/test_component.py::test_component_decorator_set_it_as_component",
"test/core/component/test_component.py::test_input_has_default_value",
"test/core/component/test_component.py::test_keyword_only_args",
"test/core/component/test_sockets.py::TestSockets::test_init",
"test/core/component/test_sockets.py::TestSockets::test_init_with_empty_sockets",
"test/core/component/test_sockets.py::TestSockets::test_getattribute",
"test/core/component/test_sockets.py::TestSockets::test_getattribute_non_existing_socket"
] | 2024-02-08 09:38:11+00:00 | 1,869 |
|
deepset-ai__haystack-6963 | diff --git a/haystack/core/pipeline/pipeline.py b/haystack/core/pipeline/pipeline.py
index 632cc73e..98ba8df8 100644
--- a/haystack/core/pipeline/pipeline.py
+++ b/haystack/core/pipeline/pipeline.py
@@ -71,6 +71,34 @@ class Pipeline:
return False
return self.to_dict() == other.to_dict()
+ def __repr__(self) -> str:
+ """
+ Returns a text representation of the Pipeline.
+ If this runs in a Jupyter notebook, it will instead display the Pipeline image.
+ """
+ if is_in_jupyter():
+ # If we're in a Jupyter notebook we want to display the image instead of the text repr.
+ self.show()
+ return ""
+
+ res = f"{object.__repr__(self)}\n"
+ if self.metadata:
+ res += "🧱 Metadata\n"
+ for k, v in self.metadata.items():
+ res += f" - {k}: {v}\n"
+
+ res += "🚅 Components\n"
+ for name, instance in self.graph.nodes(data="instance"):
+ res += f" - {name}: {instance.__class__.__name__}\n"
+
+ res += "🛤️ Connections\n"
+ for sender, receiver, edge_data in self.graph.edges(data=True):
+ sender_socket = edge_data["from_socket"].name
+ receiver_socket = edge_data["to_socket"].name
+ res += f" - {sender}.{sender_socket} -> {receiver}.{receiver_socket} ({edge_data['conn_type']})\n"
+
+ return res
+
def to_dict(self) -> Dict[str, Any]:
"""
Returns this Pipeline instance as a dictionary.
diff --git a/releasenotes/notes/enhance-repr-0c5efa1e2ca6bafa.yaml b/releasenotes/notes/enhance-repr-0c5efa1e2ca6bafa.yaml
new file mode 100644
index 00000000..a9f1914e
--- /dev/null
+++ b/releasenotes/notes/enhance-repr-0c5efa1e2ca6bafa.yaml
@@ -0,0 +1,5 @@
+---
+enhancements:
+ - |
+ Customize `Pipeline.__repr__()` to return a nice text representation of it.
+ If run on a Jupyter notebook it will instead have the same behaviour as `Pipeline.show()`.
| deepset-ai/haystack | a7f36fdd3226cd822c600c724c2b72005180269a | diff --git a/test/core/pipeline/test_pipeline.py b/test/core/pipeline/test_pipeline.py
index c6dec132..4e66f38f 100644
--- a/test/core/pipeline/test_pipeline.py
+++ b/test/core/pipeline/test_pipeline.py
@@ -79,6 +79,49 @@ def test_get_component_name_not_added_to_pipeline():
assert pipe.get_component_name(some_component) == ""
+@patch("haystack.core.pipeline.pipeline.is_in_jupyter")
+def test_repr(mock_is_in_jupyter):
+ pipe = Pipeline(metadata={"test": "test"}, max_loops_allowed=42)
+ pipe.add_component("add_two", AddFixedValue(add=2))
+ pipe.add_component("add_default", AddFixedValue())
+ pipe.add_component("double", Double())
+ pipe.connect("add_two", "double")
+ pipe.connect("double", "add_default")
+
+ expected_repr = (
+ f"{object.__repr__(pipe)}\n"
+ "🧱 Metadata\n"
+ " - test: test\n"
+ "🚅 Components\n"
+ " - add_two: AddFixedValue\n"
+ " - add_default: AddFixedValue\n"
+ " - double: Double\n"
+ "🛤️ Connections\n"
+ " - add_two.result -> double.value (int)\n"
+ " - double.value -> add_default.value (int)\n"
+ )
+ # Simulate not being in a notebook
+ mock_is_in_jupyter.return_value = False
+ assert repr(pipe) == expected_repr
+
+
+@patch("haystack.core.pipeline.pipeline.is_in_jupyter")
+def test_repr_in_notebook(mock_is_in_jupyter):
+ pipe = Pipeline(metadata={"test": "test"}, max_loops_allowed=42)
+ pipe.add_component("add_two", AddFixedValue(add=2))
+ pipe.add_component("add_default", AddFixedValue())
+ pipe.add_component("double", Double())
+ pipe.connect("add_two", "double")
+ pipe.connect("double", "add_default")
+
+ # Simulate being in a notebook
+ mock_is_in_jupyter.return_value = True
+
+ with patch.object(Pipeline, "show") as mock_show:
+ assert repr(pipe) == ""
+ mock_show.assert_called_once_with()
+
+
def test_run_with_component_that_does_not_return_dict():
BrokenComponent = component_class(
"BrokenComponent", input_types={"a": int}, output_types={"b": int}, output=1 # type:ignore
| `Pipeline.__repr__` should display an easy to understand representation of the Pipeline
When creating and working with a `Pipeline`, it's useful to quickly get an easy-to-understand text representation of it.
This should display connections between components, the I/O of the Pipeline, missing mandatory connections and other useful information.
This is also useful when working in a Jupyter notebook: if the `Pipeline` is the last expression in a cell, the user will get a nice representation of the `Pipeline` for free.
Some examples:
Pandas' DataFrame
DocArray document
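For Haystack's `Pipeline`, the text representation could look roughly like this (a sketch; import path assumed, output shape taken from the patch in this record):
```python
from haystack import Pipeline  # import path assumed

pipe = Pipeline(metadata={"author": "me"})
# ... pipe.add_component(...) and pipe.connect(...) as usual ...

print(repr(pipe))
# Expected shape of the output:
#   <haystack.core.pipeline.pipeline.Pipeline object at 0x...>
#   🧱 Metadata
#     - author: me
#   🚅 Components
#     - <name>: <ComponentClass>
#   🛤️ Connections
#     - <sender>.<socket> -> <receiver>.<socket> (<type>)
```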
| 0.0 | [
"test/core/pipeline/test_pipeline.py::test_repr",
"test/core/pipeline/test_pipeline.py::test_repr_in_notebook"
] | [
"test/core/pipeline/test_pipeline.py::test_show_in_notebook",
"test/core/pipeline/test_pipeline.py::test_show_not_in_notebook",
"test/core/pipeline/test_pipeline.py::test_draw",
"test/core/pipeline/test_pipeline.py::test_add_component_to_different_pipelines",
"test/core/pipeline/test_pipeline.py::test_get_component_name",
"test/core/pipeline/test_pipeline.py::test_get_component_name_not_added_to_pipeline",
"test/core/pipeline/test_pipeline.py::test_run_with_component_that_does_not_return_dict",
"test/core/pipeline/test_pipeline.py::test_to_dict",
"test/core/pipeline/test_pipeline.py::test_from_dict",
"test/core/pipeline/test_pipeline.py::test_from_dict_with_empty_dict",
"test/core/pipeline/test_pipeline.py::test_from_dict_with_components_instances",
"test/core/pipeline/test_pipeline.py::test_from_dict_without_component_type",
"test/core/pipeline/test_pipeline.py::test_from_dict_without_registered_component_type",
"test/core/pipeline/test_pipeline.py::test_from_dict_without_connection_sender",
"test/core/pipeline/test_pipeline.py::test_from_dict_without_connection_receiver",
"test/core/pipeline/test_pipeline.py::test_falsy_connection",
"test/core/pipeline/test_pipeline.py::test_describe_input_only_no_inputs_components",
"test/core/pipeline/test_pipeline.py::test_describe_input_some_components_with_no_inputs",
"test/core/pipeline/test_pipeline.py::test_describe_input_all_components_have_inputs",
"test/core/pipeline/test_pipeline.py::test_describe_output_multiple_possible",
"test/core/pipeline/test_pipeline.py::test_describe_output_single",
"test/core/pipeline/test_pipeline.py::test_describe_no_outputs"
] | 2024-02-08 17:34:44+00:00 | 1,870 |
|
deepset-ai__haystack-7038 | diff --git a/haystack/components/evaluators/statistical_evaluator.py b/haystack/components/evaluators/statistical_evaluator.py
index 6f65fc10..a6c00964 100644
--- a/haystack/components/evaluators/statistical_evaluator.py
+++ b/haystack/components/evaluators/statistical_evaluator.py
@@ -1,4 +1,5 @@
import collections
+import itertools
from enum import Enum
from typing import Any, Dict, List, Union
@@ -16,6 +17,8 @@ class StatisticalMetric(Enum):
F1 = "f1"
EM = "exact_match"
+ RECALL_SINGLE_HIT = "recall_single_hit"
+ RECALL_MULTI_HIT = "recall_multi_hit"
@classmethod
def from_str(cls, metric: str) -> "StatisticalMetric":
@@ -47,7 +50,12 @@ class StatisticalEvaluator:
metric = StatisticalMetric.from_str(metric)
self._metric = metric
- self._metric_function = {StatisticalMetric.F1: self._f1, StatisticalMetric.EM: self._exact_match}[self._metric]
+ self._metric_function = {
+ StatisticalMetric.F1: self._f1,
+ StatisticalMetric.EM: self._exact_match,
+ StatisticalMetric.RECALL_SINGLE_HIT: self._recall_single_hit,
+ StatisticalMetric.RECALL_MULTI_HIT: self._recall_multi_hit,
+ }[self._metric]
def to_dict(self) -> Dict[str, Any]:
return default_to_dict(self, metric=self._metric.value)
@@ -68,9 +76,6 @@ class StatisticalEvaluator:
:returns: A dictionary with the following outputs:
* `result` - Calculated result of the chosen metric.
"""
- if len(labels) != len(predictions):
- raise ValueError("The number of predictions and labels must be the same.")
-
return {"result": self._metric_function(labels, predictions)}
@staticmethod
@@ -78,6 +83,9 @@ class StatisticalEvaluator:
"""
Measure word overlap between predictions and labels.
"""
+ if len(labels) != len(predictions):
+ raise ValueError("The number of predictions and labels must be the same.")
+
if len(predictions) == 0:
# We expect callers of this function already checked if predictions and labels are equal length
return 0.0
@@ -105,8 +113,40 @@ class StatisticalEvaluator:
"""
Measure the proportion of cases where predictiond is identical to the the expected label.
"""
+ if len(labels) != len(predictions):
+ raise ValueError("The number of predictions and labels must be the same.")
+
if len(predictions) == 0:
# We expect callers of this function already checked if predictions and labels are equal length
return 0.0
score_list = np_array(predictions) == np_array(labels)
return np_mean(score_list)
+
+ @staticmethod
+ def _recall_single_hit(labels: List[str], predictions: List[str]) -> float:
+ """
+ Measures how many times a label is present in at least one prediction.
+ If the same label is found in multiple predictions it is only counted once.
+ """
+ if len(labels) == 0:
+ return 0.0
+
+ # In Recall Single Hit we only consider if a label is present in at least one prediction.
+ # No need to count multiple occurrences of the same label in different predictions
+ retrieved_labels = {l for l, p in itertools.product(labels, predictions) if l in p}
+ return len(retrieved_labels) / len(labels)
+
+ @staticmethod
+ def _recall_multi_hit(labels: List[str], predictions: List[str]) -> float:
+ """
+ Measures how many times a label is present in at least one or more predictions.
+ """
+ if len(labels) == 0:
+ return 0.0
+
+ correct_retrievals = 0
+ for label, prediction in itertools.product(labels, predictions):
+ if label in prediction:
+ correct_retrievals += 1
+
+ return correct_retrievals / len(labels)
| deepset-ai/haystack | 5910b4adc9b2688155abb8d2290e5cf56833eb0b | diff --git a/test/components/evaluators/test_statistical_evaluator.py b/test/components/evaluators/test_statistical_evaluator.py
index e98899cb..619b2584 100644
--- a/test/components/evaluators/test_statistical_evaluator.py
+++ b/test/components/evaluators/test_statistical_evaluator.py
@@ -121,3 +121,71 @@ class TestStatisticalEvaluatorExactMatch:
result = evaluator.run(labels=labels, predictions=predictions)
assert len(result) == 1
assert result["result"] == 2 / 3
+
+
+class TestStatisticalEvaluatorRecallSingleHit:
+ def test_run(self):
+ evaluator = StatisticalEvaluator(metric=StatisticalMetric.RECALL_SINGLE_HIT)
+ labels = ["Eiffel Tower", "Louvre Museum", "Colosseum", "Trajan's Column"]
+ predictions = [
+ "The Eiffel Tower, completed in 1889, symbolizes Paris's cultural magnificence.",
+ "The Eiffel Tower max height is 330 meters.",
+ "Louvre Museum is the world's largest art museum and a historic monument in Paris, France.",
+ "The Leaning Tower of Pisa is the campanile, or freestanding bell tower, of Pisa Cathedral.",
+ ]
+ result = evaluator.run(labels=labels, predictions=predictions)
+ assert len(result) == 1
+ assert result["result"] == 2 / 4
+
+ def test_run_with_empty_labels(self):
+ evaluator = StatisticalEvaluator(metric=StatisticalMetric.RECALL_SINGLE_HIT)
+ predictions = [
+ "The Eiffel Tower, completed in 1889, symbolizes Paris's cultural magnificence.",
+ "The Eiffel Tower max height is 330 meters.",
+ "Louvre Museum is the world's largest art museum and a historic monument in Paris, France.",
+ "The Leaning Tower of Pisa is the campanile, or freestanding bell tower, of Pisa Cathedral.",
+ ]
+ result = evaluator.run(labels=[], predictions=predictions)
+ assert len(result) == 1
+ assert result["result"] == 0.0
+
+ def test_run_with_empty_predictions(self):
+ evaluator = StatisticalEvaluator(metric=StatisticalMetric.RECALL_SINGLE_HIT)
+ labels = ["Eiffel Tower", "Louvre Museum", "Colosseum", "Trajan's Column"]
+ result = evaluator.run(labels=labels, predictions=[])
+ assert len(result) == 1
+ assert result["result"] == 0.0
+
+
+class TestStatisticalEvaluatorRecallMultiHit:
+ def test_run(self):
+ evaluator = StatisticalEvaluator(metric=StatisticalMetric.RECALL_MULTI_HIT)
+ labels = ["Eiffel Tower", "Louvre Museum", "Colosseum", "Trajan's Column"]
+ predictions = [
+ "The Eiffel Tower, completed in 1889, symbolizes Paris's cultural magnificence.",
+ "The Eiffel Tower max height is 330 meters.",
+ "Louvre Museum is the world's largest art museum and a historic monument in Paris, France.",
+ "The Leaning Tower of Pisa is the campanile, or freestanding bell tower, of Pisa Cathedral.",
+ ]
+ result = evaluator.run(labels=labels, predictions=predictions)
+ assert len(result) == 1
+ assert result["result"] == 0.75
+
+ def test_run_with_empty_labels(self):
+ evaluator = StatisticalEvaluator(metric=StatisticalMetric.RECALL_MULTI_HIT)
+ predictions = [
+ "The Eiffel Tower, completed in 1889, symbolizes Paris's cultural magnificence.",
+ "The Eiffel Tower max height is 330 meters.",
+ "Louvre Museum is the world's largest art museum and a historic monument in Paris, France.",
+ "The Leaning Tower of Pisa is the campanile, or freestanding bell tower, of Pisa Cathedral.",
+ ]
+ result = evaluator.run(labels=[], predictions=predictions)
+ assert len(result) == 1
+ assert result["result"] == 0.0
+
+ def test_run_with_empty_predictions(self):
+ evaluator = StatisticalEvaluator(metric=StatisticalMetric.RECALL_MULTI_HIT)
+ labels = ["Eiffel Tower", "Louvre Museum", "Colosseum", "Trajan's Column"]
+ result = evaluator.run(labels=labels, predictions=[])
+ assert len(result) == 1
+ assert result["result"] == 0.0
| Implement function to calculate Recall metric
As specified in proposal #5794 we need to implement a function to calculate the Recall metric.
Ideally the function should be part of the private interface and called only through the `calculate_metrics` function (see #6063). `_calculate_recall()` could be a nice name.
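As a rough illustration, here is a minimal sketch of the two recall flavours this could cover, computed over plain lists of label and prediction strings; it is a standalone example, and the function names are not the actual private API:

```python
# Hedged sketch of single-hit and multi-hit recall over plain string lists.
from itertools import product


def recall_single_hit(labels, predictions):
    """Fraction of labels found in at least one prediction (each label counted once)."""
    if not labels:
        return 0.0
    hit_labels = {label for label, pred in product(labels, predictions) if label in pred}
    return len(hit_labels) / len(labels)


def recall_multi_hit(labels, predictions):
    """Counts every (label, prediction) hit, normalised by the number of labels."""
    if not labels:
        return 0.0
    hits = sum(1 for label, pred in product(labels, predictions) if label in pred)
    return hits / len(labels)


labels = ["Eiffel Tower", "Louvre Museum", "Colosseum"]
predictions = ["The Eiffel Tower is in Paris.", "The Eiffel Tower is 330 m tall."]
print(recall_single_hit(labels, predictions))  # 1/3: only "Eiffel Tower" is covered
print(recall_multi_hit(labels, predictions))   # 2/3: two label hits over three labels
```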
For more detailed information check out the original proposal. | 0.0 | [
"test/components/evaluators/test_statistical_evaluator.py::TestStatisticalEvaluatorRecallSingleHit::test_run",
"test/components/evaluators/test_statistical_evaluator.py::TestStatisticalEvaluatorRecallSingleHit::test_run_with_empty_labels",
"test/components/evaluators/test_statistical_evaluator.py::TestStatisticalEvaluatorRecallSingleHit::test_run_with_empty_predictions",
"test/components/evaluators/test_statistical_evaluator.py::TestStatisticalEvaluatorRecallMultiHit::test_run",
"test/components/evaluators/test_statistical_evaluator.py::TestStatisticalEvaluatorRecallMultiHit::test_run_with_empty_labels",
"test/components/evaluators/test_statistical_evaluator.py::TestStatisticalEvaluatorRecallMultiHit::test_run_with_empty_predictions"
] | [
"test/components/evaluators/test_statistical_evaluator.py::TestStatisticalEvaluator::test_init_default",
"test/components/evaluators/test_statistical_evaluator.py::TestStatisticalEvaluator::test_init_with_string",
"test/components/evaluators/test_statistical_evaluator.py::TestStatisticalEvaluator::test_to_dict",
"test/components/evaluators/test_statistical_evaluator.py::TestStatisticalEvaluator::test_from_dict",
"test/components/evaluators/test_statistical_evaluator.py::TestStatisticalEvaluatorF1::test_run_with_empty_inputs",
"test/components/evaluators/test_statistical_evaluator.py::TestStatisticalEvaluatorF1::test_run_with_different_lengths",
"test/components/evaluators/test_statistical_evaluator.py::TestStatisticalEvaluatorF1::test_run_with_matching_predictions",
"test/components/evaluators/test_statistical_evaluator.py::TestStatisticalEvaluatorF1::test_run_with_single_prediction",
"test/components/evaluators/test_statistical_evaluator.py::TestStatisticalEvaluatorF1::test_run_with_mismatched_predictions",
"test/components/evaluators/test_statistical_evaluator.py::TestStatisticalEvaluatorExactMatch::test_run_with_empty_inputs",
"test/components/evaluators/test_statistical_evaluator.py::TestStatisticalEvaluatorExactMatch::test_run_with_different_lengths",
"test/components/evaluators/test_statistical_evaluator.py::TestStatisticalEvaluatorExactMatch::test_run_with_matching_predictions",
"test/components/evaluators/test_statistical_evaluator.py::TestStatisticalEvaluatorExactMatch::test_run_with_single_prediction",
"test/components/evaluators/test_statistical_evaluator.py::TestStatisticalEvaluatorExactMatch::test_run_with_mismatched_predictions"
] | 2024-02-19 15:37:23+00:00 | 1,871 |
|
deepset-ai__haystack-7381 | diff --git a/haystack/components/evaluators/__init__.py b/haystack/components/evaluators/__init__.py
new file mode 100644
index 00000000..9550a5f4
--- /dev/null
+++ b/haystack/components/evaluators/__init__.py
@@ -0,0 +1,3 @@
+from .answer_exact_match import AnswerExactMatchEvaluator
+
+__all__ = ["AnswerExactMatchEvaluator"]
diff --git a/haystack/components/evaluators/answer_exact_match.py b/haystack/components/evaluators/answer_exact_match.py
new file mode 100644
index 00000000..4927f4e1
--- /dev/null
+++ b/haystack/components/evaluators/answer_exact_match.py
@@ -0,0 +1,59 @@
+from typing import Dict, List
+
+from haystack.core.component import component
+
+
+@component
+class AnswerExactMatchEvaluator:
+ """
+ Evaluator that checks if the predicted answers matches any of the ground truth answers exactly.
+ The result is a number from 0.0 to 1.0, it represents the proportion of questions where any predicted answer
+ matched one of the ground truth answers.
+ Each question can have multiple ground truth answers and multiple predicted answers.
+
+ Usage example:
+ ```python
+ from haystack.components.evaluators import AnswerExactMatchEvaluator
+
+ evaluator = AnswerExactMatchEvaluator()
+ result = evaluator.run(
+ questions=["What is the capital of Germany?", "What is the capital of France?"],
+ ground_truth_answers=[["Berlin"], ["Paris"]],
+ predicted_answers=[["Berlin"], ["Paris"]],
+ )
+ print(result["result"])
+ # 1.0
+ ```
+ """
+
+ @component.output_types(result=float)
+ def run(
+ self, questions: List[str], ground_truth_answers: List[List[str]], predicted_answers: List[List[str]]
+ ) -> Dict[str, float]:
+ """
+ Run the AnswerExactMatchEvaluator on the given inputs.
+ All lists must have the same length.
+
+ :param questions:
+ A list of questions.
+ :param ground_truth_answers:
+ A list of expected answers for each question.
+ :param predicted_answers:
+ A list of predicted answers for each question.
+ :returns:
+ A dictionary with the following outputs:
+ - `result` - A number from 0.0 to 1.0 that represents the proportion of questions where any predicted
+ answer matched one of the ground truth answers.
+ """
+ if not len(questions) == len(ground_truth_answers) == len(predicted_answers):
+ raise ValueError("The length of questions, ground_truth_answers, and predicted_answers must be the same.")
+
+ matches = 0
+ for truths, extracted in zip(ground_truth_answers, predicted_answers):
+ if set(truths) & set(extracted):
+ matches += 1
+
+ # The proportion of questions where any predicted answer matched one of the ground truth answers
+ result = matches / len(questions)
+
+ return {"result": result}
diff --git a/releasenotes/notes/exact-match-evaluator-197bb87b65e19d0c.yaml b/releasenotes/notes/exact-match-evaluator-197bb87b65e19d0c.yaml
new file mode 100644
index 00000000..ad380617
--- /dev/null
+++ b/releasenotes/notes/exact-match-evaluator-197bb87b65e19d0c.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Add `AnswerExactMatchEvaluator`, a Component that can be used to calculate the Exact Match metric
+ given a list of questions, a list of expected answers for each question and the list of predicted
+ answers for each question.
| deepset-ai/haystack | f69c3e5cd26046b826927a39cad02af93b2ccbbf | diff --git a/test/components/evaluators/test_answer_exact_match.py b/test/components/evaluators/test_answer_exact_match.py
new file mode 100644
index 00000000..c179c74a
--- /dev/null
+++ b/test/components/evaluators/test_answer_exact_match.py
@@ -0,0 +1,61 @@
+import pytest
+
+from haystack.components.evaluators import AnswerExactMatchEvaluator
+
+
+def test_run_with_all_matching():
+ evaluator = AnswerExactMatchEvaluator()
+ result = evaluator.run(
+ questions=["What is the capital of Germany?", "What is the capital of France?"],
+ ground_truth_answers=[["Berlin"], ["Paris"]],
+ predicted_answers=[["Berlin"], ["Paris"]],
+ )
+
+ assert result["result"] == 1.0
+
+
+def test_run_with_no_matching():
+ evaluator = AnswerExactMatchEvaluator()
+ result = evaluator.run(
+ questions=["What is the capital of Germany?", "What is the capital of France?"],
+ ground_truth_answers=[["Berlin"], ["Paris"]],
+ predicted_answers=[["Paris"], ["London"]],
+ )
+
+ assert result["result"] == 0.0
+
+
+def test_run_with_partial_matching():
+ evaluator = AnswerExactMatchEvaluator()
+ result = evaluator.run(
+ questions=["What is the capital of Germany?", "What is the capital of France?"],
+ ground_truth_answers=[["Berlin"], ["Paris"]],
+ predicted_answers=[["Berlin"], ["London"]],
+ )
+
+ assert result["result"] == 0.5
+
+
+def test_run_with_different_lengths():
+ evaluator = AnswerExactMatchEvaluator()
+
+ with pytest.raises(ValueError):
+ evaluator.run(
+ questions=["What is the capital of Germany?"],
+ ground_truth_answers=[["Berlin"], ["Paris"]],
+ predicted_answers=[["Berlin"], ["London"]],
+ )
+
+ with pytest.raises(ValueError):
+ evaluator.run(
+ questions=["What is the capital of Germany?", "What is the capital of France?"],
+ ground_truth_answers=[["Berlin"]],
+ predicted_answers=[["Berlin"], ["London"]],
+ )
+
+ with pytest.raises(ValueError):
+ evaluator.run(
+ questions=["What is the capital of Germany?", "What is the capital of France?"],
+ ground_truth_answers=[["Berlin"], ["Paris"]],
+ predicted_answers=[["Berlin"]],
+ )
| Implement function to calculate Exact Match metric
As specified in proposal #5794 we need to implement a function to calculate the Exact Match metric.
Ideally the function should be part of the private interface and called only through the `calculate_metrics` function (see #6063). `_calculate_em()` could be a nice name.
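As a rough illustration, here is a minimal sketch of such an exact-match computation over plain Python lists, where each question may have several acceptable answers; it is a standalone example, and the function name is not the actual private API:

```python
# Hedged sketch: proportion of questions where any predicted answer matches a ground truth.
def exact_match(ground_truth_answers, predicted_answers):
    if len(ground_truth_answers) != len(predicted_answers):
        raise ValueError("Both lists must have the same length.")
    if not ground_truth_answers:
        return 0.0
    matches = sum(
        1 for truths, preds in zip(ground_truth_answers, predicted_answers) if set(truths) & set(preds)
    )
    return matches / len(ground_truth_answers)


print(exact_match([["Berlin"], ["Paris"]], [["Berlin"], ["London"]]))  # 0.5
```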
For more detailed information check out the original proposal. | 0.0 | [
"test/components/evaluators/test_answer_exact_match.py::test_run_with_all_matching",
"test/components/evaluators/test_answer_exact_match.py::test_run_with_no_matching",
"test/components/evaluators/test_answer_exact_match.py::test_run_with_partial_matching",
"test/components/evaluators/test_answer_exact_match.py::test_run_with_different_lengths"
] | [] | 2024-03-19 14:33:17+00:00 | 1,872 |
|
deepset-ai__haystack-7424 | diff --git a/haystack/components/evaluators/__init__.py b/haystack/components/evaluators/__init__.py
index 479cd500..0da03f91 100644
--- a/haystack/components/evaluators/__init__.py
+++ b/haystack/components/evaluators/__init__.py
@@ -2,6 +2,7 @@ from .answer_exact_match import AnswerExactMatchEvaluator
from .document_map import DocumentMAPEvaluator
from .document_mrr import DocumentMRREvaluator
from .document_recall import DocumentRecallEvaluator
+from .faithfulness import FaithfulnessEvaluator
from .llm_evaluator import LLMEvaluator
from .sas_evaluator import SASEvaluator
@@ -10,6 +11,7 @@ __all__ = [
"DocumentMAPEvaluator",
"DocumentMRREvaluator",
"DocumentRecallEvaluator",
+ "FaithfulnessEvaluator",
"LLMEvaluator",
"SASEvaluator",
]
diff --git a/haystack/components/evaluators/faithfulness.py b/haystack/components/evaluators/faithfulness.py
new file mode 100644
index 00000000..9ceb9973
--- /dev/null
+++ b/haystack/components/evaluators/faithfulness.py
@@ -0,0 +1,161 @@
+from typing import Any, Dict, List, Optional
+
+from numpy import mean as np_mean
+
+from haystack import default_from_dict
+from haystack.components.evaluators.llm_evaluator import LLMEvaluator
+from haystack.core.component import component
+from haystack.utils import Secret, deserialize_secrets_inplace
+
+
+class FaithfulnessEvaluator(LLMEvaluator):
+ """
+ Evaluator that checks if a generated answer can be inferred from the provided contexts.
+
+ An LLM separates the answer into multiple statements and checks whether the statement can be inferred from the
+ context or not. The final score for the full answer is a number from 0.0 to 1.0. It represents the proportion of
+ statements that can be inferred from the provided contexts.
+
+ Usage example:
+ ```python
+ from haystack.components.evaluators import FaithfulnessEvaluator
+
+ questions = ["Who created the Python language?"]
+ contexts = [
+ [
+ "Python, created by Guido van Rossum in the late 1980s, is a high-level general-purpose programming language. Its design philosophy emphasizes code readability, and its language constructs aim to help programmers write clear, logical code for both small and large-scale software projects."
+ ],
+ ]
+ responses = ["Python is a high-level general-purpose programming language that was created by George Lucas."]
+ evaluator = FaithfulnessEvaluator()
+ result = evaluator.run(questions=questions, contexts=contexts, responses=responses)
+ print(results["evaluator"])
+ # {'results': [{'statements': ['Python is a high-level general-purpose programming language.',
+ # 'Python was created by George Lucas.'], 'statement_scores':
+ # [1, 0], 'score': 0.5}], 'score': 0.5, 'individual_scores': [0.5]}
+
+ ```
+ """
+
+ def __init__(
+ self,
+ examples: Optional[List[Dict[str, Any]]] = None,
+ api: str = "openai",
+ api_key: Secret = Secret.from_env_var("OPENAI_API_KEY"),
+ ):
+ """
+ Creates an instance of LLMEvaluator.
+
+ :param examples:
+ Few-shot examples conforming to the expected input and output format of FaithfulnessEvaluator.
+ Each example must be a dictionary with keys "inputs" and "outputs".
+ "inputs" must be a dictionary with keys "questions", "contexts", and "responses".
+ "outputs" must be a dictionary with "statements" and "statement_scores".
+ Expected format:
+ [{
+ "inputs": {
+ "questions": "What is the capital of Italy?", "contexts": ["Rome is the capital of Italy."],
+ "responses": "Rome is the capital of Italy with more than 4 million inhabitants.",
+ },
+ "outputs": {
+ "statements": ["Rome is the capital of Italy.", "Rome has more than 4 million inhabitants."],
+ "statement_scores": [1, 0],
+ },
+ }]
+ :param api:
+ The API to use for calling an LLM through a Generator.
+ Supported APIs: "openai".
+ :param api_key:
+ The API key.
+
+ """
+ self.instructions = (
+ "Your task is to judge the faithfulness or groundedness of statements based "
+ "on context information. First, please extract statements from a provided "
+ "response to a question. Second, calculate a faithfulness score for each "
+ "statement made in the response. The score is 1 if the statement can be "
+ "inferred from the provided context or 0 if it cannot be inferred."
+ )
+ self.inputs = [("questions", List[str]), ("contexts", List[List[str]]), ("responses", List[str])]
+ self.outputs = ["statements", "statement_scores"]
+ self.examples = examples or [
+ {
+ "inputs": {
+ "questions": "What is the capital of Germany and when was it founded?",
+ "contexts": ["Berlin is the capital of Germany and was founded in 1244."],
+ "responses": "The capital of Germany, Berlin, was founded in the 13th century.",
+ },
+ "outputs": {
+ "statements": ["Berlin is the capital of Germany.", "Berlin was founded in 1244."],
+ "statement_scores": [1, 1],
+ },
+ },
+ {
+ "inputs": {
+ "questions": "What is the capital of France?",
+ "contexts": ["Berlin is the capital of Germany."],
+ "responses": "Paris",
+ },
+ "outputs": {"statements": ["Paris is the capital of France."], "statement_scores": [0]},
+ },
+ {
+ "inputs": {
+ "questions": "What is the capital of Italy?",
+ "contexts": ["Rome is the capital of Italy."],
+ "responses": "Rome is the capital of Italy with more than 4 million inhabitants.",
+ },
+ "outputs": {
+ "statements": ["Rome is the capital of Italy.", "Rome has more than 4 million inhabitants."],
+ "statement_scores": [1, 0],
+ },
+ },
+ ]
+ self.api = api
+ self.api_key = api_key
+
+ super().__init__(
+ instructions=self.instructions,
+ inputs=self.inputs,
+ outputs=self.outputs,
+ examples=self.examples,
+ api=self.api,
+ api_key=self.api_key,
+ )
+
+ @component.output_types(results=List[Dict[str, Any]])
+ def run(self, **inputs) -> Dict[str, Any]:
+ """
+ Run the LLM evaluator.
+
+ :param inputs:
+ The input values to evaluate. The keys are the input names and the values are lists of input values.
+ :returns:
+ A dictionary with the following outputs:
+ - `score`: Mean faithfulness score over all the provided input answers.
+ - `individual_scores`: A list of faithfulness scores for each input answer.
+ - `results`: A list of dictionaries with `statements` and `statement_scores` for each input answer.
+ """
+ result = super().run(**inputs)
+
+ # calculate average statement faithfulness score per query
+ for res in result["results"]:
+ res["score"] = np_mean(res["statement_scores"])
+
+ # calculate average answer faithfulness score over all queries
+ result["score"] = np_mean([res["score"] for res in result["results"]])
+ result["individual_scores"] = [res["score"] for res in result["results"]]
+
+ return result
+
+ @classmethod
+ def from_dict(cls, data: Dict[str, Any]) -> "FaithfulnessEvaluator":
+ """
+ Deserialize this component from a dictionary.
+
+ :param data:
+ The dictionary representation of this component.
+ :returns:
+ The deserialized component instance.
+ """
+ deserialize_secrets_inplace(data["init_parameters"], keys=["api_key"])
+ return default_from_dict(cls, data)
diff --git a/releasenotes/notes/faithfulness-evaluator-2e039a697c847d1c.yaml b/releasenotes/notes/faithfulness-evaluator-2e039a697c847d1c.yaml
new file mode 100644
index 00000000..5279d0d9
--- /dev/null
+++ b/releasenotes/notes/faithfulness-evaluator-2e039a697c847d1c.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Add a new FaithfulnessEvaluator component that can be used to evaluate faithfulness / groundedness / hallucinations of LLMs in a RAG pipeline.
+ Given a question, a list of retrieved document contents (contexts), and a predicted answer, FaithfulnessEvaluator returns a score ranging from 0 (poor faithfulness) to 1 (perfect faithfulness).
+ The score is the proportion of statements in the predicted answer that could by inferred from the documents.
| deepset-ai/haystack | 189dfaf640caf7993d4ba367d6ea3bcb1b4eca11 | diff --git a/test/components/evaluators/test_faithfulness_evaluator.py b/test/components/evaluators/test_faithfulness_evaluator.py
new file mode 100644
index 00000000..57764373
--- /dev/null
+++ b/test/components/evaluators/test_faithfulness_evaluator.py
@@ -0,0 +1,129 @@
+from typing import List
+
+import pytest
+
+from haystack.components.evaluators import FaithfulnessEvaluator
+from haystack.utils.auth import Secret
+
+
+class TestFaithfulnessEvaluator:
+ def test_init_default(self, monkeypatch):
+ monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")
+ component = FaithfulnessEvaluator()
+ assert component.api == "openai"
+ assert component.generator.client.api_key == "test-api-key"
+ assert component.instructions == (
+ "Your task is to judge the faithfulness or groundedness of statements based "
+ "on context information. First, please extract statements from a provided "
+ "response to a question. Second, calculate a faithfulness score for each "
+ "statement made in the response. The score is 1 if the statement can be "
+ "inferred from the provided context or 0 if it cannot be inferred."
+ )
+ assert component.inputs == [("questions", List[str]), ("contexts", List[List[str]]), ("responses", List[str])]
+ assert component.outputs == ["statements", "statement_scores"]
+ assert component.examples == [
+ {
+ "inputs": {
+ "questions": "What is the capital of Germany and when was it founded?",
+ "contexts": ["Berlin is the capital of Germany and was founded in 1244."],
+ "responses": "The capital of Germany, Berlin, was founded in the 13th century.",
+ },
+ "outputs": {
+ "statements": ["Berlin is the capital of Germany.", "Berlin was founded in 1244."],
+ "statement_scores": [1, 1],
+ },
+ },
+ {
+ "inputs": {
+ "questions": "What is the capital of France?",
+ "contexts": ["Berlin is the capital of Germany."],
+ "responses": "Paris",
+ },
+ "outputs": {"statements": ["Paris is the capital of France."], "statement_scores": [0]},
+ },
+ {
+ "inputs": {
+ "questions": "What is the capital of Italy?",
+ "contexts": ["Rome is the capital of Italy."],
+ "responses": "Rome is the capital of Italy with more than 4 million inhabitants.",
+ },
+ "outputs": {
+ "statements": ["Rome is the capital of Italy.", "Rome has more than 4 million inhabitants."],
+ "statement_scores": [1, 0],
+ },
+ },
+ ]
+
+ def test_init_fail_wo_openai_api_key(self, monkeypatch):
+ monkeypatch.delenv("OPENAI_API_KEY", raising=False)
+ with pytest.raises(ValueError, match="None of the .* environment variables are set"):
+ FaithfulnessEvaluator()
+
+ def test_init_with_parameters(self):
+ component = FaithfulnessEvaluator(
+ api_key=Secret.from_token("test-api-key"),
+ api="openai",
+ examples=[
+ {"inputs": {"responses": "Damn, this is straight outta hell!!!"}, "outputs": {"custom_score": 1}},
+ {"inputs": {"responses": "Football is the most popular sport."}, "outputs": {"custom_score": 0}},
+ ],
+ )
+ assert component.generator.client.api_key == "test-api-key"
+ assert component.api == "openai"
+ assert component.examples == [
+ {"inputs": {"responses": "Damn, this is straight outta hell!!!"}, "outputs": {"custom_score": 1}},
+ {"inputs": {"responses": "Football is the most popular sport."}, "outputs": {"custom_score": 0}},
+ ]
+
+ def test_from_dict(self, monkeypatch):
+ monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")
+
+ data = {
+ "type": "haystack.components.evaluators.faithfulness.FaithfulnessEvaluator",
+ "init_parameters": {
+ "api_key": {"env_vars": ["OPENAI_API_KEY"], "strict": True, "type": "env_var"},
+ "api": "openai",
+ "examples": [{"inputs": {"responses": "Football is the most popular sport."}, "outputs": {"score": 0}}],
+ },
+ }
+ component = FaithfulnessEvaluator.from_dict(data)
+ assert component.api == "openai"
+ assert component.generator.client.api_key == "test-api-key"
+ assert component.examples == [
+ {"inputs": {"responses": "Football is the most popular sport."}, "outputs": {"score": 0}}
+ ]
+
+ def test_run_calculates_mean_score(self, monkeypatch):
+ monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")
+ component = FaithfulnessEvaluator()
+
+ def generator_run(self, *args, **kwargs):
+ if "Football" in kwargs["prompt"]:
+ return {"replies": ['{"statements": ["a", "b"], "statement_scores": [1, 0]}']}
+ else:
+ return {"replies": ['{"statements": ["c", "d"], "statement_scores": [1, 1]}']}
+
+ monkeypatch.setattr("haystack.components.generators.openai.OpenAIGenerator.run", generator_run)
+
+ questions = ["Which is the most popular global sport?", "Who created the Python language?"]
+ contexts = [
+ [
+ "The popularity of sports can be measured in various ways, including TV viewership, social media presence, number of participants, and economic impact. Football is undoubtedly the world's most popular sport with major events like the FIFA World Cup and sports personalities like Ronaldo and Messi, drawing a followership of more than 4 billion people."
+ ],
+ [
+ "Python, created by Guido van Rossum in the late 1980s, is a high-level general-purpose programming language. Its design philosophy emphasizes code readability, and its language constructs aim to help programmers write clear, logical code for both small and large-scale software projects."
+ ],
+ ]
+ responses = [
+ "Football is the most popular sport with around 4 billion followers worldwide.",
+ "Python is a high-level general-purpose programming language that was created by George Lucas.",
+ ]
+ results = component.run(questions=questions, contexts=contexts, responses=responses)
+ assert results == {
+ "individual_scores": [0.5, 1],
+ "results": [
+ {"score": 0.5, "statement_scores": [1, 0], "statements": ["a", "b"]},
+ {"score": 1, "statement_scores": [1, 1], "statements": ["c", "d"]},
+ ],
+ "score": 0.75,
+ }
| LLM Eval - Implement Faithfulness/Factual Accuracy metric
Depends on https://github.com/deepset-ai/haystack/issues/7022.
Wrap `LLMEvaluator` to provide a component that calculates the "Faithfulness" or "Factual Accuracy" metric based on the following inputs:
- Questions
- Contexts
- Responses
This component is meant to be plug-n-play, meaning it will provide a good enough starting prompt and examples. These should also be customizable by the user.
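As noted below, the LLM judge is expected to return a binary value per statement. A hedged sketch of rolling such judgments up into answer-level and dataset-level scores might look like this; the judgment values are stubbed stand-ins for whatever the LLM actually returns:

```python
# Hedged sketch: aggregate binary per-statement judgments into faithfulness scores.
from statistics import mean

llm_judgments = [
    {"statements": ["a", "b"], "statement_scores": [1, 0]},  # one answer, half faithful
    {"statements": ["c", "d"], "statement_scores": [1, 1]},  # another answer, fully faithful
]

individual_scores = [mean(res["statement_scores"]) for res in llm_judgments]
dataset_score = mean(individual_scores)

print(individual_scores)  # [0.5, 1.0]
print(dataset_score)      # 0.75
```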
A requirement for this component is that the LLM is expected to return a binary value for each input tuple. This will let us calculate a final score for the dataset ourselves. | 0.0 | [
"test/components/evaluators/test_faithfulness_evaluator.py::TestFaithfulnessEvaluator::test_init_default",
"test/components/evaluators/test_faithfulness_evaluator.py::TestFaithfulnessEvaluator::test_init_fail_wo_openai_api_key",
"test/components/evaluators/test_faithfulness_evaluator.py::TestFaithfulnessEvaluator::test_init_with_parameters",
"test/components/evaluators/test_faithfulness_evaluator.py::TestFaithfulnessEvaluator::test_from_dict",
"test/components/evaluators/test_faithfulness_evaluator.py::TestFaithfulnessEvaluator::test_run_calculates_mean_score"
] | [] | 2024-03-26 10:10:20+00:00 | 1,873 |
|
deepset-ai__haystack-7519 | diff --git a/docs/pydoc/config/evaluators_api.yml b/docs/pydoc/config/evaluators_api.yml
index 9acd64ef..b24b3003 100644
--- a/docs/pydoc/config/evaluators_api.yml
+++ b/docs/pydoc/config/evaluators_api.yml
@@ -4,6 +4,7 @@ loaders:
modules:
[
"answer_exact_match",
+ "context_relevance",
"document_map",
"document_mrr",
"document_recall",
diff --git a/haystack/components/evaluators/__init__.py b/haystack/components/evaluators/__init__.py
index f69c8257..631691c5 100644
--- a/haystack/components/evaluators/__init__.py
+++ b/haystack/components/evaluators/__init__.py
@@ -1,4 +1,5 @@
from .answer_exact_match import AnswerExactMatchEvaluator
+from .context_relevance import ContextRelevanceEvaluator
from .document_map import DocumentMAPEvaluator
from .document_mrr import DocumentMRREvaluator
from .document_recall import DocumentRecallEvaluator
@@ -9,6 +10,7 @@ from .sas_evaluator import SASEvaluator
__all__ = [
"AnswerExactMatchEvaluator",
+ "ContextRelevanceEvaluator",
"DocumentMAPEvaluator",
"DocumentMRREvaluator",
"DocumentRecallEvaluator",
diff --git a/haystack/components/evaluators/context_relevance.py b/haystack/components/evaluators/context_relevance.py
new file mode 100644
index 00000000..d78ccfc7
--- /dev/null
+++ b/haystack/components/evaluators/context_relevance.py
@@ -0,0 +1,154 @@
+from typing import Any, Dict, List, Optional
+
+from numpy import mean as np_mean
+
+from haystack import default_from_dict
+from haystack.components.evaluators.llm_evaluator import LLMEvaluator
+from haystack.core.component import component
+from haystack.utils import Secret, deserialize_secrets_inplace
+
+# Private global variable for default examples to include in the prompt if the user does not provide any examples
+_DEFAULT_EXAMPLES = [
+ {
+ "inputs": {
+ "questions": "What is the capital of Germany?",
+ "contexts": ["Berlin is the capital of Germany and was founded in 1244."],
+ },
+ "outputs": {
+ "statements": ["Berlin is the capital of Germany.", "Berlin was founded in 1244."],
+ "statement_scores": [1, 0],
+ },
+ },
+ {
+ "inputs": {"questions": "What is the capital of France?", "contexts": ["Berlin is the capital of Germany."]},
+ "outputs": {"statements": ["Berlin is the capital of Germany."], "statement_scores": [0]},
+ },
+ {
+ "inputs": {"questions": "What is the capital of Italy?", "contexts": ["Rome is the capital of Italy."]},
+ "outputs": {"statements": ["Rome is the capital of Italy."], "statement_scores": [1]},
+ },
+]
+
+
+class ContextRelevanceEvaluator(LLMEvaluator):
+ """
+ Evaluator that checks if a provided context is relevant to the question.
+
+ An LLM separates the answer into multiple statements and checks whether the statement can be inferred from the
+ context or not. The final score for the full answer is a number from 0.0 to 1.0. It represents the proportion of
+ statements that can be inferred from the provided contexts.
+
+ Usage example:
+ ```python
+ from haystack.components.evaluators import ContextRelevanceEvaluator
+
+ questions = ["Who created the Python language?"]
+ contexts = [
+ [
+ "Python, created by Guido van Rossum in the late 1980s, is a high-level general-purpose programming language. Its design philosophy emphasizes code readability, and its language constructs aim to help programmers write clear, logical code for both small and large-scale software projects."
+ ],
+ ]
+
+ evaluator = ContextRelevanceEvaluator()
+ result = evaluator.run(questions=questions, contexts=contexts)
+ print(result["score"])
+ # 1.0
+ print(result["individual_scores"])
+ # [1.0]
+ print(result["results"])
+ # [{'statements': ['Python, created by Guido van Rossum in the late 1980s.'], 'statement_scores': [1], 'score': 1.0}]
+ ```
+ """
+
+ def __init__(
+ self,
+ examples: Optional[List[Dict[str, Any]]] = None,
+ api: str = "openai",
+ api_key: Secret = Secret.from_env_var("OPENAI_API_KEY"),
+ ):
+ """
+ Creates an instance of ContextRelevanceEvaluator.
+
+ :param examples:
+ Optional few-shot examples conforming to the expected input and output format of ContextRelevanceEvaluator.
+ Default examples will be used if none are provided.
+ Each example must be a dictionary with keys "inputs" and "outputs".
+ "inputs" must be a dictionary with keys "questions" and "contexts".
+ "outputs" must be a dictionary with "statements" and "statement_scores".
+ Expected format:
+ [{
+ "inputs": {
+ "questions": "What is the capital of Italy?", "contexts": ["Rome is the capital of Italy."],
+ },
+ "outputs": {
+ "statements": ["Rome is the capital of Italy."],
+ "statement_scores": [1],
+ },
+ }]
+ :param api:
+ The API to use for calling an LLM through a Generator.
+ Supported APIs: "openai".
+ :param api_key:
+ The API key.
+
+ """
+ self.instructions = (
+ "Your task is to judge how relevant the provided context is for answering a question. "
+ "First, please extract statements from the provided context. "
+ "Second, calculate a relevance score for each statement in the context. "
+ "The score is 1 if the statement is relevant to answer the question or 0 if it is not relevant."
+ )
+ self.inputs = [("questions", List[str]), ("contexts", List[List[str]])]
+ self.outputs = ["statements", "statement_scores"]
+ self.examples = examples or _DEFAULT_EXAMPLES
+ self.api = api
+ self.api_key = api_key
+
+ super().__init__(
+ instructions=self.instructions,
+ inputs=self.inputs,
+ outputs=self.outputs,
+ examples=self.examples,
+ api=self.api,
+ api_key=self.api_key,
+ )
+
+ @component.output_types(results=List[Dict[str, Any]])
+ def run(self, questions: List[str], contexts: List[List[str]]) -> Dict[str, Any]:
+ """
+ Run the LLM evaluator.
+
+ :param questions:
+ A list of questions.
+ :param contexts:
+ A list of lists of contexts. Each list of contexts corresponds to one question.
+ :returns:
+ A dictionary with the following outputs:
+ - `score`: Mean context relevance score over all the provided input questions.
+ - `individual_scores`: A list of context relevance scores for each input question.
+ - `results`: A list of dictionaries with `statements` and `statement_scores` for each input context.
+ """
+ result = super().run(questions=questions, contexts=contexts)
+
+ # calculate average statement relevance score per query
+ for res in result["results"]:
+ res["score"] = np_mean(res["statement_scores"])
+
+ # calculate average context relevance score over all queries
+ result["score"] = np_mean([res["score"] for res in result["results"]])
+ result["individual_scores"] = [res["score"] for res in result["results"]]
+
+ return result
+
+ @classmethod
+ def from_dict(cls, data: Dict[str, Any]) -> "ContextRelevanceEvaluator":
+ """
+ Deserialize this component from a dictionary.
+
+ :param data:
+ The dictionary representation of this component.
+ :returns:
+ The deserialized component instance.
+ """
+ deserialize_secrets_inplace(data["init_parameters"], keys=["api_key"])
+ return default_from_dict(cls, data)
diff --git a/pyproject.toml b/pyproject.toml
index b2e9202f..3c3833c8 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -139,18 +139,18 @@ extra-dependencies = [
[tool.hatch.envs.test.scripts]
e2e = "pytest e2e"
-unit = 'pytest --cov-report xml:coverage.xml --cov="haystack" -m "not integration" test {args:test}'
-integration = 'pytest --maxfail=5 -m "integration" test'
-integration-mac = 'pytest --maxfail=5 -m "integration" test -k "not tika"'
-integration-windows = 'pytest --maxfail=5 -m "integration" test -k "not tika"'
+unit = 'pytest --cov-report xml:coverage.xml --cov="haystack" -m "not integration" {args:test}'
+integration = 'pytest --maxfail=5 -m "integration" {args:test}'
+integration-mac = 'pytest --maxfail=5 -m "integration" -k "not tika" {args:test}'
+integration-windows = 'pytest --maxfail=5 -m "integration" -k "not tika" {args:test}'
types = "mypy --install-types --non-interactive --cache-dir=.mypy_cache/ {args:haystack}"
lint = [
- "ruff {args:haystack}",
+ "ruff check {args:haystack}",
"pylint -ry -j 0 {args:haystack}"
]
lint-fix = [
"black .",
- "ruff {args:haystack} --fix",
+ "ruff check {args:haystack} --fix",
]
[tool.hatch.envs.readme]
@@ -303,12 +303,10 @@ select = [
"ASYNC", # flake8-async
"C4", # flake8-comprehensions
"C90", # McCabe cyclomatic complexity
- "CPY", # flake8-copyright
"DJ", # flake8-django
"E501", # Long lines
"EXE", # flake8-executable
"F", # Pyflakes
- "FURB", # refurb
"INT", # flake8-gettext
"PERF", # Perflint
"PL", # Pylint
diff --git a/releasenotes/notes/context-relevance-04063b9dc9fe7379.yaml b/releasenotes/notes/context-relevance-04063b9dc9fe7379.yaml
new file mode 100644
index 00000000..2ab79f87
--- /dev/null
+++ b/releasenotes/notes/context-relevance-04063b9dc9fe7379.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Add a new ContextRelevanceEvaluator component that can be used to evaluate whether retrieved documents are relevant to answer a question with a RAG pipeline.
+ Given a question and a list of retrieved document contents (contexts), an LLM is used to score to what extent the provided context is relevant. The score ranges from 0 to 1.
| deepset-ai/haystack | 3d0f7affed7b192d32d295a6c92bdff5e8f97de4 | diff --git a/test/components/evaluators/test_context_relevance_evaluator.py b/test/components/evaluators/test_context_relevance_evaluator.py
new file mode 100644
index 00000000..8bd1a3cf
--- /dev/null
+++ b/test/components/evaluators/test_context_relevance_evaluator.py
@@ -0,0 +1,142 @@
+import os
+from typing import List
+
+import pytest
+
+from haystack.components.evaluators import ContextRelevanceEvaluator
+from haystack.utils.auth import Secret
+
+
+class TestContextRelevanceEvaluator:
+ def test_init_default(self, monkeypatch):
+ monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")
+ component = ContextRelevanceEvaluator()
+ assert component.api == "openai"
+ assert component.generator.client.api_key == "test-api-key"
+ assert component.instructions == (
+ "Your task is to judge how relevant the provided context is for answering a question. "
+ "First, please extract statements from the provided context. "
+ "Second, calculate a relevance score for each statement in the context. "
+ "The score is 1 if the statement is relevant to answer the question or 0 if it is not relevant."
+ )
+ assert component.inputs == [("questions", List[str]), ("contexts", List[List[str]])]
+ assert component.outputs == ["statements", "statement_scores"]
+ assert component.examples == [
+ {
+ "inputs": {
+ "questions": "What is the capital of Germany?",
+ "contexts": ["Berlin is the capital of Germany and was founded in 1244."],
+ },
+ "outputs": {
+ "statements": ["Berlin is the capital of Germany.", "Berlin was founded in 1244."],
+ "statement_scores": [1, 0],
+ },
+ },
+ {
+ "inputs": {
+ "questions": "What is the capital of France?",
+ "contexts": ["Berlin is the capital of Germany."],
+ },
+ "outputs": {"statements": ["Berlin is the capital of Germany."], "statement_scores": [0]},
+ },
+ {
+ "inputs": {"questions": "What is the capital of Italy?", "contexts": ["Rome is the capital of Italy."]},
+ "outputs": {"statements": ["Rome is the capital of Italy."], "statement_scores": [1]},
+ },
+ ]
+
+ def test_init_fail_wo_openai_api_key(self, monkeypatch):
+ monkeypatch.delenv("OPENAI_API_KEY", raising=False)
+ with pytest.raises(ValueError, match="None of the .* environment variables are set"):
+ ContextRelevanceEvaluator()
+
+ def test_init_with_parameters(self):
+ component = ContextRelevanceEvaluator(
+ api_key=Secret.from_token("test-api-key"),
+ api="openai",
+ examples=[
+ {"inputs": {"questions": "Damn, this is straight outta hell!!!"}, "outputs": {"custom_score": 1}},
+ {"inputs": {"questions": "Football is the most popular sport."}, "outputs": {"custom_score": 0}},
+ ],
+ )
+ assert component.generator.client.api_key == "test-api-key"
+ assert component.api == "openai"
+ assert component.examples == [
+ {"inputs": {"questions": "Damn, this is straight outta hell!!!"}, "outputs": {"custom_score": 1}},
+ {"inputs": {"questions": "Football is the most popular sport."}, "outputs": {"custom_score": 0}},
+ ]
+
+ def test_from_dict(self, monkeypatch):
+ monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")
+
+ data = {
+ "type": "haystack.components.evaluators.context_relevance.ContextRelevanceEvaluator",
+ "init_parameters": {
+ "api_key": {"env_vars": ["OPENAI_API_KEY"], "strict": True, "type": "env_var"},
+ "api": "openai",
+ "examples": [{"inputs": {"questions": "What is football?"}, "outputs": {"score": 0}}],
+ },
+ }
+ component = ContextRelevanceEvaluator.from_dict(data)
+ assert component.api == "openai"
+ assert component.generator.client.api_key == "test-api-key"
+ assert component.examples == [{"inputs": {"questions": "What is football?"}, "outputs": {"score": 0}}]
+
+ def test_run_calculates_mean_score(self, monkeypatch):
+ monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")
+ component = ContextRelevanceEvaluator()
+
+ def generator_run(self, *args, **kwargs):
+ if "Football" in kwargs["prompt"]:
+ return {"replies": ['{"statements": ["a", "b"], "statement_scores": [1, 0]}']}
+ else:
+ return {"replies": ['{"statements": ["c", "d"], "statement_scores": [1, 1]}']}
+
+ monkeypatch.setattr("haystack.components.generators.openai.OpenAIGenerator.run", generator_run)
+
+ questions = ["Which is the most popular global sport?", "Who created the Python language?"]
+ contexts = [
+ [
+ "The popularity of sports can be measured in various ways, including TV viewership, social media "
+ "presence, number of participants, and economic impact. Football is undoubtedly the world's most "
+ "popular sport with major events like the FIFA World Cup and sports personalities like Ronaldo and "
+ "Messi, drawing a followership of more than 4 billion people."
+ ],
+ [
+ "Python, created by Guido van Rossum in the late 1980s, is a high-level general-purpose programming "
+ "language. Its design philosophy emphasizes code readability, and its language constructs aim to help "
+ "programmers write clear, logical code for both small and large-scale software projects."
+ ],
+ ]
+ results = component.run(questions=questions, contexts=contexts)
+ assert results == {
+ "individual_scores": [0.5, 1],
+ "results": [
+ {"score": 0.5, "statement_scores": [1, 0], "statements": ["a", "b"]},
+ {"score": 1, "statement_scores": [1, 1], "statements": ["c", "d"]},
+ ],
+ "score": 0.75,
+ }
+
+ def test_run_missing_parameters(self, monkeypatch):
+ monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")
+ component = ContextRelevanceEvaluator()
+ with pytest.raises(TypeError, match="missing 2 required positional arguments"):
+ component.run()
+
+ @pytest.mark.skipif(
+ not os.environ.get("OPENAI_API_KEY", None),
+ reason="Export an env var called OPENAI_API_KEY containing the OpenAI API key to run this test.",
+ )
+ @pytest.mark.integration
+ def test_live_run(self):
+ questions = ["Who created the Python language?"]
+ contexts = [["Python, created by Guido van Rossum, is a high-level general-purpose programming language."]]
+
+ evaluator = ContextRelevanceEvaluator()
+ result = evaluator.run(questions=questions, contexts=contexts)
+ assert result["score"] == 1.0
+ assert result["individual_scores"] == [1.0]
+ assert result["results"][0]["score"] == 1.0
+ assert result["results"][0]["statement_scores"] == [1.0]
+ assert "Guido van Rossum" in result["results"][0]["statements"][0]
| Custom LLM-based evaluator in Haystack core
Now that we have integrations for third-party LLM eval frameworks, we need to add support for a handful of LLM-based metrics that we officially support as part of core. This will be done by implementing a custom `LLMEvaluator` component that wraps around one or more of our generator APIs. We'll then build a small set of curated metrics on top of this component, all the while allowing the user to change the underlying service (OpenAI, Cohere, etc.) and the associated prompts at will.
```[tasklist]
### Tasks
- [ ] LLM Eval - Implement custom LLM evaluator component in core
- [ ] LLM Eval - Implement Faithfulness/Factual Accuracy metric
- [ ] LLM Eval - Implement Context Relevance metric
```
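Putting the idea together, here is a hedged sketch of a curated metric sitting on top of a generic LLM judge; `fake_llm_judge`, `curated_metric`, and the prompt format are illustrative stand-ins, not the actual Haystack API:

```python
# Hedged sketch of the "curated metric on top of a generic LLM judge" idea.
from statistics import mean


def fake_llm_judge(prompt: str) -> dict:
    # A real implementation would call OpenAI/Cohere/etc. and parse the JSON reply.
    return {"statement_scores": [1, 0]}


def curated_metric(questions, contexts, instructions: str) -> float:
    """Builds one prompt per (question, contexts) pair, asks the judge, averages the scores."""
    per_question = []
    for question, ctx in zip(questions, contexts):
        prompt = f"{instructions}\nQuestion: {question}\nContext: {' '.join(ctx)}"
        reply = fake_llm_judge(prompt)
        per_question.append(mean(reply["statement_scores"]))
    return mean(per_question)


score = curated_metric(
    questions=["What is the capital of Germany?"],
    contexts=[["Berlin is the capital of Germany and was founded in 1244."]],
    instructions="Score each statement in the context as relevant (1) or not (0).",
)
print(score)  # 0.5 with the stubbed judge above
```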
| 0.0 | [
"test/components/evaluators/test_context_relevance_evaluator.py::TestContextRelevanceEvaluator::test_init_default",
"test/components/evaluators/test_context_relevance_evaluator.py::TestContextRelevanceEvaluator::test_init_fail_wo_openai_api_key",
"test/components/evaluators/test_context_relevance_evaluator.py::TestContextRelevanceEvaluator::test_init_with_parameters",
"test/components/evaluators/test_context_relevance_evaluator.py::TestContextRelevanceEvaluator::test_from_dict",
"test/components/evaluators/test_context_relevance_evaluator.py::TestContextRelevanceEvaluator::test_run_calculates_mean_score",
"test/components/evaluators/test_context_relevance_evaluator.py::TestContextRelevanceEvaluator::test_run_missing_parameters"
] | [] | 2024-04-09 15:52:30+00:00 | 1,874 |
|
deepset-ai__haystack-7599 | diff --git a/haystack/components/preprocessors/document_splitter.py b/haystack/components/preprocessors/document_splitter.py
index adea7cc3..033f55a8 100644
--- a/haystack/components/preprocessors/document_splitter.py
+++ b/haystack/components/preprocessors/document_splitter.py
@@ -1,5 +1,5 @@
from copy import deepcopy
-from typing import List, Literal
+from typing import Dict, List, Literal, Tuple
from more_itertools import windowed
@@ -53,7 +53,7 @@ class DocumentSplitter:
:returns: A dictionary with the following key:
- `documents`: List of documents with the split texts. A metadata field "source_id" is added to each
- document to keep track of the original document that was split. Other metadata are copied from the original
+ document to keep track of the original document that was split. Another metadata field "page_number" is added to each number to keep track of the page it belonged to in the original document. Other metadata are copied from the original
document.
:raises TypeError: if the input is not a list of Documents.
@@ -70,10 +70,12 @@ class DocumentSplitter:
f"DocumentSplitter only works with text documents but document.content for document ID {doc.id} is None."
)
units = self._split_into_units(doc.content, self.split_by)
- text_splits = self._concatenate_units(units, self.split_length, self.split_overlap)
+ text_splits, splits_pages = self._concatenate_units(units, self.split_length, self.split_overlap)
metadata = deepcopy(doc.meta)
metadata["source_id"] = doc.id
- split_docs += [Document(content=txt, meta=metadata) for txt in text_splits]
+ split_docs += self._create_docs_from_splits(
+ text_splits=text_splits, splits_pages=splits_pages, meta=metadata
+ )
return {"documents": split_docs}
def _split_into_units(self, text: str, split_by: Literal["word", "sentence", "passage", "page"]) -> List[str]:
@@ -95,15 +97,40 @@ class DocumentSplitter:
units[i] += split_at
return units
- def _concatenate_units(self, elements: List[str], split_length: int, split_overlap: int) -> List[str]:
+ def _concatenate_units(
+ self, elements: List[str], split_length: int, split_overlap: int
+ ) -> Tuple[List[str], List[int]]:
"""
- Concatenates the elements into parts of split_length units.
+ Concatenates the elements into parts of split_length units keeping track of the original page number that each element belongs.
"""
text_splits = []
+ splits_pages = []
+ cur_page = 1
segments = windowed(elements, n=split_length, step=split_length - split_overlap)
for seg in segments:
current_units = [unit for unit in seg if unit is not None]
txt = "".join(current_units)
if len(txt) > 0:
text_splits.append(txt)
- return text_splits
+ splits_pages.append(cur_page)
+ processed_units = current_units[: split_length - split_overlap]
+ if self.split_by == "page":
+ num_page_breaks = len(processed_units)
+ else:
+ num_page_breaks = sum(processed_unit.count("\f") for processed_unit in processed_units)
+ cur_page += num_page_breaks
+ return text_splits, splits_pages
+
+ @staticmethod
+ def _create_docs_from_splits(text_splits: List[str], splits_pages: List[int], meta: Dict) -> List[Document]:
+ """
+ Creates Document objects from text splits enriching them with page number and the metadata of the original document.
+ """
+ documents: List[Document] = []
+
+ for i, txt in enumerate(text_splits):
+ meta = deepcopy(meta)
+ doc = Document(content=txt, meta=meta)
+ doc.meta["page_number"] = splits_pages[i]
+ documents.append(doc)
+ return documents
diff --git a/releasenotes/notes/add-page-number-to-document-splitter-162e9dc7443575f0.yaml b/releasenotes/notes/add-page-number-to-document-splitter-162e9dc7443575f0.yaml
new file mode 100644
index 00000000..8c97663c
--- /dev/null
+++ b/releasenotes/notes/add-page-number-to-document-splitter-162e9dc7443575f0.yaml
@@ -0,0 +1,7 @@
+---
+highlights: >
+ Add the "page_number" field to the metadata of all output documents.
+
+enhancements:
+ - |
+ Now the DocumentSplitter adds the "page_number" field to the metadata of all output documents to keep track of the page of the original document it belongs to.
| deepset-ai/haystack | 8d04e530da24b5e5c8c11af29829714eeea47db2 | diff --git a/test/components/preprocessors/test_document_splitter.py b/test/components/preprocessors/test_document_splitter.py
index 479f0d50..4874c25b 100644
--- a/test/components/preprocessors/test_document_splitter.py
+++ b/test/components/preprocessors/test_document_splitter.py
@@ -141,3 +141,98 @@ class TestDocumentSplitter:
for doc, split_doc in zip(documents, result["documents"]):
assert doc.meta.items() <= split_doc.meta.items()
assert split_doc.content == "Text."
+
+ def test_add_page_number_to_metadata_with_no_overlap_word_split(self):
+ splitter = DocumentSplitter(split_by="word", split_length=2)
+ doc1 = Document(content="This is some text.\f This text is on another page.")
+ doc2 = Document(content="This content has two.\f\f page brakes.")
+ result = splitter.run(documents=[doc1, doc2])
+
+ expected_pages = [1, 1, 2, 2, 2, 1, 1, 3]
+ for doc, p in zip(result["documents"], expected_pages):
+ assert doc.meta["page_number"] == p
+
+ def test_add_page_number_to_metadata_with_no_overlap_sentence_split(self):
+ splitter = DocumentSplitter(split_by="sentence", split_length=1)
+ doc1 = Document(content="This is some text.\f This text is on another page.")
+ doc2 = Document(content="This content has two.\f\f page brakes.")
+ result = splitter.run(documents=[doc1, doc2])
+
+ expected_pages = [1, 1, 1, 1]
+ for doc, p in zip(result["documents"], expected_pages):
+ assert doc.meta["page_number"] == p
+
+ def test_add_page_number_to_metadata_with_no_overlap_passage_split(self):
+ splitter = DocumentSplitter(split_by="passage", split_length=1)
+ doc1 = Document(
+ content="This is a text with some words.\f There is a second sentence.\n\nAnd there is a third sentence.\n\nAnd more passages.\n\n\f And another passage."
+ )
+ result = splitter.run(documents=[doc1])
+
+ expected_pages = [1, 2, 2, 2]
+ for doc, p in zip(result["documents"], expected_pages):
+ assert doc.meta["page_number"] == p
+
+ def test_add_page_number_to_metadata_with_no_overlap_page_split(self):
+ splitter = DocumentSplitter(split_by="page", split_length=1)
+ doc1 = Document(
+ content="This is a text with some words. There is a second sentence.\f And there is a third sentence.\f And another passage."
+ )
+ result = splitter.run(documents=[doc1])
+ expected_pages = [1, 2, 3]
+ for doc, p in zip(result["documents"], expected_pages):
+ assert doc.meta["page_number"] == p
+
+ splitter = DocumentSplitter(split_by="page", split_length=2)
+ doc1 = Document(
+ content="This is a text with some words. There is a second sentence.\f And there is a third sentence.\f And another passage."
+ )
+ result = splitter.run(documents=[doc1])
+ expected_pages = [1, 3]
+
+ for doc, p in zip(result["documents"], expected_pages):
+ assert doc.meta["page_number"] == p
+
+ def test_add_page_number_to_metadata_with_overlap_word_split(self):
+ splitter = DocumentSplitter(split_by="word", split_length=3, split_overlap=1)
+ doc1 = Document(content="This is some text. And\f this text is on another page.")
+ doc2 = Document(content="This content has two.\f\f page brakes.")
+ result = splitter.run(documents=[doc1, doc2])
+
+ expected_pages = [1, 1, 1, 2, 2, 1, 1, 3]
+ for doc, p in zip(result["documents"], expected_pages):
+ print(doc.content, doc.meta, p)
+ assert doc.meta["page_number"] == p
+
+ def test_add_page_number_to_metadata_with_overlap_sentence_split(self):
+ splitter = DocumentSplitter(split_by="sentence", split_length=2, split_overlap=1)
+ doc1 = Document(content="This is some text. And this is more text.\f This text is on another page. End.")
+ doc2 = Document(content="This content has two.\f\f page brakes. More text.")
+ result = splitter.run(documents=[doc1, doc2])
+
+ expected_pages = [1, 1, 1, 2, 1, 1]
+ for doc, p in zip(result["documents"], expected_pages):
+ print(doc.content, doc.meta, p)
+ assert doc.meta["page_number"] == p
+
+ def test_add_page_number_to_metadata_with_overlap_passage_split(self):
+ splitter = DocumentSplitter(split_by="passage", split_length=2, split_overlap=1)
+ doc1 = Document(
+ content="This is a text with some words.\f There is a second sentence.\n\nAnd there is a third sentence.\n\nAnd more passages.\n\n\f And another passage."
+ )
+ result = splitter.run(documents=[doc1])
+
+ expected_pages = [1, 2, 2]
+ for doc, p in zip(result["documents"], expected_pages):
+ assert doc.meta["page_number"] == p
+
+ def test_add_page_number_to_metadata_with_overlap_page_split(self):
+ splitter = DocumentSplitter(split_by="page", split_length=2, split_overlap=1)
+ doc1 = Document(
+ content="This is a text with some words. There is a second sentence.\f And there is a third sentence.\f And another passage."
+ )
+ result = splitter.run(documents=[doc1])
+ expected_pages = [1, 2, 3]
+
+ for doc, p in zip(result["documents"], expected_pages):
+ assert doc.meta["page_number"] == p
| feat: Add `page_number` to meta of Documents in `DocumentSplitter`
**Is your feature request related to a problem? Please describe.**
In Haystack v1 we had an option in the Preprocessor to add the original `page_number` to a Document's metadata when it was split into a chunk. This feature made downstream applications that visualize the retrieved text in the original files (e.g. PDFs) very easy and straightforward, so I'd like to see it in Haystack v2 as well.
**Describe the solution you'd like**
I would like to add the option to store the `page_number` in the meta info of the Document in the DocumentSplitter component. I believe we can use a similar (or the same) implementation for calculating this as we did for the Preprocessor.
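A minimal sketch of one way the calculation could work, assuming pages are delimited by form-feed characters (`\f`) as in the tests below; the helper name and lookup logic here are illustrative, not the actual Haystack implementation:

```python
# Illustrative only: page_number of a chunk = 1 + number of "\f" page breaks
# that occur in the source text before the position where the chunk starts.
def assign_page_numbers(text: str, chunks: list) -> list:
    pages, pos = [], 0
    for chunk in chunks:
        start = text.find(chunk, pos)            # locate this chunk in the source text
        pages.append(1 + text.count("\f", 0, start))
        pos = start + len(chunk)
    return pages

text = "Page one.\fPage two.\f\fPage four."
chunks = ["Page one.\f", "Page two.\f\f", "Page four."]
assert assign_page_numbers(text, chunks) == [1, 2, 4]
```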
| 0.0 | [
"test/components/preprocessors/test_document_splitter.py::TestDocumentSplitter::test_add_page_number_to_metadata_with_no_overlap_word_split",
"test/components/preprocessors/test_document_splitter.py::TestDocumentSplitter::test_add_page_number_to_metadata_with_no_overlap_sentence_split",
"test/components/preprocessors/test_document_splitter.py::TestDocumentSplitter::test_add_page_number_to_metadata_with_no_overlap_passage_split",
"test/components/preprocessors/test_document_splitter.py::TestDocumentSplitter::test_add_page_number_to_metadata_with_no_overlap_page_split",
"test/components/preprocessors/test_document_splitter.py::TestDocumentSplitter::test_add_page_number_to_metadata_with_overlap_word_split",
"test/components/preprocessors/test_document_splitter.py::TestDocumentSplitter::test_add_page_number_to_metadata_with_overlap_sentence_split",
"test/components/preprocessors/test_document_splitter.py::TestDocumentSplitter::test_add_page_number_to_metadata_with_overlap_passage_split",
"test/components/preprocessors/test_document_splitter.py::TestDocumentSplitter::test_add_page_number_to_metadata_with_overlap_page_split"
] | [
"test/components/preprocessors/test_document_splitter.py::TestDocumentSplitter::test_non_text_document",
"test/components/preprocessors/test_document_splitter.py::TestDocumentSplitter::test_single_doc",
"test/components/preprocessors/test_document_splitter.py::TestDocumentSplitter::test_empty_list",
"test/components/preprocessors/test_document_splitter.py::TestDocumentSplitter::test_unsupported_split_by",
"test/components/preprocessors/test_document_splitter.py::TestDocumentSplitter::test_unsupported_split_length",
"test/components/preprocessors/test_document_splitter.py::TestDocumentSplitter::test_unsupported_split_overlap",
"test/components/preprocessors/test_document_splitter.py::TestDocumentSplitter::test_split_by_word",
"test/components/preprocessors/test_document_splitter.py::TestDocumentSplitter::test_split_by_word_multiple_input_docs",
"test/components/preprocessors/test_document_splitter.py::TestDocumentSplitter::test_split_by_sentence",
"test/components/preprocessors/test_document_splitter.py::TestDocumentSplitter::test_split_by_passage",
"test/components/preprocessors/test_document_splitter.py::TestDocumentSplitter::test_split_by_page",
"test/components/preprocessors/test_document_splitter.py::TestDocumentSplitter::test_split_by_word_with_overlap",
"test/components/preprocessors/test_document_splitter.py::TestDocumentSplitter::test_source_id_stored_in_metadata",
"test/components/preprocessors/test_document_splitter.py::TestDocumentSplitter::test_copy_metadata"
] | 2024-04-25 19:09:50+00:00 | 1,875 |
|
delph-in__pydelphin-359 | diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml
index 2a5298f..0ef3a60 100644
--- a/.github/workflows/checks.yml
+++ b/.github/workflows/checks.yml
@@ -37,4 +37,4 @@ jobs:
pytest .
- name: Type-check with mypy
run: |
- mypy delphin --namespace-packages --explicit-package-bases --ignore-missing-imports
+ mypy delphin --namespace-packages --explicit-package-bases --ignore-missing-imports --implicit-optional
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e7ef17a..b11f97b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,14 @@
# Change Log
+## Unreleased
+
+### Fixed
+
+* `delphin.tdl.ConsList.values()` and `delphin.tdl.DiffList.values()`
+ now treat explicit sublists as regular items instead of descending
+ into their structures ([#357])
+
+
## [v1.7.0]
**Release date: 2022-10-13**
@@ -1556,3 +1565,4 @@ information about changes, except for
[#343]: https://github.com/delph-in/pydelphin/issues/343
[#344]: https://github.com/delph-in/pydelphin/issues/344
[#352]: https://github.com/delph-in/pydelphin/issues/352
+[#357]: https://github.com/delph-in/pydelphin/issues/357
diff --git a/delphin/cli/convert.py b/delphin/cli/convert.py
index 5d50e28..4dadf80 100644
--- a/delphin/cli/convert.py
+++ b/delphin/cli/convert.py
@@ -9,9 +9,7 @@ with "-lines" to enable line-based reading/writing, in which case the
import sys
import argparse
-import warnings
-from delphin.exceptions import PyDelphinWarning
from delphin.commands import convert
from delphin import util
diff --git a/delphin/cli/repp.py b/delphin/cli/repp.py
index f47571c..ed0cdb8 100644
--- a/delphin/cli/repp.py
+++ b/delphin/cli/repp.py
@@ -11,9 +11,7 @@ useful for debugging REPP modules.
import sys
import argparse
-import warnings
-from delphin.exceptions import PyDelphinWarning
from delphin.commands import repp
diff --git a/delphin/tdl.py b/delphin/tdl.py
index aa7a18a..ec330f4 100644
--- a/delphin/tdl.py
+++ b/delphin/tdl.py
@@ -253,6 +253,10 @@ class AVM(FeatureStructure, Term):
return fs
+class _ImplicitAVM(AVM):
+ """AVM implicitly constructed by list syntax."""
+
+
class ConsList(AVM):
"""
AVM subclass for cons-lists (``< ... >``)
@@ -308,12 +312,7 @@ class ConsList(AVM):
if self._avm is None:
return []
else:
- vals = [val for _, val in _collect_list_items(self)]
- # the < a . b > notation puts b on the last REST path,
- # which is not returned by _collect_list_items()
- if self.terminated and self[self._last_path] is not None:
- vals.append(self[self._last_path])
- return vals
+ return [val for _, val in _collect_list_items(self)]
def append(self, value):
"""
@@ -330,7 +329,7 @@ class ConsList(AVM):
path += '.'
self[path + LIST_HEAD] = value
self._last_path = path + LIST_TAIL
- self[self._last_path] = AVM()
+ self[self._last_path] = _ImplicitAVM()
else:
raise TDLError('Cannot append to a closed list.')
@@ -395,7 +394,7 @@ class DiffList(AVM):
if values:
# use ConsList to construct the list, but discard the class
tmplist = ConsList(values, end=cr)
- dl_list = AVM()
+ dl_list = _ImplicitAVM()
dl_list._avm.update(tmplist._avm)
dl_list._feats = tmplist._feats
self.last = 'LIST.' + tmplist._last_path
@@ -416,16 +415,25 @@ class DiffList(AVM):
"""
Return the list of values in the DiffList feature structure.
"""
- return [val for _, val
- in _collect_list_items(self.get(DIFF_LIST_LIST))]
+ if isinstance(self[DIFF_LIST_LIST], Coreference):
+ vals = []
+ else:
+ vals = [val for _, val
+ in _collect_list_items(self.get(DIFF_LIST_LIST))]
+ vals.pop() # last item of diff list is coreference
+ return vals
def _collect_list_items(d):
- if d is None or not isinstance(d, AVM) or d.get(LIST_HEAD) is None:
+ if not isinstance(d, AVM) or d.get(LIST_HEAD) is None:
return []
vals = [(LIST_HEAD, d[LIST_HEAD])]
- vals.extend((LIST_TAIL + '.' + path, val)
- for path, val in _collect_list_items(d.get(LIST_TAIL)))
+ rest = d[LIST_TAIL]
+ if isinstance(rest, _ImplicitAVM):
+ vals.extend((LIST_TAIL + '.' + path, val)
+ for path, val in _collect_list_items(rest))
+ elif rest is not None:
+ vals.append((LIST_TAIL, rest))
return vals
| delph-in/pydelphin | b5c7094691f01a161983224fe78b2ad26e7fea9b | diff --git a/tests/tdl_test.py b/tests/tdl_test.py
index b4b88d2..5cae05c 100644
--- a/tests/tdl_test.py
+++ b/tests/tdl_test.py
@@ -960,3 +960,18 @@ def test_format_environments():
' :end :instance.\n'
' :include "another.tdl".\n'
':end :type.')
+
+
+def test_issue_357():
+ # https://github.com/delph-in/pydelphin/issues/357
+ t = TypeDefinition(
+ 'id',
+ ConsList(
+ [TypeIdentifier('a')],
+ end=ConsList([TypeIdentifier('b')], end=TypeIdentifier('c'))
+ )
+ )
+ c = t.conjunction.terms[0]
+ assert isinstance(c, ConsList)
+ assert len(c.values()) == 2
+ assert tdl.format(t) == 'id := < a . < b . c > >.'
| format function for TypeDefinition seems to mess up some of the Lists
If I `iterparse` this TypeDefinition
```
main-vprn := basic-main-verb & norm-pronominal-verb &
[ SYNSEM.LOCAL.CAT.VAL [ SUBJ < #subj >,
COMPS < #comps >,
CLTS #clt ],
ARG-ST < #subj . < #comps . #clt > > ].
```
and then print it back out using the `format` function, I get this:
```
main-vprn := basic-main-verb & norm-pronominal-verb &
[ SYNSEM.LOCAL.CAT.VAL [ SUBJ < #subj >,
COMPS < #comps >,
CLTS #clt ],
ARG-ST < #subj, #comps . < #comps . #clt > > ]. <----- Note the extra `, #comps`, which was not there before
``` | 0.0 | [
"tests/tdl_test.py::test_issue_357"
] | [
"tests/tdl_test.py::test_Term",
"tests/tdl_test.py::test_TypeIdentifier",
"tests/tdl_test.py::test_String",
"tests/tdl_test.py::test_Regex",
"tests/tdl_test.py::test_AVM",
"tests/tdl_test.py::test_ConsList",
"tests/tdl_test.py::test_DiffList",
"tests/tdl_test.py::test_Coreference",
"tests/tdl_test.py::TestConjunction::test_init",
"tests/tdl_test.py::TestConjunction::test_and",
"tests/tdl_test.py::TestConjunction::test_eq",
"tests/tdl_test.py::TestConjunction::test__contains__",
"tests/tdl_test.py::TestConjunction::test__getitem__",
"tests/tdl_test.py::TestConjunction::test__setitem__",
"tests/tdl_test.py::TestConjunction::test__setitem__issue293",
"tests/tdl_test.py::TestConjunction::test__delitem__",
"tests/tdl_test.py::TestConjunction::test_get",
"tests/tdl_test.py::TestConjunction::test_normalize",
"tests/tdl_test.py::TestConjunction::test_terms",
"tests/tdl_test.py::TestConjunction::test_add",
"tests/tdl_test.py::TestConjunction::test_types",
"tests/tdl_test.py::TestConjunction::test_features",
"tests/tdl_test.py::TestConjunction::test_string",
"tests/tdl_test.py::test_TypeDefinition",
"tests/tdl_test.py::test_TypeAddendum",
"tests/tdl_test.py::test_LexicalRuleDefinition",
"tests/tdl_test.py::test_parse_identifiers",
"tests/tdl_test.py::test_parse_supertypes",
"tests/tdl_test.py::test_parse_no_features",
"tests/tdl_test.py::test_parse_string_features",
"tests/tdl_test.py::test_quoted_symbol",
"tests/tdl_test.py::test_parse_type_features",
"tests/tdl_test.py::test_parse_cons_list",
"tests/tdl_test.py::test_issue_294",
"tests/tdl_test.py::test_parse_diff_list",
"tests/tdl_test.py::test_parse_multiple_features",
"tests/tdl_test.py::test_parse_multiple_avms",
"tests/tdl_test.py::test_parse_feature_path",
"tests/tdl_test.py::test_parse_coreferences",
"tests/tdl_test.py::test_parse_typedef",
"tests/tdl_test.py::test_parse_typeaddendum",
"tests/tdl_test.py::test_parse_lexicalruledefinition",
"tests/tdl_test.py::test_parse_docstrings",
"tests/tdl_test.py::test_parse_letterset",
"tests/tdl_test.py::test_parse_wildcard",
"tests/tdl_test.py::test_parse_linecomment",
"tests/tdl_test.py::test_parse_blockcomment",
"tests/tdl_test.py::test_parse_environments",
"tests/tdl_test.py::test_format_TypeTerms",
"tests/tdl_test.py::test_format_AVM",
"tests/tdl_test.py::test_format_lists",
"tests/tdl_test.py::test_format_docstring_terms",
"tests/tdl_test.py::test_format_Conjunction",
"tests/tdl_test.py::test_format_typedefs",
"tests/tdl_test.py::test_format_morphsets",
"tests/tdl_test.py::test_format_environments"
] | 2023-01-03 07:08:39+00:00 | 1,876 |
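For reference, the fix in the record above can be exercised without a grammar file; this round-trip sketch is lifted from the regression test and assumes a pydelphin version that includes the change:

```python
from delphin import tdl
from delphin.tdl import ConsList, TypeDefinition, TypeIdentifier

# id := < a . < b . c > > -- a cons list whose dotted tail is itself a list.
t = TypeDefinition(
    'id',
    ConsList([TypeIdentifier('a')],
             end=ConsList([TypeIdentifier('b')], end=TypeIdentifier('c'))),
)
cons = t.conjunction.terms[0]
print(len(cons.values()))  # 2: the explicit sublist counts as a single item
print(tdl.format(t))       # id := < a . < b . c > >.
```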
|
delph-in__pydelphin-362 | diff --git a/CHANGELOG.md b/CHANGELOG.md
index b99690f..1163e8a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,11 @@
## Unreleased
+### Added
+
+* `delphin.tdl.LineComment` class ([#356])
+* `delphin.tdl.BlockComment` class ([#356])
+
### Fixed
* `delphin.tdl.ConsList.values()` and `delphin.tdl.DiffList.values()`
@@ -9,6 +14,11 @@
into their structures ([#357])
* Implicit optional types are made explicit ([#360])
+### Changed
+
+* TDL parsing now models line and block comments ([#356])
+* TDL formatting now formats `LineComment` and `BlockComment` classes ([#356])
+
## [v1.7.0]
@@ -1566,5 +1576,6 @@ information about changes, except for
[#343]: https://github.com/delph-in/pydelphin/issues/343
[#344]: https://github.com/delph-in/pydelphin/issues/344
[#352]: https://github.com/delph-in/pydelphin/issues/352
+[#356]: https://github.com/delph-in/pydelphin/issues/356
[#357]: https://github.com/delph-in/pydelphin/issues/357
[#360]: https://github.com/delph-in/pydelphin/issues/360
diff --git a/delphin/tdl.py b/delphin/tdl.py
index ec330f4..4bb0ae5 100644
--- a/delphin/tdl.py
+++ b/delphin/tdl.py
@@ -853,6 +853,14 @@ class FileInclude:
self.path = Path(basedir, value).with_suffix('.tdl')
+class LineComment(str):
+ """Single-line comments in TDL."""
+
+
+class BlockComment(str):
+ """Multi-line comments in TDL."""
+
+
# NOTE: be careful rearranging subpatterns in _tdl_lex_re; some must
# appear before others, e.g., """ before ", <! before <, etc.,
# to prevent short-circuiting from blocking the larger patterns
@@ -1055,9 +1063,9 @@ def _parse_tdl(tokens, path):
except StopIteration: # normal EOF
break
if gid == 2:
- yield ('BlockComment', token, line_no)
+ yield ('BlockComment', BlockComment(token), line_no)
elif gid == 3:
- yield ('LineComment', token, line_no)
+ yield ('LineComment', LineComment(token), line_no)
elif gid == 20:
obj = _parse_letterset(token, line_no)
yield (obj.__class__.__name__, obj, line_no)
@@ -1371,6 +1379,10 @@ def format(obj, indent=0):
return _format_environment(obj, indent)
elif isinstance(obj, FileInclude):
return _format_include(obj, indent)
+ elif isinstance(obj, LineComment):
+ return _format_linecomment(obj, indent)
+ elif isinstance(obj, BlockComment):
+ return _format_blockcomment(obj, indent)
else:
raise ValueError(f'cannot format object as TDL: {obj!r}')
@@ -1584,3 +1596,11 @@ def _format_environment(env, indent):
def _format_include(fi, indent):
return '{}:include "{}".'.format(' ' * indent, fi.value)
+
+
+def _format_linecomment(obj, indent):
+ return '{};{}'.format(' ' * indent, str(obj))
+
+
+def _format_blockcomment(obj, indent):
+ return '{}#|{}|#'.format(' ' * indent, str(obj))
diff --git a/docs/api/delphin.tdl.rst b/docs/api/delphin.tdl.rst
index 2d45274..f07b863 100644
--- a/docs/api/delphin.tdl.rst
+++ b/docs/api/delphin.tdl.rst
@@ -166,6 +166,16 @@ Environments and File Inclusion
:members:
+Comments
+''''''''
+
+.. autoclass:: LineComment
+ :members:
+
+.. autoclass:: BlockComment
+ :members:
+
+
Exceptions and Warnings
-----------------------
| delph-in/pydelphin | 4f71897b21f19be1c7efe219c02495cea911714b | diff --git a/tests/tdl_test.py b/tests/tdl_test.py
index 5cae05c..968691d 100644
--- a/tests/tdl_test.py
+++ b/tests/tdl_test.py
@@ -24,6 +24,8 @@ from delphin.tdl import (
TypeEnvironment,
InstanceEnvironment,
FileInclude,
+ LineComment,
+ BlockComment,
TDLError,
TDLSyntaxError,
TDLWarning)
@@ -721,11 +723,13 @@ def test_parse_wildcard():
def test_parse_linecomment():
lc = tdlparse('; this is a comment\n')
assert lc == ' this is a comment'
+ assert isinstance(lc, LineComment)
def test_parse_blockcomment():
bc = tdlparse('#| this is a comment\n on multiple lines|#')
assert bc == ' this is a comment\n on multiple lines'
+ assert isinstance(bc, BlockComment)
def test_parse_environments():
@@ -962,6 +966,24 @@ def test_format_environments():
':end :type.')
+def test_format_fileinclude():
+ assert tdl.format(FileInclude('foo.tdl')) == ':include "foo.tdl".'
+
+
+def test_format_linecomment():
+ assert tdl.format(LineComment(' a comment')) == '; a comment'
+ assert tdl.format(LineComment('; two semicolons')) == ';; two semicolons'
+
+
+def test_format_blockcomment():
+ assert tdl.format(
+ BlockComment(' a block comment ')
+ ) == '#| a block comment |#'
+ assert tdl.format(
+ BlockComment('\n one\n two\n')
+ ) == '#|\n one\n two\n|#'
+
+
def test_issue_357():
# https://github.com/delph-in/pydelphin/issues/357
t = TypeDefinition(
| format function for LineComment and BlockComment
If I understand right, the `format` function is not currently implemented for all event types; in particular, it is not implemented for LineComment and BlockComment. This would be useful in scenarios where someone is automatically updating part of a large TDL file and wants to copy large parts of it without modifying them (while modifying other parts). | 0.0 | [
"tests/tdl_test.py::test_Term",
"tests/tdl_test.py::test_TypeIdentifier",
"tests/tdl_test.py::test_String",
"tests/tdl_test.py::test_Regex",
"tests/tdl_test.py::test_AVM",
"tests/tdl_test.py::test_ConsList",
"tests/tdl_test.py::test_DiffList",
"tests/tdl_test.py::test_Coreference",
"tests/tdl_test.py::TestConjunction::test_init",
"tests/tdl_test.py::TestConjunction::test_and",
"tests/tdl_test.py::TestConjunction::test_eq",
"tests/tdl_test.py::TestConjunction::test__contains__",
"tests/tdl_test.py::TestConjunction::test__getitem__",
"tests/tdl_test.py::TestConjunction::test__setitem__",
"tests/tdl_test.py::TestConjunction::test__setitem__issue293",
"tests/tdl_test.py::TestConjunction::test__delitem__",
"tests/tdl_test.py::TestConjunction::test_get",
"tests/tdl_test.py::TestConjunction::test_normalize",
"tests/tdl_test.py::TestConjunction::test_terms",
"tests/tdl_test.py::TestConjunction::test_add",
"tests/tdl_test.py::TestConjunction::test_types",
"tests/tdl_test.py::TestConjunction::test_features",
"tests/tdl_test.py::TestConjunction::test_string",
"tests/tdl_test.py::test_TypeDefinition",
"tests/tdl_test.py::test_TypeAddendum",
"tests/tdl_test.py::test_LexicalRuleDefinition",
"tests/tdl_test.py::test_parse_identifiers",
"tests/tdl_test.py::test_parse_supertypes",
"tests/tdl_test.py::test_parse_no_features",
"tests/tdl_test.py::test_parse_string_features",
"tests/tdl_test.py::test_quoted_symbol",
"tests/tdl_test.py::test_parse_type_features",
"tests/tdl_test.py::test_parse_cons_list",
"tests/tdl_test.py::test_issue_294",
"tests/tdl_test.py::test_parse_diff_list",
"tests/tdl_test.py::test_parse_multiple_features",
"tests/tdl_test.py::test_parse_multiple_avms",
"tests/tdl_test.py::test_parse_feature_path",
"tests/tdl_test.py::test_parse_coreferences",
"tests/tdl_test.py::test_parse_typedef",
"tests/tdl_test.py::test_parse_typeaddendum",
"tests/tdl_test.py::test_parse_lexicalruledefinition",
"tests/tdl_test.py::test_parse_docstrings",
"tests/tdl_test.py::test_parse_letterset",
"tests/tdl_test.py::test_parse_wildcard",
"tests/tdl_test.py::test_parse_linecomment",
"tests/tdl_test.py::test_parse_blockcomment",
"tests/tdl_test.py::test_parse_environments",
"tests/tdl_test.py::test_format_TypeTerms",
"tests/tdl_test.py::test_format_AVM",
"tests/tdl_test.py::test_format_lists",
"tests/tdl_test.py::test_format_docstring_terms",
"tests/tdl_test.py::test_format_Conjunction",
"tests/tdl_test.py::test_format_typedefs",
"tests/tdl_test.py::test_format_morphsets",
"tests/tdl_test.py::test_format_environments",
"tests/tdl_test.py::test_format_fileinclude",
"tests/tdl_test.py::test_format_linecomment",
"tests/tdl_test.py::test_format_blockcomment",
"tests/tdl_test.py::test_issue_357"
] | [] | 2023-01-08 02:33:35+00:00 | 1,877 |
|
deppen8__pandas-vet-127 | diff --git a/src/pandas_vet/__init__.py b/src/pandas_vet/__init__.py
index 9060511..2a4f0e0 100644
--- a/src/pandas_vet/__init__.py
+++ b/src/pandas_vet/__init__.py
@@ -173,7 +173,11 @@ def check_inplace_false(node: ast.Call) -> List:
"""
errors = []
for kw in node.keywords:
- if kw.arg == "inplace" and kw.value.value is True:
+ if (
+ kw.arg == "inplace"
+ and hasattr(kw.value, "value")
+ and kw.value.value is True
+ ):
errors.append(PD002(node.lineno, node.col_offset))
return errors
| deppen8/pandas-vet | b45e2283b2d880f88401992424c59682d0e4c380 | diff --git a/tests/test_PD002.py b/tests/test_PD002.py
index 7034674..f34fb1b 100644
--- a/tests/test_PD002.py
+++ b/tests/test_PD002.py
@@ -24,3 +24,16 @@ def test_PD002_fail():
actual = list(VetPlugin(tree).run())
expected = [PD002(1, 0)]
assert actual == expected
+
+
+def test_PD002_with_variable_does_not_crash():
+ """
+ Test that using inplace=<some variable> does not raise Exceptions.
+
+ It will not be able to infer the value of the variable, so no errors either.
+ """
+ statement = """use_inplace=True; df.drop(['a'], axis=1, inplace=use_inplace)"""
+ tree = ast.parse(statement)
+ actual = list(VetPlugin(tree).run())
+ expected = []
+ assert actual == expected
| inplace set to a variable raises exception
**Describe the bug**
An exception is raised.
**To Reproduce**
Steps to reproduce the behavior:
Have the following code in a file
```python
def some_function(dataFrame, in_place=False):
return dataFrame.drop([], inplace=in_place)
```
**Expected behavior**
`flake8` should report violations rather than throwing exceptions.
**Screenshots**
<img width="732" alt="Screen Shot 2021-09-11 at 11 42 54 AM" src="https://user-images.githubusercontent.com/82820859/132955083-596471df-a18d-4b6d-b45b-afbdaae3596d.png">
**Additional context**
```
bash-5.1# cat /usr/lib/python3.9/site-packages/pandas_vet/version.py
__version__ = "0.2.2"
```
This is running on a docker container based on alpine:3.14.1. Same results obtained on a mac.
Things work if we do not provide a variable:
```python
def some_function(dataFrame, in_place=False):
return dataFrame.drop([], inplace=False)
```
| 0.0 | [
"tests/test_PD002.py::test_PD002_with_variable_does_not_crash"
] | [
"tests/test_PD002.py::test_PD002_pass",
"tests/test_PD002.py::test_PD002_fail"
] | 2023-08-09 03:15:39+00:00 | 1,878 |
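The crash in the record above comes from the shape of the AST rather than from pandas itself; a standard-library sketch of the situation the new `hasattr` guard handles:

```python
import ast

# inplace=use_inplace binds the keyword to an ast.Name, which has no .value
# attribute, so the old `kw.value.value is True` check raised AttributeError.
call = ast.parse("df.drop(['a'], axis=1, inplace=use_inplace)").body[0].value
for kw in call.keywords:
    if kw.arg == "inplace":
        print(type(kw.value).__name__)      # Name (a Constant only for literal True/False)
        print(hasattr(kw.value, "value"))   # False -> PD002 now simply skips it
```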
|
deppen8__pandas-vet-39 | diff --git a/pandas_vet/__init__.py b/pandas_vet/__init__.py
index b675fd8..3e5e351 100644
--- a/pandas_vet/__init__.py
+++ b/pandas_vet/__init__.py
@@ -33,6 +33,7 @@ class Visitor(ast.NodeVisitor):
self.errors.extend(check_for_unstack(node))
self.errors.extend(check_for_arithmetic_methods(node))
self.errors.extend(check_for_comparison_methods(node))
+ self.errors.extend(check_for_read_table(node))
def visit_Subscript(self, node):
self.generic_visit(node) # continue checking children
@@ -136,19 +137,19 @@ def check_for_comparison_methods(node: ast.Call) -> List:
def check_for_ix(node: ast.Subscript) -> List:
- if node.value.attr == "ix":
+ if isinstance(node.value, ast.Attribute) and node.value.attr == "ix":
return [PD007(node.lineno, node.col_offset)]
return []
-def check_for_at(node: ast.Call) -> List:
- if node.value.attr == "at":
+def check_for_at(node: ast.Subscript) -> List:
+ if isinstance(node.value, ast.Attribute) and node.value.attr == "at":
return [PD008(node.lineno, node.col_offset)]
return []
-def check_for_iat(node: ast.Call) -> List:
- if node.value.attr == "iat":
+def check_for_iat(node: ast.Subscript) -> List:
+ if isinstance(node.value, ast.Attribute) and node.value.attr == "iat":
return [PD009(node.lineno, node.col_offset)]
return []
@@ -177,6 +178,17 @@ def check_for_unstack(node: ast.Call) -> List:
return []
+def check_for_read_table(node: ast.Call) -> List:
+ """
+ Check AST for occurence of the `.read_table()` method on the pandas object.
+
+ Error/warning message to recommend use of `.read_csv()` method instead.
+ """
+ if isinstance(node.func, ast.Attribute) and node.func.attr == "read_table":
+ return [PD012(node.lineno, node.col_offset)]
+ return []
+
+
error = namedtuple("Error", ["lineno", "col", "message", "type"])
VetError = partial(partial, error, type=VetPlugin)
@@ -210,3 +222,6 @@ PD009 = VetError(
PD010 = VetError(
message="PD010 '.pivot_table' is preferred to '.pivot' or '.unstack'; provides same functionality"
)
+PD012 = VetError(
+ message="PDO12 '.read_csv' is preferred to '.read_table'; provides same functionality"
+)
| deppen8/pandas-vet | f0865882b8f857ae38fc1c0e42e671418d8bb3ee | diff --git a/tests/test_PD012.py b/tests/test_PD012.py
new file mode 100644
index 0000000..ee45e2e
--- /dev/null
+++ b/tests/test_PD012.py
@@ -0,0 +1,41 @@
+"""
+Test to check for use of the pandas soon-to-be-deprecated `.read_table()`
+method.
+"""
+import ast
+
+from pandas_vet import VetPlugin
+from pandas_vet import PD012
+
+
+def test_PD012_pass_read_csv():
+ """
+ Test that using .read_csv() explicitly does not result in an error.
+ """
+ statement = "df = pd.read_csv(input_file)"
+ tree = ast.parse(statement)
+ actual = list(VetPlugin(tree).run())
+ expected = []
+ assert actual == expected
+
+
+def test_PD012_fail_read_table():
+ """
+ Test that using .read_table() method results in an error.
+ """
+ statement = "df = pd.read_table(input_file)"
+ tree = ast.parse(statement)
+ actual = list(VetPlugin(tree).run())
+ expected = [PD012(1, 5)]
+ assert actual == expected
+
+
+def test_PD012_node_Name_pass():
+ """
+ Test that where 'read_table' is a Name does NOT raise an error
+ """
+ statement = "df = read_table"
+ tree = ast.parse(statement)
+ actual = list(VetPlugin(tree).run())
+ expected = []
+ assert actual == expected
| Check for pd.read_table
Check for `pd.read_table` function call. [See flashcard here](https://deppen8.github.io/pandas-bw/reading-data/). Give error message:
> 'pd.read_table' is deprecated. Use 'pd.read_csv' for all delimited files. | 0.0 | [
"tests/test_PD012.py::test_PD012_pass_read_csv",
"tests/test_PD012.py::test_PD012_fail_read_table",
"tests/test_PD012.py::test_PD012_node_Name_pass"
] | [] | 2019-02-28 21:33:20+00:00 | 1,879 |
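Assuming pandas-vet is installed, the PD012 check added above can be exercised directly on parsed statements, the same way the tests do:

```python
import ast

from pandas_vet import VetPlugin

# PD012 fires on pd.read_table(...) and stays quiet for pd.read_csv(...).
for statement in ("data = pd.read_table(input_file)",
                  "data = pd.read_csv(input_file)"):
    errors = list(VetPlugin(ast.parse(statement)).run())
    print(statement, "->", errors)
```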
|
deppen8__pandas-vet-42 | diff --git a/pandas_vet/__init__.py b/pandas_vet/__init__.py
index 8c286e0..b675fd8 100644
--- a/pandas_vet/__init__.py
+++ b/pandas_vet/__init__.py
@@ -14,6 +14,9 @@ class Visitor(ast.NodeVisitor):
ast.NodeVisitor will automatically call the appropriate method for a given node type
i.e. calling self.visit on an Import node calls visit_import
+
+ The `check` functions should be called from the `visit_` method that
+ would produce a 'fail' condition.
"""
errors = attr.ib(default=attr.Factory(list))
@@ -28,6 +31,8 @@ class Visitor(ast.NodeVisitor):
self.errors.extend(check_for_notnull(node))
self.errors.extend(check_for_pivot(node))
self.errors.extend(check_for_unstack(node))
+ self.errors.extend(check_for_arithmetic_methods(node))
+ self.errors.extend(check_for_comparison_methods(node))
def visit_Subscript(self, node):
self.generic_visit(node) # continue checking children
@@ -86,6 +91,50 @@ def check_for_notnull(node: ast.Call) -> List:
return [PD004(node.lineno, node.col_offset)]
return []
+def check_for_arithmetic_methods(node: ast.Call) -> List:
+ """
+ Check AST for occurence of explicit arithmetic methods.
+
+ Error/warning message to recommend use of binary arithmetic operators instead.
+ """
+ arithmetic_methods = [
+ 'add',
+ 'sub', 'subtract',
+ 'mul', 'multiply',
+ 'div', 'divide', 'truediv',
+ 'pow',
+ 'floordiv',
+ 'mod',
+ ]
+ arithmetic_operators = [
+ '+',
+ '-',
+ '*',
+ '/',
+ '**',
+ '//',
+ '%',
+ ]
+
+ if isinstance(node.func, ast.Attribute) and node.func.attr in arithmetic_methods:
+ return [PD005(node.lineno, node.col_offset)]
+ return []
+
+
+def check_for_comparison_methods(node: ast.Call) -> List:
+ """
+ Check AST for occurence of explicit comparison methods.
+
+ Error/warning message to recommend use of binary comparison operators instead.
+ """
+ comparison_methods = ['gt', 'lt', 'ge', 'le', 'eq', 'ne']
+ comparison_operators = ['>', '<', '>=', '<=', '==', '!=']
+
+ if isinstance(node.func, ast.Attribute) and node.func.attr in comparison_methods:
+ return [PD006(node.lineno, node.col_offset)]
+ return []
+
+
def check_for_ix(node: ast.Subscript) -> List:
if node.value.attr == "ix":
return [PD007(node.lineno, node.col_offset)]
| deppen8/pandas-vet | 128232a43e8dd2819e77da5a03f494691e494da1 | diff --git a/tests/test_PD005.py b/tests/test_PD005.py
new file mode 100644
index 0000000..9c68e0e
--- /dev/null
+++ b/tests/test_PD005.py
@@ -0,0 +1,51 @@
+"""
+Test to check for use of explicit arithmetic methods.
+
+Recommend use of binary arithmetic operators instead.
+"""
+import ast
+
+from pandas_vet import VetPlugin
+from pandas_vet import PD005
+
+
+def test_PD005_pass_arithmetic_operator():
+ """
+ Test that using binary arithmetic operator explicitly does not result in an error.
+ """
+ arithmetic_operators = [
+ '+',
+ '-',
+ '*',
+ '/',
+ '**',
+ '//',
+ '%',
+ ]
+ for op in arithmetic_operators:
+ statement = 'C = A {0} B'.format(op)
+ tree = ast.parse(statement)
+ actual = list(VetPlugin(tree).run())
+ expected = []
+ assert actual == expected
+
+
+def test_PD005_fail_arithmetic_method():
+ """
+ Test that using arithmetic method results in an error.
+ """
+ arithmetic_methods = [
+ 'add',
+ 'sub', 'subtract',
+ 'mul', 'multiply',
+ 'div', 'divide', 'truediv',
+ 'pow',
+ 'floordiv',
+ 'mod',
+ ]
+ for op in arithmetic_methods:
+ statement = 'C = A.{0}(B)'.format(op)
+ tree = ast.parse(statement)
+ actual = list(VetPlugin(tree).run())
+ expected = [PD005(1, 4)]
+ assert actual == expected
diff --git a/tests/test_PD006.py b/tests/test_PD006.py
new file mode 100644
index 0000000..cb30365
--- /dev/null
+++ b/tests/test_PD006.py
@@ -0,0 +1,35 @@
+"""
+Test to check for use of explicit comparison methods.
+
+Recommend use of binary comparison operators instead.
+"""
+import ast
+
+from pandas_vet import VetPlugin
+from pandas_vet import PD006
+
+
+def test_PD006_pass_comparison_operator():
+ """
+ Test that using binary comparison operator explicitly does not result in an error.
+ """
+ comparison_operators = ['>', '<', '>=', '<=', '==', '!=']
+ for op in comparison_operators:
+ statement = 'C = A {0} B'.format(op)
+ tree = ast.parse(statement)
+ actual = list(VetPlugin(tree).run())
+ expected = []
+ assert actual == expected
+
+
+def test_PD006_fail_comparison_method():
+ """
+ Test that using comparison method results in an error.
+ """
+ comparison_methods = ['gt', 'lt', 'ge', 'le', 'eq', 'ne']
+ for op in comparison_methods:
+ statement = 'C = A.{0}(B)'.format(op)
+ tree = ast.parse(statement)
+ actual = list(VetPlugin(tree).run())
+ expected = [PD006(1, 4)]
+ assert actual == expected
| Arithmetic and comparison operators
We should implement checks for all of the text-based arithmetic and comparison operators. Where these methods are found, recommend using the operator itself. Something like:
> Use <operator> instead of <text method>
| use | check for |
| --- | ---------- |
| `+` | `.add` |
| `-` | `.sub` and `.subtract` |
| `*` | `.mul` and `.multiply` |
| `/` | `.div`, `.divide` and `.truediv` |
| `**` | `.pow` |
| `//` | `.floordiv` |
| `%` | `.mod` |
| `>` | `.gt` |
| `<` | `.lt` |
| `>=` | `.ge` |
| `<=` | `.le` |
| `==` | `.eq` |
| `!=` | `.ne` | | 0.0 | [
"tests/test_PD005.py::test_PD005_fail_arithmetic_method",
"tests/test_PD006.py::test_PD006_fail_comparison_method"
] | [
"tests/test_PD005.py::test_PD005_pass_arithmetic_operator",
"tests/test_PD006.py::test_PD006_pass_comparison_operator"
] | 2019-03-03 00:22:37+00:00 | 1,880 |
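The checks in the record above rest on the fact that the method and operator forms are equivalent in pandas; a small illustrative example (not part of the plugin):

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})

# Method form (flagged by PD005/PD006) vs. preferred operator form.
assert df["a"].add(df["b"]).equals(df["a"] + df["b"])
assert df["a"].lt(df["b"]).equals(df["a"] < df["b"])
```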
|
deppen8__pandas-vet-52 | diff --git a/pandas_vet/__init__.py b/pandas_vet/__init__.py
index 1b54d23..6237dd5 100644
--- a/pandas_vet/__init__.py
+++ b/pandas_vet/__init__.py
@@ -37,6 +37,7 @@ class Visitor(ast.NodeVisitor):
self.errors.extend(check_for_notnull(node))
self.errors.extend(check_for_pivot(node))
self.errors.extend(check_for_unstack(node))
+ self.errors.extend(check_for_stack(node))
self.errors.extend(check_for_arithmetic_methods(node))
self.errors.extend(check_for_comparison_methods(node))
self.errors.extend(check_for_read_table(node))
@@ -193,6 +194,17 @@ def check_for_unstack(node: ast.Call) -> List:
return []
+def check_for_stack(node: ast.Call) -> List:
+ """
+ Check AST for occurence of the `.stack()` method on the pandas data frame.
+
+ Error/warning message to recommend use of `.melt()` method instead.
+ """
+ if isinstance(node.func, ast.Attribute) and node.func.attr == "stack":
+ return [PD013(node.lineno, node.col_offset)]
+ return []
+
+
def check_for_values(node: ast.Attribute) -> List:
"""
Check AST for occurence of the `.values` attribute on the pandas data frame.
@@ -255,3 +267,6 @@ PD011 = VetError(
PD012 = VetError(
message="PDO12 '.read_csv' is preferred to '.read_table'; provides same functionality"
)
+PD013 = VetError(
+ message="PD013 '.melt' is preferred to '.stack'; provides same functionality"
+)
| deppen8/pandas-vet | 8605bebf1cb5b59b9fa0c541ed5c026a9d3acbed | diff --git a/tests/test_PD005.py b/tests/test_PD005.py
index 9c68e0e..1fcc596 100644
--- a/tests/test_PD005.py
+++ b/tests/test_PD005.py
@@ -11,7 +11,8 @@ from pandas_vet import PD005
def test_PD005_pass_arithmetic_operator():
"""
- Test that using binary arithmetic operator explicitly does not result in an error.
+ Test that explicit use of binary arithmetic operator does not
+ result in an error.
"""
arithmetic_operators = [
'+',
diff --git a/tests/test_PD006.py b/tests/test_PD006.py
index cb30365..7dae856 100644
--- a/tests/test_PD006.py
+++ b/tests/test_PD006.py
@@ -11,7 +11,8 @@ from pandas_vet import PD006
def test_PD006_pass_comparison_operator():
"""
- Test that using binary comparison operator explicitly does not result in an error.
+ Test that explicit use of binary comparison operator does not
+ result in an error.
"""
comparison_operators = ['>', '<', '>=', '<=', '==', '!=']
for op in comparison_operators:
diff --git a/tests/test_PD010.py b/tests/test_PD010.py
index d3ec020..1e897bd 100644
--- a/tests/test_PD010.py
+++ b/tests/test_PD010.py
@@ -12,7 +12,15 @@ def test_PD010_pass():
"""
Test that using .pivot_table() explicitly does not result in an error.
"""
- statement = "table = df.pivot_table(df, values='D', index=['A', 'B'], columns=['C'], aggfunc=np.sum, fill_value=0)"
+ statement = """table = df.pivot_table(
+ df,
+ values='D',
+ index=['A', 'B'],
+ columns=['C'],
+ aggfunc=np.sum,
+ fill_value=0
+ )
+ """
tree = ast.parse(statement)
actual = list(VetPlugin(tree).run())
expected = []
@@ -21,9 +29,16 @@ def test_PD010_pass():
def test_PD010_fail_pivot():
"""
- Test that using either pd.pivot(df) or df.pivot() methods results in an error.
+ Test that using either pd.pivot(df) or df.pivot() methods
+ results in an error.
+ """
+ statement = """table = pd.pivot(
+ df,
+ index='foo',
+ columns='bar',
+ values='baz'
+ )
"""
- statement = "table = pd.pivot(df, index='foo', columns='bar', values='baz')"
tree = ast.parse(statement)
actual = list(VetPlugin(tree).run())
expected = [PD010(1, 8)]
diff --git a/tests/test_PD011.py b/tests/test_PD011.py
index b8f9cf1..6a671e2 100644
--- a/tests/test_PD011.py
+++ b/tests/test_PD011.py
@@ -1,6 +1,6 @@
"""
-Test to check for use of the pandas dataframe `.array` attribute
-or `.to_array()` method in preference to `.values` attribute.
+Test to check for use of the pandas dataframe `.array` attribute
+or `.to_array()` method in preference to `.values` attribute.
"""
import ast
@@ -40,6 +40,7 @@ def test_PD011_fail_values():
expected = [PD011(1, 9)]
assert actual == expected
+
def test_PD011_pass_node_Name():
"""
Test that where 'values' is a Name does NOT raise an error
diff --git a/tests/test_PD012.py b/tests/test_PD012.py
index ee45e2e..176b2d4 100644
--- a/tests/test_PD012.py
+++ b/tests/test_PD012.py
@@ -1,6 +1,6 @@
"""
Test to check for use of the pandas soon-to-be-deprecated `.read_table()`
-method.
+method.
"""
import ast
diff --git a/tests/test_PD013.py b/tests/test_PD013.py
new file mode 100644
index 0000000..dededc0
--- /dev/null
+++ b/tests/test_PD013.py
@@ -0,0 +1,35 @@
+"""
+Test to check functionality for use of the `.melt()` data frame
+method in preference to `.stack()` method.
+"""
+import ast
+
+from pandas_vet import VetPlugin
+from pandas_vet import PD013
+
+
+def test_PD013_pass():
+ """
+ Test that using .melt() explicitly does not result in an error.
+ """
+ statement = """table = df.melt(
+ id_vars='airline',
+ value_vars=['ATL', 'DEN', 'DFW'],
+ value_name='airline delay'
+ )
+ """
+ tree = ast.parse(statement)
+ actual = list(VetPlugin(tree).run())
+ expected = []
+ assert actual == expected
+
+
+def test_PD013_fail_stack():
+ """
+ Test that using .stack() results in an error.
+ """
+ statement = "table = df.stack(level=-1, dropna=True)"
+ tree = ast.parse(statement)
+ actual = list(VetPlugin(tree).run())
+ expected = [PD013(1, 8)]
+ assert actual == expected
| Check for .stack
Check for `.stack` method. [See flashcard](https://deppen8.github.io/pandas-bw/reshape-split-apply-combine/melt-vs-stack.png). Give error message:
> Prefer '.melt' to '.stack'. '.melt' allows direct column renaming and avoids a MultiIndex | 0.0 | [
"tests/test_PD005.py::test_PD005_pass_arithmetic_operator",
"tests/test_PD005.py::test_PD005_fail_arithmetic_method",
"tests/test_PD006.py::test_PD006_pass_comparison_operator",
"tests/test_PD006.py::test_PD006_fail_comparison_method",
"tests/test_PD010.py::test_PD010_pass",
"tests/test_PD010.py::test_PD010_fail_pivot",
"tests/test_PD010.py::test_PD010_fail_unstack",
"tests/test_PD011.py::test_PD011_pass_to_array",
"tests/test_PD011.py::test_PD011_pass_array",
"tests/test_PD011.py::test_PD011_fail_values",
"tests/test_PD011.py::test_PD011_pass_node_Name",
"tests/test_PD012.py::test_PD012_pass_read_csv",
"tests/test_PD012.py::test_PD012_fail_read_table",
"tests/test_PD012.py::test_PD012_node_Name_pass",
"tests/test_PD013.py::test_PD013_pass",
"tests/test_PD013.py::test_PD013_fail_stack"
] | [] | 2019-03-07 18:36:43+00:00 | 1,881 |
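To make the recommendation above concrete, a small pandas example (illustrative data only) showing why `.melt` is preferred: it names the output columns directly, while `.stack` yields a MultiIndexed result:

```python
import pandas as pd

df = pd.DataFrame({"airline": ["AA", "UA"], "ATL": [10, 20], "DEN": [30, 40]})

melted = df.melt(id_vars="airline", value_vars=["ATL", "DEN"],
                 var_name="airport", value_name="delay")
stacked = df.set_index("airline").stack()

print(melted.columns.tolist())  # ['airline', 'airport', 'delay'] -- named directly
print(stacked.index.nlevels)    # 2 -- a MultiIndex, with no direct renaming
```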
|
deppen8__pandas-vet-69 | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6b26e92..fb5f7e5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,12 +5,42 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html) (to the best of our ability).
+## Unreleased
+
+### Added
+
+- New check `PD901 'df' is a bad variable name. Be kinder to your future self.` ([#69](https://github.com/deppen8/pandas-vet/pull/69))
+- An `--annoy` flag that can be used to activate checks that set to "off" by default. The off-by-default checks should use the convention `PD9xx` ([#69](https://github.com/deppen8/pandas-vet/pull/69))
+- Added `PD901` to README along with an example use of the `--annoy` flag ([#69](https://github.com/deppen8/pandas-vet/pull/69))
+
+### Changed
+
+- `test_PD012.py` had test cases that used `df = <something>`, which conflicted with the new `PD901` check. These were changed to `employees = <something>` ([#69](https://github.com/deppen8/pandas-vet/pull/69))
+- Applied the `black` formatter to the entire pandas-vet package.
+
+### Deprecated
+
+- None
+
+### Removed
+
+- A few extraneous variables ([455d1f0](https://github.com/deppen8/pandas-vet/pull/69/commits/455d1f0525dd4e9590cd10efdcd39c9d9a7923a2))
+
+### Fixed
+
+- None
+
+### Security
+
+- None
+
+
## [0.2.1] - 2019-07-27
### Added
-- Leandro Leites added as contributor (#66)
-- This CHANGELOG.md added
+- Leandro Leites added as contributor ([#66](https://github.com/deppen8/pandas-vet/pull/66))
+- This CHANGELOG.md added ([#68](https://github.com/deppen8/pandas-vet/pull/68))
### Changed
@@ -22,12 +52,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Removed
-- Unnecessary commented line from `setup.py` (#67)
+- Unnecessary commented line from `setup.py` ([#67](https://github.com/deppen8/pandas-vet/pull/67))
### Fixed
-- PD015 would fail if `node.func.value` did not have an `id`. Fixed with #65
-- `version.py` now correctly uses v0.2.x. This version file was not bumped with the last release. (#67)
+- PD015 would fail if `node.func.value` did not have an `id`. Fixed with [#65](https://github.com/deppen8/pandas-vet/pull/65)
+- `version.py` now correctly uses v0.2.x. This version file was not bumped with the last release. ([#67](https://github.com/deppen8/pandas-vet/pull/67))
### Security
diff --git a/README.md b/README.md
index bf5cb1c..f875ae7 100644
--- a/README.md
+++ b/README.md
@@ -138,3 +138,13 @@ flake8 pandas_vet setup.py tests --exclude tests/data
**PD013** '.melt' is preferred to '.stack'; provides same functionality
**PD015** Use '.merge' method instead of 'pd.merge' function. They have equivalent functionality.
+
+### *Very* Opinionated Warnings
+
+These warnings are turned off by default. To enable them, add the `-annoy` flag to your command, e.g.,
+
+```bash
+$ flake8 --annoy my_file.py
+```
+
+**PD901** 'df' is a bad variable name. Be kinder to your future self.
diff --git a/pandas_vet/__init__.py b/pandas_vet/__init__.py
index af81139..fd7bee9 100644
--- a/pandas_vet/__init__.py
+++ b/pandas_vet/__init__.py
@@ -18,6 +18,7 @@ class Visitor(ast.NodeVisitor):
The `check` functions should be called from the `visit_` method that
would produce a 'fail' condition.
"""
+
errors = attr.ib(default=attr.Factory(list))
def visit_Import(self, node):
@@ -56,8 +57,16 @@ class Visitor(ast.NodeVisitor):
"""
Called for `.attribute` nodes.
"""
+ self.generic_visit(node) # continue checking children
self.errors.extend(check_for_values(node))
+ def visit_Name(self, node):
+ """
+ Called for `Assignment` nodes.
+ """
+ self.generic_visit(node) # continue checking children
+ self.errors.extend(check_for_df(node))
+
def check(self, node):
self.errors = []
self.visit(node)
@@ -81,6 +90,22 @@ class VetPlugin:
except Exception as e:
raise PandasVetException(e)
+ @staticmethod
+ def add_options(optmanager):
+ """Informs flake8 to ignore PD9xx by default."""
+ optmanager.extend_default_ignore(disabled_by_default)
+
+ optmanager.add_option(
+ long_option_name="--annoy",
+ action="store_true",
+ dest="annoy",
+ default=False,
+ )
+
+ options, xargs = optmanager.parse_args()
+ if options.annoy:
+ optmanager.remove_from_default_ignore(disabled_by_default)
+
def check_import_name(node: ast.Import) -> List:
"""Check AST for imports of pandas not using the preferred alias 'pd'.
@@ -163,26 +188,23 @@ def check_for_arithmetic_methods(node: ast.Call) -> List:
Error/warning message to recommend use of binary arithmetic operators.
"""
arithmetic_methods = [
- 'add',
- 'sub', 'subtract',
- 'mul', 'multiply',
- 'div', 'divide', 'truediv',
- 'pow',
- 'floordiv',
- 'mod',
- ]
- arithmetic_operators = [
- '+',
- '-',
- '*',
- '/',
- '**',
- '//',
- '%',
- ]
-
- if isinstance(node.func, ast.Attribute) and \
- node.func.attr in arithmetic_methods:
+ "add",
+ "sub",
+ "subtract",
+ "mul",
+ "multiply",
+ "div",
+ "divide",
+ "truediv",
+ "pow",
+ "floordiv",
+ "mod",
+ ]
+
+ if (
+ isinstance(node.func, ast.Attribute)
+ and node.func.attr in arithmetic_methods
+ ):
return [PD005(node.lineno, node.col_offset)]
return []
@@ -193,11 +215,12 @@ def check_for_comparison_methods(node: ast.Call) -> List:
Error/warning message to recommend use of binary comparison operators.
"""
- comparison_methods = ['gt', 'lt', 'ge', 'le', 'eq', 'ne']
- comparison_operators = ['>', '<', '>=', '<=', '==', '!=']
+ comparison_methods = ["gt", "lt", "ge", "le", "eq", "ne"]
- if isinstance(node.func, ast.Attribute) and \
- node.func.attr in comparison_methods:
+ if (
+ isinstance(node.func, ast.Attribute)
+ and node.func.attr in comparison_methods
+ ):
return [PD006(node.lineno, node.col_offset)]
return []
@@ -304,42 +327,55 @@ def check_for_merge(node: ast.Call) -> List:
# object. If the object name is `pd`, and if the `.merge()` method has at
# least two arguments (left, right, ... ) we will assume that it matches
# the pattern that we are trying to check, `pd.merge(left, right)`
- if not hasattr(node.func, 'value'):
- return [] # ignore functions
- elif not hasattr(node.func.value, 'id'):
- return [] # it could be the case that id is not present
+ if not hasattr(node.func, "value"):
+ return [] # ignore functions
+ elif not hasattr(node.func.value, "id"):
+ return [] # it could be the case that id is not present
- if node.func.value.id != 'pd': return[] # assume object name is `pd`
+ if node.func.value.id != "pd":
+ return [] # assume object name is `pd`
- if not len(node.args) >= 2: return [] # at least two arguments
+ if not len(node.args) >= 2:
+ return [] # at least two arguments
- if isinstance(node.func, ast.Attribute) and \
- node.func.attr == "merge":
+ if isinstance(node.func, ast.Attribute) and node.func.attr == "merge":
return [PD015(node.lineno, node.col_offset)]
return []
+def check_for_df(node: ast.Name) -> List:
+ """
+ Check for variables named `df`
+ """
+ if node.id == "df" and isinstance(node.ctx, ast.Store):
+ return [PD901(node.lineno, node.col_offset)]
+ return []
+
+
error = namedtuple("Error", ["lineno", "col", "message", "type"])
VetError = partial(partial, error, type=VetPlugin)
+disabled_by_default = ["PD9"]
+
PD001 = VetError(
message="PD001 pandas should always be imported as 'import pandas as pd'"
)
+
PD002 = VetError(
message="PD002 'inplace = True' should be avoided; it has inconsistent behavior"
)
+
PD003 = VetError(
message="PD003 '.isna' is preferred to '.isnull'; functionality is equivalent"
)
+
PD004 = VetError(
message="PD004 '.notna' is preferred to '.notnull'; functionality is equivalent"
)
-PD005 = VetError(
- message="PD005 Use arithmetic operator instead of method"
-)
-PD006 = VetError(
- message="PD006 Use comparison operator instead of method"
-)
+PD005 = VetError(message="PD005 Use arithmetic operator instead of method")
+
+PD006 = VetError(message="PD006 Use comparison operator instead of method")
+
PD007 = VetError(
message="PD007 '.ix' is deprecated; use more explicit '.loc' or '.iloc'"
)
@@ -364,3 +400,7 @@ PD013 = VetError(
PD015 = VetError(
message="PD015 Use '.merge' method instead of 'pd.merge' function. They have equivalent functionality."
)
+
+PD901 = VetError(
+ message="PD901 'df' is a bad variable name. Be kinder to your future self."
+)
diff --git a/setup.py b/setup.py
index 96c1e4c..10e1b69 100644
--- a/setup.py
+++ b/setup.py
@@ -1,15 +1,10 @@
import setuptools
-requires = [
- "flake8 > 3.0.0",
- "attrs",
-]
+requires = ["flake8 > 3.0.0", "attrs"]
-tests_requires = [
- "pytest > 4.0.0"
-]
+tests_requires = ["pytest > 4.0.0"]
-flake8_entry_point = 'flake8.extension'
+flake8_entry_point = "flake8.extension"
with open("README.md", "rt") as f:
long_description = f.read()
@@ -26,16 +21,10 @@ setuptools.setup(
author="Jacob Deppen",
author_email="[email protected]",
url="https://github.com/deppen8/pandas-vet",
- packages=[
- "pandas_vet",
- ],
+ packages=["pandas_vet"],
install_requires=requires,
tests_require=tests_requires,
- entry_points={
- flake8_entry_point: [
- 'PD = pandas_vet:VetPlugin',
- ],
- },
+ entry_points={flake8_entry_point: ["PD = pandas_vet:VetPlugin"]},
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
| deppen8/pandas-vet | 4d0611ec5fb91fa9fc8c00a3bcc94e3d8beb94f7 | diff --git a/tests/test_PD005.py b/tests/test_PD005.py
index 1fcc596..78b3eca 100644
--- a/tests/test_PD005.py
+++ b/tests/test_PD005.py
@@ -14,17 +14,9 @@ def test_PD005_pass_arithmetic_operator():
Test that explicit use of binary arithmetic operator does not
result in an error.
"""
- arithmetic_operators = [
- '+',
- '-',
- '*',
- '/',
- '**',
- '//',
- '%',
- ]
+ arithmetic_operators = ["+", "-", "*", "/", "**", "//", "%"]
for op in arithmetic_operators:
- statement = 'C = A {0} B'.format(op)
+ statement = "C = A {0} B".format(op)
tree = ast.parse(statement)
actual = list(VetPlugin(tree).run())
expected = []
@@ -36,16 +28,20 @@ def test_PD005_fail_arithmetic_method():
Test that using arithmetic method results in an error.
"""
arithmetic_methods = [
- 'add',
- 'sub', 'subtract',
- 'mul', 'multiply',
- 'div', 'divide', 'truediv',
- 'pow',
- 'floordiv',
- 'mod',
- ]
+ "add",
+ "sub",
+ "subtract",
+ "mul",
+ "multiply",
+ "div",
+ "divide",
+ "truediv",
+ "pow",
+ "floordiv",
+ "mod",
+ ]
for op in arithmetic_methods:
- statement = 'C = A.{0}(B)'.format(op)
+ statement = "C = A.{0}(B)".format(op)
tree = ast.parse(statement)
actual = list(VetPlugin(tree).run())
expected = [PD005(1, 4)]
diff --git a/tests/test_PD006.py b/tests/test_PD006.py
index 7dae856..b1e1b62 100644
--- a/tests/test_PD006.py
+++ b/tests/test_PD006.py
@@ -14,9 +14,9 @@ def test_PD006_pass_comparison_operator():
Test that explicit use of binary comparison operator does not
result in an error.
"""
- comparison_operators = ['>', '<', '>=', '<=', '==', '!=']
+ comparison_operators = [">", "<", ">=", "<=", "==", "!="]
for op in comparison_operators:
- statement = 'C = A {0} B'.format(op)
+ statement = "C = A {0} B".format(op)
tree = ast.parse(statement)
actual = list(VetPlugin(tree).run())
expected = []
@@ -27,9 +27,9 @@ def test_PD006_fail_comparison_method():
"""
Test that using comparison method results in an error.
"""
- comparison_methods = ['gt', 'lt', 'ge', 'le', 'eq', 'ne']
+ comparison_methods = ["gt", "lt", "ge", "le", "eq", "ne"]
for op in comparison_methods:
- statement = 'C = A.{0}(B)'.format(op)
+ statement = "C = A.{0}(B)".format(op)
tree = ast.parse(statement)
actual = list(VetPlugin(tree).run())
expected = [PD006(1, 4)]
diff --git a/tests/test_PD012.py b/tests/test_PD012.py
index 176b2d4..d669b42 100644
--- a/tests/test_PD012.py
+++ b/tests/test_PD012.py
@@ -12,7 +12,7 @@ def test_PD012_pass_read_csv():
"""
Test that using .read_csv() explicitly does not result in an error.
"""
- statement = "df = pd.read_csv(input_file)"
+ statement = "employees = pd.read_csv(input_file)"
tree = ast.parse(statement)
actual = list(VetPlugin(tree).run())
expected = []
@@ -23,10 +23,10 @@ def test_PD012_fail_read_table():
"""
Test that using .read_table() method results in an error.
"""
- statement = "df = pd.read_table(input_file)"
+ statement = "employees = pd.read_table(input_file)"
tree = ast.parse(statement)
actual = list(VetPlugin(tree).run())
- expected = [PD012(1, 5)]
+ expected = [PD012(1, 12)]
assert actual == expected
@@ -34,7 +34,7 @@ def test_PD012_node_Name_pass():
"""
Test that where 'read_table' is a Name does NOT raise an error
"""
- statement = "df = read_table"
+ statement = "employees = read_table"
tree = ast.parse(statement)
actual = list(VetPlugin(tree).run())
expected = []
diff --git a/tests/test_PD901.py b/tests/test_PD901.py
new file mode 100644
index 0000000..f4093f1
--- /dev/null
+++ b/tests/test_PD901.py
@@ -0,0 +1,36 @@
+import ast
+
+from pandas_vet import VetPlugin
+from pandas_vet import PD901
+
+
+def test_PD901_pass_non_df():
+ statement = "employees = pd.DataFrame(employee_dict)"
+ tree = ast.parse(statement)
+ actual = list(VetPlugin(tree).run())
+ expected = []
+ assert actual == expected
+
+
+def test_PD901_pass_part_df():
+ statement = "employees_df = pd.DataFrame(employee_dict)"
+ tree = ast.parse(statement)
+ actual = list(VetPlugin(tree).run())
+ expected = []
+ assert actual == expected
+
+
+def test_PD901_pass_df_param():
+ statement = "my_function(df=data)"
+ tree = ast.parse(statement)
+ actual = list(VetPlugin(tree).run())
+ expected = []
+ assert actual == expected
+
+
+def test_PD901_fail_df_var():
+ statement = "df = pd.DataFrame()"
+ tree = ast.parse(statement)
+ actual = list(VetPlugin(tree).run())
+ expected = [PD901(1, 0)]
+ assert actual == expected
| Method for off-by-default linter checks
As discussed in #64, it would be good to have some checks that can be implemented but are "off" by default. These would be the most opinionated checks that would be a bit too strict to be activated out-of-the-box. | 0.0 | [
"tests/test_PD005.py::test_PD005_pass_arithmetic_operator",
"tests/test_PD005.py::test_PD005_fail_arithmetic_method",
"tests/test_PD006.py::test_PD006_pass_comparison_operator",
"tests/test_PD006.py::test_PD006_fail_comparison_method",
"tests/test_PD012.py::test_PD012_pass_read_csv",
"tests/test_PD012.py::test_PD012_fail_read_table",
"tests/test_PD012.py::test_PD012_node_Name_pass",
"tests/test_PD901.py::test_PD901_pass_non_df",
"tests/test_PD901.py::test_PD901_pass_part_df",
"tests/test_PD901.py::test_PD901_pass_df_param",
"tests/test_PD901.py::test_PD901_fail_df_var"
] | [] | 2019-08-03 21:46:13+00:00 | 1,882 |
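The mechanism adopted in the patch above, reduced to its core: PD9xx codes are added to flake8's default-ignore list and re-enabled only when `--annoy` is passed (a sketch; the real plugin also runs the checks themselves):

```python
disabled_by_default = ["PD9"]


class VetPlugin:
    @staticmethod
    def add_options(optmanager):
        """Hide PD9xx by default; un-hide when the user opts in with --annoy."""
        optmanager.extend_default_ignore(disabled_by_default)
        optmanager.add_option(long_option_name="--annoy", action="store_true",
                              dest="annoy", default=False)
        options, _ = optmanager.parse_args()
        if options.annoy:
            optmanager.remove_from_default_ignore(disabled_by_default)
```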
|
deshima-dev__decode-132 | diff --git a/decode/__init__.py b/decode/__init__.py
index 4ea797c..1646883 100644
--- a/decode/__init__.py
+++ b/decode/__init__.py
@@ -8,6 +8,7 @@ __all__ = [
"plot",
"qlook",
"select",
+ "utils",
]
__version__ = "2.7.2"
@@ -22,3 +23,4 @@ from . import make
from . import plot
from . import qlook
from . import select
+from . import utils
diff --git a/decode/qlook.py b/decode/qlook.py
index 7e5305a..388160b 100644
--- a/decode/qlook.py
+++ b/decode/qlook.py
@@ -1,4 +1,4 @@
-__all__ = ["raster", "skydip", "zscan"]
+__all__ = ["pswsc", "raster", "skydip", "zscan"]
# standard library
@@ -11,7 +11,7 @@ import numpy as np
import xarray as xr
import matplotlib.pyplot as plt
from fire import Fire
-from . import assign, convert, load, make, plot, select
+from . import assign, convert, load, make, plot, select, utils
# constants
@@ -22,6 +22,98 @@ BAD_MKID_IDS = (
283, 296, 297, 299, 301, 313,
)
# fmt: on
+DFOF_TO_TSKY = -(300 - 77) / 3e-5
+TSKY_TO_DFOF = -3e-5 / (300 - 77)
+
+
+def pswsc(
+ dems: Path,
+ /,
+ *,
+ include_mkid_ids: Optional[Sequence[int]] = None,
+ exclude_mkid_ids: Optional[Sequence[int]] = BAD_MKID_IDS,
+ data_type: Literal["df/f", "brightness"] = "brightness",
+ frequency_units: str = "GHz",
+ outdir: Path = Path(),
+ format: str = "png",
+) -> None:
+ """Quick-look at a PSW observation with sky chopper.
+
+ Args:
+ dems: Input DEMS file (netCDF or Zarr).
+ include_mkid_ids: MKID IDs to be included in analysis.
+ Defaults to all MKID IDs.
+ exclude_mkid_ids: MKID IDs to be excluded in analysis.
+ Defaults to bad MKID IDs found on 2023-11-07.
+ data_type: Data type of the input DEMS file.
+ frequency_units: Units of the frequency axis.
+ outdir: Output directory for the analysis result.
+ format: Output data format of the analysis result.
+
+ """
+ dems = Path(dems)
+ out = Path(outdir) / dems.with_suffix(f".pswsc.{format}").name
+
+ # load DEMS
+ da = load.dems(dems, chunks=None)
+ da = assign.scan(da)
+ da = convert.frame(da, "relative")
+ da = convert.coord_units(da, "frequency", frequency_units)
+ da = convert.coord_units(da, "d2_mkid_frequency", frequency_units)
+
+ if data_type == "df/f":
+ da = cast(xr.DataArray, np.abs(da))
+ da.attrs.update(long_name="|df/f|", units="dimensionless")
+
+ # select DEMS
+ da = select.by(da, "d2_mkid_type", include="filter")
+ da = select.by(
+ da,
+ "d2_mkid_id",
+ include=include_mkid_ids,
+ exclude=exclude_mkid_ids,
+ )
+ da = select.by(da, "state", include=["ON", "OFF"])
+ da_sub = da.groupby("scan").map(subtract_per_scan)
+
+ # export output
+ spec = da_sub.mean("scan")
+ mad = utils.mad(spec)
+
+ if format == "csv":
+ spec.to_dataset(name=data_type).to_pandas().to_csv(out)
+ elif format == "nc":
+ spec.to_netcdf(out)
+ elif format.startswith("zarr"):
+ spec.to_zarr(out)
+ else:
+ fig, axes = plt.subplots(1, 2, figsize=(12, 4))
+
+ ax = axes[0]
+ plot.data(da.scan, ax=ax)
+ ax.set_title(Path(dems).name)
+ ax.grid(True)
+
+ ax = axes[1]
+ plot.data(spec, x="frequency", s=5, hue=None, ax=ax)
+ ax.set_ylim(-mad, spec.max() + mad)
+ ax.set_title(Path(dems).name)
+ ax.grid(True)
+
+ if data_type == "df/f":
+ ax = ax.secondary_yaxis(
+ "right",
+ functions=(
+ lambda x: -DFOF_TO_TSKY * x,
+ lambda x: -TSKY_TO_DFOF * x,
+ ),
+ )
+ ax.set_ylabel("Approx. brightness [K]")
+
+ fig.tight_layout()
+ fig.savefig(out)
+
+ print(str(out))
def raster(
@@ -341,11 +433,30 @@ def mean_in_time(dems: xr.DataArray) -> xr.DataArray:
return xr.zeros_like(middle) + dems.mean("time")
+def subtract_per_scan(dems: xr.DataArray) -> xr.DataArray:
+ """Apply source-sky subtraction to a single-scan DEMS."""
+ if len(states := np.unique(dems.state)) != 1:
+ raise ValueError("State must be unique.")
+
+ if (state := states[0]) == "ON":
+ src = select.by(dems, "beam", include="B")
+ sky = select.by(dems, "beam", include="A")
+ return src.mean("time") - sky.mean("time").data
+
+ if state == "OFF":
+ src = select.by(dems, "beam", include="A")
+ sky = select.by(dems, "beam", include="B")
+ return src.mean("time") - sky.mean("time").data
+
+ raise ValueError("State must be either ON or OFF.")
+
+
def main() -> None:
"""Entry point of the decode-qlook command."""
with xr.set_options(keep_attrs=True):
Fire(
{
+ "pswsc": pswsc,
"raster": raster,
"skydip": skydip,
"zscan": zscan,
diff --git a/decode/utils.py b/decode/utils.py
new file mode 100644
index 0000000..53e4f74
--- /dev/null
+++ b/decode/utils.py
@@ -0,0 +1,40 @@
+__all__ = ["mad"]
+
+
+# dependencies
+from typing import Any, Optional, cast
+import numpy as np
+import xarray as xr
+from xarray.core.types import Dims
+
+
+def mad(
+ da: xr.DataArray,
+ dim: Dims = None,
+ skipna: Optional[bool] = None,
+ keep_attrs: Optional[bool] = None,
+ **kwargs: Any,
+) -> xr.DataArray:
+ """Calculate median absolute deviation (MAD) of a DataArray.
+
+ Args:
+ da: Input DataArray.
+ dim: Name of dimension(s) along which MAD is calculated.
+ skipna: Same-name option to be passed to ``DataArray.median``.
+ keep_attrs: Same-name option to be passed to ``DataArray.median``.
+ kwargs: Same-name option(s) to be passed to ``DataArray.median``.
+
+ Returns:
+ MAD of the input DataArray.
+
+ """
+
+ def median(da: xr.DataArray) -> xr.DataArray:
+ return da.median(
+ dim=dim,
+ skipna=skipna,
+ keep_attrs=keep_attrs,
+ **kwargs,
+ )
+
+ return median(cast(xr.DataArray, np.abs(da - median(da))))
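For reference, a minimal usage sketch of the `mad` helper added above, mirroring the accompanying unit test; the synthetic DataArray and its dimension names are illustrative only, and the snippet assumes the package is importable as `decode`:
```
# Illustrative only: MAD of a small DataArray along its "time" axis.
import numpy as np
import xarray as xr

from decode import utils

da = xr.DataArray(np.arange(25.0).reshape(5, 5), dims=("time", "chan"))

# Each channel sees values spaced by 5, so the median absolute
# deviation along "time" is 5.0 everywhere.
print(utils.mad(da, "time"))
```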
| deshima-dev/decode | 15c93d59f42a9b3ad367d687de2ea24190213511 | diff --git a/tests/test_utils.py b/tests/test_utils.py
new file mode 100644
index 0000000..177a62c
--- /dev/null
+++ b/tests/test_utils.py
@@ -0,0 +1,10 @@
+# dependencies
+import numpy as np
+import xarray as xr
+from decode import utils
+from dems.d2 import MS
+
+
+def test_mad() -> None:
+ dems = MS.new(np.arange(25).reshape(5, 5))
+ assert (utils.mad(dems, "time") == 5.0).all()
| Add qlook command for PSW + Sky chopper (pswsc) | 0.0 | [
"tests/test_utils.py::test_mad"
] | [] | 2023-11-12 18:56:23+00:00 | 1,883 |
|
desihub__desitransfer-10 | diff --git a/bin/desi_transfer_status_restore b/bin/desi_transfer_status_restore
new file mode 100755
index 0000000..4766fc8
--- /dev/null
+++ b/bin/desi_transfer_status_restore
@@ -0,0 +1,71 @@
+#!/usr/bin/env python
+"""
+Quick and dirty script to restore raw data transfer status.
+
+1. Obtain rsync time from modification time of exposure directory.
+2. Set checksum time to rsync time.
+3. Ignore pipeline time (as of early 2020).
+4. Obtain backup time from HPSS backup files.
+
+This script is deliberately kept separate from the package because it
+uses hpsspy.
+"""
+from sys import exit
+import json
+import os
+import hpsspy.os as hpos
+
+
+def backup_times(path='desi/spectro/data'):
+ """Obtain backup times from HPSS files.
+
+ Parameters
+ ----------
+ path : :class:`str`
+ The HPSS path to the raw data backup files.
+
+ Returns
+ -------
+ :class:`dict`
+ A mapping of night to backup time. The backup time is in milliseconds
+ for compatibility with JavaScript.
+ """
+ ls = hpos.listdir(path)
+ return dict([(os.path.splitext(f.name)[0].split('_')[-1], f.st_mtime*1000)
+ for f in ls if f.name.endswith('.tar')])
+
+
+def main():
+ """Entry point for :command:`desi_transfer_status_restore`.
+
+ Returns
+ -------
+ :class:`int`
+ An integer suitable for passing to :func:`sys.exit`.
+ """
+ bt = backup_times()
+ nights = os.listdir(os.environ['DESI_SPECTRO_DATA'])
+ status = list()
+ for night in nights:
+ if night != 'README.html':
+ exposures = os.listdir(os.path.join(os.environ['DESI_SPECTRO_DATA'], night))
+ for exp in exposures:
+ rt = int(os.stat(os.path.join(os.environ['DESI_SPECTRO_DATA'], night, exp)).st_mtime * 1000)
+ status.append([int(night), int(exp), 'rsync', True, '', rt])
+ status.append([int(night), int(exp), 'checksum', True, '', rt])
+ try:
+ status.append([int(night), int(exp), 'backup', True, '', bt[night]])
+ except KeyError:
+ pass
+ status = sorted(status, key=lambda x: x[0]*10000000 + x[1], reverse=True)
+ with open('desi_transfer_status_restore.json', 'w') as j:
+ json.dump(status, j, indent=None, separators=(',', ':'))
+ return 0
+
+
+if __name__ == '__main__':
+ try:
+ foo = os.environ['HPSS_DIR']
+ except KeyError:
+ os.environ['HPSS_DIR'] = '/usr/common/mss'
+ exit(main())
diff --git a/doc/api.rst b/doc/api.rst
index 418593d..9ee155f 100644
--- a/doc/api.rst
+++ b/doc/api.rst
@@ -5,6 +5,9 @@ desitransfer API
.. automodule:: desitransfer
:members:
+.. automodule:: desitransfer.common
+ :members:
+
.. automodule:: desitransfer.daemon
:members:
diff --git a/doc/changes.rst b/doc/changes.rst
index c55f0ac..44903e1 100644
--- a/doc/changes.rst
+++ b/doc/changes.rst
@@ -5,7 +5,10 @@ Change Log
0.3.4 (unreleased)
------------------
-* No changes yet.
+* Guard against corrupted status JSON files; restore transfer status;
+ additional daily transfers (PR `#10`_).
+
+.. _`#10`: https://github.com/desihub/desitransfer/pull/10
0.3.3 (2019-12-18)
------------------
diff --git a/py/desitransfer/daily.py b/py/desitransfer/daily.py
index 94b63d5..d27b1f7 100644
--- a/py/desitransfer/daily.py
+++ b/py/desitransfer/daily.py
@@ -98,6 +98,8 @@ def _config():
# os.path.join(engineering, 'fxc')),
DailyDirectory('/data/focalplane/logs/calib_logs',
os.path.join(engineering, 'focalplane', 'logs', 'calib_logs')),
+ DailyDirectory('/data/focalplane/logs/kpno',
+ os.path.join(engineering, 'focalplane', 'logs', 'kpno')),
DailyDirectory('/data/focalplane/logs/xytest_data',
os.path.join(engineering, 'focalplane', 'logs', 'xytest_data')),
DailyDirectory('/data/fvc/data',
diff --git a/py/desitransfer/status.py b/py/desitransfer/status.py
index 21cb7a9..e92998f 100644
--- a/py/desitransfer/status.py
+++ b/py/desitransfer/status.py
@@ -14,10 +14,6 @@ import time
from argparse import ArgumentParser
from pkg_resources import resource_filename
from . import __version__ as dtVersion
-# from desiutil.log import get_logger
-
-
-# log = None
class TransferStatus(object):
@@ -47,11 +43,44 @@ class TransferStatus(object):
return
try:
with open(self.json) as j:
- self.status = json.load(j)
+ try:
+ self.status = json.load(j)
+ except json.JSONDecodeError:
+ self._handle_malformed()
except FileNotFoundError:
pass
return
+ def _handle_malformed(self):
+ """Handle malformed JSON files.
+
+ This function will save the malformed file to a .bad file for
+ later analysis, and write an empty array to a new status file.
+ """
+ from .daemon import log
+ bad = self.json + '.bad'
+ m = "Malformed JSON file detected: %s; saving original file as %s."
+ try:
+ log.error(m, self.json, bad)
+ except AttributeError:
+ # If the status code is running stand-alone, the log object
+ # will be None.
+ print("ERROR: " + (m % (self.json, bad)))
+ m = "shutil.copy2('%s', '%s')"
+ try:
+ log.debug(m, self.json, bad)
+ except AttributeError:
+ print("DEBUG: " + (m % (self.json, bad)))
+ shutil.copy2(self.json, bad)
+ m = "Writing empty array to %s."
+ try:
+ log.info(m, self.json)
+ except AttributeError:
+ print("INFO: " + (m % (self.json,)))
+ with open(self.json, 'w') as j:
+ j.write('[]')
+ return
+
def update(self, night, exposure, stage, failure=False, last=''):
"""Update the transfer status.
@@ -92,6 +121,14 @@ class TransferStatus(object):
self.status.insert(0, row)
self.status = sorted(self.status, key=lambda x: x[0]*10000000 + x[1],
reverse=True)
+ #
+ # Copy the original file before modifying.
+ # This will overwrite any existing .bak file
+ #
+ try:
+ shutil.copy2(self.json, self.json + '.bak')
+ except FileNotFoundError:
+ pass
with open(self.json, 'w') as j:
json.dump(self.status, j, indent=None, separators=(',', ':'))
diff --git a/requirements.txt b/requirements.txt
index 345f714..98fc19f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,3 +1,3 @@
setuptools
pytz
-git+https://github.com/desihub/[email protected]#egg=desiutil
+git+https://github.com/desihub/[email protected]#egg=desiutil
| desihub/desitransfer | 9e0011389a87b6e881c99349dfb03ef4c9f3d85c | diff --git a/py/desitransfer/test/t/bad.json b/py/desitransfer/test/t/bad.json
new file mode 100644
index 0000000..0f25ee4
--- /dev/null
+++ b/py/desitransfer/test/t/bad.json
@@ -0,0 +1,1 @@
+This is a bad JSON file!
diff --git a/py/desitransfer/test/test_daemon.py b/py/desitransfer/test/test_daemon.py
index f6247df..ab51215 100644
--- a/py/desitransfer/test/test_daemon.py
+++ b/py/desitransfer/test/test_daemon.py
@@ -135,10 +135,10 @@ class TestDaemon(unittest.TestCase):
@patch('desitransfer.daemon.SMTPHandler')
@patch('desitransfer.daemon.RotatingFileHandler')
@patch('desitransfer.daemon.get_logger')
- def test_TransferDaemon_configure_log(self, gl, rfh, smtp):
+ @patch('desitransfer.daemon.log') # Needed to restore the module-level log object after test.
+ def test_TransferDaemon_configure_log(self, mock_log, gl, rfh, smtp):
"""Test logging configuration.
"""
- ll = gl.return_value = MagicMock()
with patch.dict('os.environ',
{'CSCRATCH': self.tmp.name,
'DESI_ROOT': '/desi/root',
@@ -149,7 +149,7 @@ class TestDaemon(unittest.TestCase):
rfh.assert_called_once_with('/desi/root/spectro/staging/logs/desi_transfer_daemon.log',
backupCount=100, maxBytes=100000000)
gl.assert_called_once_with(timestamp=True)
- ll.setLevel.assert_called_once_with(logging.DEBUG)
+ gl().setLevel.assert_called_once_with(logging.DEBUG)
@patch.object(TransferDaemon, 'checksum_lock')
@patch.object(TransferDaemon, 'directory')
diff --git a/py/desitransfer/test/test_daily.py b/py/desitransfer/test/test_daily.py
index 1d8c180..9089131 100644
--- a/py/desitransfer/test/test_daily.py
+++ b/py/desitransfer/test/test_daily.py
@@ -110,7 +110,7 @@ class TestDaily(unittest.TestCase):
call().__exit__(None, None, None)])
mock_popen.assert_has_calls([call(),
call(['fix_permissions.sh', '-a', '/dst/d0'],
- stdout=mo(), stderr=-2),
+ stdout=mo(), stderr=-2),
call().wait()])
diff --git a/py/desitransfer/test/test_status.py b/py/desitransfer/test/test_status.py
index 0194031..23adbe1 100644
--- a/py/desitransfer/test/test_status.py
+++ b/py/desitransfer/test/test_status.py
@@ -4,9 +4,10 @@
"""
import json
import os
+import shutil
import sys
import unittest
-from unittest.mock import patch
+from unittest.mock import patch, call
from tempfile import TemporaryDirectory
from pkg_resources import resource_filename
from ..status import TransferStatus, _options
@@ -85,6 +86,45 @@ class TestStatus(unittest.TestCase):
cp.assert_called_once_with(j, d)
cf.assert_called_once_with(h, os.path.join(d, 'index.html'))
+ @patch('desitransfer.daemon.log')
+ def test_TransferStatus_handle_malformed_with_log(self, mock_log):
+ """Test handling of malformed JSON files.
+ """
+ bad = resource_filename('desitransfer.test', 't/bad.json')
+ with TemporaryDirectory() as d:
+ shutil.copy(bad, os.path.join(d, 'desi_transfer_status.json'))
+ s = TransferStatus(d)
+ self.assertTrue(os.path.exists(os.path.join(d, 'desi_transfer_status.json.bad')))
+ self.assertListEqual(s.status, [])
+ self.assertListEqual(os.listdir(d), ['desi_transfer_status.json.bad',
+ 'desi_transfer_status.json'])
+ mock_log.error.assert_called_once_with('Malformed JSON file detected: %s; saving original file as %s.',
+ os.path.join(d, 'desi_transfer_status.json'),
+ os.path.join(d, 'desi_transfer_status.json.bad'))
+ mock_log.debug.assert_called_once_with("shutil.copy2('%s', '%s')",
+ os.path.join(d, 'desi_transfer_status.json'),
+ os.path.join(d, 'desi_transfer_status.json.bad'))
+ mock_log.info.assert_called_once_with('Writing empty array to %s.',
+ os.path.join(d, 'desi_transfer_status.json'))
+
+ @patch('builtins.print')
+ def test_TransferStatus_handle_malformed_without_log(self, mock_print):
+ """Test handling of malformed JSON files (no log object).
+ """
+ bad = resource_filename('desitransfer.test', 't/bad.json')
+ with TemporaryDirectory() as d:
+ shutil.copy(bad, os.path.join(d, 'desi_transfer_status.json'))
+ s = TransferStatus(d)
+ self.assertTrue(os.path.exists(os.path.join(d, 'desi_transfer_status.json.bad')))
+ self.assertListEqual(s.status, [])
+ self.assertListEqual(os.listdir(d), ['desi_transfer_status.json.bad',
+ 'desi_transfer_status.json'])
+ mock_print.assert_has_calls([call('ERROR: Malformed JSON file detected: %s; saving original file as %s.' % (os.path.join(d, 'desi_transfer_status.json'),
+ os.path.join(d, 'desi_transfer_status.json.bad'))),
+ call("DEBUG: shutil.copy2('%s', '%s')" % (os.path.join(d, 'desi_transfer_status.json'),
+ os.path.join(d, 'desi_transfer_status.json.bad'))),
+ call("INFO: Writing empty array to %s." % (os.path.join(d, 'desi_transfer_status.json'),))])
+
@patch('time.time')
def test_TransferStatus_update(self, mock_time):
"""Test status reporting mechanism updates.
@@ -98,6 +138,7 @@ class TestStatus(unittest.TestCase):
json.dump(st, f, indent=None, separators=(',', ':'))
s = TransferStatus(d)
s.update('20200703', '12345678', 'checksum')
+ self.assertTrue(os.path.exists(js + '.bak'))
self.assertEqual(s.status[0], [20200703, 12345678, 'checksum', True, '', 1565300090000])
s.update('20200703', '12345680', 'rsync')
self.assertEqual(s.status[0], [20200703, 12345680, 'rsync', True, '', 1565300090000])
@@ -112,6 +153,35 @@ class TestStatus(unittest.TestCase):
self.assertTrue(all(b))
self.assertEqual(len(b), 4)
+ @patch('time.time')
+ def test_TransferStatus_update_empty(self, mock_time):
+ """Test status reporting mechanism updates (with no initial JSON file).
+ """
+ mock_time.return_value = 1565300090
+ # st = [[20200703, 12345678, 'rsync', True, '', 1565300074664],
+ # [20200703, 12345677, 'rsync', True, '', 1565300073000]]
+ with TemporaryDirectory() as d:
+ js = os.path.join(d, 'desi_transfer_status.json')
+ # with open(js, 'w') as f:
+ # json.dump(st, f, indent=None, separators=(',', ':'))
+ s = TransferStatus(d)
+ s.update('20200703', '12345678', 'checksum')
+ self.assertFalse(os.path.exists(js + '.bak'))
+ self.assertEqual(s.status[0], [20200703, 12345678, 'checksum', True, '', 1565300090000])
+ s.update('20200703', '12345680', 'rsync')
+ self.assertTrue(os.path.exists(js + '.bak'))
+ self.assertEqual(s.status[0], [20200703, 12345680, 'rsync', True, '', 1565300090000])
+ s.update('20200703', '12345678', 'checksum', failure=True)
+ self.assertEqual(s.status[1], [20200703, 12345678, 'checksum', False, '', 1565300090000])
+ s.update('20200703', '12345681', 'pipeline')
+ self.assertEqual(s.status[0], [20200703, 12345681, 'pipeline', True, '', 1565300090000])
+ s.update('20200703', '12345681', 'pipeline', last='arcs')
+ self.assertEqual(s.status[0], [20200703, 12345681, 'pipeline', True, 'arcs', 1565300090000])
+ s.update('20200703', 'all', 'backup')
+ b = [i[3] for i in s.status if i[2] == 'backup']
+ self.assertTrue(all(b))
+ self.assertEqual(len(b), 3)
+
def test_TransferStatus_find(self):
"""Test status search.
"""
| Guard against truncated status file
At 2020-01-09 01:00 PST, the desi_transfer_status.json file was truncated, causing the transfer daemon to crash when it subsequently tried to read the empty file.
Workaround for quick restart: Add `[]` (an array) to the empty file.
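For reference, a minimal sketch of the kind of guard this issue asks for (the file name comes from this issue; the function and its structure are illustrative, not the actual daemon code):
```
import json
import shutil


def read_status(path='desi_transfer_status.json'):
    """Read the status file, tolerating a truncated or malformed file."""
    try:
        with open(path) as j:
            return json.load(j)
    except FileNotFoundError:
        return []
    except json.JSONDecodeError:
        # Keep the damaged file for later inspection, then start over
        # with an empty array, mirroring the manual workaround above.
        shutil.copy2(path, path + '.bad')
        with open(path, 'w') as j:
            j.write('[]')
        return []
```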
dtn04.nersc.gov was rebooted sometime after the file was truncated (approximately 02:30 PST), suggesting transient problems with the filesystem. | 0.0 | [
"py/desitransfer/test/test_status.py::TestStatus::test_TransferStatus_handle_malformed_with_log",
"py/desitransfer/test/test_status.py::TestStatus::test_TransferStatus_handle_malformed_without_log",
"py/desitransfer/test/test_status.py::TestStatus::test_TransferStatus_update",
"py/desitransfer/test/test_status.py::TestStatus::test_TransferStatus_update_empty"
] | [
"py/desitransfer/test/test_daemon.py::TestDaemon::test_lock_directory",
"py/desitransfer/test/test_daemon.py::TestDaemon::test_popen",
"py/desitransfer/test/test_daemon.py::TestDaemon::test_rsync_night",
"py/desitransfer/test/test_daemon.py::TestDaemon::test_unlock_directory",
"py/desitransfer/test/test_daemon.py::TestDaemon::test_verify_checksum",
"py/desitransfer/test/test_daemon.py::test_suite",
"py/desitransfer/test/test_daily.py::TestDaily::test_apache",
"py/desitransfer/test/test_daily.py::TestDaily::test_config",
"py/desitransfer/test/test_daily.py::TestDaily::test_lock",
"py/desitransfer/test/test_daily.py::TestDaily::test_transfer",
"py/desitransfer/test/test_daily.py::test_suite",
"py/desitransfer/test/test_status.py::TestStatus::test_TransferStatus_find",
"py/desitransfer/test/test_status.py::TestStatus::test_TransferStatus_init",
"py/desitransfer/test/test_status.py::TestStatus::test_options",
"py/desitransfer/test/test_status.py::test_suite"
] | 2020-01-10 05:53:30+00:00 | 1,884 |
|
desihub__desitransfer-21 | diff --git a/bin/desi_tucson_transfer.sh b/bin/desi_tucson_transfer.sh
index 94f1cc0..77cb2d5 100755
--- a/bin/desi_tucson_transfer.sh
+++ b/bin/desi_tucson_transfer.sh
@@ -29,7 +29,7 @@ set -o noglob
#
# Static data sets don't need to be updated as frequently.
#
-static='protodesi public/ets spectro/redux/andes spectro/redux/minisv2 spectro/redux/oak1'
+static='protodesi public/epo public/ets spectro/redux/andes spectro/redux/minisv2 spectro/redux/oak1'
#
# Dynamic data sets may change daily.
#
diff --git a/doc/changes.rst b/doc/changes.rst
index 81a4637..f41a9af 100644
--- a/doc/changes.rst
+++ b/doc/changes.rst
@@ -5,7 +5,12 @@ Change Log
0.3.9 (unreleased)
------------------
-* No changes yet.
+* Deprecate continuous nightwatch transfers; nightwatch is now part of the
+ daily engineering transfer (PR `#21`_).
+* Allow alternate scratch directory to be chosen if :envvar:`CSCRATCH` is
+ unavailable (PR `#21`_).
+
+.. _`#21`: https://github.com/desihub/desitransfer/pull/21
0.3.8 (2020-10-26)
------------------
diff --git a/py/desitransfer/common.py b/py/desitransfer/common.py
index 0b9bb1b..6eba487 100644
--- a/py/desitransfer/common.py
+++ b/py/desitransfer/common.py
@@ -81,6 +81,35 @@ def stamp(zone='US/Pacific'):
return n.astimezone(tz).strftime('%Y-%m-%d %H:%M:%S %Z')
+def ensure_scratch(primary, alternate):
+ """Try an alternate temporary directory if the primary temporary directory
+ is unavailable.
+
+ Parameters
+ ----------
+ primary : :class:`str`
+ Primary temporary directory.
+ alternate : :class:`list`
+ A list of alternate directories.
+
+ Returns
+ -------
+ The first available temporary directory found.
+ """
+ if not isinstance(alternate, list):
+ alternate = [alternate]
+ try:
+ l = os.listdir(primary)
+ except FileNotFoundError:
+ for a in alternate:
+ try:
+ l = os.listdir(a)
+ except FileNotFoundError:
+ continue
+ return a
+ return primary
+
+
def yesterday():
"""Yesterday's date in DESI "NIGHT" format, YYYYMMDD.
"""
diff --git a/py/desitransfer/daemon.py b/py/desitransfer/daemon.py
index a3b5813..7113f27 100644
--- a/py/desitransfer/daemon.py
+++ b/py/desitransfer/daemon.py
@@ -24,7 +24,7 @@ from socket import getfqdn
from tempfile import TemporaryFile
from pkg_resources import resource_filename
from desiutil.log import get_logger
-from .common import dir_perm, file_perm, rsync, yesterday, empty_rsync
+from .common import dir_perm, file_perm, rsync, yesterday, empty_rsync, ensure_scratch
from .status import TransferStatus
from . import __version__ as dtVersion
@@ -94,7 +94,7 @@ class TransferDaemon(object):
self.conf[s].getlist('expected_files'),
self.conf[s]['checksum_file'])
for s in self.sections]
- self.scratch = self.conf['common']['scratch']
+ self.scratch = ensure_scratch(self.conf['common']['scratch'], self.conf['common']['alternate_scratch'].split(','))
self._configure_log(options.debug)
return
@@ -365,7 +365,7 @@ The DESI Collaboration Account
#
pass
else:
- log.error('rsync problem detected!')
+ log.error('rsync problem detected for %s/%s!', night, exposure)
log.debug("status.update('%s', '%s', 'rsync', failure=True)", night, exposure)
status.update(night, exposure, 'rsync', failure=True)
diff --git a/py/desitransfer/daily.py b/py/desitransfer/daily.py
index 642825d..0ce4c30 100644
--- a/py/desitransfer/daily.py
+++ b/py/desitransfer/daily.py
@@ -11,6 +11,7 @@ import subprocess as sub
import sys
import time
from argparse import ArgumentParser
+from pkg_resources import resource_filename
from .common import dir_perm, file_perm, rsync, stamp
from . import __version__ as dtVersion
@@ -24,20 +25,26 @@ class DailyDirectory(object):
Source directory.
destination : :class:`str`
Destination directory.
+ extra : :class:`list`, optional
+ Extra :command:`rsync` arguments to splice into command.
+ dirlinks : :class:`bool`, optional
+ If ``True``, convert source links into linked directory.
"""
- def __init__(self, source, destination):
+ def __init__(self, source, destination, extra=[], dirlinks=False):
self.source = source
self.destination = destination
self.log = self.destination + '.log'
+ self.extra = extra
+ self.dirlinks = dirlinks
- def transfer(self, apache=True):
+ def transfer(self, permission=True):
"""Data transfer operations for a single destination directory.
Parameters
----------
- apache : :class:`bool`
- If ``True`` set file ACLs for Apache httpd access.
+ permission : :class:`bool`, optional
+ If ``True``, set permissions for DESI collaboration access.
Returns
-------
@@ -45,6 +52,11 @@ class DailyDirectory(object):
The status returned by :command:`rsync`.
"""
cmd = rsync(self.source, self.destination)
+ if not self.dirlinks:
+ cmd[cmd.index('--copy-dirlinks')] = '--links'
+ if self.extra:
+ for i, e in enumerate(self.extra):
+ cmd.insert(cmd.index('--omit-dir-times') + 1 + i, e)
with open(self.log, 'ab') as l:
l.write(("DEBUG: desi_daily_transfer %s\n" % dtVersion).encode('utf-8'))
l.write(("DEBUG: %s\n" % stamp()).encode('utf-8'))
@@ -54,8 +66,8 @@ class DailyDirectory(object):
status = p.wait()
if status == 0:
self.lock()
- if apache:
- s = self.apache()
+ if permission:
+ s = self.permission()
return status
def lock(self):
@@ -66,8 +78,8 @@ class DailyDirectory(object):
for f in filenames:
os.chmod(os.path.join(dirpath, f), file_perm)
- def apache(self):
- """Grant apache/www read access.
+ def permission(self):
+ """Set permissions for DESI collaboration access.
In theory this should not change any permissions set by
:meth:`~DailyDirectory.lock`.
@@ -90,16 +102,20 @@ def _config():
"""Wrap configuration so that module can be imported without
environment variables set.
"""
+ nightwatch_exclude = resource_filename('desitransfer',
+ 'data/desi_nightwatch_transfer_exclude.txt')
engineering = os.path.realpath(os.path.join(os.environ['DESI_ROOT'],
'engineering'))
spectro = os.path.realpath(os.path.join(os.environ['DESI_ROOT'],
'spectro'))
return [DailyDirectory('/exposures/desi/sps',
os.path.join(engineering, 'spectrograph', 'sps')),
- # DailyDirectory('/exposures/nightwatch',
- # os.path.join(spectro, 'nightwatch', 'kpno')),
+ DailyDirectory('/exposures/nightwatch',
+ os.path.join(spectro, 'nightwatch', 'kpno'),
+ extra=['--exclude-from', nightwatch_exclude]),
DailyDirectory('/data/dts/exposures/lost+found',
- os.path.join(spectro, 'staging', 'lost+found')),
+ os.path.join(spectro, 'staging', 'lost+found'),
+ dirlinks=True),
# DailyDirectory('/data/fxc',
# os.path.join(engineering, 'fxc')),
DailyDirectory('/data/focalplane/logs/calib_logs',
@@ -127,8 +143,6 @@ def _options(*args):
"""
desc = "Transfer non-critical DESI data from KPNO to NERSC."
prsr = ArgumentParser(description=desc)
- prsr.add_argument('-A', '--no-apache', action='store_false', dest='apache',
- help='Do not set ACL for Apache httpd access.')
# prsr.add_argument('-b', '--backup', metavar='H', type=int, default=20,
# help='UTC time in hours to trigger HPSS backups (default %(default)s:00 UTC).')
# prsr.add_argument('-d', '--debug', action='store_true',
@@ -142,8 +156,8 @@ def _options(*args):
help="Exit the script when FILE is detected (default %(default)s).")
# prsr.add_argument('-n', '--nersc', default='cori', metavar='NERSC_HOST',
# help="Trigger DESI pipeline on this NERSC system (default %(default)s).")
- # prsr.add_argument('-P', '--no-pipeline', action='store_false', dest='pipeline',
- # help="Only transfer files, don't start the DESI pipeline.")
+ prsr.add_argument('-P', '--no-permission', action='store_false', dest='permission',
+ help='Do not set permissions for DESI collaboration access.')
prsr.add_argument('-s', '--sleep', metavar='H', type=int, default=24,
help='In daemon mode, sleep H hours before checking for new data (default %(default)s hours).')
# prsr.add_argument('-S', '--shadow', action='store_true',
@@ -167,7 +181,7 @@ def main():
print("INFO: %s detected, shutting down daily transfer script." % options.kill)
return 0
for d in _config():
- status = d.transfer(apache=options.apache)
+ status = d.transfer(permission=options.permission)
if status != 0:
print("ERROR: rsync problem detected for {0.source} -> {0.destination}!".format(d))
return status
diff --git a/py/desitransfer/data/desi_nightwatch_transfer_exclude.txt b/py/desitransfer/data/desi_nightwatch_transfer_exclude.txt
index 2f1f1fa..446d0a0 100644
--- a/py/desitransfer/data/desi_nightwatch_transfer_exclude.txt
+++ b/py/desitransfer/data/desi_nightwatch_transfer_exclude.txt
@@ -1,3 +1,9 @@
-*/preproc*.fits
-*/qsky-*.fits
-*/*.tmp
+preproc*.fits
+qsky-*.fits
+*.tmp
+nightwatch.*
+nightwatch-debug.*
+nightwatch-webapp.*
+webapp.log
+redux
+test
diff --git a/py/desitransfer/data/desi_transfer_daemon.ini b/py/desitransfer/data/desi_transfer_daemon.ini
index c1a8ade..222baed 100644
--- a/py/desitransfer/data/desi_transfer_daemon.ini
+++ b/py/desitransfer/data/desi_transfer_daemon.ini
@@ -22,7 +22,7 @@ checksum_file = checksum-{exposure}.sha256sum
[common]
# Use this directory for temporary files.
scratch = ${CSCRATCH}
-# scratch = ${HOME}/tmp
+alternate_scratch = ${HOME}/tmp
# The presence of this file indicates checksums are being computed.
checksum_lock = /tmp/checksum-running
# UTC time in hours to look for delayed files.
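A possible usage sketch of the `ensure_scratch` helper introduced above (the environment variables and fallback path are illustrative; the helper prefers the primary directory and falls back to the first alternate it can list):
```
import os

from desitransfer.common import ensure_scratch

# Illustrative: prefer $CSCRATCH, fall back to $HOME/tmp.
primary = os.environ.get('CSCRATCH', '/no/such/directory')
scratch = ensure_scratch(primary, [os.path.join(os.environ['HOME'], 'tmp')])
print(scratch)
```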
| desihub/desitransfer | e6e6d9ee728d4525608213214f00ff6127ccb376 | diff --git a/py/desitransfer/test/test_common.py b/py/desitransfer/test/test_common.py
index 1642616..8be6d5e 100644
--- a/py/desitransfer/test/test_common.py
+++ b/py/desitransfer/test/test_common.py
@@ -6,7 +6,8 @@ import datetime
import os
import unittest
from unittest.mock import patch
-from ..common import dir_perm, file_perm, empty_rsync, rsync, stamp, yesterday, today
+from tempfile import TemporaryDirectory
+from ..common import dir_perm, file_perm, empty_rsync, rsync, stamp, ensure_scratch, yesterday, today
class TestCommon(unittest.TestCase):
@@ -22,10 +23,14 @@ class TestCommon(unittest.TestCase):
pass
def setUp(self):
- pass
+ """Create a temporary directory to simulate CSCRATCH.
+ """
+ self.tmp = TemporaryDirectory()
def tearDown(self):
- pass
+ """Clean up temporary directory.
+ """
+ self.tmp.cleanup()
def test_permissions(self):
"""Ensure that file and directory permissions do not change.
@@ -72,6 +77,19 @@ total size is 118,417,836,324 speedup is 494,367.55
s = stamp('US/Arizona')
self.assertEqual(s, '2019-07-03 05:00:00 MST')
+ def test_ensure_scratch(self):
+ """Test ensure_scratch.
+ """
+ tmp = self.tmp.name
+ t = ensure_scratch(tmp, ['/foo', '/bar'])
+ self.assertEqual(t, tmp)
+ t = ensure_scratch('/foo', tmp)
+ self.assertEqual(t, tmp)
+ t = ensure_scratch('/foo', ['/bar', tmp])
+ self.assertEqual(t, tmp)
+ t = ensure_scratch('/foo', ['/bar', '/abcdefg', tmp])
+ self.assertEqual(t, tmp)
+
@patch('desitransfer.common.dt')
def test_yesterday(self, mock_dt):
"""Test yesterday's date.
diff --git a/py/desitransfer/test/test_daemon.py b/py/desitransfer/test/test_daemon.py
index 0944a75..fa0551e 100644
--- a/py/desitransfer/test/test_daemon.py
+++ b/py/desitransfer/test/test_daemon.py
@@ -288,7 +288,7 @@ class TestDaemon(unittest.TestCase):
mock_popen.assert_called_once_with(['/bin/rsync', '--verbose', '--recursive',
'--copy-dirlinks', '--times', '--omit-dir-times',
'dts:/data/dts/exposures/raw/20190703/00000127/', '/desi/root/spectro/staging/raw/20190703/00000127/'])
- mock_log.error.assert_called_once_with('rsync problem detected!')
+ mock_log.error.assert_called_once_with('rsync problem detected for %s/%s!', '20190703', '00000127')
mock_status.update.assert_called_once_with('20190703', '00000127', 'rsync', failure=True)
#
# Actually run the pipeline
diff --git a/py/desitransfer/test/test_daily.py b/py/desitransfer/test/test_daily.py
index 863f9e9..18feddd 100644
--- a/py/desitransfer/test/test_daily.py
+++ b/py/desitransfer/test/test_daily.py
@@ -37,6 +37,8 @@ class TestDaily(unittest.TestCase):
self.assertEqual(c[0].source, '/exposures/desi/sps')
self.assertEqual(c[0].destination, os.path.join(os.environ['DESI_ROOT'],
'engineering', 'spectrograph', 'sps'))
+ self.assertEqual(c[1].extra[0], '--exclude-from')
+ self.assertTrue(c[2].dirlinks)
def test_options(self):
"""Test command-line arguments.
@@ -45,7 +47,7 @@ class TestDaily(unittest.TestCase):
['desi_daily_transfer', '--daemon', '--kill',
os.path.expanduser('~/stop_daily_transfer')]):
options = _options()
- self.assertTrue(options.apache)
+ self.assertTrue(options.permission)
self.assertEqual(options.sleep, 24)
self.assertTrue(options.daemon)
self.assertEqual(options.kill,
@@ -69,7 +71,7 @@ class TestDaily(unittest.TestCase):
call().__enter__(),
call().write(('DEBUG: desi_daily_transfer {}\n'.format(dtVersion)).encode('utf-8')),
call().write(b'DEBUG: 2019-07-03\n'),
- call().write(b'DEBUG: /bin/rsync --verbose --recursive --copy-dirlinks --times --omit-dir-times dts:/src/d0/ /dst/d0/\n'),
+ call().write(b'DEBUG: /bin/rsync --verbose --recursive --links --times --omit-dir-times dts:/src/d0/ /dst/d0/\n'),
call().flush(),
call().__exit__(None, None, None)])
mock_walk.assert_called_once_with('/dst/d0')
@@ -79,6 +81,30 @@ class TestDaily(unittest.TestCase):
@patch('os.walk')
@patch('os.chmod')
+ @patch('subprocess.Popen')
+ @patch('desitransfer.daily.stamp')
+ @patch('builtins.open', new_callable=mock_open)
+ def test_transfer_extra(self, mo, mock_stamp, mock_popen, mock_chmod, mock_walk):
+ """Test the transfer functions in DailyDirectory.transfer() with extra options.
+ """
+ mock_walk.return_value = [('/dst/d0', [], ['f1', 'f2'])]
+ mock_stamp.return_value = '2019-07-03'
+ mock_popen().wait.return_value = 0
+ d = DailyDirectory('/src/d0', '/dst/d0', extra=['--exclude-from', 'foo'])
+ d.transfer()
+ mo.assert_has_calls([call('/dst/d0.log', 'ab'),
+ call().__enter__(),
+ call().write(('DEBUG: desi_daily_transfer {}\n'.format(dtVersion)).encode('utf-8')),
+ call().write(b'DEBUG: 2019-07-03\n'),
+ call().write(b'DEBUG: /bin/rsync --verbose --recursive --links --times --omit-dir-times --exclude-from foo dts:/src/d0/ /dst/d0/\n'),
+ call().flush(),
+ call().__exit__(None, None, None)])
+ mock_walk.assert_called_once_with('/dst/d0')
+ mock_chmod.assert_has_calls([call('/dst/d0', 1512),
+ call('/dst/d0/f1', 288),
+ call('/dst/d0/f2', 288)])
+ @patch('os.walk')
+ @patch('os.chmod')
def test_lock(self, mock_chmod, mock_walk):
"""Test the lock functions in DailyDirectory.lock().
"""
@@ -98,12 +124,12 @@ class TestDaily(unittest.TestCase):
@patch('subprocess.Popen')
@patch('builtins.open', new_callable=mock_open)
- def test_apache(self, mo, mock_popen):
- """Test granting apache/www permissions.
+ def test_permission(self, mo, mock_popen):
+ """Test granting permissions.
"""
mock_popen().wait.return_value = 0
d = DailyDirectory('/src/d0', '/dst/d0')
- d.apache()
+ d.permission()
mo.assert_has_calls([call('/dst/d0.log', 'ab'),
call().__enter__(),
call().write(b'DEBUG: fix_permissions.sh /dst/d0\n'),
| Shift nightwatch transfer to daily engineering data transfer
Nightwatch is now primarily being run at NERSC, so there is no need for near-real-time transfers of nightwatch data produced at KPNO. | 0.0 | [
"py/desitransfer/test/test_common.py::TestCommon::test_empty_rsync",
"py/desitransfer/test/test_common.py::TestCommon::test_ensure_scratch",
"py/desitransfer/test/test_common.py::TestCommon::test_permissions",
"py/desitransfer/test/test_common.py::TestCommon::test_rsync",
"py/desitransfer/test/test_common.py::TestCommon::test_stamp",
"py/desitransfer/test/test_common.py::TestCommon::test_today",
"py/desitransfer/test/test_common.py::TestCommon::test_yesterday",
"py/desitransfer/test/test_common.py::test_suite",
"py/desitransfer/test/test_daemon.py::TestDaemon::test_lock_directory",
"py/desitransfer/test/test_daemon.py::TestDaemon::test_popen",
"py/desitransfer/test/test_daemon.py::TestDaemon::test_rsync_night",
"py/desitransfer/test/test_daemon.py::TestDaemon::test_unlock_directory",
"py/desitransfer/test/test_daemon.py::TestDaemon::test_verify_checksum",
"py/desitransfer/test/test_daemon.py::test_suite",
"py/desitransfer/test/test_daily.py::TestDaily::test_config",
"py/desitransfer/test/test_daily.py::TestDaily::test_lock",
"py/desitransfer/test/test_daily.py::TestDaily::test_permission",
"py/desitransfer/test/test_daily.py::TestDaily::test_transfer",
"py/desitransfer/test/test_daily.py::TestDaily::test_transfer_extra",
"py/desitransfer/test/test_daily.py::test_suite"
] | [] | 2020-11-20 18:43:42+00:00 | 1,885 |
|
desihub__desiutil-125 | diff --git a/doc/changes.rst b/doc/changes.rst
index f154327..6f23c11 100644
--- a/doc/changes.rst
+++ b/doc/changes.rst
@@ -10,7 +10,11 @@ Change Log
1.9.15 (unreleased)
-------------------
-* Draw ecliptic in all-sky plots.
+* Set read-only permissions on all Module files, and unlock them as needed (PR `#125`_).
+* Draw ecliptic in all-sky plots (PR `#124`_).
+
+.. _`#125`: https://github.com/desihub/desiutil/pull/125
+.. _`#124`: https://github.com/desihub/desiutil/pull/124
1.9.14 (2018-10-05)
-------------------
diff --git a/py/desiutil/install.py b/py/desiutil/install.py
index 11f22e1..1f7077f 100644
--- a/py/desiutil/install.py
+++ b/py/desiutil/install.py
@@ -720,7 +720,6 @@ class DesiInstall(object):
outfile = os.path.join(module_directory,
self.module_keywords['name'],
self.module_keywords['version'])
- os.chmod(outfile, 0o440)
except OSError as ose:
self.log.critical(ose.strerror)
raise DesiInstallException(ose.strerror)
diff --git a/py/desiutil/io.py b/py/desiutil/io.py
index 4a9d2a5..b515e43 100644
--- a/py/desiutil/io.py
+++ b/py/desiutil/io.py
@@ -9,6 +9,8 @@ Module for I/O related code.
"""
from __future__ import (print_function, absolute_import, division,
unicode_literals)
+from contextlib import contextmanager
+
try:
basestring
@@ -218,3 +220,55 @@ def decode_table(data, encoding='ascii', native=True):
table.meta['ENCODING'] = encoding
return table
+
+
+@contextmanager
+def unlock_file(*args, **kwargs):
+ """Unlock a read-only file, return a file-like object, and restore the
+ read-only state when done. Arguments are the same as :func:`open`.
+
+ Returns
+ -------
+ file-like
+ A file-like object, as returned by :func:`open`.
+
+ Notes
+ -----
+ * This assumes that the user of this function is also the owner of the
+ file. :func:`os.chmod` would not be expected to work in any other
+ circumstance.
+ * Technically, this restores the *original* permissions of the file, it
+ does not care what the original permissions were.
+ * If the named file does not exist, this function effectively does not
+ attempt to guess what the final permissions of the file would be. In
+ other words, it just does whatever :func:`open` would do. In this case
+ it is the user's responsibility to change permissions as needed after
+ creating the file.
+
+ Examples
+ --------
+ >>> with unlock_file('read-only.txt', 'w') as f:
+ ... f.write(new_data)
+ """
+ import os
+ import stat
+ w = stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH
+ #
+ # Get original permissions, unlock permissions
+ #
+ # uid = os.getuid()
+ old_mode = None
+ if os.path.exists(args[0]):
+ old_mode = stat.S_IMODE(os.stat(args[0]).st_mode)
+ os.chmod(args[0], old_mode | stat.S_IWUSR)
+ f = open(*args, **kwargs)
+ try:
+ yield f
+ finally:
+ #
+ # Restore permissions to read-only state.
+ #
+ f.close()
+ if old_mode is None:
+ old_mode = stat.S_IMODE(os.stat(args[0]).st_mode)
+ os.chmod(args[0], old_mode & ~w)
diff --git a/py/desiutil/modules.py b/py/desiutil/modules.py
index f8be62f..97aebb4 100644
--- a/py/desiutil/modules.py
+++ b/py/desiutil/modules.py
@@ -253,8 +253,7 @@ def process_module(module_file, module_keywords, module_dir):
module_keywords['version'])
with open(module_file) as m:
mod = m.read().format(**module_keywords)
- with open(install_module_file, 'w') as m:
- m.write(mod)
+ _write_module_data(install_module_file, mod)
return mod
@@ -278,6 +277,18 @@ def default_module(module_keywords, module_dir):
install_version_file = join(module_dir, module_keywords['name'],
'.version')
dot_version = dot_template.format(**module_keywords)
- with open(install_version_file, 'w') as v:
- v.write(dot_version)
+ _write_module_data(install_version_file, dot_version)
return dot_version
+
+
+def _write_module_data(filename, data):
+ """Write and permission-lock Module file data. This is intended
+ to consolidate some duplicated code.
+ """
+ from os import chmod
+ from stat import S_IRUSR, S_IRGRP
+ from .io import unlock_file
+ with unlock_file(filename, 'w') as f:
+ f.write(data)
+ chmod(filename, S_IRUSR | S_IRGRP)
+ return
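A possible usage sketch of the new `unlock_file` context manager (the file name is illustrative; on exit the manager strips the write bits, leaving the file read-only):
```
import os
import stat

from desiutil.io import unlock_file

# 'example.version' is an illustrative file name.
with unlock_file('example.version', 'a') as f:
    f.write('1.9.15\n')

# The file is read-only again (or newly read-only, if it was just created).
print(oct(stat.S_IMODE(os.stat('example.version').st_mode)))
```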
| desihub/desiutil | 07df949c0cd4e94db02a8941b369899245df5af0 | diff --git a/py/desiutil/test/test_io.py b/py/desiutil/test/test_io.py
index a8b17dd..4c75f51 100644
--- a/py/desiutil/test/test_io.py
+++ b/py/desiutil/test/test_io.py
@@ -6,16 +6,24 @@ from __future__ import (absolute_import, division,
print_function, unicode_literals)
# The line above will help with 2to3 support.
import unittest
+import os
+import stat
import sys
import numpy as np
from astropy.table import Table
-from ..io import combine_dicts, decode_table, encode_table, yamlify
+from ..io import combine_dicts, decode_table, encode_table, yamlify, unlock_file
try:
basestring
except NameError: # For Python 3
basestring = str
+skipTemp = False
+try:
+ from tempfile import TemporaryDirectory
+except ImportError:
+ skipTemp = True
+
class TestIO(unittest.TestCase):
"""Test desiutil.io
@@ -178,6 +186,33 @@ class TestIO(unittest.TestCase):
self.assertEqual(dict1, {'a': {'b': {'x': 1, 'y': 2}}})
self.assertEqual(dict2, {'a': {'b': {'p': 3, 'q': 4}}})
+ @unittest.skipIf(skipTemp, "Skipping test that requires tempfile.TemporaryDirectory.")
+ def test_unlock_file(self):
+ """Test the permission unlock file manager.
+ """
+ fff = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
+ www = stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH
+ with TemporaryDirectory() as dirname:
+ filename = os.path.join(dirname, 'tempfile')
+ with open(filename, 'wb') as f:
+ f.write(b'Content\n')
+ s0 = os.stat(filename)
+ ro = stat.S_IFMT(s0.st_mode) | fff
+ os.chmod(filename, ro)
+ s1 = os.stat(filename)
+ self.assertEqual(stat.S_IMODE(s1.st_mode), fff)
+ with unlock_file(filename, 'ab') as f:
+ f.write(b'More content\n')
+ s2 = os.stat(filename)
+ self.assertEqual(stat.S_IMODE(s2.st_mode), fff | stat.S_IWUSR)
+ s3 = os.stat(filename)
+ self.assertEqual(stat.S_IMODE(s3.st_mode), fff)
+ filename = os.path.join(dirname, 'newfile')
+ with unlock_file(filename, 'wb') as f:
+ f.write(b'Some content\n')
+ s0 = os.stat(filename)
+ self.assertEqual(stat.S_IMODE(s0.st_mode) & www, 0)
+
def test_suite():
"""Allows testing of only this module with the command::
diff --git a/py/desiutil/test/test_modules.py b/py/desiutil/test/test_modules.py
index a05a62c..72c073a 100644
--- a/py/desiutil/test/test_modules.py
+++ b/py/desiutil/test/test_modules.py
@@ -12,6 +12,8 @@ from os import chmod, environ, mkdir, pathsep, remove, rmdir
from os.path import dirname, exists, isdir, join
from sys import version_info
from shutil import rmtree
+from tempfile import mkdtemp
+from pkg_resources import resource_filename
from ..modules import (init_modules, configure_module, process_module,
default_module)
@@ -23,7 +25,7 @@ class TestModules(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Data directory
- cls.data_dir = join(dirname(__file__), 't')
+ cls.data_dir = mkdtemp()
cls.bin_dir = join(cls.data_dir, 'libexec')
cls.orig_env_cache = dict()
cls.env_cache = dict()
@@ -56,7 +58,7 @@ class TestModules(unittest.TestCase):
del environ[e]
else:
environ[e] = cls.orig_env_cache[e]
- rmtree(cls.bin_dir)
+ rmtree(cls.data_dir)
def cache_env(self, envs):
"""Store existing environment variables in a cache and delete them.
@@ -227,7 +229,7 @@ class TestModules(unittest.TestCase):
def test_process_module(self):
"""Test processing of module file templates.
"""
- module_file = join(self.data_dir, 'test.module')
+ module_file = resource_filename('desiutil.test', 't/test.module')
module_keywords = {'name': 'foo', 'version': 'bar'}
process_module(module_file, module_keywords, self.data_dir)
self.assertTrue(isdir(join(self.data_dir, 'foo')))
| desiInstall can't alter .version files with new permissions settings.
So, I tagged 1.9.14, and went to install it with `desiInstall -d desiutil 1.9.14`. In the process of restricting write access, even to the desi user, I also made .version files read-only. However, `desiInstall` expects to be able to write to such files. For now, I am going to restore user-read to all .version files in 20180709-1.2.6-spec. | 0.0 | [
"py/desiutil/test/test_io.py::TestIO::test_combinedicts",
"py/desiutil/test/test_io.py::TestIO::test_endecode_table",
"py/desiutil/test/test_io.py::TestIO::test_unlock_file",
"py/desiutil/test/test_io.py::test_suite",
"py/desiutil/test/test_modules.py::TestModules::test_configure_module",
"py/desiutil/test/test_modules.py::TestModules::test_default_module",
"py/desiutil/test/test_modules.py::TestModules::test_init_modules",
"py/desiutil/test/test_modules.py::TestModules::test_process_module",
"py/desiutil/test/test_modules.py::test_suite"
] | [] | 2018-10-30 16:59:46+00:00 | 1,886 |
|
desihub__desiutil-173 | diff --git a/doc/changes.rst b/doc/changes.rst
index ef3bf1d..0fc8189 100644
--- a/doc/changes.rst
+++ b/doc/changes.rst
@@ -5,7 +5,10 @@ Change Log
3.2.2 (unreleased)
------------------
-* No changes yet.
+* Add module config support for packages like QuasarNP where the GitHub
+ name is capitalized but the python package isn't (PR `#173`_).
+
+.. _`#173`: https://github.com/desihub/desiutil/pull/173
3.2.1 (2021-05-13)
------------------
diff --git a/py/desiutil/modules.py b/py/desiutil/modules.py
index 33a2ca9..4accae3 100644
--- a/py/desiutil/modules.py
+++ b/py/desiutil/modules.py
@@ -199,7 +199,10 @@ def configure_module(product, version, product_root, working_dir=None, dev=False
module_keywords['needs_ld_lib'] = ''
if isdir(join(working_dir, 'pro')):
module_keywords['needs_idl'] = ''
- if (exists(join(working_dir, 'setup.py')) and isdir(join(working_dir, product))):
+ if (exists(join(working_dir, 'setup.py')) and
+ (isdir(join(working_dir, product)) or
+ isdir(join(working_dir, product.lower())))
+ ):
if dev:
module_keywords['needs_trunk_py'] = ''
module_keywords['trunk_py_dir'] = ''
| desihub/desiutil | 93f40ac22f0d5629bb80713e11918e1d1cd3f36b | diff --git a/py/desiutil/test/test_modules.py b/py/desiutil/test/test_modules.py
index 3b5233b..4c710f0 100644
--- a/py/desiutil/test/test_modules.py
+++ b/py/desiutil/test/test_modules.py
@@ -222,6 +222,35 @@ class TestModules(unittest.TestCase):
rmdir(join(self.data_dir, t))
for t in test_files:
remove(join(self.data_dir, t))
+ #
+ # Test mixed case product directory (Blat) vs. python package (blat)
+ #
+ test_dirs = ('blat',)
+ test_files = {'setup.py': '#!/usr/bin/env python\n'}
+ for t in test_dirs:
+ mkdir(join(self.data_dir, t))
+ for t in test_files:
+ with open(join(self.data_dir, t), 'w') as s:
+ s.write(test_files[t])
+ results['name'] = 'Blat'
+ results['version'] = '1.2.3'
+ results['needs_bin'] = '# '
+ results['needs_python'] = ''
+ results['needs_trunk_py'] = '# '
+ results['trunk_py_dir'] = '/py'
+ results['needs_ld_lib'] = '# '
+ results['needs_idl'] = '# '
+
+ conf = configure_module('Blat', '1.2.3', '/my/product/root',
+ working_dir=self.data_dir)
+
+ for key in results:
+ self.assertEqual(conf[key], results[key], key)
+ for t in test_dirs:
+ rmdir(join(self.data_dir, t))
+ for t in test_files:
+ remove(join(self.data_dir, t))
+
def test_process_module(self):
"""Test processing of module file templates.
| desiInstall creates incorrect module file for QuasarNP
QuasarNP is a semi-external package hosted in desihub. It doesn't have a boilerplate etc/quasarnp.module file, but it also doesn't require anything custom so I was expecting the desiInstall default module file to work. desiInstall does correctly identify the "py" install and pip installs QuasarNP, but the resulting module file doesn't add $PRODUCT_DIR/lib/python3.8/site-packages to $PYTHONPATH.
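Before the full desiInstall log below, a minimal sketch of the kind of check that trips this up: the GitHub product name is `QuasarNP` but the checked-out package directory is `quasarnp`, so a purely case-sensitive directory test misses it (the helper below is illustrative, mirroring the patch above, not the actual desiInstall code):
```
import os


def has_python_package(working_dir, product):
    # A case-sensitive test on `product` alone misses packages such as
    # QuasarNP, whose GitHub name is capitalized but whose python
    # package directory is lower-case.
    return (os.path.exists(os.path.join(working_dir, 'setup.py')) and
            (os.path.isdir(os.path.join(working_dir, product)) or
             os.path.isdir(os.path.join(working_dir, product.lower()))))
```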
```
[cori06 ~] desiInstall -v -r $SCRATCH/desi/test QuasarNP 0.1.0
WARNING:install.py:183:get_options:2021-06-03T14:21:29: The environment variable LANG is not set!
DEBUG:install.py:251:get_options:2021-06-03T14:21:29: Set log level to DEBUG.
DEBUG:install.py:320:get_product_version:2021-06-03T14:21:29: Detected GitHub install.
DEBUG:install.py:351:identify_branch:2021-06-03T14:21:29: Using https://github.com/desihub/QuasarNP/archive/0.1.0.tar.gz as the URL of this product.
INFO:install.py:412:get_code:2021-06-03T14:21:29: Detected old working directory, /global/u2/s/sjbailey/QuasarNP-0.1.0. Deleting...
DEBUG:install.py:414:get_code:2021-06-03T14:21:29: shutil.rmtree('/global/u2/s/sjbailey/QuasarNP-0.1.0')
DEBUG:install.py:638:start_modules:2021-06-03T14:21:30: Initializing Modules with MODULESHOME=/opt/cray/pe/modules/3.2.11.4.
DEBUG:install.py:538:build_type:2021-06-03T14:21:30: Detected build type: py
DEBUG:install.py:700:install_module:2021-06-03T14:21:30: configure_module(QuasarNP, 0.1.0, working_dir=/global/u2/s/sjbailey/QuasarNP-0.1.0, dev=False)
DEBUG:install.py:720:install_module:2021-06-03T14:21:30: process_module('/global/common/software/desi/cori/desiconda/20200801-1.4.0-spec/code/desiutil/3.2.1/lib/python3.8/site-packages/desiutil/data/desiutil.module', self.module_keywords, '/global/cscratch1/sd/sjbailey/desi/test/modulefiles')
DEBUG:install.py:757:prepare_environment:2021-06-03T14:21:31: module('switch', 'QuasarNP/0.1.0')
DEBUG:install.py:538:build_type:2021-06-03T14:21:31: Detected build type: py
DEBUG:install.py:538:build_type:2021-06-03T14:21:31: Detected build type: py
DEBUG:install.py:802:install:2021-06-03T14:21:31: os.makedirs('/global/cscratch1/sd/sjbailey/desi/test/code/QuasarNP/0.1.0/lib/python3.8/site-packages')
DEBUG:install.py:824:install:2021-06-03T14:21:31: /global/common/software/desi/cori/desiconda/20200801-1.4.0-spec/conda/bin/python -m pip install --no-deps --disable-pip-version-check --ignore-installed --prefix=/global/cscratch1/sd/sjbailey/desi/test/code/QuasarNP/0.1.0 .
DEBUG:install.py:833:install:2021-06-03T14:21:33: Processing /global/u2/s/sjbailey/QuasarNP-0.1.0
Building wheels for collected packages: quasarnp
Building wheel for quasarnp (setup.py): started
Building wheel for quasarnp (setup.py): finished with status 'done'
Created wheel for quasarnp: filename=quasarnp-0.1.0-py3-none-any.whl size=13407 sha256=17d7dbdf89520f3a0e5d751edd4b7591b45563425a190550ff31c71e48b1b855
Stored in directory: /global/u2/s/sjbailey/.cache/pip/wheels/b3/5d/19/08d052aecb141666e9fca7cef889a0c9393b18766c47f69300
Successfully built quasarnp
Installing collected packages: quasarnp
Successfully installed quasarnp-0.1.0
DEBUG:install.py:538:build_type:2021-06-03T14:21:33: Detected build type: py
DEBUG:install.py:538:build_type:2021-06-03T14:21:33: Detected build type: py
DEBUG:install.py:957:permissions:2021-06-03T14:21:33: fix_permissions.sh -v /global/cscratch1/sd/sjbailey/desi/test/code/QuasarNP/0.1.0
DEBUG:install.py:962:permissions:2021-06-03T14:21:35: Fixing permissions on /global/cscratch1/sd/sjbailey/desi/test/code/QuasarNP/0.1.0 ...
/usr/bin/find /global/cscratch1/sd/sjbailey/desi/test/code/QuasarNP/0.1.0 -user sjbailey -not -group desi -exec chgrp -c -h desi {} ;
changed group of '/global/cscratch1/sd/sjbailey/desi/test/code/QuasarNP/0.1.0' from sjbailey to desi
changed group of '/global/cscratch1/sd/sjbailey/desi/test/code/QuasarNP/0.1.0/lib' from sjbailey to desi
changed group of '/global/cscratch1/sd/sjbailey/desi/test/code/QuasarNP/0.1.0/lib/python3.8' from sjbailey to desi
changed group of '/global/cscratch1/sd/sjbailey/desi/test/code/QuasarNP/0.1.0/lib/python3.8/site-packages' from sjbailey to desi
changed group of '/global/cscratch1/sd/sjbailey/desi/test/code/QuasarNP/0.1.0/lib/python3.8/site-packages/quasarnp' from sjbailey to desi
changed group of '/global/cscratch1/sd/sjbailey/desi/test/code/QuasarNP/0.1.0/lib/python3.8/site-packages/quasarnp/model.py' from sjbailey to desi
changed group of '/global/cscratch1/sd/sjbailey/desi/test/code/QuasarNP/0.1.0/lib/python3.8/site-packages/quasarnp/io.py' from sjbailey to desi
changed group of '/global/cscratch1/sd/sjbailey/desi/test/code/QuasarNP/0.1.0/lib/python3.8/site-packages/quasarnp/__init__.py' from sjbailey to desi
changed group of '/global/cscratch1/sd/sjbailey/desi/test/code/QuasarNP/0.1.0/lib/python3.8/site-packages/quasarnp/__pycache__' from sjbailey to desi
changed group of '/global/cscratch1/sd/sjbailey/desi/test/code/QuasarNP/0.1.0/lib/python3.8/site-packages/quasarnp/__pycache__/layers.cpython-38.pyc' from sjbailey to desi
changed group of '/global/cscratch1/sd/sjbailey/desi/test/code/QuasarNP/0.1.0/lib/python3.8/site-packages/quasarnp/__pycache__/model.cpython-38.pyc' from sjbailey to desi
changed group of '/global/cscratch1/sd/sjbailey/desi/test/code/QuasarNP/0.1.0/lib/python3.8/site-packages/quasarnp/__pycache__/__init__.cpython-38.pyc' from sjbailey to desi
changed group of '/global/cscratch1/sd/sjbailey/desi/test/code/QuasarNP/0.1.0/lib/python3.8/site-packages/quasarnp/__pycache__/utils.cpython-38.pyc' from sjbailey to desi
changed group of '/global/cscratch1/sd/sjbailey/desi/test/code/QuasarNP/0.1.0/lib/python3.8/site-packages/quasarnp/__pycache__/io.cpython-38.pyc' from sjbailey to desi
changed group of '/global/cscratch1/sd/sjbailey/desi/test/code/QuasarNP/0.1.0/lib/python3.8/site-packages/quasarnp/layers.py' from sjbailey to desi
changed group of '/global/cscratch1/sd/sjbailey/desi/test/code/QuasarNP/0.1.0/lib/python3.8/site-packages/quasarnp/utils.py' from sjbailey to desi
changed group of '/global/cscratch1/sd/sjbailey/desi/test/code/QuasarNP/0.1.0/lib/python3.8/site-packages/quasarnp-0.1.0.dist-info' from sjbailey to desi
changed group of '/global/cscratch1/sd/sjbailey/desi/test/code/QuasarNP/0.1.0/lib/python3.8/site-packages/quasarnp-0.1.0.dist-info/INSTALLER' from sjbailey to desi
changed group of '/global/cscratch1/sd/sjbailey/desi/test/code/QuasarNP/0.1.0/lib/python3.8/site-packages/quasarnp-0.1.0.dist-info/WHEEL' from sjbailey to desi
changed group of '/global/cscratch1/sd/sjbailey/desi/test/code/QuasarNP/0.1.0/lib/python3.8/site-packages/quasarnp-0.1.0.dist-info/LICENSE' from sjbailey to desi
changed group of '/global/cscratch1/sd/sjbailey/desi/test/code/QuasarNP/0.1.0/lib/python3.8/site-packages/quasarnp-0.1.0.dist-info/top_level.txt' from sjbailey to desi
changed group of '/global/cscratch1/sd/sjbailey/desi/test/code/QuasarNP/0.1.0/lib/python3.8/site-packages/quasarnp-0.1.0.dist-info/METADATA' from sjbailey to desi
changed group of '/global/cscratch1/sd/sjbailey/desi/test/code/QuasarNP/0.1.0/lib/python3.8/site-packages/quasarnp-0.1.0.dist-info/RECORD' from sjbailey to desi
/usr/bin/find /global/cscratch1/sd/sjbailey/desi/test/code/QuasarNP/0.1.0 -user sjbailey -type f -not -perm /g+r -exec chmod -c g+r {} ;
/usr/bin/find /global/cscratch1/sd/sjbailey/desi/test/code/QuasarNP/0.1.0 -user sjbailey -type d -not -perm -g+rxs -exec chmod -c g+rxs {} ;
mode of '/global/cscratch1/sd/sjbailey/desi/test/code/QuasarNP/0.1.0' changed from 0755 (rwxr-xr-x) to 2755 (rwxr-sr-x)
mode of '/global/cscratch1/sd/sjbailey/desi/test/code/QuasarNP/0.1.0/lib' changed from 0755 (rwxr-xr-x) to 2755 (rwxr-sr-x)
mode of '/global/cscratch1/sd/sjbailey/desi/test/code/QuasarNP/0.1.0/lib/python3.8' changed from 0755 (rwxr-xr-x) to 2755 (rwxr-sr-x)
mode of '/global/cscratch1/sd/sjbailey/desi/test/code/QuasarNP/0.1.0/lib/python3.8/site-packages' changed from 0755 (rwxr-xr-x) to 2755 (rwxr-sr-x)
mode of '/global/cscratch1/sd/sjbailey/desi/test/code/QuasarNP/0.1.0/lib/python3.8/site-packages/quasarnp' changed from 0755 (rwxr-xr-x) to 2755 (rwxr-sr-x)
mode of '/global/cscratch1/sd/sjbailey/desi/test/code/QuasarNP/0.1.0/lib/python3.8/site-packages/quasarnp/__pycache__' changed from 0755 (rwxr-xr-x) to 2755 (rwxr-sr-x)
mode of '/global/cscratch1/sd/sjbailey/desi/test/code/QuasarNP/0.1.0/lib/python3.8/site-packages/quasarnp-0.1.0.dist-info' changed from 0755 (rwxr-xr-x) to 2755 (rwxr-sr-x)
DEBUG:install.py:970:permissions:2021-06-03T14:21:35: chmod -R a-w /global/cscratch1/sd/sjbailey/desi/test/code/QuasarNP/0.1.0
DEBUG:install.py:975:permissions:2021-06-03T14:21:35:
DEBUG:install.py:987:cleanup:2021-06-03T14:21:35: os.chdir('/global/u2/s/sjbailey')
DEBUG:install.py:991:cleanup:2021-06-03T14:21:35: shutil.rmtree('/global/u2/s/sjbailey/QuasarNP-0.1.0')
DEBUG:install.py:1024:run:2021-06-03T14:21:35: run() complete.
[cori06 ~] tail -15 $SCRATCH/desi/test/modulefiles/QuasarNP/0.1.0
#
setenv [string toupper $product] $PRODUCT_DIR
#
# The lines below set various other environment variables. They assume the
# template product layout. These will be set or commented as needed by the
# desiInstall script.
#
# prepend-path PATH $PRODUCT_DIR/bin
# prepend-path PYTHONPATH $PRODUCT_DIR/lib/python3.8/site-packages
# prepend-path PYTHONPATH $PRODUCT_DIR/py
# prepend-path LD_LIBRARY_PATH $PRODUCT_DIR/lib
# prepend-path IDL_PATH +$PRODUCT_DIR/pro
#
# Add any non-standard Module code below this point.
#
``` | 0.0 | [
"py/desiutil/test/test_modules.py::TestModules::test_configure_module"
] | [
"py/desiutil/test/test_modules.py::TestModules::test_default_module",
"py/desiutil/test/test_modules.py::TestModules::test_init_modules",
"py/desiutil/test/test_modules.py::TestModules::test_process_module",
"py/desiutil/test/test_modules.py::test_suite"
] | 2021-06-03 22:34:52+00:00 | 1,887 |
|
desihub__desiutil-188 | diff --git a/doc/changes.rst b/doc/changes.rst
index 8697970..93581d9 100644
--- a/doc/changes.rst
+++ b/doc/changes.rst
@@ -8,9 +8,14 @@ Change Log
* :command:`desiInstall` uses desihub location of simqso fork (commit e963344_).
* Allow :command:`desiInstall` to remove permission-locked directories;
suppress certain :command:`pip` warnings (PR `#185`_).
+* Allow :command:`desiInstall` to compile code in certain branch installs (PR `#188`_).
+* Add `gpu_specter`_ to known packages (PR `#189`_).
.. _e963344: https://github.com/desihub/desiutil/commit/e963344cd072255174187d2bd6da72d085745abd
.. _`#185`: https://github.com/desihub/desiutil/pull/185
+.. _`#188`: https://github.com/desihub/desiutil/pull/188
+.. _`#189`: https://github.com/desihub/desiutil/pull/189
+.. _`gpu_specter`: https://github.com/desihub/gpu_specter
3.2.5 (2022-01-20)
------------------
diff --git a/doc/desiInstall.rst b/doc/desiInstall.rst
index e433192..8c926f0 100644
--- a/doc/desiInstall.rst
+++ b/doc/desiInstall.rst
@@ -322,6 +322,23 @@ not bundled with the code. The script should download data *directly* to
with :command:`desiInstall` and unit tests. Note that here are other, better ways to
install and manipulate data that is bundled *with* a Python package.
+Compile in Branch Installs
+--------------------------
+
+In a few cases (fiberassign_, specex_) code needs to be compiled even when
+installing a branch. If :command:`desiInstall` detects a branch install *and*
+the script ``etc/product_compile.sh`` exists, :command:`desiInstall` will run this
+script, supplying the Python executable path as a single command-line argument.
+The script itself is intended to be a thin wrapper on *e.g.*::
+
+ #!/bin/bash
+ py=$1
+ ${py} setup.py build_ext --inplace
+
+
+.. _fiberassign: https://github.com/desihub/fiberassign
+.. _specex: https://github.com/desihub/specex
+
Fix Permissions
---------------
diff --git a/py/desiutil/install.py b/py/desiutil/install.py
index 28c92ad..7dfc4cf 100644
--- a/py/desiutil/install.py
+++ b/py/desiutil/install.py
@@ -910,6 +910,33 @@ class DesiInstall(object):
raise DesiInstallException(message)
return
+ def compile_branch(self):
+ """Certain packages need C/C++ code compiled even for a branch install.
+ """
+ if self.is_branch:
+ compile_script = os.path.join(self.install_dir, 'etc',
+ '{0}_compile.sh'.format(self.baseproduct))
+ if os.path.exists(compile_script):
+ self.log.debug("Detected compile script: %s.", compile_script)
+ if self.options.test:
+ self.log.debug('Test Mode. Skipping compile script.')
+ else:
+ current_dir = os.getcwd()
+ self.log.debug("os.chdir('%s')", self.install_dir)
+ os.chdir(self.install_dir)
+ proc = Popen([compile_script, sys.executable], universal_newlines=True,
+ stdout=PIPE, stderr=PIPE)
+ out, err = proc.communicate()
+ status = proc.returncode
+ self.log.debug(out)
+ self.log.debug("os.chdir('%s')", current_dir)
+ os.chdir(current_dir)
+ if status != 0 and len(err) > 0:
+ message = "Error compiling code: {0}".format(err)
+ self.log.critical(message)
+ raise DesiInstallException(message)
+ return
+
def verify_bootstrap(self):
"""Make sure that desiutil/desiInstall was installed with
an explicit Python executable path.
@@ -1027,6 +1054,7 @@ class DesiInstall(object):
self.prepare_environment()
self.install()
self.get_extra()
+ self.compile_branch()
self.verify_bootstrap()
self.permissions()
except DesiInstallException:
| desihub/desiutil | 4f910f407c4b7fa9636aa64fc65acd72c49a3cf7 | diff --git a/py/desiutil/test/test_install.py b/py/desiutil/test/test_install.py
index 73c18f7..a12433f 100644
--- a/py/desiutil/test/test_install.py
+++ b/py/desiutil/test/test_install.py
@@ -2,6 +2,7 @@
# -*- coding: utf-8 -*-
"""Test desiutil.install.
"""
+import sys
import unittest
from unittest.mock import patch, call, MagicMock, mock_open
from os import chdir, environ, getcwd, mkdir, remove, rmdir
@@ -507,6 +508,42 @@ class TestInstall(unittest.TestCase):
self.assertLog(-1, message)
self.assertEqual(str(cm.exception), message)
+ @patch('os.chdir')
+ @patch('os.path.exists')
+ @patch('desiutil.install.Popen')
+ def test_compile_branch(self, mock_popen, mock_exists, mock_chdir):
+ """Test compiling code in certain cases.
+ """
+ current_dir = getcwd()
+ options = self.desiInstall.get_options(['fiberassign', 'branches/main'])
+ self.desiInstall.baseproduct = 'fiberassign'
+ self.desiInstall.is_branch = True
+ self.desiInstall.install_dir = join(self.data_dir, 'fiberassign')
+ mock_exists.return_value = True
+ mock_proc = mock_popen()
+ mock_proc.returncode = 0
+ mock_proc.communicate.return_value = ('out', 'err')
+ self.desiInstall.compile_branch()
+ mock_chdir.assert_has_calls([call(self.desiInstall.install_dir),
+ call(current_dir)])
+ mock_exists.assert_has_calls([call(join(self.desiInstall.install_dir, 'etc', 'fiberassign_compile.sh'))])
+ mock_popen.assert_has_calls([call([join(self.desiInstall.install_dir, 'etc', 'fiberassign_compile.sh'), sys.executable],
+ stderr=-1, stdout=-1, universal_newlines=True)], any_order=True)
+ mock_popen.reset_mock()
+ self.desiInstall.options.test = True
+ self.desiInstall.compile_branch()
+ self.assertLog(-1, 'Test Mode. Skipping compile script.')
+ mock_popen.reset_mock()
+ self.desiInstall.options.test = False
+ mock_proc = mock_popen()
+ mock_proc.returncode = 1
+ mock_proc.communicate.return_value = ('out', 'err')
+ with self.assertRaises(DesiInstallException) as cm:
+ self.desiInstall.compile_branch()
+ message = "Error compiling code: err"
+ self.assertLog(-1, message)
+ self.assertEqual(str(cm.exception), message)
+
def test_verify_bootstrap(self):
"""Test proper installation of the desiInstall executable.
"""
| desiInstall of specex/main and fiberassign/main, which need compilation too
desiInstall supports "in-place" installations of python repos that add `$PRODUCT_DIR/py` to `$PYTHONPATH` and `$PRODUCT_DIR/bin` to `$PATH`, so that any changes to the repo are automatically available without having to do a separate installation step. Good.
specex and fiberassign, however, are hybrid repos that have python code with compiled extensions. An in-place install is handy when making changes to any of the python code, but if any of the C++ code changes it still has to be compiled using:
```
python setup.py build_ext --inplace
```
desiInstall doesn't know this, and this pattern doesn't fit any of the build types listed at https://desiutil.readthedocs.io/en/latest/desiInstall.html#determine-build-type .
What's the best way to get desiInstall to know that it needs to run this extra step for these two repos?
A somewhat hacky solution that may not require changing desiInstall is to leverage its special case of looking for an `etc/{productname}_data.sh` script and executing that, e.g. as used by desimodel to get the data from svn. specex and fiberassign could add their own `etc/*_data.sh` scripts to run `python setup.py build_ext --inplace`, but that is somewhat cryptically using a data-download hook for other purposes.
It might be better to define another hook similar to `etc/*_data.sh`; if desiInstall detects it, it would run it for in-place branch installations but not for regular installations. That requires an update to both desiInstall and the specex+fiberassign repos, but it might be more obvious and maintainable in the future.
For context, both specex and fiberassign used to have a Makefile that desiInstall knew to run, but both have migrated to a python-first approach with compiled extensions without a Makefile. Current master (now main) installations have bootstrapped the `python setup.py build_ext --inplace` upon first installation, after which the desitest nightly update cronjob re-runs that every night after `git pull`. The point of this ticket is so that the end-user doesn't have to remember to do special steps whenever they make a fresh main installation.
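For illustration only, here is a minimal standalone sketch of the compile-hook idea discussed above: a hypothetical helper that runs `etc/<product>_compile.sh` (itself a thin wrapper around `python setup.py build_ext --inplace`) only for branch installs. The function and argument names are made up for this sketch and are not desiInstall's actual API.

```python
import os
import subprocess
import sys

def maybe_compile_branch(install_dir, product, is_branch):
    """Run etc/<product>_compile.sh, if present, for in-place branch installs only."""
    if not is_branch:
        return
    compile_script = os.path.join(install_dir, "etc", f"{product}_compile.sh")
    if not os.path.exists(compile_script):
        return
    # The wrapper script is expected to call e.g. `$1 setup.py build_ext --inplace`,
    # so pass the current Python executable as its single argument.
    proc = subprocess.run([compile_script, sys.executable], cwd=install_dir,
                          capture_output=True, text=True)
    if proc.returncode != 0:
        raise RuntimeError(f"Error compiling code: {proc.stderr}")
```

Keeping the actual compile command inside each product's own wrapper script means the installer never needs product-specific build knowledge; it only has to know where the hook lives and to skip it for non-branch installs.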
@weaverba137 thoughts? | 0.0 | [
"py/desiutil/test/test_install.py::TestInstall::test_compile_branch"
] | [
"py/desiutil/test/test_install.py::TestInstall::test_anaconda_version",
"py/desiutil/test/test_install.py::TestInstall::test_build_type",
"py/desiutil/test/test_install.py::TestInstall::test_cleanup",
"py/desiutil/test/test_install.py::TestInstall::test_default_nersc_dir",
"py/desiutil/test/test_install.py::TestInstall::test_dependencies",
"py/desiutil/test/test_install.py::TestInstall::test_get_extra",
"py/desiutil/test/test_install.py::TestInstall::test_get_product_version",
"py/desiutil/test/test_install.py::TestInstall::test_identify_branch",
"py/desiutil/test/test_install.py::TestInstall::test_install",
"py/desiutil/test/test_install.py::TestInstall::test_install_module",
"py/desiutil/test/test_install.py::TestInstall::test_module_dependencies",
"py/desiutil/test/test_install.py::TestInstall::test_nersc_module_dir",
"py/desiutil/test/test_install.py::TestInstall::test_permissions",
"py/desiutil/test/test_install.py::TestInstall::test_prepare_environment",
"py/desiutil/test/test_install.py::TestInstall::test_sanity_check",
"py/desiutil/test/test_install.py::TestInstall::test_set_install_dir",
"py/desiutil/test/test_install.py::TestInstall::test_unlock_permissions",
"py/desiutil/test/test_install.py::TestInstall::test_verify_bootstrap",
"py/desiutil/test/test_install.py::test_suite"
] | 2022-08-18 00:08:44+00:00 | 1,888 |
|
destag__at-date-17 | diff --git a/atdate/api.py b/atdate/api.py
index b42b2ac..7f1af8e 100644
--- a/atdate/api.py
+++ b/atdate/api.py
@@ -15,7 +15,9 @@ class AtDateParser:
tree = self.parser.parse(string_to_parse.lower())
new_tree = transformer.transform(tree)
- next_time_run = new_tree if isinstance(new_tree, datetime) else new_tree.children[-1]
+ next_time_run = new_tree
+ while not isinstance(next_time_run, datetime):
+ next_time_run = next_time_run.children[-1]
if next_time_run < transformer.now:
raise ValueError
@@ -65,7 +67,7 @@ class AtDateTransformer(Transformer):
self.datetime_params['second'] = 0
return datetime(**self.datetime_params)
- def _hr24clock_hour_minute(self, matches):
+ def _iso_time(self, matches):
hour = int(matches[0])
minute = int(matches[1])
next_day = self._check_if_next_day(hour, minute)
@@ -152,6 +154,13 @@ class AtDateTransformer(Transformer):
self.datetime_params['year'] = year
return datetime(**self.datetime_params)
+ def _iso_date(self, matches):
+ year, month, day = map(int, matches)
+ self.datetime_params['day'] = day
+ self.datetime_params['month'] = month
+ self.datetime_params['year'] = year
+ return datetime(**self.datetime_params)
+
def _next(self, matches):
inc_period = matches[0] if matches[0].endswith('s') else matches[0] + 's'
dt = datetime(**self.datetime_params)
diff --git a/atdate/atdate_format.py b/atdate/atdate_format.py
index 42e910b..f163137 100644
--- a/atdate/atdate_format.py
+++ b/atdate/atdate_format.py
@@ -3,25 +3,30 @@ format_string = r'''
| time date
| time increment
| time date increment
+ | date time
+ | isodate "t" isotime
| date
| date increment
| nowspec
| nowspec increment
| increment
-time: HR24CLOCK_HR_MIN -> _hr24clock_hr_min
- | HR24CLOCK_HOUR ":" MINUTE -> _hr24clock_hour_minute
- | WALLCLOCK_HR_MIN AM_PM -> _wallclock_hr_min_am_pm
- | WALLCLOCK_HOUR ":" MINUTE AM_PM -> _wallclock_hour_minute_am_pm
- | "noon" -> _noon
- | "midnight" -> _midnight
-date: MONTH_NAME DAY_NUMBER -> _month_name_day_number
- | MONTH_NUMBER "/" DAY_NUMBER -> _month_number_day_number
- | MONTH_NUMBER "/" DAY_NUMBER "/" YEAR_NUMBER -> _month_number_day_number_year_number
- | DAY_NUMBER "." MONTH_NUMBER -> _day_number_month_number
- | DAY_NUMBER "." MONTH_NUMBER "." YEAR_NUMBER -> _day_number_month_number_year_number
-increment: "next" INC_PERIOD -> _next
- | "+" INT INC_PERIOD -> _inc_number
-nowspec: "now" -> _now
+time: HR24CLOCK_HR_MIN -> _hr24clock_hr_min
+ | WALLCLOCK_HR_MIN AM_PM -> _wallclock_hr_min_am_pm
+ | WALLCLOCK_HOUR ":" MINUTE AM_PM -> _wallclock_hour_minute_am_pm
+ | "noon" -> _noon
+ | "midnight" -> _midnight
+ | isotime
+date: MONTH_NAME DAY_NUMBER -> _month_name_day_number
+ | MONTH_NUMBER "/" DAY_NUMBER -> _month_number_day_number
+ | MONTH_NUMBER "/" DAY_NUMBER "/" YEAR_NUMBER -> _month_number_day_number_year_number
+ | DAY_NUMBER "." MONTH_NUMBER -> _day_number_month_number
+ | DAY_NUMBER "." MONTH_NUMBER "." YEAR_NUMBER -> _day_number_month_number_year_number
+ | isodate
+isodate: YEAR_NUMBER "-" MONTH_NUMBER "-" DAY_NUMBER -> _iso_date
+isotime: HR24CLOCK_HOUR ":" MINUTE -> _iso_time
+increment: "next" INC_PERIOD -> _next
+ | "+" INT INC_PERIOD -> _inc_number
+nowspec: "now" -> _now
INC_PERIOD: "minutes" | "minute"
| "hours" | "hour"
| "days" | "day"
diff --git a/docs/README.md b/docs/README.md
index b33fdbd..ab82ea5 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -98,15 +98,18 @@ tokens|example
[time] [increment]|17:32 next day
[time] [date] [increment]|17:32 11/22/2033 next day
[date]|11/22/2033
+[date] [time]|11/22/2033 17:32
[date] [increment]|11/22/2033 next month
[now]|now
[now] [increment]|now next day
[increment]|next month
+[isodatetime]|2033-11-22T17:32
[time]: #time
[date]: #date
[increment]: #increment
[now]: #now
+[isodatetime]: #isodatetime
### At date tokens
@@ -135,6 +138,7 @@ format|example
\[1-12\] / \[1-31\] / \[0-9999\]|10/27/2006
\[1-12\] . \[1-31\]|10.27
\[1-12\] . \[1-31\] . \[0-9999\]|10.27.2006
+\[0-9999\] - \[1-12\] - \[1-31\]|2006-10-27
#### increment
@@ -145,6 +149,14 @@ format|example
next \[[period](#period)\]|next month
\+ \[0-9999\] \[[period](#period)\]|\+ 12 minutes
+#### isodatetime
+
+Format for ISO 8601 date time.
+
+format|example
+---|---
+\[0-9999\] - \[1-12\] - \[1-31\] T \[0-23\] : \[0-59\]|2033-11-22T17:32
+
#### now
Format for this token is literally `now`.
| destag/at-date | 3957d0b750ef1205adb0ba2fc3dccf34c3f0ce62 | diff --git a/test/test_atdate.py b/test/test_atdate.py
index fdbf0ce..8c23f7c 100644
--- a/test/test_atdate.py
+++ b/test/test_atdate.py
@@ -220,3 +220,31 @@ def test_plus_one_day_without_now():
test_string = '+1days'
result = atdate.parse(test_string)
assert result == datetime(2000, 7, 3, 3, 4, 5, 0)
+
+
+@freeze_time('2000-07-02 03:04:05')
+def test_isodate():
+ test_string = '2011-09-22'
+ result = atdate.parse(test_string)
+ assert result == datetime(2011, 9, 22, 3, 4, 5, 0)
+
+
+@freeze_time('2000-07-02 03:04:05')
+def test_time_date():
+ test_string = "12:24 01.02.2011"
+ result = atdate.parse(test_string)
+ assert result == datetime(2011, 2, 1, 12, 24, 0, 0)
+
+
+@freeze_time('2000-07-02 03:04:05')
+def test_isodatetime():
+ test_string = '2011-09-22T11:44'
+ result = atdate.parse(test_string)
+ assert result == datetime(2011, 9, 22, 11, 44, 0, 0)
+
+
+@freeze_time('2000-07-02 03:04:05')
+def test_isodatetime_without_t():
+ test_string = '2011-09-22 11:44'
+ result = atdate.parse(test_string)
+ assert result == datetime(2011, 9, 22, 11, 44, 0, 0)
| ISO 8601 compatibility
I think that it would be nice to support dates and times in ISO 8601 format. | 0.0 | [
"test/test_atdate.py::test_isodate",
"test/test_atdate.py::test_isodatetime_without_t",
"test/test_atdate.py::test_isodatetime"
] | [
"test/test_atdate.py::test_at_midnight_month_change",
"test/test_atdate.py::test_day_number_month_number_year_number",
"test/test_atdate.py::test_at_now_next_week",
"test/test_atdate.py::test_at_now_next_day",
"test/test_atdate.py::test_at_midnight",
"test/test_atdate.py::test_at_now_next_minutes",
"test/test_atdate.py::test_at_noon_after_noon",
"test/test_atdate.py::test_at_now_next_minute_change_minute",
"test/test_atdate.py::test_next_month_without_now",
"test/test_atdate.py::test_month_number_day_number",
"test/test_atdate.py::test_at_now_next_minute_change_day",
"test/test_atdate.py::test_wallclock_hour_minute_am_pm",
"test/test_atdate.py::test_at_now_next_year",
"test/test_atdate.py::test_at_now_next_hour",
"test/test_atdate.py::test_at_noon_before_noon",
"test/test_atdate.py::test_at_now_next_month",
"test/test_atdate.py::test_hr24clock_hr_min",
"test/test_atdate.py::test_at_date_has_atdateparser_attribute",
"test/test_atdate.py::test_day_number_month_number",
"test/test_atdate.py::test_month_name_day_number",
"test/test_atdate.py::test_wallclock_hr_min_am_pm",
"test/test_atdate.py::test_at_now_next_minute_change_hour",
"test/test_atdate.py::test_plus_one_day_without_now",
"test/test_atdate.py::test_at_noon_month_change",
"test/test_atdate.py::test_at_noon_year_change",
"test/test_atdate.py::test_parse_return_datetime_object",
"test/test_atdate.py::test_at_now",
"test/test_atdate.py::test_inc_period",
"test/test_atdate.py::test_hr24clock_hour_minute",
"test/test_atdate.py::test_month_number_day_number_year_number",
"test/test_atdate.py::test_at_date_has_parse_attribute",
"test/test_atdate.py::test_time_date",
"test/test_atdate.py::test_at_midnight_year_change"
] | 2018-10-17 19:42:01+00:00 | 1,889 |
|
dev-cafe__parselglossy-52 | diff --git a/parselglossy/documentation.py b/parselglossy/documentation.py
index f101cb7..0b8ed79 100644
--- a/parselglossy/documentation.py
+++ b/parselglossy/documentation.py
@@ -51,14 +51,19 @@ def documentation_generator(
"""
comment = (
+ ".. raw:: html\n\n"
+ " <style> .red {color:#aa0060; font-weight:bold; font-size:18px} </style>\n\n" # noqa: E501
+ ".. role:: red\n\n"
".. This documentation was autogenerated using parselglossy."
" Editing by hand is not recommended.\n"
)
header_fmt = (
"{comment:s}\n{markup:s}\n{header:s}\n{markup:s}\n\n"
- "Keywords without a default value are **required**.\n"
- "Sections where all keywords have a default value can be omitted.\n"
+ "- Keywords without a default value are **required**.\n"
+ "- Default values are either explicit or computed from the value of other keywords in the input.\n" # noqa: E501
+ "- Sections where all keywords have a default value can be omitted.\n"
+ "- Predicates, if present, are the functions run to validate user input.\n"
)
docs = rec_documentation_generator(template=template)
@@ -78,14 +83,26 @@ def document_keyword(keyword: JSONDict) -> str:
**Type** ``{2:s}``
"""
- doc = kw_fmt.format(keyword["name"], keyword["docstring"], keyword["type"])
+ doc = kw_fmt.format(
+ keyword["name"], keyword["docstring"].replace("\n", " "), keyword["type"]
+ )
if "default" in keyword.keys():
doc += """
- **Default** {}""".format(
+ **Default** ``{}``
+""".format(
keyword["default"]
)
+ if "predicates" in keyword.keys():
+ preds = "\n ".join(("- ``{}``".format(x) for x in keyword["predicates"]))
+ doc += """
+ **Predicates**
+ {}
+""".format(
+ preds
+ )
+
return doc
@@ -106,20 +123,20 @@ def rec_documentation_generator(template, *, level: int = 0) -> str:
keywords = template["keywords"] if "keywords" in template.keys() else []
if keywords:
- doc = "\n**Keywords**"
+ docs.append(indent("\n:red:`Keywords`", level))
+
for k in keywords:
- doc += document_keyword(k)
+ doc = document_keyword(k)
docs.extend(indent(doc, level))
sections = template["sections"] if "sections" in template.keys() else []
if sections:
- doc = "\n" if level == 0 else "\n\n"
- doc += "**Sections**"
+ docs.append(indent("\n:red:`Sections`", level))
fmt = r"""
:{0:s}: {1:s}
"""
for s in sections:
- doc += fmt.format(s["name"], s["docstring"])
+ doc = fmt.format(s["name"], s["docstring"].replace("\n", " "))
doc += rec_documentation_generator(s, level=level + 1)
docs.extend(indent(doc, level))
| dev-cafe/parselglossy | b28084a6ca692dd1cecc2e07a229f20d1630e162 | diff --git a/tests/api/docs_template.yml b/tests/api/docs_template.yml
index 9966f26..2425855 100644
--- a/tests/api/docs_template.yml
+++ b/tests/api/docs_template.yml
@@ -1,20 +1,60 @@
keywords:
-- docstring: Title of the calculation.
+- docstring: |
+ Title of the calculation. I had to write an extremely long documentation
+ string for this one, as it is extremely important to let you know that this
+ keyword will define the title of the calculation and, not having a default,
+ you will be required to set it!
name: title
type: str
+- docstring: Some integer
+ name: an_integer
+ type: int
+ default: 15
+- docstring: A list of floats
+ name: float_list
+ type: List[float]
+ default: [1.0, 2.0, 3.0]
+ predicates:
+ - "len(value) < 10"
+ - "max(value) < user['foo']['fooffa]['another_float']"
sections:
- docstring: brilliant
+ name: foo
keywords:
- default: Wham! Bang! Pow! Let's Rock Out!
docstring: Title of the calculation.
name: tragic
type: str
- name: foo
+ - name: a_float
+ type: float
+ docstring: A floating point number
+ predicates:
+ - "value < 35.0"
+ - "value in user['float_list']"
sections:
- docstring: A ba-bar section
+ name: Bar
keywords:
- default: Bobson Dugnutt
docstring: Title of the calculation.
name: amazing
type: str
- name: Bar
+ - name: coolio
+ type: bool
+ docstring: A cool bool
+ default: True
+- docstring: |
+ Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec luctus elit
+ ut posuere dictum. Proin ipsum libero, maximus vitae placerat a, bibendum
+ viverra ante.
+ name: fooffa
+ keywords:
+ - name: dwigt
+ docstring: An unusual name
+ type: str
+ predicates:
+ - "len(value) < 80"
+ - name: another_float
+ type: float
+ docstring: Another floating point number
+ default: "user['foo']['a_float'] * 2"
diff --git a/tests/cli/docs_template.yml b/tests/cli/docs_template.yml
index 9966f26..2425855 100644
--- a/tests/cli/docs_template.yml
+++ b/tests/cli/docs_template.yml
@@ -1,20 +1,60 @@
keywords:
-- docstring: Title of the calculation.
+- docstring: |
+ Title of the calculation. I had to write an extremely long documentation
+ string for this one, as it is extremely important to let you know that this
+ keyword will define the title of the calculation and, not having a default,
+ you will be required to set it!
name: title
type: str
+- docstring: Some integer
+ name: an_integer
+ type: int
+ default: 15
+- docstring: A list of floats
+ name: float_list
+ type: List[float]
+ default: [1.0, 2.0, 3.0]
+ predicates:
+ - "len(value) < 10"
+ - "max(value) < user['foo']['fooffa]['another_float']"
sections:
- docstring: brilliant
+ name: foo
keywords:
- default: Wham! Bang! Pow! Let's Rock Out!
docstring: Title of the calculation.
name: tragic
type: str
- name: foo
+ - name: a_float
+ type: float
+ docstring: A floating point number
+ predicates:
+ - "value < 35.0"
+ - "value in user['float_list']"
sections:
- docstring: A ba-bar section
+ name: Bar
keywords:
- default: Bobson Dugnutt
docstring: Title of the calculation.
name: amazing
type: str
- name: Bar
+ - name: coolio
+ type: bool
+ docstring: A cool bool
+ default: True
+- docstring: |
+ Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec luctus elit
+ ut posuere dictum. Proin ipsum libero, maximus vitae placerat a, bibendum
+ viverra ante.
+ name: fooffa
+ keywords:
+ - name: dwigt
+ docstring: An unusual name
+ type: str
+ predicates:
+ - "len(value) < 80"
+ - name: another_float
+ type: float
+ docstring: Another floating point number
+ default: "user['foo']['a_float'] * 2"
diff --git a/tests/ref/dwigt.rst b/tests/ref/dwigt.rst
index 6d92abd..86670c0 100644
--- a/tests/ref/dwigt.rst
+++ b/tests/ref/dwigt.rst
@@ -1,33 +1,88 @@
+.. raw:: html
+
+ <style> .red {color:#aa0060; font-weight:bold; font-size:18px} </style>
+
+.. role:: red
+
.. This documentation was autogenerated using parselglossy. Editing by hand is not recommended.
==========================================
Dwigt Rortugal's guide to input parameters
==========================================
-Keywords without a default value are **required**.
-Sections where all keywords have a default value can be omitted.
+- Keywords without a default value are **required**.
+- Default values are either explicit or computed from the value of other keywords in the input.
+- Sections where all keywords have a default value can be omitted.
+- Predicates, if present, are the functions run to validate user input.
-**Keywords**
- :title: Title of the calculation.
+:red:`Keywords`
+ :title: Title of the calculation. I had to write an extremely long documentation string for this one, as it is extremely important to let you know that this keyword will define the title of the calculation and, not having a default, you will be required to set it!
**Type** ``str``
-**Sections**
+ :an_integer: Some integer
+
+ **Type** ``int``
+
+ **Default** ``15``
+
+ :float_list: A list of floats
+
+ **Type** ``List[float]``
+
+ **Default** ``[1.0, 2.0, 3.0]``
+
+ **Predicates**
+ - ``len(value) < 10``
+ - ``max(value) < user['foo']['fooffa]['another_float']``
+
+:red:`Sections`
:foo: brilliant
- **Keywords**
+ :red:`Keywords`
:tragic: Title of the calculation.
**Type** ``str``
- **Default** Wham! Bang! Pow! Let's Rock Out!
+ **Default** ``Wham! Bang! Pow! Let's Rock Out!``
+
+ :a_float: A floating point number
+
+ **Type** ``float``
- **Sections**
+ **Predicates**
+ - ``value < 35.0``
+ - ``value in user['float_list']``
+
+ :red:`Sections`
:Bar: A ba-bar section
- **Keywords**
+ :red:`Keywords`
:amazing: Title of the calculation.
**Type** ``str``
- **Default** Bobson Dugnutt
\ No newline at end of file
+ **Default** ``Bobson Dugnutt``
+
+ :coolio: A cool bool
+
+ **Type** ``bool``
+
+ **Default** ``True``
+
+ :fooffa: Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec luctus elit ut posuere dictum. Proin ipsum libero, maximus vitae placerat a, bibendum viverra ante.
+
+ :red:`Keywords`
+ :dwigt: An unusual name
+
+ **Type** ``str``
+
+ **Predicates**
+ - ``len(value) < 80``
+
+ :another_float: Another floating point number
+
+ **Type** ``float``
+
+ **Default** ``user['foo']['a_float'] * 2``
+
\ No newline at end of file
diff --git a/tests/ref/input.rst b/tests/ref/input.rst
index 6217605..a5f9cea 100644
--- a/tests/ref/input.rst
+++ b/tests/ref/input.rst
@@ -1,33 +1,88 @@
+.. raw:: html
+
+ <style> .red {color:#aa0060; font-weight:bold; font-size:18px} </style>
+
+.. role:: red
+
.. This documentation was autogenerated using parselglossy. Editing by hand is not recommended.
================
Input parameters
================
-Keywords without a default value are **required**.
-Sections where all keywords have a default value can be omitted.
+- Keywords without a default value are **required**.
+- Default values are either explicit or computed from the value of other keywords in the input.
+- Sections where all keywords have a default value can be omitted.
+- Predicates, if present, are the functions run to validate user input.
-**Keywords**
- :title: Title of the calculation.
+:red:`Keywords`
+ :title: Title of the calculation. I had to write an extremely long documentation string for this one, as it is extremely important to let you know that this keyword will define the title of the calculation and, not having a default, you will be required to set it!
**Type** ``str``
-**Sections**
+ :an_integer: Some integer
+
+ **Type** ``int``
+
+ **Default** ``15``
+
+ :float_list: A list of floats
+
+ **Type** ``List[float]``
+
+ **Default** ``[1.0, 2.0, 3.0]``
+
+ **Predicates**
+ - ``len(value) < 10``
+ - ``max(value) < user['foo']['fooffa]['another_float']``
+
+:red:`Sections`
:foo: brilliant
- **Keywords**
+ :red:`Keywords`
:tragic: Title of the calculation.
**Type** ``str``
- **Default** Wham! Bang! Pow! Let's Rock Out!
+ **Default** ``Wham! Bang! Pow! Let's Rock Out!``
+
+ :a_float: A floating point number
+
+ **Type** ``float``
- **Sections**
+ **Predicates**
+ - ``value < 35.0``
+ - ``value in user['float_list']``
+
+ :red:`Sections`
:Bar: A ba-bar section
- **Keywords**
+ :red:`Keywords`
:amazing: Title of the calculation.
**Type** ``str``
- **Default** Bobson Dugnutt
\ No newline at end of file
+ **Default** ``Bobson Dugnutt``
+
+ :coolio: A cool bool
+
+ **Type** ``bool``
+
+ **Default** ``True``
+
+ :fooffa: Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec luctus elit ut posuere dictum. Proin ipsum libero, maximus vitae placerat a, bibendum viverra ante.
+
+ :red:`Keywords`
+ :dwigt: An unusual name
+
+ **Type** ``str``
+
+ **Predicates**
+ - ``len(value) < 80``
+
+ :another_float: Another floating point number
+
+ **Type** ``float``
+
+ **Default** ``user['foo']['a_float'] * 2``
+
\ No newline at end of file
diff --git a/tests/ref/simple.rst b/tests/ref/simple.rst
new file mode 100644
index 0000000..fab7a8c
--- /dev/null
+++ b/tests/ref/simple.rst
@@ -0,0 +1,42 @@
+.. raw:: html
+
+ <style> .red {color:#aa0060; font-weight:bold; font-size:18px} </style>
+
+.. role:: red
+
+.. This documentation was autogenerated using parselglossy. Editing by hand is not recommended.
+
+================
+Input parameters
+================
+
+- Keywords without a default value are **required**.
+- Default values are either explicit or computed from the value of other keywords in the input.
+- Sections where all keywords have a default value can be omitted.
+- Predicates, if present, are the functions run to validate user input.
+
+:red:`Keywords`
+ :title: Title of the calculation.
+
+ **Type** ``str``
+
+:red:`Sections`
+ :foo: brilliant
+
+ :red:`Keywords`
+ :tragic: Title of the calculation.
+
+ **Type** ``str``
+
+ **Default** ``Wham! Bang! Pow! Let's Rock Out!``
+
+ :red:`Sections`
+ :Bar: A ba-bar section
+
+ :red:`Keywords`
+ :amazing: Title of the calculation.
+
+ **Type** ``str``
+
+ **Default** ``Bobson Dugnutt``
+
\ No newline at end of file
diff --git a/tests/test_documentation.py b/tests/test_documentation.py
index f8d3aca..fcbb12e 100644
--- a/tests/test_documentation.py
+++ b/tests/test_documentation.py
@@ -71,7 +71,7 @@ def template():
def test_documentation(template):
- doc_ref = Path(__file__).parent / Path("ref/input.rst")
+ doc_ref = Path(__file__).parent / Path("ref/simple.rst")
with doc_ref.open("r") as ref:
stuff = ref.read().rstrip("\n")
| Repeated keywords in the autogenerated documentation
* parselglossy version: b28084a
* Python version: 3.7.0
* Operating System: Ubuntu-18.10
### What I Did
The following `template.yml`
```
keywords:
- name: foo
type: int
docstring: foo
- name: bar
type: int
docstring: bar
- name: baz
type: int
docstring: baz
```
is documented using the CLI command `parselglossy doc template.yml` to give:
```
**Keywords**
:foo: foo
**Type** ``int``
**Keywords**
:foo: foo
**Type** ``int``
:bar: bar
**Type** ``int``
**Keywords**
:foo: foo
**Type** ``int``
:bar: bar
**Type** ``int``
:baz: baz
**Type** ``int``
```
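The duplicated output above is the classic symptom of building up a single accumulator string inside the loop and emitting the partially built string on every iteration. A simplified, self-contained illustration of the faulty pattern versus the per-item fix (this is not the project's actual code, just the shape of the bug):

```python
keywords = ["foo", "bar", "baz"]

# Faulty pattern: `doc` keeps growing and the partially built string is
# emitted on every iteration, so earlier keywords (and the header) repeat.
docs = []
doc = "**Keywords**"
for k in keywords:
    doc += f"\n    :{k}: {k}"
    docs.append(doc)

# Fixed pattern: emit the header once, then document each keyword exactly once.
docs = ["**Keywords**"]
for k in keywords:
    docs.append(f"    :{k}: {k}")

print("\n".join(docs))
```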
| 0.0 | [
"tests/test_documentation.py::test_documentation"
] | [] | 2019-03-27 16:50:22+00:00 | 1,890 |
|
developmentseed__cogeo-mosaic-228 | diff --git a/CHANGES.md b/CHANGES.md
index 12763d0..1ec014f 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,6 @@
+## 7.1.0
+
+* Automatically remove/add `asset_prefix` in Mosaic Backends
## 7.0.1 (2023-10-17)
diff --git a/cogeo_mosaic/backends/base.py b/cogeo_mosaic/backends/base.py
index 4fe4f64..7cb8a91 100644
--- a/cogeo_mosaic/backends/base.py
+++ b/cogeo_mosaic/backends/base.py
@@ -236,13 +236,17 @@ class BaseBackend(BaseReader):
def get_assets(self, x: int, y: int, z: int) -> List[str]:
"""Find assets."""
quadkeys = self.find_quadkeys(Tile(x=x, y=y, z=z), self.quadkey_zoom)
- return list(
+ assets = list(
dict.fromkeys(
itertools.chain.from_iterable(
[self.mosaic_def.tiles.get(qk, []) for qk in quadkeys]
)
)
)
+ if self.mosaic_def.asset_prefix:
+ assets = [self.mosaic_def.asset_prefix + asset for asset in assets]
+
+ return assets
def find_quadkeys(self, tile: Tile, quadkey_zoom: int) -> List[str]:
"""
diff --git a/cogeo_mosaic/backends/dynamodb.py b/cogeo_mosaic/backends/dynamodb.py
index 5a1e5f8..8c14e6a 100644
--- a/cogeo_mosaic/backends/dynamodb.py
+++ b/cogeo_mosaic/backends/dynamodb.py
@@ -222,13 +222,17 @@ class DynamoDBBackend(BaseBackend):
def get_assets(self, x: int, y: int, z: int) -> List[str]:
"""Find assets."""
quadkeys = self.find_quadkeys(Tile(x=x, y=y, z=z), self.quadkey_zoom)
- return list(
+ assets = list(
dict.fromkeys(
itertools.chain.from_iterable(
[self._fetch_dynamodb(qk).get("assets", []) for qk in quadkeys]
)
)
)
+ if self.mosaic_def.asset_prefix:
+ assets = [self.mosaic_def.asset_prefix + asset for asset in assets]
+
+ return assets
@property
def _quadkeys(self) -> List[str]:
diff --git a/cogeo_mosaic/backends/sqlite.py b/cogeo_mosaic/backends/sqlite.py
index d4ae7d7..d336c9e 100644
--- a/cogeo_mosaic/backends/sqlite.py
+++ b/cogeo_mosaic/backends/sqlite.py
@@ -316,11 +316,15 @@ class SQLiteBackend(BaseBackend):
"""Find assets."""
mercator_tile = morecantile.Tile(x=x, y=y, z=z)
quadkeys = self.find_quadkeys(mercator_tile, self.quadkey_zoom)
- return list(
+ assets = list(
dict.fromkeys(
itertools.chain.from_iterable([self._fetch(qk) for qk in quadkeys])
)
)
+ if self.mosaic_def.asset_prefix:
+ assets = [self.mosaic_def.asset_prefix + asset for asset in assets]
+
+ return assets
@property
def _quadkeys(self) -> List[str]:
diff --git a/cogeo_mosaic/mosaic.py b/cogeo_mosaic/mosaic.py
index 21d0dbc..4ba709c 100644
--- a/cogeo_mosaic/mosaic.py
+++ b/cogeo_mosaic/mosaic.py
@@ -1,6 +1,7 @@
"""cogeo_mosaic.mosaic MosaicJSON models and helper functions."""
import os
+import re
import sys
import warnings
from contextlib import ExitStack
@@ -230,9 +231,16 @@ class MosaicJSON(BaseModel, validate_assignment=True):
)
if dataset:
- mosaic_definition["tiles"][quadkey] = [
- accessor(f) for f in dataset
- ]
+ assets = [accessor(f) for f in dataset]
+ if asset_prefix:
+ assets = [
+ re.sub(rf"^{asset_prefix}", "", asset)
+ if asset.startswith(asset_prefix)
+ else asset
+ for asset in assets
+ ]
+
+ mosaic_definition["tiles"][quadkey] = assets
return cls(**mosaic_definition)
| developmentseed/cogeo-mosaic | 2e12ff197f3c64bd3ba36c99ce2f27597342881a | diff --git a/tests/test_backends.py b/tests/test_backends.py
index c2e7d57..852157e 100644
--- a/tests/test_backends.py
+++ b/tests/test_backends.py
@@ -1217,3 +1217,15 @@ def test_point_crs_coordinates():
assert ptsR[0][0] == pts[0][0]
assert ptsR[0][1].crs == "epsg:3857"
assert ptsR[0][1].coordinates == (-8200051.8694, 5782905.49327)
+
+
+def test_InMemoryReader_asset_prefix():
+ """Test MemoryBackend."""
+ assets = [asset1, asset2]
+ prefix = os.path.join(os.path.dirname(__file__), "fixtures")
+ mosaicdef = MosaicJSON.from_urls(assets, quiet=False, asset_prefix=prefix)
+
+ assert mosaicdef.tiles["0302310"] == ["/cog1.tif", "/cog2.tif"]
+ with MemoryBackend(mosaic_def=mosaicdef) as mosaic:
+ assets = mosaic.assets_for_tile(150, 182, 9)
+ assert assets[0].startswith(prefix)
diff --git a/tests/test_create.py b/tests/test_create.py
index 6998267..4fb30e5 100644
--- a/tests/test_create.py
+++ b/tests/test_create.py
@@ -127,7 +127,7 @@ def test_mosaic_create_additional_metadata():
quiet=True,
tilematrixset=tms_3857,
asset_type="COG",
- asset_prefix="s3://my-bucket/",
+ asset_prefix=basepath,
data_type="uint16",
layers={
"true-color": {
@@ -137,6 +137,7 @@ def test_mosaic_create_additional_metadata():
},
)
assert mosaic.asset_type == "COG"
- assert mosaic.asset_prefix == "s3://my-bucket/"
+ assert mosaic.asset_prefix == basepath
assert mosaic.data_type == "uint16"
assert mosaic.layers["true-color"]
+ assert mosaic.tiles["0302301"] == ["/cog1.tif", "/cog2.tif"]
| add `asset_prefix` to assets
### Discussed in https://github.com/developmentseed/cogeo-mosaic/discussions/226
<div type='discussions-op-text'>
<sup>Originally posted by **scottyhq** December 6, 2023</sup>
I tried rendering the 0.0.3 example, which uses public data:
https://titiler.xyz/mosaicjson/map?url=https://raw.githubusercontent.com/developmentseed/mosaicjson-spec/main/0.0.3/example/dg_post_idai.json
But I get a lot of 500 errors. I also tried modifying the docs example to just use `"asset_prefix":"s3://noaa-eri-pds/2020_Nashville_Tornado/20200307a_RGB/"`:
https://titiler.xyz/mosaicjson/map?url=https://gist.githubusercontent.com/scottyhq/4c288a2dbf7b97ef7833c5e0ee19d6d6/raw/636083cf7117a96e38d271eeef932a3ee507d7d1/NOAA_Nashville_Tornado_v3.json
So I suspect titiler by default doesn't have logic to pick up the `asset_prefix`
</div> | 0.0 | [
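A minimal sketch of the round trip the backends need for `asset_prefix` to work: strip the prefix when the mosaic is created so only relative asset names are stored, and re-attach it when assets are resolved for a tile (which is what the patch above adds). The function names and the asset filename below are illustrative; only the prefix is taken from the issue text.

```python
import re

def strip_prefix(assets, prefix):
    """Store assets relative to the prefix when the MosaicJSON is built."""
    return [re.sub(rf"^{re.escape(prefix)}", "", a) if a.startswith(prefix) else a
            for a in assets]

def apply_prefix(assets, prefix):
    """Re-attach the prefix when assets are resolved for a tile."""
    return [prefix + a for a in assets] if prefix else list(assets)

prefix = "s3://noaa-eri-pds/2020_Nashville_Tornado/20200307a_RGB/"
stored = strip_prefix([prefix + "some-image.tif"], prefix)   # ['some-image.tif']
print(apply_prefix(stored, prefix))                          # full URL restored
```

With that round trip in place, the stored MosaicJSON stays compact while readers such as titiler still receive fully qualified asset URLs.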
"tests/test_backends.py::test_InMemoryReader_asset_prefix",
"tests/test_create.py::test_mosaic_create_additional_metadata"
] | [
"tests/test_backends.py::test_file_backend",
"tests/test_backends.py::test_http_backend",
"tests/test_backends.py::test_s3_backend",
"tests/test_backends.py::test_gs_backend",
"tests/test_backends.py::test_dynamoDB_backend",
"tests/test_backends.py::test_stac_backend",
"tests/test_backends.py::test_stac_search",
"tests/test_backends.py::test_stac_accessor",
"tests/test_backends.py::test_mosaic_crud_error[file:///path/to/mosaic.json]",
"tests/test_backends.py::test_mosaic_crud_error[https://developmentseed.org/cogeo-mosaic/amosaic.json.gz]",
"tests/test_backends.py::test_InMemoryReader",
"tests/test_backends.py::test_sqlite_backend",
"tests/test_backends.py::test_tms_and_coordinates",
"tests/test_backends.py::test_point_crs_coordinates",
"tests/test_create.py::test_mosaic_create",
"tests/test_create.py::test_mosaic_create_tms"
] | 2023-12-06 19:13:07+00:00 | 1,891 |
|
devopshq__artifactory-439 | diff --git a/artifactory.py b/artifactory.py
index 4325018..69bc540 100755
--- a/artifactory.py
+++ b/artifactory.py
@@ -410,13 +410,13 @@ def quote_url(url):
parsed_url = urllib3.util.parse_url(url)
if parsed_url.port:
quoted_path = requests.utils.quote(
- url.rpartition(f"{parsed_url.host}:{parsed_url.port}")[2]
+ url.partition(f"{parsed_url.host}:{parsed_url.port}")[2]
)
quoted_url = (
f"{parsed_url.scheme}://{parsed_url.host}:{parsed_url.port}{quoted_path}"
)
else:
- quoted_path = requests.utils.quote(url.rpartition(parsed_url.host)[2])
+ quoted_path = requests.utils.quote(url.partition(parsed_url.host)[2])
quoted_url = f"{parsed_url.scheme}://{parsed_url.host}{quoted_path}"
return quoted_url
| devopshq/artifactory | 2e9274273aefba744accb3d5a012c4dbdb111b49 | diff --git a/tests/unit/test_artifactory_path.py b/tests/unit/test_artifactory_path.py
index f2f57d9..16e528d 100644
--- a/tests/unit/test_artifactory_path.py
+++ b/tests/unit/test_artifactory_path.py
@@ -100,6 +100,10 @@ class ArtifactoryFlavorTest(unittest.TestCase):
check(
"https://example.com/artifactory/foo", "https://example.com/artifactory/foo"
)
+ check(
+ "https://example.com/artifactory/foo/example.com/bar",
+ "https://example.com/artifactory/foo/example.com/bar"
+ )
check(
"https://example.com/artifactory/foo/#1",
"https://example.com/artifactory/foo/%231",
| path.stat() doesn't handle ArtifactoryPath() properly
Hi,
Here is my environment:
````
python 3.8.4
dohq-artifactory 0.8.4
````
I want to get the stats of 2 files. I have a piece of code looking like this:
````python
from artifactory import ArtifactoryPath
path1 = ArtifactoryPath(
"https://artifactory.domain.com/artifactory/repo-docker-local/rdl/rd-image/0.2.0/manifest.json", apikey=token
)
path2 = ArtifactoryPath(
"https://artifactory.domain.com/artifactory/repo-docker-local/rdl/artifactory.domain.com/repo-docker/rd/rd-image/0.2.0/manifest.json", apikey=token
)
# Get FileStat
stat1 = path1.stat() # works
print(stat1)
stat2 = path2.stat() # error
print(stat2)
````
As you can see, the path in `path2` contains the artifactory host `artifactory.domain.com` twice.
In the error message below I can see it tries to fetch the file using only half of the real path: it drops everything before the second `artifactory.domain.com`.
````bash
$ python tests.py --token=****
ArtifactoryFileStat(ctime='[...]', mtime='[...]', created_by='[...]', modified_by='[...]') # print(stat1), shortened for legibility
Traceback (most recent call last):
File "[...]/.env/lib/python3.8/site-packages/dohq_artifactory/exception.py", line 20, in raise_for_status
response.raise_for_status()
File "[...]/.env/lib/python3.8/site-packages/requests/models.py", line 1021, in raise_for_status
raise HTTPError(http_error_msg, response=self)
requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://artifactory.domain.com/repo-docker/rd/rd-image/0.2.0/manifest.json
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "tests.py", line 35, in <module>
stats2 = path2.stat()
File "[...]/.env/lib/python3.8/site-packages/artifactory.py", line 1610, in stat
return self._accessor.stat(pathobj=pathobj)
File "[...]/.env/lib/python3.8/site-packages/artifactory.py", line 878, in stat
jsn = self.get_stat_json(pathobj)
File "[...]/.env/lib/python3.8/site-packages/artifactory.py", line 869, in get_stat_json
raise_for_status(response)
File "[...]/.env/lib/python3.8/site-packages/dohq_artifactory/exception.py", line 28, in raise_for_status
raise ArtifactoryException(str(exception)) from exception
dohq_artifactory.exception.ArtifactoryException: 404 Client Error: Not Found for url: https://artifactory.domain.com/repo-docker/rd/rd-image/0.2.0/manifest.json
````
I cannot change the file names; that is outside my control.
To identify where the problem comes from, I tried the same thing using the JFrog Web API, and it works:
````bash
$ curl -u user:passwd https://artifactory.domain.com/artifactory/api/storage/repo-docker-local/rdl/artifactory.domain.com/repo-docker/rdl/rd-image/0.2.0 -v
* About to connect() to artifactory.domain.com port <port> (#0)
* Trying <ip>...
* Connected to artifactory.domain.com (<ip>) port <port> (#0)
* [...]
> GET /artifactory/api/storage/repo-docker-local/rdl/artifactory.domain.com/repo-docker/rdl/rd-image/0.2.0 HTTP/1.1
> Authorization: Basic xxxx
> User-Agent: <user_agent>
> Host: artifactory.domain.com
> Accept: */*
>
< HTTP/1.1 200
< Date: Wed, 17 May 2023 11:45:28 GMT
< Content-Type: application/vnd.org.jfrog.artifactory.storage.FolderInfo+json
< Transfer-Encoding: chunked
< Connection: keep-alive
< X-JFrog-Version: <jfrog_version>
< X-Artifactory-Id: <artifactory_id>
< X-Artifactory-Node-Id: <artifactory_node_id>
< Cache-Control: no-store
< Set-Cookie: <cookie>
<
{
"repo" : "repo-docker-local",
"path" : "/rdl/artifactory.domain.com/repo-docker/rdl/rd-image/0.2.0",
"created" : "<created>",
"createdBy" : "<createdBy>",
"lastModified" : "<lastModified>",
"modifiedBy" : "<modifiedBy>",
"lastUpdated" : "<lastUpdated>",
"children" : [{
"uri" : "/manifest.json",
"folder" : false,
[...]
}],
"uri" : "https://artifactory.domain.com:<port>/artifactory/api/storage/repo-docker-local/rdl/artifactory.domain.com/repo-docker/rdl/rd-image/0.2.0"
}
* Connection #0 to host artifactory.domain.com left intact
````
To me, the problem comes from how the `stat()` method parses an ArtifactoryPath.
Do you have any solutions?
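The root cause (and what the patch above changes) is how `quote_url` splits the URL on the hostname: `str.rpartition` splits on the *last* occurrence of the host, so a path that repeats the hostname loses everything before that second occurrence, while `str.partition` splits on the first occurrence and preserves the full repository path. A small standalone illustration using only the standard string methods:

```python
url = ("https://artifactory.domain.com/artifactory/repo-docker-local/rdl/"
       "artifactory.domain.com/repo-docker/rd/rd-image/0.2.0/manifest.json")
host = "artifactory.domain.com"

# rpartition splits on the LAST occurrence of the host, dropping the real path:
print(url.rpartition(host)[2])
# -> /repo-docker/rd/rd-image/0.2.0/manifest.json

# partition splits on the FIRST occurrence and keeps the whole repository path:
print(url.partition(host)[2])
# -> /artifactory/repo-docker-local/rdl/artifactory.domain.com/repo-docker/rd/rd-image/0.2.0/manifest.json
```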
| 0.0 | [
"tests/unit/test_artifactory_path.py::ArtifactoryFlavorTest::test_quote_url"
] | [
"tests/unit/test_artifactory_path.py::UtilTest::test_checksum",
"tests/unit/test_artifactory_path.py::UtilTest::test_escape_chars",
"tests/unit/test_artifactory_path.py::UtilTest::test_matrix_encode",
"tests/unit/test_artifactory_path.py::UtilTest::test_properties_encode",
"tests/unit/test_artifactory_path.py::UtilTest::test_properties_encode_multi",
"tests/unit/test_artifactory_path.py::ArtifactoryFlavorTest::test_parse_parts",
"tests/unit/test_artifactory_path.py::ArtifactoryFlavorTest::test_special_characters",
"tests/unit/test_artifactory_path.py::ArtifactoryFlavorTest::test_splitroot",
"tests/unit/test_artifactory_path.py::ArtifactoryFlavorTest::test_splitroot_custom_drv",
"tests/unit/test_artifactory_path.py::ArtifactoryFlavorTest::test_splitroot_custom_root",
"tests/unit/test_artifactory_path.py::PureArtifactoryPathTest::test_anchor",
"tests/unit/test_artifactory_path.py::PureArtifactoryPathTest::test_join_endswith_slash",
"tests/unit/test_artifactory_path.py::PureArtifactoryPathTest::test_join_endswithout_slash",
"tests/unit/test_artifactory_path.py::PureArtifactoryPathTest::test_join_with_artifactory_folder",
"tests/unit/test_artifactory_path.py::PureArtifactoryPathTest::test_join_with_multiple_folder_and_artifactory_substr_in_it",
"tests/unit/test_artifactory_path.py::PureArtifactoryPathTest::test_join_with_repo",
"tests/unit/test_artifactory_path.py::PureArtifactoryPathTest::test_join_with_repo_folder",
"tests/unit/test_artifactory_path.py::PureArtifactoryPathTest::test_root",
"tests/unit/test_artifactory_path.py::PureArtifactoryPathTest::test_with_suffix",
"tests/unit/test_artifactory_path.py::ArtifactoryAccessorTest::test_get_properties",
"tests/unit/test_artifactory_path.py::ArtifactoryAccessorTest::test_listdir",
"tests/unit/test_artifactory_path.py::ArtifactoryAccessorTest::test_mkdir",
"tests/unit/test_artifactory_path.py::ArtifactoryAccessorTest::test_set_properties",
"tests/unit/test_artifactory_path.py::ArtifactoryAccessorTest::test_set_properties_without_remove",
"tests/unit/test_artifactory_path.py::ArtifactoryAccessorTest::test_stat",
"tests/unit/test_artifactory_path.py::ArtifactoryAccessorTest::test_stat_no_sha256",
"tests/unit/test_artifactory_path.py::ArtifactoryAccessorTest::test_unlink",
"tests/unit/test_artifactory_path.py::ArtifactoryAccessorTest::test_unlink_raises_not_found",
"tests/unit/test_artifactory_path.py::ArtifactoryAccessorTest::test_unlink_raises_on_404",
"tests/unit/test_artifactory_path.py::ArtifactoryPathTest::test_archive",
"tests/unit/test_artifactory_path.py::ArtifactoryPathTest::test_archive_download",
"tests/unit/test_artifactory_path.py::ArtifactoryPathTest::test_auth",
"tests/unit/test_artifactory_path.py::ArtifactoryPathTest::test_auth_inheritance",
"tests/unit/test_artifactory_path.py::ArtifactoryPathTest::test_basic",
"tests/unit/test_artifactory_path.py::ArtifactoryPathTest::test_deploy_by_checksum_error",
"tests/unit/test_artifactory_path.py::ArtifactoryPathTest::test_deploy_by_checksum_sha1",
"tests/unit/test_artifactory_path.py::ArtifactoryPathTest::test_deploy_by_checksum_sha1_or_sha256",
"tests/unit/test_artifactory_path.py::ArtifactoryPathTest::test_deploy_by_checksum_sha256",
"tests/unit/test_artifactory_path.py::ArtifactoryPathTest::test_deploy_deb",
"tests/unit/test_artifactory_path.py::ArtifactoryPathTest::test_deploy_file",
"tests/unit/test_artifactory_path.py::ArtifactoryPathTest::test_joinpath_repo",
"tests/unit/test_artifactory_path.py::ArtifactoryPathTest::test_path_in_repo",
"tests/unit/test_artifactory_path.py::ArtifactoryPathTest::test_repo",
"tests/unit/test_artifactory_path.py::ArtifactorySaaSPathTest::test_basic",
"tests/unit/test_artifactory_path.py::ArtifactorySaaSPathTest::test_drive",
"tests/unit/test_artifactory_path.py::ArtifactorySaaSPathTest::test_path_in_repo",
"tests/unit/test_artifactory_path.py::ArtifactorySaaSPathTest::test_repo",
"tests/unit/test_artifactory_path.py::TestArtifactoryConfig::test_artifactory_config",
"tests/unit/test_artifactory_path.py::TestArtifactoryAql::test_create_aql_text_list",
"tests/unit/test_artifactory_path.py::TestArtifactoryAql::test_create_aql_text_list_in_dict",
"tests/unit/test_artifactory_path.py::TestArtifactoryAql::test_create_aql_text_simple",
"tests/unit/test_artifactory_path.py::TestArtifactoryAql::test_from_aql_file",
"tests/unit/test_artifactory_path.py::TestArtifactoryPathGetAll::test_get_groups",
"tests/unit/test_artifactory_path.py::TestArtifactoryPathGetAll::test_get_groups_lazy",
"tests/unit/test_artifactory_path.py::TestArtifactoryPathGetAll::test_get_projects",
"tests/unit/test_artifactory_path.py::TestArtifactoryPathGetAll::test_get_projects_lazy",
"tests/unit/test_artifactory_path.py::TestArtifactoryPathGetAll::test_get_users",
"tests/unit/test_artifactory_path.py::TestArtifactoryPathGetAll::test_get_users_lazy"
] | 2023-12-29 09:24:40+00:00 | 1,892 |