instance_id | patch | repo | base_commit | hints_text | test_patch | problem_statement | version | environment_setup_commit | FAIL_TO_PASS | PASS_TO_PASS | meta | created_at | license | __index_level_0__
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dwavesystems__dwave-cloud-client-289
|
diff --git a/dwave/cloud/client.py b/dwave/cloud/client.py
index bf97812..7aa7d1e 100644
--- a/dwave/cloud/client.py
+++ b/dwave/cloud/client.py
@@ -53,7 +53,7 @@ import warnings
import operator
import collections
from itertools import chain
-from functools import partial
+from functools import partial, wraps
from dateutil.parser import parse as parse_datetime
from plucky import pluck
@@ -734,24 +734,32 @@ class Client(object):
return set(tuple(x) for x in iterable)
return set(iterable)
+ def with_valid_lhs(op):
+ @wraps(op)
+ def _wrapper(prop, val):
+ if prop is None:
+ return False
+ return op(prop, val)
+ return _wrapper
+
# available filtering operators
ops = {
- 'lt': operator.lt,
- 'lte': operator.le,
- 'gt': operator.gt,
- 'gte': operator.ge,
+ 'lt': with_valid_lhs(operator.lt),
+ 'lte': with_valid_lhs(operator.le),
+ 'gt': with_valid_lhs(operator.gt),
+ 'gte': with_valid_lhs(operator.ge),
'eq': operator.eq,
'available': lambda prop, val: prop is not None if val else prop is None,
- 'regex': lambda prop, val: re.match("^{}$".format(val), prop),
+ 'regex': with_valid_lhs(lambda prop, val: re.match("^{}$".format(val), prop)),
# range operations
- 'covers': covers_op,
- 'within': within_op,
+ 'covers': with_valid_lhs(covers_op),
+ 'within': with_valid_lhs(within_op),
# membership tests
'in': lambda prop, val: prop in val,
- 'contains': lambda prop, val: val in prop,
+ 'contains': with_valid_lhs(lambda prop, val: val in prop),
# set tests
- 'issubset': lambda prop, val: _set(prop).issubset(_set(val)),
- 'issuperset': lambda prop, val: _set(prop).issuperset(_set(val)),
+ 'issubset': with_valid_lhs(lambda prop, val: _set(prop).issubset(_set(val))),
+ 'issuperset': with_valid_lhs(lambda prop, val: _set(prop).issuperset(_set(val))),
}
# features available as `Solver` attribute/properties
|
dwavesystems/dwave-cloud-client
|
307046b5423cf8279c6c982ddc57c1bd272bf0d0
|
diff --git a/tests/test_client.py b/tests/test_client.py
index aa0e8f8..a62e25e 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -278,7 +278,11 @@ class FeatureBasedSolverSelection(unittest.TestCase):
"num_reads_range": [0, 1000],
"parameters": {"num_reads": "Number of samples to return."},
"vfyc": False,
- "avg_load": 0.7
+ # the following are only present in this solver
+ "avg_load": 0.7,
+ "some_set": [1, 2],
+ "some_range": [1, 2],
+ "some_string": "x"
},
"id": "c4-sw_solver3",
"description": "A test of software solver"
@@ -364,6 +368,16 @@ class FeatureBasedSolverSelection(unittest.TestCase):
self.assertSolvers(self.client.get_solvers(num_qubits__lte=7), self.solvers)
self.assertSolvers(self.client.get_solvers(num_qubits__lt=7), [self.solver1, self.solver2])
+ # skip solver if LHS value not defined (None)
+ self.assertSolvers(self.client.get_solvers(avg_load__gt=0), [self.solver3])
+ self.assertSolvers(self.client.get_solvers(avg_load__gte=0), [self.solver3])
+ self.assertSolvers(self.client.get_solvers(avg_load__lt=1), [self.solver3])
+ self.assertSolvers(self.client.get_solvers(avg_load__lte=1), [self.solver3])
+ self.assertSolvers(self.client.get_solvers(avg_load=0.7), [self.solver3])
+ self.assertSolvers(self.client.get_solvers(avg_load__eq=0.7), [self.solver3])
+ self.assertSolvers(self.client.get_solvers(avg_load=None), [self.solver1, self.solver2])
+ self.assertSolvers(self.client.get_solvers(avg_load__eq=None), [self.solver1, self.solver2])
+
def test_range_ops(self):
# value within range
self.assertSolvers(self.client.get_solvers(num_qubits__within=[3, 7]), self.solvers)
@@ -376,6 +390,9 @@ class FeatureBasedSolverSelection(unittest.TestCase):
self.assertSolvers(self.client.get_solvers(num_reads_range__within=(0, 500)), [self.solver1, self.solver2])
self.assertSolvers(self.client.get_solvers(num_reads_range__within=(1, 500)), [])
+ # invalid LHS
+ self.assertSolvers(self.client.get_solvers(some_range__within=[0, 2]), [self.solver3])
+
# range covering a value (value included in range)
self.assertSolvers(self.client.get_solvers(num_reads_range__covers=0), self.solvers)
self.assertSolvers(self.client.get_solvers(num_reads_range__covers=150), [self.solver2, self.solver3])
@@ -387,6 +404,9 @@ class FeatureBasedSolverSelection(unittest.TestCase):
self.assertSolvers(self.client.get_solvers(num_reads_range__covers=(10, 90)), self.solvers)
self.assertSolvers(self.client.get_solvers(num_reads_range__covers=(110, 200)), [self.solver2, self.solver3])
+ # invalid LHS
+ self.assertSolvers(self.client.get_solvers(some_range__covers=1.5), [self.solver3])
+
def test_membership_ops(self):
# property contains
self.assertSolvers(self.client.get_solvers(supported_problem_types__contains="qubo"), self.solvers)
@@ -399,6 +419,11 @@ class FeatureBasedSolverSelection(unittest.TestCase):
self.assertSolvers(self.client.get_solvers(num_qubits__in=[7]), [self.solver3])
self.assertSolvers(self.client.get_solvers(num_qubits__in=[]), [])
+ # invalid LHS
+ self.assertSolvers(self.client.get_solvers(some_set__contains=1), [self.solver3])
+ self.assertSolvers(self.client.get_solvers(avg_load__in=[None]), [self.solver1, self.solver2])
+ self.assertSolvers(self.client.get_solvers(avg_load__in=[None, 0.7]), self.solvers)
+
def test_set_ops(self):
# property issubset
self.assertSolvers(self.client.get_solvers(supported_problem_types__issubset=("qubo", "ising", "other")), self.solvers)
@@ -418,12 +443,19 @@ class FeatureBasedSolverSelection(unittest.TestCase):
self.assertSolvers(self.client.get_solvers(couplers__issuperset={(0, 1), (0, 2), (2, 3)}), [self.solver2])
self.assertSolvers(self.client.get_solvers(couplers__issuperset={(0, 1), (0, 2), (2, 3), (0, 5)}), [])
+ # invalid LHS
+ self.assertSolvers(self.client.get_solvers(some_set__issubset={0, 1, 2}), [self.solver3])
+ self.assertSolvers(self.client.get_solvers(some_set__issuperset={1}), [self.solver3])
+
def test_regex(self):
self.assertSolvers(self.client.get_solvers(num_reads__regex='.*number.*'), [])
self.assertSolvers(self.client.get_solvers(num_reads__regex='.*Number.*'), self.solvers)
self.assertSolvers(self.client.get_solvers(num_reads__regex='Number.*'), self.solvers)
self.assertSolvers(self.client.get_solvers(num_reads__regex='Number'), [])
+ # invalid LHS
+ self.assertSolvers(self.client.get_solvers(some_string__regex='x'), [self.solver3])
+
def test_range_boolean_combo(self):
self.assertSolvers(self.client.get_solvers(num_qubits=3, vfyc=True), [])
self.assertSolvers(self.client.get_solvers(num_qubits__gte=3, vfyc=True), [self.solver2])
|
Solver filtering failing for some ops with nonexistent properties
For example, relational and set operators will fail when applied to a property that a particular solver does not have:
```
$ dwave solvers -s '{"max_anneal_schedule_points__gt": 2}'
Traceback (most recent call last):
File "/home/radomir/work/dwave-cloud-client/env3/bin/dwave", line 11, in <module>
load_entry_point('dwave-cloud-client', 'console_scripts', 'dwave')()
File "/home/radomir/work/dwave-cloud-client/env3/lib/python3.5/site-packages/click/core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "/home/radomir/work/dwave-cloud-client/env3/lib/python3.5/site-packages/click/core.py", line 697, in main
rv = self.invoke(ctx)
File "/home/radomir/work/dwave-cloud-client/env3/lib/python3.5/site-packages/click/core.py", line 1066, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/radomir/work/dwave-cloud-client/env3/lib/python3.5/site-packages/click/core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/radomir/work/dwave-cloud-client/env3/lib/python3.5/site-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/home/radomir/work/dwave-cloud-client/dwave/cloud/cli.py", line 314, in solvers
solvers = client.get_solvers(**client.default_solver)
File "/home/radomir/work/dwave-cloud-client/dwave/cloud/client.py", line 820, in get_solvers
solvers = [s for s in solvers if all(p(s) for p in predicates)]
File "/home/radomir/work/dwave-cloud-client/dwave/cloud/client.py", line 820, in <listcomp>
solvers = [s for s in solvers if all(p(s) for p in predicates)]
File "/home/radomir/work/dwave-cloud-client/dwave/cloud/client.py", line 820, in <genexpr>
solvers = [s for s in solvers if all(p(s) for p in predicates)]
File "/home/radomir/work/dwave-cloud-client/dwave/cloud/client.py", line 780, in predicate
return op(None, val)
TypeError: unorderable types: NoneType() > int()
```
Some operators should accept `None` as an LHS value (`eq`, `available`, `in`), but for others (relational, set) an LHS of `None` doesn't make much sense.
Proposal: patch the affected operators/predicates to short-circuit to `False` if the LHS is `None`.
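A minimal standalone sketch of the proposed short-circuit, in the spirit of the `with_valid_lhs` decorator from the patch above:
```
import operator
from functools import wraps

def with_valid_lhs(op):
    """Wrap a binary predicate so it returns False when the LHS is None."""
    @wraps(op)
    def _wrapper(prop, val):
        if prop is None:
            return False
        return op(prop, val)
    return _wrapper

gt = with_valid_lhs(operator.gt)
assert gt(3, 2) is True
assert gt(None, 2) is False   # short-circuits instead of raising TypeError
```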
|
0.0
|
307046b5423cf8279c6c982ddc57c1bd272bf0d0
|
[
"tests/test_client.py::FeatureBasedSolverSelection::test_membership_ops",
"tests/test_client.py::FeatureBasedSolverSelection::test_range_ops",
"tests/test_client.py::FeatureBasedSolverSelection::test_regex",
"tests/test_client.py::FeatureBasedSolverSelection::test_relational_ops",
"tests/test_client.py::FeatureBasedSolverSelection::test_set_ops"
] |
[
"tests/test_client.py::ClientFactory::test_client_type",
"tests/test_client.py::ClientFactory::test_custom_kwargs",
"tests/test_client.py::ClientFactory::test_custom_kwargs_overrides_config",
"tests/test_client.py::ClientFactory::test_default",
"tests/test_client.py::ClientFactory::test_legacy_config_load_fallback",
"tests/test_client.py::ClientFactory::test_solver_features_from_config",
"tests/test_client.py::ClientFactory::test_solver_features_kwargs_override_config",
"tests/test_client.py::ClientFactory::test_solver_name_from_config",
"tests/test_client.py::ClientFactory::test_solver_name_overrides_config_features",
"tests/test_client.py::FeatureBasedSolverSelection::test_anneal_schedule",
"tests/test_client.py::FeatureBasedSolverSelection::test_availability_combo",
"tests/test_client.py::FeatureBasedSolverSelection::test_default",
"tests/test_client.py::FeatureBasedSolverSelection::test_name",
"tests/test_client.py::FeatureBasedSolverSelection::test_num_qubits",
"tests/test_client.py::FeatureBasedSolverSelection::test_online",
"tests/test_client.py::FeatureBasedSolverSelection::test_order_by_callable",
"tests/test_client.py::FeatureBasedSolverSelection::test_order_by_edgecases",
"tests/test_client.py::FeatureBasedSolverSelection::test_order_by_string",
"tests/test_client.py::FeatureBasedSolverSelection::test_parameter_availability_check",
"tests/test_client.py::FeatureBasedSolverSelection::test_property_availability_check",
"tests/test_client.py::FeatureBasedSolverSelection::test_qpu_software",
"tests/test_client.py::FeatureBasedSolverSelection::test_range_boolean_combo",
"tests/test_client.py::FeatureBasedSolverSelection::test_solvers_deprecation"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2018-12-21 02:35:57+00:00
|
apache-2.0
| 2,035 |
|
dwavesystems__dwave-cloud-client-307
|
diff --git a/dwave/cloud/client.py b/dwave/cloud/client.py
index 7008cd9..d468257 100644
--- a/dwave/cloud/client.py
+++ b/dwave/cloud/client.py
@@ -602,9 +602,10 @@ class Client(object):
See `Filtering forms` and `Operators` below.
Solver filters are defined, similarly to Django QuerySet filters, with
- keyword arguments of form `<name>__<operator>=<value>`. Each `<operator>`
- is a predicate (boolean) function that acts on two arguments: value of
- feature `<name>` and the required `<value>`.
+ keyword arguments of form `<key1>__...__<keyN>[__<operator>]=<value>`.
+ Each `<operator>` is a predicate (boolean) function that acts on two
+ arguments: value of feature `<name>` (described with keys path
+ `<key1.key2...keyN>`) and the required `<value>`.
Feature `<name>` can be:
@@ -613,6 +614,7 @@ class Client(object):
`num_active_qubits`, `avg_load`)
2) a solver parameter, available in :obj:`Solver.parameters`
3) a solver property, available in :obj:`Solver.properties`
+ 4) a path describing a property in nested dictionaries
Filtering forms are:
@@ -727,7 +729,8 @@ class Client(object):
# require Ising, QUBO or both to be supported
name='DW_2000Q_3', # full solver name/ID match
name__regex='.*2000.*', # partial/regex-based solver name match
- chip_id__regex='DW_.*' # chip ID prefix must be DW_
+ chip_id__regex='DW_.*', # chip ID prefix must be DW_
+ topology__type__eq="chimera" # topology.type must be chimera
)
"""
@@ -792,18 +795,38 @@ class Client(object):
'issuperset': with_valid_lhs(lambda prop, val: _set(prop).issuperset(_set(val))),
}
- def predicate(solver, name, opname, val):
- if name in solver.derived_properties:
- op = ops[opname or 'eq']
- return op(getattr(solver, name), val)
- elif name in solver.parameters:
- op = ops[opname or 'available']
- return op(solver.parameters[name], val)
- elif name in solver.properties:
- op = ops[opname or 'eq']
- return op(solver.properties[name], val)
+ def predicate(solver, query, val):
+ # needs to handle kwargs like these:
+ # key=val
+ # key__op=val
+ # key__key=val
+ # key__key__op=val
+ # LHS is split on __ in `query`
+ assert len(query) >= 1
+
+ potential_path, potential_op_name = query[:-1], query[-1]
+
+ if potential_op_name in ops:
+ # op is explicit, and potential path is correct
+ op_name = potential_op_name
+ else:
+ # op is implied and depends on property type, path is the whole query
+ op_name = None
+ potential_path = query
+
+ path = '.'.join(potential_path)
+
+ if path in solver.derived_properties:
+ op = ops[op_name or 'eq']
+ return op(getattr(solver, path), val)
+ elif pluck(solver.parameters, path, None) is not None:
+ op = ops[op_name or 'available']
+ return op(pluck(solver.parameters, path), val)
+ elif pluck(solver.properties, path, None) is not None:
+ op = ops[op_name or 'eq']
+ return op(pluck(solver.properties, path), val)
else:
- op = ops[opname or 'eq']
+ op = ops[op_name or 'eq']
return op(None, val)
# param validation
@@ -828,8 +851,8 @@ class Client(object):
predicates = []
for lhs, val in filters.items():
- propname, opname = (lhs.rsplit('__', 1) + [None])[:2]
- predicates.append(partial(predicate, name=propname, opname=opname, val=val))
+ query = lhs.split('__')
+ predicates.append(partial(predicate, query=query, val=val))
_LOGGER.debug("Filtering solvers with predicates=%r", predicates)
|
dwavesystems/dwave-cloud-client
|
71aae7acfcabac87d0fcd54032307fd02dc4ae50
|
diff --git a/tests/test_client.py b/tests/test_client.py
index 9d2bae4..f6c3a75 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -246,6 +246,10 @@ class FeatureBasedSolverSelection(unittest.TestCase):
"parameters": {
"num_reads": "Number of samples to return.",
"postprocess": "either 'sampling' or 'optimization'"
+ },
+ "topology": {
+ "type": "chimera",
+ "shape": [16, 16, 4]
}
},
"id": "solver1",
@@ -264,6 +268,10 @@ class FeatureBasedSolverSelection(unittest.TestCase):
"flux_biases": "Supported ...",
"anneal_schedule": "Supported ..."
},
+ "topology": {
+ "type": "pegasus",
+ "shape": [6, 6, 12]
+ },
"vfyc": True
},
"id": "solver2",
@@ -278,6 +286,10 @@ class FeatureBasedSolverSelection(unittest.TestCase):
"num_reads_range": [0, 1000],
"parameters": {"num_reads": "Number of samples to return."},
"vfyc": False,
+ "topology": {
+ "type": "chimera",
+ "shape": [4, 4, 4]
+ },
# the following are only present in this solver
"some_set": [1, 2],
"some_range": [1, 2],
@@ -463,6 +475,17 @@ class FeatureBasedSolverSelection(unittest.TestCase):
self.assertSolvers(self.client.get_solvers(num_qubits__within=(3, 6), flux_biases=True), [self.solver2])
self.assertSolvers(self.client.get_solvers(num_qubits=5, flux_biases=True), [self.solver2])
+ def test_nested_properties_leaf_lookup(self):
+ self.assertSolvers(self.client.get_solvers(topology__type="chimera"), [self.solver1, self.solver3])
+ self.assertSolvers(self.client.get_solvers(topology__type="pegasus"), [self.solver2])
+ self.assertSolvers(self.client.get_solvers(topology__type__eq="pegasus"), [self.solver2])
+ self.assertSolvers(self.client.get_solvers(topology__shape=[6,6,12]), [self.solver2])
+ self.assertSolvers(self.client.get_solvers(topology__type="chimera", topology__shape__contains=16), [self.solver1])
+
+ def test_nested_properties_intermediate_key_lookup(self):
+ self.assertSolvers(self.client.get_solvers(topology__contains="shape"), self.solvers)
+ self.assertSolvers(self.client.get_solvers(topology={"type": "pegasus", "shape": [6, 6, 12]}), [self.solver2])
+
def test_anneal_schedule(self):
self.assertSolvers(self.client.get_solvers(anneal_schedule__available=True), [self.solver2])
self.assertSolvers(self.client.get_solvers(anneal_schedule=True), [self.solver2])
|
Support nested property dictionaries in solver filtering kwargs
To select solvers with, e.g., the `{"topology": {"type": "chimera"}}` property, we'll say:
```
client.get_solvers(topology__type="chimera")
```
or
```
client.get_solvers(topology__type__eq="chimera")
```
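A minimal sketch of the key-path lookup this implies (the actual patch splits the kwarg on `__` and resolves the path with `plucky.pluck`; this plain-dict descent is only illustrative):
```
def pluck_path(mapping, path, default=None):
    """Descend into nested dicts following a dot-separated key path."""
    obj = mapping
    for key in path.split('.'):
        if not (isinstance(obj, dict) and key in obj):
            return default
        obj = obj[key]
    return obj

properties = {"topology": {"type": "chimera", "shape": [16, 16, 4]}}
# the kwarg topology__type resolves to the path "topology.type"
assert pluck_path(properties, "topology.type") == "chimera"
```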
|
0.0
|
71aae7acfcabac87d0fcd54032307fd02dc4ae50
|
[
"tests/test_client.py::FeatureBasedSolverSelection::test_nested_properties_leaf_lookup"
] |
[
"tests/test_client.py::ClientFactory::test_client_type",
"tests/test_client.py::ClientFactory::test_custom_kwargs",
"tests/test_client.py::ClientFactory::test_custom_kwargs_overrides_config",
"tests/test_client.py::ClientFactory::test_default",
"tests/test_client.py::ClientFactory::test_legacy_config_load_fallback",
"tests/test_client.py::ClientFactory::test_solver_features_from_config",
"tests/test_client.py::ClientFactory::test_solver_features_kwargs_override_config",
"tests/test_client.py::ClientFactory::test_solver_name_from_config",
"tests/test_client.py::ClientFactory::test_solver_name_overrides_config_features",
"tests/test_client.py::FeatureBasedSolverSelection::test_anneal_schedule",
"tests/test_client.py::FeatureBasedSolverSelection::test_availability_combo",
"tests/test_client.py::FeatureBasedSolverSelection::test_default",
"tests/test_client.py::FeatureBasedSolverSelection::test_membership_ops",
"tests/test_client.py::FeatureBasedSolverSelection::test_name",
"tests/test_client.py::FeatureBasedSolverSelection::test_nested_properties_intermediate_key_lookup",
"tests/test_client.py::FeatureBasedSolverSelection::test_num_qubits",
"tests/test_client.py::FeatureBasedSolverSelection::test_online",
"tests/test_client.py::FeatureBasedSolverSelection::test_order_by_callable",
"tests/test_client.py::FeatureBasedSolverSelection::test_order_by_edgecases",
"tests/test_client.py::FeatureBasedSolverSelection::test_order_by_string",
"tests/test_client.py::FeatureBasedSolverSelection::test_parameter_availability_check",
"tests/test_client.py::FeatureBasedSolverSelection::test_property_availability_check",
"tests/test_client.py::FeatureBasedSolverSelection::test_qpu_software",
"tests/test_client.py::FeatureBasedSolverSelection::test_range_boolean_combo",
"tests/test_client.py::FeatureBasedSolverSelection::test_range_ops",
"tests/test_client.py::FeatureBasedSolverSelection::test_regex",
"tests/test_client.py::FeatureBasedSolverSelection::test_relational_ops",
"tests/test_client.py::FeatureBasedSolverSelection::test_set_ops",
"tests/test_client.py::FeatureBasedSolverSelection::test_solvers_deprecation"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-04-11 01:23:44+00:00
|
apache-2.0
| 2,036 |
|
dwavesystems__dwave-cloud-client-309
|
diff --git a/dwave/cloud/solver.py b/dwave/cloud/solver.py
index 35b6221..d04eeec 100644
--- a/dwave/cloud/solver.py
+++ b/dwave/cloud/solver.py
@@ -143,7 +143,8 @@ class Solver(object):
# Derived solver properties (not present in solver data properties dict)
self.derived_properties = {
- 'qpu', 'software', 'online', 'num_active_qubits', 'avg_load', 'name'
+ 'qpu', 'software', 'online', 'num_active_qubits', 'avg_load', 'name',
+ 'lower_noise'
}
def __repr__(self):
@@ -219,6 +220,10 @@ class Solver(object):
"Nominal number of qubits on chip (includes active AND inactive)."
return self.properties.get('num_qubits')
+ @property
+ def lower_noise(self):
+ return "lower_noise" in self.properties.get("tags", [])
+
def max_num_reads(self, **params):
"""Returns the maximum number of reads for the given solver parameters.
|
dwavesystems/dwave-cloud-client
|
67a993249845daa8a794d601be779b97c30296f6
|
diff --git a/tests/test_client.py b/tests/test_client.py
index f6c3a75..009e167 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -250,7 +250,8 @@ class FeatureBasedSolverSelection(unittest.TestCase):
"topology": {
"type": "chimera",
"shape": [16, 16, 4]
- }
+ },
+ "tags": ["lower_noise"]
},
"id": "solver1",
"description": "A test solver 1",
@@ -293,7 +294,8 @@ class FeatureBasedSolverSelection(unittest.TestCase):
# the following are only present in this solver
"some_set": [1, 2],
"some_range": [1, 2],
- "some_string": "x"
+ "some_string": "x",
+ "tags": ["tag"]
},
"id": "c4-sw_solver3",
"description": "A test of software solver",
@@ -339,6 +341,10 @@ class FeatureBasedSolverSelection(unittest.TestCase):
self.assertSolvers(self.client.get_solvers(num_active_qubits=2), [self.solver3])
self.assertSolvers(self.client.get_solvers(num_active_qubits__in=[2, 3]), [self.solver1, self.solver3])
+ def test_lower_noise_derived_property(self):
+ self.assertSolvers(self.client.get_solvers(lower_noise=True), [self.solver1])
+ self.assertSolvers(self.client.get_solvers(lower_noise=False), [self.solver2, self.solver3])
+
def test_parameter_availability_check(self):
self.assertSolvers(self.client.get_solvers(postprocess__available=True), [self.solver1])
self.assertSolvers(self.client.get_solvers(postprocess=True), [self.solver1])
|
Expose lower_noise solver property
Create a derived property based on the solver's `tags`.
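A standalone sketch of the tag-derived property, mirroring the patch above (the minimal `Solver` stand-in here is hypothetical):
```
class Solver(object):
    def __init__(self, properties):
        self.properties = properties

    @property
    def lower_noise(self):
        # derived property: True iff the solver advertises the "lower_noise" tag
        return "lower_noise" in self.properties.get("tags", [])

assert Solver({"tags": ["lower_noise"]}).lower_noise is True
assert Solver({}).lower_noise is False
```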
|
0.0
|
67a993249845daa8a794d601be779b97c30296f6
|
[
"tests/test_client.py::FeatureBasedSolverSelection::test_lower_noise_derived_property"
] |
[
"tests/test_client.py::ClientFactory::test_client_type",
"tests/test_client.py::ClientFactory::test_custom_kwargs",
"tests/test_client.py::ClientFactory::test_custom_kwargs_overrides_config",
"tests/test_client.py::ClientFactory::test_default",
"tests/test_client.py::ClientFactory::test_legacy_config_load_fallback",
"tests/test_client.py::ClientFactory::test_solver_features_from_config",
"tests/test_client.py::ClientFactory::test_solver_features_kwargs_override_config",
"tests/test_client.py::ClientFactory::test_solver_name_from_config",
"tests/test_client.py::ClientFactory::test_solver_name_overrides_config_features",
"tests/test_client.py::FeatureBasedSolverSelection::test_anneal_schedule",
"tests/test_client.py::FeatureBasedSolverSelection::test_availability_combo",
"tests/test_client.py::FeatureBasedSolverSelection::test_default",
"tests/test_client.py::FeatureBasedSolverSelection::test_membership_ops",
"tests/test_client.py::FeatureBasedSolverSelection::test_name",
"tests/test_client.py::FeatureBasedSolverSelection::test_nested_properties_intermediate_key_lookup",
"tests/test_client.py::FeatureBasedSolverSelection::test_nested_properties_leaf_lookup",
"tests/test_client.py::FeatureBasedSolverSelection::test_num_qubits",
"tests/test_client.py::FeatureBasedSolverSelection::test_online",
"tests/test_client.py::FeatureBasedSolverSelection::test_order_by_callable",
"tests/test_client.py::FeatureBasedSolverSelection::test_order_by_edgecases",
"tests/test_client.py::FeatureBasedSolverSelection::test_order_by_string",
"tests/test_client.py::FeatureBasedSolverSelection::test_parameter_availability_check",
"tests/test_client.py::FeatureBasedSolverSelection::test_property_availability_check",
"tests/test_client.py::FeatureBasedSolverSelection::test_qpu_software",
"tests/test_client.py::FeatureBasedSolverSelection::test_range_boolean_combo",
"tests/test_client.py::FeatureBasedSolverSelection::test_range_ops",
"tests/test_client.py::FeatureBasedSolverSelection::test_regex",
"tests/test_client.py::FeatureBasedSolverSelection::test_relational_ops",
"tests/test_client.py::FeatureBasedSolverSelection::test_set_ops",
"tests/test_client.py::FeatureBasedSolverSelection::test_solvers_deprecation"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-04-11 21:52:15+00:00
|
apache-2.0
| 2,037 |
|
dwavesystems__dwave-cloud-client-325
|
diff --git a/dwave/cloud/client.py b/dwave/cloud/client.py
index ff37d7e..886eb16 100644
--- a/dwave/cloud/client.py
+++ b/dwave/cloud/client.py
@@ -1030,18 +1030,22 @@ class Client(object):
def _handle_problem_status(self, message, future):
"""Handle the results of a problem submission or results request.
- This method checks the status of the problem and puts it in the correct queue.
+ This method checks the status of the problem and puts it in the correct
+ queue.
Args:
- message (dict): Update message from the SAPI server wrt. this problem.
- future `Future`: future corresponding to the problem
+ message (dict):
+ Update message from the SAPI server wrt. this problem.
+ future (:class:`dwave.cloud.computation.Future`:
+ future corresponding to the problem
Note:
This method is always run inside of a daemon thread.
"""
try:
logger.trace("Handling response: %r", message)
- logger.debug("Handling response for %s with status %s", message.get('id'), message.get('status'))
+ logger.debug("Handling response for %s with status %s",
+ message.get('id'), message.get('status'))
# Handle errors in batch mode
if 'error_code' in message and 'error_msg' in message:
@@ -1273,6 +1277,7 @@ class Client(object):
else:
logger.trace("Skipping non-positive delay of %.2f sec", delay)
+ # execute and handle the polling request
try:
logger.trace("Executing poll API request")
@@ -1283,11 +1288,27 @@ class Client(object):
if response.status_code == 401:
raise SolverAuthenticationError()
- response.raise_for_status()
- statuses = response.json()
- for status in statuses:
- self._handle_problem_status(status, frame_futures[status['id']])
+ # assume 5xx errors are transient, and don't abort polling
+ if 500 <= response.status_code < 600:
+ logger.warning(
+ "Received an internal server error response on "
+ "problem status polling request (%s). Assuming "
+ "error is transient, and resuming polling.",
+ response.status_code)
+ # add all futures in this frame back to the polling queue
+ # XXX: logic split between `_handle_problem_status` and here
+ for future in frame_futures.values():
+ self._poll(future)
+
+ else:
+ # otherwise, fail
+ response.raise_for_status()
+
+ # or handle a successful request
+ statuses = response.json()
+ for status in statuses:
+ self._handle_problem_status(status, frame_futures[status['id']])
except BaseException as exception:
if not isinstance(exception, SolverAuthenticationError):
|
dwavesystems/dwave-cloud-client
|
0364541298402a481562060759d3716abd220c6a
|
diff --git a/tests/test_mock_submission.py b/tests/test_mock_submission.py
index eb7ae73..1b6ecb2 100644
--- a/tests/test_mock_submission.py
+++ b/tests/test_mock_submission.py
@@ -21,11 +21,13 @@ import json
import unittest
import itertools
import threading
+import collections
from datetime import datetime, timedelta
from dateutil.tz import UTC
from dateutil.parser import parse as parse_datetime
from requests.structures import CaseInsensitiveDict
+from requests.exceptions import HTTPError
from dwave.cloud.utils import evaluate_ising, generate_random_ising_problem
from dwave.cloud.client import Client, Solver
@@ -149,17 +151,24 @@ def continue_reply(id_, solver_name, now=None, eta_min=None, eta_max=None):
return json.dumps(resp)
-def choose_reply(path, replies, date=None):
+def choose_reply(path, replies, statuses=None, date=None):
"""Choose the right response based on the path and make a mock response."""
+ if statuses is None:
+ statuses = collections.defaultdict(lambda: iter([200]))
+
if date is None:
date = datetime_in_future(0)
if path in replies:
response = mock.Mock(['json', 'raise_for_status', 'headers'])
- response.status_code = 200
+ response.status_code = next(statuses[path])
response.json.side_effect = lambda: json.loads(replies[path])
response.headers = CaseInsensitiveDict({'Date': date.isoformat()})
+ def raise_for_status():
+ if not 200 <= response.status_code < 300:
+ raise HTTPError(response.status_code)
+ response.raise_for_status = raise_for_status
return response
else:
raise NotImplementedError(path)
@@ -475,6 +484,47 @@ class MockSubmission(_QueryTest):
future = solver.sample_qubo({})
future.result()
+ # Reduce the number of poll and submission threads so that the system can be tested
+ @mock.patch.object(Client, "_POLL_THREAD_COUNT", 1)
+ @mock.patch.object(Client, "_SUBMISSION_THREAD_COUNT", 1)
+ def test_polling_recovery_after_5xx(self):
+ "Polling shouldn't be aborted on 5xx responses."
+
+ with Client('endpoint', 'token') as client:
+ client.session = mock.Mock()
+ # on submit, return status pending
+ client.session.post = lambda path, _: choose_reply(path, {
+ 'endpoint/problems/': '[%s]' % continue_reply('123', 'abc123')
+ })
+ # on first and second status poll, fail with 503 and 504
+ # on third status poll, return completed
+ statuses = iter([503, 504])
+ def continue_then_complete(path, state={'count': 0}):
+ state['count'] += 1
+ if state['count'] < 3:
+ return choose_reply(path, replies={
+ 'endpoint/problems/?id=123': '[%s]' % continue_reply('123', 'abc123'),
+ 'endpoint/problems/123/': continue_reply('123', 'abc123')
+ }, statuses={
+ 'endpoint/problems/?id=123': statuses,
+ 'endpoint/problems/123/': statuses
+ })
+ else:
+ return choose_reply(path, {
+ 'endpoint/problems/?id=123': '[%s]' % complete_no_answer_reply('123', 'abc123'),
+ 'endpoint/problems/123/': complete_reply('123', 'abc123')
+ })
+
+ client.session.get = continue_then_complete
+
+ solver = Solver(client, solver_data('abc123'))
+
+ future = solver.sample_qubo({})
+ future.result()
+
+ # after third poll, back-off interval should be 4 x initial back-off
+ self.assertEqual(future._poll_backoff, Client._POLL_BACKOFF_MIN * 2**2)
+
class DeleteEvent(Exception):
"""Throws exception when mocked client submits an HTTP DELETE request."""
|
Assume 5xx responses during problem polling are transient errors; don't abort polling
Although this assumption can be wrong, as a first approximation we can treat server-side errors (5xx) as temporary, meaning we should not stop polling, but rather retry later.
At the same time, we assume client-side errors (4xx) are not recoverable by retrying.
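A minimal sketch of the dispatch logic; `requeue` and `handle_statuses` are hypothetical callbacks standing in for the client's polling queue and status handler:
```
def handle_poll_response(response, requeue, handle_statuses):
    """Route a polling response: 5xx is retried later, 4xx is fatal."""
    if 500 <= response.status_code < 600:
        # server-side error: assumed transient, so poll again later
        requeue()
        return
    # client-side (4xx) and other errors: assumed unrecoverable
    response.raise_for_status()
    handle_statuses(response.json())
```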
|
0.0
|
0364541298402a481562060759d3716abd220c6a
|
[
"tests/test_mock_submission.py::MockSubmission::test_polling_recovery_after_5xx"
] |
[
"tests/test_mock_submission.py::MockSubmission::test_eta_min_is_ignored_on_first_poll",
"tests/test_mock_submission.py::MockSubmission::test_exponential_backoff_polling",
"tests/test_mock_submission.py::MockSubmission::test_immediate_polling_with_local_clock_unsynced",
"tests/test_mock_submission.py::MockSubmission::test_immediate_polling_without_eta_min",
"tests/test_mock_submission.py::MockSubmission::test_submit_cancel_reply",
"tests/test_mock_submission.py::MockSubmission::test_submit_continue_then_error_reply",
"tests/test_mock_submission.py::MockSubmission::test_submit_continue_then_ok_and_error_reply",
"tests/test_mock_submission.py::MockSubmission::test_submit_continue_then_ok_reply",
"tests/test_mock_submission.py::MockSubmission::test_submit_error_reply",
"tests/test_mock_submission.py::MockSubmission::test_submit_immediate_error_reply",
"tests/test_mock_submission.py::MockSubmission::test_submit_null_reply",
"tests/test_mock_submission.py::MockSubmission::test_submit_ok_reply",
"tests/test_mock_submission.py::MockCancel::test_cancel_with_id",
"tests/test_mock_submission.py::MockCancel::test_cancel_without_id"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2019-09-02 19:58:13+00:00
|
apache-2.0
| 2,038 |
|
dwavesystems__dwave-cloud-client-335
|
diff --git a/dwave/cloud/client.py b/dwave/cloud/client.py
index b13cb8e..4055220 100644
--- a/dwave/cloud/client.py
+++ b/dwave/cloud/client.py
@@ -114,6 +114,9 @@ class Client(object):
connection_close (bool, default=False):
Force HTTP(S) connection close after each request.
+ headers (dict/str):
+ Additional HTTP headers.
+
Other Parameters:
Unrecognized keys (str):
All unrecognized keys are passed through to the appropriate client class constructor
@@ -183,7 +186,7 @@ class Client(object):
@classmethod
def from_config(cls, config_file=None, profile=None, client=None,
endpoint=None, token=None, solver=None, proxy=None,
- legacy_config_fallback=False, **kwargs):
+ headers=None, legacy_config_fallback=False, **kwargs):
"""Client factory method to instantiate a client instance from configuration.
Configuration values can be specified in multiple ways, ranked in the following
@@ -211,8 +214,9 @@ class Client(object):
file is not explicitly specified, detected on the system, or defined via
an environment variable.
- Environment variables: ``DWAVE_CONFIG_FILE``, ``DWAVE_PROFILE``, ``DWAVE_API_CLIENT``,
- ``DWAVE_API_ENDPOINT``, ``DWAVE_API_TOKEN``, ``DWAVE_API_SOLVER``, ``DWAVE_API_PROXY``.
+ Environment variables: ``DWAVE_CONFIG_FILE``, ``DWAVE_PROFILE``,
+ ``DWAVE_API_CLIENT``, ``DWAVE_API_ENDPOINT``, ``DWAVE_API_TOKEN``,
+ ``DWAVE_API_SOLVER``, ``DWAVE_API_PROXY``, ``DWAVE_API_HEADERS``.
Environment variables are described in :mod:`dwave.cloud.config`.
@@ -268,6 +272,10 @@ class Client(object):
username/password, port, scheme, etc. If undefined, client
uses the system-level proxy, if defined, or connects directly to the API.
+ headers (dict/str, default=None):
+ Newline-separated additional HTTP headers to include with each
+ API request, or a dictionary of (key, value) pairs.
+
legacy_config_fallback (bool, default=False):
If True and loading from a standard D-Wave Cloud Client configuration
file (``dwave.conf``) fails, tries loading a legacy configuration file (``~/.dwrc``).
@@ -292,7 +300,6 @@ class Client(object):
Config file parse failed.
Examples:
-
A variety of examples are given in :mod:`dwave.cloud.config`.
This example initializes :class:`~dwave.cloud.client.Client` from an
@@ -309,7 +316,8 @@ class Client(object):
# (`./dwave.conf`, `~/.config/dwave/dwave.conf`, etc)
config = load_config(
config_file=config_file, profile=profile, client=client,
- endpoint=endpoint, token=token, solver=solver, proxy=proxy)
+ endpoint=endpoint, token=token, solver=solver, proxy=proxy,
+ headers=headers)
logger.debug("Config loaded: %r", config)
# fallback to legacy `.dwrc` if key variables missing
@@ -319,8 +327,8 @@ class Client(object):
if not config.get('token'):
config = legacy_load_config(
- profile=profile, client=client,
- endpoint=endpoint, token=token, solver=solver, proxy=proxy)
+ profile=profile, client=client, endpoint=endpoint,
+ token=token, solver=solver, proxy=proxy, headers=headers)
logger.debug("Legacy config loaded: %r", config)
# manual override of other (client-custom) arguments
@@ -335,7 +343,7 @@ class Client(object):
def __init__(self, endpoint=None, token=None, solver=None, proxy=None,
permissive_ssl=False, request_timeout=60, polling_timeout=None,
- connection_close=False, **kwargs):
+ connection_close=False, headers=None, **kwargs):
"""To setup the connection a pipeline of queues/workers is constructed.
There are five interactions with the server the connection manages:
@@ -357,16 +365,18 @@ class Client(object):
logger.debug(
"Creating a client for (endpoint=%r, token=%r, solver=%r, proxy=%r, "
- "permissive_ssl=%r, request_timeout=%r, polling_timeout=%r, **kwargs=%r)",
- endpoint, token, solver, proxy, permissive_ssl, request_timeout, polling_timeout, kwargs
+ "permissive_ssl=%r, request_timeout=%r, polling_timeout=%r, "
+ "connection_close=%r, headers=%r, **kwargs=%r)",
+ endpoint, token, solver, proxy,
+ permissive_ssl, request_timeout, polling_timeout,
+ connection_close, headers, kwargs
)
+ # parse solver
if not solver:
solver_def = {}
-
elif isinstance(solver, collections.Mapping):
solver_def = solver
-
elif isinstance(solver, six.string_types):
# support features dict encoded as JSON in our config INI file
# TODO: push this decoding to the config module, once we switch to a
@@ -379,9 +389,27 @@ class Client(object):
# features dict (equality constraint on full solver name)
logger.debug("Invalid solver JSON, assuming string name: %r", solver)
solver_def = dict(name__eq=solver)
-
else:
raise ValueError("Expecting a features dictionary or a string name for 'solver'")
+ logger.debug("Parsed solver=%r", solver_def)
+
+ # parse headers
+ if not headers:
+ headers_dict = {}
+ elif isinstance(headers, collections.Mapping):
+ headers_dict = headers
+ elif isinstance(headers, six.string_types):
+ try:
+ # valid headers = "Field-1: value-1\nField-2: value-2"
+ headers_dict = {key.strip(): val.strip()
+ for key, val in [line.split(':')
+ for line in headers.strip().split('\n')]}
+ except Exception as e:
+ logger.debug("Invalid headers: %r", headers)
+ headers_dict = {}
+ else:
+ raise ValueError("HTTP headers expected in a dict, or a string")
+ logger.debug("Parsed headers=%r", headers_dict)
# Store connection/session parameters
self.endpoint = endpoint
@@ -392,6 +420,7 @@ class Client(object):
self.polling_timeout = parse_float(polling_timeout)
self.proxy = proxy
+ self.headers = headers_dict
self.permissive_ssl = permissive_ssl
self.connection_close = connection_close
@@ -456,6 +485,8 @@ class Client(object):
session = BaseUrlSession(base_url=endpoint)
session.mount('http://', TimeoutingHTTPAdapter(timeout=self.request_timeout))
session.mount('https://', TimeoutingHTTPAdapter(timeout=self.request_timeout))
+ if self.headers:
+ session.headers.update(self.headers)
session.headers.update({'X-Auth-Token': self.token,
'User-Agent': user_agent(__packagename__, __version__)})
session.proxies = {'http': self.proxy, 'https': self.proxy}
diff --git a/dwave/cloud/config.py b/dwave/cloud/config.py
index cf26496..7bdaf70 100644
--- a/dwave/cloud/config.py
+++ b/dwave/cloud/config.py
@@ -59,6 +59,8 @@ Environment variables:
``DWAVE_API_PROXY``: URL for proxy connections to D-Wave API.
+ ``DWAVE_API_HEADERS``: Optional additional HTTP headers.
+
Examples:
The following are typical examples of using :func:`~dwave.cloud.client.Client.from_config`
to create a configured client.
@@ -195,12 +197,14 @@ Examples:
{'client': u'sw',
'endpoint': u'https://url.of.some.dwavesystem.com/sapi',
'proxy': None,
+ 'headers': None,
'solver': 'EXAMPLE_2000Q_SYSTEM',
'token': u'ABC-123456789123456789123456789'}
>>> dc.config.load_config("./dwave_c.conf", profile='dw2000b', solver='Solver3') # doctest: +SKIP
{'client': u'qpu',
'endpoint': u'https://url.of.some.dwavesystem.com/sapi',
'proxy': None,
+ 'headers': None,
'solver': 'Solver3',
'token': u'DEF-987654321987654321987654321'}
@@ -540,18 +544,18 @@ def load_profile_from_files(filenames=None, profile=None):
The following example code loads profile values from parsing both these files,
by default loading the first profile encountered or an explicitly specified profile.
- >>> import dwave.cloud as dc
- >>> dc.config.load_profile_from_files(["./dwave_a.conf", "./dwave_b.conf"]) # doctest: +SKIP
- {'client': u'sw',
- 'endpoint': u'https://url.of.some.dwavesystem.com/sapi',
- 'solver': u'EXAMPLE_2000Q_SYSTEM_A',
- 'token': u'DEF-987654321987654321987654321'}
- >>> dc.config.load_profile_from_files(["./dwave_a.conf", "./dwave_b.conf"],
- ... profile='dw2000b') # doctest: +SKIP
- {'client': u'qpu',
- 'endpoint': u'https://url.of.some.other.dwavesystem.com/sapi',
- 'solver': u'EXAMPLE_2000Q_SYSTEM_B',
- 'token': u'ABC-123456789123456789123456789'}
+ >>> from dwave.cloud import config
+ >>> config.load_profile_from_files(["./dwave_a.conf", "./dwave_b.conf"]) # doctest: +SKIP
+ {'client': 'sw',
+ 'endpoint': 'https://url.of.some.dwavesystem.com/sapi',
+ 'solver': 'EXAMPLE_2000Q_SYSTEM_A',
+ 'token': 'DEF-987654321987654321987654321'}
+ >>> config.load_profile_from_files(["./dwave_a.conf", "./dwave_b.conf"],
+ ... profile='dw2000b') # doctest: +SKIP
+ {'client': 'qpu',
+ 'endpoint': 'https://url.of.some.other.dwavesystem.com/sapi',
+ 'solver': 'EXAMPLE_2000Q_SYSTEM_B',
+ 'token': 'ABC-123456789123456789123456789'}
"""
@@ -617,7 +621,8 @@ def get_default_config():
def load_config(config_file=None, profile=None, client=None,
- endpoint=None, token=None, solver=None, proxy=None):
+ endpoint=None, token=None, solver=None,
+ proxy=None, headers=None):
"""Load D-Wave Cloud Client configuration based on a configuration file.
Configuration values can be specified in multiple ways, ranked in the following
@@ -648,13 +653,13 @@ def load_config(config_file=None, profile=None, client=None,
file is not explicitly specified, detected on the system, or defined via
an environment variable.
- Environment variables: ``DWAVE_CONFIG_FILE``, ``DWAVE_PROFILE``, ``DWAVE_API_CLIENT``,
- ``DWAVE_API_ENDPOINT``, ``DWAVE_API_TOKEN``, ``DWAVE_API_SOLVER``, ``DWAVE_API_PROXY``.
+ Environment variables: ``DWAVE_CONFIG_FILE``, ``DWAVE_PROFILE``,
+ ``DWAVE_API_CLIENT``, ``DWAVE_API_ENDPOINT``, ``DWAVE_API_TOKEN``,
+ ``DWAVE_API_SOLVER``, ``DWAVE_API_PROXY``, ``DWAVE_API_HEADERS``.
Environment variables are described in :mod:`dwave.cloud.config`.
Args:
-
config_file (str/[str]/None/False/True, default=None):
Path to configuration file(s).
@@ -689,7 +694,7 @@ def load_config(config_file=None, profile=None, client=None,
token (str, default=None):
API authorization token.
- solver (str, default=None):
+ solver (dict/str, default=None):
:term:`solver` features, as a JSON-encoded dictionary of feature constraints,
the client should use. See :meth:`~dwave.cloud.client.Client.get_solvers` for
semantics of supported feature constraints.
@@ -705,13 +710,16 @@ def load_config(config_file=None, profile=None, client=None,
username/password, port, scheme, etc. If undefined, client
uses the system-level proxy, if defined, or connects directly to the API.
+ headers (dict/str, default=None):
+ Header lines to include in API calls, each line formatted as
+ ``Key: value``, or a parsed dictionary.
+
Returns:
dict:
- Mapping of configuration keys to values for the profile
- (section), as read from the configuration file and optionally overridden by
- environment values and specified keyword arguments.
- Always contains the `client`, `endpoint`, `token`, `solver`, and `proxy`
- keys.
+ Mapping of configuration keys to values for the profile (section),
+ as read from the configuration file and optionally overridden by
+ environment values and specified keyword arguments. Always contains
+ the `client`, `endpoint`, `token`, `solver`, and `proxy` keys.
Raises:
:exc:`ValueError`:
@@ -727,16 +735,17 @@ def load_config(config_file=None, profile=None, client=None,
This example loads the configuration from an auto-detected configuration file
in the home directory of a Windows system user.
- >>> import dwave.cloud as dc
- >>> dc.config.load_config()
- {'client': u'qpu',
- 'endpoint': u'https://url.of.some.dwavesystem.com/sapi',
+ >>> from dwave.cloud import config
+ >>> config.load_config()
+ {'client': 'qpu',
+ 'endpoint': 'https://url.of.some.dwavesystem.com/sapi',
'proxy': None,
- 'solver': u'EXAMPLE_2000Q_SYSTEM_A',
- 'token': u'DEF-987654321987654321987654321'}
+ 'solver': 'EXAMPLE_2000Q_SYSTEM_A',
+ 'token': 'DEF-987654321987654321987654321',
+ 'headers': None}
>>> See which configuration file was loaded
- >>> dc.config.get_configfile_paths()
- [u'C:\\Users\\jane\\AppData\\Local\\dwavesystem\\dwave\\dwave.conf']
+ >>> config.get_configfile_paths()
+ ['C:\\Users\\jane\\AppData\\Local\\dwavesystem\\dwave\\dwave.conf']
Additional examples are given in :mod:`dwave.cloud.config`.
@@ -774,6 +783,7 @@ def load_config(config_file=None, profile=None, client=None,
section['token'] = token or os.getenv("DWAVE_API_TOKEN", section.get('token'))
section['solver'] = solver or os.getenv("DWAVE_API_SOLVER", section.get('solver'))
section['proxy'] = proxy or os.getenv("DWAVE_API_PROXY", section.get('proxy'))
+ section['headers'] = headers or os.getenv("DWAVE_API_HEADERS", section.get('headers'))
return section
|
dwavesystems/dwave-cloud-client
|
ee8aae92ac10be096ed1a34cec384afcc0c6e5a8
|
diff --git a/tests/test_client.py b/tests/test_client.py
index 009e167..1b5145c 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -230,6 +230,35 @@ class ClientFactory(unittest.TestCase):
with dwave.cloud.Client.from_config(solver='solver') as client:
self.assertEqual(client.default_solver, {"name__eq": "solver"})
+ def test_headers_from_config(self):
+ headers_dict = {"key-1": "value-1", "key-2": "value-2"}
+ headers_str = """ key-1:value-1
+ key-2: value-2
+ """
+ conf = dict(token='token', headers=headers_str)
+
+ with mock.patch("dwave.cloud.client.load_config", lambda **kwargs: conf):
+ with dwave.cloud.Client.from_config() as client:
+ self.assertDictEqual(client.headers, headers_dict)
+
+ def test_headers_from_kwargs(self):
+ headers_dict = {"key-1": "value-1", "key-2": "value-2"}
+ headers_str = "key-2:value-2\nkey-1:value-1"
+ conf = dict(token='token')
+
+ def load_config(**kwargs):
+ return merge(kwargs, conf, op=lambda a, b: a or b)
+
+ # headers as dict
+ with mock.patch("dwave.cloud.client.load_config", load_config):
+ with dwave.cloud.Client.from_config(headers=headers_dict) as client:
+ self.assertDictEqual(client.headers, headers_dict)
+
+ # headers as str
+ with mock.patch("dwave.cloud.client.load_config", load_config):
+ with dwave.cloud.Client.from_config(headers=headers_str) as client:
+ self.assertDictEqual(client.headers, headers_dict)
+
class FeatureBasedSolverSelection(unittest.TestCase):
"""Test Client.get_solvers(**filters)."""
diff --git a/tests/test_config.py b/tests/test_config.py
index 20e4914..2b34bea 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -47,6 +47,8 @@ class TestConfig(unittest.TestCase):
endpoint = https://url.to.alpha/api
proxy = http://user:[email protected]:8080/
token = alpha-token
+ headers = key-1:value-1
+ key-2: value-2
"""
def parse_config_string(self, text):
@@ -127,6 +129,8 @@ class TestConfig(unittest.TestCase):
self.assertEqual(config['endpoint'], "https://url.to.alpha/api")
# default values are inherited
self.assertEqual(config['client'], "qpu")
+ # multi-line values are read
+ self.assertEqual(config['headers'], "key-1:value-1\nkey-2: value-2")
def _load_config_from_files(self, asked, provided=None, data=None):
self.assertEqual(asked, provided)
@@ -210,6 +214,7 @@ class TestConfig(unittest.TestCase):
self.assertEqual(load_config(client='manual')['client'], 'manual')
self.assertEqual(load_config(solver='manual')['solver'], 'manual')
self.assertEqual(load_config(proxy='manual')['proxy'], 'manual')
+ self.assertEqual(load_config(headers='headers')['headers'], 'headers')
def test_config_load__profile_arg_nonexisting(self):
"""load_config should fail if the profile specified in kwargs or env in
@@ -332,3 +337,25 @@ class TestConfig(unittest.TestCase):
m.assert_has_calls([mock.call('file1', 'r'), mock.call('file2', 'r')])
self.assertEqual(section['endpoint'], 'alpha')
self.assertEqual(section['solver'], 'DW_2000Q_2')
+
+ def test_config_load_env_override(self):
+ with mock.patch("dwave.cloud.config.load_config_from_files",
+ partial(self._load_config_from_files, data=u"", provided=['myfile'])):
+
+ with mock.patch.dict(os.environ, {'DWAVE_API_CLIENT': 'test'}):
+ self.assertEqual(load_config(config_file='myfile')['client'], 'test')
+
+ with mock.patch.dict(os.environ, {'DWAVE_API_ENDPOINT': 'test'}):
+ self.assertEqual(load_config(config_file='myfile')['endpoint'], 'test')
+
+ with mock.patch.dict(os.environ, {'DWAVE_API_TOKEN': 'test'}):
+ self.assertEqual(load_config(config_file='myfile')['token'], 'test')
+
+ with mock.patch.dict(os.environ, {'DWAVE_API_SOLVER': 'test'}):
+ self.assertEqual(load_config(config_file='myfile')['solver'], 'test')
+
+ with mock.patch.dict(os.environ, {'DWAVE_API_PROXY': 'test'}):
+ self.assertEqual(load_config(config_file='myfile')['proxy'], 'test')
+
+ with mock.patch.dict(os.environ, {'DWAVE_API_HEADERS': 'test'}):
+ self.assertEqual(load_config(config_file='myfile')['headers'], 'test')
|
Add support for custom headers
It's useful during development and testing to be able to pass in a custom HTTP header with every API call.
Add support for a `headers` config parameter (config file and constructor) and a `DWAVE_API_HEADERS` environment variable.
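A minimal sketch of the header normalization this implies, accepting either a dict or newline-separated `Key: value` lines as in the patch above (this sketch splits on the first colon only, so values may themselves contain colons):
```
def parse_headers(headers):
    """Normalize headers given as a dict or as "Key: value" lines."""
    if not headers:
        return {}
    if isinstance(headers, dict):
        return headers
    return {key.strip(): val.strip()
            for key, val in (line.split(':', 1)
                             for line in headers.strip().split('\n'))}

assert parse_headers("key-1:value-1\nkey-2: value-2") == \
    {"key-1": "value-1", "key-2": "value-2"}
```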
|
0.0
|
ee8aae92ac10be096ed1a34cec384afcc0c6e5a8
|
[
"tests/test_client.py::ClientFactory::test_headers_from_config",
"tests/test_client.py::ClientFactory::test_headers_from_kwargs",
"tests/test_config.py::TestConfig::test_config_load_configfile_env_profile_env_key_arg",
"tests/test_config.py::TestConfig::test_config_load_env_override"
] |
[
"tests/test_client.py::ClientFactory::test_client_type",
"tests/test_client.py::ClientFactory::test_custom_kwargs",
"tests/test_client.py::ClientFactory::test_custom_kwargs_overrides_config",
"tests/test_client.py::ClientFactory::test_default",
"tests/test_client.py::ClientFactory::test_legacy_config_load_fallback",
"tests/test_client.py::ClientFactory::test_solver_features_from_config",
"tests/test_client.py::ClientFactory::test_solver_features_kwargs_override_config",
"tests/test_client.py::ClientFactory::test_solver_name_from_config",
"tests/test_client.py::ClientFactory::test_solver_name_overrides_config_features",
"tests/test_client.py::FeatureBasedSolverSelection::test_anneal_schedule",
"tests/test_client.py::FeatureBasedSolverSelection::test_availability_combo",
"tests/test_client.py::FeatureBasedSolverSelection::test_default",
"tests/test_client.py::FeatureBasedSolverSelection::test_lower_noise_derived_property",
"tests/test_client.py::FeatureBasedSolverSelection::test_membership_ops",
"tests/test_client.py::FeatureBasedSolverSelection::test_name",
"tests/test_client.py::FeatureBasedSolverSelection::test_nested_properties_intermediate_key_lookup",
"tests/test_client.py::FeatureBasedSolverSelection::test_nested_properties_leaf_lookup",
"tests/test_client.py::FeatureBasedSolverSelection::test_num_qubits",
"tests/test_client.py::FeatureBasedSolverSelection::test_online",
"tests/test_client.py::FeatureBasedSolverSelection::test_order_by_callable",
"tests/test_client.py::FeatureBasedSolverSelection::test_order_by_edgecases",
"tests/test_client.py::FeatureBasedSolverSelection::test_order_by_string",
"tests/test_client.py::FeatureBasedSolverSelection::test_parameter_availability_check",
"tests/test_client.py::FeatureBasedSolverSelection::test_property_availability_check",
"tests/test_client.py::FeatureBasedSolverSelection::test_qpu_software",
"tests/test_client.py::FeatureBasedSolverSelection::test_range_boolean_combo",
"tests/test_client.py::FeatureBasedSolverSelection::test_range_ops",
"tests/test_client.py::FeatureBasedSolverSelection::test_regex",
"tests/test_client.py::FeatureBasedSolverSelection::test_relational_ops",
"tests/test_client.py::FeatureBasedSolverSelection::test_set_ops",
"tests/test_client.py::FeatureBasedSolverSelection::test_solvers_deprecation",
"tests/test_config.py::TestConfig::test_config_file_detection_cwd",
"tests/test_config.py::TestConfig::test_config_file_detection_nonexisting",
"tests/test_config.py::TestConfig::test_config_file_detection_system",
"tests/test_config.py::TestConfig::test_config_file_detection_user",
"tests/test_config.py::TestConfig::test_config_load__profile_arg_nonexisting",
"tests/test_config.py::TestConfig::test_config_load__profile_first_section",
"tests/test_config.py::TestConfig::test_config_load__profile_from_defaults",
"tests/test_config.py::TestConfig::test_config_load_configfile_arg",
"tests/test_config.py::TestConfig::test_config_load_configfile_arg_profile_default",
"tests/test_config.py::TestConfig::test_config_load_configfile_arg_profile_default_nonexisting",
"tests/test_config.py::TestConfig::test_config_load_configfile_detect",
"tests/test_config.py::TestConfig::test_config_load_configfile_detect_profile_env",
"tests/test_config.py::TestConfig::test_config_load_configfile_env",
"tests/test_config.py::TestConfig::test_config_load_configfile_env_profile_env",
"tests/test_config.py::TestConfig::test_config_load_force_autodetection",
"tests/test_config.py::TestConfig::test_config_load_from_file",
"tests/test_config.py::TestConfig::test_config_load_from_file__invalid_format__duplicate_sections",
"tests/test_config.py::TestConfig::test_config_load_multiple_autodetected_configfiles",
"tests/test_config.py::TestConfig::test_config_load_multiple_explicit_configfiles",
"tests/test_config.py::TestConfig::test_config_load_skip_configfiles",
"tests/test_config.py::TestConfig::test_invalid_filename_given",
"tests/test_config.py::TestConfig::test_no_config_detected"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-10-30 14:18:51+00:00
|
apache-2.0
| 2,039 |
|
dwavesystems__dwave-cloud-client-338
|
diff --git a/dwave/cloud/cli.py b/dwave/cloud/cli.py
index f11bdb2..af6ecc7 100644
--- a/dwave/cloud/cli.py
+++ b/dwave/cloud/cli.py
@@ -234,11 +234,18 @@ def _ping(config_file, profile, solver_def, request_timeout, polling_timeout, ou
except Exception as e:
raise CLIError("Unexpected error while fetching solver: {!r}".format(e), 5)
+ if hasattr(solver, 'nodes'):
+ # structured solver: use the first existing node
+ problem = ({min(solver.nodes): 0}, {})
+ else:
+ # unstructured solver doesn't constrain problem graph
+ problem = ({0: 1}, {})
+
t1 = timer()
output("Using solver: {solver_id}", solver_id=solver.id)
try:
- future = solver.sample_ising({0: 1}, {})
+ future = solver.sample_ising(*problem)
timing = future.timing
except RequestTimeout:
raise CLIError("API connection timed out.", 8)
diff --git a/dwave/cloud/utils.py b/dwave/cloud/utils.py
index 0608f67..802684a 100644
--- a/dwave/cloud/utils.py
+++ b/dwave/cloud/utils.py
@@ -486,10 +486,13 @@ class tictoc(object):
self.dt = perf_counter() - self.tick
-def parse_loglevel(level_name):
+def parse_loglevel(level_name, default=logging.NOTSET):
"""Resolve numeric and symbolic log level names to numeric levels."""
- level_name = (level_name or '').strip().lower()
+ try:
+ level_name = str(level_name or '').strip().lower()
+ except:
+ return default
# note: make sure `TRACE` level is added to `logging` before calling this
known_levels = {
@@ -507,7 +510,7 @@ def parse_loglevel(level_name):
try:
level = int(level_name)
except ValueError:
- level = known_levels.get(level_name, logging.NOTSET)
+ level = known_levels.get(level_name, default)
return level
|
dwavesystems/dwave-cloud-client
|
a983749f8ce208eb45c4f121bfff6a2b2e31ee63
|
diff --git a/tests/test_cli.py b/tests/test_cli.py
index ec342e5..0e9c136 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -180,6 +180,9 @@ class TestCli(unittest.TestCase):
profile = 'profile'
with mock.patch('dwave.cloud.cli.Client') as m:
+ # mock returned solver
+ client = m.from_config.return_value
+ client.get_solver.return_value.nodes = [5, 7, 3]
runner = CliRunner()
with runner.isolated_filesystem():
@@ -196,12 +199,11 @@ class TestCli(unittest.TestCase):
request_timeout=0.5, polling_timeout=30)
# get solver called?
- c = m.from_config.return_value
- c.get_solver.assert_called_with()
+ client.get_solver.assert_called_with()
# sampling method called on solver?
- s = c.get_solver.return_value
- s.sample_ising.assert_called_with({0: 1}, {})
+ solver = client.get_solver.return_value
+ solver.sample_ising.assert_called_with({3: 0}, {})
self.assertEqual(result.exit_code, 0)
diff --git a/tests/test_mock_solver_loading.py b/tests/test_mock_solver_loading.py
index 158462b..953c7cd 100644
--- a/tests/test_mock_solver_loading.py
+++ b/tests/test_mock_solver_loading.py
@@ -47,8 +47,8 @@ def structured_solver_data(id_, incomplete=False):
obj = {
"properties": {
"supported_problem_types": ["qubo", "ising"],
- "qubits": [0, 1, 2],
- "couplers": [[0, 1], [0, 2], [1, 2]],
+ "qubits": [1, 2, 3],
+ "couplers": [[1, 2], [1, 3], [2, 3]],
"num_qubits": 3,
"parameters": {"num_reads": "Number of samples to return."}
},
diff --git a/tests/test_utils.py b/tests/test_utils.py
index a3cecf5..8a2f7d1 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import logging
import unittest
from collections import OrderedDict
from itertools import count
@@ -20,7 +21,7 @@ from datetime import datetime
from dwave.cloud.utils import (
uniform_iterator, uniform_get, strip_head, strip_tail,
active_qubits, generate_random_ising_problem,
- default_text_input, utcnow, cached, retried)
+ default_text_input, utcnow, cached, retried, parse_loglevel)
from dwave.cloud.testing import mock
@@ -118,6 +119,25 @@ class TestSimpleUtils(unittest.TestCase):
unaware = t.replace(tzinfo=None)
self.assertLess((now - unaware).total_seconds(), 1.0)
+ def test_parse_loglevel_invalid(self):
+ """Parsing invalid log levels returns NOTSET."""
+ notset = logging.NOTSET
+
+ self.assertEqual(parse_loglevel(''), notset)
+ self.assertEqual(parse_loglevel(' '), notset)
+ self.assertEqual(parse_loglevel(None), notset)
+ self.assertEqual(parse_loglevel(notset), notset)
+ self.assertEqual(parse_loglevel('nonexisting'), notset)
+ self.assertEqual(parse_loglevel({'a': 1}), notset)
+ self.assertIsNone(parse_loglevel('nonexisting', default=None))
+
+ def test_parse_loglevel_numeric_and_symbolic(self):
+ self.assertEqual(parse_loglevel('info'), logging.INFO)
+ self.assertEqual(parse_loglevel('INFO'), logging.INFO)
+ self.assertEqual(parse_loglevel(logging.INFO), logging.INFO)
+ self.assertEqual(parse_loglevel(str(logging.INFO)), logging.INFO)
+ self.assertEqual(parse_loglevel(' %d ' % logging.INFO), logging.INFO)
+
class TestCachedDecorator(unittest.TestCase):
|
Ping should respect solver's graph
Currently, the ping command always submits the fixed Ising problem `({0: 1}, {})`, ignoring the actual solver's graph.
Note that `sample --random-problem` respects the graph structure.
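For context, a minimal sketch of the graph-aware problem construction the patch introduces; `FakeSolver` is a hypothetical stand-in for a structured solver fetched via `client.get_solver()`:

```
# Hedged sketch of graph-aware ping problem construction, mirroring the
# logic in the patch above (not the shipped implementation verbatim).
class FakeSolver:
    nodes = [5, 7, 3]   # hypothetical working graph

def trivial_problem(solver):
    if hasattr(solver, 'nodes'):
        # structured solver: bias only the lowest existing node
        return ({min(solver.nodes): 0}, {})
    # unstructured solver doesn't constrain the problem graph
    return ({0: 1}, {})

h, J = trivial_problem(FakeSolver())
assert (h, J) == ({3: 0}, {})   # matches the expectation in the test patch
```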
|
0.0
|
a983749f8ce208eb45c4f121bfff6a2b2e31ee63
|
[
"tests/test_cli.py::TestCli::test_ping",
"tests/test_utils.py::TestSimpleUtils::test_parse_loglevel_invalid",
"tests/test_utils.py::TestSimpleUtils::test_parse_loglevel_numeric_and_symbolic"
] |
[
"tests/test_cli.py::TestCli::test_config_create",
"tests/test_cli.py::TestCli::test_config_ls",
"tests/test_cli.py::TestCli::test_configure_inspect",
"tests/test_cli.py::TestCli::test_sample",
"tests/test_cli.py::TestCli::test_solvers",
"tests/test_cli.py::TestCli::test_upload",
"tests/test_mock_solver_loading.py::MockConnectivityTests::test_bad_token",
"tests/test_mock_solver_loading.py::MockConnectivityTests::test_bad_url",
"tests/test_mock_solver_loading.py::MockConnectivityTests::test_good_connection",
"tests/test_mock_solver_loading.py::MockSolverLoading::test_load_all_solvers",
"tests/test_mock_solver_loading.py::MockSolverLoading::test_load_missing_solver",
"tests/test_mock_solver_loading.py::MockSolverLoading::test_load_solver",
"tests/test_mock_solver_loading.py::MockSolverLoading::test_load_solver_broken_response",
"tests/test_mock_solver_loading.py::MockSolverLoading::test_load_solver_missing_data",
"tests/test_mock_solver_loading.py::MockSolverLoading::test_solver_feature_properties",
"tests/test_mock_solver_loading.py::MockSolverLoading::test_solver_filtering_in_client",
"tests/test_mock_solver_loading.py::MockLegacyConfiguration::test_env_args_set",
"tests/test_mock_solver_loading.py::MockLegacyConfiguration::test_env_with_file_set",
"tests/test_mock_solver_loading.py::MockLegacyConfiguration::test_explicit_only",
"tests/test_mock_solver_loading.py::MockLegacyConfiguration::test_explicit_with_file",
"tests/test_mock_solver_loading.py::MockLegacyConfiguration::test_file_read_error",
"tests/test_mock_solver_loading.py::MockLegacyConfiguration::test_nonexisting_file",
"tests/test_mock_solver_loading.py::MockLegacyConfiguration::test_only_file",
"tests/test_mock_solver_loading.py::MockLegacyConfiguration::test_only_file_key",
"tests/test_mock_solver_loading.py::MockConfiguration::test_custom_options",
"tests/test_utils.py::TestSimpleUtils::test_active_qubits_dict",
"tests/test_utils.py::TestSimpleUtils::test_active_qubits_list",
"tests/test_utils.py::TestSimpleUtils::test_default_text_input",
"tests/test_utils.py::TestSimpleUtils::test_generate_random_ising_problem",
"tests/test_utils.py::TestSimpleUtils::test_generate_random_ising_problem_default_solver_ranges",
"tests/test_utils.py::TestSimpleUtils::test_generate_random_ising_problem_with_user_constrained_ranges",
"tests/test_utils.py::TestSimpleUtils::test_strip_head",
"tests/test_utils.py::TestSimpleUtils::test_strip_tail",
"tests/test_utils.py::TestSimpleUtils::test_uniform_get",
"tests/test_utils.py::TestSimpleUtils::test_uniform_iterator",
"tests/test_utils.py::TestSimpleUtils::test_utcnow",
"tests/test_utils.py::TestCachedDecorator::test_args_collision",
"tests/test_utils.py::TestCachedDecorator::test_args_hashing",
"tests/test_utils.py::TestCachedDecorator::test_default_maxage",
"tests/test_utils.py::TestCachedDecorator::test_exceptions",
"tests/test_utils.py::TestCachedDecorator::test_expiry",
"tests/test_utils.py::TestRetriedDecorator::test_backoff_constant",
"tests/test_utils.py::TestRetriedDecorator::test_backoff_func",
"tests/test_utils.py::TestRetriedDecorator::test_backoff_seq",
"tests/test_utils.py::TestRetriedDecorator::test_decorator",
"tests/test_utils.py::TestRetriedDecorator::test_exc_raised",
"tests/test_utils.py::TestRetriedDecorator::test_func_called",
"tests/test_utils.py::TestRetriedDecorator::test_func_called_only_until_succeeds"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-10-31 17:09:44+00:00
|
apache-2.0
| 2,040 |
|
dwavesystems__dwave-cloud-client-358
|
diff --git a/.travis.yml b/.travis.yml
index 20f5ae3..4cc0ac9 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -60,6 +60,8 @@ matrix:
allow_failures:
- python: "nightly"
+ - os: osx
+ env: TOXENV=py27
install:
- pip install --upgrade setuptools
diff --git a/dwave/cloud/client.py b/dwave/cloud/client.py
index 2605872..ec06111 100644
--- a/dwave/cloud/client.py
+++ b/dwave/cloud/client.py
@@ -161,6 +161,7 @@ class Client(object):
_SUBMISSION_THREAD_COUNT = 5
_UPLOAD_PROBLEM_THREAD_COUNT = 1
_UPLOAD_PART_THREAD_COUNT = 10
+ _ENCODE_PROBLEM_THREAD_COUNT = _UPLOAD_PROBLEM_THREAD_COUNT
_CANCEL_THREAD_COUNT = 1
_POLL_THREAD_COUNT = 2
_LOAD_THREAD_COUNT = 5
@@ -481,6 +482,9 @@ class Client(object):
self._upload_part_executor = \
PriorityThreadPoolExecutor(self._UPLOAD_PART_THREAD_COUNT)
+ self._encode_problem_executor = \
+ ThreadPoolExecutor(self._ENCODE_PROBLEM_THREAD_COUNT)
+
dispatch_event(
'after_client_init', obj=self, args=args, return_value=None)
@@ -549,6 +553,8 @@ class Client(object):
self._upload_problem_executor.shutdown()
logger.debug("Shutting down problem part upload executor")
self._upload_part_executor.shutdown()
+ logger.debug("Shutting down problem encoder executor")
+ self._encode_problem_executor.shutdown()
# Send kill-task to all worker threads
# Note: threads can't be 'killed' in Python, they have to die by
@@ -1072,6 +1078,38 @@ class Client(object):
Note:
This method is always run inside of a daemon thread.
"""
+
+ def task_done():
+ self._submission_queue.task_done()
+
+ def filter_ready(item):
+ """Pass-through ready (encoded) problems, re-enqueue ones for which
+ the encoding is in progress, and fail the ones for which encoding
+ failed.
+ """
+
+ # body is a `concurrent.futures.Future`, so make sure
+ # it's ready for submitting
+ if item.body.done():
+ exc = item.body.exception()
+ if exc:
+ # encoding failed, submit should fail as well
+ logger.info("Problem encoding prior to submit "
+ "failed with: %r", exc)
+ item.future._set_error(exc)
+ task_done()
+
+ else:
+ # problem ready for submit
+ return [item]
+
+ else:
+ # body not ready, return the item to queue
+ self._submission_queue.put(item)
+ task_done()
+
+ return []
+
session = self.create_session()
try:
while True:
@@ -1083,19 +1121,25 @@ class Client(object):
item = self._submission_queue.get()
if item is None:
+ task_done()
break
- ready_problems = [item]
+ ready_problems = filter_ready(item)
while len(ready_problems) < self._SUBMIT_BATCH_SIZE:
try:
- ready_problems.append(self._submission_queue.get_nowait())
+ item = self._submission_queue.get_nowait()
except queue.Empty:
break
+ ready_problems.extend(filter_ready(item))
+
+ if not ready_problems:
+ continue
+
# Submit the problems
logger.debug("Submitting %d problems", len(ready_problems))
- body = '[' + ','.join(mess.body for mess in ready_problems) + ']'
try:
+ body = '[' + ','.join(mess.body.result() for mess in ready_problems) + ']'
try:
response = session.post('problems/', body)
localtime_of_response = epochnow()
@@ -1115,14 +1159,14 @@ class Client(object):
for mess in ready_problems:
mess.future._set_error(exception, sys.exc_info())
- self._submission_queue.task_done()
+ task_done()
continue
# Pass on the information
for submission, res in zip(ready_problems, message):
submission.future._set_clock_diff(response, localtime_of_response)
self._handle_problem_status(res, submission.future)
- self._submission_queue.task_done()
+ task_done()
# this is equivalent to a yield to scheduler in other threading libraries
time.sleep(0)
diff --git a/dwave/cloud/concurrency.py b/dwave/cloud/concurrency.py
index fb39b97..b0a2676 100644
--- a/dwave/cloud/concurrency.py
+++ b/dwave/cloud/concurrency.py
@@ -111,3 +111,21 @@ class PriorityThreadPoolExecutor(concurrent.futures.ThreadPoolExecutor):
def __init__(self, *args, **kwargs):
super(PriorityThreadPoolExecutor, self).__init__(*args, **kwargs)
self._work_queue = _PrioritizingQueue()
+
+
+class Present(concurrent.futures.Future):
+ """Already resolved :class:`~concurrent.futures.Future` object.
+
+ Users should treat this class as just another
+ :class:`~concurrent.futures.Future`, the difference being an implementation
+ detail: :class:`Present` is "resolved" at construction time.
+ """
+
+ def __init__(self, result=None, exception=None):
+ super(Present, self).__init__()
+ if result is not None:
+ self.set_result(result)
+ elif exception is not None:
+ self.set_exception(exception)
+ else:
+ raise ValueError("can't provide both 'result' and 'exception'")
diff --git a/dwave/cloud/solver.py b/dwave/cloud/solver.py
index d153453..43e9611 100644
--- a/dwave/cloud/solver.py
+++ b/dwave/cloud/solver.py
@@ -35,12 +35,15 @@ import logging
import warnings
from collections import Mapping
+import six
+
from dwave.cloud.exceptions import *
from dwave.cloud.coders import (
encode_problem_as_qp, encode_problem_as_bq,
decode_qp_numpy, decode_qp, decode_bq)
from dwave.cloud.utils import uniform_iterator, reformat_qubo_as_ising
from dwave.cloud.computation import Future
+from dwave.cloud.concurrency import Present
from dwave.cloud.events import dispatch_event
# Use numpy if available for fast encoding/decoding
@@ -300,6 +303,40 @@ class UnstructuredSolver(BaseSolver):
bqm = dimod.BinaryQuadraticModel.from_qubo(qubo)
return self.sample_bqm(bqm, **params)
+ def _encode_any_problem_as_bqm_ref(self, problem, params):
+ """Encode `problem` for submitting in `bqm-ref` format. Upload the
+ problem if it's not already uploaded.
+
+ Args:
+ problem (:class:`~dimod.BinaryQuadraticModel`/str):
+ A binary quadratic model, or a reference to one (Problem ID).
+
+ params (dict):
+ Parameters for the sampling method, solver-specific.
+
+ Returns:
+ str:
+ JSON-encoded problem submit body
+
+ """
+
+ if isinstance(problem, six.string_types):
+ problem_id = problem
+ else:
+ logger.debug("To encode the problem for submit via 'bqm-ref', "
+ "we need to upload it first.")
+ problem_id = self.upload_bqm(problem).result()
+
+ body = json.dumps({
+ 'solver': self.id,
+ 'data': encode_problem_as_bq(problem_id),
+ 'type': 'bqm',
+ 'params': params
+ })
+ logger.trace("Sampling request encoded as: %s", body)
+
+ return body
+
def sample_bqm(self, bqm, **params):
"""Sample from the specified :term:`BQM`.
@@ -317,21 +354,19 @@ class UnstructuredSolver(BaseSolver):
Note:
To use this method, dimod package has to be installed.
"""
- # encode the request
- body = json.dumps({
- 'solver': self.id,
- 'data': encode_problem_as_bq(bqm),
- 'type': 'bqm',
- 'params': params
- })
- logger.trace("Encoded sample request: %s", body)
- future = Future(solver=self, id_=None, return_matrix=self.return_matrix)
+ # encode the request (body as future)
+ body = self.client._encode_problem_executor.submit(
+ self._encode_any_problem_as_bqm_ref,
+ problem=bqm, params=params)
+
+ # computation future holds a reference to the remote job
+ computation = Future(solver=self, id_=None, return_matrix=self.return_matrix)
logger.debug("Submitting new problem to: %s", self.id)
- self.client._submit(body, future)
+ self.client._submit(body, computation)
- return future
+ return computation
def upload_bqm(self, bqm):
"""Upload the specified :term:`BQM` to SAPI, returning a Problem ID
@@ -631,22 +666,23 @@ class StructuredSolver(BaseSolver):
# transform some of the parameters in-place
self._format_params(type_, combined_params)
- body = json.dumps({
+ body_data = json.dumps({
'solver': self.id,
'data': encode_problem_as_qp(self, linear, quadratic),
'type': type_,
'params': combined_params
})
- logger.trace("Encoded sample request: %s", body)
+ logger.trace("Encoded sample request: %s", body_data)
- future = Future(solver=self, id_=None, return_matrix=self.return_matrix)
+ body = Present(result=body_data)
+ computation = Future(solver=self, id_=None, return_matrix=self.return_matrix)
logger.debug("Submitting new problem to: %s", self.id)
- self.client._submit(body, future)
+ self.client._submit(body, computation)
- dispatch_event('after_sample', obj=self, args=args, return_value=future)
+ dispatch_event('after_sample', obj=self, args=args, return_value=computation)
- return future
+ return computation
def _format_params(self, type_, params):
"""Reformat some of the parameters for sapi."""
|
dwavesystems/dwave-cloud-client
|
24717ae2d01fd7046746502e51454dacf96b09d6
|
diff --git a/tests/test_mock_unstructured_submission.py b/tests/test_mock_unstructured_submission.py
index 0b9a002..22c958a 100644
--- a/tests/test_mock_unstructured_submission.py
+++ b/tests/test_mock_unstructured_submission.py
@@ -23,6 +23,7 @@ import numpy
from dwave.cloud.client import Client
from dwave.cloud.solver import UnstructuredSolver
from dwave.cloud.testing import mock
+from dwave.cloud.concurrency import Present
def unstructured_solver_data():
@@ -74,42 +75,111 @@ class TestUnstructuredSolver(unittest.TestCase):
# use a global mocked session, so we can modify it on-fly
session = mock.Mock()
+ # upload is now part of submit, so we need to mock it
+ mock_problem_id = 'mock-problem-id'
+ def mock_upload(self, bqm):
+ return Present(result=mock_problem_id)
+
# construct a functional solver by mocking client and api response data
with mock.patch.object(Client, 'create_session', lambda self: session):
with Client('endpoint', 'token') as client:
- solver = UnstructuredSolver(client, unstructured_solver_data())
-
- # direct bqm sampling
- ss = dimod.ExactSolver().sample(bqm)
- session.post = lambda path, _: choose_reply(
- path, {'problems/': complete_reply(ss)})
-
- fut = solver.sample_bqm(bqm)
- numpy.testing.assert_array_equal(fut.sampleset, ss)
- numpy.testing.assert_array_equal(fut.samples, ss.record.sample)
- numpy.testing.assert_array_equal(fut.energies, ss.record.energy)
- numpy.testing.assert_array_equal(fut.occurrences, ss.record.num_occurrences)
-
- # ising sampling
- lin, quad, _ = bqm.to_ising()
- ss = dimod.ExactSolver().sample_ising(lin, quad)
- session.post = lambda path, _: choose_reply(
- path, {'problems/': complete_reply(ss)})
-
- fut = solver.sample_ising(lin, quad)
- numpy.testing.assert_array_equal(fut.sampleset, ss)
- numpy.testing.assert_array_equal(fut.samples, ss.record.sample)
- numpy.testing.assert_array_equal(fut.energies, ss.record.energy)
- numpy.testing.assert_array_equal(fut.occurrences, ss.record.num_occurrences)
-
- # qubo sampling
- qubo, _ = bqm.to_qubo()
- ss = dimod.ExactSolver().sample_qubo(qubo)
- session.post = lambda path, _: choose_reply(
- path, {'problems/': complete_reply(ss)})
-
- fut = solver.sample_qubo(qubo)
- numpy.testing.assert_array_equal(fut.sampleset, ss)
- numpy.testing.assert_array_equal(fut.samples, ss.record.sample)
- numpy.testing.assert_array_equal(fut.energies, ss.record.energy)
- numpy.testing.assert_array_equal(fut.occurrences, ss.record.num_occurrences)
+ with mock.patch.object(UnstructuredSolver, 'upload_bqm', mock_upload):
+ solver = UnstructuredSolver(client, unstructured_solver_data())
+
+ # direct bqm sampling
+ ss = dimod.ExactSolver().sample(bqm)
+ session.post = lambda path, _: choose_reply(
+ path, {'problems/': complete_reply(ss)})
+
+ fut = solver.sample_bqm(bqm)
+ numpy.testing.assert_array_equal(fut.sampleset, ss)
+ numpy.testing.assert_array_equal(fut.samples, ss.record.sample)
+ numpy.testing.assert_array_equal(fut.energies, ss.record.energy)
+ numpy.testing.assert_array_equal(fut.occurrences, ss.record.num_occurrences)
+
+ # submit of pre-uploaded bqm problem
+ fut = solver.sample_bqm(mock_problem_id)
+ numpy.testing.assert_array_equal(fut.sampleset, ss)
+ numpy.testing.assert_array_equal(fut.samples, ss.record.sample)
+ numpy.testing.assert_array_equal(fut.energies, ss.record.energy)
+ numpy.testing.assert_array_equal(fut.occurrences, ss.record.num_occurrences)
+
+ # ising sampling
+ lin, quad, _ = bqm.to_ising()
+ ss = dimod.ExactSolver().sample_ising(lin, quad)
+ session.post = lambda path, _: choose_reply(
+ path, {'problems/': complete_reply(ss)})
+
+ fut = solver.sample_ising(lin, quad)
+ numpy.testing.assert_array_equal(fut.sampleset, ss)
+ numpy.testing.assert_array_equal(fut.samples, ss.record.sample)
+ numpy.testing.assert_array_equal(fut.energies, ss.record.energy)
+ numpy.testing.assert_array_equal(fut.occurrences, ss.record.num_occurrences)
+
+ # qubo sampling
+ qubo, _ = bqm.to_qubo()
+ ss = dimod.ExactSolver().sample_qubo(qubo)
+ session.post = lambda path, _: choose_reply(
+ path, {'problems/': complete_reply(ss)})
+
+ fut = solver.sample_qubo(qubo)
+ numpy.testing.assert_array_equal(fut.sampleset, ss)
+ numpy.testing.assert_array_equal(fut.samples, ss.record.sample)
+ numpy.testing.assert_array_equal(fut.energies, ss.record.energy)
+ numpy.testing.assert_array_equal(fut.occurrences, ss.record.num_occurrences)
+
+ def test_upload_failure(self):
+ """Submit should gracefully fail if upload as part of submit fails."""
+
+ # build a test problem
+ bqm = dimod.BQM.from_ising({}, {'ab': 1})
+
+ # use a global mocked session, so we can modify it on-fly
+ session = mock.Mock()
+
+ # upload is now part of submit, so we need to mock it
+ mock_upload_exc = ValueError('error')
+ def mock_upload(self, bqm):
+ return Present(exception=mock_upload_exc)
+
+ # construct a functional solver by mocking client and api response data
+ with mock.patch.object(Client, 'create_session', lambda self: session):
+ with Client('endpoint', 'token') as client:
+ with mock.patch.object(UnstructuredSolver, 'upload_bqm', mock_upload):
+ solver = UnstructuredSolver(client, unstructured_solver_data())
+
+ # direct bqm sampling
+ ss = dimod.ExactSolver().sample(bqm)
+ session.post = lambda path, _: choose_reply(
+ path, {'problems/': complete_reply(ss)})
+
+ fut = solver.sample_bqm(bqm)
+
+ with self.assertRaises(type(mock_upload_exc)):
+ fut.result()
+
+ def test_many_upload_failures(self):
+ """Failure handling in high concurrency mode works correctly."""
+
+ # build a test problem
+ bqm = dimod.BQM.from_ising({}, {'ab': 1})
+
+ # use a global mocked session, so we can modify it on-fly
+ session = mock.Mock()
+
+ # upload is now part of submit, so we need to mock it
+ mock_upload_exc = ValueError('error')
+ def mock_upload(self, bqm):
+ return Present(exception=mock_upload_exc)
+
+ # construct a functional solver by mocking client and api response data
+ with mock.patch.object(Client, 'create_session', lambda self: session):
+ with Client('endpoint', 'token') as client:
+ with mock.patch.object(UnstructuredSolver, 'upload_bqm', mock_upload):
+ solver = UnstructuredSolver(client, unstructured_solver_data())
+
+ futs = [solver.sample_bqm(bqm) for _ in range(100)]
+
+ for fut in futs:
+ with self.assertRaises(type(mock_upload_exc)):
+ fut.result()
|
Combine upload and submit steps for unstructured solvers
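A hedged sketch of the "already resolved Future" pattern (`Present`) that this patch introduces, reduced to its essentials for illustration:

```
# Minimal re-statement of the Present class from the diff above: a
# concurrent.futures.Future that is resolved at construction time.
import concurrent.futures

class Present(concurrent.futures.Future):
    def __init__(self, result=None, exception=None):
        super().__init__()
        if result is not None:
            self.set_result(result)
        elif exception is not None:
            self.set_exception(exception)

ok = Present(result='{"solver": "mock"}')
assert ok.done() and ok.result() == '{"solver": "mock"}'

bad = Present(exception=ValueError('encoding failed'))
assert isinstance(bad.exception(), ValueError)
```

This lets structured solvers, whose request body is available synchronously, flow through the same submit path as unstructured solvers, whose body is produced by a background encode.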
|
0.0
|
24717ae2d01fd7046746502e51454dacf96b09d6
|
[
"tests/test_mock_unstructured_submission.py::TestUnstructuredSolver::test_many_upload_failures",
"tests/test_mock_unstructured_submission.py::TestUnstructuredSolver::test_submit_immediate_reply",
"tests/test_mock_unstructured_submission.py::TestUnstructuredSolver::test_upload_failure"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-02-10 14:12:19+00:00
|
apache-2.0
| 2,041 |
|
dwavesystems__dwave-cloud-client-386
|
diff --git a/dwave/cloud/config.py b/dwave/cloud/config.py
index 9067215..87971a4 100644
--- a/dwave/cloud/config.py
+++ b/dwave/cloud/config.py
@@ -462,6 +462,7 @@ def load_config_from_files(filenames=None):
config = configparser.ConfigParser(default_section="defaults")
for filename in filenames:
try:
+ filename = os.path.expandvars(os.path.expanduser(filename))
with open(filename, 'r') as f:
config.read_file(f, filename)
except (IOError, OSError):
|
dwavesystems/dwave-cloud-client
|
50f49313de3f12d6b7bd86d056790cae8a5e1533
|
diff --git a/tests/test_config.py b/tests/test_config.py
index 44ad05d..ed150ae 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -27,7 +27,7 @@ from dwave.cloud.config import (
class TestConfig(unittest.TestCase):
- config_body = u"""
+ config_body = """
[defaults]
endpoint = https://cloud.dwavesys.com/sapi
client = qpu
@@ -72,7 +72,7 @@ class TestConfig(unittest.TestCase):
def test_config_load_from_file__invalid_format__duplicate_sections(self):
"""Config loading should fail with ``ConfigFileParseError`` for invalid
config files."""
- myconfig = u"""
+ myconfig = """
[section]
key = val
[section]
@@ -122,6 +122,34 @@ class TestConfig(unittest.TestCase):
with mock.patch("os.path.exists", lambda path: False):
self.assertEqual(get_configfile_paths(), [])
+ def test_config_file_path_expansion(self):
+ """Home dir and env vars are expanded when resolving config path."""
+
+ env = {"var": "val"}
+ config_file = "~/path/${var}/to/$var/my.conf"
+ expected_path = os.path.expanduser("~/path/val/to/val/my.conf")
+ profile = "profile"
+
+ conf_content = """
+ [{}]
+ valid = yes
+ """.format(profile)
+
+ def mock_open(filename, *pa, **kw):
+ self.assertEqual(filename, expected_path)
+ return iterable_mock_open(conf_content)()
+
+ # config file via kwarg
+ with mock.patch.dict(os.environ, env):
+ with mock.patch('dwave.cloud.config.open', mock_open) as m:
+ conf = load_config(config_file=config_file, profile=profile)
+ self.assertEqual(conf['valid'], 'yes')
+
+ # config file via env var
+ with mock.patch.dict(os.environ, env, DWAVE_CONFIG_FILE=config_file):
+ with mock.patch('dwave.cloud.config.open', mock_open) as m:
+ conf = load_config(profile=profile)
+ self.assertEqual(conf['valid'], 'yes')
def _assert_config_valid(self, config):
# profile 'alpha' is loaded
@@ -238,7 +266,7 @@ class TestConfig(unittest.TestCase):
"""load_config should load the first section for profile, if profile
is nowhere else specified.
"""
- myconfig = u"""
+ myconfig = """
[first]
solver = DW_2000Q_1
"""
@@ -254,7 +282,7 @@ class TestConfig(unittest.TestCase):
is nowhere else specified *and* not even a single non-[defaults] section
exists.
"""
- myconfig = u"""
+ myconfig = """
[defaults]
solver = DW_2000Q_1
"""
@@ -269,7 +297,7 @@ class TestConfig(unittest.TestCase):
"""load_config should fail if the profile specified in the defaults
section is non-existing.
"""
- myconfig = u"""
+ myconfig = """
[defaults]
profile = nonexisting
@@ -285,12 +313,12 @@ class TestConfig(unittest.TestCase):
"""Test more specific config overrides less specific one,
on a key by key basis, in a list of auto-detected config files."""
- config_system = u"""
+ config_system = """
[alpha]
endpoint = alpha
solver = DW_2000Q_1
"""
- config_user = u"""
+ config_user = """
[alpha]
solver = DW_2000Q_2
[beta]
@@ -302,16 +330,16 @@ class TestConfig(unittest.TestCase):
# test per-key override
with mock.patch('dwave.cloud.config.open', create=True) as m:
- m.side_effect=[iterable_mock_open(config_system)(),
- iterable_mock_open(config_user)()]
+ m.side_effect = [iterable_mock_open(config_system)(),
+ iterable_mock_open(config_user)()]
section = load_config(profile='alpha')
self.assertEqual(section['endpoint'], 'alpha')
self.assertEqual(section['solver'], 'DW_2000Q_2')
# test per-section override (section addition)
with mock.patch('dwave.cloud.config.open', create=True) as m:
- m.side_effect=[iterable_mock_open(config_system)(),
- iterable_mock_open(config_user)()]
+ m.side_effect = [iterable_mock_open(config_system)(),
+ iterable_mock_open(config_user)()]
section = load_config(profile='beta')
self.assertEqual(section['endpoint'], 'beta')
@@ -319,12 +347,12 @@ class TestConfig(unittest.TestCase):
"""Test more specific config overrides less specific one,
on a key by key basis, in a list of explicitly given files."""
- file1 = u"""
+ file1 = """
[alpha]
endpoint = alpha
solver = DW_2000Q_1
"""
- file2 = u"""
+ file2 = """
[alpha]
solver = DW_2000Q_2
"""
@@ -339,7 +367,7 @@ class TestConfig(unittest.TestCase):
def test_config_load_env_override(self):
with mock.patch("dwave.cloud.config.load_config_from_files",
- partial(self._load_config_from_files, data=u"", provided=['myfile'])):
+ partial(self._load_config_from_files, data="", provided=['myfile'])):
with mock.patch.dict(os.environ, {'DWAVE_API_CLIENT': 'test'}):
self.assertEqual(load_config(config_file='myfile')['client'], 'test')
|
Home-Based Config Paths
It seems that, at least on OS X, config file paths cannot include home-based components such as `~/foo/my.conf` or `$HOME/foo/my.conf`. If these are easy to support, doing so would add some convenience.
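A minimal sketch of the expansion the one-line fix above applies, using `os.path.expandvars(os.path.expanduser(...))` as in the patch:

```
# Hedged sketch: '~' and environment variables in a config path are
# resolved before the file is opened.
import os

def resolve(path):
    # expanduser handles '~'; expandvars handles '$HOME' / '${VAR}' forms
    return os.path.expandvars(os.path.expanduser(path))

print(resolve("~/foo/my.conf"))
print(resolve("$HOME/foo/my.conf"))
```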
|
0.0
|
50f49313de3f12d6b7bd86d056790cae8a5e1533
|
[
"tests/test_config.py::TestConfig::test_config_file_path_expansion"
] |
[
"tests/test_config.py::TestConfig::test_config_file_detection_cwd",
"tests/test_config.py::TestConfig::test_config_file_detection_nonexisting",
"tests/test_config.py::TestConfig::test_config_file_detection_system",
"tests/test_config.py::TestConfig::test_config_file_detection_user",
"tests/test_config.py::TestConfig::test_config_load__profile_arg_nonexisting",
"tests/test_config.py::TestConfig::test_config_load__profile_first_section",
"tests/test_config.py::TestConfig::test_config_load__profile_from_defaults",
"tests/test_config.py::TestConfig::test_config_load_configfile_arg",
"tests/test_config.py::TestConfig::test_config_load_configfile_arg_profile_default",
"tests/test_config.py::TestConfig::test_config_load_configfile_arg_profile_default_nonexisting",
"tests/test_config.py::TestConfig::test_config_load_configfile_detect",
"tests/test_config.py::TestConfig::test_config_load_configfile_detect_profile_env",
"tests/test_config.py::TestConfig::test_config_load_configfile_env",
"tests/test_config.py::TestConfig::test_config_load_configfile_env_profile_env",
"tests/test_config.py::TestConfig::test_config_load_configfile_env_profile_env_key_arg",
"tests/test_config.py::TestConfig::test_config_load_env_override",
"tests/test_config.py::TestConfig::test_config_load_force_autodetection",
"tests/test_config.py::TestConfig::test_config_load_from_file",
"tests/test_config.py::TestConfig::test_config_load_from_file__invalid_format__duplicate_sections",
"tests/test_config.py::TestConfig::test_config_load_multiple_autodetected_configfiles",
"tests/test_config.py::TestConfig::test_config_load_multiple_explicit_configfiles",
"tests/test_config.py::TestConfig::test_config_load_skip_configfiles",
"tests/test_config.py::TestConfig::test_invalid_filename_given",
"tests/test_config.py::TestConfig::test_no_config_detected"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-06-19 12:08:27+00:00
|
apache-2.0
| 2,042 |
|
dwavesystems__dwave-cloud-client-397
|
diff --git a/dwave/cloud/solver.py b/dwave/cloud/solver.py
index da11b7f..762658a 100644
--- a/dwave/cloud/solver.py
+++ b/dwave/cloud/solver.py
@@ -204,17 +204,35 @@ class BaseSolver(object):
@property
def qpu(self):
"Is this a QPU-based solver?"
- return self.properties.get('category', '').lower() == 'qpu'
+ category = self.properties.get('category', '').lower()
+ if category:
+ return category == 'qpu'
+ else:
+ # fallback for legacy solvers without the `category` property
+ # TODO: remove when all production solvers are updated
+ return not (self.software or self.hybrid)
@property
def software(self):
"Is this a software-based solver?"
- return self.properties.get('category', '').lower() == 'software'
+ category = self.properties.get('category', '').lower()
+ if category:
+ return category == 'software'
+ else:
+ # fallback for legacy solvers without the `category` property
+ # TODO: remove when all production solvers are updated
+ return self.id.startswith('c4-sw_')
@property
def hybrid(self):
"Is this a hybrid quantum-classical solver?"
- return self.properties.get('category', '').lower() == 'hybrid'
+ category = self.properties.get('category', '').lower()
+ if category:
+ return category == 'hybrid'
+ else:
+ # fallback for legacy solvers without the `category` property
+ # TODO: remove when all production solvers are updated
+ return self.id.startswith('hybrid')
@property
def is_qpu(self):
|
dwavesystems/dwave-cloud-client
|
a178c2f40f3d107c5b7e2a7ae43602fb24046016
|
diff --git a/tests/test_client.py b/tests/test_client.py
index a4f5fc9..12f1726 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -23,6 +23,7 @@ import time
import warnings
import unittest
from unittest import mock
+from contextlib import contextmanager
import requests.exceptions
from plucky import merge
@@ -479,7 +480,7 @@ class FeatureBasedSolverSelection(unittest.TestCase):
self.assertSolvers(self.client.get_solvers(online=True), self.solvers)
self.assertSolvers(self.client.get_solvers(online=False), [])
- def test_qpu_hybrid_software(self):
+ def test_derived_category_properties(self):
self.assertSolvers(self.client.get_solvers(qpu=True), self.qpu_solvers)
self.assertSolvers(self.client.get_solvers(qpu=False), self.software_solvers + self.hybrid_solvers)
self.assertSolvers(self.client.get_solvers(software=True), self.software_solvers)
@@ -487,6 +488,30 @@ class FeatureBasedSolverSelection(unittest.TestCase):
self.assertSolvers(self.client.get_solvers(hybrid=True), self.hybrid_solvers)
self.assertSolvers(self.client.get_solvers(hybrid=False), self.qpu_solvers + self.software_solvers)
+ # Test fallback for legacy solvers without the `category` property
+ # TODO: remove when all production solvers are updated
+ def test_derived_category_properties_without_category(self):
+ "Category-based filtering works without explicit `category` property."
+
+ @contextmanager
+ def multi_solver_properties_patch(solvers, update):
+ """Update properties for all `solvers` at once."""
+ patchers = [mock.patch.dict(s.properties, update) for s in solvers]
+ try:
+ yield (p.start() for p in patchers)
+ finally:
+ return (p.stop() for p in patchers)
+
+ with mock.patch.object(self.software, 'id', 'c4-sw_solver3'):
+ # patch categories and re-run the category-based filtering test
+ with multi_solver_properties_patch(self.solvers, {'category': ''}):
+ self.test_derived_category_properties()
+
+ # verify patching
+ with multi_solver_properties_patch(self.solvers, {'category': 'x'}):
+ with self.assertRaises(AssertionError):
+ self.test_derived_category_properties()
+
def test_category(self):
self.assertSolvers(self.client.get_solvers(category='qpu'), self.qpu_solvers)
self.assertSolvers(self.client.get_solvers(category='software'), self.software_solvers)
diff --git a/tests/test_mock_solver_loading.py b/tests/test_mock_solver_loading.py
index 492226c..3fdc8ec 100644
--- a/tests/test_mock_solver_loading.py
+++ b/tests/test_mock_solver_loading.py
@@ -25,6 +25,7 @@ import requests_mock
from dwave.cloud.client import Client, Solver
from dwave.cloud.qpu import Client as QPUClient
from dwave.cloud.sw import Client as SoftwareClient
+from dwave.cloud.hybrid import Client as HybridClient
from dwave.cloud.exceptions import (
SolverPropertyMissingError, ConfigFileReadError, ConfigFileParseError,
SolverError, SolverNotFoundError)
@@ -226,6 +227,12 @@ class MockSolverLoading(unittest.TestCase):
self.assertFalse(SoftwareClient.is_solver_handled(solver_object('test', 'hybrid')))
self.assertFalse(SoftwareClient.is_solver_handled(solver_object('test', 'whatever')))
self.assertFalse(SoftwareClient.is_solver_handled(None))
+ # hybrid client
+ self.assertFalse(HybridClient.is_solver_handled(solver_object('test', 'qpu')))
+ self.assertFalse(HybridClient.is_solver_handled(solver_object('test', 'software')))
+ self.assertTrue(HybridClient.is_solver_handled(solver_object('test', 'hybrid')))
+ self.assertFalse(HybridClient.is_solver_handled(solver_object('test', 'whatever')))
+ self.assertFalse(HybridClient.is_solver_handled(None))
def test_solver_feature_properties(self):
self.assertTrue(solver_object('solver', 'qpu').qpu)
@@ -275,6 +282,26 @@ class MockSolverLoading(unittest.TestCase):
del data['status']
self.assertTrue(Solver(None, data).online)
+ # Test fallback for legacy solvers without the `category` property
+ # TODO: remove when all production solvers are updated
+ def test_solver_with_category_missing(self):
+
+ # client type filtering support
+ self.assertTrue(QPUClient.is_solver_handled(solver_object('solver', cat='')))
+ self.assertTrue(SoftwareClient.is_solver_handled(solver_object('c4-sw_x', cat='')))
+ self.assertTrue(HybridClient.is_solver_handled(solver_object('hybrid_x', cat='')))
+
+ # derived properties are correct
+ self.assertTrue(solver_object('solver', cat='').qpu)
+ self.assertFalse(solver_object('solver', cat='').software)
+ self.assertFalse(solver_object('solver', cat='').hybrid)
+ self.assertFalse(solver_object('c4-sw_x', cat='').qpu)
+ self.assertTrue(solver_object('c4-sw_x', cat='').software)
+ self.assertFalse(solver_object('c4-sw_x', cat='').hybrid)
+ self.assertFalse(solver_object('hybrid_x', cat='').qpu)
+ self.assertFalse(solver_object('hybrid_x', cat='').software)
+ self.assertTrue(solver_object('hybrid_x', cat='').hybrid)
+
class RequestEvent(Exception):
"""Throws exception when mocked client submits an HTTP request."""
|
Add support for solvers without `category` property
In [`0.7.3`](https://github.com/dwavesystems/dwave-cloud-client/releases/tag/0.7.3)+ we use the solver's `category` property to filter by type.
Since some old solvers are missing this property, we'll have to partially revert the clean-up done in #379 and fall back to the old heuristic of inferring solver type (category) from the solver id when `category` is missing.
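A hedged sketch of the id-based fallback the patch above restores, written as a free function for illustration:

```
# Infer solver category, preferring the explicit `category` property and
# falling back to id prefixes for legacy solvers (as in the patch above).
def infer_category(solver_id, properties):
    category = properties.get('category', '').lower()
    if category:
        return category
    if solver_id.startswith('c4-sw_'):
        return 'software'
    if solver_id.startswith('hybrid'):
        return 'hybrid'
    return 'qpu'

assert infer_category('c4-sw_sample', {}) == 'software'
assert infer_category('hybrid_v1', {}) == 'hybrid'
assert infer_category('DW_2000Q_6', {}) == 'qpu'
```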
|
0.0
|
a178c2f40f3d107c5b7e2a7ae43602fb24046016
|
[
"tests/test_mock_solver_loading.py::MockSolverLoading::test_solver_with_category_missing"
] |
[
"tests/test_client.py::ClientFactory::test_client_cert_from_config",
"tests/test_client.py::ClientFactory::test_client_cert_from_kwargs",
"tests/test_client.py::ClientFactory::test_client_type",
"tests/test_client.py::ClientFactory::test_custom_kwargs",
"tests/test_client.py::ClientFactory::test_custom_kwargs_overrides_config",
"tests/test_client.py::ClientFactory::test_default",
"tests/test_client.py::ClientFactory::test_headers_from_config",
"tests/test_client.py::ClientFactory::test_headers_from_kwargs",
"tests/test_client.py::ClientFactory::test_legacy_config_load_fallback",
"tests/test_client.py::ClientFactory::test_solver_features_from_config",
"tests/test_client.py::ClientFactory::test_solver_features_kwargs_override_config",
"tests/test_client.py::ClientFactory::test_solver_name_from_config",
"tests/test_client.py::ClientFactory::test_solver_name_overrides_config_features",
"tests/test_client.py::FeatureBasedSolverSelection::test_anneal_schedule",
"tests/test_client.py::FeatureBasedSolverSelection::test_availability_combo",
"tests/test_client.py::FeatureBasedSolverSelection::test_category",
"tests/test_client.py::FeatureBasedSolverSelection::test_default",
"tests/test_client.py::FeatureBasedSolverSelection::test_derived_category_properties",
"tests/test_client.py::FeatureBasedSolverSelection::test_derived_category_properties_without_category",
"tests/test_client.py::FeatureBasedSolverSelection::test_lower_noise_derived_property",
"tests/test_client.py::FeatureBasedSolverSelection::test_membership_ops",
"tests/test_client.py::FeatureBasedSolverSelection::test_name",
"tests/test_client.py::FeatureBasedSolverSelection::test_nested_properties_intermediate_key_lookup",
"tests/test_client.py::FeatureBasedSolverSelection::test_nested_properties_leaf_lookup",
"tests/test_client.py::FeatureBasedSolverSelection::test_num_qubits",
"tests/test_client.py::FeatureBasedSolverSelection::test_online",
"tests/test_client.py::FeatureBasedSolverSelection::test_order_by_callable",
"tests/test_client.py::FeatureBasedSolverSelection::test_order_by_edgecases",
"tests/test_client.py::FeatureBasedSolverSelection::test_order_by_string",
"tests/test_client.py::FeatureBasedSolverSelection::test_parameter_availability_check",
"tests/test_client.py::FeatureBasedSolverSelection::test_property_availability_check",
"tests/test_client.py::FeatureBasedSolverSelection::test_range_boolean_combo",
"tests/test_client.py::FeatureBasedSolverSelection::test_range_ops",
"tests/test_client.py::FeatureBasedSolverSelection::test_regex",
"tests/test_client.py::FeatureBasedSolverSelection::test_relational_ops",
"tests/test_client.py::FeatureBasedSolverSelection::test_set_ops",
"tests/test_client.py::FeatureBasedSolverSelection::test_solvers_deprecation",
"tests/test_mock_solver_loading.py::MockConnectivityTests::test_bad_token",
"tests/test_mock_solver_loading.py::MockConnectivityTests::test_bad_url",
"tests/test_mock_solver_loading.py::MockConnectivityTests::test_good_connection",
"tests/test_mock_solver_loading.py::MockSolverLoading::test_load_all_solvers",
"tests/test_mock_solver_loading.py::MockSolverLoading::test_load_missing_solver",
"tests/test_mock_solver_loading.py::MockSolverLoading::test_load_solver",
"tests/test_mock_solver_loading.py::MockSolverLoading::test_load_solver_broken_response",
"tests/test_mock_solver_loading.py::MockSolverLoading::test_load_solver_missing_data",
"tests/test_mock_solver_loading.py::MockSolverLoading::test_solver_feature_properties",
"tests/test_mock_solver_loading.py::MockSolverLoading::test_solver_filtering_in_client",
"tests/test_mock_solver_loading.py::MockLegacyConfiguration::test_env_args_set",
"tests/test_mock_solver_loading.py::MockLegacyConfiguration::test_env_with_file_set",
"tests/test_mock_solver_loading.py::MockLegacyConfiguration::test_explicit_only",
"tests/test_mock_solver_loading.py::MockLegacyConfiguration::test_explicit_with_file",
"tests/test_mock_solver_loading.py::MockLegacyConfiguration::test_file_read_error",
"tests/test_mock_solver_loading.py::MockLegacyConfiguration::test_nonexisting_file",
"tests/test_mock_solver_loading.py::MockLegacyConfiguration::test_only_file",
"tests/test_mock_solver_loading.py::MockLegacyConfiguration::test_only_file_key",
"tests/test_mock_solver_loading.py::MockConfiguration::test_custom_options"
] |
{
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-07-14 20:52:38+00:00
|
apache-2.0
| 2,043 |
|
dwavesystems__dwave-cloud-client-404
|
diff --git a/dwave/cloud/client.py b/dwave/cloud/client.py
index 05bd128..2dd109b 100644
--- a/dwave/cloud/client.py
+++ b/dwave/cloud/client.py
@@ -697,10 +697,10 @@ class Client(object):
Force refresh of cached list of solvers/properties.
order_by (callable/str/None, default='avg_load'):
- Solver sorting key function (or :class:`Solver` attribute/item
- dot-separated path). By default, solvers are sorted by average
- load. To explicitly not sort the solvers (and use the API-returned
- order), set ``order_by=None``.
+ Solver sorting key function (or :class:`~dwave.cloud.solver.Solver`
+ attribute/item dot-separated path). By default, solvers are sorted
+ by average load. To explicitly not sort the solvers (and use the
+ API-returned order), set ``order_by=None``.
Signature of the `key` `callable` is::
@@ -1035,7 +1035,7 @@ class Client(object):
warnings.warn("'solvers' is deprecated in favor of 'get_solvers'.", DeprecationWarning)
return self.get_solvers(refresh=refresh, **filters)
- def get_solver(self, name=None, refresh=False, **filters):
+ def get_solver(self, name=None, refresh=False, order_by='avg_load', **filters):
"""Load the configuration for a single solver.
Makes a blocking web call to `{endpoint}/solvers/remote/{solver_name}/`, where `{endpoint}`
@@ -1052,13 +1052,15 @@ class Client(object):
Dictionary of filters over features this solver has to have. For a list of
feature names and values, see: :meth:`~dwave.cloud.client.Client.get_solvers`.
- order_by (callable/str, default='id'):
- Solver sorting key function (or :class:`Solver` attribute name).
- By default, solvers are sorted by ID/name.
+ order_by (callable/str/None, default='avg_load'):
+ Solver sorting key function (or :class:`~dwave.cloud.solver.Solver`
+ attribute/item dot-separated path). By default, solvers are sorted by average
+ load. For details, see :meth:`~dwave.cloud.client.Client.get_solvers`.
refresh (bool):
- Return solver from cache (if cached with ``get_solvers()``),
- unless set to ``True``.
+ Return solver from cache (if cached with
+ :meth:`~dwave.cloud.client.Client.get_solvers`), unless set to
+ ``True``.
Returns:
:class:`.Solver`
@@ -1096,7 +1098,7 @@ class Client(object):
# get the first solver that satisfies all filters
try:
logger.debug("Fetching solvers according to filters=%r", filters)
- return self.get_solvers(refresh=refresh, **filters)[0]
+ return self.get_solvers(refresh=refresh, order_by=order_by, **filters)[0]
except IndexError:
raise SolverNotFoundError("Solver with the requested features not available")
@@ -1668,7 +1670,7 @@ class Client(object):
@staticmethod
@retried(_UPLOAD_PART_RETRIES, backoff=_UPLOAD_RETRIES_BACKOFF)
- def _upload_multipart_part(session, problem_id, part_id, part_stream,
+ def _upload_multipart_part(session, problem_id, part_id, part_generator,
uploaded_part_checksum=None):
"""Upload one problem part. Sync http request.
@@ -1679,8 +1681,9 @@ class Client(object):
Problem id.
part_id (int):
Part number/id.
- part_stream (:class:`io.BufferedIOBase`/binary-stream-like):
- Problem part data container that supports `read` operation.
+ part_generator (generator of :class:`io.BufferedIOBase`/binary-stream-like):
+ Callable that produces problem part data container that supports
+ `read` and `seek` operations.
uploaded_part_checksum (str/None):
Checksum of previously uploaded part. Optional, but if specified
checksum is verified, and part is uploaded only if checksums
@@ -1692,6 +1695,9 @@ class Client(object):
logger.debug("Uploading part_id=%r of problem_id=%r", part_id, problem_id)
+ # generate the mutable part stream from immutable stream generator
+ part_stream = part_generator()
+
# TODO: work-around to get a checksum of a binary stream (avoid 2x read)
data = part_stream.read()
digest = Client._digest(data)
@@ -1808,12 +1814,12 @@ class Client(object):
uploaded_parts[part_no] = checksum
return uploaded_parts
- def _upload_part_worker(self, problem_id, part_no, chunk_stream,
+ def _upload_part_worker(self, problem_id, part_no, chunk_generator,
uploaded_part_checksum=None):
with self.create_session() as session:
part_checksum = self._upload_multipart_part(
- session, problem_id, part_id=part_no, part_stream=chunk_stream,
+ session, problem_id, part_id=part_no, part_generator=chunk_generator,
uploaded_part_checksum=uploaded_part_checksum)
return part_no, part_checksum
@@ -1859,12 +1865,11 @@ class Client(object):
# enqueue all parts, worker skips if checksum matches
parts = {}
- streams = collections.OrderedDict(enumerate(chunks))
- for chunk_no, chunk_stream in streams.items():
+ for chunk_no, chunk_generator in enumerate(chunks.generators()):
part_no = chunk_no + 1
part_future = self._upload_part_executor.submit(
self._upload_part_worker,
- problem_id, part_no, chunk_stream,
+ problem_id, part_no, chunk_generator,
uploaded_part_checksum=uploaded_parts.get(part_no))
parts[part_no] = part_future
diff --git a/dwave/cloud/upload.py b/dwave/cloud/upload.py
index 2b48bb8..85d603b 100644
--- a/dwave/cloud/upload.py
+++ b/dwave/cloud/upload.py
@@ -22,6 +22,7 @@ import threading
from abc import abstractmethod
from collections.abc import Sized
+from functools import partial
__all__ = ['ChunkedData']
@@ -400,3 +401,9 @@ class ChunkedData(object):
def __iter__(self):
for idx in range(len(self)):
yield self.chunk(idx)
+
+ def generators(self):
+ """Iterator of (immutable) chunk generators."""
+
+ for idx in range(len(self)):
+ yield partial(self.chunk, idx=idx)
|
dwavesystems/dwave-cloud-client
|
88e14e9b745957569d86ca8d8fcd1461ff7101b9
|
diff --git a/tests/test_client.py b/tests/test_client.py
index 12f1726..e5536aa 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -717,6 +717,19 @@ class FeatureBasedSolverSelection(unittest.TestCase):
with self.assertRaises(TypeError):
self.client.get_solvers(order_by=list)
+ def test_order_by_respects_default_solver(self):
+ """order_by used in isolation should not affect default_solver filters (issue #401)"""
+
+ with Client('endpoint', 'token', solver=dict(name='qpu2')) as client:
+ # mock the network call to fetch all solvers
+ client._fetch_solvers = lambda **kw: self.solvers
+
+ # the default solver is set on client
+ self.assertEqual(client.get_solver(), self.qpu2)
+
+ # the default solver should not change when we add order_by
+ self.assertEqual(client.get_solver(order_by='id'), self.qpu2)
+
def test_order_by_string(self):
# sort by Solver inferred properties
self.assertEqual(self.client.get_solvers(order_by='id'), [self.hybrid, self.qpu1, self.qpu2, self.software])
diff --git a/tests/test_upload.py b/tests/test_upload.py
index baf12a2..6717d1e 100644
--- a/tests/test_upload.py
+++ b/tests/test_upload.py
@@ -358,6 +358,12 @@ class TestChunkedData(unittest.TestCase):
chunks_expected = [self.data]
self.verify_chunking(cd, chunks_expected)
+ def test_chunk_generators(self):
+ cd = ChunkedData(self.data, chunk_size=3)
+ chunks_expected = [b'012', b'345', b'678', b'9']
+ chunks_generated = [g().read() for g in cd.generators()]
+ self.assertListEqual(chunks_expected, chunks_generated)
+
@unittest.skipUnless(config, "No live server configuration available.")
class TestMultipartUpload(unittest.TestCase):
@@ -620,7 +626,6 @@ class TestMockedMultipartUpload(unittest.TestCase):
def put(path, data, headers, seq=iter(range(Client._UPLOAD_PART_RETRIES+1))):
body = data.read()
- data.seek(0)
headers = json.dumps(sorted(headers.items()))
keys = [
(
|
Upload part worker does not recover from failures to correctly retry upload
Upload of an individual problem part should be retried 2 times if it fails due to an unhandled exception (e.g. SAPI returning `502 Bad Gateway` during overload). This is achieved with the `retried` decorator.
https://github.com/dwavesystems/dwave-cloud-client/blob/88e14e9b745957569d86ca8d8fcd1461ff7101b9/dwave/cloud/client.py#L1670-L1672
The problem is that not all arguments are immutable: `part_stream` (the binary stream of uploaded part data) must be positioned at the start on every attempt, but a failed attempt leaves it partially or fully consumed.
We can either always rewind `part_stream`, or instead pass in an immutable "chunk/part stream factory".
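A hedged sketch of the factory approach, mirroring the `functools.partial`-based `ChunkedData.generators()` in the patch above:

```
# A failed attempt consumes the stream, so each retry calls a generator
# that produces a fresh chunk stream positioned at the start.
import io
from functools import partial

def chunk(data, idx, size):
    return io.BytesIO(data[idx * size:(idx + 1) * size])

data = b'0123456789'
generators = [partial(chunk, data, idx, 3) for idx in range(4)]

for attempt in range(2):        # a retry simply re-invokes the generator
    stream = generators[0]()
    assert stream.read() == b'012'
```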
|
0.0
|
88e14e9b745957569d86ca8d8fcd1461ff7101b9
|
[
"tests/test_client.py::FeatureBasedSolverSelection::test_order_by_respects_default_solver",
"tests/test_upload.py::TestChunkedData::test_chunk_generators",
"tests/test_upload.py::TestMockedMultipartUpload::test_part_upload_retried"
] |
[
"tests/test_client.py::ClientFactory::test_client_cert_from_config",
"tests/test_client.py::ClientFactory::test_client_cert_from_kwargs",
"tests/test_client.py::ClientFactory::test_client_type",
"tests/test_client.py::ClientFactory::test_custom_kwargs",
"tests/test_client.py::ClientFactory::test_custom_kwargs_overrides_config",
"tests/test_client.py::ClientFactory::test_default",
"tests/test_client.py::ClientFactory::test_headers_from_config",
"tests/test_client.py::ClientFactory::test_headers_from_kwargs",
"tests/test_client.py::ClientFactory::test_legacy_config_load_fallback",
"tests/test_client.py::ClientFactory::test_solver_features_from_config",
"tests/test_client.py::ClientFactory::test_solver_features_kwargs_override_config",
"tests/test_client.py::ClientFactory::test_solver_name_from_config",
"tests/test_client.py::ClientFactory::test_solver_name_overrides_config_features",
"tests/test_client.py::FeatureBasedSolverSelection::test_anneal_schedule",
"tests/test_client.py::FeatureBasedSolverSelection::test_availability_combo",
"tests/test_client.py::FeatureBasedSolverSelection::test_category",
"tests/test_client.py::FeatureBasedSolverSelection::test_default",
"tests/test_client.py::FeatureBasedSolverSelection::test_derived_category_properties",
"tests/test_client.py::FeatureBasedSolverSelection::test_derived_category_properties_without_category",
"tests/test_client.py::FeatureBasedSolverSelection::test_lower_noise_derived_property",
"tests/test_client.py::FeatureBasedSolverSelection::test_membership_ops",
"tests/test_client.py::FeatureBasedSolverSelection::test_name",
"tests/test_client.py::FeatureBasedSolverSelection::test_nested_properties_intermediate_key_lookup",
"tests/test_client.py::FeatureBasedSolverSelection::test_nested_properties_leaf_lookup",
"tests/test_client.py::FeatureBasedSolverSelection::test_num_qubits",
"tests/test_client.py::FeatureBasedSolverSelection::test_online",
"tests/test_client.py::FeatureBasedSolverSelection::test_order_by_callable",
"tests/test_client.py::FeatureBasedSolverSelection::test_order_by_edgecases",
"tests/test_client.py::FeatureBasedSolverSelection::test_order_by_string",
"tests/test_client.py::FeatureBasedSolverSelection::test_parameter_availability_check",
"tests/test_client.py::FeatureBasedSolverSelection::test_property_availability_check",
"tests/test_client.py::FeatureBasedSolverSelection::test_range_boolean_combo",
"tests/test_client.py::FeatureBasedSolverSelection::test_range_ops",
"tests/test_client.py::FeatureBasedSolverSelection::test_regex",
"tests/test_client.py::FeatureBasedSolverSelection::test_relational_ops",
"tests/test_client.py::FeatureBasedSolverSelection::test_set_ops",
"tests/test_client.py::FeatureBasedSolverSelection::test_solvers_deprecation",
"tests/test_upload.py::TestGettableABC::test_invalid",
"tests/test_upload.py::TestGettableABC::test_valid",
"tests/test_upload.py::TestGettables::test_gettable_file_critical_section_respected",
"tests/test_upload.py::TestGettables::test_gettable_file_from_disk_file",
"tests/test_upload.py::TestGettables::test_gettable_file_from_file_like",
"tests/test_upload.py::TestGettables::test_gettable_file_from_memory_bytes",
"tests/test_upload.py::TestGettables::test_gettable_file_from_memory_string",
"tests/test_upload.py::TestGettables::test_gettable_memory_from_bytes_like",
"tests/test_upload.py::TestFileView::test_file_interface",
"tests/test_upload.py::TestFileView::test_view_interface",
"tests/test_upload.py::TestChunkedData::test_chunk_size_edges",
"tests/test_upload.py::TestChunkedData::test_chunks_from_bytearray",
"tests/test_upload.py::TestChunkedData::test_chunks_from_bytes",
"tests/test_upload.py::TestChunkedData::test_chunks_from_memory_file",
"tests/test_upload.py::TestChunkedData::test_chunks_from_str",
"tests/test_upload.py::TestMockedMultipartUpload::test_partial_upload",
"tests/test_upload.py::TestMockedMultipartUpload::test_problem_reupload_end_to_end",
"tests/test_upload.py::TestMockedMultipartUpload::test_single_problem_end_to_end"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-07-21 19:13:27+00:00
|
apache-2.0
| 2,044 |
|
dwavesystems__dwave-cloud-client-417
|
diff --git a/dwave/cloud/client.py b/dwave/cloud/client.py
index a660dab..1c7b7e0 100644
--- a/dwave/cloud/client.py
+++ b/dwave/cloud/client.py
@@ -1041,7 +1041,7 @@ class Client(object):
warnings.warn("'solvers' is deprecated in favor of 'get_solvers'.", DeprecationWarning)
return self.get_solvers(refresh=refresh, **filters)
- def get_solver(self, name=None, refresh=False, order_by='avg_load', **filters):
+ def get_solver(self, name=None, refresh=False, **filters):
"""Load the configuration for a single solver.
Makes a blocking web call to `{endpoint}/solvers/remote/{solver_name}/`, where `{endpoint}`
@@ -1097,13 +1097,21 @@ class Client(object):
if name is not None:
filters.setdefault('name', name)
+ # allow `order_by` to be specified as part of solver features dict
+ order_by = filters.pop('order_by', None)
+
# in absence of other filters, config/env solver filters/name are used
if not filters and self.default_solver:
filters = self.default_solver
+ # allow `order_by` from default config/init override
+ if order_by is None:
+ order_by = filters.pop('order_by', 'avg_load')
+
# get the first solver that satisfies all filters
try:
- logger.debug("Fetching solvers according to filters=%r", filters)
+ logger.debug("Fetching solvers according to filters=%r, order_by=%r",
+ filters, order_by)
return self.get_solvers(refresh=refresh, order_by=order_by, **filters)[0]
except IndexError:
raise SolverNotFoundError("Solver with the requested features not available")
|
dwavesystems/dwave-cloud-client
|
b0b22210be5c5cb516166d9f9425759392b87b11
|
diff --git a/tests/test_client.py b/tests/test_client.py
index 9718acc..0ceef46 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -429,7 +429,8 @@ class FeatureBasedSolverSelection(unittest.TestCase):
},
"id": "qpu1",
"description": "QPU Chimera solver",
- "status": "online"
+ "status": "online",
+ "avg_load": 0.1
})
self.qpu2 = StructuredSolver(client=None, data={
"properties": {
@@ -451,7 +452,8 @@ class FeatureBasedSolverSelection(unittest.TestCase):
"vfyc": True
},
"id": "qpu2",
- "description": "QPU Pegasus solver"
+ "description": "QPU Pegasus solver",
+ "avg_load": 0.2
})
self.software = StructuredSolver(client=None, data={
"properties": {
@@ -618,14 +620,14 @@ class FeatureBasedSolverSelection(unittest.TestCase):
self.assertSolvers(self.client.get_solvers(num_qubits__lt=7), [self.qpu1, self.qpu2])
# skip solver if LHS value not defined (None)
- self.assertSolvers(self.client.get_solvers(avg_load__gt=0), [self.software])
- self.assertSolvers(self.client.get_solvers(avg_load__gte=0), [self.software])
- self.assertSolvers(self.client.get_solvers(avg_load__lt=1), [self.software])
- self.assertSolvers(self.client.get_solvers(avg_load__lte=1), [self.software])
+ self.assertSolvers(self.client.get_solvers(avg_load__gt=0), [self.qpu1, self.qpu2, self.software])
+ self.assertSolvers(self.client.get_solvers(avg_load__gte=0), [self.qpu1, self.qpu2, self.software])
+ self.assertSolvers(self.client.get_solvers(avg_load__lt=1), [self.qpu1, self.qpu2, self.software])
+ self.assertSolvers(self.client.get_solvers(avg_load__lte=1), [self.qpu1, self.qpu2, self.software])
self.assertSolvers(self.client.get_solvers(avg_load=0.7), [self.software])
self.assertSolvers(self.client.get_solvers(avg_load__eq=0.7), [self.software])
- self.assertSolvers(self.client.get_solvers(avg_load=None), [self.qpu1, self.qpu2, self.hybrid])
- self.assertSolvers(self.client.get_solvers(avg_load__eq=None), [self.qpu1, self.qpu2, self.hybrid])
+ self.assertSolvers(self.client.get_solvers(avg_load=None), [self.hybrid])
+ self.assertSolvers(self.client.get_solvers(avg_load__eq=None), [self.hybrid])
def test_range_ops(self):
# value within range
@@ -671,8 +673,8 @@ class FeatureBasedSolverSelection(unittest.TestCase):
# invalid LHS
self.assertSolvers(self.client.get_solvers(some_set__contains=1), [self.software])
- self.assertSolvers(self.client.get_solvers(avg_load__in=[None]), [self.qpu1, self.qpu2, self.hybrid])
- self.assertSolvers(self.client.get_solvers(avg_load__in=[None, 0.7]), self.solvers)
+ self.assertSolvers(self.client.get_solvers(avg_load__in=[None]), [self.hybrid])
+ self.assertSolvers(self.client.get_solvers(avg_load__in=[None, 0.1, 0.2, 0.7]), self.solvers)
def test_set_ops(self):
# property issubset
@@ -740,7 +742,7 @@ class FeatureBasedSolverSelection(unittest.TestCase):
def test_order_by_edgecases(self):
# default: sort by avg_load
- self.assertEqual(self.client.get_solvers(), [self.software, self.qpu1, self.qpu2, self.hybrid])
+ self.assertEqual(self.client.get_solvers(), [self.qpu1, self.qpu2, self.software, self.hybrid])
# explicit no sort
self.assertEqual(self.client.get_solvers(order_by=None), self.solvers)
@@ -763,12 +765,45 @@ class FeatureBasedSolverSelection(unittest.TestCase):
# mock the network call to fetch all solvers
client._fetch_solvers = lambda **kw: self.solvers
- # the default solver is set on client
+ # the default solver was set on client init
self.assertEqual(client.get_solver(), self.qpu2)
# the default solver should not change when we add order_by
self.assertEqual(client.get_solver(order_by='id'), self.qpu2)
+ with Client('endpoint', 'token', solver=dict(category='qpu')) as client:
+ # mock the network call to fetch all solvers
+ client._fetch_solvers = lambda **kw: self.solvers
+
+ # test default order_by is avg_load
+ self.assertEqual(client.get_solver(), self.qpu1)
+
+ # but we can change it, without affecting solver filters
+ self.assertEqual(client.get_solver(order_by='-avg_load'), self.qpu2)
+
+ def test_order_by_in_default_solver(self):
+ """order_by can be specified as part of default_solver filters (issue #407)"""
+
+ with Client('endpoint', 'token', solver=dict(order_by='id')) as client:
+ # mock the network call to fetch all solvers
+ client._fetch_solvers = lambda **kw: self.solvers
+
+ # the default solver was set on client init
+ self.assertEqual(client.get_solver(), self.hybrid)
+
+ # the default solver can be overridden
+ self.assertEqual(client.get_solver(order_by='-id'), self.software)
+
+ with Client('endpoint', 'token', solver=dict(qpu=True, order_by='-num_active_qubits')) as client:
+ # mock the network call to fetch all solvers
+ client._fetch_solvers = lambda **kw: self.solvers
+
+ # the default solver was set on client init
+ self.assertEqual(client.get_solver(), self.qpu2)
+
+ # adding order_by doesn't change other default solver features
+ self.assertEqual(client.get_solver(order_by='num_active_qubits'), self.qpu1)
+
def test_order_by_string(self):
# sort by Solver inferred properties
self.assertEqual(self.client.get_solvers(order_by='id'), [self.hybrid, self.qpu1, self.qpu2, self.software])
@@ -794,7 +829,7 @@ class FeatureBasedSolverSelection(unittest.TestCase):
def test_order_by_callable(self):
# sort by Solver inferred properties
self.assertEqual(self.client.get_solvers(order_by=lambda solver: solver.id), [self.hybrid, self.qpu1, self.qpu2, self.software])
- self.assertEqual(self.client.get_solvers(order_by=lambda solver: solver.avg_load), [self.software, self.qpu1, self.qpu2, self.hybrid])
+ self.assertEqual(self.client.get_solvers(order_by=lambda solver: solver.avg_load), [self.qpu1, self.qpu2, self.software, self.hybrid])
# sort by solver property
self.assertEqual(self.client.get_solvers(order_by=lambda solver: solver.properties.get('num_qubits')), self.solvers)
|
Allow `order_by` to be part of solver definition dict, again
#405 broke the original, intentional behavior, where `order_by` could be specified as part of the solver definition, e.g.:
```
client = Client.from_config(solver=dict(qpu=True, order_by='-num_active_qubits'))
```
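A minimal sketch of the intended behavior, using a hypothetical `pick_solver` helper (not the library's actual code): an `order_by` key embedded in the solver definition acts as an ordering hint and is popped out before the remaining keys are used as feature filters:
```python
def pick_solver(filters, order_by=None):
    filters = dict(filters)  # copy, so the caller's definition is untouched
    if order_by is None:
        # fall back to an `order_by` embedded in the solver definition
        order_by = filters.pop('order_by', 'avg_load')
    return filters, order_by

filters, order_by = pick_solver(dict(qpu=True, order_by='-num_active_qubits'))
assert filters == dict(qpu=True)
assert order_by == '-num_active_qubits'
```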
|
0.0
|
b0b22210be5c5cb516166d9f9425759392b87b11
|
[
"tests/test_client.py::FeatureBasedSolverSelection::test_order_by_in_default_solver"
] |
[
"tests/test_client.py::ClientFactory::test_client_cert_from_config",
"tests/test_client.py::ClientFactory::test_client_cert_from_kwargs",
"tests/test_client.py::ClientFactory::test_client_type",
"tests/test_client.py::ClientFactory::test_custom_kwargs",
"tests/test_client.py::ClientFactory::test_custom_kwargs_overrides_config",
"tests/test_client.py::ClientFactory::test_default",
"tests/test_client.py::ClientFactory::test_headers_from_config",
"tests/test_client.py::ClientFactory::test_headers_from_kwargs",
"tests/test_client.py::ClientFactory::test_legacy_config_load_fallback",
"tests/test_client.py::ClientFactory::test_polling_params_from_config",
"tests/test_client.py::ClientFactory::test_polling_params_from_kwargs",
"tests/test_client.py::ClientFactory::test_solver_features_from_config",
"tests/test_client.py::ClientFactory::test_solver_features_kwargs_override_config",
"tests/test_client.py::ClientFactory::test_solver_name_from_config",
"tests/test_client.py::ClientFactory::test_solver_name_overrides_config_features",
"tests/test_client.py::FeatureBasedSolverSelection::test_anneal_schedule",
"tests/test_client.py::FeatureBasedSolverSelection::test_availability_combo",
"tests/test_client.py::FeatureBasedSolverSelection::test_category",
"tests/test_client.py::FeatureBasedSolverSelection::test_default",
"tests/test_client.py::FeatureBasedSolverSelection::test_derived_category_properties",
"tests/test_client.py::FeatureBasedSolverSelection::test_derived_category_properties_without_category",
"tests/test_client.py::FeatureBasedSolverSelection::test_lower_noise_derived_property",
"tests/test_client.py::FeatureBasedSolverSelection::test_membership_ops",
"tests/test_client.py::FeatureBasedSolverSelection::test_name",
"tests/test_client.py::FeatureBasedSolverSelection::test_nested_properties_intermediate_key_lookup",
"tests/test_client.py::FeatureBasedSolverSelection::test_nested_properties_leaf_lookup",
"tests/test_client.py::FeatureBasedSolverSelection::test_num_qubits",
"tests/test_client.py::FeatureBasedSolverSelection::test_online",
"tests/test_client.py::FeatureBasedSolverSelection::test_order_by_callable",
"tests/test_client.py::FeatureBasedSolverSelection::test_order_by_edgecases",
"tests/test_client.py::FeatureBasedSolverSelection::test_order_by_respects_default_solver",
"tests/test_client.py::FeatureBasedSolverSelection::test_order_by_string",
"tests/test_client.py::FeatureBasedSolverSelection::test_parameter_availability_check",
"tests/test_client.py::FeatureBasedSolverSelection::test_property_availability_check",
"tests/test_client.py::FeatureBasedSolverSelection::test_range_boolean_combo",
"tests/test_client.py::FeatureBasedSolverSelection::test_range_ops",
"tests/test_client.py::FeatureBasedSolverSelection::test_regex",
"tests/test_client.py::FeatureBasedSolverSelection::test_relational_ops",
"tests/test_client.py::FeatureBasedSolverSelection::test_set_ops",
"tests/test_client.py::FeatureBasedSolverSelection::test_solvers_deprecation"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-08-26 15:27:51+00:00
|
apache-2.0
| 2,045 |
|
dwavesystems__dwave-cloud-client-421
|
diff --git a/dwave/cloud/utils.py b/dwave/cloud/utils.py
index fbc82ad..41a026c 100644
--- a/dwave/cloud/utils.py
+++ b/dwave/cloud/utils.py
@@ -321,13 +321,13 @@ def user_agent(name=None, version=None):
if name and version:
tags.append((name, version))
- tags = [
+ tags.extend([
("python", platform.python_version()),
_interpreter(),
("machine", platform.machine() or 'unknown'),
("system", platform.system() or 'unknown'),
("platform", platform.platform() or 'unknown'),
- ]
+ ])
# add platform-specific tags
tags.extend(get_platform_tags())
|
dwavesystems/dwave-cloud-client
|
c8ec9a195a363aa6ddb0d7f1e56729b9d587d2f9
|
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 162456b..887f6e1 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -348,6 +348,19 @@ class TestCli(unittest.TestCase):
self.assertEqual(result.exit_code, 0)
+ def test_platform(self):
+ runner = CliRunner()
+ result = runner.invoke(cli, ['--platform'])
+
+ # verify exit code and stdout printout
+ self.assertEqual(result.exit_code, 0)
+
+ from dwave.cloud.package_info import __packagename__, __version__
+ self.assertNotIn(__packagename__, result.output)
+ required = ['python', 'machine', 'system', 'platform']
+ for key in required:
+ self.assertIn(key, result.output)
+
if __name__ == '__main__':
unittest.main()
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 1ea5bda..2ec424f 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -22,7 +22,8 @@ from datetime import datetime
from dwave.cloud.utils import (
uniform_iterator, uniform_get, strip_head, strip_tail,
active_qubits, generate_random_ising_problem,
- default_text_input, utcnow, cached, retried, parse_loglevel)
+ default_text_input, utcnow, cached, retried, parse_loglevel,
+ user_agent)
class TestSimpleUtils(unittest.TestCase):
@@ -138,6 +139,14 @@ class TestSimpleUtils(unittest.TestCase):
self.assertEqual(parse_loglevel(str(logging.INFO)), logging.INFO)
self.assertEqual(parse_loglevel(' %d ' % logging.INFO), logging.INFO)
+ def test_user_agent(self):
+ from dwave.cloud.package_info import __packagename__, __version__
+ ua = user_agent(__packagename__, __version__)
+
+ required = [__packagename__, 'python', 'machine', 'system', 'platform']
+ for key in required:
+ self.assertIn(key, ua)
+
class TestCachedDecorator(unittest.TestCase):
|
User-Agent is missing dwave-cloud-client version
The bug was introduced in commit 331c37b67 and released in [0.7.5](https://github.com/dwavesystems/dwave-cloud-client/compare/0.7.4...0.7.5), so it affects versions 0.7.5 and 0.7.6.
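A minimal sketch of the bug pattern, with illustrative tag values: rebinding the `tags` name discarded the package tag appended earlier, while `extend` (the fix) keeps it:
```python
tags = [("dwave-cloud-client", "0.7.7")]          # package tag added first
# buggy (0.7.5/0.7.6): rebinding replaced the list, dropping the line above
# tags = [("python", "3.8.10"), ("system", "Linux")]
# fixed: extend the existing list instead of replacing it
tags.extend([("python", "3.8.10"), ("system", "Linux")])

ua = " ".join("{}/{}".format(name, version) for name, version in tags)
assert ua.startswith("dwave-cloud-client/0.7.7")
```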
|
0.0
|
c8ec9a195a363aa6ddb0d7f1e56729b9d587d2f9
|
[
"tests/test_utils.py::TestSimpleUtils::test_user_agent"
] |
[
"tests/test_cli.py::TestCli::test_config_create",
"tests/test_cli.py::TestCli::test_config_ls",
"tests/test_cli.py::TestCli::test_configure_inspect",
"tests/test_cli.py::TestCli::test_ping",
"tests/test_cli.py::TestCli::test_platform",
"tests/test_cli.py::TestCli::test_sample",
"tests/test_cli.py::TestCli::test_solvers",
"tests/test_cli.py::TestCli::test_upload",
"tests/test_utils.py::TestSimpleUtils::test_active_qubits_dict",
"tests/test_utils.py::TestSimpleUtils::test_active_qubits_list",
"tests/test_utils.py::TestSimpleUtils::test_default_text_input",
"tests/test_utils.py::TestSimpleUtils::test_generate_random_ising_problem",
"tests/test_utils.py::TestSimpleUtils::test_generate_random_ising_problem_default_solver_ranges",
"tests/test_utils.py::TestSimpleUtils::test_generate_random_ising_problem_with_user_constrained_ranges",
"tests/test_utils.py::TestSimpleUtils::test_parse_loglevel_invalid",
"tests/test_utils.py::TestSimpleUtils::test_parse_loglevel_numeric_and_symbolic",
"tests/test_utils.py::TestSimpleUtils::test_strip_head",
"tests/test_utils.py::TestSimpleUtils::test_strip_tail",
"tests/test_utils.py::TestSimpleUtils::test_uniform_get",
"tests/test_utils.py::TestSimpleUtils::test_uniform_iterator",
"tests/test_utils.py::TestSimpleUtils::test_utcnow",
"tests/test_utils.py::TestCachedDecorator::test_args_collision",
"tests/test_utils.py::TestCachedDecorator::test_args_hashing",
"tests/test_utils.py::TestCachedDecorator::test_default_maxage",
"tests/test_utils.py::TestCachedDecorator::test_exceptions",
"tests/test_utils.py::TestCachedDecorator::test_expiry",
"tests/test_utils.py::TestRetriedDecorator::test_backoff_constant",
"tests/test_utils.py::TestRetriedDecorator::test_backoff_func",
"tests/test_utils.py::TestRetriedDecorator::test_backoff_seq",
"tests/test_utils.py::TestRetriedDecorator::test_decorator",
"tests/test_utils.py::TestRetriedDecorator::test_exc_raised",
"tests/test_utils.py::TestRetriedDecorator::test_func_called",
"tests/test_utils.py::TestRetriedDecorator::test_func_called_only_until_succeeds"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-08-28 16:45:45+00:00
|
apache-2.0
| 2,046 |
|
dwavesystems__dwave-cloud-client-429
|
diff --git a/dwave/cloud/client.py b/dwave/cloud/client.py
index dd68167..abace57 100644
--- a/dwave/cloud/client.py
+++ b/dwave/cloud/client.py
@@ -1068,11 +1068,13 @@ class Client(object):
# in absence of other filters, config/env solver filters/name are used
if not filters and self.default_solver:
- filters = self.default_solver
+ filters = copy.deepcopy(self.default_solver)
# allow `order_by` from default config/init override
if order_by is None:
order_by = filters.pop('order_by', 'avg_load')
+ else:
+ filters.pop('order_by', None)
# get the first solver that satisfies all filters
try:
|
dwavesystems/dwave-cloud-client
|
405c1d1642a966b1f5e4e56c818f070715b7b4fb
|
diff --git a/tests/test_mock_solver_loading.py b/tests/test_mock_solver_loading.py
index e6fa54e..a4db844 100644
--- a/tests/test_mock_solver_loading.py
+++ b/tests/test_mock_solver_loading.py
@@ -208,6 +208,22 @@ class MockSolverLoading(unittest.TestCase):
with self.assertRaises(ValueError):
client.get_solver(solver_name)
+ def test_get_solver_reproducible(self):
+ """get_solver should return same solver (assuming cache hasn't changed)"""
+
+ with requests_mock.mock() as m:
+ setup_server(m)
+
+ # prefer solvers with longer name: that's our second solver
+ defaults = dict(solver=dict(order_by=lambda s: -len(s.id)))
+
+ with Client(url, token, defaults=defaults) as client:
+ solver = client.get_solver()
+ self.assertEqual(solver.id, second_solver_name)
+
+ solver = client.get_solver()
+ self.assertEqual(solver.id, second_solver_name)
+
def test_solver_filtering_in_client(self):
# base client
self.assertTrue(Client.is_solver_handled(solver_object('test', 'qpu')))
|
Client.get_solver mutates instance defaults
```
defaults = dict(solver=dict(order_by='id'))
with Client.from_config(defaults=defaults) as client:
assert 'order_by' in client.defaults['solver']
solver = client.get_solver()
assert 'order_by' not in client.defaults['solver']
```
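A minimal, self-contained sketch of the fix, with names simplified from the client code: deep-copy the default solver definition before popping `order_by`, so repeated `get_solver()` calls see the same defaults:
```python
import copy

defaults = {'solver': {'order_by': 'id', 'qpu': True}}

def filters_from_defaults(defaults):
    filters = copy.deepcopy(defaults['solver'])  # work on a copy, not an alias
    order_by = filters.pop('order_by', 'avg_load')
    return filters, order_by

filters_from_defaults(defaults)
filters_from_defaults(defaults)
assert 'order_by' in defaults['solver']          # defaults left intact
```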
|
0.0
|
405c1d1642a966b1f5e4e56c818f070715b7b4fb
|
[
"tests/test_mock_solver_loading.py::MockSolverLoading::test_get_solver_reproducible"
] |
[
"tests/test_mock_solver_loading.py::MockConnectivityTests::test_bad_token",
"tests/test_mock_solver_loading.py::MockConnectivityTests::test_bad_url",
"tests/test_mock_solver_loading.py::MockConnectivityTests::test_good_connection",
"tests/test_mock_solver_loading.py::MockSolverLoading::test_load_all_solvers",
"tests/test_mock_solver_loading.py::MockSolverLoading::test_load_missing_solver",
"tests/test_mock_solver_loading.py::MockSolverLoading::test_load_solver",
"tests/test_mock_solver_loading.py::MockSolverLoading::test_load_solver_broken_response",
"tests/test_mock_solver_loading.py::MockSolverLoading::test_load_solver_missing_data",
"tests/test_mock_solver_loading.py::MockSolverLoading::test_solver_feature_properties",
"tests/test_mock_solver_loading.py::MockSolverLoading::test_solver_filtering_in_client",
"tests/test_mock_solver_loading.py::MockSolverLoading::test_solver_with_category_missing",
"tests/test_mock_solver_loading.py::MockConfiguration::test_custom_options"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-09-22 20:07:38+00:00
|
apache-2.0
| 2,047 |
|
dwavesystems__dwave-cloud-client-431
|
diff --git a/dwave/cloud/client.py b/dwave/cloud/client.py
index abace57..baf8341 100644
--- a/dwave/cloud/client.py
+++ b/dwave/cloud/client.py
@@ -283,7 +283,8 @@ class Client(object):
logger.debug("File/env config loaded: %r", config)
# manual config override with client constructor options
- config.update(client=client, **kwargs)
+ kwargs.update(client=client)
+ config.update({k: v for k, v in kwargs.items() if v is not None})
logger.debug("Code config loaded: %r", config)
from dwave.cloud import qpu, sw, hybrid
diff --git a/dwave/cloud/config.py b/dwave/cloud/config.py
index a8418ed..455f086 100644
--- a/dwave/cloud/config.py
+++ b/dwave/cloud/config.py
@@ -754,7 +754,12 @@ def load_config(config_file=None, profile=None, **kwargs):
:exc:`~dwave.cloud.exceptions.ConfigFileParseError`:
Config file parse failed.
- Examples
+ Note:
+ Prior to 0.8.0, some keyword arguments did not overwrite config
+ variables when their value was ``None``. Now we consistently do
+ :meth:`dict.update` on the config read from file/env for all ``kwargs``.
+
+ Examples:
This example loads the configuration from an auto-detected configuration file
in the home directory of a Windows system user.
|
dwavesystems/dwave-cloud-client
|
5deb326e53637b5cf8a9b1d7f32ef46207faa29a
|
diff --git a/tests/test_client.py b/tests/test_client.py
index bc6717a..6c468c2 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -28,7 +28,6 @@ from contextlib import contextmanager
import requests.exceptions
from plucky import merge
-from dwave.cloud.config import load_config
from dwave.cloud.client import Client
from dwave.cloud.solver import StructuredSolver, UnstructuredSolver
from dwave.cloud.exceptions import (
@@ -174,7 +173,7 @@ class ClientFactory(unittest.TestCase):
def test_default(self):
conf = {k: k for k in 'endpoint token'.split()}
- with mock.patch("dwave.cloud.client.load_config", lambda **kwargs: conf):
+ with mock.patch("dwave.cloud.client.load_config", lambda **kw: conf):
with dwave.cloud.Client.from_config() as client:
self.assertEqual(client.endpoint, 'endpoint')
self.assertEqual(client.token, 'token')
@@ -185,11 +184,8 @@ class ClientFactory(unittest.TestCase):
def test_client_type(self):
conf = {k: k for k in 'endpoint token'.split()}
- def mocked_load_config(**kwargs):
- kwargs.update(conf)
- return kwargs
- with mock.patch("dwave.cloud.client.load_config", mocked_load_config):
+ with mock.patch("dwave.cloud.client.load_config", lambda **kw: conf):
with dwave.cloud.Client.from_config() as client:
self.assertIsInstance(client, dwave.cloud.client.Client)
@@ -219,7 +215,7 @@ class ClientFactory(unittest.TestCase):
def test_custom_kwargs(self):
conf = {k: k for k in 'endpoint token'.split()}
- with mock.patch("dwave.cloud.client.load_config", lambda **kwargs: conf):
+ with mock.patch("dwave.cloud.client.load_config", lambda **kw: conf):
with mock.patch("dwave.cloud.client.Client.__init__", return_value=None) as init:
dwave.cloud.Client.from_config(custom='custom')
init.assert_called_once_with(
@@ -227,7 +223,7 @@ class ClientFactory(unittest.TestCase):
def test_custom_kwargs_overrides_config(self):
conf = {k: k for k in 'endpoint token custom'.split()}
- with mock.patch("dwave.cloud.client.load_config", lambda **kwargs: conf):
+ with mock.patch("dwave.cloud.client.load_config", lambda **kw: conf):
with mock.patch("dwave.cloud.client.Client.__init__", return_value=None) as init:
dwave.cloud.Client.from_config(custom='new-custom')
init.assert_called_once_with(
@@ -238,7 +234,7 @@ class ClientFactory(unittest.TestCase):
conf = {k: k for k in 'endpoint token'.split()}
conf.update(solver=json.dumps(solver_def))
- with mock.patch("dwave.cloud.client.load_config", lambda **kwargs: conf):
+ with mock.patch("dwave.cloud.client.load_config", lambda **kw: conf):
with dwave.cloud.Client.from_config() as client:
self.assertEqual(client.default_solver, solver_def)
@@ -246,7 +242,7 @@ class ClientFactory(unittest.TestCase):
solver_def = {"name__eq": "solver"}
conf = {k: k for k in 'endpoint token solver'.split()}
- with mock.patch("dwave.cloud.client.load_config", lambda **kwargs: conf):
+ with mock.patch("dwave.cloud.client.load_config", lambda **kw: conf):
with dwave.cloud.Client.from_config() as client:
self.assertEqual(client.default_solver, solver_def)
@@ -254,28 +250,34 @@ class ClientFactory(unittest.TestCase):
new_solver_def = {"software": True}
conf = {k: k for k in 'endpoint token solver'.split()}
- def load_config(**kwargs):
- return merge(kwargs, conf, op=lambda a, b: a or b)
-
- with mock.patch("dwave.cloud.client.load_config", load_config):
+ with mock.patch("dwave.cloud.client.load_config", lambda **kw: conf):
with dwave.cloud.Client.from_config(solver=new_solver_def) as client:
self.assertEqual(client.default_solver, new_solver_def)
+ def test_none_kwargs_do_not_override_config(self):
+ """kwargs with value ``None`` should be ignored (issue #430)"""
+ conf = {k: k for k in 'endpoint token'.split()}
+ solver_json = '{"qpu": true}'
+ conf.update(solver=solver_json)
+ solver = json.loads(solver_json)
+
+ with mock.patch("dwave.cloud.client.load_config", lambda **kw: conf):
+ with dwave.cloud.Client.from_config(endpoint=None, solver=None) as client:
+ self.assertEqual(client.endpoint, conf['endpoint'])
+ self.assertEqual(client.default_solver, solver)
+
def test_solver_name_overrides_config_features(self):
conf = {k: k for k in 'endpoint token solver'.split()}
conf.update(solver=json.dumps({"software": True}))
- def load_config(**kwargs):
- return merge(kwargs, conf, op=lambda a, b: a or b)
-
- with mock.patch("dwave.cloud.client.load_config", load_config):
+ with mock.patch("dwave.cloud.client.load_config", lambda **kw: conf):
with dwave.cloud.Client.from_config(solver='solver') as client:
self.assertEqual(client.default_solver, {"name__eq": "solver"})
def test_boolean_options_parsed_from_config(self):
conf = {'connection_close': 'off', 'permissive_ssl': 'true'}
- with mock.patch("dwave.cloud.client.load_config", lambda **kwargs: conf):
+ with mock.patch("dwave.cloud.client.load_config", lambda **kw: conf):
with dwave.cloud.Client.from_config(token='token') as client:
self.assertFalse(client.connection_close)
self.assertTrue(client.permissive_ssl)
@@ -285,16 +287,32 @@ class ClientFactory(unittest.TestCase):
DEFAULTS = Client.DEFAULTS.copy()
DEFAULTS.update(token=token)
- with mock.patch("dwave.cloud.client.load_config", lambda **kwargs: {}):
+ with mock.patch("dwave.cloud.client.load_config", lambda **kw: {}):
with mock.patch.multiple("dwave.cloud.Client", DEFAULTS=DEFAULTS):
with dwave.cloud.Client.from_config() as client:
self.assertEqual(client.token, token)
+ # None defaults are ignored
+ with dwave.cloud.Client(defaults=None) as client:
+ self.assertEqual(client.token, token)
+
+ # explicit None kwargs do not modify defaults
+ with dwave.cloud.Client(
+ endpoint=None, token=None, solver=None,
+ connection_close=None, poll_backoff_min=None) as client:
+
+ self.assertEqual(client.endpoint, client.DEFAULT_API_ENDPOINT)
+ self.assertEqual(client.token, token)
+ self.assertEqual(client.default_solver, {})
+
+ self.assertEqual(client.connection_close, DEFAULTS['connection_close'])
+ self.assertEqual(client.poll_backoff_min, DEFAULTS['poll_backoff_min'])
+
def test_defaults_as_kwarg(self):
token = 'value'
defaults = dict(token=token)
- with mock.patch("dwave.cloud.client.load_config", lambda **kwargs: {}):
+ with mock.patch("dwave.cloud.client.load_config", lambda **kw: {}):
with dwave.cloud.Client.from_config(defaults=defaults) as client:
self.assertEqual(client.token, token)
@@ -303,19 +321,18 @@ class ClientFactory(unittest.TestCase):
token = 'value'
solver = {'feature': 'value'}
+ request_timeout = 10
DEFAULTS = Client.DEFAULTS.copy()
DEFAULTS.update(token='wrong')
defaults = dict(solver='wrong')
- conf = dict(solver=solver)
- def load_config(**kwargs):
- return merge(kwargs, conf, op=lambda a, b: a or b)
+ conf = dict(solver=solver, request_timeout=request_timeout)
- kwargs = dict(token=token, defaults=defaults)
+ kwargs = dict(token=token, defaults=defaults, request_timeout=None)
- with mock.patch("dwave.cloud.client.load_config", load_config):
+ with mock.patch("dwave.cloud.client.load_config", lambda **kw: conf):
with mock.patch.multiple("dwave.cloud.Client", DEFAULTS=DEFAULTS):
with dwave.cloud.Client.from_config(**kwargs) as client:
@@ -328,6 +345,9 @@ class ClientFactory(unittest.TestCase):
# endpoint: used from class defaults
self.assertEqual(client.endpoint, DEFAULTS['endpoint'])
+ # None kwarg: used from class defaults
+ self.assertEqual(client.request_timeout, request_timeout)
+
def test_headers_from_config(self):
headers_dict = {"key-1": "value-1", "key-2": "value-2"}
headers_str = """ key-1:value-1
@@ -335,7 +355,7 @@ class ClientFactory(unittest.TestCase):
"""
conf = dict(token='token', headers=headers_str)
- with mock.patch("dwave.cloud.client.load_config", lambda **kwargs: conf):
+ with mock.patch("dwave.cloud.client.load_config", lambda **kw: conf):
with dwave.cloud.Client.from_config() as client:
self.assertDictEqual(client.headers, headers_dict)
@@ -344,16 +364,13 @@ class ClientFactory(unittest.TestCase):
headers_str = "key-2:value-2\nkey-1:value-1"
conf = dict(token='token')
- def load_config(**kwargs):
- return merge(kwargs, conf, op=lambda a, b: a or b)
-
# headers as dict
- with mock.patch("dwave.cloud.client.load_config", load_config):
+ with mock.patch("dwave.cloud.client.load_config", lambda **kw: conf):
with dwave.cloud.Client.from_config(headers=headers_dict) as client:
self.assertDictEqual(client.headers, headers_dict)
# headers as str
- with mock.patch("dwave.cloud.client.load_config", load_config):
+ with mock.patch("dwave.cloud.client.load_config", lambda **kw: conf):
with dwave.cloud.Client.from_config(headers=headers_str) as client:
self.assertDictEqual(client.headers, headers_dict)
@@ -365,7 +382,7 @@ class ClientFactory(unittest.TestCase):
client_cert = crt
conf = dict(token='token', client_cert=crt)
- with mock.patch("dwave.cloud.client.load_config", lambda **kwargs: conf):
+ with mock.patch("dwave.cloud.client.load_config", lambda **kw: conf):
with dwave.cloud.Client.from_config() as client:
self.assertEqual(client.client_cert, client_cert)
@@ -376,7 +393,7 @@ class ClientFactory(unittest.TestCase):
client_cert = (crt, key)
conf = dict(token='token', client_cert=crt, client_cert_key=key)
- with mock.patch("dwave.cloud.client.load_config", lambda **kwargs: conf):
+ with mock.patch("dwave.cloud.client.load_config", lambda **kw: conf):
with dwave.cloud.Client.from_config() as client:
self.assertEqual(client.client_cert, client_cert)
@@ -420,14 +437,14 @@ class ClientFactory(unittest.TestCase):
conf = dict(token='token', **poll_conf)
# polling params from config file propagated to client object
- with mock.patch("dwave.cloud.client.load_config", lambda **kwargs: conf):
+ with mock.patch("dwave.cloud.client.load_config", lambda **kw: conf):
with dwave.cloud.Client.from_config() as client:
self.assertEqual(client.poll_backoff_min, 0.1)
self.assertEqual(client.poll_backoff_max, 1.0)
# test defaults
conf = dict(token='token')
- with mock.patch("dwave.cloud.client.load_config", lambda **kwargs: conf):
+ with mock.patch("dwave.cloud.client.load_config", lambda **kw: conf):
with dwave.cloud.Client.from_config() as client:
self.assertEqual(client.poll_backoff_min, Client.DEFAULTS['poll_backoff_min'])
self.assertEqual(client.poll_backoff_max, Client.DEFAULTS['poll_backoff_max'])
@@ -436,10 +453,7 @@ class ClientFactory(unittest.TestCase):
poll_conf = {"poll_backoff_min": "0.1", "poll_backoff_max": "1"}
conf = dict(token='token', **poll_conf)
- def load_config(**kwargs):
- return merge(kwargs, conf, op=lambda a, b: a or b)
-
- with mock.patch("dwave.cloud.client.load_config", load_config):
+ with mock.patch("dwave.cloud.client.load_config", lambda **kw: conf):
with dwave.cloud.Client.from_config(poll_backoff_min=0.5) as client:
self.assertEqual(client.poll_backoff_min, 0.5)
self.assertEqual(client.poll_backoff_max, 1.0)
|
Compatibility break in from_config for None kwargs
Prior to 0.8.0, kwargs with value `None` did not overwrite the file/env config.
We should restore that as the default behavior, since the break easily propagates up to libraries that call `Client.from_config(**kwargs)`.
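A minimal sketch of the restored merge semantics, using a hypothetical `merge_config` helper: kwargs explicitly passed as `None` are dropped before updating the file/env config:
```python
def merge_config(file_config, **kwargs):
    config = dict(file_config)
    # skip None-valued kwargs, so they don't clobber file/env values
    config.update({k: v for k, v in kwargs.items() if v is not None})
    return config

conf = merge_config({'endpoint': 'file-endpoint', 'token': 'file-token'},
                    endpoint=None, token='kwarg-token')
assert conf == {'endpoint': 'file-endpoint', 'token': 'kwarg-token'}
```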
|
0.0
|
5deb326e53637b5cf8a9b1d7f32ef46207faa29a
|
[
"tests/test_client.py::ClientFactory::test_defaults_partial_update",
"tests/test_client.py::ClientFactory::test_none_kwargs_do_not_override_config"
] |
[
"tests/test_client.py::ClientFactory::test_boolean_options_parsed_from_config",
"tests/test_client.py::ClientFactory::test_class_defaults",
"tests/test_client.py::ClientFactory::test_client_cert_from_config",
"tests/test_client.py::ClientFactory::test_client_cert_from_kwargs",
"tests/test_client.py::ClientFactory::test_client_type",
"tests/test_client.py::ClientFactory::test_custom_kwargs",
"tests/test_client.py::ClientFactory::test_custom_kwargs_overrides_config",
"tests/test_client.py::ClientFactory::test_default",
"tests/test_client.py::ClientFactory::test_defaults_as_kwarg",
"tests/test_client.py::ClientFactory::test_headers_from_config",
"tests/test_client.py::ClientFactory::test_headers_from_kwargs",
"tests/test_client.py::ClientFactory::test_polling_params_from_config",
"tests/test_client.py::ClientFactory::test_polling_params_from_kwargs",
"tests/test_client.py::ClientFactory::test_solver_features_from_config",
"tests/test_client.py::ClientFactory::test_solver_features_kwargs_override_config",
"tests/test_client.py::ClientFactory::test_solver_name_from_config",
"tests/test_client.py::ClientFactory::test_solver_name_overrides_config_features",
"tests/test_client.py::FeatureBasedSolverSelection::test_anneal_schedule",
"tests/test_client.py::FeatureBasedSolverSelection::test_availability_combo",
"tests/test_client.py::FeatureBasedSolverSelection::test_category",
"tests/test_client.py::FeatureBasedSolverSelection::test_default",
"tests/test_client.py::FeatureBasedSolverSelection::test_derived_category_properties",
"tests/test_client.py::FeatureBasedSolverSelection::test_derived_category_properties_without_category",
"tests/test_client.py::FeatureBasedSolverSelection::test_lower_noise_derived_property",
"tests/test_client.py::FeatureBasedSolverSelection::test_membership_ops",
"tests/test_client.py::FeatureBasedSolverSelection::test_name",
"tests/test_client.py::FeatureBasedSolverSelection::test_nested_properties_intermediate_key_lookup",
"tests/test_client.py::FeatureBasedSolverSelection::test_nested_properties_leaf_lookup",
"tests/test_client.py::FeatureBasedSolverSelection::test_num_qubits",
"tests/test_client.py::FeatureBasedSolverSelection::test_online",
"tests/test_client.py::FeatureBasedSolverSelection::test_order_by_callable",
"tests/test_client.py::FeatureBasedSolverSelection::test_order_by_edgecases",
"tests/test_client.py::FeatureBasedSolverSelection::test_order_by_in_default_solver",
"tests/test_client.py::FeatureBasedSolverSelection::test_order_by_respects_default_solver",
"tests/test_client.py::FeatureBasedSolverSelection::test_order_by_string",
"tests/test_client.py::FeatureBasedSolverSelection::test_parameter_availability_check",
"tests/test_client.py::FeatureBasedSolverSelection::test_property_availability_check",
"tests/test_client.py::FeatureBasedSolverSelection::test_range_boolean_combo",
"tests/test_client.py::FeatureBasedSolverSelection::test_range_ops",
"tests/test_client.py::FeatureBasedSolverSelection::test_regex",
"tests/test_client.py::FeatureBasedSolverSelection::test_relational_ops",
"tests/test_client.py::FeatureBasedSolverSelection::test_set_ops",
"tests/test_client.py::FeatureBasedSolverSelection::test_solvers_deprecation"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-09-23 11:17:55+00:00
|
apache-2.0
| 2,048 |
|
dwavesystems__dwave-cloud-client-434
|
diff --git a/dwave/cloud/client.py b/dwave/cloud/client.py
index baf8341..4744bf9 100644
--- a/dwave/cloud/client.py
+++ b/dwave/cloud/client.py
@@ -44,12 +44,12 @@ import sys
import time
import json
import copy
+import queue
import logging
-import threading
-import requests
+import inspect
import warnings
import operator
-import queue
+import threading
import base64
import hashlib
@@ -61,6 +61,7 @@ from functools import partial, wraps
from collections import abc, namedtuple, OrderedDict
from concurrent.futures import ThreadPoolExecutor
+import requests
from dateutil.parser import parse as parse_datetime
from plucky import pluck
@@ -1598,6 +1599,31 @@ class Client(object):
return self._upload_problem_executor.submit(
self._upload_problem_worker, problem=problem, problem_id=problem_id)
+ @staticmethod
+ def _parse_multipart_upload_response(response):
+ """Parse the JSON response body, raising appropriate exceptions."""
+
+ caller = inspect.stack()[1].function
+ logger.trace("%s response: (code=%r, text=%r)",
+ caller, response.status_code, response.text)
+
+ if response.status_code == 401:
+ raise SolverAuthenticationError()
+
+ try:
+ msg = response.json()
+ except:
+ response.raise_for_status()
+
+ if response.status_code != 200:
+ try:
+ error_msg = msg['error_msg']
+ except KeyError:
+ response.raise_for_status()
+ raise ProblemUploadError(error_msg)
+
+ return msg
+
@staticmethod
@retried(_UPLOAD_REQUEST_RETRIES, backoff=_UPLOAD_RETRIES_BACKOFF)
def _initiate_multipart_upload(session, size):
@@ -1614,14 +1640,10 @@ class Client(object):
except requests.exceptions.Timeout:
raise RequestTimeout
- if response.status_code == 401:
- raise SolverAuthenticationError()
- else:
- logger.trace("Multipart upload initiate response: %r", response.text)
- response.raise_for_status()
+ msg = Client._parse_multipart_upload_response(response)
try:
- problem_id = response.json()['id']
+ problem_id = msg['id']
except KeyError:
raise InvalidAPIResponseError("problem ID missing")
@@ -1715,11 +1737,7 @@ class Client(object):
except requests.exceptions.Timeout:
raise RequestTimeout
- if response.status_code == 401:
- raise SolverAuthenticationError()
- else:
- logger.trace("Part upload response: %r", response.text)
- response.raise_for_status()
+ msg = Client._parse_multipart_upload_response(response)
logger.debug("Uploaded part_id=%r of problem_id=%r", part_id, problem_id)
@@ -1738,23 +1756,18 @@ class Client(object):
except requests.exceptions.Timeout:
raise RequestTimeout
- if response.status_code == 401:
- raise SolverAuthenticationError()
- else:
- logger.trace("Upload status response: %r", response.text)
- response.raise_for_status()
+ msg = Client._parse_multipart_upload_response(response)
try:
- problem_status = response.json()
- problem_status['status']
- problem_status['parts']
+ msg['status']
+ msg['parts']
except KeyError:
raise InvalidAPIResponseError("'status' and/or 'parts' missing")
logger.debug("Got upload status=%r for problem_id=%r",
- problem_status['status'], problem_id)
+ msg['status'], problem_id)
- return problem_status
+ return msg
@staticmethod
def _failsafe_get_multipart_upload_status(session, problem_id):
@@ -1780,11 +1793,7 @@ class Client(object):
except requests.exceptions.Timeout:
raise RequestTimeout
- if response.status_code == 401:
- raise SolverAuthenticationError()
- else:
- logger.trace("Combine parts response: %r", response.text)
- response.raise_for_status()
+ msg = Client._parse_multipart_upload_response(response)
logger.debug("Issued a combine command for problem_id=%r", problem_id)
@@ -1825,7 +1834,7 @@ class Client(object):
# via executor initializer
with self.create_session() as session:
chunks = ChunkedData(problem, chunk_size=self._UPLOAD_PART_SIZE_BYTES)
- size = len(chunks.view)
+ size = chunks.total_size
if problem_id is None:
try:
@@ -1834,7 +1843,7 @@ class Client(object):
errmsg = ("Multipart upload initialization failed "
"with {!r}.".format(e))
logger.error(errmsg)
- raise ProblemUploadError(errmsg)
+ raise ProblemUploadError(errmsg) from e
# check problem status, so we only upload parts missing or invalid
problem_status = \
@@ -1869,7 +1878,7 @@ class Client(object):
errmsg = ("Multipart upload of problem_id={!r} failed for "
"part_no={!r} with {!r}.".format(problem_id, part_no, e))
logger.error(errmsg)
- raise ProblemUploadError(errmsg)
+ raise ProblemUploadError(errmsg) from e
# verify all parts uploaded via status call
# (check remote checksum matches the local one)
@@ -1904,6 +1913,6 @@ class Client(object):
errmsg = ("Multipart upload of problem_id={!r} failed on parts "
"combine with {!r}".format(problem_id, e))
logger.error(errmsg)
- raise ProblemUploadError(errmsg)
+ raise ProblemUploadError(errmsg) from e
return problem_id
diff --git a/dwave/cloud/events.py b/dwave/cloud/events.py
index efe1809..8e9b53d 100644
--- a/dwave/cloud/events.py
+++ b/dwave/cloud/events.py
@@ -62,6 +62,8 @@ def add_handler(name, handler):
def dispatch_event(name, *args, **kwargs):
"""Call the complete chain of event handlers attached to event `name`."""
+ logger.trace("dispatch_event(%r, *%r, **%r)", name, args, kwargs)
+
if name not in _client_event_hooks_registry:
raise ValueError('invalid event name')
diff --git a/dwave/cloud/upload.py b/dwave/cloud/upload.py
index 85d603b..87bc306 100644
--- a/dwave/cloud/upload.py
+++ b/dwave/cloud/upload.py
@@ -372,12 +372,15 @@ class ChunkedData(object):
if self.view is None:
raise TypeError("bytes/str/IOBase-subclass data required")
+ @property
+ def total_size(self):
+ """Total data size, in bytes."""
+ return len(self.view)
+
@property
def num_chunks(self):
"""Total number of chunks."""
-
- total_size = len(self.view)
- return math.ceil(total_size / self.chunk_size)
+ return math.ceil(self.total_size / self.chunk_size)
def __len__(self):
return self.num_chunks
|
dwavesystems/dwave-cloud-client
|
846dca27ea616377c3e82e4d1c2f7e65caea699a
|
diff --git a/tests/test_upload.py b/tests/test_upload.py
index 6717d1e..4a62f68 100644
--- a/tests/test_upload.py
+++ b/tests/test_upload.py
@@ -316,6 +316,7 @@ class TestChunkedData(unittest.TestCase):
def verify_chunking(self, cd, chunks_expected):
self.assertEqual(len(cd), len(chunks_expected))
self.assertEqual(cd.num_chunks, len(chunks_expected))
+ self.assertEqual(cd.total_size, sum(map(len, chunks_expected)))
chunks_iter = [c.read() for c in cd]
chunks_explicit = []
@@ -365,19 +366,6 @@ class TestChunkedData(unittest.TestCase):
self.assertListEqual(chunks_expected, chunks_generated)
[email protected](config, "No live server configuration available.")
-class TestMultipartUpload(unittest.TestCase):
-
- def test_smoke_test(self):
- data = b'123'
- with Client(**config) as client:
- future = client.upload_problem_encoded(data)
- try:
- problem_id = future.result()
- except Exception as e:
- self.fail(e)
-
-
def choose_reply(key, replies, statuses=None):
"""Choose the right response based on a hashable `key` and make a mock
response.
@@ -462,7 +450,7 @@ class TestMockedMultipartUpload(unittest.TestCase):
part_data[i],
json.dumps(sorted([
('Content-MD5', _b64(part_digest[i])),
- ('Content-Type', 'application/octet-stream')
+ ('Content-Type', 'application/octet-stream')
]))
): json.dumps({})
for i in parts
@@ -548,7 +536,7 @@ class TestMockedMultipartUpload(unittest.TestCase):
part_data[i],
json.dumps(sorted([
('Content-MD5', _b64(part_digest[i])),
- ('Content-Type', 'application/octet-stream')
+ ('Content-Type', 'application/octet-stream')
]))
): json.dumps({})
for i in parts[:1]
@@ -633,7 +621,7 @@ class TestMockedMultipartUpload(unittest.TestCase):
part_data[i],
json.dumps(sorted([
('Content-MD5', _b64(part_digest[i])),
- ('Content-Type', 'application/octet-stream')
+ ('Content-Type', 'application/octet-stream')
]))
) for i in parts
]
@@ -729,7 +717,7 @@ class TestMockedMultipartUpload(unittest.TestCase):
part_data[i],
json.dumps(sorted([
('Content-MD5', _b64(part_digest[i])),
- ('Content-Type', 'application/octet-stream')
+ ('Content-Type', 'application/octet-stream')
]))
): json.dumps({})
for i in parts[2:]
@@ -756,3 +744,35 @@ class TestMockedMultipartUpload(unittest.TestCase):
self.fail(e)
self.assertEqual(returned_problem_id, upload_problem_id)
+
+
[email protected](config, "No live server configuration available.")
+class TestMultipartUpload(unittest.TestCase):
+ _100gb = 100 * 2**30
+
+ def test_smoke_test(self):
+ data = b'123'
+ with Client(**config) as client:
+ future = client.upload_problem_encoded(data)
+ try:
+ problem_id = future.result()
+ except Exception as e:
+ self.fail(e)
+
+ def test_initiate_size_limit(self):
+ size = self._100gb
+
+ with Client(**config) as client:
+ with client.create_session() as session:
+ with self.assertRaisesRegex(ProblemUploadError,
+ 'bigger than the maximum'):
+ client._initiate_multipart_upload(session, size)
+
+ @mock.patch.object(ChunkedData, 'total_size', _100gb)
+ def test_initiate_size_limit_end_to_end(self):
+
+ with Client(**config) as client:
+ future = client.upload_problem_encoded(b'')
+ with self.assertRaisesRegex(ProblemUploadError,
+ 'bigger than the maximum'):
+ problem_id = future.result()
|
Bubble up SAPI error messages during all stages of multipart upload
For example, if a requested upload size is too big (during multipart upload initiation), SAPI responds with:
```
{"error_code": 400, "error_msg": "The proposed upload is bigger than the maximum upload allowed of 42949672960"}
```
but the exception raised is `HTTPError('400 Client Error: Bad Request ...')`.
The expected behavior in this case is to fail with `ProblemUploadError("The proposed upload is bigger than ...")` or similar.
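A minimal sketch of a response parser with the expected behavior (the exception name mirrors the cloud client's; the parsing logic here is illustrative): surface SAPI's `error_msg` instead of a generic `HTTPError`:
```python
class ProblemUploadError(Exception):
    """Stand-in for dwave.cloud.exceptions.ProblemUploadError."""

def parse_upload_response(response):
    try:
        msg = response.json()
    except ValueError:                   # body is not JSON
        msg = None
    if response.status_code != 200:
        if msg and 'error_msg' in msg:
            raise ProblemUploadError(msg['error_msg'])
        response.raise_for_status()      # fall back to a generic HTTPError
    return msg
```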
|
0.0
|
846dca27ea616377c3e82e4d1c2f7e65caea699a
|
[
"tests/test_upload.py::TestChunkedData::test_chunk_size_edges",
"tests/test_upload.py::TestChunkedData::test_chunks_from_bytearray",
"tests/test_upload.py::TestChunkedData::test_chunks_from_bytes",
"tests/test_upload.py::TestChunkedData::test_chunks_from_memory_file",
"tests/test_upload.py::TestChunkedData::test_chunks_from_str"
] |
[
"tests/test_upload.py::TestGettableABC::test_invalid",
"tests/test_upload.py::TestGettableABC::test_valid",
"tests/test_upload.py::TestGettables::test_gettable_file_critical_section_respected",
"tests/test_upload.py::TestGettables::test_gettable_file_from_disk_file",
"tests/test_upload.py::TestGettables::test_gettable_file_from_file_like",
"tests/test_upload.py::TestGettables::test_gettable_file_from_memory_bytes",
"tests/test_upload.py::TestGettables::test_gettable_file_from_memory_string",
"tests/test_upload.py::TestGettables::test_gettable_memory_from_bytes_like",
"tests/test_upload.py::TestFileView::test_file_interface",
"tests/test_upload.py::TestFileView::test_view_interface",
"tests/test_upload.py::TestChunkedData::test_chunk_generators",
"tests/test_upload.py::TestMockedMultipartUpload::test_part_upload_retried",
"tests/test_upload.py::TestMockedMultipartUpload::test_partial_upload",
"tests/test_upload.py::TestMockedMultipartUpload::test_problem_reupload_end_to_end",
"tests/test_upload.py::TestMockedMultipartUpload::test_single_problem_end_to_end"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-10-06 20:54:21+00:00
|
apache-2.0
| 2,049 |
|
dwavesystems__dwave-cloud-client-455
|
diff --git a/dwave/cloud/client.py b/dwave/cloud/client.py
index 7986b84..db3c444 100644
--- a/dwave/cloud/client.py
+++ b/dwave/cloud/client.py
@@ -73,7 +73,7 @@ from dwave.cloud.config import load_config, parse_float, parse_int, parse_boolea
from dwave.cloud.solver import Solver, available_solvers
from dwave.cloud.concurrency import PriorityThreadPoolExecutor
from dwave.cloud.upload import ChunkedData
-from dwave.cloud.events import dispatch_event
+from dwave.cloud.events import dispatches_events
from dwave.cloud.utils import (
TimeoutingHTTPAdapter, BaseUrlSession, user_agent,
datetime_to_timestamp, utcnow, epochnow, cached, retried, is_caused_by)
@@ -343,6 +343,7 @@ class Client(object):
logger.debug("Creating %s.Client() with: %r", _client, config)
return _clients[_client](**config)
+ @dispatches_events('client_init')
def __init__(self, endpoint=None, token=None, solver=None, **kwargs):
# for (reasonable) backwards compatibility, accept only the first few
# positional args.
@@ -354,9 +355,6 @@ class Client(object):
if solver is not None:
kwargs.setdefault('solver', solver)
- dispatched_args = kwargs.copy()
- dispatch_event('before_client_init', obj=self, args=dispatched_args)
-
logger.debug("Client init called with: %r", kwargs)
# derive instance-level defaults from class defaults and init defaults
@@ -523,9 +521,6 @@ class Client(object):
self._encode_problem_executor = \
ThreadPoolExecutor(self._ENCODE_PROBLEM_THREAD_COUNT)
- dispatch_event(
- 'after_client_init', obj=self, args=dispatched_args, return_value=None)
-
def create_session(self):
"""Create a new requests session based on client's (self) params.
@@ -728,6 +723,7 @@ class Client(object):
self._load(future)
return future
+ @dispatches_events('get_solvers')
def get_solvers(self, refresh=False, order_by='avg_load', **filters):
"""Return a filtered list of solvers handled by this client.
@@ -913,9 +909,6 @@ class Client(object):
)
"""
- args = dict(refresh=refresh, order_by=order_by, filters=filters)
- dispatch_event('before_get_solvers', obj=self, args=args)
-
def covers_op(prop, val):
"""Does LHS `prop` (range) fully cover RHS `val` (range or item)?"""
@@ -1064,9 +1057,6 @@ class Client(object):
if sort_reverse:
solvers.reverse()
- dispatch_event(
- 'after_get_solvers', obj=self, args=args, return_value=solvers)
-
return solvers
def solvers(self, refresh=False, **filters):
diff --git a/dwave/cloud/events.py b/dwave/cloud/events.py
index 8e9b53d..66a0a39 100644
--- a/dwave/cloud/events.py
+++ b/dwave/cloud/events.py
@@ -19,6 +19,8 @@ inspection.
"""
import logging
+import inspect
+from functools import wraps
__all__ = ['add_handler']
@@ -41,13 +43,17 @@ _client_event_hooks_registry = {
def add_handler(name, handler):
"""Register a `handler` function to be called on event `name`.
- Handler's signature are::
+ Handler signatures are::
def before_event_handler(event_name, obj, args):
- pass
+ # called just before `obj.method(**args)` executes
def after_event_handler(event_name, obj, args, return_value):
- pass
+ # function succeeded with `return_value`
+
+ def after_event_handler(event_name, obj, args, exception):
+ # function failed with `exception` raised
+ # after event handler invocation, exception is re-raised
"""
@@ -73,3 +79,35 @@ def dispatch_event(name, *args, **kwargs):
except Exception as e:
logger.debug("Exception in {!r} event handler {!r}: {!r}".format(
name, handler, e))
+
+
+class dispatches_events:
+ """Decorate function to :func:`.dispatch_event` on entry and exit."""
+
+ def __init__(self, basename):
+ self.before_eventname = 'before_' + basename
+ self.after_eventname = 'after_' + basename
+
+ def __call__(self, fn):
+ if not callable(fn):
+ raise TypeError("decorated object must be callable")
+
+ @wraps(fn)
+ def wrapped(*pargs, **kwargs):
+ sig = inspect.signature(fn)
+ bound = sig.bind(*pargs, **kwargs)
+ bound.apply_defaults()
+ args = bound.arguments
+ obj = args.pop('self', None)
+
+ dispatch_event(self.before_eventname, obj=obj, args=args)
+ try:
+ rval = fn(*pargs, **kwargs)
+ except Exception as exc:
+ dispatch_event(self.after_eventname, obj=obj, args=args, exception=exc)
+ raise
+ else:
+ dispatch_event(self.after_eventname, obj=obj, args=args, return_value=rval)
+ return rval
+
+ return wrapped
diff --git a/dwave/cloud/solver.py b/dwave/cloud/solver.py
index 56b7f34..fbaf0a8 100644
--- a/dwave/cloud/solver.py
+++ b/dwave/cloud/solver.py
@@ -40,7 +40,7 @@ from dwave.cloud.coders import (
from dwave.cloud.utils import uniform_iterator, reformat_qubo_as_ising
from dwave.cloud.computation import Future
from dwave.cloud.concurrency import Present
-from dwave.cloud.events import dispatch_event
+from dwave.cloud.events import dispatches_events
# Use numpy if available for fast encoding/decoding
try:
@@ -404,6 +404,7 @@ class BaseUnstructuredSolver(BaseSolver):
return body_data
+ @dispatches_events('sample')
def sample_problem(self, problem, problem_type=None, label=None, **params):
"""Sample from the specified problem.
@@ -877,6 +878,7 @@ class StructuredSolver(BaseSolver):
return self._sample(problem_type, bqm.linear, bqm.quadratic, bqm.offset,
params, label=label, undirected_biases=True)
+ @dispatches_events('sample')
def _sample(self, type_, linear, quadratic, offset, params,
label=None, undirected_biases=False):
"""Internal method for `sample_ising`, `sample_qubo` and `sample_bqm`.
@@ -906,11 +908,6 @@ class StructuredSolver(BaseSolver):
:class:`~dwave.cloud.computation.Future`
"""
- args = dict(type_=type_, linear=linear, quadratic=quadratic,
- offset=offset, params=params, label=label,
- undirected_biases=undirected_biases)
- dispatch_event('before_sample', obj=self, args=args)
-
# Check the problem
if not self.check_problem(linear, quadratic):
raise InvalidProblemError("Problem graph incompatible with solver.")
@@ -948,8 +945,6 @@ class StructuredSolver(BaseSolver):
logger.debug("Submitting new problem to: %s", self.id)
self.client._submit(body, computation)
- dispatch_event('after_sample', obj=self, args=args, return_value=computation)
-
return computation
def _format_params(self, type_, params):
|
dwavesystems/dwave-cloud-client
|
f1ab09ef68f89685ef5801b27a1da5f208262131
|
diff --git a/tests/test_events.py b/tests/test_events.py
index 76a933c..ca95168 100644
--- a/tests/test_events.py
+++ b/tests/test_events.py
@@ -14,21 +14,28 @@
import unittest
+try:
+ import dimod
+except ImportError:
+ dimod = None
+
from dwave.cloud.client import Client
-from dwave.cloud.solver import Solver
-from dwave.cloud.events import add_handler
+from dwave.cloud.solver import StructuredSolver, UnstructuredSolver
+from dwave.cloud.events import add_handler, dispatches_events
+from dwave.cloud.concurrency import Present
class TestEventDispatch(unittest.TestCase):
def setUp(self):
# mock client
- self.client = Client(token='token', solver={'qpu': True})
+ self.client = Client(token='token', solver=dict(name__contains='test'))
self.client._fetch_solvers = lambda **kw: self.solvers
self.client._submit = lambda *pa, **kw: None
+ self.client.upload_problem_encoded = lambda *pa, **kw: Present(result=mock_problem_id)
# mock solvers
- self.solver = Solver(client=self.client, data={
+ self.structured_solver = StructuredSolver(client=self.client, data={
"properties": {
"supported_problem_types": ["qubo", "ising"],
"qubits": [0, 1, 2],
@@ -46,11 +53,24 @@ class TestEventDispatch(unittest.TestCase):
"category": "qpu",
"tags": ["lower_noise"]
},
- "id": "solver1",
+ "id": "test-qpu-solver",
"description": "A test solver 1",
"status": "online"
})
- self.solvers = [self.solver]
+ self.unstructured_solver = UnstructuredSolver(client=self.client, data={
+ "properties": {
+ "supported_problem_types": ["bqm"],
+ "parameters": {"num_reads": "Number of samples to return."},
+ "category": "hybrid",
+ },
+ "id": "test-unstructured-solver",
+ "description": "A test unstructured solver"
+ })
+ self.solvers = [self.structured_solver, self.unstructured_solver]
+
+ # reset all event handlers
+ from dwave.cloud.events import _client_event_hooks_registry as reg
+ reg.update({k: [] for k in reg})
def test_validation(self):
"""Event name and handler are validated."""
@@ -77,16 +97,18 @@ class TestEventDispatch(unittest.TestCase):
# test entry values
before = memo['before_client_init']
self.assertEqual(before['obj'], client)
- self.assertNotIn('endpoint', before['args'])
+ self.assertIn('endpoint', before['args'])
self.assertIn('token', before['args'])
+ self.assertIn('kwargs', before['args'])
self.assertEqual(before['args']['token'], 'token')
- self.assertEqual(before['args']['unknown'], 'unknown')
+ self.assertEqual(before['args']['kwargs']['unknown'], 'unknown')
# test exit values
after = memo['after_client_init']
self.assertEqual(after['obj'], client)
self.assertEqual(after['args']['token'], 'token')
- self.assertEqual(after['args']['unknown'], 'unknown')
+ self.assertEqual(after['args']['kwargs']['unknown'], 'unknown')
+ self.assertEqual(after['args']['endpoint'], None)
self.assertEqual(after['return_value'], None)
def test_get_solvers(self):
@@ -108,17 +130,15 @@ class TestEventDispatch(unittest.TestCase):
self.assertEqual(before['obj'], self.client)
self.assertIn('refresh', before['args'])
self.assertIn('filters', before['args'])
- self.assertIn('qpu', before['args']['filters'])
+ self.assertIn('name__contains', before['args']['filters'])
# test exit values
after = memo['after_get_solvers']
self.assertEqual(after['obj'], self.client)
- self.assertIn('qpu', after['args']['filters'])
+ self.assertIn('name__contains', after['args']['filters'])
self.assertEqual(after['return_value'], self.solvers)
- def test_sample(self):
- """Before/After solver sample events are dispatched with correct signatures."""
-
+ def subtest_sample(self, solver):
# setup event handlers
memo = {}
def handler(event, **data):
@@ -132,18 +152,75 @@ class TestEventDispatch(unittest.TestCase):
quad = {(0, 1): 1}
offset = 2
params = dict(num_reads=100)
- future = self.solver.sample_ising(lin, quad, offset, **params)
+ future = solver.sample_ising(lin, quad, offset, **params)
# test entry values
before = memo['before_sample']
- args = dict(type_='ising', linear=lin, quadratic=quad,
- offset=offset, params=params,
- undirected_biases=False, label=None)
- self.assertEqual(before['obj'], self.solver)
+ if solver.qpu:
+ args = dict(type_='ising', linear=lin, quadratic=quad,
+ offset=offset, params=params,
+ undirected_biases=False, label=None)
+ elif solver.hybrid:
+ if not dimod:
+ self.skipTest("dimod not installed")
+ bqm = dimod.BQM.from_ising(lin, quad, offset)
+ args = dict(problem=bqm, problem_type=None, label=None, params=params)
+
+ self.assertEqual(before['obj'], solver)
self.assertDictEqual(before['args'], args)
# test exit values
after = memo['after_sample']
- self.assertEqual(after['obj'], self.solver)
+ self.assertEqual(after['obj'], solver)
self.assertDictEqual(after['args'], args)
self.assertEqual(after['return_value'], future)
+
+ def test_sample(self):
+ """Before/After solver sample events are dispatched with correct signatures."""
+
+ for solver in self.solvers:
+ with self.subTest("solver=%r" % solver.id):
+ self.subtest_sample(solver)
+
+
+class TestEventDispatchDecorator(unittest.TestCase):
+
+ def setUp(self):
+ # reset all event handlers
+ from dwave.cloud.events import _client_event_hooks_registry as reg
+ reg.update({k: [] for k in reg})
+
+ def test_decorator(self):
+ """Decorator adds on-entry and on-exit event calls, with correct args."""
+
+ class MockSampler:
+ @dispatches_events('sample')
+ def mock_sample(self, h, J, offset=0, fail=False, **kwargs):
+ if fail:
+ raise ValueError
+ return offset + 1
+
+ mock_object = MockSampler()
+ h = [1, 1]
+ J = {(0, 1): 1}
+
+ def before(name, obj, args):
+ self.assertEqual(obj, mock_object)
+ args.pop('fail')
+ self.assertEqual(args, dict(h=h, J=J, offset=0, kwargs={}))
+
+ def after(name, obj, args, return_value=None, exception=None):
+ self.assertEqual(obj, mock_object)
+ fail = args.pop('fail')
+ self.assertEqual(args, dict(h=h, J=J, offset=0, kwargs={}))
+ if fail:
+ self.assertIsInstance(exception, ValueError)
+ else:
+ self.assertEqual(return_value, 1)
+
+ add_handler('before_sample', before)
+ add_handler('before_sample', after)
+
+ mock_object.mock_sample(h, J)
+ with self.assertRaises(ValueError):
+ mock_object.mock_sample(h, J, fail=True)
|
Event dispatch decorator
Call `dispatch_event` before and after the decorated function executes, automating the collection and reporting of arguments and the return value.
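A condensed sketch of such a decorator (the dispatcher is injected here for illustration; the real one calls the module-level `dispatch_event`):
```python
import inspect
from functools import wraps

def dispatches_events(basename, dispatch):
    def decorator(fn):
        @wraps(fn)
        def wrapped(*pargs, **kwargs):
            # bind args to parameter names, so handlers get a readable dict
            bound = inspect.signature(fn).bind(*pargs, **kwargs)
            bound.apply_defaults()
            args = dict(bound.arguments)
            obj = args.pop('self', None)          # report the bound object
            dispatch('before_' + basename, obj=obj, args=args)
            try:
                rval = fn(*pargs, **kwargs)
            except Exception as exc:
                dispatch('after_' + basename, obj=obj, args=args, exception=exc)
                raise                             # re-raise after reporting
            dispatch('after_' + basename, obj=obj, args=args, return_value=rval)
            return rval
        return wrapped
    return decorator
```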
|
0.0
|
f1ab09ef68f89685ef5801b27a1da5f208262131
|
[
"tests/test_events.py::TestEventDispatch::test_client_init",
"tests/test_events.py::TestEventDispatch::test_get_solvers",
"tests/test_events.py::TestEventDispatch::test_sample",
"tests/test_events.py::TestEventDispatch::test_validation",
"tests/test_events.py::TestEventDispatchDecorator::test_decorator"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-01-29 19:51:19+00:00
|
apache-2.0
| 2,050 |
|
dwavesystems__dwave-cloud-client-567
|
diff --git a/dwave/cloud/api/client.py b/dwave/cloud/api/client.py
index 470f6e0..d330eda 100644
--- a/dwave/cloud/api/client.py
+++ b/dwave/cloud/api/client.py
@@ -320,9 +320,10 @@ class DWaveAPIClient:
retry = urllib3.Retry(**kwargs)
- # note: `Retry.BACKOFF_MAX` can't be set on construction
+ # note: prior to `urllib3==2`, backoff_max had to be set manually on object
if backoff_max is not None:
- retry.BACKOFF_MAX = backoff_max
+ # handle `urllib3>=1.21.1,<1.27` AND `urllib3>=1.21.1,<3`
+ retry.BACKOFF_MAX = retry.backoff_max = backoff_max
return retry
diff --git a/dwave/cloud/client/base.py b/dwave/cloud/client/base.py
index 28a9e9a..43f797e 100644
--- a/dwave/cloud/client/base.py
+++ b/dwave/cloud/client/base.py
@@ -622,11 +622,7 @@ class Client(object):
# create http idempotent Retry config
def get_retry_conf():
- # need a subclass to override the backoff_max
- class Retry(urllib3.Retry):
- BACKOFF_MAX = self.http_retry_backoff_max
-
- return Retry(
+ retry = urllib3.Retry(
total=self.http_retry_total,
connect=self.http_retry_connect,
read=self.http_retry_read,
@@ -637,6 +633,12 @@ class Client(object):
raise_on_status=True,
respect_retry_after_header=True)
+ if self.http_retry_backoff_max is not None:
+ # handle `urllib3>=1.21.1,<1.27` AND `urllib3>=1.21.1,<3`
+ retry.BACKOFF_MAX = retry.backoff_max = self.http_retry_backoff_max
+
+ return retry
+
session = BaseUrlSession(base_url=endpoint)
session.mount('http://',
TimeoutingHTTPAdapter(timeout=self.request_timeout,
diff --git a/releasenotes/notes/support-urllib3-v2-5c5a2cb29a47b43d.yaml b/releasenotes/notes/support-urllib3-v2-5c5a2cb29a47b43d.yaml
new file mode 100644
index 0000000..c8e7dad
--- /dev/null
+++ b/releasenotes/notes/support-urllib3-v2-5c5a2cb29a47b43d.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Correctly set `backoff_max` time for retried requests when ``urllib3>=2.0``
+ is used.
+ See `#566 <https://github.com/dwavesystems/dwave-cloud-client/issues/566>`_.
\ No newline at end of file
|
dwavesystems/dwave-cloud-client
|
d0f7e6935f3069ffe4d1e04b548427f0413a4013
|
diff --git a/tests/test_client.py b/tests/test_client.py
index 5b72294..e59df11 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -555,7 +555,8 @@ class ClientConstruction(unittest.TestCase):
self.assertEqual(retry.redirect, opts['http_retry_redirect'])
self.assertEqual(retry.status, opts['http_retry_status'])
self.assertEqual(retry.backoff_factor, opts['http_retry_backoff_factor'])
- self.assertEqual(retry.BACKOFF_MAX, opts['http_retry_backoff_max'])
+ backoff_max = getattr(retry, 'backoff_max', getattr(retry, 'BACKOFF_MAX'))
+ self.assertEqual(backoff_max, opts['http_retry_backoff_max'])
def test_http_retry_params_from_config(self):
retry_opts = {
|
HTTP request retry `backoff_max` not working with `urllib3>=2`
### Problem
Prior to `requests==2.30.0` (May 2023), `urllib3` was upper-bounded to [`<1.27`](https://github.com/psf/requests/blob/87d63de8739263bbe17034fba2285c79780da7e8/setup.py#L64C6-L64C27), but in `2.30.0` Requests added support for urllib3 v2.
In `urllib3==2`, the way of specifying the `backoff_max` time for request retries (`urllib3.Retry`) changed. It is now finally supported as a construction-time parameter; previously we had to override the `Retry.BACKOFF_MAX` (and, in later versions, `Retry.DEFAULT_BACKOFF_MAX`) attribute either before or after construction.
Note that we don't use urllib3 directly in the cloud client, but via Requests, which accepts (and passes through) urllib3's retry spec as [`max_retries`](https://github.com/dwavesystems/dwave-cloud-client/blob/d0f7e6935f3069ffe4d1e04b548427f0413a4013/dwave/cloud/client/base.py#L643).
### Impact
For users with `urllib3>=2` installed (fresh installs after May 3, 2023):
- minimal for `dwave.cloud.Client` usage (primarily multipart upload requests), and
- minimal for `dwave.cloud.api` clients (`Region` requests),
i.e. `backoff_max` would not be set to 60 sec (our default), but to 120 sec (urllib3's default).
Only if a user explicitly wanted to modify the `backoff_max` time, via our `http_retry_backoff_max` config parameter, would they notice a somewhat significant impact from this issue.
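A version-agnostic way to cap the retry backoff, mirroring the approach the fix takes (a sketch, not verified against every urllib3 release in the supported range):

import urllib3

def make_retry(backoff_max=60, **kwargs):
    retry = urllib3.Retry(backoff_factor=0.1, **kwargs)
    if backoff_max is not None:
        # urllib3 < 2 reads the BACKOFF_MAX attribute, urllib3 >= 2 reads
        # backoff_max; setting both on the instance covers both ranges
        retry.BACKOFF_MAX = retry.backoff_max = backoff_max
    return retry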
|
0.0
|
d0f7e6935f3069ffe4d1e04b548427f0413a4013
|
[
"tests/test_client.py::ClientConstruction::test_http_retry_params_from_config",
"tests/test_client.py::ClientConstruction::test_http_retry_params_from_kwargs"
] |
[
"tests/test_client.py::ClientConstruction::test_boolean_options_parsed_from_config",
"tests/test_client.py::ClientConstruction::test_class_defaults",
"tests/test_client.py::ClientConstruction::test_client_cert_from_config",
"tests/test_client.py::ClientConstruction::test_client_cert_from_kwargs",
"tests/test_client.py::ClientConstruction::test_client_type",
"tests/test_client.py::ClientConstruction::test_custom_kwargs",
"tests/test_client.py::ClientConstruction::test_custom_kwargs_overrides_config",
"tests/test_client.py::ClientConstruction::test_default",
"tests/test_client.py::ClientConstruction::test_defaults_as_kwarg",
"tests/test_client.py::ClientConstruction::test_defaults_partial_update",
"tests/test_client.py::ClientConstruction::test_headers_from_config",
"tests/test_client.py::ClientConstruction::test_headers_from_kwargs",
"tests/test_client.py::ClientConstruction::test_metadata_api_endpoint_from_env_accepted",
"tests/test_client.py::ClientConstruction::test_none_kwargs_do_not_override_config",
"tests/test_client.py::ClientConstruction::test_polling_params_from_config",
"tests/test_client.py::ClientConstruction::test_polling_params_from_kwargs",
"tests/test_client.py::ClientConstruction::test_positional_args",
"tests/test_client.py::ClientConstruction::test_region_default",
"tests/test_client.py::ClientConstruction::test_region_endpoint_pair_kwarg_overrides_region_endpoint_pair_from_config",
"tests/test_client.py::ClientConstruction::test_region_from_env_overrides_endpoint_from_config",
"tests/test_client.py::ClientConstruction::test_region_kwarg_overrides_endpoint_from_config",
"tests/test_client.py::ClientConstruction::test_region_selection_over_defaults",
"tests/test_client.py::ClientConstruction::test_solver_features_from_config",
"tests/test_client.py::ClientConstruction::test_solver_features_kwargs_override_config",
"tests/test_client.py::ClientConstruction::test_solver_name_from_config",
"tests/test_client.py::ClientConstruction::test_solver_name_overrides_config_features",
"tests/test_client.py::ClientConfigIntegration::test_custom_options",
"tests/test_client.py::MultiRegionSupport::test_region_endpoint_fallback_when_no_metadata_api",
"tests/test_client.py::MultiRegionSupport::test_region_endpoint_fallback_when_region_unknown",
"tests/test_client.py::MultiRegionSupport::test_region_endpoint_null_case",
"tests/test_client.py::MultiRegionSupport::test_region_selection_mocked_end_to_end",
"tests/test_client.py::FeatureBasedSolverSelection::test_anneal_schedule",
"tests/test_client.py::FeatureBasedSolverSelection::test_availability_combo",
"tests/test_client.py::FeatureBasedSolverSelection::test_category",
"tests/test_client.py::FeatureBasedSolverSelection::test_default",
"tests/test_client.py::FeatureBasedSolverSelection::test_derived_category_properties",
"tests/test_client.py::FeatureBasedSolverSelection::test_derived_category_properties_without_category",
"tests/test_client.py::FeatureBasedSolverSelection::test_lower_noise_derived_property",
"tests/test_client.py::FeatureBasedSolverSelection::test_membership_ops",
"tests/test_client.py::FeatureBasedSolverSelection::test_name",
"tests/test_client.py::FeatureBasedSolverSelection::test_nested_properties_intermediate_key_lookup",
"tests/test_client.py::FeatureBasedSolverSelection::test_nested_properties_leaf_lookup",
"tests/test_client.py::FeatureBasedSolverSelection::test_num_qubits",
"tests/test_client.py::FeatureBasedSolverSelection::test_online",
"tests/test_client.py::FeatureBasedSolverSelection::test_order_by_callable",
"tests/test_client.py::FeatureBasedSolverSelection::test_order_by_edgecases",
"tests/test_client.py::FeatureBasedSolverSelection::test_order_by_in_default_solver",
"tests/test_client.py::FeatureBasedSolverSelection::test_order_by_respects_default_solver",
"tests/test_client.py::FeatureBasedSolverSelection::test_order_by_string",
"tests/test_client.py::FeatureBasedSolverSelection::test_parameter_availability_check",
"tests/test_client.py::FeatureBasedSolverSelection::test_property_availability_check",
"tests/test_client.py::FeatureBasedSolverSelection::test_range_boolean_combo",
"tests/test_client.py::FeatureBasedSolverSelection::test_range_ops",
"tests/test_client.py::FeatureBasedSolverSelection::test_regex",
"tests/test_client.py::FeatureBasedSolverSelection::test_relational_ops",
"tests/test_client.py::FeatureBasedSolverSelection::test_set_ops"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-08-29 12:44:12+00:00
|
apache-2.0
| 2,051 |
|
dwavesystems__dwave-cloud-client-606
|
diff --git a/dwave/cloud/auth/flows.py b/dwave/cloud/auth/flows.py
index 9776baa..679d752 100644
--- a/dwave/cloud/auth/flows.py
+++ b/dwave/cloud/auth/flows.py
@@ -189,6 +189,7 @@ class AuthFlow:
url=self.token_endpoint,
grant_type='authorization_code',
code=code,
+ code_verifier=self.code_verifier,
**kwargs)
logger.debug(f"{type(self).__name__}.fetch_token() = {token!r}")
diff --git a/releasenotes/notes/fix-pkce-missing-code-verifier-in-fetch-token-b5cc871cc9d6dfac.yaml b/releasenotes/notes/fix-pkce-missing-code-verifier-in-fetch-token-b5cc871cc9d6dfac.yaml
new file mode 100644
index 0000000..4b6df21
--- /dev/null
+++ b/releasenotes/notes/fix-pkce-missing-code-verifier-in-fetch-token-b5cc871cc9d6dfac.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Fix PKCE support in ``dwave.cloud.auth.flow.AuthFlow`` by properly including
+ ``code_verifier`` in fetch token (code exchange) requests.
+ See `#605 <https://github.com/dwavesystems/dwave-cloud-client/issues/605>`_.
|
dwavesystems/dwave-cloud-client
|
40c8d5c1985f70f467c088aef78b2fd6542c7e45
|
diff --git a/tests/auth/test_flows.py b/tests/auth/test_flows.py
index 1506b19..8c897a4 100644
--- a/tests/auth/test_flows.py
+++ b/tests/auth/test_flows.py
@@ -91,14 +91,18 @@ class TestAuthFlow(unittest.TestCase):
m.get(requests_mock.ANY, status_code=404)
m.post(requests_mock.ANY, status_code=404)
+ m.post(self.token_endpoint, json=dict(error="error", error_description="bad request"))
m.post(self.token_endpoint, additional_matcher=post_body_matcher, json=self.token)
# reset creds
self.creds.clear()
- # verify token fetch flow
+ # make auth request to generate all request params (like PKCE's verifier)
flow = AuthFlow(**self.test_args)
+ _ = flow.get_authorization_url()
+ expected_params.update(code_verifier=flow.code_verifier)
+ # verify token fetch flow
response = flow.fetch_token(code=code)
self.assertEqual(response, self.token)
|
PKCE `code_verifier` absent from fetch token request
Although the `code_challenge` is present in the initial authorization request ([RFC 7636, section 4.3](https://datatracker.ietf.org/doc/html/rfc7636#section-4.3)) and the `code_verifier` is generated client-side, the verifier is missing from the code exchange request (see [section 4.5](https://datatracker.ietf.org/doc/html/rfc7636#section-4.5)).
This represents a security vulnerability for public clients.
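For reference, a minimal sketch of the S256 PKCE pair from RFC 7636: the client sends the `code_challenge` in the authorization request and must send the matching `code_verifier` in the token (code exchange) request.

import base64, hashlib, secrets

# verifier: high-entropy random string, kept client-side
code_verifier = base64.urlsafe_b64encode(secrets.token_bytes(32)).rstrip(b'=').decode()

# challenge: BASE64URL(SHA256(verifier)), sent in the authorization request
digest = hashlib.sha256(code_verifier.encode('ascii')).digest()
code_challenge = base64.urlsafe_b64encode(digest).rstrip(b'=').decode()

# authorization request carries: code_challenge, code_challenge_method='S256'
# token request must carry:      grant_type='authorization_code', code, code_verifier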
|
0.0
|
40c8d5c1985f70f467c088aef78b2fd6542c7e45
|
[
"tests/auth/test_flows.py::TestAuthFlow::test_fetch_token"
] |
[
"tests/auth/test_flows.py::TestAuthFlow::test_auth_url",
"tests/auth/test_flows.py::TestAuthFlow::test_ensure_active_token",
"tests/auth/test_flows.py::TestAuthFlow::test_fetch_token_state",
"tests/auth/test_flows.py::TestAuthFlow::test_refresh_token",
"tests/auth/test_flows.py::TestAuthFlow::test_session_config",
"tests/auth/test_flows.py::TestAuthFlow::test_token_expires_soon",
"tests/auth/test_flows.py::TestAuthFlow::test_token_setter",
"tests/auth/test_flows.py::TestLeapAuthFlow::test_client_id_from_config",
"tests/auth/test_flows.py::TestLeapAuthFlow::test_from_common_config",
"tests/auth/test_flows.py::TestLeapAuthFlow::test_from_default_config",
"tests/auth/test_flows.py::TestLeapAuthFlow::test_from_minimal_config",
"tests/auth/test_flows.py::TestLeapAuthFlow::test_from_minimal_config_with_overrides",
"tests/auth/test_flows.py::TestLeapAuthFlowOOB::test_oob",
"tests/auth/test_flows.py::TestLeapAuthFlowRedirect::test_auth_denied",
"tests/auth/test_flows.py::TestLeapAuthFlowRedirect::test_exchange_fails",
"tests/auth/test_flows.py::TestLeapAuthFlowRedirect::test_non_auth_failure_during_code_exchange",
"tests/auth/test_flows.py::TestLeapAuthFlowRedirect::test_success"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-02-27 14:38:43+00:00
|
apache-2.0
| 2,052 |
|
dwavesystems__dwave-system-422
|
diff --git a/docs/reference/samplers.rst b/docs/reference/samplers.rst
index e4659b3..444a51f 100644
--- a/docs/reference/samplers.rst
+++ b/docs/reference/samplers.rst
@@ -102,6 +102,7 @@ Properties
LeapHybridSampler.properties
LeapHybridSampler.parameters
+ LeapHybridSampler.default_solver
Methods
@@ -128,6 +129,7 @@ Properties
LeapHybridDQMSampler.properties
LeapHybridDQMSampler.parameters
+ LeapHybridDQMSampler.default_solver
Methods
diff --git a/dwave/system/__init__.py b/dwave/system/__init__.py
index bd41344..8112a19 100644
--- a/dwave/system/__init__.py
+++ b/dwave/system/__init__.py
@@ -21,3 +21,5 @@ from dwave.system.composites import *
import dwave.system.composites
from dwave.system.utilities import *
+
+from dwave.system.package_info import __version__
diff --git a/dwave/system/samplers/dwave_sampler.py b/dwave/system/samplers/dwave_sampler.py
index b15ba74..c0fd6d0 100644
--- a/dwave/system/samplers/dwave_sampler.py
+++ b/dwave/system/samplers/dwave_sampler.py
@@ -128,7 +128,6 @@ class DWaveSampler(dimod.Sampler, dimod.Structured):
"""
def __init__(self, failover=False, retry_interval=-1, **config):
-
# strongly prefer QPU solvers; requires kwarg-level override
config.setdefault('client', 'qpu')
diff --git a/dwave/system/samplers/leap_hybrid_sampler.py b/dwave/system/samplers/leap_hybrid_sampler.py
index b5d9a25..45d0fcc 100644
--- a/dwave/system/samplers/leap_hybrid_sampler.py
+++ b/dwave/system/samplers/leap_hybrid_sampler.py
@@ -41,6 +41,13 @@ from dwave.cloud import Client
__all__ = ['LeapHybridSampler', 'LeapHybridDQMSampler']
+# taken from https://stackoverflow.com/a/39542816, licensed under CC BY-SA 3.0
+# not needed in py39+
+class classproperty(property):
+ def __get__(self, obj, objtype=None):
+ return super(classproperty, self).__get__(objtype)
+
+
class LeapHybridSampler(dimod.Sampler):
"""A class for using Leap's cloud-based hybrid BQM solvers.
@@ -58,7 +65,11 @@ class LeapHybridSampler(dimod.Sampler):
``category=hybrid`` and ``supported_problem_type=bqm``. By default, online
hybrid BQM solvers are returned ordered by latest ``version``.
- Inherits from :class:`dimod.Sampler`.
+ The default specification for filtering and ordering solvers by features is
+ available as :attr:`.default_solver` property. Explicitly specifying a
+ solver in a configuration file, an environment variable, or keyword
+ arguments overrides this specification. See the example below on how to
+ extend it instead.
Args:
**config:
@@ -81,37 +92,50 @@ class LeapHybridSampler(dimod.Sampler):
>>> bqm = dimod.BQM.from_qubo(qubo)
...
>>> # Find a good solution
- >>> sampler = LeapHybridSampler() # doctest: +SKIP
- >>> sampleset = sampler.sample(bqm) # doctest: +SKIP
+ >>> sampler = LeapHybridSampler() # doctest: +SKIP
+ >>> sampleset = sampler.sample(bqm) # doctest: +SKIP
+
+ This example specializes the default solver selection by filtering out
+ bulk BQM solvers. (Bulk solvers are throughput-optimal for heavy/batch
+ workloads, have a higher start-up latency, and are not well suited for
+ live workloads. Not all Leap accounts have access to bulk solvers.)
+
+ >>> from dwave.system import LeapHybridSampler
+ ...
+ >>> solver = LeapHybridSampler.default_solver
+ >>> solver.update(name__regex=".*(?<!bulk)$") # name shouldn't end with "bulk"
+ >>> sampler = LeapHybridSampler(solver=solver) # doctest: +SKIP
+ >>> sampler.solver # doctest: +SKIP
+ BQMSolver(id='hybrid_binary_quadratic_model_version2')
"""
_INTEGER_BQM_SIZE_THRESHOLD = 10000
- def __init__(self, solver=None, connection_close=True, **config):
+ @classproperty
+ def default_solver(cls):
+ """dict: Features used to select the latest accessible hybrid BQM solver."""
+ return dict(supported_problem_types__contains='bqm',
+ order_by='-properties.version')
- # we want a Hybrid solver by default, but allow override
+ def __init__(self, **config):
+ # strongly prefer hybrid solvers; requires kwarg-level override
config.setdefault('client', 'hybrid')
- if solver is None:
- solver = {}
-
- if isinstance(solver, abc.Mapping):
- # TODO: instead of solver selection, try with user's default first
- if solver.setdefault('category', 'hybrid') != 'hybrid':
- raise ValueError("the only 'category' this sampler supports is 'hybrid'")
- if solver.setdefault('supported_problem_types__contains', 'bqm') != 'bqm':
- raise ValueError("the only problem type this sampler supports is 'bqm'")
+ # default to short-lived session to prevent resets on slow uploads
+ config.setdefault('connection_close', True)
- # prefer the latest version, but allow kwarg override
- solver.setdefault('order_by', '-properties.version')
-
- self.client = Client.from_config(
- solver=solver, connection_close=connection_close, **config)
+ # prefer the latest hybrid BQM solver available, but allow for an easy
+ # override on any config level above the defaults (file/env/kwarg)
+ defaults = config.setdefault('defaults', {})
+ if not isinstance(defaults, abc.Mapping):
+ raise TypeError("mapping expected for 'defaults'")
+ defaults.update(solver=self.default_solver)
+ self.client = Client.from_config(**config)
self.solver = self.client.get_solver()
- # For explicitly named solvers:
+ # check user-specified solver conforms to our requirements
if self.properties.get('category') != 'hybrid':
raise ValueError("selected solver is not a hybrid solver.")
if 'bqm' not in self.solver.supported_problem_types:
@@ -277,6 +301,12 @@ class LeapHybridDQMSampler:
``category=hybrid`` and ``supported_problem_type=dqm``. By default, online
hybrid DQM solvers are returned ordered by latest ``version``.
+ The default specification for filtering and ordering solvers by features is
+ available as :attr:`.default_solver` property. Explicitly specifying a
+ solver in a configuration file, an environment variable, or keyword
+ arguments overrides this specification. See the example in :class:`.LeapHybridSampler`
+ on how to extend it instead.
+
Args:
**config:
Keyword arguments passed to :meth:`dwave.cloud.client.Client.from_config`.
@@ -316,40 +346,37 @@ class LeapHybridDQMSampler:
>>> print("{} beats {}".format(cases[sampleset.first.sample['my_hand']],
... cases[sampleset.first.sample['their_hand']])) # doctest: +SKIP
rock beats scissors
-
"""
- def __init__(self, solver=None, connection_close=True, **config):
+ @classproperty
+ def default_solver(self):
+ """dict: Features used to select the latest accessible hybrid DQM solver."""
+ return dict(supported_problem_types__contains='dqm',
+ order_by='-properties.version')
- # we want a Hybrid solver by default, but allow override
+ def __init__(self, **config):
+ # strongly prefer hybrid solvers; requires kwarg-level override
config.setdefault('client', 'hybrid')
- if solver is None:
- solver = {}
+ # default to short-lived session to prevent resets on slow uploads
+ config.setdefault('connection_close', True)
- if isinstance(solver, abc.Mapping):
- # TODO: instead of solver selection, try with user's default first
- if solver.setdefault('category', 'hybrid') != 'hybrid':
- raise ValueError("the only 'category' this sampler supports is 'hybrid'")
- if solver.setdefault('supported_problem_types__contains', 'dqm') != 'dqm':
- raise ValueError("the only problem type this sampler supports is 'dqm'")
-
- # prefer the latest version, but allow kwarg override
- solver.setdefault('order_by', '-properties.version')
-
- self.client = Client.from_config(
- solver=solver, connection_close=connection_close, **config)
+ # prefer the latest hybrid DQM solver available, but allow for an easy
+ # override on any config level above the defaults (file/env/kwarg)
+ defaults = config.setdefault('defaults', {})
+ if not isinstance(defaults, abc.Mapping):
+ raise TypeError("mapping expected for 'defaults'")
+ defaults.update(solver=self.default_solver)
+ self.client = Client.from_config(**config)
self.solver = self.client.get_solver()
- # For explicitly named solvers:
+ # check user-specified solver conforms to our requirements
if self.properties.get('category') != 'hybrid':
raise ValueError("selected solver is not a hybrid solver.")
if 'dqm' not in self.solver.supported_problem_types:
raise ValueError("selected solver does not support the 'dqm' problem type.")
- # overwrite the (static)
-
@property
def properties(self):
"""dict: Solver properties as returned by a SAPI query.
diff --git a/requirements.txt b/requirements.txt
index df3f999..b6e2618 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,11 +1,10 @@
--extra-index-url https://pypi.dwavesys.com/simple
-dimod==0.9.15
-dwave-cloud-client==0.8.4
-dwave-networkx==0.8.4
+dimod==0.9.16
+dwave-cloud-client==0.8.7
+dwave-networkx==0.8.8
dwave-drivers==0.4.4
-dwave-tabu==0.3.1
+dwave-tabu==0.4.1
homebase==1.0.1
-minorminer==0.2.4
-numpy==1.19.4; python_version >= '3.6'
-numpy==1.18.5; python_version == '3.5'
+minorminer==0.2.6
+numpy==1.19.4
|
dwavesystems/dwave-system
|
d4d83992067336f9e63ba48d67a7560077d4c108
|
diff --git a/tests/test_leaphybridsolver.py b/tests/test_leaphybridsolver.py
index e3eaeb5..fd2c23b 100644
--- a/tests/test_leaphybridsolver.py
+++ b/tests/test_leaphybridsolver.py
@@ -36,7 +36,7 @@ class MockClient:
def get_solver(self, **filters):
- if isinstance(self.args['solver'], str) and self.args['solver'] == 'not_hybrid_solver':
+ if self.args.get('solver') == 'not_hybrid_solver':
return MockBadLeapHybridSolver()
if self.args.get('client', 'base') not in ['base', 'hybrid']:
@@ -48,87 +48,66 @@ class TestLeapHybridSampler(unittest.TestCase):
@mock.patch('dwave.system.samplers.leap_hybrid_sampler.Client')
def test_solver_init(self, mock_client):
-
mock_client.from_config.side_effect = MockClient
+ default_solver = dict(
+ supported_problem_types__contains='bqm',
+ order_by='-properties.version')
+ self.assertEqual(LeapHybridSampler.default_solver, default_solver)
+
+ defaults = dict(solver=default_solver)
+
# Default call
mock_client.reset_mock()
LeapHybridSampler()
mock_client.from_config.assert_called_once_with(
- client='hybrid', connection_close=True,
- solver={'category': 'hybrid',
- 'supported_problem_types__contains': 'bqm',
- 'order_by': '-properties.version'})
+ client='hybrid',
+ connection_close=True,
+ defaults=defaults)
# Non-hybrid client setting
mock_client.reset_mock()
with self.assertRaises(SolverNotFoundError):
LeapHybridSampler(client='qpu')
- # Explicitly set category to hybrid
- mock_client.reset_mock()
- LeapHybridSampler(solver={'category': 'hybrid',
- 'supported_problem_types__contains': 'bqm'})
- mock_client.from_config.assert_called_once_with(
- client='hybrid', connection_close=True,
- solver={'category': 'hybrid',
- 'supported_problem_types__contains': 'bqm',
- 'order_by': '-properties.version'})
-
- # Explicitly set category to not hybrid
- with self.assertRaises(ValueError):
- LeapHybridSampler(solver={'category': 'not hybrid'})
-
- # Set irrelevant paremeters
- mock_client.reset_mock()
- LeapHybridSampler(solver={'qpu': True})
- mock_client.from_config.assert_called_once_with(
- client='hybrid', connection_close=True,
- solver={'qpu': True, 'category': 'hybrid',
- 'supported_problem_types__contains': 'bqm',
- 'order_by': '-properties.version'})
-
- mock_client.reset_mock()
- LeapHybridSampler(solver={'qpu': True, 'anneal_schedule': False})
- mock_client.from_config.assert_called_once_with(
- client='hybrid', connection_close=True,
- solver={'anneal_schedule': False, 'qpu': True, 'category': 'hybrid',
- 'supported_problem_types__contains': 'bqm',
- 'order_by': '-properties.version'})
-
- # Named solver: hybrid
+ # Explicitly set solver def
mock_client.reset_mock()
- LeapHybridSampler(solver="hybrid_solver")
+ LeapHybridSampler(solver={'supported_problem_types__contains': 'bqm'})
mock_client.from_config.assert_called_once_with(
- client='hybrid', connection_close=True, solver="hybrid_solver")
+ client='hybrid',
+ solver={'supported_problem_types__contains': 'bqm'},
+ connection_close=True,
+ defaults=defaults)
+ # Named solver
+ solver_name = 'hybrid-solver-name'
mock_client.reset_mock()
- LeapHybridSampler(connection_close=False, solver="hybrid_solver")
+ LeapHybridSampler(solver=solver_name)
mock_client.from_config.assert_called_once_with(
- client='hybrid', connection_close=False, solver="hybrid_solver")
+ client='hybrid',
+ solver=solver_name,
+ connection_close=True,
+ defaults=defaults)
# Named solver: non-hybrid
with self.assertRaises(ValueError):
- LeapHybridSampler(solver="not_hybrid_solver")
+ LeapHybridSampler(solver='not_hybrid_solver')
# Set connection_close to False
mock_client.reset_mock()
LeapHybridSampler(connection_close=False)
mock_client.from_config.assert_called_once_with(
- client='hybrid', connection_close=False,
- solver={'category': 'hybrid',
- 'supported_problem_types__contains': 'bqm',
- 'order_by': '-properties.version'})
+ client='hybrid',
+ connection_close=False,
+ defaults=defaults)
mock_client.reset_mock()
- LeapHybridSampler(connection_close=False,
- solver={'category': 'hybrid',
- 'supported_problem_types__contains': 'bqm'})
+ LeapHybridSampler(connection_close=False, solver=solver_name)
mock_client.from_config.assert_called_once_with(
- client='hybrid', connection_close=False,
- solver={'category': 'hybrid',
- 'supported_problem_types__contains': 'bqm',
- 'order_by': '-properties.version'})
+ client='hybrid',
+ solver=solver_name,
+ connection_close=False,
+ defaults=defaults)
@mock.patch('dwave.system.samplers.leap_hybrid_sampler.Client')
def test_sample_bqm(self, mock_client):
|
Hybrid samplers ignore user's config value for solver
`LeapHybridSampler` and `LeapHybridDQMSampler` [update required solver features](https://github.com/dwavesystems/dwave-system/blob/2f82124f518f7a57ae6b7f74c84a8aa62715ca4d/dwave/system/samplers/leap_hybrid_sampler.py#L82-L90) on kwargs level only, effectively ignoring user's config from file/env.
This behaviour predates #288 and the fixes in #317, where we addressed it only in the QPU samplers.
This issue can be seen as a hybrid analogue of #335.
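A sketch of the precedence change the fix introduces: the feature-based solver spec moves into the `defaults` config level, which file/env/kwarg settings all override (previously it was merged at kwarg level, the highest-precedence one). `hybrid_config` is a hypothetical helper for illustration only.

def hybrid_config(**config):
    config.setdefault('client', 'hybrid')
    config.setdefault('connection_close', True)
    defaults = config.setdefault('defaults', {})
    defaults.update(solver=dict(supported_problem_types__contains='bqm',
                                order_by='-properties.version'))
    return config   # then: dwave.cloud.Client.from_config(**config)

# a user-level setting now wins over the default solver spec
cfg = hybrid_config(solver='my-named-solver')
assert cfg['solver'] == 'my-named-solver'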
|
0.0
|
d4d83992067336f9e63ba48d67a7560077d4c108
|
[
"tests/test_leaphybridsolver.py::TestLeapHybridSampler::test_solver_init"
] |
[
"tests/test_leaphybridsolver.py::TestLeapHybridSampler::test_sample_bqm",
"tests/test_leaphybridsolver.py::TestLeapHybridSampler::test_sample_ising_variables",
"tests/test_leaphybridsolver.py::TestLeapHybridSampler::test_sample_qubo_variables"
] |
{
"failed_lite_validators": [
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-09-07 23:26:19+00:00
|
apache-2.0
| 2,053 |
|
e2nIEE__pandapipes-501
|
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 783719d..2ba5825 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -1,6 +1,10 @@
Change Log
=============
+[upcoming release] - 2023-..-..
+-------------------------------
+- [ADDED] support Python 3.11 (now included in test pipeline)
+
[0.8.3] - 2023-01-09
-------------------------------
- [FIXED] inconsistency between testpypi and pypi
diff --git a/pandapipes/pipeflow.py b/pandapipes/pipeflow.py
index a106c23..9f9544a 100644
--- a/pandapipes/pipeflow.py
+++ b/pandapipes/pipeflow.py
@@ -83,8 +83,8 @@ def pipeflow(net, sol_vec=None, **kwargs):
nodes_connected, branches_connected = check_connectivity(
net, branch_pit, node_pit, check_heat=calculate_heat)
else:
- nodes_connected = node_pit[:, ACTIVE_ND].astype(np.bool)
- branches_connected = branch_pit[:, ACTIVE_BR].astype(np.bool)
+ nodes_connected = node_pit[:, ACTIVE_ND].astype(bool)
+ branches_connected = branch_pit[:, ACTIVE_BR].astype(bool)
reduce_pit(net, node_pit, branch_pit, nodes_connected, branches_connected)
|
e2nIEE/pandapipes
|
5491aff63039b20d44c72c471392bbeca3b260d1
|
diff --git a/.github/workflows/run_tests_develop.yml b/.github/workflows/run_tests_develop.yml
index 30dfe43..30ed69b 100644
--- a/.github/workflows/run_tests_develop.yml
+++ b/.github/workflows/run_tests_develop.yml
@@ -18,17 +18,18 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- python-version: ['3.7', '3.8', '3.9', '3.10']
+ python-version: ['3.7', '3.8', '3.9', '3.10', '3.11']
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
- python -m pip install pytest python-igraph pytest-split numba
+ python -m pip install pytest python-igraph pytest-split
+ if ${{ matrix.python-version != '3.11' }}; then python -m pip install numba; fi
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
python -m pip install git+https://github.com/e2nIEE/pandapower@develop#egg=pandapower
pip install .
@@ -58,9 +59,9 @@ jobs:
python-version: ['3.8']
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
@@ -86,17 +87,18 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- python-version: ['3.7', '3.8', '3.9', '3.10']
+ python-version: ['3.7', '3.8', '3.9', '3.10', '3.11']
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
- python -m pip install pytest nbmake pytest-xdist pytest-split python-igraph numba
+ python -m pip install pytest nbmake pytest-xdist pytest-split python-igraph
+ if ${{ matrix.python-version != '3.11' }}; then python -m pip install numba; fi
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
python -m pip install git+https://github.com/e2nIEE/pandapower@develop#egg=pandapower
pip install .
@@ -113,9 +115,9 @@ jobs:
matrix:
python-version: [ '3.8' ]
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- name: Check docs for Python ${{ matrix.python-version }}
diff --git a/.github/workflows/run_tests_master.yml b/.github/workflows/run_tests_master.yml
index d360b78..3597d16 100644
--- a/.github/workflows/run_tests_master.yml
+++ b/.github/workflows/run_tests_master.yml
@@ -17,18 +17,19 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- python-version: ['3.7', '3.8', '3.9', '3.10']
+ python-version: ['3.7', '3.8', '3.9', '3.10', '3.11']
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
- python -m pip install pytest python-igraph pytest-split numba
+ python -m pip install pytest python-igraph pytest-split
+ if ${{ matrix.python-version != '3.11' }}; then python -m pip install numba; fi
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
python -m pip install git+https://github.com/e2nIEE/pandapower@master#egg=pandapower
pip install .
@@ -56,15 +57,16 @@ jobs:
matrix:
python-version: ['3.7', '3.8', '3.9', '3.10']
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
- python -m pip install pytest nbmake pytest-xdist pytest-split python-igraph numba
+ python -m pip install pytest nbmake pytest-xdist pytest-split python-igraph
+ if ${{ matrix.python-version != '3.11' }}; then python -m pip install numba; fi
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
python -m pip install git+https://github.com/e2nIEE/pandapower@master#egg=pandapower
pip install .
@@ -81,9 +83,9 @@ jobs:
matrix:
python-version: [ '3.8' ]
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- name: Check docs for Python ${{ matrix.python-version }}
|
Add support for Python 3.11
We should add Python 3.11 to the test pipeline and fix any issues that are related to deprecations.
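The concrete deprecation behind the fix above: the `np.bool` alias for the builtin `bool` was deprecated in NumPy 1.20 and removed in 1.24, so array casts use the builtin instead.

import numpy as np

active = np.array([1, 0, 1])
mask = active.astype(bool)        # works on all NumPy versions
# mask = active.astype(np.bool)   # fails on NumPy 1.24, where the alias was removed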
|
0.0
|
5491aff63039b20d44c72c471392bbeca3b260d1
|
[
"pandapipes/test/pipeflow_internals/test_inservice.py::test_mixed_indexing_oos1[True]",
"pandapipes/test/pipeflow_internals/test_inservice.py::test_mixed_indexing_oos1[False]",
"pandapipes/test/pipeflow_internals/test_inservice.py::test_mixed_indexing_oos2[True]",
"pandapipes/test/pipeflow_internals/test_inservice.py::test_mixed_indexing_oos2[False]",
"pandapipes/test/pipeflow_internals/test_inservice.py::test_mixed_indexing_oos3[True]",
"pandapipes/test/pipeflow_internals/test_inservice.py::test_mixed_indexing_oos3[False]",
"pandapipes/test/pipeflow_internals/test_inservice.py::test_mixed_indexing_oos4[True]",
"pandapipes/test/pipeflow_internals/test_inservice.py::test_mixed_indexing_oos4[False]",
"pandapipes/test/pipeflow_internals/test_inservice.py::test_mixed_indexing_oos5[True]",
"pandapipes/test/pipeflow_internals/test_inservice.py::test_mixed_indexing_oos5[False]",
"pandapipes/test/pipeflow_internals/test_inservice.py::test_mixed_indexing_oos6[True]",
"pandapipes/test/pipeflow_internals/test_inservice.py::test_mixed_indexing_oos6[False]"
] |
[
"pandapipes/test/api/test_aux_function.py::test_select_from_pit",
"pandapipes/test/api/test_components/test_circ_pump_mass.py::test_circulation_pump_constant_mass[True]",
"pandapipes/test/api/test_components/test_circ_pump_mass.py::test_circulation_pump_constant_mass[False]",
"pandapipes/test/api/test_components/test_circ_pump_pressure.py::test_circulation_pump_constant_pressure[True]",
"pandapipes/test/api/test_components/test_circ_pump_pressure.py::test_circulation_pump_constant_pressure[False]",
"pandapipes/test/api/test_components/test_compressor.py::test_compressor_pressure_ratio[True]",
"pandapipes/test/api/test_components/test_compressor.py::test_compressor_pressure_ratio[False]",
"pandapipes/test/api/test_components/test_ext_grid.py::test_ext_grid_sorting[True]",
"pandapipes/test/api/test_components/test_ext_grid.py::test_ext_grid_sorting[False]",
"pandapipes/test/api/test_components/test_ext_grid.py::test_p_type[True]",
"pandapipes/test/api/test_components/test_ext_grid.py::test_p_type[False]",
"pandapipes/test/api/test_components/test_ext_grid.py::test_t_type_single_pipe[True]",
"pandapipes/test/api/test_components/test_ext_grid.py::test_t_type_single_pipe[False]",
"pandapipes/test/api/test_components/test_ext_grid.py::test_t_type_tee[True]",
"pandapipes/test/api/test_components/test_ext_grid.py::test_t_type_tee[False]",
"pandapipes/test/api/test_components/test_ext_grid.py::test_t_type_tee_2zu_2ab[True]",
"pandapipes/test/api/test_components/test_ext_grid.py::test_t_type_tee_2zu_2ab[False]",
"pandapipes/test/api/test_components/test_ext_grid.py::test_t_type_tee_2zu_2ab2[True]",
"pandapipes/test/api/test_components/test_ext_grid.py::test_t_type_tee_2zu_2ab2[False]",
"pandapipes/test/api/test_components/test_ext_grid.py::test_t_type_tee_2zu_2ab3[True]",
"pandapipes/test/api/test_components/test_ext_grid.py::test_t_type_tee_2zu_2ab3[False]",
"pandapipes/test/api/test_components/test_flow_control.py::test_flow_control_simple_heat[True]",
"pandapipes/test/api/test_components/test_flow_control.py::test_flow_control_simple_heat[False]",
"pandapipes/test/api/test_components/test_flow_control.py::test_flow_control_simple_gas[True]",
"pandapipes/test/api/test_components/test_flow_control.py::test_flow_control_simple_gas[False]",
"pandapipes/test/api/test_components/test_flow_control.py::test_flow_control_simple_gas_two_eg[True]",
"pandapipes/test/api/test_components/test_flow_control.py::test_flow_control_simple_gas_two_eg[False]",
"pandapipes/test/api/test_components/test_heat_exchanger.py::test_heat_exchanger[True]",
"pandapipes/test/api/test_components/test_heat_exchanger.py::test_heat_exchanger[False]",
"pandapipes/test/api/test_components/test_mass_storage.py::test_mass_storage[True]",
"pandapipes/test/api/test_components/test_mass_storage.py::test_mass_storage[False]",
"pandapipes/test/api/test_components/test_pipe_results.py::test_pipe_velocity_results[True]",
"pandapipes/test/api/test_components/test_pipe_results.py::test_pipe_velocity_results[False]",
"pandapipes/test/api/test_components/test_pressure_control.py::test_pressure_control_from_measurement_parameters[True]",
"pandapipes/test/api/test_components/test_pressure_control.py::test_pressure_control_from_measurement_parameters[False]",
"pandapipes/test/api/test_components/test_pressure_control.py::test_2pressure_controller_controllability",
"pandapipes/test/api/test_components/test_pump.py::test_pump_from_measurement_parameteres[True]",
"pandapipes/test/api/test_components/test_pump.py::test_pump_from_measurement_parameteres[False]",
"pandapipes/test/api/test_components/test_pump.py::test_pump_from_regression_parameteres[True]",
"pandapipes/test/api/test_components/test_pump.py::test_pump_from_regression_parameteres[False]",
"pandapipes/test/api/test_components/test_pump.py::test_pump_from_std_type[True]",
"pandapipes/test/api/test_components/test_pump.py::test_pump_from_std_type[False]",
"pandapipes/test/api/test_components/test_pump.py::test_pump_bypass_on_reverse_flow[True]",
"pandapipes/test/api/test_components/test_pump.py::test_pump_bypass_on_reverse_flow[False]",
"pandapipes/test/api/test_components/test_pump.py::test_pump_bypass_high_vdot[True]",
"pandapipes/test/api/test_components/test_pump.py::test_pump_bypass_high_vdot[False]",
"pandapipes/test/api/test_components/test_valve.py::test_valve[True]",
"pandapipes/test/api/test_components/test_valve.py::test_valve[False]",
"pandapipes/test/api/test_convert_format.py::test_convert_format[0.1.0-True]",
"pandapipes/test/api/test_convert_format.py::test_convert_format[0.1.0-False]",
"pandapipes/test/api/test_convert_format.py::test_convert_format[0.1.1-True]",
"pandapipes/test/api/test_convert_format.py::test_convert_format[0.1.1-False]",
"pandapipes/test/api/test_convert_format.py::test_convert_format[0.1.2-True]",
"pandapipes/test/api/test_convert_format.py::test_convert_format[0.1.2-False]",
"pandapipes/test/api/test_convert_format.py::test_convert_format[0.2.0-True]",
"pandapipes/test/api/test_convert_format.py::test_convert_format[0.2.0-False]",
"pandapipes/test/api/test_convert_format.py::test_convert_format[0.4.0-True]",
"pandapipes/test/api/test_convert_format.py::test_convert_format[0.4.0-False]",
"pandapipes/test/api/test_convert_format.py::test_convert_format[0.5.0-True]",
"pandapipes/test/api/test_convert_format.py::test_convert_format[0.5.0-False]",
"pandapipes/test/api/test_convert_format.py::test_convert_format[0.6.0-True]",
"pandapipes/test/api/test_convert_format.py::test_convert_format[0.6.0-False]",
"pandapipes/test/api/test_convert_format.py::test_convert_format[0.7.0-True]",
"pandapipes/test/api/test_convert_format.py::test_convert_format[0.7.0-False]",
"pandapipes/test/api/test_convert_format.py::test_convert_format[0.8.0-True]",
"pandapipes/test/api/test_convert_format.py::test_convert_format[0.8.0-False]",
"pandapipes/test/api/test_convert_format.py::test_convert_format[0.8.1-True]",
"pandapipes/test/api/test_convert_format.py::test_convert_format[0.8.1-False]",
"pandapipes/test/api/test_convert_format.py::test_convert_format[0.8.2-True]",
"pandapipes/test/api/test_convert_format.py::test_convert_format[0.8.2-False]",
"pandapipes/test/api/test_convert_format.py::test_convert_format[0.8.3-True]",
"pandapipes/test/api/test_convert_format.py::test_convert_format[0.8.3-False]",
"pandapipes/test/api/test_create.py::test_create_network",
"pandapipes/test/api/test_create.py::test_create_junction",
"pandapipes/test/api/test_create.py::test_create_sink",
"pandapipes/test/api/test_create.py::test_create_source",
"pandapipes/test/api/test_create.py::test_create_ext_grid",
"pandapipes/test/api/test_create.py::test_create_heat_exchanger",
"pandapipes/test/api/test_create.py::test_create_pipe",
"pandapipes/test/api/test_create.py::test_create_pipe_from_parameters",
"pandapipes/test/api/test_create.py::test_create_valve",
"pandapipes/test/api/test_create.py::test_create_pump",
"pandapipes/test/api/test_create.py::test_create_pump_from_parameters",
"pandapipes/test/api/test_create.py::test_create_mass_storage",
"pandapipes/test/api/test_create.py::test_create_junctions",
"pandapipes/test/api/test_create.py::test_create_pipes_from_parameters",
"pandapipes/test/api/test_create.py::test_create_pipes_from_parameters_raise_except",
"pandapipes/test/api/test_create.py::test_create_pipes",
"pandapipes/test/api/test_create.py::test_create_pipes_raise_except",
"pandapipes/test/api/test_create.py::test_create_valves",
"pandapipes/test/api/test_create.py::test_create_valves_raise_except",
"pandapipes/test/api/test_create.py::test_create_pressure_controls",
"pandapipes/test/api/test_create.py::test_create_pressure_controls_raise_except",
"pandapipes/test/api/test_create.py::test_create_sinks",
"pandapipes/test/api/test_create.py::test_create_sinks_raise_except",
"pandapipes/test/api/test_create.py::test_create_sources",
"pandapipes/test/api/test_create.py::test_create_sources_raise_except",
"pandapipes/test/api/test_network_tables.py::test_default_input_tables",
"pandapipes/test/api/test_network_tables.py::test_additional_tables",
"pandapipes/test/api/test_special_networks.py::test_one_node_net[True]",
"pandapipes/test/api/test_special_networks.py::test_one_node_net[False]",
"pandapipes/test/api/test_special_networks.py::test_two_node_net[True]",
"pandapipes/test/api/test_special_networks.py::test_two_node_net[False]",
"pandapipes/test/api/test_special_networks.py::test_random_net_and_one_node_net[True]",
"pandapipes/test/api/test_special_networks.py::test_random_net_and_one_node_net[False]",
"pandapipes/test/api/test_std_types.py::test_create_and_load_std_type_pipe",
"pandapipes/test/api/test_std_types.py::test_create_std_types_pipe",
"pandapipes/test/api/test_std_types.py::test_copy_std_types_from_net_pipe",
"pandapipes/test/api/test_std_types.py::test_delete_std_type",
"pandapipes/test/api/test_std_types.py::test_available_std_types",
"pandapipes/test/api/test_time_series.py::test_person_run_fct_time_series",
"pandapipes/test/converter/test_stanet_converter.py::test_mini_exampelonia",
"pandapipes/test/converter/test_stanet_converter.py::test_mini_exampelonia_not_stanetlike",
"pandapipes/test/converter/test_stanet_converter.py::test_mini_exampelonia_stanetlike",
"pandapipes/test/converter/test_stanet_converter.py::test_mini_exampelonia_sliders_open",
"pandapipes/test/converter/test_stanet_converter.py::test_mini_exampelonia_sliders_closed",
"pandapipes/test/io/test_file_io.py::test_pickle",
"pandapipes/test/io/test_file_io.py::test_json",
"pandapipes/test/io/test_file_io.py::test_json_multinet",
"pandapipes/test/io/test_file_io.py::test_json_string",
"pandapipes/test/io/test_file_io.py::test_json_string_multinet",
"pandapipes/test/multinet/test_control_multinet.py::test_p2g_single",
"pandapipes/test/multinet/test_control_multinet.py::test_g2p_single",
"pandapipes/test/multinet/test_control_multinet.py::test_g2g_single",
"pandapipes/test/multinet/test_control_multinet.py::test_p2g_multiple",
"pandapipes/test/multinet/test_control_multinet.py::test_g2p_multiple",
"pandapipes/test/multinet/test_control_multinet.py::test_g2g_multiple",
"pandapipes/test/multinet/test_control_multinet.py::test_const_p2g_control",
"pandapipes/test/multinet/test_control_multinet.py::test_run_control_wo_controller",
"pandapipes/test/multinet/test_control_multinet.py::test_p2g_single_run_parameter",
"pandapipes/test/multinet/test_time_series_multinet.py::test_time_series_p2g_control",
"pandapipes/test/multinet/test_time_series_multinet.py::test_time_series_p2g_control_run_parameter",
"pandapipes/test/networks/test_networks.py::test_schutterwald",
"pandapipes/test/openmodelica_comparison/test_heat_transfer_openmodelica.py::test_case_delta[True]",
"pandapipes/test/openmodelica_comparison/test_heat_transfer_openmodelica.py::test_case_delta[False]",
"pandapipes/test/openmodelica_comparison/test_heat_transfer_openmodelica.py::test_case_delta_2sinks[True]",
"pandapipes/test/openmodelica_comparison/test_heat_transfer_openmodelica.py::test_case_delta_2sinks[False]",
"pandapipes/test/openmodelica_comparison/test_heat_transfer_openmodelica.py::test_case_heights[True]",
"pandapipes/test/openmodelica_comparison/test_heat_transfer_openmodelica.py::test_case_heights[False]",
"pandapipes/test/openmodelica_comparison/test_heat_transfer_openmodelica.py::test_case_one_pipe[True]",
"pandapipes/test/openmodelica_comparison/test_heat_transfer_openmodelica.py::test_case_one_pipe[False]",
"pandapipes/test/openmodelica_comparison/test_heat_transfer_openmodelica.py::test_case_one_source[True]",
"pandapipes/test/openmodelica_comparison/test_heat_transfer_openmodelica.py::test_case_one_source[False]",
"pandapipes/test/openmodelica_comparison/test_heat_transfer_openmodelica.py::test_case_section_variation[True]",
"pandapipes/test/openmodelica_comparison/test_heat_transfer_openmodelica.py::test_case_section_variation[False]",
"pandapipes/test/openmodelica_comparison/test_heat_transfer_openmodelica.py::test_case_t_cross[True]",
"pandapipes/test/openmodelica_comparison/test_heat_transfer_openmodelica.py::test_case_t_cross[False]",
"pandapipes/test/openmodelica_comparison/test_heat_transfer_openmodelica.py::test_case_two_pipes[True]",
"pandapipes/test/openmodelica_comparison/test_heat_transfer_openmodelica.py::test_case_two_pipes[False]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_combined_mixed_pc[True]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_combined_mixed_pc[False]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_combined_mixed_sj[True]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_combined_mixed_sj[False]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_combined_versatility_pc[True]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_combined_versatility_pc[False]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_combined_versatility_sj[True]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_combined_versatility_sj[False]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_meshed_delta_pc[True]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_meshed_delta_pc[False]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_meshed_delta_sj[True]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_meshed_delta_sj[False]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_meshed_2valves_pc[True]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_meshed_2valves_pc[False]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_meshed_2valves_sj[True]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_meshed_2valves_sj[False]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_meshed_pumps_pc[True]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_meshed_pumps_pc[False]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_meshed_pumps_sj[True]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_meshed_pumps_sj[False]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_meshed_heights_pc[True]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_meshed_heights_pc[False]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_meshed_heights_sj[True]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_meshed_heights_sj[False]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_one_pipe_1_pc[True]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_one_pipe_1_pc[False]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_one_pipe_1_sj[True]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_one_pipe_1_sj[False]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_one_pipe_2_pc[True]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_one_pipe_2_pc[False]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_one_pipe_2_sj[True]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_one_pipe_2_sj[False]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_one_pipe_3_pc[True]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_one_pipe_3_pc[False]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_one_pipe_3_sj[True]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_one_pipe_3_sj[False]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_strand_net_cross3ext_pc[True]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_strand_net_cross3ext_pc[False]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_strand_net_cross3ext_sj[True]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_strand_net_cross3ext_sj[False]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_strand_net_pc[True]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_strand_net_pc[False]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_strand_net_sj[True]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_strand_net_sj[False]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_strand_net_2pipes_pc[True]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_strand_net_2pipes_pc[False]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_strand_net_2pipes_sj[True]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_strand_net_2pipes_sj[False]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_strand_net_2pumps_pc[True]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_strand_net_2pumps_pc[False]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_strand_net_2pumps_sj[True]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_strand_net_2pumps_sj[False]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_tcross_pc[True]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_tcross_pc[False]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_tcross_sj[True]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_tcross_sj[False]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_tcross_valves_pc[True]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_tcross_valves_pc[False]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_tcross_valves_sj[True]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_tcross_valves_sj[False]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_2eg_two_pipes_pc[True]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_2eg_two_pipes_pc[False]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_2eg_two_pipes_sj[True]",
"pandapipes/test/openmodelica_comparison/test_water_openmodelica.py::test_case_2eg_two_pipes_sj[False]",
"pandapipes/test/pipeflow_internals/test_inservice.py::test_inservice_gas[True]",
"pandapipes/test/pipeflow_internals/test_inservice.py::test_inservice_gas[False]",
"pandapipes/test/pipeflow_internals/test_inservice.py::test_inservice_water[True]",
"pandapipes/test/pipeflow_internals/test_inservice.py::test_inservice_water[False]",
"pandapipes/test/pipeflow_internals/test_inservice.py::test_connectivity_hydraulic[True]",
"pandapipes/test/pipeflow_internals/test_inservice.py::test_connectivity_hydraulic[False]",
"pandapipes/test/pipeflow_internals/test_inservice.py::test_connectivity_hydraulic2[True]",
"pandapipes/test/pipeflow_internals/test_inservice.py::test_connectivity_hydraulic2[False]",
"pandapipes/test/pipeflow_internals/test_inservice.py::test_connectivity_heat1[True]",
"pandapipes/test/pipeflow_internals/test_inservice.py::test_connectivity_heat1[False]",
"pandapipes/test/pipeflow_internals/test_inservice.py::test_connectivity_heat2[True]",
"pandapipes/test/pipeflow_internals/test_inservice.py::test_connectivity_heat2[False]",
"pandapipes/test/pipeflow_internals/test_inservice.py::test_connectivity_heat3[True]",
"pandapipes/test/pipeflow_internals/test_inservice.py::test_connectivity_heat3[False]",
"pandapipes/test/pipeflow_internals/test_inservice.py::test_exclude_unconnected_junction[True]",
"pandapipes/test/pipeflow_internals/test_inservice.py::test_exclude_unconnected_junction[False]",
"pandapipes/test/pipeflow_internals/test_non_convergence.py::test_pipeflow_non_convergence[True]",
"pandapipes/test/pipeflow_internals/test_non_convergence.py::test_pipeflow_non_convergence[False]",
"pandapipes/test/pipeflow_internals/test_options.py::test_set_user_pf_options[True]",
"pandapipes/test/pipeflow_internals/test_options.py::test_set_user_pf_options[False]",
"pandapipes/test/pipeflow_internals/test_pipeflow_analytic_comparison.py::test_gas_internal_nodes[True]",
"pandapipes/test/pipeflow_internals/test_pipeflow_analytic_comparison.py::test_gas_internal_nodes[False]",
"pandapipes/test/pipeflow_internals/test_pipeflow_analytic_comparison.py::test_temperature_internal_nodes_single_pipe[True]",
"pandapipes/test/pipeflow_internals/test_pipeflow_analytic_comparison.py::test_temperature_internal_nodes_single_pipe[False]",
"pandapipes/test/pipeflow_internals/test_pipeflow_analytic_comparison.py::test_temperature_internal_nodes_tee_2ab_1zu[True]",
"pandapipes/test/pipeflow_internals/test_pipeflow_analytic_comparison.py::test_temperature_internal_nodes_tee_2ab_1zu[False]",
"pandapipes/test/pipeflow_internals/test_pipeflow_analytic_comparison.py::test_temperature_internal_nodes_tee_2zu_1ab[True]",
"pandapipes/test/pipeflow_internals/test_pipeflow_analytic_comparison.py::test_temperature_internal_nodes_tee_2zu_1ab[False]",
"pandapipes/test/pipeflow_internals/test_pipeflow_analytic_comparison.py::test_temperature_internal_nodes_tee_2zu_1ab_direction_changed[True]",
"pandapipes/test/pipeflow_internals/test_pipeflow_analytic_comparison.py::test_temperature_internal_nodes_tee_2zu_1ab_direction_changed[False]",
"pandapipes/test/pipeflow_internals/test_pipeflow_analytic_comparison.py::test_temperature_internal_nodes_2zu_2ab[True]",
"pandapipes/test/pipeflow_internals/test_pipeflow_analytic_comparison.py::test_temperature_internal_nodes_2zu_2ab[False]",
"pandapipes/test/pipeflow_internals/test_pipeflow_analytic_comparison.py::test_temperature_internal_nodes_masche_1load[True]",
"pandapipes/test/pipeflow_internals/test_pipeflow_analytic_comparison.py::test_temperature_internal_nodes_masche_1load[False]",
"pandapipes/test/pipeflow_internals/test_pipeflow_analytic_comparison.py::test_temperature_internal_nodes_masche_1load_changed_direction[True]",
"pandapipes/test/pipeflow_internals/test_pipeflow_analytic_comparison.py::test_temperature_internal_nodes_masche_1load_changed_direction[False]",
"pandapipes/test/pipeflow_internals/test_pipeflow_modes.py::test_hydraulic_only[True]",
"pandapipes/test/pipeflow_internals/test_pipeflow_modes.py::test_hydraulic_only[False]",
"pandapipes/test/pipeflow_internals/test_pipeflow_modes.py::test_heat_only[True]",
"pandapipes/test/pipeflow_internals/test_pipeflow_modes.py::test_heat_only[False]",
"pandapipes/test/pipeflow_internals/test_time_series.py::test_time_series",
"pandapipes/test/pipeflow_internals/test_time_series.py::test_time_series_default_ow",
"pandapipes/test/pipeflow_internals/test_update_matrix.py::test_update[True]",
"pandapipes/test/pipeflow_internals/test_update_matrix.py::test_update[False]",
"pandapipes/test/plotting/test_collections.py::test_collection_lengths",
"pandapipes/test/plotting/test_collections.py::test_collections2",
"pandapipes/test/plotting/test_collections.py::test_collection_valve_pipe",
"pandapipes/test/plotting/test_pipeflow_results.py::test_pressure_profile_to_junction_geodata",
"pandapipes/test/plotting/test_simple_collections.py::test_simple_collections",
"pandapipes/test/plotting/test_simple_collections.py::test_simple_collections_out_of_service",
"pandapipes/test/properties/test_fluid_specials.py::test_add_fluid",
"pandapipes/test/properties/test_fluid_specials.py::test_property_adaptation",
"pandapipes/test/properties/test_fluid_specials.py::test_fluid_exceptions",
"pandapipes/test/properties/test_properties_toolbox.py::test_mixture_viscosity_lgas",
"pandapipes/test/properties/test_properties_toolbox.py::test_mixture_viscosity_hgas",
"pandapipes/test/properties/test_properties_toolbox.py::test_mixture_density_lgas",
"pandapipes/test/properties/test_properties_toolbox.py::test_mixture_density_hgas",
"pandapipes/test/properties/test_properties_toolbox.py::test_mixture_heat_capacity_lgas",
"pandapipes/test/properties/test_properties_toolbox.py::test_mixture_heat_capacity_hgas",
"pandapipes/test/properties/test_properties_toolbox.py::test_mixture_molar_mass_lgas",
"pandapipes/test/properties/test_properties_toolbox.py::test_mixture_molar_mass_hgas",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_3parallel_n[True]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_3parallel_n[False]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_combined_3parallel_pc[True]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_combined_3parallel_pc[False]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_square_n[True]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_square_n[False]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_square_pc[True]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_square_pc[False]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_meshed_delta_pc[True]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_meshed_delta_pc[False]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_meshed_pumps[True]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_meshed_pumps[False]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_meshed_2valves_n[True]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_meshed_2valves_n[False]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_meshed_2valves_pc[True]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_meshed_2valves_pc[False]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_one_pipe1_n[True]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_one_pipe1_n[False]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_one_pipe1_pc[True]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_one_pipe1_pc[False]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_one_pipe2_n[True]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_one_pipe2_n[False]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_one_pipe2_pc[True]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_one_pipe2_pc[False]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_strand_2pipes_n[True]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_strand_2pipes_n[False]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_strand_2pipes_pc[True]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_strand_2pipes_pc[False]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_strand_pump[True]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_strand_pump[False]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_tcross1_n[True]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_tcross1_n[False]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_tcross1_pc[True]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_tcross1_pc[False]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_tcross2_n[True]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_tcross2_n[False]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_tcross2_pc[True]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_tcross2_pc[False]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_2eg_hnet_n[True]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_2eg_hnet_n[False]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_2eg_hnet_pc[True]",
"pandapipes/test/stanet_comparison/test_gas_stanet.py::test_case_2eg_hnet_pc[False]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_district_grid_n[True]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_district_grid_n[False]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_district_grid_pc[True]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_district_grid_pc[False]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_pumps_n[True]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_pumps_n[False]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_delta_n[True]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_delta_n[False]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_meshed_2valves_n[True]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_meshed_2valves_n[False]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_meshed_2valves_pc[True]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_meshed_2valves_pc[False]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_one_pipe1_n[True]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_one_pipe1_n[False]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_one_pipe1_pc[True]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_one_pipe1_pc[False]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_one_pipe2_n[True]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_one_pipe2_n[False]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_one_pipe2_pc[True]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_one_pipe2_pc[False]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_one_pipe3_n[True]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_one_pipe3_n[False]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_one_pipe3_pc[True]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_one_pipe3_pc[False]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_simple_strand_net_n[True]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_simple_strand_net_n[False]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_simple_strand_net_pc[True]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_simple_strand_net_pc[False]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_two_pipes_n[True]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_two_pipes_n[False]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_two_pipes_pc[True]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_two_pipes_pc[False]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_cross_pc[True]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_cross_pc[False]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_pump_n[True]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_pump_n[False]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_tcross_n[True]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_tcross_n[False]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_tcross_pc[True]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_tcross_pc[False]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_2eg_two_pipes_n[True]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_2eg_two_pipes_n[False]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_2eg_two_pipes_pc[True]",
"pandapipes/test/stanet_comparison/test_water_stanet.py::test_case_2eg_two_pipes_pc[False]",
"pandapipes/test/test_imports.py::test_import_packages",
"pandapipes/test/test_toolbox.py::test_reindex_junctions",
"pandapipes/test/test_toolbox.py::test_fuse_junctions",
"pandapipes/test/test_toolbox.py::test_create_continuous_index",
"pandapipes/test/test_toolbox.py::test_select_subnet",
"pandapipes/test/test_toolbox.py::test_pit_extraction",
"pandapipes/test/topology/test_graph_searches.py::test_connected_components",
"pandapipes/test/topology/test_nxgraph.py::test_include_branches"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-01-30 15:14:05+00:00
|
bsd-3-clause
| 2,054 |
|
eEcoLiDAR__laserchicken-131
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index d2e3ef4..e6ac92a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,12 +6,15 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]
### Added
+- Normalization module
- General tests that all current and future feature extractors will be checked against.
+- Possibility to have a randomly subsampled (fixed) number of neighbors (eg for faster feature calculation)
## Changed
+- Many feature calculations are done in a vectorized way
## Fixed
-- Fixed many feature extractors for corner cases (e.g. zero points)
+- Corner cases for many feature extractors (e.g. zero points)
## Removed
diff --git a/laserchicken/feature_extractor/__init__.py b/laserchicken/feature_extractor/__init__.py
index f909613..f2106f9 100644
--- a/laserchicken/feature_extractor/__init__.py
+++ b/laserchicken/feature_extractor/__init__.py
@@ -95,26 +95,6 @@ def compute_features(env_point_cloud, neighborhoods, target_idx_base, target_poi
if provided_feature in features_to_do:
features_to_do.remove(provided_feature)
- # for feature in extended_features:
- # if (target_idx_base == 0) and (not overwrite) and (feature in target_point_cloud[keys.point]):
- # continue # Skip feature calc if it is already there and we do not overwrite
- #
- # if verbose:
- # sys.stdout.write('Feature "{}"\n'.format(feature))
- # sys.stdout.flush()
- # start = time.time()
- #
- # extractor = FEATURES[feature]()
- # _add_or_update_feature(env_point_cloud, neighborhoods, target_idx_base,
- # target_point_cloud, extractor, volume, overwrite, kwargs)
- # utils.add_metadata(target_point_cloud, type(
- # extractor).__module__, extractor.get_params())
- #
- # if verbose:
- # elapsed = time.time() - start
- # sys.stdout.write(' took {:.2f} seconds\n'.format(elapsed))
- # sys.stdout.flush()
-
_keep_only_wanted_features(target_point_cloud, wanted_feature_names)
diff --git a/laserchicken/keys.py b/laserchicken/keys.py
index 761dfa8..3c2f9f7 100644
--- a/laserchicken/keys.py
+++ b/laserchicken/keys.py
@@ -1,3 +1,11 @@
+# Name of point data section in point cloud structure
point = 'vertex'
+
+# Name of the normalized height point attribute
+normalized_height = 'normalized_height'
+
+#
point_cloud = 'pointcloud'
+
+#
provenance = 'log'
diff --git a/laserchicken/normalization.py b/laserchicken/normalization.py
new file mode 100644
index 0000000..f793faf
--- /dev/null
+++ b/laserchicken/normalization.py
@@ -0,0 +1,56 @@
+from laserchicken.compute_neighbors import compute_neighborhoods
+from laserchicken import keys
+from laserchicken.feature_extractor.range_z_feature_extractor import RangeZFeatureExtractor as range_extractor
+from laserchicken.keys import normalized_height
+import numpy as np
+
+from laserchicken.test_tools import create_point_cloud
+from laserchicken.volume_specification import Cell
+
+
+def normalize(point_cloud, cell_size=None):
+ z = point_cloud[keys.point]['z']['data']
+ point_cloud[keys.point][normalized_height] = {"type": 'float64', "data": np.array(z)}
+ if cell_size is None:
+ n_points = point_cloud[keys.point][normalized_height]['data'].size
+ _, min_z, _ = range_extractor().extract(point_cloud, range(n_points), None, None, None)
+ point_cloud[keys.point][normalized_height]['data'] = z - min_z
+ else:
+ targets = create_spanning_grid(point_cloud, cell_size)
+
+ neighborhood_sets = compute_neighborhoods(point_cloud, targets, Cell(cell_size), sample_size=None)
+
+ for neighborhood_set in neighborhood_sets:
+ for neighborhood in neighborhood_set:
+ _, min_z, _ = range_extractor().extract(point_cloud, neighborhood, None, None, None)
+ point_cloud[keys.point][normalized_height]['data'][neighborhood] = z[neighborhood] - min_z
+
+ return point_cloud
+
+
+def create_spanning_grid(point_cloud, cell_size):
+ x = point_cloud[keys.point]['x']['data']
+ y = point_cloud[keys.point]['y']['data']
+ min_x = np.min(x)
+ max_x = np.max(x)
+ min_y = np.min(y)
+ max_y = np.max(y)
+
+ cell_x_lengths, n_grid_points = _count_steps_and_points(cell_size, max_x, max_y, min_x, min_y)
+
+ xs = [min_x + cell_size * (0.5 + (i % cell_x_lengths)) for i in range(n_grid_points)]
+ ys = [min_y + cell_size * (0.5 + np.floor(i / cell_x_lengths)) for i in range(n_grid_points)]
+ zs = np.zeros_like(xs)
+ return create_point_cloud(xs, ys, zs)
+
+
+def _count_steps_and_points(cell_size, max_x, max_y, min_x, min_y):
+ cell_x_lengths = _count_steps(min_x, max_x, cell_size)
+ cell_y_lengths = _count_steps(min_y, max_y, cell_size)
+ n_grid_points = cell_x_lengths * cell_y_lengths
+ return cell_x_lengths, n_grid_points
+
+
+def _count_steps(min_x, max_x, cell_size):
+ """Count the number of steps in a grid in a single direction."""
+ return max(int(np.ceil((max_x - min_x) / float(cell_size))), 1)
diff --git a/laserchicken/utils.py b/laserchicken/utils.py
index aa923d7..c660eac 100644
--- a/laserchicken/utils.py
+++ b/laserchicken/utils.py
@@ -67,6 +67,23 @@ def get_features(point_cloud, attribute_names, index=None):
return (point_cloud[keys.point][f]["data"][index] for f in attribute_names)
+def create_point_cloud(x, y, z):
+ """
+ Create a point cloud object given only the x y z values.
+
+ :param x: x attribute values
+ :param y: y attribute values
+ :param z: z attribute values
+ :return: point cloud object
+ """
+ return {keys.point: {'x': {'type': 'float', 'data': np.array(x)},
+ 'y': {'type': 'float', 'data': np.array(y)},
+ 'z': {'type': 'float', 'data': np.array(z)}},
+ keys.point_cloud: {},
+ keys.provenance: []
+ }
+
+
def copy_point_cloud(source_point_cloud, array_mask=None):
"""
Makes a deep copy of a point cloud dict using the array mask when copying the points.
|
eEcoLiDAR/laserchicken
|
b08520951a160d0758e73526bdf0d39b4cefa4ad
|
diff --git a/laserchicken/test_normalize.py b/laserchicken/test_normalize.py
new file mode 100644
index 0000000..bfe02a1
--- /dev/null
+++ b/laserchicken/test_normalize.py
@@ -0,0 +1,41 @@
+import os
+import shutil
+import unittest
+import pytest
+
+import numpy as np
+import pandas as pd
+from numpy.testing import assert_almost_equal, assert_equal
+
+from laserchicken.keys import point, normalized_height
+from laserchicken.normalization import normalize
+from laserchicken.spatial_selections import points_in_polygon_wkt, points_in_polygon_wkt_file, \
+ points_in_polygon_shp_file
+from laserchicken.test_tools import create_point_cloud
+from laserchicken.utils import get_attribute_value
+
+
+class TestNormalize(unittest.TestCase):
+ def test_normalize_empty_point_cloud(self):
+ point_cloud = create_point_cloud([], [], [])
+ normalized_point_cloud = normalize(point_cloud)
+ self.assertTrue(normalized_height in normalized_point_cloud[point])
+
+ def test_normalize_tiny_equal_point_cloud(self):
+ point_cloud = create_point_cloud([0, 0, 0], [0, 0, 0], [0, 0, 0])
+ normalized_point_cloud = normalize(point_cloud)
+ normalized_values = get_attribute_value(normalized_point_cloud, range(3), normalized_height)
+ np.testing.assert_allclose(normalized_values, np.array([0, 0, 0]), atol=1e-7)
+
+ def test_normalize_tiny_unequal_point_cloud(self):
+ point_cloud = create_point_cloud([0, 0, 0], [0, 0, 0], [1, 2, 3])
+ normalized_point_cloud = normalize(point_cloud)
+ normalized_values = get_attribute_value(normalized_point_cloud, range(3), normalized_height)
+ np.testing.assert_allclose(normalized_values, np.array([0, 1, 2]), atol=1e-7)
+
+ def test_normalize_tiny_unequal_point_cloud_multiple_cells(self):
+ """Last of the 3 points is not in the neighborhood of the others."""
+ point_cloud = create_point_cloud([0, 0, 5], [0, 0, 0], [1, 2, 3])
+ normalized_point_cloud = normalize(point_cloud, cell_size=2)
+ normalized_values = get_attribute_value(normalized_point_cloud, range(3), normalized_height)
+ np.testing.assert_allclose(normalized_values, np.array([0, 1, 0]), atol=1e-7)
diff --git a/laserchicken/test_tools.py b/laserchicken/test_tools.py
index 9ed84b2..993099a 100644
--- a/laserchicken/test_tools.py
+++ b/laserchicken/test_tools.py
@@ -6,7 +6,7 @@ import numpy as np
from laserchicken import keys
-def generate_test_point_cloud():
+def generate_tiny_test_point_cloud():
"""Generate a simple but valid point cloud with 3 points."""
pc = {keys.point: {'x': {'type': 'double', 'data': np.array([1, 2, 3], dtype=np.float64)},
'y': {'type': 'double', 'data': np.array([2, 3, 4], dtype=np.float64)},
diff --git a/laserchicken/test_utils.py b/laserchicken/test_utils.py
index b028d1e..b691ef8 100644
--- a/laserchicken/test_utils.py
+++ b/laserchicken/test_utils.py
@@ -8,7 +8,7 @@ from time import time
class TestUtils(unittest.TestCase):
def test_GetPointCloudPoint(self):
""" Should not raise exception. """
- pc = test_tools.generate_test_point_cloud()
+ pc = test_tools.generate_tiny_test_point_cloud()
x, y, z = utils.get_point(pc, 1)
self.assertEqual(2, x)
self.assertEqual(3, y)
@@ -16,7 +16,7 @@ class TestUtils(unittest.TestCase):
def test_GetPointCloudPointFeature(self):
""" Should not raise exception. """
- pc = test_tools.generate_test_point_cloud()
+ pc = test_tools.generate_tiny_test_point_cloud()
cols = 0.5 * (pc[keys.point]["x"]["data"] + pc[keys.point]["y"]["data"])
pc[keys.point]["color"] = {"type": "double", "data": cols}
x, y, z = utils.get_point(pc, 1)
@@ -25,7 +25,7 @@ class TestUtils(unittest.TestCase):
def test_GetPointCloudPointFeatures(self):
""" Should not raise exception. """
- pc = test_tools.generate_test_point_cloud()
+ pc = test_tools.generate_tiny_test_point_cloud()
cols = 0.5 * (pc[keys.point]["x"]["data"] + pc[keys.point]["y"]["data"])
flavs = 0.5 * (pc[keys.point]["x"]["data"] - pc[keys.point]["y"]["data"])
pc[keys.point]["color"] = {"type": "double", "data": cols}
@@ -37,7 +37,7 @@ class TestUtils(unittest.TestCase):
def test_CopyEmptyPointCloud(self):
""" Should not raise exception. """
- pc = test_tools.generate_test_point_cloud()
+ pc = test_tools.generate_tiny_test_point_cloud()
pc[keys.point]["x"]["data"] = np.array([])
pc[keys.point]["y"]["data"] = np.array([])
pc[keys.point]["z"]["data"] = np.array([])
@@ -47,7 +47,7 @@ class TestUtils(unittest.TestCase):
def test_CopyNonEmptyPointCloud(self):
""" Test whether coordinates are copied """
- pc = test_tools.generate_test_point_cloud()
+ pc = test_tools.generate_tiny_test_point_cloud()
x = pc[keys.point]["x"]["data"]
y = pc[keys.point]["y"]["data"]
z = pc[keys.point]["z"]["data"]
@@ -59,7 +59,7 @@ class TestUtils(unittest.TestCase):
def test_CopyPointCloudMetaData(self):
""" Test whether metadata are copied """
- pc = test_tools.generate_test_point_cloud()
+ pc = test_tools.generate_tiny_test_point_cloud()
pc["log"] = [
{"time": datetime.datetime(2018, 1, 23, 12, 15, 59), "module": "filter", "parameters": [("z", "gt", 0.5)]}]
@@ -70,7 +70,7 @@ class TestUtils(unittest.TestCase):
def test_CopyNonEmptyPointCloudBoolMask(self):
""" Test whether coordinates are copied with boolean mask """
- pc = test_tools.generate_test_point_cloud()
+ pc = test_tools.generate_tiny_test_point_cloud()
x = pc[keys.point]["x"]["data"][2]
y = pc[keys.point]["y"]["data"][2]
z = pc[keys.point]["z"]["data"][2]
@@ -82,7 +82,7 @@ class TestUtils(unittest.TestCase):
def test_CopyNonEmptyPointCloudIntMask(self):
""" Test whether coordinates are copied with array indexing """
- pc = test_tools.generate_test_point_cloud()
+ pc = test_tools.generate_tiny_test_point_cloud()
x0, x1 = pc[keys.point]["x"]["data"][0], pc[keys.point]["x"]["data"][1]
y0, y1 = pc[keys.point]["y"]["data"][0], pc[keys.point]["y"]["data"][1]
z0, z1 = pc[keys.point]["z"]["data"][0], pc[keys.point]["z"]["data"][1]
@@ -94,7 +94,7 @@ class TestUtils(unittest.TestCase):
def test_AddMetaDataToPointCloud(self):
""" Test adding info to the point cloud for test module """
- pc = test_tools.generate_test_point_cloud()
+ pc = test_tools.generate_tiny_test_point_cloud()
from laserchicken import select as somemodule
utils.add_metadata(pc,somemodule,params = (0.5,"cylinder",4))
self.assertEqual(len(pc[keys.provenance]),1)
|
calculate normalized height
This module should take a digital terrain model (DTM) and a point cloud as input, and add a normalized height attribute to the point cloud by subtracting the local DTM height from each point.
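A minimal sketch of the idea (not the laserchicken API; a crude per-cell minimum z stands in for the DTM, and all names are illustrative):

```python
import numpy as np

def normalized_height_sketch(x, y, z, cell_size):
    """Subtract the per-cell minimum z (a crude local DTM) from each point."""
    ix = np.floor((x - x.min()) / cell_size).astype(int)  # cell index in x
    iy = np.floor((y - y.min()) / cell_size).astype(int)  # cell index in y
    normalized = np.empty_like(z, dtype=float)
    for cx, cy in set(zip(ix, iy)):
        mask = (ix == cx) & (iy == cy)
        normalized[mask] = z[mask] - z[mask].min()  # height above local ground
    return normalized
```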
|
0.0
|
b08520951a160d0758e73526bdf0d39b4cefa4ad
|
[
"laserchicken/test_normalize.py::TestNormalize::test_normalize_tiny_unequal_point_cloud",
"laserchicken/test_normalize.py::TestNormalize::test_normalize_tiny_equal_point_cloud",
"laserchicken/test_utils.py::TestUtils::test_CopyPointCloudMetaData",
"laserchicken/test_utils.py::TestUtils::test_CopyEmptyPointCloud",
"laserchicken/test_utils.py::TestUtils::test_CopyNonEmptyPointCloud",
"laserchicken/test_utils.py::TestUtils::test_GetPointCloudPointFeatures",
"laserchicken/test_utils.py::TestUtils::test_GetPointCloudPointFeature",
"laserchicken/test_utils.py::TestUtils::test_AddMetaDataToPointCloud",
"laserchicken/test_utils.py::TestUtils::test_GetPointCloudPoint",
"laserchicken/test_utils.py::TestUtils::test_CopyNonEmptyPointCloudIntMask",
"laserchicken/test_utils.py::TestUtils::test_CopyNonEmptyPointCloudBoolMask",
"laserchicken/test_utils.py::TestPlaneFit::test_leastsqr",
"laserchicken/test_utils.py::TestPlaneFit::test_FitPlaneSVD"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-01-30 09:44:29+00:00
|
apache-2.0
| 2,055 |
|
eEcoLiDAR__laserchicken-135
|
diff --git a/laserchicken/feature_extractor/pulse_penetration_feature_extractor.py b/laserchicken/feature_extractor/pulse_penetration_feature_extractor.py
index f32c94a..65b7abb 100644
--- a/laserchicken/feature_extractor/pulse_penetration_feature_extractor.py
+++ b/laserchicken/feature_extractor/pulse_penetration_feature_extractor.py
@@ -13,6 +13,10 @@ from laserchicken.keys import point
GROUND_TAGS = [2]
+def _is_ground(i, point_cloud):
+ return point_cloud[point]['raw_classification']["data"][i] in GROUND_TAGS
+
+
class PulsePenetrationFeatureExtractor(AbstractFeatureExtractor):
"""Feature extractor for the point density."""
@@ -57,14 +61,13 @@ class PulsePenetrationFeatureExtractor(AbstractFeatureExtractor):
'Missing raw_classification attribute which is necessary for calculating pulse_penetratio and '
'density_absolute_mean features.')
- class_neighbors = [point_cloud[point]['raw_classification']["data"][n] for n in neighborhood]
-
- ground_indices = self._get_ground_indices(class_neighbors, GROUND_TAGS)
-
+ ground_indices = [i for i in neighborhood if _is_ground(i, point_cloud)]
pulse_penetration_ratio = self._get_pulse_penetration_ratio(
- ground_indices, class_neighbors)
+ ground_indices, len(neighborhood))
+
+ non_ground_indices = [i for i in neighborhood if not _is_ground(i, point_cloud)]
density_absolute_mean = self._get_density_absolute_mean(
- ground_indices, point_cloud)
+ non_ground_indices, point_cloud)
return pulse_penetration_ratio, density_absolute_mean
@@ -77,20 +80,20 @@ class PulsePenetrationFeatureExtractor(AbstractFeatureExtractor):
return index_grd
@staticmethod
- def _get_pulse_penetration_ratio(ground_indices, class_neighbors):
- n_total = np.max((len(class_neighbors), 1))
+ def _get_pulse_penetration_ratio(ground_indices, n_total_points):
+ n_total = max(n_total_points, 1)
n_ground = len(ground_indices)
return float(n_ground) / n_total
@staticmethod
- def _get_density_absolute_mean(ground_indices, source_point_cloud):
- n_ground = len(ground_indices)
- z_ground = source_point_cloud[point]['z']["data"][ground_indices]
- if n_ground == 0:
+ def _get_density_absolute_mean(non_ground_indices, source_point_cloud):
+ n_non_ground = len(non_ground_indices)
+ z_non_ground = source_point_cloud[point]['z']["data"][non_ground_indices]
+ if n_non_ground == 0:
density_absolute_mean = 0.
else:
density_absolute_mean = float(
- len(z_ground[z_ground > np.mean(z_ground)])) / n_ground * 100.
+ len(z_non_ground[z_non_ground > np.mean(z_non_ground)])) / n_non_ground * 100.
return density_absolute_mean
def get_params(self):
|
eEcoLiDAR/laserchicken
|
6bcc443bbe4ee93b0a51ff5eeb39d0ade4a64ae9
|
diff --git a/laserchicken/feature_extractor/test_pulse_penetration_extractor.py b/laserchicken/feature_extractor/test_pulse_penetration_extractor.py
index bf88823..ff37642 100644
--- a/laserchicken/feature_extractor/test_pulse_penetration_extractor.py
+++ b/laserchicken/feature_extractor/test_pulse_penetration_extractor.py
@@ -7,6 +7,8 @@ import numpy as np
from laserchicken import keys, read_las, utils
from laserchicken.compute_neighbors import compute_neighborhoods
from laserchicken.feature_extractor.pulse_penetration_feature_extractor import PulsePenetrationFeatureExtractor
+from laserchicken.keys import point
+from laserchicken.test_tools import create_point_cloud
from laserchicken.volume_specification import InfiniteCylinder
@@ -16,10 +18,9 @@ class TestPulsePenetrationFeatureExtractorArtificialData(unittest.TestCase):
def test_pulse(self):
"""Pulse extractor on artificial data should yield expected feature values."""
extractor = PulsePenetrationFeatureExtractor()
- pp_ratio, density_absolute_mean = extractor.extract(
+ pp_ratio, _ = extractor.extract(
self.point_cloud, self.neighborhood, None, None, None)
self.assertEqual(pp_ratio, self.expected_pp_ratio)
- self.assertEqual(density_absolute_mean, 50.)
def _set_plane_data(self):
"""Create two planes of ground point at z = +- 0.1."""
@@ -70,6 +71,23 @@ class TestPulsePenetrationFeatureExtractorArtificialData(unittest.TestCase):
self.expected_pp_ratio = float(self.points_per_plane) / n_points
+class TestDensityAbsoluteMeanFeatureExtractorArtificialData(unittest.TestCase):
+ def test_simle_case_correct(self):
+ """Check that one out of 4 points above mean of only vegetation points yields a value of 25"""
+ ground = 2 # Ground tag
+ veg = 4 # Medium vegetation tag
+ x = y = z = np.array([10, 10, 10, 1, 1, 1, 2])
+ point_cloud = create_point_cloud(x, y, z)
+ point_cloud[point]['raw_classification'] = {'data': np.array([ground, ground, ground, veg, veg, veg, veg]),
+ 'type': 'double'}
+ neighborhood = list(range(len(x)))
+
+ extractor = PulsePenetrationFeatureExtractor()
+ _, density_absolute_mean = extractor.extract(point_cloud, neighborhood, None, None, None)
+
+ self.assertAlmostEqual(density_absolute_mean, 25)
+
+
class TestPulsePenetratioFeatureExtractorRealData(unittest.TestCase):
"""Test the pulse extractor on real data and make sure it doesn't crash."""
_test_file_name = 'AHN3.las'
|
density absolute mean bug
The density_absolute_mean feature uses ground points only, where it should use everything but ground points.
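A minimal standalone sketch of the corrected computation (illustrative function; `GROUND_TAGS` mirrors the module constant): the share of non-ground points lying above the mean height of the non-ground points.

```python
import numpy as np

GROUND_TAGS = [2]

def density_absolute_mean_sketch(z, classification):
    """Percentage of non-ground points above the mean z of non-ground points."""
    z = np.asarray(z, dtype=float)
    non_ground = ~np.isin(classification, GROUND_TAGS)
    z_veg = z[non_ground]
    if z_veg.size == 0:
        return 0.0
    return float(np.count_nonzero(z_veg > z_veg.mean())) / z_veg.size * 100.0
```

On the data from the new test (z = [10, 10, 10, 1, 1, 1, 2] with the first three points tagged as ground) this yields 25, as the test expects.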
|
0.0
|
6bcc443bbe4ee93b0a51ff5eeb39d0ade4a64ae9
|
[
"laserchicken/feature_extractor/test_pulse_penetration_extractor.py::TestDensityAbsoluteMeanFeatureExtractorArtificialData::test_simle_case_correct"
] |
[
"laserchicken/feature_extractor/test_pulse_penetration_extractor.py::TestPulsePenetrationFeatureExtractorArtificialData::test_pulse"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-01-30 13:53:19+00:00
|
apache-2.0
| 2,056 |
|
eEcoLiDAR__laserchicken-181
|
diff --git a/.github/workflows/pypi.yml b/.github/workflows/pypi.yml
new file mode 100644
index 0000000..28cdccc
--- /dev/null
+++ b/.github/workflows/pypi.yml
@@ -0,0 +1,25 @@
+name: Publish
+
+on:
+ release:
+ types: [published]
+
+jobs:
+ publish:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up Python
+ uses: actions/setup-python@v1
+ with:
+ python-version: 3.7
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install setuptools wheel twine
+ python setup.py bdist_wheel
+ - name: Publish package
+ uses: pypa/gh-action-pypi-publish@master
+ with:
+ user: __token__
+ password: ${{ secrets.PYPI_TOKEN }}
diff --git a/laserchicken/feature_extractor/band_ratio_feature_extractor.py b/laserchicken/feature_extractor/band_ratio_feature_extractor.py
index 318dd7c..3bf65eb 100644
--- a/laserchicken/feature_extractor/band_ratio_feature_extractor.py
+++ b/laserchicken/feature_extractor/band_ratio_feature_extractor.py
@@ -47,10 +47,10 @@ class BandRatioFeatureExtractor(FeatureExtractor):
"""
name = 'band_ratio_'
if self.lower_limit is not None:
- name += str(self.lower_limit) + '<'
+ name += str(self.lower_limit) + '_'
name += self.data_key
if self.upper_limit is not None:
- name += '<' + str(self.upper_limit)
+ name += '_' + str(self.upper_limit)
return [name]
def extract(self, point_cloud, neighborhoods, target_point_cloud, target_index, volume_description):
|
eEcoLiDAR/laserchicken
|
a393326c8c78b0ada048a09328c0587d3ff44505
|
diff --git a/laserchicken/feature_extractor/test_band_ratio_feature_extractor.py b/laserchicken/feature_extractor/test_band_ratio_feature_extractor.py
index 43e3c83..c08403a 100644
--- a/laserchicken/feature_extractor/test_band_ratio_feature_extractor.py
+++ b/laserchicken/feature_extractor/test_band_ratio_feature_extractor.py
@@ -86,22 +86,22 @@ class TestBandRatioFeatureExtractorSimpleArtificialData(unittest.TestCase):
assert_expected_ratio(volume=InfiniteCylinder(5))
def test_provides_simple(self):
- self.assertEqual(['band_ratio_6<z<20'], BandRatioFeatureExtractor(6, 20).provides())
+ self.assertEqual(['band_ratio_6_z_20'], BandRatioFeatureExtractor(6, 20).provides())
def test_provides_with_only_upper_limit(self):
- self.assertEqual(['band_ratio_z<20'], BandRatioFeatureExtractor(None, 20).provides())
+ self.assertEqual(['band_ratio_z_20'], BandRatioFeatureExtractor(None, 20).provides())
def test_provides_with_only_lower_limit(self):
- self.assertEqual(['band_ratio_20<z'], BandRatioFeatureExtractor(20, None).provides())
+ self.assertEqual(['band_ratio_20_z'], BandRatioFeatureExtractor(20, None).provides())
def test_provides_with_zero_lower_limit(self):
- self.assertEqual(['band_ratio_0<z'], BandRatioFeatureExtractor(0, None).provides())
+ self.assertEqual(['band_ratio_0_z'], BandRatioFeatureExtractor(0, None).provides())
def test_provides_with_zero_upper_limit(self):
- self.assertEqual(['band_ratio_z<0'], BandRatioFeatureExtractor(None, 0).provides())
+ self.assertEqual(['band_ratio_z_0'], BandRatioFeatureExtractor(None, 0).provides())
def test_provides_with_data_key(self):
- self.assertEqual(['band_ratio_1<normalized_height<3'],
+ self.assertEqual(['band_ratio_1_normalized_height_3'],
BandRatioFeatureExtractor(1, 3, data_key=keys.normalized_height).provides())
diff --git a/laserchicken/test_integration_tests.py b/laserchicken/test_integration_tests.py
index 48d28c8..0749999 100644
--- a/laserchicken/test_integration_tests.py
+++ b/laserchicken/test_integration_tests.py
@@ -48,7 +48,7 @@ class FromTutorial(unittest.TestCase):
cylinder = build_volume("infinite cylinder", radius=5)
neighborhoods = compute_neighborhoods(point_cloud, targets, cylinder)
- compute_features(point_cloud, neighborhoods, targets, ['band_ratio_1<normalized_height<2'], cylinder)
+ compute_features(point_cloud, neighborhoods, targets, ['band_ratio_1_normalized_height_2'], cylinder)
from laserchicken import export
export(point_cloud, 'my_output.ply')
|
Rename band ratio
Drop the "<" symbols from the band ratio features. This significantly simplifies the handling of files named after these features on Windows (which does not allow such characters in file names).
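A minimal sketch of the resulting naming scheme (mirroring the `provides` logic in the patch; standalone function for illustration only):

```python
def band_ratio_name(lower, upper, data_key="z"):
    """Build a Windows-safe feature name, e.g. 'band_ratio_6_z_20'."""
    name = "band_ratio_"
    if lower is not None:
        name += str(lower) + "_"   # previously: str(lower) + "<"
    name += data_key
    if upper is not None:
        name += "_" + str(upper)   # previously: "<" + str(upper)
    return name

assert band_ratio_name(6, 20) == "band_ratio_6_z_20"
assert band_ratio_name(None, 0) == "band_ratio_z_0"
```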
|
0.0
|
a393326c8c78b0ada048a09328c0587d3ff44505
|
[
"laserchicken/feature_extractor/test_band_ratio_feature_extractor.py::TestBandRatioFeatureExtractorSimpleArtificialData::test_provides_simple",
"laserchicken/feature_extractor/test_band_ratio_feature_extractor.py::TestBandRatioFeatureExtractorSimpleArtificialData::test_provides_with_data_key",
"laserchicken/feature_extractor/test_band_ratio_feature_extractor.py::TestBandRatioFeatureExtractorSimpleArtificialData::test_provides_with_only_lower_limit",
"laserchicken/feature_extractor/test_band_ratio_feature_extractor.py::TestBandRatioFeatureExtractorSimpleArtificialData::test_provides_with_only_upper_limit",
"laserchicken/feature_extractor/test_band_ratio_feature_extractor.py::TestBandRatioFeatureExtractorSimpleArtificialData::test_provides_with_zero_lower_limit",
"laserchicken/feature_extractor/test_band_ratio_feature_extractor.py::TestBandRatioFeatureExtractorSimpleArtificialData::test_provides_with_zero_upper_limit"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-09-05 11:31:38+00:00
|
apache-2.0
| 2,057 |
|
eWaterCycle__era5cli-102
|
diff --git a/era5cli/cli.py b/era5cli/cli.py
index fadea62..f7cb0d7 100644
--- a/era5cli/cli.py
+++ b/era5cli/cli.py
@@ -425,10 +425,12 @@ def _execute(args):
return True
-def main():
+def main(argv=None):
"""Main."""
# get arguments
- args = _parse_args(sys.argv[1:])
+ if argv is None:
+ argv = sys.argv
+ args = _parse_args(argv[1:])
_execute(args)
|
eWaterCycle/era5cli
|
ad5508a481add3116da9b7f95aad6eafe7cc0b4b
|
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 6390f32..1184ed1 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -1,4 +1,4 @@
-"""Tests for era5cli utility functios."""
+"""Tests for era5cli utility functions."""
import unittest.mock as mock
import pytest
diff --git a/tests/test_integration.py b/tests/test_integration.py
new file mode 100644
index 0000000..1055dbc
--- /dev/null
+++ b/tests/test_integration.py
@@ -0,0 +1,128 @@
+"""Tests to check the full era5cli workflow."""
+
+import logging
+import pytest
+
+from textwrap import dedent
+
+from era5cli.cli import main
+
+
+# combine calls with result and possible warning message
+call_result = [
+ {
+ # orography is translated to geopotential in the query
+ "call": dedent("""\
+ era5cli hourly --variables orography --startyear 2008 --dryrun
+ """),
+ "result": dedent("""\
+ reanalysis-era5-single-levels {'variable': 'geopotential', 'year':
+ 2008, 'month': ['01', '02', '03', '04', '05', '06', '07', '08',
+ '09', '10', '11', '12'], 'time': ['00:00', '01:00', '02:00',
+ '03:00', '04:00', '05:00', '06:00', '07:00', '08:00', '09:00',
+ '10:00', '11:00', '12:00', '13:00', '14:00', '15:00', '16:00',
+ '17:00', '18:00', '19:00', '20:00', '21:00', '22:00', '23:00'],
+ 'format': 'netcdf', 'product_type': 'reanalysis', 'day': ['01',
+ '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12',
+ '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23',
+ '24', '25', '26', '27', '28', '29', '30', '31']}
+ era5_orography_2008_hourly.nc"""),
+ "warn": "The variable 'orography' has been deprecated by CDS."
+ },
+ {
+ # geopotential needs '--levels surface' to be correctly interpreted
+ "call": dedent("""\
+ era5cli hourly --variables geopotential --startyear 2008 --dryrun
+ --levels surface"""),
+ "result": dedent("""\
+ reanalysis-era5-single-levels {'variable': 'geopotential', 'year':
+ 2008, 'month': ['01', '02', '03', '04', '05', '06', '07', '08',
+ '09', '10', '11', '12'], 'time': ['00:00', '01:00', '02:00',
+ '03:00', '04:00', '05:00', '06:00', '07:00', '08:00', '09:00',
+ '10:00', '11:00', '12:00', '13:00', '14:00', '15:00', '16:00',
+ '17:00', '18:00', '19:00', '20:00', '21:00', '22:00', '23:00'],
+ 'format': 'netcdf', 'product_type': 'reanalysis', 'day': ['01',
+ '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12',
+ '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23',
+ '24', '25', '26', '27', '28', '29', '30', '31']}
+ era5_geopotential_2008_hourly.nc"""),
+ "warn": "Getting variable from surface level data."
+ },
+ {
+ # without --levels surface, geopotential calls pressure level data
+ "call": dedent("""\
+ era5cli hourly --variables geopotential --startyear 2008
+ --dryrun"""),
+ "result": dedent("""\
+ reanalysis-era5-pressure-levels {'variable': 'geopotential',
+ 'year': 2008, 'month': ['01', '02', '03', '04', '05', '06', '07',
+ '08', '09', '10', '11', '12'], 'time': ['00:00', '01:00', '02:00',
+ '03:00', '04:00', '05:00', '06:00', '07:00', '08:00', '09:00',
+ '10:00', '11:00', '12:00', '13:00', '14:00', '15:00', '16:00',
+ '17:00', '18:00', '19:00', '20:00', '21:00', '22:00', '23:00'],
+ 'format': 'netcdf', 'pressure_level': [1, 2, 3, 5, 7, 10, 20, 30,
+ 50, 70, 100, 125, 150, 175, 200, 225, 250, 300, 350, 400, 450, 500,
+ 550, 600, 650, 700, 750, 775, 800, 825, 850, 875, 900, 925, 950,
+ 975, 1000], 'product_type': 'reanalysis', 'day': ['01', '02', '03',
+ '04', '05', '06', '07', '08', '09', '10', '11', '12', '13', '14',
+ '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25',
+ '26', '27', '28', '29', '30', '31']}
+ era5_geopotential_2008_hourly.nc"""),
+ "warn": "Getting variable from pressure level data."
+ },
+ {
+ # preliminary-back-extension is combined with monthly-means
+ "call": dedent("""\
+ era5cli monthly --variables temperature --startyear 1960 --prelimbe
+ --dryrun"""),
+ "result": dedent("""\
+ reanalysis-era5-pressure-levels-monthly-means-preliminary-back-extension
+ {'variable': 'temperature', 'year': 1960, 'month': ['01', '02',
+ '03', '04', '05', '06', '07', '08', '09', '10', '11', '12'],
+ 'time': ['00:00'], 'format': 'netcdf', 'pressure_level': [1, 2, 3,
+ 5, 7, 10, 20, 30, 50, 70, 100, 125, 150, 175, 200, 225, 250, 300,
+ 350, 400, 450, 500, 550, 600, 650, 700, 750, 775, 800, 825, 850,
+ 875, 900, 925, 950, 975, 1000], 'product_type':
+ 'reanalysis-monthly-means-of-daily-means'}
+ era5_temperature_1960_monthly.nc""")
+ },
+ {
+ # era5-Land is combined with monthly means
+ "call": dedent("""\
+ era5cli monthly --variables snow_cover --startyear 2008 --land
+ --dryrun"""),
+ "result": dedent("""\
+ reanalysis-era5-land-monthly-means {'variable': 'snow_cover',
+ 'year': 2008, 'month': ['01', '02', '03', '04', '05', '06', '07',
+ '08', '09', '10', '11', '12'], 'time': ['00:00'], 'format':
+ 'netcdf', 'product_type': 'monthly_averaged_reanalysis'}
+ era5-land_snow_cover_2008_monthly.nc""")
+ }
+]
+
+
+def clean_ids(call):
+ call = call.replace('\n', ' ')
+ call = call.replace('--dryrun', '')
+ return(call)
+
+
+ids = [clean_ids(item["call"]) for item in call_result]
+
+
[email protected]("call_result", call_result, ids=ids)
+def test_main(call_result, capsys, caplog):
+ call = call_result["call"].split()
+ result = call_result["result"].replace('\n', ' ') + '\n'
+ # until the actual fetch is monkeypatched, make sure the tests are dryruns
+ if '--dryrun' not in call:
+ pytest.fail('call must be a dryrun')
+ with caplog.at_level(logging.INFO):
+ main(call)
+ captured = capsys.readouterr().out
+ assert result == captured
+ try:
+ warn = call_result["warn"]
+ assert warn in caplog.text
+ except KeyError:
+ assert caplog.text == ''
|
Add integration tests
At the moment, we test era5cli in two ways:
- Test whether arguments given on the command line are parsed correctly and passed correctly to the Fetch class (`test_cli.py`).
- Test whether information given to the Fetch class results in a correct CDS query (`test_fetch.py`).
It would be good to also add integration tests that combine these two: arguments are given and the final query is asserted. This would make it easier to write tests that assert correct behavior of the tool as a whole.
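A minimal sketch of what such an integration test could look like (following the pattern in the test patch above: `main` accepts an argv list and the printed dry-run query is asserted; the exact expected substrings here are illustrative):

```python
from era5cli.cli import main

def test_dryrun_prints_query(capsys):
    # A dry run prints the CDS dataset name and the query instead of downloading.
    argv = ("era5cli hourly --variables geopotential --startyear 2008"
            " --levels surface --dryrun").split()
    main(argv)
    out = capsys.readouterr().out
    assert "reanalysis-era5-single-levels" in out
    assert "'variable': 'geopotential'" in out
```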
|
0.0
|
ad5508a481add3116da9b7f95aad6eafe7cc0b4b
|
[
"tests/test_integration.py::test_main[era5cli"
] |
[
"tests/test_cli.py::test_parse_args",
"tests/test_cli.py::test_area_argument",
"tests/test_cli.py::test_period_args",
"tests/test_cli.py::test_level_arguments",
"tests/test_cli.py::test_main_fetch",
"tests/test_cli.py::test_main_info"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2021-11-22 20:35:48+00:00
|
apache-2.0
| 2,058 |
|
eWaterCycle__era5cli-131
|
diff --git a/.zenodo.json b/.zenodo.json
index 8c63ee1..233162f 100644
--- a/.zenodo.json
+++ b/.zenodo.json
@@ -72,6 +72,11 @@
"affiliation": "Netherlands eScience Center",
"name": "Verhoeven, Stefan",
"orcid": "0000-0002-5821-2060"
+ },
+ {
+ "affiliation": "Environment and Climate Change Canada",
+ "name": "Malinina, Elizaveta",
+ "orcid": "0000-0002-4102-2877"
}
],
"description": "A command line interface to download ERA5 data from the Climate Data Store.\n",
diff --git a/CITATION.cff b/CITATION.cff
index dc47990..1df530f 100644
--- a/CITATION.cff
+++ b/CITATION.cff
@@ -77,6 +77,11 @@ authors:
family-names: Verhoeven
given-names: Stefan
orcid: https://orcid.org/0000-0002-5821-2060
+ -
+ affiliation: "Environment and Climate Change Canada"
+ family-names: Malinina
+ given-names: Elizaveta
+ orcid: https://orcid.org/0000-0002-4102-2877
cff-version: 1.2.0
date-released: 2021-11-30
diff --git a/era5cli/cli.py b/era5cli/cli.py
index f7cb0d7..13f6c23 100644
--- a/era5cli/cli.py
+++ b/era5cli/cli.py
@@ -148,7 +148,8 @@ def _build_parser():
Whether to download the preliminary back extension
(1950-1978). Note that when `--prelimbe` is used,
`--startyear` and `--endyear` should be set
- between 1950 and 1978.
+ between 1950 and 1978. Please, be aware that
+ ERA5 data is available from 1959.
`--prelimbe` is incompatible with `--land`.
''')
@@ -159,7 +160,7 @@ def _build_parser():
help=textwrap.dedent('''\
Whether to download data from the ERA5-Land
dataset. Note that the ERA5-Land dataset starts in
- 1981.
+ 1950.
`--land` is incompatible with the use of
`--prelimbe` and `--ensemble`.
@@ -343,12 +344,12 @@ def _construct_year_list(args):
'year should be between 1950 and 1978'
)
elif args.land:
- assert 1981 <= year <= datetime.now().year, (
- 'for ERA5-Land, year should be between 1981 and present'
+ assert 1950 <= year <= datetime.now().year, (
+ 'for ERA5-Land, year should be between 1950 and present'
)
else:
- assert 1979 <= year <= datetime.now().year, (
- 'year should be between 1979 and present'
+ assert 1959 <= year <= datetime.now().year, (
+ 'year should be between 1959 and present'
)
assert endyear >= args.startyear, (
diff --git a/era5cli/fetch.py b/era5cli/fetch.py
index ad3883c..e623f9f 100644
--- a/era5cli/fetch.py
+++ b/era5cli/fetch.py
@@ -250,6 +250,10 @@ class Fetch:
def _product_type(self):
"""Construct the product type name from the options."""
+ assert not (self.land and self.ensemble), (
+ 'ERA5-Land does not contain Ensemble statistics.'
+ )
+
if self.period == 'hourly' and self.ensemble and self.statistics:
# The only configuration to return a list
return [
@@ -387,7 +391,8 @@ class Fetch:
if self.prelimbe:
if self.land:
raise ValueError(
- "Back extension not (yet) available for ERA5-Land.")
+ "Back extension not available for ERA5-Land. "
+ "ERA5-Land data is available from 1950 on.")
name += "-preliminary-back-extension"
return name, variable
|
eWaterCycle/era5cli
|
2b8e44948f11f3247187ca18e231227b5476cb9e
|
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 1184ed1..e4e5155 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -138,7 +138,7 @@ def test_main_fetch(fetch):
assert cli._execute(args)
# should give an AssertionError if years are out of bounds
- argv = ['hourly', '--startyear', '1950',
+ argv = ['hourly', '--startyear', '1949',
'--variables', 'total_precipitation', '--statistics',
'--endyear', '2007', '--ensemble']
args = cli._parse_args(argv)
@@ -160,14 +160,6 @@ def test_main_fetch(fetch):
args = cli._parse_args(argv)
cli._execute(args)
- # no land available for back extension
- argv = ['monthly', '--startyear', '1980', '--endyear', '1980',
- '--variables', 'total_precipitation', '--synoptic',
- '--ensemble', '--land']
- args = cli._parse_args(argv)
- with pytest.raises(AssertionError):
- cli._execute(args)
-
@mock.patch("era5cli.info.Info", autospec=True)
def test_main_info(info):
diff --git a/tests/test_fetch.py b/tests/test_fetch.py
index f35f2d4..7187c62 100644
--- a/tests/test_fetch.py
+++ b/tests/test_fetch.py
@@ -334,6 +334,10 @@ def test_product_type():
producttype = era5._product_type()
assert producttype is None
+ era5.ensemble = True
+ with pytest.raises(AssertionError):
+ producttype = era5._product_type()
+
def test_check_levels():
"""Test _check_levels function of Fetch class"""
|
No error message if using --land and --ensemble
While I was changing the test for the `--land` and `--prelimbe` flags, I realized that a request combining `--land` and `--ensemble` goes through and [era5cli/fetch.py](https://github.com/eWaterCycle/era5cli/blob/main/era5cli/fetch.py) doesn't raise an error. Should I add one, since I'm working on this anyway?
`era5cli monthly --startyear 1990 --endyear 1990 --variables total_precipitation --land --ensemble`
Download request is being queued at Copernicus.
It can take some time before downloading starts, please do not kill this process in the meantime.
2022-10-24 16:14:54,411 INFO Welcome to the CDS
2022-10-24 16:14:54,411 INFO Sending request to https://cds.climate.copernicus.eu/api/v2/resources/reanalysis-era5-land-monthly-means
2022-10-24 16:14:54,784 INFO Request is queued
2022-10-24 16:14:55,954 INFO Request is running
2022-10-24 16:14:57,625 INFO Request is failed
2022-10-24 16:14:57,625 ERROR Message: no data is available within your requested subset
2022-10-24 16:14:57,625 ERROR Reason: Request returned no data
2022-10-24 16:14:57,626 ERROR Traceback (most recent call last):
2022-10-24 16:14:57,626 ERROR File "/opt/cdstoolbox/cdscompute/cdscompute/cdshandlers/services/handler.py", line 59, in handle_request
2022-10-24 16:14:57,626 ERROR result = cached(context.method, proc, context, context.args, context.kwargs)
2022-10-24 16:14:57,626 ERROR File "/opt/cdstoolbox/cdscompute/cdscompute/caching.py", line 108, in cached
2022-10-24 16:14:57,626 ERROR result = proc(context, *context.args, **context.kwargs)
2022-10-24 16:14:57,626 ERROR File "/opt/cdstoolbox/cdscompute/cdscompute/services.py", line 124, in __call__
2022-10-24 16:14:57,626 ERROR return p(*args, **kwargs)
2022-10-24 16:14:57,626 ERROR File "/opt/cdstoolbox/cdscompute/cdscompute/services.py", line 60, in __call__
2022-10-24 16:14:57,626 ERROR return self.proc(context, *args, **kwargs)
2022-10-24 16:14:57,626 ERROR File "/home/cds/cdsservices/services/mars/mars.py", line 47, in internal
2022-10-24 16:14:57,626 ERROR return mars(context, request, **kwargs)
2022-10-24 16:14:57,626 ERROR File "/home/cds/cdsservices/services/mars/mars.py", line 19, in mars
2022-10-24 16:14:57,626 ERROR execute_mars(context, requests)
2022-10-24 16:14:57,626 ERROR File "/home/cds/cdsservices/services/mars/execute_mars.py", line 25, in execute_mars
2022-10-24 16:14:57,626 ERROR raise NoDataException("Request returned no data", '')
2022-10-24 16:14:57,626 ERROR cdsinf.exceptions.NoDataException: Request returned no data
Traceback (most recent call last):
File "/home/acrnemr/miniconda3/envs/era5cl/bin/era5cli", line 8, in <module>
sys.exit(main())
File "/home/acrnemr/miniconda3/envs/era5cl/lib/python3.10/site-packages/era5cli/cli.py", line 435, in main
_execute(args)
File "/home/acrnemr/miniconda3/envs/era5cl/lib/python3.10/site-packages/era5cli/cli.py", line 425, in _execute
era5.fetch(dryrun=args.dryrun)
File "/home/acrnemr/miniconda3/envs/era5cl/lib/python3.10/site-packages/era5cli/fetch.py", line 176, in fetch
self._split_variable_yr()
File "/home/acrnemr/miniconda3/envs/era5cl/lib/python3.10/site-packages/era5cli/fetch.py", line 249, in _split_variable_yr
pool.map(self._getdata, variables, years, outputfiles)
File "/home/acrnemr/miniconda3/envs/era5cl/lib/python3.10/site-packages/pathos/threading.py", line 136, in map
return _pool.map(star(f), zip(*args)) # chunksize
File "/home/acrnemr/miniconda3/envs/era5cl/lib/python3.10/site-packages/multiprocess/pool.py", line 364, in map
return self._map_async(func, iterable, mapstar, chunksize).get()
File "/home/acrnemr/miniconda3/envs/era5cl/lib/python3.10/site-packages/multiprocess/pool.py", line 771, in get
raise self._value
File "/home/acrnemr/miniconda3/envs/era5cl/lib/python3.10/site-packages/multiprocess/pool.py", line 125, in worker
result = (True, func(*args, **kwds))
File "/home/acrnemr/miniconda3/envs/era5cl/lib/python3.10/site-packages/multiprocess/pool.py", line 48, in mapstar
return list(map(*args))
File "/home/acrnemr/miniconda3/envs/era5cl/lib/python3.10/site-packages/pathos/helpers/mp_helper.py", line 15, in <lambda>
func = lambda args: f(*args)
File "/home/acrnemr/miniconda3/envs/era5cl/lib/python3.10/site-packages/era5cli/fetch.py", line 441, in _getdata
connection.retrieve(name, request, outputfile)
File "/home/acrnemr/miniconda3/envs/era5cl/lib/python3.10/site-packages/cdsapi/api.py", line 348, in retrieve
result = self._api("%s/resources/%s" % (self.url, name), request, "POST")
File "/home/acrnemr/miniconda3/envs/era5cl/lib/python3.10/site-packages/cdsapi/api.py", line 505, in _api
raise Exception(
Exception: no data is available within your requested subset. Request returned no data.
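The patch above addresses this with an early assertion in `_product_type`. A minimal standalone sketch of that kind of fail-fast guard (function name is illustrative, and it raises ValueError rather than the patch's assert):

```python
def check_land_ensemble(land: bool, ensemble: bool) -> None:
    """Fail fast instead of sending a request that CDS cannot fulfil."""
    if land and ensemble:
        raise ValueError(
            "ERA5-Land does not contain ensemble statistics; "
            "drop either --land or --ensemble."
        )
```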
|
0.0
|
2b8e44948f11f3247187ca18e231227b5476cb9e
|
[
"tests/test_fetch.py::test_product_type"
] |
[
"tests/test_cli.py::test_parse_args",
"tests/test_cli.py::test_area_argument",
"tests/test_cli.py::test_period_args",
"tests/test_cli.py::test_level_arguments",
"tests/test_cli.py::test_main_fetch",
"tests/test_cli.py::test_main_info",
"tests/test_fetch.py::test_init",
"tests/test_fetch.py::test_fetch_nodryrun",
"tests/test_fetch.py::test_fetch_dryrun",
"tests/test_fetch.py::test_extension",
"tests/test_fetch.py::test_define_outputfilename",
"tests/test_fetch.py::test_number_outputfiles",
"tests/test_fetch.py::test_check_levels",
"tests/test_fetch.py::test_check_variable",
"tests/test_fetch.py::test_build_name",
"tests/test_fetch.py::test_build_request",
"tests/test_fetch.py::test_incompatible_options",
"tests/test_fetch.py::test_area"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-11-23 14:33:30+00:00
|
apache-2.0
| 2,059 |
|
eWaterCycle__era5cli-143
|
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 216f9be..71d3e3c 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -16,6 +16,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Change CDS keys from `.cdsapirc` file to `.config/eracli.txt` file. This will avoid conflict with e.g. ADS.
- If a user makes a request without `--splitmonths` they are warned that the behavior will change in the future, and that they have to choose between `--splitmonths False` and `--splitmonths True`.
- When a request would encounter a Request Too Large error in the CDS API, they are warned, and given a suggestion to use `--splitmonths`.
+ - When a file already exists and would be overwritten, the user is prompted for confirmation. This should prevent accidental overwriting of files. This check can be skipped with the `--overwrite` flag.
- The earliest valid start year of requests has been updated to 1950.
- Usage of `--prelimbe` now raises a deprecation warning. It will be deprecated in a future release, as all the back extension years are now included in the main products.
diff --git a/era5cli/args/common.py b/era5cli/args/common.py
index 35194b5..73d6c4d 100644
--- a/era5cli/args/common.py
+++ b/era5cli/args/common.py
@@ -27,6 +27,7 @@ def add_common_args(argument_parser: ArgumentParser) -> None:
--prelimbe,
--land,
--area,
+ --overwrite
Args:
argument_parser: the ArgumentParser that the arguments are added to.
@@ -242,6 +243,22 @@ def add_common_args(argument_parser: ArgumentParser) -> None:
),
)
+ argument_parser.add_argument(
+ "--overwrite",
+ action="store_true",
+ default=False,
+ help=textwrap.dedent(
+ """
+ Whether to overwrite existing files or not.
+ Providing the `--overwrite` argument will make
+ era5cli overwrite existing files. By default,
+ you will be prompted if a file already exists, with
+ the question if you want to overwrite it or not.
+
+ """
+ ),
+ )
+
def construct_year_list(args):
"""Make a continous list of years from the startyear and endyear arguments."""
diff --git a/era5cli/args/periods.py b/era5cli/args/periods.py
index d6988fd..a049590 100644
--- a/era5cli/args/periods.py
+++ b/era5cli/args/periods.py
@@ -185,7 +185,6 @@ def set_period_args(args):
)
else:
splitmonths: bool = args.splitmonths
- print(splitmonths)
statistics: bool = args.statistics
if statistics:
diff --git a/era5cli/cli.py b/era5cli/cli.py
index 4a2d2c6..8bc6d06 100644
--- a/era5cli/cli.py
+++ b/era5cli/cli.py
@@ -69,6 +69,7 @@ def _execute(input_args: argparse.Namespace) -> True:
merge=input_args.merge,
prelimbe=input_args.prelimbe,
land=input_args.land,
+ overwrite=input_args.overwrite,
)
era5.fetch(dryrun=input_args.dryrun)
return True
diff --git a/era5cli/fetch.py b/era5cli/fetch.py
index 02d13ce..9f76188 100644
--- a/era5cli/fetch.py
+++ b/era5cli/fetch.py
@@ -86,6 +86,12 @@ class Fetch:
Note that the ERA5-Land dataset starts in 1981.
`land = True` is incompatible with the use of
`prelimbe = True` and `ensemble = True`.
+ overwrite: bool
+ Whether to overwrite existing files or not.
+ Setting `overwrite = True` will make
+ era5cli overwrite existing files. By default,
+ you will be prompted if a file already exists, with
+ the question if you want to overwrite it or not.
"""
def __init__(
@@ -108,6 +114,7 @@ class Fetch:
threads=None,
prelimbe=False,
land=False,
+ overwrite=False,
):
"""Initialization of Fetch class."""
self._get_login() # Get login info from config file.
@@ -166,6 +173,8 @@ class Fetch:
self.land = land
"""bool: Whether to download from the ERA5-Land
dataset."""
+ self.overwrite = overwrite
+ """bool: Whether to overwrite existing files."""
if self.merge and self.splitmonths:
raise ValueError(
@@ -197,6 +206,9 @@ class Fetch:
)
def _get_login(self):
+ # First check if the config exists, and guide the user if it does not.
+ key_management.check_era5cli_config()
+ # Only then load the keys (as they should be there now).
self.url, self.key = key_management.load_era5cli_config()
def fetch(self, dryrun=False):
@@ -271,11 +283,12 @@ class Fetch:
outputfiles = [
self._define_outputfilename(var, self.years) for var in self.variables
]
+ if not self.overwrite:
+ era5cli.utils.assert_outputfiles_not_exist(outputfiles)
+
years = len(outputfiles) * [self.years]
- if not self.threads:
- pool = Pool()
- else:
- pool = Pool(nodes=self.threads)
+
+ pool = Pool(nodes=self.threads) if self.threads else Pool()
pool.map(self._getdata, self.variables, years, outputfiles)
def _split_variable_yr(self):
@@ -285,7 +298,12 @@ class Fetch:
for var in self.variables:
outputfiles += [self._define_outputfilename(var, [yr]) for yr in self.years]
variables += len(self.years) * [var]
+
+ if not self.overwrite:
+ era5cli.utils.assert_outputfiles_not_exist(outputfiles)
+
years = len(self.variables) * self.years
+
pool = Pool(nodes=self.threads) if self.threads else Pool()
pool.map(self._getdata, variables, years, outputfiles)
@@ -304,6 +322,9 @@ class Fetch:
years += [year]
months += [month]
+ if not self.overwrite:
+ era5cli.utils.assert_outputfiles_not_exist(outputfiles)
+
pool = Pool(nodes=self.threads) if self.threads else Pool()
pool.map(self._getdata, variables, years, outputfiles, months)
@@ -501,6 +522,7 @@ class Fetch:
def _getdata(self, variables: list, years: list, outputfile: str, months=None):
"""Fetch variables using cds api call."""
name, request = self._build_request(variables, years, months)
+
if self.dryrun:
print(name, request, outputfile)
else:
diff --git a/era5cli/utils.py b/era5cli/utils.py
index 2f8545a..37650a8 100644
--- a/era5cli/utils.py
+++ b/era5cli/utils.py
@@ -4,6 +4,7 @@ import datetime
import shutil
import textwrap
from pathlib import Path
+from typing import List
import prettytable
from netCDF4 import Dataset
import era5cli
@@ -205,3 +206,18 @@ def strtobool(value: str) -> bool:
"Could not convert string to boolean. Valid inputs are:"
f"{trues} and {falses} (case insensitive)."
)
+
+
+def assert_outputfiles_not_exist(outputfiles: List[str]) -> None:
+ """Check if files already exist, and prompt the user if they do."""
+ if any(Path(file).exists() for file in outputfiles):
+ answer = input(
+ "\n Some filenames already exists in this folder."
+ "\n Do you want to overwrite them? (Y/N)"
+ "\n Tip: to skip this flag, use `--overwrite`."
+ )
+ if answer.lower() in ["n", "no", "nope"]:
+ raise FileExistsError(
+ "\n One or more files already exist in this folder."
+ "\n Please remove them, or change to a different folder to continue"
+ )
|
eWaterCycle/era5cli
|
634d323a5b2067199452d34578331a5566a45c8e
|
diff --git a/tests/test_fetch.py b/tests/test_fetch.py
index 34b67f1..97b21d4 100644
--- a/tests/test_fetch.py
+++ b/tests/test_fetch.py
@@ -1,5 +1,6 @@
"""Tests for era5cli Fetch class."""
+import pathlib
import unittest.mock as mock
import pytest
from era5cli import _request_size
@@ -23,6 +24,14 @@ ALL_MONTHS = ["01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11",
# fmt: on
[email protected](scope="module", autouse=True)
+def my_thing_mock():
+ with mock.patch(
+ "era5cli.fetch.key_management.check_era5cli_config", autospec=True
+ ) as _fixture:
+ yield _fixture
+
+
def initialize(
outputformat="netcdf",
merge=False,
@@ -41,6 +50,7 @@ def initialize(
prelimbe=False,
land=False,
splitmonths=False,
+ overwrite=False,
):
with mock.patch(
"era5cli.fetch.key_management.load_era5cli_config",
@@ -66,6 +76,7 @@ def initialize(
prelimbe=prelimbe,
land=land,
splitmonths=splitmonths,
+ overwrite=overwrite,
)
@@ -643,3 +654,21 @@ def test_area():
with pytest.raises(ValueError):
era5 = initialize(area=[-180, 180, -90])
era5._build_request("total_precipitation", [2008])
+
+
+def test_file_exists():
+ with mock.patch.object(pathlib.Path, "exists", return_value=True):
+ era5 = initialize()
+
+ with mock.patch("builtins.input", return_value="Y"):
+ era5.fetch(dryrun=True)
+
+ with mock.patch("builtins.input", return_value="N"):
+ with pytest.raises(FileExistsError):
+ era5.fetch(dryrun=True)
+
+
+def test_overwrite():
+ with mock.patch.object(pathlib.Path, "exists", return_value=True):
+ era5 = initialize(overwrite=True)
+ era5.fetch(dryrun=True)
diff --git a/tests/test_integration.py b/tests/test_integration.py
index 8f348d7..859fa77 100644
--- a/tests/test_integration.py
+++ b/tests/test_integration.py
@@ -7,6 +7,14 @@ import pytest
from era5cli.cli import main
[email protected](scope="module", autouse=True)
+def my_thing_mock():
+ with mock.patch(
+ "era5cli.fetch.key_management.check_era5cli_config", autospec=True
+ ) as _fixture:
+ yield _fixture
+
+
# combine calls with result and possible warning message
call_result = [
{
|
An option for overwriting the existing file and checking its size
Implement an option that checks for an existing file in the download directory and checks its size.
To find out the size, it might be useful to have a look at:
**Use MARS "list" verb to find out the size of your request**
https://confluence.ecmwf.int/pages/viewpage.action?pageId=77223119#HRES:Atmospheric(oper),Modellevel(ml),Forecast(fc)-UseMARS%22list%22verbtofindoutthesizeofyourrequest
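
For illustration, a minimal runnable sketch of the check-and-prompt flow the patch above implements (the function and flag names follow the patch; the filename, prompt text, and `fetch` stub are simplified stand-ins, not the real era5cli code):

```python
from pathlib import Path
from typing import List


def assert_outputfiles_not_exist(outputfiles: List[str]) -> None:
    """Prompt when a target file exists; abort unless the user agrees."""
    if any(Path(f).exists() for f in outputfiles):
        answer = input("Some filenames already exist. Overwrite them? (Y/N) ")
        if answer.lower() in ("n", "no", "nope"):
            raise FileExistsError("One or more output files already exist.")


def fetch(outputfiles: List[str], overwrite: bool = False) -> None:
    if not overwrite:  # `--overwrite` skips the prompt entirely
        assert_outputfiles_not_exist(outputfiles)
    print("downloading", outputfiles)  # stands in for the real cdsapi call


fetch(["era5_total_precipitation_2008_hourly.nc"])  # illustrative filename
```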
|
0.0
|
634d323a5b2067199452d34578331a5566a45c8e
|
[
"tests/test_fetch.py::test_init",
"tests/test_fetch.py::test_fetch_nodryrun",
"tests/test_fetch.py::test_fetch_dryrun",
"tests/test_fetch.py::test_extension",
"tests/test_fetch.py::test_define_outputfilename",
"tests/test_fetch.py::test_number_outputfiles[variables0-years0-False-False-False-6]",
"tests/test_fetch.py::test_number_outputfiles[variables1-years1-True-False-False-2]",
"tests/test_fetch.py::test_number_outputfiles[variables2-years2-False-True-False-6]",
"tests/test_fetch.py::test_number_outputfiles[variables3-years3-False-True-True-72]",
"tests/test_fetch.py::test_number_outputfiles[variables4-years4-False-False-False-2]",
"tests/test_fetch.py::test_number_outputfiles[variables5-years5-False-False-False-3]",
"tests/test_fetch.py::test_product_type",
"tests/test_fetch.py::test_check_levels",
"tests/test_fetch.py::test_check_variable",
"tests/test_fetch.py::test_build_name",
"tests/test_fetch.py::test_build_request",
"tests/test_fetch.py::test_incompatible_options",
"tests/test_fetch.py::test_area",
"tests/test_fetch.py::test_file_exists",
"tests/test_fetch.py::test_overwrite"
] |
[
"tests/test_integration.py::test_main[era5cli"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-03-28 12:18:54+00:00
|
apache-2.0
| 2,060 |
|
eWaterCycle__era5cli-154
|
diff --git a/.bumpversion.cfg b/.bumpversion.cfg
index 92fd404..56ae836 100644
--- a/.bumpversion.cfg
+++ b/.bumpversion.cfg
@@ -1,5 +1,5 @@
[bumpversion]
-current_version = 1.4.0
+current_version = 1.4.1
[comment]
comment = The contents of this file cannot be merged with that of pyproject.toml until https://github.com/c4urself/bump2version/issues/42 is resolved
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..a46fe40
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,8 @@
+repos:
+- repo: local
+ hooks:
+ - id: run-formatter
+ name: run-formatter
+ entry: hatch run format
+ language: system
+ types: [python]
diff --git a/CITATION.cff b/CITATION.cff
index e0741c5..01fa5f8 100644
--- a/CITATION.cff
+++ b/CITATION.cff
@@ -97,4 +97,4 @@ license: Apache-2.0
message: "If you use this software, please cite it using these metadata."
repository-code: "https://github.com/ewatercycle/era5cli"
title: era5cli
-version: "1.4.0"
+version: "1.4.1"
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 8879e23..a04cf3c 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -6,13 +6,24 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## Unreleased
+
+
+## 1.4.1 - 2023-06-30
+**Fixed:**
+
+ - Fix a bug that prevented the creation of the configuration file, if the "~/.config" folder did not exist yet ([#153](https://github.com/eWaterCycle/era5cli/pull/154)).
+
**Added:**
-- The developer documentation now contains instructions on how to maintain the conda-forge feedstock for era5cli.
+- The developer documentation now contains instructions on how to maintain the conda-forge feedstock for era5cli ([#150](https://github.com/eWaterCycle/era5cli/pull/154)).
**Changed:**
- - Before asking for a user input, a check is made if the code is running in an interactive terminal or not. If not (e.g. if era5cli is called through a different script and stdin is not available), the input request is skipped.
+ - Before asking for a user input, a check is made if the code is running in an interactive terminal or not. If not (e.g. if era5cli is called through a different script and stdin is not available), the input request is skipped ([#152](https://github.com/eWaterCycle/era5cli/pull/154)).
+
+**Dev changes:**
+
+ - A pre-commit hook has been added, to facilitate pre-commit users. Documentation on the setup is added to the developer documentation ([#153](https://github.com/eWaterCycle/era5cli/pull/154)).
## 1.4.0 - 2023-04-21
diff --git a/docs/general_development.md b/docs/general_development.md
index 73ea613..03d0041 100644
--- a/docs/general_development.md
+++ b/docs/general_development.md
@@ -80,6 +80,17 @@ hatch run format
This will apply the `black` and `isort` formatting, and then check the code style.
+??? tip "Using pre-commit"
+ For pre-commit users, a pre-commit configuration has been added. This hook will execute the `hatch run format` command.
+
+ After installing pre-commit in your python environment (`pip install pre-commit`), you can do
+ ```
+ pre-commit install
+ ```
+ to set up the git hook scripts.
+
+ For more information, see the [pre-commit website](https://pre-commit.com/).
+
## Generating the documentation
To view the documentation locally, simply run the following command:
diff --git a/era5cli/__version__.py b/era5cli/__version__.py
index 447910f..ab34b7c 100644
--- a/era5cli/__version__.py
+++ b/era5cli/__version__.py
@@ -26,4 +26,4 @@ __author__ = (
"Bart Schilperoort",
)
__email__ = "[email protected]"
-__version__ = "1.4.0"
+__version__ = "1.4.1"
diff --git a/era5cli/key_management.py b/era5cli/key_management.py
index be6af29..fc13abf 100644
--- a/era5cli/key_management.py
+++ b/era5cli/key_management.py
@@ -160,7 +160,7 @@ def load_era5cli_config() -> Tuple[str, str]:
def write_era5cli_config(url: str, uid: str, key: str):
- ERA5CLI_CONFIG_PATH.parent.mkdir(exist_ok=True)
+ ERA5CLI_CONFIG_PATH.parent.mkdir(exist_ok=True, parents=True)
with open(ERA5CLI_CONFIG_PATH, mode="w", encoding="utf-8") as f:
f.write(f"url: {url}\n")
f.write(f"uid: {uid}\n")
|
eWaterCycle/era5cli
|
127494b2c071ccf0af3b6022c7da6ef293558a55
|
diff --git a/tests/test_config.py b/tests/test_config.py
index 04fcd45..f5c8026 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -6,12 +6,13 @@ from era5cli import key_management
@pytest.fixture(scope="function")
def empty_path_era5(tmp_path_factory):
- return tmp_path_factory.mktemp(".config") / "era5cli.txt"
+ return tmp_path_factory.mktemp("usrhome") / ".config" / "era5cli" / "cds_keys.txt"
@pytest.fixture(scope="function")
def valid_path_era5(tmp_path_factory):
- fn = tmp_path_factory.mktemp(".config") / "era5cli.txt"
+ fn = tmp_path_factory.mktemp(".config") / "era5cli" / "cds_keys.txt"
+ fn.parent.mkdir(parents=True)
with open(fn, mode="w", encoding="utf-8") as f:
f.write("url: b\nuid: 123\nkey: abc-def\n")
return fn
@@ -31,7 +32,12 @@ def valid_path_cds(tmp_path_factory):
class TestEra5CliConfig:
- """Test the functionality when the /.config/era5cli.txt file exists."""
+ """Test the functionality for writing and loading the config file."""
+
+ def test_set_config(self, empty_path_era5):
+ with patch("era5cli.key_management.ERA5CLI_CONFIG_PATH", empty_path_era5):
+ key_management.write_era5cli_config(url="b", uid="123", key="abc-def")
+ assert key_management.load_era5cli_config() == ("b", "123:abc-def")
def test_load_era5cli_config(self, valid_path_era5):
with patch("era5cli.key_management.ERA5CLI_CONFIG_PATH", valid_path_era5):
|
Error when adding keys

I got this error while adding my CDS key on a Windows PC. We fixed it by manually creating the `.config` directory.
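
A minimal sketch of the failure and the one-line fix from the patch (the config path follows the test fixtures above; the home directory is illustrative):

```python
from pathlib import Path

config = Path.home() / ".config" / "era5cli" / "cds_keys.txt"

# Before the fix: raises FileNotFoundError when ~/.config does not exist.
# config.parent.mkdir(exist_ok=True)

# After the fix: missing intermediate directories are created as well.
config.parent.mkdir(exist_ok=True, parents=True)
```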
|
0.0
|
127494b2c071ccf0af3b6022c7da6ef293558a55
|
[
"tests/test_config.py::TestEra5CliConfig::test_set_config",
"tests/test_config.py::TestConfigCdsrc::test_cdsrcfile_user_says_yes"
] |
[
"tests/test_config.py::TestEra5CliConfig::test_load_era5cli_config",
"tests/test_config.py::TestEra5CliConfig::test_check_era5cli_config",
"tests/test_config.py::TestConfigCdsrc::test_cdsrcfile_user_says_no",
"tests/test_config.py::TestConfigCdsrc::test_cdsrcfile_invalid_keys",
"tests/test_config.py::TestAttemptCdsLogin::test_status_fail",
"tests/test_config.py::TestAttemptCdsLogin::test_connection_fail",
"tests/test_config.py::TestAttemptCdsLogin::test_retrieve_fail",
"tests/test_config.py::TestAttemptCdsLogin::test_all_pass"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-06-29 06:22:59+00:00
|
apache-2.0
| 2,061 |
|
eWaterCycle__era5cli-58
|
diff --git a/era5cli/cli.py b/era5cli/cli.py
index a13ce9c..4dc2de2 100644
--- a/era5cli/cli.py
+++ b/era5cli/cli.py
@@ -5,6 +5,8 @@ import argparse
import textwrap
import sys
+from datetime import datetime
+
import era5cli.inputref as ref
import era5cli.info as einfo
import era5cli.fetch as efetch
@@ -127,6 +129,17 @@ def _build_parser():
''')
)
+ common.add_argument(
+ "--prelimbe", action="store_true", default=False,
+ help=textwrap.dedent('''\
+ Whether to download the preliminary back extension
+ (1950-1978). Providing the
+ "--prelimbe" argument downloads data from
+ the preliminary back extension.
+
+ ''')
+ )
+
mnth = argparse.ArgumentParser(add_help=False)
mnth.add_argument(
@@ -271,6 +284,32 @@ def _run_info(args):
return True
+def _construct_year_list(args):
+ if not args.endyear:
+ endyear = args.startyear
+ else:
+ endyear = args.endyear
+
+ # check whether correct years have been entered
+ for year in (args.startyear, endyear):
+ if args.prelimbe:
+ assert 1950 <= year <= 1978, (
+ 'year should be between 1950 and 1978'
+ )
+ else:
+ assert 1979 <= year <= datetime.now().year, (
+ 'year should be between 1979 and present'
+ )
+
+ assert endyear >= args.startyear, (
+ 'endyear should be >= startyear or None')
+
+ # make list of years to be downloaded
+ years = list(range(args.startyear, endyear + 1))
+
+ return years
+
+
def _set_period_args(args):
# set subroutine specific arguments for monthly and hourly fetch
if args.command == "monthly":
@@ -288,6 +327,11 @@ def _set_period_args(args):
elif args.command == "hourly":
synoptic = None
statistics = args.statistics
+ if statistics:
+ assert args.ensemble, (
+ "Statistics can only be computed over an ensemble, "
+ "add --ensemble or remove --statistics."
+ )
days = args.days
hours = args.hours
else:
@@ -305,30 +349,26 @@ def _execute(args):
# the fetching subroutines
else:
- # make list of years to be downloaded
- if not args.endyear:
- years = [args.startyear]
- else:
- assert (args.endyear >= args.startyear), (
- 'endyear should be >= startyear or None')
- years = list(range(args.startyear, args.endyear + 1))
-
+ years = _construct_year_list(args)
synoptic, statistics, days, hours = _set_period_args(args)
# try to build and send download request
- era5 = efetch.Fetch(years,
- months=args.months,
- days=days,
- hours=hours,
- variables=args.variables,
- outputformat=args.format,
- outputprefix=args.outputprefix,
- period=args.command,
- ensemble=args.ensemble,
- synoptic=synoptic,
- statistics=statistics,
- pressurelevels=args.levels,
- threads=args.threads,
- merge=args.merge)
+ era5 = efetch.Fetch(
+ years,
+ months=args.months,
+ days=days,
+ hours=hours,
+ variables=args.variables,
+ outputformat=args.format,
+ outputprefix=args.outputprefix,
+ period=args.command,
+ ensemble=args.ensemble,
+ synoptic=synoptic,
+ statistics=statistics,
+ pressurelevels=args.levels,
+ threads=args.threads,
+ merge=args.merge,
+ prelimbe=args.prelimbe,
+ )
era5.fetch(dryrun=args.dryrun)
return True
diff --git a/era5cli/fetch.py b/era5cli/fetch.py
index 6333e79..d67b1e6 100644
--- a/era5cli/fetch.py
+++ b/era5cli/fetch.py
@@ -57,13 +57,15 @@ class Fetch:
Indicating if files should be downloaded. By default
files will be downloaded. For a dryrun the cdsapi request will
be written to stdout.
+ prelimbe: bool
+ Whether to download the preliminary back extension (1950-1978).
"""
def __init__(self, years: list, months: list, days: list,
hours: list, variables: list, outputformat: str,
outputprefix: str, period: str, ensemble: bool,
statistics=None, synoptic=None, pressurelevels=None,
- merge=False, threads=None):
+ merge=False, threads=None, prelimbe=False):
"""Initialization of Fetch class."""
self.months = era5cli.utils._zpad_months(months)
"""list(str): List of zero-padded strings of months
@@ -107,6 +109,9 @@ class Fetch:
"""bool: Whether to get monthly averaged by hour of day
(synoptic=True) or monthly means of daily means
(synoptic=False)."""
+ self.prelimbe = prelimbe
+ """bool: Whether to select from the ERA5 preliminary back
+ extension which supports years from 1950 to 1978"""
def fetch(self, dryrun=False):
"""Split calls and fetch results.
@@ -195,17 +200,25 @@ class Fetch:
elif not self.ensemble:
producttype += "reanalysis"
- if self.period == "monthly":
+ if self.period == "monthly" and not self.prelimbe:
producttype = "monthly_averaged_" + producttype
if self.synoptic:
producttype += "_by_hour_of_day"
- elif self.period == "hourly":
- if self.ensemble and self.statistics:
- producttype = [
- "ensemble_members",
- "ensemble_mean",
- "ensemble_spread",
- ]
+ elif self.period == "monthly" and self.prelimbe:
+ if self.ensemble:
+ producttype = "members-"
+ elif not self.ensemble:
+ producttype = "reanalysis-"
+ if self.synoptic:
+ producttype += "synoptic-monthly-means"
+ elif not self.synoptic:
+ producttype += "monthly-means-of-daily-means"
+ elif self.period == "hourly" and self.ensemble and self.statistics:
+ producttype = [
+ "ensemble_members",
+ "ensemble_mean",
+ "ensemble_spread",
+ ]
return producttype
@@ -253,6 +266,9 @@ class Fetch:
if self.days:
request["day"] = self.days
+ if self.prelimbe:
+ name += "-preliminary-back-extension"
+
return(name, request)
def _exit(self):
|
eWaterCycle/era5cli
|
2c2dffd3aabe02e29461e8e8bfb6ac9ce85c4465
|
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 2be478d..13c1cda 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -47,6 +47,14 @@ def test_period_args():
# Period_args consists of (synoptic, statistics, days, hours)
assert period_args == (True, None, None, [4, 7])
+ argv = ['monthly', '--startyear', '2008',
+ '--variables', 'total_precipitation',
+ '--synoptic', '--ensemble']
+ args = cli._parse_args(argv)
+ period_args = cli._set_period_args(args)
+ # Period_args consists of (synoptic, statistics, days, hours)
+ assert period_args == (True, None, None, range(0, 24))
+
# test whether the info option does not end up in _set_period_args
argv = ['info', '2Dvars']
args = cli._parse_args(argv)
@@ -71,6 +79,22 @@ def test_main_fetch(fetch):
with pytest.raises(AssertionError):
assert cli._execute(args)
+ # should give an AssertionError if years are out of bounds
+ argv = ['hourly', '--startyear', '1950',
+ '--variables', 'total_precipitation', '--statistics',
+ '--endyear', '2007', '--ensemble']
+ args = cli._parse_args(argv)
+ with pytest.raises(AssertionError):
+ assert cli._execute(args)
+
+ # should give an AssertionError if years are out of bounds
+ argv = ['hourly', '--startyear', '1950',
+ '--variables', 'total_precipitation', '--statistics',
+ '--endyear', '2007', '--ensemble', '--prelimbe']
+ args = cli._parse_args(argv)
+ with pytest.raises(AssertionError):
+ assert cli._execute(args)
+
# monthly call without endyear
argv = ['monthly', '--startyear', '2008',
'--variables', 'total_precipitation', '--synoptic',
diff --git a/tests/test_fetch.py b/tests/test_fetch.py
index 1690c26..cff9ff8 100644
--- a/tests/test_fetch.py
+++ b/tests/test_fetch.py
@@ -9,7 +9,8 @@ def initialize(outputformat='netcdf', merge=False, statistics=None,
synoptic=None, ensemble=True, pressurelevels=None,
threads=2, period='hourly', variables=['total_precipitation'],
years=[2008, 2009], months=list(range(1, 13)),
- days=list(range(1, 32)), hours=list(range(0, 24))):
+ days=list(range(1, 32)), hours=list(range(0, 24)),
+ prelimbe=False):
"""Initializer of the class."""
era5 = fetch.Fetch(years=years,
months=months,
@@ -24,7 +25,8 @@ def initialize(outputformat='netcdf', merge=False, statistics=None,
synoptic=synoptic,
pressurelevels=pressurelevels,
merge=merge,
- threads=threads)
+ threads=threads,
+ prelimbe=prelimbe)
return era5
@@ -43,7 +45,8 @@ def test_init():
synoptic=None,
pressurelevels=None,
merge=False,
- threads=2)
+ threads=2,
+ prelimbe=False)
valid_months = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10',
'11', '12']
@@ -71,6 +74,7 @@ def test_init():
assert era5.pressure_levels is None
assert not era5.merge
assert era5.threads == 2
+ assert not era5.prelimbe
# initializing hourly variable with days=None should result in ValueError
with pytest.raises(TypeError):
@@ -247,10 +251,24 @@ def test_product_type():
producttype = era5._product_type()
assert producttype == 'monthly_averaged_reanalysis'
+ era5.prelimbe = True
+ producttype = era5._product_type()
+ assert producttype == 'reanalysis-monthly-means-of-daily-means'
+
+ era5.prelimbe = False
era5.synoptic = True
producttype = era5._product_type()
assert producttype == 'monthly_averaged_reanalysis_by_hour_of_day'
+ era5.prelimbe = True
+ producttype = era5._product_type()
+ assert producttype == 'reanalysis-synoptic-monthly-means'
+
+ era5.ensemble = True
+ producttype = era5._product_type()
+ assert producttype == 'members-synoptic-monthly-means'
+
+ era5.prelimbe = False
era5.ensemble = False
era5.statistics = True
producttype = era5._product_type()
@@ -278,7 +296,6 @@ def test_build_request():
'12:00', '13:00', '14:00', '15:00', '16:00', '17:00',
'18:00', '19:00', '20:00', '21:00', '22:00', '23:00'],
'format': 'netcdf'}
- print(request['day'])
assert request == req
# monthly data
@@ -298,6 +315,28 @@ def test_build_request():
'format': 'netcdf'}
assert request == req
+ # preliminary back extension
+ era5 = initialize(period='monthly',
+ variables=['total_precipitation'],
+ years=[1970],
+ prelimbe=True)
+ (name, request) = era5._build_request('total_precipitation', [1970])
+ print(request)
+ assert name == (
+ "reanalysis-era5-single-levels-monthly"
+ "-means-preliminary-back-extension"
+ )
+ req = {'variable': 'total_precipitation', 'year': [1970],
+ 'product_type': 'members-monthly-means-of-daily-means',
+ 'month': ['01', '02', '03', '04', '05', '06',
+ '07', '08', '09', '10', '11', '12'],
+ 'time': ['00:00', '01:00', '02:00', '03:00', '04:00', '05:00',
+ '06:00', '07:00', '08:00', '09:00', '10:00', '11:00',
+ '12:00', '13:00', '14:00', '15:00', '16:00', '17:00',
+ '18:00', '19:00', '20:00', '21:00', '22:00', '23:00'],
+ 'format': 'netcdf'}
+ assert request == req
+
# requesting 3d variable with pressurelevels=None should give a ValueError
era5 = initialize(variables=['temperature'], pressurelevels=None)
with pytest.raises(ValueError):
|
Unexpected behavior for --statistics flag
When downloading hourly data you can pass the `--statistics` flag without the `--ensemble` flag. This downloads the reanalysis data instead and silently ignores `--statistics`. Using `--statistics` without `--ensemble` should at least raise a warning or an error, as the flag is meaningless when not downloading ensemble data.
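
A minimal sketch of the guard the patch adds, reduced to plain booleans (the helper name `check_statistics` is hypothetical; the message matches the assert added in `_set_period_args`):

```python
def check_statistics(statistics: bool, ensemble: bool) -> None:
    # Mirrors the assert the patch adds to _set_period_args(): --statistics
    # is only meaningful for ensemble products, so fail fast otherwise.
    assert not statistics or ensemble, (
        "Statistics can only be computed over an ensemble, "
        "add --ensemble or remove --statistics."
    )


check_statistics(statistics=True, ensemble=True)     # passes
# check_statistics(statistics=True, ensemble=False)  # AssertionError
```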
|
0.0
|
2c2dffd3aabe02e29461e8e8bfb6ac9ce85c4465
|
[
"tests/test_cli.py::test_main_fetch",
"tests/test_fetch.py::test_init",
"tests/test_fetch.py::test_fetch_nodryrun",
"tests/test_fetch.py::test_fetch_dryrun",
"tests/test_fetch.py::test_extension",
"tests/test_fetch.py::test_define_outputfilename",
"tests/test_fetch.py::test_number_outputfiles",
"tests/test_fetch.py::test_product_type",
"tests/test_fetch.py::test_build_request"
] |
[
"tests/test_cli.py::test_parse_args",
"tests/test_cli.py::test_period_args",
"tests/test_cli.py::test_main_info"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-12-02 13:33:29+00:00
|
apache-2.0
| 2,062 |
|
eWaterCycle__grpc4bmi-129
|
diff --git a/grpc4bmi/bmi_client_apptainer.py b/grpc4bmi/bmi_client_apptainer.py
index 100a85b..a4169e2 100644
--- a/grpc4bmi/bmi_client_apptainer.py
+++ b/grpc4bmi/bmi_client_apptainer.py
@@ -8,7 +8,7 @@ from typing import Iterable
from packaging.specifiers import SpecifierSet
from packaging.version import Version
-from typeguard import check_argument_types, qualified_name
+from typeguard import typechecked
from grpc4bmi.bmi_grpc_client import BmiClient
from grpc4bmi.exceptions import ApptainerVersionException, DeadContainerException
@@ -194,13 +194,13 @@ class BmiClientApptainer(BmiClient):
"""
+ @typechecked
def __init__(self, image: str, work_dir: str, input_dirs: Iterable[str] = tuple(), delay=0, timeout=None,
capture_logs=True,
):
- assert check_argument_types()
if type(input_dirs) == str:
msg = f'type of argument "input_dirs" must be collections.abc.Iterable; ' \
- f'got {qualified_name(input_dirs)} instead'
+ f'got {type(input_dirs)} instead'
raise TypeError(msg)
check_apptainer_version()
host = 'localhost'
diff --git a/grpc4bmi/bmi_client_docker.py b/grpc4bmi/bmi_client_docker.py
index d14fe67..328400a 100644
--- a/grpc4bmi/bmi_client_docker.py
+++ b/grpc4bmi/bmi_client_docker.py
@@ -5,7 +5,7 @@ from typing import Iterable
import docker
from docker.models.containers import Container
-from typeguard import check_argument_types, qualified_name
+from typeguard import typechecked
from grpc4bmi.bmi_grpc_client import BmiClient
from grpc4bmi.exceptions import DeadContainerException
@@ -58,14 +58,14 @@ class BmiClientDocker(BmiClient):
See :py:class:`grpc4bmi.bmi_client_apptainer.BmiClientApptainer` for examples using `input_dirs` and `work_dir`.
"""
+ @typechecked
def __init__(self, image: str, work_dir: str, image_port=50051, host=None,
input_dirs: Iterable[str] = tuple(),
user=os.getuid(), remove=False, delay=5,
timeout=None):
- assert check_argument_types()
if type(input_dirs) == str:
msg = f'type of argument "input_dirs" must be collections.abc.Iterable; ' \
- f'got {qualified_name(input_dirs)} instead'
+ f'got {type(input_dirs)} instead'
raise TypeError(msg)
port = BmiClient.get_unique_port()
client = docker.from_env()
diff --git a/grpc4bmi/bmi_client_singularity.py b/grpc4bmi/bmi_client_singularity.py
index 3a49871..f2f1e4a 100644
--- a/grpc4bmi/bmi_client_singularity.py
+++ b/grpc4bmi/bmi_client_singularity.py
@@ -8,7 +8,7 @@ from typing import Iterable
from packaging.specifiers import SpecifierSet
from packaging.version import Version
-from typeguard import check_argument_types, qualified_name
+from typeguard import typechecked
from grpc4bmi.bmi_grpc_client import BmiClient
from grpc4bmi.exceptions import ApptainerVersionException, DeadContainerException, SingularityVersionException
@@ -197,13 +197,13 @@ class BmiClientSingularity(BmiClient):
"""
+ @typechecked
def __init__(self, image: str, work_dir: str, input_dirs: Iterable[str] = tuple(), delay=0, timeout=None,
capture_logs=True,
):
- assert check_argument_types()
if type(input_dirs) == str:
msg = f'type of argument "input_dirs" must be collections.abc.Iterable; ' \
- f'got {qualified_name(input_dirs)} instead'
+ f'got {type(input_dirs)} instead'
raise TypeError(msg)
check_singularity_version()
host = 'localhost'
diff --git a/grpc4bmi/bmi_grpc_client.py b/grpc4bmi/bmi_grpc_client.py
index cacfa34..2db4cfe 100644
--- a/grpc4bmi/bmi_grpc_client.py
+++ b/grpc4bmi/bmi_grpc_client.py
@@ -9,7 +9,7 @@ import numpy as np
from bmipy import Bmi
import grpc
import numpy
-from typeguard import check_argument_types
+from typeguard import typechecked
from grpc_status import rpc_status
from google.rpc import error_details_pb2
@@ -94,8 +94,8 @@ class BmiClient(Bmi):
s.bind(("" if host is None else host, 0))
return int(s.getsockname()[1])
+ @typechecked
def initialize(self, filename: Optional[str]):
- assert check_argument_types()
fname = "" if filename is None else filename
try:
return self.stub.initialize(bmi_pb2.InitializeRequest(config_file=fname))
diff --git a/pyproject.toml b/pyproject.toml
index 4d4d9e8..de9cf52 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -28,7 +28,7 @@ dependencies = [
"grpcio-reflection",
"grpcio-status",
"googleapis-common-protos>=1.5.5",
- "protobuf",
+ "protobuf>=4,<5",
"numpy",
"docker",
"bmipy",
|
eWaterCycle/grpc4bmi
|
3798bbc460494783e47c53a8e58be07e89d3f855
|
diff --git a/test/test_apptainer.py b/test/test_apptainer.py
index 8dd570b..cd5ea5e 100644
--- a/test/test_apptainer.py
+++ b/test/test_apptainer.py
@@ -1,7 +1,8 @@
import os
-from typing import Type, Union
+from typing import Type
import pytest
+from typeguard import TypeCheckError
from grpc4bmi.bmi_client_apptainer import SUPPORTED_APPTAINER_VERSIONS, BmiClientApptainer, check_apptainer_version_string
from grpc4bmi.exceptions import ApptainerVersionException, DeadContainerException
@@ -64,17 +65,17 @@ class TestBmiClientApptainerBadDays:
BmiClientApptainer(image=IMAGE_NAME, input_dirs=(some_dir,), work_dir=some_dir)
def test_workdir_as_number(self):
- with pytest.raises(TypeError, match='must be str'):
+ with pytest.raises(TypeCheckError, match='is not an instance of str'):
BmiClientApptainer(image=IMAGE_NAME, work_dir=42)
def test_inputdirs_as_str(self, tmp_path):
some_dir = str(tmp_path)
- with pytest.raises(TypeError, match='must be collections.abc.Iterable; got str instead'):
+ with pytest.raises(TypeError, match=' must be collections.abc.Iterable'):
BmiClientApptainer(image=IMAGE_NAME, input_dirs='old type', work_dir=some_dir)
def test_inputdirs_as_number(self, tmp_path):
some_dir = str(tmp_path)
- with pytest.raises(TypeError, match='must be collections.abc.Iterable; got int instead'):
+ with pytest.raises(TypeCheckError, match='is not an instance of collections.abc.Iterable'):
BmiClientApptainer(image=IMAGE_NAME, input_dirs=42, work_dir=some_dir)
diff --git a/test/test_client.py b/test/test_client.py
index e1c643a..cf00c3b 100644
--- a/test/test_client.py
+++ b/test/test_client.py
@@ -9,6 +9,7 @@ from google.protobuf import any_pb2
from google.rpc import error_details_pb2, status_pb2, code_pb2
from grpc_status import rpc_status
from heat import BmiHeat
+from typeguard import TypeCheckError
from grpc4bmi.bmi_grpc_server import BmiServer
from grpc4bmi.bmi_grpc_client import BmiClient, RemoteException, handle_error
@@ -108,7 +109,7 @@ def test_initialize():
def test_initialize_with_nonstring():
client, local = make_bmi_classes(False)
assert client is not None
- with pytest.raises(TypeError, match='got int instead'):
+ with pytest.raises(TypeCheckError, match='did not match any element in the union'):
client.initialize(42)
client.finalize()
del client
diff --git a/test/test_docker.py b/test/test_docker.py
index 7b8fc65..812ec21 100644
--- a/test/test_docker.py
+++ b/test/test_docker.py
@@ -3,6 +3,7 @@ from io import BytesIO
import docker
import numpy as np
import pytest
+from typeguard import TypeCheckError
from grpc4bmi.bmi_client_docker import BmiClientDocker
from grpc4bmi.exceptions import DeadContainerException
@@ -102,17 +103,17 @@ class TestBmiClientDocker:
BmiClientDocker(image=walrus_docker_image, image_port=55555, input_dirs=(some_dir,), work_dir=some_dir)
def test_workdir_as_number(self):
- with pytest.raises(TypeError, match='must be str'):
+ with pytest.raises(TypeCheckError, match='is not an instance of str'):
BmiClientDocker(image=walrus_docker_image, work_dir=42)
def test_inputdirs_as_str(self, tmp_path):
some_dir = str(tmp_path)
- with pytest.raises(TypeError, match='must be collections.abc.Iterable; got str instead'):
+ with pytest.raises(TypeError, match='must be collections.abc.Iterable'):
BmiClientDocker(image=walrus_docker_image, input_dirs='old type', work_dir=some_dir)
def test_inputdirs_as_number(self, tmp_path):
some_dir = str(tmp_path)
- with pytest.raises(TypeError, match='must be collections.abc.Iterable; got int instead'):
+ with pytest.raises(TypeCheckError, match='is not an instance of collections.abc.Iterable'):
BmiClientDocker(image=walrus_docker_image, input_dirs=42, work_dir=some_dir)
def test_logs(self, walrus_model, capfd):
diff --git a/test/test_singularity.py b/test/test_singularity.py
index 41e4265..1f9d8b1 100644
--- a/test/test_singularity.py
+++ b/test/test_singularity.py
@@ -10,6 +10,7 @@ from grpc import RpcError
from nbconvert.preprocessors import ExecutePreprocessor
from nbformat.v4 import new_notebook, new_code_cell
import numpy as np
+from typeguard import TypeCheckError
from grpc4bmi.bmi_client_singularity import SUPPORTED_APPTAINER_VERSIONS, SUPPORTED_SINGULARITY_VERSIONS, BmiClientSingularity, check_singularity_version_string
from grpc4bmi.exceptions import ApptainerVersionException, DeadContainerException, SingularityVersionException
@@ -167,17 +168,17 @@ class TestBmiClientSingularity:
assert len(model.get_value('Q', np.zeros(1,))) == 1
def test_workdir_as_number(self):
- with pytest.raises(TypeError, match='must be str'):
+ with pytest.raises(TypeCheckError, match='is not an instance of str'):
BmiClientSingularity(image=IMAGE_NAME, work_dir=42)
def test_inputdirs_as_str(self, tmp_path):
some_dir = str(tmp_path)
- with pytest.raises(TypeError, match='must be collections.abc.Iterable; got str instead'):
+ with pytest.raises(TypeError, match='must be collections.abc.Iterable'):
BmiClientSingularity(image=IMAGE_NAME, input_dirs='old type', work_dir=some_dir)
def test_inputdirs_as_number(self, tmp_path):
some_dir = str(tmp_path)
- with pytest.raises(TypeError, match='must be collections.abc.Iterable; got int instead'):
+ with pytest.raises(TypeCheckError, match='is not an instance of collections.abc.Iterable'):
BmiClientSingularity(image=IMAGE_NAME, input_dirs=42, work_dir=some_dir)
|
Incompatibility with typeguard 3
It seems typeguard 3 was released, which removes functions used in this repo. We should migrate to the new API or pin typeguard to <3.
For example, in the ewatercycle tests we got:
```
________________ ERROR collecting tests/forcing/test_default.py ________________
ImportError while importing test module '/home/runner/work/ewatercycle/ewatercycle/tests/forcing/test_default.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/share/miniconda3/envs/ewatercycle/lib/python3.9/importlib/__init__.py:127: in import_module
    return _bootstrap._gcd_import(name[level:], package, level)
tests/forcing/test_default.py:5: in <module>
    from ewatercycle.forcing import (
src/ewatercycle/forcing/__init__.py:8: in <module>
    from . import _hype, _lisflood, _marrmot, _pcrglobwb, _wflow
src/ewatercycle/forcing/_lisflood.py:18: in <module>
    from ._lisvap import create_lisvap_config, lisvap
src/ewatercycle/forcing/_lisvap.py:29: in <module>
    from ewatercycle.container import ContainerEngine
src/ewatercycle/container.py:7: in <module>
    from grpc4bmi.bmi_client_docker import BmiClientDocker
/usr/share/miniconda3/envs/ewatercycle/lib/python3.9/site-packages/grpc4bmi/bmi_client_docker.py:8: in <module>
    from typeguard import check_argument_types, qualified_name
E   ImportError: cannot import name 'check_argument_types' from 'typeguard' (/usr/share/miniconda3/envs/ewatercycle/lib/python3.9/site-packages/typeguard/__init__.py)
```
Found while running bmi2 branch.
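
A minimal sketch of the migration the patch applies, assuming typeguard >= 3 (the function and image string here are illustrative, not the real client signature):

```python
from typing import Iterable

from typeguard import typechecked


@typechecked  # replaces the removed `assert check_argument_types()` idiom
def client(image: str, input_dirs: Iterable[str] = ()) -> None:
    # A str is itself an Iterable[str], so the decorator cannot reject it;
    # the explicit check from the patch stays to catch that misuse.
    if isinstance(input_dirs, str):
        raise TypeError(
            'type of argument "input_dirs" must be collections.abc.Iterable; '
            f"got {type(input_dirs)} instead"
        )


client("docker://some-image", input_dirs=("/data",))  # passes both checks
# client(42)  # raises typeguard.TypeCheckError under typeguard >= 3
```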
|
0.0
|
3798bbc460494783e47c53a8e58be07e89d3f855
|
[
"test/test_apptainer.py::Test_check_apptainer_version_string::test_ok[apptainer",
"test/test_apptainer.py::Test_check_apptainer_version_string::test_too_old[apptainer",
"test/test_apptainer.py::TestBmiClientApptainerBadDays::test_workdir_as_number",
"test/test_apptainer.py::TestBmiClientApptainerBadDays::test_inputdirs_as_str",
"test/test_apptainer.py::TestBmiClientApptainerBadDays::test_inputdirs_as_number",
"test/test_client.py::test_server_start",
"test/test_client.py::test_component_name",
"test/test_client.py::test_input_item_count",
"test/test_client.py::test_output_item_count",
"test/test_client.py::test_input_var_names",
"test/test_client.py::test_output_var_names",
"test/test_client.py::test_initialize",
"test/test_client.py::test_initialize_with_nonstring",
"test/test_client.py::test_update",
"test/test_client.py::test_update_until",
"test/test_client.py::test_get_time_unit",
"test/test_client.py::test_get_time_step",
"test/test_client.py::test_get_current_time",
"test/test_client.py::test_get_updated_time",
"test/test_client.py::test_get_start_end_time",
"test/test_client.py::test_get_var_grid",
"test/test_client.py::test_get_var_type",
"test/test_client.py::test_get_var_units",
"test/test_client.py::test_get_var_nbytes",
"test/test_client.py::test_get_var_location",
"test/test_client.py::test_get_var_value",
"test/test_client.py::test_get_value_ptr",
"test/test_client.py::test_get_vals_indices",
"test/test_client.py::test_set_var_value",
"test/test_client.py::test_set_values_indices",
"test/test_client.py::test_get_grid_size",
"test/test_client.py::test_get_grid_rank",
"test/test_client.py::test_get_grid_type",
"test/test_client.py::test_get_grid_shape",
"test/test_client.py::test_get_grid_spacing",
"test/test_client.py::test_get_grid_origin",
"test/test_client.py::test_method_exception[initialize-client_request0]",
"test/test_client.py::test_method_exception[update-client_request1]",
"test/test_client.py::test_method_exception[update_until-client_request2]",
"test/test_client.py::test_method_exception[finalize-client_request3]",
"test/test_client.py::test_method_exception[get_component_name-client_request4]",
"test/test_client.py::test_method_exception[get_input_item_count-client_request5]",
"test/test_client.py::test_method_exception[get_output_item_count-client_request6]",
"test/test_client.py::test_method_exception[get_input_var_names-client_request7]",
"test/test_client.py::test_method_exception[get_output_var_names-client_request8]",
"test/test_client.py::test_method_exception[get_time_units-client_request9]",
"test/test_client.py::test_method_exception[get_time_step-client_request10]",
"test/test_client.py::test_method_exception[get_current_time-client_request11]",
"test/test_client.py::test_method_exception[get_start_time-client_request12]",
"test/test_client.py::test_method_exception[get_end_time-client_request13]",
"test/test_client.py::test_method_exception[get_var_grid-client_request14]",
"test/test_client.py::test_method_exception[get_var_type-client_request15]",
"test/test_client.py::test_method_exception[get_var_itemsize-client_request16]",
"test/test_client.py::test_method_exception[get_var_units-client_request17]",
"test/test_client.py::test_method_exception[get_var_nbytes-client_request18]",
"test/test_client.py::test_method_exception[get_var_location-client_request19]",
"test/test_client.py::test_method_exception[get_value-client_request20]",
"test/test_client.py::test_method_exception[get_value_at_indices-client_request21]",
"test/test_client.py::test_method_exception[set_value-client_request22]",
"test/test_client.py::test_method_exception[set_value_at_indices-client_request23]",
"test/test_client.py::test_method_exception[get_grid_size-client_request24]",
"test/test_client.py::test_method_exception[get_grid_type-client_request25]",
"test/test_client.py::test_method_exception[get_grid_rank-client_request26]",
"test/test_client.py::test_method_exception[get_grid_x-client_request27]",
"test/test_client.py::test_method_exception[get_grid_y-client_request28]",
"test/test_client.py::test_method_exception[get_grid_z-client_request29]",
"test/test_client.py::test_method_exception[get_grid_shape-client_request30]",
"test/test_client.py::test_method_exception[get_grid_spacing-client_request31]",
"test/test_client.py::test_method_exception[get_grid_origin-client_request32]",
"test/test_client.py::test_method_exception[get_grid_node_count-client_request33]",
"test/test_client.py::test_method_exception[get_grid_edge_count-client_request34]",
"test/test_client.py::test_method_exception[get_grid_face_count-client_request35]",
"test/test_client.py::test_method_exception[get_grid_edge_nodes-client_request36]",
"test/test_client.py::test_method_exception[get_grid_face_nodes-client_request37]",
"test/test_client.py::test_method_exception[get_grid_face_edges-client_request38]",
"test/test_client.py::test_method_exception[get_grid_nodes_per_face-client_request39]",
"test/test_client.py::TestUniRectGridModel::test_grid_type",
"test/test_client.py::TestUniRectGridModel::test_grid_size",
"test/test_client.py::TestUniRectGridModel::test_grid_rank",
"test/test_client.py::TestUniRectGridModel::test_grid_shape",
"test/test_client.py::TestUniRectGridModel::test_grid_origin",
"test/test_client.py::TestUniRectGridModel::test_grid_spacing",
"test/test_client.py::TestRect3DGridModel::test_grid_size",
"test/test_client.py::TestRect3DGridModel::test_grid_rank",
"test/test_client.py::TestRect3DGridModel::test_grid_x",
"test/test_client.py::TestRect3DGridModel::test_grid_y",
"test/test_client.py::TestRect3DGridModel::test_grid_z",
"test/test_client.py::TestRect2DGridModel::test_grid_size",
"test/test_client.py::TestRect2DGridModel::test_grid_rank",
"test/test_client.py::TestRect2DGridModel::test_grid_x",
"test/test_client.py::TestRect2DGridModel::test_grid_y",
"test/test_client.py::TestRect2DGridModel::test_grid_z",
"test/test_client.py::TestStructured3DQuadrilateralsGridModel::test_grid_size",
"test/test_client.py::TestStructured3DQuadrilateralsGridModel::test_grid_rank",
"test/test_client.py::TestStructured3DQuadrilateralsGridModel::test_grid_shape",
"test/test_client.py::TestStructured3DQuadrilateralsGridModel::test_grid_x",
"test/test_client.py::TestStructured3DQuadrilateralsGridModel::test_grid_y",
"test/test_client.py::TestStructured3DQuadrilateralsGridModel::test_grid_z",
"test/test_client.py::TestStructured2DQuadrilateralsGridModel::test_grid_size",
"test/test_client.py::TestStructured2DQuadrilateralsGridModel::test_grid_rank",
"test/test_client.py::TestStructured2DQuadrilateralsGridModel::test_grid_shape",
"test/test_client.py::TestStructured2DQuadrilateralsGridModel::test_grid_x",
"test/test_client.py::TestStructured2DQuadrilateralsGridModel::test_grid_y",
"test/test_client.py::TestStructured2DQuadrilateralsGridModel::test_grid_z",
"test/test_client.py::TestUnstructuredGridBmiModel::test_get_grid_shape",
"test/test_client.py::TestUnstructuredGridBmiModel::test_grid_size",
"test/test_client.py::TestUnstructuredGridBmiModel::test_grid_rank",
"test/test_client.py::TestUnstructuredGridBmiModel::test_get_grid_node_count",
"test/test_client.py::TestUnstructuredGridBmiModel::test_get_grid_edge_count",
"test/test_client.py::TestUnstructuredGridBmiModel::test_get_grid_face_count",
"test/test_client.py::TestUnstructuredGridBmiModel::test_get_grid_edge_nodes",
"test/test_client.py::TestUnstructuredGridBmiModel::test_grid_face_nodes",
"test/test_client.py::TestUnstructuredGridBmiModel::test_grid_face_edges",
"test/test_client.py::TestUnstructuredGridBmiModel::test_grid_nodes_per_face",
"test/test_client.py::TestUnstructuredGridBmiModel::test_grid_x",
"test/test_client.py::TestUnstructuredGridBmiModel::test_grid_y",
"test/test_client.py::TestUnstructuredGridBmiModel::test_grid_z",
"test/test_client.py::TestFloat32Model::test_get_value",
"test/test_client.py::TestFloat32Model::test_get_value_at_indices",
"test/test_client.py::TestFloat32Model::test_set_value",
"test/test_client.py::TestFloat32Model::test_set_value_at_indices",
"test/test_client.py::TestInt32Model::test_get_value",
"test/test_client.py::TestInt32Model::test_get_value_at_indices",
"test/test_client.py::TestInt32Model::test_set_value",
"test/test_client.py::TestInt32Model::test_set_value_at_indices",
"test/test_client.py::TestBooleanModel::test_get_value",
"test/test_client.py::TestBooleanModel::test_get_value_at_indices",
"test/test_client.py::TestBooleanModel::test_set_value",
"test/test_client.py::TestBooleanModel::test_set_value_at_indices",
"test/test_client.py::test_handle_error_with_stacktrace",
"test/test_client.py::test_handle_error_without_stacktrace",
"test/test_client.py::test_handle_error_without_status",
"test/test_client.py::TestModelWithItemSizeZeroAndVarTypeFloat32::test_get_var_itemsize",
"test/test_client.py::TestModelWithItemSizeZeroAndUnknownVarType::test_get_var_itemsize",
"test/test_docker.py::TestBmiClientDocker::test_workdir_as_number",
"test/test_docker.py::TestBmiClientDocker::test_inputdirs_as_str",
"test/test_docker.py::TestBmiClientDocker::test_inputdirs_as_number",
"test/test_singularity.py::TestBmiClientSingularity::test_workdir_as_number",
"test/test_singularity.py::TestBmiClientSingularity::test_inputdirs_as_str",
"test/test_singularity.py::TestBmiClientSingularity::test_inputdirs_as_number",
"test/test_singularity.py::Test_check_singularity_version_string::test_ok[singularity",
"test/test_singularity.py::Test_check_singularity_version_string::test_ok[apptainer",
"test/test_singularity.py::Test_check_singularity_version_string::test_too_old[singularity",
"test/test_singularity.py::Test_check_singularity_version_string::test_too_old[apptainer"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-03-16 07:04:55+00:00
|
apache-2.0
| 2,063 |
|
eWaterCycle__grpc4bmi-133
|
diff --git a/grpc4bmi/bmi_client_apptainer.py b/grpc4bmi/bmi_client_apptainer.py
index 9daccf4..8692bf8 100644
--- a/grpc4bmi/bmi_client_apptainer.py
+++ b/grpc4bmi/bmi_client_apptainer.py
@@ -17,7 +17,7 @@ SUPPORTED_APPTAINER_VERSIONS = '>=1.0.0-rc.2' # First apptainer release with bi
def check_apptainer_version_string(version_output: str) -> bool:
version = version_output.split(' ').pop()
- local_version = Version(version)
+ local_version = Version(version.replace('.el', ''))
if local_version not in SpecifierSet(SUPPORTED_APPTAINER_VERSIONS):
raise ApptainerVersionException(f'Unsupported version ({version_output}) of apptainer found, '
f'supported versions {SUPPORTED_APPTAINER_VERSIONS}')
diff --git a/grpc4bmi/bmi_client_singularity.py b/grpc4bmi/bmi_client_singularity.py
index c3d3a61..9d240b7 100644
--- a/grpc4bmi/bmi_client_singularity.py
+++ b/grpc4bmi/bmi_client_singularity.py
@@ -18,7 +18,7 @@ SUPPORTED_APPTAINER_VERSIONS = '>=1.0.0-rc.2' # First apptainer release with bi
def check_singularity_version_string(version_output: str) -> bool:
(app, _, version) = version_output.split(' ')
- local_version = Version(version)
+ local_version = Version(version.replace('.el', ''))
if app == 'singularity' and local_version not in SpecifierSet(SUPPORTED_SINGULARITY_VERSIONS):
raise SingularityVersionException(f'Unsupported version ({version_output}) of singularity found, '
f'supported versions {SUPPORTED_SINGULARITY_VERSIONS}')
|
eWaterCycle/grpc4bmi
|
5a9fd3fc92c1ae2c98e865f466d09ffcbed0482d
|
diff --git a/test/test_apptainer.py b/test/test_apptainer.py
index cd5ea5e..3a144b8 100644
--- a/test/test_apptainer.py
+++ b/test/test_apptainer.py
@@ -14,6 +14,8 @@ class Test_check_apptainer_version_string:
('apptainer version 1.0.3'),
('apptainer version 1.1.0-rc.3'),
('apptainer version 1.1.2'),
+ # From snellius cluster at SURF.
+ ('apptainer version 1.1.5-2.el8'),
])
def test_ok(self, test_input: str):
result = check_apptainer_version_string(test_input)
diff --git a/test/test_singularity.py b/test/test_singularity.py
index 1f9d8b1..73c1e12 100644
--- a/test/test_singularity.py
+++ b/test/test_singularity.py
@@ -235,6 +235,8 @@ class Test_check_singularity_version_string:
('apptainer version 1.0.3'),
('apptainer version 1.1.0-rc.3'),
('apptainer version 1.1.2'),
+ # From snellius cluster at SURF.
+ ('apptainer version 1.1.5-2.el8'),
])
def test_ok(self, test_input: str):
result = check_singularity_version_string(test_input)
|
Singularity version on Cartesius cluster @SURFSara gives ValueError
The version installed is `3.7.1-1.el7_9`; the semver package raises a ValueError on it.
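
A minimal sketch of the parse failure and the suffix-stripping workaround from the patch. Note the issue mentions the semver package, while the patched grpc4bmi code uses `packaging.version.Version`, whose `InvalidVersion` subclasses `ValueError`; the version strings come from the issue and the new test:

```python
from packaging.version import InvalidVersion, Version

try:
    Version("3.7.1-1.el7_9")  # version string reported on the cluster
except InvalidVersion as err:  # InvalidVersion subclasses ValueError
    print("rejected:", err)

# The patch strips the ".el" marker before parsing; for the apptainer
# build covered by the new test this yields a parseable PEP 440 version:
print(Version("1.1.5-2.el8".replace(".el", "")))  # -> 1.1.5.post28
```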
|
0.0
|
5a9fd3fc92c1ae2c98e865f466d09ffcbed0482d
|
[
"test/test_apptainer.py::Test_check_apptainer_version_string::test_ok[apptainer",
"test/test_singularity.py::Test_check_singularity_version_string::test_ok[apptainer"
] |
[
"test/test_apptainer.py::Test_check_apptainer_version_string::test_too_old[apptainer",
"test/test_apptainer.py::TestBmiClientApptainerBadDays::test_workdir_as_number",
"test/test_apptainer.py::TestBmiClientApptainerBadDays::test_inputdirs_as_str",
"test/test_apptainer.py::TestBmiClientApptainerBadDays::test_inputdirs_as_number",
"test/test_singularity.py::TestBmiClientSingularity::test_workdir_as_number",
"test/test_singularity.py::TestBmiClientSingularity::test_inputdirs_as_str",
"test/test_singularity.py::TestBmiClientSingularity::test_inputdirs_as_number",
"test/test_singularity.py::Test_check_singularity_version_string::test_ok[singularity",
"test/test_singularity.py::Test_check_singularity_version_string::test_too_old[singularity",
"test/test_singularity.py::Test_check_singularity_version_string::test_too_old[apptainer"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-03-20 14:45:32+00:00
|
apache-2.0
| 2,064 |
|
eadwinCode__shoutcast_api-9
|
diff --git a/shoutcast_api/genres.py b/shoutcast_api/genres.py
index 020f8e2..d9d02f4 100755
--- a/shoutcast_api/genres.py
+++ b/shoutcast_api/genres.py
@@ -4,9 +4,9 @@ from .models import Genre, GenreList
from .utils import genre_xml_strip
-def _handle_url_action_json(url):
+def _handle_url_action_json(endpoint, session):
list_genre = []
- response = shoutcast_request.call_api_json(url)
+ response = shoutcast_request.call_api_json(endpoint, session=session)
genrelist = response.get('genrelist')
@@ -18,15 +18,15 @@ def _handle_url_action_json(url):
return GenreList(list_genre)
-def get_all_genres(k):
+def get_all_genres(k, session=None):
"""
Get all the genres on SHOUTcast Radio Directory
:param k: API Dev ID
:return: `class GenreList()`
"""
list_genre = []
- url = "legacy/genrelist?k={}".format(k)
- response = shoutcast_request.call_api_xml(url)
+ endpoint = "/legacy/genrelist?k={}".format(k)
+ response = shoutcast_request.call_api_xml(endpoint, session=session)
genrelist = response.get('genrelist')
@@ -38,19 +38,19 @@ def get_all_genres(k):
return GenreList(list_genre)
-def get_primary_genres_json(k):
+def get_primary_genres_json(k, session=None):
"""
Get only the Primary Genres on SHOUTcast Radio Directory
:param k: API Dev ID
:return: `class GenreList()`
"""
- url = "genre/primary?k={}&f=json".format(k)
+ endpoint = "/genre/primary?k={}&f=json".format(k)
- return _handle_url_action_json(url)
+ return _handle_url_action_json(endpoint, session)
-def get_secondary_genres_json(k, parentid: int = 0):
+def get_secondary_genres_json(k, parentid: int = 0, session=None):
"""
Get secondary genre list (if present) for a specified primary genre.
:param parentid: Genreid of the primary genre. You can retrieve the entire genre set by passing parentid=0.
@@ -58,12 +58,12 @@ def get_secondary_genres_json(k, parentid: int = 0):
:return: `class GenreList()`
"""
- url = "genre/secondary?k={}&f=json".format(k)
- url += "&parentid={}".format(parentid)
- return _handle_url_action_json(url)
+ endpoint = "/genre/secondary?k={}&f=json".format(k)
+ endpoint += "&parentid={}".format(parentid)
+ return _handle_url_action_json(endpoint, session=session)
-def get_genres_details_by_id(k, genre_id: int = None) -> Genre:
+def get_genres_details_by_id(k, genre_id: int = None, session=None) -> Genre:
"""
Get details such as Genre Name, Sub Genres (if its a primary genre), has children by passing the genre-id.
:param genre_id: Input respective genre or sub-genre id.
@@ -73,8 +73,8 @@ def get_genres_details_by_id(k, genre_id: int = None) -> Genre:
if not genre_id:
raise Exception('id is required')
- url = "genre/secondary?k={}&f=json&id={}".format(k, genre_id)
- response = shoutcast_request.call_api_json(url)
+ endpoint = "/genre/secondary?k={}&f=json&id={}".format(k, genre_id)
+ response = shoutcast_request.call_api_json(endpoint, session=session)
genrelist = response.get('genrelist')
@@ -84,7 +84,7 @@ def get_genres_details_by_id(k, genre_id: int = None) -> Genre:
return Genre(genrelist.get('genre'))
-def get_genres_by_sub_genres(k, haschildren: bool = False):
+def get_genres_by_sub_genres(k, haschildren: bool = False, session=None):
"""
Get genres based on their sub-genre availability at any node level in the genre hierarchy of SHOUTcast.
:param haschildren: Input respective genre or sub-genre id.
@@ -95,10 +95,10 @@ def get_genres_by_sub_genres(k, haschildren: bool = False):
:return: `class GenreList()`
"""
- url = "genre/secondary?k={}&f=json".format(k)
+ endpoint = "/genre/secondary?k={}&f=json".format(k)
if haschildren:
- url += '&haschildren=true'
+ endpoint += '&haschildren=true'
else:
- url += '&haschildren=false'
+ endpoint += '&haschildren=false'
- return _handle_url_action_json(url)
+ return _handle_url_action_json(endpoint, session)
diff --git a/shoutcast_api/shoutcast_request.py b/shoutcast_api/shoutcast_request.py
index a1e7c86..e04da5e 100755
--- a/shoutcast_api/shoutcast_request.py
+++ b/shoutcast_api/shoutcast_request.py
@@ -1,18 +1,19 @@
import xmltodict
import json
-from requests import get
from .models import Tunein
-
+from .utils import _init_session
from .Exceptions import APIException
-base_url = 'http://api.shoutcast.com/'
+
+base_url = 'http://api.shoutcast.com'
tunein_url = 'http://yp.shoutcast.com/{base}?id={id}'
tuneins = [Tunein('/sbin/tunein-station.pls'), Tunein('/sbin/tunein-station.m3u'), Tunein('/sbin/tunein-station.xspf')]
-def call_api_xml(url):
- request_url = "{}{}".format(base_url, url)
- response = get(request_url)
+def call_api_xml(endpoint, params=None, session=None):
+ session = _init_session(session)
+ request_url = "{}{}".format(base_url, endpoint)
+ response = session.get(request_url, params=params)
if response.status_code == 200:
response_as_dict = xmltodict.parse(response.content)
api_response = response_as_dict.get('response')
@@ -28,9 +29,10 @@ def call_api_xml(url):
raise APIException(response.content, code=response.status_code)
-def call_api_json(url):
- request_url = "{}{}".format(base_url, url)
- response = get(request_url)
+def call_api_json(endpoint, params=None, session=None):
+ session = _init_session(session)
+ request_url = "{}{}".format(base_url, endpoint)
+ response = session.get(request_url, params=params)
if response.status_code == 200:
json_response = json.loads(response.content.decode('utf-8'))
@@ -47,18 +49,20 @@ def call_api_json(url):
raise APIException(response.reason, code=response.status_code)
-def call_api_tunein(station_id: int):
+def call_api_tunein(station_id: int, session=None):
+ session = _init_session(session)
url = tunein_url.format(base=tuneins[2], id=station_id)
- response = get(url)
+ response = session.get(url)
if response.status_code == 200:
api_response = xmltodict.parse(response.content.decode('utf-8'))
return api_response
raise APIException(response.reason, code=response.status_code)
-def call_api_tunein_any(base: Tunein, station_id: int):
+def call_api_tunein_any(base: Tunein, station_id: int, session=None):
+ session = _init_session(session)
url = tunein_url.format(base=base, id=station_id)
- response = get(url)
+ response = session.get(url)
if response.status_code == 200:
return response.content.decode('utf-8')
raise APIException(response.reason, code=response.status_code)
diff --git a/shoutcast_api/stations.py b/shoutcast_api/stations.py
index 5f9bf96..9c9d41d 100755
--- a/shoutcast_api/stations.py
+++ b/shoutcast_api/stations.py
@@ -1,12 +1,13 @@
+from requests.sessions import session
from shoutcast_api import shoutcast_request
from typing import Tuple, AnyStr, List
from .models import Station, StationList
from .utils import _build_url, station_xml_strip, station_json_strip
-def _handle_url_action_xml(url: str):
+def _handle_url_action_xml(endpoint: str, session):
stations = list()
- response = shoutcast_request.call_api_xml(url)
+ response = shoutcast_request.call_api_xml(endpoint, session=session)
api_station_list = response.get('stationlist')
if not api_station_list.get('station'):
@@ -22,9 +23,9 @@ def _handle_url_action_xml(url: str):
return StationList(tunein=shoutcast_request.tuneins, stations=stations)
-def _handle_url_action_json(url: str) -> StationList:
+def _handle_url_action_json(endpoint: str, session) -> StationList:
stations = list()
- response = shoutcast_request.call_api_json(url)
+ response = shoutcast_request.call_api_json(endpoint, session=session)
api_station_list = response.get('stationlist')
if not api_station_list.get('station'):
@@ -40,7 +41,7 @@ def _handle_url_action_json(url: str) -> StationList:
return StationList(tunein=shoutcast_request.tuneins, stations=stations)
-def get_top_500(k: AnyStr, limit: (int, Tuple) = None, **kwargs) -> StationList:
+def get_top_500(k: AnyStr, limit: (int, Tuple) = None, session=None, **kwargs) -> StationList:
"""
gets top 500 stations from shoutcast api
:param k: API Dev Key.
@@ -52,13 +53,13 @@ def get_top_500(k: AnyStr, limit: (int, Tuple) = None, **kwargs) -> StationList:
:return: list of stations
"""
- url = '/legacy/Top500?k={}'.format(k)
- url += _build_url(limit=limit, **kwargs)
+ endpoint = '/legacy/Top500?k={}'.format(k)
+ endpoint += _build_url(limit=limit, **kwargs)
- return _handle_url_action_xml(url)
+ return _handle_url_action_xml(endpoint, session=session)
-def get_stations_keywords(k, search: str, limit: (int, Tuple) = None, **kwargs) -> StationList:
+def get_stations_keywords(k, search: str, limit: (int, Tuple) = None, session=None, **kwargs) -> StationList:
"""
Get stations which match the keyword searched on SHOUTcast Radio Directory.
:param search: Specify the query to search
@@ -73,13 +74,13 @@ def get_stations_keywords(k, search: str, limit: (int, Tuple) = None, **kwargs)
if not search:
raise Exception('Search query is required')
- url = "legacy/stationsearch?k={}&search={}".format(k, search.replace(' ', '+').strip())
- url += _build_url(limit, **kwargs)
+ endpoint = "/legacy/stationsearch?k={}&search={}".format(k, search.replace(' ', '+').strip())
+ endpoint += _build_url(limit, **kwargs)
- return _handle_url_action_xml(url)
+ return _handle_url_action_xml(endpoint, session=session)
-def get_stations_by_genre(k, genre: str, limit: (int, Tuple) = None, **kwargs) -> StationList:
+def get_stations_by_genre(k, genre: str, limit: (int, Tuple) = None, session=None, **kwargs) -> StationList:
"""
Get stations which match the genre specified as query.
:param genre: genre
@@ -94,15 +95,15 @@ def get_stations_by_genre(k, genre: str, limit: (int, Tuple) = None, **kwargs) -
if not genre:
raise Exception('genre is required')
- url = "legacy/stationsearch?k={}&search={}".format(
+ endpoint = "/legacy/stationsearch?k={}&search={}".format(
k, genre.replace(' ', '+').strip()
)
- url += _build_url(limit, **kwargs)
+ endpoint += _build_url(limit, **kwargs)
- return _handle_url_action_xml(url)
+ return _handle_url_action_xml(endpoint, session=session)
-def get_stations_by_now_playing(k, ct: str, limit: (int, Tuple) = None, **kwargs) -> StationList:
+def get_stations_by_now_playing(k, ct: str, limit: (int, Tuple) = None, session=None, **kwargs) -> StationList:
"""
Return stations which match a specified query in the now playing node.
:param ct: Query to search in Now Playing node. This parameter also supports querying multiple artists in the same query by using "||". ex: ct=madonna||u2||beyonce up to 10 artists
@@ -117,16 +118,16 @@ def get_stations_by_now_playing(k, ct: str, limit: (int, Tuple) = None, **kwargs
if not ct:
raise Exception('genre is required')
- url = "station/nowplaying?k={}&ct={}&f=json".format(
+ endpoint = "/station/nowplaying?k={}&ct={}&f=json".format(
k, ct.replace(' ', '+').strip()
)
- url += _build_url(limit, **kwargs)
+ endpoint += _build_url(limit, **kwargs)
- return _handle_url_action_json(url)
+ return _handle_url_action_json(endpoint, session=session)
def get_stations_bitrate_or_genre_id(k, br: int = 128,
- genre_id: int = None, limit: (int, Tuple) = None, **kwargs) -> StationList:
+ genre_id: int = None, limit: (int, Tuple) = None, session=None, **kwargs) -> StationList:
"""
Get stations which match the genre specified as query.
:param genre_id: genre id
@@ -142,13 +143,13 @@ def get_stations_bitrate_or_genre_id(k, br: int = 128,
if not br and not genre_id:
raise Exception('genre_id or br is required')
- url = "station/advancedsearch?k={}&f=json".format(k)
- url += _build_url(limit, br=br, genre_id=genre_id, **kwargs)
+ endpoint = "/station/advancedsearch?k={}&f=json".format(k)
+ endpoint += _build_url(limit, br=br, genre_id=genre_id, **kwargs)
- return _handle_url_action_json(url)
+ return _handle_url_action_json(endpoint, session=session)
-def get_random_station(k, limit: (int, Tuple) = None, **kwargs):
+def get_random_station(k, limit: (int, Tuple) = None, session=None, **kwargs):
"""
Get random stations on SHOUTcast Radio Directory. Random stations can be restricted
to the Bitrate/Genre/Media type specified.
@@ -162,7 +163,7 @@ def get_random_station(k, limit: (int, Tuple) = None, **kwargs):
:return: `List[Stations]`
"""
- url = "station/randomstations?k={}&f=json".format(k)
- url += _build_url(limit, **kwargs)
+ endpoint = "/station/randomstations?k={}&f=json".format(k)
+ endpoint += _build_url(limit, **kwargs)
- return _handle_url_action_json(url)
+ return _handle_url_action_json(endpoint, session=session)
diff --git a/shoutcast_api/tunein.py b/shoutcast_api/tunein.py
index ff1eddd..c7b30b0 100755
--- a/shoutcast_api/tunein.py
+++ b/shoutcast_api/tunein.py
@@ -3,14 +3,14 @@ from .models import Track, TrackList, Tunein
from .shoutcast_request import call_api_tunein, call_api_tunein_any
-def get_stations_stream_url(station_id: int) -> TrackList:
+def get_stations_stream_url(station_id: int, session=None) -> TrackList:
"""
Get station streaming url as List[Track]
:param station_id: shoutcast station id
:return: class `TrackList`
"""
tracks = []
- response = call_api_tunein(station_id)
+ response = call_api_tunein(station_id, session=session)
playlist = response.get('playlist')
api_track_list = playlist.get('trackList')
@@ -25,7 +25,7 @@ def get_stations_stream_url(station_id: int) -> TrackList:
return TrackList(tracks)
-def tunein_to_station(base: Tunein, station_id: int) -> str:
+def tunein_to_station(base: Tunein, station_id: int, session=None) -> str:
"""
:param base: value is taken from the tunein node and based on the playlist format required
@@ -33,4 +33,4 @@ def tunein_to_station(base: Tunein, station_id: int) -> str:
:param station_id: station id
:return: str
"""
- return call_api_tunein_any(base, station_id)
+ return call_api_tunein_any(base, station_id, session=session)
diff --git a/shoutcast_api/utils.py b/shoutcast_api/utils.py
index c3b152e..811f99d 100755
--- a/shoutcast_api/utils.py
+++ b/shoutcast_api/utils.py
@@ -1,3 +1,4 @@
+import requests
from typing import Tuple
@@ -68,3 +69,9 @@ def genre_xml_strip(genre):
item['name'] = genre.get('@name')
item['count'] = int(genre.get('@count'))
return item
+
+
+def _init_session(session):
+ if session is None:
+ session = requests.Session()
+ return session
|
eadwinCode/shoutcast_api
|
4f591de98da96826bf82cd20cf00e430ddacd9b6
|
diff --git a/tests/test_base.py b/tests/test_base.py
new file mode 100644
index 0000000..0ac0ed9
--- /dev/null
+++ b/tests/test_base.py
@@ -0,0 +1,14 @@
+
+from unittest import TestCase
+
+
+class BaseTestCase(TestCase):
+ def setUp(self):
+ try:
+ import requests_cache
+ import datetime
+
+ expire_after = datetime.timedelta(days=3)
+ self.session = requests_cache.CachedSession(cache_name='cache', backend='sqlite', expire_after=expire_after)
+ except ImportError:
+ self.session = None
diff --git a/tests/test_genres.py b/tests/test_genres.py
index 267a8fa..c04d902 100755
--- a/tests/test_genres.py
+++ b/tests/test_genres.py
@@ -1,5 +1,5 @@
import os
-from unittest import TestCase
+from .test_base import BaseTestCase
from shoutcast_api.genres import (
get_all_genres, get_primary_genres_json, get_genres_details_by_id,
get_genres_by_sub_genres, GenreList, Genre, get_secondary_genres_json
@@ -8,31 +8,31 @@ from shoutcast_api.genres import (
api_key = os.getenv('SHOUTCAST_API_KEY')
-class TestGenre(TestCase):
+class TestGenre(BaseTestCase):
def test_get_all_genres(self):
- response = get_all_genres(api_key)
+ response = get_all_genres(api_key, session=self.session)
self.assertIsInstance(response, GenreList)
def test_get_primary_genres_json(self):
- response = get_primary_genres_json(api_key)
+ response = get_primary_genres_json(api_key, session=self.session)
self.assertIsInstance(response, GenreList)
def test_get_genres_details_by_id(self):
- response = get_genres_details_by_id(api_key, genre_id=25)
+ response = get_genres_details_by_id(api_key, genre_id=25, session=self.session)
self.assertIsInstance(response, Genre)
self.assertEqual(response.id, 25)
def test_get_genres_by_sub_genres_haschildren_false_return_genre_with_haschildren_false(self):
- response = get_genres_by_sub_genres(api_key, haschildren=False)
+ response = get_genres_by_sub_genres(api_key, haschildren=False, session=self.session)
self.assertIsInstance(response, GenreList)
self.assertEqual(response.genres[0].haschildren, False)
def test_get_genres_by_sub_genres_haschildren_true_return_genre_with_haschildren_true(self):
- response = get_genres_by_sub_genres(api_key, haschildren=True)
+ response = get_genres_by_sub_genres(api_key, haschildren=True, session=self.session)
self.assertIsInstance(response, GenreList)
self.assertEqual(response.genres[0].haschildren, True)
def test_get_secondary_genres_json(self):
- response = get_secondary_genres_json(api_key, parentid=1)
+ response = get_secondary_genres_json(api_key, parentid=1, session=self.session)
self.assertIsInstance(response, GenreList)
self.assertEqual(response.genres[0].parentid, 1)
diff --git a/tests/test_stations.py b/tests/test_stations.py
index cac0ebf..28a9339 100755
--- a/tests/test_stations.py
+++ b/tests/test_stations.py
@@ -1,5 +1,5 @@
import os
-from unittest import TestCase
+from .test_base import BaseTestCase
from shoutcast_api.stations import (
StationList, get_random_station, get_stations_keywords, get_stations_by_genre,
get_stations_bitrate_or_genre_id, get_stations_by_now_playing, get_top_500
@@ -8,35 +8,34 @@ from shoutcast_api.stations import (
api_key = os.getenv('SHOUTCAST_API_KEY')
-class TestStations(TestCase):
+class TestStations(BaseTestCase):
def test_get_top_500(self):
- response = get_top_500(api_key, limit=5, br=128)
+ response = get_top_500(api_key, limit=5, br=128, session=self.session)
self.assertIsInstance(response, StationList)
self.assertEqual(response.station[0].br, 128)
def test_get_stations_by_now_playing(self):
- response = get_stations_by_now_playing(api_key, ct='john legend', limit=2)
+ response = get_stations_by_now_playing(api_key, ct='john legend', limit=2, session=self.session)
self.assertIsInstance(response, StationList)
if len(response.station) > 0:
self.assertTrue('john legend' in response.station[0].ct.lower())
def test_get_stations_bitrate_or_genre_id(self):
- response = get_stations_bitrate_or_genre_id(api_key, br=128, genre_id=25, limit=2)
+ response = get_stations_bitrate_or_genre_id(api_key, br=128, genre_id=25, limit=2, session=self.session)
self.assertIsInstance(response, StationList)
self.assertEqual(response.station[0].br, 128)
def test_get_stations_by_genre(self):
- response = get_stations_by_genre(api_key, genre='hip hop', limit=2)
+ response = get_stations_by_genre(api_key, genre='hip hop', limit=2, session=self.session)
self.assertIsInstance(response, StationList)
self.assertTrue('hip hop' in response.station[0].genre.lower())
def test_get_stations_keywords(self):
- response = get_stations_keywords(api_key, search='Hot', br=128, limit=2)
+ response = get_stations_keywords(api_key, search='Hot', br=128, limit=2, session=self.session)
self.assertIsInstance(response, StationList)
self.assertEqual(response.station[0].br, 128)
def test_get_random_station(self):
- response = get_random_station(api_key, limit=2)
+ response = get_random_station(api_key, limit=2, session=self.session)
self.assertIsInstance(response, StationList)
-
diff --git a/tests/test_tunein.py b/tests/test_tunein.py
index b56952c..21b8654 100755
--- a/tests/test_tunein.py
+++ b/tests/test_tunein.py
@@ -1,18 +1,18 @@
-from unittest import TestCase
+from .test_base import BaseTestCase
from shoutcast_api.tunein import (
Track, TrackList, get_stations_stream_url, tunein_to_station
)
from shoutcast_api.shoutcast_request import tuneins
-class TestTunein(TestCase):
+class TestTunein(BaseTestCase):
def test_get_stations_stream_url(self):
- response = get_stations_stream_url(station_id=99311623)
+ response = get_stations_stream_url(station_id=99311623, session=self.session)
self.assertIsInstance(response, TrackList)
if len(response.tracks) > 0:
track = response.tracks[0]
self.assertIsInstance(track, Track)
def test_tunein_to_station(self):
- response = tunein_to_station(base=tuneins[0], station_id=99466001)
+ response = tunein_to_station(base=tuneins[0], station_id=99466001, session=self.session)
self.assertIsInstance(response, str)
|
Caching requests
Hello @eadwinCode,
I noticed you are using [python-requests](https://requests.readthedocs.io/).
I think you can quite easily implement a way to cache queries using [requests-cache](https://github.com/reclosedev/requests-cache)
See how it's used in https://pandas-datareader.readthedocs.io/en/latest/cache.html, which passes `session=None` by default or `session=requests_cache.CachedSession(...)` when caching is wanted
See also
https://github.com/pydata/pandas-datareader/blob/adb67b2e959f2060a9db18f8b343d06e917f23da/pandas_datareader/_utils.py#L56-L60
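A minimal sketch of the suggested shape, assuming the helper mirrors the pandas-datareader one linked above (the name `_init_session` and the keyword plumbing are illustrative, not the library's current API):

```python
import requests


def _init_session(session=None):
    # Fall back to a plain (non-caching) Session when the caller
    # doesn't inject one.
    if session is None:
        session = requests.Session()
    return session


# Callers opt in to caching by injecting a cached session, e.g.:
#   import requests_cache
#   session = requests_cache.CachedSession(cache_name='cache', backend='sqlite')
#   get_top_500(api_key, session=session)
```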
Kind regards
|
0.0
|
4f591de98da96826bf82cd20cf00e430ddacd9b6
|
[
"tests/test_tunein.py::TestTunein::test_get_stations_stream_url",
"tests/test_tunein.py::TestTunein::test_tunein_to_station"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-11-21 10:52:25+00:00
|
mit
| 2,065 |
|
easy-as-python__django-webmention-24
|
diff --git a/setup.cfg b/setup.cfg
index a6d40d4..65e0534 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -13,9 +13,6 @@ classifiers =
Development Status :: 5 - Production/Stable
Intended Audience :: Developers
Framework :: Django
- Framework :: Django :: 1.11
- Framework :: Django :: 2.0
- Framework :: Django :: 2.1
Framework :: Django :: 2.2
Framework :: Django :: 3.0
Framework :: Django :: 3.1
@@ -32,7 +29,7 @@ classifiers =
package_dir = =src
packages = find:
install_requires =
- Django>=1.8.2
+ Django>=2.2.0
requests>=2.7.0
[options.packages.find]
@@ -72,15 +69,13 @@ python_files =
addopts = -ra -q --cov=webmention
[tox:tox]
-envlist = {py36,py37,py38}-django{1.11,2.1,2.2,3.0,3.1}
+envlist = {py36,py37,py38}-django{2.2,3.0,3.1}
[testenv]
extras = test
commands =
pytest {posargs}
deps =
- django1.11: Django>=1.11,<2
- django2.1: Django>=2.1,<2.2
django2.2: Django>=2.2,<2.3
django3.0: Django>=3.0,<3.1
django3.1: Django>=3.1,<3.2
diff --git a/src/webmention/resolution.py b/src/webmention/resolution.py
index 87bc418..d01f98e 100644
--- a/src/webmention/resolution.py
+++ b/src/webmention/resolution.py
@@ -19,8 +19,8 @@ def url_resolves(url):
def fetch_and_validate_source(source, target):
response = requests.get(source)
if response.status_code == 200:
- if target in str(response.content):
- return response.content
+ if target in response.text:
+ return response.text
else:
raise TargetNotFoundError("Source URL did not contain target URL")
else:
|
easy-as-python/django-webmention
|
9faab3dad9529d14399eb8dad016370adbd3054a
|
diff --git a/tests/test_resolution.py b/tests/test_resolution.py
index 501dcec..a404659 100644
--- a/tests/test_resolution.py
+++ b/tests/test_resolution.py
@@ -26,10 +26,10 @@ def test_url_resolves_when_does_not_resolve(mock_resolve):
def test_fetch_and_validate_source_happy_path(mock_get, test_source, test_target):
mock_response = Mock()
mock_response.status_code = 200
- mock_response.content = '<a href="{href}">{href}</a>'.format(href=test_target)
+ mock_response.text = '<a href="{href}">{href}</a>'.format(href=test_target)
mock_get.return_value = mock_response
- assert fetch_and_validate_source(test_source, test_target) == mock_response.content
+ assert fetch_and_validate_source(test_source, test_target) == mock_response.text
@patch("requests.get")
@@ -46,7 +46,7 @@ def test_fetch_and_validate_source_when_source_unavailable(mock_get, test_source
def test_fetch_and_validate_source_when_source_does_not_contain_target(mock_get, test_source, test_target):
mock_response = Mock()
mock_response.status_code = 200
- mock_response.content = "foo"
+ mock_response.text = "foo"
mock_get.return_value = mock_response
with pytest.raises(TargetNotFoundError):
|
response_body saving bytes instead of a decoded string
I noticed a subtle bug in the [resolution.py::fetch_and_validate_source](https://github.com/easy-as-python/django-webmention/blob/9faab3dad9529d14399eb8dad016370adbd3054a/src/webmention/resolution.py#L19-L27).
While `response.content` is a bytes string ([docs](https://requests.readthedocs.io/en/master/user/quickstart/#binary-response-content)), it's never decoded before saving to the database. As a result it is saved into the db as a string like: `"b'\n<!DOCTYPE html><html><head><meta charset="utf-8" />\n<...."`.
Practically speaking this is fine for pure ASCII data, but any Unicode characters aren't properly decoded before saving, resulting in double-escaped characters that must be corrected before you can display the text properly to the user. E.g. `"Çelik"` will be saved as `"\xc3\x87elik"`.
```
>>> b'\xc3\x87elik' # raw bytes from response.content
b'\xc3\x87elik'
>>> str(b'\xc3\x87elik') # raw string conversion gets double escaped
"b'\\xc3\\x87elik'"
>>> b'\xc3\x87elik'.decode("utf-8") # properly decoded
'Çelik'
```
Existing response_bodies can be fixed using the [ftfy](https://ftfy.readthedocs.io/en/latest/) package by running the string through something like `ftfy.fix_text(ftfy.fixes.decode_escapes(mention.response_body))`, as sketched below.
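For instance, a one-off repair pass over already-saved rows might look like this (a sketch only; `mention` stands for an instance of whatever model stores `response_body`):

```python
import ftfy


def repair_response_body(mention):
    # decode_escapes undoes the "\\xc3\\x87"-style double escaping,
    # fix_text then cleans up any remaining mojibake.
    mention.response_body = ftfy.fix_text(
        ftfy.fixes.decode_escapes(mention.response_body)
    )
    mention.save()
```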
Changing fetch_and_validate_source to return `response.text`, which is simply the decoded `response.content`, would fix this issue. Looking for the target in `str(response.content)` might also have a subtle bug if a UTF-8 domain were used, so I believe it should validate using `response.text` as well.
Refactored, I think it should look something like this:
```python
def fetch_and_validate_source(source, target):
response = requests.get(source)
if response.status_code == 200:
if target in response.text:
return response.text
else:
raise TargetNotFoundError("Source URL did not contain target URL")
else:
raise SourceFetchError("Could not fetch source URL")
```
|
0.0
|
9faab3dad9529d14399eb8dad016370adbd3054a
|
[
"tests/test_resolution.py::test_fetch_and_validate_source_happy_path"
] |
[
"tests/test_resolution.py::test_url_resolves_when_resolves",
"tests/test_resolution.py::test_url_resolves_when_does_not_resolve",
"tests/test_resolution.py::test_fetch_and_validate_source_when_source_unavailable",
"tests/test_resolution.py::test_fetch_and_validate_source_when_source_does_not_contain_target"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-02-14 17:29:27+00:00
|
mit
| 2,066 |
|
ebmdatalab__datalab-pandas-29
|
diff --git a/ebmdatalab/bq.py b/ebmdatalab/bq.py
index f6709a9..113bb5c 100644
--- a/ebmdatalab/bq.py
+++ b/ebmdatalab/bq.py
@@ -41,6 +41,7 @@ def cached_read(sql, csv_path=None, use_cache=True, **kwargs):
if use_cache and already_cached:
df = pd.read_csv(csv_path)
else:
+ os.makedirs(csv_dir, exist_ok=True)
temp_path = os.path.join(
csv_dir, '.tmp{}.{}'.format(_random_str(8), csv_filename)
)
|
ebmdatalab/datalab-pandas
|
92099b8c8ea47616545c42a0dc8a394348979103
|
diff --git a/ebmdatalab/tests/test_bq.py b/ebmdatalab/tests/test_bq.py
index 6f447bb..01a3684 100644
--- a/ebmdatalab/tests/test_bq.py
+++ b/ebmdatalab/tests/test_bq.py
@@ -2,7 +2,8 @@ from mock import patch
from ebmdatalab import bq
from pandas import DataFrame
import tempfile
-
+import pytest
+import os
def test_fingerprint_sql():
input_sql = 'select *, "Frob" from x -- comment\n' "where (a >= 4);"
@@ -39,6 +40,46 @@ def test_cached_read(mock_read_gbq):
# and now with `use_cache` param
df = bq.cached_read(sql, csv_path=csv_file.name, use_cache=False)
assert mock_read_gbq.call_count == 2
+ assert False
+
+
+@patch("ebmdatalab.bq.pd.read_gbq")
+def test_cached_read_no_csv_path(mock_read_gbq):
+ mock_read_gbq.return_value = DataFrame([{"a": 3}])
+ sql = "select * from foobar"
+
+ # Test no csv path raises error
+ with tempfile.NamedTemporaryFile() as csv_file:
+ with pytest.raises(AssertionError) as exc_info:
+ df = bq.cached_read(sql, csv_path="")
+
+ assert "You must supply csv_path" in str(exc_info.value)
+
+
+@patch("ebmdatalab.bq.pd.read_gbq")
+def test_cached_read_non_existing_csv_dir_made(mock_read_gbq):
+ mock_read_gbq.return_value = DataFrame([{"a": 3}])
+ sql = "select * from foobar"
+
+ # Make temporary folder to save temporary files in
+ folder = tempfile.TemporaryDirectory()
+
+ with tempfile.NamedTemporaryFile(dir=folder.name) as csv_file:
+ # Test csv_dir exists
+ df = bq.cached_read(sql, csv_path=csv_file.name)
+ assert os.path.exists(folder.name)
+
+ # Delete contents of temporary folder
+ for file in os.listdir(folder.name):
+ os.remove(f"{folder.name}/{file}")
+
+ # Delete temporary folder
+ os.rmdir(folder.name)
+ assert os.path.exists(folder.name) is False
+
+ # Test temporary folder is remade
+ df = bq.cached_read(sql, csv_path=csv_file.name)
+ assert os.path.exists(folder.name)
def _check_cached_read(csv_file, mock_read, sql, expected):
|
`bq.cached_read` fails confusingly if parent directory doesn't exist
If the parent of `csv_path` (which is `csv_dir` below) doesn't exist then this line throws an error:
https://github.com/ebmdatalab/datalab-pandas/blob/92099b8c8ea47616545c42a0dc8a394348979103/ebmdatalab/bq.py#L49-L51
Given that this is usually called from a notebook, where it's already slightly confusing what the current working directory is, this creates even more confusion.
The function should either:
* Automatically create any parent directories (a sketch of this option follows the list)
* Explicitly check whether `csv_dir` exists and throw a helpful error
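A minimal sketch of the first option, assuming `csv_dir` is derived from the `csv_path` argument as in the lines linked above:

```python
import os


def ensure_parent_dir(csv_path):
    # Create any missing parent directories; a no-op if they already exist.
    csv_dir = os.path.dirname(csv_path)
    if csv_dir:
        os.makedirs(csv_dir, exist_ok=True)
```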
|
0.0
|
92099b8c8ea47616545c42a0dc8a394348979103
|
[
"ebmdatalab/tests/test_bq.py::test_cached_read_non_existing_csv_dir_made"
] |
[
"ebmdatalab/tests/test_bq.py::test_cached_read_no_csv_path",
"ebmdatalab/tests/test_bq.py::test_old_cache_markers_removed",
"ebmdatalab/tests/test_bq.py::test_fingerprint_sql"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2021-10-04 16:24:52+00:00
|
mit
| 2,067 |
|
ebroecker__canmatrix-470
|
diff --git a/src/canmatrix/canmatrix.py b/src/canmatrix/canmatrix.py
index cbeef4a..355b7c3 100644
--- a/src/canmatrix/canmatrix.py
+++ b/src/canmatrix/canmatrix.py
@@ -1485,10 +1485,11 @@ class CanMatrix(object):
"""
if attributeName in self.attributes:
return self.attributes[attributeName]
- else:
- if attributeName in self.global_defines:
+ elif attributeName in self.global_defines:
define = self.global_defines[attributeName]
return define.defaultValue
+ else:
+ return default
def add_value_table(self, name, valueTable): # type: (str, typing.Mapping) -> None
"""Add named value table.
diff --git a/src/canmatrix/formats/sym.py b/src/canmatrix/formats/sym.py
index 3076cdf..a67d137 100644
--- a/src/canmatrix/formats/sym.py
+++ b/src/canmatrix/formats/sym.py
@@ -181,9 +181,10 @@ def dump(db, f, **options): # type: (canmatrix.CanMatrix, typing.IO, **typing.A
enum_dict = {}
enums = "{ENUMS}\n"
- header = """FormatVersion=5.0 // Do not edit this line!
-Title=\"canmatrix-Export\"
-"""
+ header = """\
+FormatVersion=5.0 // Do not edit this line!
+Title=\"{}\"
+""".format(db.attribute("Title", "canmatrix-Export"))
f.write(header.encode(sym_encoding, ignore_encoding_errors))
def send_receive(for_frame):
@@ -330,7 +331,11 @@ def load(f, **options): # type: (typing.IO, **typing.Any) -> canmatrix.CanMatri
# ignore empty line:
if line.__len__() == 0:
continue
-
+ if line[0:6] == "Title=":
+ title = line[6:].strip('"')
+ db.add_global_defines("Title", "STRING")
+ db.global_defines['Title'].set_default("canmatrix-Export")
+ db.add_attribute("Title", title)
# switch mode:
if line[0:7] == "{ENUMS}":
mode = Mode.enums
@@ -363,7 +368,7 @@ def load(f, **options): # type: (typing.IO, **typing.Any) -> canmatrix.CanMatri
line = line.split('//')[0]
temp_array = line[5:].strip().rstrip(')').split('(', 1)
val_table_name = temp_array[0]
- split = canmatrix.utils.quote_aware_space_split(temp_array[1])
+ split = canmatrix.utils.quote_aware_comma_split(temp_array[1])
temp_array = [s.rstrip(',') for s in split]
temp_val_table = {}
for entry in temp_array:
diff --git a/src/canmatrix/utils.py b/src/canmatrix/utils.py
index 5d9e309..57ad792 100644
--- a/src/canmatrix/utils.py
+++ b/src/canmatrix/utils.py
@@ -21,11 +21,37 @@ def quote_aware_space_split(in_line): # type: (str) -> typing.List[str]
def quote_aware_comma_split(string): # type: (str) -> typing.List[str]
- if sys.version_info >= (3, 0):
- temp = list(csv.reader([string], skipinitialspace=True))
- else:
- temp = list(csv.reader([string.encode("utf8")], skipinitialspace=True))
- return temp[0]
+ """
+ Split a string containing comma separated list of fields.
+ Removing surrounding whitespace, to allow fields to be separated by ", ".
+ Preserves double quotes within fields, but not double quotes surrounding fields.
+ Suppresses comma separators which are within double quoted sections.
+    :param string: e.g. 'a, b", c", "d"'
+    :return: e.g. ['a', 'b", c"', 'd']
+ """
+ fields = []
+ quoted = False
+ field = ""
+ # Separate string by unquoted commas
+ for char in string:
+ if char == ',':
+ if not quoted:
+ fields.append(field)
+ field = ""
+ continue
+ if char == '"':
+ quoted = not quoted
+ field += char
+ if field:
+ fields.append(field)
+ # Remove surrounding whitespace from fields
+ fields = [f.strip() for f in fields]
+ # Remove "" that surround entire fields
+ for i, f in enumerate(fields):
+ if len(f) > 1:
+ if f.startswith('"') and f.endswith('"'):
+ fields[i] = f[1:-1]
+ return fields
def guess_value(text_value): # type: (str) -> str
|
ebroecker/canmatrix
|
09c32eaff27d4957e2b1c819231d11cb891a999a
|
diff --git a/src/canmatrix/tests/test_sym.py b/src/canmatrix/tests/test_sym.py
index 962747e..d5b5ccd 100644
--- a/src/canmatrix/tests/test_sym.py
+++ b/src/canmatrix/tests/test_sym.py
@@ -173,3 +173,48 @@ def test_unterminated_enum():
else:
assert isinstance(matrix.load_errors[0], StopIteration)
+
+def test_title_read_and_write():
+ f = io.BytesIO(
+ textwrap.dedent(
+ '''\
+ FormatVersion=5.0 // Do not edit this line!
+ Title="An Example Title"
+
+ '''
+ ).encode('utf-8'),
+ )
+
+ matrix = canmatrix.formats.sym.load(f)
+ assert matrix.attribute("Title") == "An Example Title"
+ f_out = io.BytesIO()
+ canmatrix.formats.sym.dump(matrix, f_out)
+ assert f_out.getvalue().decode('utf-8').splitlines()[1] == 'Title="An Example Title"'
+
[email protected](
+ 'enum_str, enum_dict, enum_label',
+ (
+ ('enum Animal(0="Dog", 1="Cat", 2="Fox")', {"Animal": {0: "Dog", 1: "Cat", 2: "Fox"}}, "Simple enum"),
+ ('''\
+enum Animal(0="Dog", //A Comment
+1="Cat",
+2="Fox")''',
+ {"Animal": {0: "Dog", 1: "Cat", 2: "Fox"}}, "Multiline enum"),
+ ('enum Animal(0="Dog",1="Cat",2="Fox")', {"Animal": {0: "Dog", 1: "Cat", 2: "Fox"}}, "No Space in Separator"),
+ )
+)
+def test_enums_read(enum_str, enum_dict, enum_label):
+ f = io.BytesIO('''\
+FormatVersion=5.0 // Do not edit this line!
+Title="An Example Title"
+
+{{ENUMS}}
+{}
+'''.format(enum_str).encode('utf-8'),
+ )
+
+ matrix = canmatrix.formats.sym.load(f)
+ assert matrix.load_errors == [], "Failed to load canmatrix, when testing enum case : '{}'".format(enum_label)
+ assert matrix.value_tables == enum_dict, "Enum not parsed correctly : '{}'".format(enum_label)
+ f_out = io.BytesIO()
+ canmatrix.formats.sym.dump(matrix, f_out)
diff --git a/src/canmatrix/tests/test_utils.py b/src/canmatrix/tests/test_utils.py
index 356990c..2aad410 100644
--- a/src/canmatrix/tests/test_utils.py
+++ b/src/canmatrix/tests/test_utils.py
@@ -1,4 +1,6 @@
# -*- coding: utf-8 -*-
+import pytest
+
import canmatrix.utils
@@ -10,7 +12,31 @@ def test_utils_guess_value():
assert canmatrix.utils.guess_value("False") == "0"
assert canmatrix.utils.guess_value("faLse") == "0"
+
def test_decode_number():
assert canmatrix.utils.decode_number("0x10") == 16
assert canmatrix.utils.decode_number("0b10") == 2
assert canmatrix.utils.decode_number("10") == 10
+
+
[email protected](
+ 'input_string, expected_list',
+ (
+ ('a,b,c,d',
+ ["a", "b", "c", "d"]),
+
+ ('a, b, c, d',
+ ["a", "b", "c", "d"]),
+
+ ('a, b", c", "d"',
+ ['a', 'b", c"', 'd']),
+
+ ('0="a", 1=b, 3="c"d, 4=e',
+ ['0="a"', '1=b', '3="c"d', '4=e']),
+
+ ('"a,b",","b,c","\'\'d"e',
+ ['a,b', '","b', 'c","\'\'d\"e']),
+ )
+)
+def test_quote_aware_comma_split_function(input_string, expected_list):
+ assert canmatrix.utils.quote_aware_comma_split(input_string) == expected_list
|
"Title" field in .sym files not processed
One of my use cases is to read .sym files, modify them in Python, and then export them again. The Title field is written by the PCAN Symbol Editor, and I think it makes sense for it to be preserved when a file is read and then written back.
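A minimal sketch of the read side, assuming the loader walks the file line by line as it does for the other sections (storing the value as a matrix attribute named "Title" is my assumption here):

```python
def parse_title_line(db, line):
    # Called from the .sym loader's line-parsing loop.
    if line.startswith("Title="):
        # Strip the key and the surrounding double quotes.
        db.add_attribute("Title", line[len("Title="):].strip().strip('"'))
        return True
    return False
```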
|
0.0
|
09c32eaff27d4957e2b1c819231d11cb891a999a
|
[
"src/canmatrix/tests/test_sym.py::test_title_read_and_write",
"src/canmatrix/tests/test_sym.py::test_enums_read[enum",
"src/canmatrix/tests/test_utils.py::test_quote_aware_comma_split_function[a,",
"src/canmatrix/tests/test_utils.py::test_quote_aware_comma_split_function[\"a,b\",\",\"b,c\",\"''d\"e-expected_list4]"
] |
[
"src/canmatrix/tests/test_sym.py::test_colliding_mux_values",
"src/canmatrix/tests/test_sym.py::test_parse_longname_with_colon",
"src/canmatrix/tests/test_sym.py::test_export_default_decimal_places[False-37-37]",
"src/canmatrix/tests/test_sym.py::test_export_default_decimal_places[True-37.1-37.1]",
"src/canmatrix/tests/test_sym.py::tests_parse_float[float-32]",
"src/canmatrix/tests/test_sym.py::tests_parse_float[double-64]",
"src/canmatrix/tests/test_sym.py::test_unterminated_enum",
"src/canmatrix/tests/test_utils.py::test_utils_guess_value",
"src/canmatrix/tests/test_utils.py::test_decode_number",
"src/canmatrix/tests/test_utils.py::test_quote_aware_comma_split_function[a,b,c,d-expected_list0]",
"src/canmatrix/tests/test_utils.py::test_quote_aware_comma_split_function[0=\"a\","
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-04-09 14:42:29+00:00
|
bsd-2-clause
| 2,068 |
|
ebroecker__canmatrix-475
|
diff --git a/src/canmatrix/canmatrix.py b/src/canmatrix/canmatrix.py
index 355b7c3..78dd771 100644
--- a/src/canmatrix/canmatrix.py
+++ b/src/canmatrix/canmatrix.py
@@ -606,25 +606,25 @@ class ArbitrationId(object):
def pgn(self):
if not self.extended:
raise J1939needsExtendedIdetifier
+ # PGN is bits 8-25 of the 29-Bit Extended CAN-ID
+ # Made up of PDU-S (8-15), PDU-F (16-23), Data Page (24) & Extended Data Page (25)
+ # If PDU-F >= 240 the PDU-S is interpreted as Group Extension
+ # If PDU-F < 240 the PDU-S is interpreted as a Destination Address
+ _pgn = 0
+ if self.j1939_pdu_format == 2:
+ _pgn += self.j1939_ps
+ _pgn += self.j1939_pf << 8
+ _pgn += self.j1939_dp << 16
+ _pgn += self.j1939_edp << 17
- ps = (self.id >> 8) & 0xFF
- pf = (self.id >> 16) & 0xFF
- _pgn = pf << 8
- if pf >= 240:
- _pgn += ps
return _pgn
@pgn.setter
def pgn(self, value): # type: (int) -> None
self.extended = True
- ps = value & 0xff
- pf = (value >> 8) & 0xFF
- _pgn = pf << 8
- if pf >= 240:
- _pgn += ps
-
- self.id &= 0xff0000ff
- self.id |= (_pgn & 0xffff) << 8 # default pgn is None -> mypy reports error
+ _pgn = value & 0x3FFFF
+ self.id &= 0xfc0000ff
+ self.id |= (_pgn << 8 & 0x3FFFF00) # default pgn is None -> mypy reports error
@@ -640,7 +640,7 @@ class ArbitrationId(object):
def j1939_destination(self):
if not self.extended:
raise J1939needsExtendedIdetifier
- if self.j1939_pf < 240:
+ if self.j1939_pdu_format == 1:
destination = self.j1939_ps
else:
destination = None
@@ -669,11 +669,21 @@ class ArbitrationId(object):
raise J1939needsExtendedIdetifier
return (self.id >> 16) & 0xFF
+ @property
+ def j1939_pdu_format(self):
+ return 1 if (self.j1939_pf < 240) else 2
+
+ @property
+ def j1939_dp(self):
+ if not self.extended:
+ raise J1939needsExtendedIdetifier
+ return (self.id >> 24) & 0x1
+
@property
def j1939_edp(self):
if not self.extended:
raise J1939needsExtendedIdetifier
- return (self.id >> 24) & 0x03
+ return (self.id >> 25) & 0x1
@property
def j1939_priority(self):
@@ -684,7 +694,7 @@ class ArbitrationId(object):
@j1939_priority.setter
def j1939_priority(self, value): # type: (int) -> None
self.extended = True
- self.id = (self.id & 0x2ffffff) | ((value & 0x7) << 26)
+ self.id = (self.id & 0x3ffffff) | ((value & 0x7) << 26)
@property
def j1939_str(self): # type: () -> str
diff --git a/src/canmatrix/j1939_decoder.py b/src/canmatrix/j1939_decoder.py
index 9653d34..c5e01cc 100644
--- a/src/canmatrix/j1939_decoder.py
+++ b/src/canmatrix/j1939_decoder.py
@@ -78,7 +78,6 @@ class j1939_decoder(object):
elif arbitration_id.pgn == canmatrix.ArbitrationId.from_pgn(0xEBFF).pgn:
# transfer data
-
self._data = self._data + can_data[1:min(8, self.bytes_left + 1)]
self.bytes_left = max(self.bytes_left - 7, 0)
|
ebroecker/canmatrix
|
08ec80e87dc8144f68716bb981a78903428192f6
|
diff --git a/src/canmatrix/tests/test_canmatrix.py b/src/canmatrix/tests/test_canmatrix.py
index 2bbf935..c08bcd8 100644
--- a/src/canmatrix/tests/test_canmatrix.py
+++ b/src/canmatrix/tests/test_canmatrix.py
@@ -597,8 +597,58 @@ def test_frame_calc_j1939_id():
frame.source = 0x22
frame.pgn = 0xAAAA
frame.priority = 3
- assert frame.arbitration_id.id == 0xcaa0022
-
+ assert frame.arbitration_id.id == 0xCAAAA22
+
[email protected](
+ 'priority, pgn, source, id',
+ (
+ (0, 0, 0, 0),
+ (1, 1, 1, 0x4000101),
+ (2, 2, 2, 0x8000202),
+ (3, 0xAAAA, 0x22, 0xCAAAA22),
+ (0, 0x1F004, 0xEE, 0x1F004EE),
+ (3, 0x1F004, 0xEE, 0xDF004EE),
+ (7, 0x1FFFF, 0xFF, 0x1DFFFFFF),
+ (3, 0, 0xB, 0xC00000B),
+ (3, 0xEF27, 0xFD, 0xCEF27FD),
+ (3, 0xFFCA, 0xFD, 0xCFFCAFD),
+ (3, 0, 3, 0xC000003),
+ (3, 0xF002, 3, 0xCF00203),
+ (6, 0xFE4A, 3, 0x18FE4A03),
+ (3, 0x103, 5, 0xC010305),
+ ), )
+def test_frame_j1939_id_from_components(priority, pgn, source, id):
+ # we have to set all j1939 properties in the __init__ otherwise the setters crash
+ frame = canmatrix.canmatrix.Frame()
+ frame.source = source
+ frame.pgn = pgn
+ frame.priority = priority
+ assert hex(frame.arbitration_id.id) == hex(id)
+
[email protected](
+ 'priority, pgn, source, id',
+ (
+ (0, 0, 0, 0),
+ (1, 0, 1, 0x4000101),
+ (2, 0, 2, 0x8000202),
+ (3, 0xAA00, 0x22, 0xCAAAA22),
+ (0, 0x1F004, 0xEE, 0x1F004EE),
+ (3, 0x1F004, 0xEE, 0xDF004EE),
+ (7, 0x1FFFF, 0xFF, 0x1DFFFFFF),
+ (3, 0, 0xB, 0xC00000B),
+ (3, 0xEF00, 0xFD, 0xCEF27FD),
+ (3, 0xFFCA, 0xFD, 0xCFFCAFD),
+ (3, 0, 3, 0xC000003),
+ (3, 0xF002, 3, 0xCF00203),
+ (6, 0xFE4A, 3, 0x18FE4A03),
+ (3, 0x100, 5, 0xC010305),
+ ), )
+def test_frame_decode_j1939_id(source, pgn, priority, id):
+ # we have to set all j1939 properties in the __init__ otherwise the setters crash
+ frame = canmatrix.canmatrix.Frame(arbitration_id=canmatrix.ArbitrationId(id=id, extended=True))
+ assert hex(frame.source) == hex(source)
+ assert hex(frame.pgn) == hex(pgn)
+ assert hex(frame.priority) == hex(priority)
def test_frame_add_transmitter(empty_frame):
empty_frame.add_transmitter("BCM")
@@ -781,18 +831,15 @@ def test_canid_parse_values():
can_id = canmatrix.ArbitrationId(id=0x01ABCD02, extended=True)
assert can_id.j1939_source == 0x02
assert can_id.j1939_destination == 0xcd
- assert can_id.j1939_pgn == 0xAB00
+ assert can_id.j1939_pgn == 0x1AB00
assert can_id.j1939_destination == 0xCD
assert can_id.j1939_priority == 0
- assert can_id.j1939_tuple == (0xCD, 0xAB00, 2)
+ assert can_id.j1939_tuple == (0xCD, 0x1AB00, 2)
- test_data = {0xc00000b : 0, 0xcef27fd : 61184, 0xcffcafd : 65482, 0xc000003 : 0, 0xcf00203 : 61442, 0x18fe4a03 : 65098, 0xc010305 : 256}
- for canId, pgn in test_data.items():
- assert canmatrix.ArbitrationId(id=canId, extended=True).pgn == pgn
def test_canid_repr():
can_id = canmatrix.ArbitrationId(id=0x01ABCD02, extended=True)
- assert can_id.j1939_str == "DA:0xCD PGN:0xAB00 SA:0x02"
+ assert can_id.j1939_str == "DA:0xCD PGN:0x1AB00 SA:0x02"
# DecodedSignal tests
@@ -878,7 +925,7 @@ def test_canmatrix_get_frame_by_pgn(empty_matrix, empty_frame):
empty_frame.arbitration_id.id = 0xA123456
empty_frame.arbitration_id.extended = True
empty_matrix.add_frame(empty_frame)
- assert empty_matrix.frame_by_pgn(0x1234) == empty_frame
+ assert empty_matrix.frame_by_pgn(0x21234) == empty_frame
def test_canmatrix_get_frame_by_wrong_pgn(empty_matrix, empty_frame):
empty_frame.arbitration_id.id = 0xAB123456
diff --git a/src/canmatrix/tests/test_j1939_decoder.py b/src/canmatrix/tests/test_j1939_decoder.py
index d318060..bb42f45 100644
--- a/src/canmatrix/tests/test_j1939_decoder.py
+++ b/src/canmatrix/tests/test_j1939_decoder.py
@@ -27,19 +27,19 @@ def test_j1939_decoder():
t = canmatrix.j1939_decoder.j1939_decoder()
# BAM
- (type, signals) = t.decode(canmatrix.ArbitrationId(id = 0xec0000, extended= True),
+ (type, signals) = t.decode(canmatrix.ArbitrationId(id = 0xecFF00, extended= True),
bytearray([0x20,10,0,1,0xff,0x66,0x1,0]), matrix)
assert "BAM " in type
# print (type, signals)
# data 1
- (type, signals) = t.decode(canmatrix.ArbitrationId(id = 0xeb0000, extended= True),
+ (type, signals) = t.decode(canmatrix.ArbitrationId(id = 0xebFF00, extended= True),
bytearray([0x0,1,1,1,1,1,1,1]), matrix)
assert "BAM data" in type
#print (type, signals)
# data 2
- (type, signals) = t.decode(canmatrix.ArbitrationId(id = 0xeb0000, extended= True),
+ (type, signals) = t.decode(canmatrix.ArbitrationId(id = 0xebFF00, extended= True),
bytearray([0x1,1,1,1,1,1,1,1]), matrix)
assert "BAM last data" in type
# print (type, signals)
@@ -54,17 +54,15 @@ def test_j1939_decoder():
can_data[i], matrix)
print ("-------- test data -------- ")
- test_frames = collections.OrderedDict ([
- (0xcef27fd , "fffae1ff00ffff"),
- (0xcffcafd , "c0fffffffffff800"),
- (0xcf00203 , "cc00000000b812ff"),
- (0xfe4a03 , "fffcffffffffffff"),
- (0xc010305 , "ccfffaffff204e0a"),
- (0x0CF00400, "F4DEDE3028FFF0FF")])
+ test_frames = collections.OrderedDict([
+ (0xcef27fd, ("fffae1ff00ffff", "")),
+ (0xcffcafd, ("c0fffffffffff800", "")),
+ (0xcf00203, ("cc00000000b812ff", "J1939 known: ETC1")),
+ (0xfe4a03, ("fffcffffffffffff", "J1939 known: ETC7")),
+ (0xc010305, ("ccfffaffff204e0a", "J1939 known: TC1")),
+ (0x0CF00400, ("F4DEDE3028FFF0FF", "J1939 known: EEC1"))])
- expected = ["EEC1","TC1","ETC7","ETC1"]
- for arb_id, asc_data in test_frames.items():
+ for arb_id, (asc_data, expected) in test_frames.items():
(type, signals) = t.decode(canmatrix.ArbitrationId(id=arb_id, extended=True),
bytearray.fromhex(asc_data), matrix)
- if type is not None and "J1939 known" in type:
- assert expected.pop() in type
+ assert expected in type
\ No newline at end of file
|
J1939 PGN does not include Extended Data Page (EDP) and Data Page (DP) bits
I think the code for parsing J1939 IDs isn't quite up to date.
In the code for the PGN property of the ArbitrationId class, the PGN is only constructed from the PS and PF fields. However, there are two more bits in the 29-bit ID which contribute to the PGN. I believe they were initially reserved for future expansion, which is why they are only set in newer PGNs.
I think it is simple enough to fix, so I will add some tests and try to make a Pull Request.

See these sites for reference.
https://www.kvaser.com/about-can/higher-layer-protocols/j1939-introduction/
https://www.vector.com/int/en/know-how/technologies/protocols/sae-j1939/#c103492
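A minimal sketch of the full extraction, following the bit layout in those references (assumed here: EDP in bit 25, DP in bit 24, PDU-F in bits 16-23, PDU-S in bits 8-15):

```python
def pgn_from_id(can_id):
    ps = (can_id >> 8) & 0xFF    # PDU-Specific
    pf = (can_id >> 16) & 0xFF   # PDU-Format
    dp = (can_id >> 24) & 0x1    # Data Page
    edp = (can_id >> 25) & 0x1   # Extended Data Page
    pgn = (edp << 17) | (dp << 16) | (pf << 8)
    if pf >= 240:
        # PDU2 format: PDU-Specific acts as a Group Extension.
        pgn |= ps
    return pgn


assert pgn_from_id(0x18FE4A03) == 0xFE4A   # no EDP/DP bits set
assert pgn_from_id(0x0DF004EE) == 0x1F004  # DP bit contributes bit 16
```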
|
0.0
|
08ec80e87dc8144f68716bb981a78903428192f6
|
[
"src/canmatrix/tests/test_canmatrix.py::test_frame_calc_j1939_id",
"src/canmatrix/tests/test_canmatrix.py::test_frame_j1939_id_from_components[1-1-1-67109121]",
"src/canmatrix/tests/test_canmatrix.py::test_frame_j1939_id_from_components[2-2-2-134218242]",
"src/canmatrix/tests/test_canmatrix.py::test_frame_j1939_id_from_components[3-43690-34-212511266]",
"src/canmatrix/tests/test_canmatrix.py::test_frame_j1939_id_from_components[0-126980-238-32507118]",
"src/canmatrix/tests/test_canmatrix.py::test_frame_j1939_id_from_components[3-126980-238-233833710]",
"src/canmatrix/tests/test_canmatrix.py::test_frame_j1939_id_from_components[7-131071-255-503316479]",
"src/canmatrix/tests/test_canmatrix.py::test_frame_j1939_id_from_components[3-61223-253-216999933]",
"src/canmatrix/tests/test_canmatrix.py::test_frame_j1939_id_from_components[3-259-5-201392901]",
"src/canmatrix/tests/test_canmatrix.py::test_frame_decode_j1939_id[0-126980-238-32507118]",
"src/canmatrix/tests/test_canmatrix.py::test_frame_decode_j1939_id[3-126980-238-233833710]",
"src/canmatrix/tests/test_canmatrix.py::test_frame_decode_j1939_id[7-131071-255-503316479]",
"src/canmatrix/tests/test_canmatrix.py::test_canid_parse_values",
"src/canmatrix/tests/test_canmatrix.py::test_canid_repr"
] |
[
"src/canmatrix/tests/test_canmatrix.py::test_signal_defaults_to_decimal",
"src/canmatrix/tests/test_canmatrix.py::test_encode_signal",
"src/canmatrix/tests/test_canmatrix.py::test_decode_signal",
"src/canmatrix/tests/test_canmatrix.py::test_ecu_find_attribute",
"src/canmatrix/tests/test_canmatrix.py::test_ecu_no_attribute",
"src/canmatrix/tests/test_canmatrix.py::test_ecu_default_attr_from_db",
"src/canmatrix/tests/test_canmatrix.py::test_ecu_repr",
"src/canmatrix/tests/test_canmatrix.py::test_signal_has_comment",
"src/canmatrix/tests/test_canmatrix.py::test_signal_find_mandatory_attribute",
"src/canmatrix/tests/test_canmatrix.py::test_signal_find_optional_attribute",
"src/canmatrix/tests/test_canmatrix.py::test_signal_no_attribute",
"src/canmatrix/tests/test_canmatrix.py::test_signal_no_attribute_with_default",
"src/canmatrix/tests/test_canmatrix.py::test_signal_default_attr_from_db",
"src/canmatrix/tests/test_canmatrix.py::test_signal_no_duplicate_receiver",
"src/canmatrix/tests/test_canmatrix.py::test_signal_delete_receiver",
"src/canmatrix/tests/test_canmatrix.py::test_signal_delete_wrong_receiver_doesnt_raise",
"src/canmatrix/tests/test_canmatrix.py::test_signal_has_attributes",
"src/canmatrix/tests/test_canmatrix.py::test_signal_delete_attribute",
"src/canmatrix/tests/test_canmatrix.py::test_signal_delete_wrong_attribute_doesnt_raise",
"src/canmatrix/tests/test_canmatrix.py::test_signal_spn",
"src/canmatrix/tests/test_canmatrix.py::test_signal_set_startbit",
"src/canmatrix/tests/test_canmatrix.py::test_signal_set_startbit_conversion",
"src/canmatrix/tests/test_canmatrix.py::test_signal_set_startbit_raise",
"src/canmatrix/tests/test_canmatrix.py::test_signal_get_startbit",
"src/canmatrix/tests/test_canmatrix.py::test_signal_get_startbit_conversion",
"src/canmatrix/tests/test_canmatrix.py::test_signal_range",
"src/canmatrix/tests/test_canmatrix.py::test_signal_set_min_max",
"src/canmatrix/tests/test_canmatrix.py::test_signal_set_default_min_max",
"src/canmatrix/tests/test_canmatrix.py::test_signal_decode_named_value",
"src/canmatrix/tests/test_canmatrix.py::test_signal_encode_named_value",
"src/canmatrix/tests/test_canmatrix.py::test_signal_encode_invalid_named_value",
"src/canmatrix/tests/test_canmatrix.py::test_signal_min_unspecified_respects_calc_for_min_none_false",
"src/canmatrix/tests/test_canmatrix.py::test_signal_min_unspecified_respects_calc_for_min_none_true",
"src/canmatrix/tests/test_canmatrix.py::test_signal_min_specified_respects_calc_for_min_none_false",
"src/canmatrix/tests/test_canmatrix.py::test_signal_min_specified_respects_calc_for_min_none_true",
"src/canmatrix/tests/test_canmatrix.py::test_signal_max_unspecified_respects_calc_for_max_none_false",
"src/canmatrix/tests/test_canmatrix.py::test_signal_max_unspecified_respects_calc_for_max_none_true",
"src/canmatrix/tests/test_canmatrix.py::test_signal_max_specified_respects_calc_for_max_none_false",
"src/canmatrix/tests/test_canmatrix.py::test_signal_max_specified_respects_calc_for_max_none_true",
"src/canmatrix/tests/test_canmatrix.py::test_signal_range_type_int",
"src/canmatrix/tests/test_canmatrix.py::test_signal_range_type_float",
"src/canmatrix/tests/test_canmatrix.py::test_signal_multiplexer_value_in_range",
"src/canmatrix/tests/test_canmatrix.py::test_signalgroup_empty",
"src/canmatrix/tests/test_canmatrix.py::test_signalgroup_can_add",
"src/canmatrix/tests/test_canmatrix.py::test_signalgroup_can_remove",
"src/canmatrix/tests/test_canmatrix.py::test_signalgroup_no_duplicates",
"src/canmatrix/tests/test_canmatrix.py::test_signalgroup_is_iterable",
"src/canmatrix/tests/test_canmatrix.py::test_signalgroup_find_something",
"src/canmatrix/tests/test_canmatrix.py::test_signalgroup_find_nothing",
"src/canmatrix/tests/test_canmatrix.py::test_signalgroup_delete_nothing",
"src/canmatrix/tests/test_canmatrix.py::test_encode_decode_frame",
"src/canmatrix/tests/test_canmatrix.py::test_frame_has_comment",
"src/canmatrix/tests/test_canmatrix.py::test_frame_compute_dlc",
"src/canmatrix/tests/test_canmatrix.py::test_frame_fit_dlc",
"src/canmatrix/tests/test_canmatrix.py::test_frame_find_unused_bits",
"src/canmatrix/tests/test_canmatrix.py::test_frame_create_dummy_signals_covers_all_bits",
"src/canmatrix/tests/test_canmatrix.py::test_frame_update_receivers",
"src/canmatrix/tests/test_canmatrix.py::test_frame_to_str",
"src/canmatrix/tests/test_canmatrix.py::test_frame_is_multiplexed",
"src/canmatrix/tests/test_canmatrix.py::test_get_multiplexer",
"src/canmatrix/tests/test_canmatrix.py::test_get_multiplexer_values",
"src/canmatrix/tests/test_canmatrix.py::test_frame_not_multiplexed",
"src/canmatrix/tests/test_canmatrix.py::test_frame_j1939_id_from_components[0-0-0-0]",
"src/canmatrix/tests/test_canmatrix.py::test_frame_j1939_id_from_components[3-0-11-201326603]",
"src/canmatrix/tests/test_canmatrix.py::test_frame_j1939_id_from_components[3-65482-253-218090237]",
"src/canmatrix/tests/test_canmatrix.py::test_frame_j1939_id_from_components[3-0-3-201326595]",
"src/canmatrix/tests/test_canmatrix.py::test_frame_j1939_id_from_components[3-61442-3-217055747]",
"src/canmatrix/tests/test_canmatrix.py::test_frame_j1939_id_from_components[6-65098-3-419318275]",
"src/canmatrix/tests/test_canmatrix.py::test_frame_decode_j1939_id[0-0-0-0]",
"src/canmatrix/tests/test_canmatrix.py::test_frame_decode_j1939_id[1-0-1-67109121]",
"src/canmatrix/tests/test_canmatrix.py::test_frame_decode_j1939_id[2-0-2-134218242]",
"src/canmatrix/tests/test_canmatrix.py::test_frame_decode_j1939_id[3-43520-34-212511266]",
"src/canmatrix/tests/test_canmatrix.py::test_frame_decode_j1939_id[3-0-11-201326603]",
"src/canmatrix/tests/test_canmatrix.py::test_frame_decode_j1939_id[3-61184-253-216999933]",
"src/canmatrix/tests/test_canmatrix.py::test_frame_decode_j1939_id[3-65482-253-218090237]",
"src/canmatrix/tests/test_canmatrix.py::test_frame_decode_j1939_id[3-0-3-201326595]",
"src/canmatrix/tests/test_canmatrix.py::test_frame_decode_j1939_id[3-61442-3-217055747]",
"src/canmatrix/tests/test_canmatrix.py::test_frame_decode_j1939_id[6-65098-3-419318275]",
"src/canmatrix/tests/test_canmatrix.py::test_frame_decode_j1939_id[3-256-5-201392901]",
"src/canmatrix/tests/test_canmatrix.py::test_frame_add_transmitter",
"src/canmatrix/tests/test_canmatrix.py::test_frame_add_transmitter_no_duplicities",
"src/canmatrix/tests/test_canmatrix.py::test_frame_delete_transmitter",
"src/canmatrix/tests/test_canmatrix.py::test_frame_delete_wrong_transmitter_doesnt_raise",
"src/canmatrix/tests/test_canmatrix.py::test_frame_find_signal",
"src/canmatrix/tests/test_canmatrix.py::test_frame_find_missing_signal",
"src/canmatrix/tests/test_canmatrix.py::test_frame_glob_signals",
"src/canmatrix/tests/test_canmatrix.py::test_frame_add_attribute",
"src/canmatrix/tests/test_canmatrix.py::test_frame_del_attribute",
"src/canmatrix/tests/test_canmatrix.py::test_frame_del_missing_attribute_doesnt_raise",
"src/canmatrix/tests/test_canmatrix.py::test_frame_is_iterable",
"src/canmatrix/tests/test_canmatrix.py::test_frame_find_mandatory_attribute",
"src/canmatrix/tests/test_canmatrix.py::test_frame_find_optional_attribute",
"src/canmatrix/tests/test_canmatrix.py::test_frame_no_attribute",
"src/canmatrix/tests/test_canmatrix.py::test_frame_no_attribute_with_default",
"src/canmatrix/tests/test_canmatrix.py::test_frame_default_attr_from_db",
"src/canmatrix/tests/test_canmatrix.py::test_frame_add_signal_group",
"src/canmatrix/tests/test_canmatrix.py::test_frame_add_signal_group_wrong_signal",
"src/canmatrix/tests/test_canmatrix.py::test_frame_find_signal_group",
"src/canmatrix/tests/test_canmatrix.py::test_frame_find_wrong_signal_group",
"src/canmatrix/tests/test_canmatrix.py::test_define_set_default",
"src/canmatrix/tests/test_canmatrix.py::test_define_update_enum_definition",
"src/canmatrix/tests/test_canmatrix.py::test_define_update_ingored_non_enum",
"src/canmatrix/tests/test_canmatrix.py::test_define_for_int",
"src/canmatrix/tests/test_canmatrix.py::test_define_for_hex",
"src/canmatrix/tests/test_canmatrix.py::test_define_for_string",
"src/canmatrix/tests/test_canmatrix.py::test_define_for_enum",
"src/canmatrix/tests/test_canmatrix.py::test_define_for_enum_strip_quotes",
"src/canmatrix/tests/test_canmatrix.py::test_define_for_float",
"src/canmatrix/tests/test_canmatrix.py::test_decoded_signal_phys_value",
"src/canmatrix/tests/test_canmatrix.py::test_decoded_signal_named_value",
"src/canmatrix/tests/test_canmatrix.py::test_Arbitration_id",
"src/canmatrix/tests/test_canmatrix.py::test_arbitration_id_is_instance",
"src/canmatrix/tests/test_canmatrix.py::test_arbitration_id_j1939_direct_setters",
"src/canmatrix/tests/test_canmatrix.py::test_canmatrix_add_attribure",
"src/canmatrix/tests/test_canmatrix.py::test_canmatrix_get_frame_by_glob",
"src/canmatrix/tests/test_canmatrix.py::test_canmatrix_get_frame_by_name",
"src/canmatrix/tests/test_canmatrix.py::test_canmatrix_get_frame_by_wrong_name",
"src/canmatrix/tests/test_canmatrix.py::test_canmatrix_get_frame_by_pgn",
"src/canmatrix/tests/test_canmatrix.py::test_canmatrix_get_frame_by_wrong_pgn",
"src/canmatrix/tests/test_canmatrix.py::test_canmatrix_iterate_over_frames",
"src/canmatrix/tests/test_canmatrix.py::test_canmatrix_remove_frame",
"src/canmatrix/tests/test_canmatrix.py::test_canmatrix_rename_ecu_by_name",
"src/canmatrix/tests/test_canmatrix.py::test_canmatrix_rename_ecu_by_wrong_name",
"src/canmatrix/tests/test_canmatrix.py::test_canmatrix_rename_ecu_by_instance",
"src/canmatrix/tests/test_canmatrix.py::test_canmatrix_del_ecu_by_glob",
"src/canmatrix/tests/test_canmatrix.py::test_canmatrix_del_ecu_by_instance",
"src/canmatrix/tests/test_canmatrix.py::test_canmatrix_del_obsolete_ecus",
"src/canmatrix/tests/test_canmatrix.py::test_canmatrix_rename_frame_by_name",
"src/canmatrix/tests/test_canmatrix.py::test_canmatrix_rename_frame_by_instance",
"src/canmatrix/tests/test_canmatrix.py::test_canmatrix_del_frame_by_name",
"src/canmatrix/tests/test_canmatrix.py::test_canmatrix_del_frame_by_instance",
"src/canmatrix/tests/test_canmatrix.py::test_effective_cycle_time",
"src/canmatrix/tests/test_canmatrix.py::test_baudrate",
"src/canmatrix/tests/test_j1939_decoder.py::test_j1939_decoder"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_media",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-04-23 10:12:16+00:00
|
bsd-2-clause
| 2,069 |
|
ebroecker__canmatrix-479
|
diff --git a/src/canmatrix/canmatrix.py b/src/canmatrix/canmatrix.py
index 9a5dd66..bb21b55 100644
--- a/src/canmatrix/canmatrix.py
+++ b/src/canmatrix/canmatrix.py
@@ -156,6 +156,8 @@ class Signal(object):
mux_value = attr.ib(default=None)
is_float = attr.ib(default=False) # type: bool
+ is_ascii = attr.ib(default=False) # type: bool
+ type_label = attr.ib(default="")
enumeration = attr.ib(default=None) # type: typing.Optional[str]
comments = attr.ib(factory=dict) # type: typing.MutableMapping[int, str]
attributes = attr.ib(factory=dict) # type: typing.MutableMapping[str, typing.Any]
diff --git a/src/canmatrix/formats/sym.py b/src/canmatrix/formats/sym.py
index a67d137..7567864 100644
--- a/src/canmatrix/formats/sym.py
+++ b/src/canmatrix/formats/sym.py
@@ -104,11 +104,25 @@ def create_signal(db, signal): # type: (canmatrix.CanMatrix, canmatrix.Signal)
global enums
global enum_dict
output = ""
- output += "Var=%s " % signal.name
- if not signal.is_signed:
- output += "unsigned "
+ if sys.version_info > (3, 0):
+ quote_name = not signal.name.isidentifier()
else:
- output += "signed "
+ from future.utils import isidentifier
+ quote_name = not isidentifier(signal.name)
+ if quote_name:
+ output += 'Var="%s" ' % signal.name
+ else:
+ output += "Var=%s " % signal.name
+ if signal.type_label:
+ output += signal.type_label + " "
+ else:
+ if signal.is_signed:
+ output += "signed "
+ elif signal.is_float:
+ output += "float "
+ else:
+ output += "unsigned "
+
start_bit = signal.get_startbit()
if not signal.is_little_endian:
# Motorola
@@ -419,29 +433,29 @@ def load(f, **options): # type: (typing.IO, **typing.Any) -> canmatrix.CanMatri
sig_name = temp_array[0]
is_float = False
+ is_ascii = False
if index_offset != 1:
is_signed = True
else:
is_signed = False
- if temp_array[1] == 'unsigned':
+ type_label = temp_array[1]
+
+ if type_label == 'unsigned':
+ pass
+ elif type_label == 'bit':
pass
- elif temp_array[1] == 'bit':
- # TODO: actually support bit instead of interpreting as
- # an unsigned
+ elif type_label == 'raw':
pass
- elif temp_array[1] == 'signed':
+ elif type_label == 'signed':
is_signed = True
- elif temp_array[1] in ['float', 'double']:
+ elif type_label in ['float', 'double']:
is_float = True
- elif temp_array[1] in ['string']:
- # TODO: actually support these variable types instead
- # of skipping
- print('Variable type \'{}\' found and skipped'
- .format(temp_array[1]))
- continue
+ elif type_label in ['char', 'string']:
+ is_ascii = True
+ pass
else:
- raise ValueError('Unknown type \'{}\' found'.format(temp_array[1]))
+ raise ValueError('Unknown type \'{}\' found'.format(type_label))
start_bit = int(temp_array[index_offset + 1].split(',')[0])
signal_length = int(temp_array[index_offset + 1].split(',')[1])
@@ -521,6 +535,7 @@ def load(f, **options): # type: (typing.IO, **typing.Any) -> canmatrix.CanMatri
is_little_endian=intel,
is_signed=is_signed,
is_float=is_float,
+ is_ascii=is_ascii,
factor=factor,
offset=offset,
unit=unit,
@@ -560,6 +575,7 @@ def load(f, **options): # type: (typing.IO, **typing.Any) -> canmatrix.CanMatri
unit=unit,
multiplex=multiplexor,
comment=comment,
+ type_label=type_label,
**extras)
if min_value is not None:
signal.min = float_factory(min_value)
|
ebroecker/canmatrix
|
4ab128cb9cfd01def8dbfc3a7faff82cbf86977d
|
diff --git a/src/canmatrix/tests/test_sym.py b/src/canmatrix/tests/test_sym.py
index d5b5ccd..cb99d69 100644
--- a/src/canmatrix/tests/test_sym.py
+++ b/src/canmatrix/tests/test_sym.py
@@ -2,6 +2,7 @@
import io
import sys
import textwrap
+from itertools import chain
import pytest
@@ -218,3 +219,101 @@ Title="An Example Title"
assert matrix.value_tables == enum_dict, "Enum not parsed correctly : '{}'".format(enum_label)
f_out = io.BytesIO()
canmatrix.formats.sym.dump(matrix, f_out)
+
+
+def test_types_read():
+ f = io.BytesIO('''\
+FormatVersion=5.0 // Do not edit this line!
+Title="Types Test"
+
+{ENUMS}
+enum EnumAnimals(0="Cat", // An enum value for cats
+ 1="Dog", // An enum value for dogs
+ 2="Horse", 3="Monkey",
+ 4="Lion")// An enum with a comment for the final value
+
+{SENDRECEIVE}
+
+[SymbolLengths]
+ID=000h
+DLC=8
+Var="1Bit" unsigned 0,1
+Var="3Bits" unsigned 1,3
+Var="4Bits" unsigned 4,4
+Var="21Bits" unsigned 8,21
+Var="6Bits" unsigned 29,6
+Var="29Bits" unsigned 35,29
+
+[SymbolTypes]
+ID=001h
+DLC=8
+Var=Bit bit 0,1
+Var=Char char 1,8
+Var=String string 16,16
+Var=Signed signed 32,4
+Var=Unsigned unsigned 36,4
+''' # Var=Enum EnumAnimals 40,4
+ '''
+Var=Raw raw 48,16
+
+[SymbolDouble]
+ID=002h
+DLC=8
+Var=Double double 0,64 // Must be 8 Bytes according to PCAN Symbol Editor V5
+
+[SymbolFloat]
+ID=003h
+DLC=4
+Var=Float float 0,32 // Must be 4 Bytes according to PCAN Symbol Editor V5
+'''.encode('utf-8'),
+ )
+ matrix = canmatrix.formats.sym.load(f)
+ # Check no errors loading the matrix
+ assert matrix.load_errors == []
+
+ f_out = io.BytesIO()
+ canmatrix.formats.sym.dump(matrix, f_out)
+ f_out_bytes = f_out.getvalue()
+ f_out_string = f_out_bytes.decode("utf-8")
+
+ # Check that types are preserved when saving back to .SYM format
+ assert "Var=Bit bit" in f_out_string
+ assert "Var=Char char" in f_out_string
+ assert "Var=String string" in f_out_string
+ assert "Var=Signed signed" in f_out_string
+ assert 'Var="21Bits" unsigned' in f_out_string
+ assert 'Var=Float float' in f_out_string
+ assert 'Var=Double double' in f_out_string
+
+ # Read matrix back in to check all symbols/frames preserved
+ f_in = io.BytesIO(f_out_bytes)
+ new_matrix = canmatrix.formats.sym.load(f_in)
+
+ # Check no errors loading the matrix
+ assert new_matrix.load_errors == []
+
+ # Check that both matrices have the same Frames
+ frames = [f.name for f in matrix.frames]
+ new_frames = [f.name for f in new_matrix.frames]
+ assert sorted(frames) == sorted(new_frames)
+
+ # Check that both matrices have the same signals, and that all the expected signals are present
+ signals = chain(*[[s.name for s in frame.signals] for frame in matrix.frames])
+ new_signals = chain(*[[s.name for s in frame.signals] for frame in new_matrix.frames])
+ assert sorted(signals) == sorted(new_signals) == sorted([
+ "1Bit",
+ "3Bits",
+ "4Bits",
+ "21Bits",
+ "6Bits",
+ "29Bits",
+ "Bit",
+ "Char",
+ "String",
+ "Signed",
+ "Unsigned",
+ "Raw",
+ "Double",
+ "Float", ])
+
+
|
Cannot parse "Char", "String" or "Raw" signal types in .Sym files
The PCAN Symbol editor allows several types to be set for signals. These are:
- Bit
- Char
- String
- Signed
- Unsigned
- Float
- Enum
- Double
- Raw
Currently Canmatrix will accept some of these but not all.
The ones which do not work yet are:
- Char
- String
- Enum
- Raw
I think the first thing to do is to make it interpret all of these except Enum as unsigned, so that it doesn't throw any signals away.
I think it would also be useful to add a property indicating which type the symbol file specified for the signal, so that a symbol file can be written back with the same types it was read with; a sketch of the mapping follows.
(Adding Enums is a bit more complex, so I'll open a separate issue for those.)
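A sketch of the lenient mapping, reusing the flags the parser already knows about plus an `is_ascii` marker for the text-ish types (the marker name is my suggestion, not existing API):

```python
# Flags per PCAN Symbol Editor type label; unknown labels still raise.
TYPE_FLAGS = {
    "unsigned": {},
    "bit": {},
    "raw": {},
    "signed": {"is_signed": True},
    "float": {"is_float": True},
    "double": {"is_float": True},
    "char": {"is_ascii": True},
    "string": {"is_ascii": True},
}


def flags_for(type_label):
    try:
        return TYPE_FLAGS[type_label]
    except KeyError:
        raise ValueError("Unknown type '{}' found".format(type_label))
```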
|
0.0
|
4ab128cb9cfd01def8dbfc3a7faff82cbf86977d
|
[
"src/canmatrix/tests/test_sym.py::test_types_read"
] |
[
"src/canmatrix/tests/test_sym.py::test_colliding_mux_values",
"src/canmatrix/tests/test_sym.py::test_parse_longname_with_colon",
"src/canmatrix/tests/test_sym.py::test_export_default_decimal_places[False-37-37]",
"src/canmatrix/tests/test_sym.py::test_export_default_decimal_places[True-37.1-37.1]",
"src/canmatrix/tests/test_sym.py::tests_parse_float[float-32]",
"src/canmatrix/tests/test_sym.py::tests_parse_float[double-64]",
"src/canmatrix/tests/test_sym.py::test_unterminated_enum",
"src/canmatrix/tests/test_sym.py::test_title_read_and_write",
"src/canmatrix/tests/test_sym.py::test_enums_read[enum"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-04-28 17:14:49+00:00
|
bsd-2-clause
| 2,070 |
|
ebroecker__canmatrix-481
|
diff --git a/src/canmatrix/formats/sym.py b/src/canmatrix/formats/sym.py
index 7567864..b9557d5 100644
--- a/src/canmatrix/formats/sym.py
+++ b/src/canmatrix/formats/sym.py
@@ -38,8 +38,6 @@ import canmatrix
import canmatrix.utils
logger = logging.getLogger(__name__)
-enum_dict = {} # type: typing.Dict[str, str]
-enums = "{ENUMS}\n"
def default_float_factory(value): # type: (typing.Any) -> decimal.Decimal
@@ -101,8 +99,6 @@ def format_float(f): # type: (typing.Any) -> str
def create_signal(db, signal): # type: (canmatrix.CanMatrix, canmatrix.Signal) -> str
- global enums
- global enum_dict
output = ""
if sys.version_info > (3, 0):
quote_name = not signal.name.isidentifier()
@@ -159,12 +155,7 @@ def create_signal(db, signal): # type: (canmatrix.CanMatrix, canmatrix.Signal)
val_tab_name = signal.name
output += "/e:%s " % val_tab_name
- if val_tab_name not in enum_dict:
- enum_dict[val_tab_name] = "enum " + val_tab_name + "(" + ', '.join(
- '%s="%s"' %
- (key, val) for (
- key, val) in sorted(
- signal.values.items())) + ")"
+
default = signal.initial_value # type: ignore
min_ok = signal.min is None or default >= signal.min
@@ -182,17 +173,31 @@ def create_signal(db, signal): # type: (canmatrix.CanMatrix, canmatrix.Signal)
output += "\n"
return output
+def create_enum_from_signal_values(signal):
+ enum_dict = {}
+ if len(signal.values) > 0:
+ val_tab_name = signal.enumeration
+ if val_tab_name is None:
+ val_tab_name = signal.name
+
+ if val_tab_name not in enum_dict:
+ enum_dict[val_tab_name] = "enum " + val_tab_name + "(" + ', '.join(
+ '%s="%s"' %
+ (key, val) for (
+ key, val) in sorted(
+ signal.values.items())) + ")"
+ return enum_dict
def dump(db, f, **options): # type: (canmatrix.CanMatrix, typing.IO, **typing.Any) -> None
"""
export canmatrix-object as .sym file (compatible to PEAK-Systems)
"""
- global enum_dict
- global enums
sym_encoding = options.get('symExportEncoding', 'iso-8859-1')
ignore_encoding_errors = options.get("ignoreExportEncodingErrors", "")
enum_dict = {}
+ for enum_name, enum_values in db.value_tables.items():
+ enum_dict[enum_name] = "enum {}({})".format(enum_name, ', '.join('{}="{}"'.format(*items) for items in sorted(enum_values.items())))
enums = "{ENUMS}\n"
header = """\
@@ -308,6 +313,7 @@ Title=\"{}\"
output += "CycleTime=" + str(frame.effective_cycle_time) + "\n"
for signal in frame.signals:
output += create_signal(db, signal)
+ enum_dict.update(create_enum_from_signal_values(signal))
output += "\n"
enums += '\n'.join(sorted(enum_dict.values()))
# write output file
|
ebroecker/canmatrix
|
ef4d76ebed8d1fc49279bd027372856bdaa7c4ae
|
diff --git a/src/canmatrix/tests/test_sym.py b/src/canmatrix/tests/test_sym.py
index cb99d69..ff89974 100644
--- a/src/canmatrix/tests/test_sym.py
+++ b/src/canmatrix/tests/test_sym.py
@@ -212,13 +212,49 @@ Title="An Example Title"
{{ENUMS}}
{}
'''.format(enum_str).encode('utf-8'),
- )
+ )
matrix = canmatrix.formats.sym.load(f)
assert matrix.load_errors == [], "Failed to load canmatrix, when testing enum case : '{}'".format(enum_label)
assert matrix.value_tables == enum_dict, "Enum not parsed correctly : '{}'".format(enum_label)
- f_out = io.BytesIO()
- canmatrix.formats.sym.dump(matrix, f_out)
+
+
+def test_enums_export():
+ f = io.BytesIO('''\
+FormatVersion=5.0 // Do not edit this line!
+Title="An Example Title"
+
+{ENUMS}
+enum Animal(0="Dog",1="Cat",2="Fox")
+
+{SENDRECEIVE}
+
+[Frame1]
+ID=000h
+DLC=8
+Var=Signal1 unsigned 0,16
+'''.encode('utf-8'),
+ )
+
+ matrix = canmatrix.formats.sym.load(f)
+ assert matrix.load_errors == [], "Failed to load canmatrix"
+
+ # Add an enum to Signal1
+ matrix.frame_by_name("Frame1").signal_by_name("Signal1").enumeration = "Plants"
+ matrix.frame_by_name("Frame1").signal_by_name("Signal1").values = {0: "Grass", 1: "Flower", 2: "Tree"}
+
+ # Export and reimport
+ f_out = io.BytesIO()
+ canmatrix.formats.sym.dump(matrix, f_out)
+ f_in = io.BytesIO(f_out.getvalue())
+ new_matrix = canmatrix.formats.sym.load(f_in)
+
+ # Check that Enums from Enums table exported and reimported correctly
+ assert new_matrix.value_tables["Animal"] == {0: "Dog", 1: "Cat", 2: "Fox"}
+
+ # Check that Enums from a Signal.Values property exported and reimported correctly
+ assert new_matrix.value_tables["Plants"] == {0: "Grass", 1: "Flower", 2: "Tree"}
+
def test_types_read():
|
Value Tables/Enums not exported when using sym.dump()
The sym dump method doesn't seem to actually save the enumerations into the output file if they aren't used by any signals within the matrix.
I'm a bit confused by how the code is structured: it seems to expect the enums to all be saved in a global variable called enum_dict, but it never actually populates it. I can't quite see how this is intended to work.
@ebroecker Do you know what the global variables here are for?
Modifying this test shows that it doesn't work:
```
@pytest.mark.parametrize(
    'enum_str, enum_dict, enum_label',
    (
        ('enum Animal(0="Dog", 1="Cat", 2="Fox")', {"Animal": {0: "Dog", 1: "Cat", 2: "Fox"}}, "Simple enum"),
        ('''\
enum Animal(0="Dog", //A Comment
1="Cat",
2="Fox")''',
         {"Animal": {0: "Dog", 1: "Cat", 2: "Fox"}}, "Multiline enum"),
        ('enum Animal(0="Dog",1="Cat",2="Fox")', {"Animal": {0: "Dog", 1: "Cat", 2: "Fox"}}, "No Space in Separator"),
    )
)
def test_enums_read(enum_str, enum_dict, enum_label):
    f = io.BytesIO('''\
FormatVersion=5.0 // Do not edit this line!
Title="An Example Title"
{{ENUMS}}
{}
'''.format(enum_str).encode('utf-8'),
                   )
    matrix = canmatrix.formats.sym.load(f)
    assert matrix.load_errors == [], "Failed to load canmatrix, when testing enum case : '{}'".format(enum_label)
    assert matrix.value_tables == enum_dict, "Enum not parsed correctly : '{}'".format(enum_label)
    f_out = io.BytesIO()
    canmatrix.formats.sym.dump(matrix, f_out)
    f_in = io.BytesIO(f_out.getvalue())
    exported_sym_string = f_out.getvalue().decode('utf-8')
    new_matrix = canmatrix.formats.sym.load(f_in)
    assert matrix.value_tables == new_matrix.value_tables, "Enums not exported and reimported correctly"
```
(The last 4 lines have been added)
The fix seems like it could be simple enough to just build up the text from the value_table, but without knowing what the current code is trying to do I feel like there may be a better fix, or some refactoring required.
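A minimal sketch of that "build the text from the value_table" direction (roughly what the patch above ends up doing in `dump()`):
```python
def build_enums_section(db):
    """Render the {ENUMS} block directly from the matrix's value tables."""
    lines = ["{ENUMS}"]
    for name, values in sorted(db.value_tables.items()):
        body = ', '.join('{}="{}"'.format(k, v) for k, v in sorted(values.items()))
        lines.append("enum {}({})".format(name, body))
    return "\n".join(lines) + "\n"
```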
|
0.0
|
ef4d76ebed8d1fc49279bd027372856bdaa7c4ae
|
[
"src/canmatrix/tests/test_sym.py::test_enums_export"
] |
[
"src/canmatrix/tests/test_sym.py::test_colliding_mux_values",
"src/canmatrix/tests/test_sym.py::test_parse_longname_with_colon",
"src/canmatrix/tests/test_sym.py::test_export_default_decimal_places[False-37-37]",
"src/canmatrix/tests/test_sym.py::test_export_default_decimal_places[True-37.1-37.1]",
"src/canmatrix/tests/test_sym.py::tests_parse_float[float-32]",
"src/canmatrix/tests/test_sym.py::tests_parse_float[double-64]",
"src/canmatrix/tests/test_sym.py::test_unterminated_enum",
"src/canmatrix/tests/test_sym.py::test_title_read_and_write",
"src/canmatrix/tests/test_sym.py::test_enums_read[enum",
"src/canmatrix/tests/test_sym.py::test_types_read"
] |
{
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-05-11 13:49:30+00:00
|
bsd-2-clause
| 2,071 |
|
ecies__py-87
|
diff --git a/README.md b/README.md
index 76b13b0..592e497 100644
--- a/README.md
+++ b/README.md
@@ -253,6 +253,10 @@ b'helloworld'
## Release Notes
+### 0.3.0
+
+- API change: use `HKDF` to derive shared keys instead of `sha256`
+
### 0.2.0
- API change: `ecies.encrypt` and `ecies.decrypt` now can take both hex str and raw bytes
diff --git a/ecies/__init__.py b/ecies/__init__.py
index 4323f0d..4f50bca 100644
--- a/ecies/__init__.py
+++ b/ecies/__init__.py
@@ -1,7 +1,7 @@
from typing import Union
from coincurve import PrivateKey, PublicKey
-from ecies.utils import generate_key, hex2prv, hex2pub, derive, aes_encrypt, aes_decrypt
+from ecies.utils import generate_key, hex2prv, hex2pub, encapsulate, decapsulate, aes_encrypt, aes_decrypt
__all__ = ["encrypt", "decrypt"]
@@ -23,7 +23,6 @@ def encrypt(receiver_pk: Union[str, bytes], msg: bytes) -> bytes:
Encrypted data
"""
ephemeral_key = generate_key()
-
if isinstance(receiver_pk, str):
receiver_pubkey = hex2pub(receiver_pk)
elif isinstance(receiver_pk, bytes):
@@ -31,7 +30,7 @@ def encrypt(receiver_pk: Union[str, bytes], msg: bytes) -> bytes:
else:
raise TypeError("Invalid public key type")
- aes_key = derive(ephemeral_key, receiver_pubkey)
+ aes_key = encapsulate(ephemeral_key, receiver_pubkey)
cipher_text = aes_encrypt(aes_key, msg)
return ephemeral_key.public_key.format(False) + cipher_text
@@ -52,7 +51,6 @@ def decrypt(receiver_sk: Union[str, bytes], msg: bytes) -> bytes:
bytes
Plain text
"""
-
if isinstance(receiver_sk, str):
private_key = hex2prv(receiver_sk)
elif isinstance(receiver_sk, bytes):
@@ -62,7 +60,7 @@ def decrypt(receiver_sk: Union[str, bytes], msg: bytes) -> bytes:
pubkey = msg[0:65] # uncompressed pubkey's length is 65 bytes
encrypted = msg[65:]
- sender_public_key = PublicKey(pubkey)
+ ephemeral_public_key = PublicKey(pubkey)
- aes_key = derive(private_key, sender_public_key)
+ aes_key = decapsulate(ephemeral_public_key, private_key)
return aes_decrypt(aes_key, encrypted)
diff --git a/ecies/__version__.py b/ecies/__version__.py
index 54e3abf..90f3874 100644
--- a/ecies/__version__.py
+++ b/ecies/__version__.py
@@ -1,5 +1,5 @@
__title__ = "eciespy"
-__version__ = "0.2.0"
+__version__ = "0.3.0"
__description__ = "Elliptic Curve Integrated Encryption Scheme for secp256k1 in Python"
__url__ = "https://github.com/ecies/py"
__author__ = "Weiliang Li"
diff --git a/ecies/utils.py b/ecies/utils.py
index 414c5d7..2cae0d3 100644
--- a/ecies/utils.py
+++ b/ecies/utils.py
@@ -2,11 +2,14 @@ import hashlib
import codecs
from Cryptodome.Cipher import AES
+from Cryptodome.Protocol.KDF import HKDF
+from Cryptodome.Hash import SHA256
from coincurve import PrivateKey, PublicKey
from coincurve.utils import get_valid_secret
from eth_keys import keys
AES_CIPHER_MODE = AES.MODE_GCM
+AES_KEY_BYTES_LEN = 32
__all__ = [
"sha256",
@@ -14,7 +17,6 @@ __all__ = [
"generate_eth_key",
"hex2prv",
"hex2pub",
- "derive",
"aes_encrypt",
"aes_decrypt",
]
@@ -136,32 +138,18 @@ def hex2prv(prv_hex: str) -> PrivateKey:
return PrivateKey(decode_hex(prv_hex))
-def derive(private_key: PrivateKey, peer_public_key: PublicKey) -> bytes:
- """
- Key exchange between private key and peer's public key,
- `derive(k1, k2.public_key)` should be equal to `derive(k2, k1.public_key)`.
+def encapsulate(private_key: PrivateKey, peer_public_key: PublicKey) -> bytes:
+ shared_point = peer_public_key.multiply(private_key.secret)
+ master = private_key.public_key.format(compressed=False) + shared_point.format(compressed=False)
+ derived = HKDF(master, AES_KEY_BYTES_LEN, b'', SHA256)
+ return derived
- Parameters
- ----------
- private_key: coincurve.PrivateKey
- A secp256k1 private key
- peer_public_key: coincurve.PublicKey
- Peer's public key
- Returns
- -------
- bytes
- A secret key used for symmetric encryption
-
- >>> from coincurve import PrivateKey
- >>> ke1 = generate_eth_key()
- >>> ke2 = generate_eth_key()
- >>> k1 = hex2prv(ke1.to_hex())
- >>> k2 = hex2prv(ke2.to_hex())
- >>> derive(k1, k2.public_key) == derive(k2, k1.public_key)
- True
- """
- return private_key.ecdh(peer_public_key.format())
+def decapsulate(public_key: PublicKey, peer_private_key: PrivateKey) -> bytes:
+ shared_point = public_key.multiply(peer_private_key.secret)
+ master = public_key.format(compressed=False) + shared_point.format(compressed=False)
+ derived = HKDF(master, AES_KEY_BYTES_LEN, b'', SHA256)
+ return derived
def aes_encrypt(key: bytes, plain_text: bytes) -> bytes:
|
ecies/py
|
630163c33a41ac1ba08a9bcdcb6a43a6ec5d994b
|
diff --git a/ecies/tests/test_crypt.py b/ecies/tests/test_crypt.py
index 7143031..74eee6b 100644
--- a/ecies/tests/test_crypt.py
+++ b/ecies/tests/test_crypt.py
@@ -1,8 +1,12 @@
import os
import unittest
+from coincurve import PrivateKey
+from Cryptodome.Protocol.KDF import HKDF
+from Cryptodome.Hash import SHA256
+
from ecies import encrypt, decrypt
-from ecies.utils import sha256, generate_eth_key, generate_key, aes_encrypt, aes_decrypt
+from ecies.utils import sha256, encapsulate, decapsulate, generate_eth_key, generate_key, aes_encrypt, aes_decrypt
class TestCrypt(unittest.TestCase):
@@ -13,6 +17,27 @@ class TestCrypt(unittest.TestCase):
def test_hash(self):
self.assertEqual(sha256(b"0" * 16).hex()[:8], "fcdb4b42")
+ def test_hdkf(self):
+ derived = HKDF(b'secret', 32, b'', SHA256).hex()
+ self.assertEqual(
+ derived,
+ "2f34e5ff91ec85d53ca9b543683174d0cf550b60d5f52b24c97b386cfcf6cbbf"
+ )
+
+ k1 = PrivateKey(secret=bytes([2]))
+ self.assertEqual(k1.to_int(), 2)
+
+ k2 = PrivateKey(secret=bytes([3]))
+ self.assertEqual(k2.to_int(), 3)
+
+ self.assertEqual(
+ encapsulate(k1, k2.public_key), decapsulate(k1.public_key, k2)
+ )
+ self.assertEqual(
+ encapsulate(k1, k2.public_key).hex(),
+ "6f982d63e8590c9d9b5b4c1959ff80315d772edd8f60287c9361d548d5200f82"
+ )
+
def test_elliptic(self):
data = self.test_string
k = generate_eth_key()
|
Using proper KDF
Currently the KDF is just SHA256, but there are a bunch of standards which define what a KDF **should be**. There is an old one, ANSI X9.63: http://www.secg.org/sec1-v2.pdf (clause 3.6.1).
But there is also a newer ISO 18033-2 document that defines a new KDF: https://www.shoup.net/iso/std6.pdf (clause 6.2). (The drafts contain a discussion of why not just a hash: https://www.shoup.net/papers/iso-2_1.pdf.)
And note: both documents define a generic KDF for every algorithm they describe.
I guess we should replace the current KDF with the ISO 18033-2 variant.
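For illustration, an HKDF-style derivation with pycryptodome (which this project already uses for AES) looks like this; the input bytes below are placeholders:
```python
from Cryptodome.Hash import SHA256
from Cryptodome.Protocol.KDF import HKDF

# Placeholder shared-secret material; in ECIES this would be built from the
# ephemeral public key and the ECDH shared point.
master = b"ephemeral-pubkey-bytes" + b"shared-point-bytes"
aes_key = HKDF(master, 32, b"", SHA256)  # derive a 32-byte AES-256 key
assert len(aes_key) == 32
```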
|
0.0
|
630163c33a41ac1ba08a9bcdcb6a43a6ec5d994b
|
[
"ecies/tests/test_crypt.py::TestCrypt::test_aes",
"ecies/tests/test_crypt.py::TestCrypt::test_elliptic",
"ecies/tests/test_crypt.py::TestCrypt::test_hash",
"ecies/tests/test_crypt.py::TestCrypt::test_hdkf"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-08-14 22:24:37+00:00
|
mit
| 2,072 |
|
edgi-govdata-archiving__wayback-139
|
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 7d0559d..a7aaa14 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -2,7 +2,7 @@
# etc.) but not necessarily required for _using_ it. Some dev tooling requires
# newer Python versions than the package itself (>=3.8)
wheel
-check-wheel-contents ~=0.5.0
+check-wheel-contents ~=0.6.0
flake8 ~=6.1.0
twine
-r requirements-test.txt
diff --git a/wayback/_client.py b/wayback/_client.py
index 3383f53..d2e4a97 100644
--- a/wayback/_client.py
+++ b/wayback/_client.py
@@ -347,7 +347,7 @@ class WaybackSession(_utils.DisableAfterCloseSession, requests.Session):
user_agent : str, optional
A custom user-agent string to use in all requests. Defaults to:
`wayback/{version} (+https://github.com/edgi-govdata-archiving/wayback)`
- search_calls_per_second : int or float, default: 1.5
+ search_calls_per_second : int or float, default: 1
The maximum number of calls made to the search API per second.
To disable the rate limit, set this to 0.
memento_calls_per_second : int or float, default: 30
@@ -366,7 +366,7 @@ class WaybackSession(_utils.DisableAfterCloseSession, requests.Session):
handleable_errors = (ConnectionError,) + retryable_errors
def __init__(self, retries=6, backoff=2, timeout=60, user_agent=None,
- search_calls_per_second=1.5, memento_calls_per_second=30):
+ search_calls_per_second=1, memento_calls_per_second=30):
super().__init__()
self.retries = retries
self.backoff = backoff
@@ -400,11 +400,16 @@ def send(self, *args, **kwargs):
retries = 0
while True:
try:
+ logger.debug('sending HTTP request %s "%s", %s', args[0].method, args[0].url, kwargs)
result = super().send(*args, **kwargs)
if retries >= maximum or not self.should_retry(result):
if result.status_code == 429:
raise RateLimitError(result)
return result
+ else:
+ # TODO: parse and use Retry-After header if present.
+ # TODO: add additional delay for 429 responses.
+ logger.debug('Received error response (status: %s), will retry', result.status_code)
except WaybackSession.handleable_errors as error:
response = getattr(error, 'response', None)
if response:
@@ -412,13 +417,16 @@ def send(self, *args, **kwargs):
if retries >= maximum:
raise WaybackRetryError(retries, total_time, error) from error
- elif not self.should_retry_error(error):
+ elif self.should_retry_error(error):
+ logger.warn('Caught exception during request, will retry: %s', error)
+ else:
raise
# The first retry has no delay.
if retries > 0:
seconds = self.backoff * 2 ** (retries - 1)
total_time += seconds
+ logger.debug('Will retry after sleeping for %s seconds...', seconds)
time.sleep(seconds)
retries += 1
diff --git a/wayback/_utils.py b/wayback/_utils.py
index 00b1a4f..1df4be2 100644
--- a/wayback/_utils.py
+++ b/wayback/_utils.py
@@ -204,7 +204,7 @@ def set_memento_url_mode(url, mode):
@contextmanager
-def rate_limited(calls_per_second=2, group='default'):
+def rate_limited(calls_per_second=1, group='default'):
"""
A context manager that restricts entries to its body to occur only N times
per second (N can be a float). The current thread will be put to sleep in
@@ -226,7 +226,9 @@ def rate_limited(calls_per_second=2, group='default'):
minimum_wait = 1.0 / calls_per_second
current_time = time.time()
if current_time - last_call < minimum_wait:
- time.sleep(minimum_wait - (current_time - last_call))
+ seconds = minimum_wait - (current_time - last_call)
+ logger.debug('Hit %s rate limit, sleeping for %s seconds', group, seconds)
+ time.sleep(seconds)
_last_call_by_group[group] = time.time()
yield
|
edgi-govdata-archiving/wayback
|
03b83883cd7aca2dbef83d31782d880c22aabb50
|
diff --git a/wayback/tests/test_client.py b/wayback/tests/test_client.py
index cc5548b..cd8e676 100644
--- a/wayback/tests/test_client.py
+++ b/wayback/tests/test_client.py
@@ -792,7 +792,7 @@ def test_search_rate_limits(self):
next(client.search('zew.de'))
duration_with_limits_custom = time.time() - start_time
- assert 1.3 <= duration_with_limits <= 1.4
+ assert 1.9 <= duration_with_limits <= 2.1
assert 0.0 <= duration_without_limits <= 0.05
assert 0.0 <= duration_with_limits_custom <= 1.05
|
More logging
I think it would be useful to see more debug logging when requests are being made and when sleeping happens as the result of errors. When I was trying to diagnose #137 I had to sprinkle some `logger.warn` statements in.
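For reference, once such statements exist a caller can surface them with just the standard library (a minimal sketch):
```python
import logging

# Print debug output from the wayback package (and its submodules) to stderr.
logging.basicConfig(level=logging.DEBUG)
logging.getLogger("wayback").setLevel(logging.DEBUG)
```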
|
0.0
|
03b83883cd7aca2dbef83d31782d880c22aabb50
|
[
"wayback/tests/test_client.py::TestWaybackSession::test_search_rate_limits"
] |
[
"wayback/tests/test_client.py::test_search",
"wayback/tests/test_client.py::test_search_with_date",
"wayback/tests/test_client.py::test_search_with_timezone",
"wayback/tests/test_client.py::test_search_multipage",
"wayback/tests/test_client.py::test_search_cannot_iterate_after_session_closing",
"wayback/tests/test_client.py::test_search_does_not_repeat_results",
"wayback/tests/test_client.py::test_search_raises_for_blocked_urls",
"wayback/tests/test_client.py::test_search_with_filter",
"wayback/tests/test_client.py::test_search_with_filter_list",
"wayback/tests/test_client.py::test_search_with_filter_tuple",
"wayback/tests/test_client.py::test_search_removes_malformed_entries",
"wayback/tests/test_client.py::test_search_handles_no_length_cdx_records",
"wayback/tests/test_client.py::test_search_handles_bad_timestamp_cdx_records",
"wayback/tests/test_client.py::test_get_memento",
"wayback/tests/test_client.py::test_get_memento_with_date_datetime",
"wayback/tests/test_client.py::test_get_memento_with_string_datetime",
"wayback/tests/test_client.py::test_get_memento_with_inexact_string_datetime",
"wayback/tests/test_client.py::test_get_memento_handles_non_utc_datetime",
"wayback/tests/test_client.py::test_get_memento_with_invalid_datetime_type",
"wayback/tests/test_client.py::test_get_memento_with_requires_datetime_with_regular_url",
"wayback/tests/test_client.py::test_get_memento_with_archive_url",
"wayback/tests/test_client.py::test_get_memento_with_cdx_record",
"wayback/tests/test_client.py::test_get_memento_with_mode",
"wayback/tests/test_client.py::test_get_memento_with_mode_string",
"wayback/tests/test_client.py::test_get_memento_with_mode_boolean_is_not_allowed",
"wayback/tests/test_client.py::test_get_memento_target_window",
"wayback/tests/test_client.py::test_get_memento_raises_when_memento_is_outside_target_window",
"wayback/tests/test_client.py::test_get_memento_with_redirects",
"wayback/tests/test_client.py::test_get_memento_with_path_based_redirects",
"wayback/tests/test_client.py::test_get_memento_with_schemeless_redirects",
"wayback/tests/test_client.py::test_get_memento_raises_for_mementos_that_redirect_in_a_loop",
"wayback/tests/test_client.py::test_get_memento_with_redirect_in_view_mode",
"wayback/tests/test_client.py::test_get_memento_should_fail_for_non_playbackable_mementos",
"wayback/tests/test_client.py::test_get_memento_raises_blocked_error",
"wayback/tests/test_client.py::test_get_memento_raises_no_memento_error",
"wayback/tests/test_client.py::test_get_memento_follows_historical_redirects",
"wayback/tests/test_client.py::test_get_memento_follow_redirects_does_not_follow_historical_redirects",
"wayback/tests/test_client.py::test_get_memento_returns_memento_with_accurate_url",
"wayback/tests/test_client.py::TestWaybackSession::test_request_retries",
"wayback/tests/test_client.py::TestWaybackSession::test_stops_after_given_retries",
"wayback/tests/test_client.py::TestWaybackSession::test_only_retries_some_errors",
"wayback/tests/test_client.py::TestWaybackSession::test_raises_rate_limit_error",
"wayback/tests/test_client.py::TestWaybackSession::test_rate_limit_error_includes_retry_after",
"wayback/tests/test_client.py::TestWaybackSession::test_timeout_applied_session",
"wayback/tests/test_client.py::TestWaybackSession::test_timeout_applied_request",
"wayback/tests/test_client.py::TestWaybackSession::test_timeout_empty",
"wayback/tests/test_client.py::TestWaybackSession::test_memento_rate_limits"
] |
{
"failed_lite_validators": [
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-11-01 20:30:34+00:00
|
bsd-3-clause
| 2,073 |
|
edgi-govdata-archiving__wayback-140
|
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 7d0559d..a7aaa14 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -2,7 +2,7 @@
# etc.) but not necessarily required for _using_ it. Some dev tooling requires
# newer Python versions than the package itself (>=3.8)
wheel
-check-wheel-contents ~=0.5.0
+check-wheel-contents ~=0.6.0
flake8 ~=6.1.0
twine
-r requirements-test.txt
diff --git a/wayback/_client.py b/wayback/_client.py
index 3383f53..f26acf0 100644
--- a/wayback/_client.py
+++ b/wayback/_client.py
@@ -347,7 +347,7 @@ class WaybackSession(_utils.DisableAfterCloseSession, requests.Session):
user_agent : str, optional
A custom user-agent string to use in all requests. Defaults to:
`wayback/{version} (+https://github.com/edgi-govdata-archiving/wayback)`
- search_calls_per_second : int or float, default: 1.5
+ search_calls_per_second : int or float, default: 1
The maximum number of calls made to the search API per second.
To disable the rate limit, set this to 0.
memento_calls_per_second : int or float, default: 30
@@ -366,7 +366,7 @@ class WaybackSession(_utils.DisableAfterCloseSession, requests.Session):
handleable_errors = (ConnectionError,) + retryable_errors
def __init__(self, retries=6, backoff=2, timeout=60, user_agent=None,
- search_calls_per_second=1.5, memento_calls_per_second=30):
+ search_calls_per_second=1, memento_calls_per_second=30):
super().__init__()
self.retries = retries
self.backoff = backoff
diff --git a/wayback/_utils.py b/wayback/_utils.py
index 00b1a4f..c99bc75 100644
--- a/wayback/_utils.py
+++ b/wayback/_utils.py
@@ -204,7 +204,7 @@ def set_memento_url_mode(url, mode):
@contextmanager
-def rate_limited(calls_per_second=2, group='default'):
+def rate_limited(calls_per_second=1, group='default'):
"""
A context manager that restricts entries to its body to occur only N times
per second (N can be a float). The current thread will be put to sleep in
|
edgi-govdata-archiving/wayback
|
03b83883cd7aca2dbef83d31782d880c22aabb50
|
diff --git a/wayback/tests/test_client.py b/wayback/tests/test_client.py
index cc5548b..cd8e676 100644
--- a/wayback/tests/test_client.py
+++ b/wayback/tests/test_client.py
@@ -792,7 +792,7 @@ def test_search_rate_limits(self):
next(client.search('zew.de'))
duration_with_limits_custom = time.time() - start_time
- assert 1.3 <= duration_with_limits <= 1.4
+ assert 1.9 <= duration_with_limits <= 2.1
assert 0.0 <= duration_without_limits <= 0.05
assert 0.0 <= duration_with_limits_custom <= 1.05
|
search_calls_per_second needs to be dialed down
I was running some fairly simple data retrieval in [this Notebook](https://github.com/edsu/foiaonline/blob/main/Notebook.ipynb) (see the Wayback section) and I discovered that I got completely blocked from accessing web.archive.org! Luckily I remembered that there was Internet Archive's #wayback-researchers Slack channel, where I got this response.
> Hi edsu - I found your /cdx requests from 4:29UTC. Those requests are limited to an average of 60/min. Over that and we start sending 429s. If 429s are ignored for more than a minute we block the IP at the firewall (no connection) for 1 hour, which is what happened to you. Subsequent 429s over a given period will double that time each occurrence. If you can keeping your api request < 60/minute you will prevent this from happening.
I thought that the wayback module's defaults would have prevented me from going over 60 requests per minute (one per second), and I thought wayback's support for handling 429 responses would have backed off sufficiently fast. I suspect the goal posts on the server side have changed recently, because I had code that worked a month or so ago which stopped working (resulting in the block).
I was able to get around this by using a custom WaybackSession where I set the `search_calls_per_second` to `0.5`, but I suspect `1.0` would probably work better. Maybe the default could be moved down from 1.5 to 1.0?
Also, perhaps there needs to be some logic to make sure to wait a minute when encountering a 429 as well?
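For reference, the workaround looks roughly like this (parameter name as in this repo's `WaybackSession`):
```python
from wayback import WaybackClient, WaybackSession

# Throttle CDX/search requests well under the server's 60-per-minute budget.
session = WaybackSession(search_calls_per_second=0.5)
client = WaybackClient(session)
```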
|
0.0
|
03b83883cd7aca2dbef83d31782d880c22aabb50
|
[
"wayback/tests/test_client.py::TestWaybackSession::test_search_rate_limits"
] |
[
"wayback/tests/test_client.py::test_search",
"wayback/tests/test_client.py::test_search_with_date",
"wayback/tests/test_client.py::test_search_with_timezone",
"wayback/tests/test_client.py::test_search_multipage",
"wayback/tests/test_client.py::test_search_cannot_iterate_after_session_closing",
"wayback/tests/test_client.py::test_search_does_not_repeat_results",
"wayback/tests/test_client.py::test_search_raises_for_blocked_urls",
"wayback/tests/test_client.py::test_search_with_filter",
"wayback/tests/test_client.py::test_search_with_filter_list",
"wayback/tests/test_client.py::test_search_with_filter_tuple",
"wayback/tests/test_client.py::test_search_removes_malformed_entries",
"wayback/tests/test_client.py::test_search_handles_no_length_cdx_records",
"wayback/tests/test_client.py::test_search_handles_bad_timestamp_cdx_records",
"wayback/tests/test_client.py::test_get_memento",
"wayback/tests/test_client.py::test_get_memento_with_date_datetime",
"wayback/tests/test_client.py::test_get_memento_with_string_datetime",
"wayback/tests/test_client.py::test_get_memento_with_inexact_string_datetime",
"wayback/tests/test_client.py::test_get_memento_handles_non_utc_datetime",
"wayback/tests/test_client.py::test_get_memento_with_invalid_datetime_type",
"wayback/tests/test_client.py::test_get_memento_with_requires_datetime_with_regular_url",
"wayback/tests/test_client.py::test_get_memento_with_archive_url",
"wayback/tests/test_client.py::test_get_memento_with_cdx_record",
"wayback/tests/test_client.py::test_get_memento_with_mode",
"wayback/tests/test_client.py::test_get_memento_with_mode_string",
"wayback/tests/test_client.py::test_get_memento_with_mode_boolean_is_not_allowed",
"wayback/tests/test_client.py::test_get_memento_target_window",
"wayback/tests/test_client.py::test_get_memento_raises_when_memento_is_outside_target_window",
"wayback/tests/test_client.py::test_get_memento_with_redirects",
"wayback/tests/test_client.py::test_get_memento_with_path_based_redirects",
"wayback/tests/test_client.py::test_get_memento_with_schemeless_redirects",
"wayback/tests/test_client.py::test_get_memento_raises_for_mementos_that_redirect_in_a_loop",
"wayback/tests/test_client.py::test_get_memento_with_redirect_in_view_mode",
"wayback/tests/test_client.py::test_get_memento_should_fail_for_non_playbackable_mementos",
"wayback/tests/test_client.py::test_get_memento_raises_blocked_error",
"wayback/tests/test_client.py::test_get_memento_raises_no_memento_error",
"wayback/tests/test_client.py::test_get_memento_follows_historical_redirects",
"wayback/tests/test_client.py::test_get_memento_follow_redirects_does_not_follow_historical_redirects",
"wayback/tests/test_client.py::test_get_memento_returns_memento_with_accurate_url",
"wayback/tests/test_client.py::TestWaybackSession::test_request_retries",
"wayback/tests/test_client.py::TestWaybackSession::test_stops_after_given_retries",
"wayback/tests/test_client.py::TestWaybackSession::test_only_retries_some_errors",
"wayback/tests/test_client.py::TestWaybackSession::test_raises_rate_limit_error",
"wayback/tests/test_client.py::TestWaybackSession::test_rate_limit_error_includes_retry_after",
"wayback/tests/test_client.py::TestWaybackSession::test_timeout_applied_session",
"wayback/tests/test_client.py::TestWaybackSession::test_timeout_applied_request",
"wayback/tests/test_client.py::TestWaybackSession::test_timeout_empty",
"wayback/tests/test_client.py::TestWaybackSession::test_memento_rate_limits"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-11-01 20:45:14+00:00
|
bsd-3-clause
| 2,074 |
|
edgi-govdata-archiving__wayback-159
|
diff --git a/src/wayback/_client.py b/src/wayback/_client.py
index 0593e8f..4d1a3cf 100644
--- a/src/wayback/_client.py
+++ b/src/wayback/_client.py
@@ -455,7 +455,11 @@ def send(self, request: requests.PreparedRequest, **kwargs):
response = super().send(request, **kwargs)
retry_delay = self.get_retry_delay(retries, response)
- if retries >= maximum or not self.should_retry(response):
+ if is_memento_response(response):
+ # Mementos are necessarily successful responses, so just
+ # return them without any other checks.
+ return response
+ elif retries >= maximum or not self.should_retry(response):
if response.status_code == 429:
read_and_close(response)
raise RateLimitError(response, retry_delay)
@@ -498,10 +502,6 @@ def request(self, method, url, **kwargs):
return super().request(method, url, **kwargs)
def should_retry(self, response):
- # A memento may actually be a capture of an error, so don't retry it :P
- if is_memento_response(response):
- return False
-
return response.status_code in self.retryable_statuses
def should_retry_error(self, error):
|
edgi-govdata-archiving/wayback
|
477c44439abdebaf4d9942c7cb2aa55c3099b846
|
diff --git a/src/wayback/tests/cassettes/test_get_memento_works_on_archived_rate_limit_responses.yaml b/src/wayback/tests/cassettes/test_get_memento_works_on_archived_rate_limit_responses.yaml
new file mode 100644
index 0000000..dab5f46
--- /dev/null
+++ b/src/wayback/tests/cassettes/test_get_memento_works_on_archived_rate_limit_responses.yaml
@@ -0,0 +1,113 @@
+interactions:
+- request:
+ body: null
+ headers:
+ Accept-Encoding:
+ - gzip, deflate
+ User-Agent:
+ - wayback/0.4.5.dev10+gb7a16cd.d20231218 (+https://github.com/edgi-govdata-archiving/wayback)
+ method: GET
+ uri: https://web.archive.org/web/20150129034904id_/http://www.reddit.com/r/PokemonGiveaway
+ response:
+ body:
+ string: "\n<!doctype html>\n<html>\n <head>\n <title>Too Many Requests</title>\n
+ \ <style>\n body {\n font: small verdana, arial, helvetica,
+ sans-serif;\n width: 600px;\n margin: 0 auto;\n }\n\n
+ \ h1 {\n height: 40px;\n background: transparent url(//www.redditstatic.com/reddit.com.header.png)
+ no-repeat scroll top right;\n }\n </style>\n </head>\n <body>\n
+ \ <h1>whoa there, pardner!</h1>\n \n\n\n<p>we're sorry, but you appear
+ to be a bot and we've seen too many requests\nfrom you lately. we enforce
+ a hard speed limit on requests that appear to come\nfrom bots to prevent abuse.</p>\n\n<p>if
+ you are not a bot but are spoofing one via your browser's user agent\nstring:
+ please change your user agent string to avoid seeing this message\nagain.</p>\n\n<p>please
+ wait 6 second(s) and try again.</p>\n\n <p>as a reminder to developers,
+ we recommend that clients make no\n more than <a href=\"http://github.com/reddit/reddit/wiki/API\">one\n
+ \ request every two seconds</a> to avoid seeing this message.</p>\n </body>\n</html>\n"
+ headers:
+ Connection:
+ - keep-alive
+ Content-Type:
+ - text/html; charset=UTF-8
+ Date:
+ - Thu, 01 Feb 2024 18:20:31 GMT
+ Permissions-Policy:
+ - interest-cohort=()
+ Referrer-Policy:
+ - no-referrer-when-downgrade
+ Server:
+ - nginx
+ Transfer-Encoding:
+ - chunked
+ X-NA:
+ - '0'
+ X-NID:
+ - '-'
+ X-Page-Cache:
+ - MISS
+ X-RL:
+ - '1'
+ X-location:
+ - All
+ cache-control:
+ - max-age=1800
+ content-security-policy:
+ - 'default-src ''self'' ''unsafe-eval'' ''unsafe-inline'' data: blob: archive.org
+ web.archive.org web-static.archive.org wayback-api.archive.org analytics.archive.org
+ pragma.archivelab.org'
+ link:
+ - <http://www.reddit.com/r/PokemonGiveaway>; rel="original", <https://web.archive.org/web/timemap/link/http://www.reddit.com/r/PokemonGiveaway>;
+ rel="timemap"; type="application/link-format", <https://web.archive.org/web/http://www.reddit.com/r/PokemonGiveaway>;
+ rel="timegate", <https://web.archive.org/web/20120626000027/http://www.reddit.com:80/r/Pokemongiveaway>;
+ rel="first memento"; datetime="Tue, 26 Jun 2012 00:00:27 GMT", <https://web.archive.org/web/20141209120144/http://www.reddit.com:80/r/Pokemongiveaway/>;
+ rel="prev memento"; datetime="Tue, 09 Dec 2014 12:01:44 GMT", <https://web.archive.org/web/20150129034904/http://www.reddit.com/r/PokemonGiveaway>;
+ rel="memento"; datetime="Thu, 29 Jan 2015 03:49:04 GMT", <https://web.archive.org/web/20150208032710/http://www.reddit.com:80/r/Pokemongiveaway>;
+ rel="next memento"; datetime="Sun, 08 Feb 2015 03:27:10 GMT", <https://web.archive.org/web/20231020104350/https://www.reddit.com/r/Pokemongiveaway/>;
+ rel="last memento"; datetime="Fri, 20 Oct 2023 10:43:50 GMT"
+ memento-datetime:
+ - Thu, 29 Jan 2015 03:49:04 GMT
+ server-timing:
+ - exclusion.robots;dur=1.346979, exclusion.robots.policy;dur=1.258865, cdx.remote;dur=0.566878,
+ esindex;dur=0.070942, LoadShardBlock;dur=668.835646, PetaboxLoader3.datanode;dur=362.949615,
+ PetaboxLoader3.resolve;dur=109.386489, load_resource;dur=78.884440
+ x-app-server:
+ - wwwb-app220
+ x-archive-orig-cache-control:
+ - no-cache
+ x-archive-orig-cf-cache-status:
+ - EXPIRED
+ x-archive-orig-cf-ray:
+ - 1b02752d98b0012c-SJC
+ x-archive-orig-connection:
+ - close
+ x-archive-orig-content-length:
+ - '-1'
+ x-archive-orig-date:
+ - Thu, 29 Jan 2015 03:49:04 GMT
+ x-archive-orig-edge-control:
+ - bypass-cache
+ x-archive-orig-retry-after:
+ - '6'
+ x-archive-orig-server:
+ - cloudflare-nginx
+ x-archive-orig-vary:
+ - accept-encoding
+ x-archive-orig-x-content-type-options:
+ - nosniff
+ x-archive-orig-x-frame-options:
+ - SAMEORIGIN
+ x-archive-orig-x-moose:
+ - majestic
+ x-archive-orig-x-ua-compatible:
+ - IE=edge
+ x-archive-orig-x-xss-protection:
+ - 1; mode=block
+ x-archive-src:
+ - liveweb-20150129011011/live-20150129000440-wwwb-app16.us.archive.org.warc.gz
+ x-tr:
+ - '1820'
+ x-ts:
+ - '429'
+ status:
+ code: 429
+ message: Too Many Requests
+version: 1
diff --git a/src/wayback/tests/test_client.py b/src/wayback/tests/test_client.py
index f236a76..0a30f82 100644
--- a/src/wayback/tests/test_client.py
+++ b/src/wayback/tests/test_client.py
@@ -609,6 +609,16 @@ def test_get_memento_raises_no_memento_error():
'20170929002712')
+@ia_vcr.use_cassette()
+def test_get_memento_works_on_archived_rate_limit_responses():
+ with WaybackClient() as client:
+ memento = client.get_memento('http://www.reddit.com/r/PokemonGiveaway',
+ timestamp=datetime(2015, 1, 29, 3, 49, 4),
+ exact=True)
+ assert 'http://www.reddit.com/r/PokemonGiveaway' == memento.url
+ assert 429 == memento.status_code
+
+
@ia_vcr.use_cassette()
def test_get_memento_follows_historical_redirects():
with WaybackClient() as client:
|
WaybackSession not properly handling archived 429 (rate limit) responses
As per the memento spec, and as noted in the comments in _client.py (line ~987), 4XX status responses should also be returned by the memento server exactly as they were archived. WaybackClient has code to work around this, but WaybackSession currently assumes that all 429s are real and throws an exception instead of returning the response.
Here's an example archived link where reddit returned a 429 to the archive bot: https://web.archive.org/web/20150129034904/http://www.reddit.com/r/PokemonGiveaway
This happens in _client.py around line ~458: even though the response has the memento-datetime header, execution is gated through the if statements. The second one only checks whether the response code is 429, without seeing if it has memento-datetime, and then throws an exception instead of returning the response.
*untested*: I suspect there should be an outer if statement that checks for the memento-datetime header and, if present, always returns the response, since these are recorded errors and the client/calling code can handle them if needed. The rest can be handled in the else branch. I can't currently reproduce a real 429 from the archive, so I can't validate whether this would fully solve the issue.
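Something like this gating is what I mean (a simplified sketch, not the actual wayback code):
```python
def classify_response(response):
    """Decide how a response should be handled (sketch only)."""
    if "Memento-Datetime" in response.headers:
        # An archived 4XX is still a valid memento; hand it back verbatim.
        return "return memento"
    if response.status_code == 429:
        # Only a non-memento 429 is a real rate limit from the archive.
        return "raise RateLimitError"
    return "return response"
```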
|
0.0
|
477c44439abdebaf4d9942c7cb2aa55c3099b846
|
[
"src/wayback/tests/test_client.py::test_get_memento_works_on_archived_rate_limit_responses"
] |
[
"src/wayback/tests/test_client.py::test_search",
"src/wayback/tests/test_client.py::test_search_with_date",
"src/wayback/tests/test_client.py::test_search_with_timezone",
"src/wayback/tests/test_client.py::test_search_multipage",
"src/wayback/tests/test_client.py::test_search_cannot_iterate_after_session_closing",
"src/wayback/tests/test_client.py::test_search_does_not_repeat_results",
"src/wayback/tests/test_client.py::test_search_raises_for_blocked_urls",
"src/wayback/tests/test_client.py::test_search_with_filter",
"src/wayback/tests/test_client.py::test_search_with_filter_list",
"src/wayback/tests/test_client.py::test_search_with_filter_tuple",
"src/wayback/tests/test_client.py::test_search_removes_malformed_entries",
"src/wayback/tests/test_client.py::test_search_handles_no_length_cdx_records",
"src/wayback/tests/test_client.py::test_search_handles_bad_timestamp_cdx_records",
"src/wayback/tests/test_client.py::test_get_memento",
"src/wayback/tests/test_client.py::test_get_memento_with_date_datetime",
"src/wayback/tests/test_client.py::test_get_memento_with_string_datetime",
"src/wayback/tests/test_client.py::test_get_memento_with_inexact_string_datetime",
"src/wayback/tests/test_client.py::test_get_memento_handles_non_utc_datetime",
"src/wayback/tests/test_client.py::test_get_memento_with_invalid_datetime_type",
"src/wayback/tests/test_client.py::test_get_memento_with_requires_datetime_with_regular_url",
"src/wayback/tests/test_client.py::test_get_memento_with_archive_url",
"src/wayback/tests/test_client.py::test_get_memento_with_cdx_record",
"src/wayback/tests/test_client.py::test_get_memento_with_mode",
"src/wayback/tests/test_client.py::test_get_memento_with_mode_string",
"src/wayback/tests/test_client.py::test_get_memento_with_mode_boolean_is_not_allowed",
"src/wayback/tests/test_client.py::test_get_memento_target_window",
"src/wayback/tests/test_client.py::test_get_memento_raises_when_memento_is_outside_target_window",
"src/wayback/tests/test_client.py::test_get_memento_with_redirects",
"src/wayback/tests/test_client.py::test_get_memento_with_path_based_redirects",
"src/wayback/tests/test_client.py::test_get_memento_with_schemeless_redirects",
"src/wayback/tests/test_client.py::test_get_memento_raises_for_mementos_that_redirect_in_a_loop",
"src/wayback/tests/test_client.py::test_get_memento_with_redirect_in_view_mode",
"src/wayback/tests/test_client.py::test_get_memento_should_fail_for_non_playbackable_mementos",
"src/wayback/tests/test_client.py::test_get_memento_raises_blocked_error",
"src/wayback/tests/test_client.py::test_get_memento_raises_no_memento_error",
"src/wayback/tests/test_client.py::test_get_memento_follows_historical_redirects",
"src/wayback/tests/test_client.py::test_get_memento_follow_redirects_does_not_follow_historical_redirects",
"src/wayback/tests/test_client.py::test_get_memento_returns_memento_with_accurate_url",
"src/wayback/tests/test_client.py::TestWaybackSession::test_request_retries",
"src/wayback/tests/test_client.py::TestWaybackSession::test_stops_after_given_retries",
"src/wayback/tests/test_client.py::TestWaybackSession::test_only_retries_some_errors",
"src/wayback/tests/test_client.py::TestWaybackSession::test_raises_rate_limit_error",
"src/wayback/tests/test_client.py::TestWaybackSession::test_rate_limit_error_includes_retry_after",
"src/wayback/tests/test_client.py::TestWaybackSession::test_timeout_applied_session",
"src/wayback/tests/test_client.py::TestWaybackSession::test_timeout_applied_request",
"src/wayback/tests/test_client.py::TestWaybackSession::test_timeout_empty",
"src/wayback/tests/test_client.py::TestWaybackSession::test_search_rate_limits",
"src/wayback/tests/test_client.py::TestWaybackSession::test_memento_rate_limits"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-02-01 18:36:00+00:00
|
bsd-3-clause
| 2,075 |
|
edkrueger__python-tree-2
|
diff --git a/resc/tree.py b/resc/tree.py
index a280c8e..647f16a 100644
--- a/resc/tree.py
+++ b/resc/tree.py
@@ -13,7 +13,8 @@ class Tree:
self.node_id = node_id
self.children = children
self.steps_from_root = None
- self.steps_from_leaf = None
+ self.min_steps_from_leaf = None
+ self.max_steps_from_leaf = None
@classmethod
def from_dict(cls, tree_dict):
@@ -72,29 +73,42 @@ class Tree:
for child in node.children:
queue.append((child, steps + 1))
- def _find_steps_from_leaf(self):
+ def _find_steps_from_leaf(self, min_max):
"""Finds and sets steps_from_leaf for each node in a non-circular tree.
- steps_from_leaf is the minimum number of steps form a leaf."""
+ steps_from_leaf is the minimum / maximum number of steps form a leaf."""
if self.children == []:
- self.steps_from_leaf = 0
+
+ if min_max == "min":
+ self.min_steps_from_leaf = 0
+ if min_max == "max":
+ self.max_steps_from_leaf = 0
else:
children_steps = []
for child in self.children:
- child._find_steps_from_leaf() # pylint: disable=protected-access
- children_steps.append(child.steps_from_leaf)
- self.steps_from_leaf = min(children_steps) + 1
+ # pylint: disable=protected-access
+ child._find_steps_from_leaf(min_max=min_max)
+
+ if min_max == "min":
+ children_steps.append(child.min_steps_from_leaf)
+ if min_max == "max":
+ children_steps.append(child.max_steps_from_leaf)
+
+ if min_max == "min":
+ self.min_steps_from_leaf = min(children_steps) + 1
+ if min_max == "max":
+ self.max_steps_from_leaf = max(children_steps) + 1
- def find_steps_from_leaf(self):
- """Finds and sets steps_from_leaf for each node.
- steps_from_leaf is the minimum number of steps form a leaf.
+ def find_steps_from_leaf(self, min_max):
+ """Finds and sets the min/max steps_from_leaf for each node.
+ The result is set in the min_steps_from_leaf / max_steps_from_leaf attr.
Only works on non-circular trees."""
if self.is_circular():
raise ValueError("Cannot call compute_steps_from_root on a circular Tree.")
- self._find_steps_from_leaf()
+ self._find_steps_from_leaf(min_max=min_max)
|
edkrueger/python-tree
|
b8bddb1f03218e59ea8027c76e6900017d281382
|
diff --git a/tests/test_tree.py b/tests/test_tree.py
index 88a5f50..e4ee167 100644
--- a/tests/test_tree.py
+++ b/tests/test_tree.py
@@ -62,14 +62,14 @@ def test_find_steps_from_root():
circular_tree.find_steps_from_root()
-def test_find_steps_from_leaf():
+def test_find_min_steps_from_leaf():
"""Tests if Tree.find_steps_from_root."""
tree = Tree.from_dict({"a": {"b": {"d": {}}, "c": {"e": {"f": {}}}}})
- tree.find_steps_from_leaf()
+ tree.find_steps_from_leaf(min_max="min")
steps_lookup = {}
- tree.visit_all(lambda e: steps_lookup.update({e.node_id: e.steps_from_leaf}))
+ tree.visit_all(lambda e: steps_lookup.update({e.node_id: e.min_steps_from_leaf}))
assert steps_lookup["a"] == 2
assert steps_lookup["b"] == 1
assert steps_lookup["c"] == 2
@@ -80,4 +80,25 @@ def test_find_steps_from_leaf():
circular_tree = Tree.from_dict({"a": {"b": {}, "c": {"a": {}}}})
with pytest.raises(ValueError):
- circular_tree.find_steps_from_leaf()
+ circular_tree.find_steps_from_leaf(min_max="min")
+
+
+def test_find_max_steps_from_leaf():
+ """Tests if Tree.find_steps_from_root."""
+
+ tree = Tree.from_dict({"a": {"b": {"d": {}}, "c": {"e": {"f": {}}}}})
+ tree.find_steps_from_leaf(min_max="max")
+
+ steps_lookup = {}
+ tree.visit_all(lambda e: steps_lookup.update({e.node_id: e.max_steps_from_leaf}))
+ assert steps_lookup["a"] == 3
+ assert steps_lookup["b"] == 1
+ assert steps_lookup["c"] == 2
+ assert steps_lookup["d"] == 0
+ assert steps_lookup["e"] == 1
+ assert steps_lookup["f"] == 0
+
+ circular_tree = Tree.from_dict({"a": {"b": {}, "c": {"a": {}}}})
+
+ with pytest.raises(ValueError):
+ circular_tree.find_steps_from_leaf(min_max="max")
|
change find_steps_from_leaf into find_min_steps_from_leaf, find_max_steps_from_leaf
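As realized in the patch above, the split is a `min_max` argument on the existing method rather than two new methods; usage per the updated tests (import path assumed from this repo's layout):
```python
from resc.tree import Tree

tree = Tree.from_dict({"a": {"b": {"d": {}}, "c": {"e": {"f": {}}}}})
tree.find_steps_from_leaf(min_max="min")  # sets min_steps_from_leaf on each node
tree.find_steps_from_leaf(min_max="max")  # sets max_steps_from_leaf on each node
```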
|
0.0
|
b8bddb1f03218e59ea8027c76e6900017d281382
|
[
"tests/test_tree.py::test_find_min_steps_from_leaf",
"tests/test_tree.py::test_find_max_steps_from_leaf"
] |
[
"tests/test_tree.py::test_tree_from_constructor",
"tests/test_tree.py::test_tree_from_dict",
"tests/test_tree.py::test_is_circular",
"tests/test_tree.py::test_find_steps_from_root"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-02-02 16:36:49+00:00
|
mit
| 2,076 |
|
eerimoq__bincopy-41
|
diff --git a/bincopy.py b/bincopy.py
index c26b486..fbe3ba5 100755
--- a/bincopy.py
+++ b/bincopy.py
@@ -361,22 +361,39 @@ class Segment:
def address(self):
return self.minimum_address // self.word_size_bytes
- def chunks(self, size=32, alignment=1):
- """Return chunks of the data aligned as given by `alignment`. `size`
- must be a multiple of `alignment`. Each chunk is itself a Segment.
- Both `size` and `alignment` are in words.
+ def chunks(self, size=32, alignment=1, padding=b''):
+ """Yield data chunks of `size` words, aligned as given by `alignment`.
+
+ Each chunk is itself a Segment.
+
+ `size` and `alignment` are in words. `size` must be a multiple of
+ `alignment`. If set, `padding` must be a word value.
+
+ If `padding` is set, the first and final chunks are padded so that:
+ 1. The first chunk is aligned even if the segment itself is not.
+ 2. The final chunk's size is a multiple of `alignment`.
"""
if (size % alignment) != 0:
raise Error(f'size {size} is not a multiple of alignment {alignment}')
+ if padding and len(padding) != self.word_size_bytes:
+ raise Error(f'padding must be a word value (size {self.word_size_bytes}),'
+ f' got {padding}')
+
size *= self.word_size_bytes
alignment *= self.word_size_bytes
address = self.minimum_address
data = self.data
- # First chunk may be shorter than `size` due to alignment.
+ # Apply padding to first and final chunk, if padding is non-empty.
+ align_offset = address % alignment
+ address -= align_offset * bool(padding)
+ data = align_offset // self.word_size_bytes * padding + data
+ data += (alignment - len(data)) % alignment // self.word_size_bytes * padding
+
+ # First chunk may be non-aligned and shorter than `size` if padding is empty.
chunk_offset = (address % alignment)
if chunk_offset != 0:
@@ -632,21 +649,51 @@ class Segments:
self._list = new_list
- def chunks(self, size=32, alignment=1):
- """Iterate over all segments and return chunks of the data aligned as
- given by `alignment`. `size` must be a multiple of
- `alignment`. Each chunk is in turn a smaller Segment. Both `size` and
- `alignment` are in words.
+ def chunks(self, size=32, alignment=1, padding=b''):
+ """Iterate over all segments and yield chunks of the data.
+
+ The chunks are `size` words long, aligned as given by `alignment`.
+
+ Each chunk is itself a Segment.
+
+ `size` and `alignment` are in words. `size` must be a multiple of
+ `alignment`. If set, `padding` must be a word value.
+
+ If `padding` is set, the first and final chunks of each segment are
+ padded so that:
+ 1. The first chunk is aligned even if the segment itself is not.
+ 2. The final chunk's size is a multiple of `alignment`.
"""
if (size % alignment) != 0:
raise Error(f'size {size} is not a multiple of alignment {alignment}')
+ if padding and len(padding) != self.word_size_bytes:
+ raise Error(f'padding must be a word value (size {self.word_size_bytes}),'
+ f' got {padding}')
+
+ previous = Segment(-1, -1, b'', 1)
+
for segment in self:
- for chunk in segment.chunks(size, alignment):
+ for chunk in segment.chunks(size, alignment, padding):
+ # When chunks are padded to alignment, the final chunk of the previous
+ # segment and the first chunk of the current segment may overlap by
+ # one alignment block. To avoid overwriting data from the lower
+ # segment, the chunks must be merged.
+ if chunk.address < previous.address + len(previous):
+ low = previous.data[-alignment * self.word_size_bytes:]
+ high = chunk.data[:alignment * self.word_size_bytes]
+ merged = int.to_bytes(int.from_bytes(low, 'big') ^
+ int.from_bytes(high, 'big') ^
+ int.from_bytes(alignment * padding, 'big'),
+ alignment * self.word_size_bytes, 'big')
+ chunk.data = merged + chunk.data[alignment * self.word_size_bytes:]
+
yield chunk
+ previous = chunk
+
def __len__(self):
"""Get the number of segments.
|
eerimoq/bincopy
|
e608554fc8f0f828ca9a5547e87916f17f9f52ca
|
diff --git a/tests/test_bincopy.py b/tests/test_bincopy.py
index 3a81584..945086a 100644
--- a/tests/test_bincopy.py
+++ b/tests/test_bincopy.py
@@ -910,6 +910,12 @@ class BinCopyTest(unittest.TestCase):
self.assertEqual(str(cm.exception),
'size 4 is not a multiple of alignment 8')
+ with self.assertRaises(bincopy.Error) as cm:
+ list(binfile.segments.chunks(padding=b'\xff\xff'))
+
+ self.assertEqual(str(cm.exception),
+ r"padding must be a word value (size 1), got b'\xff\xff'")
+
def test_segment(self):
binfile = bincopy.BinFile()
binfile.add_binary(b'\x00\x01\x02\x03\x04', 2)
@@ -1863,6 +1869,44 @@ Data ranges:
first_word = int.from_bytes(binfile[:binfile.minimum_address + 1], 'little')
self.assertEqual(0xC9E4, first_word)
+ def test_chunk_padding(self):
+ records = (':02000004000AF0\n'
+ ':10B8440000000000000000009630000007770000B0\n')
+ hexfile = bincopy.BinFile()
+ hexfile.add_ihex(records)
+ align = 8
+ size = 16
+ chunks = hexfile.segments.chunks(size=size, alignment=align, padding=b'\xff')
+ chunks = list(chunks)
+ assert not any(c.address % align for c in chunks)
+ assert not any(len(c) % align for c in chunks)
+
+ def test_merge_chunks(self):
+ records = (':0A0000001010101010101010101056\n'
+ ':0A000E001010101010101010101048\n')
+ hexfile = bincopy.BinFile()
+ hexfile.add_ihex(records)
+ align = 8
+ size = 16
+ chunks = hexfile.segments.chunks(size=size, alignment=align, padding=b'\xff')
+ chunks = list(chunks)
+ assert list(chunks[-1]) == [8, b'\x10\x10\xff\xff\xff\xff\x10\x10\x10\x10\x10'
+ b'\x10\x10\x10\x10\x10']
+
+ def test_merge_chunks_16(self):
+ records = (':1000000010101010101010101010101010101010F0\n'
+ ':10000A0010101010101010101010101010101010E6\n')
+ hexfile = bincopy.BinFile(word_size_bits=16)
+ hexfile.add_ihex(records)
+ align = 6
+ size = 12
+ chunks = hexfile.segments.chunks(size=size, alignment=align,
+ padding=b'\xff\xff')
+ chunks = list(chunks)
+ assert list(chunks[-1]) == [6, b'\x10\x10\x10\x10\xff\xff\xff\xff\x10\x10\x10'
+ b'\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10'
+ b'\x10\x10']
+
if __name__ == '__main__':
unittest.main()
|
First chunk is misaligned if segment has non-aligned start address
```python
records = """
:02000004000AF0
:10B8440000000000000000009630000007770000B0
"""
hexfile = bincopy.BinFile()
hexfile.add_ihex(records)
align = 8
chunks = hexfile.segments.chunks(size=16, alignment=align)
assert not any(c.address % align for c in chunks)
```
IMO, the first chunk should be padded such that it is also aligned.
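With the `padding` argument introduced by the patch above, the reproduction aligns (reusing `hexfile` and `align` from the snippet; assertions as in the new test):
```python
chunks = list(hexfile.segments.chunks(size=16, alignment=align, padding=b'\xff'))
assert not any(c.address % align for c in chunks)  # aligned start addresses
assert not any(len(c) % align for c in chunks)     # sizes padded to the alignment
```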
|
0.0
|
e608554fc8f0f828ca9a5547e87916f17f9f52ca
|
[
"tests/test_bincopy.py::BinCopyTest::test_chunk_padding",
"tests/test_bincopy.py::BinCopyTest::test_chunks_bad_arguments",
"tests/test_bincopy.py::BinCopyTest::test_merge_chunks",
"tests/test_bincopy.py::BinCopyTest::test_merge_chunks_16"
] |
[
"tests/test_bincopy.py::BinCopyTest::test_add",
"tests/test_bincopy.py::BinCopyTest::test_add_elf",
"tests/test_bincopy.py::BinCopyTest::test_add_elf_blinky",
"tests/test_bincopy.py::BinCopyTest::test_add_elf_gcc",
"tests/test_bincopy.py::BinCopyTest::test_add_elf_iar",
"tests/test_bincopy.py::BinCopyTest::test_add_elf_keil",
"tests/test_bincopy.py::BinCopyTest::test_add_file",
"tests/test_bincopy.py::BinCopyTest::test_add_files",
"tests/test_bincopy.py::BinCopyTest::test_add_ihex_bad_record_type_6",
"tests/test_bincopy.py::BinCopyTest::test_add_ihex_record_type_3",
"tests/test_bincopy.py::BinCopyTest::test_add_ihex_record_type_5",
"tests/test_bincopy.py::BinCopyTest::test_add_microchip_hex_record",
"tests/test_bincopy.py::BinCopyTest::test_array",
"tests/test_bincopy.py::BinCopyTest::test_as_ihex_bad_address_length_bits",
"tests/test_bincopy.py::BinCopyTest::test_as_srec_bad_address_length",
"tests/test_bincopy.py::BinCopyTest::test_as_srec_record_5",
"tests/test_bincopy.py::BinCopyTest::test_as_srec_record_6",
"tests/test_bincopy.py::BinCopyTest::test_as_srec_record_8",
"tests/test_bincopy.py::BinCopyTest::test_bad_ihex",
"tests/test_bincopy.py::BinCopyTest::test_bad_srec",
"tests/test_bincopy.py::BinCopyTest::test_bad_ti_txt",
"tests/test_bincopy.py::BinCopyTest::test_bad_word_size",
"tests/test_bincopy.py::BinCopyTest::test_binary",
"tests/test_bincopy.py::BinCopyTest::test_binary_16",
"tests/test_bincopy.py::BinCopyTest::test_chunks_list",
"tests/test_bincopy.py::BinCopyTest::test_command_line_convert_elf",
"tests/test_bincopy.py::BinCopyTest::test_command_line_convert_input_formats",
"tests/test_bincopy.py::BinCopyTest::test_command_line_convert_output_format_binary",
"tests/test_bincopy.py::BinCopyTest::test_command_line_convert_output_format_binary_bad_addresses",
"tests/test_bincopy.py::BinCopyTest::test_command_line_convert_output_formats",
"tests/test_bincopy.py::BinCopyTest::test_command_line_convert_output_formats_bad_parameters",
"tests/test_bincopy.py::BinCopyTest::test_command_line_convert_overlapping",
"tests/test_bincopy.py::BinCopyTest::test_command_line_convert_overwrite",
"tests/test_bincopy.py::BinCopyTest::test_command_line_dump_commands_one_file",
"tests/test_bincopy.py::BinCopyTest::test_command_line_fill",
"tests/test_bincopy.py::BinCopyTest::test_command_line_fill_max_words",
"tests/test_bincopy.py::BinCopyTest::test_command_line_fill_outfile",
"tests/test_bincopy.py::BinCopyTest::test_command_line_fill_stdout",
"tests/test_bincopy.py::BinCopyTest::test_command_line_fill_value",
"tests/test_bincopy.py::BinCopyTest::test_command_line_info_one_file",
"tests/test_bincopy.py::BinCopyTest::test_command_line_info_one_file_16_bits_words",
"tests/test_bincopy.py::BinCopyTest::test_command_line_info_two_files",
"tests/test_bincopy.py::BinCopyTest::test_command_line_info_two_files_with_header_encoding",
"tests/test_bincopy.py::BinCopyTest::test_command_line_non_existing_file",
"tests/test_bincopy.py::BinCopyTest::test_command_line_non_existing_file_debug",
"tests/test_bincopy.py::BinCopyTest::test_command_line_pretty",
"tests/test_bincopy.py::BinCopyTest::test_compare_ti_txt",
"tests/test_bincopy.py::BinCopyTest::test_exclude_crop",
"tests/test_bincopy.py::BinCopyTest::test_exclude_edge_cases",
"tests/test_bincopy.py::BinCopyTest::test_execution_start_address",
"tests/test_bincopy.py::BinCopyTest::test_fill",
"tests/test_bincopy.py::BinCopyTest::test_fill_max_words",
"tests/test_bincopy.py::BinCopyTest::test_fill_word_size_16",
"tests/test_bincopy.py::BinCopyTest::test_header_default_encoding",
"tests/test_bincopy.py::BinCopyTest::test_header_no_encoding",
"tests/test_bincopy.py::BinCopyTest::test_hexdump_1",
"tests/test_bincopy.py::BinCopyTest::test_hexdump_2",
"tests/test_bincopy.py::BinCopyTest::test_hexdump_empty",
"tests/test_bincopy.py::BinCopyTest::test_hexdump_gaps",
"tests/test_bincopy.py::BinCopyTest::test_i16hex",
"tests/test_bincopy.py::BinCopyTest::test_i16hex_address_above_1meg",
"tests/test_bincopy.py::BinCopyTest::test_i32hex",
"tests/test_bincopy.py::BinCopyTest::test_i32hex_address_above_4gig",
"tests/test_bincopy.py::BinCopyTest::test_i8hex",
"tests/test_bincopy.py::BinCopyTest::test_i8hex_address_above_64k",
"tests/test_bincopy.py::BinCopyTest::test_ignore_blank_lines_hex",
"tests/test_bincopy.py::BinCopyTest::test_ignore_blank_lines_srec",
"tests/test_bincopy.py::BinCopyTest::test_ihex",
"tests/test_bincopy.py::BinCopyTest::test_ihex_crc",
"tests/test_bincopy.py::BinCopyTest::test_info",
"tests/test_bincopy.py::BinCopyTest::test_init_files",
"tests/test_bincopy.py::BinCopyTest::test_issue_4_1",
"tests/test_bincopy.py::BinCopyTest::test_issue_4_2",
"tests/test_bincopy.py::BinCopyTest::test_iterate_segments",
"tests/test_bincopy.py::BinCopyTest::test_layout_empty_main",
"tests/test_bincopy.py::BinCopyTest::test_layout_in_exclude_2_4",
"tests/test_bincopy.py::BinCopyTest::test_layout_out",
"tests/test_bincopy.py::BinCopyTest::test_minimum_maximum_length",
"tests/test_bincopy.py::BinCopyTest::test_non_sorted_segments",
"tests/test_bincopy.py::BinCopyTest::test_overwrite",
"tests/test_bincopy.py::BinCopyTest::test_performance",
"tests/test_bincopy.py::BinCopyTest::test_print",
"tests/test_bincopy.py::BinCopyTest::test_segment",
"tests/test_bincopy.py::BinCopyTest::test_segment_len",
"tests/test_bincopy.py::BinCopyTest::test_segment_len_16",
"tests/test_bincopy.py::BinCopyTest::test_segments_list",
"tests/test_bincopy.py::BinCopyTest::test_set_get_item",
"tests/test_bincopy.py::BinCopyTest::test_set_get_item_16",
"tests/test_bincopy.py::BinCopyTest::test_srec",
"tests/test_bincopy.py::BinCopyTest::test_srec_ihex_binary",
"tests/test_bincopy.py::BinCopyTest::test_srec_no_header_encoding",
"tests/test_bincopy.py::BinCopyTest::test_ti_txt",
"tests/test_bincopy.py::BinCopyTest::test_verilog_vmem",
"tests/test_bincopy.py::BinCopyTest::test_word_size",
"tests/test_bincopy.py::BinCopyTest::test_word_size_custom_padding",
"tests/test_bincopy.py::BinCopyTest::test_word_size_default_padding"
] |
{
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-10-07 09:55:00+00:00
|
mit
| 2,077 |
|
eigenein__protobuf-37
|
diff --git a/README.md b/README.md
index 2f30b57..6f72ed6 100644
--- a/README.md
+++ b/README.md
@@ -101,12 +101,12 @@ class Message:
foo: List[int32] = field(1, default_factory=list)
```
+It's also possible to wrap a field type with [`typing.Optional`](https://docs.python.org/3/library/typing.html#typing.Optional). If `None` is assigned to an `Optional` field, then the field will be skipped during serialization.
+
### Default values
In `pure-protobuf` it's developer's responsibility to take care of default values. If encoded message does not contain a particular element, the corresponding field stays unassigned. It means that the standard `default` and `default_factory` parameters of the `field` function work as usual.
-It's allowed to wrap a field type with [`typing.Optional`](https://docs.python.org/3/library/typing.html#typing.Optional). It's discarded by `pure-protobuf` but allows you to hint a nullable field properly.
-
### Enumerations
Subclasses of the standard [`IntEnum`](https://docs.python.org/3/library/enum.html#intenum) class are supported:
diff --git a/pure_protobuf/dataclasses_.py b/pure_protobuf/dataclasses_.py
index b51e930..2ce4e96 100644
--- a/pure_protobuf/dataclasses_.py
+++ b/pure_protobuf/dataclasses_.py
@@ -124,7 +124,7 @@ def make_field(number: int, name: str, type_: Any) -> Tuple[int, Field]:
Figure out how to serialize and de-serialize the field.
Returns the field number and a corresponding ``Field`` instance.
"""
- type_ = remove_optional(type_)
+ is_optional, type_ = get_optional(type_)
is_repeated, type_ = get_repeated(type_)
if isinstance(type_, type) and issubclass(type_, Message):
@@ -144,7 +144,7 @@ def make_field(number: int, name: str, type_: Any) -> Tuple[int, Field]:
if not is_repeated:
# Non-repeated field.
- return number, NonRepeatedField(number, name, serializer)
+ return number, NonRepeatedField(number, name, serializer, is_optional)
elif serializer.wire_type != WireType.BYTES:
# Repeated fields of scalar numeric types are packed by default.
# See also: https://developers.google.com/protocol-buffers/docs/encoding#packed
@@ -154,9 +154,9 @@ def make_field(number: int, name: str, type_: Any) -> Tuple[int, Field]:
return number, UnpackedRepeatedField(number, name, serializer)
-def remove_optional(type_: Any) -> Any:
+def get_optional(type_: Any) -> Tuple[bool, Any]:
"""
- Removes ``Optional`` type annotation if present.
+ Extracts ``Optional`` type annotation if present.
This may be useful if a user wants to annotate a field with ``Optional[...]`` and set default to ``None``.
"""
if getattr(type_, '__origin__', None) is Union:
@@ -166,9 +166,9 @@ def remove_optional(type_: Any) -> Any:
if len(args) == 2 and NoneType in args:
# Extract inner type.
type_, = args - {NoneType}
- return type_
+ return True, type_
- return type_
+ return False, type_
def get_repeated(type_: Any) -> Tuple[bool, Any]:
diff --git a/pure_protobuf/fields.py b/pure_protobuf/fields.py
index a4976b9..0a8b4fb 100644
--- a/pure_protobuf/fields.py
+++ b/pure_protobuf/fields.py
@@ -61,8 +61,13 @@ class NonRepeatedField(Field):
See also: https://developers.google.com/protocol-buffers/docs/encoding#optional
"""
+ def __init__(self, number: int, name: str, serializer: Serializer, is_optional: bool):
+ super().__init__(number, name, serializer)
+ self.is_optional = is_optional
+
def validate(self, value: Any):
- self.serializer.validate(value)
+ if value is not None or not self.is_optional:
+ self.serializer.validate(value)
def dump(self, value: Any, io: IO):
if value is not None:
|
eigenein/protobuf
|
774403668782a332f1a5382268712ba092c8500f
|
diff --git a/tests/test_dataclasses.py b/tests/test_dataclasses.py
index 61af66b..25151d7 100644
--- a/tests/test_dataclasses.py
+++ b/tests/test_dataclasses.py
@@ -4,7 +4,7 @@
# noinspection PyCompatibility
from dataclasses import dataclass
-from typing import Any, List, Tuple
+from typing import Any, List, Optional, Tuple
from pytest import mark, raises
@@ -17,6 +17,7 @@ from pure_protobuf.dataclasses_ import make_field, message
(1, 'a', types.int32, 150, b'\x08\x96\x01'),
(1, 'a', List[types.int32], [1, 150, 2], b'\x0A\x04\x01\x96\x01\x02'),
(1, 'a', List[bytes], [b'\x42', b'\x43'], b'\x0A\x01\x42' b'\x0A\x01\x43'),
+ (1, 'a', Optional[bytes], None, b''),
# TODO: repeated embedded message.
])
def test_make_field(number: int, name: str, type_: Any, value: Any, expected: bytes):
diff --git a/tests/test_fields.py b/tests/test_fields.py
index feadd82..54f852d 100644
--- a/tests/test_fields.py
+++ b/tests/test_fields.py
@@ -4,7 +4,7 @@
from io import BytesIO
from typing import List, Optional
-from pytest import mark
+from pytest import mark, raises
from pure_protobuf.enums import WireType
from pure_protobuf.fields import NonRepeatedField, PackedRepeatedField, UnpackedRepeatedField
@@ -14,8 +14,8 @@ from pure_protobuf.serializers import BytesSerializer, UnsignedVarintSerializer
@mark.parametrize('value, bytes_', [
(b'testing', b'\x0A\x07testing'),
])
-def test_scalar_field(value: bytes, bytes_: bytes):
- field = NonRepeatedField(1, 'a', BytesSerializer())
+def test_non_repeated_field(value: bytes, bytes_: bytes):
+ field = NonRepeatedField(1, 'a', BytesSerializer(), False)
assert field.dumps(value) == bytes_
with BytesIO(bytes_) as io:
assert field.load(WireType(UnsignedVarintSerializer().load(io) & 0b111), io) == value
@@ -25,8 +25,16 @@ def test_scalar_field(value: bytes, bytes_: bytes):
(1, b'\x08\x01'),
(None, b''),
])
-def test_optional_scalar_field(value: Optional[int], expected: bytes):
- assert NonRepeatedField(1, 'a', UnsignedVarintSerializer()).dumps(value) == expected
+def test_optional_non_repeated_field(value: Optional[int], expected: bytes):
+ assert NonRepeatedField(1, 'a', UnsignedVarintSerializer(), True).dumps(value) == expected
+
+
[email protected]('value', [
+ None,
+])
+def test_non_repeated_field_value_error(value: Optional[int]):
+ with raises(ValueError):
+ NonRepeatedField(1, 'a', UnsignedVarintSerializer(), False).validate(value)
@mark.parametrize('value, bytes_', [
|
Skip None values when serialising
With the `dataclasses` interface it's not possible to omit values while serialising a message.
|
0.0
|
774403668782a332f1a5382268712ba092c8500f
|
[
"tests/test_dataclasses.py::test_make_field[1-a-type_3-None-]",
"tests/test_fields.py::test_non_repeated_field[testing-\\n\\x07testing]",
"tests/test_fields.py::test_optional_non_repeated_field[1-\\x08\\x01]",
"tests/test_fields.py::test_optional_non_repeated_field[None-]",
"tests/test_fields.py::test_non_repeated_field_value_error[None]"
] |
[
"tests/test_dataclasses.py::test_make_field[1-a-uint32-150-\\x08\\x96\\x01]",
"tests/test_dataclasses.py::test_make_field[1-a-type_1-value1-\\n\\x04\\x01\\x96\\x01\\x02]",
"tests/test_dataclasses.py::test_make_field[1-a-type_2-value2-\\n\\x01B\\n\\x01C]",
"tests/test_dataclasses.py::test_make_field_value_error[1-a-uint32-None]",
"tests/test_dataclasses.py::test_make_field_key_error[type_0]",
"tests/test_dataclasses.py::test_message_type_error[type_0]",
"tests/test_fields.py::test_packed_repeated_field[value0-\\n\\x00]",
"tests/test_fields.py::test_packed_repeated_field[value1-\\n\\x01\\x03]",
"tests/test_fields.py::test_packed_repeated_field[value2-\\n\\x02\\x04\\x05]",
"tests/test_fields.py::test_unpacked_repeated_field[value0-]",
"tests/test_fields.py::test_unpacked_repeated_field[value1-\\x08\\x03]",
"tests/test_fields.py::test_unpacked_repeated_field[value2-\\x08\\x04\\x08\\x05]"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-03-26 09:30:03+00:00
|
mit
| 2,078 |
|
eigenfoo__littlemcmc-52
|
diff --git a/littlemcmc/sampling.py b/littlemcmc/sampling.py
index 2c13ed8..87d046a 100644
--- a/littlemcmc/sampling.py
+++ b/littlemcmc/sampling.py
@@ -248,8 +248,10 @@ def init_nuts(
on the variance of the tuning samples.
* jitter+adapt_diag : Same as `'adapt_diag'`, but use uniform jitter in [-1, 1]
as starting point in each chain.
- * adapt_full: Sample as `'adapt_diag'`, but adapts a dense mass matrix using the
+ * adapt_full: Same as `'adapt_diag'`, but adapts a dense mass matrix using the
sample covariances.
+ * jitter+adapt_full: Same as `'adapt_full'`, but use uniform jitter in [-1, 1]
+ as starting point in each chain.
**kwargs: keyword arguments
Extra keyword arguments are forwarded to littlemcmc.NUTS.
@@ -290,6 +292,11 @@ def init_nuts(
mean = start
cov = np.eye(size)
potential = QuadPotentialFullAdapt(size, mean, cov, 10)
+ elif init == "jitter+adapt_full":
+ start = 2 * np.random.rand(size) - 1
+ mean = start
+ cov = np.eye(size)
+ potential = QuadPotentialFullAdapt(size, mean, cov, 10)
else:
raise ValueError("Unknown initializer: {}.".format(init))
diff --git a/requirements-dev.txt b/requirements-dev.txt
index abd5ec6..8cae25b 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -1,3 +1,4 @@
+# Development tools
black
jupyter
mypy
@@ -10,7 +11,6 @@ pytest-html
# Packaging requirements
setuptools
twine
-wheel
# Sphinx requirements
numpydoc
diff --git a/requirements.txt b/requirements.txt
index 5fcd45a..e48c91e 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,3 +1,4 @@
+wheel
joblib>=0.14.1
numpy
scipy>=0.18.1
|
eigenfoo/littlemcmc
|
0c75163e2afe5aff0cdcce17c60c7998d2ace91d
|
diff --git a/tests/test_sampling.py b/tests/test_sampling.py
index 1892c73..d47e49f 100644
--- a/tests/test_sampling.py
+++ b/tests/test_sampling.py
@@ -13,17 +13,19 @@
# limitations under the License.
import numpy as np
+import pytest
import littlemcmc as lmc
from test_utils import logp_dlogp_func
-def test_init_nuts():
- size = 1
- init = "auto"
- random_seed = 42
- foo = lmc.init_nuts(
- logp_dlogp_func=logp_dlogp_func, size=size, init=init, random_seed=random_seed,
- )
[email protected](
+ "method", ["adapt_diag", "jitter+adapt_diag", "adapt_full", "jitter+adapt_full",],
+)
+def test_init_nuts(method):
+ start, step = lmc.init_nuts(logp_dlogp_func=logp_dlogp_func, size=1, init=method)
+ assert isinstance(start, np.ndarray)
+ assert len(start) == 1
+ assert isinstance(step, lmc.NUTS)
def test_hmc_sampling_runs():
|
New commits to pymc3/step_methods/hmc/
> This is an auto-generated issue, triggered by the `even-with-pymc3` workflow.
There have been new commits to `pymc3/step_methods/hmc/` since yesterday: please see
[the latest run of the `even-with-pymc3` GitHub Action](https://github.com/eigenfoo/littlemcmc/actions?query=workflow%3Aeven-with-pymc3)
and consider if these commits are worth mirroring in `littlemcmc`.
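For reference, a standalone sketch of the `jitter+adapt_full` initialization that the mirrored commits add (logic copied from the patch above):
```python
import numpy as np

size = 4
start = 2 * np.random.rand(size) - 1  # uniform jitter in [-1, 1] as starting point
mean = start
cov = np.eye(size)                    # identity seed for dense mass-matrix adaptation
assert np.all(np.abs(start) <= 1)
```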
|
0.0
|
0c75163e2afe5aff0cdcce17c60c7998d2ace91d
|
[
"tests/test_sampling.py::test_init_nuts[jitter+adapt_full]",
"tests/test_sampling.py::test_hmc_sampling_runs",
"tests/test_sampling.py::test_multiprocess_sampling_runs"
] |
[
"tests/test_sampling.py::test_init_nuts[adapt_diag]",
"tests/test_sampling.py::test_init_nuts[jitter+adapt_diag]",
"tests/test_sampling.py::test_init_nuts[adapt_full]",
"tests/test_sampling.py::test_nuts_sampling_runs"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-04-29 23:37:16+00:00
|
apache-2.0
| 2,079 |
|
eigenfoo__littlemcmc-62
|
diff --git a/docs/_static/notebooks/quickstart.ipynb b/docs/_static/notebooks/quickstart.ipynb
index 3ea0e3d..c171dbc 100644
--- a/docs/_static/notebooks/quickstart.ipynb
+++ b/docs/_static/notebooks/quickstart.ipynb
@@ -72,10 +72,6 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "/home/george/littlemcmc/venv/lib/python3.6/site-packages/ipykernel_launcher.py:2: RuntimeWarning: divide by zero encountered in log\n",
- " \n",
- "/home/george/littlemcmc/venv/lib/python3.6/site-packages/ipykernel_launcher.py:2: RuntimeWarning: divide by zero encountered in log\n",
- " \n",
"/home/george/littlemcmc/venv/lib/python3.6/site-packages/ipykernel_launcher.py:2: RuntimeWarning: divide by zero encountered in log\n",
" \n"
]
@@ -84,7 +80,7 @@
"source": [
"trace, stats = lmc.sample(\n",
" logp_dlogp_func=logp_dlogp_func,\n",
- " size=1,\n",
+ " model_ndim=1,\n",
" draws=1000,\n",
" tune=500,\n",
" progressbar=None, # HTML progress bars don't render well in RST.\n",
@@ -106,8 +102,8 @@
{
"data": {
"text/plain": [
- "array([[ 0.08150886, -0.91618983, -1.20928858, ..., -0.06641805,\n",
- " -1.00700313, 1.09983883]])"
+ "array([[ 0.91989736, 0.89910128, 0.88961585, ..., 1.31528136,\n",
+ " -1.52068354, -0.61179308]])"
]
},
"execution_count": 4,
@@ -147,24 +143,24 @@
{
"data": {
"text/plain": [
- "{'depth': array([2, 2, 1, ..., 1, 2, 2]),\n",
- " 'step_size': array([1.17740792, 1.17740792, 1.17740792, ..., 1.2966478 , 1.2966478 ,\n",
- " 1.2966478 ]),\n",
+ "{'depth': array([2, 2, 1, ..., 2, 2, 1]),\n",
+ " 'step_size': array([1.15894778, 1.15894778, 1.15894778, ..., 1.50735926, 1.50735926,\n",
+ " 1.50735926]),\n",
" 'tune': array([False, False, False, ..., False, False, False]),\n",
- " 'mean_tree_accept': array([0.87820821, 0.46292512, 0.87717804, ..., 1. , 0.86187468,\n",
- " 0.98578587]),\n",
- " 'step_size_bar': array([1.20720148, 1.20720148, 1.20720148, ..., 1.24048036, 1.24048036,\n",
- " 1.24048036]),\n",
- " 'tree_size': array([3., 3., 1., ..., 1., 3., 3.]),\n",
+ " 'mean_tree_accept': array([0.75240493, 1. , 1. , ..., 0.96866415, 0.96184355,\n",
+ " 1. ]),\n",
+ " 'step_size_bar': array([1.24138909, 1.24138909, 1.24138909, ..., 1.38460359, 1.38460359,\n",
+ " 1.38460359]),\n",
+ " 'tree_size': array([3., 3., 1., ..., 3., 3., 1.]),\n",
" 'diverging': array([False, False, False, ..., False, False, False]),\n",
- " 'energy_error': array([ 0. , 0.17517448, 0.13104529, ..., -0.00202233,\n",
- " 0.22495278, 0.04357828]),\n",
- " 'energy': array([1.18523089, 3.17544136, 1.8060492 , ..., 0.9244717 , 1.42648794,\n",
- " 1.55020885]),\n",
- " 'max_energy_error': array([ 0.18993171, 1.45544483, 0.13104529, ..., -0.00202233,\n",
- " 0.22495278, -0.21811027]),\n",
- " 'model_logp': array([-0.92226038, -1.33864044, -1.65012797, ..., -0.92114421,\n",
- " -1.42596619, -1.52376126])}"
+ " 'energy_error': array([ 0.13713966, -0.00798758, -0.00358263, ..., 0.09872431,\n",
+ " 0.1215682 , -0.40449202]),\n",
+ " 'energy': array([1.913862 , 1.5033014 , 1.48600433, ..., 1.82498413, 2.11625971,\n",
+ " 1.70961724]),\n",
+ " 'max_energy_error': array([ 0.45459613, -0.09927514, -0.00358263, ..., -0.25742807,\n",
+ " -0.35807476, -0.40449202]),\n",
+ " 'model_logp': array([-1.34204411, -1.32313009, -1.31464672, ..., -1.78392107,\n",
+ " -2.07517774, -1.10608392])}"
]
},
"execution_count": 6,
@@ -218,7 +214,7 @@
"source": [
"trace, stats = lmc.sample(\n",
" logp_dlogp_func=logp_dlogp_func,\n",
- " size=1,\n",
+ " model_ndim=1,\n",
" draws=1000,\n",
" tune=500,\n",
" progressbar=None,\n",
@@ -232,10 +228,10 @@
"metadata": {},
"outputs": [],
"source": [
- "step = lmc.NUTS(logp_dlogp_func=logp_dlogp_func, size=1, target_accept=0.9)\n",
+ "step = lmc.NUTS(logp_dlogp_func=logp_dlogp_func, model_ndim=1, target_accept=0.9)\n",
"trace, stats = lmc.sample(\n",
" logp_dlogp_func=logp_dlogp_func,\n",
- " size=1,\n",
+ " model_ndim=1,\n",
" step=step,\n",
" draws=1000,\n",
" tune=500,\n",
diff --git a/littlemcmc/base_hmc.py b/littlemcmc/base_hmc.py
index 4946ad2..1d08bd4 100644
--- a/littlemcmc/base_hmc.py
+++ b/littlemcmc/base_hmc.py
@@ -32,7 +32,7 @@ class BaseHMC:
def __init__(
self,
logp_dlogp_func: Callable[[np.ndarray], Tuple[np.ndarray, np.ndarray]],
- size: int,
+ model_ndim: int,
scaling: Optional[np.ndarray],
is_cov: bool,
potential: QuadPotential,
@@ -52,7 +52,7 @@ class BaseHMC:
logp_dlogp_func : Python callable
Python callable that returns the log-probability and derivative of
the log-probability, respectively.
- size : int
+ model_ndim : int
Total number of parameters. Dimensionality of the output of
``logp_dlogp_func``.
scaling : 1 or 2-dimensional array-like
@@ -98,8 +98,8 @@ class BaseHMC:
self.adapt_step_size = adapt_step_size
self.Emax = Emax
self.iter_count = 0
- self.size = size
- self.step_size = step_scale / (size ** 0.25)
+ self.model_ndim = model_ndim
+ self.step_size = step_scale / (model_ndim ** 0.25)
self.target_accept = target_accept
self.step_adapt = step_sizes.DualAverageAdaptation(
self.step_size, target_accept, gamma, k, t0
@@ -108,9 +108,9 @@ class BaseHMC:
if scaling is None and potential is None:
# Default to diagonal quadpotential
- mean = np.zeros(size)
- var = np.ones(size)
- potential = QuadPotentialDiagAdapt(size, mean, var, 10)
+ mean = np.zeros(model_ndim)
+ var = np.ones(model_ndim)
+ potential = QuadPotentialDiagAdapt(model_ndim, mean, var, 10)
if scaling is not None and potential is not None:
raise ValueError("Cannot specify both `potential` and `scaling`.")
diff --git a/littlemcmc/hmc.py b/littlemcmc/hmc.py
index 926e172..2c01f69 100644
--- a/littlemcmc/hmc.py
+++ b/littlemcmc/hmc.py
@@ -52,7 +52,7 @@ class HamiltonianMC(BaseHMC):
def __init__(
self,
logp_dlogp_func: Callable[[np.ndarray], Tuple[np.ndarray, np.ndarray]],
- size: int,
+ model_ndim: int,
scaling: Optional[np.ndarray] = None,
is_cov: bool = False,
potential=None,
@@ -74,7 +74,7 @@ class HamiltonianMC(BaseHMC):
logp_dlogp_func : Python callable
Python callable that returns the log-probability and derivative of
the log-probability, respectively.
- size : int
+ model_ndim : int
Total number of parameters. Dimensionality of the output of
``logp_dlogp_func``.
scaling : 1 or 2-dimensional array-like
@@ -124,7 +124,7 @@ class HamiltonianMC(BaseHMC):
step_scale=step_scale,
is_cov=is_cov,
logp_dlogp_func=logp_dlogp_func,
- size=size,
+ model_ndim=model_ndim,
potential=potential,
Emax=Emax,
target_accept=target_accept,
diff --git a/littlemcmc/nuts.py b/littlemcmc/nuts.py
index 2c9aa33..d3123bf 100644
--- a/littlemcmc/nuts.py
+++ b/littlemcmc/nuts.py
@@ -109,7 +109,7 @@ class NUTS(BaseHMC):
def __init__(
self,
logp_dlogp_func: Callable[[np.ndarray], Tuple[np.ndarray, np.ndarray]],
- size: int,
+ model_ndim: int,
scaling: Optional[np.ndarray] = None,
is_cov: bool = False,
potential=None,
@@ -132,7 +132,7 @@ class NUTS(BaseHMC):
logp_dlogp_func : Python callable
Python callable that returns the log-probability and derivative of
the log-probability, respectively.
- size : int
+ model_ndim : int
Total number of parameters. Dimensionality of the output of
``logp_dlogp_func``.
scaling : 1 or 2-dimensional array-like
@@ -188,7 +188,7 @@ class NUTS(BaseHMC):
"""
super(NUTS, self).__init__(
logp_dlogp_func=logp_dlogp_func,
- size=size,
+ model_ndim=model_ndim,
scaling=scaling,
is_cov=is_cov,
potential=potential,
diff --git a/littlemcmc/sampling.py b/littlemcmc/sampling.py
index 87d046a..50a3c96 100644
--- a/littlemcmc/sampling.py
+++ b/littlemcmc/sampling.py
@@ -31,7 +31,7 @@ _log = logging.getLogger("littlemcmc")
def _sample_one_chain(
logp_dlogp_func: Callable[[np.ndarray], Tuple[np.ndarray, np.ndarray]],
- size: int,
+ model_ndim: int,
draws: int,
tune: int,
step: Union[NUTS, HamiltonianMC],
@@ -49,7 +49,7 @@ def _sample_one_chain(
progressbar_position = 0
q = start
- trace = np.zeros([size, tune + draws])
+ trace = np.zeros([model_ndim, tune + draws])
stats: List[SamplerWarning] = []
if progressbar == "notebook":
@@ -75,7 +75,7 @@ def _sample_one_chain(
def sample(
logp_dlogp_func: Callable[[np.ndarray], Tuple[np.ndarray, np.ndarray]],
- size: int,
+ model_ndim: int,
draws: int = 1000,
tune: int = 1000,
step: Union[NUTS, HamiltonianMC] = None,
@@ -96,7 +96,7 @@ def sample(
logp_dlogp_func: Python callable
Python callable that returns a tuple of the model joint log probability and its
derivative, in that order.
- size: int
+ model_ndim: int
The number of parameters of the model.
draws: int
The number of samples to draw. Defaults to 1000. The number of tuned samples are
@@ -183,7 +183,7 @@ def sample(
if step is None or start is None:
start_, step_ = init_nuts(
logp_dlogp_func=logp_dlogp_func,
- size=size,
+ model_ndim=model_ndim,
init=init,
random_seed=random_seed,
**kwargs,
@@ -197,7 +197,7 @@ def sample(
results = Parallel(n_jobs=cores, backend="multiprocessing")(
delayed(_sample_one_chain)(
logp_dlogp_func=logp_dlogp_func,
- size=size,
+ model_ndim=model_ndim,
draws=draws,
tune=tune,
step=step,
@@ -225,7 +225,7 @@ def sample(
def init_nuts(
logp_dlogp_func: Callable[[np.ndarray], Tuple[np.ndarray, np.ndarray]],
- size: int,
+ model_ndim: int,
init: str = "auto",
random_seed: Union[None, int, List[int]] = None,
**kwargs,
@@ -278,28 +278,30 @@ def init_nuts(
np.random.seed(random_seed)
if init == "adapt_diag":
- start = np.zeros(size)
+ start = np.zeros(model_ndim)
mean = start
- var = np.ones(size)
- potential: QuadPotential = QuadPotentialDiagAdapt(size, mean, var, 10)
+ var = np.ones(model_ndim)
+ potential: QuadPotential = QuadPotentialDiagAdapt(model_ndim, mean, var, 10)
elif init == "jitter+adapt_diag":
- start = 2 * np.random.rand(size) - 1
+ start = 2 * np.random.rand(model_ndim) - 1
mean = start
- var = np.ones(size)
- potential = QuadPotentialDiagAdapt(size, mean, var, 10)
+ var = np.ones(model_ndim)
+ potential = QuadPotentialDiagAdapt(model_ndim, mean, var, 10)
elif init == "adapt_full":
- start = np.zeros(size)
+ start = np.zeros(model_ndim)
mean = start
- cov = np.eye(size)
- potential = QuadPotentialFullAdapt(size, mean, cov, 10)
+ cov = np.eye(model_ndim)
+ potential = QuadPotentialFullAdapt(model_ndim, mean, cov, 10)
elif init == "jitter+adapt_full":
- start = 2 * np.random.rand(size) - 1
+ start = 2 * np.random.rand(model_ndim) - 1
mean = start
- cov = np.eye(size)
- potential = QuadPotentialFullAdapt(size, mean, cov, 10)
+ cov = np.eye(model_ndim)
+ potential = QuadPotentialFullAdapt(model_ndim, mean, cov, 10)
else:
raise ValueError("Unknown initializer: {}.".format(init))
- step = NUTS(logp_dlogp_func=logp_dlogp_func, size=size, potential=potential, **kwargs)
+ step = NUTS(
+ logp_dlogp_func=logp_dlogp_func, model_ndim=model_ndim, potential=potential, **kwargs
+ )
return start, step
|
eigenfoo/littlemcmc
|
18380c9f2dba16e0cc842b548fd00ffa76a63ec1
|
diff --git a/tests/test_hmc.py b/tests/test_hmc.py
index 3fd96c9..375ea0c 100644
--- a/tests/test_hmc.py
+++ b/tests/test_hmc.py
@@ -22,11 +22,11 @@ from test_utils import logp_dlogp_func
def test_leapfrog_reversible():
np.random.seed(42)
- size = 1
- scaling = np.random.rand(size)
- step = HamiltonianMC(logp_dlogp_func=logp_dlogp_func, size=size, scaling=scaling)
+ model_ndim = 1
+ scaling = np.random.rand(model_ndim)
+ step = HamiltonianMC(logp_dlogp_func=logp_dlogp_func, model_ndim=model_ndim, scaling=scaling)
p = step.potential.random()
- q = np.random.randn(size)
+ q = np.random.randn(model_ndim)
start = step.integrator.compute_state(p, q)
for epsilon in [0.01, 0.1]:
@@ -41,14 +41,14 @@ def test_leapfrog_reversible():
def test_nuts_tuning():
- size = 1
+ model_ndim = 1
draws = 5
tune = 5
- step = lmc.NUTS(logp_dlogp_func=logp_dlogp_func, size=size)
+ step = lmc.NUTS(logp_dlogp_func=logp_dlogp_func, model_ndim=model_ndim)
chains = 1
cores = 1
trace, stats = lmc.sample(
- logp_dlogp_func, size, draws, tune, step=step, chains=chains, cores=cores
+ logp_dlogp_func, model_ndim, draws, tune, step=step, chains=chains, cores=cores
)
assert not step.tune
diff --git a/tests/test_sampling.py b/tests/test_sampling.py
index d47e49f..19fca98 100644
--- a/tests/test_sampling.py
+++ b/tests/test_sampling.py
@@ -22,59 +22,59 @@ from test_utils import logp_dlogp_func
"method", ["adapt_diag", "jitter+adapt_diag", "adapt_full", "jitter+adapt_full",],
)
def test_init_nuts(method):
- start, step = lmc.init_nuts(logp_dlogp_func=logp_dlogp_func, size=1, init=method)
+ start, step = lmc.init_nuts(logp_dlogp_func=logp_dlogp_func, model_ndim=1, init=method)
assert isinstance(start, np.ndarray)
assert len(start) == 1
assert isinstance(step, lmc.NUTS)
def test_hmc_sampling_runs():
- size = 1
- step = lmc.HamiltonianMC(logp_dlogp_func=logp_dlogp_func, size=size)
+ model_ndim = 1
+ step = lmc.HamiltonianMC(logp_dlogp_func=logp_dlogp_func, model_ndim=model_ndim)
draws = 2
tune = 1
chains = 1
cores = 1
trace, stats = lmc.sample(
- logp_dlogp_func, size, draws, tune, step=step, chains=chains, cores=cores
+ logp_dlogp_func, model_ndim, draws, tune, step=step, chains=chains, cores=cores
)
assert trace.shape == (1, 2)
def test_nuts_sampling_runs():
- size = 1
- step = lmc.NUTS(logp_dlogp_func=logp_dlogp_func, size=size)
+ model_ndim = 1
+ step = lmc.NUTS(logp_dlogp_func=logp_dlogp_func, model_ndim=model_ndim)
draws = 2
tune = 1
chains = 1
cores = 1
trace, stats = lmc.sample(
- logp_dlogp_func, size, draws, tune, step=step, chains=chains, cores=cores
+ logp_dlogp_func, model_ndim, draws, tune, step=step, chains=chains, cores=cores
)
assert trace.shape == (1, 2)
def test_multiprocess_sampling_runs():
- size = 1
- step = lmc.NUTS(logp_dlogp_func=logp_dlogp_func, size=size)
+ model_ndim = 1
+ step = lmc.NUTS(logp_dlogp_func=logp_dlogp_func, model_ndim=model_ndim)
draws = 1
tune = 1
chains = None
cores = None
trace, stats = lmc.sample(
- logp_dlogp_func, size, draws, tune, step=step, chains=chains, cores=cores
+ logp_dlogp_func, model_ndim, draws, tune, step=step, chains=chains, cores=cores
)
def test_hmc_recovers_1d_normal():
- size = 1
- step = lmc.HamiltonianMC(logp_dlogp_func=logp_dlogp_func, size=size)
+ model_ndim = 1
+ step = lmc.HamiltonianMC(logp_dlogp_func=logp_dlogp_func, model_ndim=model_ndim)
draws = 1000
tune = 1000
chains = 1
cores = 1
trace, stats = lmc.sample(
- logp_dlogp_func, size, draws, tune, step=step, chains=chains, cores=cores
+ logp_dlogp_func, model_ndim, draws, tune, step=step, chains=chains, cores=cores
)
assert np.allclose(np.mean(trace), 0, atol=1)
@@ -82,14 +82,14 @@ def test_hmc_recovers_1d_normal():
def test_nuts_recovers_1d_normal():
- size = 1
- step = lmc.NUTS(logp_dlogp_func=logp_dlogp_func, size=size)
+ model_ndim = 1
+ step = lmc.NUTS(logp_dlogp_func=logp_dlogp_func, model_ndim=model_ndim)
draws = 1000
tune = 1000
chains = 1
cores = 1
trace, stats = lmc.sample(
- logp_dlogp_func, size, draws, tune, step=step, chains=chains, cores=cores
+ logp_dlogp_func, model_ndim, draws, tune, step=step, chains=chains, cores=cores
)
assert np.allclose(np.mean(trace), 0, atol=1)
|
s/size/model_ndim/g
Currently we have the `size` parameter in `lmc.sample`. However, `model_ndim` might be a more descriptive variable name (and it also mirrors what PyMC3 uses).
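A before/after sketch of a call site under the proposed rename (a toy standard-normal target is assumed):
```python
import numpy as np
import littlemcmc as lmc

def logp_dlogp_func(x):
    # Standard-normal log-density and its gradient.
    return -0.5 * np.sum(x ** 2), -x

# Before: trace, stats = lmc.sample(logp_dlogp_func, size=1, draws=100, tune=100)
# After:
trace, stats = lmc.sample(logp_dlogp_func, model_ndim=1, draws=100, tune=100)
```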
|
0.0
|
18380c9f2dba16e0cc842b548fd00ffa76a63ec1
|
[
"tests/test_hmc.py::test_leapfrog_reversible",
"tests/test_hmc.py::test_nuts_tuning",
"tests/test_sampling.py::test_init_nuts[adapt_diag]",
"tests/test_sampling.py::test_init_nuts[jitter+adapt_diag]",
"tests/test_sampling.py::test_init_nuts[adapt_full]",
"tests/test_sampling.py::test_init_nuts[jitter+adapt_full]",
"tests/test_sampling.py::test_nuts_sampling_runs"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-05-05 23:15:06+00:00
|
apache-2.0
| 2,080 |
|
eigenfoo__littlemcmc-63
|
diff --git a/littlemcmc/sampling.py b/littlemcmc/sampling.py
index 50a3c96..74e22fd 100644
--- a/littlemcmc/sampling.py
+++ b/littlemcmc/sampling.py
@@ -210,13 +210,18 @@ def sample(
for i, seed in enumerate(random_seed)
)
- # Flatten `trace` to have shape [num_variables, num_chains * num_samples]
- trace = np.hstack([np.atleast_2d(chain_trace) for (chain_trace, _) in results])
+ # Reshape `trace` to have shape [num_chains, num_samples, num_variables]
+ trace = np.array([np.atleast_2d(chain_trace).T for (chain_trace, _) in results])
- # Reshape `stats` to a dictionary
- stats_ = [iter_stats for (_, chain_stats) in results for iter_stats in chain_stats]
+ # Reshape `stats` to a dictionary with keys = string of sampling stat name,
+ # values = np.array with shape [num_chains, num_samples, num_variables]
stats = {
- name: np.squeeze(np.array([iter_stats[name] for iter_stats in stats_])).astype(dtype)
+ name: np.array(
+ [
+ [np.atleast_1d(iter_stats[name]) for iter_stats in chain_stats]
+ for (_, chain_stats) in results
+ ]
+ ).astype(dtype)
for (name, dtype) in step.stats_dtypes[0].items()
}
|
eigenfoo/littlemcmc
|
6ab62cc644efde9aec9258c7a5477c3ecdc16284
|
diff --git a/tests/test_sampling.py b/tests/test_sampling.py
index 19fca98..7b1d71e 100644
--- a/tests/test_sampling.py
+++ b/tests/test_sampling.py
@@ -31,27 +31,47 @@ def test_init_nuts(method):
def test_hmc_sampling_runs():
model_ndim = 1
step = lmc.HamiltonianMC(logp_dlogp_func=logp_dlogp_func, model_ndim=model_ndim)
- draws = 2
+ draws = 3
tune = 1
- chains = 1
+ chains = 2
cores = 1
+
+ expected_shape = (2, 3, 1)
+
trace, stats = lmc.sample(
logp_dlogp_func, model_ndim, draws, tune, step=step, chains=chains, cores=cores
)
- assert trace.shape == (1, 2)
+ assert trace.shape == expected_shape
+ assert all([stats[name].shape == expected_shape for (name, _) in step.stats_dtypes[0].items()])
+ assert all(
+ [
+ stats[name].dtype == expected_dtype
+ for (name, expected_dtype) in step.stats_dtypes[0].items()
+ ]
+ )
def test_nuts_sampling_runs():
model_ndim = 1
step = lmc.NUTS(logp_dlogp_func=logp_dlogp_func, model_ndim=model_ndim)
- draws = 2
+ draws = 3
tune = 1
- chains = 1
+ chains = 2
cores = 1
+
+ expected_shape = (2, 3, 1)
+
trace, stats = lmc.sample(
logp_dlogp_func, model_ndim, draws, tune, step=step, chains=chains, cores=cores
)
- assert trace.shape == (1, 2)
+ assert trace.shape == (2, 3, 1)
+ assert all([stats[name].shape == expected_shape for (name, _) in step.stats_dtypes[0].items()])
+ assert all(
+ [
+ stats[name].dtype == expected_dtype
+ for (name, expected_dtype) in step.stats_dtypes[0].items()
+ ]
+ )
def test_multiprocess_sampling_runs():
|
Use a better shape for trace object
Currently we flatten the `trace` object to have shape `[num_variables, num_chains * num_samples]`. Really, it ought to be `[num_chains, num_samples, num_variables]` (or whatever reasonable choice ArviZ makes; we should target compatibility with them).
https://github.com/eigenfoo/littlemcmc/blob/master/littlemcmc/sampling.py#L213-L214
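A numpy sketch of the reshape the fix performs, assuming each per-chain trace has shape `[num_variables, num_samples]` (the list comprehension is copied from the patch above):
```python
import numpy as np

num_chains, num_samples, num_variables = 2, 3, 1
# Stand-in for the per-chain results: (chain_trace, chain_stats) pairs.
results = [(np.zeros((num_variables, num_samples)), [])] * num_chains
trace = np.array([np.atleast_2d(chain_trace).T for (chain_trace, _) in results])
assert trace.shape == (num_chains, num_samples, num_variables)
```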
|
0.0
|
6ab62cc644efde9aec9258c7a5477c3ecdc16284
|
[
"tests/test_sampling.py::test_hmc_sampling_runs",
"tests/test_sampling.py::test_nuts_sampling_runs",
"tests/test_sampling.py::test_hmc_recovers_1d_normal",
"tests/test_sampling.py::test_nuts_recovers_1d_normal"
] |
[
"tests/test_sampling.py::test_init_nuts[adapt_diag]",
"tests/test_sampling.py::test_init_nuts[jitter+adapt_diag]",
"tests/test_sampling.py::test_init_nuts[adapt_full]",
"tests/test_sampling.py::test_init_nuts[jitter+adapt_full]",
"tests/test_sampling.py::test_multiprocess_sampling_runs"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2020-05-09 16:04:32+00:00
|
apache-2.0
| 2,081 |
|
einsteinpy__einsteinpy-427
|
diff --git a/src/einsteinpy/symbolic/constants.py b/src/einsteinpy/symbolic/constants.py
index 509082c..09da6fc 100644
--- a/src/einsteinpy/symbolic/constants.py
+++ b/src/einsteinpy/symbolic/constants.py
@@ -39,6 +39,7 @@ class SymbolicConstant(Symbol):
c = SymbolicConstant("c", "Speed Of Light")
G = SymbolicConstant("G", "Gravitational Constant")
Cosmo_Const = SymbolicConstant("Lambda", "Cosmological Constant")
+eps_0 = SymbolicConstant("eps_0", "Permittivity of free space")
def get_constant(name):
@@ -49,7 +50,7 @@ def get_constant(name):
----------
name : str
Name of the constant.
- Currently available names are 'c', 'G', 'Cosmo_Const'.
+ Currently available names are 'c', 'G', 'Cosmo_Const', 'eps_0'.
Returns
-------
@@ -57,5 +58,5 @@ def get_constant(name):
An instance of the required constant
"""
- const_dict = {"c": c, "G": G, "Cosmo_Const": Cosmo_Const}
+ const_dict = {"c": c, "G": G, "Cosmo_Const": Cosmo_Const, "eps_0": eps_0}
return const_dict[name]
diff --git a/src/einsteinpy/symbolic/predefined/__init__.py b/src/einsteinpy/symbolic/predefined/__init__.py
index c7b6d12..eb20cb8 100644
--- a/src/einsteinpy/symbolic/predefined/__init__.py
+++ b/src/einsteinpy/symbolic/predefined/__init__.py
@@ -4,4 +4,4 @@ from .de_sitter import AntiDeSitter, AntiDeSitterStatic, DeSitter
from .find import find
from .godel import Godel
from .minkowski import Minkowski
-from .vacuum_solutions import Kerr, Schwarzschild
+from .vacuum_solutions import Kerr, KerrNewman, Schwarzschild
diff --git a/src/einsteinpy/symbolic/predefined/vacuum_solutions.py b/src/einsteinpy/symbolic/predefined/vacuum_solutions.py
index 414b435..5f13f9a 100644
--- a/src/einsteinpy/symbolic/predefined/vacuum_solutions.py
+++ b/src/einsteinpy/symbolic/predefined/vacuum_solutions.py
@@ -1,4 +1,4 @@
-from sympy import cos, diag, sin, symbols
+from sympy import cos, diag, pi, sin, symbols
from einsteinpy.symbolic import constants
from einsteinpy.symbolic.metric import MetricTensor
@@ -66,6 +66,52 @@ def Kerr(c=constants.c, sch=symbols("r_s"), a=symbols("a")):
def KerrNewman(
- c=constants.c, G=constants.G, sch=symbols("r_s"), a=symbols("a"), Q=symbols("Q")
+ c=constants.c,
+ G=constants.G,
+ eps_0=constants.eps_0,
+ sch=symbols("r_s"),
+ a=symbols("a"),
+ Q=symbols("Q"),
):
- raise NotImplementedError
+ """
+ Kerr-Newman Metric in Boyer Lindquist coordinates.
+
+ Parameters
+ ----------
+ c : ~sympy.core.basic.Basic or int or float
+ Any value to assign to speed of light. Defaults to ``c``.
+ G : ~sympy.core.basic.Basic or int or float
+ Any value to assign to the Newton's (or gravitational) constant. Defaults to ``G``.
+ eps_0 : ~sympy.core.basic.Basic or int or float
+ Any value to assign to the electric constant or permittivity of free space. Defaults to ``eps_0``.
+ sch : ~sympy.core.basic.Basic or int or float
+ Any value to assign to Schwarzschild Radius of the central object.
+ Defaults to ``r_s``.
+ a : ~sympy.core.basic.Basic or int or float
+ Spin factor of the heavy body. Usually, given by ``J/(Mc)``,
+ where ``J`` is the angular momentum.
+ Defaults to ``a``.
+ Q: ~sympy.core.basic.Basic or int or float
+ Any value to assign to eletric charge of the central object.
+ Defaults to ``Q``.
+
+ """
+ coords = symbols("t r theta phi")
+ t, r, theta, phi = coords
+ Sigma = r ** 2 + (a ** 2 * cos(theta) ** 2)
+ rQsq = ((Q ** 2) * G) / (4 * pi * eps_0 * (c ** 4))
+ Delta = r ** 2 - sch * r + a ** 2 + rQsq
+ c2 = c ** 2
+
+ metric = diag(
+ 1 - (sch * r / Sigma),
+ -Sigma / (Delta * c2),
+ -Sigma / c2,
+ -(
+ (r ** 2 + a ** 2 + (sch * r * (a ** 2) * (sin(theta) ** 2) / Sigma))
+ * (sin(theta) ** 2)
+ )
+ / c2,
+ ).tolist()
+ metric[0][3] = metric[3][0] = sch * r * a * (sin(theta) ** 2) / (Sigma * c)
+ return MetricTensor(metric, coords, "ll")
|
einsteinpy/einsteinpy
|
7461ec70054b789fa6a71e81892dc4b52a2bf742
|
diff --git a/src/einsteinpy/tests/test_symbolic/test_predefined/test_all.py b/src/einsteinpy/tests/test_symbolic/test_predefined/test_all.py
index 2bc70b3..37772f8 100644
--- a/src/einsteinpy/tests/test_symbolic/test_predefined/test_all.py
+++ b/src/einsteinpy/tests/test_symbolic/test_predefined/test_all.py
@@ -1,6 +1,8 @@
+import numpy as np
import pytest
+from sympy import Array
-from einsteinpy.symbolic import MetricTensor
+from einsteinpy.symbolic import MetricTensor, simplify_sympy_array
from einsteinpy.symbolic.predefined import (
AntiDeSitter,
AntiDeSitterStatic,
@@ -9,6 +11,7 @@ from einsteinpy.symbolic.predefined import (
DeSitter,
Godel,
Kerr,
+ KerrNewman,
Minkowski,
Schwarzschild,
)
@@ -23,6 +26,7 @@ from einsteinpy.symbolic.predefined import (
Schwarzschild(),
Schwarzschild(c=1, sch=2),
Kerr(),
+ KerrNewman(),
Minkowski(),
CMetric(),
Davidson(),
@@ -31,3 +35,15 @@ from einsteinpy.symbolic.predefined import (
)
def test_all_predefined_metrics(metric_instance):
assert isinstance(metric_instance, MetricTensor)
+
+
[email protected](
+ "m1, m2",
+ [
+ (Schwarzschild(), Kerr(a=0)), # Schwarzschild is a special case of Kerr
+ (Kerr(), KerrNewman(Q=0)), # Kerr is a special case of Kerr-Newman
+ ],
+)
+def test_check_two_metrics_are_equal(m1, m2):
+ zero_arr = Array(np.zeros(shape=m1.tensor().shape, dtype=int))
+ assert simplify_sympy_array(m1.tensor() - m2.tensor()) == zero_arr
|
Add Kerr and Kerr-Newman metric in Boyer-Lindquist coordinates to predefined metrics
🐞 **Problem**
`symbolic/predefined/vacuum_solutions.py` needs the Kerr and Kerr-Newman metrics. Implement them similarly to the already implemented `Schwarzschild()` in the same file.
💡 **Possible solutions**
- [x] Kerr (c737c8b)
- [ ] Kerr Newman
The metrics can be found by a simple search on Wikipedia. Implementations can also be found in `utils/kerr_utils.py` and `utils/kerrnewman_utils.py`.
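Once implemented, the predefined metrics would presumably be used like the existing `Schwarzschild()` (the names below are taken from the patch and tests in this row):
```python
from einsteinpy.symbolic.predefined import Kerr, KerrNewman

metric = KerrNewman()         # symbolic c, G, eps_0, r_s, a, Q
kerr_limit = KerrNewman(Q=0)  # should reduce to Kerr()
```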
📋 **Steps to solve the problem**
* Comment below about what you've started working on.
* Add, commit, push your changes
* Submit a pull request and add this in comments - `Addresses #<put issue number here>`
* Ask for a review in comments section of pull request
* Celebrate your contribution to this project 🎉
|
0.0
|
7461ec70054b789fa6a71e81892dc4b52a2bf742
|
[
"src/einsteinpy/tests/test_symbolic/test_predefined/test_all.py::test_all_predefined_metrics[metric_instance0]",
"src/einsteinpy/tests/test_symbolic/test_predefined/test_all.py::test_all_predefined_metrics[metric_instance1]",
"src/einsteinpy/tests/test_symbolic/test_predefined/test_all.py::test_all_predefined_metrics[metric_instance2]",
"src/einsteinpy/tests/test_symbolic/test_predefined/test_all.py::test_all_predefined_metrics[metric_instance3]",
"src/einsteinpy/tests/test_symbolic/test_predefined/test_all.py::test_all_predefined_metrics[metric_instance4]",
"src/einsteinpy/tests/test_symbolic/test_predefined/test_all.py::test_all_predefined_metrics[metric_instance5]",
"src/einsteinpy/tests/test_symbolic/test_predefined/test_all.py::test_all_predefined_metrics[metric_instance6]",
"src/einsteinpy/tests/test_symbolic/test_predefined/test_all.py::test_all_predefined_metrics[metric_instance7]",
"src/einsteinpy/tests/test_symbolic/test_predefined/test_all.py::test_all_predefined_metrics[metric_instance8]",
"src/einsteinpy/tests/test_symbolic/test_predefined/test_all.py::test_all_predefined_metrics[metric_instance9]",
"src/einsteinpy/tests/test_symbolic/test_predefined/test_all.py::test_all_predefined_metrics[metric_instance10]",
"src/einsteinpy/tests/test_symbolic/test_predefined/test_all.py::test_check_two_metrics_are_equal[m10-m20]",
"src/einsteinpy/tests/test_symbolic/test_predefined/test_all.py::test_check_two_metrics_are_equal[m11-m21]"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-02-11 11:14:37+00:00
|
mit
| 2,082 |
|
einsteinpy__einsteinpy-84
|
diff --git a/docs/source/api/plotting/geodesics.rst b/docs/source/api/plotting/geodesics_static.rst
similarity index 52%
rename from docs/source/api/plotting/geodesics.rst
rename to docs/source/api/plotting/geodesics_static.rst
index d09d445..cff12c0 100644
--- a/docs/source/api/plotting/geodesics.rst
+++ b/docs/source/api/plotting/geodesics_static.rst
@@ -1,8 +1,8 @@
-Geodesics module
-================
+Static 2D Plotting module
+=========================
This module contains the basic classes for static plottings in
2-dimensions for scatter and line:
-.. automodule:: einsteinpy.plotting.geodesics
+.. automodule:: einsteinpy.plotting.geodesics_static
:members:
diff --git a/docs/source/api/plotting/plotting_index.rst b/docs/source/api/plotting/plotting_index.rst
index c08e0fa..d8dce95 100644
--- a/docs/source/api/plotting/plotting_index.rst
+++ b/docs/source/api/plotting/plotting_index.rst
@@ -7,4 +7,4 @@ This module contains the basic classes for static and interactive
.. toctree::
:maxdepth: 2
- geodesics
+ geodesics_static
diff --git a/src/einsteinpy/ijit.py b/src/einsteinpy/ijit.py
new file mode 100644
index 0000000..0b85935
--- /dev/null
+++ b/src/einsteinpy/ijit.py
@@ -0,0 +1,36 @@
+"""Just-in-time compiler.
+
+Wraps numba if it is available as a module, uses an identity
+decorator instead.
+
+"""
+import inspect
+import warnings
+
+
+def ijit(first=None, *args, **kwargs):
+ """Identity JIT, returns unchanged function.
+
+ """
+
+ def _jit(f):
+ return f
+
+ if inspect.isfunction(first):
+ return first
+ else:
+ return _jit
+
+
+try:
+ import numba
+
+ jit = numba.njit
+except ImportError:
+ warnings.warn(
+ "Could not import numba package. All einsteinpy "
+ "functions will work properly but the CPU intensive "
+ "algorithms will be slow. Consider installing numba to "
+ "boost performance."
+ )
+ jit = ijit
diff --git a/src/einsteinpy/plotting/__init__.py b/src/einsteinpy/plotting/__init__.py
index 9dce831..9a44108 100644
--- a/src/einsteinpy/plotting/__init__.py
+++ b/src/einsteinpy/plotting/__init__.py
@@ -1,1 +1,1 @@
-from .geodesics import ScatterGeodesicPlotter, StaticGeodesicPlotter
+from .geodesics_static import ScatterGeodesicPlotter, StaticGeodesicPlotter
|
einsteinpy/einsteinpy
|
c72cf310a2319793969fea4a68b21a25a62e32be
|
diff --git a/src/einsteinpy/plotting/geodesics.py b/src/einsteinpy/plotting/geodesics_static.py
similarity index 100%
rename from src/einsteinpy/plotting/geodesics.py
rename to src/einsteinpy/plotting/geodesics_static.py
diff --git a/src/einsteinpy/tests/test_plotting/test_scattergeodesicplotter.py b/src/einsteinpy/tests/test_plotting/test_scattergeodesicplotter.py
index c34f626..4fc3714 100644
--- a/src/einsteinpy/tests/test_plotting/test_scattergeodesicplotter.py
+++ b/src/einsteinpy/tests/test_plotting/test_scattergeodesicplotter.py
@@ -27,7 +27,9 @@ def test_plot_attractor_is_called_only_once(dummy_data):
assert cl._attractor_present == True
[email protected]("einsteinpy.plotting.geodesics.ScatterGeodesicPlotter._plot_attractor")
[email protected](
+ "einsteinpy.plotting.geodesics_static.ScatterGeodesicPlotter._plot_attractor"
+)
def test_plot_calls_plot_attractor(mock_plot_attractor):
r = [306 * u.m, np.pi / 2 * u.rad, np.pi / 2 * u.rad]
v = [0 * u.m / u.s, 0 * u.rad / u.s, 951.0 * u.rad / u.s]
@@ -39,7 +41,7 @@ def test_plot_calls_plot_attractor(mock_plot_attractor):
mock_plot_attractor.assert_called_with()
[email protected]("einsteinpy.plotting.geodesics.plt.show")
[email protected]("einsteinpy.plotting.geodesics_static.plt.show")
def test_plot_show_shows_plot(mock_show):
r = [306 * u.m, np.pi / 2 * u.rad, np.pi / 2 * u.rad]
v = [0 * u.m / u.s, 0 * u.rad / u.s, 951.0 * u.rad / u.s]
diff --git a/src/einsteinpy/tests/test_plotting/test_staticgeodesicplotter.py b/src/einsteinpy/tests/test_plotting/test_staticgeodesicplotter.py
index 980b9a6..e421105 100644
--- a/src/einsteinpy/tests/test_plotting/test_staticgeodesicplotter.py
+++ b/src/einsteinpy/tests/test_plotting/test_staticgeodesicplotter.py
@@ -29,7 +29,7 @@ def test_staticgeodesicplotter_has_axes(dummy_data):
assert cl._attractor_present is False
[email protected]("einsteinpy.plotting.geodesics.StaticGeodesicPlotter.plot_attractor")
[email protected]("einsteinpy.plotting.geodesics_static.StaticGeodesicPlotter.plot_attractor")
def test_plot_calls_plot_attractor(mock_plot_attractor):
r = [306 * u.m, np.pi / 2 * u.rad, np.pi / 2 * u.rad]
v = [0 * u.m / u.s, 0 * u.rad / u.s, 951.0 * u.rad / u.s]
|
Leverage numba jit
🐞 **Problem**
As of now, very simple computations take a lot of time to run. We can use the `@jit` decorator provided by numba.
🎯 **Goal**
Make einsteinpy faster.
💡 **Possible solutions**
http://numba.pydata.org/numba-doc/latest/user/jit.html
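Usage sketch of the fallback shim added in `ijit.py` (the module path and `jit` name are taken from the patch): `jit` resolves to `numba.njit` when numba is installed, and to a no-op identity decorator otherwise.
```python
import numpy as np
from einsteinpy.ijit import jit

@jit
def dot(a, b):
    # Plain numba-compilable loop; runs unchanged (just slower) without numba.
    s = 0.0
    for i in range(a.shape[0]):
        s += a[i] * b[i]
    return s

print(dot(np.arange(3.0), np.arange(3.0)))  # 5.0
```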
📋 **Steps to solve the problem**
* Comment below about what you've started working on.
* Add, commit, push your changes
* Submit a pull request and add this in comments - `Addresses #<put issue number here>`
* Ask for a review in comments section of pull request
* Celebrate your contribution to this project 🎉
|
0.0
|
c72cf310a2319793969fea4a68b21a25a62e32be
|
[
"src/einsteinpy/tests/test_plotting/test_scattergeodesicplotter.py::test_plot_attractor_is_called_only_once",
"src/einsteinpy/tests/test_plotting/test_scattergeodesicplotter.py::test_plot_calls_plot_attractor",
"src/einsteinpy/tests/test_plotting/test_scattergeodesicplotter.py::test_plot_show_shows_plot",
"src/einsteinpy/tests/test_plotting/test_staticgeodesicplotter.py::test_staticgeodesicplotter_has_axes",
"src/einsteinpy/tests/test_plotting/test_staticgeodesicplotter.py::test_plot_calls_plot_attractor"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-03-03 11:36:25+00:00
|
mit
| 2,083 |
|
ej2__python-quickbooks-180
|
diff --git a/quickbooks/objects/customer.py b/quickbooks/objects/customer.py
index 1bbdeed..0f43bfd 100644
--- a/quickbooks/objects/customer.py
+++ b/quickbooks/objects/customer.py
@@ -57,6 +57,7 @@ class Customer(QuickbooksManagedObject, QuickbooksTransactionEntity):
self.ResaleNum = ""
self.Level = 0
self.OpenBalanceDate = ""
+ self.PrimaryTaxIdentifier = ""
self.BillAddr = None
self.ShipAddr = None
diff --git a/quickbooks/objects/paymentmethod.py b/quickbooks/objects/paymentmethod.py
index ba4a9eb..dd4da4c 100644
--- a/quickbooks/objects/paymentmethod.py
+++ b/quickbooks/objects/paymentmethod.py
@@ -29,3 +29,6 @@ class PaymentMethod(QuickbooksManagedObject, QuickbooksTransactionEntity):
ref.name = self.Name
ref.type = self.qbo_object_name
ref.value = self.Id
+
+ return ref
+
diff --git a/quickbooks/objects/purchase.py b/quickbooks/objects/purchase.py
index 18fbe9d..853dd67 100644
--- a/quickbooks/objects/purchase.py
+++ b/quickbooks/objects/purchase.py
@@ -62,7 +62,7 @@ class Purchase(DeleteMixin, QuickbooksManagedObject, QuickbooksTransactionEntity
self.TxnTaxDetail = None
self.DepartmentRef = None
self.AccountRef = None
- self.EnitityRef = None
+ self.EntityRef = None
self.CurrencyRef = None
self.PaymentMethodRef = None
self.RemitToAddr = None
|
ej2/python-quickbooks
|
3e8b24d7d3b2156ba868d415dfc98c2a5a9d2cb5
|
diff --git a/tests/unit/objects/test_paymentmethod.py b/tests/unit/objects/test_paymentmethod.py
index 8016214..5978808 100644
--- a/tests/unit/objects/test_paymentmethod.py
+++ b/tests/unit/objects/test_paymentmethod.py
@@ -17,3 +17,14 @@ class PaymentMethodTests(unittest.TestCase):
result = client.isvalid_object_name(obj.qbo_object_name)
self.assertTrue(result)
+
+ def test_to_ref(self):
+ obj = PaymentMethod()
+ obj.Name = "test"
+ obj.Id = 12
+
+ ref = obj.to_ref()
+
+ self.assertEquals(ref.name, "test")
+ self.assertEquals(ref.type, "PaymentMethod")
+ self.assertEquals(ref.value, 12)
|
Typo in Purchase attribute: `self.EnitityRef = None`
/quickbooks/objects/purchase.py:65
Should be `self.EntityRef`
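Separately, the patch fixes `PaymentMethod.to_ref()` returning `None`; a usage sketch mirroring the added unit test:
```python
from quickbooks.objects.paymentmethod import PaymentMethod

pm = PaymentMethod()
pm.Name = "test"
pm.Id = 12
ref = pm.to_ref()
assert (ref.name, ref.type, ref.value) == ("test", "PaymentMethod", 12)
```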
|
0.0
|
3e8b24d7d3b2156ba868d415dfc98c2a5a9d2cb5
|
[
"tests/unit/objects/test_paymentmethod.py::PaymentMethodTests::test_to_ref"
] |
[
"tests/unit/objects/test_paymentmethod.py::PaymentMethodTests::test_unicode",
"tests/unit/objects/test_paymentmethod.py::PaymentMethodTests::test_valid_object_name"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-03-20 12:02:02+00:00
|
mit
| 2,084 |
|
ej2__python-quickbooks-322
|
diff --git a/quickbooks/mixins.py b/quickbooks/mixins.py
index 9b51a42..2083139 100644
--- a/quickbooks/mixins.py
+++ b/quickbooks/mixins.py
@@ -119,6 +119,53 @@ class SendMixin(object):
class VoidMixin(object):
+
+ def get_void_params(self):
+ qb_object_params_map = {
+ "Payment": {
+ "operation": "update",
+ "include": "void"
+ },
+ "SalesReceipt": {
+ "operation": "update",
+ "include": "void"
+ },
+ "BillPayment": {
+ "operation": "update",
+ "include": "void"
+ },
+ "Invoice": {
+ "operation": "void",
+ },
+ }
+ # setting the default operation to void (the original behavior)
+ return qb_object_params_map.get(self.qbo_object_name, {"operation": "void"})
+
+ def get_void_data(self):
+ qb_object_params_map = {
+ "Payment": {
+ "Id": self.Id,
+ "SyncToken": self.SyncToken,
+ "sparse": True
+ },
+ "SalesReceipt": {
+ "Id": self.Id,
+ "SyncToken": self.SyncToken,
+ "sparse": True
+ },
+ "BillPayment": {
+ "Id": self.Id,
+ "SyncToken": self.SyncToken,
+ "sparse": True
+ },
+ "Invoice": {
+ "Id": self.Id,
+ "SyncToken": self.SyncToken,
+ },
+ }
+ # setting the default operation to void (the original behavior)
+ return qb_object_params_map.get(self.qbo_object_name, {"operation": "void"})
+
def void(self, qb=None):
if not qb:
qb = QuickBooks()
@@ -126,14 +173,12 @@ class VoidMixin(object):
if not self.Id:
raise QuickbooksException('Cannot void unsaved object')
- data = {
- 'Id': self.Id,
- 'SyncToken': self.SyncToken,
- }
-
endpoint = self.qbo_object_name.lower()
url = "{0}/company/{1}/{2}".format(qb.api_url, qb.company_id, endpoint)
- results = qb.post(url, json.dumps(data), params={'operation': 'void'})
+
+ data = self.get_void_data()
+ params = self.get_void_params()
+ results = qb.post(url, json.dumps(data), params=params)
return results
diff --git a/quickbooks/objects/attachable.py b/quickbooks/objects/attachable.py
index e7d23d0..23e7132 100644
--- a/quickbooks/objects/attachable.py
+++ b/quickbooks/objects/attachable.py
@@ -58,7 +58,7 @@ class Attachable(DeleteMixin, QuickbooksManagedObject, QuickbooksTransactionEnti
else:
json_data = qb.create_object(self.qbo_object_name, self.to_json(), _file_path=self._FilePath)
- if self.FileName:
+ if self.Id is None and self.FileName:
obj = type(self).from_json(json_data['AttachableResponse'][0]['Attachable'])
else:
obj = type(self).from_json(json_data['Attachable'])
diff --git a/quickbooks/objects/billpayment.py b/quickbooks/objects/billpayment.py
index 3f175d9..64569d9 100644
--- a/quickbooks/objects/billpayment.py
+++ b/quickbooks/objects/billpayment.py
@@ -1,6 +1,6 @@
from .base import QuickbooksBaseObject, Ref, LinkedTxn, QuickbooksManagedObject, LinkedTxnMixin, \
QuickbooksTransactionEntity
-from ..mixins import DeleteMixin
+from ..mixins import DeleteMixin, VoidMixin
class CheckPayment(QuickbooksBaseObject):
@@ -47,7 +47,7 @@ class BillPaymentLine(QuickbooksBaseObject):
return str(self.Amount)
-class BillPayment(DeleteMixin, QuickbooksManagedObject, QuickbooksTransactionEntity, LinkedTxnMixin):
+class BillPayment(DeleteMixin, QuickbooksManagedObject, QuickbooksTransactionEntity, LinkedTxnMixin, VoidMixin):
"""
QBO definition: A BillPayment entity represents the financial transaction of payment
of bills that the business owner receives from a vendor for goods or services purchased
diff --git a/quickbooks/objects/payment.py b/quickbooks/objects/payment.py
index 07e1522..75cbf57 100644
--- a/quickbooks/objects/payment.py
+++ b/quickbooks/objects/payment.py
@@ -3,7 +3,7 @@ from .base import QuickbooksBaseObject, Ref, LinkedTxn, \
LinkedTxnMixin, MetaData
from ..client import QuickBooks
from .creditcardpayment import CreditCardPayment
-from ..mixins import DeleteMixin
+from ..mixins import DeleteMixin, VoidMixin
import json
@@ -21,7 +21,7 @@ class PaymentLine(QuickbooksBaseObject):
return str(self.Amount)
-class Payment(DeleteMixin, QuickbooksManagedObject, QuickbooksTransactionEntity, LinkedTxnMixin):
+class Payment(DeleteMixin, QuickbooksManagedObject, QuickbooksTransactionEntity, LinkedTxnMixin, VoidMixin):
"""
QBO definition: A Payment entity records a payment in QuickBooks. The payment can be
applied for a particular customer against multiple Invoices and Credit Memos. It can also
@@ -81,24 +81,5 @@ class Payment(DeleteMixin, QuickbooksManagedObject, QuickbooksTransactionEntity,
# These fields are for minor version 4
self.TransactionLocationType = None
- def void(self, qb=None):
- if not qb:
- qb = QuickBooks()
-
- if not self.Id:
- raise qb.QuickbooksException('Cannot void unsaved object')
-
- data = {
- 'Id': self.Id,
- 'SyncToken': self.SyncToken,
- 'sparse': True
- }
-
- endpoint = self.qbo_object_name.lower()
- url = "{0}/company/{1}/{2}".format(qb.api_url, qb.company_id, endpoint)
- results = qb.post(url, json.dumps(data), params={'operation': 'update', 'include': 'void'})
-
- return results
-
def __str__(self):
return str(self.TotalAmt)
diff --git a/quickbooks/objects/salesreceipt.py b/quickbooks/objects/salesreceipt.py
index 83b55a2..0a42925 100644
--- a/quickbooks/objects/salesreceipt.py
+++ b/quickbooks/objects/salesreceipt.py
@@ -2,11 +2,11 @@ from .base import Ref, CustomField, QuickbooksManagedObject, LinkedTxnMixin, Add
EmailAddress, QuickbooksTransactionEntity, LinkedTxn
from .tax import TxnTaxDetail
from .detailline import DetailLine
-from ..mixins import QuickbooksPdfDownloadable, DeleteMixin
+from ..mixins import QuickbooksPdfDownloadable, DeleteMixin, VoidMixin
class SalesReceipt(DeleteMixin, QuickbooksPdfDownloadable, QuickbooksManagedObject,
- QuickbooksTransactionEntity, LinkedTxnMixin):
+ QuickbooksTransactionEntity, LinkedTxnMixin, VoidMixin):
"""
QBO definition: SalesReceipt represents the sales receipt that is given to a customer.
A sales receipt is similar to an invoice. However, for a sales receipt, payment is received
|
ej2/python-quickbooks
|
a02ca1ba6bac7ced8e1af07e6a04d7a46818df22
|
diff --git a/tests/integration/test_billpayment.py b/tests/integration/test_billpayment.py
index fab990c..c7ce650 100644
--- a/tests/integration/test_billpayment.py
+++ b/tests/integration/test_billpayment.py
@@ -1,5 +1,6 @@
from datetime import datetime
+from quickbooks.objects import AccountBasedExpenseLine, Ref, AccountBasedExpenseLineDetail
from quickbooks.objects.account import Account
from quickbooks.objects.bill import Bill
from quickbooks.objects.billpayment import BillPayment, BillPaymentLine, CheckPayment
@@ -14,12 +15,30 @@ class BillPaymentTest(QuickbooksTestCase):
self.account_number = datetime.now().strftime('%d%H%M')
self.name = "Test Account {0}".format(self.account_number)
- def test_create(self):
+ def create_bill(self, amount):
+ bill = Bill()
+ line = AccountBasedExpenseLine()
+ line.Amount = amount
+ line.DetailType = "AccountBasedExpenseLineDetail"
+
+ account_ref = Ref()
+ account_ref.type = "Account"
+ account_ref.value = 1
+ line.AccountBasedExpenseLineDetail = AccountBasedExpenseLineDetail()
+ line.AccountBasedExpenseLineDetail.AccountRef = account_ref
+ bill.Line.append(line)
+
+ vendor = Vendor.all(max_results=1, qb=self.qb_client)[0]
+ bill.VendorRef = vendor.to_ref()
+
+ return bill.save(qb=self.qb_client)
+
+ def create_bill_payment(self, bill, amount, private_note, pay_type):
bill_payment = BillPayment()
- bill_payment.PayType = "Check"
- bill_payment.TotalAmt = 200
- bill_payment.PrivateNote = "Private Note"
+ bill_payment.PayType = pay_type
+ bill_payment.TotalAmt = amount
+ bill_payment.PrivateNote = private_note
vendor = Vendor.all(max_results=1, qb=self.qb_client)[0]
bill_payment.VendorRef = vendor.to_ref()
@@ -31,14 +50,18 @@ class BillPaymentTest(QuickbooksTestCase):
ap_account = Account.where("AccountSubType = 'AccountsPayable'", qb=self.qb_client)[0]
bill_payment.APAccountRef = ap_account.to_ref()
- bill = Bill.all(max_results=1, qb=self.qb_client)[0]
-
line = BillPaymentLine()
line.LinkedTxn.append(bill.to_linked_txn())
line.Amount = 200
bill_payment.Line.append(line)
- bill_payment.save(qb=self.qb_client)
+ return bill_payment.save(qb=self.qb_client)
+
+ def test_create(self):
+ # create new bill for testing, reusing the same bill will cause Line to be empty
+ # and the new bill payment will be voided automatically
+ bill = self.create_bill(amount=200)
+ bill_payment = self.create_bill_payment(bill, 200, "Private Note", "Check")
query_bill_payment = BillPayment.get(bill_payment.Id, qb=self.qb_client)
@@ -48,3 +71,16 @@ class BillPaymentTest(QuickbooksTestCase):
self.assertEqual(len(query_bill_payment.Line), 1)
self.assertEqual(query_bill_payment.Line[0].Amount, 200.0)
+
+ def test_void(self):
+ bill = self.create_bill(amount=200)
+ bill_payment = self.create_bill_payment(bill, 200, "Private Note", "Check")
+ query_payment = BillPayment.get(bill_payment.Id, qb=self.qb_client)
+ self.assertEqual(query_payment.TotalAmt, 200.0)
+ self.assertNotIn('Voided', query_payment.PrivateNote)
+
+ bill_payment.void(qb=self.qb_client)
+ query_payment = BillPayment.get(bill_payment.Id, qb=self.qb_client)
+
+ self.assertEqual(query_payment.TotalAmt, 0.0)
+ self.assertIn('Voided', query_payment.PrivateNote)
\ No newline at end of file
diff --git a/tests/integration/test_invoice.py b/tests/integration/test_invoice.py
index 4a686a5..c02e40e 100644
--- a/tests/integration/test_invoice.py
+++ b/tests/integration/test_invoice.py
@@ -75,3 +75,14 @@ class InvoiceTest(QuickbooksTestCase):
query_invoice = Invoice.filter(Id=invoice_id, qb=self.qb_client)
self.assertEqual([], query_invoice)
+
+ def test_void(self):
+ customer = Customer.all(max_results=1, qb=self.qb_client)[0]
+ invoice = self.create_invoice(customer)
+ invoice_id = invoice.Id
+ invoice.void(qb=self.qb_client)
+
+ query_invoice = Invoice.get(invoice_id, qb=self.qb_client)
+ self.assertEqual(query_invoice.Balance, 0.0)
+ self.assertEqual(query_invoice.TotalAmt, 0.0)
+ self.assertIn('Voided', query_invoice.PrivateNote)
diff --git a/tests/integration/test_salesreceipt.py b/tests/integration/test_salesreceipt.py
new file mode 100644
index 0000000..ce3bd2a
--- /dev/null
+++ b/tests/integration/test_salesreceipt.py
@@ -0,0 +1,59 @@
+from datetime import datetime
+
+from quickbooks.objects import SalesReceipt, Customer, \
+ SalesItemLine, SalesItemLineDetail, Item
+from tests.integration.test_base import QuickbooksTestCase
+
+
+class SalesReceiptTest(QuickbooksTestCase):
+ def setUp(self):
+ super(SalesReceiptTest, self).setUp()
+
+ self.account_number = datetime.now().strftime('%d%H%M')
+ self.name = "Test Account {0}".format(self.account_number)
+
+ def create_sales_receipt(self, qty=1, unit_price=100.0):
+ sales_receipt = SalesReceipt()
+ sales_receipt.TotalAmt = qty * unit_price
+ customer = Customer.all(max_results=1, qb=self.qb_client)[0]
+ sales_receipt.CustomerRef = customer.to_ref()
+ item = Item.all(max_results=1, qb=self.qb_client)[0]
+ line = SalesItemLine()
+ sales_item_line_detail = SalesItemLineDetail()
+ sales_item_line_detail.ItemRef = item.to_ref()
+ sales_item_line_detail.Qty = qty
+ sales_item_line_detail.UnitPrice = unit_price
+ today = datetime.now()
+ sales_item_line_detail.ServiceDate = today.strftime(
+ "%Y-%m-%d"
+ )
+ line.SalesItemLineDetail = sales_item_line_detail
+ line.Amount = qty * unit_price
+ sales_receipt.Line = [line]
+
+ return sales_receipt.save(qb=self.qb_client)
+
+ def test_create(self):
+ sales_receipt = self.create_sales_receipt(
+ qty=1,
+ unit_price=100.0
+ )
+ query_sales_receipt = SalesReceipt.get(sales_receipt.Id, qb=self.qb_client)
+
+ self.assertEqual(query_sales_receipt.TotalAmt, 100.0)
+ self.assertEqual(query_sales_receipt.Line[0].Amount, 100.0)
+ self.assertEqual(query_sales_receipt.Line[0].SalesItemLineDetail['Qty'], 1)
+ self.assertEqual(query_sales_receipt.Line[0].SalesItemLineDetail['UnitPrice'], 100.0)
+
+ def test_void(self):
+ sales_receipt = self.create_sales_receipt(
+ qty=1,
+ unit_price=100.0
+ )
+ query_sales_receipt = SalesReceipt.get(sales_receipt.Id, qb=self.qb_client)
+ self.assertEqual(query_sales_receipt.TotalAmt, 100.0)
+ self.assertNotIn('Voided', query_sales_receipt.PrivateNote)
+ sales_receipt.void(qb=self.qb_client)
+ query_sales_receipt = SalesReceipt.get(sales_receipt.Id, qb=self.qb_client)
+ self.assertEqual(query_sales_receipt.TotalAmt, 0.0)
+ self.assertIn('Voided', query_sales_receipt.PrivateNote)
diff --git a/tests/unit/test_mixins.py b/tests/unit/test_mixins.py
index c3ff3ed..017df5d 100644
--- a/tests/unit/test_mixins.py
+++ b/tests/unit/test_mixins.py
@@ -4,7 +4,7 @@ import os
import unittest
from urllib.parse import quote
-from quickbooks.objects import Bill, Invoice
+from quickbooks.objects import Bill, Invoice, Payment, BillPayment
from tests.integration.test_base import QuickbooksUnitTestCase
@@ -381,12 +381,33 @@ class SendMixinTest(QuickbooksUnitTestCase):
class VoidMixinTest(QuickbooksUnitTestCase):
@patch('quickbooks.mixins.QuickBooks.post')
- def test_void(self, post):
+ def test_void_invoice(self, post):
invoice = Invoice()
invoice.Id = 2
invoice.void(qb=self.qb_client)
self.assertTrue(post.called)
+ @patch('quickbooks.mixins.QuickBooks.post')
+ def test_void_payment(self, post):
+ payment = Payment()
+ payment.Id = 2
+ payment.void(qb=self.qb_client)
+ self.assertTrue(post.called)
+
+ @patch('quickbooks.mixins.QuickBooks.post')
+ def test_void_sales_receipt(self, post):
+ sales_receipt = SalesReceipt()
+ sales_receipt.Id = 2
+ sales_receipt.void(qb=self.qb_client)
+ self.assertTrue(post.called)
+
+ @patch('quickbooks.mixins.QuickBooks.post')
+ def test_void_bill_payment(self, post):
+ bill_payment = BillPayment()
+ bill_payment.Id = 2
+ bill_payment.void(qb=self.qb_client)
+ self.assertTrue(post.called)
+
def test_delete_unsaved_exception(self):
from quickbooks.exceptions import QuickbooksException
|
Add the ability to void all voidable QB types
Currently, the [VoidMixin](https://github.com/ej2/python-quickbooks/blob/master/quickbooks/mixins.py#L127) is built to be able to void Invoices and uses the `operation=void` param. When it comes to voidable types in QB, we have
* Payment
* SalesReceipt
* BillPayment
* Invoice
Here, Invoice is actually the exception, not the rule. For the first three types, you void them with the `operation=update` and `include=void` params. Invoice is the only one that uses the `operation=void` param.
So currently, the VoidMixin only works for Invoices, and the [Payment type has a special `void` method](https://github.com/ej2/python-quickbooks/blob/master/quickbooks/objects/payment.py#L87) with this one-off functionality, added by [ZedObaia](https://github.com/ZedObaia) to fix his [issue on voiding Payments](https://github.com/ej2/python-quickbooks/issues/247). The other types are not voidable without custom code like
```
def void_receipt(receipt_id: str) -> bool:
    qb = get_client()
    receipt = QBObjects.SalesReceipt()
    sparse_update_data = {
        "Id": receipt_id,
        "SyncToken": 0,
        "sparse": True
    }
    try:
        endpoint = receipt.qbo_object_name.lower()
        url = "{0}/company/{1}/{2}".format(qb.api_url, qb.company_id, endpoint)
        qb.post(url, json.dumps(sparse_update_data), params={"include": "void"})
        return True
    except Exception:
        log.exception(f"Failed to void receipt {receipt_id}")
        return False
```
I would propose then that the VoidMixin be updated to use the `operation=update` and `include=void` params, and that the Invoice type should be the only one to have its own special `void` method with this one-off functionality that uses the `operation=void` param.
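A minimal sketch of what that generalized mixin could look like, reusing the request shape from the snippet above (the `QuickBooks` client and `qbo_object_name` usage are assumed from that snippet, not taken from the library source; `Invoice` would keep its own `void` built on `operation=void`):
```
import json

class VoidMixin(object):
    def void(self, qb=None):
        if not qb:
            qb = QuickBooks()  # assumed default-client pattern, as in the snippet above

        if not self.Id:
            raise qb.QuickbooksException('Cannot void unsaved object')

        data = {
            'Id': self.Id,
            'SyncToken': self.SyncToken,
            'sparse': True
        }
        endpoint = self.qbo_object_name.lower()
        url = "{0}/company/{1}/{2}".format(qb.api_url, qb.company_id, endpoint)
        # Payment, SalesReceipt and BillPayment void via operation=update + include=void
        return qb.post(url, json.dumps(data), params={'operation': 'update', 'include': 'void'})
```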
|
0.0
|
a02ca1ba6bac7ced8e1af07e6a04d7a46818df22
|
[
"tests/unit/test_mixins.py::VoidMixinTest::test_void_bill_payment",
"tests/unit/test_mixins.py::VoidMixinTest::test_void_sales_receipt"
] |
[
"tests/unit/test_mixins.py::ToJsonMixinTest::test_to_json",
"tests/unit/test_mixins.py::FromJsonMixinTest::test_from_json",
"tests/unit/test_mixins.py::FromJsonMixinTest::test_from_json_missing_detail_object",
"tests/unit/test_mixins.py::ToDictMixinTest::test_to_dict",
"tests/unit/test_mixins.py::ListMixinTest::test_all",
"tests/unit/test_mixins.py::ListMixinTest::test_all_with_qb",
"tests/unit/test_mixins.py::ListMixinTest::test_choose",
"tests/unit/test_mixins.py::ListMixinTest::test_choose_with_qb",
"tests/unit/test_mixins.py::ListMixinTest::test_count",
"tests/unit/test_mixins.py::ListMixinTest::test_filter",
"tests/unit/test_mixins.py::ListMixinTest::test_filter_with_qb",
"tests/unit/test_mixins.py::ListMixinTest::test_order_by",
"tests/unit/test_mixins.py::ListMixinTest::test_order_by_with_qb",
"tests/unit/test_mixins.py::ListMixinTest::test_query",
"tests/unit/test_mixins.py::ListMixinTest::test_query_with_qb",
"tests/unit/test_mixins.py::ListMixinTest::test_where",
"tests/unit/test_mixins.py::ListMixinTest::test_where_start_position_0",
"tests/unit/test_mixins.py::ListMixinTest::test_where_with_qb",
"tests/unit/test_mixins.py::ReadMixinTest::test_get",
"tests/unit/test_mixins.py::ReadMixinTest::test_get_with_qb",
"tests/unit/test_mixins.py::UpdateMixinTest::test_save_create",
"tests/unit/test_mixins.py::UpdateMixinTest::test_save_create_with_qb",
"tests/unit/test_mixins.py::UpdateMixinTest::test_save_update",
"tests/unit/test_mixins.py::UpdateMixinTest::test_save_update_with_qb",
"tests/unit/test_mixins.py::DownloadPdfTest::test_download_invoice",
"tests/unit/test_mixins.py::DownloadPdfTest::test_download_missing_id",
"tests/unit/test_mixins.py::ObjectListTest::test_object_list_mixin_with_primitives",
"tests/unit/test_mixins.py::ObjectListTest::test_object_list_mixin_with_qb_objects",
"tests/unit/test_mixins.py::DeleteMixinTest::test_delete",
"tests/unit/test_mixins.py::DeleteMixinTest::test_delete_unsaved_exception",
"tests/unit/test_mixins.py::DeleteNoIdMixinTest::test_delete",
"tests/unit/test_mixins.py::SendMixinTest::test_send",
"tests/unit/test_mixins.py::SendMixinTest::test_send_with_send_to_email",
"tests/unit/test_mixins.py::VoidMixinTest::test_delete_unsaved_exception",
"tests/unit/test_mixins.py::VoidMixinTest::test_void_invoice",
"tests/unit/test_mixins.py::VoidMixinTest::test_void_payment"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-09-20 12:48:04+00:00
|
mit
| 2,085 |
|
ejulio__spider-feeder-18
|
diff --git a/CHANGES.md b/CHANGES.md
index c610c2f..ed13d9e 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -11,6 +11,12 @@ Whenever possible, link the given PR with the feature/fix.
[Keep a Changelog](https://keepachangelog.com/en/1.0.0/), [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+## Development
+
+### Added
+
+* New setting `SPIDERFEEDER_INPUT_FORMAT` to set the file format or fall back to the file extension. [PR#18](https://github.com/ejulio/spider-feeder/pull/18)
+
## 0.2.0 (2019-08-27)
diff --git a/README.md b/README.md
index 1505701..d06fa3a 100644
--- a/README.md
+++ b/README.md
@@ -97,6 +97,11 @@ There are two extensions to load input data to your spiders.
`SPIDERFEEDER_INPUT_FILE_ENCODING` sets the file encoding. DEFAULT = `'utf-8'`.
+`SPIDERFEEDER_INPUT_FORMAT` sets the file format (`txt`, `csv`, `json`). DEFAULT = `None`.
+This setting is preferred over the file extension in `SPIDERFEEDER_INPUT_URI`.
+So, if `SPIDERFEEDER_INPUT_FORMAT` is set, this is the one to be used, otherwise
+it will fall back to the file extension in `SPIDERFEEDER_INPUT_URI`.
+
`SPIDERFEEDER_INPUT_FIELD` sets the url field when parsing `json` or `csv` files.
`SPIDERFEEDER_FILE_HANDLERS` is a set of functions to be matched with the given file scheme.
diff --git a/spider_feeder/store/file_store.py b/spider_feeder/store/file_store.py
index 8cd7ba1..b88a1e0 100644
--- a/spider_feeder/store/file_store.py
+++ b/spider_feeder/store/file_store.py
@@ -38,7 +38,8 @@ class FileStore(BaseStore):
super().__init__(settings)
self._input_file_uri = input_file_uri
self._settings = settings
- self._file_encoding = settings.get('SPIDERFEEDER_INPUT_FILE_ENCODING', 'utf-8')
+ self._input_file_encoding = settings.get('SPIDERFEEDER_INPUT_FILE_ENCODING', 'utf-8')
+ self._input_format = settings.get('SPIDERFEEDER_INPUT_FORMAT', None)
handlers = settings.getdict('SPIDERFEEDER_FILE_HANDLERS', {})
self._handlers = dict(self.FILE_HANDLERS, **handlers)
@@ -46,17 +47,24 @@ class FileStore(BaseStore):
parsers = settings.getdict('SPIDERFEEDER_FILE_PARSERS', {})
self._parsers = dict(self.FILE_PARSERS, **parsers)
+ @property
+ def _file_format(self):
+ if self._input_format:
+ return self._input_format
+
+ (_, file_extension) = path.splitext(self._input_file_uri)
+ return file_extension[1:] # remove the "."
+
def _open(self):
parsed = urlparse(self._input_file_uri)
logger.info(f'Opening file {self._input_file_uri} with scheme {parsed.scheme}.')
open = load_object(self._handlers[parsed.scheme])
- return open(self._input_file_uri, encoding=self._file_encoding)
+ return open(self._input_file_uri, encoding=self._input_file_encoding)
def _parse(self, fd):
- (_, file_extension) = path.splitext(self._input_file_uri)
- file_extension = file_extension[1:]
- logger.info(f'Parsing file {self._input_file_uri} with format {file_extension}.')
- parser = load_object(self._parsers[file_extension])
+ file_format = self._file_format
+ logger.info(f'Parsing file {self._input_file_uri} with format {file_format}.')
+ parser = load_object(self._parsers[file_format])
return parser(fd, self._settings)
def read_input_items(self):
|
ejulio/spider-feeder
|
33b16cc74b46b119cf3fd483c789dad70f3a9102
|
diff --git a/tests/store/test_file_store.py b/tests/store/test_file_store.py
index a255af3..913f215 100644
--- a/tests/store/test_file_store.py
+++ b/tests/store/test_file_store.py
@@ -98,6 +98,54 @@ def test_load_json_file(mocker, uri_scheme, file_opener):
]
[email protected]('uri_scheme, file_opener', [
+ ('file://', 'spider_feeder.store.file_handler.local.open'),
+ ('s3://', 'spider_feeder.store.file_handler.s3.open'),
+ ('', 'spider_feeder.store.file_handler.local.open'),
+])
+def test_get_file_format_from_setting(mocker, uri_scheme, file_opener):
+ file_content = StringIO('\n'.join(['http://url1.com', 'http://url2.com']))
+ mock = mocker.patch(file_opener, return_value=file_content, autospec=True)
+
+ store = FileStore(f'{uri_scheme}temp', Settings({
+ 'SPIDERFEEDER_INPUT_FORMAT': 'txt'
+ }))
+
+ store_meta = []
+ store_urls = []
+ for (url, meta) in store:
+ store_urls.append(url)
+ store_meta.append(meta)
+
+ mock.assert_called_with(f'{uri_scheme}temp', encoding='utf-8')
+ assert store_meta == [{}, {}]
+ assert store_urls == ['http://url1.com', 'http://url2.com']
+
+
[email protected]('uri_scheme, file_opener', [
+ ('file://', 'spider_feeder.store.file_handler.local.open'),
+ ('s3://', 'spider_feeder.store.file_handler.s3.open'),
+ ('', 'spider_feeder.store.file_handler.local.open'),
+])
+def test_get_file_format_setting_is_preferred_over_file_extension(mocker, uri_scheme, file_opener):
+ file_content = StringIO('\n'.join(['http://url1.com', 'http://url2.com']))
+ mock = mocker.patch(file_opener, return_value=file_content, autospec=True)
+
+ store = FileStore(f'{uri_scheme}temp.csv', Settings({
+ 'SPIDERFEEDER_INPUT_FORMAT': 'txt'
+ }))
+
+ store_meta = []
+ store_urls = []
+ for (url, meta) in store:
+ store_urls.append(url)
+ store_meta.append(meta)
+
+ mock.assert_called_with(f'{uri_scheme}temp.csv', encoding='utf-8')
+ assert store_meta == [{}, {}]
+ assert store_urls == ['http://url1.com', 'http://url2.com']
+
+
def test_fail_if_input_field_and_not_dict_data(mocker):
mocker.patch(
'spider_feeder.store.file_handler.local.open',
|
Add a setting for input format
Currently the input format is inferred from the URI extension.
However, it may fail for URLs or files without an extension.
Add a new setting, `SPIDERFEEDER_INPUT_FORMAT`, that sets the file format.
If it is not set, it falls back to the file extension.
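A sketch of the intended usage (URI and values are illustrative):
```python
# settings.py (illustrative)
SPIDERFEEDER_INPUT_URI = 's3://my-bucket/input-urls'  # no file extension to infer from
SPIDERFEEDER_INPUT_FORMAT = 'csv'                     # parse the content as CSV anyway
```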
|
0.0
|
33b16cc74b46b119cf3fd483c789dad70f3a9102
|
[
"tests/store/test_file_store.py::test_get_file_format_from_setting[file://-spider_feeder.store.file_handler.local.open]",
"tests/store/test_file_store.py::test_get_file_format_from_setting[s3://-spider_feeder.store.file_handler.s3.open]",
"tests/store/test_file_store.py::test_get_file_format_from_setting[-spider_feeder.store.file_handler.local.open]",
"tests/store/test_file_store.py::test_get_file_format_setting_is_preferred_over_file_extension[file://-spider_feeder.store.file_handler.local.open]",
"tests/store/test_file_store.py::test_get_file_format_setting_is_preferred_over_file_extension[s3://-spider_feeder.store.file_handler.s3.open]",
"tests/store/test_file_store.py::test_get_file_format_setting_is_preferred_over_file_extension[-spider_feeder.store.file_handler.local.open]"
] |
[
"tests/store/test_file_store.py::test_load_txt_file[file://-spider_feeder.store.file_handler.local.open]",
"tests/store/test_file_store.py::test_load_txt_file[s3://-spider_feeder.store.file_handler.s3.open]",
"tests/store/test_file_store.py::test_load_txt_file[-spider_feeder.store.file_handler.local.open]",
"tests/store/test_file_store.py::test_load_csv_file[file://-spider_feeder.store.file_handler.local.open]",
"tests/store/test_file_store.py::test_load_csv_file[s3://-spider_feeder.store.file_handler.s3.open]",
"tests/store/test_file_store.py::test_load_csv_file[-spider_feeder.store.file_handler.local.open]",
"tests/store/test_file_store.py::test_load_json_file[file://-spider_feeder.store.file_handler.local.open]",
"tests/store/test_file_store.py::test_load_json_file[s3://-spider_feeder.store.file_handler.s3.open]",
"tests/store/test_file_store.py::test_load_json_file[-spider_feeder.store.file_handler.local.open]",
"tests/store/test_file_store.py::test_fail_if_input_field_and_not_dict_data",
"tests/store/test_file_store.py::test_file_encoding",
"tests/store/test_file_store.py::test_custom_file_handler",
"tests/store/test_file_store.py::test_custom_file_parser"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-09-04 12:29:53+00:00
|
mit
| 2,086 |
|
ejulio__spider-feeder-20
|
diff --git a/CHANGES.md b/CHANGES.md
index 56a5d28..31b5399 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -16,6 +16,7 @@ Whenever possible, link the given PR with the feature/fix.
### Added
* New setting `SPIDERFEEDER_INPUT_FORMAT` to set the file format or fall back to the file extension. [PR#18](https://github.com/ejulio/spider-feeder/pull/18)
+* Fall back to `scrapy` AWS settings if not provided. [PR#20](https://github.com/ejulio/spider-feeder/pull/20)
* Fixed AWS settings set in Dash (Scrapy Cloud) UI. [PR#21](https://github.com/ejulio/spider-feeder/pull/21)
## 0.2.0 (2019-08-27)
diff --git a/README.md b/README.md
index e57a373..747b426 100644
--- a/README.md
+++ b/README.md
@@ -93,6 +93,8 @@ There are two extensions to load input data to your spiders.
* Supported schemes are: `''` or `file` for local files and `s3` for AWS S3 (requires `botocore`)
* When using `s3`, the URI must be formatted as `s3://key_id:secret_key@bucket/blob.txt`
* If `key_id` and `secret_key` are not provided in the URI, they can be provided by the following settings: `SPIDERFEEDER_AWS_ACCESS_KEY_ID` and `SPIDERFEEDER_AWS_SECRET_ACCESS_KEY`.
+ * If they are not provided by these settings, they will fall back to `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`.
+ * If not set, they can be set as environment variables from `botocore`, but a warning will be logged by `spider-feeder`.
* When using `collections`, it'll load URLs from [Scrapinghub Collections](https://doc.scrapinghub.com/api/collections.html)
`SPIDERFEEDER_INPUT_FILE_ENCODING` sets the file encoding. DEFAULT = `'utf-8'`.
diff --git a/spider_feeder/store/file_handler/s3.py b/spider_feeder/store/file_handler/s3.py
index edd0389..dc46e4b 100644
--- a/spider_feeder/store/file_handler/s3.py
+++ b/spider_feeder/store/file_handler/s3.py
@@ -3,23 +3,52 @@ This module handles `open()` for files stored in AWS S3.
'''
from io import StringIO
from urllib.parse import urlparse
+import logging
from botocore.session import get_session
+logger = logging.getLogger(__name__)
+
+
def open(blob_uri, encoding, settings):
parsed = urlparse(blob_uri)
+ (aws_access_key_id, aws_secret_access_key) = _get_aws_keys(parsed, settings)
session = get_session()
client = session.create_client(
's3',
- aws_access_key_id=parsed.username or settings['SPIDERFEEDER_AWS_ACCESS_KEY_ID'],
- aws_secret_access_key=parsed.password or settings['SPIDERFEEDER_AWS_SECRET_ACCESS_KEY'],
+ aws_access_key_id=aws_access_key_id,
+ aws_secret_access_key=aws_secret_access_key,
)
bucket_name = parsed.hostname
key_name = parsed.path[1:]
-
- response = client.get_object(Bucket=bucket_name, Key=key_name, ResponseContentEncoding=encoding)
+ response = client.get_object(
+ Bucket=bucket_name,
+ Key=key_name,
+ ResponseContentEncoding=encoding
+ )
content = response['Body'].read()
return StringIO(content.decode(encoding))
+
+
+def _get_aws_keys(parsed_uri, settings):
+ aws_access_key_id = parsed_uri.username
+ aws_secret_access_key = parsed_uri.password
+
+ if not aws_access_key_id and not aws_secret_access_key:
+ aws_access_key_id = settings.get('SPIDERFEEDER_AWS_ACCESS_KEY_ID')
+ aws_secret_access_key = settings.get('SPIDERFEEDER_AWS_SECRET_ACCESS_KEY')
+
+ if not aws_access_key_id and not aws_secret_access_key:
+ aws_access_key_id = settings.get('AWS_ACCESS_KEY_ID')
+ aws_secret_access_key = settings.get('AWS_SECRET_ACCESS_KEY')
+
+ if not aws_access_key_id and not aws_secret_access_key:
+ logger.warning(
+ 'No AWS keys were set in the input URI or project settings. '
+ 'If that was intentional, make sure to have them set as environment variables.'
+ )
+
+ return (aws_access_key_id, aws_secret_access_key)
|
ejulio/spider-feeder
|
b00dbb19cb120d8583ce6213389b9a49e81de4db
|
diff --git a/tests/store/file_handler/test_s3.py b/tests/store/file_handler/test_s3.py
index 8abbebd..5895be6 100644
--- a/tests/store/file_handler/test_s3.py
+++ b/tests/store/file_handler/test_s3.py
@@ -134,3 +134,29 @@ def test_open_s3_blob_using_project_credentials(botocore_client, mocker):
aws_access_key_id='key_id',
aws_secret_access_key='secret',
)
+
+
+def test_open_s3_blob_using_scrapy_credentials(botocore_client, mocker):
+ (stubber, session_mock) = botocore_client(mocker)
+ with stubber:
+ file_content = 'http://url1.com\nhttps://url1.com'
+ response = get_object_response(file_content)
+ expected_params = {
+ 'Bucket': 'bucket', 'Key': 'blob.txt', 'ResponseContentEncoding': 'utf-8'
+ }
+ stubber.add_response('get_object', response, expected_params)
+
+ s3.open(
+ 's3://bucket/blob.txt',
+ encoding='utf-8',
+ settings=Settings({
+ 'AWS_ACCESS_KEY_ID': 'some_key_id',
+ 'AWS_SECRET_ACCESS_KEY': 'some_secret'
+ })
+ )
+
+ session_mock.create_client.assert_called_once_with(
+ 's3',
+ aws_access_key_id='some_key_id',
+ aws_secret_access_key='some_secret',
+ )
|
Fallback to scrapy AWS settings if not provided
Currently, when loading files from _S3_, we need to set `SPIDERFEEDER_AWS_ACCESS_KEY_ID`.
However, it should be common to have the same user for data delivery and loading the files from _S3_.
So, to avoid configuring both `spider-feeder` and `scrapy` separately, `spider-feeder` can fall back to the `scrapy` settings if its specific ones are not set.
[scrapy aws settings](https://docs.scrapy.org/en/latest/topics/feed-exports.html#s3)
Basically, read from `SPIDERFEEDER_AWS_ACCESS_KEY_ID` or the URI.
If that is empty, get from `AWS_ACCESS_KEY_ID`.
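Sketched as code, the lookup order would be something like (illustrative; `parsed_uri` is the parsed `SPIDERFEEDER_INPUT_URI`):
```python
aws_access_key_id = (
    parsed_uri.username
    or settings.get('SPIDERFEEDER_AWS_ACCESS_KEY_ID')
    or settings.get('AWS_ACCESS_KEY_ID')
)
# ...and the same chain for the secret key
```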
|
0.0
|
b00dbb19cb120d8583ce6213389b9a49e81de4db
|
[
"tests/store/file_handler/test_s3.py::test_open_s3_blob_using_scrapy_credentials"
] |
[
"tests/store/file_handler/test_s3.py::test_open_s3_blob",
"tests/store/file_handler/test_s3.py::test_open_encoded_s3_blob",
"tests/store/file_handler/test_s3.py::test_open_s3_blob_using_uri_credentials",
"tests/store/file_handler/test_s3.py::test_open_s3_blob_using_project_credentials"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-09-12 20:17:47+00:00
|
mit
| 2,087 |
|
ejulio__spider-feeder-22
|
diff --git a/CHANGES.md b/CHANGES.md
index 31b5399..2557fdd 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -18,6 +18,7 @@ Whenever possible, link the given PR with the feature/fix.
* New setting `SPIDERFEEDER_INPUT_FORMAT` to set the file format or fall back to the file extension. [PR#18](https://github.com/ejulio/spider-feeder/pull/18)
* Fall back to `scrapy` AWS settings if not provided. [PR#20](https://github.com/ejulio/spider-feeder/pull/20)
* Fixed AWS settings set in Dash (Scrapy Cloud) UI. [PR#21](https://github.com/ejulio/spider-feeder/pull/21)
+* New _schemes_ `http` and `https`. [PR#22](https://github.com/ejulio/spider-feeder/pull/22)
## 0.2.0 (2019-08-27)
diff --git a/README.md b/README.md
index 747b426..9bc97d8 100644
--- a/README.md
+++ b/README.md
@@ -88,14 +88,17 @@ There are two extensions to load input data to your spiders.
## Settings
`SPIDERFEEDER_INPUT_URI` is the URI to load URLs from.
-* If _scheme_ (`local`, `s3`, `collections`) is not provided, it'll use `local`
+* If _scheme_ (`file`, `s3`, `collections`) is not provided, it'll default to `file`
* It can be formatted using spider attributes like `%(param)s` (similar to `FEED_URI` in scrapy)
-* Supported schemes are: `''` or `file` for local files and `s3` for AWS S3 (requires `botocore`)
-* When using `s3`, the URI must be formatted as `s3://key_id:secret_key@bucket/blob.txt`
-* If `key_id` and `secret_key` are not provided in the URI, they can be provided by the following settings: `SPIDERFEEDER_AWS_ACCESS_KEY_ID` and `SPIDERFEEDER_AWS_SECRET_ACCESS_KEY`.
- * If they are not provided by these settings, they will fall back to `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`.
- * If not set, they can be set as environment variables from `botocore`, but a warning will be logged by `spider-feeder`.
-* When using `collections`, it'll load URLs from [Scrapinghub Collections](https://doc.scrapinghub.com/api/collections.html)
+* Supported schemes are:
+ * `''` or `file` for local files
+ * `s3` for AWS S3 (requires `botocore`)
+ * The URI can be formatted as `s3://key_id:secret_key@bucket/blob.txt`
+ * If `key_id` and `secret_key` are not provided in the URI, they can be provided by the following settings: `SPIDERFEEDER_AWS_ACCESS_KEY_ID` and `SPIDERFEEDER_AWS_SECRET_ACCESS_KEY`.
+ * If they are not provided by these settings, they will fall back to `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`.
+ * If not set, they can be set as environment variables from `botocore`, but a warning will be logged by `spider-feeder`.
+ * `collections` for [Scrapinghub Collections](https://doc.scrapinghub.com/api/collections.html)
+ * `http` or `https` to load from any URI
`SPIDERFEEDER_INPUT_FILE_ENCODING` sets the file encoding. DEFAULT = `'utf-8'`.
diff --git a/spider_feeder/loaders.py b/spider_feeder/loaders.py
index 2853dc6..5dfed4a 100644
--- a/spider_feeder/loaders.py
+++ b/spider_feeder/loaders.py
@@ -23,6 +23,8 @@ class BaseLoader:
'': 'spider_feeder.store.file_store.FileStore',
'file': 'spider_feeder.store.file_store.FileStore',
's3': 'spider_feeder.store.file_store.FileStore',
+ 'http': 'spider_feeder.store.file_store.FileStore',
+ 'https': 'spider_feeder.store.file_store.FileStore',
'collections': 'spider_feeder.store.scrapinghub_collection.ScrapinghubCollectionStore',
}
diff --git a/spider_feeder/store/file_handler/http.py b/spider_feeder/store/file_handler/http.py
new file mode 100644
index 0000000..e64fb01
--- /dev/null
+++ b/spider_feeder/store/file_handler/http.py
@@ -0,0 +1,7 @@
+from io import StringIO
+from urllib.request import urlopen
+
+
+def open(url, encoding, settings):
+ response = urlopen(url)
+ return StringIO(response.read())
diff --git a/spider_feeder/store/file_store.py b/spider_feeder/store/file_store.py
index 870c0ce..70b7f23 100644
--- a/spider_feeder/store/file_store.py
+++ b/spider_feeder/store/file_store.py
@@ -26,6 +26,8 @@ class FileStore(BaseStore):
'': 'spider_feeder.store.file_handler.local.open',
'file': 'spider_feeder.store.file_handler.local.open',
's3': 'spider_feeder.store.file_handler.s3.open',
+ 'http': 'spider_feeder.store.file_handler.http.open',
+ 'https': 'spider_feeder.store.file_handler.http.open',
}
FILE_PARSERS = {
|
ejulio/spider-feeder
|
aaf30fb13fe1e7b22ee94e350748cd69a0cb7d5b
|
diff --git a/tests/store/file_handler/test_http.py b/tests/store/file_handler/test_http.py
new file mode 100644
index 0000000..7e9cc6e
--- /dev/null
+++ b/tests/store/file_handler/test_http.py
@@ -0,0 +1,12 @@
+from spider_feeder.store.file_handler import http
+
+
+def test_open_http_file(mocker):
+ urlopen_mock = mocker.patch('spider_feeder.store.file_handler.http.urlopen')
+ urlopen_mock().read.return_value = 'FILE CONTENT'
+
+ url = 'https://someurl.com/index?qs=1'
+ fd = http.open(url, encoding='utf-8', settings=None)
+
+ urlopen_mock.assert_called_with(url)
+ assert fd.read() == 'FILE CONTENT'
diff --git a/tests/store/test_file_store.py b/tests/store/test_file_store.py
index b74e6e2..fca5e60 100644
--- a/tests/store/test_file_store.py
+++ b/tests/store/test_file_store.py
@@ -15,11 +15,16 @@ def custom_parser():
pass
[email protected]('uri_scheme, file_opener', [
+SCHEMES_AND_OPENERS_TO_MOCK = [
('file://', 'spider_feeder.store.file_handler.local.open'),
('s3://', 'spider_feeder.store.file_handler.s3.open'),
('', 'spider_feeder.store.file_handler.local.open'),
-])
+ ('http://', 'spider_feeder.store.file_handler.http.open'),
+ ('https://', 'spider_feeder.store.file_handler.http.open'),
+]
+
+
[email protected]('uri_scheme, file_opener', SCHEMES_AND_OPENERS_TO_MOCK)
def test_load_txt_file(mocker, uri_scheme, file_opener):
file_content = StringIO('\n'.join(['http://url1.com', 'http://url2.com']))
mock = mocker.patch(file_opener, return_value=file_content, autospec=True)
@@ -38,11 +43,7 @@ def test_load_txt_file(mocker, uri_scheme, file_opener):
assert store_urls == ['http://url1.com', 'http://url2.com']
[email protected]('uri_scheme, file_opener', [
- ('file://', 'spider_feeder.store.file_handler.local.open'),
- ('s3://', 'spider_feeder.store.file_handler.s3.open'),
- ('', 'spider_feeder.store.file_handler.local.open'),
-])
[email protected]('uri_scheme, file_opener', SCHEMES_AND_OPENERS_TO_MOCK)
def test_load_csv_file(mocker, uri_scheme, file_opener):
file_content = StringIO('\n'.join([
'"url_id","url"',
@@ -70,11 +71,7 @@ def test_load_csv_file(mocker, uri_scheme, file_opener):
]
[email protected]('uri_scheme, file_opener', [
- ('file://', 'spider_feeder.store.file_handler.local.open'),
- ('s3://', 'spider_feeder.store.file_handler.s3.open'),
- ('', 'spider_feeder.store.file_handler.local.open'),
-])
[email protected]('uri_scheme, file_opener', SCHEMES_AND_OPENERS_TO_MOCK)
def test_load_json_file(mocker, uri_scheme, file_opener):
file_content = StringIO(json.dumps([
{'url_id': '1', 'url': 'http://url1.com'},
@@ -101,11 +98,7 @@ def test_load_json_file(mocker, uri_scheme, file_opener):
]
[email protected]('uri_scheme, file_opener', [
- ('file://', 'spider_feeder.store.file_handler.local.open'),
- ('s3://', 'spider_feeder.store.file_handler.s3.open'),
- ('', 'spider_feeder.store.file_handler.local.open'),
-])
[email protected]('uri_scheme, file_opener', SCHEMES_AND_OPENERS_TO_MOCK)
def test_get_file_format_from_setting(mocker, uri_scheme, file_opener):
file_content = StringIO('\n'.join(['http://url1.com', 'http://url2.com']))
mock = mocker.patch(file_opener, return_value=file_content, autospec=True)
@@ -126,11 +119,7 @@ def test_get_file_format_from_setting(mocker, uri_scheme, file_opener):
assert store_urls == ['http://url1.com', 'http://url2.com']
[email protected]('uri_scheme, file_opener', [
- ('file://', 'spider_feeder.store.file_handler.local.open'),
- ('s3://', 'spider_feeder.store.file_handler.s3.open'),
- ('', 'spider_feeder.store.file_handler.local.open'),
-])
[email protected]('uri_scheme, file_opener', SCHEMES_AND_OPENERS_TO_MOCK)
def test_get_file_format_setting_is_preferred_over_file_extension(mocker, uri_scheme, file_opener):
file_content = StringIO('\n'.join(['http://url1.com', 'http://url2.com']))
mock = mocker.patch(file_opener, return_value=file_content, autospec=True)
diff --git a/tests/test_loaders.py b/tests/test_loaders.py
index 4a9c140..01427fd 100644
--- a/tests/test_loaders.py
+++ b/tests/test_loaders.py
@@ -32,6 +32,8 @@ def test_start_urls_loader_not_configured(get_crawler):
('s3://', 'spider_feeder.store.file_store.FileStore'),
('file://', 'spider_feeder.store.file_store.FileStore'),
('', 'spider_feeder.store.file_store.FileStore'),
+ ('http://', 'spider_feeder.store.file_store.FileStore'),
+ ('https://', 'spider_feeder.store.file_store.FileStore'),
('collections://', 'spider_feeder.store.scrapinghub_collection.ScrapinghubCollectionStore'),
])
def test_start_urls_loader_open_store_given_scheme(get_crawler, mocker, scheme, store_cls):
|
Allow http inputs
Add new schemes to allow http/https input files.
Also, add a test case for google drive public files.
Say the URL https://drive.google.com/drive/folders/file-id?usp=sharing is given
Then we can download it from https://drive.google.com/uc?export=download&id=file-id
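An illustrative rewrite for that URL shape (the id extraction below only handles links of this exact form):
```python
share_url = 'https://drive.google.com/drive/folders/file-id?usp=sharing'
file_id = share_url.rsplit('/', 1)[-1].split('?')[0]
download_url = 'https://drive.google.com/uc?export=download&id=' + file_id
```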
|
0.0
|
aaf30fb13fe1e7b22ee94e350748cd69a0cb7d5b
|
[
"tests/store/file_handler/test_http.py::test_open_http_file",
"tests/store/test_file_store.py::test_load_txt_file[file://-spider_feeder.store.file_handler.local.open]",
"tests/store/test_file_store.py::test_load_txt_file[s3://-spider_feeder.store.file_handler.s3.open]",
"tests/store/test_file_store.py::test_load_txt_file[-spider_feeder.store.file_handler.local.open]",
"tests/store/test_file_store.py::test_load_txt_file[http://-spider_feeder.store.file_handler.http.open]",
"tests/store/test_file_store.py::test_load_txt_file[https://-spider_feeder.store.file_handler.http.open]",
"tests/store/test_file_store.py::test_load_csv_file[file://-spider_feeder.store.file_handler.local.open]",
"tests/store/test_file_store.py::test_load_csv_file[s3://-spider_feeder.store.file_handler.s3.open]",
"tests/store/test_file_store.py::test_load_csv_file[-spider_feeder.store.file_handler.local.open]",
"tests/store/test_file_store.py::test_load_csv_file[http://-spider_feeder.store.file_handler.http.open]",
"tests/store/test_file_store.py::test_load_csv_file[https://-spider_feeder.store.file_handler.http.open]",
"tests/store/test_file_store.py::test_load_json_file[file://-spider_feeder.store.file_handler.local.open]",
"tests/store/test_file_store.py::test_load_json_file[s3://-spider_feeder.store.file_handler.s3.open]",
"tests/store/test_file_store.py::test_load_json_file[-spider_feeder.store.file_handler.local.open]",
"tests/store/test_file_store.py::test_load_json_file[http://-spider_feeder.store.file_handler.http.open]",
"tests/store/test_file_store.py::test_load_json_file[https://-spider_feeder.store.file_handler.http.open]",
"tests/store/test_file_store.py::test_get_file_format_from_setting[file://-spider_feeder.store.file_handler.local.open]",
"tests/store/test_file_store.py::test_get_file_format_from_setting[s3://-spider_feeder.store.file_handler.s3.open]",
"tests/store/test_file_store.py::test_get_file_format_from_setting[-spider_feeder.store.file_handler.local.open]",
"tests/store/test_file_store.py::test_get_file_format_from_setting[http://-spider_feeder.store.file_handler.http.open]",
"tests/store/test_file_store.py::test_get_file_format_from_setting[https://-spider_feeder.store.file_handler.http.open]",
"tests/store/test_file_store.py::test_get_file_format_setting_is_preferred_over_file_extension[file://-spider_feeder.store.file_handler.local.open]",
"tests/store/test_file_store.py::test_get_file_format_setting_is_preferred_over_file_extension[s3://-spider_feeder.store.file_handler.s3.open]",
"tests/store/test_file_store.py::test_get_file_format_setting_is_preferred_over_file_extension[-spider_feeder.store.file_handler.local.open]",
"tests/store/test_file_store.py::test_get_file_format_setting_is_preferred_over_file_extension[http://-spider_feeder.store.file_handler.http.open]",
"tests/store/test_file_store.py::test_get_file_format_setting_is_preferred_over_file_extension[https://-spider_feeder.store.file_handler.http.open]",
"tests/store/test_file_store.py::test_fail_if_input_field_and_not_dict_data",
"tests/store/test_file_store.py::test_file_encoding",
"tests/store/test_file_store.py::test_custom_file_handler",
"tests/store/test_file_store.py::test_custom_file_parser",
"tests/test_loaders.py::test_start_urls_loader_not_configured"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-09-17 19:08:25+00:00
|
mit
| 2,088 |
|
ekzhu__datasketch-234
|
diff --git a/datasketch/lshforest.py b/datasketch/lshforest.py
index 9f3455b..a02569d 100644
--- a/datasketch/lshforest.py
+++ b/datasketch/lshforest.py
@@ -1,5 +1,6 @@
from collections import defaultdict
from typing import Hashable, List
+import numpy as np
from datasketch.minhash import MinHash
@@ -128,6 +129,30 @@ class MinHashLSHForest(object):
r -= 1
return list(results)
+ def get_minhash_hashvalues(self, key: Hashable) -> np.ndarray:
+ """
+ Returns the hashvalues from the MinHash object that corresponds to the given key in the LSHForest,
+ if it exists. This is useful for when we want to reconstruct the original MinHash
+ object to manually check the Jaccard Similarity for the top-k results from a query.
+
+ Args:
+ key (Hashable): The key whose MinHash hashvalues we want to retrieve.
+
+ Returns:
+ hashvalues: The hashvalues for the MinHash object corresponding to the given key.
+ """
+ byteslist = self.keys.get(key, None)
+ if byteslist is None:
+ raise KeyError(f"The provided key does not exist in the LSHForest: {key}")
+ hashvalue_byte_size = len(byteslist[0])//8
+ hashvalues = np.empty(len(byteslist)*hashvalue_byte_size, dtype=np.uint64)
+ for index, item in enumerate(byteslist):
+ # unswap the bytes, as their representation is flipped during storage
+ hv_segment = np.frombuffer(item, dtype=np.uint64).byteswap()
+ curr_index = index*hashvalue_byte_size
+ hashvalues[curr_index:curr_index+hashvalue_byte_size] = hv_segment
+ return hashvalues
+
def _binary_search(self, n, func):
"""
https://golang.org/src/sort/search.go?s=2247:2287#L49
|
ekzhu/datasketch
|
9973b09852a5018f23d831b1868da3a5d2ce6a3b
|
diff --git a/test/test_lshforest.py b/test/test_lshforest.py
index 77e7bf4..400a9af 100644
--- a/test/test_lshforest.py
+++ b/test/test_lshforest.py
@@ -62,6 +62,18 @@ class TestMinHashLSHForest(unittest.TestCase):
results = forest.query(data[key], 10)
self.assertIn(key, results)
+ def test_get_minhash_hashvalues(self):
+ forest, data = self._setup()
+ for key in data:
+ minhash_ori = data[key]
+ hashvalues = forest.get_minhash_hashvalues(key)
+ minhash_retrieved = MinHash(hashvalues=hashvalues)
+ retrieved_hashvalues = minhash_retrieved.hashvalues
+ self.assertEqual(len(hashvalues), len(retrieved_hashvalues))
+ self.assertEqual(minhash_retrieved.jaccard(minhash_ori), 1.0)
+ for i in range(len(retrieved_hashvalues)):
+ self.assertEqual(hashvalues[i], retrieved_hashvalues[i])
+
def test_pickle(self):
forest, _ = self._setup()
forest2 = pickle.loads(pickle.dumps(forest))
|
Implementing MinHash retrieval from keys for MinHashLSHForest
Hi, I've been using the `MinHashLSHForest` class to do some deduplication, and part of that pipeline is to retrieve the top-k items and then estimate the Jaccard similarities with each of those items. Obviously this requires reconstructing the `MinHash` object related to the given key returned by `MinHashLSHForest.query()`.
This seems like a decently common use case, since we often screen the results of LSH Forest using the Jaccard similarity. My question is: do you feel this is common enough to implement such a function as part of the `MinHashLSHForest` class?
I've implemented a simple way to recompute the original `hashvalues` array from the `keys` dictionary in `MinHashLSHForest` as follows. I'd be happy to submit a PR, but just wanted to know first how this aligns with the vision for this package.
```python
"""
Takes the list of bytes-like objects generated by the LSH Forest
that corresponds to some given key and recovers the hashvalues
which can be converted back into a MinHash to compute Jaccard Similarity
Given a number of prefix trees, L, when we insert a (key, MinHash) pair
the LSH Forest creates L byteslike items each corresponding to a range of
hashvalues from the original MinHash object for a given key. Each range is of
size num_perm / L. Therefore here we convert these items from bytes-like objects back into
arrays of unsigned integers and then concatenate them so that they are in a representation
that we can build a MinHash object with. Namely, we return an array of unsigned integers
of length num_perm that represent hashvalues from each of num_perm hash functions
chosen during the MinHash creation.
"""
import numpy as np

def byteslist_to_hashvalues(byteslist):
    hashvalues = np.array([], dtype=np.uint64)
    for item in byteslist:
        # unswap the bytes, as their representation is flipped during storage
        hv_segment = np.frombuffer(item, dtype=np.uint64).byteswap()
        hashvalues = np.append(hashvalues, hv_segment)
    return hashvalues
```
where we might call this by using
```python
lsh = MinHashLSHForest(...)
hv = byteslist_to_hashvalues(lsh.keys[mykey])
mh = MinHash(hashvalues=hv)
# now use mh.jaccard() ...
...
```
A unit test would involve inserting a `MinHash` into `MinHashLSHForest` and then reconstructing it and checking for `jaccard_sim == 1.0`.
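A minimal sketch of such a test, assuming `byteslist_to_hashvalues` from above is in scope (`num_perm` and the tokens are illustrative):
```python
from datasketch import MinHash, MinHashLSHForest

mh = MinHash(num_perm=128)
for token in [b'cat', b'dog', b'fish']:
    mh.update(token)

forest = MinHashLSHForest(num_perm=128)
forest.add('key', mh)
forest.index()

recovered = MinHash(hashvalues=byteslist_to_hashvalues(forest.keys['key']))
assert recovered.jaccard(mh) == 1.0
```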
|
0.0
|
9973b09852a5018f23d831b1868da3a5d2ce6a3b
|
[
"test/test_lshforest.py::TestMinHashLSHForest::test_get_minhash_hashvalues"
] |
[
"test/test_lshforest.py::TestMinHashLSHForest::test__H",
"test/test_lshforest.py::TestMinHashLSHForest::test_init",
"test/test_lshforest.py::TestMinHashLSHForest::test_pickle",
"test/test_lshforest.py::TestMinHashLSHForest::test_query",
"test/test_lshforest.py::TestWeightedMinHashLSHForest::test__H",
"test/test_lshforest.py::TestWeightedMinHashLSHForest::test_pickle",
"test/test_lshforest.py::TestWeightedMinHashLSHForest::test_query"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2024-03-02 20:52:33+00:00
|
mit
| 2,089 |
|
elastic__elasticsearch-dsl-py-752
|
diff --git a/elasticsearch_dsl/analysis.py b/elasticsearch_dsl/analysis.py
index 8424283..c2abd94 100644
--- a/elasticsearch_dsl/analysis.py
+++ b/elasticsearch_dsl/analysis.py
@@ -19,9 +19,9 @@ class AnalysisBase(object):
class CustomAnalysis(object):
name = 'custom'
- def __init__(self, name, builtin_type='custom', **kwargs):
+ def __init__(self, filter_name, builtin_type='custom', **kwargs):
self._builtin_type = builtin_type
- self._name = name
+ self._name = filter_name
super(CustomAnalysis, self).__init__(**kwargs)
def to_dict(self):
diff --git a/elasticsearch_dsl/faceted_search.py b/elasticsearch_dsl/faceted_search.py
index 795132f..129b4fc 100644
--- a/elasticsearch_dsl/faceted_search.py
+++ b/elasticsearch_dsl/faceted_search.py
@@ -145,7 +145,8 @@ class DateHistogramFacet(Facet):
# so we need to set key to 0 to avoid TypeError exception
if bucket['key'] is None:
bucket['key'] = 0
- return datetime.utcfromtimestamp(int(bucket['key']) / 1000)
+ # Preserve milliseconds in the datetime
+ return datetime.utcfromtimestamp(int(bucket['key']) / 1000.0)
else:
return bucket['key']
diff --git a/elasticsearch_dsl/field.py b/elasticsearch_dsl/field.py
index 129b53c..d895e7e 100644
--- a/elasticsearch_dsl/field.py
+++ b/elasticsearch_dsl/field.py
@@ -218,7 +218,8 @@ class Date(Field):
if isinstance(data, date):
return data
if isinstance(data, int):
- return datetime.utcfromtimestamp(data / 1000)
+ # Divide by a float to preserve milliseconds on the datetime.
+ return datetime.utcfromtimestamp(data / 1000.0)
try:
# TODO: add format awareness
|
elastic/elasticsearch-dsl-py
|
269fef7fa12333f7622c3694df75a1b296d87ae2
|
diff --git a/test_elasticsearch_dsl/test_analysis.py b/test_elasticsearch_dsl/test_analysis.py
index 014c43d..6dc3c09 100644
--- a/test_elasticsearch_dsl/test_analysis.py
+++ b/test_elasticsearch_dsl/test_analysis.py
@@ -79,3 +79,11 @@ def test_custom_analyzer_can_collect_custom_items():
}
} == a.get_analysis_definition()
+def test_stemmer_analyzer_can_pass_name():
+ t = analysis.token_filter('my_english_filter', name="minimal_english", type="stemmer")
+ assert t.to_dict() == 'my_english_filter'
+ assert {
+ "type" : "stemmer",
+ "name" : "minimal_english"
+ } == t.get_definition()
+
|
Datetimes dropping milliseconds
The problem causes dates like '1970-01-01T00:17:47.045Z' in Elasticsearch to end up as a datetime like
datetime.datetime(1970, 1, 1, 0, 17, 47)
My expectation is that this value should produce a datetime like
datetime.datetime(1970, 1, 1, 0, 17, 47, 45000)
https://github.com/elastic/elasticsearch-dsl-py/blob/269fef7fa12333f7622c3694df75a1b296d87ae2/elasticsearch_dsl/field.py#L221
This is caused by integer division on the line referenced above. Changing this line to divide by 1000.0 fixes the issue.
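A quick illustration (`//` stands in for Python 2's integer `/`):
```python
from datetime import datetime

millis = 1067045  # '1970-01-01T00:17:47.045Z'

datetime.utcfromtimestamp(millis // 1000)   # datetime.datetime(1970, 1, 1, 0, 17, 47)
datetime.utcfromtimestamp(millis / 1000.0)  # datetime.datetime(1970, 1, 1, 0, 17, 47, 45000)
```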
|
0.0
|
269fef7fa12333f7622c3694df75a1b296d87ae2
|
[
"test_elasticsearch_dsl/test_analysis.py::test_stemmer_analyzer_can_pass_name"
] |
[
"test_elasticsearch_dsl/test_analysis.py::test_tokenizer",
"test_elasticsearch_dsl/test_analysis.py::test_analyzer_has_definition",
"test_elasticsearch_dsl/test_analysis.py::test_analyzer_serializes_as_name",
"test_elasticsearch_dsl/test_analysis.py::test_normalizer_has_definition",
"test_elasticsearch_dsl/test_analysis.py::test_normalizer_serializes_as_name",
"test_elasticsearch_dsl/test_analysis.py::test_custom_analyzer_can_collect_custom_items"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-10-16 02:08:29+00:00
|
apache-2.0
| 2,090 |
|
elastic__elasticsearch-dsl-py-759
|
diff --git a/elasticsearch_dsl/analysis.py b/elasticsearch_dsl/analysis.py
index 8424283..c2abd94 100644
--- a/elasticsearch_dsl/analysis.py
+++ b/elasticsearch_dsl/analysis.py
@@ -19,9 +19,9 @@ class AnalysisBase(object):
class CustomAnalysis(object):
name = 'custom'
- def __init__(self, name, builtin_type='custom', **kwargs):
+ def __init__(self, filter_name, builtin_type='custom', **kwargs):
self._builtin_type = builtin_type
- self._name = name
+ self._name = filter_name
super(CustomAnalysis, self).__init__(**kwargs)
def to_dict(self):
|
elastic/elasticsearch-dsl-py
|
269fef7fa12333f7622c3694df75a1b296d87ae2
|
diff --git a/test_elasticsearch_dsl/test_analysis.py b/test_elasticsearch_dsl/test_analysis.py
index 014c43d..6dc3c09 100644
--- a/test_elasticsearch_dsl/test_analysis.py
+++ b/test_elasticsearch_dsl/test_analysis.py
@@ -79,3 +79,11 @@ def test_custom_analyzer_can_collect_custom_items():
}
} == a.get_analysis_definition()
+def test_stemmer_analyzer_can_pass_name():
+ t = analysis.token_filter('my_english_filter', name="minimal_english", type="stemmer")
+ assert t.to_dict() == 'my_english_filter'
+ assert {
+ "type" : "stemmer",
+ "name" : "minimal_english"
+ } == t.get_definition()
+
|
Can't create custom stemming filter
The stemmer filter requires the use of a property called `name`; however, the library is already using this property for the filter's own name.
(https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-stemmer-tokenfilter.html)
This code
```python
light_stemmer_ = token_filter(name="minimal_english", type="stemmer")
```
will produce this:
```json
"filter" : {
"minimal_english" : {
"type" : "stemmer"
}
}
```
while I would like this
```json
"filter" : {
"light_stemmer_" : {
"type" : "stemmer",
"name" : "minimal_english"
}
}
```
I suggest either changing the name of the variable the user is using, or allowing a variable like `_name` to become `name` when serializing.
As a workaround, I changed this line https://github.com/ej2/python-quickbooks/blob/29d28a012a5a3a930e66cee56178208f21cb5fdf/elasticsearch_dsl/analysis.py#L33 to only pop if the type is not stemming.
like this
```python
if self._builtin_type == 'stemmer' and 'name' in d[self.name]:
    d['name'] = d[self.name]['name']
d = d.pop(self.name)
```
and then in my code I do
```python
light_stemmer_ = token_filter("light_stemmer_", "stemmer")
light_stemmer_.name = "minimal_english"
```
but I know it is a hacky solution
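For reference, the fix that landed frees up `name` by renaming the constructor's positional parameter, so the desired definition can be produced directly (mirroring the test added in the patch):
```python
from elasticsearch_dsl import analysis

t = analysis.token_filter('light_stemmer_', type='stemmer', name='minimal_english')
t.get_definition()  # {'type': 'stemmer', 'name': 'minimal_english'}
```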
|
0.0
|
269fef7fa12333f7622c3694df75a1b296d87ae2
|
[
"test_elasticsearch_dsl/test_analysis.py::test_stemmer_analyzer_can_pass_name"
] |
[
"test_elasticsearch_dsl/test_analysis.py::test_analyzer_serializes_as_name",
"test_elasticsearch_dsl/test_analysis.py::test_normalizer_serializes_as_name",
"test_elasticsearch_dsl/test_analysis.py::test_normalizer_has_definition",
"test_elasticsearch_dsl/test_analysis.py::test_analyzer_has_definition",
"test_elasticsearch_dsl/test_analysis.py::test_tokenizer",
"test_elasticsearch_dsl/test_analysis.py::test_custom_analyzer_can_collect_custom_items"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-10-25 12:36:19+00:00
|
apache-2.0
| 2,091 |
|
elastic__elasticsearch-py-1143
|
diff --git a/elasticsearch/client/cluster.py b/elasticsearch/client/cluster.py
index 0b3dee93..14fa7e94 100644
--- a/elasticsearch/client/cluster.py
+++ b/elasticsearch/client/cluster.py
@@ -102,6 +102,9 @@ class ClusterClient(NamespacedClient):
:arg wait_for_timeout: The maximum time to wait for
wait_for_metadata_version before timing out
"""
+ if index and metric in SKIP_IN_PATH:
+ metric = "_all"
+
return self.transport.perform_request(
"GET", _make_path("_cluster", "state", metric, index), params=params
)
diff --git a/utils/templates/overrides/cluster/state b/utils/templates/overrides/cluster/state
new file mode 100644
index 00000000..27d599cb
--- /dev/null
+++ b/utils/templates/overrides/cluster/state
@@ -0,0 +1,8 @@
+{% extends "base" %}
+{% block request %}
+ if index and metric in SKIP_IN_PATH:
+ metric = "_all"
+
+ {{ super()|trim }}
+{% endblock %}
+
|
elastic/elasticsearch-py
|
5611445203ebabb1a450b17c2c93cd3546a12071
|
diff --git a/test_elasticsearch/test_client/test_cluster.py b/test_elasticsearch/test_client/test_cluster.py
new file mode 100644
index 00000000..a0b9d741
--- /dev/null
+++ b/test_elasticsearch/test_client/test_cluster.py
@@ -0,0 +1,27 @@
+from test_elasticsearch.test_cases import ElasticsearchTestCase
+
+
+class TestCluster(ElasticsearchTestCase):
+ def test_stats_without_node_id(self):
+ self.client.cluster.stats()
+ self.assert_url_called("GET", "/_cluster/stats")
+
+ def test_stats_with_node_id(self):
+ self.client.cluster.stats("node-1")
+ self.assert_url_called("GET", "/_cluster/stats/nodes/node-1")
+
+ self.client.cluster.stats(node_id="node-2")
+ self.assert_url_called("GET", "/_cluster/stats/nodes/node-2")
+
+ def test_state_with_index_without_metric_defaults_to_all(self):
+ self.client.cluster.state()
+ self.assert_url_called("GET", "/_cluster/state")
+
+ self.client.cluster.state(metric="cluster_name")
+ self.assert_url_called("GET", "/_cluster/state/cluster_name")
+
+ self.client.cluster.state(index="index-1")
+ self.assert_url_called("GET", "/_cluster/state/_all/index-1")
+
+ self.client.cluster.state(index="index-1", metric="cluster_name")
+ self.assert_url_called("GET", "/_cluster/state/cluster_name/index-1")
diff --git a/test_elasticsearch/test_client/test_indices.py b/test_elasticsearch/test_client/test_indices.py
index 7d80562a..7fdfc734 100644
--- a/test_elasticsearch/test_client/test_indices.py
+++ b/test_elasticsearch/test_client/test_indices.py
@@ -18,3 +18,7 @@ class TestIndices(ElasticsearchTestCase):
self.assertRaises(ValueError, self.client.indices.exists, index=None)
self.assertRaises(ValueError, self.client.indices.exists, index=[])
self.assertRaises(ValueError, self.client.indices.exists, index="")
+
+ def test_put_mapping_without_index(self):
+ self.client.indices.put_mapping(doc_type="doc-type", body={})
+ self.assert_url_called("PUT", "/_all/doc-type/_mapping")
diff --git a/test_elasticsearch/test_server/__init__.py b/test_elasticsearch/test_server/__init__.py
index 9c116675..159750a6 100644
--- a/test_elasticsearch/test_server/__init__.py
+++ b/test_elasticsearch/test_server/__init__.py
@@ -1,3 +1,4 @@
+from unittest import SkipTest
from elasticsearch.helpers import test
from elasticsearch.helpers.test import ElasticsearchTestCase as BaseTestCase
@@ -6,6 +7,8 @@ client = None
def get_client(**kwargs):
global client
+ if client is False:
+ raise SkipTest("No client is available")
if client is not None and not kwargs:
return client
@@ -16,7 +19,11 @@ def get_client(**kwargs):
new_client = local_get_client(**kwargs)
except ImportError:
# fallback to using vanilla client
- new_client = test.get_test_client(**kwargs)
+ try:
+ new_client = test.get_test_client(**kwargs)
+ except SkipTest:
+ client = False
+ raise
if not kwargs:
client = new_client
|
cluster.state(index='name')['routing_table'] not found in client 7.5.1
Using the most recent release of the Elasticsearch Python client (7.5.1 at this time), the cluster state endpoint no longer defaults the metric to `_all` when an index is given, so most response keys are missing.
### 7.5.1
```
client.cluster.state(index=idx).keys()
dict_keys(['cluster_name', 'cluster_uuid'])
```
### 7.1.1:
```
client.cluster.state(index=idx).keys()
dict_keys(['cluster_name', 'cluster_uuid', 'version', 'state_uuid', 'master_node', 'blocks', 'nodes', 'metadata', 'routing_table', 'routing_nodes'])
```
This omission, if intended, would seem to be something to change in a major release version, rather than a minor release.
This was detected and reported [here](https://github.com/elastic/support-dev-help/issues/10455#issue-558049925). This compels me to pin Curator at client version `elasticsearch==7.1.0` for now.
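Until the fix lands, requesting the metric explicitly restores the full response (a sketch; `idx` as above):
```
client.cluster.state(metric='_all', index=idx).keys()
# dict_keys(['cluster_name', 'cluster_uuid', 'version', ..., 'routing_table', 'routing_nodes'])
```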
|
0.0
|
5611445203ebabb1a450b17c2c93cd3546a12071
|
[
"test_elasticsearch/test_client/test_cluster.py::TestCluster::test_state_with_index_without_metric_defaults_to_all"
] |
[
"test_elasticsearch/test_client/test_cluster.py::TestCluster::test_stats_with_node_id",
"test_elasticsearch/test_client/test_cluster.py::TestCluster::test_stats_without_node_id",
"test_elasticsearch/test_client/test_indices.py::TestIndices::test_create_one_index",
"test_elasticsearch/test_client/test_indices.py::TestIndices::test_delete_multiple_indices",
"test_elasticsearch/test_client/test_indices.py::TestIndices::test_exists_index",
"test_elasticsearch/test_client/test_indices.py::TestIndices::test_passing_empty_value_for_required_param_raises_exception",
"test_elasticsearch/test_client/test_indices.py::TestIndices::test_put_mapping_without_index"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-03-10 15:29:40+00:00
|
apache-2.0
| 2,092 |
|
elastic__elasticsearch-py-1157
|
diff --git a/elasticsearch/client/tasks.py b/elasticsearch/client/tasks.py
index 0f253ca1..4c4f6d1f 100644
--- a/elasticsearch/client/tasks.py
+++ b/elasticsearch/client/tasks.py
@@ -1,3 +1,4 @@
+import warnings
from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
@@ -58,7 +59,7 @@ class TasksClient(NamespacedClient):
)
@query_params("timeout", "wait_for_completion")
- def get(self, task_id, params=None, headers=None):
+ def get(self, task_id=None, params=None, headers=None):
"""
Returns information about a task.
`<https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html>`_
@@ -70,7 +71,11 @@ class TasksClient(NamespacedClient):
complete (default: false)
"""
if task_id in SKIP_IN_PATH:
- raise ValueError("Empty value passed for a required argument 'task_id'.")
+ warnings.warn(
+ "Calling client.tasks.get() without a task_id is deprecated "
+ "and will be removed in v8.0. Use client.tasks.list() instead.",
+ DeprecationWarning,
+ )
return self.transport.perform_request(
"GET", _make_path("_tasks", task_id), params=params, headers=headers
diff --git a/utils/generate_api.py b/utils/generate_api.py
index 0e76617d..183934e4 100644
--- a/utils/generate_api.py
+++ b/utils/generate_api.py
@@ -146,6 +146,13 @@ class API:
p in url.get("parts", {}) for url in self._def["url"]["paths"]
)
+ # This piece of logic corresponds to calling
+ # client.tasks.get() w/o a task_id which was erroneously
+ # allowed in the 7.1 client library. This functionality
+ # is deprecated and will be removed in 8.x.
+ if self.namespace == "tasks" and self.name == "get":
+ parts["task_id"]["required"] = False
+
for k, sub in SUBSTITUTIONS.items():
if k in parts:
parts[sub] = parts.pop(k)
diff --git a/utils/templates/overrides/tasks/get b/utils/templates/overrides/tasks/get
new file mode 100644
index 00000000..ca855ab9
--- /dev/null
+++ b/utils/templates/overrides/tasks/get
@@ -0,0 +1,12 @@
+{% extends "base" %}
+{% block request %}
+ if task_id in SKIP_IN_PATH:
+ warnings.warn(
+ "Calling client.tasks.get() without a task_id is deprecated "
+ "and will be removed in v8.0. Use client.tasks.list() instead.",
+ DeprecationWarning,
+ )
+
+ {{ super()|trim }}
+{% endblock %}
+
|
elastic/elasticsearch-py
|
883287551ab37f8197e6a02459c647cb08f9ba84
|
diff --git a/test_elasticsearch/test_client/__init__.py b/test_elasticsearch/test_client/__init__.py
index b7fa18ad..5fa01f76 100644
--- a/test_elasticsearch/test_client/__init__.py
+++ b/test_elasticsearch/test_client/__init__.py
@@ -1,4 +1,5 @@
from __future__ import unicode_literals
+import warnings
from elasticsearch.client import _normalize_hosts, Elasticsearch
@@ -110,3 +111,27 @@ class TestClient(ElasticsearchTestCase):
self.client.index(index="my-index", id=0, body={})
self.assert_url_called("PUT", "/my-index/_doc/0")
+
+ def test_tasks_get_without_task_id_deprecated(self):
+ warnings.simplefilter("always", DeprecationWarning)
+ with warnings.catch_warnings(record=True) as w:
+ self.client.tasks.get()
+
+ self.assert_url_called("GET", "/_tasks")
+ self.assertEquals(len(w), 1)
+ self.assertIs(w[0].category, DeprecationWarning)
+ self.assertEquals(
+ str(w[0].message),
+ "Calling client.tasks.get() without a task_id is deprecated "
+ "and will be removed in v8.0. Use client.tasks.list() instead.",
+ )
+
+ def test_tasks_get_with_task_id_not_deprecated(self):
+ warnings.simplefilter("always", DeprecationWarning)
+ with warnings.catch_warnings(record=True) as w:
+ self.client.tasks.get("task-1")
+ self.client.tasks.get(task_id="task-2")
+
+ self.assert_url_called("GET", "/_tasks/task-1")
+ self.assert_url_called("GET", "/_tasks/task-2")
+ self.assertEquals(len(w), 0)
|
TasksClient.get() requiring an argument on v7.5.1
**Elasticsearch version** (`bin/elasticsearch --version`): 7.1.1, 7.5.1
**`elasticsearch-py` version (`elasticsearch.__versionstr__`)**: 7.5.1
**Description of the problem including expected versus actual behavior**:
Elasticsearch().tasks.get() fails using elasticsearch-py 7.5.1, whereas it succeeds using 7.1.1.
```
>>> import elasticsearch
>>> elasticsearch.__versionstr__
'7.5.1'
>>> elasticsearch.Elasticsearch().tasks.get()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/nfox/myenv-7.5.1/lib/python3.7/site-packages/elasticsearch/client/utils.py", line 84, in _wrapped
return func(*args, params=params, **kwargs)
TypeError: get() missing 1 required positional argument: 'task_id'
```
```
>>> import elasticsearch
>>> elasticsearch.__versionstr__
'7.1.0'
>>> elasticsearch.Elasticsearch().tasks.get()
{'nodes': {'GkSSDiZPTBq4Tlv5xV9wtg': {'name': 'e6a01a4d549f', 'transport_address': '172.17.0.3:9300', 'host': '172.17.0.3', 'ip': '172.17.0.3:9300', 'roles': ['ingest', 'master', 'data', 'ml'], 'attributes': {'ml.machine_memory': '4129972224', 'xpack.installed': 'true', 'ml.max_open_jobs': '20'}, 'tasks': {'GkSSDiZPTBq4Tlv5xV9wtg:56': {'node': 'GkSSDiZPTBq4Tlv5xV9wtg', 'id': 56, 'type': 'direct', 'action': 'cluster:monitor/tasks/lists[n]', 'start_time_in_millis': 1582238526445, 'running_time_in_nanos': 8142200, 'cancellable': False, 'parent_task_id': 'GkSSDiZPTBq4Tlv5xV9wtg:55', 'headers': {}}, 'GkSSDiZPTBq4Tlv5xV9wtg:55': {'node': 'GkSSDiZPTBq4Tlv5xV9wtg', 'id': 55, 'type': 'transport', 'action': 'cluster:monitor/tasks/lists', 'start_time_in_millis': 1582238526442, 'running_time_in_nanos': 11192200, 'cancellable': False, 'headers': {}}}}}}
```
I've verified this against running both ES 7.5.1 and ES 7.1.1 (via docker on localhost)
**Steps to reproduce**:
run `elasticsearch.Elasticsearch().tasks.get()` while using elasticsearch-py 7.5.1
Ultimately, this is breaking curator's DeleteSnapshot as it checks for tasks.
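A hedged sketch of the migration path suggested by the deprecation warning (assuming a reachable local cluster; the task id below is taken from the output above):
```
from elasticsearch import Elasticsearch

es = Elasticsearch()

# tasks.list() targets GET /_tasks, which is what the argument-less
# tasks.get() call resolved to in 7.1.x.
all_tasks = es.tasks.list()

# tasks.get() remains the right call when a concrete task id is available.
task = es.tasks.get(task_id="GkSSDiZPTBq4Tlv5xV9wtg:55")
```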
|
0.0
|
883287551ab37f8197e6a02459c647cb08f9ba84
|
[
"test_elasticsearch/test_client/__init__.py::TestClient::test_tasks_get_without_task_id_deprecated"
] |
[
"test_elasticsearch/test_client/__init__.py::TestNormalizeHosts::test_dicts_are_left_unchanged",
"test_elasticsearch/test_client/__init__.py::TestNormalizeHosts::test_none_uses_defaults",
"test_elasticsearch/test_client/__init__.py::TestNormalizeHosts::test_single_string_is_wrapped_in_list",
"test_elasticsearch/test_client/__init__.py::TestNormalizeHosts::test_strings_are_parsed_for_port_and_user",
"test_elasticsearch/test_client/__init__.py::TestNormalizeHosts::test_strings_are_parsed_for_scheme",
"test_elasticsearch/test_client/__init__.py::TestNormalizeHosts::test_strings_are_used_as_hostnames",
"test_elasticsearch/test_client/__init__.py::TestClient::test_from_in_search",
"test_elasticsearch/test_client/__init__.py::TestClient::test_headers_is_copied_when",
"test_elasticsearch/test_client/__init__.py::TestClient::test_index_uses_post_if_id_is_empty",
"test_elasticsearch/test_client/__init__.py::TestClient::test_index_uses_put_if_id_is_not_empty",
"test_elasticsearch/test_client/__init__.py::TestClient::test_params_is_copied_when",
"test_elasticsearch/test_client/__init__.py::TestClient::test_repr_contains_hosts",
"test_elasticsearch/test_client/__init__.py::TestClient::test_repr_contains_hosts_passed_in",
"test_elasticsearch/test_client/__init__.py::TestClient::test_repr_subclass",
"test_elasticsearch/test_client/__init__.py::TestClient::test_repr_truncates_host_to_5",
"test_elasticsearch/test_client/__init__.py::TestClient::test_request_timeout_is_passed_through_unescaped",
"test_elasticsearch/test_client/__init__.py::TestClient::test_tasks_get_with_task_id_not_deprecated"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-03-13 17:57:57+00:00
|
apache-2.0
| 2,093 |
|
elastic__elasticsearch-py-2030
|
diff --git a/elasticsearch/_async/client/__init__.py b/elasticsearch/_async/client/__init__.py
index aae2173e..9ec82bf2 100644
--- a/elasticsearch/_async/client/__init__.py
+++ b/elasticsearch/_async/client/__init__.py
@@ -519,26 +519,36 @@ class AsyncElasticsearch(BaseClient):
if request_timeout is not DEFAULT:
client._request_timeout = request_timeout
+ else:
+ client._request_timeout = self._request_timeout
if ignore_status is not DEFAULT:
if isinstance(ignore_status, int):
ignore_status = (ignore_status,)
client._ignore_status = ignore_status
+ else:
+ client._ignore_status = self._ignore_status
if max_retries is not DEFAULT:
if not isinstance(max_retries, int):
raise TypeError("'max_retries' must be of type 'int'")
client._max_retries = max_retries
+ else:
+ client._max_retries = self._max_retries
if retry_on_status is not DEFAULT:
if isinstance(retry_on_status, int):
retry_on_status = (retry_on_status,)
client._retry_on_status = retry_on_status
+ else:
+ client._retry_on_status = self._retry_on_status
if retry_on_timeout is not DEFAULT:
if not isinstance(retry_on_timeout, bool):
raise TypeError("'retry_on_timeout' must be of type 'bool'")
client._retry_on_timeout = retry_on_timeout
+ else:
+ client._retry_on_timeout = self._retry_on_timeout
return client
diff --git a/elasticsearch/_sync/client/__init__.py b/elasticsearch/_sync/client/__init__.py
index c1860766..af10f2db 100644
--- a/elasticsearch/_sync/client/__init__.py
+++ b/elasticsearch/_sync/client/__init__.py
@@ -519,26 +519,36 @@ class Elasticsearch(BaseClient):
if request_timeout is not DEFAULT:
client._request_timeout = request_timeout
+ else:
+ client._request_timeout = self._request_timeout
if ignore_status is not DEFAULT:
if isinstance(ignore_status, int):
ignore_status = (ignore_status,)
client._ignore_status = ignore_status
+ else:
+ client._ignore_status = self._ignore_status
if max_retries is not DEFAULT:
if not isinstance(max_retries, int):
raise TypeError("'max_retries' must be of type 'int'")
client._max_retries = max_retries
+ else:
+ client._max_retries = self._max_retries
if retry_on_status is not DEFAULT:
if isinstance(retry_on_status, int):
retry_on_status = (retry_on_status,)
client._retry_on_status = retry_on_status
+ else:
+ client._retry_on_status = self._retry_on_status
if retry_on_timeout is not DEFAULT:
if not isinstance(retry_on_timeout, bool):
raise TypeError("'retry_on_timeout' must be of type 'bool'")
client._retry_on_timeout = retry_on_timeout
+ else:
+ client._retry_on_timeout = self._retry_on_timeout
return client
|
elastic/elasticsearch-py
|
2f64b0ccbaf71ac1f1dc7ab498bd0ccafd1777f4
|
diff --git a/test_elasticsearch/test_client/test_options.py b/test_elasticsearch/test_client/test_options.py
index 16e89af5..72d5edf4 100644
--- a/test_elasticsearch/test_client/test_options.py
+++ b/test_elasticsearch/test_client/test_options.py
@@ -368,3 +368,104 @@ class TestOptions(DummyTransportTestCase):
"user-agent": "custom4",
"accept": "application/vnd.elasticsearch+json; compatible-with=8",
}
+
+ def test_options_timeout_parameters(self):
+ client = Elasticsearch(
+ "http://localhost:9200",
+ transport_class=DummyTransport,
+ request_timeout=1,
+ max_retries=2,
+ retry_on_status=(404,),
+ retry_on_timeout=True,
+ )
+
+ # timeout parameters are preserved with .options()
+ client.options().indices.get(index="test")
+
+ calls = client.transport.calls
+ call = calls[("GET", "/test")][0]
+ assert call.pop("client_meta") is DEFAULT
+ assert call == {
+ "headers": {
+ "accept": "application/vnd.elasticsearch+json; compatible-with=8",
+ },
+ "body": None,
+ "request_timeout": 1,
+ "max_retries": 2,
+ "retry_on_status": (404,),
+ "retry_on_timeout": True,
+ }
+
+ client = Elasticsearch(
+ "http://localhost:9200",
+ transport_class=DummyTransport,
+ request_timeout=1,
+ max_retries=2,
+ retry_on_status=(404,),
+ retry_on_timeout=True,
+ )
+ client.options(
+ request_timeout=2,
+ max_retries=3,
+ retry_on_status=(400,),
+ retry_on_timeout=False,
+ ).indices.get(index="test")
+
+ calls = client.transport.calls
+ call = calls[("GET", "/test")][0]
+ assert call.pop("client_meta") is DEFAULT
+ assert call == {
+ "headers": {
+ "accept": "application/vnd.elasticsearch+json; compatible-with=8",
+ },
+ "body": None,
+ "request_timeout": 2,
+ "max_retries": 3,
+ "retry_on_status": (400,),
+ "retry_on_timeout": False,
+ }
+
+ client = Elasticsearch(
+ "http://localhost:9200",
+ transport_class=DummyTransport,
+ )
+ client.options().indices.get(index="test")
+
+ calls = client.transport.calls
+ call = calls[("GET", "/test")][0]
+ assert call.pop("request_timeout") is DEFAULT
+ assert call.pop("max_retries") is DEFAULT
+ assert call.pop("retry_on_timeout") is DEFAULT
+ assert call.pop("retry_on_status") is DEFAULT
+ assert call.pop("client_meta") is DEFAULT
+ assert call == {
+ "headers": {
+ "accept": "application/vnd.elasticsearch+json; compatible-with=8",
+ },
+ "body": None,
+ }
+
+ client = Elasticsearch(
+ "http://localhost:9200",
+ transport_class=DummyTransport,
+ )
+ client.options(
+ request_timeout=1,
+ max_retries=2,
+ retry_on_status=(404,),
+ retry_on_timeout=True,
+ ).indices.get(index="test")
+
+ calls = client.transport.calls
+ call = calls[("GET", "/test")][0]
+ assert call.pop("client_meta") is DEFAULT
+ assert call == {
+ "headers": {
+ "accept": "application/vnd.elasticsearch+json; compatible-with=8",
+ },
+ "body": None,
+ "request_timeout": 1,
+ "max_retries": 2,
+ "retry_on_status": (404,),
+ "retry_on_timeout": True,
+ }
|
Calling options() on a client resets timeout related attributes
<!-- Bug report -->
**Elasticsearch version** (8.1.1):
**`elasticsearch-py` version (`8.1.1`)**:
**Description of the problem including expected versus actual behavior**:
When the `options()` method is called on an existing `Elasticsearch` client instance, the timeout-related parameters are not preserved. These parameters are:
- `self._request_timeout`
- `self._max_retries`
- `self._retry_on_timeout`
- `self._retry_on_status`
This is problematic in the `helpers` module, particularly in `scan()`, because that method calls `options()` and resets any timeout-related settings we may have made on the client. We can re-specify `request_timeout` there, but the retry parameters cannot be replicated.
**Steps to reproduce**:
```
>>> import elasticsearch as ES
>>> es=ES.Elasticsearch("http://esmasters:9200", sniff_on_connection_fail=True, sniff_on_start=True, min_delay_between_sniffing=600, request_timeout=600, sniff_timeout=300, max_retries=5, retry_on_timeout=True)
>>> es._retry_on_timeout
True
>>> es2=es.options()
>>> es2._retry_on_timeout
<DEFAULT>
>>> es._max_retries
5
>>> es2._max_retries
<DEFAULT>
>>> es2._retry_on_status
<DEFAULT>
>>> es._retry_on_status
<DEFAULT>
>>> es._request_timeout
600
>>> es2._request_timeout
<DEFAULT>
```
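Until the fix lands, a minimal workaround sketch (reusing the setup from the reproduction above) is to re-pass the settings to `.options()` explicitly, since a bare call otherwise drops them back to `DEFAULT`:
```
import elasticsearch as ES

es = ES.Elasticsearch(
    "http://esmasters:9200",
    request_timeout=600,
    max_retries=5,
    retry_on_timeout=True,
)
# Re-specify the timeout/retry settings instead of relying on inheritance.
es2 = es.options(request_timeout=600, max_retries=5, retry_on_timeout=True)
assert es2._retry_on_timeout is True
```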
|
0.0
|
2f64b0ccbaf71ac1f1dc7ab498bd0ccafd1777f4
|
[
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_timeout_parameters"
] |
[
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_to_headers[options0-headers0]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_to_headers[options1-headers1]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_to_headers[options2-headers2]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_to_headers[options3-headers3]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_to_headers[options4-headers4]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_to_headers[options5-headers5]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_to_headers[options6-headers6]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_to_headers[options7-headers7]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[None-None-bearer-api-key]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[None-None-bearer-api_key2]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[None-user:pass-None-api-key]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[None-user:pass-None-api_key2]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[None-user:pass-bearer-None]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[None-user:pass-bearer-api-key]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[None-user:pass-bearer-api_key2]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[None-basic_auth2-None-api-key]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[None-basic_auth2-None-api_key2]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[None-basic_auth2-bearer-None]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[None-basic_auth2-bearer-api-key]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[None-basic_auth2-bearer-api_key2]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[headers1-None-None-api-key]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[headers1-None-None-api_key2]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[headers1-None-bearer-None]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[headers1-None-bearer-api-key]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[headers1-None-bearer-api_key2]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[headers1-user:pass-None-None]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[headers1-user:pass-None-api-key]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[headers1-user:pass-None-api_key2]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[headers1-user:pass-bearer-None]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[headers1-user:pass-bearer-api-key]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[headers1-user:pass-bearer-api_key2]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[headers1-basic_auth2-None-None]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[headers1-basic_auth2-None-api-key]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[headers1-basic_auth2-None-api_key2]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[headers1-basic_auth2-bearer-None]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[headers1-basic_auth2-bearer-api-key]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[headers1-basic_auth2-bearer-api_key2]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[headers2-None-None-api-key]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[headers2-None-None-api_key2]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[headers2-None-bearer-None]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[headers2-None-bearer-api-key]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[headers2-None-bearer-api_key2]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[headers2-user:pass-None-None]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[headers2-user:pass-None-api-key]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[headers2-user:pass-None-api_key2]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[headers2-user:pass-bearer-None]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[headers2-user:pass-bearer-api-key]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[headers2-user:pass-bearer-api_key2]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[headers2-basic_auth2-None-None]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[headers2-basic_auth2-None-api-key]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[headers2-basic_auth2-None-api_key2]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[headers2-basic_auth2-bearer-None]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[headers2-basic_auth2-bearer-api-key]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_auth_conflicts[headers2-basic_auth2-bearer-api_key2]",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_passed_to_perform_request",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_options_passed_to_async_perform_request",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_default_node_configs",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_http_headers_overrides",
"test_elasticsearch/test_client/test_options.py::TestOptions::test_user_agent_override"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-07-14 11:09:50+00:00
|
apache-2.0
| 2,094 |
|
elastic__elasticsearch-py-2427
|
diff --git a/elasticsearch/_sync/client/utils.py b/elasticsearch/_sync/client/utils.py
index 4febb035..1d1a983a 100644
--- a/elasticsearch/_sync/client/utils.py
+++ b/elasticsearch/_sync/client/utils.py
@@ -298,7 +298,8 @@ def _merge_kwargs_no_duplicates(kwargs: Dict[str, Any], values: Dict[str, Any])
def _merge_body_fields_no_duplicates(
body: _TYPE_BODY, kwargs: Dict[str, Any], body_fields: Tuple[str, ...]
-) -> None:
+) -> bool:
+ mixed_body_and_params = False
for key in list(kwargs.keys()):
if key in body_fields:
if isinstance(body, (str, bytes)):
@@ -315,11 +316,13 @@ def _merge_body_fields_no_duplicates(
warnings.warn(
f"Received '{key}' via a specific parameter in the presence of a "
"'body' parameter, which is deprecated and will be removed in a future "
- "version. Instead, use only 'body' or only specific paremeters.",
+ "version. Instead, use only 'body' or only specific parameters.",
category=DeprecationWarning,
stacklevel=warn_stacklevel(),
)
body[key] = kwargs.pop(key)
+ mixed_body_and_params = True
+ return mixed_body_and_params
def _rewrite_parameters(
@@ -401,6 +404,7 @@ def _rewrite_parameters(
not ignore_deprecated_options or "body" not in ignore_deprecated_options
):
body: Optional[_TYPE_BODY] = kwargs.pop("body")
+ mixed_body_and_params = False
if body is not None:
if body_name:
if body_name in kwargs:
@@ -411,11 +415,27 @@ def _rewrite_parameters(
"issues/1698 for more information"
)
kwargs[body_name] = body
-
elif body_fields is not None:
- _merge_body_fields_no_duplicates(body, kwargs, body_fields)
+ mixed_body_and_params = _merge_body_fields_no_duplicates(
+ body, kwargs, body_fields
+ )
kwargs["body"] = body
+ if parameter_aliases and not isinstance(body, (str, bytes)):
+ for alias, rename_to in parameter_aliases.items():
+ if rename_to in body:
+ body[alias] = body.pop(rename_to)
+ # If body and params are mixed, the alias may come from a param,
+ # in which case the warning below will not make sense.
+ if not mixed_body_and_params:
+ warnings.warn(
+ f"Using '{rename_to}' alias in 'body' is deprecated and will be removed "
+ f"in a future version of elasticsearch-py. Use '{alias}' directly instead. "
+ "See https://github.com/elastic/elasticsearch-py/issues/1698 for more information",
+ category=DeprecationWarning,
+ stacklevel=2,
+ )
+
if parameter_aliases:
for alias, rename_to in parameter_aliases.items():
try:
|
elastic/elasticsearch-py
|
7d4a34b6e3ceceb82396be62682b7e8623c72665
|
diff --git a/test_elasticsearch/test_client/test_rewrite_parameters.py b/test_elasticsearch/test_client/test_rewrite_parameters.py
index 26218040..50a23256 100644
--- a/test_elasticsearch/test_client/test_rewrite_parameters.py
+++ b/test_elasticsearch/test_client/test_rewrite_parameters.py
@@ -191,7 +191,7 @@ class TestRewriteParameters:
assert str(w[0].message) == (
"Received 'source' via a specific parameter in the presence of a "
"'body' parameter, which is deprecated and will be removed in a future "
- "version. Instead, use only 'body' or only specific paremeters."
+ "version. Instead, use only 'body' or only specific parameters."
)
def test_body_fields_conflict(self):
@@ -238,6 +238,41 @@ class TestRewriteParameters:
self.wrapped_func_aliases(source=["key3"])
assert self.calls[-1] == ((), {"source": ["key3"]})
+ def test_parameter_aliases_body(self):
+ with pytest.warns(
+ DeprecationWarning,
+ match=(
+ "Using 'source' alias in 'body' is deprecated and will be removed in a future version of elasticsearch-py. "
+ "Use '_source' directly instead."
+ ),
+ ):
+ self.wrapped_func_aliases(body={"source": ["key4"]})
+
+ # using the correct name does not warn
+ with warnings.catch_warnings():
+ warnings.simplefilter("error")
+ self.wrapped_func_aliases(body={"_source": ["key4"]})
+
+ def test_parameter_aliases_body_param(self):
+ with pytest.warns(
+ DeprecationWarning,
+ match=(
+ "Received 'source' via a specific parameter in the presence of a "
+ "'body' parameter, which is deprecated and will be removed in a future "
+ "version. Instead, use only 'body' or only specific parameters."
+ ),
+ ):
+ self.wrapped_func_aliases(
+ source=["key4"], body={"query": {"match_all": {}}}
+ )
+
+ # using the correct name does not warn
+ with warnings.catch_warnings():
+ warnings.simplefilter("error")
+ self.wrapped_func_aliases(
+ body={"query": {"match_all": {}}, "_source": ["key4"]}
+ )
+
@pytest.mark.parametrize("client_cls", [Elasticsearch, AsyncElasticsearch])
def test_positional_argument_error(self, client_cls):
client = client_cls("https://localhost:9200")
|
elasticsearch-py 8.12 breaks the use of `from_` in body parameters
This bug was initially reported in the [Elastic Community Slack](https://www.elastic.co/blog/join-our-elastic-stack-workspace-on-slack). But first, some context.
## Context
Since the early days of the Elasticsearch Python client, [back in July 2013](https://github.com/elastic/elasticsearch-py/commit/48ec1ab4bbc0b49ac5dfcdd39fb341d93c7f4538), the `body` parameter is the way to specify the request body for requests that accept it. API calls using body look like this:
```python
es.search(index="bonsais", body={"query": {"match_all": {}}, "size": 50})
```
However, this parameter is an untyped Python dictionary which is not validated by the client. That said, thanks to the [Elasticsearch specification](https://github.com/elastic/elasticsearch-specification/) which provides the full types of each Elasticsearch API, we can provide a better experience. elasticsearch-py 8.0 did just that, introducing this new way of calling APIs, where the first level of body keys can be specified using Python parameters:
```python
es.search(index="bonsais", query={"match_all": {}}, size=50)
```
This has various advantages, including better autocompletion and type checks. For example, mypy will raise an error if size is not an integer. And since we realized we could [unpack](https://docs.python.org/3/tutorial/controlflow.html#unpacking-argument-lists) body to typed parameters like this:
```python
es.search(index="bonsais", **{"query": {"match_all": {}}, "size": 50})
```
We decided to deprecate the body API altogether. However, deprecating body has the following downsides:
* A lot of code written in the past decade was now triggering a deprecation warning
* Unknown parameters such as `sub_searches` or unintentional omissions from the Elasticsearch specification were rejected, causing queries to outright fail, unnecessarily forcing the use of raw requests.
* Optimizations such as passing an already encoded body to avoid paying the cost of serializing JSON were no longer possible.
The original author of the client, Honza Král, [pointed out those issues](https://github.com/elastic/elasticsearch-py/issues/2181), and we decided to allow `body` to work as before, without any warnings, [alongside the new API](https://github.com/elastic/elasticsearch-py/pull/2383). This is available in elasticsearch-py 8.12.0.
## The case of Python keywords, like `from`
One subtlety with the above is that some identifiers are [reserved by Python](https://docs.python.org/3/reference/lexical_analysis.html#keywords) and can't be used as parameters. This is the case for `from`, for example. As such, `es.search(index="bonsais", query={"match_all": {}}, from=100, size=50)` is invalid Python code. For this reason, parameter aliases were introduced, and the correct way to write that query was to use `from_`, e.g. `es.search(index="bonsais", query={"match_all": {}}, from_=100, size=50)`. And then, under the hood, `from` is actually sent to Elasticsearch:
https://github.com/elastic/elasticsearch-py/blob/5014ce5337594f66040c81a2610220b1e8c0527e/elasticsearch/_sync/client/__init__.py#L1280-L1281
However, while the `body` parameter was deprecated (before elasticsearch-py 8.12), the deprecation was implemented by converting all `body` subfields to Python parameters internally, and *then* rewriting parameter aliases like `from_` to `from`. This means it was possible to write:
```python
es.search(index="bonsais", body={"query": {"match_all": {}}, "from_": 100, "size": 50})
```
which was then converted as if we had called:
```python
es.search(index="bonsais", query={"match_all": {}}, from_=100, size=50)
```
to finally send `{"query": {"match_all": {}}, "from": 100, "size": 50}` as the body to Elasticsearch. This no longer works with elasticsearch-py 8.12.0. The body is used as is, without any inspection, and the correct way to use `from` with the `body` parameter is the one that always worked:
```python
es.search(
index="*",
body={
"query": {"match_all": {}},
"from": 10,
"size": 10,
},
)
```
I'm still not sure what the solution is here.
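For reference, a sketch of the two spellings that do work on 8.12.0 (hedged; the index name is illustrative). The reserved-word alias belongs to the parameter API, while a raw body uses the real field name:
```
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")

# Parameter API: Python keywords get a trailing underscore...
es.search(index="bonsais", query={"match_all": {}}, from_=100, size=50)

# ...while a raw body uses the actual field name understood by Elasticsearch.
es.search(index="bonsais", body={"query": {"match_all": {}}, "from": 100, "size": 50})
```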
|
0.0
|
7d4a34b6e3ceceb82396be62682b7e8623c72665
|
[
"test_elasticsearch/test_client/test_rewrite_parameters.py::TestRewriteParameters::test_parameter_aliases_body",
"test_elasticsearch/test_client/test_rewrite_parameters.py::TestRewriteParameters::test_parameter_aliases_body_param"
] |
[
"test_elasticsearch/test_client/test_rewrite_parameters.py::TestRewriteParameters::test_default_params_conflict",
"test_elasticsearch/test_client/test_rewrite_parameters.py::TestRewriteParameters::test_body_name_duplicate",
"test_elasticsearch/test_client/test_rewrite_parameters.py::TestRewriteParameters::test_error_on_body_merge[{\"query\":",
"test_elasticsearch/test_client/test_rewrite_parameters.py::TestRewriteParameters::test_error_on_params_merge[{\"query\":",
"test_elasticsearch/test_client/test_rewrite_parameters.py::TestRewriteParameters::test_body_fields_conflict",
"test_elasticsearch/test_client/test_rewrite_parameters.py::TestRewriteParameters::test_parameter_aliases",
"test_elasticsearch/test_client/test_rewrite_parameters.py::TestRewriteParameters::test_positional_argument_error[Elasticsearch]"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-01-31 12:21:36+00:00
|
apache-2.0
| 2,095 |
|
elastic__elasticsearch-py-569
|
diff --git a/elasticsearch/client/__init__.py b/elasticsearch/client/__init__.py
index c735b70a..488cf8be 100644
--- a/elasticsearch/client/__init__.py
+++ b/elasticsearch/client/__init__.py
@@ -3,7 +3,7 @@ import logging
from ..transport import Transport
from ..exceptions import TransportError
-from ..compat import string_types, urlparse
+from ..compat import string_types, urlparse, unquote
from .indices import IndicesClient
from .ingest import IngestClient
from .cluster import ClusterClient
@@ -49,7 +49,8 @@ def _normalize_hosts(hosts):
h['scheme'] = parsed_url.scheme
if parsed_url.username or parsed_url.password:
- h['http_auth'] = '%s:%s' % (parsed_url.username, parsed_url.password)
+ h['http_auth'] = '%s:%s' % (unquote(parsed_url.username),
+ unquote(parsed_url.password))
if parsed_url.path and parsed_url.path != '/':
h['url_prefix'] = parsed_url.path
diff --git a/elasticsearch/compat.py b/elasticsearch/compat.py
index deee3c52..a5b615d2 100644
--- a/elasticsearch/compat.py
+++ b/elasticsearch/compat.py
@@ -4,10 +4,10 @@ PY2 = sys.version_info[0] == 2
if PY2:
string_types = basestring,
- from urllib import quote_plus, urlencode
+ from urllib import quote_plus, urlencode, unquote
from urlparse import urlparse
from itertools import imap as map
else:
string_types = str, bytes
- from urllib.parse import quote_plus, urlencode, urlparse
+ from urllib.parse import quote_plus, urlencode, urlparse, unquote
map = map
|
elastic/elasticsearch-py
|
fe897ebe0d2167e91ea19fa9a81f448a861d58d1
|
diff --git a/test_elasticsearch/test_client/__init__.py b/test_elasticsearch/test_client/__init__.py
index 4bf2978c..ec01145e 100644
--- a/test_elasticsearch/test_client/__init__.py
+++ b/test_elasticsearch/test_client/__init__.py
@@ -13,8 +13,8 @@ class TestNormalizeHosts(TestCase):
def test_strings_are_parsed_for_port_and_user(self):
self.assertEquals(
- [{"host": "elastic.co", "port": 42}, {"host": "elastic.co", "http_auth": "user:secret"}],
- _normalize_hosts(["elastic.co:42", "user:[email protected]"])
+ [{"host": "elastic.co", "port": 42}, {"host": "elastic.co", "http_auth": "user:secre]"}],
+ _normalize_hosts(["elastic.co:42", "user:secre%[email protected]"])
)
def test_strings_are_parsed_for_scheme(self):
|
user:password in hosts are not url decoded
Hello,
If I have a character like "]" in my username or password, I must urlencode it, or `urlparse` will interpret the host as an IPv6 address. Unfortunately, `urlparse` does not urldecode fragments:
```
The components are not broken up in smaller parts (for example, the network location is a single string), and % escapes are not expanded.
```
(https://docs.python.org/3/library/urllib.parse.html)
`_normalize_hosts` should urldecode explicitly, which I will propose in a PR.
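A sketch of the intended round-trip once `_normalize_hosts` unquotes credentials (hedged; shown with Python 3's `urllib.parse`, whereas the library itself goes through its `compat` shim):
```
from urllib.parse import quote
from elasticsearch.client import _normalize_hosts

password = "secre]"  # "]" must be percent-encoded or urlparse sees IPv6
host = "user:%[email protected]" % quote(password, safe="")
print(_normalize_hosts([host]))
# -> [{'host': 'elastic.co', 'http_auth': 'user:secre]'}]
```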
|
0.0
|
fe897ebe0d2167e91ea19fa9a81f448a861d58d1
|
[
"test_elasticsearch/test_client/__init__.py::TestNormalizeHosts::test_strings_are_parsed_for_port_and_user"
] |
[
"test_elasticsearch/test_client/__init__.py::TestNormalizeHosts::test_dicts_are_left_unchanged",
"test_elasticsearch/test_client/__init__.py::TestNormalizeHosts::test_none_uses_defaults",
"test_elasticsearch/test_client/__init__.py::TestNormalizeHosts::test_single_string_is_wrapped_in_list",
"test_elasticsearch/test_client/__init__.py::TestNormalizeHosts::test_strings_are_parsed_for_scheme",
"test_elasticsearch/test_client/__init__.py::TestNormalizeHosts::test_strings_are_used_as_hostnames",
"test_elasticsearch/test_client/__init__.py::TestClient::test_from_in_search",
"test_elasticsearch/test_client/__init__.py::TestClient::test_index_uses_post_if_id_is_empty",
"test_elasticsearch/test_client/__init__.py::TestClient::test_index_uses_put_if_id_is_not_empty",
"test_elasticsearch/test_client/__init__.py::TestClient::test_params_is_copied_when",
"test_elasticsearch/test_client/__init__.py::TestClient::test_repr_contains_hosts",
"test_elasticsearch/test_client/__init__.py::TestClient::test_repr_contains_hosts_passed_in",
"test_elasticsearch/test_client/__init__.py::TestClient::test_repr_truncates_host_to_10",
"test_elasticsearch/test_client/__init__.py::TestClient::test_request_timeout_is_passed_through_unescaped"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-04-13 11:28:46+00:00
|
apache-2.0
| 2,096 |
|
elastic__elasticsearch-py-618
|
diff --git a/elasticsearch/client/__init__.py b/elasticsearch/client/__init__.py
index 59858549..834f1d86 100644
--- a/elasticsearch/client/__init__.py
+++ b/elasticsearch/client/__init__.py
@@ -1127,7 +1127,8 @@ class Elasticsearch(object):
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
return self.transport.perform_request('POST', _make_path(index,
- doc_type, '_bulk'), params=params, body=self._bulk_body(body))
+ doc_type, '_bulk'), params=params, body=self._bulk_body(body),
+ headers={'content-type': 'application/x-ndjson'})
@query_params('max_concurrent_searches', 'pre_filter_shard_size',
'search_type', 'typed_keys')
@@ -1159,7 +1160,8 @@ class Elasticsearch(object):
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
return self.transport.perform_request('GET', _make_path(index,
- doc_type, '_msearch'), params=params, body=self._bulk_body(body))
+ doc_type, '_msearch'), params=params, body=self._bulk_body(body),
+ headers={'content-type': 'application/x-ndjson'})
@query_params('field_statistics', 'fields', 'offsets', 'parent', 'payloads',
'positions', 'preference', 'realtime', 'routing', 'term_statistics',
@@ -1363,7 +1365,8 @@ class Elasticsearch(object):
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
return self.transport.perform_request('GET', _make_path(index, doc_type,
- '_msearch', 'template'), params=params, body=self._bulk_body(body))
+ '_msearch', 'template'), params=params, body=self._bulk_body(body),
+ headers={'content-type': 'application/x-ndjson'})
@query_params('allow_no_indices', 'expand_wildcards', 'fields',
'ignore_unavailable')
@@ -1387,3 +1390,4 @@ class Elasticsearch(object):
"""
return self.transport.perform_request('GET', _make_path(index,
'_field_caps'), params=params, body=body)
+
diff --git a/elasticsearch/connection/http_requests.py b/elasticsearch/connection/http_requests.py
index 59dd381c..b98e7772 100644
--- a/elasticsearch/connection/http_requests.py
+++ b/elasticsearch/connection/http_requests.py
@@ -61,13 +61,13 @@ class RequestsHttpConnection(Connection):
warnings.warn(
'Connecting to %s using SSL with verify_certs=False is insecure.' % self.base_url)
- def perform_request(self, method, url, params=None, body=None, timeout=None, ignore=()):
+ def perform_request(self, method, url, params=None, body=None, timeout=None, ignore=(), headers=None):
url = self.base_url + url
if params:
url = '%s?%s' % (url, urlencode(params or {}))
start = time.time()
- request = requests.Request(method=method, url=url, data=body)
+ request = requests.Request(method=method, headers=headers, url=url, data=body)
prepared_request = self.session.prepare_request(request)
settings = self.session.merge_environment_settings(prepared_request.url, {}, None, None, None)
send_kwargs = {'timeout': timeout or self.timeout}
diff --git a/elasticsearch/connection/http_urllib3.py b/elasticsearch/connection/http_urllib3.py
index 7b4e6c79..62957ed2 100644
--- a/elasticsearch/connection/http_urllib3.py
+++ b/elasticsearch/connection/http_urllib3.py
@@ -91,7 +91,7 @@ class Urllib3HttpConnection(Connection):
self.pool = pool_class(host, port=port, timeout=self.timeout, maxsize=maxsize, **kw)
- def perform_request(self, method, url, params=None, body=None, timeout=None, ignore=()):
+ def perform_request(self, method, url, params=None, body=None, timeout=None, ignore=(), headers=None):
url = self.url_prefix + url
if params:
url = '%s?%s' % (url, urlencode(params))
@@ -111,6 +111,9 @@ class Urllib3HttpConnection(Connection):
if not isinstance(method, str):
method = method.encode('utf-8')
+ if headers:
+ request_headers = dict(self.headers)
+ request_headers.update(headers or {})
response = self.pool.urlopen(method, url, body, retries=False, headers=self.headers, **kw)
duration = time.time() - start
raw_data = response.data.decode('utf-8')
diff --git a/elasticsearch/transport.py b/elasticsearch/transport.py
index dc8cd891..f876a945 100644
--- a/elasticsearch/transport.py
+++ b/elasticsearch/transport.py
@@ -255,7 +255,7 @@ class Transport(object):
if self.sniff_on_connection_fail:
self.sniff_hosts()
- def perform_request(self, method, url, params=None, body=None):
+ def perform_request(self, method, url, headers=None, params=None, body=None):
"""
Perform the actual request. Retrieve a connection from the connection
pool, pass all the information to it's perform_request method and
@@ -269,6 +269,8 @@ class Transport(object):
:arg method: HTTP method to use
:arg url: absolute url (without host) to target
+ :arg headers: dictionary of headers, will be handed over to the
+ underlying :class:`~elasticsearch.Connection` class
:arg params: dictionary of query parameters, will be handed over to the
underlying :class:`~elasticsearch.Connection` class for serialization
:arg body: body of the request, will be serializes using serializer and
@@ -309,7 +311,7 @@ class Transport(object):
connection = self.get_connection()
try:
- status, headers, data = connection.perform_request(method, url, params, body, ignore=ignore, timeout=timeout)
+ status, headers, data = connection.perform_request(method, url, params, body, headers=headers, ignore=ignore, timeout=timeout)
except TransportError as e:
if method == 'HEAD' and e.status_code == 404:
|
elastic/elasticsearch-py
|
0397527d12bcb43274ce9054111c5f7f673a7ad6
|
diff --git a/test_elasticsearch/test_connection.py b/test_elasticsearch/test_connection.py
index b2d84996..e4e0ae63 100644
--- a/test_elasticsearch/test_connection.py
+++ b/test_elasticsearch/test_connection.py
@@ -104,6 +104,13 @@ class TestRequestsConnection(TestCase):
self.assertEquals('GET', request.method)
self.assertEquals(None, request.body)
+ def test_merge_headers(self):
+ con = self._get_mock_connection(connection_params={'headers': {'h1': 'v1', 'h2': 'v2'}})
+ req = self._get_request(con, 'GET', '/', headers={'h2': 'v2p', 'h3': 'v3'})
+ self.assertEquals(req.headers['h1'], 'v1')
+ self.assertEquals(req.headers['h2'], 'v2p')
+ self.assertEquals(req.headers['h3'], 'v3')
+
def test_http_auth(self):
con = RequestsHttpConnection(http_auth='username:secret')
self.assertEquals(('username', 'secret'), con.session.auth)
diff --git a/test_elasticsearch/test_transport.py b/test_elasticsearch/test_transport.py
index 328325c1..50acb7fa 100644
--- a/test_elasticsearch/test_transport.py
+++ b/test_elasticsearch/test_transport.py
@@ -74,7 +74,7 @@ class TestTransport(TestCase):
t.perform_request('GET', '/', params={'request_timeout': 42})
self.assertEquals(1, len(t.get_connection().calls))
self.assertEquals(('GET', '/', {}, None), t.get_connection().calls[0][0])
- self.assertEquals({'timeout': 42, 'ignore': ()}, t.get_connection().calls[0][1])
+ self.assertEquals({'timeout': 42, 'ignore': (), 'headers': None}, t.get_connection().calls[0][1])
def test_send_get_body_as_source(self):
t = Transport([{}], send_get_body_as='source', connection_class=DummyConnection)
|
Set `application/x-ndjson` content type on bulk requests
As of Elasticsearch 5.x, requests to `/_bulk` should set `Content-Type` to `application/x-ndjson`. If not, Elasticsearch logs a warning. It looks like this library defaults to `application/json`. To fix, I'm thinking we should accept an optional dict of headers at `https://github.com/elastic/elasticsearch-py/blob/master/elasticsearch/connection/http_urllib3.py#L94`. If that sounds reasonable, I'd be happy to submit a patch.
cc @cnelson @wjwoodson
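A minimal sketch of the proposed shape (hedged; parameter names follow the patch above). Connections grow an optional per-request `headers` dict, and bulk-style endpoints pass the ndjson content type through it:
```
from elasticsearch import Elasticsearch

es = Elasticsearch()
body = (
    '{"index": {"_index": "test", "_type": "doc"}}\n'
    '{"field": "value"}\n'
)
# With the patch, client.bulk() sets this header internally; shown here
# explicitly via the transport layer.
es.transport.perform_request(
    "POST", "/_bulk",
    body=body,
    headers={"content-type": "application/x-ndjson"},
)
```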
|
0.0
|
0397527d12bcb43274ce9054111c5f7f673a7ad6
|
[
"test_elasticsearch/test_connection.py::TestRequestsConnection::test_merge_headers",
"test_elasticsearch/test_transport.py::TestTransport::test_request_timeout_extracted_from_params_and_passed"
] |
[
"test_elasticsearch/test_connection.py::TestRequestsConnection::test_url_prefix",
"test_elasticsearch/test_connection.py::TestRequestsConnection::test_repr",
"test_elasticsearch/test_connection.py::TestRequestsConnection::test_body_attached",
"test_elasticsearch/test_connection.py::TestRequestsConnection::test_head_with_404_doesnt_get_logged",
"test_elasticsearch/test_connection.py::TestRequestsConnection::test_failed_request_logs_and_traces",
"test_elasticsearch/test_connection.py::TestRequestsConnection::test_conflict_error_is_returned_on_409",
"test_elasticsearch/test_connection.py::TestRequestsConnection::test_defaults",
"test_elasticsearch/test_connection.py::TestRequestsConnection::test_http_auth_list",
"test_elasticsearch/test_connection.py::TestRequestsConnection::test_not_found_error_is_returned_on_404",
"test_elasticsearch/test_connection.py::TestRequestsConnection::test_uses_https_if_verify_certs_is_off",
"test_elasticsearch/test_connection.py::TestRequestsConnection::test_timeout_set",
"test_elasticsearch/test_connection.py::TestRequestsConnection::test_params_properly_encoded",
"test_elasticsearch/test_connection.py::TestRequestsConnection::test_custom_http_auth_is_allowed",
"test_elasticsearch/test_connection.py::TestRequestsConnection::test_request_error_is_returned_on_400",
"test_elasticsearch/test_connection.py::TestRequestsConnection::test_http_auth_tuple",
"test_elasticsearch/test_connection.py::TestRequestsConnection::test_http_auth",
"test_elasticsearch/test_connection.py::TestRequestsConnection::test_http_auth_attached",
"test_elasticsearch/test_connection.py::TestRequestsConnection::test_success_logs_and_traces",
"test_elasticsearch/test_connection.py::TestUrllib3Connection::test_doesnt_use_https_if_not_specified",
"test_elasticsearch/test_connection.py::TestUrllib3Connection::test_http_auth_list",
"test_elasticsearch/test_connection.py::TestUrllib3Connection::test_http_auth",
"test_elasticsearch/test_connection.py::TestUrllib3Connection::test_http_auth_tuple",
"test_elasticsearch/test_connection.py::TestUrllib3Connection::test_uses_https_if_verify_certs_is_off",
"test_elasticsearch/test_connection.py::TestUrllib3Connection::test_keep_alive_is_on_by_default",
"test_elasticsearch/test_connection.py::TestUrllib3Connection::test_timeout_set",
"test_elasticsearch/test_transport.py::TestHostsInfoCallback::test_master_only_nodes_are_ignored",
"test_elasticsearch/test_transport.py::TestTransport::test_custom_connection_class",
"test_elasticsearch/test_transport.py::TestTransport::test_add_connection",
"test_elasticsearch/test_transport.py::TestTransport::test_kwargs_passed_on_to_connections",
"test_elasticsearch/test_transport.py::TestTransport::test_body_bytes_get_passed_untouched",
"test_elasticsearch/test_transport.py::TestTransport::test_sniff_after_n_seconds",
"test_elasticsearch/test_transport.py::TestTransport::test_sniff_on_start_ignores_sniff_timeout",
"test_elasticsearch/test_transport.py::TestTransport::test_sniff_uses_sniff_timeout",
"test_elasticsearch/test_transport.py::TestTransport::test_body_gets_encoded_into_bytes",
"test_elasticsearch/test_transport.py::TestTransport::test_request_will_fail_after_X_retries",
"test_elasticsearch/test_transport.py::TestTransport::test_resurrected_connection_will_be_marked_as_live_on_success",
"test_elasticsearch/test_transport.py::TestTransport::test_sniff_reuses_connection_instances_if_possible",
"test_elasticsearch/test_transport.py::TestTransport::test_kwargs_passed_on_to_connection_pool",
"test_elasticsearch/test_transport.py::TestTransport::test_send_get_body_as_post",
"test_elasticsearch/test_transport.py::TestTransport::test_send_get_body_as_source",
"test_elasticsearch/test_transport.py::TestTransport::test_body_surrogates_replaced_encoded_into_bytes",
"test_elasticsearch/test_transport.py::TestTransport::test_failed_connection_will_be_marked_as_dead",
"test_elasticsearch/test_transport.py::TestTransport::test_sniff_on_start_fetches_and_uses_nodes_list",
"test_elasticsearch/test_transport.py::TestTransport::test_sniff_on_fail_triggers_sniffing_on_fail",
"test_elasticsearch/test_transport.py::TestTransport::test_sniff_will_use_seed_connections",
"test_elasticsearch/test_transport.py::TestTransport::test_single_connection_uses_dummy_connection_pool"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-07-13 02:52:29+00:00
|
apache-2.0
| 2,097 |
|
elastic__elasticsearch-py-628
|
diff --git a/elasticsearch/client/utils.py b/elasticsearch/client/utils.py
index 78b005cd..120b4abe 100644
--- a/elasticsearch/client/utils.py
+++ b/elasticsearch/client/utils.py
@@ -26,13 +26,13 @@ def _escape(value):
elif isinstance(value, bool):
value = str(value).lower()
+ # don't decode bytestrings
+ elif isinstance(value, bytes):
+ return value
+
# encode strings to utf-8
if isinstance(value, string_types):
- try:
- return value.encode('utf-8')
- except UnicodeDecodeError:
- # Python 2 and str, no need to re-encode
- pass
+ return value.encode('utf-8')
return str(value)
|
elastic/elasticsearch-py
|
4e6f63571105545914c07fa4846f996d6049f44b
|
diff --git a/test_elasticsearch/test_client/test_utils.py b/test_elasticsearch/test_client/test_utils.py
index 9fd6df4f..66a83293 100644
--- a/test_elasticsearch/test_client/test_utils.py
+++ b/test_elasticsearch/test_client/test_utils.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
-from elasticsearch.client.utils import _make_path
+from elasticsearch.client.utils import _make_path, _escape
from elasticsearch.compat import PY2
from ..test_cases import TestCase, SkipTest
@@ -17,3 +17,24 @@ class TestMakePath(TestCase):
id = "中文".encode('utf-8')
self.assertEquals('/some-index/type/%E4%B8%AD%E6%96%87', _make_path('some-index', 'type', id))
+
+class TestEscape(TestCase):
+ def test_handles_ascii(self):
+ string = "abc123"
+ self.assertEquals(
+ b'abc123',
+ _escape(string)
+ )
+ def test_handles_unicode(self):
+ string = "中文"
+ self.assertEquals(
+ b'\xe4\xb8\xad\xe6\x96\x87',
+ _escape(string)
+ )
+
+ def test_handles_bytestring(self):
+ string = b'celery-task-meta-c4f1201f-eb7b-41d5-9318-a75a8cfbdaa0'
+ self.assertEquals(
+ string,
+ _escape(string)
+ )
|
'bytes' object has no attribute 'encode'
```
  File "/root/.local/share/virtualenvs/celery-jR7fJ1bi/lib/python3.5/site-packages/elasticsearch/client/utils.py", line 32, in _escape
    return value.encode('utf-8')
AttributeError: 'bytes' object has no attribute 'encode'
```
It appears that when `_escape` receives `value`, which in my case is `b'celery-task-meta-c4f1201f-eb7b-41d5-9318-a75a8cfbdaa0'`, it is checked against `(<class 'str'>, <class 'bytes'>)` and therefore passes the test, but `<class 'bytes'>` has no **encode** method.
The `try`/`except` does not catch this, because the raised error is `AttributeError: 'bytes' object has no attribute 'encode'`, not the expected `UnicodeDecodeError`.
```
def _escape(value):
    """
    Escape a single value of a URL string or a query parameter. If it is a list
    or tuple, turn it into a comma-separated string first.
    """
    # make sequences into comma-separated stings
    if isinstance(value, (list, tuple)):
        value = ','.join(value)

    # dates and datetimes into isoformat
    elif isinstance(value, (date, datetime)):
        value = value.isoformat()

    # make bools into true/false strings
    elif isinstance(value, bool):
        value = str(value).lower()

    # encode strings to utf-8
    if isinstance(value, string_types):
        try:
            return value.encode('utf-8')
        except UnicodeDecodeError:
            # Python 2 and str, no need to re-encode
            pass

    return str(value)
```
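A hedged reproduction sketch of how the bytestring reaches `_escape` (the id above comes from Celery's Elasticsearch result backend):
```
from elasticsearch.client.utils import _escape

task_id = b'celery-task-meta-c4f1201f-eb7b-41d5-9318-a75a8cfbdaa0'
# On Python 3, bytes matched the string_types check and then crashed on
# .encode(); after the fix the bytestring is returned untouched.
print(_escape(task_id))
```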
|
0.0
|
4e6f63571105545914c07fa4846f996d6049f44b
|
[
"test_elasticsearch/test_client/test_utils.py::TestEscape::test_handles_bytestring"
] |
[
"test_elasticsearch/test_client/test_utils.py::TestMakePath::test_handles_unicode",
"test_elasticsearch/test_client/test_utils.py::TestEscape::test_handles_ascii",
"test_elasticsearch/test_client/test_utils.py::TestEscape::test_handles_unicode"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2017-08-07 21:39:19+00:00
|
apache-2.0
| 2,098 |
|
elcaminoreal__elcaminoreal-10
|
diff --git a/src/elcaminoreal/_gather.py b/src/elcaminoreal/_gather.py
index 63f85a9..cbe7637 100644
--- a/src/elcaminoreal/_gather.py
+++ b/src/elcaminoreal/_gather.py
@@ -24,12 +24,13 @@ class Commands(object):
def command(self,
name=None,
parser=caparg.command(''),
- dependencies=pyrsistent.v()):
+ dependencies=pyrsistent.v(),
+ regular=False):
"""
Register as a command.
"""
- transform = gather.Wrapper.glue((dependencies, parser))
+ transform = gather.Wrapper.glue((dependencies, parser, regular))
ret = self._command_collector.register(name, transform=transform)
return ret
@@ -47,20 +48,27 @@ class Commands(object):
parsed = command.parse(args)
subcommand = ' '.join(parsed['__caparg_subcommand__'])
func = collection[subcommand].original
- dependencies, _ignored = collection[subcommand].extra
+ dependencies, _ignored, regular = collection[subcommand].extra
graph = self.mkgraph(dependencies)
graph.update(override_dependencies)
- return func(parsed, graph)
+ if not regular:
+ return func(parsed, graph)
+ args = {dependency: graph[dependency]
+ for dependency in dependencies}
+ args.update(parsed)
+ del args['__caparg_subcommand__']
+ return func(**args)
def dependency(self,
name=None,
dependencies=pyrsistent.v(),
- possible_dependencies=pyrsistent.v()):
+ possible_dependencies=pyrsistent.v(),
+ regular=False):
"""
Register as a dependency.
"""
- glue = (dependencies, possible_dependencies)
+ glue = (dependencies, possible_dependencies, regular)
transform = gather.Wrapper.glue(glue)
ret = self._collector.register(name, transform=transform)
return ret
@@ -83,14 +91,20 @@ class Commands(object):
on_route = on_route.add(thing)
plugin = collection[thing]
func = plugin.original
- dependencies, possible_dependencies = plugin.extra
+ dependencies, possible_dependencies, regular = plugin.extra
my_dependencies, my_possible_dependencies = {}, {}
for other_thing in dependencies:
my_dependencies[other_thing] = _build(other_thing, on_route)
for other_thing in possible_dependencies:
builder = functools.partial(_build, other_thing, on_route)
my_possible_dependencies[other_thing] = builder
- ret[thing] = func(my_dependencies, my_possible_dependencies)
+ if regular:
+ args = {'build_' + key: value
+ for key, value in my_possible_dependencies.items()}
+ args.update(my_dependencies)
+ ret[thing] = func(**args)
+ else:
+ ret[thing] = func(my_dependencies, my_possible_dependencies)
return ret[thing]
for thing in things:
_build(thing)
diff --git a/tox.ini b/tox.ini
index 0ef3c06..297cc23 100644
--- a/tox.ini
+++ b/tox.ini
@@ -30,7 +30,7 @@ commands =
# E0704 -- bare raise outside except (rare, when it's done I mean it)
# R0201 -- unused self in methods (methods can be used for polymorphism)
# R0903 -- too few public methods (attrs-based classes have implicit ones)
- py27-lint: pylint --disable=unsupported-assignment-operation --disable=no-member --disable=not-callable --disable=unsubscriptable-object --disable=E0704 --disable=R0903 --disable=R0201 src/elcaminoreal
+ py27-lint: pylint --disable=blacklisted-name --disable=unsupported-assignment-operation --disable=no-member --disable=not-callable --disable=unsubscriptable-object --disable=E0704 --disable=R0903 --disable=R0201 src/elcaminoreal
py27-lint: flake8 src/elcaminoreal
#{py27,pypy,py36,py35}-func: python -m elcaminoreal.example selftest
#{py27,pypy,py35}-func: python -m elcaminoreal.example selftest
|
elcaminoreal/elcaminoreal
|
a76600014ed645b6f92b98bf491f2b49e5625d6d
|
diff --git a/src/elcaminoreal/test/some_plugins.py b/src/elcaminoreal/test/some_plugins.py
index a885594..9507b74 100644
--- a/src/elcaminoreal/test/some_plugins.py
+++ b/src/elcaminoreal/test/some_plugins.py
@@ -21,6 +21,18 @@ def a_foo(dependencies, _possible_dependencies):
return dict(bar=dependencies['bar'])
[email protected](dependencies=["bar", "quux"],
+ possible_dependencies=["foo"],
+ regular=True)
+def regular(bar, quux, build_foo):
+ """
+ Depend on bar, maybe on foo
+
+ Use regular arguments.
+ """
+ return dict(bar=bar, quux=quux, foo=build_foo())
+
+
@COMMANDS.dependency(possible_dependencies=["bar"])
def foo_2(_dependencies, possible_dependencies):
"""
@@ -37,6 +49,14 @@ def a_bar(_dependencies, _possible_dependencies):
return "I'm a bar"
[email protected](name="quux")
+def a_quux(_dependencies, _possible_dependencies):
+ """
+ Return a quux-like object.
+ """
+ return "I'm a quux"
+
+
@COMMANDS.dependency()
def rand(_dependencies, _possible_dependencies):
"""
@@ -83,6 +103,28 @@ def _print(_dependencies, _possible_dependencies):
return print
[email protected](name='output')
+def dummy_output(_dependencies, _possible_dependencies):
+ """
+ Literally do nothing.
+
+ This is designed for being overridden.
+ """
+
+
[email protected](dependencies=['foo', 'output'],
+ parser=ca.command('',
+ ca.positional('lili', type=str)),
+ regular=True)
+def regular_command(foo, lili, output):
+ """
+ Use regular arguments
+
+ Output results
+ """
+ output(foo, lili)
+
+
@COMMANDS.command(dependencies=['foo', 'print'],
parser=ca.command('',
ca.positional('lala', type=str)))
diff --git a/src/elcaminoreal/test/test_gathering.py b/src/elcaminoreal/test/test_gathering.py
index 8049cd1..7ec2028 100644
--- a/src/elcaminoreal/test/test_gathering.py
+++ b/src/elcaminoreal/test/test_gathering.py
@@ -46,6 +46,15 @@ class DependencyResolverTester(unittest.TestCase):
result = some_plugins.COMMANDS.mkgraph(['foo_2'])
self.assertEquals(result['foo_2'], dict(bar="I'm a bar"))
+ def test_mkgraph_regular(self):
+ """
+ mkgraph regular functions work
+ """
+ result = some_plugins.COMMANDS.mkgraph(['regular'])
+ self.assertEquals(result['regular']['bar'], result['bar'])
+ self.assertEquals(result['regular']['quux'], result['quux'])
+ self.assertEquals(result['regular']['foo'], result['foo'])
+
class RunnerResolverTester(unittest.TestCase):
@@ -100,3 +109,18 @@ class RunnerResolverTester(unittest.TestCase):
some_plugins.COMMANDS.run(['no-such-command'])
error_message = filep.getvalue().splitlines()
self.assertEquals(error_message.pop(0), 'Usage:')
+
+ def test_regular(self):
+ """
+ Asking for regular arguments calls functions with argument names
+ """
+ output = []
+
+ def _my_output(*args):
+ output.append(args)
+ dependencies = dict(output=_my_output)
+ some_plugins.COMMANDS.run(['regular-command', 'thing'],
+ override_dependencies=dependencies)
+ self.assertEquals(len(output), 1)
+ self.assertEquals(output[0][0]['bar'], "I'm a bar")
+ self.assertEquals(output[0][1], 'thing')
|
Allow regular arguments to commands and dependencies
The following should work:
```
@COMMANDS.dependency(
dependencies=['foo', 'bar'],
possible_dependencies=['baz'],
regular=True)
def thing(foo, bar, build_baz):
val = foo()
if val > 5:
val -= build_baz()
return val + bar()
@COMMANDS.command(
dependencies=['thing'],
parser=ca.command('',
ca.positional('lala', type=str)),
regular=True)
def do_stuff(thing, lala):
return str(thing) + lala
```
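Below is a minimal, self-contained sketch (illustrative only, with hypothetical names; the real dispatch lives in `Commands.run` and `Commands.mkgraph` in the patch above) of how `regular=True` maps resolved dependencies to keyword arguments and exposes possible dependencies as `build_`-prefixed builders:
```python
def call_regular(func, dependencies, possible_dependencies):
    # Dependencies are passed by name; possible dependencies are exposed
    # as lazily-invoked zero-argument builders under a 'build_' prefix.
    kwargs = dict(dependencies)
    for name, builder in possible_dependencies.items():
        kwargs['build_' + name] = builder
    return func(**kwargs)

def thing(foo, bar, build_baz):
    val = foo
    if val > 5:
        val -= build_baz()
    return val + bar

# thing receives foo=7 and bar=1 directly, and builds baz only when needed.
assert call_regular(thing, {'foo': 7, 'bar': 1}, {'baz': lambda: 2}) == 6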
|
0.0
|
a76600014ed645b6f92b98bf491f2b49e5625d6d
|
[
"src/elcaminoreal/test/test_gathering.py::RunnerResolverTester::test_required"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-04-12 03:32:11+00:00
|
mit
| 2,099 |
|
eliben__pycparser-236
|
diff --git a/pycparser/c_parser.py b/pycparser/c_parser.py
index f01f67f..47d958f 100644
--- a/pycparser/c_parser.py
+++ b/pycparser/c_parser.py
@@ -616,6 +616,59 @@ class CParser(PLYParser):
"""
p[0] = p[1]
+ # A pragma is generally considered a decorator rather than an actual statement.
+ # Still, for the purposes of analyzing an abstract syntax tree of C code,
+ # pragmas should not be ignored and were previously treated as a statement.
+ # This presents a problem for constructs that take a statement such as labeled_statements,
+ # selection_statements, and iteration_statements, causing a misleading structure
+ # in the AST. For example, consider the following C code.
+ #
+ # for (int i = 0; i < 3; i++)
+ # #pragma omp critical
+ # sum += 1;
+ #
+ # This code will compile and execute "sum += 1;" as the body of the for loop.
+ # Previous implementations of PyCParser would render the AST for this
+ # block of code as follows:
+ #
+ # For:
+ # DeclList:
+ # Decl: i, [], [], []
+ # TypeDecl: i, []
+ # IdentifierType: ['int']
+ # Constant: int, 0
+ # BinaryOp: <
+ # ID: i
+ # Constant: int, 3
+ # UnaryOp: p++
+ # ID: i
+ # Pragma: omp critical
+ # Assignment: +=
+ # ID: sum
+ # Constant: int, 1
+ #
+ # This AST misleadingly takes the Pragma as the body of the loop and the
+ # assignment then becomes a sibling of the loop.
+ #
+ # To solve edge cases like these, the pragmacomp_or_statement rule groups
+ # a pragma and its following statement (which would otherwise be orphaned)
+ # using a compound block, effectively turning the above code into:
+ #
+ # for (int i = 0; i < 3; i++) {
+ # #pragma omp critical
+ # sum += 1;
+ # }
+ def p_pragmacomp_or_statement(self, p):
+ """ pragmacomp_or_statement : pppragma_directive statement
+ | statement
+ """
+ if isinstance(p[1], c_ast.Pragma) and len(p) == 3:
+ p[0] = c_ast.Compound(
+ block_items=[p[1], p[2]],
+ coord=self._token_coord(p, 1))
+ else:
+ p[0] = p[1]
+
# In C, declarations can come several in a line:
# int x, *px, romulo = 5;
#
@@ -1410,44 +1463,44 @@ class CParser(PLYParser):
coord=self._token_coord(p, 1))
def p_labeled_statement_1(self, p):
- """ labeled_statement : ID COLON statement """
+ """ labeled_statement : ID COLON pragmacomp_or_statement """
p[0] = c_ast.Label(p[1], p[3], self._token_coord(p, 1))
def p_labeled_statement_2(self, p):
- """ labeled_statement : CASE constant_expression COLON statement """
+ """ labeled_statement : CASE constant_expression COLON pragmacomp_or_statement """
p[0] = c_ast.Case(p[2], [p[4]], self._token_coord(p, 1))
def p_labeled_statement_3(self, p):
- """ labeled_statement : DEFAULT COLON statement """
+ """ labeled_statement : DEFAULT COLON pragmacomp_or_statement """
p[0] = c_ast.Default([p[3]], self._token_coord(p, 1))
def p_selection_statement_1(self, p):
- """ selection_statement : IF LPAREN expression RPAREN statement """
+ """ selection_statement : IF LPAREN expression RPAREN pragmacomp_or_statement """
p[0] = c_ast.If(p[3], p[5], None, self._token_coord(p, 1))
def p_selection_statement_2(self, p):
- """ selection_statement : IF LPAREN expression RPAREN statement ELSE statement """
+ """ selection_statement : IF LPAREN expression RPAREN statement ELSE pragmacomp_or_statement """
p[0] = c_ast.If(p[3], p[5], p[7], self._token_coord(p, 1))
def p_selection_statement_3(self, p):
- """ selection_statement : SWITCH LPAREN expression RPAREN statement """
+ """ selection_statement : SWITCH LPAREN expression RPAREN pragmacomp_or_statement """
p[0] = fix_switch_cases(
c_ast.Switch(p[3], p[5], self._token_coord(p, 1)))
def p_iteration_statement_1(self, p):
- """ iteration_statement : WHILE LPAREN expression RPAREN statement """
+ """ iteration_statement : WHILE LPAREN expression RPAREN pragmacomp_or_statement """
p[0] = c_ast.While(p[3], p[5], self._token_coord(p, 1))
def p_iteration_statement_2(self, p):
- """ iteration_statement : DO statement WHILE LPAREN expression RPAREN SEMI """
+ """ iteration_statement : DO pragmacomp_or_statement WHILE LPAREN expression RPAREN SEMI """
p[0] = c_ast.DoWhile(p[5], p[2], self._token_coord(p, 1))
def p_iteration_statement_3(self, p):
- """ iteration_statement : FOR LPAREN expression_opt SEMI expression_opt SEMI expression_opt RPAREN statement """
+ """ iteration_statement : FOR LPAREN expression_opt SEMI expression_opt SEMI expression_opt RPAREN pragmacomp_or_statement """
p[0] = c_ast.For(p[3], p[5], p[7], p[9], self._token_coord(p, 1))
def p_iteration_statement_4(self, p):
- """ iteration_statement : FOR LPAREN declaration expression_opt SEMI expression_opt RPAREN statement """
+ """ iteration_statement : FOR LPAREN declaration expression_opt SEMI expression_opt RPAREN pragmacomp_or_statement """
p[0] = c_ast.For(c_ast.DeclList(p[3], self._token_coord(p, 1)),
p[4], p[6], p[8], self._token_coord(p, 1))
|
eliben/pycparser
|
4992410bf8c2d6d7eb94703d0f6f94b5a9acaa0a
|
diff --git a/tests/test_c_parser.py b/tests/test_c_parser.py
index ab6143f..3b336bf 100755
--- a/tests/test_c_parser.py
+++ b/tests/test_c_parser.py
@@ -1369,6 +1369,54 @@ class TestCParser_fundamentals(TestCParser_base):
self.assertEqual(s1_ast.ext[2].type.type.decls[0].string, 'baz')
self.assertEqual(s1_ast.ext[2].type.type.decls[0].coord.line, 9)
+ def test_pragmacomp_or_statement(self):
+ s1 = r'''
+ void main() {
+ int sum = 0;
+ for (int i; i < 3; i++)
+ #pragma omp critical
+ sum += 1;
+
+ while(sum < 10)
+ #pragma omp critical
+ sum += 1;
+
+ mylabel:
+ #pragma foo
+ sum += 10;
+
+ if (sum > 10)
+ #pragma bar
+ sum = 10;
+
+ switch (sum)
+ case 10:
+ #pragma foo
+ sum = 20;
+ }
+ '''
+ s1_ast = self.parse(s1)
+ self.assertTrue(isinstance(s1_ast.ext[0].body.block_items[1], For))
+ self.assertTrue(isinstance(s1_ast.ext[0].body.block_items[1].stmt, Compound))
+ self.assertTrue(isinstance(s1_ast.ext[0].body.block_items[1].stmt.block_items[0], Pragma))
+ self.assertTrue(isinstance(s1_ast.ext[0].body.block_items[1].stmt.block_items[1], Assignment))
+ self.assertTrue(isinstance(s1_ast.ext[0].body.block_items[2], While))
+ self.assertTrue(isinstance(s1_ast.ext[0].body.block_items[2].stmt, Compound))
+ self.assertTrue(isinstance(s1_ast.ext[0].body.block_items[2].stmt.block_items[0], Pragma))
+ self.assertTrue(isinstance(s1_ast.ext[0].body.block_items[2].stmt.block_items[1], Assignment))
+ self.assertTrue(isinstance(s1_ast.ext[0].body.block_items[3], Label))
+ self.assertTrue(isinstance(s1_ast.ext[0].body.block_items[3].stmt, Compound))
+ self.assertTrue(isinstance(s1_ast.ext[0].body.block_items[3].stmt.block_items[0], Pragma))
+ self.assertTrue(isinstance(s1_ast.ext[0].body.block_items[3].stmt.block_items[1], Assignment))
+ self.assertTrue(isinstance(s1_ast.ext[0].body.block_items[4], If))
+ self.assertTrue(isinstance(s1_ast.ext[0].body.block_items[4].iftrue, Compound))
+ self.assertTrue(isinstance(s1_ast.ext[0].body.block_items[4].iftrue.block_items[0], Pragma))
+ self.assertTrue(isinstance(s1_ast.ext[0].body.block_items[4].iftrue.block_items[1], Assignment))
+ self.assertTrue(isinstance(s1_ast.ext[0].body.block_items[5], Switch))
+ self.assertTrue(isinstance(s1_ast.ext[0].body.block_items[5].stmt.stmts[0], Compound))
+ self.assertTrue(isinstance(s1_ast.ext[0].body.block_items[5].stmt.stmts[0].block_items[0], Pragma))
+ self.assertTrue(isinstance(s1_ast.ext[0].body.block_items[5].stmt.stmts[0].block_items[1], Assignment))
+
class TestCParser_whole_code(TestCParser_base):
""" Testing of parsing whole chunks of code.
|
Incorrect AST Structure when #pragma follows For loop.
Consider the following code snippet:
```c
for(int i = 0; i < 3; i++)
#pragma omp critical
sum += 1;
```
When compiled without OpenMP, the #pragma should be ignored completely, so that the statement ``sum += 1`` is a descendant of the for loop. However, in the current implementation of pycparser, it is parsed as a _sibling_ of the for loop, instead of as a descendant.
```
For:
DeclList:
Decl: i, [], [], []
TypeDecl: i, []
IdentifierType: ['int']
Constant: int, 0
BinaryOp: <
ID: i
Constant: int, 3
UnaryOp: p++
ID: i
Pragma: omp critical
Assignment: +=
ID: sum
Constant: int, 1
```
The same problem applies to other loops, Labels, and If statements, as in the following:
```c
for(int i = 0; i < 3; i++)
myLabel:
#pragma omp critical
sum += 1;
```
```c
while(sum < 100)
#pragma omp critical
sum += 1;
```
```c
if (sum < 100)
#pragma omp critical
sum += 1;
```
The following will not even parse, but it should:
```c
do
#pragma omp critical
sum += 1;
while(sum < 100);
```
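With the grammar fix in place, a quick check (a sketch assuming pycparser is importable, mirroring the assertions in the test patch) confirms that both the pragma and the assignment land inside the loop body:
```python
from pycparser import c_parser, c_ast

src = '''
void f(void) {
    int sum = 0;
    for (int i = 0; i < 3; i++)
        #pragma omp critical
        sum += 1;
}
'''
ast = c_parser.CParser().parse(src)
loop = ast.ext[0].body.block_items[1]            # the For node
assert isinstance(loop, c_ast.For)
assert isinstance(loop.stmt, c_ast.Compound)     # synthesized compound body
assert isinstance(loop.stmt.block_items[0], c_ast.Pragma)
assert isinstance(loop.stmt.block_items[1], c_ast.Assignment)
```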
|
0.0
|
4992410bf8c2d6d7eb94703d0f6f94b5a9acaa0a
|
[
"tests/test_c_parser.py::TestCParser_fundamentals::test_pragmacomp_or_statement"
] |
[
"tests/test_c_parser.py::TestCParser_fundamentals::test_FileAST",
"tests/test_c_parser.py::TestCParser_fundamentals::test_anonymous_struct_union",
"tests/test_c_parser.py::TestCParser_fundamentals::test_compound_literals",
"tests/test_c_parser.py::TestCParser_fundamentals::test_compound_statement",
"tests/test_c_parser.py::TestCParser_fundamentals::test_coords",
"tests/test_c_parser.py::TestCParser_fundamentals::test_decl_inits",
"tests/test_c_parser.py::TestCParser_fundamentals::test_decl_named_inits",
"tests/test_c_parser.py::TestCParser_fundamentals::test_duplicate_typedef",
"tests/test_c_parser.py::TestCParser_fundamentals::test_empty_toplevel_decl",
"tests/test_c_parser.py::TestCParser_fundamentals::test_enums",
"tests/test_c_parser.py::TestCParser_fundamentals::test_forloop_coord",
"tests/test_c_parser.py::TestCParser_fundamentals::test_func_decls_with_array_dim_qualifiers",
"tests/test_c_parser.py::TestCParser_fundamentals::test_function_definitions",
"tests/test_c_parser.py::TestCParser_fundamentals::test_inline_specifier",
"tests/test_c_parser.py::TestCParser_fundamentals::test_int128",
"tests/test_c_parser.py::TestCParser_fundamentals::test_invalid_multiple_types_error",
"tests/test_c_parser.py::TestCParser_fundamentals::test_multi_decls",
"tests/test_c_parser.py::TestCParser_fundamentals::test_nested_decls",
"tests/test_c_parser.py::TestCParser_fundamentals::test_offsetof",
"tests/test_c_parser.py::TestCParser_fundamentals::test_pragma",
"tests/test_c_parser.py::TestCParser_fundamentals::test_qualifiers_storage_specifiers",
"tests/test_c_parser.py::TestCParser_fundamentals::test_simple_decls",
"tests/test_c_parser.py::TestCParser_fundamentals::test_sizeof",
"tests/test_c_parser.py::TestCParser_fundamentals::test_struct_bitfields",
"tests/test_c_parser.py::TestCParser_fundamentals::test_struct_members_namespace",
"tests/test_c_parser.py::TestCParser_fundamentals::test_struct_union",
"tests/test_c_parser.py::TestCParser_fundamentals::test_struct_with_extra_semis_inside",
"tests/test_c_parser.py::TestCParser_fundamentals::test_tags_namespace",
"tests/test_c_parser.py::TestCParser_fundamentals::test_typedef",
"tests/test_c_parser.py::TestCParser_fundamentals::test_unified_string_literals",
"tests/test_c_parser.py::TestCParser_fundamentals::test_unified_wstring_literals",
"tests/test_c_parser.py::TestCParser_fundamentals::test_vla",
"tests/test_c_parser.py::TestCParser_whole_code::test_empty_statements",
"tests/test_c_parser.py::TestCParser_whole_code::test_expressions",
"tests/test_c_parser.py::TestCParser_whole_code::test_for_statement",
"tests/test_c_parser.py::TestCParser_whole_code::test_statements",
"tests/test_c_parser.py::TestCParser_whole_code::test_switch_statement",
"tests/test_c_parser.py::TestCParser_whole_code::test_whole_file",
"tests/test_c_parser.py::TestCParser_whole_code::test_whole_file_with_stdio",
"tests/test_c_parser.py::TestCParser_typenames::test_ambiguous_parameters",
"tests/test_c_parser.py::TestCParser_typenames::test_innerscope_reuse_typedef_name",
"tests/test_c_parser.py::TestCParser_typenames::test_innerscope_typedef",
"tests/test_c_parser.py::TestCParser_typenames::test_nested_function_decls",
"tests/test_c_parser.py::TestCParser_typenames::test_parameter_reuse_typedef_name",
"tests/test_c_parser.py::TestCParser_typenames::test_samescope_reuse_name"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2018-03-01 22:45:54+00:00
|
bsd-3-clause
| 2,100 |
|
eliben__pycparser-255
|
diff --git a/pycparser/c_generator.py b/pycparser/c_generator.py
index 0575b8b..4c86f84 100644
--- a/pycparser/c_generator.py
+++ b/pycparser/c_generator.py
@@ -283,8 +283,8 @@ class CGenerator(object):
for name in n.name:
if isinstance(name, c_ast.ID):
s += '.' + name.name
- elif isinstance(name, c_ast.Constant):
- s += '[' + name.value + ']'
+ else:
+ s += '[' + self.visit(name) + ']'
s += ' = ' + self._visit_expr(n.expr)
return s
|
eliben/pycparser
|
168f54c3ae324c3827d22fb90e456653e6fe584a
|
diff --git a/tests/test_c_generator.py b/tests/test_c_generator.py
index 9385e80..4e38f28 100644
--- a/tests/test_c_generator.py
+++ b/tests/test_c_generator.py
@@ -228,6 +228,11 @@ class TestCtoC(unittest.TestCase):
}
''')
+ def test_issue246(self):
+ self._assert_ctoc_correct(r'''
+ int array[3] = {[0] = 0, [1] = 1, [1+1] = 2};
+ ''')
+
def test_exprlist_with_semi(self):
self._assert_ctoc_correct(r'''
void x() {
|
Constant expressions in designated initializers are not generated back to C
While pycparser correctly parses a constant-expression in a designated initializer (the AST is correct), it fails to write it back when generating C code.
Consider the following code:
```C
void myFunction(void)
{
int array[3] = {[0] = 0, [1] = 1, [1+1] = 2};
}
```
Parsing it, then using `CGenerator` to generate the source produces:
```C
void myFunction(void)
{
int array[3] = {[0] = 0, [1] = 1, = 2};
}
```
The C99 grammar describes the designator part of designated initializers as:
```ebnf
designator: [ constant-expression ]
. identifier
```
(See §6.7.8 in http://www.open-std.org/jtc1/sc22/WG14/www/docs/n1256.pdf)
The ```CGenerator.visit_NamedInitializer``` currently only considers the `ID` and `Constant` types.
The `Constant` branch should either be extended to other types or be an `else:` branch.
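A round-trip sketch (assuming pycparser is importable) of the expected behavior once the branch handles arbitrary designator expressions:
```python
from pycparser import c_parser, c_generator

src = 'int array[3] = {[0] = 0, [1] = 1, [1+1] = 2};'
ast = c_parser.CParser().parse(src)
print(c_generator.CGenerator().visit(ast))
# With the fix, the full designator survives regeneration, e.g.:
#   int array[3] = {[0] = 0, [1] = 1, [1 + 1] = 2};
```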
|
0.0
|
168f54c3ae324c3827d22fb90e456653e6fe584a
|
[
"tests/test_c_generator.py::TestCtoC::test_issue246"
] |
[
"tests/test_c_generator.py::TestFunctionDeclGeneration::test_partial_funcdecl_generation",
"tests/test_c_generator.py::TestCtoC::test_casts",
"tests/test_c_generator.py::TestCtoC::test_comma_op_assignment",
"tests/test_c_generator.py::TestCtoC::test_comma_op_in_ternary",
"tests/test_c_generator.py::TestCtoC::test_comma_operator_funcarg",
"tests/test_c_generator.py::TestCtoC::test_complex_decls",
"tests/test_c_generator.py::TestCtoC::test_compound_literal",
"tests/test_c_generator.py::TestCtoC::test_enum",
"tests/test_c_generator.py::TestCtoC::test_enum_typedef",
"tests/test_c_generator.py::TestCtoC::test_expr_list_in_initializer_list",
"tests/test_c_generator.py::TestCtoC::test_exprlist_with_semi",
"tests/test_c_generator.py::TestCtoC::test_exprlist_with_subexprlist",
"tests/test_c_generator.py::TestCtoC::test_exprs",
"tests/test_c_generator.py::TestCtoC::test_generate_struct_union_enum_exception",
"tests/test_c_generator.py::TestCtoC::test_initlist",
"tests/test_c_generator.py::TestCtoC::test_issue36",
"tests/test_c_generator.py::TestCtoC::test_issue37",
"tests/test_c_generator.py::TestCtoC::test_issue83",
"tests/test_c_generator.py::TestCtoC::test_issue84",
"tests/test_c_generator.py::TestCtoC::test_krstyle",
"tests/test_c_generator.py::TestCtoC::test_nest_initializer_list",
"tests/test_c_generator.py::TestCtoC::test_nest_named_initializer",
"tests/test_c_generator.py::TestCtoC::test_pragma",
"tests/test_c_generator.py::TestCtoC::test_statements",
"tests/test_c_generator.py::TestCtoC::test_struct_decl",
"tests/test_c_generator.py::TestCtoC::test_switchcase",
"tests/test_c_generator.py::TestCtoC::test_ternary",
"tests/test_c_generator.py::TestCtoC::test_trivial_decls"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-04-26 10:13:25+00:00
|
bsd-3-clause
| 2,101 |
|
eliben__pycparser-346
|
diff --git a/pycparser/ast_transforms.py b/pycparser/ast_transforms.py
index ba50966..0aeb88f 100644
--- a/pycparser/ast_transforms.py
+++ b/pycparser/ast_transforms.py
@@ -74,7 +74,8 @@ def fix_switch_cases(switch_node):
# Goes over the children of the Compound below the Switch, adding them
# either directly below new_compound or below the last Case as appropriate
- for child in switch_node.stmt.block_items:
+ # (for `switch(cond) {}`, block_items would have been None)
+ for child in (switch_node.stmt.block_items or []):
if isinstance(child, (c_ast.Case, c_ast.Default)):
# If it's a Case/Default:
# 1. Add it to the Compound and mark as "last case"
|
eliben/pycparser
|
bc2010aea92535cb1d70be9fc1bebeb6eff229d8
|
diff --git a/tests/test_c_parser.py b/tests/test_c_parser.py
index ad9a218..49cada3 100755
--- a/tests/test_c_parser.py
+++ b/tests/test_c_parser.py
@@ -1792,6 +1792,7 @@ class TestCParser_whole_code(TestCParser_base):
switch = ps1.ext[0].body.block_items[0]
block = switch.stmt.block_items
+ self.assertEqual(len(block), 4)
assert_case_node(block[0], '10')
self.assertEqual(len(block[0].stmts), 3)
assert_case_node(block[1], '20')
@@ -1819,6 +1820,7 @@ class TestCParser_whole_code(TestCParser_base):
switch = ps2.ext[0].body.block_items[0]
block = switch.stmt.block_items
+ self.assertEqual(len(block), 5)
assert_default_node(block[0])
self.assertEqual(len(block[0].stmts), 2)
assert_case_node(block[1], '10')
@@ -1830,6 +1832,18 @@ class TestCParser_whole_code(TestCParser_base):
assert_case_node(block[4], '40')
self.assertEqual(len(block[4].stmts), 1)
+ s3 = r'''
+ int foo(void) {
+ switch (myvar) {
+ }
+ return 0;
+ }
+ '''
+ ps3 = self.parse(s3)
+ switch = ps3.ext[0].body.block_items[0]
+
+ self.assertEqual(switch.stmt.block_items, [])
+
def test_for_statement(self):
s2 = r'''
void x(void)
|
"'NoneType' object is not iterable" seen for switch statement without cases
Seen calling parse_file on a file containing the below snippet (similar code was generated by a preprocessor). This compiles with `gcc`. I think checking if block_items is None may be sufficient.
```c
int main() {
int type = 2;
switch(type) {}
}
```
```
/usr/local/lib/python3.7/site-packages/pycparser/ply/yacc.py in parseopt_notrack(self, input, lexer, debug, tracking, tokenfunc)
1116 del symstack[-plen:]
1117 self.state = state
-> 1118 p.callable(pslice)
1119 del statestack[-plen:]
1120 symstack.append(sym)
/usr/local/lib/python3.7/site-packages/pycparser/c_parser.py in p_selection_statement_3(self, p)
1505 """ selection_statement : SWITCH LPAREN expression RPAREN pragmacomp_or_statement """
1506 p[0] = fix_switch_cases(
-> 1507 c_ast.Switch(p[3], p[5], self._token_coord(p, 1)))
1508
1509 def p_iteration_statement_1(self, p):
/usr/local/lib/python3.7/site-packages/pycparser/ast_transforms.py in fix_switch_cases(switch_node)
75 # Goes over the children of the Compound below the Switch, adding them
76 # either directly below new_compound or below the last Case as appropriate
---> 77 for child in switch_node.stmt.block_items:
78 if isinstance(child, (c_ast.Case, c_ast.Default)):
79 # If it's a Case/Default:
TypeError: 'NoneType' object is not iterable
```
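With the proposed guard in place, the snippet parses and the switch body is simply empty; a small check (a sketch assuming pycparser is importable, mirroring the added test):
```python
from pycparser import c_parser

ast = c_parser.CParser().parse('int main() { int type = 2; switch (type) {} }')
switch = ast.ext[0].body.block_items[1]  # block_items[0] is the declaration
assert switch.stmt.block_items == []     # previously: TypeError during parse
```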
|
0.0
|
bc2010aea92535cb1d70be9fc1bebeb6eff229d8
|
[
"tests/test_c_parser.py::TestCParser_whole_code::test_switch_statement"
] |
[
"tests/test_c_parser.py::TestCParser_fundamentals::test_FileAST",
"tests/test_c_parser.py::TestCParser_fundamentals::test_anonymous_struct_union",
"tests/test_c_parser.py::TestCParser_fundamentals::test_compound_literals",
"tests/test_c_parser.py::TestCParser_fundamentals::test_compound_statement",
"tests/test_c_parser.py::TestCParser_fundamentals::test_coords",
"tests/test_c_parser.py::TestCParser_fundamentals::test_decl_inits",
"tests/test_c_parser.py::TestCParser_fundamentals::test_decl_named_inits",
"tests/test_c_parser.py::TestCParser_fundamentals::test_duplicate_typedef",
"tests/test_c_parser.py::TestCParser_fundamentals::test_empty_toplevel_decl",
"tests/test_c_parser.py::TestCParser_fundamentals::test_enums",
"tests/test_c_parser.py::TestCParser_fundamentals::test_forloop_coord",
"tests/test_c_parser.py::TestCParser_fundamentals::test_func_decls_with_array_dim_qualifiers",
"tests/test_c_parser.py::TestCParser_fundamentals::test_function_definitions",
"tests/test_c_parser.py::TestCParser_fundamentals::test_initial_semi",
"tests/test_c_parser.py::TestCParser_fundamentals::test_inline_specifier",
"tests/test_c_parser.py::TestCParser_fundamentals::test_int128",
"tests/test_c_parser.py::TestCParser_fundamentals::test_invalid_multiple_types_error",
"tests/test_c_parser.py::TestCParser_fundamentals::test_multi_decls",
"tests/test_c_parser.py::TestCParser_fundamentals::test_nested_decls",
"tests/test_c_parser.py::TestCParser_fundamentals::test_offsetof",
"tests/test_c_parser.py::TestCParser_fundamentals::test_pragma",
"tests/test_c_parser.py::TestCParser_fundamentals::test_pragmacomp_or_statement",
"tests/test_c_parser.py::TestCParser_fundamentals::test_qualifiers_storage_specifiers",
"tests/test_c_parser.py::TestCParser_fundamentals::test_simple_decls",
"tests/test_c_parser.py::TestCParser_fundamentals::test_sizeof",
"tests/test_c_parser.py::TestCParser_fundamentals::test_struct_bitfields",
"tests/test_c_parser.py::TestCParser_fundamentals::test_struct_empty",
"tests/test_c_parser.py::TestCParser_fundamentals::test_struct_members_namespace",
"tests/test_c_parser.py::TestCParser_fundamentals::test_struct_union",
"tests/test_c_parser.py::TestCParser_fundamentals::test_struct_with_extra_semis_inside",
"tests/test_c_parser.py::TestCParser_fundamentals::test_struct_with_initial_semi",
"tests/test_c_parser.py::TestCParser_fundamentals::test_tags_namespace",
"tests/test_c_parser.py::TestCParser_fundamentals::test_typedef",
"tests/test_c_parser.py::TestCParser_fundamentals::test_unified_string_literals",
"tests/test_c_parser.py::TestCParser_fundamentals::test_unified_wstring_literals",
"tests/test_c_parser.py::TestCParser_fundamentals::test_vla",
"tests/test_c_parser.py::TestCParser_whole_code::test_empty_statements",
"tests/test_c_parser.py::TestCParser_whole_code::test_expressions",
"tests/test_c_parser.py::TestCParser_whole_code::test_for_statement",
"tests/test_c_parser.py::TestCParser_whole_code::test_statements",
"tests/test_c_parser.py::TestCParser_whole_code::test_whole_file",
"tests/test_c_parser.py::TestCParser_whole_code::test_whole_file_with_stdio",
"tests/test_c_parser.py::TestCParser_typenames::test_ambiguous_parameters",
"tests/test_c_parser.py::TestCParser_typenames::test_innerscope_reuse_typedef_name",
"tests/test_c_parser.py::TestCParser_typenames::test_innerscope_typedef",
"tests/test_c_parser.py::TestCParser_typenames::test_nested_function_decls",
"tests/test_c_parser.py::TestCParser_typenames::test_parameter_reuse_typedef_name",
"tests/test_c_parser.py::TestCParser_typenames::test_samescope_reuse_name"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2019-08-21 00:12:44+00:00
|
bsd-3-clause
| 2,102 |
|
eliben__pycparser-364
|
diff --git a/README.rst b/README.rst
index df9025c..682abf7 100644
--- a/README.rst
+++ b/README.rst
@@ -161,6 +161,9 @@ See `this blog post
<https://eli.thegreenplace.net/2015/on-parsing-c-type-declarations-and-fake-headers>`_
for more details.
+Note that the fake headers are not included in the ``pip`` package nor installed
+via ``setup.py`` (`#224 <https://github.com/eliben/pycparser/issues/224>`_).
+
Basic usage
-----------
diff --git a/pycparser/c_parser.py b/pycparser/c_parser.py
index 4cf96fa..744ede8 100644
--- a/pycparser/c_parser.py
+++ b/pycparser/c_parser.py
@@ -1740,8 +1740,7 @@ class CParser(PLYParser):
if len(p) == 2:
p[0] = p[1]
elif len(p) == 4:
- field = c_ast.ID(p[3], self._token_coord(p, 3))
- p[0] = c_ast.StructRef(p[1], p[2], field, p[1].coord)
+ p[0] = c_ast.StructRef(p[1], p[2], p[3], p[1].coord)
elif len(p) == 5:
p[0] = c_ast.ArrayRef(p[1], p[3], p[1].coord)
else:
|
eliben/pycparser
|
1166ea11785ce12cdfd5e8bf8b3a69b5e6b76f9c
|
diff --git a/tests/test_c_parser.py b/tests/test_c_parser.py
index 49cada3..b6ecdd5 100755
--- a/tests/test_c_parser.py
+++ b/tests/test_c_parser.py
@@ -529,6 +529,18 @@ class TestCParser_fundamentals(TestCParser_base):
['IdentifierType', ['int']]]]]])
def test_offsetof(self):
+ def expand_ref(n):
+ if isinstance(n, StructRef):
+ return ['StructRef', expand_ref(n.name), expand_ref(n.field)]
+ elif isinstance(n, ArrayRef):
+ return ['ArrayRef', expand_ref(n.name), expand_ref(n.subscript)]
+ elif isinstance(n, ID):
+ return ['ID', n.name]
+ elif isinstance(n, Constant):
+ return ['Constant', n.type, n.value]
+ else:
+ raise TypeError("Unexpected type " + n.__class__.__name__)
+
e = """
void foo() {
int a = offsetof(struct S, p);
@@ -546,8 +558,20 @@ class TestCParser_fundamentals(TestCParser_base):
self.assertIsInstance(s1.args.exprs[1], ID)
s3 = compound.block_items[2].init
self.assertIsInstance(s3.args.exprs[1], StructRef)
+ self.assertEqual(expand_ref(s3.args.exprs[1]),
+ ['StructRef',
+ ['StructRef', ['ID', 'p'], ['ID', 'q']],
+ ['ID', 'r']])
s4 = compound.block_items[3].init
self.assertIsInstance(s4.args.exprs[1], ArrayRef)
+ self.assertEqual(expand_ref(s4.args.exprs[1]),
+ ['ArrayRef',
+ ['ArrayRef',
+ ['StructRef',
+ ['ArrayRef', ['ID', 'p'], ['Constant', 'int', '5']],
+ ['ID', 'q']],
+ ['Constant', 'int', '4']],
+ ['Constant', 'int', '5']])
def test_compound_statement(self):
e = """
|
Incorrect AST structure when parsing `offsetof` expressions.
I am using the latest commit on `master` branch (`1166ea1`).
Take the following code as example:
```c
int x = offsetof(struct A, a.b);
```
The generated parse tree is:
```python
FileAST(ext=[Decl(name='x',
quals=[],
storage=[],
funcspec=[],
type=TypeDecl(declname='x',
quals=[],
type=IdentifierType(names=['int'])),
init=FuncCall(name=ID(name='offsetof'),
args=ExprList(exprs=[Typename(name=None,
quals=[],
type=TypeDecl(declname=None,
quals=[],
type=Struct(name='A',
decls=None))),
StructRef(name=ID(name='a'),
type='.',
field=ID(name=ID(name='b')))])),
bitsize=None)])
```
where the `field` attribute of `StructRef` has a value of `ID(name=ID(name='b'))` instead of just `ID(name='b')`.
I think the issue is with the production rules of `offsetof_member_designator` that was introduced in #145, namely:
https://github.com/eliben/pycparser/blob/1166ea11785ce12cdfd5e8bf8b3a69b5e6b76f9c/pycparser/c_parser.py#L1735-L1748
In the `len(p) == 4` case, `StructRef` should be created using `p[3]` directly, instead of constructing an `ID` first.
If you agree with my analysis, I'd be happy to create a patch.
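For reference, a small check of the corrected shape (a sketch assuming pycparser is importable): the `field` should come out as a plain `ID`:
```python
from pycparser import c_parser, c_ast

init = c_parser.CParser().parse('int x = offsetof(struct A, a.b);').ext[0].init
ref = init.args.exprs[1]
assert isinstance(ref, c_ast.StructRef)
assert isinstance(ref.field, c_ast.ID) and ref.field.name == 'b'  # not ID(name=ID(...))
```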
|
0.0
|
1166ea11785ce12cdfd5e8bf8b3a69b5e6b76f9c
|
[
"tests/test_c_parser.py::TestCParser_fundamentals::test_offsetof"
] |
[
"tests/test_c_parser.py::TestCParser_fundamentals::test_FileAST",
"tests/test_c_parser.py::TestCParser_fundamentals::test_anonymous_struct_union",
"tests/test_c_parser.py::TestCParser_fundamentals::test_compound_literals",
"tests/test_c_parser.py::TestCParser_fundamentals::test_compound_statement",
"tests/test_c_parser.py::TestCParser_fundamentals::test_coords",
"tests/test_c_parser.py::TestCParser_fundamentals::test_decl_inits",
"tests/test_c_parser.py::TestCParser_fundamentals::test_decl_named_inits",
"tests/test_c_parser.py::TestCParser_fundamentals::test_duplicate_typedef",
"tests/test_c_parser.py::TestCParser_fundamentals::test_empty_toplevel_decl",
"tests/test_c_parser.py::TestCParser_fundamentals::test_enums",
"tests/test_c_parser.py::TestCParser_fundamentals::test_forloop_coord",
"tests/test_c_parser.py::TestCParser_fundamentals::test_func_decls_with_array_dim_qualifiers",
"tests/test_c_parser.py::TestCParser_fundamentals::test_function_definitions",
"tests/test_c_parser.py::TestCParser_fundamentals::test_initial_semi",
"tests/test_c_parser.py::TestCParser_fundamentals::test_inline_specifier",
"tests/test_c_parser.py::TestCParser_fundamentals::test_int128",
"tests/test_c_parser.py::TestCParser_fundamentals::test_invalid_multiple_types_error",
"tests/test_c_parser.py::TestCParser_fundamentals::test_multi_decls",
"tests/test_c_parser.py::TestCParser_fundamentals::test_nested_decls",
"tests/test_c_parser.py::TestCParser_fundamentals::test_pragma",
"tests/test_c_parser.py::TestCParser_fundamentals::test_pragmacomp_or_statement",
"tests/test_c_parser.py::TestCParser_fundamentals::test_qualifiers_storage_specifiers",
"tests/test_c_parser.py::TestCParser_fundamentals::test_simple_decls",
"tests/test_c_parser.py::TestCParser_fundamentals::test_sizeof",
"tests/test_c_parser.py::TestCParser_fundamentals::test_struct_bitfields",
"tests/test_c_parser.py::TestCParser_fundamentals::test_struct_empty",
"tests/test_c_parser.py::TestCParser_fundamentals::test_struct_members_namespace",
"tests/test_c_parser.py::TestCParser_fundamentals::test_struct_union",
"tests/test_c_parser.py::TestCParser_fundamentals::test_struct_with_extra_semis_inside",
"tests/test_c_parser.py::TestCParser_fundamentals::test_struct_with_initial_semi",
"tests/test_c_parser.py::TestCParser_fundamentals::test_tags_namespace",
"tests/test_c_parser.py::TestCParser_fundamentals::test_typedef",
"tests/test_c_parser.py::TestCParser_fundamentals::test_unified_string_literals",
"tests/test_c_parser.py::TestCParser_fundamentals::test_unified_wstring_literals",
"tests/test_c_parser.py::TestCParser_fundamentals::test_vla",
"tests/test_c_parser.py::TestCParser_whole_code::test_empty_statements",
"tests/test_c_parser.py::TestCParser_whole_code::test_expressions",
"tests/test_c_parser.py::TestCParser_whole_code::test_for_statement",
"tests/test_c_parser.py::TestCParser_whole_code::test_statements",
"tests/test_c_parser.py::TestCParser_whole_code::test_switch_statement",
"tests/test_c_parser.py::TestCParser_whole_code::test_whole_file",
"tests/test_c_parser.py::TestCParser_whole_code::test_whole_file_with_stdio",
"tests/test_c_parser.py::TestCParser_typenames::test_ambiguous_parameters",
"tests/test_c_parser.py::TestCParser_typenames::test_innerscope_reuse_typedef_name",
"tests/test_c_parser.py::TestCParser_typenames::test_innerscope_typedef",
"tests/test_c_parser.py::TestCParser_typenames::test_nested_function_decls",
"tests/test_c_parser.py::TestCParser_typenames::test_parameter_reuse_typedef_name",
"tests/test_c_parser.py::TestCParser_typenames::test_samescope_reuse_name"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-02-25 16:22:57+00:00
|
bsd-3-clause
| 2,103 |
|
eliben__pycparser-394
|
diff --git a/pycparser/c_generator.py b/pycparser/c_generator.py
index 53c26fd..c494176 100644
--- a/pycparser/c_generator.py
+++ b/pycparser/c_generator.py
@@ -14,11 +14,17 @@ class CGenerator(object):
return a value from each visit method, using string accumulation in
generic_visit.
"""
- def __init__(self):
+ def __init__(self, reduce_parentheses=False):
+ """ Constructs C-code generator
+
+ reduce_parentheses:
+ if True, eliminates needless parentheses on binary operators
+ """
# Statements start with indentation of self.indent_level spaces, using
# the _make_indent method
#
self.indent_level = 0
+ self.reduce_parentheses = reduce_parentheses
def _make_indent(self):
return ' ' * self.indent_level
@@ -72,11 +78,49 @@ class CGenerator(object):
else:
return '%s%s' % (n.op, operand)
+ # Precedence map of binary operators:
+ precedence_map = {
+ # Somewhat of a duplicate of c_parser.CParser.precedence
+ # Higher numbers are stronger binding
+ '||': 0, # weakest binding
+ '&&': 1,
+ '|': 2,
+ '^': 3,
+ '&': 4,
+ '==': 5, '!=': 5,
+ '>': 6, '>=': 6, '<': 6, '<=': 6,
+ '>>': 7, '<<': 7,
+ '+': 8, '-': 8,
+ '*': 9, '/': 9, '%': 9 # strongest binding
+ }
+
def visit_BinaryOp(self, n):
- lval_str = self._parenthesize_if(n.left,
- lambda d: not self._is_simple_node(d))
- rval_str = self._parenthesize_if(n.right,
- lambda d: not self._is_simple_node(d))
+ # Note: all binary operators are left-to-right associative
+ #
+ # If `n.left.op` has a stronger or equally binding precedence in
+ # comparison to `n.op`, no parentheses are needed for the left:
+ # e.g., `(a*b) + c` is equivalent to `a*b + c`, as well as
+ # `(a+b) - c` is equivalent to `a+b - c` (same precedence).
+ # If the left operator is weaker binding than the current, then
+ # parentheses are necessary:
+ # e.g., `(a+b) * c` is NOT equivalent to `a+b * c`.
+ lval_str = self._parenthesize_if(
+ n.left,
+ lambda d: not (self._is_simple_node(d) or
+ self.reduce_parentheses and isinstance(d, c_ast.BinaryOp) and
+ self.precedence_map[d.op] >= self.precedence_map[n.op]))
+ # If `n.right.op` has a stronger -but not equal- binding precedence,
+ # parentheses can be omitted on the right:
+ # e.g., `a + (b*c)` is equivalent to `a + b*c`.
+ # If the right operator is weaker or equally binding, then parentheses
+ # are necessary:
+ # e.g., `a * (b+c)` is NOT equivalent to `a * b+c` and
+ # `a - (b+c)` is NOT equivalent to `a - b+c` (same precedence).
+ rval_str = self._parenthesize_if(
+ n.right,
+ lambda d: not (self._is_simple_node(d) or
+ self.reduce_parentheses and isinstance(d, c_ast.BinaryOp) and
+ self.precedence_map[d.op] > self.precedence_map[n.op]))
return '%s %s %s' % (lval_str, n.op, rval_str)
def visit_Assignment(self, n):
diff --git a/pycparser/c_parser.py b/pycparser/c_parser.py
index c2d82f7..0536e58 100644
--- a/pycparser/c_parser.py
+++ b/pycparser/c_parser.py
@@ -491,6 +491,7 @@ class CParser(PLYParser):
##
## Precedence and associativity of operators
##
+ # If this changes, c_generator.CGenerator.precedence_map needs to change as well
precedence = (
('left', 'LOR'),
('left', 'LAND'),
|
eliben/pycparser
|
ba80794f1460a87eed2f8f918e47868878f3a5ad
|
diff --git a/tests/test_c_generator.py b/tests/test_c_generator.py
index 159c763..7937525 100644
--- a/tests/test_c_generator.py
+++ b/tests/test_c_generator.py
@@ -61,19 +61,22 @@ class TestFunctionDeclGeneration(unittest.TestCase):
class TestCtoC(unittest.TestCase):
- def _run_c_to_c(self, src):
+ def _run_c_to_c(self, src, *args, **kwargs):
ast = parse_to_ast(src)
- generator = c_generator.CGenerator()
+ generator = c_generator.CGenerator(*args, **kwargs)
return generator.visit(ast)
- def _assert_ctoc_correct(self, src):
+ def _assert_ctoc_correct(self, src, *args, **kwargs):
""" Checks that the c2c translation was correct by parsing the code
generated by c2c for src and comparing the AST with the original
AST.
+
+ Additional arguments are passed to CGenerator.__init__.
"""
- src2 = self._run_c_to_c(src)
+ src2 = self._run_c_to_c(src, *args, **kwargs)
self.assertTrue(compare_asts(parse_to_ast(src), parse_to_ast(src2)),
- src2)
+ "{!r} != {!r}".format(src, src2))
+ return src2
def test_trivial_decls(self):
self._assert_ctoc_correct('int a;')
@@ -361,6 +364,26 @@ class TestCtoC(unittest.TestCase):
src = 'int x = ' + src + ';'
self._assert_ctoc_correct(src)
+ def test_flattened_binaryops(self):
+ # codes with minimum number of (necessary) parenthesis:
+ test_snippets = [
+ 'int x = a*b*c*d;',
+ 'int x = a+b*c*d;',
+ 'int x = a*b+c*d;',
+ 'int x = a*b*c+d;',
+ 'int x = (a+b)*c*d;',
+ 'int x = (a+b)*(c+d);',
+ 'int x = (a+b)/(c-d);',
+ 'int x = a+b-c-d;',
+ 'int x = a+(b-c)-d;'
+ ]
+ for src in test_snippets:
+ src2 = self._assert_ctoc_correct(src, reduce_parentheses=True)
+ self.assertTrue(
+ src2.count('(') == src.count('('),
+ msg="{!r} did not have minimum number of parenthesis, should be like {!r}.".format(
+ src2, src))
+
class TestCasttoC(unittest.TestCase):
def _find_file(self, name):
|
Precedence-aware CGenerator
I ran into the issue that in one piece of code more than 256 variables are summed up. The `CGenerator` turned this into >256 nested BinaryOperators with that many parentheses, which in turn was something clang did not like (by default).
I wrote a small generator which takes precedence into account for binary operators:
```python
class LessParanthesizingCGenerator(CGenerator):
def get_BinaryOp_precedence(self, n):
"""
Gives precedence for op of n, otherwise -1.
Lower numbers have precedence over higher numbers.
"""
binary_op_precedence = {
# based on https://en.cppreference.com/w/c/language/operator_precedence
'*': 3, '%': 3,
'-': 4, '+': 4,
'<<': 5, '>>': 5,
'<': 6, '<=': 6, '>': 6, '>=': 6,
'==': 7, '!=': 7,
'&': 8,
'^': 9,
'|': 10,
'&&': 11,
'||': 12
}
if not isinstance(n, c_ast.BinaryOp):
return -1
else:
return binary_op_precedence[n.op]
def visit_BinaryOp(self, n):
p = self.get_BinaryOp_precedence(n)
lval_str = self._parenthesize_if(n.left,
lambda d: not self._is_simple_node(d) and
self.get_BinaryOp_precedence(d) > p)
rval_str = self._parenthesize_if(n.right,
lambda d: not self._is_simple_node(d) and
self.get_BinaryOp_precedence(d) >= p)
return '%s %s %s' % (lval_str, n.op, rval_str)
```
Would you like me to make a pull request, and if so, is this implementation sufficient for now? Tests I would of course add.
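For completeness, a usage sketch of the opt-in flag the patch above adds (default output stays unchanged):
```python
from pycparser import c_parser, c_generator

ast = c_parser.CParser().parse('int x = (a * b) + (c * d);')
print(c_generator.CGenerator(reduce_parentheses=True).visit(ast))
# -> int x = a * b + c * d;
```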
|
0.0
|
ba80794f1460a87eed2f8f918e47868878f3a5ad
|
[
"tests/test_c_generator.py::TestCtoC::test_flattened_binaryops"
] |
[
"tests/test_c_generator.py::TestFunctionDeclGeneration::test_partial_funcdecl_generation",
"tests/test_c_generator.py::TestCtoC::test_array_decl",
"tests/test_c_generator.py::TestCtoC::test_casts",
"tests/test_c_generator.py::TestCtoC::test_comma_op_assignment",
"tests/test_c_generator.py::TestCtoC::test_comma_op_in_ternary",
"tests/test_c_generator.py::TestCtoC::test_comma_operator_funcarg",
"tests/test_c_generator.py::TestCtoC::test_complex_decls",
"tests/test_c_generator.py::TestCtoC::test_compound_literal",
"tests/test_c_generator.py::TestCtoC::test_enum",
"tests/test_c_generator.py::TestCtoC::test_enum_typedef",
"tests/test_c_generator.py::TestCtoC::test_expr_list_in_initializer_list",
"tests/test_c_generator.py::TestCtoC::test_exprlist_with_semi",
"tests/test_c_generator.py::TestCtoC::test_exprlist_with_subexprlist",
"tests/test_c_generator.py::TestCtoC::test_exprs",
"tests/test_c_generator.py::TestCtoC::test_generate_struct_union_enum_exception",
"tests/test_c_generator.py::TestCtoC::test_initlist",
"tests/test_c_generator.py::TestCtoC::test_issue246",
"tests/test_c_generator.py::TestCtoC::test_issue36",
"tests/test_c_generator.py::TestCtoC::test_issue37",
"tests/test_c_generator.py::TestCtoC::test_issue66",
"tests/test_c_generator.py::TestCtoC::test_issue83",
"tests/test_c_generator.py::TestCtoC::test_issue84",
"tests/test_c_generator.py::TestCtoC::test_krstyle",
"tests/test_c_generator.py::TestCtoC::test_nest_initializer_list",
"tests/test_c_generator.py::TestCtoC::test_nest_named_initializer",
"tests/test_c_generator.py::TestCtoC::test_nested_sizeof",
"tests/test_c_generator.py::TestCtoC::test_pragma",
"tests/test_c_generator.py::TestCtoC::test_ptr_decl",
"tests/test_c_generator.py::TestCtoC::test_statements",
"tests/test_c_generator.py::TestCtoC::test_struct_decl",
"tests/test_c_generator.py::TestCtoC::test_switchcase",
"tests/test_c_generator.py::TestCtoC::test_ternary",
"tests/test_c_generator.py::TestCtoC::test_trivial_decls",
"tests/test_c_generator.py::TestCasttoC::test_to_type",
"tests/test_c_generator.py::TestCasttoC::test_to_type_with_cpp"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-10-01 17:17:55+00:00
|
bsd-3-clause
| 2,104 |
|
emissions-api__sentinel5dl-29
|
diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 0000000..2301243
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,3 @@
+[report]
+omit =
+ */tests/*
diff --git a/.travis.yml b/.travis.yml
index a4c4ae5..8511f0b 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -4,8 +4,9 @@ dist: bionic
# https://devguide.python.org/#branchstatus
python:
- - "3.6"
- - "3.7"
+ - 3.6
+ - 3.7
+ - 3.8
addons:
apt:
@@ -13,18 +14,18 @@ addons:
- libgnutls28-dev
install:
- - pip install flake8 coverage
+ - pip install flake8 coverage coveralls
- python setup.py install
script:
- flake8 sentinel5dl
- coverage run --source=sentinel5dl -m tests
- - coverage report
after_success:
- pip install sphinx sphinx-rtd-theme
- make -C docs clean html
- touch docs/build/html/.nojekyll # create this file to prevent Github's Jekyll processing
+ - coveralls
deploy:
provider: pages
diff --git a/README.rst b/README.rst
index a5aed3e..47748ce 100644
--- a/README.rst
+++ b/README.rst
@@ -3,6 +3,10 @@ Sentinel-5P Downloader
.. image:: https://travis-ci.com/emissions-api/sentinel5dl.svg?branch=master
:target: https://travis-ci.com/emissions-api/sentinel5dl
+ :alt: CI Builds
+.. image:: https://coveralls.io/repos/github/emissions-api/sentinel5dl/badge.svg?branch=master
+ :target: https://coveralls.io/github/emissions-api/sentinel5dl?branch=master
+ :alt: Test Coverage
.. image:: https://img.shields.io/github/issues-raw/emissions-api/sentinel5dl?color=blue
:target: https://github.com/emissions-api/sentinel5dl/issues
:alt: GitHub issues
diff --git a/docs/Makefile b/docs/Makefile
index d0c3cbf..a4de0bf 100644
--- a/docs/Makefile
+++ b/docs/Makefile
@@ -3,7 +3,7 @@
# You can set these variables from the command line, and also
# from the environment for the first two.
-SPHINXOPTS ?=
+SPHINXOPTS ?= -W
SPHINXBUILD ?= sphinx-build
SOURCEDIR = source
BUILDDIR = build
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 7d41c3d..5ead6ed 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -1,4 +1,4 @@
-.. include:: ../../readme.rst
+.. include:: ../../README.rst
.. toctree::
diff --git a/sentinel5dl/__init__.py b/sentinel5dl/__init__.py
index 278956d..0be61eb 100644
--- a/sentinel5dl/__init__.py
+++ b/sentinel5dl/__init__.py
@@ -38,6 +38,28 @@ def __md5(filename):
return hash_md5.hexdigest().upper()
+def __check_md5(filename, base_path):
+ '''Check the md5 sum of a given file against the ESA API.
+
+ :param filename: Path of local file to check
+ :param base_path: Base API path for this product
+ :returns: If the local file matches the md5 checksum
+ :rtype: bool
+ '''
+ md5file = f'{filename}.md5sum'
+ try:
+ with open(md5file, 'r') as f:
+ md5sum = f.read()
+ except FileNotFoundError:
+ md5sum = __http_request(f'{base_path}/Checksum/Value/$value')
+ md5sum = md5sum.decode('ascii')
+ with open(md5file, 'w') as f:
+ f.write(md5sum)
+
+ # Compare md5 sum
+ return __md5(filename) == md5sum
+
+
def __http_request(path, filename=None):
'''Make an HTTP request to the API via HTTP, optionally downloading the
response.
@@ -140,20 +162,15 @@ def download(products, output_dir='.'):
uuid = product['uuid']
filename = os.path.join(output_dir, product['identifier'] + '.nc')
logger.info(f'Downloading {uuid} to {filename}')
- path = f'/odata/v1/Products(\'{uuid}\')/$value'
+ base_path = f"/odata/v1/Products('{uuid}')"
# Check if file exist
if os.path.exists(filename):
- # Get md5 sum
- md5um_path = f"/odata/v1/Products('{uuid}')/Checksum/Value/$value"
- md5sum = __http_request(md5um_path)
- md5sum = md5sum.decode()
-
- # Compare md5 sum
- if __md5(filename) == md5sum:
+ # Skip download if checksum matches
+ if __check_md5(filename, base_path):
logger.info(f'Skipping {filename} since it already exist.')
continue
logger.info(f'Overriding {filename} since md5 hash differs.')
# Download file
- __http_request(path, filename)
+ __http_request(f'{base_path}/$value', filename)
diff --git a/setup.py b/setup.py
index 9bfe958..1e7a6c5 100644
--- a/setup.py
+++ b/setup.py
@@ -11,7 +11,7 @@ def read(filename):
setup(
name='sentinel5dl',
- version='0.4',
+ version='0.5',
description='Sentinel-5p Downloader',
author='Emissions API Developers',
license='MIT',
@@ -24,6 +24,7 @@ setup(
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
+ 'Programming Language :: Python :: 3.8',
'Operating System :: OS Independent',
'Topic :: Scientific/Engineering :: GIS',
],
|
emissions-api/sentinel5dl
|
03f4148332a9b7cba0b8ea420aa0fb885d234694
|
diff --git a/tests/__main__.py b/tests/__main__.py
index a3472df..ad1d698 100644
--- a/tests/__main__.py
+++ b/tests/__main__.py
@@ -12,18 +12,22 @@ class TestSentinel5dl(unittest.TestCase):
def _mock_http_request(self, path, filename=None):
'''Mock HTTP requests to the ESA API
'''
+ # download
if filename is not None:
self._count_download += 1
with open(filename, 'wb') as f:
f.write(b'123')
return
- # no nownload
- self._count_request += 1
+ # search request
if path.startswith('/api/stub/products?'):
+ self._count_search_request += 1
with open(os.path.join(testpath, 'products.json'), 'rb') as f:
return f.read()
+
+ # checksum request
if path.endswith('/Checksum/Value/$value'):
+ self._count_checksum_request += 1
# MD5 checksum for string `123`
return b'202CB962AC59075B964B07152D234B70'
@@ -32,7 +36,8 @@ class TestSentinel5dl(unittest.TestCase):
make any HTTP requests and reset the request counters.
'''
setattr(sentinel5dl, '__http_request', self._mock_http_request)
- self._count_request = 0
+ self._count_search_request = 0
+ self._count_checksum_request = 0
self._count_download = 0
def test(self):
@@ -46,7 +51,7 @@ class TestSentinel5dl(unittest.TestCase):
# The result returned by the mock contains four products but claims a
# total of eight products, making sentinel5dl request resources twice.
- self.assertEqual(self._count_request, 2)
+ self.assertEqual(self._count_search_request, 2)
self.assertEqual(result['totalresults'], 8)
self.assertEqual(result['totalresults'], len(result['products']))
@@ -66,10 +71,12 @@ class TestSentinel5dl(unittest.TestCase):
with open(filename, 'rb') as f:
self.assertEqual(f.read(), b'123')
- # We should have made an additional five requests for checksums:
- # - one for the file we created manually
- # - four for the duplicated entries in the loaded test data
- self.assertEqual(self._count_request, 7)
+ # We should have downloaded four files and have an additional four
+ # files storing md5 checksums
+ self.assertEqual(len(os.listdir(tmpdir)), 8)
+
+ # We should have four checksum requests. One for each file
+ self.assertEqual(self._count_checksum_request, 4)
# We should have downloaded four unique files
self.assertEqual(self._count_download, 4)
|
Also store hash files
We should store the hash files after we have downloaded them, so we can check whether the hash file matches the downloaded .nc file without any network connection. This is pretty handy, since the ESA website is painfully slow even for the hash files.
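A minimal sketch of that sidecar caching (hypothetical helper names; `fetch_checksum` stands in for the slow ESA API request):
```python
import hashlib
import os

def md5(filename):
    # Stream the file so large .nc downloads do not load into memory at once.
    h = hashlib.md5()
    with open(filename, 'rb') as f:
        for chunk in iter(lambda: f.read(4096), b''):
            h.update(chunk)
    return h.hexdigest().upper()

def check_md5(filename, fetch_checksum):
    md5file = filename + '.md5sum'
    if os.path.exists(md5file):          # reuse the cached checksum offline
        with open(md5file) as f:
            md5sum = f.read()
    else:                                # fetch once, then keep it on disk
        md5sum = fetch_checksum()
        with open(md5file, 'w') as f:
            f.write(md5sum)
    return md5(filename) == md5sum
```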
|
0.0
|
03f4148332a9b7cba0b8ea420aa0fb885d234694
|
[
"tests/__main__.py::TestSentinel5dl::test"
] |
[] |
{
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-10-27 11:00:02+00:00
|
mit
| 2,105 |
|
emsig__emg3d-325
|
diff --git a/emg3d/electrodes.py b/emg3d/electrodes.py
index 8a31385..e31ec0f 100644
--- a/emg3d/electrodes.py
+++ b/emg3d/electrodes.py
@@ -323,7 +323,7 @@ class Dipole(Wire):
if is_flat:
# Re-arrange for points.
- points = np.array([coordinates[::2], coordinates[1::2]])
+ points = coordinates.reshape((2, 3), order='F')
# Store original input.
self._coordinates = coordinates
@@ -375,10 +375,20 @@ class Dipole(Wire):
# Finite dipole.
else:
- s1 = (f" e1={{{self.points[0, 0]:,.1f}; "
- f"{self.points[0, 1]:,.1f}; {self.points[0, 2]:,.1f}}} m; ")
- s2 = (f"e2={{{self.points[1, 0]:,.1f}; "
- f"{self.points[1, 1]:,.1f}; {self.points[1, 2]:,.1f}}} m")
+
+ # Finite magnetic dipole.
+ if self._xtype == 'magnetic':
+ if self.coordinates.ndim == 1:
+ points = self.coordinates
+ else:
+ points = self.coordinates.ravel('F')
+ else:
+ points = self.points.ravel('F')
+
+ s1 = (f" e1={{{points[0]:,.1f}; "
+ f"{points[2]:,.1f}; {points[4]:,.1f}}} m; ")
+ s2 = (f"e2={{{points[1]:,.1f}; "
+ f"{points[3]:,.1f}; {points[5]:,.1f}}} m")
return s0 + s1 + s2 if len(s1+s2) < 80 else s0 + s1 + "\n " + s2
|
emsig/emg3d
|
8750f85bfcebaf67ed2e6beff941b960a048a1b3
|
diff --git a/tests/test_electrodes.py b/tests/test_electrodes.py
index baa98ba..d0a5e06 100644
--- a/tests/test_electrodes.py
+++ b/tests/test_electrodes.py
@@ -273,6 +273,8 @@ def test_tx_magnetic_dipole():
s5a = electrodes.TxMagneticDipole(
(-1, 1, -1, 1, -np.sqrt(2), np.sqrt(2)), strength=np.pi)
s5b = electrodes.TxMagneticDipole.from_dict(s5a.to_dict())
+ rep = s5b.__repr__()
+ assert "m; e2={1.0; 1.0; " in rep
assert s5a == s5b
s6a = electrodes.TxMagneticDipole(
[[-1, -1, -np.sqrt(2)], [1, 1, np.sqrt(2)]], strength=np.pi)
@@ -287,7 +289,7 @@ def test_tx_magnetic_dipole():
rep = s6b.__repr__()
assert "3.1 A" in rep
- assert "m; e2={-0.7" in rep
+ assert "m; e2={1.0; 1.0; " in rep
def test_tx_electric_wire():
|
`emg3d.TxMagneticDipole` shows wrong coordinates.
```
> src = emg3d.TxMagneticDipole(coordinates=[0.0, 0.0, 0.0, 0.0, -0.5, 0.5])
> src
TxMagneticDipole: 1.0 A;
e1={0.0; 0.7; 0.0} m; e2={-0.7; 0.0; 0.0} m
```
which are just the first two points of it,
```
> src.points
array([[ 0. , 0.70710678, 0. ],
[-0.70710678, 0. , 0. ],
[ 0. , -0.70710678, 0. ],
[ 0.70710678, 0. , 0. ],
[ 0. , 0.70710678, 0. ]])
```
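The fix relies on emg3d's flat dipole format `(x1, x2, y1, y2, z1, z2)`; here is a small numpy illustration (not emg3d's actual code) of how a Fortran-order reshape recovers the two endpoints and `ravel('F')` restores the flat form used by the corrected repr:
```python
import numpy as np

coords = np.array([0.0, 0.0, 0.0, 0.0, -0.5, 0.5])  # (x1, x2, y1, y2, z1, z2)
points = coords.reshape((2, 3), order='F')
# points[0] -> e1 = [0., 0., -0.5]; points[1] -> e2 = [0., 0., 0.5]
assert (points.ravel('F') == coords).all()  # ravel('F') restores the flat form
```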
|
0.0
|
8750f85bfcebaf67ed2e6beff941b960a048a1b3
|
[
"tests/test_electrodes.py::test_tx_magnetic_dipole"
] |
[
"tests/test_electrodes.py::TestWire::test_basics",
"tests/test_electrodes.py::TestWire::test_basic_repr",
"tests/test_electrodes.py::TestWire::test_warnings",
"tests/test_electrodes.py::test_point",
"tests/test_electrodes.py::TestDipole::test_point",
"tests/test_electrodes.py::TestDipole::test_flat",
"tests/test_electrodes.py::TestDipole::test_dipole",
"tests/test_electrodes.py::TestDipole::test_to_from_dict",
"tests/test_electrodes.py::TestDipole::test_basic_repr",
"tests/test_electrodes.py::TestDipole::test_warnings",
"tests/test_electrodes.py::test_source",
"tests/test_electrodes.py::test_tx_electric_point",
"tests/test_electrodes.py::test_tx_electric_dipole",
"tests/test_electrodes.py::test_tx_magnetic_point",
"tests/test_electrodes.py::test_tx_electric_wire",
"tests/test_electrodes.py::test_receiver",
"tests/test_electrodes.py::test_rx_electric_point",
"tests/test_electrodes.py::test_rx_magnetic_point",
"tests/test_electrodes.py::test_point_to_dipole",
"tests/test_electrodes.py::TestDipoleToPoint::test_axes_faces_quadrants",
"tests/test_electrodes.py::TestDipoleToPoint::test_arbitrary",
"tests/test_electrodes.py::test_point_to_square_loop",
"tests/test_electrodes.py::test_rotation",
"tests/test_electrodes.py::test_all_dir"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2024-03-05 15:10:11+00:00
|
apache-2.0
| 2,106 |
|
en0__dnry-config-4
|
diff --git a/dnry/config/yaml/source.py b/dnry/config/yaml/source.py
index 8742b25..23d9e8d 100644
--- a/dnry/config/yaml/source.py
+++ b/dnry/config/yaml/source.py
@@ -17,6 +17,9 @@ class YamlSource(IConfigSource):
paths = ",".join(self.__paths)
raise RuntimeError(f"Configuration Error: None of these paths could be found: {paths}")
+ elif path is None and not self.__required:
+ return dict()
+
with open(path, 'r') as f:
return yaml.load(f, Loader=self.__loader) or dict()
|
en0/dnry-config
|
296dfdc2eee8f82f0926d102f42db2f5fd8f2efb
|
diff --git a/test/test_bugs.py b/test/test_bugs.py
index e34fa06..e83dc27 100644
--- a/test/test_bugs.py
+++ b/test/test_bugs.py
@@ -9,7 +9,7 @@ from dnry.config.yaml import YamlSource
class TestBugs(unittest.TestCase):
def test_empty_yaml_file(self):
- temp_file = f"./${uuid4()}.yaml"
+ temp_file = f"./{uuid4()}.yaml"
with open(temp_file, 'w') as fd:
fd.write('\n')
try:
@@ -21,3 +21,10 @@ class TestBugs(unittest.TestCase):
finally:
os.remove(temp_file)
+
+ def test_optional_flag(self):
+ fact = ConfigFactory()
+ fact.add_source(YamlSource(f"./{uuid4()}", required=False))
+ conf = fact.build()
+ none_val = conf.get("no:key")
+ self.assertIsNone(none_val)
|
Optional flag for yaml files is not obeyed
When trying to add an optional YamlSource where the given yaml file does not exist, the following error is raised:
```expected str, bytes or os.PathLike object, not NoneType```
It appears the YamlSource load method is checking if the source is required or not to raise a runtime error. It correctly skips raising the error but still tries to read the file using open(). This results in the TypeError.
Line 14 of `dnry/config/yaml/source.py`
```python
def load(self, fact: IConfigFactory, conf: IConfigSection) -> dict:
path = self.__get_first_existing_path()
if path is None and self.__required:
paths = ",".join(self.__paths)
raise RuntimeError(f"Configuration Error: None of these paths could be found: {paths}")
with open(path, 'r') as f:
return yaml.load(f, Loader=self.__loader) or dict()
```
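Usage after the fix, mirroring the new test (the `ConfigFactory` import path is an assumption based on the test module):
```python
from dnry.config import ConfigFactory
from dnry.config.yaml import YamlSource

fact = ConfigFactory()
fact.add_source(YamlSource('./does-not-exist.yaml', required=False))
conf = fact.build()              # no RuntimeError, no TypeError
assert conf.get('no:key') is None
```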
|
0.0
|
296dfdc2eee8f82f0926d102f42db2f5fd8f2efb
|
[
"test/test_bugs.py::TestBugs::test_optional_flag"
] |
[
"test/test_bugs.py::TestBugs::test_empty_yaml_file"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2019-11-23 17:43:55+00:00
|
mit
| 2,107 |
|
encode__httpcore-631
|
diff --git a/httpcore/_async/connection_pool.py b/httpcore/_async/connection_pool.py
index 5541689..e6c4ef6 100644
--- a/httpcore/_async/connection_pool.py
+++ b/httpcore/_async/connection_pool.py
@@ -301,19 +301,11 @@ class AsyncConnectionPool(AsyncRequestInterface):
Close any connections in the pool.
"""
async with self._pool_lock:
- requests_still_in_flight = len(self._requests)
-
for connection in self._pool:
await connection.aclose()
self._pool = []
self._requests = []
- if requests_still_in_flight:
- raise RuntimeError(
- f"The connection pool was closed while {requests_still_in_flight} "
- f"HTTP requests/responses were still in-flight."
- )
-
async def __aenter__(self) -> "AsyncConnectionPool":
return self
diff --git a/httpcore/_sync/connection_pool.py b/httpcore/_sync/connection_pool.py
index 020893d..53536d0 100644
--- a/httpcore/_sync/connection_pool.py
+++ b/httpcore/_sync/connection_pool.py
@@ -301,19 +301,11 @@ class ConnectionPool(RequestInterface):
Close any connections in the pool.
"""
with self._pool_lock:
- requests_still_in_flight = len(self._requests)
-
for connection in self._pool:
connection.close()
self._pool = []
self._requests = []
- if requests_still_in_flight:
- raise RuntimeError(
- f"The connection pool was closed while {requests_still_in_flight} "
- f"HTTP requests/responses were still in-flight."
- )
-
def __enter__(self) -> "ConnectionPool":
return self
diff --git a/httpcore/backends/asyncio.py b/httpcore/backends/asyncio.py
index 3b8abf1..23f7dce 100644
--- a/httpcore/backends/asyncio.py
+++ b/httpcore/backends/asyncio.py
@@ -26,6 +26,7 @@ class AsyncIOStream(AsyncNetworkStream):
exc_map = {
TimeoutError: ReadTimeout,
anyio.BrokenResourceError: ReadError,
+ anyio.ClosedResourceError: ReadError,
}
with map_exceptions(exc_map):
with anyio.fail_after(timeout):
@@ -43,6 +44,7 @@ class AsyncIOStream(AsyncNetworkStream):
exc_map = {
TimeoutError: WriteTimeout,
anyio.BrokenResourceError: WriteError,
+ anyio.ClosedResourceError: WriteError,
}
with map_exceptions(exc_map):
with anyio.fail_after(timeout):
diff --git a/httpcore/backends/mock.py b/httpcore/backends/mock.py
index 8491f6d..9aba0ea 100644
--- a/httpcore/backends/mock.py
+++ b/httpcore/backends/mock.py
@@ -2,6 +2,7 @@ import ssl
import typing
from typing import Optional
+from .._exceptions import ReadError
from .base import AsyncNetworkBackend, AsyncNetworkStream, NetworkBackend, NetworkStream
@@ -17,8 +18,11 @@ class MockStream(NetworkStream):
def __init__(self, buffer: typing.List[bytes], http2: bool = False) -> None:
self._buffer = buffer
self._http2 = http2
+ self._closed = False
def read(self, max_bytes: int, timeout: Optional[float] = None) -> bytes:
+ if self._closed:
+ raise ReadError("Connection closed")
if not self._buffer:
return b""
return self._buffer.pop(0)
@@ -27,7 +31,7 @@ class MockStream(NetworkStream):
pass
def close(self) -> None:
- pass
+ self._closed = True
def start_tls(
self,
@@ -68,8 +72,11 @@ class AsyncMockStream(AsyncNetworkStream):
def __init__(self, buffer: typing.List[bytes], http2: bool = False) -> None:
self._buffer = buffer
self._http2 = http2
+ self._closed = False
async def read(self, max_bytes: int, timeout: Optional[float] = None) -> bytes:
+ if self._closed:
+ raise ReadError("Connection closed")
if not self._buffer:
return b""
return self._buffer.pop(0)
@@ -78,7 +85,7 @@ class AsyncMockStream(AsyncNetworkStream):
pass
async def aclose(self) -> None:
- pass
+ self._closed = True
async def start_tls(
self,
diff --git a/httpcore/backends/trio.py b/httpcore/backends/trio.py
index 7786d02..951016c 100644
--- a/httpcore/backends/trio.py
+++ b/httpcore/backends/trio.py
@@ -27,6 +27,7 @@ class TrioStream(AsyncNetworkStream):
exc_map: ExceptionMapping = {
trio.TooSlowError: ReadTimeout,
trio.BrokenResourceError: ReadError,
+ trio.ClosedResourceError: ReadError,
}
with map_exceptions(exc_map):
with trio.fail_after(timeout_or_inf):
@@ -43,6 +44,7 @@ class TrioStream(AsyncNetworkStream):
exc_map: ExceptionMapping = {
trio.TooSlowError: WriteTimeout,
trio.BrokenResourceError: WriteError,
+ trio.ClosedResourceError: WriteError,
}
with map_exceptions(exc_map):
with trio.fail_after(timeout_or_inf):
|
encode/httpcore
|
6a97dade2a57283491c7830385d52daccbeaf93b
|
diff --git a/tests/_async/test_connection_pool.py b/tests/_async/test_connection_pool.py
index 684e9ba..d2ac58a 100644
--- a/tests/_async/test_connection_pool.py
+++ b/tests/_async/test_connection_pool.py
@@ -3,7 +3,13 @@ from typing import List, Optional
import pytest
import trio as concurrency
-from httpcore import AsyncConnectionPool, ConnectError, PoolTimeout, UnsupportedProtocol
+from httpcore import (
+ AsyncConnectionPool,
+ ConnectError,
+ PoolTimeout,
+ ReadError,
+ UnsupportedProtocol,
+)
from httpcore.backends.base import AsyncNetworkStream
from httpcore.backends.mock import AsyncMockBackend
@@ -463,9 +469,10 @@ async def test_connection_pool_closed_while_request_in_flight():
) as pool:
# Send a request, and then close the connection pool while the
# response has not yet been streamed.
- async with pool.stream("GET", "https://example.com/"):
- with pytest.raises(RuntimeError):
- await pool.aclose()
+ async with pool.stream("GET", "https://example.com/") as response:
+ await pool.aclose()
+ with pytest.raises(ReadError):
+ await response.aread()
@pytest.mark.anyio
diff --git a/tests/_sync/test_connection_pool.py b/tests/_sync/test_connection_pool.py
index 3ab0c87..453b7fd 100644
--- a/tests/_sync/test_connection_pool.py
+++ b/tests/_sync/test_connection_pool.py
@@ -3,7 +3,13 @@ from typing import List, Optional
import pytest
from tests import concurrency
-from httpcore import ConnectionPool, ConnectError, PoolTimeout, UnsupportedProtocol
+from httpcore import (
+ ConnectionPool,
+ ConnectError,
+ PoolTimeout,
+ ReadError,
+ UnsupportedProtocol,
+)
from httpcore.backends.base import NetworkStream
from httpcore.backends.mock import MockBackend
@@ -463,9 +469,10 @@ def test_connection_pool_closed_while_request_in_flight():
) as pool:
# Send a request, and then close the connection pool while the
# response has not yet been streamed.
- with pool.stream("GET", "https://example.com/"):
- with pytest.raises(RuntimeError):
- pool.close()
+ with pool.stream("GET", "https://example.com/") as response:
+ pool.close()
+ with pytest.raises(ReadError):
+ response.read()
|
List in-flight connections
`AsyncConnectionPool.aclose()` raises `RuntimeError: The connection pool was closed while 1 HTTP requests/responses were still in-flight.` if there are any ongoing requests.
It would be nice to provide a list of URLs to help me understand what I'm doing wrong. At the moment I'm adding `print` statements to the httpcore code to figure this out.
Something like this would be nice:
```
RuntimeError: The connection pool was closed while 3 HTTP requests/responses were still in-flight:
URL(scheme=b'https', host=b'www.example.com', port=None, target=b'/foo')
URL(scheme=b'https', host=b'www.example.com', port=None, target=b'/bar')
URL(scheme=b'https', host=b'www.example.net', port=None, target=b'/baz')
```
|
0.0
|
6a97dade2a57283491c7830385d52daccbeaf93b
|
[
"tests/_async/test_connection_pool.py::test_connection_pool_closed_while_request_in_flight[asyncio]",
"tests/_async/test_connection_pool.py::test_connection_pool_closed_while_request_in_flight[trio]",
"tests/_sync/test_connection_pool.py::test_connection_pool_closed_while_request_in_flight"
] |
[
"tests/_async/test_connection_pool.py::test_connection_pool_with_keepalive[asyncio]",
"tests/_async/test_connection_pool.py::test_connection_pool_with_close[asyncio]",
"tests/_async/test_connection_pool.py::test_trace_request[asyncio]",
"tests/_async/test_connection_pool.py::test_connection_pool_with_http_exception[asyncio]",
"tests/_async/test_connection_pool.py::test_connection_pool_with_connect_exception[asyncio]",
"tests/_async/test_connection_pool.py::test_connection_pool_with_immediate_expiry[asyncio]",
"tests/_async/test_connection_pool.py::test_connection_pool_with_no_keepalive_connections_allowed[asyncio]",
"tests/_async/test_connection_pool.py::test_unsupported_protocol[asyncio]",
"tests/_async/test_connection_pool.py::test_connection_pool_timeout[asyncio]",
"tests/_async/test_connection_pool.py::test_http11_upgrade_connection[asyncio]",
"tests/_async/test_connection_pool.py::test_connection_pool_with_keepalive[trio]",
"tests/_async/test_connection_pool.py::test_connection_pool_with_close[trio]",
"tests/_async/test_connection_pool.py::test_trace_request[trio]",
"tests/_async/test_connection_pool.py::test_connection_pool_with_http_exception[trio]",
"tests/_async/test_connection_pool.py::test_connection_pool_with_connect_exception[trio]",
"tests/_async/test_connection_pool.py::test_connection_pool_with_immediate_expiry[trio]",
"tests/_async/test_connection_pool.py::test_connection_pool_with_no_keepalive_connections_allowed[trio]",
"tests/_async/test_connection_pool.py::test_unsupported_protocol[trio]",
"tests/_async/test_connection_pool.py::test_connection_pool_timeout[trio]",
"tests/_async/test_connection_pool.py::test_http11_upgrade_connection[trio]",
"tests/_async/test_connection_pool.py::test_connection_pool_concurrency",
"tests/_async/test_connection_pool.py::test_connection_pool_concurrency_same_domain_closing",
"tests/_async/test_connection_pool.py::test_connection_pool_concurrency_same_domain_keepalive",
"tests/_sync/test_connection_pool.py::test_connection_pool_with_keepalive",
"tests/_sync/test_connection_pool.py::test_connection_pool_with_close",
"tests/_sync/test_connection_pool.py::test_trace_request",
"tests/_sync/test_connection_pool.py::test_connection_pool_with_http_exception",
"tests/_sync/test_connection_pool.py::test_connection_pool_with_connect_exception",
"tests/_sync/test_connection_pool.py::test_connection_pool_with_immediate_expiry",
"tests/_sync/test_connection_pool.py::test_connection_pool_with_no_keepalive_connections_allowed",
"tests/_sync/test_connection_pool.py::test_connection_pool_concurrency",
"tests/_sync/test_connection_pool.py::test_connection_pool_concurrency_same_domain_closing",
"tests/_sync/test_connection_pool.py::test_connection_pool_concurrency_same_domain_keepalive",
"tests/_sync/test_connection_pool.py::test_unsupported_protocol",
"tests/_sync/test_connection_pool.py::test_connection_pool_timeout",
"tests/_sync/test_connection_pool.py::test_http11_upgrade_connection"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-11-30 15:48:32+00:00
|
bsd-3-clause
| 2,108 |
|
encode__httpcore-641
|
diff --git a/httpcore/_async/http11.py b/httpcore/_async/http11.py
index 7ad3664..32fa3a6 100644
--- a/httpcore/_async/http11.py
+++ b/httpcore/_async/http11.py
@@ -20,6 +20,7 @@ from .._exceptions import (
ConnectionNotAvailable,
LocalProtocolError,
RemoteProtocolError,
+ WriteError,
map_exceptions,
)
from .._models import Origin, Request, Response
@@ -84,10 +85,21 @@ class AsyncHTTP11Connection(AsyncConnectionInterface):
try:
kwargs = {"request": request}
- async with Trace("send_request_headers", logger, request, kwargs) as trace:
- await self._send_request_headers(**kwargs)
- async with Trace("send_request_body", logger, request, kwargs) as trace:
- await self._send_request_body(**kwargs)
+ try:
+ async with Trace(
+ "send_request_headers", logger, request, kwargs
+ ) as trace:
+ await self._send_request_headers(**kwargs)
+ async with Trace("send_request_body", logger, request, kwargs) as trace:
+ await self._send_request_body(**kwargs)
+ except WriteError:
+ # If we get a write error while we're writing the request,
+                # then we suppress this error and move on to attempting to
+ # read the response. Servers can sometimes close the request
+ # pre-emptively and then respond with a well formed HTTP
+ # error response.
+ pass
+
async with Trace(
"receive_response_headers", logger, request, kwargs
) as trace:
diff --git a/httpcore/_sync/http11.py b/httpcore/_sync/http11.py
index edcce72..0cc100e 100644
--- a/httpcore/_sync/http11.py
+++ b/httpcore/_sync/http11.py
@@ -20,6 +20,7 @@ from .._exceptions import (
ConnectionNotAvailable,
LocalProtocolError,
RemoteProtocolError,
+ WriteError,
map_exceptions,
)
from .._models import Origin, Request, Response
@@ -84,10 +85,21 @@ class HTTP11Connection(ConnectionInterface):
try:
kwargs = {"request": request}
- with Trace("send_request_headers", logger, request, kwargs) as trace:
- self._send_request_headers(**kwargs)
- with Trace("send_request_body", logger, request, kwargs) as trace:
- self._send_request_body(**kwargs)
+ try:
+ with Trace(
+ "send_request_headers", logger, request, kwargs
+ ) as trace:
+ self._send_request_headers(**kwargs)
+ with Trace("send_request_body", logger, request, kwargs) as trace:
+ self._send_request_body(**kwargs)
+ except WriteError:
+ # If we get a write error while we're writing the request,
+            # then we suppress this error and move on to attempting to
+ # read the response. Servers can sometimes close the request
+ # pre-emptively and then respond with a well formed HTTP
+ # error response.
+ pass
+
with Trace(
"receive_response_headers", logger, request, kwargs
) as trace:
|
encode/httpcore
|
80ff02f1276eba3cb6b6493b3f0b033a26d6348d
|
diff --git a/tests/_async/test_connection.py b/tests/_async/test_connection.py
index 8b29942..b6ee0c7 100644
--- a/tests/_async/test_connection.py
+++ b/tests/_async/test_connection.py
@@ -9,10 +9,13 @@ from httpcore import (
SOCKET_OPTION,
AsyncHTTPConnection,
AsyncMockBackend,
+ AsyncMockStream,
AsyncNetworkStream,
ConnectError,
ConnectionNotAvailable,
Origin,
+ RemoteProtocolError,
+ WriteError,
)
@@ -83,7 +86,109 @@ async def test_concurrent_requests_not_available_on_http11_connections():
await conn.request("GET", "https://example.com/")
+@pytest.mark.filterwarnings("ignore::pytest.PytestUnraisableExceptionWarning")
@pytest.mark.anyio
+async def test_write_error_with_response_sent():
+ """
+ If a server half-closes the connection while the client is sending
+ the request, it may still send a response. In this case the client
+ should successfully read and return the response.
+
+ See also the `test_write_error_without_response_sent` test above.
+ """
+
+ class ErrorOnRequestTooLargeStream(AsyncMockStream):
+ def __init__(self, buffer: typing.List[bytes], http2: bool = False) -> None:
+ super().__init__(buffer, http2)
+ self.count = 0
+
+ async def write(
+ self, buffer: bytes, timeout: typing.Optional[float] = None
+ ) -> None:
+ self.count += len(buffer)
+
+ if self.count > 1_000_000:
+ raise WriteError()
+
+ class ErrorOnRequestTooLarge(AsyncMockBackend):
+ async def connect_tcp(
+ self,
+ host: str,
+ port: int,
+ timeout: typing.Optional[float] = None,
+ local_address: typing.Optional[str] = None,
+ socket_options: typing.Optional[typing.Iterable[SOCKET_OPTION]] = None,
+ ) -> AsyncMockStream:
+ return ErrorOnRequestTooLargeStream(list(self._buffer), http2=self._http2)
+
+ origin = Origin(b"https", b"example.com", 443)
+ network_backend = ErrorOnRequestTooLarge(
+ [
+ b"HTTP/1.1 413 Payload Too Large\r\n",
+ b"Content-Type: plain/text\r\n",
+ b"Content-Length: 37\r\n",
+ b"\r\n",
+ b"Request body exceeded 1,000,000 bytes",
+ ]
+ )
+
+ async with AsyncHTTPConnection(
+ origin=origin, network_backend=network_backend, keepalive_expiry=5.0
+ ) as conn:
+ content = b"x" * 10_000_000
+ response = await conn.request("POST", "https://example.com/", content=content)
+ assert response.status == 413
+ assert response.content == b"Request body exceeded 1,000,000 bytes"
+
+
+@pytest.mark.anyio
+@pytest.mark.filterwarnings("ignore::pytest.PytestUnraisableExceptionWarning")
+async def test_write_error_without_response_sent():
+ """
+ If a server fully closes the connection while the client is sending
+ the request, then client should raise an error.
+
+ See also the `test_write_error_with_response_sent` test above.
+ """
+
+ class ErrorOnRequestTooLargeStream(AsyncMockStream):
+ def __init__(self, buffer: typing.List[bytes], http2: bool = False) -> None:
+ super().__init__(buffer, http2)
+ self.count = 0
+
+ async def write(
+ self, buffer: bytes, timeout: typing.Optional[float] = None
+ ) -> None:
+ self.count += len(buffer)
+
+ if self.count > 1_000_000:
+ raise WriteError()
+
+ class ErrorOnRequestTooLarge(AsyncMockBackend):
+ async def connect_tcp(
+ self,
+ host: str,
+ port: int,
+ timeout: typing.Optional[float] = None,
+ local_address: typing.Optional[str] = None,
+ socket_options: typing.Optional[typing.Iterable[SOCKET_OPTION]] = None,
+ ) -> AsyncMockStream:
+ return ErrorOnRequestTooLargeStream(list(self._buffer), http2=self._http2)
+
+ origin = Origin(b"https", b"example.com", 443)
+ network_backend = ErrorOnRequestTooLarge([])
+
+ async with AsyncHTTPConnection(
+ origin=origin, network_backend=network_backend, keepalive_expiry=5.0
+ ) as conn:
+ content = b"x" * 10_000_000
+ with pytest.raises(RemoteProtocolError) as exc_info:
+ await conn.request("POST", "https://example.com/", content=content)
+ assert str(exc_info.value) == "Server disconnected without sending a response."
+
+
+@pytest.mark.anyio
+@pytest.mark.filterwarnings("ignore::pytest.PytestUnraisableExceptionWarning")
async def test_http2_connection():
origin = Origin(b"https", b"example.com", 443)
network_backend = AsyncMockBackend(
diff --git a/tests/_sync/test_connection.py b/tests/_sync/test_connection.py
index 9e0c403..37c82e0 100644
--- a/tests/_sync/test_connection.py
+++ b/tests/_sync/test_connection.py
@@ -9,10 +9,13 @@ from httpcore import (
SOCKET_OPTION,
HTTPConnection,
MockBackend,
+ MockStream,
NetworkStream,
ConnectError,
ConnectionNotAvailable,
Origin,
+ RemoteProtocolError,
+ WriteError,
)
@@ -83,7 +86,109 @@ def test_concurrent_requests_not_available_on_http11_connections():
conn.request("GET", "https://example.com/")
+@pytest.mark.filterwarnings("ignore::pytest.PytestUnraisableExceptionWarning")
+def test_write_error_with_response_sent():
+ """
+ If a server half-closes the connection while the client is sending
+ the request, it may still send a response. In this case the client
+ should successfully read and return the response.
+
+ See also the `test_write_error_without_response_sent` test above.
+ """
+
+ class ErrorOnRequestTooLargeStream(MockStream):
+ def __init__(self, buffer: typing.List[bytes], http2: bool = False) -> None:
+ super().__init__(buffer, http2)
+ self.count = 0
+
+ def write(
+ self, buffer: bytes, timeout: typing.Optional[float] = None
+ ) -> None:
+ self.count += len(buffer)
+
+ if self.count > 1_000_000:
+ raise WriteError()
+
+ class ErrorOnRequestTooLarge(MockBackend):
+ def connect_tcp(
+ self,
+ host: str,
+ port: int,
+ timeout: typing.Optional[float] = None,
+ local_address: typing.Optional[str] = None,
+ socket_options: typing.Optional[typing.Iterable[SOCKET_OPTION]] = None,
+ ) -> MockStream:
+ return ErrorOnRequestTooLargeStream(list(self._buffer), http2=self._http2)
+
+ origin = Origin(b"https", b"example.com", 443)
+ network_backend = ErrorOnRequestTooLarge(
+ [
+ b"HTTP/1.1 413 Payload Too Large\r\n",
+ b"Content-Type: plain/text\r\n",
+ b"Content-Length: 37\r\n",
+ b"\r\n",
+ b"Request body exceeded 1,000,000 bytes",
+ ]
+ )
+
+ with HTTPConnection(
+ origin=origin, network_backend=network_backend, keepalive_expiry=5.0
+ ) as conn:
+ content = b"x" * 10_000_000
+ response = conn.request("POST", "https://example.com/", content=content)
+ assert response.status == 413
+ assert response.content == b"Request body exceeded 1,000,000 bytes"
+
+
+
+@pytest.mark.filterwarnings("ignore::pytest.PytestUnraisableExceptionWarning")
+def test_write_error_without_response_sent():
+ """
+ If a server fully closes the connection while the client is sending
+ the request, then client should raise an error.
+
+ See also the `test_write_error_with_response_sent` test above.
+ """
+
+ class ErrorOnRequestTooLargeStream(MockStream):
+ def __init__(self, buffer: typing.List[bytes], http2: bool = False) -> None:
+ super().__init__(buffer, http2)
+ self.count = 0
+
+ def write(
+ self, buffer: bytes, timeout: typing.Optional[float] = None
+ ) -> None:
+ self.count += len(buffer)
+
+ if self.count > 1_000_000:
+ raise WriteError()
+
+ class ErrorOnRequestTooLarge(MockBackend):
+ def connect_tcp(
+ self,
+ host: str,
+ port: int,
+ timeout: typing.Optional[float] = None,
+ local_address: typing.Optional[str] = None,
+ socket_options: typing.Optional[typing.Iterable[SOCKET_OPTION]] = None,
+ ) -> MockStream:
+ return ErrorOnRequestTooLargeStream(list(self._buffer), http2=self._http2)
+
+ origin = Origin(b"https", b"example.com", 443)
+ network_backend = ErrorOnRequestTooLarge([])
+
+ with HTTPConnection(
+ origin=origin, network_backend=network_backend, keepalive_expiry=5.0
+ ) as conn:
+ content = b"x" * 10_000_000
+ with pytest.raises(RemoteProtocolError) as exc_info:
+ conn.request("POST", "https://example.com/", content=content)
+ assert str(exc_info.value) == "Server disconnected without sending a response."
+
+
+
+@pytest.mark.filterwarnings("ignore::pytest.PytestUnraisableExceptionWarning")
def test_http2_connection():
origin = Origin(b"https", b"example.com", 443)
network_backend = MockBackend(
|
Handle HTTP/1.1 half-closed connections gracefully.
There's an HTTP/1.1 case that can occur where...
* The client starts sending a request.
* The server half-closes the connection.
* The server sends a response, such as an HTTP 413 Content Too Large.
Currently our behaviour is that we'll see a `WriteError` occur and never get a response.
A more graceful behaviour is to handle this case and return the 413 response.
Prompted via https://github.com/encode/httpx/discussions/2503.
*A follow up question to this will be... is there an equivalent to this for HTTP/2 streams? But let's only consider that once we've dealt with this as a precursor.*
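Sketched below is the shape the eventual fix takes, reduced to its core (illustrative only: `send_request`, `stream`, and `read_response` are stand-in names, not httpcore's actual internals):
```python
from httpcore import RemoteProtocolError, WriteError


def send_request(stream, request_bytes):
    """Illustrative only: `stream` stands in for httpcore's network stream."""
    try:
        stream.write(request_bytes)
    except WriteError:
        # A half-closed connection can still carry a response (e.g. a 413),
        # so suppress the write failure and attempt to read anyway.
        pass
    response = stream.read_response()  # hypothetical helper
    if response is None:
        raise RemoteProtocolError("Server disconnected without sending a response.")
    return response
```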
|
0.0
|
80ff02f1276eba3cb6b6493b3f0b033a26d6348d
|
[
"tests/_async/test_connection.py::test_write_error_with_response_sent[asyncio]",
"tests/_async/test_connection.py::test_write_error_without_response_sent[asyncio]",
"tests/_async/test_connection.py::test_write_error_with_response_sent[trio]",
"tests/_async/test_connection.py::test_write_error_without_response_sent[trio]",
"tests/_sync/test_connection.py::test_write_error_with_response_sent",
"tests/_sync/test_connection.py::test_write_error_without_response_sent"
] |
[
"tests/_async/test_connection.py::test_http_connection[asyncio]",
"tests/_async/test_connection.py::test_concurrent_requests_not_available_on_http11_connections[asyncio]",
"tests/_async/test_connection.py::test_http2_connection[asyncio]",
"tests/_async/test_connection.py::test_request_to_incorrect_origin[asyncio]",
"tests/_async/test_connection.py::test_connection_retries[asyncio]",
"tests/_async/test_connection.py::test_connection_retries_tls[asyncio]",
"tests/_async/test_connection.py::test_uds_connections[asyncio]",
"tests/_async/test_connection.py::test_http_connection[trio]",
"tests/_async/test_connection.py::test_concurrent_requests_not_available_on_http11_connections[trio]",
"tests/_async/test_connection.py::test_http2_connection[trio]",
"tests/_async/test_connection.py::test_request_to_incorrect_origin[trio]",
"tests/_async/test_connection.py::test_connection_retries[trio]",
"tests/_async/test_connection.py::test_connection_retries_tls[trio]",
"tests/_async/test_connection.py::test_uds_connections[trio]",
"tests/_sync/test_connection.py::test_http_connection",
"tests/_sync/test_connection.py::test_concurrent_requests_not_available_on_http11_connections",
"tests/_sync/test_connection.py::test_http2_connection",
"tests/_sync/test_connection.py::test_request_to_incorrect_origin",
"tests/_sync/test_connection.py::test_connection_retries",
"tests/_sync/test_connection.py::test_connection_retries_tls",
"tests/_sync/test_connection.py::test_uds_connections"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-12-13 19:52:44+00:00
|
bsd-3-clause
| 2,109 |
|
encode__httpx-1002
|
diff --git a/httpx/_client.py b/httpx/_client.py
index 7248f87..2c56edc 100644
--- a/httpx/_client.py
+++ b/httpx/_client.py
@@ -322,6 +322,10 @@ class BaseClient:
url = URL(location, allow_relative=True)
+ # Check that we can handle the scheme
+ if url.scheme and url.scheme not in ("http", "https"):
+ raise InvalidURL(f'Scheme "{url.scheme}" not supported.')
+
# Handle malformed 'Location' headers that are "absolute" form, have no host.
# See: https://github.com/encode/httpx/issues/771
if url.scheme and not url.host:
|
encode/httpx
|
21d7e16559d9360ae3a5c5cfd23bab8bb85ee4a8
|
diff --git a/tests/client/test_redirects.py b/tests/client/test_redirects.py
index e91bb7e..fa5ae4e 100644
--- a/tests/client/test_redirects.py
+++ b/tests/client/test_redirects.py
@@ -8,6 +8,7 @@ import pytest
from httpx import (
URL,
AsyncClient,
+ InvalidURL,
NotRedirectResponse,
RequestBodyUnavailable,
TooManyRedirects,
@@ -140,6 +141,17 @@ class MockDispatch(httpcore.AsyncHTTPTransport):
else:
return b"HTTP/1.1", 200, b"OK", [], ByteStream(b"Hello, world!")
+ elif path == b"/redirect_custom_scheme":
+ status_code = codes.MOVED_PERMANENTLY
+ headers = [(b"location", b"market://details?id=42")]
+ return (
+ b"HTTP/1.1",
+ status_code,
+ b"Moved Permanently",
+ headers,
+ ByteStream(b""),
+ )
+
return b"HTTP/1.1", 200, b"OK", [], ByteStream(b"Hello, world!")
@@ -431,3 +443,11 @@ async def test_redirect_cookie_behavior():
response = await client.get("https://example.com/")
assert response.url == "https://example.com/"
assert response.text == "Not logged in"
+
+
+@pytest.mark.usefixtures("async_environment")
+async def test_redirect_custom_scheme():
+ client = AsyncClient(dispatch=MockDispatch())
+ with pytest.raises(InvalidURL) as e:
+ await client.post("https://example.org/redirect_custom_scheme")
+ assert str(e.value) == 'Scheme "market" not supported.'
|
KeyError occurs when redirecting to a custom scheme (e.g. market://)
# Information
OS platform : mac OS
Python version : 3.7.2
Installed dependencies and versions : `httpx==0.9.3`
Code snippet
```
@property
def port(self) -> int:
    port = self._uri_reference.port
    if port is None:
        return {"https": 443, "http": 80}[self.scheme]
    return int(port)
```
Error traceback
```
[2020-02-24 14:57:08 +0900] [82150] [ERROR] Exception
Traceback (most recent call last):
  File "/Users/sym/.pyenv/versions/airbridge-ads-was/lib/python3.7/site-packages/sanic/testing.py", line 120, in _collect_response
    method, url, *request_args, **request_kwargs
  File "/Users/sym/.pyenv/versions/airbridge-ads-was/lib/python3.7/site-packages/sanic/testing.py", line 41, in _local_request
    url, verify=False, *args, **kwargs
  File "/Users/sym/.pyenv/versions/airbridge-ads-was/lib/python3.7/site-packages/httpx/client.py", line 671, in get
    trust_env=trust_env,
  File "/Users/sym/.pyenv/versions/airbridge-ads-was/lib/python3.7/site-packages/httpx/client.py", line 268, in request
    trust_env=trust_env,
  File "/Users/sym/.pyenv/versions/airbridge-ads-was/lib/python3.7/site-packages/httpx/client.py", line 410, in send
    allow_redirects=allow_redirects,
  File "/Users/sym/.pyenv/versions/airbridge-ads-was/lib/python3.7/site-packages/httpx/client.py", line 478, in send_handling_redirects
    request = self.build_redirect_request(request, response)
  File "/Users/sym/.pyenv/versions/airbridge-ads-was/lib/python3.7/site-packages/httpx/client.py", line 500, in build_redirect_request
    headers = self.redirect_headers(request, url, method)
  File "/Users/sym/.pyenv/versions/airbridge-ads-was/lib/python3.7/site-packages/httpx/client.py", line 555, in redirect_headers
    if url.origin != request.url.origin:
  File "/Users/sym/.pyenv/versions/airbridge-ads-was/lib/python3.7/site-packages/httpx/models.py", line 215, in origin
    return Origin(self)
  File "/Users/sym/.pyenv/versions/airbridge-ads-was/lib/python3.7/site-packages/httpx/models.py", line 287, in __init__
    self.port = url.port
  File "/Users/sym/.pyenv/versions/airbridge-ads-was/lib/python3.7/site-packages/httpx/models.py", line 165, in port
    return {"https": 443, "http": 80}[self.scheme]
KeyError: 'market'
[2020-02-24 14:57:08 +0900] [82150] [INFO] Starting worker [82150]
[2020-02-24 14:57:08 +0900] [82150] [INFO] Stopping worker [82150]
[2020-02-24 14:57:08 +0900] [82150] [INFO] Server Stopped
```
# Description
I'm using Sanic, and Sanic uses httpx to make test requests.
When I return a redirect response that points to "market://details?id=~~" (an Android market URL), a KeyError occurs.
I think it is associated with the `port` property method shown above.
Is this the intended behavior?
Thank you.
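For completeness, the behaviour after the fix above: the redirect fails fast with an `InvalidURL` instead of a `KeyError` deep inside `port`. A sketch (the endpoint is hypothetical, mirroring the `/redirect_custom_scheme` route from the test patch):
```python
import httpx

try:
    # A URL whose redirect Location uses a non-http(s) scheme.
    httpx.get("https://example.org/redirect_custom_scheme")
except httpx.InvalidURL as exc:
    print(exc)  # Scheme "market" not supported.
```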
|
0.0
|
21d7e16559d9360ae3a5c5cfd23bab8bb85ee4a8
|
[
"tests/client/test_redirects.py::test_redirect_custom_scheme[asyncio]",
"tests/client/test_redirects.py::test_redirect_custom_scheme[trio]"
] |
[
"tests/client/test_redirects.py::test_no_redirect[asyncio]",
"tests/client/test_redirects.py::test_no_redirect[trio]",
"tests/client/test_redirects.py::test_redirect_301[asyncio]",
"tests/client/test_redirects.py::test_redirect_301[trio]",
"tests/client/test_redirects.py::test_redirect_302[asyncio]",
"tests/client/test_redirects.py::test_redirect_302[trio]",
"tests/client/test_redirects.py::test_redirect_303[asyncio]",
"tests/client/test_redirects.py::test_redirect_303[trio]",
"tests/client/test_redirects.py::test_disallow_redirects[asyncio]",
"tests/client/test_redirects.py::test_disallow_redirects[trio]",
"tests/client/test_redirects.py::test_relative_redirect[asyncio]",
"tests/client/test_redirects.py::test_relative_redirect[trio]",
"tests/client/test_redirects.py::test_malformed_redirect[asyncio]",
"tests/client/test_redirects.py::test_malformed_redirect[trio]",
"tests/client/test_redirects.py::test_no_scheme_redirect[asyncio]",
"tests/client/test_redirects.py::test_no_scheme_redirect[trio]",
"tests/client/test_redirects.py::test_fragment_redirect[asyncio]",
"tests/client/test_redirects.py::test_fragment_redirect[trio]",
"tests/client/test_redirects.py::test_multiple_redirects[asyncio]",
"tests/client/test_redirects.py::test_multiple_redirects[trio]",
"tests/client/test_redirects.py::test_too_many_redirects[asyncio]",
"tests/client/test_redirects.py::test_too_many_redirects[trio]",
"tests/client/test_redirects.py::test_too_many_redirects_calling_next[asyncio]",
"tests/client/test_redirects.py::test_too_many_redirects_calling_next[trio]",
"tests/client/test_redirects.py::test_redirect_loop[asyncio]",
"tests/client/test_redirects.py::test_redirect_loop[trio]",
"tests/client/test_redirects.py::test_cross_domain_redirect[asyncio]",
"tests/client/test_redirects.py::test_cross_domain_redirect[trio]",
"tests/client/test_redirects.py::test_same_domain_redirect[asyncio]",
"tests/client/test_redirects.py::test_same_domain_redirect[trio]",
"tests/client/test_redirects.py::test_body_redirect[asyncio]",
"tests/client/test_redirects.py::test_body_redirect[trio]",
"tests/client/test_redirects.py::test_no_body_redirect[asyncio]",
"tests/client/test_redirects.py::test_no_body_redirect[trio]",
"tests/client/test_redirects.py::test_can_stream_if_no_redirect[asyncio]",
"tests/client/test_redirects.py::test_can_stream_if_no_redirect[trio]",
"tests/client/test_redirects.py::test_cannot_redirect_streaming_body[asyncio]",
"tests/client/test_redirects.py::test_cannot_redirect_streaming_body[trio]",
"tests/client/test_redirects.py::test_cross_subdomain_redirect[asyncio]",
"tests/client/test_redirects.py::test_cross_subdomain_redirect[trio]",
"tests/client/test_redirects.py::test_redirect_cookie_behavior[asyncio]",
"tests/client/test_redirects.py::test_redirect_cookie_behavior[trio]"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2020-05-27 19:28:29+00:00
|
bsd-3-clause
| 2,110 |
|
encode__httpx-1034
|
diff --git a/httpx/_decoders.py b/httpx/_decoders.py
index 2a2e703..1ea47b0 100644
--- a/httpx/_decoders.py
+++ b/httpx/_decoders.py
@@ -261,7 +261,7 @@ class LineDecoder:
text = text[idx + 1 :]
break
elif next_char is None:
- self.buffer = text
+ self.buffer += text
text = ""
break
diff --git a/requirements.txt b/requirements.txt
index 246a08e..4260706 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -31,4 +31,4 @@ trustme
uvicorn
seed-isort-config
-attrs>=19.2 # See: https://github.com/encode/httpx/pull/566#issuecomment-559862665
+attrs>=19.3.0 # See: https://github.com/encode/httpx/pull/566#issuecomment-559862665
|
encode/httpx
|
27b0dbc22da1424a020c2bb769c81490f39ce283
|
diff --git a/tests/test_decoders.py b/tests/test_decoders.py
index 9f2fa51..89c545b 100644
--- a/tests/test_decoders.py
+++ b/tests/test_decoders.py
@@ -225,6 +225,15 @@ def test_line_decoder_nl():
assert decoder.decode("a\n\nb\nc\n") == ["a\n", "\n", "b\n", "c\n"]
assert decoder.flush() == []
+ # Issue #1033
+ decoder = LineDecoder()
+ assert decoder.decode("") == []
+ assert decoder.decode("12345\n") == ["12345\n"]
+ assert decoder.decode("foo ") == []
+ assert decoder.decode("bar ") == []
+ assert decoder.decode("baz\n") == ["foo bar baz\n"]
+ assert decoder.flush() == []
+
def test_line_decoder_cr():
decoder = LineDecoder()
@@ -237,6 +246,16 @@ def test_line_decoder_cr():
assert decoder.decode("a\r\rb\rc\r") == ["a\n", "\n", "b\n"]
assert decoder.flush() == ["c\n"]
+ # Issue #1033
+ # TODO: This seems like another bug; fix expectations and results.
+ decoder = LineDecoder()
+ assert decoder.decode("") == []
+ assert decoder.decode("12345\r") == []
+ assert decoder.decode("foo ") == []
+ assert decoder.decode("bar ") == []
+ assert decoder.decode("baz\r") == []
+ assert decoder.flush() == ["12345\rfoo bar baz\n"]
+
def test_line_decoder_crnl():
decoder = LineDecoder()
@@ -255,6 +274,15 @@ def test_line_decoder_crnl():
assert decoder.decode("\n\r\nb\r\nc") == ["a\n", "\n", "b\n"]
assert decoder.flush() == ["c"]
+ # Issue #1033
+ decoder = LineDecoder()
+ assert decoder.decode("") == []
+ assert decoder.decode("12345\r\n") == ["12345\n"]
+ assert decoder.decode("foo ") == []
+ assert decoder.decode("bar ") == []
+ assert decoder.decode("baz\r\n") == ["foo bar baz\n"]
+ assert decoder.flush() == []
+
def test_invalid_content_encoding_header():
headers = [(b"Content-Encoding", b"invalid-header")]
|
aiter_lines() doesn't return full lines that span multiple chunks
<https://gist.github.com/scr-oath/aa76d200222a0409d09a0d6feb1a13e2> shows an example setup using cherry.py as the server, which just outputs two lines. The JSON is big enough to be sent in two chunks; httpx's aiter_lines() gets confused and yields data from the middle of the JSON line. It seems to skip the starting part, which was most likely sent in an earlier chunk without a newline.
### test-httpx.py
```python
import asyncio
import json

import httpx


class TestHttpx:
    def __init__(self):
        pass

    async def __call__(self):
        http_client = httpx.AsyncClient()
        async with http_client.stream(method="GET", url='http://localhost:8080/lines') as response:
            is_message = True
            async for line in response.aiter_lines():
                is_message = not is_message
                if is_message:
                    message = json.loads(line)
                    print(message)


def main():
    test_httpx = TestHttpx()
    asyncio.run(test_httpx())


if __name__ == '__main__':
    main()
```
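The underlying defect is a single line in `LineDecoder`: an incomplete chunk *replaced* the buffer instead of appending to it. A minimal sketch at the decoder level, mirroring the new test case (the import path is the private module the test-suite uses):
```python
from httpx._decoders import LineDecoder

decoder = LineDecoder()
decoder.decode("foo ")          # buffered; no complete line yet
decoder.decode("bar ")          # before the fix, this step overwrote "foo "
print(decoder.decode("baz\n"))  # with the fix: ['foo bar baz\n']
```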
|
0.0
|
27b0dbc22da1424a020c2bb769c81490f39ce283
|
[
"tests/test_decoders.py::test_line_decoder_nl",
"tests/test_decoders.py::test_line_decoder_cr",
"tests/test_decoders.py::test_line_decoder_crnl"
] |
[
"tests/test_decoders.py::test_deflate",
"tests/test_decoders.py::test_zlib",
"tests/test_decoders.py::test_gzip",
"tests/test_decoders.py::test_brotli",
"tests/test_decoders.py::test_multi",
"tests/test_decoders.py::test_multi_with_identity",
"tests/test_decoders.py::test_streaming",
"tests/test_decoders.py::test_empty_content[deflate]",
"tests/test_decoders.py::test_empty_content[gzip]",
"tests/test_decoders.py::test_empty_content[br]",
"tests/test_decoders.py::test_empty_content[identity]",
"tests/test_decoders.py::test_decoders_empty_cases[BrotliDecoder]",
"tests/test_decoders.py::test_decoders_empty_cases[DeflateDecoder]",
"tests/test_decoders.py::test_decoders_empty_cases[GZipDecoder]",
"tests/test_decoders.py::test_decoders_empty_cases[IdentityDecoder]",
"tests/test_decoders.py::test_decoding_errors[deflate]",
"tests/test_decoders.py::test_decoding_errors[gzip]",
"tests/test_decoders.py::test_decoding_errors[br]",
"tests/test_decoders.py::test_text_decoder[data0-ascii]",
"tests/test_decoders.py::test_text_decoder[data1-utf-8]",
"tests/test_decoders.py::test_text_decoder[data2-shift-jis]",
"tests/test_decoders.py::test_text_decoder[data3-shift-jis]",
"tests/test_decoders.py::test_text_decoder[data4-MacCyrillic]",
"tests/test_decoders.py::test_text_decoder[data5-euc-jp]",
"tests/test_decoders.py::test_text_decoder_known_encoding",
"tests/test_decoders.py::test_text_decoder_empty_cases",
"tests/test_decoders.py::test_invalid_content_encoding_header"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-06-24 21:05:17+00:00
|
bsd-3-clause
| 2,111 |
|
encode__httpx-1044
|
diff --git a/httpx/__init__.py b/httpx/__init__.py
index 7a2b8a9..155ea5c 100644
--- a/httpx/__init__.py
+++ b/httpx/__init__.py
@@ -25,6 +25,7 @@ from ._exceptions import (
ResponseNotRead,
StreamConsumed,
StreamError,
+ TimeoutException,
TooManyRedirects,
WriteError,
WriteTimeout,
@@ -81,6 +82,7 @@ __all__ = [
"StreamConsumed",
"StreamError",
"ProxyError",
+ "TimeoutException",
"TooManyRedirects",
"WriteError",
"WriteTimeout",
diff --git a/httpx/_client.py b/httpx/_client.py
index c2a485f..fd9c1e5 100644
--- a/httpx/_client.py
+++ b/httpx/_client.py
@@ -18,7 +18,14 @@ from ._config import (
UnsetType,
)
from ._content_streams import ContentStream
-from ._exceptions import HTTPError, InvalidURL, RequestBodyUnavailable, TooManyRedirects
+from ._exceptions import (
+ HTTPCORE_EXC_MAP,
+ HTTPError,
+ InvalidURL,
+ RequestBodyUnavailable,
+ TooManyRedirects,
+ map_exceptions,
+)
from ._models import URL, Cookies, Headers, Origin, QueryParams, Request, Response
from ._status_codes import codes
from ._transports.asgi import ASGITransport
@@ -705,19 +712,20 @@ class Client(BaseClient):
transport = self.transport_for_url(request.url)
try:
- (
- http_version,
- status_code,
- reason_phrase,
- headers,
- stream,
- ) = transport.request(
- request.method.encode(),
- request.url.raw,
- headers=request.headers.raw,
- stream=request.stream,
- timeout=timeout.as_dict(),
- )
+ with map_exceptions(HTTPCORE_EXC_MAP):
+ (
+ http_version,
+ status_code,
+ reason_phrase,
+ headers,
+ stream,
+ ) = transport.request(
+ request.method.encode(),
+ request.url.raw,
+ headers=request.headers.raw,
+ stream=request.stream,
+ timeout=timeout.as_dict(),
+ )
except HTTPError as exc:
# Add the original request to any HTTPError unless
# there's already a request attached in the case of
@@ -1255,19 +1263,20 @@ class AsyncClient(BaseClient):
transport = self.transport_for_url(request.url)
try:
- (
- http_version,
- status_code,
- reason_phrase,
- headers,
- stream,
- ) = await transport.request(
- request.method.encode(),
- request.url.raw,
- headers=request.headers.raw,
- stream=request.stream,
- timeout=timeout.as_dict(),
- )
+ with map_exceptions(HTTPCORE_EXC_MAP):
+ (
+ http_version,
+ status_code,
+ reason_phrase,
+ headers,
+ stream,
+ ) = await transport.request(
+ request.method.encode(),
+ request.url.raw,
+ headers=request.headers.raw,
+ stream=request.stream,
+ timeout=timeout.as_dict(),
+ )
except HTTPError as exc:
# Add the original request to any HTTPError unless
# there's already a request attached in the case of
diff --git a/httpx/_exceptions.py b/httpx/_exceptions.py
index d8b3c8b..ae07ec5 100644
--- a/httpx/_exceptions.py
+++ b/httpx/_exceptions.py
@@ -1,3 +1,4 @@
+import contextlib
import typing
import httpcore
@@ -28,25 +29,87 @@ class HTTPError(Exception):
# Timeout exceptions...
-ConnectTimeout = httpcore.ConnectTimeout
-ReadTimeout = httpcore.ReadTimeout
-WriteTimeout = httpcore.WriteTimeout
-PoolTimeout = httpcore.PoolTimeout
+
+class TimeoutException(HTTPError):
+ """
+ The base class for timeout errors.
+
+ An operation has timed out.
+ """
+
+
+class ConnectTimeout(TimeoutException):
+ """
+ Timed out while connecting to the host.
+ """
+
+
+class ReadTimeout(TimeoutException):
+ """
+ Timed out while receiving data from the host.
+ """
+
+
+class WriteTimeout(TimeoutException):
+ """
+ Timed out while sending data to the host.
+ """
+
+
+class PoolTimeout(TimeoutException):
+ """
+ Timed out waiting to acquire a connection from the pool.
+ """
# Core networking exceptions...
-NetworkError = httpcore.NetworkError
-ReadError = httpcore.ReadError
-WriteError = httpcore.WriteError
-ConnectError = httpcore.ConnectError
-CloseError = httpcore.CloseError
+
+class NetworkError(HTTPError):
+ """
+ The base class for network-related errors.
+
+ An error occurred while interacting with the network.
+ """
+
+
+class ReadError(NetworkError):
+ """
+ Failed to receive data from the network.
+ """
+
+
+class WriteError(NetworkError):
+ """
+ Failed to send data through the network.
+ """
+
+
+class ConnectError(NetworkError):
+ """
+ Failed to establish a connection.
+ """
+
+
+class CloseError(NetworkError):
+ """
+ Failed to close a connection.
+ """
# Other transport exceptions...
-ProxyError = httpcore.ProxyError
-ProtocolError = httpcore.ProtocolError
+
+class ProxyError(HTTPError):
+ """
+ An error occurred while proxying a request.
+ """
+
+
+class ProtocolError(HTTPError):
+ """
+ A protocol was violated by the server.
+ """
# HTTP exceptions...
@@ -138,3 +201,43 @@ class CookieConflict(HTTPError):
"""
Attempted to lookup a cookie by name, but multiple cookies existed.
"""
+
+
+@contextlib.contextmanager
+def map_exceptions(
+ mapping: typing.Mapping[typing.Type[Exception], typing.Type[Exception]]
+) -> typing.Iterator[None]:
+ try:
+ yield
+ except Exception as exc:
+ mapped_exc = None
+
+ for from_exc, to_exc in mapping.items():
+ if not isinstance(exc, from_exc):
+ continue
+ # We want to map to the most specific exception we can find.
+ # Eg if `exc` is an `httpcore.ReadTimeout`, we want to map to
+ # `httpx.ReadTimeout`, not just `httpx.TimeoutException`.
+ if mapped_exc is None or issubclass(to_exc, mapped_exc):
+ mapped_exc = to_exc
+
+ if mapped_exc is None:
+ raise
+
+ raise mapped_exc(exc) from None
+
+
+HTTPCORE_EXC_MAP = {
+ httpcore.TimeoutException: TimeoutException,
+ httpcore.ConnectTimeout: ConnectTimeout,
+ httpcore.ReadTimeout: ReadTimeout,
+ httpcore.WriteTimeout: WriteTimeout,
+ httpcore.PoolTimeout: PoolTimeout,
+ httpcore.NetworkError: NetworkError,
+ httpcore.ConnectError: ConnectError,
+ httpcore.ReadError: ReadError,
+ httpcore.WriteError: WriteError,
+ httpcore.CloseError: CloseError,
+ httpcore.ProxyError: ProxyError,
+ httpcore.ProtocolError: ProtocolError,
+}
|
encode/httpx
|
fab427972b7a5cb64d8dfb43467b84cdf430ff24
|
diff --git a/tests/test_exceptions.py b/tests/test_exceptions.py
index 1ce71e8..34a1752 100644
--- a/tests/test_exceptions.py
+++ b/tests/test_exceptions.py
@@ -1,6 +1,46 @@
+from typing import Any
+
+import httpcore
import pytest
import httpx
+from httpx._exceptions import HTTPCORE_EXC_MAP
+
+
+def test_httpcore_all_exceptions_mapped() -> None:
+ """
+ All exception classes exposed by HTTPCore are properly mapped to an HTTPX-specific
+ exception class.
+ """
+ not_mapped = [
+ value
+ for name, value in vars(httpcore).items()
+ if isinstance(value, type)
+ and issubclass(value, Exception)
+ and value not in HTTPCORE_EXC_MAP
+ ]
+
+ if not_mapped:
+ pytest.fail(f"Unmapped httpcore exceptions: {not_mapped}")
+
+
+def test_httpcore_exception_mapping() -> None:
+ """
+ HTTPCore exception mapping works as expected.
+ """
+
+ # Make sure we don't just map to `NetworkError`.
+ with pytest.raises(httpx.ConnectError):
+ httpx.get("http://doesnotexist")
+
+ # Make sure it also works with custom transports.
+ class MockTransport(httpcore.SyncHTTPTransport):
+ def request(self, *args: Any, **kwargs: Any) -> Any:
+ raise httpcore.ProtocolError()
+
+ client = httpx.Client(transport=MockTransport())
+ with pytest.raises(httpx.ProtocolError):
+ client.get("http://testserver")
def test_httpx_exceptions_exposed() -> None:
|
0.13: some `httpcore` exceptions are missing from top level package
### Checklist
- [x] The bug is reproducible against the latest release and/or `master`.
- [x] There are no similar issues or pull requests to fix it yet.
### Describe the bug
I believe that `HTTPError` is expected to be the base exception for all exceptions that httpx may raise? Since 0.13 this is no longer true, and comments in [`_exceptions.py`](https://github.com/encode/httpx/blob/master/httpx/_exceptions.py#L11) indicate this is a bug. In real-world use I have at least one bit of code that has failed due to this. I could see an argument for trying to catch a more specific error, but I think in this case `HTTPError` was really nice to use.
Even if users should be using more specific exceptions, having a single base exception is an incredibly useful feature for any library, and it'd be great if we could ensure this behavior is set for the upcoming 1.0 release.
### To reproduce
The following code assumes localhost does not have any webserver running on port 80. Running on httpx 0.12 it correctly catches the `HTTPError`; running on 0.13 it does not catch the exception (`httpcore.ConnectError`, which doesn't appear to be properly exported in [`__init__.py`](https://github.com/encode/httpx/blob/master/httpx/__init__.py), though it is aliased in `_exceptions.py`).
```python
import httpx

try:
    httpx.get('http://localhost')
except httpx.HTTPError:
    print('There was an httpx error')
```
### Expected behavior
If `HTTPError` is the base exception for httpx I'd expect catching `HTTPError` to actually catch all errors httpx may raise.
### Actual behavior
Instead of catching `HTTPError` it appears that httpcore errors bubble up. This breaks exception handling that expects `HTTPError` to be the base exception.
### Environment
- OS: macOS 10.15.5
- Python version: 3.8.3
- HTTPX version: 0.13
- Async environment: asyncio
- HTTP proxy: No
- Custom certificates: No
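Worth noting how the eventual fix works: every transport call is wrapped in a `map_exceptions` context manager that re-raises httpcore errors as the most specific matching httpx class. A self-contained sketch of that pattern using toy exception types (not httpx's real hierarchy):
```python
import contextlib


class HTTPError(Exception): ...
class NetworkError(HTTPError): ...
class ConnectError(NetworkError): ...

class CoreNetworkError(Exception): ...
class CoreConnectError(CoreNetworkError): ...


@contextlib.contextmanager
def map_exceptions(mapping):
    try:
        yield
    except Exception as exc:
        mapped = None
        for from_exc, to_exc in mapping.items():
            # Keep the most specific target, e.g. ConnectError over NetworkError.
            if isinstance(exc, from_exc) and (mapped is None or issubclass(to_exc, mapped)):
                mapped = to_exc
        if mapped is None:
            raise
        raise mapped(exc) from None


try:
    with map_exceptions({CoreNetworkError: NetworkError, CoreConnectError: ConnectError}):
        raise CoreConnectError("boom")
except ConnectError as exc:
    print(type(exc).__name__, exc)  # ConnectError boom
```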
|
0.0
|
fab427972b7a5cb64d8dfb43467b84cdf430ff24
|
[
"tests/test_exceptions.py::test_httpcore_all_exceptions_mapped",
"tests/test_exceptions.py::test_httpcore_exception_mapping",
"tests/test_exceptions.py::test_httpx_exceptions_exposed"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-07-02 17:12:43+00:00
|
bsd-3-clause
| 2,112 |
|
encode__httpx-1075
|
diff --git a/httpx/_client.py b/httpx/_client.py
index 1ee9352..d4ec7aa 100644
--- a/httpx/_client.py
+++ b/httpx/_client.py
@@ -80,7 +80,7 @@ class BaseClient:
self.timeout = Timeout(timeout)
self.max_redirects = max_redirects
self.trust_env = trust_env
- self.netrc = NetRCInfo()
+ self._netrc = NetRCInfo()
def _get_proxy_map(
self, proxies: typing.Optional[ProxiesTypes], trust_env: bool,
@@ -269,7 +269,7 @@ class BaseClient:
return BasicAuth(username=username, password=password)
if self.trust_env and "Authorization" not in request.headers:
- credentials = self.netrc.get_credentials(request.url.authority)
+ credentials = self._netrc.get_credentials(request.url.authority)
if credentials is not None:
return BasicAuth(username=credentials[0], password=credentials[1])
diff --git a/httpx/_decoders.py b/httpx/_decoders.py
index 1ea47b0..d1c60fb 100644
--- a/httpx/_decoders.py
+++ b/httpx/_decoders.py
@@ -233,12 +233,18 @@ class LineDecoder:
def decode(self, text: str) -> typing.List[str]:
lines = []
- if text.startswith("\n") and self.buffer and self.buffer[-1] == "\r":
- # Handle the case where we have an "\r\n" split across
- # our previous input, and our new chunk.
- lines.append(self.buffer[:-1] + "\n")
- self.buffer = ""
- text = text[1:]
+ if text and self.buffer and self.buffer[-1] == "\r":
+ if text.startswith("\n"):
+ # Handle the case where we have an "\r\n" split across
+ # our previous input, and our new chunk.
+ lines.append(self.buffer[:-1] + "\n")
+ self.buffer = ""
+ text = text[1:]
+ else:
+ # Handle the case where we have "\r" at the end of our
+ # previous input.
+ lines.append(self.buffer[:-1] + "\n")
+ self.buffer = ""
while text:
num_chars = len(text)
diff --git a/httpx/_models.py b/httpx/_models.py
index 892a959..dca3eff 100644
--- a/httpx/_models.py
+++ b/httpx/_models.py
@@ -87,10 +87,6 @@ class URL:
if not self.host:
raise InvalidURL("No host included in URL.")
- # Allow setting full_path to custom attributes requests
- # like OPTIONS, CONNECT, and forwarding proxy requests.
- self._full_path: typing.Optional[str] = None
-
@property
def scheme(self) -> str:
return self._uri_reference.scheme or ""
@@ -138,17 +134,11 @@ class URL:
@property
def full_path(self) -> str:
- if self._full_path is not None:
- return self._full_path
path = self.path
if self.query:
path += "?" + self.query
return path
- @full_path.setter
- def full_path(self, value: typing.Optional[str]) -> None:
- self._full_path = value
-
@property
def fragment(self) -> str:
return self._uri_reference.fragment or ""
|
encode/httpx
|
064160661929089864938a62ae18dcf0cff75737
|
diff --git a/tests/models/test_url.py b/tests/models/test_url.py
index f9a568a..7910a8e 100644
--- a/tests/models/test_url.py
+++ b/tests/models/test_url.py
@@ -177,13 +177,6 @@ def test_url_set():
assert all(url in urls for url in url_set)
-def test_url_full_path_setter():
- url = URL("http://example.org")
-
- url.full_path = "http://example.net"
- assert url.full_path == "http://example.net"
-
-
def test_origin_from_url_string():
origin = Origin("https://example.com")
assert origin.scheme == "https"
diff --git a/tests/test_decoders.py b/tests/test_decoders.py
index 89c545b..6b79931 100644
--- a/tests/test_decoders.py
+++ b/tests/test_decoders.py
@@ -247,14 +247,13 @@ def test_line_decoder_cr():
assert decoder.flush() == ["c\n"]
# Issue #1033
- # TODO: This seems like another bug; fix expectations and results.
decoder = LineDecoder()
assert decoder.decode("") == []
assert decoder.decode("12345\r") == []
- assert decoder.decode("foo ") == []
+ assert decoder.decode("foo ") == ["12345\n"]
assert decoder.decode("bar ") == []
assert decoder.decode("baz\r") == []
- assert decoder.flush() == ["12345\rfoo bar baz\n"]
+ assert decoder.flush() == ["foo bar baz\n"]
def test_line_decoder_crnl():
|
aiter_lines() doesn't return full lines that span multiple chunks
<https://gist.github.com/scr-oath/aa76d200222a0409d09a0d6feb1a13e2> shows an example setup using cherry.py as the server, which just outputs two lines. The JSON is big enough to be sent in two chunks; httpx's aiter_lines() gets confused and yields data from the middle of the JSON line. It seems to skip the starting part, which was most likely sent in an earlier chunk without a newline.
### test-httpx.py
```python
import asyncio
import json

import httpx


class TestHttpx:
    def __init__(self):
        pass

    async def __call__(self):
        http_client = httpx.AsyncClient()
        async with http_client.stream(method="GET", url='http://localhost:8080/lines') as response:
            is_message = True
            async for line in response.aiter_lines():
                is_message = not is_message
                if is_message:
                    message = json.loads(line)
                    print(message)


def main():
    test_httpx = TestHttpx()
    asyncio.run(test_httpx())


if __name__ == '__main__':
    main()
```
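Beyond re-fixing the scenario above, this patch also settles the `\r`-at-chunk-boundary case that #1034's tests had left as a TODO: a trailing `\r` is now flushed as a newline as soon as the next chunk proves it isn't part of `\r\n`. A sketch of the corrected expectations, taken from the updated test:
```python
from httpx._decoders import LineDecoder

decoder = LineDecoder()
decoder.decode("12345\r")      # held back: could still turn out to be "\r\n"
print(decoder.decode("foo "))  # ['12345\n'] -- the lone "\r" is resolved now
decoder.decode("bar ")
decoder.decode("baz\r")
print(decoder.flush())         # ['foo bar baz\n']
```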
|
0.0
|
064160661929089864938a62ae18dcf0cff75737
|
[
"tests/test_decoders.py::test_line_decoder_cr"
] |
[
"tests/models/test_url.py::test_idna_url[http_with_port]",
"tests/models/test_url.py::test_idna_url[unicode_tr46_compat]",
"tests/models/test_url.py::test_idna_url[https_without_port]",
"tests/models/test_url.py::test_idna_url[https_with_port]",
"tests/models/test_url.py::test_idna_url[http_with_custom_port]",
"tests/models/test_url.py::test_idna_url[https_with_custom_port]",
"tests/models/test_url.py::test_url",
"tests/models/test_url.py::test_url_eq_str",
"tests/models/test_url.py::test_url_params",
"tests/models/test_url.py::test_url_join",
"tests/models/test_url.py::test_url_join_rfc3986",
"tests/models/test_url.py::test_url_set",
"tests/models/test_url.py::test_origin_from_url_string",
"tests/models/test_url.py::test_origin_repr",
"tests/models/test_url.py::test_origin_equal",
"tests/models/test_url.py::test_url_copywith_for_authority",
"tests/test_decoders.py::test_deflate",
"tests/test_decoders.py::test_zlib",
"tests/test_decoders.py::test_gzip",
"tests/test_decoders.py::test_brotli",
"tests/test_decoders.py::test_multi",
"tests/test_decoders.py::test_multi_with_identity",
"tests/test_decoders.py::test_streaming",
"tests/test_decoders.py::test_empty_content[deflate]",
"tests/test_decoders.py::test_empty_content[gzip]",
"tests/test_decoders.py::test_empty_content[br]",
"tests/test_decoders.py::test_empty_content[identity]",
"tests/test_decoders.py::test_decoders_empty_cases[BrotliDecoder]",
"tests/test_decoders.py::test_decoders_empty_cases[DeflateDecoder]",
"tests/test_decoders.py::test_decoders_empty_cases[GZipDecoder]",
"tests/test_decoders.py::test_decoders_empty_cases[IdentityDecoder]",
"tests/test_decoders.py::test_decoding_errors[deflate]",
"tests/test_decoders.py::test_decoding_errors[gzip]",
"tests/test_decoders.py::test_decoding_errors[br]",
"tests/test_decoders.py::test_text_decoder[data0-ascii]",
"tests/test_decoders.py::test_text_decoder[data1-utf-8]",
"tests/test_decoders.py::test_text_decoder[data2-shift-jis]",
"tests/test_decoders.py::test_text_decoder[data3-shift-jis]",
"tests/test_decoders.py::test_text_decoder[data4-MacCyrillic]",
"tests/test_decoders.py::test_text_decoder[data5-euc-jp]",
"tests/test_decoders.py::test_text_decoder_known_encoding",
"tests/test_decoders.py::test_text_decoder_empty_cases",
"tests/test_decoders.py::test_line_decoder_nl",
"tests/test_decoders.py::test_line_decoder_crnl",
"tests/test_decoders.py::test_invalid_content_encoding_header"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-07-21 09:30:50+00:00
|
bsd-3-clause
| 2,113 |
|
encode__httpx-139
|
diff --git a/httpx/models.py b/httpx/models.py
index 710c030..9cbdc25 100644
--- a/httpx/models.py
+++ b/httpx/models.py
@@ -204,7 +204,7 @@ class URL:
return hash(str(self))
def __eq__(self, other: typing.Any) -> bool:
- return isinstance(other, URL) and str(self) == str(other)
+ return isinstance(other, (URL, str)) and str(self) == str(other)
def __str__(self) -> str:
return self.components.unsplit()
|
encode/httpx
|
37df46a83b93a0f40f2b8e10282f6f038dd908f6
|
diff --git a/tests/models/test_url.py b/tests/models/test_url.py
index 5f5208c..70089e0 100644
--- a/tests/models/test_url.py
+++ b/tests/models/test_url.py
@@ -25,6 +25,12 @@ def test_url():
assert new.scheme == "http"
+def test_url_eq_str():
+ url = URL("https://example.org:123/path/to/somewhere?abc=123#anchor")
+ assert url == "https://example.org:123/path/to/somewhere?abc=123#anchor"
+ assert str(url) == url
+
+
def test_url__params():
url = URL("https://example.org:123/path/to/somewhere", params={"a": "123"})
assert str(url) == "https://example.org:123/path/to/somewhere?a=123"
|
API design question - `Response.url`
Currently our `Response.url` attribute exposes a `URL` instance.
This is a breaking change from the requests API where it just exposes a plain string.
It's feasible that we should instead only be exposing plain string URLs, in order to aim for drop-in replacement API compatibility w/ requests, *and* in order to keep the API surface area low.
Options here are:
* Expose `request.url` as a URL instance. (Richer information, URL class is also useful in its own right.)
* Expose `request.url` as a str. (Better API compat. Lower API surface area to maintain.)
* Expose `request.url` as a str, and `request.urlinfo` as a URL instance. (Better API compat. High API surface area.)
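The merged patch effectively splits the difference between these options: `url` stays a `URL` instance, but compares equal to its plain-string form, so requests-style string comparisons keep working. A sketch mirroring the new test:
```python
from httpx import URL

url = URL("https://example.org:123/path/to/somewhere?abc=123#anchor")
assert url == "https://example.org:123/path/to/somewhere?abc=123#anchor"
assert str(url) == url  # the reflected comparison holds too
```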
|
0.0
|
37df46a83b93a0f40f2b8e10282f6f038dd908f6
|
[
"tests/models/test_url.py::test_url_eq_str"
] |
[
"tests/models/test_url.py::test_idna_url",
"tests/models/test_url.py::test_url",
"tests/models/test_url.py::test_url__params",
"tests/models/test_url.py::test_url_set"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2019-07-24 15:22:02+00:00
|
bsd-3-clause
| 2,114 |
|
encode__httpx-161
|
diff --git a/httpx/models.py b/httpx/models.py
index df5d071..32e412f 100644
--- a/httpx/models.py
+++ b/httpx/models.py
@@ -89,16 +89,10 @@ class URL:
params: QueryParamTypes = None,
) -> None:
if isinstance(url, str):
- self._uri_reference = rfc3986.api.uri_reference(url)
+ self._uri_reference = rfc3986.api.iri_reference(url).encode()
else:
self._uri_reference = url._uri_reference
- # Handle IDNA domain names.
- if self._uri_reference.authority:
- idna_authority = self._uri_reference.authority.encode("idna").decode("ascii")
- if idna_authority != self._uri_reference.authority:
- self._uri_reference = self._uri_reference.copy_with(authority=idna_authority)
-
# Normalize scheme and domain name.
if self.is_absolute_url:
self._uri_reference = self._uri_reference.normalize()
|
encode/httpx
|
66754ad0c58d61d34489294530351aa4b17e217f
|
diff --git a/tests/models/test_url.py b/tests/models/test_url.py
index 7c865f5..a556ed8 100644
--- a/tests/models/test_url.py
+++ b/tests/models/test_url.py
@@ -1,13 +1,65 @@
import pytest
+import rfc3986
from httpx import URL
from httpx.exceptions import InvalidURL
-def test_idna_url():
- url = URL("http://中国.icom.museum:80/")
- assert url == URL("http://xn--fiqs8s.icom.museum:80/")
- assert url.host == "xn--fiqs8s.icom.museum"
[email protected](
+ "given,idna,host,scheme,port",
+ [
+ (
+ "http://中国.icom.museum:80/",
+ "http://xn--fiqs8s.icom.museum:80/",
+ "xn--fiqs8s.icom.museum",
+ "http",
+ 80,
+ ),
+ (
+ "http://Königsgäßchen.de",
+ "http://xn--knigsgchen-b4a3dun.de",
+ "xn--knigsgchen-b4a3dun.de",
+ "http",
+ 80,
+ ),
+ ("https://faß.de", "https://xn--fa-hia.de", "xn--fa-hia.de", "https", 443),
+ (
+ "https://βόλος.com:443",
+ "https://xn--nxasmm1c.com:443",
+ "xn--nxasmm1c.com",
+ "https",
+ 443,
+ ),
+ (
+ "http://ශ්රී.com:444",
+ "http://xn--10cl1a0b660p.com:444",
+ "xn--10cl1a0b660p.com",
+ "http",
+ 444,
+ ),
+ (
+ "https://نامهای.com:4433",
+ "https://xn--mgba3gch31f060k.com:4433",
+ "xn--mgba3gch31f060k.com",
+ "https",
+ 4433,
+ ),
+ ],
+ ids=[
+ "http_with_port",
+ "unicode_tr46_compat",
+ "https_without_port",
+ "https_with_port",
+ "http_with_custom_port",
+ "https_with_custom_port",
+ ],
+)
+def test_idna_url(given, idna, host, scheme, port):
+ url = URL(given)
+ assert url == URL(idna)
+ assert url.host == host
+ assert url.scheme == scheme
+ assert url.port == port
def test_url():
|
Use IDNA 2008 instead of IDNA 2003
Using `str.encode("idna")` uses IDNA 2003 which isn't recommended for modern use. We should be using IDNA 2008 provided by the `idna` module (which is a dependency of `httpx` but I don't think we're using it anywhere?)
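The classic divergence is German ß: the stdlib codec (IDNA 2003) case-folds it to "ss", while the `idna` package (IDNA 2008) preserves it and punycodes, which is what the `https://faß.de` test case above asserts. A quick illustration, assuming the `idna` package is installed:

```python
import idna

"faß.de".encode("idna")  # IDNA 2003 via the stdlib codec -> b'fass.de'
idna.encode("faß.de")    # IDNA 2008 via the idna package -> b'xn--fa-hia.de'
```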
|
0.0
|
66754ad0c58d61d34489294530351aa4b17e217f
|
[
"tests/models/test_url.py::test_idna_url[unicode_tr46_compat]",
"tests/models/test_url.py::test_idna_url[https_without_port]",
"tests/models/test_url.py::test_idna_url[https_with_port]",
"tests/models/test_url.py::test_idna_url[http_with_custom_port]",
"tests/models/test_url.py::test_idna_url[https_with_custom_port]"
] |
[
"tests/models/test_url.py::test_idna_url[http_with_port]",
"tests/models/test_url.py::test_url",
"tests/models/test_url.py::test_url_eq_str",
"tests/models/test_url.py::test_url_params",
"tests/models/test_url.py::test_url_join",
"tests/models/test_url.py::test_url_join_rfc3986",
"tests/models/test_url.py::test_url_set",
"tests/models/test_url.py::test_hsts_preload_converted_to_https"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2019-07-27 23:07:27+00:00
|
bsd-3-clause
| 2,115 |
|
encode__httpx-175
|
diff --git a/httpx/__init__.py b/httpx/__init__.py
index 8de5276..fd17441 100644
--- a/httpx/__init__.py
+++ b/httpx/__init__.py
@@ -34,6 +34,8 @@ from .interfaces import (
AsyncDispatcher,
BaseReader,
BaseWriter,
+ BaseBackgroundManager,
+ BasePoolSemaphore,
ConcurrencyBackend,
Dispatcher,
Protocol,
diff --git a/httpx/models.py b/httpx/models.py
index f98447c..df5d071 100644
--- a/httpx/models.py
+++ b/httpx/models.py
@@ -34,14 +34,17 @@ from .utils import (
is_known_encoding,
normalize_header_key,
normalize_header_value,
+ str_query_param
)
+PrimitiveData = typing.Union[str, int, float, bool, type(None)]
+
URLTypes = typing.Union["URL", str]
QueryParamTypes = typing.Union[
"QueryParams",
- typing.Mapping[str, str],
- typing.List[typing.Tuple[typing.Any, typing.Any]],
+ typing.Mapping[str, PrimitiveData],
+ typing.List[typing.Tuple[str, PrimitiveData]],
str,
]
@@ -268,8 +271,8 @@ class QueryParams(typing.Mapping[str, str]):
else:
items = value.items() # type: ignore
- self._list = [(str(k), str(v)) for k, v in items]
- self._dict = {str(k): str(v) for k, v in items}
+ self._list = [(str(k), str_query_param(v)) for k, v in items]
+ self._dict = {str(k): str_query_param(v) for k, v in items}
def getlist(self, key: typing.Any) -> typing.List[str]:
return [item_value for item_key, item_value in self._list if item_key == key]
diff --git a/httpx/utils.py b/httpx/utils.py
index 3d0d660..e96335f 100644
--- a/httpx/utils.py
+++ b/httpx/utils.py
@@ -20,6 +20,21 @@ def normalize_header_value(value: typing.AnyStr, encoding: str = None) -> bytes:
return value.encode(encoding or "ascii")
+def str_query_param(value: typing.Union[str, int, float, bool, type(None)]) -> str:
+ """
+ Coerce a primitive data type into a string value for query params.
+
+ Note that we prefer JSON-style 'true'/'false' for boolean values here.
+ """
+ if value is True:
+ return "true"
+ elif value is False:
+ return "false"
+ elif value is None:
+ return ""
+ return str(value)
+
+
def is_known_encoding(encoding: str) -> bool:
"""
Return `True` if `encoding` is a known codec.
|
encode/httpx
|
db6731a3d2dc882fc9bd9b9791544a2fba2cb559
|
diff --git a/tests/models/test_queryparams.py b/tests/models/test_queryparams.py
index 983b09f..fbb559f 100644
--- a/tests/models/test_queryparams.py
+++ b/tests/models/test_queryparams.py
@@ -31,3 +31,23 @@ def test_queryparams():
q = QueryParams([("a", "123"), ("a", "456")])
assert QueryParams(q) == q
+
+
+def test_queryparam_types():
+ q = QueryParams({"a": True})
+ assert str(q) == "a=true"
+
+ q = QueryParams({"a": False})
+ assert str(q) == "a=false"
+
+ q = QueryParams({"a": ""})
+ assert str(q) == "a="
+
+ q = QueryParams({"a": None})
+ assert str(q) == "a="
+
+ q = QueryParams({"a": 1.23})
+ assert str(q) == "a=1.23"
+
+ q = QueryParams({"a": 123})
+ assert str(q) == "a=123"
|
Change QueryParams(key=None) to not emit a parameter
This one I know is [`requests` behaviour](https://github.com/psf/requests/blob/9a2e5df000691ad28613524ca7f80aa28a94a8d5/requests/models.py#L101):
Take a dict:
```py
>>> from httpx import QueryParams
>>> params = {"thing_one": 1, "thing_two": [1,2,3], "thing_three": None}
>>> QueryParams(params)
QueryParams('thing_one=1&thing_two=%5B1%2C+2%2C+3%5D&thing_three=None')
```
Expected:
```py
QueryParams('thing_one=1&thing_two=%5B1%2C+2%2C+3%5D')
```
Adding `if v is not None` [here](https://github.com/encode/httpx/blob/master/httpx/models.py#L263-L264) will do it.
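Note that the behaviour asserted by the test patch above differs slightly from this ask: `None` serialises to an empty value rather than being dropped, and booleans serialise JSON-style. For instance:

```python
from httpx import QueryParams

str(QueryParams({"flag": True, "limit": 10, "q": None}))
# -> 'flag=true&limit=10&q='
```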
|
0.0
|
db6731a3d2dc882fc9bd9b9791544a2fba2cb559
|
[
"tests/models/test_queryparams.py::test_queryparam_types"
] |
[
"tests/models/test_queryparams.py::test_queryparams"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-07-30 09:18:43+00:00
|
bsd-3-clause
| 2,116 |
|
encode__httpx-199
|
diff --git a/httpx/__init__.py b/httpx/__init__.py
index fd17441..39b44aa 100644
--- a/httpx/__init__.py
+++ b/httpx/__init__.py
@@ -32,10 +32,10 @@ from .exceptions import (
)
from .interfaces import (
AsyncDispatcher,
- BaseReader,
- BaseWriter,
BaseBackgroundManager,
BasePoolSemaphore,
+ BaseReader,
+ BaseWriter,
ConcurrencyBackend,
Dispatcher,
Protocol,
diff --git a/httpx/client.py b/httpx/client.py
index e22d1a9..6b49797 100644
--- a/httpx/client.py
+++ b/httpx/client.py
@@ -20,11 +20,11 @@ from .dispatch.connection_pool import ConnectionPool
from .dispatch.threaded import ThreadedDispatcher
from .dispatch.wsgi import WSGIDispatch
from .exceptions import (
+ HTTPError,
InvalidURL,
RedirectBodyUnavailable,
RedirectLoop,
TooManyRedirects,
- HTTPError,
)
from .interfaces import AsyncDispatcher, ConcurrencyBackend, Dispatcher
from .models import (
@@ -312,6 +312,7 @@ class BaseClient:
headers = Headers(request.headers)
if url.origin != request.url.origin:
del headers["Authorization"]
+ del headers["host"]
return headers
def redirect_content(
diff --git a/httpx/exceptions.py b/httpx/exceptions.py
index 21dc7f4..ddccbf8 100644
--- a/httpx/exceptions.py
+++ b/httpx/exceptions.py
@@ -9,7 +9,9 @@ class HTTPError(Exception):
Base class for Httpx exception
"""
- def __init__(self, request: 'BaseRequest' = None, response: 'BaseResponse' = None, *args) -> None:
+ def __init__(
+ self, request: "BaseRequest" = None, response: "BaseResponse" = None, *args
+ ) -> None:
self.response = response
self.request = request or getattr(self.response, "request", None)
super().__init__(*args)
diff --git a/httpx/models.py b/httpx/models.py
index e7e7a57..f397e78 100644
--- a/httpx/models.py
+++ b/httpx/models.py
@@ -33,7 +33,7 @@ from .utils import (
is_known_encoding,
normalize_header_key,
normalize_header_value,
- str_query_param
+ str_query_param,
)
PrimitiveData = typing.Union[str, int, float, bool, type(None)]
|
encode/httpx
|
ebbc003c55c5a0280208c4d7502bd6038520f29b
|
diff --git a/tests/client/test_headers.py b/tests/client/test_headers.py
index bf82a57..2d17c12 100755
--- a/tests/client/test_headers.py
+++ b/tests/client/test_headers.py
@@ -1,15 +1,16 @@
#!/usr/bin/env python3
import json
+
from httpx import (
- __version__,
- Client,
+ AsyncDispatcher,
AsyncRequest,
AsyncResponse,
- VerifyTypes,
CertTypes,
+ Client,
TimeoutTypes,
- AsyncDispatcher,
+ VerifyTypes,
+ __version__,
)
diff --git a/tests/client/test_redirects.py b/tests/client/test_redirects.py
index ff75475..f9c2075 100644
--- a/tests/client/test_redirects.py
+++ b/tests/client/test_redirects.py
@@ -86,6 +86,17 @@ class MockDispatch(AsyncDispatcher):
body = json.dumps({"body": content.decode()}).encode()
return AsyncResponse(codes.OK, content=body, request=request)
+ elif request.url.path == "/cross_subdomain":
+ if request.headers["host"] != "www.example.org":
+ headers = {"location": "https://www.example.org/cross_subdomain"}
+ return AsyncResponse(
+ codes.PERMANENT_REDIRECT, headers=headers, request=request
+ )
+ else:
+ return AsyncResponse(
+ codes.OK, content=b"Hello, world!", request=request
+ )
+
return AsyncResponse(codes.OK, content=b"Hello, world!", request=request)
@@ -250,3 +261,11 @@ async def test_cannot_redirect_streaming_body():
with pytest.raises(RedirectBodyUnavailable):
await client.post(url, data=streaming_body())
+
+
[email protected]
+async def test_cross_dubdomain_redirect():
+ client = AsyncClient(dispatch=MockDispatch())
+ url = "https://example.com/cross_subdomain"
+ response = await client.get(url)
+ assert response.url == URL("https://www.example.org/cross_subdomain")
|
Erroneous Infinite Redirect Loop detected
Our redirect loop detection, or redirect handling in general, is broken somehow, because this shouldn't error out:
```python
from httpx import Client
client = Client()
r = client.request("GET", "https://www.howsmyssl.com/a/check") # Completes successfully
r = client.request("GET", "https://howsmyssl.com/a/check") # Sends a redirect to 'www.howsmyssl.com' but then errors?
```
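The fix in the diff above is to scrub the stale `Host` header (alongside `Authorization`) whenever a redirect crosses origins; otherwise the redirected request to `www.howsmyssl.com` still carries `Host: howsmyssl.com` and gets redirected again indefinitely:

```python
# Excerpt of the patched redirect_headers logic (see the diff above):
headers = Headers(request.headers)
if url.origin != request.url.origin:
    del headers["Authorization"]
    del headers["host"]
return headers
```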
|
0.0
|
ebbc003c55c5a0280208c4d7502bd6038520f29b
|
[
"tests/client/test_redirects.py::test_cross_dubdomain_redirect"
] |
[
"tests/client/test_headers.py::test_client_header",
"tests/client/test_headers.py::test_header_merge",
"tests/client/test_headers.py::test_header_merge_conflicting_headers",
"tests/client/test_headers.py::test_header_update",
"tests/client/test_redirects.py::test_redirect_301",
"tests/client/test_redirects.py::test_redirect_302",
"tests/client/test_redirects.py::test_redirect_303",
"tests/client/test_redirects.py::test_disallow_redirects",
"tests/client/test_redirects.py::test_relative_redirect",
"tests/client/test_redirects.py::test_no_scheme_redirect",
"tests/client/test_redirects.py::test_fragment_redirect",
"tests/client/test_redirects.py::test_multiple_redirects",
"tests/client/test_redirects.py::test_too_many_redirects",
"tests/client/test_redirects.py::test_too_many_redirects_calling_next",
"tests/client/test_redirects.py::test_redirect_loop",
"tests/client/test_redirects.py::test_redirect_loop_calling_next",
"tests/client/test_redirects.py::test_cross_domain_redirect",
"tests/client/test_redirects.py::test_same_domain_redirect",
"tests/client/test_redirects.py::test_body_redirect",
"tests/client/test_redirects.py::test_cannot_redirect_streaming_body"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-08-13 09:00:50+00:00
|
bsd-3-clause
| 2,117 |
|
encode__httpx-2156
|
diff --git a/httpx/_client.py b/httpx/_client.py
index cec0d63..ce7b92c 100644
--- a/httpx/_client.py
+++ b/httpx/_client.py
@@ -900,7 +900,7 @@ class Client(BaseClient):
return response
- except Exception as exc:
+ except BaseException as exc:
response.close()
raise exc
@@ -932,7 +932,7 @@ class Client(BaseClient):
request = next_request
history.append(response)
- except Exception as exc:
+ except BaseException as exc:
response.close()
raise exc
finally:
@@ -971,7 +971,7 @@ class Client(BaseClient):
response.next_request = request
return response
- except Exception as exc:
+ except BaseException as exc:
response.close()
raise exc
@@ -1604,7 +1604,7 @@ class AsyncClient(BaseClient):
return response
- except Exception as exc: # pragma: no cover
+ except BaseException as exc: # pragma: no cover
await response.aclose()
raise exc
@@ -1636,7 +1636,7 @@ class AsyncClient(BaseClient):
request = next_request
history.append(response)
- except Exception as exc:
+ except BaseException as exc:
await response.aclose()
raise exc
finally:
@@ -1676,7 +1676,7 @@ class AsyncClient(BaseClient):
response.next_request = request
return response
- except Exception as exc:
+ except BaseException as exc:
await response.aclose()
raise exc
|
encode/httpx
|
67c297069f3d6b034069882428e4c1dd303d693c
|
diff --git a/tests/client/test_async_client.py b/tests/client/test_async_client.py
index 219d612..da2387d 100644
--- a/tests/client/test_async_client.py
+++ b/tests/client/test_async_client.py
@@ -324,6 +324,46 @@ async def test_async_mock_transport():
assert response.text == "Hello, world!"
[email protected]("async_environment")
+async def test_cancellation_during_stream():
+ """
+ If any BaseException is raised during streaming the response, then the
+ stream should be closed.
+
+ This includes:
+
+ * `asyncio.CancelledError` (A subclass of BaseException from Python 3.8 onwards.)
+ * `trio.Cancelled`
+ * `KeyboardInterrupt`
+ * `SystemExit`
+
+ See https://github.com/encode/httpx/issues/2139
+ """
+ stream_was_closed = False
+
+ def response_with_cancel_during_stream(request):
+ class CancelledStream(httpx.AsyncByteStream):
+ async def __aiter__(self) -> typing.AsyncIterator[bytes]:
+ yield b"Hello"
+ raise KeyboardInterrupt()
+ yield b", world" # pragma: nocover
+
+ async def aclose(self) -> None:
+ nonlocal stream_was_closed
+ stream_was_closed = True
+
+ return httpx.Response(
+ 200, headers={"Content-Length": "12"}, stream=CancelledStream()
+ )
+
+ transport = httpx.MockTransport(response_with_cancel_during_stream)
+
+ async with httpx.AsyncClient(transport=transport) as client:
+ with pytest.raises(KeyboardInterrupt):
+ await client.get("https://www.example.com")
+ assert stream_was_closed
+
+
@pytest.mark.usefixtures("async_environment")
async def test_server_extensions(server):
url = server.url
|
Response not closed when timeout/cancel reading response stream, which causes RuntimeError: The connection pool was closed while 1 HTTP requests/responses were still in-flight.
The response is not closed when a timeout or cancellation occurs while reading the response stream, which causes `RuntimeError: The connection pool was closed while 1 HTTP requests/responses were still in-flight.`
#### Reproduce:
```python
import asyncio
import httpx
async def main():
url = "https://httpbin.org/drip?delay=0&duration=5"
# Or use local httpbin server:
# docker run -ti -p 8088:80 kennethreitz/httpbin:latest
url = "http://127.0.0.1:8088/drip?delay=0&duration=5"
async with httpx.AsyncClient(timeout=10, trust_env=False) as client:
try:
coro = client.get(url)
response = await asyncio.wait_for(coro, 3)
except Exception as ex:
print(type(ex), repr(ex))
else:
print(response)
if __name__ == "__main__":
asyncio.run(main())
```
#### Output:
```python
<class 'asyncio.exceptions.TimeoutError'> TimeoutError()
Traceback (most recent call last):
File "/Users/kk/dev/zhuwen/httpx/http_error2.py", line 22, in <module>
asyncio.run(main())
File "/Users/kk/.pyenv/versions/3.9.7/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/Users/kk/.pyenv/versions/3.9.7/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/asyncio/base_events.py", line 642, in run_until_complete
return future.result()
File "/Users/kk/dev/zhuwen/httpx/http_error2.py", line 18, in main
print(response)
File "/Users/kk/dev/zhuwen/httpx/httpx/_client.py", line 1978, in __aexit__
await self._transport.__aexit__(exc_type, exc_value, traceback)
File "/Users/kk/dev/zhuwen/httpx/httpx/_transports/default.py", line 332, in __aexit__
await self._pool.__aexit__(exc_type, exc_value, traceback)
File "/Users/kk/.pyenv/versions/3.9.7/envs/httpx/lib/python3.9/site-packages/httpcore/_async/connection_pool.py", line 326, in __aexit__
await self.aclose()
File "/Users/kk/.pyenv/versions/3.9.7/envs/httpx/lib/python3.9/site-packages/httpcore/_async/connection_pool.py", line 312, in aclose
raise RuntimeError(
RuntimeError: The connection pool was closed while 1 HTTP requests/responses were still in-flight.
```
#### Root cause:
This is because _client.py uses `except Exception`, which does not catch asyncio's CancelledError, so the except branch is never executed. https://github.com/encode/httpx/blob/master/httpx/_client.py#L1604
```python
try:
if not stream:
await response.aread() # will raise CancelledError
return response
except Exception as exc: # pragma: no cover
await response.aclose() # will not be executed
raise exc
```
Changing all `except Exception` clauses in _client.py to `except BaseException` resolves the issue.
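A standalone demonstration of why the `except Exception` handler never runs: on Python 3.8+ `asyncio.CancelledError` derives from `BaseException`, so cancellation bypasses it entirely.

```python
import asyncio

async def worker():
    try:
        await asyncio.sleep(10)
    except Exception:
        # Never reached on cancellation: CancelledError is a BaseException,
        # not an Exception, on Python 3.8+.
        print("cleanup ran")
        raise

async def main():
    task = asyncio.create_task(worker())
    await asyncio.sleep(0)  # let the task start and block on sleep()
    task.cancel()
    try:
        await task
    except asyncio.CancelledError:
        print("task cancelled; the Exception handler never fired")

asyncio.run(main())
```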
|
0.0
|
67c297069f3d6b034069882428e4c1dd303d693c
|
[
"tests/client/test_async_client.py::test_cancellation_during_stream[asyncio]",
"tests/client/test_async_client.py::test_cancellation_during_stream[trio]"
] |
[
"tests/client/test_async_client.py::test_get[asyncio]",
"tests/client/test_async_client.py::test_get[trio]",
"tests/client/test_async_client.py::test_get_invalid_url[asyncio-scheme-not-http(s)]",
"tests/client/test_async_client.py::test_get_invalid_url[asyncio-no-scheme]",
"tests/client/test_async_client.py::test_get_invalid_url[asyncio-no-host]",
"tests/client/test_async_client.py::test_get_invalid_url[trio-scheme-not-http(s)]",
"tests/client/test_async_client.py::test_get_invalid_url[trio-no-scheme]",
"tests/client/test_async_client.py::test_get_invalid_url[trio-no-host]",
"tests/client/test_async_client.py::test_build_request[asyncio]",
"tests/client/test_async_client.py::test_build_request[trio]",
"tests/client/test_async_client.py::test_post[asyncio]",
"tests/client/test_async_client.py::test_post[trio]",
"tests/client/test_async_client.py::test_post_json[asyncio]",
"tests/client/test_async_client.py::test_post_json[trio]",
"tests/client/test_async_client.py::test_stream_response[asyncio]",
"tests/client/test_async_client.py::test_stream_response[trio]",
"tests/client/test_async_client.py::test_access_content_stream_response[asyncio]",
"tests/client/test_async_client.py::test_access_content_stream_response[trio]",
"tests/client/test_async_client.py::test_stream_request[asyncio]",
"tests/client/test_async_client.py::test_stream_request[trio]",
"tests/client/test_async_client.py::test_cannot_stream_sync_request[asyncio]",
"tests/client/test_async_client.py::test_cannot_stream_sync_request[trio]",
"tests/client/test_async_client.py::test_raise_for_status[asyncio]",
"tests/client/test_async_client.py::test_raise_for_status[trio]",
"tests/client/test_async_client.py::test_options[asyncio]",
"tests/client/test_async_client.py::test_options[trio]",
"tests/client/test_async_client.py::test_head[asyncio]",
"tests/client/test_async_client.py::test_head[trio]",
"tests/client/test_async_client.py::test_put[asyncio]",
"tests/client/test_async_client.py::test_put[trio]",
"tests/client/test_async_client.py::test_patch[asyncio]",
"tests/client/test_async_client.py::test_patch[trio]",
"tests/client/test_async_client.py::test_delete[asyncio]",
"tests/client/test_async_client.py::test_delete[trio]",
"tests/client/test_async_client.py::test_100_continue[asyncio]",
"tests/client/test_async_client.py::test_100_continue[trio]",
"tests/client/test_async_client.py::test_context_managed_transport[asyncio]",
"tests/client/test_async_client.py::test_context_managed_transport[trio]",
"tests/client/test_async_client.py::test_context_managed_transport_and_mount[asyncio]",
"tests/client/test_async_client.py::test_context_managed_transport_and_mount[trio]",
"tests/client/test_async_client.py::test_client_closed_state_using_implicit_open[asyncio]",
"tests/client/test_async_client.py::test_client_closed_state_using_implicit_open[trio]",
"tests/client/test_async_client.py::test_client_closed_state_using_with_block[asyncio]",
"tests/client/test_async_client.py::test_client_closed_state_using_with_block[trio]",
"tests/client/test_async_client.py::test_mounted_transport[asyncio]",
"tests/client/test_async_client.py::test_mounted_transport[trio]",
"tests/client/test_async_client.py::test_async_mock_transport[asyncio]",
"tests/client/test_async_client.py::test_async_mock_transport[trio]",
"tests/client/test_async_client.py::test_server_extensions[asyncio]",
"tests/client/test_async_client.py::test_server_extensions[trio]"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-03-31 10:44:40+00:00
|
bsd-3-clause
| 2,118 |
|
encode__httpx-2659
|
diff --git a/httpx/_utils.py b/httpx/_utils.py
index c55d33a..2568fdc 100644
--- a/httpx/_utils.py
+++ b/httpx/_utils.py
@@ -1,5 +1,6 @@
import codecs
import email.message
+import ipaddress
import mimetypes
import os
import re
@@ -259,7 +260,16 @@ def get_environment_proxies() -> typing.Dict[str, typing.Optional[str]]:
# NO_PROXY=google.com is marked as "all://*google.com,
# which disables "www.google.com" and "google.com".
# (But not "wwwgoogle.com")
- mounts[f"all://*{hostname}"] = None
+ # NO_PROXY can include domains, IPv6, IPv4 addresses and "localhost"
+ # NO_PROXY=example.com,::1,localhost,192.168.0.0/16
+ if is_ipv4_hostname(hostname):
+ mounts[f"all://{hostname}"] = None
+ elif is_ipv6_hostname(hostname):
+ mounts[f"all://[{hostname}]"] = None
+ elif hostname.lower() == "localhost":
+ mounts[f"all://{hostname}"] = None
+ else:
+ mounts[f"all://*{hostname}"] = None
return mounts
@@ -449,3 +459,19 @@ class URLPattern:
def __eq__(self, other: typing.Any) -> bool:
return isinstance(other, URLPattern) and self.pattern == other.pattern
+
+
+def is_ipv4_hostname(hostname: str) -> bool:
+ try:
+ ipaddress.IPv4Address(hostname.split("/")[0])
+ except:
+ return False
+ return True
+
+
+def is_ipv6_hostname(hostname: str) -> bool:
+ try:
+ ipaddress.IPv6Address(hostname.split("/")[0])
+ except:
+ return False
+ return True
|
encode/httpx
|
7d7c4f15b8784e4a550d974139acfa64193b32c2
|
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 3eaf245..ab0fcbe 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -185,6 +185,12 @@ def test_get_ssl_cert_file():
),
({"all_proxy": "http://127.0.0.1"}, {"all://": "http://127.0.0.1"}),
({"TRAVIS_APT_PROXY": "http://127.0.0.1"}, {}),
+ ({"no_proxy": "127.0.0.1"}, {"all://127.0.0.1": None}),
+ ({"no_proxy": "192.168.0.0/16"}, {"all://192.168.0.0/16": None}),
+ ({"no_proxy": "::1"}, {"all://[::1]": None}),
+ ({"no_proxy": "localhost"}, {"all://localhost": None}),
+ ({"no_proxy": "github.com"}, {"all://*github.com": None}),
+ ({"no_proxy": ".github.com"}, {"all://*.github.com": None}),
],
)
def test_get_environment_proxies(environment, proxies):
|
The `get_environment_proxies` function in _utils.py does not support IPv4 or IPv6 addresses correctly
Hi, I encountered an error when my environment's `no_proxy` includes an IPv6 address like `::1`. It is wrongly transformed into `all://*::1` and causes a urlparse error, since _urlparse.py parses the `:1` as a port.

The `get_environment_proxies` function in **_utils.py** is responsible for parsing and mounting proxy info from system environment.
https://github.com/encode/httpx/blob/4b5a92e88e03443c2619f0905d756b159f9f0222/httpx/_utils.py#L229-L264
For the `no_proxy` env var, according to [CURLOPT_NOPROXY explained](https://curl.se/libcurl/c/CURLOPT_NOPROXY.html), it should support domains, IPv4 and IPv6 addresses, and `localhost`. However, the current `get_environment_proxies` implementation only handles domains correctly, as it always adds a wildcard `*` in front of the hostname.
To fix this issue, I looked into this repo and suggest handling `no_proxy` hostnames as domains, IPv4 addresses, IPv6 addresses and `localhost` separately. I have updated the `get_environment_proxies` function in **_utils.py** and tested it.
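With the patch applied, a mixed `NO_PROXY` value would mount as follows (matching the new test cases above):

```python
# NO_PROXY=example.com,::1,localhost,192.168.0.0/16 produces:
mounts = {
    "all://*example.com": None,    # domain: wildcard prefix
    "all://[::1]": None,           # IPv6: bracketed, no wildcard
    "all://localhost": None,       # localhost: exact match
    "all://192.168.0.0/16": None,  # IPv4 network: no wildcard
}
```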
Refer to the PR: #2659
Replies and discussions are welcomed!
|
0.0
|
7d7c4f15b8784e4a550d974139acfa64193b32c2
|
[
"tests/test_utils.py::test_get_environment_proxies[environment5-proxies5]",
"tests/test_utils.py::test_get_environment_proxies[environment6-proxies6]",
"tests/test_utils.py::test_get_environment_proxies[environment7-proxies7]",
"tests/test_utils.py::test_get_environment_proxies[environment8-proxies8]"
] |
[
"tests/test_utils.py::test_encoded[utf-32]",
"tests/test_utils.py::test_encoded[utf-8-sig]",
"tests/test_utils.py::test_encoded[utf-16]",
"tests/test_utils.py::test_encoded[utf-8]",
"tests/test_utils.py::test_encoded[utf-16-be]",
"tests/test_utils.py::test_encoded[utf-16-le]",
"tests/test_utils.py::test_encoded[utf-32-be]",
"tests/test_utils.py::test_encoded[utf-32-le]",
"tests/test_utils.py::test_bad_utf_like_encoding",
"tests/test_utils.py::test_guess_by_bom[utf-16-be-utf-16]",
"tests/test_utils.py::test_guess_by_bom[utf-16-le-utf-16]",
"tests/test_utils.py::test_guess_by_bom[utf-32-be-utf-32]",
"tests/test_utils.py::test_guess_by_bom[utf-32-le-utf-32]",
"tests/test_utils.py::test_parse_header_links[<http:/.../front.jpeg>;",
"tests/test_utils.py::test_parse_header_links[<http:/.../front.jpeg>-expected1]",
"tests/test_utils.py::test_parse_header_links[<http:/.../front.jpeg>;-expected2]",
"tests/test_utils.py::test_parse_header_links[-expected4]",
"tests/test_utils.py::test_logging_request",
"tests/test_utils.py::test_logging_redirect_chain",
"tests/test_utils.py::test_logging_ssl",
"tests/test_utils.py::test_get_ssl_cert_file",
"tests/test_utils.py::test_get_environment_proxies[environment0-proxies0]",
"tests/test_utils.py::test_get_environment_proxies[environment1-proxies1]",
"tests/test_utils.py::test_get_environment_proxies[environment2-proxies2]",
"tests/test_utils.py::test_get_environment_proxies[environment3-proxies3]",
"tests/test_utils.py::test_get_environment_proxies[environment4-proxies4]",
"tests/test_utils.py::test_get_environment_proxies[environment9-proxies9]",
"tests/test_utils.py::test_get_environment_proxies[environment10-proxies10]",
"tests/test_utils.py::test_obfuscate_sensitive_headers[headers0-output0]",
"tests/test_utils.py::test_obfuscate_sensitive_headers[headers1-output1]",
"tests/test_utils.py::test_obfuscate_sensitive_headers[headers2-output2]",
"tests/test_utils.py::test_same_origin",
"tests/test_utils.py::test_not_same_origin",
"tests/test_utils.py::test_is_https_redirect",
"tests/test_utils.py::test_is_not_https_redirect",
"tests/test_utils.py::test_is_not_https_redirect_if_not_default_ports",
"tests/test_utils.py::test_url_matches[http://example.com-http://example.com-True]",
"tests/test_utils.py::test_url_matches[http://example.com-https://example.com-False]",
"tests/test_utils.py::test_url_matches[http://example.com-http://other.com-False]",
"tests/test_utils.py::test_url_matches[http://example.com:123-http://example.com:123-True]",
"tests/test_utils.py::test_url_matches[http://example.com:123-http://example.com:456-False]",
"tests/test_utils.py::test_url_matches[http://example.com:123-http://example.com-False]",
"tests/test_utils.py::test_url_matches[all://example.com-http://example.com-True]",
"tests/test_utils.py::test_url_matches[all://example.com-https://example.com-True]",
"tests/test_utils.py::test_url_matches[http://-http://example.com-True]",
"tests/test_utils.py::test_url_matches[http://-https://example.com-False]",
"tests/test_utils.py::test_url_matches[all://-https://example.com:123-True]",
"tests/test_utils.py::test_url_matches[-https://example.com:123-True]",
"tests/test_utils.py::test_pattern_priority"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_media"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-04-14 09:34:36+00:00
|
bsd-3-clause
| 2,119 |
|
encode__httpx-575
|
diff --git a/httpx/decoders.py b/httpx/decoders.py
index 3ae822b..22c6104 100644
--- a/httpx/decoders.py
+++ b/httpx/decoders.py
@@ -211,6 +211,67 @@ class TextDecoder:
return result
+class LineDecoder:
+ """
+ Handles incrementally reading lines from text.
+
+ Uses universal line decoding, supporting any of `\n`, `\r`, or `\r\n`
+ as line endings, normalizing to `\n`.
+ """
+
+ def __init__(self) -> None:
+ self.buffer = ""
+
+ def decode(self, text: str) -> typing.List[str]:
+ lines = []
+
+ if text.startswith("\n") and self.buffer and self.buffer[-1] == "\r":
+ # Handle the case where we have an "\r\n" split across
+ # our previous input, and our new chunk.
+ lines.append(self.buffer[:-1] + "\n")
+ self.buffer = ""
+ text = text[1:]
+
+ while text:
+ num_chars = len(text)
+ for idx in range(num_chars):
+ char = text[idx]
+ next_char = None if idx + 1 == num_chars else text[idx + 1]
+ if char == "\n":
+ lines.append(self.buffer + text[: idx + 1])
+ self.buffer = ""
+ text = text[idx + 1 :]
+ break
+ elif char == "\r" and next_char == "\n":
+ lines.append(self.buffer + text[:idx] + "\n")
+ self.buffer = ""
+ text = text[idx + 2 :]
+ break
+ elif char == "\r" and next_char is not None:
+ lines.append(self.buffer + text[:idx] + "\n")
+ self.buffer = ""
+ text = text[idx + 1 :]
+ break
+ elif next_char is None:
+ self.buffer = text
+ text = ""
+ break
+
+ return lines
+
+ def flush(self) -> typing.List[str]:
+ if self.buffer.endswith("\r"):
+ # Handle the case where we had a trailing '\r', which could have
+ # been a '\r\n' pair.
+ lines = [self.buffer[:-1] + "\n"]
+ elif self.buffer:
+ lines = [self.buffer]
+ else:
+ lines = []
+ self.buffer = ""
+ return lines
+
+
SUPPORTED_DECODERS = {
"identity": IdentityDecoder,
"gzip": GZipDecoder,
diff --git a/httpx/models.py b/httpx/models.py
index 5469efc..33cfb83 100644
--- a/httpx/models.py
+++ b/httpx/models.py
@@ -17,6 +17,7 @@ from .decoders import (
SUPPORTED_DECODERS,
Decoder,
IdentityDecoder,
+ LineDecoder,
MultiDecoder,
TextDecoder,
)
@@ -936,6 +937,14 @@ class Response:
yield decoder.decode(chunk)
yield decoder.flush()
+ async def stream_lines(self) -> typing.AsyncIterator[str]:
+ decoder = LineDecoder()
+ async for text in self.stream_text():
+ for line in decoder.decode(text):
+ yield line
+ for line in decoder.flush():
+ yield line
+
async def raw(self) -> typing.AsyncIterator[bytes]:
"""
A byte-iterator over the raw response content.
|
encode/httpx
|
fdaa01275a1b80e54be4423e579f9a19f7c63d8f
|
diff --git a/tests/models/test_responses.py b/tests/models/test_responses.py
index e7be487..9b60c67 100644
--- a/tests/models/test_responses.py
+++ b/tests/models/test_responses.py
@@ -164,6 +164,18 @@ async def test_stream_text():
assert content == "Hello, world!"
[email protected]
+async def test_stream_lines():
+ response = httpx.Response(200, content=b"Hello,\nworld!")
+
+ await response.read()
+
+ content = []
+ async for line in response.stream_lines():
+ content.append(line)
+ assert content == ["Hello,\n", "world!"]
+
+
@pytest.mark.asyncio
async def test_stream_interface_after_read():
response = httpx.Response(200, content=b"Hello, world!")
diff --git a/tests/test_decoders.py b/tests/test_decoders.py
index 7525239..a599ce0 100644
--- a/tests/test_decoders.py
+++ b/tests/test_decoders.py
@@ -9,6 +9,7 @@ from httpx.decoders import (
DeflateDecoder,
GZipDecoder,
IdentityDecoder,
+ LineDecoder,
TextDecoder,
)
@@ -167,6 +168,48 @@ def test_text_decoder_empty_cases():
assert decoder.flush() == ""
+def test_line_decoder_nl():
+ decoder = LineDecoder()
+ assert decoder.decode("") == []
+ assert decoder.decode("a\n\nb\nc") == ["a\n", "\n", "b\n"]
+ assert decoder.flush() == ["c"]
+
+ decoder = LineDecoder()
+ assert decoder.decode("") == []
+ assert decoder.decode("a\n\nb\nc\n") == ["a\n", "\n", "b\n", "c\n"]
+ assert decoder.flush() == []
+
+
+def test_line_decoder_cr():
+ decoder = LineDecoder()
+ assert decoder.decode("") == []
+ assert decoder.decode("a\r\rb\rc") == ["a\n", "\n", "b\n"]
+ assert decoder.flush() == ["c"]
+
+ decoder = LineDecoder()
+ assert decoder.decode("") == []
+ assert decoder.decode("a\r\rb\rc\r") == ["a\n", "\n", "b\n"]
+ assert decoder.flush() == ["c\n"]
+
+
+def test_line_decoder_crnl():
+ decoder = LineDecoder()
+ assert decoder.decode("") == []
+ assert decoder.decode("a\r\n\r\nb\r\nc") == ["a\n", "\n", "b\n"]
+ assert decoder.flush() == ["c"]
+
+ decoder = LineDecoder()
+ assert decoder.decode("") == []
+ assert decoder.decode("a\r\n\r\nb\r\nc\r\n") == ["a\n", "\n", "b\n", "c\n"]
+ assert decoder.flush() == []
+
+ decoder = LineDecoder()
+ assert decoder.decode("") == []
+ assert decoder.decode("a\r") == []
+ assert decoder.decode("\n\r\nb\r\nc") == ["a\n", "\n", "b\n"]
+ assert decoder.flush() == ["c"]
+
+
def test_invalid_content_encoding_header():
headers = [(b"Content-Encoding", b"invalid-header")]
body = b"test 123"
|
Feature Request: support iterating stream lines
I think it would be better to implement an API for iterating over a stream line by line. This use case is very common (e.g., the Kubernetes watch API); for now we need to write a wrapper ourselves.
```Python
import json
import httpx
class StreamWrapper(object):
def __init__(self, stream):
self._stream = stream
def __iter__(self):
pending = ""
for chunk in self._stream:
chunk = pending + chunk
lines = chunk.splitlines()
if chunk and lines and lines[-1] and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = ""
for line in lines:
yield line
if pending:
yield pending
timeout = httpx.TimeoutConfig(
connect_timeout=5, read_timeout=None, write_timeout=5
)
resp = httpx.get(
"http://127.0.0.1:18081/api/v1/watch/namespaces/default/pods",
stream=True,
timeout=timeout,
)
for chunk in StreamWrapper(resp.stream_text()):
print(json.loads(chunk))
```
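With the `LineDecoder`-backed `Response.stream_lines()` added in the patch above, the wrapper becomes unnecessary; a sketch against the async API of that release:

```python
# Each yielded line is normalized to end in "\n"; json.loads tolerates it.
async for line in response.stream_lines():
    print(json.loads(line))
```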
|
0.0
|
fdaa01275a1b80e54be4423e579f9a19f7c63d8f
|
[
"tests/models/test_responses.py::test_response",
"tests/models/test_responses.py::test_response_repr",
"tests/models/test_responses.py::test_response_content_type_encoding",
"tests/models/test_responses.py::test_response_autodetect_encoding",
"tests/models/test_responses.py::test_response_fallback_to_autodetect",
"tests/models/test_responses.py::test_response_default_text_encoding",
"tests/models/test_responses.py::test_response_default_encoding",
"tests/models/test_responses.py::test_response_non_text_encoding",
"tests/models/test_responses.py::test_response_set_explicit_encoding",
"tests/models/test_responses.py::test_response_force_encoding",
"tests/models/test_responses.py::test_read_response",
"tests/models/test_responses.py::test_raw_interface",
"tests/models/test_responses.py::test_stream_interface",
"tests/models/test_responses.py::test_stream_text",
"tests/models/test_responses.py::test_stream_lines",
"tests/models/test_responses.py::test_stream_interface_after_read",
"tests/models/test_responses.py::test_streaming_response",
"tests/models/test_responses.py::test_cannot_read_after_stream_consumed",
"tests/models/test_responses.py::test_cannot_read_after_response_closed",
"tests/models/test_responses.py::test_unknown_status_code",
"tests/models/test_responses.py::test_json_with_specified_encoding",
"tests/models/test_responses.py::test_json_with_options",
"tests/models/test_responses.py::test_json_without_specified_encoding",
"tests/models/test_responses.py::test_json_without_specified_encoding_decode_error",
"tests/models/test_responses.py::test_link_headers[headers0-expected0]",
"tests/models/test_responses.py::test_link_headers[headers1-expected1]",
"tests/test_decoders.py::test_deflate",
"tests/test_decoders.py::test_gzip",
"tests/test_decoders.py::test_brotli",
"tests/test_decoders.py::test_multi",
"tests/test_decoders.py::test_multi_with_identity",
"tests/test_decoders.py::test_streaming",
"tests/test_decoders.py::test_empty_content[deflate]",
"tests/test_decoders.py::test_empty_content[gzip]",
"tests/test_decoders.py::test_empty_content[br]",
"tests/test_decoders.py::test_empty_content[identity]",
"tests/test_decoders.py::test_decoders_empty_cases[BrotliDecoder]",
"tests/test_decoders.py::test_decoders_empty_cases[DeflateDecoder]",
"tests/test_decoders.py::test_decoders_empty_cases[GZipDecoder]",
"tests/test_decoders.py::test_decoders_empty_cases[IdentityDecoder]",
"tests/test_decoders.py::test_decoding_errors[deflate]",
"tests/test_decoders.py::test_decoding_errors[gzip]",
"tests/test_decoders.py::test_decoding_errors[br]",
"tests/test_decoders.py::test_text_decoder[data0-ascii]",
"tests/test_decoders.py::test_text_decoder[data1-utf-8]",
"tests/test_decoders.py::test_text_decoder[data2-shift-jis]",
"tests/test_decoders.py::test_text_decoder[data3-shift-jis]",
"tests/test_decoders.py::test_text_decoder[data4-MacCyrillic]",
"tests/test_decoders.py::test_text_decoder[data5-euc-jp]",
"tests/test_decoders.py::test_text_decoder_known_encoding",
"tests/test_decoders.py::test_text_decoder_empty_cases",
"tests/test_decoders.py::test_line_decoder_nl",
"tests/test_decoders.py::test_line_decoder_cr",
"tests/test_decoders.py::test_line_decoder_crnl",
"tests/test_decoders.py::test_invalid_content_encoding_header"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-11-30 17:46:40+00:00
|
bsd-3-clause
| 2,120 |
|
encode__httpx-685
|
diff --git a/httpx/auth.py b/httpx/auth.py
index e0ef50c..e412c57 100644
--- a/httpx/auth.py
+++ b/httpx/auth.py
@@ -6,7 +6,7 @@ import typing
from base64 import b64encode
from urllib.request import parse_http_list
-from .exceptions import ProtocolError
+from .exceptions import ProtocolError, RequestBodyUnavailable
from .models import Request, Response
from .utils import to_bytes, to_str, unquote
@@ -104,6 +104,8 @@ class DigestAuth(Auth):
self.password = to_bytes(password)
def __call__(self, request: Request) -> AuthFlow:
+ if not request.stream.can_replay():
+ raise RequestBodyUnavailable("Request body is no longer available.")
response = yield request
if response.status_code != 401 or "www-authenticate" not in response.headers:
|
encode/httpx
|
bc6163c55a75f2e655ff59301ce0a53fa12973ec
|
diff --git a/tests/client/test_auth.py b/tests/client/test_auth.py
index ea6ff8a..34ec77e 100644
--- a/tests/client/test_auth.py
+++ b/tests/client/test_auth.py
@@ -5,7 +5,15 @@ import typing
import pytest
-from httpx import URL, AsyncClient, DigestAuth, ProtocolError, Request, Response
+from httpx import (
+ URL,
+ AsyncClient,
+ DigestAuth,
+ ProtocolError,
+ Request,
+ RequestBodyUnavailable,
+ Response,
+)
from httpx.auth import Auth, AuthFlow
from httpx.config import CertTypes, TimeoutTypes, VerifyTypes
from httpx.dispatch.base import Dispatcher
@@ -442,3 +450,16 @@ async def test_auth_history() -> None:
assert resp2.history == [resp1]
assert len(resp1.history) == 0
+
+
[email protected]
+async def test_digest_auth_unavailable_streaming_body():
+ url = "https://example.org/"
+ auth = DigestAuth(username="tomchristie", password="password123")
+ client = AsyncClient(dispatch=MockDispatch())
+
+ async def streaming_body():
+ yield b"Example request body"
+
+ with pytest.raises(RequestBodyUnavailable):
+ await client.post(url, data=streaming_body(), auth=auth)
|
DigestAuth should raise a clear error if the request cannot be replayed.
Our DigestAuth implementation cannot work with non-replayable requests.
We ought to raise a nice clear error if `request.stream.is_replayable` is not True.
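Concretely, a one-shot generator body is the case this guards against, matching the new test above:

```python
async def streaming_body():
    yield b"Example request body"  # consumed once; cannot be replayed

# With the patch, this now fails fast instead of retrying with a lost body:
#   await client.post(url, data=streaming_body(), auth=DigestAuth(...))
#   -> raises RequestBodyUnavailable
```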
|
0.0
|
bc6163c55a75f2e655ff59301ce0a53fa12973ec
|
[
"tests/client/test_auth.py::test_digest_auth_unavailable_streaming_body"
] |
[
"tests/client/test_auth.py::test_basic_auth",
"tests/client/test_auth.py::test_basic_auth_in_url",
"tests/client/test_auth.py::test_basic_auth_on_session",
"tests/client/test_auth.py::test_custom_auth",
"tests/client/test_auth.py::test_netrc_auth",
"tests/client/test_auth.py::test_auth_header_has_priority_over_netrc",
"tests/client/test_auth.py::test_trust_env_auth",
"tests/client/test_auth.py::test_auth_hidden_url",
"tests/client/test_auth.py::test_auth_hidden_header",
"tests/client/test_auth.py::test_auth_invalid_type",
"tests/client/test_auth.py::test_digest_auth_returns_no_auth_if_no_digest_header_in_response",
"tests/client/test_auth.py::test_digest_auth_200_response_including_digest_auth_header",
"tests/client/test_auth.py::test_digest_auth_401_response_without_digest_auth_header",
"tests/client/test_auth.py::test_digest_auth[MD5-64-32]",
"tests/client/test_auth.py::test_digest_auth[MD5-SESS-64-32]",
"tests/client/test_auth.py::test_digest_auth[SHA-64-40]",
"tests/client/test_auth.py::test_digest_auth[SHA-SESS-64-40]",
"tests/client/test_auth.py::test_digest_auth[SHA-256-64-64]",
"tests/client/test_auth.py::test_digest_auth[SHA-256-SESS-64-64]",
"tests/client/test_auth.py::test_digest_auth[SHA-512-64-128]",
"tests/client/test_auth.py::test_digest_auth[SHA-512-SESS-64-128]",
"tests/client/test_auth.py::test_digest_auth_no_specified_qop",
"tests/client/test_auth.py::test_digest_auth_qop_including_spaces_and_auth_returns_auth[auth,",
"tests/client/test_auth.py::test_digest_auth_qop_including_spaces_and_auth_returns_auth[auth,auth-int]",
"tests/client/test_auth.py::test_digest_auth_qop_including_spaces_and_auth_returns_auth[unknown,auth]",
"tests/client/test_auth.py::test_digest_auth_qop_auth_int_not_implemented",
"tests/client/test_auth.py::test_digest_auth_qop_must_be_auth_or_auth_int",
"tests/client/test_auth.py::test_digest_auth_incorrect_credentials",
"tests/client/test_auth.py::test_digest_auth_raises_protocol_error_on_malformed_header[Digest",
"tests/client/test_auth.py::test_digest_auth_raises_protocol_error_on_malformed_header[realm=\"[email protected]\",",
"tests/client/test_auth.py::test_digest_auth_raises_protocol_error_on_malformed_header[DigestZ",
"tests/client/test_auth.py::test_auth_history"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-12-26 08:37:55+00:00
|
bsd-3-clause
| 2,121 |
|
encode__httpx-718
|
diff --git a/docs/quickstart.md b/docs/quickstart.md
index 15defb1..0d68965 100644
--- a/docs/quickstart.md
+++ b/docs/quickstart.md
@@ -379,7 +379,7 @@ with additional API for accessing cookies by their domain or path.
By default, HTTPX will follow redirects for anything except `HEAD` requests.
The `history` property of the response can be used to inspect any followed redirects.
-It contains a list of all any redirect responses that were followed, in the order
+It contains a list of any redirect responses that were followed, in the order
in which they were made.
For example, GitHub redirects all HTTP requests to HTTPS.
diff --git a/httpx/__init__.py b/httpx/__init__.py
index caaa81a..01f4068 100644
--- a/httpx/__init__.py
+++ b/httpx/__init__.py
@@ -17,8 +17,8 @@ from .exceptions import (
ProtocolError,
ProxyError,
ReadTimeout,
- RedirectBodyUnavailable,
RedirectLoop,
+ RequestBodyUnavailable,
ResponseClosed,
ResponseNotRead,
StreamConsumed,
@@ -63,8 +63,8 @@ __all__ = [
"PoolTimeout",
"ProtocolError",
"ReadTimeout",
- "RedirectBodyUnavailable",
"RedirectLoop",
+ "RequestBodyUnavailable",
"ResponseClosed",
"ResponseNotRead",
"StreamConsumed",
diff --git a/httpx/client.py b/httpx/client.py
index b2c2e5e..75f29ac 100644
--- a/httpx/client.py
+++ b/httpx/client.py
@@ -27,8 +27,8 @@ from .dispatch.proxy_http import HTTPProxy
from .exceptions import (
HTTPError,
InvalidURL,
- RedirectBodyUnavailable,
RedirectLoop,
+ RequestBodyUnavailable,
TooManyRedirects,
)
from .models import (
@@ -451,7 +451,7 @@ class AsyncClient:
raise RedirectLoop()
response = await self.send_handling_auth(
- request, auth=auth, timeout=timeout,
+ request, history, auth=auth, timeout=timeout,
)
response.history = list(history)
@@ -561,12 +561,21 @@ class AsyncClient:
"""
if method != request.method and method == "GET":
return None
+
if not request.stream.can_replay():
- raise RedirectBodyUnavailable()
+ raise RequestBodyUnavailable(
+ "Got a redirect response, but the request body was streaming "
+ "and is no longer available."
+ )
+
return request.stream
async def send_handling_auth(
- self, request: Request, auth: Auth, timeout: Timeout,
+ self,
+ request: Request,
+ history: typing.List[Response],
+ auth: Auth,
+ timeout: Timeout,
) -> Response:
auth_flow = auth(request)
request = next(auth_flow)
@@ -580,8 +589,10 @@ class AsyncClient:
await response.aclose()
raise exc from None
else:
+ response.history = list(history)
+ await response.aread()
request = next_request
- await response.aclose()
+ history.append(response)
async def send_single_request(
self, request: Request, timeout: Timeout,
diff --git a/httpx/exceptions.py b/httpx/exceptions.py
index 2d8d27d..e199270 100644
--- a/httpx/exceptions.py
+++ b/httpx/exceptions.py
@@ -86,13 +86,6 @@ class TooManyRedirects(RedirectError):
"""
-class RedirectBodyUnavailable(RedirectError):
- """
- Got a redirect response, but the request body was streaming, and is
- no longer available.
- """
-
-
class RedirectLoop(RedirectError):
"""
Infinite redirect loop.
@@ -117,6 +110,13 @@ class StreamError(HTTPError):
"""
+class RequestBodyUnavailable(StreamError):
+ """
+ Had to send the request again, but the request body was streaming, and is
+ no longer available.
+ """
+
+
class StreamConsumed(StreamError):
"""
Attempted to read or stream response content, but the content has already
|
encode/httpx
|
79a9748ae6df16b428c840d4bcdb5447c138df34
|
diff --git a/tests/client/test_auth.py b/tests/client/test_auth.py
index d4dd76a..ea6ff8a 100644
--- a/tests/client/test_auth.py
+++ b/tests/client/test_auth.py
@@ -6,6 +6,7 @@ import typing
import pytest
from httpx import URL, AsyncClient, DigestAuth, ProtocolError, Request, Response
+from httpx.auth import Auth, AuthFlow
from httpx.config import CertTypes, TimeoutTypes, VerifyTypes
from httpx.dispatch.base import Dispatcher
@@ -218,6 +219,7 @@ async def test_digest_auth_returns_no_auth_if_no_digest_header_in_response() ->
assert response.status_code == 200
assert response.json() == {"auth": None}
+ assert len(response.history) == 0
@pytest.mark.asyncio
@@ -233,6 +235,7 @@ async def test_digest_auth_200_response_including_digest_auth_header() -> None:
assert response.status_code == 200
assert response.json() == {"auth": None}
+ assert len(response.history) == 0
@pytest.mark.asyncio
@@ -245,6 +248,7 @@ async def test_digest_auth_401_response_without_digest_auth_header() -> None:
assert response.status_code == 401
assert response.json() == {"auth": None}
+ assert len(response.history) == 0
@pytest.mark.parametrize(
@@ -271,6 +275,8 @@ async def test_digest_auth(
response = await client.get(url, auth=auth)
assert response.status_code == 200
+ assert len(response.history) == 1
+
authorization = typing.cast(dict, response.json())["auth"]
scheme, _, fields = authorization.partition(" ")
assert scheme == "Digest"
@@ -299,6 +305,8 @@ async def test_digest_auth_no_specified_qop() -> None:
response = await client.get(url, auth=auth)
assert response.status_code == 200
+ assert len(response.history) == 1
+
authorization = typing.cast(dict, response.json())["auth"]
scheme, _, fields = authorization.partition(" ")
assert scheme == "Digest"
@@ -325,7 +333,10 @@ async def test_digest_auth_qop_including_spaces_and_auth_returns_auth(qop: str)
auth = DigestAuth(username="tomchristie", password="password123")
client = AsyncClient(dispatch=MockDigestAuthDispatch(qop=qop))
- await client.get(url, auth=auth)
+ response = await client.get(url, auth=auth)
+
+ assert response.status_code == 200
+ assert len(response.history) == 1
@pytest.mark.asyncio
@@ -357,6 +368,7 @@ async def test_digest_auth_incorrect_credentials() -> None:
response = await client.get(url, auth=auth)
assert response.status_code == 401
+ assert len(response.history) == 1
@pytest.mark.parametrize(
@@ -381,3 +393,52 @@ async def test_digest_auth_raises_protocol_error_on_malformed_header(
with pytest.raises(ProtocolError):
await client.get(url, auth=auth)
+
+
[email protected]
+async def test_auth_history() -> None:
+ """
+ Test that intermediate requests sent as part of an authentication flow
+ are recorded in the response history.
+ """
+
+ class RepeatAuth(Auth):
+ """
+ A mock authentication scheme that requires clients to send
+ the request a fixed number of times, and then send a last request containing
+ an aggregation of nonces that the server sent in 'WWW-Authenticate' headers
+ of intermediate responses.
+ """
+
+ def __init__(self, repeat: int):
+ self.repeat = repeat
+
+ def __call__(self, request: Request) -> AuthFlow:
+ nonces = []
+
+ for index in range(self.repeat):
+ request.headers["Authorization"] = f"Repeat {index}"
+ response = yield request
+ nonces.append(response.headers["www-authenticate"])
+
+ key = ".".join(nonces)
+ request.headers["Authorization"] = f"Repeat {key}"
+ yield request
+
+ url = "https://example.org/"
+ auth = RepeatAuth(repeat=2)
+ client = AsyncClient(dispatch=MockDispatch(auth_header="abc"))
+
+ response = await client.get(url, auth=auth)
+ assert response.status_code == 200
+ assert response.json() == {"auth": "Repeat abc.abc"}
+
+ assert len(response.history) == 2
+ resp1, resp2 = response.history
+ assert resp1.json() == {"auth": "Repeat 0"}
+ assert resp2.json() == {"auth": "Repeat 1"}
+
+ assert len(resp2.history) == 1
+ assert resp2.history == [resp1]
+
+ assert len(resp1.history) == 0
diff --git a/tests/client/test_redirects.py b/tests/client/test_redirects.py
index 663d2de..4c02745 100644
--- a/tests/client/test_redirects.py
+++ b/tests/client/test_redirects.py
@@ -7,9 +7,9 @@ from httpx import (
URL,
AsyncClient,
NotRedirectResponse,
- RedirectBodyUnavailable,
RedirectLoop,
Request,
+ RequestBodyUnavailable,
Response,
TooManyRedirects,
codes,
@@ -293,7 +293,7 @@ async def test_cannot_redirect_streaming_body():
async def streaming_body():
yield b"Example request body"
- with pytest.raises(RedirectBodyUnavailable):
+ with pytest.raises(RequestBodyUnavailable):
await client.post(url, data=streaming_body())
|
Authentication flows should preserve history.
If the authentication flow makes more than one request, then any prior requests should be visible in `response.history`.
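For example, with digest auth the intermediate 401 challenge should now appear in the history, as the updated tests above assert (sketch; assumes a client wired to a digest-challenging endpoint):

```python
response = await client.get(url, auth=DigestAuth(username="user", password="pass"))
assert response.status_code == 200
assert len(response.history) == 1             # the intermediate 401 challenge
assert response.history[0].status_code == 401
```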
|
0.0
|
79a9748ae6df16b428c840d4bcdb5447c138df34
|
[
"tests/client/test_auth.py::test_basic_auth",
"tests/client/test_auth.py::test_basic_auth_in_url",
"tests/client/test_auth.py::test_basic_auth_on_session",
"tests/client/test_auth.py::test_custom_auth",
"tests/client/test_auth.py::test_netrc_auth",
"tests/client/test_auth.py::test_auth_header_has_priority_over_netrc",
"tests/client/test_auth.py::test_trust_env_auth",
"tests/client/test_auth.py::test_auth_hidden_url",
"tests/client/test_auth.py::test_auth_hidden_header",
"tests/client/test_auth.py::test_auth_invalid_type",
"tests/client/test_auth.py::test_digest_auth_returns_no_auth_if_no_digest_header_in_response",
"tests/client/test_auth.py::test_digest_auth_200_response_including_digest_auth_header",
"tests/client/test_auth.py::test_digest_auth_401_response_without_digest_auth_header",
"tests/client/test_auth.py::test_digest_auth[MD5-64-32]",
"tests/client/test_auth.py::test_digest_auth[MD5-SESS-64-32]",
"tests/client/test_auth.py::test_digest_auth[SHA-64-40]",
"tests/client/test_auth.py::test_digest_auth[SHA-SESS-64-40]",
"tests/client/test_auth.py::test_digest_auth[SHA-256-64-64]",
"tests/client/test_auth.py::test_digest_auth[SHA-256-SESS-64-64]",
"tests/client/test_auth.py::test_digest_auth[SHA-512-64-128]",
"tests/client/test_auth.py::test_digest_auth[SHA-512-SESS-64-128]",
"tests/client/test_auth.py::test_digest_auth_no_specified_qop",
"tests/client/test_auth.py::test_digest_auth_qop_including_spaces_and_auth_returns_auth[auth,",
"tests/client/test_auth.py::test_digest_auth_qop_including_spaces_and_auth_returns_auth[auth,auth-int]",
"tests/client/test_auth.py::test_digest_auth_qop_including_spaces_and_auth_returns_auth[unknown,auth]",
"tests/client/test_auth.py::test_digest_auth_qop_auth_int_not_implemented",
"tests/client/test_auth.py::test_digest_auth_qop_must_be_auth_or_auth_int",
"tests/client/test_auth.py::test_digest_auth_incorrect_credentials",
"tests/client/test_auth.py::test_digest_auth_raises_protocol_error_on_malformed_header[Digest",
"tests/client/test_auth.py::test_digest_auth_raises_protocol_error_on_malformed_header[realm=\"[email protected]\",",
"tests/client/test_auth.py::test_digest_auth_raises_protocol_error_on_malformed_header[DigestZ",
"tests/client/test_auth.py::test_auth_history",
"tests/client/test_redirects.py::test_no_redirect[asyncio]",
"tests/client/test_redirects.py::test_no_redirect[trio]",
"tests/client/test_redirects.py::test_redirect_301[asyncio]",
"tests/client/test_redirects.py::test_redirect_301[trio]",
"tests/client/test_redirects.py::test_redirect_302[asyncio]",
"tests/client/test_redirects.py::test_redirect_302[trio]",
"tests/client/test_redirects.py::test_redirect_303[asyncio]",
"tests/client/test_redirects.py::test_redirect_303[trio]",
"tests/client/test_redirects.py::test_disallow_redirects[asyncio]",
"tests/client/test_redirects.py::test_disallow_redirects[trio]",
"tests/client/test_redirects.py::test_relative_redirect[asyncio]",
"tests/client/test_redirects.py::test_relative_redirect[trio]",
"tests/client/test_redirects.py::test_no_scheme_redirect[asyncio]",
"tests/client/test_redirects.py::test_no_scheme_redirect[trio]",
"tests/client/test_redirects.py::test_fragment_redirect[asyncio]",
"tests/client/test_redirects.py::test_fragment_redirect[trio]",
"tests/client/test_redirects.py::test_multiple_redirects[asyncio]",
"tests/client/test_redirects.py::test_multiple_redirects[trio]",
"tests/client/test_redirects.py::test_too_many_redirects[asyncio]",
"tests/client/test_redirects.py::test_too_many_redirects[trio]",
"tests/client/test_redirects.py::test_too_many_redirects_calling_next[asyncio]",
"tests/client/test_redirects.py::test_too_many_redirects_calling_next[trio]",
"tests/client/test_redirects.py::test_redirect_loop[asyncio]",
"tests/client/test_redirects.py::test_redirect_loop[trio]",
"tests/client/test_redirects.py::test_redirect_loop_calling_next[asyncio]",
"tests/client/test_redirects.py::test_redirect_loop_calling_next[trio]",
"tests/client/test_redirects.py::test_cross_domain_redirect[asyncio]",
"tests/client/test_redirects.py::test_cross_domain_redirect[trio]",
"tests/client/test_redirects.py::test_same_domain_redirect[asyncio]",
"tests/client/test_redirects.py::test_same_domain_redirect[trio]",
"tests/client/test_redirects.py::test_body_redirect[asyncio]",
"tests/client/test_redirects.py::test_body_redirect[trio]",
"tests/client/test_redirects.py::test_no_body_redirect[asyncio]",
"tests/client/test_redirects.py::test_no_body_redirect[trio]",
"tests/client/test_redirects.py::test_cannot_redirect_streaming_body[asyncio]",
"tests/client/test_redirects.py::test_cannot_redirect_streaming_body[trio]",
"tests/client/test_redirects.py::test_cross_subdomain_redirect[asyncio]",
"tests/client/test_redirects.py::test_cross_subdomain_redirect[trio]",
"tests/client/test_redirects.py::test_redirect_cookie_behavior[asyncio]",
"tests/client/test_redirects.py::test_redirect_cookie_behavior[trio]"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-01-03 20:45:26+00:00
|
bsd-3-clause
| 2,122 |
|
encode__httpx-763
|
diff --git a/README.md b/README.md
index cf6b9b3..27fd542 100644
--- a/README.md
+++ b/README.md
@@ -39,6 +39,18 @@ Let's get started...
'<!doctype html>\n<html>\n<head>\n<title>Example Domain</title>...'
```
+Or, using the async API...
+
+_Use [IPython](https://ipython.readthedocs.io/en/stable/) or Python 3.8+ with `python -m asyncio` to try this code interactively._
+
+```python
+>>> import httpx
+>>> async with httpx.AsyncClient() as client:
+>>> r = await client.get('https://www.example.org/')
+>>> r
+<Response [200 OK]>
+```
+
## Features
HTTPX builds on the well-established usability of `requests`, and gives you:
diff --git a/docs/index.md b/docs/index.md
index e3d6e2a..476f62a 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -51,6 +51,18 @@ Let's get started...
'<!doctype html>\n<html>\n<head>\n<title>Example Domain</title>...'
```
+Or, using the async API...
+
+_Use [IPython](https://ipython.readthedocs.io/en/stable/) or Python 3.8+ with `python -m asyncio` to try this code interactively._
+
+```python
+>>> import httpx
+>>> async with httpx.AsyncClient() as client:
+>>> r = await client.get('https://www.example.org/')
+>>> r
+<Response [200 OK]>
+```
+
## Features
HTTPX is a high performance asynchronous HTTP client, that builds on the
diff --git a/httpx/decoders.py b/httpx/decoders.py
index 75d980d..454ec4a 100644
--- a/httpx/decoders.py
+++ b/httpx/decoders.py
@@ -45,12 +45,18 @@ class DeflateDecoder(Decoder):
"""
def __init__(self) -> None:
- self.decompressor = zlib.decompressobj(-zlib.MAX_WBITS)
+ self.first_attempt = True
+ self.decompressor = zlib.decompressobj()
def decode(self, data: bytes) -> bytes:
+ was_first_attempt = self.first_attempt
+ self.first_attempt = False
try:
return self.decompressor.decompress(data)
except zlib.error as exc:
+ if was_first_attempt:
+ self.decompressor = zlib.decompressobj(-zlib.MAX_WBITS)
+ return self.decode(data)
raise DecodingError from exc
def flush(self) -> bytes:
diff --git a/httpx/dispatch/urllib3.py b/httpx/dispatch/urllib3.py
index 1782834..2728170 100644
--- a/httpx/dispatch/urllib3.py
+++ b/httpx/dispatch/urllib3.py
@@ -77,7 +77,7 @@ class URLLib3Dispatcher(SyncDispatcher):
)
else:
return urllib3.ProxyManager(
- proxy_url=proxy.url,
+ proxy_url=str(proxy.url),
proxy_headers=dict(proxy.headers),
ssl_context=ssl_context,
num_pools=num_pools,
|
encode/httpx
|
956129fbf71495c97844dc7adf4b595a9da4cd18
|
diff --git a/tests/test_decoders.py b/tests/test_decoders.py
index f320d94..d9e82f7 100644
--- a/tests/test_decoders.py
+++ b/tests/test_decoders.py
@@ -18,6 +18,11 @@ REQUEST = httpx.Request("GET", "https://example.org")
def test_deflate():
+ """
+ Deflate encoding may use either 'zlib' or 'deflate' in the wild.
+
+ https://stackoverflow.com/questions/1838699/how-can-i-decompress-a-gzip-stream-with-zlib#answer-22311297
+ """
body = b"test 123"
compressor = zlib.compressobj(9, zlib.DEFLATED, -zlib.MAX_WBITS)
compressed_body = compressor.compress(body) + compressor.flush()
@@ -29,6 +34,22 @@ def test_deflate():
assert response.content == body
+def test_zlib():
+ """
+ Deflate encoding may use either 'zlib' or 'deflate' in the wild.
+
+ https://stackoverflow.com/questions/1838699/how-can-i-decompress-a-gzip-stream-with-zlib#answer-22311297
+ """
+ body = b"test 123"
+ compressed_body = zlib.compress(body)
+
+ headers = [(b"Content-Encoding", b"deflate")]
+ response = httpx.Response(
+ 200, headers=headers, content=compressed_body, request=REQUEST
+ )
+ assert response.content == body
+
+
def test_gzip():
body = b"test 123"
compressor = zlib.compressobj(9, zlib.DEFLATED, zlib.MAX_WBITS | 16)
|
urllib3.ProxyManager() instantiation is broken.
python 3.7.5
httpx 0.11.0
urllib3 1.25.7
```
$ ipython3 -c 'import httpx; r = httpx.get("https://www.google.com")'
parse_url http://127.0.0.1:1234 <class 'httpx.models.URL'>
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-1-d6fe3101235f> in <module>
----> 1 import httpx; r = httpx.get("https://www.google.com")
/usr/local/lib/python3.7/site-packages/httpx/api.py in get(url, params, headers, cookies, auth, allow_redirects, cert, verify, timeout, trust_env)
168 verify=verify,
169 timeout=timeout,
--> 170 trust_env=trust_env,
171 )
172
/usr/local/lib/python3.7/site-packages/httpx/api.py in request(method, url, params, data, files, json, headers, cookies, auth, timeout, allow_redirects, verify, cert, trust_env)
82 """
83 with Client(
---> 84 cert=cert, verify=verify, timeout=timeout, trust_env=trust_env,
85 ) as client:
86 return client.request(
/usr/local/lib/python3.7/site-packages/httpx/client.py in __init__(self, auth, params, headers, cookies, verify, cert, proxies, timeout, pool_limits, max_redirects, base_url, dispatch, app, trust_env)
477 trust_env=trust_env,
478 )
--> 479 for key, proxy in proxy_map.items()
480 }
481
/usr/local/lib/python3.7/site-packages/httpx/client.py in <dictcomp>(.0)
477 trust_env=trust_env,
478 )
--> 479 for key, proxy in proxy_map.items()
480 }
481
/usr/local/lib/python3.7/site-packages/httpx/client.py in init_proxy_dispatch(self, proxy, verify, cert, pool_limits, trust_env)
512 cert=cert,
513 pool_limits=pool_limits,
--> 514 trust_env=trust_env,
515 )
516
/usr/local/lib/python3.7/site-packages/httpx/dispatch/urllib3.py in __init__(self, proxy, verify, cert, trust_env, pool_limits)
58 num_pools=num_pools,
59 maxsize=maxsize,
---> 60 block=block,
61 )
62
/usr/local/lib/python3.7/site-packages/httpx/dispatch/urllib3.py in init_pool_manager(self, proxy, ssl_context, num_pools, maxsize, block)
83 num_pools=num_pools,
84 maxsize=maxsize,
---> 85 block=block,
86 )
87
/usr/local/lib/python3.7/site-packages/urllib3/poolmanager.py in __init__(self, proxy_url, num_pools, headers, proxy_headers, **connection_pool_kw)
412 proxy_url.port,
413 )
--> 414 proxy = parse_url(proxy_url)
415 if not proxy.port:
416 port = port_by_scheme.get(proxy.scheme, 80)
/usr/local/lib/python3.7/site-packages/urllib3/util/url.py in parse_url(url)
361 print('parse_url', url, type(url))
362 source_url = url
--> 363 if not SCHEME_RE.search(url):
364 url = "//" + url
365
TypeError: expected string or bytes-like object
```
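For reference, the traceback above boils down to urllib3's `parse_url` receiving an `httpx.URL` object where it expects a string; the patch simply casts it. A minimal sketch of the corrected call, using the proxy URL shown in the debug output above:

```python
import urllib3

# Passing a plain string mirrors the one-line fix: str(proxy.url).
proxy_url = "http://127.0.0.1:1234"  # stand-in for str(proxy.url)
manager = urllib3.ProxyManager(proxy_url=proxy_url)
```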
|
0.0
|
956129fbf71495c97844dc7adf4b595a9da4cd18
|
[
"tests/test_decoders.py::test_zlib"
] |
[
"tests/test_decoders.py::test_deflate",
"tests/test_decoders.py::test_gzip",
"tests/test_decoders.py::test_brotli",
"tests/test_decoders.py::test_multi",
"tests/test_decoders.py::test_multi_with_identity",
"tests/test_decoders.py::test_streaming",
"tests/test_decoders.py::test_empty_content[deflate]",
"tests/test_decoders.py::test_empty_content[gzip]",
"tests/test_decoders.py::test_empty_content[br]",
"tests/test_decoders.py::test_empty_content[identity]",
"tests/test_decoders.py::test_decoders_empty_cases[BrotliDecoder]",
"tests/test_decoders.py::test_decoders_empty_cases[DeflateDecoder]",
"tests/test_decoders.py::test_decoders_empty_cases[GZipDecoder]",
"tests/test_decoders.py::test_decoders_empty_cases[IdentityDecoder]",
"tests/test_decoders.py::test_decoding_errors[deflate]",
"tests/test_decoders.py::test_decoding_errors[gzip]",
"tests/test_decoders.py::test_decoding_errors[br]",
"tests/test_decoders.py::test_text_decoder[data0-ascii]",
"tests/test_decoders.py::test_text_decoder[data1-utf-8]",
"tests/test_decoders.py::test_text_decoder[data2-shift-jis]",
"tests/test_decoders.py::test_text_decoder[data3-shift-jis]",
"tests/test_decoders.py::test_text_decoder[data4-MacCyrillic]",
"tests/test_decoders.py::test_text_decoder[data5-euc-jp]",
"tests/test_decoders.py::test_text_decoder_known_encoding",
"tests/test_decoders.py::test_text_decoder_empty_cases",
"tests/test_decoders.py::test_line_decoder_nl",
"tests/test_decoders.py::test_line_decoder_cr",
"tests/test_decoders.py::test_line_decoder_crnl",
"tests/test_decoders.py::test_invalid_content_encoding_header"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-01-14 09:00:29+00:00
|
bsd-3-clause
| 2,123 |
|
encode__httpx-774
|
diff --git a/httpx/client.py b/httpx/client.py
index 590d5d0..4991478 100644
--- a/httpx/client.py
+++ b/httpx/client.py
@@ -330,6 +330,11 @@ class BaseClient:
url = URL(location, allow_relative=True)
+ # Handle malformed 'Location' headers that are "absolute" form, have no host.
+ # See: https://github.com/encode/httpx/issues/771
+ if url.scheme and not url.host:
+ url = url.copy_with(host=request.url.host)
+
# Facilitate relative 'Location' headers, as allowed by RFC 7231.
# (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
if url.is_relative_url:
diff --git a/httpx/models.py b/httpx/models.py
index acaaf66..8d22738 100644
--- a/httpx/models.py
+++ b/httpx/models.py
@@ -201,7 +201,7 @@ class URL:
or "port" in kwargs
):
host = kwargs.pop("host", self.host)
- port = kwargs.pop("port", self.port)
+ port = kwargs.pop("port", None if self.is_relative_url else self.port)
username = kwargs.pop("username", self.username)
password = kwargs.pop("password", self.password)
@@ -216,7 +216,10 @@ class URL:
kwargs["authority"] = authority
- return URL(self._uri_reference.copy_with(**kwargs).unsplit())
+ return URL(
+ self._uri_reference.copy_with(**kwargs).unsplit(),
+ allow_relative=self.is_relative_url,
+ )
def join(self, relative_url: URLTypes) -> "URL":
"""
|
encode/httpx
|
5a63540e8ab35209849894819ac731c836fdcf27
|
diff --git a/tests/client/test_redirects.py b/tests/client/test_redirects.py
index eab44f0..1717d78 100644
--- a/tests/client/test_redirects.py
+++ b/tests/client/test_redirects.py
@@ -56,6 +56,10 @@ class MockDispatch(AsyncDispatcher):
headers = {"location": "/"}
return Response(codes.SEE_OTHER, headers=headers, request=request)
+ elif request.url.path == "/malformed_redirect":
+ headers = {"location": "https://:443/"}
+ return Response(codes.SEE_OTHER, headers=headers, request=request)
+
elif request.url.path == "/no_scheme_redirect":
headers = {"location": "//example.org/"}
return Response(codes.SEE_OTHER, headers=headers, request=request)
@@ -176,6 +180,16 @@ async def test_relative_redirect():
assert len(response.history) == 1
[email protected]("async_environment")
+async def test_malformed_redirect():
+ # https://github.com/encode/httpx/issues/771
+ client = AsyncClient(dispatch=MockDispatch())
+ response = await client.get("http://example.org/malformed_redirect")
+ assert response.status_code == codes.OK
+ assert response.url == URL("https://example.org/")
+ assert len(response.history) == 1
+
+
@pytest.mark.usefixtures("async_environment")
async def test_no_scheme_redirect():
client = AsyncClient(dispatch=MockDispatch())
|
Handle redirects with missing hostname in `Location:` header.
While doing something like this:
```
import httpx
def run_sync(url):
with httpx.Client(verify=False) as client:
response = client.get(url)
print(response)
run_sync('http://62.28.16.253')
```
I get a `httpx.exceptions.InvalidURL: No host included in URL.`
However, `curl -vLk http://62.28.16.253` works as normal, as do other HTTP clients. I think this might be another sketchy server misconfiguration, but it should work nonetheless.
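A minimal standard-library sketch of the normalisation such a fix would need (the function name and structure here are illustrative, not httpx internals):

```python
from urllib.parse import urlsplit, urlunsplit

def fix_location(location: str, request_host: str) -> str:
    """Fill in a missing host in an 'absolute'-form Location header."""
    parts = urlsplit(location)  # e.g. 'https://:443/' has no hostname
    if parts.scheme and not parts.hostname:
        netloc = request_host + (f":{parts.port}" if parts.port else "")
        parts = parts._replace(netloc=netloc)
    return urlunsplit(parts)

print(fix_location("https://:443/", "example.org"))  # https://example.org:443/
```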
|
0.0
|
5a63540e8ab35209849894819ac731c836fdcf27
|
[
"tests/client/test_redirects.py::test_malformed_redirect[asyncio]",
"tests/client/test_redirects.py::test_malformed_redirect[trio]"
] |
[
"tests/client/test_redirects.py::test_no_redirect[asyncio]",
"tests/client/test_redirects.py::test_no_redirect[trio]",
"tests/client/test_redirects.py::test_redirect_301[asyncio]",
"tests/client/test_redirects.py::test_redirect_301[trio]",
"tests/client/test_redirects.py::test_redirect_302[asyncio]",
"tests/client/test_redirects.py::test_redirect_302[trio]",
"tests/client/test_redirects.py::test_redirect_303[asyncio]",
"tests/client/test_redirects.py::test_redirect_303[trio]",
"tests/client/test_redirects.py::test_disallow_redirects[asyncio]",
"tests/client/test_redirects.py::test_disallow_redirects[trio]",
"tests/client/test_redirects.py::test_relative_redirect[asyncio]",
"tests/client/test_redirects.py::test_relative_redirect[trio]",
"tests/client/test_redirects.py::test_no_scheme_redirect[asyncio]",
"tests/client/test_redirects.py::test_no_scheme_redirect[trio]",
"tests/client/test_redirects.py::test_fragment_redirect[asyncio]",
"tests/client/test_redirects.py::test_fragment_redirect[trio]",
"tests/client/test_redirects.py::test_multiple_redirects[asyncio]",
"tests/client/test_redirects.py::test_multiple_redirects[trio]",
"tests/client/test_redirects.py::test_too_many_redirects[asyncio]",
"tests/client/test_redirects.py::test_too_many_redirects[trio]",
"tests/client/test_redirects.py::test_too_many_redirects_calling_next[asyncio]",
"tests/client/test_redirects.py::test_too_many_redirects_calling_next[trio]",
"tests/client/test_redirects.py::test_redirect_loop[asyncio]",
"tests/client/test_redirects.py::test_redirect_loop[trio]",
"tests/client/test_redirects.py::test_redirect_loop_calling_next[asyncio]",
"tests/client/test_redirects.py::test_redirect_loop_calling_next[trio]",
"tests/client/test_redirects.py::test_cross_domain_redirect[asyncio]",
"tests/client/test_redirects.py::test_cross_domain_redirect[trio]",
"tests/client/test_redirects.py::test_same_domain_redirect[asyncio]",
"tests/client/test_redirects.py::test_same_domain_redirect[trio]",
"tests/client/test_redirects.py::test_body_redirect[asyncio]",
"tests/client/test_redirects.py::test_body_redirect[trio]",
"tests/client/test_redirects.py::test_no_body_redirect[asyncio]",
"tests/client/test_redirects.py::test_no_body_redirect[trio]",
"tests/client/test_redirects.py::test_can_stream_if_no_redirect[asyncio]",
"tests/client/test_redirects.py::test_can_stream_if_no_redirect[trio]",
"tests/client/test_redirects.py::test_cannot_redirect_streaming_body[asyncio]",
"tests/client/test_redirects.py::test_cannot_redirect_streaming_body[trio]",
"tests/client/test_redirects.py::test_cross_subdomain_redirect[asyncio]",
"tests/client/test_redirects.py::test_cross_subdomain_redirect[trio]",
"tests/client/test_redirects.py::test_redirect_cookie_behavior[asyncio]",
"tests/client/test_redirects.py::test_redirect_cookie_behavior[trio]"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-01-17 11:08:10+00:00
|
bsd-3-clause
| 2,124 |
|
encode__httpx-995
|
diff --git a/httpx/_models.py b/httpx/_models.py
index 865fd9a..683dde1 100644
--- a/httpx/_models.py
+++ b/httpx/_models.py
@@ -616,6 +616,9 @@ class Request:
auto_headers: typing.List[typing.Tuple[bytes, bytes]] = []
has_host = "host" in self.headers
+ has_content_length = (
+ "content-length" in self.headers or "transfer-encoding" in self.headers
+ )
has_user_agent = "user-agent" in self.headers
has_accept = "accept" in self.headers
has_accept_encoding = "accept-encoding" in self.headers
@@ -626,6 +629,8 @@ class Request:
if url.userinfo:
url = url.copy_with(username=None, password=None)
auto_headers.append((b"host", url.authority.encode("ascii")))
+ if not has_content_length and self.method in ("POST", "PUT", "PATCH"):
+ auto_headers.append((b"content-length", b"0"))
if not has_user_agent:
auto_headers.append((b"user-agent", USER_AGENT.encode("ascii")))
if not has_accept:
|
encode/httpx
|
66a45379594e5cf836e4743931119e76dc8f22c7
|
diff --git a/tests/client/test_client.py b/tests/client/test_client.py
index 1555690..0bb1cc1 100644
--- a/tests/client/test_client.py
+++ b/tests/client/test_client.py
@@ -37,6 +37,22 @@ def test_build_request(server):
assert response.json()["Custom-header"] == "value"
+def test_build_post_request(server):
+ url = server.url.copy_with(path="/echo_headers")
+ headers = {"Custom-header": "value"}
+
+ with httpx.Client() as client:
+ request = client.build_request("POST", url)
+ request.headers.update(headers)
+ response = client.send(request)
+
+ assert response.status_code == 200
+ assert response.url == url
+
+ assert response.json()["Content-length"] == "0"
+ assert response.json()["Custom-header"] == "value"
+
+
def test_post(server):
with httpx.Client() as client:
response = client.post(server.url, data=b"Hello, world!")
|
Content-Length header is missing in POST/DELETE/PUT/PATCH requests without a body
### Checklist
<!-- To help keep this issue tracker clean and focused, please make sure you tried *all* the following resources before submitting your question. -->
- [x] I searched the [HTTPX documentation](https://www.python-httpx.org) but couldn't find what I'm looking for.
- [x] I looked through similar issues on GitHub, but didn't find anything.
- [x] I looked up "How to do ... in HTTPX" on a search engine and didn't find any information.
- [x] I asked the [community chat](https://gitter.im/encode/community) for help ~~but didn't get an answer.~~ [and decided to create an issue. :)](https://gitter.im/encode/community?at=5ec95c46778fad0b13201d03)
### Environment
**httpx version**: `0.13.1`
**Python version**: `3.7.3`
**OS**: Windows 7
### Question
I am not sure how to classify this issue: it could be a bug, since it is incompatible with `requests`; on the other hand, it could be a feature request, if you consider it not that important. So, I decided to go with a question. :D
I was replacing `requests` with `httpx` in my small script and received an error from the server `httpx._exceptions.HTTPError: 411 Client Error: Length Required for url: ...` while doing a `POST` request **without a body** (the same behavior applies to `PUT` and `PATCH` requests too).
*Steps to reproduce:*
```python
import requests
import httpx
for client in (requests, httpx):
for method in ("get", "head", "delete", "post", "put", "patch"):
r = client.request(method, f"https://httpbin.org/headers")
print(
f"[{client.__name__}] method={method.upper()} "
f'Content-Length={r.request.headers.get("Content-Length")}'
)
```
`requests` adds a `Content-Length` header for every HTTP method except `GET` and `HEAD`.
`httpx` does not add a `Content-Length` header at all, **unless the request has a body**.
#866 added `Content-Length` to all methods, but since it is not needed in `GET` and `HEAD` methods, it was reverted.
I assume it should be handled during `Request` building.
`httpx` decides whether to add `Content-Length` depending on the stream type, which is handled in [_content_streams.py#L314](https://github.com/encode/httpx/blob/aa630d36c22642d7004a57abd222a14461ee4d77/httpx/_content_streams.py#L372).
### Thoughts on how to solve it
To be honest, I am not sure what the proper way to deal with this is. :-(
My first idea was to use `method` inside the [stream](https://github.com/encode/httpx/blob/aa630d36c22642d7004a57abd222a14461ee4d77/httpx/_content_streams.py#L49):
```python3
class ByteStream(ContentStream):
def __init__(self, body: typing.Union[str, bytes], method: str) -> None:
self.body = body.encode("utf-8") if isinstance(body, str) else body
self.method = method
def get_headers(self) -> typing.Dict[str, str]:
if self.method in ("GET", "HEAD"):
return {}
content_length = str(len(self.body)) if self.body else "0"
return {"Content-Length": content_length}
```
But this approach is not great, since it requires passing `method` from `Request` to `encode` and then to `ByteStream` inside it. Also, it is not clear which `method` I should use when building the stream in the [`read`](https://github.com/encode/httpx/blob/aa630d36c22642d7004a57abd222a14461ee4d77/httpx/_models.py#L656) methods.
Please, let me know what you think. Thanks!
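For illustration, here is a sketch of the rule the eventual patch applies (simplified; header keys are assumed to be already lower-cased):

```python
def auto_content_length(method: str, headers: dict) -> dict:
    """Add 'Content-Length: 0' to bodyless POST/PUT/PATCH requests."""
    has_length = "content-length" in headers or "transfer-encoding" in headers
    if not has_length and method in ("POST", "PUT", "PATCH"):
        headers["content-length"] = "0"
    return headers

assert auto_content_length("POST", {}) == {"content-length": "0"}
assert auto_content_length("GET", {}) == {}  # GET/HEAD stay untouched
```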
|
0.0
|
66a45379594e5cf836e4743931119e76dc8f22c7
|
[
"tests/client/test_client.py::test_build_post_request"
] |
[
"tests/client/test_client.py::test_get",
"tests/client/test_client.py::test_build_request",
"tests/client/test_client.py::test_post",
"tests/client/test_client.py::test_post_json",
"tests/client/test_client.py::test_stream_response",
"tests/client/test_client.py::test_stream_iterator",
"tests/client/test_client.py::test_raw_iterator",
"tests/client/test_client.py::test_raise_for_status",
"tests/client/test_client.py::test_options",
"tests/client/test_client.py::test_head",
"tests/client/test_client.py::test_put",
"tests/client/test_client.py::test_patch",
"tests/client/test_client.py::test_delete",
"tests/client/test_client.py::test_base_url",
"tests/client/test_client.py::test_merge_url"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-05-25 09:46:44+00:00
|
bsd-3-clause
| 2,125 |
|
encode__starlette-105
|
diff --git a/starlette/middleware/cors.py b/starlette/middleware/cors.py
index 7345531..8bc3380 100644
--- a/starlette/middleware/cors.py
+++ b/starlette/middleware/cors.py
@@ -32,6 +32,8 @@ class CORSMiddleware:
simple_headers = {}
if "*" in allow_origins:
simple_headers["Access-Control-Allow-Origin"] = "*"
+ else:
+ simple_headers["Vary"] = "Origin"
if allow_credentials:
simple_headers["Access-Control-Allow-Credentials"] = "true"
if expose_headers:
@@ -74,7 +76,7 @@ class CORSMiddleware:
return self.preflight_response(request_headers=headers)
else:
return functools.partial(
- self.simple_response, scope=scope, origin=origin
+ self.simple_response, scope=scope, request_headers=headers
)
return self.app(scope)
@@ -130,22 +132,31 @@ class CORSMiddleware:
return PlainTextResponse("OK", status_code=200, headers=headers)
- async def simple_response(self, receive, send, scope=None, origin=None):
+ async def simple_response(self, receive, send, scope=None, request_headers=None):
inner = self.app(scope)
- send = functools.partial(self.send, send=send, origin=origin)
+ send = functools.partial(self.send, send=send, request_headers=request_headers)
await inner(receive, send)
- async def send(self, message, send=None, origin=None):
+ async def send(self, message, send=None, request_headers=None):
if message["type"] != "http.response.start":
await send(message)
return
message.setdefault("headers", [])
headers = MutableHeaders(message["headers"])
+ origin = request_headers["Origin"]
+ has_cookie = "cookie" in request_headers
+
+ # If request includes any cookie headers, then we must respond
+ # with the specific origin instead of '*'.
+ if self.allow_all_origins and has_cookie:
+ self.simple_headers["Access-Control-Allow-Origin"] = origin
# If we only allow specific origins, then we have to mirror back
# the Origin header in the response.
- if not self.allow_all_origins and self.is_allowed_origin(origin=origin):
+ elif not self.allow_all_origins and self.is_allowed_origin(origin=origin):
headers["Access-Control-Allow-Origin"] = origin
+ if "vary" in headers:
+ self.simple_headers["Vary"] = f"{headers.get('vary')}, Origin"
headers.update(self.simple_headers)
await send(message)
|
encode/starlette
|
cc09042c1ca5ccac78bf0ff689faa5dcd3e04f3a
|
diff --git a/tests/test_middleware.py b/tests/test_middleware.py
index 1b42a2f..ab3a077 100644
--- a/tests/test_middleware.py
+++ b/tests/test_middleware.py
@@ -206,3 +206,60 @@ def test_cors_allow_origin_regex():
assert response.status_code == 400
assert response.text == "Disallowed CORS origin"
assert "access-control-allow-origin" not in response.headers
+
+
+def test_cors_credentialed_requests_return_specific_origin():
+ app = Starlette()
+
+ app.add_middleware(CORSMiddleware, allow_origins=["*"])
+
+ @app.route("/")
+ def homepage(request):
+ return PlainTextResponse("Homepage", status_code=200)
+
+ client = TestClient(app)
+
+ # Test credentialed request
+ headers = {"Origin": "https://example.org", "Cookie": "star_cookie=sugar"}
+ response = client.get("/", headers=headers)
+ assert response.status_code == 200
+ assert response.text == "Homepage"
+ assert response.headers["access-control-allow-origin"] == "https://example.org"
+
+
+def test_cors_vary_header_defaults_to_origin():
+ app = Starlette()
+
+ app.add_middleware(CORSMiddleware, allow_origins=["https://example.org"])
+
+ headers = {"Origin": "https://example.org"}
+
+ @app.route("/")
+ def homepage(request):
+ return PlainTextResponse("Homepage", status_code=200)
+
+ client = TestClient(app)
+
+ response = client.get("/", headers=headers)
+ assert response.status_code == 200
+ assert response.headers["vary"] == "Origin"
+
+
+def test_cors_vary_header_is_properly_set():
+ app = Starlette()
+
+ app.add_middleware(CORSMiddleware, allow_origins=["https://example.org"])
+
+ headers = {"Origin": "https://example.org"}
+
+ @app.route("/")
+ def homepage(request):
+ return PlainTextResponse(
+ "Homepage", status_code=200, headers={"Vary": "Accept-Encoding"}
+ )
+
+ client = TestClient(app)
+
+ response = client.get("/", headers=headers)
+ assert response.status_code == 200
+ assert response.headers["vary"] == "Accept-Encoding, Origin"
|
Credentialed CORS standard requests should not respond with wildcard origins
See https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS#Credentialed_requests_and_wildcards
If a standard request is made that includes any cookie headers, then `CORSMiddleware` *ought* to respond strictly with the requested origin, rather than a wildcard.
This is potentially a bit fiddly, since we may also need to *set or add* `Vary: Origin` in those cases, in order to ensure correct cacheability.
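A minimal sketch of the proposed rule (illustrative names, not Starlette's actual middleware code):

```python
def cors_simple_headers(request_headers: dict, allow_all_origins: bool) -> dict:
    origin = request_headers.get("origin", "")
    has_cookie = "cookie" in request_headers
    headers = {}
    if allow_all_origins and not has_cookie:
        headers["access-control-allow-origin"] = "*"
    else:
        # Credentialed or origin-restricted: echo the specific origin,
        # and mark the response as varying by Origin for caches.
        headers["access-control-allow-origin"] = origin
        headers["vary"] = "Origin"
    return headers
```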
|
0.0
|
cc09042c1ca5ccac78bf0ff689faa5dcd3e04f3a
|
[
"tests/test_middleware.py::test_cors_credentialed_requests_return_specific_origin",
"tests/test_middleware.py::test_cors_vary_header_defaults_to_origin",
"tests/test_middleware.py::test_cors_vary_header_is_properly_set"
] |
[
"tests/test_middleware.py::test_trusted_host_middleware",
"tests/test_middleware.py::test_https_redirect_middleware",
"tests/test_middleware.py::test_cors_allow_all",
"tests/test_middleware.py::test_cors_allow_specific_origin",
"tests/test_middleware.py::test_cors_disallowed_preflight",
"tests/test_middleware.py::test_cors_allow_origin_regex"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-10-11 22:53:24+00:00
|
bsd-3-clause
| 2,126 |
|
encode__starlette-109
|
diff --git a/starlette/datastructures.py b/starlette/datastructures.py
index 558c8a9..2705fd3 100644
--- a/starlette/datastructures.py
+++ b/starlette/datastructures.py
@@ -7,16 +7,20 @@ class URL:
def __init__(self, url: str = "", scope: Scope = None) -> None:
if scope is not None:
assert not url, 'Cannot set both "url" and "scope".'
- scheme = scope["scheme"]
- host, port = scope["server"]
+ scheme = scope.get("scheme", "http")
+ server = scope.get("server", None)
path = scope.get("root_path", "") + scope["path"]
query_string = scope["query_string"]
- default_port = {"http": 80, "https": 443, "ws": 80, "wss": 443}[scheme]
- if port == default_port:
- url = "%s://%s%s" % (scheme, host, path)
+ if server is None:
+ url = path
else:
- url = "%s://%s:%s%s" % (scheme, host, port, path)
+ host, port = server
+ default_port = {"http": 80, "https": 443, "ws": 80, "wss": 443}[scheme]
+ if port == default_port:
+ url = "%s://%s%s" % (scheme, host, path)
+ else:
+ url = "%s://%s:%s%s" % (scheme, host, port, path)
if query_string:
url += "?" + unquote(query_string.decode())
@@ -85,6 +89,9 @@ class URL:
def __str__(self):
return self._url
+ def __repr__(self):
+ return "%s(%s)" % (self.__class__.__name__, repr(self._url))
+
# Type annotations for valid `__init__` values to QueryParams and Headers.
StrPairs = typing.Sequence[typing.Tuple[str, str]]
|
encode/starlette
|
02aaa4bddfe126b1458184b1ee1e8604af5041c7
|
diff --git a/tests/test_datastructures.py b/tests/test_datastructures.py
index d6aa62f..4312e1c 100644
--- a/tests/test_datastructures.py
+++ b/tests/test_datastructures.py
@@ -27,6 +27,23 @@ def test_url():
assert new.hostname == "example.com"
+def test_url_from_scope():
+ u = URL(scope={"path": "/path/to/somewhere", "query_string": b"abc=123"})
+ assert u == "/path/to/somewhere?abc=123"
+ assert repr(u) == "URL('/path/to/somewhere?abc=123')"
+
+ u = URL(
+ scope={
+ "scheme": "https",
+ "server": ("example.org", 123),
+ "path": "/path/to/somewhere",
+ "query_string": b"abc=123",
+ }
+ )
+ assert u == "https://example.org:123/path/to/somewhere?abc=123"
+ assert repr(u) == "URL('https://example.org:123/path/to/somewhere?abc=123')"
+
+
def test_headers():
h = Headers([(b"a", b"123"), (b"a", b"456"), (b"b", b"789")])
assert "a" in h
|
scope["server"] can be None
From https://asgi.readthedocs.io/en/latest/specs/www.html#connection-scope
> server: A two-item iterable of [host, port], where host is the listening address for this server as a unicode string, and port is the integer listening port. Optional, defaults to None.
https://github.com/encode/starlette/blob/master/starlette/datastructures.py#L11 doesn't handle that case; it assumes `scope["server"]` is always a two-item pair.
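A sketch of the defensive construction (simplified from the patch above; query-string handling omitted):

```python
def url_from_scope(scope: dict) -> str:
    scheme = scope.get("scheme", "http")
    server = scope.get("server")  # may legitimately be None
    path = scope.get("root_path", "") + scope["path"]
    if server is None:
        return path  # fall back to a path-only URL
    host, port = server
    default = {"http": 80, "https": 443, "ws": 80, "wss": 443}[scheme]
    port_part = "" if port == default else f":{port}"
    return f"{scheme}://{host}{port_part}{path}"

assert url_from_scope({"path": "/p"}) == "/p"
assert url_from_scope(
    {"scheme": "https", "server": ("example.org", 123), "path": "/x"}
) == "https://example.org:123/x"
```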
|
0.0
|
02aaa4bddfe126b1458184b1ee1e8604af5041c7
|
[
"tests/test_datastructures.py::test_url_from_scope"
] |
[
"tests/test_datastructures.py::test_url",
"tests/test_datastructures.py::test_headers",
"tests/test_datastructures.py::test_mutable_headers",
"tests/test_datastructures.py::test_headers_mutablecopy",
"tests/test_datastructures.py::test_queryparams"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-10-15 08:34:17+00:00
|
bsd-3-clause
| 2,127 |
|
encode__starlette-1106
|
diff --git a/starlette/routing.py b/starlette/routing.py
index ce5e4d1..1e6ae0b 100644
--- a/starlette/routing.py
+++ b/starlette/routing.py
@@ -2,7 +2,6 @@ import asyncio
import functools
import inspect
import re
-import sys
import traceback
import typing
from enum import Enum
@@ -33,11 +32,10 @@ class Match(Enum):
def iscoroutinefunction_or_partial(obj: typing.Any) -> bool:
"""
Correctly determines if an object is a coroutine function,
- with a fix for partials on Python < 3.8.
+ including those wrapped in functools.partial objects.
"""
- if sys.version_info < (3, 8): # pragma: no cover
- while isinstance(obj, functools.partial):
- obj = obj.func
+ while isinstance(obj, functools.partial):
+ obj = obj.func
return inspect.iscoroutinefunction(obj)
|
encode/starlette
|
62e95b89fca3a4c60d6638776bddbe1b59fcd2f9
|
diff --git a/tests/test_routing.py b/tests/test_routing.py
index 27640ef..8927c60 100644
--- a/tests/test_routing.py
+++ b/tests/test_routing.py
@@ -590,16 +590,35 @@ def test_raise_on_shutdown():
pass # pragma: nocover
+class AsyncEndpointClassMethod:
+ @classmethod
+ async def async_endpoint(cls, arg, request):
+ return JSONResponse({"arg": arg})
+
+
async def _partial_async_endpoint(arg, request):
return JSONResponse({"arg": arg})
partial_async_endpoint = functools.partial(_partial_async_endpoint, "foo")
+partial_cls_async_endpoint = functools.partial(
+ AsyncEndpointClassMethod.async_endpoint, "foo"
+)
-partial_async_app = Router(routes=[Route("/", partial_async_endpoint)])
+partial_async_app = Router(
+ routes=[
+ Route("/", partial_async_endpoint),
+ Route("/cls", partial_cls_async_endpoint),
+ ]
+)
def test_partial_async_endpoint():
- response = TestClient(partial_async_app).get("/")
+ test_client = TestClient(partial_async_app)
+ response = test_client.get("/")
assert response.status_code == 200
assert response.json() == {"arg": "foo"}
+
+ cls_method_response = test_client.get("/cls")
+ assert cls_method_response.status_code == 200
+ assert cls_method_response.json() == {"arg": "foo"}
|
Asynchronous classmethod handlers not recognized as coroutines if wrapped in functools.partial
https://github.com/encode/starlette/blob/e4307065ea6dbe708fba9643d14fe7adfff06d46/starlette/routing.py#L38
Although the recent 0.14.0 version adds support for functools.partial for Python 3.6 and Python 3.7, that support appears to be incomplete, not working for classmethods in Python 3.8.
I was unaware of the fact that `inspect.iscoroutinefunction` does not properly detect a functools.partial as async if it wraps an async classmethod.
TL;DR: the previous PR was too cautious; we can always unwrap the `functools.partial` object to be sure we check the underlying object.
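For reference, a sketch of that unwrapping check (it mirrors the patch above):

```python
import functools
import inspect

def iscoroutinefunction_or_partial(obj) -> bool:
    # Unwrap nested functools.partial objects before checking.
    while isinstance(obj, functools.partial):
        obj = obj.func
    return inspect.iscoroutinefunction(obj)

class Endpoint:
    @classmethod
    async def handler(cls, arg):
        return arg

assert iscoroutinefunction_or_partial(functools.partial(Endpoint.handler, "x"))
```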
|
0.0
|
62e95b89fca3a4c60d6638776bddbe1b59fcd2f9
|
[
"tests/test_routing.py::test_partial_async_endpoint"
] |
[
"tests/test_routing.py::test_router",
"tests/test_routing.py::test_route_converters",
"tests/test_routing.py::test_url_path_for",
"tests/test_routing.py::test_url_for",
"tests/test_routing.py::test_router_add_route",
"tests/test_routing.py::test_router_duplicate_path",
"tests/test_routing.py::test_router_add_websocket_route",
"tests/test_routing.py::test_protocol_switch",
"tests/test_routing.py::test_mount_urls",
"tests/test_routing.py::test_reverse_mount_urls",
"tests/test_routing.py::test_mount_at_root",
"tests/test_routing.py::test_host_routing",
"tests/test_routing.py::test_host_reverse_urls",
"tests/test_routing.py::test_subdomain_routing",
"tests/test_routing.py::test_subdomain_reverse_urls",
"tests/test_routing.py::test_url_for_with_root_path",
"tests/test_routing.py::test_url_for_with_double_mount",
"tests/test_routing.py::test_standalone_route_matches",
"tests/test_routing.py::test_standalone_route_does_not_match",
"tests/test_routing.py::test_standalone_ws_route_matches",
"tests/test_routing.py::test_standalone_ws_route_does_not_match",
"tests/test_routing.py::test_lifespan_async",
"tests/test_routing.py::test_lifespan_sync",
"tests/test_routing.py::test_raise_on_startup",
"tests/test_routing.py::test_raise_on_shutdown"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2020-12-02 08:11:24+00:00
|
bsd-3-clause
| 2,128 |
|
encode__starlette-1147
|
diff --git a/starlette/middleware/sessions.py b/starlette/middleware/sessions.py
index d1b1d5a..a13ec5c 100644
--- a/starlette/middleware/sessions.py
+++ b/starlette/middleware/sessions.py
@@ -49,14 +49,16 @@ class SessionMiddleware:
async def send_wrapper(message: Message) -> None:
if message["type"] == "http.response.start":
+ path = scope.get("root_path", "") or "/"
if scope["session"]:
# We have session data to persist.
data = b64encode(json.dumps(scope["session"]).encode("utf-8"))
data = self.signer.sign(data)
headers = MutableHeaders(scope=message)
- header_value = "%s=%s; path=/; Max-Age=%d; %s" % (
+ header_value = "%s=%s; path=%s; Max-Age=%d; %s" % (
self.session_cookie,
data.decode("utf-8"),
+ path,
self.max_age,
self.security_flags,
)
@@ -66,7 +68,7 @@ class SessionMiddleware:
headers = MutableHeaders(scope=message)
header_value = "{}={}; {}".format(
self.session_cookie,
- "null; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT;",
+ f"null; path={path}; expires=Thu, 01 Jan 1970 00:00:00 GMT;",
self.security_flags,
)
headers.append("Set-Cookie", header_value)
|
encode/starlette
|
23e15789bf6b879b455f1249e47a43d4c752b3ed
|
diff --git a/tests/middleware/test_session.py b/tests/middleware/test_session.py
index 3f71232..68cf36d 100644
--- a/tests/middleware/test_session.py
+++ b/tests/middleware/test_session.py
@@ -101,3 +101,15 @@ def test_secure_session():
response = secure_client.get("/view_session")
assert response.json() == {"session": {}}
+
+
+def test_session_cookie_subpath():
+ app = create_app()
+ second_app = create_app()
+ second_app.add_middleware(SessionMiddleware, secret_key="example")
+ app.mount("/second_app", second_app)
+ client = TestClient(app, base_url="http://testserver/second_app")
+ response = client.post("second_app/update_session", json={"some": "data"})
+ cookie = response.headers["set-cookie"]
+ cookie_path = re.search(r"; path=(\S+);", cookie).groups()[0]
+ assert cookie_path == "/second_app"
|
Session cookie should use root path
The session cookie currently uses '/'.
It should really use the ASGI root path instead, in case the application is submounted.
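A sketch of the cookie construction with the root path applied (names simplified; the real middleware also signs and base64-encodes the session data, as the diff above shows):

```python
def session_cookie_header(scope: dict, data: str, max_age: int) -> str:
    # Respect submounted apps by using the ASGI root_path, not '/'.
    path = scope.get("root_path", "") or "/"
    return f"session={data}; path={path}; Max-Age={max_age}; httponly"
```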
|
0.0
|
23e15789bf6b879b455f1249e47a43d4c752b3ed
|
[
"tests/middleware/test_session.py::test_session_cookie_subpath"
] |
[
"tests/middleware/test_session.py::test_session",
"tests/middleware/test_session.py::test_session_expires",
"tests/middleware/test_session.py::test_secure_session"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-03-11 11:33:28+00:00
|
bsd-3-clause
| 2,129 |
|
encode__starlette-134
|
diff --git a/docs/applications.md b/docs/applications.md
index 2cce4ab..d4b44ca 100644
--- a/docs/applications.md
+++ b/docs/applications.md
@@ -56,7 +56,7 @@ There are two ways to add event handlers:
* `@app.on_event(event_type)` - Add an event, decorator style
* `app.add_event_handler(event_type, func)` - Add an event through a function call.
-`event_type` must be specified as either `'startup'` or `'cleanup'`.
+`event_type` must be specified as either `'startup'` or `'shutdown'`.
### Submounting other applications
diff --git a/docs/events.md b/docs/events.md
index b6696bb..3378a83 100644
--- a/docs/events.md
+++ b/docs/events.md
@@ -20,7 +20,7 @@ app = Starlette()
async def open_database_connection_pool():
...
[email protected]_event('cleanup')
[email protected]_event('shutdown')
async def close_database_connection_pool():
...
```
@@ -39,14 +39,14 @@ async def close_database_connection_pool():
...
app.add_event_handler('startup', open_database_connection_pool)
-app.add_event_handler('cleanup', close_database_connection_pool)
+app.add_event_handler('shutdown', close_database_connection_pool)
```
Starlette will not start serving any incoming requests until all of the
registered startup handlers have completed.
-The cleanup handlers will run once all connections have been closed, and
+The shutdown handlers will run once all connections have been closed, and
any in-process background tasks have completed.
**Note**: The ASGI lifespan protocol has only recently been added to the spec,
@@ -74,5 +74,5 @@ def test_homepage():
response = client.get("/")
assert response.status_code == 200
- # Application 'cleanup' handlers are called on exiting the block.
+ # Application 'shutdown' handlers are called on exiting the block.
```
diff --git a/starlette/lifespan.py b/starlette/lifespan.py
index a862298..b25ec8c 100644
--- a/starlette/lifespan.py
+++ b/starlette/lifespan.py
@@ -22,7 +22,7 @@ class LifespanHandler:
return decorator
def add_event_handler(self, event_type: str, func: typing.Callable) -> None:
- assert event_type in ("startup", "cleanup")
+ assert event_type in ("startup", "shutdown", "cleanup")
if event_type == "startup":
self.startup_handlers.append(func)
@@ -53,19 +53,26 @@ class LifespanHandler:
await self.run_startup()
await send({"type": "lifespan.startup.complete"})
message = await receive()
- assert message["type"] == "lifespan.cleanup"
+ assert (
+ message["type"] == "lifespan.shutdown"
+ or message["type"] == "lifespan.cleanup"
+ )
await self.run_cleanup()
- await send({"type": "lifespan.cleanup.complete"})
+ if message["type"] == "lifespan.shutdown":
+ await send({"type": "lifespan.shutdown.complete"})
+
+ if message["type"] == "lifespan.cleanup":
+ await send({"type": "lifespan.cleanup.complete"}) # pragma: no cover
class LifespanContext:
def __init__(
- self, app: ASGIApp, startup_timeout: int = 10, cleanup_timeout: int = 10
+ self, app: ASGIApp, startup_timeout: int = 10, shutdown_timeout: int = 10
) -> None:
self.startup_timeout = startup_timeout
- self.cleanup_timeout = cleanup_timeout
+ self.shutdown_timeout = shutdown_timeout
self.startup_event = asyncio.Event()
- self.cleanup_event = asyncio.Event()
+ self.shutdown_event = asyncio.Event()
self.receive_queue = asyncio.Queue() # type: asyncio.Queue
self.asgi = app({"type": "lifespan"}) # type: ASGIInstance
@@ -81,25 +88,25 @@ class LifespanContext:
tb: TracebackType,
) -> None:
loop = asyncio.get_event_loop()
- loop.run_until_complete(self.wait_cleanup())
+ loop.run_until_complete(self.wait_shutdown())
async def run_lifespan(self) -> None:
try:
await self.asgi(self.receive, self.send)
finally:
self.startup_event.set()
- self.cleanup_event.set()
+ self.shutdown_event.set()
async def send(self, message: Message) -> None:
if message["type"] == "lifespan.startup.complete":
assert not self.startup_event.is_set(), STATE_TRANSITION_ERROR
- assert not self.cleanup_event.is_set(), STATE_TRANSITION_ERROR
+ assert not self.shutdown_event.is_set(), STATE_TRANSITION_ERROR
self.startup_event.set()
else:
- assert message["type"] == "lifespan.cleanup.complete"
+ assert message["type"] == "lifespan.shutdown.complete"
assert self.startup_event.is_set(), STATE_TRANSITION_ERROR
- assert not self.cleanup_event.is_set(), STATE_TRANSITION_ERROR
- self.cleanup_event.set()
+ assert not self.shutdown_event.is_set(), STATE_TRANSITION_ERROR
+ self.shutdown_event.set()
async def receive(self) -> Message:
return await self.receive_queue.get()
@@ -108,6 +115,8 @@ class LifespanContext:
await self.receive_queue.put({"type": "lifespan.startup"})
await asyncio.wait_for(self.startup_event.wait(), timeout=self.startup_timeout)
- async def wait_cleanup(self) -> None:
- await self.receive_queue.put({"type": "lifespan.cleanup"})
- await asyncio.wait_for(self.cleanup_event.wait(), timeout=self.cleanup_timeout)
+ async def wait_shutdown(self) -> None:
+ await self.receive_queue.put({"type": "lifespan.shutdown"})
+ await asyncio.wait_for(
+ self.shutdown_event.wait(), timeout=self.shutdown_timeout
+ )
|
encode/starlette
|
49f76ab5e9ef0ce5d966485553ad6019a9d37da5
|
diff --git a/tests/test_applications.py b/tests/test_applications.py
index 8dc4c40..64a48c0 100644
--- a/tests/test_applications.py
+++ b/tests/test_applications.py
@@ -181,7 +181,7 @@ def test_app_add_event_handler():
cleanup_complete = True
app.add_event_handler("startup", run_startup)
- app.add_event_handler("cleanup", run_cleanup)
+ app.add_event_handler("shutdown", run_cleanup)
assert not startup_complete
assert not cleanup_complete
diff --git a/tests/test_lifespan.py b/tests/test_lifespan.py
index db4cb73..7b7372d 100644
--- a/tests/test_lifespan.py
+++ b/tests/test_lifespan.py
@@ -12,7 +12,7 @@ def test_lifespan_handler():
nonlocal startup_complete
startup_complete = True
- @handler.on_event("cleanup")
+ @handler.on_event("shutdown")
def run_cleanup():
nonlocal cleanup_complete
cleanup_complete = True
@@ -36,7 +36,7 @@ def test_async_lifespan_handler():
nonlocal startup_complete
startup_complete = True
- @handler.on_event("cleanup")
+ @handler.on_event("shutdown")
async def run_cleanup():
nonlocal cleanup_complete
cleanup_complete = True
@@ -60,7 +60,7 @@ def test_app_lifespan():
nonlocal startup_complete
startup_complete = True
- @app.on_event("cleanup")
+ @app.on_event("shutdown")
def run_cleanup():
nonlocal cleanup_complete
cleanup_complete = True
|
Support `shutdown` as a synonym for `cleanup`
* Support either `cleanup` or `shutdown` as the ASGI lifespan message name.
* Update uvicorn to move to shutdown - https://github.com/encode/uvicorn/issues/233
* Finally, after a short period of time, drop `cleanup`.
An easy PR for a contributor to jump on would be to address the first part of this: supporting either name.
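A sketch of the transitional handling, accepting either message name (simplified from the eventual patch):

```python
async def handle_lifespan_shutdown(receive, send, run_cleanup) -> None:
    message = await receive()
    # Accept either name while both are in circulation.
    assert message["type"] in ("lifespan.shutdown", "lifespan.cleanup")
    await run_cleanup()
    await send({"type": message["type"] + ".complete"})
```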
|
0.0
|
49f76ab5e9ef0ce5d966485553ad6019a9d37da5
|
[
"tests/test_applications.py::test_app_add_event_handler",
"tests/test_lifespan.py::test_lifespan_handler",
"tests/test_lifespan.py::test_async_lifespan_handler",
"tests/test_lifespan.py::test_app_lifespan"
] |
[
"tests/test_applications.py::test_func_route",
"tests/test_applications.py::test_async_route",
"tests/test_applications.py::test_class_route",
"tests/test_applications.py::test_route_kwargs",
"tests/test_applications.py::test_websocket_route",
"tests/test_applications.py::test_400",
"tests/test_applications.py::test_405",
"tests/test_applications.py::test_500",
"tests/test_applications.py::test_middleware",
"tests/test_applications.py::test_app_mount",
"tests/test_applications.py::test_app_debug"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-10-20 20:45:45+00:00
|
bsd-3-clause
| 2,130 |
|
encode__starlette-1377
|
diff --git a/starlette/staticfiles.py b/starlette/staticfiles.py
index d09630f..da10a39 100644
--- a/starlette/staticfiles.py
+++ b/starlette/staticfiles.py
@@ -3,6 +3,7 @@ import os
import stat
import typing
from email.utils import parsedate
+from pathlib import Path
import anyio
@@ -51,7 +52,7 @@ class StaticFiles:
self.all_directories = self.get_directories(directory, packages)
self.html = html
self.config_checked = False
- if check_dir and directory is not None and not os.path.isdir(directory):
+ if check_dir and directory is not None and not Path(directory).is_dir():
raise RuntimeError(f"Directory '{directory}' does not exist")
def get_directories(
@@ -77,11 +78,9 @@ class StaticFiles:
spec = importlib.util.find_spec(package)
assert spec is not None, f"Package {package!r} could not be found."
assert spec.origin is not None, f"Package {package!r} could not be found."
- package_directory = os.path.normpath(
- os.path.join(spec.origin, "..", statics_dir)
- )
- assert os.path.isdir(
- package_directory
+ package_directory = Path(spec.origin).joinpath("..", statics_dir).resolve()
+ assert (
+ package_directory.is_dir()
), f"Directory '{statics_dir!r}' in package {package!r} could not be found."
directories.append(package_directory)
@@ -101,14 +100,14 @@ class StaticFiles:
response = await self.get_response(path, scope)
await response(scope, receive, send)
- def get_path(self, scope: Scope) -> str:
+ def get_path(self, scope: Scope) -> Path:
"""
Given the ASGI scope, return the `path` string to serve up,
with OS specific path separators, and any '..', '.' components removed.
"""
- return os.path.normpath(os.path.join(*scope["path"].split("/")))
+ return Path(*scope["path"].split("/"))
- async def get_response(self, path: str, scope: Scope) -> Response:
+ async def get_response(self, path: Path, scope: Scope) -> Response:
"""
Returns an HTTP response, given the incoming path, method and request headers.
"""
@@ -131,7 +130,7 @@ class StaticFiles:
elif stat_result and stat.S_ISDIR(stat_result.st_mode) and self.html:
# We're in HTML mode, and have got a directory URL.
# Check if we have 'index.html' file to serve.
- index_path = os.path.join(path, "index.html")
+ index_path = path.joinpath("index.html")
full_path, stat_result = await anyio.to_thread.run_sync(
self.lookup_path, index_path
)
@@ -158,20 +157,25 @@ class StaticFiles:
raise HTTPException(status_code=404)
def lookup_path(
- self, path: str
- ) -> typing.Tuple[str, typing.Optional[os.stat_result]]:
+ self, path: Path
+ ) -> typing.Tuple[Path, typing.Optional[os.stat_result]]:
for directory in self.all_directories:
- full_path = os.path.realpath(os.path.join(directory, path))
- directory = os.path.realpath(directory)
- if os.path.commonprefix([full_path, directory]) != directory:
- # Don't allow misbehaving clients to break out of the static files
- # directory.
- continue
+ original_path = Path(directory).joinpath(path)
+ full_path = original_path.resolve()
+ directory = Path(directory).resolve()
try:
- return full_path, os.stat(full_path)
+ stat_result = os.lstat(original_path)
+ full_path.relative_to(directory)
+ return full_path, stat_result
+ except ValueError:
+ # Allow clients to break out of the static files directory
+ # if following symlinks.
+ if stat.S_ISLNK(stat_result.st_mode):
+ stat_result = os.lstat(full_path)
+ return full_path, stat_result
except (FileNotFoundError, NotADirectoryError):
continue
- return "", None
+ return Path(), None
def file_response(
self,
|
encode/starlette
|
d81545c71a7988cfd57c613be02f4661449c0793
|
diff --git a/tests/test_staticfiles.py b/tests/test_staticfiles.py
index 7d13a05..53f3ea9 100644
--- a/tests/test_staticfiles.py
+++ b/tests/test_staticfiles.py
@@ -166,8 +166,8 @@ def test_staticfiles_prevents_breaking_out_of_directory(tmpdir):
directory = os.path.join(tmpdir, "foo")
os.mkdir(directory)
- path = os.path.join(tmpdir, "example.txt")
- with open(path, "w") as file:
+ file_path = os.path.join(tmpdir, "example.txt")
+ with open(file_path, "w") as file:
file.write("outside root dir")
app = StaticFiles(directory=directory)
@@ -441,3 +441,28 @@ def test_staticfiles_unhandled_os_error_returns_500(
response = client.get("/example.txt")
assert response.status_code == 500
assert response.text == "Internal Server Error"
+
+
+def test_staticfiles_follows_symlinks_to_break_out_of_dir(
+ tmp_path: pathlib.Path, test_client_factory
+):
+ statics_path = tmp_path.joinpath("statics")
+ statics_path.mkdir()
+
+ symlink_path = tmp_path.joinpath("symlink")
+ symlink_path.mkdir()
+
+ symlink_file_path = symlink_path.joinpath("index.html")
+ with open(symlink_file_path, "w") as file:
+ file.write("<h1>Hello</h1>")
+
+ statics_file_path = statics_path.joinpath("index.html")
+ statics_file_path.symlink_to(symlink_file_path)
+
+ app = StaticFiles(directory=statics_path)
+ client = test_client_factory(app)
+
+ response = client.get("/index.html")
+ assert response.url == "http://testserver/index.html"
+ assert response.status_code == 200
+ assert response.text == "<h1>Hello</h1>"
|
StaticFiles middleware doesn't follow symlinks
### Checklist
- [x] The bug is reproducible against the latest release and/or `master`.
- [x] There are no similar issues or pull requests to fix it yet.
### Describe the bug
The StaticFiles middleware is checking the `os.realpath` of a file and returning a 404 for symlinks that lead outside the static directory.
### To reproduce
1. create a minimal app with a staticfiles middleware
1. put a symlink in your static directory. the link's target must be above the static directory.
1. you'll get a 404
### Expected behavior
Support symlinks in static directory.
The use case for symlinks in the static directory is to target frontend assets that are generated in file-watch mode.
### Actual behavior
404.
### Debugging material
It's happening here:
https://github.com/encode/starlette/blob/b95acea973c20eea3e7cbbca42d09b1f5d4a3412/starlette/staticfiles.py#L147-L149
### Environment
- OS: linux
- Python version: 3.7.5
- Starlette version: 0.13.8
### Additional context
I'm happy to post a PR for this if useful, ideally adding a bool param to the StaticFiles middleware that allows symlinks.
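A sketch of the lookup rule the fix above adopts: stat the unresolved path, and only accept a resolved path that escapes the directory when the entry itself is a symlink (simplified, error handling trimmed):

```python
import os
import stat
from pathlib import Path

def lookup_path(directory: Path, relative: Path):
    original = directory.joinpath(relative)
    full = original.resolve()
    try:
        stat_result = os.lstat(original)
        full.relative_to(directory.resolve())  # ValueError if outside
        return full, stat_result
    except ValueError:
        if stat.S_ISLNK(stat_result.st_mode):  # follow an explicit symlink
            return full, os.lstat(full)
    except (FileNotFoundError, NotADirectoryError):
        pass
    return None, None
```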
|
0.0
|
d81545c71a7988cfd57c613be02f4661449c0793
|
[
"tests/test_staticfiles.py::test_staticfiles_follows_symlinks_to_break_out_of_dir[asyncio]",
"tests/test_staticfiles.py::test_staticfiles_follows_symlinks_to_break_out_of_dir[trio]"
] |
[
"tests/test_staticfiles.py::test_staticfiles[asyncio]",
"tests/test_staticfiles.py::test_staticfiles[trio]",
"tests/test_staticfiles.py::test_staticfiles_with_pathlib[asyncio]",
"tests/test_staticfiles.py::test_staticfiles_with_pathlib[trio]",
"tests/test_staticfiles.py::test_staticfiles_with_package[asyncio]",
"tests/test_staticfiles.py::test_staticfiles_with_package[trio]",
"tests/test_staticfiles.py::test_staticfiles_post[asyncio]",
"tests/test_staticfiles.py::test_staticfiles_post[trio]",
"tests/test_staticfiles.py::test_staticfiles_with_directory_returns_404[asyncio]",
"tests/test_staticfiles.py::test_staticfiles_with_directory_returns_404[trio]",
"tests/test_staticfiles.py::test_staticfiles_with_missing_file_returns_404[asyncio]",
"tests/test_staticfiles.py::test_staticfiles_with_missing_file_returns_404[trio]",
"tests/test_staticfiles.py::test_staticfiles_instantiated_with_missing_directory",
"tests/test_staticfiles.py::test_staticfiles_configured_with_missing_directory[asyncio]",
"tests/test_staticfiles.py::test_staticfiles_configured_with_missing_directory[trio]",
"tests/test_staticfiles.py::test_staticfiles_configured_with_file_instead_of_directory[asyncio]",
"tests/test_staticfiles.py::test_staticfiles_configured_with_file_instead_of_directory[trio]",
"tests/test_staticfiles.py::test_staticfiles_config_check_occurs_only_once[asyncio]",
"tests/test_staticfiles.py::test_staticfiles_config_check_occurs_only_once[trio]",
"tests/test_staticfiles.py::test_staticfiles_prevents_breaking_out_of_directory",
"tests/test_staticfiles.py::test_staticfiles_304_with_etag_match[asyncio]",
"tests/test_staticfiles.py::test_staticfiles_304_with_etag_match[trio]",
"tests/test_staticfiles.py::test_staticfiles_304_with_last_modified_compare_last_req[asyncio]",
"tests/test_staticfiles.py::test_staticfiles_304_with_last_modified_compare_last_req[trio]",
"tests/test_staticfiles.py::test_staticfiles_html_normal[asyncio]",
"tests/test_staticfiles.py::test_staticfiles_html_normal[trio]",
"tests/test_staticfiles.py::test_staticfiles_html_without_index[asyncio]",
"tests/test_staticfiles.py::test_staticfiles_html_without_index[trio]",
"tests/test_staticfiles.py::test_staticfiles_html_without_404[asyncio]",
"tests/test_staticfiles.py::test_staticfiles_html_without_404[trio]",
"tests/test_staticfiles.py::test_staticfiles_html_only_files[asyncio]",
"tests/test_staticfiles.py::test_staticfiles_html_only_files[trio]",
"tests/test_staticfiles.py::test_staticfiles_cache_invalidation_for_deleted_file_html_mode[asyncio]",
"tests/test_staticfiles.py::test_staticfiles_cache_invalidation_for_deleted_file_html_mode[trio]",
"tests/test_staticfiles.py::test_staticfiles_with_missing_dir_returns_404[asyncio]",
"tests/test_staticfiles.py::test_staticfiles_with_missing_dir_returns_404[trio]",
"tests/test_staticfiles.py::test_staticfiles_access_file_as_dir_returns_404[asyncio]",
"tests/test_staticfiles.py::test_staticfiles_access_file_as_dir_returns_404[trio]",
"tests/test_staticfiles.py::test_staticfiles_unhandled_os_error_returns_500[asyncio]",
"tests/test_staticfiles.py::test_staticfiles_unhandled_os_error_returns_500[trio]"
] |
{
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-12-17 14:32:49+00:00
|
bsd-3-clause
| 2,131 |
|
encode__starlette-145
|
diff --git a/docs/staticfiles.md b/docs/staticfiles.md
index 03d9a58..f1b9d0a 100644
--- a/docs/staticfiles.md
+++ b/docs/staticfiles.md
@@ -1,20 +1,17 @@
-As well as the `FileResponse` class, Starlette also includes ASGI applications
-for serving a specific file or directory:
+Starlette also includes an `StaticFiles` class for serving a specific directory:
-* `StaticFile(path)` - Serve a single file, given by `path`.
* `StaticFiles(directory)` - Serve any files in the given `directory`.
-You can combine these ASGI applications with Starlette's routing to provide
+You can combine this ASGI application with Starlette's routing to provide
comprehensive static file serving.
```python
from starlette.routing import Router, Path, PathPrefix
-from starlette.staticfiles import StaticFile, StaticFiles
+from starlette.staticfiles import StaticFiles
app = Router(routes=[
- Path('/', app=StaticFile(path='index.html')),
PathPrefix('/static', app=StaticFiles(directory='static')),
])
```
diff --git a/starlette/formparsers.py b/starlette/formparsers.py
index 4d6580c..6d83320 100644
--- a/starlette/formparsers.py
+++ b/starlette/formparsers.py
@@ -1,9 +1,11 @@
-from enum import Enum
-from starlette.datastructures import Headers
-import asyncio
import io
-import tempfile
import typing
+import asyncio
+import tempfile
+from enum import Enum
+from urllib.parse import unquote
+
+from starlette.datastructures import Headers
try:
from multipart.multipart import parse_options_header
@@ -69,27 +71,22 @@ class FormParser:
self.messages = [] # type: typing.List[typing.Tuple[FormMessage, bytes]]
def on_field_start(self) -> None:
- print("on_field_start")
message = (FormMessage.FIELD_START, b"")
self.messages.append(message)
def on_field_name(self, data: bytes, start: int, end: int) -> None:
- print("on_field_name")
message = (FormMessage.FIELD_NAME, data[start:end])
self.messages.append(message)
def on_field_data(self, data: bytes, start: int, end: int) -> None:
- print("on_field_data")
message = (FormMessage.FIELD_DATA, data[start:end])
self.messages.append(message)
def on_field_end(self) -> None:
- print("on_field_end")
message = (FormMessage.FIELD_END, b"")
self.messages.append(message)
def on_end(self) -> None:
- print("on_end")
message = (FormMessage.END, b"")
self.messages.append(message)
@@ -127,7 +124,9 @@ class FormParser:
elif message_type == FormMessage.FIELD_DATA:
field_value += message_bytes
elif message_type == FormMessage.FIELD_END:
- result[field_name.decode("latin-1")] = field_value.decode("latin-1")
+ result[field_name.decode("latin-1")] = unquote(
+ field_value.decode("latin-1")
+ )
elif message_type == FormMessage.END:
pass
diff --git a/starlette/responses.py b/starlette/responses.py
index 3471fbf..09ac58f 100644
--- a/starlette/responses.py
+++ b/starlette/responses.py
@@ -1,15 +1,16 @@
-import hashlib
import os
-import typing
import json
-
+import stat
+import typing
+import hashlib
+import http.cookies
from email.utils import formatdate
from mimetypes import guess_type
+from urllib.parse import quote_plus
+
from starlette.background import BackgroundTask
from starlette.datastructures import MutableHeaders, URL
from starlette.types import Receive, Send
-from urllib.parse import quote_plus
-import http.cookies
try:
import aiofiles
@@ -227,8 +228,15 @@ class FileResponse(Response):
async def __call__(self, receive: Receive, send: Send) -> None:
if self.stat_result is None:
- stat_result = await aio_stat(self.path)
- self.set_stat_headers(stat_result)
+ try:
+ stat_result = await aio_stat(self.path)
+ self.set_stat_headers(stat_result)
+ except FileNotFoundError:
+ raise RuntimeError(f"File at path {self.path} does not exist.")
+ else:
+ mode = stat_result.st_mode
+ if not stat.S_ISREG(mode):
+ raise RuntimeError(f"File at path {self.path} is not a file.")
await send(
{
"type": "http.response.start",
diff --git a/starlette/staticfiles.py b/starlette/staticfiles.py
index eb8f748..b9b54cf 100644
--- a/starlette/staticfiles.py
+++ b/starlette/staticfiles.py
@@ -7,17 +7,6 @@ from starlette.responses import PlainTextResponse, FileResponse, Response
from starlette.types import Send, Receive, Scope, ASGIInstance
-class StaticFile:
- def __init__(self, *, path: str) -> None:
- self.path = path
-
- def __call__(self, scope: Scope) -> ASGIInstance:
- assert scope["type"] == "http"
- if scope["method"] not in ("GET", "HEAD"):
- return PlainTextResponse("Method Not Allowed", status_code=405)
- return _StaticFileResponder(scope, path=self.path)
-
-
class StaticFiles:
def __init__(self, *, directory: str) -> None:
self.directory = directory
@@ -39,25 +28,6 @@ class StaticFiles:
return _StaticFilesResponder(scope, path=path, check_directory=check_directory)
-class _StaticFileResponder:
- def __init__(self, scope: Scope, path: str) -> None:
- self.scope = scope
- self.path = path
-
- async def __call__(self, receive: Receive, send: Send) -> None:
- try:
- stat_result = await aio_stat(self.path)
- except FileNotFoundError:
- raise RuntimeError("StaticFile at path '%s' does not exist." % self.path)
- else:
- mode = stat_result.st_mode
- if not stat.S_ISREG(mode):
- raise RuntimeError("StaticFile at path '%s' is not a file." % self.path)
-
- response = FileResponse(self.path, stat_result=stat_result)
- await response(receive, send)
-
-
class _StaticFilesResponder:
def __init__(self, scope: Scope, path: str, check_directory: str = None) -> None:
self.scope = scope
|
encode/starlette
|
a32ea0a8e89567b24f3b8cd04847a1b01c9e98f0
|
diff --git a/tests/test_responses.py b/tests/test_responses.py
index 467b360..63d5076 100644
--- a/tests/test_responses.py
+++ b/tests/test_responses.py
@@ -9,6 +9,7 @@ from starlette.requests import Request
from starlette.testclient import TestClient
from starlette import status
import asyncio
+import pytest
import os
@@ -144,6 +145,28 @@ def test_file_response(tmpdir):
assert "etag" in response.headers
+def test_file_response_with_directory_raises_error(tmpdir):
+ def app(scope):
+ return FileResponse(path=tmpdir, filename="example.png")
+
+ client = TestClient(app)
+ with pytest.raises(RuntimeError) as exc:
+ client.get("/")
+ assert "is not a file" in str(exc)
+
+
+def test_file_response_with_missing_file_raises_error(tmpdir):
+ path = os.path.join(tmpdir, "404.txt")
+
+ def app(scope):
+ return FileResponse(path=path, filename="404.txt")
+
+ client = TestClient(app)
+ with pytest.raises(RuntimeError) as exc:
+ client.get("/")
+ assert "does not exist" in str(exc)
+
+
def test_set_cookie():
def app(scope):
async def asgi(receive, send):
diff --git a/tests/test_staticfiles.py b/tests/test_staticfiles.py
index e21ce60..bc7ef0f 100644
--- a/tests/test_staticfiles.py
+++ b/tests/test_staticfiles.py
@@ -2,63 +2,7 @@ import os
import pytest
from starlette.testclient import TestClient
-from starlette.staticfiles import StaticFile, StaticFiles
-
-
-def test_staticfile(tmpdir):
- path = os.path.join(tmpdir, "example.txt")
- with open(path, "w") as file:
- file.write("<file content>")
-
- app = StaticFile(path=path)
- client = TestClient(app)
- response = client.get("/")
- assert response.status_code == 200
- assert response.text == "<file content>"
-
-
-def test_large_staticfile(tmpdir):
- path = os.path.join(tmpdir, "example.txt")
- content = "this is a lot of content" * 200
- print("content len = ", len(content))
- with open(path, "w") as file:
- file.write(content)
-
- app = StaticFile(path=path)
- client = TestClient(app)
- response = client.get("/")
- assert response.status_code == 200
- assert len(content) == len(response.text)
- assert content == response.text
-
-
-def test_staticfile_post(tmpdir):
- path = os.path.join(tmpdir, "example.txt")
- with open(path, "w") as file:
- file.write("<file content>")
-
- app = StaticFile(path=path)
- client = TestClient(app)
- response = client.post("/")
- assert response.status_code == 405
- assert response.text == "Method Not Allowed"
-
-
-def test_staticfile_with_directory_raises_error(tmpdir):
- app = StaticFile(path=tmpdir)
- client = TestClient(app)
- with pytest.raises(RuntimeError) as exc:
- client.get("/")
- assert "is not a file" in str(exc)
-
-
-def test_staticfile_with_missing_file_raises_error(tmpdir):
- path = os.path.join(tmpdir, "404.txt")
- app = StaticFile(path=path)
- client = TestClient(app)
- with pytest.raises(RuntimeError) as exc:
- client.get("/")
- assert "does not exist" in str(exc)
+from starlette.staticfiles import StaticFiles
def test_staticfiles(tmpdir):
|
Drop `StaticFile` app.
We have `FileResponse` and `StaticFiles`.
I think that including the `StaticFile` ASGI app complicates things unnecessarily, and that we should probably remove it.
* Drop `StaticFile` app.
* Put runtime checks into `FileResponse` that the file exists and is a regular file (a sketch follows this list).
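A minimal synchronous sketch of the check in the second bullet; the patch itself runs the `stat` call asynchronously inside `FileResponse.__call__`, and the helper name here is illustrative:

```python
import os
import stat

def ensure_regular_file(path: str) -> os.stat_result:
    # Fail loudly if the path is missing or not a regular file, instead
    # of letting a low-level OS error escape mid-response.
    try:
        stat_result = os.stat(path)
    except FileNotFoundError:
        raise RuntimeError(f"File at path {path} does not exist.")
    if not stat.S_ISREG(stat_result.st_mode):
        raise RuntimeError(f"File at path {path} is not a file.")
    return stat_result
```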
|
0.0
|
a32ea0a8e89567b24f3b8cd04847a1b01c9e98f0
|
[
"tests/test_responses.py::test_file_response_with_directory_raises_error",
"tests/test_responses.py::test_file_response_with_missing_file_raises_error"
] |
[
"tests/test_responses.py::test_text_response",
"tests/test_responses.py::test_bytes_response",
"tests/test_responses.py::test_ujson_response",
"tests/test_responses.py::test_redirect_response",
"tests/test_responses.py::test_streaming_response",
"tests/test_responses.py::test_response_headers",
"tests/test_responses.py::test_response_phrase",
"tests/test_responses.py::test_file_response",
"tests/test_responses.py::test_set_cookie",
"tests/test_responses.py::test_delete_cookie",
"tests/test_staticfiles.py::test_staticfiles",
"tests/test_staticfiles.py::test_staticfiles_post",
"tests/test_staticfiles.py::test_staticfiles_with_directory_returns_404",
"tests/test_staticfiles.py::test_staticfiles_with_missing_file_returns_404",
"tests/test_staticfiles.py::test_staticfiles_configured_with_missing_directory",
"tests/test_staticfiles.py::test_staticfiles_configured_with_file_instead_of_directory",
"tests/test_staticfiles.py::test_staticfiles_config_check_occurs_only_once",
"tests/test_staticfiles.py::test_staticfiles_prevents_breaking_out_of_directory"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-10-26 18:55:36+00:00
|
bsd-3-clause
| 2,132 |
|
encode__starlette-1617
|
diff --git a/starlette/applications.py b/starlette/applications.py
index 8c51544..c3daade 100644
--- a/starlette/applications.py
+++ b/starlette/applications.py
@@ -1,10 +1,10 @@
import typing
from starlette.datastructures import State, URLPath
-from starlette.exceptions import ExceptionMiddleware
from starlette.middleware import Middleware
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.middleware.errors import ServerErrorMiddleware
+from starlette.middleware.exceptions import ExceptionMiddleware
from starlette.requests import Request
from starlette.responses import Response
from starlette.routing import BaseRoute, Router
diff --git a/starlette/exceptions.py b/starlette/exceptions.py
index 61039c5..2b5acdd 100644
--- a/starlette/exceptions.py
+++ b/starlette/exceptions.py
@@ -1,11 +1,8 @@
-import asyncio
import http
import typing
+import warnings
-from starlette.concurrency import run_in_threadpool
-from starlette.requests import Request
-from starlette.responses import PlainTextResponse, Response
-from starlette.types import ASGIApp, Message, Receive, Scope, Send
+__all__ = ("HTTPException",)
class HTTPException(Exception):
@@ -26,86 +23,22 @@ class HTTPException(Exception):
return f"{class_name}(status_code={self.status_code!r}, detail={self.detail!r})"
-class ExceptionMiddleware:
- def __init__(
- self,
- app: ASGIApp,
- handlers: typing.Optional[
- typing.Mapping[typing.Any, typing.Callable[[Request, Exception], Response]]
- ] = None,
- debug: bool = False,
- ) -> None:
- self.app = app
- self.debug = debug # TODO: We ought to handle 404 cases if debug is set.
- self._status_handlers: typing.Dict[int, typing.Callable] = {}
- self._exception_handlers: typing.Dict[
- typing.Type[Exception], typing.Callable
- ] = {HTTPException: self.http_exception}
- if handlers is not None:
- for key, value in handlers.items():
- self.add_exception_handler(key, value)
-
- def add_exception_handler(
- self,
- exc_class_or_status_code: typing.Union[int, typing.Type[Exception]],
- handler: typing.Callable[[Request, Exception], Response],
- ) -> None:
- if isinstance(exc_class_or_status_code, int):
- self._status_handlers[exc_class_or_status_code] = handler
- else:
- assert issubclass(exc_class_or_status_code, Exception)
- self._exception_handlers[exc_class_or_status_code] = handler
-
- def _lookup_exception_handler(
- self, exc: Exception
- ) -> typing.Optional[typing.Callable]:
- for cls in type(exc).__mro__:
- if cls in self._exception_handlers:
- return self._exception_handlers[cls]
- return None
-
- async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
- if scope["type"] != "http":
- await self.app(scope, receive, send)
- return
+__deprecated__ = "ExceptionMiddleware"
- response_started = False
- async def sender(message: Message) -> None:
- nonlocal response_started
+def __getattr__(name: str) -> typing.Any: # pragma: no cover
+ if name == __deprecated__:
+ from starlette.middleware.exceptions import ExceptionMiddleware
- if message["type"] == "http.response.start":
- response_started = True
- await send(message)
-
- try:
- await self.app(scope, receive, sender)
- except Exception as exc:
- handler = None
-
- if isinstance(exc, HTTPException):
- handler = self._status_handlers.get(exc.status_code)
-
- if handler is None:
- handler = self._lookup_exception_handler(exc)
-
- if handler is None:
- raise exc
-
- if response_started:
- msg = "Caught handled exception, but response already started."
- raise RuntimeError(msg) from exc
+ warnings.warn(
+ f"{__deprecated__} is deprecated on `starlette.exceptions`. "
+ f"Import it from `starlette.middleware.exceptions` instead.",
+ category=DeprecationWarning,
+ stacklevel=3,
+ )
+ return ExceptionMiddleware
+ raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
- request = Request(scope, receive=receive)
- if asyncio.iscoroutinefunction(handler):
- response = await handler(request, exc)
- else:
- response = await run_in_threadpool(handler, request, exc)
- await response(scope, receive, sender)
- def http_exception(self, request: Request, exc: HTTPException) -> Response:
- if exc.status_code in {204, 304}:
- return Response(status_code=exc.status_code, headers=exc.headers)
- return PlainTextResponse(
- exc.detail, status_code=exc.status_code, headers=exc.headers
- )
+def __dir__() -> typing.List[str]:
+ return sorted(list(__all__) + [__deprecated__]) # pragma: no cover
diff --git a/starlette/formparsers.py b/starlette/formparsers.py
index fd19492..4cde71b 100644
--- a/starlette/formparsers.py
+++ b/starlette/formparsers.py
@@ -38,6 +38,11 @@ def _user_safe_decode(src: bytes, codec: str) -> str:
return src.decode("latin-1")
+class MultiPartException(Exception):
+ def __init__(self, message: str) -> None:
+ self.message = message
+
+
class FormParser:
def __init__(
self, headers: Headers, stream: typing.AsyncGenerator[bytes, None]
@@ -159,7 +164,10 @@ class MultiPartParser:
charset = params.get(b"charset", "utf-8")
if type(charset) == bytes:
charset = charset.decode("latin-1")
- boundary = params[b"boundary"]
+ try:
+ boundary = params[b"boundary"]
+ except KeyError:
+ raise MultiPartException("Missing boundary in multipart.")
# Callbacks dictionary.
callbacks = {
diff --git a/starlette/middleware/exceptions.py b/starlette/middleware/exceptions.py
new file mode 100644
index 0000000..a3b4633
--- /dev/null
+++ b/starlette/middleware/exceptions.py
@@ -0,0 +1,93 @@
+import asyncio
+import typing
+
+from starlette.concurrency import run_in_threadpool
+from starlette.exceptions import HTTPException
+from starlette.requests import Request
+from starlette.responses import PlainTextResponse, Response
+from starlette.types import ASGIApp, Message, Receive, Scope, Send
+
+
+class ExceptionMiddleware:
+ def __init__(
+ self,
+ app: ASGIApp,
+ handlers: typing.Optional[
+ typing.Mapping[typing.Any, typing.Callable[[Request, Exception], Response]]
+ ] = None,
+ debug: bool = False,
+ ) -> None:
+ self.app = app
+ self.debug = debug # TODO: We ought to handle 404 cases if debug is set.
+ self._status_handlers: typing.Dict[int, typing.Callable] = {}
+ self._exception_handlers: typing.Dict[
+ typing.Type[Exception], typing.Callable
+ ] = {HTTPException: self.http_exception}
+ if handlers is not None:
+ for key, value in handlers.items():
+ self.add_exception_handler(key, value)
+
+ def add_exception_handler(
+ self,
+ exc_class_or_status_code: typing.Union[int, typing.Type[Exception]],
+ handler: typing.Callable[[Request, Exception], Response],
+ ) -> None:
+ if isinstance(exc_class_or_status_code, int):
+ self._status_handlers[exc_class_or_status_code] = handler
+ else:
+ assert issubclass(exc_class_or_status_code, Exception)
+ self._exception_handlers[exc_class_or_status_code] = handler
+
+ def _lookup_exception_handler(
+ self, exc: Exception
+ ) -> typing.Optional[typing.Callable]:
+ for cls in type(exc).__mro__:
+ if cls in self._exception_handlers:
+ return self._exception_handlers[cls]
+ return None
+
+ async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
+ if scope["type"] != "http":
+ await self.app(scope, receive, send)
+ return
+
+ response_started = False
+
+ async def sender(message: Message) -> None:
+ nonlocal response_started
+
+ if message["type"] == "http.response.start":
+ response_started = True
+ await send(message)
+
+ try:
+ await self.app(scope, receive, sender)
+ except Exception as exc:
+ handler = None
+
+ if isinstance(exc, HTTPException):
+ handler = self._status_handlers.get(exc.status_code)
+
+ if handler is None:
+ handler = self._lookup_exception_handler(exc)
+
+ if handler is None:
+ raise exc
+
+ if response_started:
+ msg = "Caught handled exception, but response already started."
+ raise RuntimeError(msg) from exc
+
+ request = Request(scope, receive=receive)
+ if asyncio.iscoroutinefunction(handler):
+ response = await handler(request, exc)
+ else:
+ response = await run_in_threadpool(handler, request, exc)
+ await response(scope, receive, sender)
+
+ def http_exception(self, request: Request, exc: HTTPException) -> Response:
+ if exc.status_code in {204, 304}:
+ return Response(status_code=exc.status_code, headers=exc.headers)
+ return PlainTextResponse(
+ exc.detail, status_code=exc.status_code, headers=exc.headers
+ )
diff --git a/starlette/requests.py b/starlette/requests.py
index c738eba..66c510c 100644
--- a/starlette/requests.py
+++ b/starlette/requests.py
@@ -6,7 +6,8 @@ from http import cookies as http_cookies
import anyio
from starlette.datastructures import URL, Address, FormData, Headers, QueryParams, State
-from starlette.formparsers import FormParser, MultiPartParser
+from starlette.exceptions import HTTPException
+from starlette.formparsers import FormParser, MultiPartException, MultiPartParser
from starlette.types import Message, Receive, Scope, Send
try:
@@ -250,8 +251,13 @@ class Request(HTTPConnection):
content_type_header = self.headers.get("Content-Type")
content_type, options = parse_options_header(content_type_header)
if content_type == b"multipart/form-data":
- multipart_parser = MultiPartParser(self.headers, self.stream())
- self._form = await multipart_parser.parse()
+ try:
+ multipart_parser = MultiPartParser(self.headers, self.stream())
+ self._form = await multipart_parser.parse()
+ except MultiPartException as exc:
+ if "app" in self.scope:
+ raise HTTPException(status_code=400, detail=exc.message)
+ raise exc
elif content_type == b"application/x-www-form-urlencoded":
form_parser = FormParser(self.headers, self.stream())
self._form = await form_parser.parse()
|
encode/starlette
|
621abc747a6604825190b93467918a0ec6456a24
|
diff --git a/tests/test_exceptions.py b/tests/test_exceptions.py
index 50f6774..9acd421 100644
--- a/tests/test_exceptions.py
+++ b/tests/test_exceptions.py
@@ -1,6 +1,9 @@
+import warnings
+
import pytest
-from starlette.exceptions import ExceptionMiddleware, HTTPException
+from starlette.exceptions import HTTPException
+from starlette.middleware.exceptions import ExceptionMiddleware
from starlette.responses import PlainTextResponse
from starlette.routing import Route, Router, WebSocketRoute
@@ -130,3 +133,16 @@ def test_repr():
assert repr(CustomHTTPException(500, detail="Something custom")) == (
"CustomHTTPException(status_code=500, detail='Something custom')"
)
+
+
+def test_exception_middleware_deprecation() -> None:
+ # this test should be removed once the deprecation shim is removed
+ with pytest.warns(DeprecationWarning):
+ from starlette.exceptions import ExceptionMiddleware # noqa: F401
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("error")
+ import starlette.exceptions
+
+ with pytest.warns(DeprecationWarning):
+ starlette.exceptions.ExceptionMiddleware
diff --git a/tests/test_formparsers.py b/tests/test_formparsers.py
index 3d4b0a1..6710595 100644
--- a/tests/test_formparsers.py
+++ b/tests/test_formparsers.py
@@ -1,11 +1,15 @@
import os
import typing
+from contextlib import nullcontext as does_not_raise
import pytest
-from starlette.formparsers import UploadFile, _user_safe_decode
+from starlette.applications import Starlette
+from starlette.formparsers import MultiPartException, UploadFile, _user_safe_decode
from starlette.requests import Request
from starlette.responses import JSONResponse
+from starlette.routing import Mount
+from starlette.testclient import TestClient
class ForceMultipartDict(dict):
@@ -390,10 +394,19 @@ def test_user_safe_decode_ignores_wrong_charset():
assert result == "abc"
-def test_missing_boundary_parameter(test_client_factory):
+@pytest.mark.parametrize(
+ "app,expectation",
+ [
+ (app, pytest.raises(MultiPartException)),
+ (Starlette(routes=[Mount("/", app=app)]), does_not_raise()),
+ ],
+)
+def test_missing_boundary_parameter(
+ app, expectation, test_client_factory: typing.Callable[..., TestClient]
+) -> None:
client = test_client_factory(app)
- with pytest.raises(KeyError, match="boundary"):
- client.post(
+ with expectation:
+ res = client.post(
"/",
data=(
# file
@@ -403,3 +416,5 @@ def test_missing_boundary_parameter(test_client_factory):
),
headers={"Content-Type": "multipart/form-data; charset=utf-8"},
)
+ assert res.status_code == 400
+ assert res.text == "Missing boundary in multipart."
|
Send 400 on missing `boundary`
I did some research on this PR to understand whether the issue was reasonable.
Here's what I've found out:
- Flask raises `ValueError` when the `boundary` is not found: https://github.com/pallets/werkzeug/blob/dae7e0d06651e54a74f04e1cf24d806c4e2e9be9/src/werkzeug/formparser.py#L270-L291
- Django raises a custom exception: https://github.com/django/django/blob/abfdb4d7f384fb06ed9b7ca37b548542df7b5dda/django/http/multipartparser.py#L79-L83
- Django also verifies that the `boundary` is valid with https://github.com/python/cpython/blob/27ee43183437c473725eba00def0ea7647688926/Lib/cgi.py#L991-L997
- After that raise, Django catches that exception and raises a 400: https://github.com/django/django/blob/abfdb4d7f384fb06ed9b7ca37b548542df7b5dda/django/core/handlers/exception.py#L84-L94
On Starlette, we raise a `KeyError` with "boundary key missing". Although this PR follows the issue that was raised, and it is a real improvement, I think we should follow Django's lead and also return a 400 response in this case (a sketch follows below).
_Originally posted by @Kludex in https://github.com/encode/starlette/issues/1544#issuecomment-1080197920_
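A minimal sketch of that approach, mirroring the patch above: the parser raises a domain-specific exception instead of a bare `KeyError`, and the request layer can then map it to a 400 response. The `extract_boundary` helper is illustrative:

```python
class MultiPartException(Exception):
    def __init__(self, message: str) -> None:
        self.message = message

def extract_boundary(params: dict) -> bytes:
    # Translate the missing-boundary KeyError into an exception the
    # request layer can catch and turn into an HTTP 400.
    try:
        return params[b"boundary"]
    except KeyError:
        raise MultiPartException("Missing boundary in multipart.")
```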
|
0.0
|
621abc747a6604825190b93467918a0ec6456a24
|
[
"tests/test_exceptions.py::test_repr",
"tests/test_exceptions.py::test_exception_middleware_deprecation",
"tests/test_formparsers.py::test_user_safe_decode_helper",
"tests/test_formparsers.py::test_user_safe_decode_ignores_wrong_charset"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-05-03 05:53:48+00:00
|
bsd-3-clause
| 2,133 |
|
encode__starlette-173
|
diff --git a/starlette/routing.py b/starlette/routing.py
index 9f29d99..a2b851f 100644
--- a/starlette/routing.py
+++ b/starlette/routing.py
@@ -8,7 +8,7 @@ from enum import Enum
from starlette.datastructures import URL, URLPath
from starlette.exceptions import HTTPException
from starlette.requests import Request
-from starlette.responses import PlainTextResponse
+from starlette.responses import PlainTextResponse, RedirectResponse
from starlette.types import ASGIApp, ASGIInstance, Receive, Scope, Send
from starlette.websockets import WebSocket, WebSocketClose
@@ -72,7 +72,9 @@ def get_name(endpoint: typing.Callable) -> str:
return endpoint.__class__.__name__
-def replace_params(path: str, **path_params: str) -> typing.Tuple[str, dict]:
+def replace_params(
+ path: str, path_params: typing.Dict[str, str]
+) -> typing.Tuple[str, dict]:
for key, value in list(path_params.items()):
if "{" + key + "}" in path:
path_params.pop(key)
@@ -95,14 +97,16 @@ class Route(BaseRoute):
def __init__(
self,
path: str,
- *,
endpoint: typing.Callable,
+ *,
methods: typing.List[str] = None,
+ name: str = None,
include_in_schema: bool = True
) -> None:
+ assert path.startswith("/"), "Routed paths must always start '/'"
self.path = path
self.endpoint = endpoint
- self.name = get_name(endpoint)
+ self.name = get_name(endpoint) if name is None else name
self.include_in_schema = include_in_schema
if inspect.isfunction(endpoint) or inspect.ismethod(endpoint):
@@ -137,7 +141,7 @@ class Route(BaseRoute):
def url_path_for(self, name: str, **path_params: str) -> URLPath:
if name != self.name or self.param_names != set(path_params.keys()):
raise NoMatchFound()
- path, remaining_params = replace_params(self.path, **path_params)
+ path, remaining_params = replace_params(self.path, path_params)
assert not remaining_params
return URLPath(path=path, protocol="http")
@@ -158,10 +162,13 @@ class Route(BaseRoute):
class WebSocketRoute(BaseRoute):
- def __init__(self, path: str, *, endpoint: typing.Callable) -> None:
+ def __init__(
+ self, path: str, endpoint: typing.Callable, *, name: str = None
+ ) -> None:
+ assert path.startswith("/"), "Routed paths must always start '/'"
self.path = path
self.endpoint = endpoint
- self.name = get_name(endpoint)
+ self.name = get_name(endpoint) if name is None else name
if inspect.isfunction(endpoint) or inspect.ismethod(endpoint):
# Endpoint is function or method. Treat it as `func(websocket)`.
@@ -189,7 +196,7 @@ class WebSocketRoute(BaseRoute):
def url_path_for(self, name: str, **path_params: str) -> URLPath:
if name != self.name or self.param_names != set(path_params.keys()):
raise NoMatchFound()
- path, remaining_params = replace_params(self.path, **path_params)
+ path, remaining_params = replace_params(self.path, path_params)
assert not remaining_params
return URLPath(path=path, protocol="websocket")
@@ -205,12 +212,14 @@ class WebSocketRoute(BaseRoute):
class Mount(BaseRoute):
- def __init__(self, path: str, app: ASGIApp) -> None:
- self.path = path
+ def __init__(self, path: str, app: ASGIApp, name: str = None) -> None:
+ assert path == "" or path.startswith("/"), "Routed paths must always start '/'"
+ self.path = path.rstrip("/")
self.app = app
- regex = "^" + path
+ regex = "^" + self.path + "(?P<path>/.*)$"
regex = re.sub("{([a-zA-Z_][a-zA-Z0-9_]*)}", r"(?P<\1>[^/]*)", regex)
self.path_regex = re.compile(regex)
+ self.name = name
@property
def routes(self) -> typing.List[BaseRoute]:
@@ -219,23 +228,40 @@ class Mount(BaseRoute):
def matches(self, scope: Scope) -> typing.Tuple[Match, Scope]:
match = self.path_regex.match(scope["path"])
if match:
+ matched_params = match.groupdict()
+ matched_path = matched_params.pop("path")
path_params = dict(scope.get("path_params", {}))
- path_params.update(match.groupdict())
+ path_params.update(matched_params)
child_scope = dict(scope)
child_scope["path_params"] = path_params
- child_scope["root_path"] = scope.get("root_path", "") + match.string
- child_scope["path"] = scope["path"][match.span()[1] :]
+ child_scope["root_path"] = (
+ scope.get("root_path", "") + scope["path"][: -len(matched_path)]
+ )
+ child_scope["path"] = matched_path
return Match.FULL, child_scope
return Match.NONE, {}
def url_path_for(self, name: str, **path_params: str) -> URLPath:
- path, remaining_params = replace_params(self.path, **path_params)
- for route in self.routes or []:
- try:
- url = route.url_path_for(name, **remaining_params)
- return URLPath(path=path + str(url), protocol=url.protocol)
- except NoMatchFound as exc:
- pass
+ if self.name is not None and name == self.name and "path" in path_params:
+ # 'name' matches "<mount_name>".
+ path_params["path"] = path_params["path"].lstrip("/")
+ path, remaining_params = replace_params(self.path + "/{path}", path_params)
+ if not remaining_params:
+ return URLPath(path=path, protocol="http")
+ elif self.name is None or name.startswith(self.name + ":"):
+ if self.name is None:
+ # No mount name.
+ remaining_name = name
+ else:
+ # 'name' matches "<mount_name>:<child_name>".
+ remaining_name = name[len(self.name) + 1 :]
+ path, remaining_params = replace_params(self.path, path_params)
+ for route in self.routes or []:
+ try:
+ url = route.url_path_for(remaining_name, **remaining_params)
+ return URLPath(path=path + str(url), protocol=url.protocol)
+ except NoMatchFound as exc:
+ pass
raise NoMatchFound()
def __call__(self, scope: Scope) -> ASGIInstance:
@@ -251,9 +277,13 @@ class Mount(BaseRoute):
class Router:
def __init__(
- self, routes: typing.List[BaseRoute] = None, default: ASGIApp = None
+ self,
+ routes: typing.List[BaseRoute] = None,
+ redirect_slashes: bool = True,
+ default: ASGIApp = None,
) -> None:
self.routes = [] if routes is None else routes
+ self.redirect_slashes = redirect_slashes
self.default = self.not_found if default is None else default
def mount(self, path: str, app: ASGIApp) -> None:
@@ -337,6 +367,17 @@ class Router:
if partial is not None:
return partial(partial_scope)
+
+ if self.redirect_slashes and not scope["path"].endswith("/"):
+ redirect_scope = dict(scope)
+ redirect_scope["path"] += "/"
+
+ for route in self.routes:
+ match, child_scope = route.matches(redirect_scope)
+ if match != Match.NONE:
+ redirect_url = URL(scope=redirect_scope)
+ return RedirectResponse(url=str(redirect_url))
+
return self.default(scope)
def __eq__(self, other: typing.Any) -> bool:
|
encode/starlette
|
ed970c86be89a497e8082429726ce94dedcc6c0e
|
diff --git a/tests/test_routing.py b/tests/test_routing.py
index 6988b9f..ee2a7f7 100644
--- a/tests/test_routing.py
+++ b/tests/test_routing.py
@@ -1,7 +1,7 @@
import pytest
from starlette.exceptions import ExceptionMiddleware
-from starlette.responses import Response
+from starlette.responses import PlainTextResponse, Response
from starlette.routing import Mount, NoMatchFound, Route, Router, WebSocketRoute
from starlette.testclient import TestClient
from starlette.websockets import WebSocket, WebSocketDisconnect
@@ -30,7 +30,7 @@ app = Router(
Mount(
"/users",
app=Router(
- [Route("", endpoint=users), Route("/{username}", endpoint=user)]
+ [Route("/", endpoint=users), Route("/{username}", endpoint=user)]
),
),
Mount("/static", app=staticfiles),
@@ -176,3 +176,32 @@ def test_protocol_switch():
with pytest.raises(WebSocketDisconnect):
client.websocket_connect("/404")
+
+
+def ok(request):
+ return PlainTextResponse("OK")
+
+
+def test_mount_urls():
+ mounted = Router([Mount("/users", ok, name="users")])
+ client = TestClient(mounted)
+ assert client.get("/users").status_code == 200
+ assert client.get("/users").url == "http://testserver/users/"
+ assert client.get("/users/").status_code == 200
+ assert client.get("/users/a").status_code == 200
+ assert client.get("/usersa").status_code == 404
+
+
+def test_reverse_mount_urls():
+ mounted = Router([Mount("/users", ok, name="users")])
+ assert mounted.url_path_for("users", path="/a") == "/users/a"
+
+ users = Router([Route("/{username}", ok, name="user")])
+ mounted = Router([Mount("/{subpath}/users", users, name="users")])
+ assert (
+ mounted.url_path_for("users:user", subpath="test", username="tom")
+ == "/test/users/tom"
+ )
+ assert (
+ mounted.url_path_for("users", subpath="test", path="/tom") == "/test/users/tom"
+ )
|
[question] how to mount a router into a Starlette app? `app.mount('/', router)` doesn't work
Example:
```python
from starlette.applications import Starlette
from starlette.middleware.cors import CORSMiddleware
from starlette.requests import Request
from starlette.routing import Router, Route
import uvicorn
from views import FooView
app = Starlette(debug=True)
# need middleware, so I use Starlette app
app.add_middleware(
CORSMiddleware, allow_origins=["*"], allow_headers=["*"], allow_methods=["*"]
)
router = Router([
Route('/foo', endpoint=FooView),
])
app.mount('/', router) # << doesn't work for some reason
if __name__ == '__main__':
uvicorn.run(app, host='0.0.0.0', port=4000)
```
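A minimal sketch of the mounted-router setup this fix enables, assuming the post-fix `Mount` path handling; the view function and route path are illustrative:

```python
from starlette.applications import Starlette
from starlette.responses import PlainTextResponse
from starlette.routing import Route, Router

def foo(request):
    return PlainTextResponse("foo")

app = Starlette()
# Mounting at "/" now works: the mount path is normalized and the
# remainder of the request path is forwarded to the inner router.
app.mount("/", Router([Route("/foo", endpoint=foo)]))
```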
|
0.0
|
ed970c86be89a497e8082429726ce94dedcc6c0e
|
[
"tests/test_routing.py::test_router",
"tests/test_routing.py::test_mount_urls",
"tests/test_routing.py::test_reverse_mount_urls"
] |
[
"tests/test_routing.py::test_url_path_for",
"tests/test_routing.py::test_url_for",
"tests/test_routing.py::test_router_add_route",
"tests/test_routing.py::test_router_duplicate_path",
"tests/test_routing.py::test_router_add_websocket_route",
"tests/test_routing.py::test_protocol_switch"
] |
{
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-11-01 16:43:44+00:00
|
bsd-3-clause
| 2,134 |