instance_id (string, 10–57 chars) | patch (string, 261–37.7k chars) | repo (string, 7–53 chars) | base_commit (string, 40 chars) | hints_text (string, 301 distinct values) | test_patch (string, 212–2.22M chars) | problem_statement (string, 23–37.7k chars) | version (string, 1 distinct value) | environment_setup_commit (string, 40 chars) | FAIL_TO_PASS (list, 1–4.94k items) | PASS_TO_PASS (list, 0–7.82k items) | meta (dict) | created_at (string, 25 chars) | license (string, 8 distinct values) | __index_level_0__ (int64, 0–6.41k)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
kinnala__scikit-fem-754
|
diff --git a/README.md b/README.md
index db185e4c..06378b42 100644
--- a/README.md
+++ b/README.md
@@ -279,6 +279,7 @@ with respect to documented and/or tested features.
- Added: `ElementTri15ParamPlate`, 15-parameter nonconforming triangular element for plates
- Added: `ElementTriBDM1`, the lowest order Brezzi-Douglas-Marini element
- Added: `Mesh.draw().show()` will now visualize any mesh interactively (requires [vedo](https://vedo.embl.es/))
+- Added: Adaptive refinement for `MeshTet1`
- Fixed: `MappingIsoparametric` is now about 2x faster for large meshes thanks
to additional caching
- Fixed: `MeshHex2.save` did not work properly
diff --git a/skfem/mesh/mesh.py b/skfem/mesh/mesh.py
index 7b880736..bb9852e8 100644
--- a/skfem/mesh/mesh.py
+++ b/skfem/mesh/mesh.py
@@ -433,12 +433,30 @@ class Mesh:
]
raise NotImplementedError
+ def is_valid(self) -> bool:
+ """Perform some mesh validation checks."""
+ # check that there are no duplicate points
+ tmp = np.ascontiguousarray(self.p.T)
+ if self.p.shape[1] != np.unique(tmp.view([('', tmp.dtype)]
+ * tmp.shape[1])).shape[0]:
+ warn("Mesh contains duplicate vertices.")
+ return False
+
+ # check that all points are at least in some element
+ if len(np.setdiff1d(np.arange(self.p.shape[1]),
+ np.unique(self.t))) > 0:
+ warn("Mesh contains a vertex not belonging to any element.")
+ return False
+
+ return True
+
def __add__(self, other):
"""Join two meshes."""
cls = type(self)
if not isinstance(other, cls):
raise TypeError("Can only join meshes with same type.")
- p = np.hstack((self.p, other.p))
+ p = np.hstack((self.p.round(decimals=8),
+ other.p.round(decimals=8)))
t = np.hstack((self.t, other.t + self.p.shape[1]))
tmp = np.ascontiguousarray(p.T)
tmp, ixa, ixb = np.unique(tmp.view([('', tmp.dtype)] * tmp.shape[1]),
diff --git a/skfem/mesh/mesh_tet_1.py b/skfem/mesh/mesh_tet_1.py
index f7f75bbd..1d93ec53 100644
--- a/skfem/mesh/mesh_tet_1.py
+++ b/skfem/mesh/mesh_tet_1.py
@@ -117,6 +117,163 @@ class MeshTet1(Mesh3D):
_subdomains=None,
)
+ @staticmethod
+ def _adaptive_sort_mesh(p, t, marked):
+ """Make (0, 1) the longest edge in t for marked."""
+
+ # add noise so that there are no edges with the same length
+ np.random.seed(1337)
+ p = p.copy() + 1e-5 * np.random.random(p.shape)
+
+ l01 = np.sqrt(np.sum((p[:, t[0, marked]] - p[:, t[1, marked]]) ** 2,
+ axis=0))
+ l12 = np.sqrt(np.sum((p[:, t[1, marked]] - p[:, t[2, marked]]) ** 2,
+ axis=0))
+ l02 = np.sqrt(np.sum((p[:, t[0, marked]] - p[:, t[2, marked]]) ** 2,
+ axis=0))
+ l03 = np.sqrt(np.sum((p[:, t[0, marked]] - p[:, t[3, marked]]) ** 2,
+ axis=0))
+ l13 = np.sqrt(np.sum((p[:, t[1, marked]] - p[:, t[3, marked]]) ** 2,
+ axis=0))
+ l23 = np.sqrt(np.sum((p[:, t[2, marked]] - p[:, t[3, marked]]) ** 2,
+ axis=0))
+
+ # indices where (1, 2) is the longest etc.
+ ix12 = ((l12 > l01)
+ * (l12 > l02)
+ * (l12 > l03)
+ * (l12 > l13)
+ * (l12 > l23))
+ ix02 = ((l02 > l01)
+ * (l02 > l12)
+ * (l02 > l03)
+ * (l02 > l13)
+ * (l02 > l23))
+ ix03 = ((l03 > l01)
+ * (l03 > l12)
+ * (l03 > l02)
+ * (l03 > l13)
+ * (l03 > l23))
+ ix13 = ((l13 > l01)
+ * (l13 > l12)
+ * (l13 > l02)
+ * (l13 > l03)
+ * (l13 > l23))
+ ix23 = ((l23 > l01)
+ * (l23 > l12)
+ * (l23 > l02)
+ * (l23 > l03)
+ * (l23 > l13))
+
+ # flip edges
+ T = t.copy()
+ T[:, marked[ix02]] = t[:, marked[ix02]][[2, 0, 1, 3]]
+ T[:, marked[ix03]] = t[:, marked[ix03]][[0, 3, 1, 2]]
+ T[:, marked[ix12]] = t[:, marked[ix12]][[1, 2, 0, 3]]
+ T[:, marked[ix13]] = t[:, marked[ix13]][[1, 3, 2, 0]]
+ T[:, marked[ix23]] = t[:, marked[ix23]][[3, 2, 1, 0]]
+
+ return T
+
+ def _find_nz(self, rows, cols, shape, transform=None):
+ """Find nonzero entries from the incidence matrix after transform."""
+ from scipy.sparse import coo_matrix, find
+ rows = rows.flatten('C')
+ cols = cols.flatten('C')
+ inc = coo_matrix((np.ones(len(rows)), (rows, cols)),
+ shape=shape).tocsr()
+ if transform is not None:
+ inc = transform(inc)
+ inc.eliminate_zeros()
+ return find(inc)[:2]
+
+ def _adaptive(self, marked):
+ """Longest edge bisection."""
+ if isinstance(marked, list):
+ marked = np.array(marked, dtype=np.int64)
+ nt = self.t.shape[1]
+ nv = self.p.shape[1]
+ p = np.zeros((3, 9 * nv), dtype=np.float64)
+ t = np.zeros((4, 4 * nt), dtype=np.int64)
+ p[:, :self.p.shape[1]] = self.p.copy()
+ t[:, :self.t.shape[1]] = self.t.copy()
+
+ gen = np.zeros(nv + 6 * nt, dtype=np.int8)
+ nonconf = np.ones(8 * nv, dtype=np.int8)
+ split_edge = np.zeros((3, 8 * nv), dtype=np.int64)
+ ns = 0
+
+ while len(marked) > 0:
+ nm = len(marked)
+ tnew = np.zeros(nm, dtype=np.int64)
+ ix = np.arange(nm, dtype=np.int64)
+ t = self._adaptive_sort_mesh(p, t, marked)
+ t0, t1, t2, t3 = t[:, marked]
+
+ if ns > 0:
+ nonconf_edge = np.nonzero(nonconf[:ns])[0]
+ i, j = self._find_nz(
+ split_edge[:2, nonconf_edge],
+ np.vstack((split_edge[2, nonconf_edge],) * 2),
+ (nv, nv),
+ lambda I: I[t0].multiply(I[t1])
+ )
+ tnew[i] = j
+ ix = np.nonzero(tnew == 0)[0]
+
+ if len(ix) > 0:
+ i, j = self._find_nz(
+ *np.sort(np.vstack((t0[ix], t1[ix])), axis=0),
+ (nv, nv),
+ )
+ nn = len(i)
+ nix = slice(ns, ns + nn)
+ split_edge[0, nix] = i
+ split_edge[1, nix] = j
+ split_edge[2, nix] = np.arange(nv, nv + nn, dtype=np.int64)
+
+ # add new points
+ p[:, nv:(nv + nn)] = .5 * (p[:, i] + p[:, j])
+ nv += nn
+ i, j = self._find_nz(
+ split_edge[:2, nix],
+ np.vstack((split_edge[2, nix],) * 2),
+ (nv, nv),
+ lambda I: I[t0].multiply(I[t1])
+ )
+ tnew[i] = j
+ ns += nn
+
+ ix = np.nonzero(gen[tnew] == 0)[0]
+ gen[tnew[ix]] = np.max(gen[t[:, marked[ix]]], axis=0) + 1
+
+ # add new elements
+ t[:, marked] = np.vstack((t3, t0, t2, tnew))
+ t[:, nt:(nt + nm)] = np.vstack((t2, t1, t3, tnew))
+ nt += nm
+
+ check = np.nonzero(nonconf[:ns])[0]
+ nonconf[check] = 0
+ check_node = np.zeros(nv, dtype=np.int64)
+ check_node[split_edge[:2, check]] = 1
+ check_elem = np.nonzero(check_node[t[:, :nt]].sum(axis=0))[0]
+
+ i, j = self._find_nz(
+ t[:, check_elem],
+ np.vstack((check_elem,) * 4),
+ (nv, nt),
+ lambda I: (I[split_edge[0, check]]
+ .multiply(I[split_edge[1, check]]))
+ )
+ nonconf[check[i]] = 1
+ marked = np.unique(j)
+
+ return replace(
+ self,
+ doflocs=p[:, :nv],
+ t=t[:, :nt],
+ )
+
@classmethod
def init_tensor(cls: Type,
x: ndarray,
|
kinnala/scikit-fem
|
9241274eaa94f860110ed1369c6fdab2bb5a94dc
|
diff --git a/tests/test_mesh.py b/tests/test_mesh.py
index 516bc581..b3219ba4 100644
--- a/tests/test_mesh.py
+++ b/tests/test_mesh.py
@@ -9,6 +9,9 @@ from numpy.testing import assert_array_equal, assert_almost_equal
from skfem.mesh import (Mesh, MeshHex, MeshLine, MeshQuad, MeshTet, MeshTri,
MeshTri2, MeshQuad2, MeshTet2, MeshHex2, MeshLine1DG,
MeshQuad1DG, MeshHex2, MeshTri1DG)
+from skfem.assembly import Basis, LinearForm
+from skfem.element import ElementTetP1
+from skfem.utils import projection
from skfem.io.meshio import to_meshio, from_meshio
from skfem.io.json import to_dict, from_dict
@@ -202,6 +205,79 @@ class TestAdaptiveSplitting2D(TestCase):
self.assertEqual(prev_p_size, m.p.shape[1] - 3)
+class TestAdaptiveSplitting3D(TestCase):
+
+ def runTest(self):
+
+ m = MeshTet()
+ for itr in range(10):
+ m = m.refined([itr, itr + 1, itr + 2])
+ assert m.is_valid()
+
+ m = MeshTet()
+ for itr in range(10):
+ m = m.refined([itr, itr + 1])
+ assert m.is_valid()
+
+ m = MeshTet()
+ for itr in range(50):
+ m = m.refined([itr])
+ assert m.is_valid()
+
+ m = MeshTet()
+ for itr in range(5):
+ m = m.refined(np.arange(m.nelements, dtype=np.int64))
+ assert m.is_valid()
+
+ # adaptively refine one face of a cube, check that the mesh parameter h
+ # is approximately linear w.r.t to distance from the face
+ m = MeshTet.init_tensor(np.linspace(0, 1, 3),
+ np.linspace(0, 1, 3),
+ np.linspace(0, 1, 3))
+
+ for itr in range(15):
+ m = m.refined(m.f2t[0, m.facets_satisfying(lambda x: x[0] == 0)])
+
+ @LinearForm
+ def hproj(v, w):
+ return w.h * v
+
+ basis = Basis(m, ElementTetP1())
+ h = projection(hproj, basis)
+
+ funh = basis.interpolator(h)
+
+ xs = np.vstack((
+ np.linspace(0, .5, 20),
+ np.zeros(20) + .5,
+ np.zeros(20) + .5,
+ ))
+ hs = funh(xs)
+
+ assert np.max(np.abs(hs - xs[0])) < 0.063
+
+ # check that the same mesh is reproduced by any future versions
+ m = MeshTet.init_tensor(np.linspace(0, 1, 2),
+ np.linspace(0, 1, 2),
+ np.linspace(0, 1, 2))
+
+ m = m.refined(m.f2t[0, m.facets_satisfying(lambda x: x[0] == 0)])
+
+ assert_array_equal(
+ m.p,
+ np.array([[0. , 0. , 1. , 1. , 0. , 0. , 1. , 1. , 0.5],
+ [0. , 1. , 0. , 1. , 0. , 1. , 0. , 1. , 0.5],
+ [0. , 0. , 0. , 0. , 1. , 1. , 1. , 1. , 0.5]])
+ )
+
+ assert_array_equal(
+ m.t,
+ np.array([[5, 3, 3, 5, 6, 6, 1, 4, 1, 2, 2, 4],
+ [0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 7, 7],
+ [1, 1, 2, 4, 2, 4, 5, 5, 3, 3, 6, 6],
+ [8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8]])
+ )
+
class TestMirrored(TestCase):
def runTest(self):
|
Adaptive refinement for tetrahedral mesh
E.g. longest edge bisection. This is quite tedious to implement but would be a very nice feature. (A usage sketch based on the new tests follows this record.)
|
0.0
|
9241274eaa94f860110ed1369c6fdab2bb5a94dc
|
[
"tests/test_mesh.py::TestAdaptiveSplitting3D::runTest"
] |
[
"tests/test_mesh.py::MeshTests::runTest",
"tests/test_mesh.py::Loading::runTest",
"tests/test_mesh.py::SerializeUnserializeCycle::runTest",
"tests/test_mesh.py::TestBoundaryEdges::runTest",
"tests/test_mesh.py::TestBoundaryEdges2::runTest",
"tests/test_mesh.py::TestMeshAddition::runTest",
"tests/test_mesh.py::TestMeshQuadSplit::runTest",
"tests/test_mesh.py::TestAdaptiveSplitting1D::runTest",
"tests/test_mesh.py::TestAdaptiveSplitting2D::runTest",
"tests/test_mesh.py::TestFinder1DRefined::runTest",
"tests/test_mesh.py::TestFinder1DLinspaced::runTest",
"tests/test_mesh.py::test_finder_simplex[m0-0]",
"tests/test_mesh.py::test_finder_simplex[m1-1]",
"tests/test_mesh.py::test_finder_simplex[m2-2]",
"tests/test_mesh.py::test_finder_simplex[m3-0]",
"tests/test_mesh.py::test_finder_simplex[m4-1]",
"tests/test_mesh.py::test_finder_simplex[m5-2]",
"tests/test_mesh.py::test_finder_simplex[m6-10]",
"tests/test_mesh.py::test_meshio_cycle[m0]",
"tests/test_mesh.py::test_meshio_cycle[m1]",
"tests/test_mesh.py::test_meshio_cycle[m2]",
"tests/test_mesh.py::test_meshio_cycle[m3]",
"tests/test_mesh.py::test_meshio_cycle[m4]",
"tests/test_mesh.py::test_meshio_cycle[m5]",
"tests/test_mesh.py::test_meshio_cycle[m6]",
"tests/test_mesh.py::test_meshio_cycle[m7]",
"tests/test_mesh.py::test_meshio_cycle[m8]",
"tests/test_mesh.py::test_meshio_cycle_boundaries[m0-True]",
"tests/test_mesh.py::test_meshio_cycle_boundaries[m0-False]",
"tests/test_mesh.py::test_meshio_cycle_boundaries[m1-True]",
"tests/test_mesh.py::test_meshio_cycle_boundaries[m1-False]",
"tests/test_mesh.py::test_meshio_cycle_boundaries[m2-True]",
"tests/test_mesh.py::test_meshio_cycle_boundaries[m2-False]",
"tests/test_mesh.py::test_meshio_cycle_boundaries[m3-True]",
"tests/test_mesh.py::test_meshio_cycle_boundaries[m3-False]",
"tests/test_mesh.py::test_meshio_cycle_boundaries[m4-True]",
"tests/test_mesh.py::test_meshio_cycle_boundaries[m4-False]",
"tests/test_mesh.py::test_meshio_cycle_boundaries[m5-True]",
"tests/test_mesh.py::test_meshio_cycle_boundaries[m5-False]",
"tests/test_mesh.py::test_meshio_cycle_boundaries[m6-True]",
"tests/test_mesh.py::test_meshio_cycle_boundaries[m6-False]",
"tests/test_mesh.py::test_meshio_cycle_boundaries[m7-True]",
"tests/test_mesh.py::test_meshio_cycle_boundaries[m7-False]",
"tests/test_mesh.py::test_meshio_cycle_subdomains[m0]",
"tests/test_mesh.py::test_meshio_cycle_subdomains[m1]",
"tests/test_mesh.py::test_meshio_cycle_subdomains[m2]",
"tests/test_mesh.py::test_meshio_cycle_subdomains[m3]",
"tests/test_mesh.py::test_saveload_cycle_vtk[m0]",
"tests/test_mesh.py::test_saveload_cycle_vtk[m1]",
"tests/test_mesh.py::test_saveload_cycle_vtk[m2]",
"tests/test_mesh.py::test_saveload_cycle_vtk[m3]",
"tests/test_mesh.py::test_saveload_cycle_tags[m0-.msh-kwargs1]",
"tests/test_mesh.py::test_saveload_cycle_tags[m0-.vtk-kwargs2]",
"tests/test_mesh.py::test_saveload_cycle_tags[m0-.vtu-kwargs4]",
"tests/test_mesh.py::test_saveload_cycle_tags[m1-.msh-kwargs1]",
"tests/test_mesh.py::test_saveload_cycle_tags[m1-.vtk-kwargs2]",
"tests/test_mesh.py::test_saveload_cycle_tags[m1-.vtu-kwargs4]",
"tests/test_mesh.py::test_saveload_cycle_tags[m2-.msh-kwargs1]",
"tests/test_mesh.py::test_saveload_cycle_tags[m2-.vtk-kwargs2]",
"tests/test_mesh.py::test_saveload_cycle_tags[m2-.vtu-kwargs4]",
"tests/test_mesh.py::test_saveload_cycle_tags[m3-.msh-kwargs1]",
"tests/test_mesh.py::test_saveload_cycle_tags[m3-.vtk-kwargs2]",
"tests/test_mesh.py::test_saveload_cycle_tags[m3-.vtu-kwargs4]",
"tests/test_mesh.py::test_periodic_failure",
"tests/test_mesh.py::test_init_refdom[MeshTri1]",
"tests/test_mesh.py::test_init_refdom[MeshQuad1]",
"tests/test_mesh.py::test_init_refdom[MeshHex1]",
"tests/test_mesh.py::test_init_refdom[MeshTet1]",
"tests/test_mesh.py::test_init_refdom[MeshLine1]"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-09-27 09:54:25+00:00
|
bsd-3-clause
| 3,435 |
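A usage sketch for the kinnala__scikit-fem-754 record above. It relies only on the API exercised by the new `TestAdaptiveSplitting3D` test; the tensor-product mesh and the iteration count are illustrative, not part of the patch:

```python
import numpy as np
from skfem import MeshTet

# Adaptive (longest-edge-bisection) refinement added for MeshTet1: passing
# element indices to Mesh.refined() bisects those elements and then resolves
# the resulting non-conformities.
m = MeshTet.init_tensor(np.linspace(0, 1, 3),
                        np.linspace(0, 1, 3),
                        np.linspace(0, 1, 3))

for _ in range(15):
    # refine only the elements touching the x = 0 face of the cube
    m = m.refined(m.f2t[0, m.facets_satisfying(lambda x: x[0] == 0.)])

assert m.is_valid()  # validation helper added by the same patch
```

According to the accompanying test, the local mesh parameter then grows roughly linearly with the distance from the refined face.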
|
kinnala__scikit-fem-819
|
diff --git a/skfem/assembly/basis/abstract_basis.py b/skfem/assembly/basis/abstract_basis.py
index 46aeeb95..4827b5de 100644
--- a/skfem/assembly/basis/abstract_basis.py
+++ b/skfem/assembly/basis/abstract_basis.py
@@ -239,15 +239,13 @@ class AbstractBasis:
raise ValueError
if elements is not None:
- elements = self._get_dofs_normalize_elements(elements)
- return self.dofs.get_element_dofs(elements,
- skip_dofnames=skip)
+ elements = self._normalize_elements(elements)
+ return self.dofs.get_element_dofs(elements, skip_dofnames=skip)
- facets = self._get_dofs_normalize_facets(facets)
- return self.dofs.get_facet_dofs(facets,
- skip_dofnames=skip)
+ facets = self._normalize_facets(facets)
+ return self.dofs.get_facet_dofs(facets, skip_dofnames=skip)
- def _get_dofs_normalize_facets(self, facets):
+ def _normalize_facets(self, facets):
if isinstance(facets, ndarray):
return facets
if facets is None:
@@ -255,7 +253,7 @@ class AbstractBasis:
elif isinstance(facets, (tuple, list, set)):
return np.unique(
np.concatenate(
- [self._get_dofs_normalize_facets(f) for f in facets]
+ [self._normalize_facets(f) for f in facets]
)
)
elif callable(facets):
@@ -268,7 +266,7 @@ class AbstractBasis:
raise ValueError("Boundary '{}' not found.".format(facets))
raise NotImplementedError
- def _get_dofs_normalize_elements(self, elements):
+ def _normalize_elements(self, elements):
if isinstance(elements, ndarray):
return elements
if callable(elements):
@@ -276,7 +274,7 @@ class AbstractBasis:
elif isinstance(elements, (tuple, list, set)):
return np.unique(
np.concatenate(
- [self._get_dofs_normalize_elements(e) for e in elements]
+ [self._normalize_elements(e) for e in elements]
)
)
elif isinstance(elements, str):
diff --git a/skfem/assembly/basis/boundary_facet_basis.py b/skfem/assembly/basis/boundary_facet_basis.py
index e7a961a6..6470983e 100644
--- a/skfem/assembly/basis/boundary_facet_basis.py
+++ b/skfem/assembly/basis/boundary_facet_basis.py
@@ -52,9 +52,10 @@ class BoundaryFacetBasis(AbstractBasis):
Optional :class:`~skfem.assembly.Dofs` object.
"""
- logger.info("Initializing {}({}, {})".format(type(self).__name__,
- type(mesh).__name__,
- type(elem).__name__))
+ typestr = ("{}({}, {})".format(type(self).__name__,
+ type(mesh).__name__,
+ type(elem).__name__))
+ logger.info("Initializing " + typestr)
super(BoundaryFacetBasis, self).__init__(mesh,
elem,
mapping,
@@ -67,7 +68,11 @@ class BoundaryFacetBasis(AbstractBasis):
if facets is None:
self.find = np.nonzero(self.mesh.f2t[1] == -1)[0]
else:
- self.find = facets
+ self.find = self._normalize_facets(facets)
+
+ if len(self.find) == 0:
+ logger.warning("Initializing {} with zero facets.".format(typestr))
+
self.tind = self.mesh.f2t[_side, self.find]
self._side = _side # for debugging
diff --git a/skfem/assembly/basis/cell_basis.py b/skfem/assembly/basis/cell_basis.py
index 23523d95..969426f3 100644
--- a/skfem/assembly/basis/cell_basis.py
+++ b/skfem/assembly/basis/cell_basis.py
@@ -82,9 +82,10 @@ class CellBasis(AbstractBasis):
if elements is None:
self.nelems = mesh.nelements
+ self.tind = None
else:
self.nelems = len(elements)
- self.tind = elements
+ self.tind = self._normalize_elements(elements)
self.dx = (np.abs(self.mapping.detDF(self.X, tind=elements))
* np.tile(self.W, (self.nelems, 1)))
diff --git a/skfem/mapping/mapping_mortar.py b/skfem/mapping/mapping_mortar.py
index fcea0900..99c6a653 100644
--- a/skfem/mapping/mapping_mortar.py
+++ b/skfem/mapping/mapping_mortar.py
@@ -71,7 +71,8 @@ class MappingMortar(Mapping):
# find unique supermesh facets by combining nodes from both sides
param_p1 = param(p1)
param_p2 = param(p2)
- _, ix = np.unique(np.concatenate((param_p1, param_p2)),
+ _, ix = np.unique(np.concatenate((param_p1.round(decimals=10),
+ param_p2.round(decimals=10))),
return_index=True)
ixorig = np.concatenate((p1_ix, p2_ix + mesh1.p.shape[1]))[ix]
p = np.array([np.hstack((param(mesh1.p), param(mesh2.p)))])
diff --git a/skfem/visuals/matplotlib.py b/skfem/visuals/matplotlib.py
index 5eb3c835..304bc8b9 100644
--- a/skfem/visuals/matplotlib.py
+++ b/skfem/visuals/matplotlib.py
@@ -99,7 +99,10 @@ def draw_mesh2d(m: Mesh2D, **kwargs) -> Axes:
ys.append(t)
ys.append(v)
ys.append(None)
- ax.plot(xs, ys, 'k', linewidth='0.5')
+ ax.plot(xs,
+ ys,
+ kwargs['color'] if 'color' in kwargs else 'k',
+ linewidth=kwargs['linewidth'] if 'linewidth' in kwargs else .5)
if "node_numbering" in kwargs:
for itr in range(m.p.shape[1]):
|
kinnala/scikit-fem
|
fbd2558b9a20c635159737954c81a42687144486
|
diff --git a/tests/test_basis.py b/tests/test_basis.py
index 60eefcd2..414dfa09 100644
--- a/tests/test_basis.py
+++ b/tests/test_basis.py
@@ -85,7 +85,8 @@ class TestCompositeFacetAssembly(TestCase):
fbasis1 = FacetBasis(m, ElementTriP1() * ElementTriP1(),
facets=m.facets_satisfying(lambda x: x[0] == 0))
fbasis2 = FacetBasis(m, ElementTriP1(),
- facets=m.facets_satisfying(lambda x: x[0] == 0))
+ facets=lambda x: x[0] == 0)
+ fbasis3 = FacetBasis(m, ElementTriP1(), facets='left')
@BilinearForm
def uv1(u, p, v, q, w):
@@ -97,10 +98,14 @@ class TestCompositeFacetAssembly(TestCase):
A = asm(uv1, fbasis1)
B = asm(uv2, fbasis2)
+ C = asm(uv2, fbasis2)
assert_allclose(A[0].todense()[0, ::2],
B[0].todense()[0])
+ assert_allclose(A[0].todense()[0, ::2],
+ C[0].todense()[0])
+
class TestFacetExpansion(TestCase):
|
FacetBasis(..., facets=) raises IndexError
```python
def is_outer_boundary(x):
return np.logical_or(
np.isclose(abs(x[0]-.5), .5),
np.isclose(abs(x[1]-.5), .5),
)
mesh = skfem.MeshTri().refined(1)
basis_p1 = skfem.Basis(mesh, skfem.ElementTriP1())
basis_p1.get_dofs(facets=is_outer_boundary) # no exception here
fbasis_p1 = skfem.FacetBasis(mesh, basis_p1.elem, facets=is_outer_boundary)
```
```
\scikit-fem\skfem\assembly\basis\boundary_facet_basis.py in __init__(self, mesh, elem, mapping, intorder, quadrature, facets, _side, dofs)
69 else:
70 self.find = facets
---> 71 self.tind = self.mesh.f2t[_side, self.find]
72 self._side = _side # for debugging
73
IndexError: only integers, slices (`:`), ellipsis (`...`), numpy.newaxis (`None`) and integer or boolean arrays are valid indices
```
|
0.0
|
fbd2558b9a20c635159737954c81a42687144486
|
[
"tests/test_basis.py::TestCompositeFacetAssembly::runTest"
] |
[
"tests/test_basis.py::TestCompositeSplitting::runTest",
"tests/test_basis.py::TestFacetExpansion::runTest",
"tests/test_basis.py::TestFacetExpansionHexS2::runTest",
"tests/test_basis.py::TestFacetExpansionHex2::runTest",
"tests/test_basis.py::TestInterpolatorTet::runTest",
"tests/test_basis.py::TestInterpolatorTet2::runTest",
"tests/test_basis.py::TestInterpolatorTri::runTest",
"tests/test_basis.py::TestInterpolatorQuad::runTest",
"tests/test_basis.py::TestInterpolatorHex::runTest",
"tests/test_basis.py::TestInterpolatorLine::runTest",
"tests/test_basis.py::TestInterpolatorLine2::runTest",
"tests/test_basis.py::TestIncompatibleMeshElement::runTest",
"tests/test_basis.py::test_interpolator_probes[MeshTri1-e0-0-10]",
"tests/test_basis.py::test_interpolator_probes[MeshTri1-e1-1-10]",
"tests/test_basis.py::test_interpolator_probes[MeshTri1-e2-5-10]",
"tests/test_basis.py::test_interpolator_probes[MeshTri1-e3-1-300000.0]",
"tests/test_basis.py::test_interpolator_probes[MeshTet1-e4-1-10]",
"tests/test_basis.py::test_interpolator_probes[MeshTet1-e5-4-10]",
"tests/test_basis.py::test_interpolator_probes[MeshTet1-e6-1-30000.0]",
"tests/test_basis.py::test_interpolator_probes[MeshQuad1-e7-1-10]",
"tests/test_basis.py::test_interpolator_probes[MeshQuad1-e8-1-300000.0]",
"tests/test_basis.py::test_interpolator_probes[MeshHex1-e9-1-100000.0]",
"tests/test_basis.py::test_interpolator_probes[MeshWedge1-e10-0-10]",
"tests/test_basis.py::test_trace[MeshTri1-e10-e20-False]",
"tests/test_basis.py::test_trace[MeshTri1-e11-e21-False]",
"tests/test_basis.py::test_trace[MeshTri1-e12-e22-False]",
"tests/test_basis.py::test_trace[MeshTri1-e13-e23-False]",
"tests/test_basis.py::test_trace[MeshTri1-e14-e24-True]",
"tests/test_basis.py::test_trace[MeshTri1-e15-e25-True]",
"tests/test_basis.py::test_trace[MeshTri1-e16-e26-True]",
"tests/test_basis.py::test_trace[MeshTri1-e17-e27-True]",
"tests/test_basis.py::test_trace[MeshTri1-e18-None-False]",
"tests/test_basis.py::test_trace[MeshTri1-e19-None-True]",
"tests/test_basis.py::test_trace[MeshQuad1-e110-e210-False]",
"tests/test_basis.py::test_trace[MeshQuad1-e111-e211-False]",
"tests/test_basis.py::test_trace[MeshQuad1-e112-e212-False]",
"tests/test_basis.py::test_trace[MeshQuad1-e113-e213-True]",
"tests/test_basis.py::test_trace[MeshQuad1-e114-e214-True]",
"tests/test_basis.py::test_trace[MeshQuad1-e115-e215-True]",
"tests/test_basis.py::test_trace[MeshTet1-e116-e216-False]",
"tests/test_basis.py::test_trace[MeshTet1-e117-e217-False]",
"tests/test_basis.py::test_trace[MeshHex1-e118-e218-False]",
"tests/test_basis.py::test_trace[MeshHex1-e119-e219-False]",
"tests/test_basis.py::test_trace[MeshHex1-e120-e220-False]",
"tests/test_basis.py::test_point_source[ElementLineP1]",
"tests/test_basis.py::test_point_source[ElementLineP2]",
"tests/test_basis.py::test_point_source[ElementLineMini]",
"tests/test_basis.py::test_pickling"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-12-02 08:57:05+00:00
|
bsd-3-clause
| 3,436 |
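A post-fix sketch for the kinnala__scikit-fem-819 record above, combining the issue reproducer with the updated `TestCompositeFacetAssembly`; it assumes the callable and the precomputed facet array select the same x = 0 boundary:

```python
from skfem import MeshTri, ElementTriP1, FacetBasis

m = MeshTri().refined(3)

# Before the patch only a ready-made index array was accepted here; the
# callable form raised the IndexError quoted in the problem statement.
# FacetBasis now normalizes `facets` the same way Basis.get_dofs does.
fb_array = FacetBasis(m, ElementTriP1(),
                      facets=m.facets_satisfying(lambda x: x[0] == 0.))
fb_callable = FacetBasis(m, ElementTriP1(), facets=lambda x: x[0] == 0.)

assert (fb_array.find == fb_callable.find).all()
```

The same normalization path also accepts a boundary name (as in the added `fbasis3 = FacetBasis(m, ElementTriP1(), facets='left')`) and logs a warning when the selection turns out to be empty.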
|
kinnala__scikit-fem-976
|
diff --git a/skfem/assembly/basis/abstract_basis.py b/skfem/assembly/basis/abstract_basis.py
index eb3a4c02..5d5c6cd1 100644
--- a/skfem/assembly/basis/abstract_basis.py
+++ b/skfem/assembly/basis/abstract_basis.py
@@ -394,14 +394,17 @@ class AbstractBasis:
def _normalize_interp(self, interp) -> Tuple[ndarray, ...]:
- if isinstance(interp, ndarray):
- pass
- elif callable(interp):
- interp = interp(self.global_coordinates())
- elif isinstance(interp, (float, int)):
+ if isinstance(interp, (tuple, list)):
+ interp = tuple(self._normalize_interp(c)[0] for c in interp)
+
+ if callable(interp):
+ interp = self._normalize_interp(interp(self.global_coordinates()))
+
+ if isinstance(interp, (float, int)):
interp = interp + self.zero_w()
- elif isinstance(interp, (tuple, list)):
- interp = tuple(self._normalize_interp(c) for c in interp)
+
+ if isinstance(interp, ndarray):
+ interp = (interp,)
return interp
@@ -411,8 +414,7 @@ class AbstractBasis:
from skfem.helpers import inner
interp = self._normalize_interp(interp)
- if not isinstance(interp, tuple):
- interp = (interp,)
+ assert isinstance(interp, tuple)
assert len(interp) == len(self.basis[0])
return (
|
kinnala/scikit-fem
|
1eb9571dcbfdf1ec4667c8066b5f5beb73890bf1
|
diff --git a/tests/test_basis.py b/tests/test_basis.py
index 4ef5ed98..b0a88575 100644
--- a/tests/test_basis.py
+++ b/tests/test_basis.py
@@ -471,6 +471,18 @@ def test_basis_project(m, e, fun):
lambda x: (x[0] * 0 + 1, x[0] * 0 + 1),
np.ones(8),
),
+ (
+ MeshTri(),
+ ElementTriP1() * ElementTriP1(),
+ lambda x: [x[0] * 0 + 1, x[0] * 0 + 1],
+ np.ones(8),
+ ),
+ (
+ MeshTri(),
+ ElementTriP1() * ElementTriP1(),
+ lambda x: [x[0] * 0 + 1, 1],
+ np.ones(8),
+ ),
]
)
def test_basis_project_composite(m, e, p1, p2):
|
Projecting on a Basis with a combination of ElementVector and ElementComposite
```python
from skfem import *
from skfem.helpers import *
mesh = MeshTri()
basis = Basis(mesh, ElementVector(ElementTriP1()) * ElementTriP1())
@LinearForm
def from_form(v_t, v_z, w): # e_t=[y,-x], e_z = 2
return w.x[1] * v_t[0] - w.x[0] * v_t[1] + 2 * v_z
@BilinearForm
def to_form(e_t, e_z, v_t, v_z, w):
return inner(e_t, v_t) + inner(e_z, v_z)
x_0 = solve(to_form.assemble(basis), from_form.assemble(basis))
x_1 = basis.project(lambda x: [np.array((x[1], -x[0])), 2])
```
It seems that basis.project doesn't work if we project on a Basis which has a combination of ElementVector and ElementComposite. Or am I defining the lambda incorrectly?
|
0.0
|
1eb9571dcbfdf1ec4667c8066b5f5beb73890bf1
|
[
"tests/test_basis.py::test_basis_project_composite[m3-e3-<lambda>-p23]",
"tests/test_basis.py::test_basis_project_composite[m4-e4-<lambda>-p24]"
] |
[
"tests/test_basis.py::TestCompositeSplitting::runTest",
"tests/test_basis.py::TestCompositeFacetAssembly::runTest",
"tests/test_basis.py::TestFacetExpansion::runTest",
"tests/test_basis.py::TestFacetExpansionHexS2::runTest",
"tests/test_basis.py::TestFacetExpansionHex2::runTest",
"tests/test_basis.py::TestInterpolatorTet::runTest",
"tests/test_basis.py::TestInterpolatorTet2::runTest",
"tests/test_basis.py::TestInterpolatorTri::runTest",
"tests/test_basis.py::TestInterpolatorQuad::runTest",
"tests/test_basis.py::TestInterpolatorHex::runTest",
"tests/test_basis.py::TestInterpolatorLine::runTest",
"tests/test_basis.py::TestInterpolatorLine2::runTest",
"tests/test_basis.py::TestIncompatibleMeshElement::runTest",
"tests/test_basis.py::test_interpolator_probes[MeshTri1-e0-0-10]",
"tests/test_basis.py::test_interpolator_probes[MeshTri1-e1-1-10]",
"tests/test_basis.py::test_interpolator_probes[MeshTri1-e2-5-10]",
"tests/test_basis.py::test_interpolator_probes[MeshTri1-e3-1-300000.0]",
"tests/test_basis.py::test_interpolator_probes[MeshTet1-e4-1-10]",
"tests/test_basis.py::test_interpolator_probes[MeshTet1-e5-4-10]",
"tests/test_basis.py::test_interpolator_probes[MeshTet1-e6-1-30000.0]",
"tests/test_basis.py::test_interpolator_probes[MeshQuad1-e7-1-10]",
"tests/test_basis.py::test_interpolator_probes[MeshQuad1-e8-1-300000.0]",
"tests/test_basis.py::test_interpolator_probes[MeshHex1-e9-1-100000.0]",
"tests/test_basis.py::test_interpolator_probes[MeshWedge1-e10-0-10]",
"tests/test_basis.py::test_trace[MeshTri1-e10-e20-False]",
"tests/test_basis.py::test_trace[MeshTri1-e11-e21-False]",
"tests/test_basis.py::test_trace[MeshTri1-e12-e22-False]",
"tests/test_basis.py::test_trace[MeshTri1-e13-e23-False]",
"tests/test_basis.py::test_trace[MeshTri1-e14-e24-True]",
"tests/test_basis.py::test_trace[MeshTri1-e15-e25-True]",
"tests/test_basis.py::test_trace[MeshTri1-e16-e26-True]",
"tests/test_basis.py::test_trace[MeshTri1-e17-e27-True]",
"tests/test_basis.py::test_trace[MeshTri1-e18-None-False]",
"tests/test_basis.py::test_trace[MeshTri1-e19-None-True]",
"tests/test_basis.py::test_trace[MeshQuad1-e110-e210-False]",
"tests/test_basis.py::test_trace[MeshQuad1-e111-e211-False]",
"tests/test_basis.py::test_trace[MeshQuad1-e112-e212-False]",
"tests/test_basis.py::test_trace[MeshQuad1-e113-e213-True]",
"tests/test_basis.py::test_trace[MeshQuad1-e114-e214-True]",
"tests/test_basis.py::test_trace[MeshQuad1-e115-e215-True]",
"tests/test_basis.py::test_trace[MeshTet1-e116-e216-False]",
"tests/test_basis.py::test_trace[MeshTet1-e117-e217-False]",
"tests/test_basis.py::test_trace[MeshHex1-e118-e218-False]",
"tests/test_basis.py::test_trace[MeshHex1-e119-e219-False]",
"tests/test_basis.py::test_trace[MeshHex1-e120-e220-False]",
"tests/test_basis.py::test_point_source[ElementLineP1]",
"tests/test_basis.py::test_point_source[ElementLineP2]",
"tests/test_basis.py::test_point_source[ElementLineMini]",
"tests/test_basis.py::test_pickling",
"tests/test_basis.py::test_mortar_basis[m10-m20-1.0]",
"tests/test_basis.py::test_mortar_basis[m11-m21-1.4142135623730951]",
"tests/test_basis.py::test_basis_project[m0-e0-<lambda>]",
"tests/test_basis.py::test_basis_project[m1-e1-<lambda>]",
"tests/test_basis.py::test_basis_project[m2-e2-<lambda>]",
"tests/test_basis.py::test_basis_project_composite[m0-e0-p10-p20]",
"tests/test_basis.py::test_basis_project_composite[m1-e1-p11-p21]",
"tests/test_basis.py::test_basis_project_composite[m2-e2-<lambda>-p22]",
"tests/test_basis.py::test_basis_project_grad",
"tests/test_basis.py::test_basis_interpolate_project[m0-e0]",
"tests/test_basis.py::test_basis_interpolate_project[m1-e1]",
"tests/test_basis.py::test_basis_interpolate_project[m2-e2]",
"tests/test_basis.py::test_basis_interpolate_project[m3-e3]",
"tests/test_basis.py::test_subdomain_facet_assembly",
"tests/test_basis.py::test_subdomain_facet_assembly_2",
"tests/test_basis.py::test_oriented_interface_integral[m0-e0-interfacee-<lambda>]",
"tests/test_basis.py::test_oriented_interface_integral[m1-e1-interface-<lambda>]",
"tests/test_basis.py::test_oriented_gauss_integral[m0-e0]",
"tests/test_basis.py::test_oriented_gauss_integral[m1-e1]",
"tests/test_basis.py::test_oriented_gauss_integral[m2-e2]",
"tests/test_basis.py::test_oriented_saveload[m0]",
"tests/test_basis.py::test_oriented_saveload[m1]",
"tests/test_basis.py::test_oriented_saveload[m2]",
"tests/test_basis.py::test_oriented_saveload[m3]",
"tests/test_basis.py::test_oriented_saveload[m4]"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2022-11-15 06:24:51+00:00
|
bsd-3-clause
| 3,437 |
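A post-fix sketch for the kinnala__scikit-fem-976 record above, mirroring the new `test_basis_project_composite` cases; only the documented `Basis.project` API is assumed:

```python
import numpy as np
from skfem import MeshTri, Basis, ElementTriP1

basis = Basis(MeshTri(), ElementTriP1() * ElementTriP1())

# After the rework of _normalize_interp the components of a composite
# projection may be passed as a list and may mix arrays with plain scalars.
y = basis.project(lambda x: [x[0] * 0 + 1, 1])
assert np.allclose(y, np.ones(basis.N))
```

The recursive normalization is what makes the mixed `ElementVector` / `ElementComposite` case from the problem statement work as well.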
|
kinnala__scikit-fem-985
|
diff --git a/skfem/assembly/basis/abstract_basis.py b/skfem/assembly/basis/abstract_basis.py
index 63290000..d352694e 100644
--- a/skfem/assembly/basis/abstract_basis.py
+++ b/skfem/assembly/basis/abstract_basis.py
@@ -295,13 +295,13 @@ class AbstractBasis:
# loop over solution components
for c in range(len(refs)):
- ref = refs[c][1].basis[0][0]
- ref = ref.astuple
- fs = []
+ ref = refs[c][1].basis[0][0].astuple
- def linear_combination(n, refn):
+ def linear_combination(n):
"""Global discrete function at quadrature points."""
- out = np.zeros_like(refn, dtype=w.dtype)
+ out = 0. * np.einsum('...,...j->...j',
+ w[self.element_dofs[0]],
+ self.basis[0][c].get(n))
for i in range(self.Nbfun):
values = w[self.element_dofs[i]]
out += np.einsum('...,...j->...j', values,
@@ -309,9 +309,10 @@ class AbstractBasis:
return out
# interpolate DiscreteField
+ fs = []
for n in range(len(ref)):
if ref[n] is not None:
- fs.append(linear_combination(n, ref[n]))
+ fs.append(linear_combination(n))
else:
fs.append(None)
|
kinnala/scikit-fem
|
d6b1fac6ceb5a2f7c7d4f0cee29f007e4a83da7a
|
diff --git a/tests/test_basis.py b/tests/test_basis.py
index d77533ad..c39fd890 100644
--- a/tests/test_basis.py
+++ b/tests/test_basis.py
@@ -117,6 +117,13 @@ class TestCompositeFacetAssembly(TestCase):
assert_allclose(A[0].todense()[0, ::2],
C[0].todense()[0])
+ y = fbasis1.zeros()
+ y[fbasis1.get_dofs('left')] = 1
+ assert_allclose(
+ fbasis1.project(fbasis1.interpolate(fbasis1.ones())),
+ y,
+ )
+
class TestFacetExpansion(TestCase):
|
FacetBasis on a set of facets with ElementComposite not working
```python
from skfem import *
mesh = MeshTri().refined(3)
facet_basis = FacetBasis(mesh, ElementTriP1() * ElementTriP1(), facets='left')
facet_basis.interpolate(facet_basis.zeros())
```
results in
```
ValueError: operands could not be broadcast together with shapes (32,3) (8,3) (32,3)
```
|
0.0
|
d6b1fac6ceb5a2f7c7d4f0cee29f007e4a83da7a
|
[
"tests/test_basis.py::TestCompositeFacetAssembly::runTest"
] |
[
"tests/test_basis.py::TestCompositeSplitting::runTest",
"tests/test_basis.py::TestFacetExpansion::runTest",
"tests/test_basis.py::TestFacetExpansionHexS2::runTest",
"tests/test_basis.py::TestFacetExpansionHex2::runTest",
"tests/test_basis.py::TestInterpolatorTet::runTest",
"tests/test_basis.py::TestInterpolatorTet2::runTest",
"tests/test_basis.py::TestInterpolatorTri::runTest",
"tests/test_basis.py::TestInterpolatorQuad::runTest",
"tests/test_basis.py::TestInterpolatorHex::runTest",
"tests/test_basis.py::TestInterpolatorLine::runTest",
"tests/test_basis.py::TestInterpolatorLine2::runTest",
"tests/test_basis.py::TestIncompatibleMeshElement::runTest",
"tests/test_basis.py::test_interpolator_probes[MeshTri1-e0-0-10]",
"tests/test_basis.py::test_interpolator_probes[MeshTri1-e1-1-10]",
"tests/test_basis.py::test_interpolator_probes[MeshTri1-e2-5-10]",
"tests/test_basis.py::test_interpolator_probes[MeshTri1-e3-1-300000.0]",
"tests/test_basis.py::test_interpolator_probes[MeshTet1-e4-1-10]",
"tests/test_basis.py::test_interpolator_probes[MeshTet1-e5-4-10]",
"tests/test_basis.py::test_interpolator_probes[MeshTet1-e6-1-30000.0]",
"tests/test_basis.py::test_interpolator_probes[MeshQuad1-e7-1-10]",
"tests/test_basis.py::test_interpolator_probes[MeshQuad1-e8-1-300000.0]",
"tests/test_basis.py::test_interpolator_probes[MeshHex1-e9-1-100000.0]",
"tests/test_basis.py::test_interpolator_probes[MeshWedge1-e10-0-10]",
"tests/test_basis.py::test_trace[MeshTri1-e10-e20-False]",
"tests/test_basis.py::test_trace[MeshTri1-e11-e21-False]",
"tests/test_basis.py::test_trace[MeshTri1-e12-e22-False]",
"tests/test_basis.py::test_trace[MeshTri1-e13-e23-False]",
"tests/test_basis.py::test_trace[MeshTri1-e14-e24-True]",
"tests/test_basis.py::test_trace[MeshTri1-e15-e25-True]",
"tests/test_basis.py::test_trace[MeshTri1-e16-e26-True]",
"tests/test_basis.py::test_trace[MeshTri1-e17-e27-True]",
"tests/test_basis.py::test_trace[MeshTri1-e18-None-False]",
"tests/test_basis.py::test_trace[MeshTri1-e19-None-True]",
"tests/test_basis.py::test_trace[MeshQuad1-e110-e210-False]",
"tests/test_basis.py::test_trace[MeshQuad1-e111-e211-False]",
"tests/test_basis.py::test_trace[MeshQuad1-e112-e212-False]",
"tests/test_basis.py::test_trace[MeshQuad1-e113-e213-True]",
"tests/test_basis.py::test_trace[MeshQuad1-e114-e214-True]",
"tests/test_basis.py::test_trace[MeshQuad1-e115-e215-True]",
"tests/test_basis.py::test_trace[MeshTet1-e116-e216-False]",
"tests/test_basis.py::test_trace[MeshTet1-e117-e217-False]",
"tests/test_basis.py::test_trace[MeshHex1-e118-e218-False]",
"tests/test_basis.py::test_trace[MeshHex1-e119-e219-False]",
"tests/test_basis.py::test_trace[MeshHex1-e120-e220-False]",
"tests/test_basis.py::test_point_source[ElementLineP1]",
"tests/test_basis.py::test_point_source[ElementLineP2]",
"tests/test_basis.py::test_point_source[ElementLineMini]",
"tests/test_basis.py::test_pickling",
"tests/test_basis.py::test_mortar_basis[m10-m20-1.0]",
"tests/test_basis.py::test_mortar_basis[m11-m21-1.4142135623730951]",
"tests/test_basis.py::test_basis_project[m0-e0-<lambda>]",
"tests/test_basis.py::test_basis_project[m1-e1-<lambda>]",
"tests/test_basis.py::test_basis_project[m2-e2-<lambda>]",
"tests/test_basis.py::test_basis_project_composite[m0-e0-p10-p20]",
"tests/test_basis.py::test_basis_project_composite[m1-e1-p11-p21]",
"tests/test_basis.py::test_basis_project_composite[m2-e2-<lambda>-p22]",
"tests/test_basis.py::test_basis_project_composite[m3-e3-<lambda>-p23]",
"tests/test_basis.py::test_basis_project_composite[m4-e4-<lambda>-p24]",
"tests/test_basis.py::test_basis_project_grad",
"tests/test_basis.py::test_basis_interpolate_project[m0-e0]",
"tests/test_basis.py::test_basis_interpolate_project[m1-e1]",
"tests/test_basis.py::test_basis_interpolate_project[m2-e2]",
"tests/test_basis.py::test_basis_interpolate_project[m3-e3]",
"tests/test_basis.py::test_subdomain_facet_assembly",
"tests/test_basis.py::test_subdomain_facet_assembly_2",
"tests/test_basis.py::test_oriented_interface_integral[m0-e0-interfacee-<lambda>]",
"tests/test_basis.py::test_oriented_interface_integral[m1-e1-interface-<lambda>]",
"tests/test_basis.py::test_oriented_gauss_integral[m0-e0]",
"tests/test_basis.py::test_oriented_gauss_integral[m1-e1]",
"tests/test_basis.py::test_oriented_gauss_integral[m2-e2]",
"tests/test_basis.py::test_oriented_saveload[m0]",
"tests/test_basis.py::test_oriented_saveload[m1]",
"tests/test_basis.py::test_oriented_saveload[m2]",
"tests/test_basis.py::test_oriented_saveload[m3]",
"tests/test_basis.py::test_oriented_saveload[m4]",
"tests/test_basis.py::TestZerosOnes::runTest"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2022-11-23 20:21:50+00:00
|
bsd-3-clause
| 3,438 |
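A post-fix sketch for the kinnala__scikit-fem-985 record above; it re-runs the reproducer from the problem statement and the round trip added to `TestCompositeFacetAssembly`, assuming the 'left' boundary name resolves as it does in the issue:

```python
import numpy as np
from skfem import MeshTri, ElementTriP1, FacetBasis

mesh = MeshTri().refined(3)
fb = FacetBasis(mesh, ElementTriP1() * ElementTriP1(), facets='left')

# Before the patch this raised the broadcast ValueError quoted above, because
# the interpolation output was allocated with the shape of the wrong basis
# array.
fields = fb.interpolate(fb.zeros())

# The added regression test round-trips a constant through interpolate and
# project and compares against the indicator vector of the 'left' DOFs.
y = fb.zeros()
y[fb.get_dofs('left')] = 1.0
assert np.allclose(fb.project(fb.interpolate(fb.ones())), y)
```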
|
kiorky__croniter-69
|
diff --git a/src/croniter/croniter.py b/src/croniter/croniter.py
index 2a3e3c8..9124954 100644
--- a/src/croniter/croniter.py
+++ b/src/croniter/croniter.py
@@ -787,7 +787,12 @@ class croniter(object):
res = set(res)
res = sorted(res, key=lambda i: "{:02}".format(i) if isinstance(i, int) else i)
if len(res) == cls.LEN_MEANS_ALL[i]:
- res = ['*']
+ # Make sure the wildcard is used in the correct way (avoid over-optimization)
+ if ((i == 2 and '*' not in expressions[4]) or
+ (i == 4 and '*' not in expressions[2])):
+ pass
+ else:
+ res = ['*']
expanded.append(['*'] if (len(res) == 1
and res[0] == '*')
@@ -798,7 +803,8 @@ class croniter(object):
dow_expanded_set = set(expanded[4])
dow_expanded_set = dow_expanded_set.difference(nth_weekday_of_month.keys())
dow_expanded_set.discard("*")
- if dow_expanded_set:
+ # Skip: if it's all weeks instead of wildcard
+ if dow_expanded_set and len(set(expanded[4])) != cls.LEN_MEANS_ALL[4]:
raise CroniterUnsupportedSyntaxError(
"day-of-week field does not support mixing literal values and nth day of week syntax. "
"Cron: '{}' dow={} vs nth={}".format(expr_format, dow_expanded_set, nth_weekday_of_month))
@@ -824,6 +830,11 @@ class croniter(object):
@classmethod
def is_valid(cls, expression, hash_id=None):
+ if hash_id:
+ if not isinstance(hash_id, (bytes, str)):
+ raise TypeError('hash_id must be bytes or UTF-8 string')
+ if not isinstance(hash_id, bytes):
+ hash_id = hash_id.encode('UTF-8')
try:
cls.expand(expression, hash_id=hash_id)
except CroniterError:
|
kiorky/croniter
|
ce61b6221d2ef16e0991bf088f440f2b0e82ed87
|
diff --git a/src/croniter/tests/test_croniter.py b/src/croniter/tests/test_croniter.py
index 1dcfb0b..b2ea829 100755
--- a/src/croniter/tests/test_croniter.py
+++ b/src/croniter/tests/test_croniter.py
@@ -344,11 +344,15 @@ class CroniterTest(base.TestCase):
# Test each field individually
self.assertEqual(croniter('0-59 0 1 1 0').expanded[m], wildcard)
self.assertEqual(croniter('0 0-23 1 1 0').expanded[h], wildcard)
- self.assertEqual(croniter('0 0 1-31 1 0').expanded[d], wildcard)
+ self.assertNotEqual(croniter('0 0 1-31 1 0').expanded[d], wildcard)
+ self.assertEqual(croniter('0 0 1-31 1 *').expanded[d], wildcard)
self.assertEqual(croniter('0 0 1 1-12 0').expanded[mon], wildcard)
- self.assertEqual(croniter('0 0 1 1 0-6').expanded[dow], wildcard)
- self.assertEqual(croniter('0 0 1 1 1-7').expanded[dow], wildcard)
- self.assertEqual(croniter('0 0 1 1 1-7,sat#3').expanded[dow], wildcard)
+ self.assertNotEqual(croniter('0 0 1 1 0-6').expanded[dow], wildcard)
+ self.assertEqual(croniter('0 0 * 1 0-6').expanded[dow], wildcard)
+ self.assertNotEqual(croniter('0 0 1 1 1-7').expanded[dow], wildcard)
+ self.assertEqual(croniter('0 0 * 1 1-7').expanded[dow], wildcard)
+ self.assertNotEqual(croniter('0 0 1 1 1-7,sat#3').expanded[dow], wildcard)
+ self.assertEqual(croniter('0 0 * 1 1-7,sat#3').expanded[dow], wildcard)
self.assertEqual(croniter('0 0 1 1 0 0-59').expanded[s], wildcard)
# Real life examples
self.assertEqual(croniter('30 1-12,0,10-23 15-21 * fri').expanded[h], wildcard)
|
Croniter expression "* * 5 3 1-7" does not match weeks
I want to match March 5th or any day from Monday to Sunday in March, so I used the expression `* * 5 3 1-7`, but the expression optimization converts it to `* * 5 3 *`, which only matches March 5th. Compare with `* * 5 3 1-6`, which is handled correctly: it is converted to `* * 5 3 1,2,3,4,5,6`, so it matches not only March 5th but also every day in March except Sunday. (See the expansion sketch after this record.)
|
0.0
|
ce61b6221d2ef16e0991bf088f440f2b0e82ed87
|
[
"src/croniter/tests/test_croniter.py::CroniterTest::testOptimizeCronExpressions"
] |
[
"src/croniter/tests/test_croniter.py::CroniterTest::testBlockDupRanges",
"src/croniter/tests/test_croniter.py::CroniterTest::testBug1",
"src/croniter/tests/test_croniter.py::CroniterTest::testBug2",
"src/croniter/tests/test_croniter.py::CroniterTest::testBug3",
"src/croniter/tests/test_croniter.py::CroniterTest::testBug57",
"src/croniter/tests/test_croniter.py::CroniterTest::testDay",
"src/croniter/tests/test_croniter.py::CroniterTest::testDay2",
"src/croniter/tests/test_croniter.py::CroniterTest::testDomDowVixieCronBug",
"src/croniter/tests/test_croniter.py::CroniterTest::testError",
"src/croniter/tests/test_croniter.py::CroniterTest::testGetCurrent",
"src/croniter/tests/test_croniter.py::CroniterTest::testHour",
"src/croniter/tests/test_croniter.py::CroniterTest::testISOWeekday",
"src/croniter/tests/test_croniter.py::CroniterTest::testInitNoStartTime",
"src/croniter/tests/test_croniter.py::CroniterTest::testLastDayOfMonth",
"src/croniter/tests/test_croniter.py::CroniterTest::testMinute",
"src/croniter/tests/test_croniter.py::CroniterTest::testMonth",
"src/croniter/tests/test_croniter.py::CroniterTest::testNthWeekDay",
"src/croniter/tests/test_croniter.py::CroniterTest::testPrevDayOfMonthWithCrossing",
"src/croniter/tests/test_croniter.py::CroniterTest::testPrevLastDayOfMonth",
"src/croniter/tests/test_croniter.py::CroniterTest::testPrevMinute",
"src/croniter/tests/test_croniter.py::CroniterTest::testPrevNthWeekDay",
"src/croniter/tests/test_croniter.py::CroniterTest::testPrevWeekDay",
"src/croniter/tests/test_croniter.py::CroniterTest::testPrevWeekDay2",
"src/croniter/tests/test_croniter.py::CroniterTest::testPreviousDay",
"src/croniter/tests/test_croniter.py::CroniterTest::testPreviousDow",
"src/croniter/tests/test_croniter.py::CroniterTest::testPreviousHour",
"src/croniter/tests/test_croniter.py::CroniterTest::testPreviousMonth",
"src/croniter/tests/test_croniter.py::CroniterTest::testRangeWithUppercaseLastDayOfMonth",
"src/croniter/tests/test_croniter.py::CroniterTest::testSecond",
"src/croniter/tests/test_croniter.py::CroniterTest::testSecondRepeat",
"src/croniter/tests/test_croniter.py::CroniterTest::testSecondSec",
"src/croniter/tests/test_croniter.py::CroniterTest::testSundayToThursdayWithAlphaConversion",
"src/croniter/tests/test_croniter.py::CroniterTest::testTimezone",
"src/croniter/tests/test_croniter.py::CroniterTest::testTimezoneDateutil",
"src/croniter/tests/test_croniter.py::CroniterTest::testTimezoneSummerTime",
"src/croniter/tests/test_croniter.py::CroniterTest::testTimezoneWinterTime",
"src/croniter/tests/test_croniter.py::CroniterTest::testWeekDay",
"src/croniter/tests/test_croniter.py::CroniterTest::testWeekDayDayAnd",
"src/croniter/tests/test_croniter.py::CroniterTest::test_bug34",
"src/croniter/tests/test_croniter.py::CroniterTest::test_bug_62_leap",
"src/croniter/tests/test_croniter.py::CroniterTest::test_confirm_sort",
"src/croniter/tests/test_croniter.py::CroniterTest::test_dst_issue90_st31ny",
"src/croniter/tests/test_croniter.py::CroniterTest::test_dst_iter",
"src/croniter/tests/test_croniter.py::CroniterTest::test_error_alpha_cron",
"src/croniter/tests/test_croniter.py::CroniterTest::test_error_bad_cron",
"src/croniter/tests/test_croniter.py::CroniterTest::test_exactly_the_same_minute",
"src/croniter/tests/test_croniter.py::CroniterTest::test_explicit_year_forward",
"src/croniter/tests/test_croniter.py::CroniterTest::test_hash_mixup_all_fri_3rd_sat",
"src/croniter/tests/test_croniter.py::CroniterTest::test_invalid_zerorepeat",
"src/croniter/tests/test_croniter.py::CroniterTest::test_is_valid",
"src/croniter/tests/test_croniter.py::CroniterTest::test_issue145_getnext",
"src/croniter/tests/test_croniter.py::CroniterTest::test_issue151",
"src/croniter/tests/test_croniter.py::CroniterTest::test_issue156",
"src/croniter/tests/test_croniter.py::CroniterTest::test_issue_142_dow",
"src/croniter/tests/test_croniter.py::CroniterTest::test_issue_47",
"src/croniter/tests/test_croniter.py::CroniterTest::test_issue_k11",
"src/croniter/tests/test_croniter.py::CroniterTest::test_issue_k12",
"src/croniter/tests/test_croniter.py::CroniterTest::test_issue_k33",
"src/croniter/tests/test_croniter.py::CroniterTest::test_issue_k34",
"src/croniter/tests/test_croniter.py::CroniterTest::test_issue_k6",
"src/croniter/tests/test_croniter.py::CroniterTest::test_issue_monsun_117",
"src/croniter/tests/test_croniter.py::CroniterTest::test_last_out_of_range",
"src/croniter/tests/test_croniter.py::CroniterTest::test_lwom_friday",
"src/croniter/tests/test_croniter.py::CroniterTest::test_lwom_friday_2hours",
"src/croniter/tests/test_croniter.py::CroniterTest::test_lwom_friday_2xh_2xm",
"src/croniter/tests/test_croniter.py::CroniterTest::test_lwom_mixup_4th_and_last",
"src/croniter/tests/test_croniter.py::CroniterTest::test_lwom_mixup_all_fri_last_sat",
"src/croniter/tests/test_croniter.py::CroniterTest::test_lwom_mixup_firstlast_sat",
"src/croniter/tests/test_croniter.py::CroniterTest::test_lwom_saturday_rev",
"src/croniter/tests/test_croniter.py::CroniterTest::test_lwom_tue_thu",
"src/croniter/tests/test_croniter.py::CroniterTest::test_match",
"src/croniter/tests/test_croniter.py::CroniterTest::test_match_range",
"src/croniter/tests/test_croniter.py::CroniterTest::test_milliseconds",
"src/croniter/tests/test_croniter.py::CroniterTest::test_mixdow",
"src/croniter/tests/test_croniter.py::CroniterTest::test_multiple_months",
"src/croniter/tests/test_croniter.py::CroniterTest::test_next_when_now_satisfies_cron",
"src/croniter/tests/test_croniter.py::CroniterTest::test_nth_as_last_wday_simple",
"src/croniter/tests/test_croniter.py::CroniterTest::test_nth_out_of_range",
"src/croniter/tests/test_croniter.py::CroniterTest::test_nth_wday_simple",
"src/croniter/tests/test_croniter.py::CroniterTest::test_overflow",
"src/croniter/tests/test_croniter.py::CroniterTest::test_rangeGenerator",
"src/croniter/tests/test_croniter.py::CroniterTest::test_std_dst",
"src/croniter/tests/test_croniter.py::CroniterTest::test_std_dst2",
"src/croniter/tests/test_croniter.py::CroniterTest::test_std_dst3",
"src/croniter/tests/test_croniter.py::CroniterTest::test_wdom_core_leap_year",
"src/croniter/tests/test_croniter.py::CroniterTest::test_weekday_range"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2024-03-21 22:02:47+00:00
|
mit
| 3,439 |
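A small sketch for the kiorky__croniter-69 record above, showing the fixed expansion behaviour described by the updated `testOptimizeCronExpressions`; the printed values assume croniter's usual 0–6 day-of-week numbering with 7 aliased to Sunday:

```python
from croniter import croniter

DOW = 4  # day-of-week is the fifth field in croniter's expanded form

# With a restricted day-of-month, a full 1-7 day-of-week range keeps its
# literal days so the standard "dom OR dow" cron matching still applies...
print(croniter('* * 5 3 1-7').expanded[DOW])  # [0, 1, 2, 3, 4, 5, 6]

# ...while the same range still collapses to a wildcard when day-of-month
# is itself a wildcard.
print(croniter('* * * 3 1-7').expanded[DOW])  # ['*']
```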
|
kislyuk__argcomplete-204
|
diff --git a/README.rst b/README.rst
index 1620461..b77e533 100644
--- a/README.rst
+++ b/README.rst
@@ -212,6 +212,10 @@ In global completion mode, you don't have to register each argcomplete-capable e
will look for the string **PYTHON_ARGCOMPLETE_OK** in the first 1024 bytes of any executable that it's running
completion for, and if it's found, follow the rest of the argcomplete protocol as described above.
+Additionally, completion is activated for scripts run as ``python <script>`` and ``python -m <module>``.
+This also works for alternate Python versions (e.g. ``python3`` and ``pypy``), as long as that version of Python has
+argcomplete installed.
+
.. admonition:: Bash version compatibility
Global completion requires bash support for ``complete -D``, which was introduced in bash 4.2. On OS X or older Linux
diff --git a/argcomplete/__init__.py b/argcomplete/__init__.py
index 029c6b8..25d4328 100644
--- a/argcomplete/__init__.py
+++ b/argcomplete/__init__.py
@@ -203,9 +203,13 @@ class CompletionFinder(object):
comp_line = ensure_str(comp_line)
cword_prequote, cword_prefix, cword_suffix, comp_words, last_wordbreak_pos = split_line(comp_line, comp_point)
- if os.environ["_ARGCOMPLETE"] == "2":
- # Shell hook recognized the first word as the interpreter; discard it
- comp_words.pop(0)
+ # _ARGCOMPLETE is set by the shell script to tell us where comp_words
+ # should start, based on what we're completing.
+ # 1: <script> [args]
+ # 2: python <script> [args]
+ # 3: python -m <module> [args]
+ start = int(os.environ["_ARGCOMPLETE"]) - 1
+ comp_words = comp_words[start:]
debug("\nLINE: '{l}'\nPREQUOTE: '{pq}'\nPREFIX: '{p}'".format(l=comp_line, pq=cword_prequote, p=cword_prefix),
"\nSUFFIX: '{s}'".format(s=cword_suffix),
diff --git a/argcomplete/_check_module.py b/argcomplete/_check_module.py
new file mode 100644
index 0000000..9d14b3d
--- /dev/null
+++ b/argcomplete/_check_module.py
@@ -0,0 +1,49 @@
+import os
+import sys
+
+try:
+ from importlib.util import find_spec
+except ImportError:
+ from collections import namedtuple
+ from imp import find_module
+
+ ModuleSpec = namedtuple(
+ 'ModuleSpec', ['origin', 'has_location', 'submodule_search_locations'])
+
+ def find_spec(name):
+ """Minimal implementation as required by `find`."""
+ f, path, _ = find_module(name)
+ has_location = path is not None
+ if f is None:
+ return ModuleSpec(None, has_location, [path])
+ f.close()
+ return ModuleSpec(path, has_location, None)
+
+
+def find(name):
+ names = name.split('.')
+ spec = find_spec(names[0])
+ if not spec.has_location:
+ raise Exception('cannot locate file')
+ if spec.submodule_search_locations is None:
+ if len(names) != 1:
+ raise Exception('{} is not a package'.format(names[0]))
+ return spec.origin
+ if len(spec.submodule_search_locations) != 1:
+ raise Exception('expecting one search location')
+ path = os.path.join(spec.submodule_search_locations[0], *names[1:])
+ if os.path.isdir(path):
+ return os.path.join(path, '__main__.py')
+ else:
+ return path + '.py'
+
+
+def main():
+ with open(find(sys.argv[1])) as f:
+ head = f.read(1024)
+ if 'PYTHON_ARGCOMPLETE_OK' not in head:
+ raise Exception('marker not found')
+
+
+if __name__ == '__main__':
+ main()
diff --git a/argcomplete/bash_completion.d/python-argcomplete.sh b/argcomplete/bash_completion.d/python-argcomplete.sh
index 41bd53b..3e520a5 100644
--- a/argcomplete/bash_completion.d/python-argcomplete.sh
+++ b/argcomplete/bash_completion.d/python-argcomplete.sh
@@ -18,7 +18,13 @@ _python_argcomplete_global() {
local ARGCOMPLETE=0
if [[ "$executable" == python* ]] || [[ "$executable" == pypy* ]]; then
- if [[ -f "${COMP_WORDS[1]}" ]] && (head -c 1024 "${COMP_WORDS[1]}" | grep --quiet "PYTHON_ARGCOMPLETE_OK") >/dev/null 2>&1; then
+ if [[ "${COMP_WORDS[1]}" == -m ]]; then
+ if "$executable" -m argcomplete._check_module "${COMP_WORDS[2]}" >/dev/null 2>&1; then
+ ARGCOMPLETE=3
+ else
+ return
+ fi
+ elif [[ -f "${COMP_WORDS[1]}" ]] && (head -c 1024 "${COMP_WORDS[1]}" | grep --quiet "PYTHON_ARGCOMPLETE_OK") >/dev/null 2>&1; then
local ARGCOMPLETE=2
else
return
@@ -36,7 +42,7 @@ _python_argcomplete_global() {
fi
fi
- if [[ $ARGCOMPLETE == 1 ]] || [[ $ARGCOMPLETE == 2 ]]; then
+ if [[ $ARGCOMPLETE != 0 ]]; then
local IFS=$(echo -e '\v')
COMPREPLY=( $(_ARGCOMPLETE_IFS="$IFS" \
COMP_LINE="$COMP_LINE" \
|
kislyuk/argcomplete
|
5cd934c586ab2a4f4bbf831201b46c9ceeb855eb
|
diff --git a/test/test.py b/test/test.py
index ed86324..7a48798 100755
--- a/test/test.py
+++ b/test/test.py
@@ -17,6 +17,7 @@ from argcomplete import (
CompletionFinder,
split_line,
ExclusiveCompletionFinder,
+ _check_module
)
from argcomplete.completers import FilesCompleter, DirectoriesCompleter
from argcomplete.compat import USING_PYTHON2, str, sys_encoding, ensure_str, ensure_bytes
@@ -61,7 +62,7 @@ class TestArgcomplete(unittest.TestCase):
def setUp(self):
self._os_environ = os.environ
os.environ = os.environ.copy()
- os.environ["_ARGCOMPLETE"] = "yes"
+ os.environ["_ARGCOMPLETE"] = "1"
os.environ["_ARC_DEBUG"] = "yes"
os.environ["IFS"] = IFS
os.environ["_ARGCOMPLETE_COMP_WORDBREAKS"] = COMP_WORDBREAKS
@@ -872,6 +873,64 @@ class TestSplitLine(unittest.TestCase):
self.assertEqual(self.wordbreak('"b:c=d" '), None)
+class TestCheckModule(unittest.TestCase):
+ def setUp(self):
+ self.dir = TempDir(prefix="test_dir_module", dir=".")
+ self.dir.__enter__()
+ # There is some odd bug that seems to only come up in Python 3.4 where
+ # using "." in sys.path sometimes won't find modules, so we'll use the
+ # full path each time.
+ sys.path.insert(0, os.getcwd())
+
+ def tearDown(self):
+ sys.path.pop(0)
+ self.dir.__exit__()
+
+ def test_module(self):
+ self._mkfile('module.py')
+ path = _check_module.find('module')
+ self.assertEqual(path, os.path.abspath('module.py'))
+ self.assertNotIn('module', sys.modules)
+
+ def test_package(self):
+ os.mkdir('package')
+ self._mkfile('package/__init__.py')
+ self._mkfile('package/module.py')
+ path = _check_module.find('package.module')
+ self.assertEqual(path, os.path.abspath('package/module.py'))
+ self.assertNotIn('package', sys.modules)
+ self.assertNotIn('package.module', sys.modules)
+
+ def test_subpackage(self):
+ os.mkdir('package')
+ self._mkfile('package/__init__.py')
+ os.mkdir('package/subpackage')
+ self._mkfile('package/subpackage/__init__.py')
+ self._mkfile('package/subpackage/module.py')
+ path = _check_module.find('package.subpackage.module')
+ self.assertEqual(path, os.path.abspath('package/subpackage/module.py'))
+ self.assertNotIn('package', sys.modules)
+ self.assertNotIn('package.subpackage', sys.modules)
+ self.assertNotIn('package.subpackage.module', sys.modules)
+
+ def test_package_main(self):
+ os.mkdir('package')
+ self._mkfile('package/__init__.py')
+ self._mkfile('package/__main__.py')
+ path = _check_module.find('package')
+ self.assertEqual(path, os.path.abspath('package/__main__.py'))
+ self.assertNotIn('package', sys.modules)
+
+ def test_not_package(self):
+ self._mkfile('module.py')
+ with self.assertRaisesRegexp(Exception, 'module is not a package'):
+ _check_module.find('module.bad')
+ self.assertNotIn('module', sys.modules)
+
+ def _mkfile(self, path):
+ open(path, 'w').close()
+
+
class _TestSh(object):
"""
Contains tests which should work in any shell using argcomplete.
@@ -1040,6 +1099,16 @@ class TestBashGlobal(TestBash):
self.assertIn('Permission denied', self.sh.run_command('./prog'))
self.assertEqual(self.sh.run_command('python ./prog basic f\t'), 'foo\r\n')
+ def test_python_module(self):
+ """Test completing a module run with python -m."""
+ prog = os.path.join(TEST_DIR, 'prog')
+ with TempDir(prefix='test_dir_py', dir='.'):
+ os.mkdir('package')
+ open('package/__init__.py', 'w').close()
+ shutil.copy(prog, 'package/prog.py')
+ self.sh.run_command('cd ' + os.getcwd())
+ self.assertEqual(self.sh.run_command('python -m package.prog basic f\t'), 'foo\r\n')
+
class TestTcsh(_TestSh, unittest.TestCase):
expected_failures = [
|
Support for python -m module
Is it possible for argcomplete to work for modules run as `python -m module`? This becomes necessary once you start organizing your code in a module (rather than just a single script) so that relative imports work correctly, but I don't really want to make a separate entry-point script if I don't have to.
I am using global completion and it isn't working, at least not out of the box.
|
0.0
|
5cd934c586ab2a4f4bbf831201b46c9ceeb855eb
|
[
"test/test.py::TestArgcomplete::test_directory_completion",
"test/test.py::TestArgcomplete::test_display_completions",
"test/test.py::TestArgcomplete::test_file_completion",
"test/test.py::TestArgcomplete::test_readline_entry_point",
"test/test.py::TestArgcompleteREPL::test_repl_multiple_complete",
"test/test.py::TestArgcompleteREPL::test_repl_parse_after_complete",
"test/test.py::TestArgcompleteREPL::test_repl_reuse_parser_with_positional",
"test/test.py::TestArgcompleteREPL::test_repl_subcommand",
"test/test.py::TestArgcompleteREPL::test_repl_subparser_parse_after_complete",
"test/test.py::TestSplitLine::test_escaped_special",
"test/test.py::TestSplitLine::test_last_wordbreak_pos",
"test/test.py::TestSplitLine::test_punctuation",
"test/test.py::TestSplitLine::test_simple",
"test/test.py::TestSplitLine::test_unescaped_special",
"test/test.py::TestCheckModule::test_module",
"test/test.py::TestCheckModule::test_not_package",
"test/test.py::TestCheckModule::test_package",
"test/test.py::TestCheckModule::test_package_main",
"test/test.py::TestCheckModule::test_subpackage",
"test/test.py::TestBash::test_completion_environment",
"test/test.py::TestBash::test_continuation",
"test/test.py::TestBash::test_double_quoted_completion",
"test/test.py::TestBash::test_one_space_after_exact",
"test/test.py::TestBash::test_parse_special_characters",
"test/test.py::TestBash::test_partial_completion",
"test/test.py::TestBash::test_quoted_exact",
"test/test.py::TestBash::test_quoted_space",
"test/test.py::TestBash::test_quotes",
"test/test.py::TestBash::test_simple_completion",
"test/test.py::TestBash::test_single_quoted_completion",
"test/test.py::TestBash::test_single_quotes_in_double_quotes",
"test/test.py::TestBash::test_single_quotes_in_single_quotes",
"test/test.py::TestBash::test_special_characters",
"test/test.py::TestBash::test_special_characters_double_quoted",
"test/test.py::TestBash::test_special_characters_single_quoted",
"test/test.py::TestBash::test_unquoted_space",
"test/test.py::TestBash::test_wordbreak_chars",
"test/test.py::TestBashGlobal::test_completion_environment",
"test/test.py::TestBashGlobal::test_continuation",
"test/test.py::TestBashGlobal::test_double_quoted_completion",
"test/test.py::TestBashGlobal::test_one_space_after_exact",
"test/test.py::TestBashGlobal::test_parse_special_characters",
"test/test.py::TestBashGlobal::test_partial_completion",
"test/test.py::TestBashGlobal::test_python_completion",
"test/test.py::TestBashGlobal::test_python_filename_completion",
"test/test.py::TestBashGlobal::test_python_module",
"test/test.py::TestBashGlobal::test_python_not_executable",
"test/test.py::TestBashGlobal::test_quoted_exact",
"test/test.py::TestBashGlobal::test_quoted_space",
"test/test.py::TestBashGlobal::test_quotes",
"test/test.py::TestBashGlobal::test_simple_completion",
"test/test.py::TestBashGlobal::test_single_quoted_completion",
"test/test.py::TestBashGlobal::test_single_quotes_in_double_quotes",
"test/test.py::TestBashGlobal::test_single_quotes_in_single_quotes",
"test/test.py::TestBashGlobal::test_special_characters",
"test/test.py::TestBashGlobal::test_special_characters_double_quoted",
"test/test.py::TestBashGlobal::test_special_characters_single_quoted",
"test/test.py::TestBashGlobal::test_unquoted_space",
"test/test.py::TestBashGlobal::test_wordbreak_chars"
] |
[] |
{
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-01-27 02:43:00+00:00
|
apache-2.0
| 3,440 |
|
kislyuk__argcomplete-224
|
diff --git a/argcomplete/__init__.py b/argcomplete/__init__.py
index 25d4328..0c74918 100644
--- a/argcomplete/__init__.py
+++ b/argcomplete/__init__.py
@@ -6,7 +6,7 @@ from __future__ import absolute_import, division, print_function, unicode_litera
import os, sys, argparse, contextlib
from . import completers, my_shlex as shlex
from .compat import USING_PYTHON2, str, sys_encoding, ensure_str, ensure_bytes
-from .completers import FilesCompleter
+from .completers import FilesCompleter, SuppressCompleter
from .my_argparse import IntrospectiveArgumentParser, action_is_satisfied, action_is_open, action_is_greedy
_DEBUG = "_ARC_DEBUG" in os.environ
@@ -346,8 +346,12 @@ class CompletionFinder(object):
option_completions = []
for action in parser._actions:
- if action.help == argparse.SUPPRESS and not self.print_suppressed:
- continue
+ if not self.print_suppressed:
+ completer = getattr(action, "completer", None)
+ if isinstance(completer, SuppressCompleter) and completer.suppress():
+ continue
+ if action.help == argparse.SUPPRESS:
+ continue
if not self._action_allowed(action, parser):
continue
if not isinstance(action, argparse._SubParsersAction):
diff --git a/argcomplete/completers.py b/argcomplete/completers.py
index 05a1b63..59d8b79 100644
--- a/argcomplete/completers.py
+++ b/argcomplete/completers.py
@@ -92,3 +92,17 @@ class _FilteredFilesCompleter(object):
class DirectoriesCompleter(_FilteredFilesCompleter):
def __init__(self):
_FilteredFilesCompleter.__init__(self, predicate=os.path.isdir)
+
+class SuppressCompleter(object):
+ """
+ A completer used to suppress the completion of specific arguments
+ """
+
+ def __init__(self):
+ pass
+
+ def suppress(self):
+ """
+ Decide if the completion should be suppressed
+ """
+ return True
|
kislyuk/argcomplete
|
e72c5fb565300b935de3247e4cb84c4253471669
|
diff --git a/test/test.py b/test/test.py
index d552331..cbe2de3 100755
--- a/test/test.py
+++ b/test/test.py
@@ -19,7 +19,7 @@ from argcomplete import (
ExclusiveCompletionFinder,
_check_module
)
-from argcomplete.completers import FilesCompleter, DirectoriesCompleter
+from argcomplete.completers import FilesCompleter, DirectoriesCompleter, SuppressCompleter
from argcomplete.compat import USING_PYTHON2, str, sys_encoding, ensure_str, ensure_bytes
IFS = "\013"
@@ -153,6 +153,30 @@ class TestArgcomplete(unittest.TestCase):
for cmd, output in expected_outputs:
self.assertEqual(set(self.run_completer(make_parser(), cmd, print_suppressed=True)), set(output))
+ def test_suppress_completer(self):
+ def make_parser():
+ parser = ArgumentParser()
+ parser.add_argument("--foo")
+ arg = parser.add_argument("--bar")
+ arg.completer = SuppressCompleter()
+ return parser
+
+ expected_outputs = (
+ ("prog ", ["--foo", "-h", "--help"]),
+ ("prog --b", [""])
+ )
+
+ for cmd, output in expected_outputs:
+ self.assertEqual(set(self.run_completer(make_parser(), cmd)), set(output))
+
+ expected_outputs = (
+ ("prog ", ["--foo", "--bar", "-h", "--help"]),
+ ("prog --b", ["--bar "])
+ )
+
+ for cmd, output in expected_outputs:
+ self.assertEqual(set(self.run_completer(make_parser(), cmd, print_suppressed=True)), set(output))
+
def test_action_activation(self):
def make_parser():
parser = ArgumentParser()
|
Skip completion for specific argument
I would like a specific argument (e.g. `--foo`) to not appear in the completion. Since the application is pretty modular I can't configure e.g. the global `CompletionFinder`. I found the option to set `help=argparse.SUPPRESS` but I still want a help text to be shown for `--help`.
I was wondering whether something like this would be feasible:
```
arg = parser.add_argument('--foo', help='awesome option')
arg.completer = False
```
If anyone could give me some pointers, I am happy to provide a PR for this feature idea.
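Based on the patch above, a sketch of how the suppression would look once a `SuppressCompleter` exists; the option names are illustrative only.
```python
import argparse

import argcomplete
from argcomplete.completers import SuppressCompleter  # added by the patch above

parser = argparse.ArgumentParser()
parser.add_argument("--foo", help="awesome option")
arg = parser.add_argument("--bar", help="internal option")
arg.completer = SuppressCompleter()  # hidden from tab completion, still listed by --help
argcomplete.autocomplete(parser)
args = parser.parse_args()
```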
|
0.0
|
e72c5fb565300b935de3247e4cb84c4253471669
|
[
"test/test.py::TestArgcomplete::test_directory_completion",
"test/test.py::TestArgcomplete::test_display_completions",
"test/test.py::TestArgcomplete::test_file_completion",
"test/test.py::TestArgcomplete::test_readline_entry_point",
"test/test.py::TestArgcompleteREPL::test_repl_multiple_complete",
"test/test.py::TestArgcompleteREPL::test_repl_parse_after_complete",
"test/test.py::TestArgcompleteREPL::test_repl_reuse_parser_with_positional",
"test/test.py::TestArgcompleteREPL::test_repl_subcommand",
"test/test.py::TestArgcompleteREPL::test_repl_subparser_parse_after_complete",
"test/test.py::TestSplitLine::test_escaped_special",
"test/test.py::TestSplitLine::test_last_wordbreak_pos",
"test/test.py::TestSplitLine::test_punctuation",
"test/test.py::TestSplitLine::test_simple",
"test/test.py::TestSplitLine::test_unescaped_special",
"test/test.py::TestCheckModule::test_module",
"test/test.py::TestCheckModule::test_not_package",
"test/test.py::TestCheckModule::test_package",
"test/test.py::TestCheckModule::test_package_main",
"test/test.py::TestCheckModule::test_subpackage",
"test/test.py::TestBash::test_completion_environment",
"test/test.py::TestBash::test_continuation",
"test/test.py::TestBash::test_debug_output",
"test/test.py::TestBash::test_double_quoted_completion",
"test/test.py::TestBash::test_one_space_after_exact",
"test/test.py::TestBash::test_parse_special_characters",
"test/test.py::TestBash::test_partial_completion",
"test/test.py::TestBash::test_quoted_exact",
"test/test.py::TestBash::test_quoted_space",
"test/test.py::TestBash::test_quotes",
"test/test.py::TestBash::test_simple_completion",
"test/test.py::TestBash::test_single_quoted_completion",
"test/test.py::TestBash::test_single_quotes_in_double_quotes",
"test/test.py::TestBash::test_single_quotes_in_single_quotes",
"test/test.py::TestBash::test_special_characters",
"test/test.py::TestBash::test_special_characters_double_quoted",
"test/test.py::TestBash::test_special_characters_single_quoted",
"test/test.py::TestBash::test_unquoted_space",
"test/test.py::TestBash::test_wordbreak_chars",
"test/test.py::TestBashGlobal::test_completion_environment",
"test/test.py::TestBashGlobal::test_continuation",
"test/test.py::TestBashGlobal::test_debug_output",
"test/test.py::TestBashGlobal::test_double_quoted_completion",
"test/test.py::TestBashGlobal::test_one_space_after_exact",
"test/test.py::TestBashGlobal::test_parse_special_characters",
"test/test.py::TestBashGlobal::test_partial_completion",
"test/test.py::TestBashGlobal::test_python_completion",
"test/test.py::TestBashGlobal::test_python_filename_completion",
"test/test.py::TestBashGlobal::test_python_module",
"test/test.py::TestBashGlobal::test_python_not_executable",
"test/test.py::TestBashGlobal::test_quoted_exact",
"test/test.py::TestBashGlobal::test_quoted_space",
"test/test.py::TestBashGlobal::test_quotes",
"test/test.py::TestBashGlobal::test_simple_completion",
"test/test.py::TestBashGlobal::test_single_quoted_completion",
"test/test.py::TestBashGlobal::test_single_quotes_in_double_quotes",
"test/test.py::TestBashGlobal::test_single_quotes_in_single_quotes",
"test/test.py::TestBashGlobal::test_special_characters",
"test/test.py::TestBashGlobal::test_special_characters_double_quoted",
"test/test.py::TestBashGlobal::test_special_characters_single_quoted",
"test/test.py::TestBashGlobal::test_unquoted_space",
"test/test.py::TestBashGlobal::test_wordbreak_chars"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-08-15 13:04:34+00:00
|
apache-2.0
| 3,441 |
|
kivy__buildozer-1225
|
diff --git a/buildozer/default.spec b/buildozer/default.spec
index 612f79e..b33a62d 100644
--- a/buildozer/default.spec
+++ b/buildozer/default.spec
@@ -256,6 +256,13 @@ android.allow_backup = True
# (int) port number to specify an explicit --port= p4a argument (eg for bootstrap flask)
#p4a.port =
+# Control passing the --use-setup-py vs --ignore-setup-py to p4a
+# "in the future" --use-setup-py is going to be the default behaviour in p4a, right now it is not
+# Setting this to false will pass --ignore-setup-py, true will pass --use-setup-py
+# NOTE: this is general setuptools integration, having pyproject.toml is enough, no need to generate
+# setup.py if you're using Poetry, but you need to add "toml" to source.include_exts.
+#p4a.setup_py = false
+
#
# iOS specific
diff --git a/buildozer/targets/android.py b/buildozer/targets/android.py
index 6d9be36..b8738bf 100644
--- a/buildozer/targets/android.py
+++ b/buildozer/targets/android.py
@@ -92,6 +92,12 @@ class TargetAndroid(Target):
if port is not None:
self.extra_p4a_args += ' --port={}'.format(port)
+ setup_py = self.buildozer.config.getdefault('app', 'p4a.setup_py', False)
+ if setup_py:
+ self.extra_p4a_args += ' --use-setup-py'
+ else:
+ self.extra_p4a_args += ' --ignore-setup-py'
+
self.warn_on_deprecated_tokens()
def warn_on_deprecated_tokens(self):
|
kivy/buildozer
|
7bf9f397098d3185a953f8fdf265834f8da83378
|
diff --git a/tests/targets/test_android.py b/tests/targets/test_android.py
index 09af719..6e127bc 100644
--- a/tests/targets/test_android.py
+++ b/tests/targets/test_android.py
@@ -115,7 +115,7 @@ class TestTargetAndroid:
assert (
target_android.extra_p4a_args == (
' --color=always'
- ' --storage-dir="{buildozer_dir}/android/platform/build-armeabi-v7a" --ndk-api=21'.format(
+ ' --storage-dir="{buildozer_dir}/android/platform/build-armeabi-v7a" --ndk-api=21 --ignore-setup-py'.format(
buildozer_dir=buildozer.buildozer_dir)
)
)
|
Cannot tell p4a to use/not to use distutils integration
### Versions
* Python: 3.7
* OS: Ubuntu 20.04
* Buildozer: git master (and pypi 1.2.0)
### Description
Cannot tell p4a to use distutils support (https://python-for-android.readthedocs.io/en/latest/distutils/) since there
is no way to pass the corresponding argument.
Also, p4a is going to default to enabling distutils support in the future (it currently defaults to disabled), so
builds will behave differently then unless the corresponding option is explicitly passed to p4a.
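Based on the patch above, a sketch of how the new setting would look in `buildozer.spec`; the option name comes from the diff, and leaving it unset (or false) passes `--ignore-setup-py` instead.
```
[app]
# pass --use-setup-py to p4a; omit or set to false to pass --ignore-setup-py
p4a.setup_py = true
```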
|
0.0
|
7bf9f397098d3185a953f8fdf265834f8da83378
|
[
"tests/targets/test_android.py::TestTargetAndroid::test_init"
] |
[
"tests/targets/test_android.py::TestTargetAndroid::test_init_positional_buildozer",
"tests/targets/test_android.py::TestTargetAndroid::test_sdkmanager",
"tests/targets/test_android.py::TestTargetAndroid::test_check_requirements",
"tests/targets/test_android.py::TestTargetAndroid::test_check_configuration_tokens",
"tests/targets/test_android.py::TestTargetAndroid::test_install_android_sdk[linux]",
"tests/targets/test_android.py::TestTargetAndroid::test_install_android_sdk[darwin]",
"tests/targets/test_android.py::TestTargetAndroid::test_build_package",
"tests/targets/test_android.py::TestTargetAndroid::test_numeric_version",
"tests/targets/test_android.py::TestTargetAndroid::test_build_package_intent_filters",
"tests/targets/test_android.py::TestTargetAndroid::test_allow_backup"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-09-11 17:56:04+00:00
|
mit
| 3,442 |
|
kivy__python-for-android-2636
|
diff --git a/.github/workflows/push.yml b/.github/workflows/push.yml
index 597293a9..74121077 100644
--- a/.github/workflows/push.yml
+++ b/.github/workflows/push.yml
@@ -3,9 +3,9 @@ name: Unit tests & build apps
on: ['push', 'pull_request']
env:
- APK_ARTIFACT_FILENAME: bdist_unit_tests_app-debug-1.1-.apk
- AAB_ARTIFACT_FILENAME: bdist_unit_tests_app-release-1.1-.aab
- AAR_ARTIFACT_FILENAME: bdist_unit_tests_app-release-1.1-.aar
+ APK_ARTIFACT_FILENAME: bdist_unit_tests_app-debug-1.1.apk
+ AAB_ARTIFACT_FILENAME: bdist_unit_tests_app-release-1.1.aab
+ AAR_ARTIFACT_FILENAME: bdist_unit_tests_app-release-1.1.aar
PYTHONFORANDROID_PREREQUISITES_INSTALL_INTERACTIVE: 0
jobs:
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4f90b67e..2e37bdc5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,99 @@
# Changelog
+## [v2022.07.20](https://github.com/kivy/python-for-android/tree/v2022.07.20) (2022-07-20)
+
+[Full Changelog](https://github.com/kivy/python-for-android/compare/v2022.03.13...v2022.07.20)
+
+**Fixed bugs:**
+
+- Current default Python version \(3.8.9\) is failing to build on latest macOS releases [\#2568](https://github.com/kivy/python-for-android/issues/2568)
+- Build failed for Pillow recipe when targeting x86\_64 arch [\#2259](https://github.com/kivy/python-for-android/issues/2259)
+- UnboundLocalError: local variable 'toolchain\_version' referenced before assignment [\#2190](https://github.com/kivy/python-for-android/issues/2190)
+- Numpy on MacOsX fails in our `CI` tests [\#2087](https://github.com/kivy/python-for-android/issues/2087)
+
+**Closed issues:**
+
+- pyzbar android building error [\#2635](https://github.com/kivy/python-for-android/issues/2635)
+- `tflite-runtime` build every time [\#2630](https://github.com/kivy/python-for-android/issues/2630)
+- Failed to build `matplotlib` because `kiwisolver` [\#2629](https://github.com/kivy/python-for-android/issues/2629)
+- Trying to build pandas with buildozer results in missing headers errors [\#2626](https://github.com/kivy/python-for-android/issues/2626)
+- https://github.com/kivy/python-for-android.git [\#2625](https://github.com/kivy/python-for-android/issues/2625)
+- \[SSL : CERTIFICATE\_VERIFY\_FAILED \] in Android [\#2620](https://github.com/kivy/python-for-android/issues/2620)
+- How to run Python script in background in android? [\#2618](https://github.com/kivy/python-for-android/issues/2618)
+- USB permission [\#2611](https://github.com/kivy/python-for-android/issues/2611)
+- ffmpeg recipe for 23b build fails [\#2608](https://github.com/kivy/python-for-android/issues/2608)
+- Broken jpeg recipe for NDK 23b? [\#2603](https://github.com/kivy/python-for-android/issues/2603)
+- Need a help [\#2595](https://github.com/kivy/python-for-android/issues/2595)
+- Termux build fails [\#2585](https://github.com/kivy/python-for-android/issues/2585)
+- lapack build error [\#2584](https://github.com/kivy/python-for-android/issues/2584)
+- still this issue is happening [\#2572](https://github.com/kivy/python-for-android/issues/2572)
+- "Unit test apk" + "Unit test aab" + "Test updated recipes" test jobs should be run also on macOS \(both Intel and Apple Silicon\) [\#2569](https://github.com/kivy/python-for-android/issues/2569)
+- unpackPyBundle\(\) on startup crashes already running service [\#2564](https://github.com/kivy/python-for-android/issues/2564)
+- Webview app fail to startup. [\#2559](https://github.com/kivy/python-for-android/issues/2559)
+- genericndkbuild receipe Not compiling with android api \> 28 [\#2555](https://github.com/kivy/python-for-android/issues/2555)
+- Is there a way to build smaller apks? [\#2553](https://github.com/kivy/python-for-android/issues/2553)
+- Webview, icon [\#2552](https://github.com/kivy/python-for-android/issues/2552)
+- SONAME header not present in libpython3.8.so [\#2548](https://github.com/kivy/python-for-android/issues/2548)
+- How to mention Python modules in Kivy buildozer.spec file? [\#2547](https://github.com/kivy/python-for-android/issues/2547)
+- Issue with pyaudio and portaudio [\#2535](https://github.com/kivy/python-for-android/issues/2535)
+- \[Temporary Resolved\] Python 4 android in mac os with Apple Silicon via Roseta [\#2528](https://github.com/kivy/python-for-android/issues/2528)
+- Scipy is not installed due to "Error: 'numpy' must be installed before running the build." [\#2509](https://github.com/kivy/python-for-android/issues/2509)
+- Lapack depends on arm-linux-androideabi-gfortran [\#2508](https://github.com/kivy/python-for-android/issues/2508)
+- Apk file built by buildozer is large in comparision to other apks [\#2473](https://github.com/kivy/python-for-android/issues/2473)
+- p4a is not compatible with ndk \>= 22 [\#2391](https://github.com/kivy/python-for-android/issues/2391)
+- Sympy module. Error in buildozer: no module named sympy.testing [\#2381](https://github.com/kivy/python-for-android/issues/2381)
+- build.gradle 'compile' depreciated [\#2362](https://github.com/kivy/python-for-android/issues/2362)
+- API 29 support [\#2360](https://github.com/kivy/python-for-android/issues/2360)
+- python for android [\#2307](https://github.com/kivy/python-for-android/issues/2307)
+- application is not working in android made with buildozer kivy [\#2260](https://github.com/kivy/python-for-android/issues/2260)
+- hostpython3 unpack error [\#2247](https://github.com/kivy/python-for-android/issues/2247)
+- no recipe for pyaudio \_portaudio. [\#2223](https://github.com/kivy/python-for-android/issues/2223)
+- How to add a native Python package for kivy? [\#2089](https://github.com/kivy/python-for-android/issues/2089)
+- scipy module fails loading for 32 bit and 64 bit APK builds. [\#2061](https://github.com/kivy/python-for-android/issues/2061)
+- Support for androidx [\#2020](https://github.com/kivy/python-for-android/issues/2020)
+- Cannot build apk using buidozer [\#2005](https://github.com/kivy/python-for-android/issues/2005)
+- Android NDK - "$NDK/platforms/android-25" missing? [\#1992](https://github.com/kivy/python-for-android/issues/1992)
+- Tidy up NDK 19+ support [\#1962](https://github.com/kivy/python-for-android/issues/1962)
+- Support for NDK 19 [\#1613](https://github.com/kivy/python-for-android/issues/1613)
+- Android NDK 18b issues [\#1525](https://github.com/kivy/python-for-android/issues/1525)
+- Google requiring 64 bits binary in August 2019 [\#1519](https://github.com/kivy/python-for-android/issues/1519)
+- Investigate Azure Pipelines [\#1400](https://github.com/kivy/python-for-android/issues/1400)
+
+**Merged pull requests:**
+
+- Use `shutil.which` instead of `sh.which` [\#2637](https://github.com/kivy/python-for-android/pull/2637) ([misl6](https://github.com/misl6))
+- add service\_lib and aar to the docs [\#2634](https://github.com/kivy/python-for-android/pull/2634) ([mzakharo](https://github.com/mzakharo))
+- Fix issue \#2630 [\#2631](https://github.com/kivy/python-for-android/pull/2631) ([Neizvestnyj](https://github.com/Neizvestnyj))
+- lapack/scipy: support NDK r21e, x86/64 archs [\#2619](https://github.com/kivy/python-for-android/pull/2619) ([mzakharo](https://github.com/mzakharo))
+- add scipy/lapack CI tests [\#2617](https://github.com/kivy/python-for-android/pull/2617) ([mzakharo](https://github.com/mzakharo))
+- use LEGACY\_NDK option to build lapack/scipy with a separate NDK [\#2615](https://github.com/kivy/python-for-android/pull/2615) ([mzakharo](https://github.com/mzakharo))
+- Fixing service\_library bootstrap + .aar build. [\#2612](https://github.com/kivy/python-for-android/pull/2612) ([mzakharo](https://github.com/mzakharo))
+- Bump groestlcoin\_hash to 1.0.3 [\#2607](https://github.com/kivy/python-for-android/pull/2607) ([gruve-p](https://github.com/gruve-p))
+- removed `usr` and `lib` from ndk library path in `librt` recipe [\#2606](https://github.com/kivy/python-for-android/pull/2606) ([kengoon](https://github.com/kengoon))
+- changed arch.ndk\_platform to arch.ndk\_lib\_dir in `librt` recipe [\#2605](https://github.com/kivy/python-for-android/pull/2605) ([kengoon](https://github.com/kengoon))
+- Our self-hosted Apple Silicon runner now has been migrated to actions/runner v2.292.0 which now supports arm64 natively [\#2602](https://github.com/kivy/python-for-android/pull/2602) ([misl6](https://github.com/misl6))
+- Introduces pkg\_config\_location in Prerequisite and use OpenSSLPrerequisite\(\).pkg\_config\_location in hostpython3, so we can support ssl on hostpython3 just out of the box also on macOS [\#2599](https://github.com/kivy/python-for-android/pull/2599) ([misl6](https://github.com/misl6))
+- Add service to webview test app [\#2598](https://github.com/kivy/python-for-android/pull/2598) ([dbnicholson](https://github.com/dbnicholson))
+- Fix webview testapp jnius usage [\#2597](https://github.com/kivy/python-for-android/pull/2597) ([dbnicholson](https://github.com/dbnicholson))
+- Support multiarch in webview bootstrap [\#2596](https://github.com/kivy/python-for-android/pull/2596) ([dbnicholson](https://github.com/dbnicholson))
+- Handle all the macOS prerequisites \(except NDK/SDK\) via prerequisites.py [\#2594](https://github.com/kivy/python-for-android/pull/2594) ([misl6](https://github.com/misl6))
+- Prefer avdmanager from cmdline-tools [\#2593](https://github.com/kivy/python-for-android/pull/2593) ([dbnicholson](https://github.com/dbnicholson))
+- \*\_rebuild\_updated\_recipes CI jobs now test the updated recipe along all the supported Android archs \(arm64-v8a, armeabi-v7a, x86\_64, x86\) [\#2592](https://github.com/kivy/python-for-android/pull/2592) ([misl6](https://github.com/misl6))
+- Introduces pythonforandroid/prerequisites.py \(Experimental\). This allows a more granular check and install process for dependencies on both CI jobs and users installation. [\#2591](https://github.com/kivy/python-for-android/pull/2591) ([misl6](https://github.com/misl6))
+- Added py3dns recipe [\#2590](https://github.com/kivy/python-for-android/pull/2590) ([Neizvestnyj](https://github.com/Neizvestnyj))
+- Upload artifacts produced from every build platform, not only ubuntu-latest [\#2588](https://github.com/kivy/python-for-android/pull/2588) ([misl6](https://github.com/misl6))
+- Fixes a typo in macos\_rebuild\_updated\_recipes [\#2587](https://github.com/kivy/python-for-android/pull/2587) ([misl6](https://github.com/misl6))
+- Added pythonforandroid.androidndk.AndroidNDK + some changes needed in order to support build on Apple Silicon macs. [\#2586](https://github.com/kivy/python-for-android/pull/2586) ([misl6](https://github.com/misl6))
+- Set PATH using real SDK and NDK directories [\#2583](https://github.com/kivy/python-for-android/pull/2583) ([dbnicholson](https://github.com/dbnicholson))
+- Add missing fetch-depth: 0 on macos\_rebuild\_updated\_recipes [\#2579](https://github.com/kivy/python-for-android/pull/2579) ([misl6](https://github.com/misl6))
+- Bumps libffi to v3.4.2 + adds -fPIC on i686-linux-android [\#2578](https://github.com/kivy/python-for-android/pull/2578) ([misl6](https://github.com/misl6))
+- Bumps numpy version to 1.22.3, cython version to 0.29.28 and fixes numpy build on macOS [\#2575](https://github.com/kivy/python-for-android/pull/2575) ([misl6](https://github.com/misl6))
+- macOS CI: ADD APK, AAB & Updated Recipes build [\#2574](https://github.com/kivy/python-for-android/pull/2574) ([misl6](https://github.com/misl6))
+- add version check to unpackPyBundle [\#2565](https://github.com/kivy/python-for-android/pull/2565) ([mzakharo](https://github.com/mzakharo))
+- Merges master into develop after release 2022.03.13 [\#2562](https://github.com/kivy/python-for-android/pull/2562) ([misl6](https://github.com/misl6))
+- Fixes App Icon and Presplash\_Screen For Webview bootstrap [\#2556](https://github.com/kivy/python-for-android/pull/2556) ([kengoon](https://github.com/kengoon))
+- NDK 23 + Gradle 7 support [\#2550](https://github.com/kivy/python-for-android/pull/2550) ([misl6](https://github.com/misl6))
+
## [v2022.03.13](https://github.com/kivy/python-for-android/tree/v2022.03.13) (2022-03-13)
[Full Changelog](https://github.com/kivy/python-for-android/compare/v2021.09.05...v2022.03.13)
diff --git a/Makefile b/Makefile
index b0d3da88..97f50221 100644
--- a/Makefile
+++ b/Makefile
@@ -106,17 +106,17 @@ docker/run/command: docker/build
docker/run/make/with-artifact/apk/%: docker/build
docker run --name p4a-latest --env-file=.env $(DOCKER_IMAGE) make $*
- docker cp p4a-latest:/home/user/app/testapps/on_device_unit_tests/bdist_unit_tests_app-debug-1.1-.apk ./apks
+ docker cp p4a-latest:/home/user/app/testapps/on_device_unit_tests/bdist_unit_tests_app-debug-1.1.apk ./apks
docker rm -fv p4a-latest
docker/run/make/with-artifact/aar/%: docker/build
docker run --name p4a-latest --env-file=.env $(DOCKER_IMAGE) make $*
- docker cp p4a-latest:/home/user/app/testapps/on_device_unit_tests/bdist_unit_tests_app-release-1.1-.aar ./aars
+ docker cp p4a-latest:/home/user/app/testapps/on_device_unit_tests/bdist_unit_tests_app-release-1.1.aar ./aars
docker rm -fv p4a-latest
docker/run/make/with-artifact/aab/%: docker/build
docker run --name p4a-latest --env-file=.env $(DOCKER_IMAGE) make $*
- docker cp p4a-latest:/home/user/app/testapps/on_device_unit_tests/bdist_unit_tests_app-release-1.1-.aab ./aabs
+ docker cp p4a-latest:/home/user/app/testapps/on_device_unit_tests/bdist_unit_tests_app-release-1.1.aab ./aabs
docker rm -fv p4a-latest
docker/run/make/rebuild_updated_recipes: docker/build
diff --git a/pythonforandroid/__init__.py b/pythonforandroid/__init__.py
index eb2a14cb..e014c4ff 100644
--- a/pythonforandroid/__init__.py
+++ b/pythonforandroid/__init__.py
@@ -1,1 +1,1 @@
-__version__ = '2022.03.13'
+__version__ = '2022.07.20'
diff --git a/pythonforandroid/bootstraps/service_only/build/src/main/java/org/kivy/android/PythonActivity.java b/pythonforandroid/bootstraps/service_only/build/src/main/java/org/kivy/android/PythonActivity.java
index 919c42b0..87ea061c 100644
--- a/pythonforandroid/bootstraps/service_only/build/src/main/java/org/kivy/android/PythonActivity.java
+++ b/pythonforandroid/bootstraps/service_only/build/src/main/java/org/kivy/android/PythonActivity.java
@@ -177,19 +177,22 @@ public class PythonActivity extends Activity {
new File(getApplicationInfo().nativeLibraryDir));
}
- long lastBackClick = SystemClock.elapsedRealtime();
+ long lastBackClick = 0;
@Override
public boolean onKeyDown(int keyCode, KeyEvent event) {
- // If it wasn't the Back key or there's no web page history, bubble up to the default
- // system behavior (probably exit the activity)
- if (SystemClock.elapsedRealtime() - lastBackClick > 2000){
+ // Check if the key event was the Back button
+ if (keyCode == KeyEvent.KEYCODE_BACK) {
+ // If there's no web page history, bubble up to the default
+ // system behavior (probably exit the activity)
+ if (SystemClock.elapsedRealtime() - lastBackClick > 2000){
+ lastBackClick = SystemClock.elapsedRealtime();
+ Toast.makeText(this, "Tap again to close the app", Toast.LENGTH_LONG).show();
+ return true;
+ }
+
lastBackClick = SystemClock.elapsedRealtime();
- Toast.makeText(this, "Click again to close the app",
- Toast.LENGTH_LONG).show();
- return true;
}
- lastBackClick = SystemClock.elapsedRealtime();
return super.onKeyDown(keyCode, event);
}
diff --git a/pythonforandroid/bootstraps/webview/build/src/main/java/org/kivy/android/PythonActivity.java b/pythonforandroid/bootstraps/webview/build/src/main/java/org/kivy/android/PythonActivity.java
index b8499849..8aa308b2 100644
--- a/pythonforandroid/bootstraps/webview/build/src/main/java/org/kivy/android/PythonActivity.java
+++ b/pythonforandroid/bootstraps/webview/build/src/main/java/org/kivy/android/PythonActivity.java
@@ -32,6 +32,7 @@ import android.graphics.Color;
import android.widget.AbsoluteLayout;
import android.view.ViewGroup.LayoutParams;
+import android.webkit.WebBackForwardList;
import android.webkit.WebViewClient;
import android.webkit.WebView;
import android.webkit.CookieManager;
@@ -269,24 +270,30 @@ public class PythonActivity extends Activity {
return mLayout;
}
- long lastBackClick = SystemClock.elapsedRealtime();
+ long lastBackClick = 0;
@Override
public boolean onKeyDown(int keyCode, KeyEvent event) {
- // Check if the key event was the Back button and if there's history
- if ((keyCode == KeyEvent.KEYCODE_BACK) && mWebView.canGoBack()) {
- mWebView.goBack();
- return true;
- }
- // If it wasn't the Back key or there's no web page history, bubble up to the default
- // system behavior (probably exit the activity)
- if (SystemClock.elapsedRealtime() - lastBackClick > 2000){
+ // Check if the key event was the Back button
+ if (keyCode == KeyEvent.KEYCODE_BACK) {
+ // Go back if there is web page history behind,
+ // but not to the start preloader
+ WebBackForwardList webViewBackForwardList = mWebView.copyBackForwardList();
+ if (webViewBackForwardList.getCurrentIndex() > 1) {
+ mWebView.goBack();
+ return true;
+ }
+
+ // If there's no web page history, bubble up to the default
+ // system behavior (probably exit the activity)
+ if (SystemClock.elapsedRealtime() - lastBackClick > 2000){
+ lastBackClick = SystemClock.elapsedRealtime();
+ Toast.makeText(this, "Tap again to close the app", Toast.LENGTH_LONG).show();
+ return true;
+ }
+
lastBackClick = SystemClock.elapsedRealtime();
- Toast.makeText(this, "Click again to close the app",
- Toast.LENGTH_LONG).show();
- return true;
}
- lastBackClick = SystemClock.elapsedRealtime();
return super.onKeyDown(keyCode, event);
}
diff --git a/pythonforandroid/toolchain.py b/pythonforandroid/toolchain.py
index 7e19aef2..359ea636 100644
--- a/pythonforandroid/toolchain.py
+++ b/pythonforandroid/toolchain.py
@@ -720,7 +720,7 @@ class ToolchainCL:
self._archs = args.arch
- self.ctx.local_recipes = args.local_recipes
+ self.ctx.local_recipes = realpath(args.local_recipes)
self.ctx.copy_libs = args.copy_libs
self.ctx.activity_class_name = args.activity_class_name
@@ -1147,7 +1147,7 @@ class ToolchainCL:
if package_add_version:
info('# Add version number to android package')
package_name = basename(package_file)[:-len(package_extension)]
- package_file_dest = "{}-{}-{}".format(
+ package_file_dest = "{}-{}{}".format(
package_name, build_args.version, package_extension)
info('# Android package renamed to {}'.format(package_file_dest))
shprint(sh.cp, package_file, package_file_dest)
|
kivy/python-for-android
|
28151d1864df9564ee796910f38a995b54d73631
|
diff --git a/tests/test_toolchain.py b/tests/test_toolchain.py
index 23d0d3ff..0cc2b1a7 100644
--- a/tests/test_toolchain.py
+++ b/tests/test_toolchain.py
@@ -1,4 +1,5 @@
import io
+import os
import sys
import pytest
from unittest import mock
@@ -136,3 +137,34 @@ class TestToolchainCL:
assert expected_string in m_stdout.getvalue()
# deletes static attribute to not mess with other tests
del Recipe.recipes
+
+ def test_local_recipes_dir(self):
+ """
+ Checks the `local_recipes` attribute in the Context is absolute.
+ """
+ cwd = os.path.realpath(os.getcwd())
+ common_args = [
+ 'toolchain.py',
+ 'recommendations',
+ ]
+
+ # Check the default ./p4a-recipes becomes absolute.
+ argv = common_args
+ with patch_sys_argv(argv):
+ toolchain = ToolchainCL()
+ expected_local_recipes = os.path.join(cwd, 'p4a-recipes')
+ assert toolchain.ctx.local_recipes == expected_local_recipes
+
+ # Check a supplied relative directory becomes absolute.
+ argv = common_args + ['--local-recipes=foo']
+ with patch_sys_argv(argv):
+ toolchain = ToolchainCL()
+ expected_local_recipes = os.path.join(cwd, 'foo')
+ assert toolchain.ctx.local_recipes == expected_local_recipes
+
+ # An absolute directory should remain unchanged.
+ local_recipes = os.path.join(cwd, 'foo')
+ argv = common_args + ['--local-recipes={}'.format(local_recipes)]
+ with patch_sys_argv(argv):
+ toolchain = ToolchainCL()
+ assert toolchain.ctx.local_recipes == local_recipes
|
splash always loading
I built an app with bootstrap=webview.
The application works perfectly, but when I press the back button the splash screen comes back and stays loaded. I want to prevent this so that pressing the back button exits the application instead. Is there any option to access the onResume methods...? Is there any way to solve this problem?
|
0.0
|
28151d1864df9564ee796910f38a995b54d73631
|
[
"tests/test_toolchain.py::TestToolchainCL::test_local_recipes_dir"
] |
[
"tests/test_toolchain.py::TestToolchainCL::test_help",
"tests/test_toolchain.py::TestToolchainCL::test_unknown",
"tests/test_toolchain.py::TestToolchainCL::test_create",
"tests/test_toolchain.py::TestToolchainCL::test_create_no_sdk_dir",
"tests/test_toolchain.py::TestToolchainCL::test_recipes"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-07-18 17:13:40+00:00
|
mit
| 3,443 |
|
kiwicom__structlog-sentry-13
|
diff --git a/structlog_sentry/__init__.py b/structlog_sentry/__init__.py
index f6a7168..c8055c9 100644
--- a/structlog_sentry/__init__.py
+++ b/structlog_sentry/__init__.py
@@ -40,6 +40,9 @@ class SentryProcessor:
:param event_dict: structlog event_dict
"""
exc_info = event_dict.pop("exc_info", sys.exc_info())
+ if exc_info is True:
+ # logger.exeception() or logger.error(exc_info=True)
+ exc_info = sys.exc_info()
has_exc_info = exc_info and exc_info != (None, None, None)
if has_exc_info:
|
kiwicom/structlog-sentry
|
7a2276e899a5a822925546c6dd07c578fa3ec317
|
diff --git a/test/test_sentry_processor.py b/test/test_sentry_processor.py
index 3307c82..22ca188 100644
--- a/test/test_sentry_processor.py
+++ b/test/test_sentry_processor.py
@@ -77,6 +77,27 @@ def test_sentry_log_failure(mocker, level):
)
[email protected]("level", ["error", "critical"])
+def test_sentry_log_failure_exc_info_true(mocker, level):
+ """Make sure sentry_sdk.utils.exc_info_from_error doesn't raise ValueError
+ Because it can't introspect exc_info.
+ Bug triggered when logger.error(..., exc_info=True) or logger.exception(...)
+ are used.
+ """
+ m_capture_event = mocker.patch("structlog_sentry.capture_event")
+
+ event_data = {"level": level, "event": level + " message", "exc_info": True}
+ processor = SentryProcessor(level=getattr(logging, level.upper()))
+ try:
+ 1 / 0
+ except ZeroDivisionError:
+ processor(None, None, event_data)
+
+ assert m_capture_event.call_count == 1
+ _, kwargs = m_capture_event.call_args
+ assert kwargs["hint"]["exc_info"][0] == ZeroDivisionError
+
+
@pytest.mark.parametrize("level", ["debug", "info", "warning"])
def test_sentry_log_no_extra(mocker, level):
m_capture_event = mocker.patch("structlog_sentry.capture_event")
|
Support for capturing stack locals with `logging.error()`
Sentry supports capturing stack variables for non-exception messages with `logger.error("something", exc_info=True)` (added in https://github.com/getsentry/sentry-python/pull/176).
I might have some time to contribute this, but I'm opening it here first in case I don't, or in case you have other ideas. :)
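A short sketch of the call pattern this is about, assuming a `SentryProcessor` is already configured as in the README; passing `exc_info=True` (or calling `logger.exception()`) is what the test patch above exercises.
```python
import structlog

log = structlog.get_logger()

try:
    1 / 0
except ZeroDivisionError:
    # exc_info=True lands in the structlog event_dict as a bare boolean;
    # the patch above converts it to sys.exc_info() before the Sentry event
    # is built.  logger.exception(...) goes through the same path.
    log.error("division failed", exc_info=True)
```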
|
0.0
|
7a2276e899a5a822925546c6dd07c578fa3ec317
|
[
"test/test_sentry_processor.py::test_sentry_log_failure_exc_info_true[error]",
"test/test_sentry_processor.py::test_sentry_log_failure_exc_info_true[critical]"
] |
[
"test/test_sentry_processor.py::test_sentry_disabled",
"test/test_sentry_processor.py::test_sentry_skip",
"test/test_sentry_processor.py::test_sentry_sent",
"test/test_sentry_processor.py::test_sentry_log[debug]",
"test/test_sentry_processor.py::test_sentry_log[info]",
"test/test_sentry_processor.py::test_sentry_log[warning]",
"test/test_sentry_processor.py::test_sentry_log_failure[error]",
"test/test_sentry_processor.py::test_sentry_log_failure[critical]",
"test/test_sentry_processor.py::test_sentry_log_no_extra[debug]",
"test/test_sentry_processor.py::test_sentry_log_no_extra[info]",
"test/test_sentry_processor.py::test_sentry_log_no_extra[warning]",
"test/test_sentry_processor.py::test_sentry_log_all_as_tags[debug]",
"test/test_sentry_processor.py::test_sentry_log_all_as_tags[info]",
"test/test_sentry_processor.py::test_sentry_log_all_as_tags[warning]",
"test/test_sentry_processor.py::test_sentry_log_specific_keys_as_tags[debug]",
"test/test_sentry_processor.py::test_sentry_log_specific_keys_as_tags[info]",
"test/test_sentry_processor.py::test_sentry_log_specific_keys_as_tags[warning]",
"test/test_sentry_processor.py::test_sentry_json_ignore_logger_using_event_dict_logger_name",
"test/test_sentry_processor.py::test_sentry_json_ignore_logger_using_event_dict_record",
"test/test_sentry_processor.py::test_sentry_json_ignore_logger_using_logger_instance_name",
"test/test_sentry_processor.py::test_sentry_json_call_ignores_logger_once",
"test/test_sentry_processor.py::test_sentry_json_ignores_multiple_loggers_once"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-11-05 13:47:29+00:00
|
mit
| 3,444 |
|
kiwicom__structlog-sentry-18
|
diff --git a/README.md b/README.md
index f0cfe2f..7d67e81 100644
--- a/README.md
+++ b/README.md
@@ -67,6 +67,12 @@ except RequestException:
This will automatically collect `sys.exc_info()` along with the message, if you want
to turn this behavior off, just pass `exc_info=False`.
+When you want to use structlog's built-in
+[`format_exc_info`](http://www.structlog.org/en/stable/api.html#structlog.processors.format_exc_info)
+processor, make that the `SentryProcessor` comes *before* `format_exc_info`!
+Otherwise, the `SentryProcessor` won't have an `exc_info` to work with, because
+it's removed from the event by `format_exc_info`.
+
Logging calls with no `sys.exc_info()` are also automatically captured by Sentry:
```python
diff --git a/structlog_sentry/__init__.py b/structlog_sentry/__init__.py
index 8a5d041..d175653 100644
--- a/structlog_sentry/__init__.py
+++ b/structlog_sentry/__init__.py
@@ -39,7 +39,7 @@ class SentryProcessor:
:param event_dict: structlog event_dict
"""
- exc_info = event_dict.pop("exc_info", True)
+ exc_info = event_dict.get("exc_info", True)
if exc_info is True:
# logger.exeception() or logger.error(exc_info=True)
exc_info = sys.exc_info()
|
kiwicom/structlog-sentry
|
00ba79ef470ed81f4e396fde134f6612aa34551d
|
diff --git a/test/test_sentry_processor.py b/test/test_sentry_processor.py
index 22ca188..39f69f3 100644
--- a/test/test_sentry_processor.py
+++ b/test/test_sentry_processor.py
@@ -98,6 +98,27 @@ def test_sentry_log_failure_exc_info_true(mocker, level):
assert kwargs["hint"]["exc_info"][0] == ZeroDivisionError
+def test_sentry_log_leave_exc_info_untouched(mocker):
+ """Make sure exc_info remains in event_data at the end of the processor.
+
+ The structlog built-in format_exc_info processor pops the key and formats
+ it. Using SentryProcessor, and format_exc_info wasn't possible before,
+ because the latter one didn't have an exc_info to work with.
+
+ https://github.com/kiwicom/structlog-sentry/issues/16
+ """
+ mocker.patch("structlog_sentry.capture_event")
+
+ event_data = {"level": "warning", "event": "some.event", "exc_info": True}
+ processor = SentryProcessor()
+ try:
+ 1 / 0
+ except ZeroDivisionError:
+ processor(None, None, event_data)
+
+ assert "exc_info" in event_data
+
+
@pytest.mark.parametrize("level", ["debug", "info", "warning"])
def test_sentry_log_no_extra(mocker, level):
m_capture_event = mocker.patch("structlog_sentry.capture_event")
|
Can't use format_exc_info with SentryProcessor
The `SentryProcessor` as well as the structlog built-in `format_exc_info` processor both use `event_dict.pop("exc_info", None)`. This means that when the built-in processor is used to convert the exception into a readable traceback and the `SentryProcessor` is used to send the event and exception to Sentry, whichever of the two comes later in structlog's processor list has nothing left to work with.
I'd like to propose changing the `SentryProcessor` to use `event_dict.get("exc_info")` instead of `pop()` in https://github.com/kiwicom/structlog-sentry/blob/00ba79ef470ed81f4e396fde134f6612aa34551d/structlog_sentry/__init__.py#L42
The documentation should be changed such that it points out that structlog's built-in `format_exc_info` processor must come _after_ the `SentryProcessor`.
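A sketch of the ordering the README change above documents, assuming the rest of the structlog configuration is unchanged: `SentryProcessor` runs first so it still sees `exc_info`, then `format_exc_info` may pop it and render the traceback.
```python
import logging

import structlog
from structlog_sentry import SentryProcessor

structlog.configure(
    processors=[
        structlog.stdlib.add_log_level,
        SentryProcessor(level=logging.ERROR),   # still sees "exc_info"
        structlog.processors.format_exc_info,   # pops and formats it afterwards
        structlog.processors.JSONRenderer(),
    ],
)
```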
|
0.0
|
00ba79ef470ed81f4e396fde134f6612aa34551d
|
[
"test/test_sentry_processor.py::test_sentry_log_leave_exc_info_untouched"
] |
[
"test/test_sentry_processor.py::test_sentry_disabled",
"test/test_sentry_processor.py::test_sentry_skip",
"test/test_sentry_processor.py::test_sentry_sent",
"test/test_sentry_processor.py::test_sentry_log[debug]",
"test/test_sentry_processor.py::test_sentry_log[info]",
"test/test_sentry_processor.py::test_sentry_log[warning]",
"test/test_sentry_processor.py::test_sentry_log_failure[error]",
"test/test_sentry_processor.py::test_sentry_log_failure[critical]",
"test/test_sentry_processor.py::test_sentry_log_failure_exc_info_true[error]",
"test/test_sentry_processor.py::test_sentry_log_failure_exc_info_true[critical]",
"test/test_sentry_processor.py::test_sentry_log_no_extra[debug]",
"test/test_sentry_processor.py::test_sentry_log_no_extra[info]",
"test/test_sentry_processor.py::test_sentry_log_no_extra[warning]",
"test/test_sentry_processor.py::test_sentry_log_all_as_tags[debug]",
"test/test_sentry_processor.py::test_sentry_log_all_as_tags[info]",
"test/test_sentry_processor.py::test_sentry_log_all_as_tags[warning]",
"test/test_sentry_processor.py::test_sentry_log_specific_keys_as_tags[debug]",
"test/test_sentry_processor.py::test_sentry_log_specific_keys_as_tags[info]",
"test/test_sentry_processor.py::test_sentry_log_specific_keys_as_tags[warning]",
"test/test_sentry_processor.py::test_sentry_json_ignore_logger_using_event_dict_logger_name",
"test/test_sentry_processor.py::test_sentry_json_ignore_logger_using_event_dict_record",
"test/test_sentry_processor.py::test_sentry_json_ignore_logger_using_logger_instance_name",
"test/test_sentry_processor.py::test_sentry_json_call_ignores_logger_once",
"test/test_sentry_processor.py::test_sentry_json_ignores_multiple_loggers_once"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-03-03 21:47:58+00:00
|
mit
| 3,445 |
|
kiwicom__structlog-sentry-19
|
diff --git a/README.md b/README.md
index 7d67e81..e02f2f1 100644
--- a/README.md
+++ b/README.md
@@ -31,6 +31,7 @@ sentry_sdk.init() # pass dsn in argument or via SENTRY_DSN env variable
structlog.configure(
processors=[
+ structlog.stdlib.add_logger_name, # optional, but before SentryProcessor()
structlog.stdlib.add_log_level, # required before SentryProcessor()
SentryProcessor(level=logging.ERROR),
],
@@ -42,8 +43,9 @@ structlog.configure(
log = structlog.get_logger()
```
-Do not forget to add the `structlog.stdlib.add_log_level` processor before
-`SentryProcessor`. The `SentryProcessor` class takes the following arguments:
+Do not forget to add the `structlog.stdlib.add_log_level` and optionally the
+`structlog.stdlib.add_logger_name` processors before `SentryProcessor`. The
+`SentryProcessor` class takes the following arguments:
- `level` - events of this or higher levels will be reported to Sentry,
default is `WARNING`
@@ -95,6 +97,7 @@ You can set some or all of key/value pairs of structlog `event_dict` as sentry `
```python
structlog.configure(
processors=[
+ structlog.stdlib.add_logger_name,
structlog.stdlib.add_log_level,
SentryProcessor(level=logging.ERROR, tag_keys=["city", "timezone"]),
],...
@@ -109,6 +112,7 @@ If you want to have all event data as tags, create the `SentryProcessor` with `t
```python
structlog.configure(
processors=[
+ structlog.stdlib.add_logger_name,
structlog.stdlib.add_log_level,
SentryProcessor(level=logging.ERROR, tag_keys="__all__"),
],...
@@ -123,6 +127,7 @@ Sometimes you may want to skip this, specially when sending the `event_dict` as
```python
structlog.configure(
processors=[
+ structlog.stdlib.add_logger_name,
structlog.stdlib.add_log_level,
SentryProcessor(level=logging.ERROR, as_extra=False, tag_keys="__all__"),
],...
diff --git a/structlog_sentry/__init__.py b/structlog_sentry/__init__.py
index d175653..f63ce6b 100644
--- a/structlog_sentry/__init__.py
+++ b/structlog_sentry/__init__.py
@@ -52,6 +52,8 @@ class SentryProcessor:
event["message"] = event_dict.get("event")
event["level"] = event_dict.get("level")
+ if "logger" in event_dict:
+ event["logger"] = event_dict["logger"]
if self._as_extra:
event["extra"] = self._original_event_dict
|
kiwicom/structlog-sentry
|
2fcdc295d35e0425859fe304e7e6de21237affca
|
diff --git a/test/test_sentry_processor.py b/test/test_sentry_processor.py
index 39f69f3..a9d70e8 100644
--- a/test/test_sentry_processor.py
+++ b/test/test_sentry_processor.py
@@ -98,6 +98,30 @@ def test_sentry_log_failure_exc_info_true(mocker, level):
assert kwargs["hint"]["exc_info"][0] == ZeroDivisionError
+absent = object()
+
+
[email protected]("logger", ["some.logger.name", absent])
+def test_sentry_add_logger_name(mocker, logger):
+ m_capture_event = mocker.patch("structlog_sentry.capture_event")
+
+ event_data = {"level": "warning", "event": "some.event"}
+ if logger is not absent:
+ event_data["logger"] = logger
+
+ processor = SentryProcessor(as_extra=False)
+ processor(None, None, event_data)
+
+ if logger is absent:
+ m_capture_event.assert_called_once_with(
+ {"level": "warning", "message": "some.event"}, hint=None
+ )
+ else:
+ m_capture_event.assert_called_once_with(
+ {"level": "warning", "message": "some.event", "logger": logger}, hint=None
+ )
+
+
def test_sentry_log_leave_exc_info_untouched(mocker):
"""Make sure exc_info remains in event_data at the end of the processor.
|
Logger not set when recording events
The Sentry SDK sets a `logger` key at the root level of the `event`: https://github.com/getsentry/sentry-python/blob/718f61b892398c1156ab8952f41494768175da6f/sentry_sdk/integrations/logging.py#L206. structlog-sentry does not do that: https://github.com/kiwicom/structlog-sentry/blob/00ba79ef470ed81f4e396fde134f6612aa34551d/structlog_sentry/__init__.py#L53-L54
I'd like to suggest adding the logger when it is present in the `event_dict`, and documenting the use of structlog's built-in `add_logger_name` processor.
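A sketch of the configuration this suggestion implies, mirroring the README change in the patch above; with `add_logger_name` in front, the processor can copy the `logger` key into the Sentry event.
```python
import logging

import structlog
from structlog_sentry import SentryProcessor

structlog.configure(
    processors=[
        structlog.stdlib.add_logger_name,  # puts "logger" into the event_dict
        structlog.stdlib.add_log_level,
        SentryProcessor(level=logging.ERROR),
    ],
)
```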
|
0.0
|
2fcdc295d35e0425859fe304e7e6de21237affca
|
[
"test/test_sentry_processor.py::test_sentry_add_logger_name[some.logger.name]"
] |
[
"test/test_sentry_processor.py::test_sentry_disabled",
"test/test_sentry_processor.py::test_sentry_skip",
"test/test_sentry_processor.py::test_sentry_sent",
"test/test_sentry_processor.py::test_sentry_log[debug]",
"test/test_sentry_processor.py::test_sentry_log[info]",
"test/test_sentry_processor.py::test_sentry_log[warning]",
"test/test_sentry_processor.py::test_sentry_log_failure[error]",
"test/test_sentry_processor.py::test_sentry_log_failure[critical]",
"test/test_sentry_processor.py::test_sentry_log_failure_exc_info_true[error]",
"test/test_sentry_processor.py::test_sentry_log_failure_exc_info_true[critical]",
"test/test_sentry_processor.py::test_sentry_add_logger_name[logger1]",
"test/test_sentry_processor.py::test_sentry_log_leave_exc_info_untouched",
"test/test_sentry_processor.py::test_sentry_log_no_extra[debug]",
"test/test_sentry_processor.py::test_sentry_log_no_extra[info]",
"test/test_sentry_processor.py::test_sentry_log_no_extra[warning]",
"test/test_sentry_processor.py::test_sentry_log_all_as_tags[debug]",
"test/test_sentry_processor.py::test_sentry_log_all_as_tags[info]",
"test/test_sentry_processor.py::test_sentry_log_all_as_tags[warning]",
"test/test_sentry_processor.py::test_sentry_log_specific_keys_as_tags[debug]",
"test/test_sentry_processor.py::test_sentry_log_specific_keys_as_tags[info]",
"test/test_sentry_processor.py::test_sentry_log_specific_keys_as_tags[warning]",
"test/test_sentry_processor.py::test_sentry_json_ignore_logger_using_event_dict_logger_name",
"test/test_sentry_processor.py::test_sentry_json_ignore_logger_using_event_dict_record",
"test/test_sentry_processor.py::test_sentry_json_ignore_logger_using_logger_instance_name",
"test/test_sentry_processor.py::test_sentry_json_call_ignores_logger_once",
"test/test_sentry_processor.py::test_sentry_json_ignores_multiple_loggers_once"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-03-03 22:16:28+00:00
|
mit
| 3,446 |
|
kiwicom__structlog-sentry-4
|
diff --git a/README.md b/README.md
index 436ea1e..37f9b09 100644
--- a/README.md
+++ b/README.md
@@ -77,6 +77,68 @@ optional argument to logger methods, like this:
log.error(sentry_skip=True)
```
+### Sentry Tags
+
+You can set some or all of key/value pairs of structlog `event_dict` as sentry `tags`:
+
+```python
+structlog.configure(
+ processors=[
+ structlog.stdlib.add_log_level,
+ SentryProcessor(level=logging.ERROR, tag_keys=["city", "timezone"]),
+ ],...
+)
+
+log.error("error message", city="Tehran", timezone="UTC+3:30", movie_title="Some title")
+```
+
+this will report the error and the sentry event will have **city** and **timezone** tags.
+If you want to have all event data as tags, create the `SentryProcessor` with `tag_keys="__all__"`.
+
+```python
+structlog.configure(
+ processors=[
+ structlog.stdlib.add_log_level,
+ SentryProcessor(level=logging.ERROR, tag_keys="__all__"),
+ ],...
+)
+```
+
+### Skip Extra
+
+By default `SentryProcessor` will send `event_dict` key/value pairs as extra info to the sentry.
+Sometimes you may want to skip this, specially when sending the `event_dict` as sentry tags:
+
+```python
+structlog.configure(
+ processors=[
+ structlog.stdlib.add_log_level,
+ SentryProcessor(level=logging.ERROR, as_extra=False, tag_keys="__all__"),
+ ],...
+)
+```
+
+### Logging as JSON
+
+If you want to configure `structlog` to format the output as **JSON**
+(maybe for [elk-stack](https://www.elastic.co/elk-stack)) you have to use `SentryJsonProcessor` to prevent
+duplication of an event reported to sentry.
+
+```python
+from structlog_sentry import SentryJsonProcessor
+
+structlog.configure(
+ processors=[
+ structlog.stdlib.add_logger_name, # required before SentryJsonProcessor()
+ structlog.stdlib.add_log_level,
+ SentryJsonProcessor(level=logging.ERROR, tag_keys="__all__"),
+ structlog.processors.JSONRenderer()
+ ],...
+)
+```
+
+This processor tells sentry to *ignore* the logger and captures the events manually.
+
## Testing
To run all tests:
diff --git a/setup.py b/setup.py
index cddb9fc..dc9ed96 100644
--- a/setup.py
+++ b/setup.py
@@ -11,7 +11,7 @@ with open("README.md") as f:
setup(
name="structlog-sentry",
- version="1.0.0",
+ version="1.1.0",
url="https://github.com/kiwicom/structlog-sentry",
long_description=readme,
long_description_content_type="text/markdown",
diff --git a/structlog_sentry/__init__.py b/structlog_sentry/__init__.py
index 0c43731..9d9d1b7 100644
--- a/structlog_sentry/__init__.py
+++ b/structlog_sentry/__init__.py
@@ -1,19 +1,40 @@
import logging
-import os
import sys
+from typing import List, Optional, Tuple, Union
from sentry_sdk import capture_event
+from sentry_sdk.integrations.logging import ignore_logger
from sentry_sdk.utils import event_from_exception
class SentryProcessor:
- """Sentry processor for structlog. Uses Sentry SDK to capture events in Sentry."""
- def __init__(self, level=logging.WARNING, active=True):
+ def __init__(
+ self,
+ level: int = logging.WARNING,
+ active: bool = True,
+ as_extra: bool = True,
+ tag_keys: Union[List[str], str] = None) -> None:
+ """Sentry processor for structlog. Uses Sentry SDK to capture events in Sentry.
+
+ :param level: events of this or higher levels will be reported to Sentry.
+ :param active: a flag to make this processor enabled/disabled.
+ :param as_extra: send `event_dict` as extra info to Sentry.
+ :param tag_keys: a list of keys. If any if these keys appear in `event_dict`,
+ the key and its corresponding value in `event_dict` will be used as Sentry event tags. use `"__all__"` to report
+ all key/value pairs of event as tags.
+ """
self.level = level
self.active = active
+ self.tag_keys = tag_keys
+ self._as_extra = as_extra
+ self._original_event_dict = None
+
+ def _get_event_and_hint(self, event_dict: dict) -> Tuple[dict, Optional[str]]:
+ """Create a sentry event and hint from structlog `event_dict` and sys.exc_info.
- def _log(self, event_dict, level):
+ :param event_dict: structlog event_dict
+ """
exc_info = event_dict.pop("exc_info", sys.exc_info())
has_exc_info = exc_info and exc_info != (None, None, None)
@@ -23,24 +44,71 @@ class SentryProcessor:
event, hint = {}, None
event["message"] = event_dict.get("event")
- event["level"] = level
- event["extra"] = event_dict
+ event["level"] = event_dict.get("level")
+
+ if self._as_extra:
+ event["extra"] = self._original_event_dict
+ if self.tag_keys == "__all__":
+ event["tags"] = self._original_event_dict
+ elif isinstance(self.tag_keys, list):
+ event["tags"] = {key: event_dict[key] for key in self.tag_keys if key in event_dict}
+
+ return event, hint
+
+ def _log(self, event_dict: dict) -> str:
+ """Send an event to Sentry and return sentry event id.
+ :param event_dict: structlog event_dict
+ """
+ event, hint = self._get_event_and_hint(event_dict)
return capture_event(event, hint=hint)
- def __call__(self, logger, method, event_dict):
- event_dict["sentry"] = "skipped"
+ def __call__(self, logger, method, event_dict) -> dict:
+ """A middleware to process structlog `event_dict` and send it to Sentry."""
+ self._original_event_dict = event_dict.copy()
sentry_skip = event_dict.pop("sentry_skip", False)
-
- level = event_dict["level"]
- do_log = getattr(logging, level.upper()) >= self.level
+ do_log = getattr(logging, event_dict["level"].upper()) >= self.level
if sentry_skip or not self.active or not do_log:
+ event_dict["sentry"] = "skipped"
return event_dict
- sid = self._log(event_dict, level=level)
-
- event_dict["sentry_id"] = sid
+ sid = self._log(event_dict)
event_dict["sentry"] = "sent"
+ event_dict["sentry_id"] = sid
return event_dict
+
+
+class SentryJsonProcessor(SentryProcessor):
+ """Sentry processor for structlog which uses JSONRenderer. Uses Sentry SDK to capture events in Sentry."""
+
+ def __init__(self, *args, **kwargs) -> None:
+ super().__init__(*args, **kwargs)
+ self._is_logger_ignored = False
+
+ def __call__(self, logger, method, event_dict) -> dict:
+ if not self._is_logger_ignored:
+ self._ignore_logger(logger, event_dict)
+ return super().__call__(logger, method, event_dict)
+
+ def _ignore_logger(self, logger, event_dict: dict) -> None:
+ """Tell Sentry to ignore logger. This is temporary workaround to prevent duplication of a JSON event in Sentry.
+
+ :param logger: logger instance
+ :param event_dict: structlog event_dict
+ """
+ record = event_dict.get("_record")
+ l_name = event_dict.get("logger")
+ if l_name:
+ logger_name = l_name
+ elif record is None:
+ logger_name = logger.name
+ else:
+ logger_name = record.name
+
+ if not logger_name:
+ raise Exception("Cannot ignore logger without a name.")
+
+ ignore_logger(logger_name)
+ self._is_logger_ignored = True
|
kiwicom/structlog-sentry
|
2023588436e1b123ce9928dcc30b3374840f3dd1
|
diff --git a/test/test_sentry_processor.py b/test/test_sentry_processor.py
index ab0f618..8f15de2 100644
--- a/test/test_sentry_processor.py
+++ b/test/test_sentry_processor.py
@@ -2,7 +2,12 @@ import logging
import pytest
-from structlog_sentry import SentryProcessor
+from structlog_sentry import SentryJsonProcessor, SentryProcessor
+
+
+class MockLogger:
+ def __init__(self, name):
+ self.name = name
def test_sentry_disabled():
@@ -28,6 +33,7 @@ def test_sentry_log(mocker, level):
m_capture_event = mocker.patch("structlog_sentry.capture_event")
event_data = {"level": level, "event": level + " message"}
+ sentry_event_data = event_data.copy()
processor = SentryProcessor(level=getattr(logging, level.upper()))
processor(None, None, event_data)
@@ -35,7 +41,7 @@ def test_sentry_log(mocker, level):
{
"level": level,
"message": event_data["event"],
- "extra": event_data,
+ "extra": sentry_event_data,
},
hint=None,
)
@@ -57,6 +63,7 @@ def test_sentry_log_failure(mocker, level):
)
event_data = {"level": level, "event": level + " message"}
+ sentry_event_data = event_data.copy()
processor = SentryProcessor(level=getattr(logging, level.upper()))
try:
1 / 0
@@ -68,7 +75,133 @@ def test_sentry_log_failure(mocker, level):
"level": level,
"message": event_data["event"],
"exception": mocker.sentinel.exception,
- "extra": event_data,
+ "extra": sentry_event_data,
},
hint=mocker.sentinel.hint,
)
+
+
[email protected]("level", ["debug", "info", "warning"])
+def test_sentry_log_no_extra(mocker, level):
+ m_capture_event = mocker.patch("structlog_sentry.capture_event")
+
+ event_data = {"level": level, "event": level + " message"}
+ processor = SentryProcessor(level=getattr(logging, level.upper()), as_extra=False)
+ processor(None, None, event_data)
+
+ m_capture_event.assert_called_once_with(
+ {
+ "level": level,
+ "message": event_data["event"],
+ },
+ hint=None,
+ )
+
+ processor_only_errors = SentryProcessor(level=logging.ERROR)
+ event_dict = processor_only_errors(
+ None, None, {"level": level, "event": level + " message"}
+ )
+
+ assert event_dict.get("sentry") != "sent"
+
+
[email protected]("level", ["debug", "info", "warning"])
+def test_sentry_log_all_as_tags(mocker, level):
+ m_capture_event = mocker.patch("structlog_sentry.capture_event")
+
+ event_data = {"level": level, "event": level + " message"}
+ sentry_event_data = event_data.copy()
+ processor = SentryProcessor(level=getattr(logging, level.upper()), tag_keys="__all__")
+ processor(None, None, event_data)
+
+ m_capture_event.assert_called_once_with(
+ {
+ "level": level,
+ "message": event_data["event"],
+ "extra": sentry_event_data,
+ "tags": sentry_event_data,
+ },
+ hint=None,
+ )
+
+ processor_only_errors = SentryProcessor(level=logging.ERROR)
+ event_dict = processor_only_errors(
+ None, None, {"level": level, "event": level + " message"}
+ )
+
+ assert event_dict.get("sentry") != "sent"
+
+
[email protected]("level", ["debug", "info", "warning"])
+def test_sentry_log_specific_keys_as_tags(mocker, level):
+ m_capture_event = mocker.patch("structlog_sentry.capture_event")
+
+ event_data = {"level": level, "event": level + " message", "info1": "info1", "required": True}
+ tag_keys = ["info1", "required", "some non existing key"]
+ sentry_event_data = event_data.copy()
+ processor = SentryProcessor(level=getattr(logging, level.upper()), tag_keys=tag_keys)
+ processor(None, None, event_data)
+
+ m_capture_event.assert_called_once_with(
+ {
+ "level": level,
+ "message": event_data["event"],
+ "extra": sentry_event_data,
+ "tags": {k: sentry_event_data[k] for k in tag_keys if k in sentry_event_data},
+ },
+ hint=None,
+ )
+
+ processor_only_errors = SentryProcessor(level=logging.ERROR)
+ event_dict = processor_only_errors(
+ None, None, {"level": level, "event": level + " message"}
+ )
+
+ assert event_dict.get("sentry") != "sent"
+
+
+def test_sentry_json_ignore_logger_using_event_dict_logger_name(mocker):
+ m_ignore_logger = mocker.patch("structlog_sentry.ignore_logger")
+ m_logger = MockLogger("MockLogger")
+ event_data = {"level": "info", "event": "message", "logger": "EventLogger", "_record": MockLogger("RecordLogger")}
+ processor = SentryJsonProcessor()
+
+ assert not processor._is_logger_ignored
+ processor._ignore_logger(logger=m_logger, event_dict=event_data)
+ m_ignore_logger.assert_called_once_with(event_data["logger"])
+ assert processor._is_logger_ignored
+
+
+def test_sentry_json_ignore_logger_using_event_dict_record(mocker):
+ m_ignore_logger = mocker.patch("structlog_sentry.ignore_logger")
+ m_logger = MockLogger("MockLogger")
+ event_data = {"level": "info", "event": "message", "_record": MockLogger("RecordLogger")}
+ processor = SentryJsonProcessor()
+
+ assert not processor._is_logger_ignored
+ processor._ignore_logger(logger=m_logger, event_dict=event_data)
+ m_ignore_logger.assert_called_once_with(event_data["_record"].name)
+ assert processor._is_logger_ignored
+
+
+def test_sentry_json_ignore_logger_using_logger_instance_name(mocker):
+ m_ignore_logger = mocker.patch("structlog_sentry.ignore_logger")
+ m_logger = MockLogger("MockLogger")
+ event_data = {"level": "info", "event": "message"}
+ processor = SentryJsonProcessor()
+
+ assert not processor._is_logger_ignored
+ processor._ignore_logger(logger=m_logger, event_dict=event_data)
+ m_ignore_logger.assert_called_once_with(m_logger.name)
+ assert processor._is_logger_ignored
+
+
+def test_sentry_json_call_ignores_logger_once(mocker):
+ processor = SentryJsonProcessor()
+ m_ignore_logger = mocker.patch("structlog_sentry.ignore_logger")
+ event_data = {"level": "warning", "event": "message", "sentry_skip": True}
+ logger = MockLogger("MockLogger")
+ processor(logger, None, event_data)
+ processor(logger, None, event_data)
+ processor(logger, None, event_data)
+ m_ignore_logger.assert_called_once_with(logger.name)
|
Log messages are sent twice when using processors.JSONRenderer
First of all, I would like to thank you guys for this package. It's really nice.
In a production environment I use `processors.JSONRenderer` to format logs properly for log storage systems like the [ELK stack](https://www.elastic.co/elk-stack). Now I am trying to configure Sentry together with structlog and your package.
The problem is that the log messages are sent twice.
Here's the code:
```python
## config = dict(...)
if config.sentry.enabled:
    sentry_sdk.init(
        dsn=config.sentry.dsn,
        send_default_pii=True,
        attach_stacktrace="On",
    )

logging.basicConfig(format='%(message)s', stream=sys.stdout, level=config.level)

chain = [
    structlog.stdlib.add_log_level,
    SentryProcessor(level=logging.WARNING, active=config.sentry.enabled),
    structlog.processors.JSONRenderer(),
]

structlog.configure_once(
    processors=chain,
    context_class=dict,
    logger_factory=structlog.stdlib.LoggerFactory(),
    wrapper_class=structlog.stdlib.BoundLogger,
    cache_logger_on_first_use=True,
)
```
Result screenshot:

Any suggestions on how to work around this?
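For reference, the fix in the patch above is a new `SentryJsonProcessor` that calls the Sentry SDK's `ignore_logger()` for the originating logger, so the logging integration no longer reports the same message a second time. A minimal sketch of the reworked chain, reusing the configuration from this report (the processor arguments are assumptions based on the diff):
```python
# Minimal sketch, assuming the SentryJsonProcessor added in the patch above: it tells
# sentry_sdk's logging integration to ignore the stdlib logger, avoiding the duplicate event.
import logging
import structlog
from structlog_sentry import SentryJsonProcessor

chain = [
    structlog.stdlib.add_log_level,
    SentryJsonProcessor(level=logging.WARNING, active=True),
    structlog.processors.JSONRenderer(),
]
structlog.configure_once(
    processors=chain,
    context_class=dict,
    logger_factory=structlog.stdlib.LoggerFactory(),
    wrapper_class=structlog.stdlib.BoundLogger,
    cache_logger_on_first_use=True,
)
```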
|
0.0
|
2023588436e1b123ce9928dcc30b3374840f3dd1
|
[
"test/test_sentry_processor.py::test_sentry_disabled",
"test/test_sentry_processor.py::test_sentry_skip",
"test/test_sentry_processor.py::test_sentry_sent",
"test/test_sentry_processor.py::test_sentry_log[debug]",
"test/test_sentry_processor.py::test_sentry_log[info]",
"test/test_sentry_processor.py::test_sentry_log[warning]",
"test/test_sentry_processor.py::test_sentry_log_failure[error]",
"test/test_sentry_processor.py::test_sentry_log_failure[critical]",
"test/test_sentry_processor.py::test_sentry_log_no_extra[debug]",
"test/test_sentry_processor.py::test_sentry_log_no_extra[info]",
"test/test_sentry_processor.py::test_sentry_log_no_extra[warning]",
"test/test_sentry_processor.py::test_sentry_log_all_as_tags[debug]",
"test/test_sentry_processor.py::test_sentry_log_all_as_tags[info]",
"test/test_sentry_processor.py::test_sentry_log_all_as_tags[warning]",
"test/test_sentry_processor.py::test_sentry_log_specific_keys_as_tags[debug]",
"test/test_sentry_processor.py::test_sentry_log_specific_keys_as_tags[info]",
"test/test_sentry_processor.py::test_sentry_log_specific_keys_as_tags[warning]",
"test/test_sentry_processor.py::test_sentry_json_ignore_logger_using_event_dict_logger_name",
"test/test_sentry_processor.py::test_sentry_json_ignore_logger_using_event_dict_record",
"test/test_sentry_processor.py::test_sentry_json_ignore_logger_using_logger_instance_name",
"test/test_sentry_processor.py::test_sentry_json_call_ignores_logger_once"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_media",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-07-23 09:31:40+00:00
|
mit
| 3,447 |
|
kjd__idna-111
|
diff --git a/idna/core.py b/idna/core.py
index d605129..871ebd3 100644
--- a/idna/core.py
+++ b/idna/core.py
@@ -383,8 +383,11 @@ def encode(s, strict=False, uts46=False, std3_rules=False, transitional=False):
def decode(s, strict=False, uts46=False, std3_rules=False):
# type: (Union[str, bytes, bytearray], bool, bool, bool) -> str
- if isinstance(s, (bytes, bytearray)):
- s = s.decode('ascii')
+ try:
+ if isinstance(s, (bytes, bytearray)):
+ s = s.decode('ascii')
+ except UnicodeDecodeError:
+ raise IDNAError('Invalid ASCII in A-label')
if uts46:
s = uts46_remap(s, std3_rules, False)
trailing_dot = False
|
kjd/idna
|
d0116b5d390a3f7934501d328faa6daaeea21124
|
diff --git a/tests/test_idna.py b/tests/test_idna.py
index 9e1d2d9..7ac6057 100755
--- a/tests/test_idna.py
+++ b/tests/test_idna.py
@@ -259,6 +259,7 @@ class IDNATests(unittest.TestCase):
self.assertRaises(idna.IDNAError, idna.decode, 'XN---------90GGLBAGAAC.AA')
self.assertRaises(idna.IDNAError, idna.decode, 'xn---------90gglbagaac.aa')
self.assertRaises(idna.IDNAError, idna.decode, 'xn--')
+ self.assertRaises(idna.IDNAError, idna.decode, b'\x8d\xd2')
if __name__ == '__main__':
unittest.main()
|
[Uncaught exception] UnicodeDecodeError when calling decode with arbitrary data
Hey,
I was doing some fuzzing and found that the `idna.decode()` method can raise a `UnicodeDecodeError`, leading to a crash of the running program.
This could be problematic if users of the library are dealing with untrusted data, since the issue can lead to a DoS. It should be detected and an `idna.IDNAError` raised instead, as detailed in the README's [Exceptions](https://github.com/kjd/idna#exceptions) section.
# Reproduction
``` python
import idna
idna.decode(b'\x8d\xd2')
```
# Traceback
``` sh
python3 replay_crash.py
Traceback (most recent call last):
File "replay_crash.py", line 3, in <module>
idna.decode(b'\x8d\xd2')
File "/home/scop/.local/lib/python3.8/site-packages/idna/core.py", line 387, in decode
s = s.decode('ascii')
UnicodeDecodeError: 'ascii' codec can't decode byte 0x8d in position 0: ordinal not in range(128)
```
Happy to help if you have any questions ;)
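For completeness, a small sketch of the behaviour after the patch above, which converts the crash into the documented exception type:
```python
# Sketch of the post-patch behaviour: invalid ASCII in an A-label now surfaces as
# idna.IDNAError ("Invalid ASCII in A-label") instead of an unhandled UnicodeDecodeError.
import idna

try:
    idna.decode(b'\x8d\xd2')
except idna.IDNAError as exc:
    print(f"rejected: {exc}")
```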
|
0.0
|
d0116b5d390a3f7934501d328faa6daaeea21124
|
[
"tests/test_idna.py::IDNATests::test_decode"
] |
[
"tests/test_idna.py::IDNATests::testIDNTLDALabels",
"tests/test_idna.py::IDNATests::testIDNTLDULabels",
"tests/test_idna.py::IDNATests::test_check_bidi",
"tests/test_idna.py::IDNATests::test_check_hyphen_ok",
"tests/test_idna.py::IDNATests::test_check_initial_combiner",
"tests/test_idna.py::IDNATests::test_encode",
"tests/test_idna.py::IDNATests::test_valid_contextj",
"tests/test_idna.py::IDNATests::test_valid_contexto",
"tests/test_idna.py::IDNATests::test_valid_label_length"
] |
{
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-10-03 14:50:46+00:00
|
bsd-3-clause
| 3,448 |
|
kjd__idna-112
|
diff --git a/idna/core.py b/idna/core.py
index 871ebd3..c66fb9f 100644
--- a/idna/core.py
+++ b/idna/core.py
@@ -312,7 +312,10 @@ def ulabel(label):
check_label(label_bytes)
return label_bytes.decode('ascii')
- label = label_bytes.decode('punycode')
+ try:
+ label = label_bytes.decode('punycode')
+ except UnicodeError:
+ raise IDNAError('Invalid A-label')
check_label(label)
return label
|
kjd/idna
|
c3383c97b3fffd8aa73aaefd16baf9c6da1e9f4e
|
diff --git a/tests/test_idna.py b/tests/test_idna.py
index 7ac6057..f0c74d2 100755
--- a/tests/test_idna.py
+++ b/tests/test_idna.py
@@ -260,6 +260,7 @@ class IDNATests(unittest.TestCase):
self.assertRaises(idna.IDNAError, idna.decode, 'xn---------90gglbagaac.aa')
self.assertRaises(idna.IDNAError, idna.decode, 'xn--')
self.assertRaises(idna.IDNAError, idna.decode, b'\x8d\xd2')
+ self.assertRaises(idna.IDNAError, idna.decode, b'A.A.0.a.a.A.0.a.A.A.0.a.A.0A.2.a.A.A.0.a.A.0.A.a.A0.a.a.A.0.a.fB.A.A.a.A.A.B.A.A.a.A.A.B.A.A.a.A.A.0.a.A.a.a.A.A.0.a.A.0.A.a.A0.a.a.A.0.a.fB.A.A.a.A.A.B.0A.A.a.A.A.B.A.A.a.A.A.a.A.A.B.A.A.a.A.0.a.B.A.A.a.A.B.A.a.A.A.5.a.A.0.a.Ba.A.B.A.A.a.A.0.a.Xn--B.A.A.A.a')
if __name__ == '__main__':
unittest.main()
|
[Uncaught exception] UnicodeError (punycode) when calling decode with arbitrary data
Hey,
I just found another case making the program crash where `idna.decode()` raise a `UnicodeError` (due to punycode).
In the same way as #108, this could be problematic if users of the library are dealing with untrusted data since this issue will lead to a DoS. This should be detected and an `idna.IDNAError` should be triggered as detailed in the Readme [Exception](https://github.com/kjd/idna#exceptions) section.
# Reproduction
``` python
import idna
idna.decode(b'A.A.0.a.a.A.0.a.A.A.0.a.A.0A.2.a.A.A.0.a.A.0.A.a.A0.a.a.A.0.a.fB.A.A.a.A.A.B.A.A.a.A.A.B.A.A.a.A.A.0.a.A.a.a.A.A.0.a.A.0.A.a.A0.a.a.A.0.a.fB.A.A.a.A.A.B.0A.A.a.A.A.B.A.A.a.A.A.a.A.A.B.A.A.a.A.0.a.B.A.A.a.A.B.A.a.A.A.5.a.A.0.a.Ba.A.B.A.A.a.A.0.a.Xn--B.A.A.A.a')
```
# Traceback
``` sh
python3 replay_crash2.py
Traceback (most recent call last):
File "/usr/lib/python3.8/encodings/punycode.py", line 134, in decode_generalized_number
char = ord(extended[extpos])
IndexError: string index out of range
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3.8/encodings/punycode.py", line 207, in decode
res = punycode_decode(input, errors)
File "/usr/lib/python3.8/encodings/punycode.py", line 194, in punycode_decode
return insertion_sort(base, extended, errors)
File "/usr/lib/python3.8/encodings/punycode.py", line 164, in insertion_sort
newpos, delta = decode_generalized_number(extended, extpos,
File "/usr/lib/python3.8/encodings/punycode.py", line 137, in decode_generalized_number
raise UnicodeError("incomplete punicode string")
UnicodeError: incomplete punicode string
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "replay_crash2.py", line 3, in <module>
idna.decode(b'A.A.0.a.a.A.0.a.A.A.0.a.A.0A.2.a.A.A.0.a.A.0.A.a.A0.a.a.A.0.a.fB.A.A.a.A.A.B.A.A.a.A.A.B.A.A.a.A.A.0.a.A.a.a.A.A.0.a.A.0.A.a.A0.a.a.A.0.a.fB.A.A.a.A.A.B.0A.A.a.A.A.B.A.A.a.A.A.a.A.A.B.A.A.a.A.0.a.B.A.A.a.A.B.A.a.A.A.5.a.A.0.a.Ba.A.B.A.A.a.A.0.a.Xn--B.A.A.A.a')
File "/home/scop/.local/lib/python3.8/site-packages/idna/core.py", line 402, in decode
s = ulabel(label)
File "/home/scop/.local/lib/python3.8/site-packages/idna/core.py", line 315, in ulabel
label = label_bytes.decode('punycode')
UnicodeError: decoding with 'punycode' codec failed (UnicodeError: incomplete punicode string)
```
Happy to help if you have any questions ;)
|
0.0
|
c3383c97b3fffd8aa73aaefd16baf9c6da1e9f4e
|
[
"tests/test_idna.py::IDNATests::test_decode"
] |
[
"tests/test_idna.py::IDNATests::testIDNTLDALabels",
"tests/test_idna.py::IDNATests::testIDNTLDULabels",
"tests/test_idna.py::IDNATests::test_check_bidi",
"tests/test_idna.py::IDNATests::test_check_hyphen_ok",
"tests/test_idna.py::IDNATests::test_check_initial_combiner",
"tests/test_idna.py::IDNATests::test_encode",
"tests/test_idna.py::IDNATests::test_valid_contextj",
"tests/test_idna.py::IDNATests::test_valid_contexto",
"tests/test_idna.py::IDNATests::test_valid_label_length"
] |
{
"failed_lite_validators": [
"has_issue_reference",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-10-03 15:01:39+00:00
|
bsd-3-clause
| 3,449 |
|
kjd__idna-145
|
diff --git a/README.rst b/README.rst
index 27731b1..c926351 100644
--- a/README.rst
+++ b/README.rst
@@ -57,9 +57,9 @@ You may use the codec encoding and decoding methods using the
.. code-block:: pycon
>>> import idna.codec
- >>> print('домен.испытание'.encode('idna'))
+ >>> print('домен.испытание'.encode('idna2008'))
b'xn--d1acufc.xn--80akhbyknj4f'
- >>> print(b'xn--d1acufc.xn--80akhbyknj4f'.decode('idna'))
+ >>> print(b'xn--d1acufc.xn--80akhbyknj4f'.decode('idna2008'))
домен.испытание
Conversions can be applied at a per-label basis using the ``ulabel`` or
diff --git a/idna/codec.py b/idna/codec.py
index 7a0558d..eaeada5 100644
--- a/idna/codec.py
+++ b/idna/codec.py
@@ -1,7 +1,7 @@
from .core import encode, decode, alabel, ulabel, IDNAError
import codecs
import re
-from typing import Tuple, Optional
+from typing import Any, Tuple, Optional
_unicode_dots_re = re.compile('[\u002e\u3002\uff0e\uff61]')
@@ -26,24 +26,24 @@ class Codec(codecs.Codec):
return decode(data), len(data)
class IncrementalEncoder(codecs.BufferedIncrementalEncoder):
- def _buffer_encode(self, data: str, errors: str, final: bool) -> Tuple[str, int]: # type: ignore
+ def _buffer_encode(self, data: str, errors: str, final: bool) -> Tuple[bytes, int]:
if errors != 'strict':
raise IDNAError('Unsupported error handling \"{}\"'.format(errors))
if not data:
- return "", 0
+ return b'', 0
labels = _unicode_dots_re.split(data)
- trailing_dot = ''
+ trailing_dot = b''
if labels:
if not labels[-1]:
- trailing_dot = '.'
+ trailing_dot = b'.'
del labels[-1]
elif not final:
# Keep potentially unfinished label until the next call
del labels[-1]
if labels:
- trailing_dot = '.'
+ trailing_dot = b'.'
result = []
size = 0
@@ -54,18 +54,21 @@ class IncrementalEncoder(codecs.BufferedIncrementalEncoder):
size += len(label)
# Join with U+002E
- result_str = '.'.join(result) + trailing_dot # type: ignore
+ result_bytes = b'.'.join(result) + trailing_dot
size += len(trailing_dot)
- return result_str, size
+ return result_bytes, size
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
- def _buffer_decode(self, data: str, errors: str, final: bool) -> Tuple[str, int]: # type: ignore
+ def _buffer_decode(self, data: Any, errors: str, final: bool) -> Tuple[str, int]:
if errors != 'strict':
raise IDNAError('Unsupported error handling \"{}\"'.format(errors))
if not data:
return ('', 0)
+ if not isinstance(data, str):
+ data = str(data, 'ascii')
+
labels = _unicode_dots_re.split(data)
trailing_dot = ''
if labels:
@@ -99,13 +102,11 @@ class StreamReader(Codec, codecs.StreamReader):
pass
-def getregentry(name: str) -> Optional[codecs.CodecInfo]:
- if name != 'idna' and name != 'idna2008':
+def search_function(name: str) -> Optional[codecs.CodecInfo]:
+ if name != 'idna2008':
return None
-
- # Compatibility as a search_function for codecs.register()
return codecs.CodecInfo(
- name='idna2008',
+ name=name,
encode=Codec().encode, # type: ignore
decode=Codec().decode, # type: ignore
incrementalencoder=IncrementalEncoder,
@@ -114,4 +115,4 @@ def getregentry(name: str) -> Optional[codecs.CodecInfo]:
streamreader=StreamReader,
)
-codecs.register(getregentry)
+codecs.register(search_function)
diff --git a/idna/core.py b/idna/core.py
index 4f30037..0bd89a3 100644
--- a/idna/core.py
+++ b/idna/core.py
@@ -338,9 +338,9 @@ def uts46_remap(domain: str, std3_rules: bool = True, transitional: bool = False
def encode(s: Union[str, bytes, bytearray], strict: bool = False, uts46: bool = False, std3_rules: bool = False, transitional: bool = False) -> bytes:
- if isinstance(s, (bytes, bytearray)):
+ if not isinstance(s, str):
try:
- s = s.decode('ascii')
+ s = str(s, 'ascii')
except UnicodeDecodeError:
raise IDNAError('should pass a unicode string to the function rather than a byte string.')
if uts46:
@@ -372,8 +372,8 @@ def encode(s: Union[str, bytes, bytearray], strict: bool = False, uts46: bool =
def decode(s: Union[str, bytes, bytearray], strict: bool = False, uts46: bool = False, std3_rules: bool = False) -> str:
try:
- if isinstance(s, (bytes, bytearray)):
- s = s.decode('ascii')
+ if not isinstance(s, str):
+ s = str(s, 'ascii')
except UnicodeDecodeError:
raise IDNAError('Invalid ASCII in A-label')
if uts46:
|
kjd/idna
|
55e98a52da69835d720c0c41c555f731afdab359
|
diff --git a/tests/test_idna.py b/tests/test_idna.py
index 1035bcf..81afb32 100755
--- a/tests/test_idna.py
+++ b/tests/test_idna.py
@@ -231,37 +231,45 @@ class IDNATests(unittest.TestCase):
self.assertTrue(idna.valid_contexto(ext_arabic_digit + ext_arabic_digit, 0))
self.assertFalse(idna.valid_contexto(ext_arabic_digit + arabic_digit, 0))
- def test_encode(self):
-
- self.assertEqual(idna.encode('xn--zckzah.xn--zckzah'), b'xn--zckzah.xn--zckzah')
- self.assertEqual(idna.encode('\u30c6\u30b9\u30c8.xn--zckzah'), b'xn--zckzah.xn--zckzah')
- self.assertEqual(idna.encode('\u30c6\u30b9\u30c8.\u30c6\u30b9\u30c8'), b'xn--zckzah.xn--zckzah')
- self.assertEqual(idna.encode('abc.abc'), b'abc.abc')
- self.assertEqual(idna.encode('xn--zckzah.abc'), b'xn--zckzah.abc')
- self.assertEqual(idna.encode('\u30c6\u30b9\u30c8.abc'), b'xn--zckzah.abc')
- self.assertEqual(idna.encode('\u0521\u0525\u0523-\u0523\u0523-----\u0521\u0523\u0523\u0523.aa'),
+ def test_encode(self, encode=None, skip_bytes=False):
+ if encode is None:
+ encode = idna.encode
+
+ self.assertEqual(encode('xn--zckzah.xn--zckzah'), b'xn--zckzah.xn--zckzah')
+ self.assertEqual(encode('\u30c6\u30b9\u30c8.xn--zckzah'), b'xn--zckzah.xn--zckzah')
+ self.assertEqual(encode('\u30c6\u30b9\u30c8.\u30c6\u30b9\u30c8'), b'xn--zckzah.xn--zckzah')
+ self.assertEqual(encode('abc.abc'), b'abc.abc')
+ self.assertEqual(encode('xn--zckzah.abc'), b'xn--zckzah.abc')
+ self.assertEqual(encode('\u30c6\u30b9\u30c8.abc'), b'xn--zckzah.abc')
+ self.assertEqual(encode('\u0521\u0525\u0523-\u0523\u0523-----\u0521\u0523\u0523\u0523.aa'),
b'xn---------90gglbagaar.aa')
- self.assertRaises(idna.IDNAError, idna.encode,
- '\u0521\u0524\u0523-\u0523\u0523-----\u0521\u0523\u0523\u0523.aa', uts46=False)
- self.assertEqual(idna.encode('a'*63), b'a'*63)
- self.assertRaises(idna.IDNAError, idna.encode, 'a'*64)
- self.assertRaises(idna.core.InvalidCodepoint, idna.encode, '*')
- self.assertRaises(idna.IDNAError, idna.encode, b'\x0a\x33\x81')
-
- def test_decode(self):
-
- self.assertEqual(idna.decode('xn--zckzah.xn--zckzah'), '\u30c6\u30b9\u30c8.\u30c6\u30b9\u30c8')
- self.assertEqual(idna.decode('\u30c6\u30b9\u30c8.xn--zckzah'), '\u30c6\u30b9\u30c8.\u30c6\u30b9\u30c8')
- self.assertEqual(idna.decode('\u30c6\u30b9\u30c8.\u30c6\u30b9\u30c8'),
- '\u30c6\u30b9\u30c8.\u30c6\u30b9\u30c8')
- self.assertEqual(idna.decode('abc.abc'), 'abc.abc')
- self.assertEqual(idna.decode('xn---------90gglbagaar.aa'),
+ if encode is idna.encode:
+ self.assertRaises(idna.IDNAError, encode,
+ '\u0521\u0524\u0523-\u0523\u0523-----\u0521\u0523\u0523\u0523.aa', uts46=False)
+ self.assertEqual(encode('a'*63), b'a'*63)
+ self.assertRaises(idna.IDNAError, encode, 'a'*64)
+ self.assertRaises(idna.core.InvalidCodepoint, encode, '*')
+ if not skip_bytes:
+ self.assertRaises(idna.IDNAError, encode, b'\x0a\x33\x81')
+
+ def test_decode(self, decode=None, skip_str=False):
+ if decode is None:
+ decode = idna.decode
+ self.assertEqual(decode(b'xn--zckzah.xn--zckzah'), '\u30c6\u30b9\u30c8.\u30c6\u30b9\u30c8')
+ self.assertEqual(decode(b'xn--d1acufc.xn--80akhbyknj4f'),
+ '\u0434\u043e\u043c\u0435\u043d.\u0438\u0441\u043f\u044b\u0442\u0430\u043d\u0438\u0435')
+ if not skip_str:
+ self.assertEqual(decode('\u30c6\u30b9\u30c8.xn--zckzah'), '\u30c6\u30b9\u30c8.\u30c6\u30b9\u30c8')
+ self.assertEqual(decode('\u30c6\u30b9\u30c8.\u30c6\u30b9\u30c8'),
+ '\u30c6\u30b9\u30c8.\u30c6\u30b9\u30c8')
+ self.assertEqual(decode('abc.abc'), 'abc.abc')
+ self.assertEqual(decode(b'xn---------90gglbagaar.aa'),
'\u0521\u0525\u0523-\u0523\u0523-----\u0521\u0523\u0523\u0523.aa')
- self.assertRaises(idna.IDNAError, idna.decode, 'XN---------90GGLBAGAAC.AA')
- self.assertRaises(idna.IDNAError, idna.decode, 'xn---------90gglbagaac.aa')
- self.assertRaises(idna.IDNAError, idna.decode, 'xn--')
- self.assertRaises(idna.IDNAError, idna.decode, b'\x8d\xd2')
- self.assertRaises(idna.IDNAError, idna.decode, b'A.A.0.a.a.A.0.a.A.A.0.a.A.0A.2.a.A.A.0.a.A.0.A.a.A0.a.a.A.0.a.fB.A.A.a.A.A.B.A.A.a.A.A.B.A.A.a.A.A.0.a.A.a.a.A.A.0.a.A.0.A.a.A0.a.a.A.0.a.fB.A.A.a.A.A.B.0A.A.a.A.A.B.A.A.a.A.A.a.A.A.B.A.A.a.A.0.a.B.A.A.a.A.B.A.a.A.A.5.a.A.0.a.Ba.A.B.A.A.a.A.0.a.Xn--B.A.A.A.a')
+ self.assertRaises(idna.IDNAError, decode, b'XN---------90GGLBAGAAC.AA')
+ self.assertRaises(idna.IDNAError, decode, b'xn---------90gglbagaac.aa')
+ self.assertRaises(idna.IDNAError, decode, b'xn--')
+ self.assertRaises(idna.IDNAError, decode, b'\x8d\xd2')
+ self.assertRaises(idna.IDNAError, decode, b'A.A.0.a.a.A.0.a.A.A.0.a.A.0A.2.a.A.A.0.a.A.0.A.a.A0.a.a.A.0.a.fB.A.A.a.A.A.B.A.A.a.A.A.B.A.A.a.A.A.0.a.A.a.a.A.A.0.a.A.0.A.a.A0.a.a.A.0.a.fB.A.A.a.A.A.B.0A.A.a.A.A.B.A.A.a.A.A.a.A.A.B.A.A.a.A.0.a.B.A.A.a.A.B.A.a.A.A.5.a.A.0.a.Ba.A.B.A.A.a.A.0.a.Xn--B.A.A.A.a')
if __name__ == '__main__':
unittest.main()
diff --git a/tests/test_idna_codec.py b/tests/test_idna_codec.py
index 4aad3c2..a1ecffe 100755
--- a/tests/test_idna_codec.py
+++ b/tests/test_idna_codec.py
@@ -1,15 +1,51 @@
#!/usr/bin/env python
import codecs
-import sys
+import io
import unittest
import idna.codec
+CODEC_NAME = 'idna2008'
+
class IDNACodecTests(unittest.TestCase):
-
+ def setUp(self):
+ from . import test_idna
+ self.idnatests = test_idna.IDNATests()
+ self.idnatests.setUp()
+
def testCodec(self):
- pass
+ self.assertIs(codecs.lookup(CODEC_NAME).incrementalencoder, idna.codec.IncrementalEncoder)
+
+ def testDirectDecode(self):
+ self.idnatests.test_decode(decode=lambda obj: codecs.decode(obj, CODEC_NAME))
+
+ def testIndirectDecode(self):
+ self.idnatests.test_decode(decode=lambda obj: obj.decode(CODEC_NAME), skip_str=True)
+
+ def testDirectEncode(self):
+ self.idnatests.test_encode(encode=lambda obj: codecs.encode(obj, CODEC_NAME))
+
+ def testIndirectEncode(self):
+ self.idnatests.test_encode(encode=lambda obj: obj.encode(CODEC_NAME), skip_bytes=True)
+
+ def testStreamReader(self):
+ def decode(obj):
+ if isinstance(obj, str):
+ obj = bytes(obj, 'ascii')
+ buffer = io.BytesIO(obj)
+ stream = codecs.getreader(CODEC_NAME)(buffer)
+ return stream.read()
+ return self.idnatests.test_decode(decode=decode, skip_str=True)
+
+ def testStreamWriter(self):
+ def encode(obj):
+ buffer = io.BytesIO()
+ stream = codecs.getwriter(CODEC_NAME)(buffer)
+ stream.write(obj)
+ stream.flush()
+ return buffer.getvalue()
+ return self.idnatests.test_encode(encode=encode)
def testIncrementalDecoder(self):
@@ -23,10 +59,10 @@ class IDNACodecTests(unittest.TestCase):
)
for decoded, encoded in incremental_tests:
- self.assertEqual("".join(codecs.iterdecode((bytes([c]) for c in encoded), "idna")),
+ self.assertEqual("".join(codecs.iterdecode((bytes([c]) for c in encoded), CODEC_NAME)),
decoded)
- decoder = codecs.getincrementaldecoder("idna")()
+ decoder = codecs.getincrementaldecoder(CODEC_NAME)()
self.assertEqual(decoder.decode(b"xn--xam", ), "")
self.assertEqual(decoder.decode(b"ple-9ta.o", ), "\xe4xample.")
self.assertEqual(decoder.decode(b"rg"), "")
@@ -50,10 +86,10 @@ class IDNACodecTests(unittest.TestCase):
("pyth\xf6n.org.", b"xn--pythn-mua.org."),
)
for decoded, encoded in incremental_tests:
- self.assertEqual(b"".join(codecs.iterencode(decoded, "idna")),
+ self.assertEqual(b"".join(codecs.iterencode(decoded, CODEC_NAME)),
encoded)
- encoder = codecs.getincrementalencoder("idna")()
+ encoder = codecs.getincrementalencoder(CODEC_NAME)()
self.assertEqual(encoder.encode("\xe4x"), b"")
self.assertEqual(encoder.encode("ample.org"), b"xn--xample-9ta.")
self.assertEqual(encoder.encode("", True), b"org")
diff --git a/tests/test_idna_uts46.py b/tests/test_idna_uts46.py
index fd1996d..c540b04 100755
--- a/tests/test_idna_uts46.py
+++ b/tests/test_idna_uts46.py
@@ -124,7 +124,7 @@ class TestIdnaTest(unittest.TestCase):
def runTest(self):
if not self.fields:
- return ''
+ return
source, to_unicode, to_unicode_status, to_ascii, to_ascii_status, to_ascii_t, to_ascii_t_status = self.fields
if source in _SKIP_TESTS:
return
|
Using idna codec
As per the documentation I tried using the idna codec, but it still uses the one from `encodings.idna`.
```
>>> import idna.codec
>>> codecs.encode('fuß', 'idna')
b'fuss'
>>> codecs.getencoder('idna')
<bound method Codec.encode of <encodings.idna.Codec object at 0x7ff6eb158fa0>>
```
I also tried registering the codec with `idna.codec.getregentry`, with the same result. Am I missing something here?
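For reference, the patch above registers the codec under the name `idna2008` instead of trying to shadow the built-in `idna` codec, so the intended usage after the change looks roughly like this (the `fuß` example is taken from this report, the decode example from the patched README):
```python
# Sketch of the post-patch usage: importing idna.codec registers an 'idna2008' codec,
# leaving the stdlib 'idna' (IDNA 2003) codec from encodings.idna untouched.
import codecs
import idna.codec  # noqa: F401 - registers the 'idna2008' search function

print(codecs.encode('fuß', 'idna2008'))                            # b'xn--fu-hia'
print(codecs.decode(b'xn--d1acufc.xn--80akhbyknj4f', 'idna2008'))  # домен.испытание
```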
|
0.0
|
55e98a52da69835d720c0c41c555f731afdab359
|
[
"tests/test_idna_codec.py::IDNACodecTests::testIncrementalDecoder",
"tests/test_idna_codec.py::IDNACodecTests::testIncrementalEncoder",
"tests/test_idna_codec.py::IDNACodecTests::testIndirectDecode"
] |
[
"tests/test_idna.py::IDNATests::testIDNTLDALabels",
"tests/test_idna.py::IDNATests::testIDNTLDULabels",
"tests/test_idna.py::IDNATests::test_check_bidi",
"tests/test_idna.py::IDNATests::test_check_hyphen_ok",
"tests/test_idna.py::IDNATests::test_check_initial_combiner",
"tests/test_idna.py::IDNATests::test_decode",
"tests/test_idna.py::IDNATests::test_encode",
"tests/test_idna.py::IDNATests::test_valid_contextj",
"tests/test_idna.py::IDNATests::test_valid_contexto",
"tests/test_idna.py::IDNATests::test_valid_label_length",
"tests/test_idna_codec.py::IDNACodecTests::testCodec",
"tests/test_idna_codec.py::IDNACodecTests::testDirectDecode",
"tests/test_idna_codec.py::IDNACodecTests::testDirectEncode",
"tests/test_idna_codec.py::IDNACodecTests::testIndirectEncode",
"tests/test_idna_codec.py::IDNACodecTests::testStreamReader",
"tests/test_idna_codec.py::IDNACodecTests::testStreamWriter",
"tests/test_idna_uts46.py::TestIdnaTest::runTest"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-06-19 16:49:05+00:00
|
bsd-3-clause
| 3,450 |
|
kjd__idna-83
|
diff --git a/idna/core.py b/idna/core.py
index b861d14..2c193d6 100644
--- a/idna/core.py
+++ b/idna/core.py
@@ -296,6 +296,8 @@ def ulabel(label):
label = label.lower()
if label.startswith(_alabel_prefix):
label = label[len(_alabel_prefix):]
+ if not label:
+ raise IDNAError('Malformed A-label, no Punycode eligible content found')
if label.decode('ascii')[-1] == '-':
raise IDNAError('A-label must not end with a hyphen')
else:
|
kjd/idna
|
3062063d477591228a4b4d000688ef4e7ab7cf81
|
diff --git a/tests/test_idna.py b/tests/test_idna.py
index b62b9c6..9e1d2d9 100755
--- a/tests/test_idna.py
+++ b/tests/test_idna.py
@@ -258,6 +258,7 @@ class IDNATests(unittest.TestCase):
'\u0521\u0525\u0523-\u0523\u0523-----\u0521\u0523\u0523\u0523.aa')
self.assertRaises(idna.IDNAError, idna.decode, 'XN---------90GGLBAGAAC.AA')
self.assertRaises(idna.IDNAError, idna.decode, 'xn---------90gglbagaac.aa')
+ self.assertRaises(idna.IDNAError, idna.decode, 'xn--')
if __name__ == '__main__':
unittest.main()
|
IndexError with new trailing hyphen check
#64 introduced the following uncaught exception. Is this intended?
```python
>>> idna.ulabel('xn--')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/John/.pyenv/versions/3.6.5/Python.framework/Versions/3.6/lib/python3.6/site-packages/idna/core.py", line 303, in ulabel
if label.decode('ascii')[-1] == '-':
IndexError: string index out of range
```
I would've expected `UnicodeError` or `IDNAError`, which are more descriptive.
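After the patch above, the empty A-label is rejected with `IDNAError` just like the other malformed inputs; a minimal sketch in the style of the updated test:
```python
# Sketch of the post-patch behaviour: an A-label with no Punycode payload raises
# idna.IDNAError ("Malformed A-label, no Punycode eligible content found").
import idna
import pytest

with pytest.raises(idna.IDNAError):
    idna.ulabel('xn--')
```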
|
0.0
|
3062063d477591228a4b4d000688ef4e7ab7cf81
|
[
"tests/test_idna.py::IDNATests::test_decode"
] |
[
"tests/test_idna.py::IDNATests::testIDNTLDALabels",
"tests/test_idna.py::IDNATests::testIDNTLDULabels",
"tests/test_idna.py::IDNATests::test_check_bidi",
"tests/test_idna.py::IDNATests::test_check_hyphen_ok",
"tests/test_idna.py::IDNATests::test_check_initial_combiner",
"tests/test_idna.py::IDNATests::test_encode",
"tests/test_idna.py::IDNATests::test_valid_contextj",
"tests/test_idna.py::IDNATests::test_valid_contexto",
"tests/test_idna.py::IDNATests::test_valid_label_length"
] |
{
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-03-14 00:01:09+00:00
|
bsd-3-clause
| 3,451 |
|
klen__peewee_migrate-203
|
diff --git a/peewee_migrate/auto.py b/peewee_migrate/auto.py
index cab9f2a..a71ff4b 100644
--- a/peewee_migrate/auto.py
+++ b/peewee_migrate/auto.py
@@ -67,7 +67,11 @@ class Column(VanilaColumn):
if isinstance(field, pw.ForeignKeyField):
self.to_field = field.rel_field.name
self.related_name = field.backref
- self.rel_model = "migrator.orm['%s']" % field.rel_model._meta.table_name
+ self.rel_model = (
+ "'self'"
+ if field.rel_model == field.model
+ else "migrator.orm['%s']" % field.rel_model._meta.table_name
+ )
def get_field(self, space=' '):
# Generate the field definition for this column.
|
klen/peewee_migrate
|
c78f6fc199e73df086a4e4badbffefdb6e11b75f
|
diff --git a/tests/test_auto.py b/tests/test_auto.py
index 5123aa5..2065bc8 100644
--- a/tests/test_auto.py
+++ b/tests/test_auto.py
@@ -95,3 +95,28 @@ def test_auto_multi_column_index():
code = model_to_code(Object)
assert code
assert "indexes = [(('first_name', 'last_name'), True)]" in code
+
+
+def test_self_referencing_foreign_key_on_model_create():
+ from peewee_migrate.auto import field_to_code
+
+ class Employee(pw.Model):
+ manager = pw.ForeignKeyField("self")
+
+ code = field_to_code(Employee.manager)
+ assert "model='self'" in code
+
+
+def test_self_referencing_foreign_key_on_field_added():
+ from peewee_migrate.auto import diff_one
+
+ class Employee(pw.Model):
+ name = pw.CharField()
+
+ class EmployeeNew(pw.Model):
+ name = pw.CharField()
+ manager = pw.ForeignKeyField("self")
+
+ changes = diff_one(EmployeeNew, Employee)
+ assert "migrator.add_fields" in changes[0]
+ assert "model='self'" in changes[0]
|
Bug report: Self foreign keys are not handled
**Describe the bug**
After creating a migration for a model containing a foreign key to itself, the migration fails with a `KeyError`. After a quick investigation, the exact line containing the bug is `ForeignKeyField(model=migrator.orm["model"], ...)`. When this code executes, `"model"` has not yet been added to the `migrator.orm` dict. Instead, the call should look like `ForeignKeyField(model="self", ...)`. After changing it in the proposed way, the migration completes successfully.
**To Reproduce**
Steps to reproduce the behavior:
1. Write a model containing a self-referencing foreign key:
```python
class TestModel(Model):
    parent = ForeignKeyField("self")
```
2. Create a migration
3. Run the migration
**Expected behavior**
Migration should pass correctly.
**Desktop (please complete the following information):**
- Version 1.4.2
|
0.0
|
c78f6fc199e73df086a4e4badbffefdb6e11b75f
|
[
"tests/test_auto.py::test_self_referencing_foreign_key_on_model_create",
"tests/test_auto.py::test_self_referencing_foreign_key_on_field_added"
] |
[
"tests/test_auto.py::test_auto",
"tests/test_auto.py::test_auto_postgresext",
"tests/test_auto.py::test_auto_multi_column_index"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2021-06-21 22:44:57+00:00
|
bsd-3-clause
| 3,452 |
|
klen__peewee_migrate-212
|
diff --git a/peewee_migrate/auto.py b/peewee_migrate/auto.py
index 1043072..38a5ea6 100644
--- a/peewee_migrate/auto.py
+++ b/peewee_migrate/auto.py
@@ -156,6 +156,22 @@ def diff_one(model1: pw.Model, model2: pw.Model, **kwargs) -> t.List[str]:
else:
changes.append(drop_index(model1, name))
+ # Check additional compound indexes
+ indexes1 = model1._meta.indexes
+ indexes2 = model2._meta.indexes
+
+ # Drop compound indexes
+ indexes_to_drop = set(indexes2) - set(indexes1)
+ for index in indexes_to_drop:
+ if isinstance(index[0], (list, tuple)) and len(index[0]) > 1:
+ changes.append(drop_index(model1, name=index[0]))
+
+ # Add compound indexes
+ indexes_to_add = set(indexes1) - set(indexes2)
+ for index in indexes_to_add:
+ if isinstance(index[0], (list, tuple)) and len(index[0]) > 1:
+ changes.append(add_index(model1, name=index[0], unique=index[1]))
+
return changes
|
klen/peewee_migrate
|
903a8e19079ab9d9b934f37f7dea4eb53e071187
|
diff --git a/tests/test_auto.py b/tests/test_auto.py
index c770539..1be0853 100644
--- a/tests/test_auto.py
+++ b/tests/test_auto.py
@@ -95,6 +95,41 @@ def test_auto_multi_column_index():
assert "indexes = [(('first_name', 'last_name'), True)]" in code
+def test_diff_multi_column_index():
+ from peewee_migrate.auto import diff_one
+
+ class Object(pw.Model):
+ first_name = pw.CharField()
+ last_name = pw.CharField()
+
+ class ObjectWithUniqueIndex(pw.Model):
+ first_name = pw.CharField()
+ last_name = pw.CharField()
+
+ class Meta:
+ indexes = ((("first_name", "last_name"), True),)
+
+ class ObjectWithNonUniqueIndex(pw.Model):
+ first_name = pw.CharField()
+ last_name = pw.CharField()
+
+ class Meta:
+ indexes = ((("first_name", "last_name"), False),)
+
+ changes = diff_one(ObjectWithUniqueIndex, Object)
+ assert len(changes) == 1
+ assert "('first_name', 'last_name'), unique=True)" in changes[0]
+
+ changes = diff_one(ObjectWithNonUniqueIndex, Object)
+ assert len(changes) == 1
+ assert "('first_name', 'last_name'), unique=False)" in changes[0]
+
+ changes = diff_one(ObjectWithNonUniqueIndex, ObjectWithUniqueIndex)
+ assert len(changes) == 2
+ assert "drop_index" in changes[0] and "('first_name', 'last_name')" in changes[0]
+ assert "('first_name', 'last_name'), unique=False)" in changes[1]
+
+
def test_self_referencing_foreign_key_on_model_create():
from peewee_migrate.auto import field_to_code
|
Add support for multi-column index/unique
I added a multi-column unique index definition according to the Peewee documentation (http://docs.peewee-orm.com/en/latest/peewee/models.html#indexes-and-constraints), but when I run **peewee_migrate** it doesn't generate the statements for the multi-column index.
I took a look at your code (auto.py) and it looks like that case is not handled. Would it be possible to add support for multi-column indexes/unique constraints?
Manually adding the statement `migrator.add_index(model, *col_names, unique=False)` works, but it means I have to maintain the autogenerated scripts by hand plus a separate one for the multi-column indexes.
This is my model:
```
class Dog(BaseModel):
    id = PrimaryKeyField()
    user = ForeignKeyField(User, to_field='id', related_name='dogs', db_column='user_id')
    name = CharField(null=False)
    description = CharField(null=False)

    class Meta:
        indexes = (
            (('user', 'name'), True),  # This should generate a unique constraint on those two columns
        )
```
|
0.0
|
903a8e19079ab9d9b934f37f7dea4eb53e071187
|
[
"tests/test_auto.py::test_diff_multi_column_index",
"tests/test_auto.py::test_self_referencing_foreign_key_on_model_create",
"tests/test_auto.py::test_self_referencing_foreign_key_on_field_added"
] |
[
"tests/test_auto.py::mypy",
"tests/test_auto.py::mypy-status",
"tests/test_auto.py::test_auto",
"tests/test_auto.py::test_auto_postgresext",
"tests/test_auto.py::test_auto_multi_column_index"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-09-14 09:01:16+00:00
|
bsd-3-clause
| 3,453 |
|
koaning__bulk-12
|
diff --git a/Makefile b/Makefile
index bd4bb88..31c68ae 100644
--- a/Makefile
+++ b/Makefile
@@ -4,6 +4,7 @@ clean:
rm -rf .pytest_cache
rm -rf build
rm -rf dist
+ rm -rf downloads
pypi: clean
python setup.py sdist
@@ -12,10 +13,12 @@ pypi: clean
install:
python -m pip install --upgrade pip
- python -m pip install -e .
+ python -m pip install -e ".[dev]"
serve:
python -m bulk text cluestarred.csv
test:
pytest
+
+check: clean test clean
\ No newline at end of file
diff --git a/bulk/__main__.py b/bulk/__main__.py
index b561430..d5770fd 100644
--- a/bulk/__main__.py
+++ b/bulk/__main__.py
@@ -17,8 +17,10 @@ app = typer.Typer(
app.add_typer(util_app, name="util")
+
@app.command("version")
def version():
+ """Prints the version."""
print("0.1.0")
diff --git a/bulk/_bokeh_utils.py b/bulk/_bokeh_utils.py
index 5276d85..67dd7f5 100644
--- a/bulk/_bokeh_utils.py
+++ b/bulk/_bokeh_utils.py
@@ -20,6 +20,9 @@ def get_color_mapping(df: pd.DataFrame) -> Tuple[Optional[bokeh.transform.transf
all_values = list(df["color"].dropna().unique())
if len(all_values) == 2:
all_values.extend([""])
+ elif len(all_values) > len(Category10) + 2:
+ raise ValueError(f"Too many classes defined, the limit for visualisation is {len(Category10) + 2}. "
+ f"Got {len(all_values)}.")
mapper = factor_cmap(
field_name="color",
palette=Category10[len(all_values)],
diff --git a/bulk/download.py b/bulk/download.py
new file mode 100644
index 0000000..61337a3
--- /dev/null
+++ b/bulk/download.py
@@ -0,0 +1,55 @@
+import shutil
+import pathlib
+import pandas as pd
+import tarfile
+import urllib.request
+import typer
+
+
+app = typer.Typer(
+ name="download",
+ add_completion=False,
+ help="Download datasets.",
+ no_args_is_help=True,
+)
+
+
[email protected]("tinyplanet")
+def tinyplanet(force: bool = typer.Option(False, help="Force the download", is_flag=True)):
+ """
+ Download the tiny planet dataset.
+
+ The dataset contains satellite images to track the human footprint in the Amazon rainforest.
+
+ This dataset was uses the same dataresource as the fast.ai project, found here:
+ https://docs.fast.ai/data.external.html
+
+ It is a small subset of a dataset that was originally found as part of a Kaggle competition, found here:
+ https://www.kaggle.com/c/planet-understanding-the-amazon-from-space
+ """
+ src = pathlib.Path("downloads/tinyplanet/tinyplanet.tgz")
+ if not force and src.parent.exists():
+ print("The tinyplanet dataset already exists")
+ raise typer.Exit(1)
+ src.parent.mkdir(exist_ok=True, parents=True)
+ dst = pathlib.Path("downloads/tinyplanet")
+
+ # Download and untar
+ urllib.request.urlretrieve("https://s3.amazonaws.com/fast-ai-sample/planet_tiny.tgz", str(src))
+ with tarfile.open(str(src), 'r:gz') as tar:
+ tar.extractall(str(dst))
+ src.unlink()
+
+ # Move files into nice positions
+ glob = pathlib.Path("downloads/tinyplanet/planet_tiny/train").glob("*.jpg")
+ pathlib.Path("downloads/tinyplanet/images").mkdir(exist_ok=True, parents=True)
+ for file in glob:
+ file.rename(f"downloads/tinyplanet/images/{file.name}")
+
+ # Move the labels file
+ (pd.read_csv("downloads/tinyplanet/planet_tiny/labels.csv")
+ .assign(image=lambda d: d['image_name'].str.replace("train_", ""))
+ .drop(columns=["image_name"])
+ .to_csv("downloads/tinyplanet/labels.csv", index=False))
+
+ shutil.rmtree("downloads/tinyplanet/planet_tiny")
diff --git a/bulk/util.py b/bulk/util.py
index 66519af..2dcee54 100644
--- a/bulk/util.py
+++ b/bulk/util.py
@@ -1,15 +1,17 @@
-from typing import List
import pathlib
+from typing import List
import typer
import pandas as pd
+from bulk.download import app as download_app
app = typer.Typer(
name="util",
add_completion=False,
- help="Utilities for data.",
+ help="Extra utilities.",
no_args_is_help=True,
)
+app.add_typer(download_app, name="download")
@app.command("concat")
|
koaning/bulk
|
3dd6a1957c00564ad242af4b9fce9a6281646b3c
|
diff --git a/tests/test_cli.py b/tests/test_cli.py
new file mode 100644
index 0000000..d2849c3
--- /dev/null
+++ b/tests/test_cli.py
@@ -0,0 +1,16 @@
+import pathlib
+from typer.testing import CliRunner
+
+from bulk.__main__ import app
+
+runner = CliRunner()
+
+
+def test_download_tinyplanet():
+ result = runner.invoke(app, ["util", "download", "tinyplanet"])
+ assert result.exit_code == 0
+ assert pathlib.Path("downloads/tinyplanet").exists()
+
+ # The path exists now, so exit_code needs to be raised
+ result = runner.invoke(app, ["util", "download", "tinyplanet"])
+ assert result.exit_code == 1
diff --git a/tests/test_utils.py b/tests/test_utils.py
new file mode 100644
index 0000000..2b851b4
--- /dev/null
+++ b/tests/test_utils.py
@@ -0,0 +1,33 @@
+import pandas as pd
+import pytest
+
+from bulk._bokeh_utils import get_color_mapping
+
+MAX_DISCRETE_CLASSES = 10
+
+
+def _int_to_alpha(x: int) -> str:
+ # returns int mapped to char: 0: a, 1: b, 2: c
+ return chr(x + 97)
+
+
+def _create_dummy_df() -> pd.DataFrame:
+ df = pd.DataFrame()
+ df["color"] = [_int_to_alpha(i) for i in range(MAX_DISCRETE_CLASSES)]
+ return df
+
+
+def test_get_color_mapping_raises_error_on_too_many_classes():
+ df = _create_dummy_df()
+ additional_class = pd.DataFrame.from_records([{"color": _int_to_alpha(MAX_DISCRETE_CLASSES + 1)}])
+ df = pd.concat([df, additional_class], axis=0, ignore_index=True)
+ with pytest.raises(ValueError):
+ get_color_mapping(df)
+
+
+def test_get_color_mapping_doesnt_raise_error():
+ df = _create_dummy_df()
+ try:
+ get_color_mapping(df)
+ except ValueError as e:
+ assert False, f"get_color_mapping raised a ValueError: {e}"
|
Color mapping crashes if too many classes defined
Looks like I crashed the color mapping with too many classes.
The palette could be upped to Category20, though this would still limit the number of classes a user can visualize. Whatever the limit is, there should be an error message with a graceful exit if the user tries to define more. I'd be happy to make a PR!
```
Traceback (most recent call last):
File ".pyenv/versions/rules-env/lib/python3.8/site-packages/tornado/web.py", line 1713, in _execute
result = await result
File ".pyenv/versions/rules-env/lib/python3.8/site-packages/bokeh/server/views/doc_handler.py", line 54, in get
session = await self.get_session()
File ".pyenv/versions/rules-env/lib/python3.8/site-packages/bokeh/server/views/session_handler.py", line 144, in get_session
session = await self.application_context.create_session_if_needed(session_id, self.request, token)
File ".pyenv/versions/rules-env/lib/python3.8/site-packages/bokeh/server/contexts.py", line 243, in create_session_if_needed
self._application.initialize_document(doc)
File ".pyenv/versions/rules-env/lib/python3.8/site-packages/bokeh/application/application.py", line 194, in initialize_document
h.modify_document(doc)
File ".pyenv/versions/rules-env/lib/python3.8/site-packages/bokeh/application/handlers/function.py", line 143, in modify_document
self._func(doc)
File ".pyenv/versions/rules-env/lib/python3.8/site-packages/bulk/text.py", line 27, in bkapp
mapper, df = get_color_mapping(df)
File ".pyenv/versions/rules-env/lib/python3.8/site-packages/bulk/utils.py", line 25, in get_color_mapping
palette=Category10[len(all_values)],
KeyError: 16
```
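The patch above turns this into an explicit guard; a small sketch of the resulting behaviour, mirroring the added test:
```python
# Sketch mirroring the new test: with more discrete classes than the Category10-based
# palette supports, get_color_mapping now fails fast with a readable ValueError.
import pandas as pd
from bulk._bokeh_utils import get_color_mapping

df = pd.DataFrame({"color": [str(i) for i in range(16)]})  # 16 classes, as in the traceback
try:
    get_color_mapping(df)
except ValueError as exc:
    print(exc)  # -> "Too many classes defined, the limit for visualisation is ..."
```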
|
0.0
|
3dd6a1957c00564ad242af4b9fce9a6281646b3c
|
[
"tests/test_utils.py::test_get_color_mapping_raises_error_on_too_many_classes"
] |
[
"tests/test_utils.py::test_get_color_mapping_doesnt_raise_error"
] |
{
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-08-24 12:48:34+00:00
|
mit
| 3,454 |
|
koaning__clumper-36
|
diff --git a/clumper/clump.py b/clumper/clump.py
index 2fa98f1..134e204 100644
--- a/clumper/clump.py
+++ b/clumper/clump.py
@@ -1,4 +1,5 @@
import json
+import csv
import pathlib
import itertools as it
import urllib.request
@@ -112,6 +113,54 @@ class Clumper:
except Exception:
raise RuntimeError("Error occured during reading in JSONL file")
+ @classmethod
+ def read_csv(cls, path, delimiter=",", fieldnames=None, nrows=None):
+ """
+ Reads in a csv file. Can also read files from url.
+ Arguments
+ ---------
+ path : filename or url
+ delimiter: must be a single character. `,` is the default.
+ fieldnames: You may prefer a different set of keys for the data, in which case, you can supply new keys with the fieldnames.
+ By default, the first row of the csv will provide the Clumper keys if fieldnames is None. If fieldnames is provided,
+ then the first row stays as part of the data. You should ensure that the correct number of fieldnames is supplied,
+ as an incorrect number can lead to truncation of the clumper. So, if you have seven columns and your fieldnames length is 3,
+ then every row will have only 3 values, the remaining four will be cut off.
+ nrows: Number of rows to read in. Useful when reading large files. If `None`, all rows are read.
+ Usage:
+ ```python
+ from clumper import Clumper
+ clump = Clumper.read_csv("tests/monopoly.csv")
+ assert len(clump) == 22
+ clump = Clumper.read_csv("tests/monopoly.csv", nrows = 10)
+ assert len(clump) == 10
+ clump = Clumper.read_csv("https://calmcode.io/datasets/monopoly.csv")
+ assert len(clump) == 22
+ # By default, the first row of the csv is treated as the keys of the Clumper.
+ # If the fieldnames argument is not None, then the first row stays as part of the data.
+ fieldnames = ['date', 'currency', 'country', 'price', 'dollar_rate', 'cost']
+ clump = Clumper.read_csv("https://calmcode.io/datasets/bigmac.csv",
+ # supply new fieldnames
+ fieldnames=fieldnames)
+ # check first row :
+ first_row = ['date', 'currency_code','name','local_price', 'dollar_ex', 'dollar_price']
+ assert clump.head(1).equals([dict(zip(fieldnames, first_row))])
+ ```
+ """
+ if path.startswith("https:") or path.startswith("http:"):
+ with urllib.request.urlopen(path) as resp:
+ if fieldnames is None:
+ fieldnames = resp.readline().decode().strip().split(",")
+ # this section allows us to chunk the rows, if nrows is supplied
+ body = it.islice(resp, 0, nrows)
+ body = (word.decode().strip().split(",") for word in body)
+ body = it.product([fieldnames], body)
+ return Clumper([dict(zip(key, values)) for key, values in body])
+
+ with open(path, newline="") as csvfile:
+ reader = csv.DictReader(csvfile, delimiter=delimiter, fieldnames=fieldnames)
+ return Clumper(list(it.islice(reader, 0, nrows)))
+
def _create_new(self, blob):
"""
Creates a new collection of data while preserving settings of the
|
koaning/clumper
|
f24197cd178d0c1ec5e20f0dfb31acc9d9ecf621
|
diff --git a/tests/monopoly.csv b/tests/monopoly.csv
new file mode 100644
index 0000000..1a5fe78
--- /dev/null
+++ b/tests/monopoly.csv
@@ -0,0 +1,23 @@
+name,rent,house_1,house_2,house_3,house_4,hotel,deed_cost,house_cost,color,tile
+Mediterranean Avenue,2,10,30,90,160,250,60,50,purple,1
+Baltic Avenue,4,20,60,180,320,450,60,50,purple,3
+Oriental Avenue,6,30,90,270,400,550,100,50,light_blue,6
+Vermont Avenue,6,30,90,270,400,550,100,50,light_blue,8
+Connecticut Avenue,8,40,100,300,450,600,120,50,light_blue,9
+St. Charles Place,10,50,150,450,625,750,140,100,pink,11
+States Avenue,10,50,150,450,625,750,140,100,pink,13
+Virginia Avenue,12,60,180,500,700,900,160,100,pink,14
+Tennessee Avenue,14,70,200,550,750,950,180,100,orange,16
+St. James Place,14,70,200,550,750,950,180,100,orange,18
+New York Avenue,16,80,220,600,800,1000,200,100,orange,19
+Kentucky Avenue,18,90,250,700,875,1050,220,150,red,21
+Indiana Avenue,18,90,250,700,875,1050,220,150,red,23
+Illinois Avenue,20,100,300,750,925,1100,240,150,red,24
+Atlantic Avenue,22,110,330,800,975,1150,260,150,yellow,26
+Ventnor Avenue,22,110,330,800,975,1150,260,150,yellow,27
+Marvin Gardens,24,120,360,850,1025,1200,280,150,yellow,29
+Pacific Avenue,26,130,390,900,1100,1275,300,200,green,31
+North Carolina Avenue,26,130,390,900,1100,1275,300,200,green,32
+Pennsylvania Avenue,28,150,450,1000,1200,1400,320,200,green,34
+Park Place,35,175,500,1100,1300,1500,350,200,blue,37
+Boardwalk,50,200,600,1400,1700,2000,400,200,blue,39
diff --git a/tests/test_read_write/test_read_csv.py b/tests/test_read_write/test_read_csv.py
new file mode 100644
index 0000000..33fddbb
--- /dev/null
+++ b/tests/test_read_write/test_read_csv.py
@@ -0,0 +1,85 @@
+import pytest
+from itertools import product
+from clumper import Clumper
+
+
+paths = ["tests/monopoly.csv", "https://calmcode.io/datasets/monopoly.csv"]
+nrows = [(None, 22), (10, 10), (15, 15), [80, 22]]
+fields = [
+ (
+ None,
+ [
+ "name",
+ "rent",
+ "house_1",
+ "house_2",
+ "house_3",
+ "house_4",
+ "hotel",
+ "deed_cost",
+ "house_cost",
+ "color",
+ "tile",
+ ],
+ ),
+ (
+ [
+ "namee",
+ "rent",
+ "house1",
+ "house2",
+ "house3",
+ "house4",
+ "hotell",
+ "deed_cost",
+ "house_cost",
+ "colour",
+ "tille",
+ ],
+ [
+ "namee",
+ "rent",
+ "house1",
+ "house2",
+ "house3",
+ "house4",
+ "hotell",
+ "deed_cost",
+ "house_cost",
+ "colour",
+ "tille",
+ ],
+ ),
+]
+
+path_nrows = [(path, nrows, length) for path, (nrows, length) in product(paths, nrows)]
+path_fields = [
+ (path, fieldnames, fields_check)
+ for path, (fieldnames, fields_check) in product(paths, fields)
+]
+
+
[email protected]("path,nrows,length", path_nrows)
+def test_read_csv(path, nrows, length):
+ "Test that the length of clumper matches the total number of rows in the csv."
+ clump = Clumper.read_csv(path=path, nrows=nrows)
+ assert len(clump) == length
+
+
[email protected]("path,fieldnames,field_check", path_fields)
+def test_fieldnames(path, fieldnames, field_check):
+ "Test that fieldnames matches keys of Clumper."
+ clump = Clumper.read_csv(path=path, fieldnames=fieldnames)
+ assert not set(field_check).difference(clump.keys())
+
+
+def test_wrong_delimiter():
+ "Test that an error is raised if a wrong delimiter is supplied."
+ with pytest.raises(TypeError):
+ Clumper.read_csv("tests/monopoly.csv", delimiter=", ")
+
+
+def test_read_csv_negative_nrows():
+ "Test that an error is raised if nrows is negative."
+ with pytest.raises(ValueError):
+ Clumper.read_csv("tests/monopoly.csv", nrows=-5)
|
Data Loader: .from_csv()
It'd be nice if we could also read data from disk. A syntax like this would work well:
```python
Clumper.from_csv(path, settings)
```
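The patch above implements this under the name `read_csv` rather than `from_csv`; a small usage sketch taken from its docstring and the new tests:
```python
# Sketch of the API added in the patch (Clumper.read_csv), using the monopoly.csv
# fixture that the test patch introduces.
from clumper import Clumper

clump = Clumper.read_csv("tests/monopoly.csv")
assert len(clump) == 22

clump = Clumper.read_csv("tests/monopoly.csv", nrows=10)
assert len(clump) == 10
```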
|
0.0
|
f24197cd178d0c1ec5e20f0dfb31acc9d9ecf621
|
[
"tests/test_read_write/test_read_csv.py::test_read_csv[tests/monopoly.csv-None-22]",
"tests/test_read_write/test_read_csv.py::test_read_csv[tests/monopoly.csv-10-10]",
"tests/test_read_write/test_read_csv.py::test_read_csv[tests/monopoly.csv-15-15]",
"tests/test_read_write/test_read_csv.py::test_read_csv[tests/monopoly.csv-80-22]",
"tests/test_read_write/test_read_csv.py::test_fieldnames[tests/monopoly.csv-None-field_check0]",
"tests/test_read_write/test_read_csv.py::test_fieldnames[tests/monopoly.csv-fieldnames1-field_check1]",
"tests/test_read_write/test_read_csv.py::test_wrong_delimiter",
"tests/test_read_write/test_read_csv.py::test_read_csv_negative_nrows"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-08-15 04:02:53+00:00
|
mit
| 3,455 |
|
koaning__clumper-53
|
diff --git a/clumper/clump.py b/clumper/clump.py
index b95ffb4..6be9a4d 100644
--- a/clumper/clump.py
+++ b/clumper/clump.py
@@ -116,28 +116,24 @@ class Clumper:
if n <= 0:
raise ValueError("Number of lines to read must be > 0.")
- try:
- # Case 1 : Open cloud file in stream
- if path.startswith("https:") or path.startswith("http:"):
- f = urllib.request.urlopen(path)
- # Case 2 : Local file
- else:
- f = open(path)
-
- # Initalize a place to store the parsed data as list
- data_array = []
- # Read it, parse and close it
- with f:
- for current_line_nr, json_string in enumerate(f):
- if n is not None and current_line_nr == n:
- break
- json_object = json.loads(json_string)
- data_array.append(json_object)
- # Return it
- return Clumper(data_array)
-
- except Exception:
- raise RuntimeError("Error occured during reading in JSONL file")
+ # Case 1 : Open cloud file in stream
+ if path.startswith("https:") or path.startswith("http:"):
+ f = urllib.request.urlopen(path)
+ # Case 2 : Local file
+ else:
+ f = open(path)
+
+ # Initialize a place to store the parsed data as list
+ data_array = []
+ # Read it, parse and close it
+ with f:
+ for current_line_nr, json_string in enumerate(f):
+ if n is not None and current_line_nr == n:
+ break
+ json_object = json.loads(json_string)
+ data_array.append(json_object)
+ # Return it
+ return Clumper(data_array)
@classmethod
def read_yaml(cls, path: str, n=None):
@@ -242,13 +238,9 @@ class Clumper:
assert clump_copy.collect() == clump_orig.collect()
```
"""
-
- try:
- # Create a new file and open it for writing
- with open(path, "w") as f:
- json.dump(self.collect(), f, sort_keys=sort_keys, indent=indent)
- except Exception:
- raise RuntimeError("Error occured during writing JSON file")
+ # Create a new file and open it for writing
+ with open(path, "w") as f:
+ json.dump(self.collect(), f, sort_keys=sort_keys, indent=indent)
def write_jsonl(self, path, sort_keys=False, indent=None):
"""
|
koaning/clumper
|
0f7ec0ad87ebe883942c501a82e127d446ef8548
|
diff --git a/tests/test_read_write/test_read_jsonl.py b/tests/test_read_write/test_read_jsonl.py
index a812373..351b6ce 100644
--- a/tests/test_read_write/test_read_jsonl.py
+++ b/tests/test_read_write/test_read_jsonl.py
@@ -29,5 +29,5 @@ def test_read_csv_negative_zero():
def test_non_existing_file():
- with pytest.raises(RuntimeError):
+ with pytest.raises(FileNotFoundError):
Clumper.read_jsonl("tests/cards.jsonl")
|
Let's remove "Error occured during writing JSONL file"
This is the result of a failing pytest on my side.
```
    def write_jsonl(self, path, sort_keys=False, indent=None):
        """
        Writes to a jsonl file.

        Arguments:
            path: filename
            sort_keys: If sort_keys is true (default: False), then the output of dictionaries will be sorted by key.
            indent: If indent is a non-negative integer (default: None), then JSON array elements members will be pretty-printed with that indent level.

        Usage:

        ```python
        from clumper import Clumper
        clump_orig = Clumper.read_jsonl("tests/data/cards.jsonl")
        clump_orig.write_jsonl("tests/data/cards_copy.jsonl")
        clump_copy = Clumper.read_jsonl("tests/data/cards_copy.jsonl")
        assert clump_copy.collect() == clump_orig.collect()
        ```
        """
        try:
            # Create a new file and open it for writing
            with open(path, "x") as f:
                for current_line_nr, json_dict in enumerate(self.collect()):
                    f.write(
                        json.dumps(json_dict, sort_keys=sort_keys, indent=indent) + "\n"
                    )
        except Exception:
>           raise RuntimeError("Error occured during writing JSONL file")
E           RuntimeError: Error occured during writing JSONL file

clumper/clump.py:276: RuntimeError
```
The message `Error occured during writing JSONL file` is making it harder for me to understand what is actually going on. Can we just maybe remove it?
The error here was that I was trying to write a file that already exists. Instead of giving me this error I got the uninformative "Error occured during writing JSONL file" message.
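For illustration, a minimal sketch (not the actual clumper source) of the behaviour asked for here: drop the blanket `try/except` so the built-in exception surfaces directly.

```python
import json


def write_jsonl(records, path, sort_keys=False, indent=None):
    # No blanket try/except: mode "x" raises FileExistsError for an existing
    # file, and a missing directory raises FileNotFoundError -- both far more
    # informative than a generic RuntimeError.
    with open(path, "x") as f:
        for record in records:
            f.write(json.dumps(record, sort_keys=sort_keys, indent=indent) + "\n")
```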
|
0.0
|
0f7ec0ad87ebe883942c501a82e127d446ef8548
|
[
"tests/test_read_write/test_read_jsonl.py::test_non_existing_file"
] |
[
"tests/test_read_write/test_read_jsonl.py::test_local_read_jsonl_expected[None-4]",
"tests/test_read_write/test_read_jsonl.py::test_local_read_jsonl_expected[1-1]",
"tests/test_read_write/test_read_jsonl.py::test_local_read_jsonl_expected[2-2]",
"tests/test_read_write/test_read_jsonl.py::test_local_read_jsonl_expected[5-4]",
"tests/test_read_write/test_read_jsonl.py::test_read_csv_negative_nrows",
"tests/test_read_write/test_read_jsonl.py::test_read_csv_negative_zero"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2020-09-14 13:28:21+00:00
|
mit
| 3,456 |
|
kodemore__kink-43
|
diff --git a/README.md b/README.md
index 8bacef6..630ffd7 100644
--- a/README.md
+++ b/README.md
@@ -76,7 +76,7 @@ No need for manual work and manual dependency management. Give it a try and you
To fully utilise the potential of kink it is recommended to bootstrap your initial dependencies
(config values, or instances of classes that are standalone, requires no other dependencies than themselves).
Some people prefer to keep it in `__init__.py` in the top module of your application, other
-create separate `bootstra.py` file for this purpose. Once all is setup the only step left
+create separate `bootstrap.py` file for this purpose. Once all is setup the only step left
is to decorate your classes/functions with `@inject` decorator.
## Bootstrapping/Adding services manually
diff --git a/kink/container.py b/kink/container.py
index caa1171..d0a24d1 100644
--- a/kink/container.py
+++ b/kink/container.py
@@ -38,7 +38,10 @@ class Container:
return service
if key in self._aliases:
- service = self._get(self._aliases[key][0]) # By default return first aliased service
+ unaliased_key = self._aliases[key][0] # By default return first aliased service
+ if unaliased_key in self._factories:
+ return self._factories[unaliased_key](self)
+ service = self._get(unaliased_key)
if service is not _MISSING_SERVICE:
return service
|
kodemore/kink
|
8ee300c1d283c4ce115bbd64cb831523d79f26da
|
diff --git a/tests/test_issue_aliased_factory.py b/tests/test_issue_aliased_factory.py
new file mode 100644
index 0000000..54105f7
--- /dev/null
+++ b/tests/test_issue_aliased_factory.py
@@ -0,0 +1,17 @@
+
+from kink import inject, di
+
+class Repository:
+ pass
+
+@inject(alias=Repository, use_factory=True)
+class PerInstanceRepository(Repository):
+ pass
+
+@inject
+class Service:
+ def __init__(self, repository: Repository):
+ pass
+
+def test_can_inject_aliased_factory_services():
+ di[Service]
\ No newline at end of file
|
cannot alias a factory service with inject
`inject` cannot find services that are both aliased and registered with a factory. For example:
```python
class Repository: ...

@inject(alias=Repository, use_factory=True)
class PerInstanceRepository(Repository): ...

@inject
class Service:
    def __init__(self, repository: Repository): ...
```
`Container.__getitem__` looks for factory services before resolving aliases. However, factories are not referenced again after aliases are resolved.
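A simplified sketch (not the actual kink source) of the resolution order implied by the fix: resolve the alias first, then consult the factory registry before falling back to a cached service instance.

```python
def resolve(container, key):
    # Hypothetical helper mirroring the lookup described above.
    if key in container._aliases:
        unaliased_key = container._aliases[key][0]
        if unaliased_key in container._factories:
            # Aliased *and* factoried: build a fresh instance via the factory.
            return container._factories[unaliased_key](container)
        return container._get(unaliased_key)
    return container._get(key)
```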
|
0.0
|
8ee300c1d283c4ce115bbd64cb831523d79f26da
|
[
"tests/test_issue_aliased_factory.py::test_can_inject_aliased_factory_services"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-07-12 20:01:02+00:00
|
mit
| 3,457 |
|
konradhalas__dacite-216
|
diff --git a/dacite/dataclasses.py b/dacite/dataclasses.py
index d70f423..c06ddc0 100644
--- a/dacite/dataclasses.py
+++ b/dacite/dataclasses.py
@@ -11,7 +11,6 @@ class DefaultValueNotFoundError(Exception):
pass
-@cache
def get_default_value_for_field(field: Field, type_: Type) -> Any:
if field.default != MISSING:
return field.default
|
konradhalas/dacite
|
c076b8c3b3de3631f28db01f3ae41e4d69aa6c50
|
diff --git a/tests/core/test_base.py b/tests/core/test_base.py
index 8d82788..a866d57 100644
--- a/tests/core/test_base.py
+++ b/tests/core/test_base.py
@@ -1,5 +1,5 @@
from dataclasses import dataclass, field
-from typing import Any, NewType, Optional
+from typing import Any, NewType, Optional, List
import pytest
@@ -191,3 +191,16 @@ def test_from_dict_with_new_type():
result = from_dict(X, {"s": "test"})
assert result == X(s=MyStr("test"))
+
+
+def test_dataclass_default_factory_identity():
+ # https://github.com/konradhalas/dacite/issues/215
+ @dataclass
+ class A:
+ name: str
+ items: List[str] = field(default_factory=list)
+
+ a1 = from_dict(A, {"name": "a1"})
+ a2 = from_dict(A, {"name": "a2"})
+
+ assert a1.items is not a2.items
|
Incorrect behavior when using field with default_factory
**Describe the bug**
`default_factory` is called only once if a field is absent in the data
**To Reproduce**
```python
#!/usr/bin/env python
from dataclasses import field, dataclass
from dacite import from_dict
@dataclass
class A:
    name: str
    items: list[str] = field(default_factory=list)


if __name__ == '__main__':
    a1 = from_dict(A, {'name': 'a1'})
    a2 = from_dict(A, {'name': 'a2'})
    print('Test identity:', a1.items is a2.items)
```
**Output**
```
Test identity: True
```
**Expected behavior**
`items` shouldn't be identical.
`dacite v1.7.0` works as expected
```
Test identity: False
```
**Environment**
- Python version: 3.9.16
- `dacite` version: 1.8.0
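A small standalone illustration (independent of dacite's internals) of why memoizing the default-value lookup is unsafe: `default_factory` must run once per constructed instance, otherwise every instance shares the same mutable object.

```python
from functools import lru_cache


def fresh_default():
    return []  # a new list per call: the behaviour default_factory relies on


@lru_cache(maxsize=None)
def cached_default():
    return []  # the factory runs once; every caller now shares this list


assert fresh_default() is not fresh_default()
assert cached_default() is cached_default()  # shared identity -- the bug above
```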
|
0.0
|
c076b8c3b3de3631f28db01f3ae41e4d69aa6c50
|
[
"tests/core/test_base.py::test_dataclass_default_factory_identity"
] |
[
"tests/core/test_base.py::test_from_dict_with_correct_data",
"tests/core/test_base.py::test_from_dict_with_default_value",
"tests/core/test_base.py::test_from_dict_with_default_factory",
"tests/core/test_base.py::test_from_dict_with_wrong_type",
"tests/core/test_base.py::test_from_dict_with_missing_value",
"tests/core/test_base.py::test_from_dict_with_nested_data_class",
"tests/core/test_base.py::test_from_dict_with_missing_value_of_nested_data_class",
"tests/core/test_base.py::test_from_dict_with_additional_values",
"tests/core/test_base.py::test_from_dict_with_any",
"tests/core/test_base.py::test_from_dict_with_nested_data_classes_and_default_factory",
"tests/core/test_base.py::test_from_dict_with_post_init",
"tests/core/test_base.py::test_from_dict_with_post_init_missing_value",
"tests/core/test_base.py::test_from_dict_with_optional_non_init_field",
"tests/core/test_base.py::test_from_dict_with_non_init_field_with_default_value_and_frozen_dataclass",
"tests/core/test_base.py::test_from_dict_with_new_type"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2023-02-04 12:28:23+00:00
|
mit
| 3,458 |
|
kootenpv__access_points-22
|
diff --git a/access_points/__init__.py b/access_points/__init__.py
index b0040cf..ab7ecdc 100644
--- a/access_points/__init__.py
+++ b/access_points/__init__.py
@@ -94,22 +94,28 @@ class OSXWifiScanner(WifiScanner):
cmd = "airport -s"
return path + cmd
+ # OSX Monterey doesn't output the BSSID unless you `sudo` which means the
+ # old method using a regexp to match those lines fails. Since the output
+ # is column-formatted, we can use that instead and it works on both
+ # Monterey-without-BSSID and pre-Monterey-with-BSSID.
def parse_output(self, output):
results = []
- # 5 times 2 "letters and/or digits" followed by ":"
- # Then one time only 2 "letters and/or digits"
- # Use non-capturing groups (?:...) to use {} for amount
- # One wrapping group (...) to capture the whole thing
- bbsid_re = re.compile("((?:[0-9a-zA-Z]{2}:){5}(?:[0-9a-zA-Z]){2})")
security_start_index = False
+ # First line looks like this (multiple whitespace truncated to fit.)
+ # `\w+SSID BSSID\w+ RSSI CHANNEL HT CC SECURITY (auth/unicast/group)`
+ # ` ^ ssid_end_index`
+ # ` ^ rssi_start_index`
+ # ` ^ ^ bssid`
for line in output.split("\n"):
if line.strip().startswith("SSID BSSID"):
security_start_index = line.index("SECURITY")
+ ssid_end_index = line.index("SSID") + 4
+ rssi_start_index = line.index("RSSI")
elif line and security_start_index and 'IBSS' not in line:
try:
- ssid = bbsid_re.split(line)[0].strip()
- bssid = bbsid_re.findall(line)[0]
- rssi = bbsid_re.split(line)[-1].strip().split()[0]
+ ssid = line[0:ssid_end_index].strip()
+ bssid = line[ssid_end_index+1:rssi_start_index-1].strip()
+ rssi = line[rssi_start_index:rssi_start_index+4].strip()
security = line[security_start_index:]
ap = AccessPoint(ssid, bssid, rssi_to_quality(int(rssi)), security)
results.append(ap)
|
kootenpv/access_points
|
7ca2aaed39b966249a30580fd8b6f13612b4ac04
|
diff --git a/data/osx_monterey_test.txt b/data/osx_monterey_test.txt
new file mode 100644
index 0000000..a10d443
--- /dev/null
+++ b/data/osx_monterey_test.txt
@@ -0,0 +1,6 @@
+ SSID BSSID RSSI CHANNEL HT CC SECURITY (auth/unicast/group)
+ X000X000X00 -83 6 Y -- WPA(PSK/AES,TKIP/TKIP) WPA2(PSK/AES,TKIP/TKIP)
+ XXX-XXX0000000000 -68 5 Y -- WPA(PSK/TKIP/TKIP) WPA2(PSK/AES/TKIP)
+ XXXXXXXXX -52 8 N -- WPA(PSK/TKIP/TKIP)
+ XX-XXX -75 10 Y -- WPA(PSK/TKIP/TKIP) WPA2(PSK/AES/TKIP)
+ XXXXXXX00X0X0 -58 10 N -- WPA(PSK/TKIP/TKIP) WPA2(PSK/AES/TKIP)
diff --git a/tests/all_test.py b/tests/all_test.py
index d4b6960..0fba97a 100644
--- a/tests/all_test.py
+++ b/tests/all_test.py
@@ -26,18 +26,20 @@ def read_output(fn):
return f.read()
-def assert_access_point(aps):
+def assert_access_point(aps, bssid_required=True):
assert isinstance(aps, list)
for ap in aps:
assert isinstance(ap['quality'], int)
assert isinstance(ap['ssid'], basestring) and ap['ssid'] != ''
- assert isinstance(ap['bssid'], basestring) and ap['bssid'] != ''
+ # `ap['bssid']` can sometimes be empty, e.g. on macOS Monterey
+ if bssid_required:
+ assert isinstance(ap['bssid'], basestring) and ap['bssid'] != ''
-def parse_output(wifi_scanner, fname):
+def parse_output(wifi_scanner, fname, bssid_required=True):
output = read_output(fname)
aps = wifi_scanner.parse_output(output)
- assert_access_point(aps)
+ assert_access_point(aps, bssid_required)
return aps
@@ -53,7 +55,9 @@ def assert_all_included(aps, answers):
def test_scan():
scanner = get_scanner()
aps = scanner.get_access_points()
- assert_access_point(aps)
+ # We don't know if we necessarily get BSSIDs from a live scan;
+ # best to err on the side of caution here and not require a match.
+ assert_access_point(aps, False)
def test_iwlist():
@@ -249,6 +253,35 @@ def test_osx():
]
assert_all_included(aps, osx_ans)
+def test_osx_monterey():
+ # BSSID isn't a required match for macOS Monterey because it's not there.
+ aps = parse_output(OSXWifiScanner(), "osx_monterey_test.txt", False)
+ assert len(aps) == 5
+
+ osx_monterey_ans = [
+ ('X000X000X00',
+ '',
+ rssi_to_quality(-83),
+ 'WPA(PSK/AES,TKIP/TKIP) WPA2(PSK/AES,TKIP/TKIP)'),
+ ('XXX-XXX0000000000',
+ '',
+ rssi_to_quality(-68),
+ 'WPA(PSK/TKIP/TKIP) WPA2(PSK/AES/TKIP)'),
+ ('XXXXXXXXX',
+ '',
+ rssi_to_quality(-52),
+ 'WPA(PSK/TKIP/TKIP)'),
+ ('XX-XXX',
+ '',
+ rssi_to_quality(-75),
+ 'WPA(PSK/TKIP/TKIP) WPA2(PSK/AES/TKIP)'),
+ ('XXXXXXX00X0X0',
+ '',
+ rssi_to_quality(-58),
+ 'WPA(PSK/TKIP/TKIP) WPA2(PSK/AES/TKIP)')
+ ]
+ assert_all_included(aps, osx_monterey_ans)
+
def test_termux():
aps = parse_output(TermuxWifiScanner(), "termux_test.txt")
|
"list index out of range"
```
list index out of range
Line:
CFInternet19 -91 100 Y -- WPA2(PSK/AES/AES)
Output:
SSID BSSID RSSI CHANNEL HT CC SECURITY (auth/unicast/group)
CFInternet19 -91 100 Y -- WPA2(PSK/AES/AES)
CFInternet19 -90 36 Y -- WPA2(PSK/AES/AES)
BTWi-fi -87 11 Y -- NONE
BTHub6-C3NH -87 11 Y -- WPA2(PSK/AES/AES)
BTWifi-X -84 1 Y -- WPA2(802.1x/AES/AES)
SKYDAAPC -82 1 Y -- WPA2(PSK/AES/AES)
CFL-6BDJ-2.4G -61 5,+1 Y -- WPA(PSK/AES,TKIP/TKIP) WPA2(PSK/AES,TKIP/TKIP)
CFL-6BDJ -62 64 Y -- WPA(PSK/AES,TKIP/TKIP) WPA2(PSK/AES,TKIP/TKIP)
```
On macOS Monterey, `12.0 Beta (21A5522h)`. `Python 3.6.5 |Anaconda, Inc.| (default, Apr 26 2018, 08:42:37)`
(It output one of those chunks for every SSID in the list but I don't think it's useful to provide all of them?)
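A rough sketch of the column-based approach that avoids relying on a BSSID regex: the slice boundaries are derived from the header line, so an empty BSSID column (as on macOS Monterey) is tolerated. Helper names are illustrative.

```python
def column_indices(header_line):
    # Header: "SSID BSSID   RSSI CHANNEL HT CC SECURITY (auth/unicast/group)"
    return {
        "ssid_end": header_line.index("SSID") + 4,
        "rssi_start": header_line.index("RSSI"),
        "security_start": header_line.index("SECURITY"),
    }


def parse_row(line, idx):
    ssid = line[:idx["ssid_end"]].strip()
    bssid = line[idx["ssid_end"] + 1:idx["rssi_start"] - 1].strip()  # may be empty
    rssi = line[idx["rssi_start"]:idx["rssi_start"] + 4].strip()
    security = line[idx["security_start"]:]
    return ssid, bssid, rssi, security
```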
|
0.0
|
7ca2aaed39b966249a30580fd8b6f13612b4ac04
|
[
"tests/all_test.py::test_osx_monterey"
] |
[
"tests/all_test.py::test_scan",
"tests/all_test.py::test_iwlist",
"tests/all_test.py::test_nmcli",
"tests/all_test.py::test_windows",
"tests/all_test.py::test_osx",
"tests/all_test.py::test_termux"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2022-01-11 17:17:16+00:00
|
mit
| 3,459 |
|
koxudaxi__datamodel-code-generator-1248
|
diff --git a/datamodel_code_generator/__init__.py b/datamodel_code_generator/__init__.py
index b78d25f8..55285754 100644
--- a/datamodel_code_generator/__init__.py
+++ b/datamodel_code_generator/__init__.py
@@ -2,6 +2,7 @@ from __future__ import annotations
import contextlib
import os
+import sys
from datetime import datetime, timezone
from enum import Enum
from pathlib import Path
@@ -308,6 +309,10 @@ def generate(
if is_openapi(input_text_) # type: ignore
else InputFileType.JsonSchema
)
+ print(
+ f'The input file type was determined to be: {input_file_type.value}',
+ file=sys.stderr,
+ )
except: # noqa
raise Error('Invalid file format')
|
koxudaxi/datamodel-code-generator
|
e10f1bcce5f0135458a96e4d0e3d4e6ab7e54c3d
|
diff --git a/tests/test_main.py b/tests/test_main.py
index d5ce25d1..1f9a5bce 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -588,7 +588,7 @@ def test_main_no_file(capsys: CaptureFixture) -> None:
assert (
captured.out == (EXPECTED_MAIN_PATH / 'main_no_file' / 'output.py').read_text()
)
- assert not captured.err
+ assert captured.err == 'The input file type was determined to be: openapi\n'
def test_main_extra_template_data_config(capsys: CaptureFixture) -> None:
@@ -614,7 +614,7 @@ def test_main_extra_template_data_config(capsys: CaptureFixture) -> None:
EXPECTED_MAIN_PATH / 'main_extra_template_data_config' / 'output.py'
).read_text()
)
- assert not captured.err
+ assert captured.err == 'The input file type was determined to be: openapi\n'
def test_main_custom_template_dir_old_style(capsys: CaptureFixture) -> None:
@@ -641,7 +641,7 @@ def test_main_custom_template_dir_old_style(capsys: CaptureFixture) -> None:
captured.out
== (EXPECTED_MAIN_PATH / 'main_custom_template_dir' / 'output.py').read_text()
)
- assert not captured.err
+ assert captured.err == 'The input file type was determined to be: openapi\n'
def test_main_custom_template_dir(capsys: CaptureFixture) -> None:
@@ -668,7 +668,7 @@ def test_main_custom_template_dir(capsys: CaptureFixture) -> None:
captured.out
== (EXPECTED_MAIN_PATH / 'main_custom_template_dir' / 'output.py').read_text()
)
- assert not captured.err
+ assert captured.err == 'The input file type was determined to be: openapi\n'
@freeze_time('2019-07-26')
diff --git a/tests/test_main_kr.py b/tests/test_main_kr.py
index ca261f59..d8949825 100644
--- a/tests/test_main_kr.py
+++ b/tests/test_main_kr.py
@@ -146,7 +146,7 @@ def test_main_no_file(capsys: CaptureFixture) -> None:
== (EXPECTED_MAIN_KR_PATH / 'main_no_file' / 'output.py').read_text()
)
- assert not captured.err
+ assert captured.err == 'The input file type was determined to be: openapi\n'
def test_main_custom_template_dir(capsys: CaptureFixture) -> None:
@@ -175,7 +175,7 @@ def test_main_custom_template_dir(capsys: CaptureFixture) -> None:
EXPECTED_MAIN_KR_PATH / 'main_custom_template_dir' / 'output.py'
).read_text()
)
- assert not captured.err
+ assert captured.err == 'The input file type was determined to be: openapi\n'
@freeze_time('2019-07-26')
|
(🎁) Log input file type when `--input-file-type` is `auto`
I was left a little confused when my json file was silently detected as jsonschema instead of json.
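A minimal sketch of the requested behaviour (the helper name is illustrative): when the type is inferred, report it on stderr so the generated code written to stdout stays clean.

```python
import sys


def report_inferred_type(input_file_type: str) -> None:
    # stderr keeps the notice out of any code that is piped from stdout
    print(f"The input file type was determined to be: {input_file_type}",
          file=sys.stderr)
```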
|
0.0
|
e10f1bcce5f0135458a96e4d0e3d4e6ab7e54c3d
|
[
"tests/test_main.py::test_main_extra_template_data_config",
"tests/test_main.py::test_main_no_file",
"tests/test_main.py::test_main_custom_template_dir",
"tests/test_main.py::test_main_custom_template_dir_old_style",
"tests/test_main_kr.py::test_main_custom_template_dir",
"tests/test_main_kr.py::test_main_no_file"
] |
[
"tests/test_main.py::test_main_generate_custom_class_name_generator",
"tests/test_main.py::test_external_relative_ref",
"tests/test_main.py::test_main_with_bad_aliases",
"tests/test_main.py::test_main_json_arrary_include_null",
"tests/test_main.py::test_main_json_failed",
"tests/test_main.py::test_main_null_and_array",
"tests/test_main.py::test_main_nested_directory",
"tests/test_main.py::test_jsonschema_pattern_properties",
"tests/test_main.py::test_main_all_of_ref",
"tests/test_main.py::test_main_openapi_nullable_use_union_operator",
"tests/test_main.py::test_main_with_aliases",
"tests/test_main.py::test_main_jsonschema_boolean_property",
"tests/test_main.py::test_jsonschema_pattern_properties_field_constraints",
"tests/test_main.py::test_main_multiple_required_any_of",
"tests/test_main.py::test_main_json_pointer",
"tests/test_main.py::test_main_openapi_reference_to_object_properties_collapse_root_models",
"tests/test_main.py::test_validation",
"tests/test_main.py::test_main_invalid_enum_name_snake_case_field",
"tests/test_main.py::test_jsonschema_titles_use_title_as_name",
"tests/test_main.py::test_show_help_when_no_input",
"tests/test_main.py::test_main_original_field_name_delimiter_without_snake_case_field",
"tests/test_main.py::test_main_strict_types",
"tests/test_main.py::test_main_jsonschema_field_extras_field_include_all_keys",
"tests/test_main.py::test_main_openapi_all_of_required",
"tests/test_main.py::test_main_json_reuse_enum",
"tests/test_main.py::test_main_collapse_root_models_field_constraints",
"tests/test_main.py::test_main_jsonschema_reference_same_hierarchy_directory",
"tests/test_main.py::test_force_optional",
"tests/test_main.py::test_main_jsonschema_special_enum",
"tests/test_main.py::test_main_jsonschema_special_enum_empty_enum_field_name",
"tests/test_main.py::test_main_jsonschema_complex_one_of",
"tests/test_main.py::test_main_json_snake_case_field",
"tests/test_main.py::test_main_external_definitions",
"tests/test_main.py::test_main_invalid_enum_name",
"tests/test_main.py::test_main_openapi_max_items_enum",
"tests/test_main.py::test_main_invalid_model_name_failed",
"tests/test_main.py::test_main_models_not_found",
"tests/test_main.py::test_main_jsonschema_id",
"tests/test_main.py::test_main_generate",
"tests/test_main.py::test_main_invalid_model_name",
"tests/test_main.py::test_main_jsonschema_ids",
"tests/test_main.py::test_main_jsonschema_id_as_stdin",
"tests/test_main.py::test_main_jsonschema_multiple_files_ref_test_json",
"tests/test_main.py::test_disable_timestamp",
"tests/test_main.py::test_main_yaml",
"tests/test_main.py::test_main_dataclass_field",
"tests/test_main.py::test_main_jsonschema_field_extras",
"tests/test_main.py::test_main",
"tests/test_main.py::test_main_openapi_json_pointer",
"tests/test_main.py::test_long_description",
"tests/test_main.py::test_main_without_field_constraints",
"tests/test_main.py::test_main_jsonschema_json_pointer_array",
"tests/test_main.py::test_main_jsonschema_special_model_remove_special_field_name_prefix",
"tests/test_main.py::test_main_root_id_jsonschema_with_absolute_local_file",
"tests/test_main.py::test_main_jsonschema_enum_root_literal",
"tests/test_main.py::test_version",
"tests/test_main.py::test_main_jsonschema_nested_skip",
"tests/test_main.py::test_main_modular",
"tests/test_main.py::test_jsonschema_titles",
"tests/test_main.py::test_target_python_version",
"tests/test_main.py::test_main_json_capitalise_enum_members",
"tests/test_main.py::test_main_json",
"tests/test_main.py::test_main_disable_warnings",
"tests/test_main.py::test_enable_faux_immutability",
"tests/test_main.py::test_main_use_default_kwarg",
"tests/test_main.py::test_allow_extra_fields",
"tests/test_main.py::test_main_all_of_with_object",
"tests/test_main.py::test_main_nullable_any_of",
"tests/test_main.py::test_main_autodetect",
"tests/test_main.py::test_main_openapi_body_and_parameters_only_schemas",
"tests/test_main.py::test_main_nested_all_of_",
"tests/test_main.py::test_main_jsonschema_multiple_files_json_pointer",
"tests/test_main.py::test_main_jsonschema_combine_one_of_object",
"tests/test_main.py::test_main_openapi_discriminator",
"tests/test_main.py::test_main_jsonschema_duplicate_name",
"tests/test_main.py::test_main_pydantic_basemodel",
"tests/test_main.py::test_main_openapi_reference_to_object_properties",
"tests/test_main.py::test_main_jsonschema_array_in_additional_properites",
"tests/test_main.py::test_enable_version_header",
"tests/test_main.py::test_main_jsonschema_modular_default_enum_member",
"tests/test_main.py::test_main_jsonschema_special_field_name",
"tests/test_main.py::test_main_jsonschema_multiple_files_ref",
"tests/test_main.py::test_main_strict_types_all_with_field_constraints",
"tests/test_main.py::test_main_generate_custom_class_name_generator_additional_properties",
"tests/test_main.py::test_main_use_standard_collections",
"tests/test_main.py::test_main_jsonschema_pattern_properties_by_reference",
"tests/test_main.py::test_csv_stdin",
"tests/test_main.py::test_main_jsonschema_nested_deep",
"tests/test_main.py::test_main_jsonschema_subclass_enum",
"tests/test_main.py::test_main_openapi_body_and_parameters",
"tests/test_main.py::test_main_base_class",
"tests/test_main.py::test_allow_population_by_field_name",
"tests/test_main.py::test_main_nullable_any_of_use_union_operator",
"tests/test_main.py::test_main_root_model_with_additional_properties",
"tests/test_main.py::test_csv_file",
"tests/test_main.py::test_main_jsonschema_nullable_object",
"tests/test_main.py::test_main_jsonschema_special_enum_special_field_name_prefix",
"tests/test_main.py::test_main_with_exclusive",
"tests/test_main.py::test_main_space_field_enum_snake_case_field",
"tests/test_main.py::test_main_inheritance_forward_ref",
"tests/test_main.py::test_stdin",
"tests/test_main.py::test_main_openapi_body_and_parameters_only_paths",
"tests/test_main.py::test_validation_failed",
"tests/test_main.py::test_space_and_special_characters_dict",
"tests/test_main.py::test_main_root_model_with_additional_properties_use_standard_collections",
"tests/test_main.py::test_main_autodetect_failed",
"tests/test_main.py::test_main_jsonschema_object_has_one_of",
"tests/test_main.py::test_main_generate_custom_class_name_generator_modular",
"tests/test_main.py::test_main_jsonschema_field_include_all_keys",
"tests/test_main.py::test_use_default",
"tests/test_main.py::test_main_modular_no_file",
"tests/test_main.py::test_main_dataclass",
"tests/test_main.py::test_main_openapi_default_object",
"tests/test_main.py::test_openapi_special_yaml_keywords",
"tests/test_main.py::test_main_openapi_pattern",
"tests/test_main.py::test_main_nested_enum",
"tests/test_main.py::test_main_disable_appending_item_suffix",
"tests/test_main.py::test_main_openapi_datetime",
"tests/test_main.py::test_main_root_model_with_additional_properties_use_generic_container_types",
"tests/test_main.py::test_main_openapi_complex_reference",
"tests/test_main.py::test_main_similar_nested_array",
"tests/test_main.py::test_main_use_annotated_with_field_constraints",
"tests/test_main.py::test_main_json_capitalise_enum_members_without_enum",
"tests/test_main.py::test_main_with_more_bad_aliases",
"tests/test_main.py::test_main_jsonschema_external_files",
"tests/test_main.py::test_main_jsonschema_pattern",
"tests/test_main.py::test_jsonschema_without_titles_use_title_as_name",
"tests/test_main.py::test_main_with_field_constraints",
"tests/test_main.py::test_main_json_reuse_enum_default_member",
"tests/test_main.py::test_main_dataclass_base_class",
"tests/test_main.py::test_main_jsonschema_combine_any_of_object",
"tests/test_main.py::test_main_jsonschema_has_default_value",
"tests/test_main.py::test_main_with_bad_extra_data",
"tests/test_main.py::test_main_openapi_nullable",
"tests/test_main.py::test_main_use_generic_container_types_py36",
"tests/test_main.py::test_main_self_reference",
"tests/test_main.py::test_main_modular_reuse_model",
"tests/test_main.py::test_simple_json_snake_case_field",
"tests/test_main.py::test_main_with_strip_default_none",
"tests/test_main.py::test_main_nested_json_pointer",
"tests/test_main.py::test_main_openapi_content_in_parameters",
"tests/test_main.py::test_main_modular_filename",
"tests/test_main.py::test_main_openapi_oas_response_reference",
"tests/test_main.py::test_main_openapi_const",
"tests/test_main.py::test_main_openapi_override_required_all_of_field",
"tests/test_main.py::test_main_use_union_operator",
"tests/test_main.py::test_main_jsonschema_complex_any_of",
"tests/test_main.py::test_main_external_files_in_directory",
"tests/test_main.py::test_main_with_snake_case_field",
"tests/test_main.py::test_space_and_special_characters_json",
"tests/test_main.py::test_main_circular_reference",
"tests/test_main.py::test_main_inheritance_forward_ref_keep_model_order",
"tests/test_main.py::test_main_json_reuse_model",
"tests/test_main.py::test_main_jsonschema_special_enum_special_field_name_prefix_keep_private",
"tests/test_main.py::test_main_openapi_nullable_strict_nullable",
"tests/test_main.py::test_pyproject_not_found",
"tests/test_main.py::test_main_jsonschema_multiple_files",
"tests/test_main.py::test_main_jsonschema_field_extras_field_extra_keys",
"tests/test_main.py::test_main_collapse_root_models",
"tests/test_main.py::test_main_complicated_enum_default_member",
"tests/test_main.py::test_main_jsonschema_items_boolean",
"tests/test_main.py::test_main_jsonschema",
"tests/test_main.py::test_main_root_model_with_additional_properties_literal",
"tests/test_main.py::test_main_subclass_enum",
"tests/test_main.py::test_main_generate_from_directory",
"tests/test_main.py::test_main_disable_warnings_config",
"tests/test_main.py::test_main_invalid_model_name_converted",
"tests/test_main_kr.py::test_main_modular_filename",
"tests/test_main_kr.py::test_main_modular_no_file",
"tests/test_main_kr.py::test_target_python_version",
"tests/test_main_kr.py::test_main_use_schema_description",
"tests/test_main_kr.py::test_main_use_field_description",
"tests/test_main_kr.py::test_main_base_class",
"tests/test_main_kr.py::test_main_modular",
"tests/test_main_kr.py::test_main"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-04-15 04:09:13+00:00
|
mit
| 3,460 |
|
koxudaxi__datamodel-code-generator-1249
|
diff --git a/datamodel_code_generator/__init__.py b/datamodel_code_generator/__init__.py
index 1b7bcab6..b5ccb464 100644
--- a/datamodel_code_generator/__init__.py
+++ b/datamodel_code_generator/__init__.py
@@ -168,6 +168,37 @@ def is_openapi(text: str) -> bool:
return 'openapi' in load_yaml(text)
+JSON_SCHEMA_URLS: Tuple[str, ...] = (
+ 'http://json-schema.org/',
+ 'https://json-schema.org/',
+)
+
+
+def is_schema(text: str) -> bool:
+ data = load_yaml(text)
+ if not isinstance(data, dict):
+ return False
+ schema = data.get('$schema')
+ if isinstance(schema, str) and any(
+ schema.startswith(u) for u in JSON_SCHEMA_URLS
+ ): # pragma: no cover
+ return True
+ if isinstance(data.get('type'), str):
+ return True
+ if any(
+ isinstance(data.get(o), list)
+ for o in (
+ 'allOf',
+ 'anyOf',
+ 'oneOf',
+ )
+ ):
+ return True
+ if isinstance(data.get('properties'), dict):
+ return True
+ return False
+
+
class InputFileType(Enum):
Auto = 'auto'
OpenAPI = 'openapi'
@@ -304,11 +335,8 @@ def generate(
if isinstance(input_, Path)
else input_text
)
- input_file_type = (
- InputFileType.OpenAPI
- if is_openapi(input_text_) # type: ignore
- else InputFileType.JsonSchema
- )
+ assert isinstance(input_text_, str)
+ input_file_type = infer_input_type(input_text_)
print(
inferred_message.format(input_file_type.value),
file=sys.stderr,
@@ -483,6 +511,14 @@ def generate(
file.close()
+def infer_input_type(text: str) -> InputFileType:
+ if is_openapi(text):
+ return InputFileType.OpenAPI
+ elif is_schema(text):
+ return InputFileType.JsonSchema
+ return InputFileType.Json
+
+
inferred_message = (
'The input file type was determined to be: {}\nThis can be specificied explicitly with the '
'`--input-file-type` option.'
|
koxudaxi/datamodel-code-generator
|
083691c6fea8fabc5000466c40c16298c7a4b463
|
diff --git a/tests/data/jsonschema/items_boolean.json b/tests/data/jsonschema/items_boolean.json
index e12ab0fc..8038870f 100644
--- a/tests/data/jsonschema/items_boolean.json
+++ b/tests/data/jsonschema/items_boolean.json
@@ -1,5 +1,5 @@
{
- "$schema ": "http://json-schema.org/draft-07/schema#",
+ "$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": {
"example": {
diff --git a/tests/data/jsonschema/root_id.json b/tests/data/jsonschema/root_id.json
index 5dee01e1..b62bcde3 100644
--- a/tests/data/jsonschema/root_id.json
+++ b/tests/data/jsonschema/root_id.json
@@ -1,6 +1,6 @@
{
"$id": "https://example.com/root_id.json",
- "$schema ": "http://json-schema.org/draft-07/schema#",
+ "$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"Person": {
"$ref": "person.json"
diff --git a/tests/data/jsonschema/root_id_absolute_url.json b/tests/data/jsonschema/root_id_absolute_url.json
index 9ea3e152..4ac23adc 100644
--- a/tests/data/jsonschema/root_id_absolute_url.json
+++ b/tests/data/jsonschema/root_id_absolute_url.json
@@ -1,6 +1,6 @@
{
"$id": "https://example.com/root_id.json",
- "$schema ": "http://json-schema.org/draft-07/schema#",
+ "$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"Person": {
"$ref": "person.json"
diff --git a/tests/data/jsonschema/root_id_ref.json b/tests/data/jsonschema/root_id_ref.json
index 4e73314c..f7de9985 100644
--- a/tests/data/jsonschema/root_id_ref.json
+++ b/tests/data/jsonschema/root_id_ref.json
@@ -1,5 +1,5 @@
{
- "$schema ": "http://json-schema.org/draft-07/schema#",
+ "$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"Person": {
"$ref": "root_id.json#/definitions/Person"
diff --git a/tests/data/jsonschema/root_id_self_ref.json b/tests/data/jsonschema/root_id_self_ref.json
index 45dcfe64..5c8acb50 100644
--- a/tests/data/jsonschema/root_id_self_ref.json
+++ b/tests/data/jsonschema/root_id_self_ref.json
@@ -1,6 +1,6 @@
{
"$id": "https://example.com/root_id_self_ref.json",
- "$schema ": "http://json-schema.org/draft-07/schema#",
+ "$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"Person": {
"$ref": "person.json"
diff --git a/tests/data/openapi/complex_reference.json b/tests/data/openapi/complex_reference.json
index 24fee52c..1d89f9d2 100644
--- a/tests/data/openapi/complex_reference.json
+++ b/tests/data/openapi/complex_reference.json
@@ -1,4 +1,5 @@
{
+ "openapi": "3.0.0",
"components": {
"schemas": {
"A": {
diff --git a/tests/data/openapi/datetime.yaml b/tests/data/openapi/datetime.yaml
index 21288b3f..9c0a73b1 100644
--- a/tests/data/openapi/datetime.yaml
+++ b/tests/data/openapi/datetime.yaml
@@ -1,3 +1,4 @@
+openapi: "3.0.0"
components:
schemas:
InventoryItem:
diff --git a/tests/data/openapi/definitions.yaml b/tests/data/openapi/definitions.yaml
index 7e6b7d3d..2e99635d 100644
--- a/tests/data/openapi/definitions.yaml
+++ b/tests/data/openapi/definitions.yaml
@@ -1,3 +1,4 @@
+openapi: "3.0.0"
schemas:
Problem:
properties:
diff --git a/tests/data/openapi/discriminator.yaml b/tests/data/openapi/discriminator.yaml
index 334c50f9..9a611ae1 100644
--- a/tests/data/openapi/discriminator.yaml
+++ b/tests/data/openapi/discriminator.yaml
@@ -1,3 +1,4 @@
+openapi: "3.0.0"
components:
schemas:
ObjectBase:
diff --git a/tests/data/openapi/override_required_all_of.yaml b/tests/data/openapi/override_required_all_of.yaml
index 95726152..9c5eeee8 100644
--- a/tests/data/openapi/override_required_all_of.yaml
+++ b/tests/data/openapi/override_required_all_of.yaml
@@ -1,3 +1,4 @@
+openapi: "3.0.0"
components:
schemas:
ObjectBase:
diff --git a/tests/data/openapi/x_enum_varnames.yaml b/tests/data/openapi/x_enum_varnames.yaml
index 5a986655..1b778769 100644
--- a/tests/data/openapi/x_enum_varnames.yaml
+++ b/tests/data/openapi/x_enum_varnames.yaml
@@ -1,4 +1,4 @@
-openapi: 3.0
+openapi: "3.0.0"
components:
schemas:
string:
diff --git a/tests/root_id.json b/tests/root_id.json
index 9ea3e152..4ac23adc 100644
--- a/tests/root_id.json
+++ b/tests/root_id.json
@@ -1,6 +1,6 @@
{
"$id": "https://example.com/root_id.json",
- "$schema ": "http://json-schema.org/draft-07/schema#",
+ "$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"Person": {
"$ref": "person.json"
diff --git a/tests/test_infer_input_type.py b/tests/test_infer_input_type.py
new file mode 100644
index 00000000..27b74afd
--- /dev/null
+++ b/tests/test_infer_input_type.py
@@ -0,0 +1,46 @@
+from pathlib import Path
+
+from datamodel_code_generator import InputFileType, infer_input_type
+
+DATA_PATH: Path = Path(__file__).parent / 'data'
+
+
+def test_infer_input_type():
+ def assert_infer_input_type(file: Path, raw_data_type: InputFileType) -> None:
+ __tracebackhide__ = True
+ if file.is_dir():
+ return
+ if file.suffix not in ('.yaml', '.json'):
+ return
+ result = infer_input_type(file.read_text())
+ assert result == raw_data_type, f'{file} was the wrong type!'
+
+ for file in (DATA_PATH / 'json').rglob('*'):
+ if str(file).endswith('broken.json'):
+ continue
+ assert_infer_input_type(file, InputFileType.Json)
+ for file in (DATA_PATH / 'jsonschema').rglob('*'):
+ if str(file).endswith(('external_child.json', 'external_child.yaml')):
+ continue
+ if 'reference_same_hierarchy_directory' in str(file):
+ continue
+ assert_infer_input_type(file, InputFileType.JsonSchema)
+ for file in (DATA_PATH / 'openapi').rglob('*'):
+ if str(file).endswith(
+ (
+ 'aliases.json',
+ 'extra_data.json',
+ 'invalid.yaml',
+ 'list.json',
+ 'empty_data.json',
+ 'root_model.yaml',
+ 'json_pointer.yaml',
+ 'const.json',
+ )
+ ):
+ continue
+
+ if str(file).endswith('not.json'):
+ assert_infer_input_type(file, InputFileType.Json)
+ continue
+ assert_infer_input_type(file, InputFileType.OpenAPI)
|
(🎁) Can we use heuristics to automatically detect the input type of json files?
It would be convenient if json files could automatically be detected as either schema or data files.
input:
```json
{
  "a": 1
}
```
```
👉 datamodel_code_generator --input test.json
# generated by datamodel-codegen:
# filename: test.json
# timestamp: 2023-04-15T04:05:21+00:00
from __future__ import annotations
from typing import Any
from pydantic import BaseModel
class Model(BaseModel):
    __root__: Any
```
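One possible heuristic, sketched independently of the project's code: treat the input as JSON Schema only when top-level schema keywords are present, otherwise fall back to plain JSON data.

```python
import json

SCHEMA_KEYS = ("$schema", "type", "properties", "allOf", "anyOf", "oneOf")


def looks_like_json_schema(text: str) -> bool:
    try:
        data = json.loads(text)
    except ValueError:
        return False
    return isinstance(data, dict) and any(key in data for key in SCHEMA_KEYS)
```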
|
0.0
|
083691c6fea8fabc5000466c40c16298c7a4b463
|
[
"tests/test_infer_input_type.py::test_infer_input_type"
] |
[] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2023-04-15 07:50:00+00:00
|
mit
| 3,461 |
|
koxudaxi__datamodel-code-generator-726
|
diff --git a/datamodel_code_generator/format.py b/datamodel_code_generator/format.py
index 1b2ffb53..a742d884 100644
--- a/datamodel_code_generator/format.py
+++ b/datamodel_code_generator/format.py
@@ -51,7 +51,7 @@ def black_find_project_root(sources: Sequence[Path]) -> Path:
project_root = _find_project_root(tuple(str(s) for s in sources))
if isinstance(project_root, tuple):
return project_root[0]
- else:
+ else: # pragma: no cover
return project_root
diff --git a/datamodel_code_generator/parser/jsonschema.py b/datamodel_code_generator/parser/jsonschema.py
index 312a786b..5b666378 100644
--- a/datamodel_code_generator/parser/jsonschema.py
+++ b/datamodel_code_generator/parser/jsonschema.py
@@ -670,7 +670,7 @@ class JsonSchemaParser(Parser):
is_dict=True,
dict_key=self.data_type_manager.get_data_type(
Types.string,
- pattern=k,
+ pattern=k if not self.field_constraints else None,
),
)
for k, v in item.patternProperties.items()
|
koxudaxi/datamodel-code-generator
|
ca976e8e43815600b9cc3b3ca3e30c45464d2839
|
diff --git a/tests/data/expected/main/main_pattern_properties_field_constraints/output.py b/tests/data/expected/main/main_pattern_properties_field_constraints/output.py
new file mode 100644
index 00000000..446ff3a2
--- /dev/null
+++ b/tests/data/expected/main/main_pattern_properties_field_constraints/output.py
@@ -0,0 +1,17 @@
+# generated by datamodel-codegen:
+# filename: pattern_properties.json
+# timestamp: 2019-07-26T00:00:00+00:00
+
+from __future__ import annotations
+
+from typing import Dict, Optional
+
+from pydantic import BaseModel
+
+
+class Bar(BaseModel):
+ name: Optional[str] = None
+
+
+class Foo(BaseModel):
+ bar: Dict[str, Bar]
diff --git a/tests/test_main.py b/tests/test_main.py
index 8905a357..608a18e6 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -3480,6 +3480,34 @@ def test_jsonschema_pattern_properties():
main()
+@freeze_time('2019-07-26')
+def test_jsonschema_pattern_properties_field_constraints():
+ with TemporaryDirectory() as output_dir:
+ output_file: Path = Path(output_dir) / 'output.py'
+ return_code: Exit = main(
+ [
+ '--input',
+ str(JSON_SCHEMA_DATA_PATH / 'pattern_properties.json'),
+ '--output',
+ str(output_file),
+ '--input-file-type',
+ 'jsonschema',
+ '--field-constraints',
+ ]
+ )
+ assert return_code == Exit.OK
+ assert (
+ output_file.read_text()
+ == (
+ EXPECTED_MAIN_PATH
+ / 'main_pattern_properties_field_constraints'
+ / 'output.py'
+ ).read_text()
+ )
+ with pytest.raises(SystemExit):
+ main()
+
+
@freeze_time('2019-07-26')
def test_jsonschema_titles():
with TemporaryDirectory() as output_dir:
|
Optional command to generate code without constr
**Is your feature request related to a problem? Please describe.**
Including `constr` with a regular expression in the generated data model can cause the generated code to fail mypy and flake8 checks.
Specifically, with `https://github.com/compose-spec/compose-spec/blob/master/schema/compose-spec.json` as the target file, the following command is used to generate the code.
```
datamodel-codegen --input compose-spec.json --input-file-type jsonschema --output model.py --field-constraints --target-python-version 3.8
```
Part of the generated result contains the following code:
```python
services: Optional[Dict[constr(regex=r'^[a-zA-Z0-9._-]+$'), Service]] = None
```
This fails on mypy and flake8 checks.
**Describe the solution you'd like**
The reason for the failure seems to be the use of constr, so I would like an optional command that outputs in a format that does not contain constr (nor conint).
**Describe alternatives you've considered**
If there is already an optional command that accomplishes this kind of functionality, it would be great to know.
I have already tried the `--field-constraints`, `-use-annotated` option commands, etc., but they did not solve this problem.
**Additional context**
If it is difficult to express a constraint by regular expression without using constr, I would like to see a behavior that replaces constr with str and outputs a warning.
Thank you for creating such a great package.
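For illustration, the shape of output the report asks for, using hypothetical model names: the pattern constraint on the dictionary key is dropped to a plain `str` so the annotation passes mypy and flake8.

```python
from typing import Dict, Optional

from pydantic import BaseModel


class Service(BaseModel):
    image: Optional[str] = None


class ComposeSpec(BaseModel):
    # key constraint omitted; a plain str key type-checks cleanly
    services: Optional[Dict[str, Service]] = None
```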
|
0.0
|
ca976e8e43815600b9cc3b3ca3e30c45464d2839
|
[
"tests/test_main.py::test_jsonschema_pattern_properties_field_constraints"
] |
[
"tests/test_main.py::test_main_json",
"tests/test_main.py::test_csv_stdin",
"tests/test_main.py::test_main_modular_reuse_model",
"tests/test_main.py::test_main_json_reuse_model",
"tests/test_main.py::test_main_jsonschema_nested_deep",
"tests/test_main.py::test_main_root_model_with_additional_properties",
"tests/test_main.py::test_jsonschema_without_titles_use_title_as_name",
"tests/test_main.py::test_main_openapi_json_pointer",
"tests/test_main.py::test_main_jsonschema_multiple_files_json_pointer",
"tests/test_main.py::test_main_generate_custom_class_name_generator_modular",
"tests/test_main.py::test_main_jsonschema_field_include_all_keys",
"tests/test_main.py::test_main_with_aliases",
"tests/test_main.py::test_main_nested_directory",
"tests/test_main.py::test_main_json_pointer",
"tests/test_main.py::test_target_python_version",
"tests/test_main.py::test_enable_faux_immutability",
"tests/test_main.py::test_main_autodetect",
"tests/test_main.py::test_main_without_field_constraints",
"tests/test_main.py::test_main_all_of_ref",
"tests/test_main.py::test_main_json_reuse_enum_default_member",
"tests/test_main.py::test_main_openapi_datetime",
"tests/test_main.py::test_main_json_reuse_enum",
"tests/test_main.py::test_main_openapi_oas_response_reference",
"tests/test_main.py::test_main_openapi_body_and_parameters_only_schemas",
"tests/test_main.py::test_main_root_model_with_additional_properties_literal",
"tests/test_main.py::test_main_with_bad_extra_data",
"tests/test_main.py::test_main_models_not_found",
"tests/test_main.py::test_main_generate_custom_class_name_generator_additional_properties",
"tests/test_main.py::test_main_with_snake_case_field",
"tests/test_main.py::test_space_and_special_characters_dict",
"tests/test_main.py::test_main_modular_filename",
"tests/test_main.py::test_main_jsonschema_special_enum_empty_enum_field_name",
"tests/test_main.py::test_pyproject_not_found",
"tests/test_main.py::test_jsonschema_titles",
"tests/test_main.py::test_use_default",
"tests/test_main.py::test_main_null_and_array",
"tests/test_main.py::test_main_generate",
"tests/test_main.py::test_main_jsonschema_combine_any_of_object",
"tests/test_main.py::test_space_and_special_characters_json",
"tests/test_main.py::test_main_disable_appending_item_suffix",
"tests/test_main.py::test_csv_file",
"tests/test_main.py::test_main_use_generic_container_types_py36",
"tests/test_main.py::test_main_with_more_bad_aliases",
"tests/test_main.py::test_main_jsonschema_pattern",
"tests/test_main.py::test_main_json_arrary_include_null",
"tests/test_main.py::test_main_jsonschema_field_extras",
"tests/test_main.py::test_main_strict_types",
"tests/test_main.py::test_main_all_of_with_object",
"tests/test_main.py::test_main_with_bad_aliases",
"tests/test_main.py::test_main_external_definitions",
"tests/test_main.py::test_version",
"tests/test_main.py::test_main_jsonschema_id",
"tests/test_main.py::test_main_modular_no_file",
"tests/test_main.py::test_allow_population_by_field_name",
"tests/test_main.py::test_main_circular_reference",
"tests/test_main.py::test_main_autodetect_failed",
"tests/test_main.py::test_main_jsonschema_multiple_files_ref_test_json",
"tests/test_main.py::test_jsonschema_titles_use_title_as_name",
"tests/test_main.py::test_main_openapi_body_and_parameters_only_paths",
"tests/test_main.py::test_validation_failed",
"tests/test_main.py::test_main_generate_from_directory",
"tests/test_main.py::test_main_jsonschema_id_as_stdin",
"tests/test_main.py::test_main_yaml",
"tests/test_main.py::test_main_custom_template_dir",
"tests/test_main.py::test_stdin",
"tests/test_main.py::test_main_openapi_content_in_parameters",
"tests/test_main.py::test_main_external_files_in_directory",
"tests/test_main.py::test_main_root_model_with_additional_properties_use_generic_container_types",
"tests/test_main.py::test_main_openapi_nullable_strict_nullable",
"tests/test_main.py::test_main_jsonschema_field_extras_field_include_all_keys",
"tests/test_main.py::test_long_description",
"tests/test_main.py::test_main_json_failed",
"tests/test_main.py::test_main_nested_json_pointer",
"tests/test_main.py::test_main_use_annotated_with_field_constraints",
"tests/test_main.py::test_main_strict_types_all_with_field_constraints",
"tests/test_main.py::test_main_jsonschema_complex_any_of",
"tests/test_main.py::test_main",
"tests/test_main.py::test_main_complicated_enum_default_member",
"tests/test_main.py::test_validation",
"tests/test_main.py::test_main_jsonschema_multiple_files",
"tests/test_main.py::test_main_with_field_constraints",
"tests/test_main.py::test_main_no_file",
"tests/test_main.py::test_main_jsonschema_external_files",
"tests/test_main.py::test_main_openapi_body_and_parameters",
"tests/test_main.py::test_simple_json_snake_case_field",
"tests/test_main.py::test_main_generate_custom_class_name_generator",
"tests/test_main.py::test_jsonschema_pattern_properties",
"tests/test_main.py::test_main_invalid_model_name_converted",
"tests/test_main.py::test_main_openapi_pattern",
"tests/test_main.py::test_main_self_reference",
"tests/test_main.py::test_main_jsonschema_field_extras_field_extra_keys",
"tests/test_main.py::test_force_optional",
"tests/test_main.py::test_main_invalid_model_name",
"tests/test_main.py::test_main_jsonschema_special_enum",
"tests/test_main.py::test_main_with_strip_default_none",
"tests/test_main.py::test_main_root_model_with_additional_properties_use_standard_collections",
"tests/test_main.py::test_main_similar_nested_array",
"tests/test_main.py::test_main_openapi_nullable",
"tests/test_main.py::test_main_jsonschema_complex_one_of",
"tests/test_main.py::test_main_jsonschema_combine_one_of_object",
"tests/test_main.py::test_main_jsonschema_ids",
"tests/test_main.py::test_main_invalid_enum_name",
"tests/test_main.py::test_main_with_exclusive",
"tests/test_main.py::test_main_jsonschema",
"tests/test_main.py::test_main_jsonschema_unsupported_parent_class",
"tests/test_main.py::test_main_jsonschema_special_field_name",
"tests/test_main.py::test_main_invalid_model_name_failed",
"tests/test_main.py::test_main_base_class",
"tests/test_main.py::test_main_subclass_enum",
"tests/test_main.py::test_disable_timestamp",
"tests/test_main.py::test_main_jsonschema_multiple_files_ref",
"tests/test_main.py::test_main_use_standard_collections",
"tests/test_main.py::test_main_modular"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-03-09 15:23:35+00:00
|
mit
| 3,462 |
|
krayzpipes__txt-ferret-12
|
diff --git a/README.md b/README.md
index fb7e3d9..bfd38ce 100644
--- a/README.md
+++ b/README.md
@@ -4,7 +4,7 @@ Identify and classify data in your text files with Python.
## Description
**Definition:** txtferret
- A weasel-like mammal that feasts on rodents... and apparently social security numbers,
-credit card numbers, or any other data that's in your text files.
+credit card numbers, or any other data that's in your text or gzipped text files.
Use custom regular expressions and sanity checks (ex: `luhn` algorithm for account numbers) to find
sensitive data in virtually any size file via your command line.
@@ -109,12 +109,12 @@ filters:
- **Pattern:**
- The regular expression which will be used to find data in the file.
- Regular expression must be compatible with the python `re` module in the standard library.
- - Be sure that your regular expression only contains ONE capture group. For example,
+ - Be sure that your regular expression only contains ONE and ONLY ONE capture group. For example,
if you are capturing a phone number:
- Don't do this: `'(555-(867|555)-5309)'`
- Do this: `'(555-(?:867|555)-5309)'`
- - The first has two capture groups, and inner and an outer.
- - The second has one capture group (the outer). The inner is a non-capturing
+ - The former example has two capture groups, and inner and an outer.
+ - The latter has one capture group (the outer). The inner is a non-capturing
group as defined by starting the capture group with `?:`.
- __Note: If you run into issues with loading a custom filter, try adding
single-quotes around your regular expression.__
@@ -251,6 +251,10 @@ sanity check which can be paired with a DLP solution. Here are some things it wa
- No outrageous licensing per GB of data scanned.
- __You can contribute!__
+## Releases
+
+#### Version 0.0.3 - 2019-06-01
+- Added gzip detection and support.
# Development
diff --git a/setup.py b/setup.py
index a133a6b..bf4f22a 100644
--- a/setup.py
+++ b/setup.py
@@ -14,7 +14,7 @@ from setuptools import setup, find_packages
from codecs import open
from os import path
-__version__ = "0.0.2"
+__version__ = "0.0.3"
description = "Scan text files for sensitive (or non-sensitive) data."
here = path.abspath(path.dirname(__file__))
diff --git a/src/txtferret/_sanity.py b/src/txtferret/_sanity.py
index f591755..0e1452e 100644
--- a/src/txtferret/_sanity.py
+++ b/src/txtferret/_sanity.py
@@ -18,6 +18,10 @@ def luhn(account_string):
test.
"""
+ # TODO - Is there a more effecient way to do this?
+ if not isinstance(account_string, str):
+ account_string = account_string.decode("utf-8")
+
no_special_chars = re.sub("[\W_]", "", account_string)
try:
diff --git a/src/txtferret/core.py b/src/txtferret/core.py
index 0bf3ea5..102e68a 100644
--- a/src/txtferret/core.py
+++ b/src/txtferret/core.py
@@ -1,6 +1,7 @@
"""Core classes and functions for txt_ferret."""
from datetime import datetime
+import gzip
from pathlib import Path
import re
@@ -22,10 +23,19 @@ def tokenize(clear_text, mask, index, tokenize=True, show_matches=False):
:param show_matches: Bool representing whether the clear text should
be redacted all together or not.
"""
+
if not show_matches:
return "REDACTED"
+
+ # byte string can be present if source file is Gzipped
+ # convert to utf-8 string for logging/file output.
+ if not isinstance(clear_text, str):
+ clear_text = clear_text.decode("utf-8")
+ mask = mask.decode("utf-8")
+
if not tokenize:
return clear_text
+
return _get_tokenized_string(clear_text, mask, index)
@@ -36,6 +46,7 @@ def _get_tokenized_string(text, mask, index):
than the original string, then the mask will be cut down to
size.
"""
+
end_index = index + len(mask)
text_length = len(text)
if (text_length - 1) < end_index:
@@ -61,6 +72,30 @@ def _byte_code_to_string(byte_code):
return bytes((code_,)).decode("utf-8")
+def gzipped_file_check(file_to_scan, _opener=None):
+ """ Return bool based on if opening file returns UnicodeDecodeError
+
+ If UnicodeDecodeError is returned when trying to read a line of the
+ file, then we will assume this is a gzipped file.
+
+ :param file_to_scan: String containing file path/name to read.
+ :param _opener: Used to pass file handler stub for testing.
+
+ :return: True if UnicodeDecodeError is detected. False if not.
+ """
+
+ # Use test stub or the normal 'open'
+ _open = _opener or open
+
+ try:
+ with _open(file_to_scan, "r") as rf:
+ _ = rf.readline()
+ except UnicodeDecodeError:
+ return True
+ else:
+ return False
+
+
class Filter:
""" Helper class to hold filter configurations and add a simple
API to interface with Filter attributes.
@@ -75,7 +110,7 @@ class Filter:
mask should start being applied.
"""
- def __init__(self, filter_dict):
+ def __init__(self, filter_dict, gzip):
"""Initialize the Filter object. Lots handling input from
the config file here.
@@ -109,6 +144,11 @@ class Filter:
f"default tokenization mask and index."
)
+ # If gzip, we need to use byte strings instead of utf-8
+ if gzip:
+ self.token_mask = self.token_mask.encode("utf-8")
+ self.pattern = self.pattern.encode("utf-8")
+
try:
self.token_index = int(filter_dict["tokenize"].get("index", 0))
except ValueError:
@@ -124,6 +164,7 @@ class TxtFerret:
config/settings file or CLI arguments/switches.
:attribute file_name: The name of the file to scan.
+ :attribute gzip: Bool depicting if input file is gzipped
:attribute tokenize: Determines if txt_ferret will tokenize the
output of strings that match and pass sanity checks.
:attribute log_level: Log level to be used by logouru.logger.
@@ -144,9 +185,16 @@ class TxtFerret:
def __init__(self, file_name=None, config_file=None, config_=None, **cli_settings):
"""Initialize the TxtFerret object."""
config = config_ or load_config(
- yaml_file=config_file, default_override=cli_settings["config_override"],
+ yaml_file=config_file, default_override=cli_settings["config_override"]
)
self.file_name = file_name
+ self.gzip = gzipped_file_check(self.file_name)
+
+ if self.gzip:
+ logger.info(
+ f"Detected non-text file '{file_name}'... "
+ f"attempting GZIP mode (slower)."
+ )
# Set settings from file.
self.set_attributes(**config["settings"])
@@ -158,7 +206,9 @@ class TxtFerret:
self.failed_sanity = 0
self.passed_sanity = 0
- self.filters = [Filter(filter_dict=filter_) for filter_ in config["filters"]]
+ self.filters = [
+ Filter(filter_dict=filter_, gzip=self.gzip) for filter_ in config["filters"]
+ ]
def set_attributes(self, **kwargs):
"""Sets attributes for the TxtFerret object.
@@ -210,9 +260,17 @@ class TxtFerret:
file_to_scan = file_name or self.file_name
- with open(file_to_scan, "r") as rf:
+ if not self.gzip:
+ _open = open
+ else:
+ _open = gzip.open
+
+ with _open(file_to_scan, "r") as rf:
for index, line in enumerate(rf):
+ # if isinstance(line, bytes):
+ # line = str(line)
+
# If delimiter, then treat file as if it has columns.
if self.delimiter:
self._scan_delimited_line(line, index)
|
krayzpipes/txt-ferret
|
fede6722371aad3f58045e7eb3ab60300955146b
|
diff --git a/tests/core/test_core.py b/tests/core/test_core.py
new file mode 100644
index 0000000..8679f1a
--- /dev/null
+++ b/tests/core/test_core.py
@@ -0,0 +1,35 @@
+from contextlib import contextmanager
+
+from txtferret.core import gzipped_file_check
+
+
+def test_gzipped_file_check_return_true():
+
+ @contextmanager
+ def opener_stub_raise_error(x, y):
+
+ class FileHandlerStub:
+
+ @staticmethod
+ def readline():
+ raise UnicodeDecodeError("fake", b"o", 1, 2, "fake")
+
+ yield FileHandlerStub()
+
+ assert gzipped_file_check("f.txt", _opener=opener_stub_raise_error) == True
+
+
+def test_gzipped_file_check_return_false():
+
+ @contextmanager
+ def opener_stub_no_error(x, y):
+
+ class FileHandlerStub:
+
+ @staticmethod
+ def readline():
+ return ""
+
+ yield FileHandlerStub()
+
+ assert gzipped_file_check("f.txt", _opener=opener_stub_no_error) == False
|
Add gzip support
Would be nice to be able to stream in gzip files. Currently gzipped files raise an 'UnicodeDecodeError' because the utf-8 codec can't read the byte in position 1 of the gzipped file.
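A small sketch of the requested behaviour (the helper name is illustrative): probe the file as UTF-8 text and fall back to `gzip.open` when decoding fails.

```python
import gzip


def choose_opener(path):
    # Returns the appropriate open function for plain-text or gzipped input.
    try:
        with open(path, "r") as f:
            f.readline()
    except UnicodeDecodeError:
        return gzip.open
    return open
```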
|
0.0
|
fede6722371aad3f58045e7eb3ab60300955146b
|
[
"tests/core/test_core.py::test_gzipped_file_check_return_true",
"tests/core/test_core.py::test_gzipped_file_check_return_false"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-06-01 15:22:39+00:00
|
apache-2.0
| 3,463 |
|
krayzpipes__txt-ferret-20
|
diff --git a/README.md b/README.md
index 04d548a..1725ebe 100644
--- a/README.md
+++ b/README.md
@@ -303,6 +303,9 @@ sanity check which can be paired with a DLP solution. Here are some things it wa
## Releases
+#### Version 0.1.0 - 2019-07-30
+- Removed the `config-override` option
+
#### Version 0.0.4 - 2019-06-09
- Added bulk file scanning by the `--bulk` switch.
- Added multiprocessing for bulk scanning.
diff --git a/setup.py b/setup.py
index c16247c..85ebb7f 100644
--- a/setup.py
+++ b/setup.py
@@ -14,7 +14,7 @@ from setuptools import setup, find_packages
from codecs import open
from os import path
-__version__ = "0.0.4"
+__version__ = "0.1.0"
description = "Scan text files for sensitive (or non-sensitive) data."
here = path.abspath(path.dirname(__file__))
diff --git a/src/txtferret/_config.py b/src/txtferret/_config.py
index f8c1723..5a8ce23 100644
--- a/src/txtferret/_config.py
+++ b/src/txtferret/_config.py
@@ -46,7 +46,7 @@ def _load_default_config(config_string=None):
return yaml.safe_load(default_yaml_config)
-def load_config(yaml_file=None, default_override=False, config_=None):
+def load_config(yaml_file=None, config_=None, user_config_func=None):
"""Return dict containing config YAML file content.
If not YAML file is explicitly passed as an argument, this function
@@ -54,51 +54,30 @@ def load_config(yaml_file=None, default_override=False, config_=None):
config file.
:param yaml_file: YAMl file name containing config information.
- :param default_override: If set to 'True', this will result in
- the final returned config dict containing only user-defined
- filters. The defaults will be completely overridden.
:param config_: Used for tests.
:return: dict with the final configuration.
"""
# Load the default config as the final config, we will make
# adjustments as we look at the user-defined config.
- working_config = config_ or _load_default_config()
- # Return default config if no file is defined by user or settings
- # introduced through CLI switches.
if yaml_file is None:
- return working_config
-
- # Mix in the user config if present and return it.
- # If default_override is True, we should return filters ONLY
- # defined by the user.
- return _add_user_config_file(
- config_=working_config, yaml_file=yaml_file, default_override=default_override
- )
-
-
-def _add_user_config_file(
- config_=None,
- yaml_file=None,
- default_override=None,
- _user_config=None,
- validator=None,
-):
- """Return dict containing default config + user defined config.
+ default_config = config_ or _load_default_config()
+ return default_config
+
+ _user_config_load = user_config_func or _get_user_config_file
+
+ return _user_config_load(yaml_file=yaml_file)
- If default_override is set to 'True', then only return the
- user-defined filters.
- :param config_: dict containing config file content.
+def _get_user_config_file(yaml_file=None, _user_config=None, validator=None):
+ """Return dict containing default config + user defined config.
+
:param yaml_file: File name of user-defined configuration.
- :param default_override: If set to True, will only return filters
- defined by the user. Default filters will not be returned.
:param _user_config: Configuration used for tests.
:param validator: Used to pass in validation stubs during tests.
- :return: dict containing the default + user + cli-defined
- configuration.
+ :return: dict containing the user-defined configuration file.
"""
user_defined_config = _user_config or _load_config(yaml_file)
@@ -106,20 +85,7 @@ def _add_user_config_file(
_validator(user_defined_config)
- if "filters" in user_defined_config:
- if default_override:
- # Remove default filters completely.
- config_["filters"] = user_defined_config["filters"]
- else:
- # Add user filters to default filters.
- for filter_ in user_defined_config["filters"]:
- config_["filters"].append(filter_)
-
- if "settings" in user_defined_config:
- for key, value in user_defined_config["settings"].items():
- config_["settings"][key] = value
-
- return config_
+ return user_defined_config
def save_config(data, file_name):
diff --git a/src/txtferret/cli.py b/src/txtferret/cli.py
index 59b76c8..a13eecd 100644
--- a/src/txtferret/cli.py
+++ b/src/txtferret/cli.py
@@ -57,8 +57,7 @@ def prep_config(loader=None, **cli_kwargs):
"""Return a final config file to be sent to TxtFerret."""
_loader = loader or load_config
file_name = cli_kwargs["config_file"]
- override = cli_kwargs["config_override"]
- config = _loader(yaml_file=file_name, default_override=override)
+ config = _loader(yaml_file=file_name)
config["cli_kwargs"] = {**cli_kwargs}
return config
@@ -150,12 +149,6 @@ def cli():
@click.option(
"--config-file", "-c", default=None, help="Load user-defined config file."
)
[email protected](
- "--config-override",
- "-co",
- is_flag=True,
- help="Delete default filters and only use user-defined filters from config file.",
-)
@click.option(
"--delimiter",
"-d",
|
krayzpipes/txt-ferret
|
c9c32b37024a7657f65104b76a29f7375ee519c4
|
diff --git a/tests/_config/test_config.py b/tests/_config/test_config.py
index f4822fb..657f63e 100644
--- a/tests/_config/test_config.py
+++ b/tests/_config/test_config.py
@@ -6,7 +6,7 @@ from txtferret._config import (
_load_config,
_load_default_config,
load_config,
- _add_user_config_file,
+ _get_user_config_file,
save_config,
subset_check,
validate_config,
@@ -15,6 +15,7 @@ from txtferret._config import (
# Test _load_default_config function
+
def test_load_default_config():
config_string = """
settings:
@@ -30,6 +31,28 @@ def test_load_default_config():
# TODO: test _load_config
+def test_load_config_no_custom_config():
+ def stub_func(yaml_file=None):
+ return {"dont": "return_me"}
+
+ rv = load_config(
+ yaml_file=None, config_={"mytest": "config"}, user_config_func=stub_func
+ )
+
+ assert {"mytest": "config"} == rv, "dict should be the same"
+
+
+def test_load_config_return_custom_config():
+ def stub_func(yaml_file=None):
+ return {"my": "config"}
+
+ jv = load_config(
+ yaml_file="something", config_={"dont": "return_me"}, user_config_func=stub_func
+ )
+
+ assert {"my": "config"} == jv, "returned the wrong dict"
+
+
@pytest.fixture(scope="module")
def user_config():
config = {
@@ -55,7 +78,7 @@ def user_config():
@pytest.fixture(scope="module")
-def default_config():
+def custom_config():
config = {
"filters": [
{
@@ -78,7 +101,7 @@ def default_config():
return config
-# Test _add_user_config_file function
+# Test _get_user_config_file function
@pytest.fixture(scope="module")
@@ -97,83 +120,25 @@ def validator_raise():
return validator_func
-def test_add_user_config_file_no_user_config(default_config, validator_true):
- config_copy = copy.deepcopy(default_config)
- rv = _add_user_config_file(
- config_=config_copy,
+def test_get_user_config_file_valid(validator_true):
+ rv = _get_user_config_file(
yaml_file=None,
- default_override=False,
- _user_config={"fake_key", "fake_value"},
+ _user_config={"fake_key": "fake_value"},
validator=validator_true,
)
- assert default_config == rv, "Dicts should be the same."
+ assert {"fake_key": "fake_value"} == rv, "Dicts should be the same."
-def test_add_user_config_file_default_override_with_user_config(
- default_config, user_config, validator_true
-):
- config_copy = copy.deepcopy(default_config)
- expected_config = copy.deepcopy(default_config)
- expected_config["filters"] = copy.deepcopy(user_config["filters"])
- rv = _add_user_config_file(
- config_=config_copy,
- yaml_file=None,
- default_override=True,
- _user_config=user_config,
- validator=validator_true,
- )
- assert (
- expected_config["filters"] == rv["filters"]
- ), "Expected default filters to be replaced by user-defined filters."
- assert expected_config == rv, "Only the filters should have been changed."
-
-
-def test_add_user_config_file_no_default_override_with_user_config(
- default_config, user_config, validator_true
-):
- config_copy = copy.deepcopy(default_config)
- expected_config = copy.deepcopy(default_config)
- user_filters = copy.deepcopy(user_config["filters"])
- expected_config["filters"] += user_filters
- rv = _add_user_config_file(
- config_=config_copy,
- yaml_file=None,
- default_override=False,
- _user_config=user_config,
- validator=validator_true,
- )
- assert (
- expected_config["filters"] == rv["filters"]
- ), "Expected both default and user filters in the final dict."
- assert expected_config == rv, "Only the filters should have been changed."
-
-
-def test_add_user_config_file_change_settings(validator_true):
- original_config = {"settings": {"key_1": "original_value_1"}}
- user_config= {"settings": {"key_1": "user_value_1"}}
- rv = _add_user_config_file(
- config_=original_config,
- yaml_file=None,
- default_override=False,
- _user_config=user_config,
- validator=validator_true,
- )
- assert original_config == user_config, "Expected user settings to override default."
-
-
-def test_add_user_config_file_validation_failed(validator_raise):
+def test_get_user_config_file_validation_failed(validator_raise):
with pytest.raises(ValueError):
- _ = _add_user_config_file(
- config_={"key": "value"},
- yaml_file=None,
- default_override=False,
- _user_config={"who": "cares"},
- validator=validator_raise,
+ _ = _get_user_config_file(
+ yaml_file=None, _user_config={"who": "cares"}, validator=validator_raise
)
# Testing the subset_check function
+
def test_subset_check_it_is_subset():
rv = subset_check(subset={"hello"}, set_={"hello", "world"})
assert rv == True, "Return value should be True."
@@ -194,4 +159,4 @@ def test_subset_check_is_not_subset():
# Test validate_config function
-# TODO: continue writing tests.
\ No newline at end of file
+# TODO: continue writing tests.
diff --git a/tests/cli/test_cli.py b/tests/cli/test_cli.py
index bd0137e..5e2c62f 100644
--- a/tests/cli/test_cli.py
+++ b/tests/cli/test_cli.py
@@ -1,50 +1,29 @@
-
from txtferret.cli import prep_config, bootstrap, get_totals
def test_prep_config():
+ def stub_loader(yaml_file=None):
+ return {"yaml_file": yaml_file}
- def stub_loader(yaml_file=None, default_override=False):
- return {
- "yaml_file": yaml_file,
- "config_override": default_override,
- }
-
- fake_cli_kwargs = {
- "config_file": "my_test_file",
- "config_override": True,
- }
+ fake_cli_kwargs = {"config_file": "my_test_file"}
- final_config = {
- "cli_kwargs": {**fake_cli_kwargs},
- "yaml_file": "my_test_file",
- "config_override": True,
- }
+ final_config = {"cli_kwargs": {**fake_cli_kwargs}, "yaml_file": "my_test_file"}
assert prep_config(loader=stub_loader, **fake_cli_kwargs) == final_config
def test_bootstrap():
-
class StubClass:
def __init__(self, _config):
pass
+
def scan_file(self):
pass
+
def summary(self):
- return {
- "file_name": "hello.txt",
- "failures": 5,
- "passes": 5,
- "time": 28,
- }
-
- expected = {
- "file_name": "hello.txt",
- "failures": 5,
- "passes": 5,
- "time": 28,
- }
+ return {"file_name": "hello.txt", "failures": 5, "passes": 5, "time": 28}
+
+ expected = {"file_name": "hello.txt", "failures": 5, "passes": 5, "time": 28}
config = None
@@ -52,9 +31,6 @@ def test_bootstrap():
def test_get_totals():
- results = [
- {"failures": 2, "passes": 5},
- {"failures": 3, "passes": 10},
- ]
+ results = [{"failures": 2, "passes": 5}, {"failures": 3, "passes": 10}]
assert get_totals(results) == (5, 15)
|
Simplify config override
Using an extra switch to override the default config with a custom config causes confusion. It is easy to dump the existing config and add to it.
- Remove the 'config override' option
- If a user defines a custom configuration, then ONLY values in that custom config can be used.
- For the time being, require the custom config to have all the same settings as the default config.
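
A rough sketch of the simplified behavior described above, under the assumption that a `load_config`-style helper returns either the defaults or the user file wholesale; the validation shown is illustrative only, not the project's implementation:

```python
import yaml


def load_config(yaml_file=None, default_config=None):
    """Return the user-defined config if one is given, else the defaults."""
    default_config = default_config or {}
    if yaml_file is None:
        return default_config  # no custom file: fall back to the defaults
    with open(yaml_file, "r") as fh:
        user_config = yaml.safe_load(fh)
    # per the issue, the custom config must carry every setting the defaults carry
    missing = set(default_config.get("settings", {})) - set(user_config.get("settings", {}))
    if missing:
        raise ValueError(f"custom config is missing settings: {missing}")
    return user_config
```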
|
0.0
|
c9c32b37024a7657f65104b76a29f7375ee519c4
|
[
"tests/_config/test_config.py::test_load_default_config",
"tests/_config/test_config.py::test_load_config_no_custom_config",
"tests/_config/test_config.py::test_load_config_return_custom_config",
"tests/_config/test_config.py::test_get_user_config_file_valid",
"tests/_config/test_config.py::test_get_user_config_file_validation_failed",
"tests/_config/test_config.py::test_subset_check_it_is_subset",
"tests/_config/test_config.py::test_subset_check_it_is_subset_they_are_the_same",
"tests/_config/test_config.py::test_subset_check_is_not_subset",
"tests/cli/test_cli.py::test_prep_config",
"tests/cli/test_cli.py::test_bootstrap",
"tests/cli/test_cli.py::test_get_totals"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-07-30 23:51:48+00:00
|
apache-2.0
| 3,464 |
|
krayzpipes__txt-ferret-29
|
diff --git a/README.md b/README.md
index 3faf10a..2ac1574 100644
--- a/README.md
+++ b/README.md
@@ -193,6 +193,7 @@ settings:
show_matches: Yes
delimiter:
ignore_columns: [1, 5, 6]
+ file_encoding: 'utf-8'
```
- **bulk**
- This setting is accessible via CLI arguments `-b` or `--bulk`.
@@ -276,7 +277,11 @@ settings:
- If `ignore_columns: [2, 6]` is configured and a csv row is `hello,world,how,are,you,doing,today`, then
`world` and `doing` will not be scanned but will be ignored.
- This is particularly useful in columnar datasets when you know there is a column that is full of false positives.
-
+ - **file_encoding**
+ - Two uses:
+ - Used to encode your `delimiter` value to the appropriate encoding of your file.
+ - Used to encode the data matched in the file before being applied to sanity check.
+ - Default value is `'utf-8'`
# How/why did this come about?
There are a few shortcomings with commercial Data Loss Prevention (DLP) products:
@@ -315,6 +320,9 @@ sanity check which can be paired with a DLP solution. Here are some things it wa
## Releases
+#### Version 0.1.3 - 2019-08-05
+- Added `file_encoding` setting for multi-encoding support.
+ - Reads in bytes and assumes `'utf-8'` encoding by default.
#### Version 0.1.2 - 2019-08-01
- Fixed bug with regex when reading gzipped files.
#### Version 0.1.1 - 2019-07-30
diff --git a/setup.py b/setup.py
index 1030c83..d1229ec 100644
--- a/setup.py
+++ b/setup.py
@@ -14,7 +14,7 @@ from setuptools import setup, find_packages
from codecs import open
from os import path
-__version__ = "0.1.2"
+__version__ = "0.1.3"
description = "Scan text files for sensitive (or non-sensitive) data."
here = path.abspath(path.dirname(__file__))
diff --git a/src/debug.py b/src/debug.py
index bd32582..e9158fd 100644
--- a/src/debug.py
+++ b/src/debug.py
@@ -1,4 +1,3 @@
-
"""Point your debuggers to this file as the script/entry point..."""
from txtferret.cli import cli
diff --git a/src/txtferret/_config.py b/src/txtferret/_config.py
index b6b5f7a..eea7149 100644
--- a/src/txtferret/_config.py
+++ b/src/txtferret/_config.py
@@ -2,14 +2,22 @@ import os
import yaml
-from ._default import default_yaml
+from ._default import DEFAULT_YAML
# Keys allowed in top lovel of config.
_allowed_top_level = {"filters", "settings"}
# Keys allowed for a filter in the config YAML file.
-_allowed_filter_keys = {"label", "type", "pattern", "tokenize", "sanity", "substitute"}
+_allowed_filter_keys = {
+ "label",
+ "type",
+ "pattern",
+ "tokenize",
+ "sanity",
+ "substitute",
+ "encoding",
+}
# Keys allowed for the filter.tokenize values.
_allowed_token_keys = {"mask", "index"}
@@ -26,6 +34,7 @@ _allowed_settings_keys = {
"show_matches",
"delimiter",
"ignore_columns",
+ "file_encoding",
}
@@ -43,7 +52,7 @@ def _load_default_config(config_string=None):
:return: dict containing default config YAML file content.
"""
- default_yaml_config = config_string or default_yaml
+ default_yaml_config = config_string or DEFAULT_YAML
return yaml.safe_load(default_yaml_config)
diff --git a/src/txtferret/_default.py b/src/txtferret/_default.py
index 9d101b6..268d3b8 100644
--- a/src/txtferret/_default.py
+++ b/src/txtferret/_default.py
@@ -1,4 +1,3 @@
-
"""Default YAML config.
IF YOU WILL BE CHANGING THIS CONFIG FILE, be sure that you update the
@@ -6,10 +5,13 @@ validation functions and tests for _config.py.
"""
-default_substitute = "[\W_]"
+DEFAULT_SUBSTITUTE = "[\W_]"
+DEFAULT_ENCODING = "utf-8"
+DEFAULT_TOKEN_MASK = "XXXXXXXXXXXXXXX"
+DEFAULT_TOKEN_INDEX = 0
-default_yaml = """
+DEFAULT_YAML = """
settings:
tokenize: Yes
log_level: INFO
@@ -18,6 +20,7 @@ settings:
show_matches: Yes
delimiter:
ignore_columns:
+ file_encoding: 'utf-8'
filters:
- label: american_express_15_ccn
@@ -60,4 +63,4 @@ filters:
tokenize:
mask: XXXXXXXXXXXX
index: 2
-"""
\ No newline at end of file
+"""
diff --git a/src/txtferret/_sanity.py b/src/txtferret/_sanity.py
index 7754977..bd9ba6e 100644
--- a/src/txtferret/_sanity.py
+++ b/src/txtferret/_sanity.py
@@ -1,7 +1,7 @@
import re
-def luhn(account_string):
+def luhn(account_string, _encoding):
"""Return bool if string passes Luhn test.
This is based on the algorithm example found on the wikipedia
@@ -11,6 +11,7 @@ def luhn(account_string):
:param account_string: The string of digits to be tested by the
luhn algorithm.
+ :param encoding: Encoding of the string to be tested.
:raises ValueError: Input couldn't be converted to int type.
@@ -20,7 +21,7 @@ def luhn(account_string):
# TODO - Is there a more effecient way to do this?
if not isinstance(account_string, str):
- account_string = account_string.decode("utf-8")
+ account_string = account_string.decode(_encoding)
# no_special_chars = re.sub("[\W_]", "", account_string)
@@ -46,12 +47,13 @@ def luhn(account_string):
sanity_mapping = {"luhn": luhn}
-def sanity_check(sanity_check_name, data, sanity_map=None):
+def sanity_check(sanity_check_name, data, encoding=None, sanity_map=None):
"""Return bool representing whether the sanity check passed or not.
:param sanity_check_name: Name of the sanity check to be
performed. (Ex: 'luhn')
:param data: Data to be validated by the sanity check.
+ :param encoding: Encoding of the data to be tested.
:param sanity_map: Map of sanity checks. Mostly here for tests.
:raises ValueError: Sanity check does not exist.
@@ -64,4 +66,4 @@ def sanity_check(sanity_check_name, data, sanity_map=None):
except KeyError:
raise ValueError(f"Sanity algorithm {sanity_check_name} does not exist.")
else:
- return _sanity_algorithm(data)
+ return _sanity_algorithm(data, encoding)
diff --git a/src/txtferret/core.py b/src/txtferret/core.py
index 39fe682..9d882e9 100644
--- a/src/txtferret/core.py
+++ b/src/txtferret/core.py
@@ -9,11 +9,22 @@ from loguru import logger
from ._config import _allowed_settings_keys
from ._sanity import sanity_check
-from ._default import default_substitute
+from ._default import (
+ DEFAULT_SUBSTITUTE,
+ DEFAULT_ENCODING,
+ DEFAULT_TOKEN_INDEX,
+ DEFAULT_TOKEN_MASK,
+)
def tokenize(
- clear_text, mask, index, tokenize=True, show_matches=False, tokenize_func=None
+ clear_text,
+ mask,
+ index,
+ tokenize=True,
+ show_matches=False,
+ encoding_=DEFAULT_ENCODING,
+ tokenize_func=None,
):
"""Return string as redacted, tokenized format, or clear text.
@@ -25,23 +36,22 @@ def tokenize(
tokenized or not.
:param show_matches: Bool representing whether the clear text should
be redacted all together or not.
+ :param encoding_: Encoding of the text which will be tokenized.
"""
if not show_matches:
return "REDACTED"
- # byte string can be present if source file is Gzipped
- # convert to utf-8 string for logging/file output.
- if not isinstance(clear_text, str):
- clear_text = clear_text.decode("utf-8")
- mask = mask.decode("utf-8")
-
if not tokenize:
return clear_text
tokenize_function = tokenize_func or _get_tokenized_string
- return tokenize_function(clear_text, mask, index)
+ # Convert to str so we can use it like a list with indexes natively.
+ clear_text = clear_text.decode(encoding_)
+ mask = mask.decode(encoding_)
+
+ return tokenize_function(clear_text, mask, index).encode(encoding_)
def _get_tokenized_string(text, mask, index):
@@ -60,7 +70,7 @@ def _get_tokenized_string(text, mask, index):
return f"{text[:index]}{mask}{text[end_index:]}"
-def _byte_code_to_string(byte_code):
+def _byte_code_to_string(byte_code, _encoding):
"""Return the UTF-8 form of a byte code.
:param byte_code: String that may contain a string that matches the
@@ -70,35 +80,37 @@ def _byte_code_to_string(byte_code):
:return: UTF-8 version of byte-code.
"""
- match = re.match("b(\d{1,3})", byte_code)
+ match = re.match(b"b(\d{1,3})", byte_code)
if not match:
return byte_code
code_ = int(match.group(1))
- return bytes((code_,)).decode("utf-8")
+ return bytes((code_,))
def gzipped_file_check(file_to_scan, _opener=None):
- """ Return bool based on if opening file returns UnicodeDecodeError
+ """ Return bool based on if opening file having first two
+ gzip chars.
- If UnicodeDecodeError is returned when trying to read a line of the
- file, then we will assume this is a gzipped file.
+ If the first two bytes are \x1f\x8b, then it is a gzip file.
:param file_to_scan: String containing file path/name to read.
:param _opener: Used to pass file handler stub for testing.
- :return: True if UnicodeDecodeError is detected. False if not.
+ :return: True if first two bytes match first two bytes of gzip file.
"""
# Use test stub or the normal 'open'
_open = _opener or open
- try:
- with _open(file_to_scan, "r") as rf:
- _ = rf.readline()
- except UnicodeDecodeError:
+ # Read first two bytes
+ with _open(file_to_scan, "rb") as rf:
+ first_two_bytes = rf.read(2)
+
+ gzip_bytes = b"\x1f\x8b"
+
+ if first_two_bytes == gzip_bytes:
return True
- else:
- return False
+ return False
class Filter:
@@ -117,7 +129,7 @@ class Filter:
mask should start being applied.
"""
- def __init__(self, filter_dict, gzip):
+ def __init__(self, filter_dict, gzip, _encoding=DEFAULT_ENCODING):
"""Initialize the Filter object. Lots handling input from
the config file here.
@@ -136,10 +148,10 @@ class Filter:
try:
self.substitute = filter_dict["substitute"]
except KeyError:
- self.substitute = default_substitute
+ self.substitute = DEFAULT_SUBSTITUTE
else:
- if not self.substitute:
- self.substitute = default_substitute
+ if not self.substitute or self.substitute is None:
+ self.substitute = DEFAULT_SUBSTITUTE
self.type = filter_dict.get("type", "NOT_DEFINED")
self.sanity = filter_dict.get("sanity", "")
@@ -151,21 +163,19 @@ class Filter:
self.sanity = [self.sanity]
try:
- self.token_mask = filter_dict["tokenize"].get("mask", "XXXXXXXXXXXXXXX")
+ self.token_mask = filter_dict["tokenize"].get("mask", DEFAULT_TOKEN_MASK)
except KeyError:
- self.token_mask = "XXXXXXXXXXXXXXX" # move this to the default
- self.token_index = 0
+ self.token_mask = DEFAULT_TOKEN_MASK # move this to the default
+ self.token_index = DEFAULT_TOKEN_INDEX
logger.info(
f"Filter did not have tokenize section. Reverting to "
f"default tokenization mask and index."
)
- # If gzip, we need to use byte strings instead of utf-8
- if gzip:
- self.token_mask = self.token_mask.encode("utf-8")
- self.pattern = self.pattern.encode("utf-8")
- self.substitute = self.substitute.encode("utf-8")
- self.empty = b"" # Used in re.sub in 'sanity_check'
+ self.token_mask = self.token_mask.encode(_encoding)
+ self.pattern = self.pattern.encode(_encoding)
+ self.substitute = self.substitute.encode(_encoding)
+ self.empty = b"" # Used in re.sub in 'sanity_check'
try:
self.token_index = int(filter_dict["tokenize"].get("index", 0))
@@ -187,7 +197,7 @@ class TxtFerret:
output of strings that match and pass sanity checks.
:attribute log_level: Log level to be used by logouru.logger.
:attribute summarize: If True, only outputs summary of the scan
- resutls.
+ results.
:attribute output_file: File to write results to.
:attribute show_matches: Show or redact matched strings.
:attribute delimiter: String representing the delimiter for
@@ -219,6 +229,12 @@ class TxtFerret:
# Override settings from file with CLI arguments if present.
self.set_attributes(**cli_settings)
+ if getattr(self, "file_encoding", None) is None:
+ self.file_encoding = DEFAULT_ENCODING
+
+ if self.delimiter:
+ self.delimiter = self.delimiter.encode(self.file_encoding)
+
# Counters
self.failed_sanity = 0
self.passed_sanity = 0
@@ -313,7 +329,7 @@ class TxtFerret:
else:
_open = gzip.open
- with _open(file_to_scan, "r") as rf:
+ with _open(file_to_scan, "rb") as rf:
for index, line in enumerate(rf):
# if isinstance(line, bytes):
@@ -342,12 +358,9 @@ class TxtFerret:
# Make sure to convert to bytecode/hex if necessary.
# For example... Start Of Header (SOH).
- delimiter = _byte_code_to_string(self.delimiter)
+ delimiter = _byte_code_to_string(self.delimiter, self.file_encoding)
- if not self.gzip:
- columns = line.split(delimiter)
- else:
- columns = line.split(delimiter.encode("utf-8"))
+ columns = line.split(delimiter)
column_map = get_column_map(
columns=columns, filter_=filter_, ignore_columns=self.ignore_columns
@@ -356,7 +369,9 @@ class TxtFerret:
for column_number, column_match_list in column_map.items():
for column_match in column_match_list:
- if not sanity_test(filter_, column_match):
+ if not sanity_test(
+ filter_, column_match, encoding=self.file_encoding
+ ):
self.failed_sanity += 1
if not self.summarize:
@@ -366,13 +381,16 @@ class TxtFerret:
self.passed_sanity += 1
- string_to_log = tokenize(
+ _string_to_log = tokenize(
column_match,
filter_.token_mask,
filter_.token_index,
tokenize=self.tokenize,
+ encoding_=self.file_encoding,
show_matches=self.show_matches,
)
+ # Print a str instead of byte-string
+ string_to_log = _string_to_log.decode(self.file_encoding)
if not self.summarize:
log_success(
@@ -404,14 +422,18 @@ class TxtFerret:
self.passed_sanity += 1
- string_to_log = tokenize(
+ _string_to_log = tokenize(
match,
filter_.token_mask,
filter_.token_index,
tokenize=self.tokenize,
+ encoding_=self.file_encoding,
show_matches=self.show_matches,
)
+ # Print a str instead of byte-string
+ string_to_log = _string_to_log.decode(self.file_encoding)
+
if not self.summarize:
log_success(self.file_name, filter_, index, string_to_log)
@@ -452,13 +474,14 @@ def get_column_map(columns=None, filter_=None, ignore_columns=None):
return column_map
-def sanity_test(filter_, text, sub=True, sanity_func=None):
+def sanity_test(filter_, text, sub=True, encoding=DEFAULT_ENCODING, sanity_func=None):
"""Return bool depending on if text passes the sanity check.
:param filter_: Filter object.
:param text: The text being tested by the sanity check.
:param sub: For future use, can be used to skip the substitution
portion before passing text to sanity checks.
+ :param encoding: Encoding of the text that will be checked.
:sanity_func: Used for tests.
:return: True or False - Depending on if sanity check passed
@@ -473,7 +496,7 @@ def sanity_test(filter_, text, sub=True, sanity_func=None):
for algorithm_name in filter_.sanity:
- if not _sanity_checker(algorithm_name, _text):
+ if not _sanity_checker(algorithm_name, _text, encoding=encoding):
return False
return True
|
krayzpipes/txt-ferret
|
331d671142e87569089f2a9e7e72d2ee77349cc4
|
diff --git a/tests/_sanity/test_sanity.py b/tests/_sanity/test_sanity.py
index 9ce86e9..3cad182 100644
--- a/tests/_sanity/test_sanity.py
+++ b/tests/_sanity/test_sanity.py
@@ -27,13 +27,13 @@ def bad_luhn_fake_account_num_delims():
def test_luhn_with_passing_account_num(good_luhn_fake_account_num):
- rv = luhn(good_luhn_fake_account_num)
+ rv = luhn(good_luhn_fake_account_num, "utf-8")
assert rv == True, "Should have returned True"
assert isinstance(rv, bool), "Return value should be a bool"
def test_luhn_with_failing_account_num(bad_luhn_fake_account_num):
- rv = luhn(bad_luhn_fake_account_num)
+ rv = luhn(bad_luhn_fake_account_num, "utf-8")
assert rv == False, "Should have returned False"
assert isinstance(rv, bool), "Return value should be a bool"
@@ -41,13 +41,13 @@ def test_luhn_with_failing_account_num(bad_luhn_fake_account_num):
def test_luhn_for_value_error():
non_int = "123abc"
with pytest.raises(ValueError) as e_info:
- _ = luhn(non_int)
+ _ = luhn(non_int, "utf-8")
def test_luhn_for_value_error_with_delimeters():
non_int_with_delims = "123-abc"
with pytest.raises(ValueError) as e_info:
- _ = luhn(non_int_with_delims)
+ _ = luhn(non_int_with_delims, "utf-8")
# Sanity check function tests
@@ -55,7 +55,7 @@ def test_luhn_for_value_error_with_delimeters():
@pytest.fixture(scope="module")
def always_true_algorithm_stub():
- def stub_func(not_used):
+ def stub_func(not_used, also_not_used):
return True
return stub_func
@@ -63,7 +63,7 @@ def always_true_algorithm_stub():
@pytest.fixture(scope="module")
def always_false_algorithm_stub():
- def stub_func(not_used):
+ def stub_func(not_used, also_not_used):
return False
return stub_func
@@ -76,7 +76,7 @@ def test_sanity_check_passes_sanity(always_true_algorithm_stub):
name = "always_true"
test_sanity_map = {name: always_true_algorithm_stub}
data = "placeholder"
- rv = sanity_check(name, data, sanity_map=test_sanity_map)
+ rv = sanity_check(name, data, encoding="utf-8", sanity_map=test_sanity_map)
assert rv == True, "Should have returned True"
assert isinstance(rv, bool), "Return value should have been a bool"
@@ -85,7 +85,7 @@ def test_sanity_check_fails_sanity(always_false_algorithm_stub):
name = "always_false"
test_sanity_map = {name: always_false_algorithm_stub}
data = "placeholder"
- rv = sanity_check(name, data, sanity_map=test_sanity_map)
+ rv = sanity_check(name, data, encoding="utf-8", sanity_map=test_sanity_map)
assert rv == False, "Should have returned False"
assert isinstance(rv, bool), "Return value should have been a bool"
@@ -95,4 +95,4 @@ def test_sanity_check_algorithm_name_doesnt_exist():
test_sanity_map = {"not_real": "me_either"}
data = "placeholder"
with pytest.raises(ValueError) as e_info:
- sanity_check(name, data, sanity_map=test_sanity_map)
+ sanity_check(name, data, encoding="utf-8", sanity_map=test_sanity_map)
diff --git a/tests/core/test_core.py b/tests/core/test_core.py
index babadcf..60b6764 100644
--- a/tests/core/test_core.py
+++ b/tests/core/test_core.py
@@ -16,8 +16,8 @@ def test_gzipped_file_check_return_true():
def opener_stub_raise_error(x, y):
class FileHandlerStub:
@staticmethod
- def readline():
- raise UnicodeDecodeError("fake", b"o", 1, 2, "fake")
+ def read(not_used):
+ return b"\x1f\x8b"
yield FileHandlerStub()
@@ -29,8 +29,8 @@ def test_gzipped_file_check_return_false():
def opener_stub_no_error(x, y):
class FileHandlerStub:
@staticmethod
- def readline():
- return ""
+ def read(not_used):
+ return b"NOPE"
yield FileHandlerStub()
@@ -44,7 +44,7 @@ def test_tokenize_not_show_matches():
def test_tokenize_return_clear_text():
- assert tokenize("hello", "XXX", 0, tokenize=False, show_matches=True) == "hello"
+ assert tokenize(b"hello", b"XXX", 0, tokenize=False, show_matches=True) == b"hello"
def test_tokenize_runs_tokenization_function():
@@ -52,20 +52,11 @@ def test_tokenize_runs_tokenization_function():
return "stub was called"
assert (
- tokenize("hello", "XXX", 0, show_matches=True, tokenize_func=stub_func)
- == "stub was called"
+ tokenize(b"hello", b"XXX", 0, show_matches=True, tokenize_func=stub_func)
+ == b"stub was called"
)
-def test_tokenize_for_byte_return():
- def stub_func(arg1, arg2, arg3):
- return (arg1, arg2, arg3)
-
- assert tokenize(
- b"hello", b"XXX", 0, show_matches=True, tokenize_func=stub_func
- ) == ("hello", "XXX", 0)
-
-
def test_get_tokenized_string_normal():
text = "howdy"
@@ -86,20 +77,20 @@ def test_get_tokenized_string_mask_too_long():
def test_byte_code_to_string_no_byte_string():
- fake_byte_code = "bhello"
+ fake_byte_code = b"bhello"
- assert _byte_code_to_string(fake_byte_code) == fake_byte_code
+ assert _byte_code_to_string(fake_byte_code, _encoding="utf-8") == fake_byte_code
def test_byte_code_to_string_start_of_header():
- byte_code = "b1"
+ byte_code = b"b1"
- assert _byte_code_to_string(byte_code) == "\x01"
+ assert _byte_code_to_string(byte_code, _encoding="utf-8") == b"\x01"
def test_sanity_for_failed_sanity_check():
- def stub_func(a, b):
+ def stub_func(a, b, encoding):
return False
class StubFilter:
@@ -111,7 +102,7 @@ def test_sanity_for_failed_sanity_check():
def test_sanity_for_passed_sanity_checks():
- def stub_func(a, b):
+ def stub_func(a, b, encoding):
return True
class StubFilter:
|
Support other codecs than just 'utf-8'
The following stack trace shows what appears to be a non-utf-8 encoded file. Some options:
- Let the user define the codec to use. <-- I like this one
- Automatically run through a list of codecs to try.
```python
Traceback (most recent call last):
File "/usr/lib64/python3.6/multiprocessing/pool.py", line 119, in worker
result = (True, func(*args, **kwds))
File "/usr/lib64/python3.6/multiprocessing/pool.py", line 44, in mapstar
return list(map(*args))
File "/home/some_user/venv/lib64/python3.6/site-packages/txtferret/cli.py", line 69, in bootstrap
ferret.scan_file()
File "/home/some_user/venv/lib64/python3.6/site-packages/txtferret/core.py", line 315, in scan_file
for index, line in enumerate(rf):
File "/usr/lib64/python3.6/codecs.py", line 321, in decode
(result, consumed) = self._buffer_decode(data, self.errors, final)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xe1 in position 1471: invalid continuation byte
"""
Traceback (most recent call last):
File "/home/some_user/venv/bin/txtferret", line 11, in <module>
sys.exit(main())
File "/home/some_user/venv/lib64/python3.6/site-packages/txtferret/__init__.py", line 7, in main
exit(cli())
File "/home/some_user/venv/lib64/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/home/some_user/venv/lib64/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/home/some_user/venv/lib64/python3.6/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/some_user/venv/lib64/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/some_user/venv/lib64/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/some_user/venv/lib64/python3.6/site-packages/txtferret/cli.py", line 190, in scan
results = p.map(bootstrap, configs)
File "/usr/lib64/python3.6/multiprocessing/pool.py", line 266, in map
return self._map_async(func, iterable, mapstar, chunksize).get()
File "/usr/lib64/python3.6/multiprocessing/pool.py", line 644, in get
raise self._value
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xe1 in position 1471: invalid continuation byte
```
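
A minimal sketch of the first option (a user-defined codec), assuming the file is read as bytes and each line is decoded with a configurable `file_encoding`; the function and setting names here are illustrative:

```python
def scan_lines(path, file_encoding="utf-8"):
    """Yield decoded lines, reading the file in binary mode."""
    with open(path, "rb") as fh:
        for raw_line in fh:
            # e.g. file_encoding="latin-1" would sidestep the traceback above
            yield raw_line.decode(file_encoding)
```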
|
0.0
|
331d671142e87569089f2a9e7e72d2ee77349cc4
|
[
"tests/_sanity/test_sanity.py::test_luhn_with_passing_account_num",
"tests/_sanity/test_sanity.py::test_luhn_with_failing_account_num",
"tests/_sanity/test_sanity.py::test_luhn_for_value_error",
"tests/_sanity/test_sanity.py::test_luhn_for_value_error_with_delimeters",
"tests/_sanity/test_sanity.py::test_sanity_check_passes_sanity",
"tests/_sanity/test_sanity.py::test_sanity_check_fails_sanity",
"tests/_sanity/test_sanity.py::test_sanity_check_algorithm_name_doesnt_exist",
"tests/core/test_core.py::test_gzipped_file_check_return_true",
"tests/core/test_core.py::test_gzipped_file_check_return_false",
"tests/core/test_core.py::test_tokenize_return_clear_text",
"tests/core/test_core.py::test_tokenize_runs_tokenization_function",
"tests/core/test_core.py::test_byte_code_to_string_no_byte_string",
"tests/core/test_core.py::test_byte_code_to_string_start_of_header",
"tests/core/test_core.py::test_sanity_for_failed_sanity_check",
"tests/core/test_core.py::test_sanity_for_passed_sanity_checks"
] |
[
"tests/core/test_core.py::test_tokenize_not_show_matches",
"tests/core/test_core.py::test_get_tokenized_string_normal",
"tests/core/test_core.py::test_get_tokenized_string_mask_too_long"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-08-06 02:31:06+00:00
|
apache-2.0
| 3,465 |
|
kronenthaler__mod-pbxproj-320
|
diff --git a/pbxproj/pbxsections/PBXBuildRule.py b/pbxproj/pbxsections/PBXBuildRule.py
new file mode 100644
index 0000000..e7d2493
--- /dev/null
+++ b/pbxproj/pbxsections/PBXBuildRule.py
@@ -0,0 +1,6 @@
+from pbxproj import PBXGenericObject
+
+
+class PBXBuildRule(PBXGenericObject):
+ def _get_comment(self):
+ return 'PBXBuildRule'
diff --git a/pbxproj/pbxsections/XCConfigurationList.py b/pbxproj/pbxsections/XCConfigurationList.py
index 4612415..70aa38f 100644
--- a/pbxproj/pbxsections/XCConfigurationList.py
+++ b/pbxproj/pbxsections/XCConfigurationList.py
@@ -10,7 +10,7 @@ class XCConfigurationList(PBXGenericObject):
objects = self.get_parent()
target_id = self.get_id()
- for obj in objects.get_objects_in_section('PBXNativeTarget', 'PBXAggregateTarget'):
+ for obj in objects.get_objects_in_section('PBXNativeTarget', 'PBXLegacyTarget', 'PBXAggregateTarget'):
if target_id in obj.buildConfigurationList:
return obj.isa, obj.name
diff --git a/pbxproj/pbxsections/__init__.py b/pbxproj/pbxsections/__init__.py
index 7139ba9..8a33a40 100644
--- a/pbxproj/pbxsections/__init__.py
+++ b/pbxproj/pbxsections/__init__.py
@@ -1,4 +1,5 @@
from pbxproj.pbxsections.PBXBuildFile import *
+from pbxproj.pbxsections.PBXBuildRule import *
from pbxproj.pbxsections.PBXFileReference import *
from pbxproj.pbxsections.PBXFrameworksBuildPhase import *
from pbxproj.pbxsections.PBXProject import *
|
kronenthaler/mod-pbxproj
|
76ae75e6ee045410ef147a355e57e49ab23b77e2
|
diff --git a/tests/pbxsections/TestXCConfigurationList.py b/tests/pbxsections/TestXCConfigurationList.py
index 384d0c0..6641588 100644
--- a/tests/pbxsections/TestXCConfigurationList.py
+++ b/tests/pbxsections/TestXCConfigurationList.py
@@ -11,7 +11,7 @@ class XCConfigurationListTest(unittest.TestCase):
self.assertEqual(config._get_comment(), 'Build configuration list for TargetType "name"')
- def testGetSectionOnTarget(self):
+ def testGetSectionOnNativeTarget(self):
objs = objects(None).parse(
{
'1': {
@@ -26,6 +26,21 @@ class XCConfigurationListTest(unittest.TestCase):
config = objs['2']
self.assertEqual(config._get_comment(), 'Build configuration list for PBXNativeTarget "the-target-name"')
+ def testGetSectionOnLegacyTarget(self):
+ objs = objects(None).parse(
+ {
+ '1': {
+ 'isa': 'PBXLegacyTarget',
+ 'buildConfigurationList': ['2'],
+ 'name': 'the-target-name'
+ },
+ '2': {
+ 'isa': 'XCConfigurationList'
+ }
+ })
+ config = objs['2']
+ self.assertEqual(config._get_comment(), 'Build configuration list for PBXLegacyTarget "the-target-name"')
+
def testGetSectionOnProject(self):
objs = objects(None).parse(
{
|
[BUG] This one project loads but doesn't save
**Describe the bug**
I have a project file (attached) that can be loaded, but saving it errors out with a backtrace.
**System information**
1. pbxproj version used: 3.4.0
2. python version used: 3.9.13
3. Xcode version used: 13.4.1 (13F100)
**To Reproduce**
Steps to reproduce the behavior:
1. Download the project file and test.py and unzip into a directory: [test.zip](https://github.com/kronenthaler/mod-pbxproj/files/9226878/test.zip)
2. `python3 test.py`
3. See error:
```
% python3 test.py
Traceback (most recent call last):
File "/Users/tobynelson/Code/InformAppTESTING/Inform/zoom/ZoomCocoa.xcodeproj/test.py", line 4, in <module>
project.save()
File "/usr/local/lib/python3.9/site-packages/pbxproj/XcodeProject.py", line 56, in save
file.write(self.__repr__() + "\n")
File "/usr/local/lib/python3.9/site-packages/pbxproj/XcodeProject.py", line 65, in __repr__
return '// !$*UTF8*$!\n' + super(XcodeProject, self).__repr__()
File "/usr/local/lib/python3.9/site-packages/pbxproj/PBXGenericObject.py", line 75, in __repr__
return self._print_object()
File "/usr/local/lib/python3.9/site-packages/pbxproj/PBXGenericObject.py", line 80, in _print_object
value = self._format(self[key], indent_depth, entry_separator, object_start,
File "/usr/local/lib/python3.9/site-packages/pbxproj/PBXGenericObject.py", line 101, in _format
value = value._print_object(indentation_depth + indentation_increment,
File "/usr/local/lib/python3.9/site-packages/pbxproj/PBXObjects.py", line 45, in _print_object
obj = value._print_object(indent_depth + '\t', entry_separator, object_start,
File "/usr/local/lib/python3.9/site-packages/pbxproj/PBXGenericObject.py", line 80, in _print_object
value = self._format(self[key], indent_depth, entry_separator, object_start,
File "/usr/local/lib/python3.9/site-packages/pbxproj/PBXGenericObject.py", line 111, in _format
value = value.__repr__()
File "/usr/local/lib/python3.9/site-packages/pbxproj/PBXKey.py", line 8, in __repr__
comment = self._get_comment()
File "/usr/local/lib/python3.9/site-packages/pbxproj/PBXKey.py", line 20, in _get_comment
return self.get_parent()._resolve_comment(self)
File "/usr/local/lib/python3.9/site-packages/pbxproj/PBXGenericObject.py", line 162, in _resolve_comment
return parent._resolve_comment(key)
File "/usr/local/lib/python3.9/site-packages/pbxproj/PBXGenericObject.py", line 162, in _resolve_comment
return parent._resolve_comment(key)
File "/usr/local/lib/python3.9/site-packages/pbxproj/PBXGenericObject.py", line 157, in _resolve_comment
return self[key]._get_comment()
File "/usr/local/lib/python3.9/site-packages/pbxproj/pbxsections/XCConfigurationList.py", line 6, in _get_comment
info = self._get_section()
File "/usr/local/lib/python3.9/site-packages/pbxproj/pbxsections/XCConfigurationList.py", line 18, in _get_section
project = projects.__next__()
StopIteration
```
**Expected behavior**
The project file should be written without any change.
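
Presumably the attached test.py amounts to something like the following (assumed from the traceback, not reproduced from the archive; the path is a placeholder):

```python
from pbxproj import XcodeProject

# load the project file and immediately write it back out unchanged
project = XcodeProject.load("project.pbxproj")
project.save()  # raises StopIteration on this particular project
```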
|
0.0
|
76ae75e6ee045410ef147a355e57e49ab23b77e2
|
[
"tests/pbxsections/TestXCConfigurationList.py::XCConfigurationListTest::testGetSectionOnLegacyTarget"
] |
[
"tests/pbxsections/TestXCConfigurationList.py::XCConfigurationListTest::testGetComment",
"tests/pbxsections/TestXCConfigurationList.py::XCConfigurationListTest::testGetSectionOnNativeTarget",
"tests/pbxsections/TestXCConfigurationList.py::XCConfigurationListTest::testGetSectionOnProject"
] |
{
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-07-31 18:05:22+00:00
|
mit
| 3,466 |
|
kutaslab__fitgrid-23
|
diff --git a/fitgrid/epochs.py b/fitgrid/epochs.py
index 1b09227..b14ad8a 100644
--- a/fitgrid/epochs.py
+++ b/fitgrid/epochs.py
@@ -41,12 +41,13 @@ class Epochs:
levels_to_remove = set(epochs_table.index.names)
levels_to_remove.discard(EPOCH_ID)
- # so we remove all levels from index except EPOCH_ID
- epochs_table.reset_index(list(levels_to_remove), inplace=True)
- assert epochs_table.index.names == [EPOCH_ID]
+ # copy since we are about to modify
+ self.table = epochs_table.copy()
+ # remove all levels from index except EPOCH_ID
+ self.table.reset_index(list(levels_to_remove), inplace=True)
+ assert self.table.index.names == [EPOCH_ID]
- self.table = epochs_table
- snapshots = epochs_table.groupby(TIME)
+ snapshots = self.table.groupby(TIME)
# check that snapshots across epochs have equal index by transitivity
prev_group = None
@@ -66,10 +67,13 @@ class Epochs:
if not prev_group.index.is_unique:
raise FitGridError(
f'Duplicate values in {EPOCH_ID} index not allowed:',
- tools.get_index_duplicates_table(epochs_table, EPOCH_ID),
+ tools.get_index_duplicates_table(self.table, EPOCH_ID),
)
- # we're good, set instance variable
+ self.table.reset_index(inplace=True)
+ self.table.set_index([EPOCH_ID, TIME], inplace=True)
+ assert self.table.index.names == [EPOCH_ID, TIME]
+
self.snapshots = snapshots
def lm(self, LHS='default', RHS=None):
|
kutaslab/fitgrid
|
1cba86280c45b7a5f1422621167aeee1952a2254
|
diff --git a/tests/test_epochs.py b/tests/test_epochs.py
index 736db4b..a92bf85 100644
--- a/tests/test_epochs.py
+++ b/tests/test_epochs.py
@@ -2,7 +2,8 @@ import pytest
import numpy as np
from .context import fitgrid
-from fitgrid import fake_data, epochs, errors
+from fitgrid import fake_data, errors
+from fitgrid.epochs import Epochs
def test_epochs_unequal_snapshots():
@@ -13,7 +14,7 @@ def test_epochs_unequal_snapshots():
epochs_table.drop(epochs_table.index[42], inplace=True)
with pytest.raises(errors.FitGridError) as error:
- epochs.Epochs(epochs_table)
+ Epochs(epochs_table)
assert 'differs from previous snapshot' in str(error.value)
@@ -34,6 +35,23 @@ def test__raises_error_on_epoch_index_mismatch():
# now time index is equal to row number in the table overall
with pytest.raises(errors.FitGridError) as error:
- epochs.Epochs(epochs_table)
+ Epochs(epochs_table)
assert 'differs from previous snapshot' in str(error.value)
+
+
+def test_multiple_indices_end_up_EPOCH_ID_and_TIME():
+
+ from fitgrid import EPOCH_ID, TIME
+
+ epochs_table = fake_data._generate(
+ n_epochs=10, n_samples=100, n_categories=2, n_channels=32
+ )
+ epochs_table.reset_index(inplace=True)
+ epochs_table.set_index([EPOCH_ID, TIME, 'categorical'], inplace=True)
+
+ epochs = Epochs(epochs_table)
+ # internal table has EPOCH_ID and TIME in index
+ assert epochs.table.index.names == [EPOCH_ID, TIME]
+ # input table is not altered
+ assert epochs_table.index.names == [EPOCH_ID, TIME, 'categorical']
|
Set epochs table index to EPOCH_ID and TIME during Epochs creation
In `__init__` we reset the index to keep only `EPOCH_ID`. After `snapshots` are created, set the index to `EPOCH_ID`, `TIME`. This is needed for plotting individual epochs.
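
A small pandas sketch of the indexing step described above; the `EPOCH_ID` and `TIME` strings stand in for the package's index-name constants:

```python
import pandas as pd

EPOCH_ID, TIME = "epoch_id", "time"

df = pd.DataFrame({
    EPOCH_ID: [0, 0, 1, 1],
    TIME: [0, 1, 0, 1],
    "channel": [0.1, 0.2, 0.3, 0.4],
})

# after the per-time snapshots are built, index on both levels
df = df.set_index([EPOCH_ID, TIME])
assert df.index.names == [EPOCH_ID, TIME]
```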
|
0.0
|
1cba86280c45b7a5f1422621167aeee1952a2254
|
[
"tests/test_epochs.py::test_multiple_indices_end_up_EPOCH_ID_and_TIME"
] |
[
"tests/test_epochs.py::test_epochs_unequal_snapshots"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-09-15 23:56:13+00:00
|
bsd-3-clause
| 3,467 |
|
kvesteri__intervals-54
|
diff --git a/CHANGES.rst b/CHANGES.rst
index ab8d128..41c7cdc 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -4,6 +4,12 @@ Changelog
Here you can see the full list of changes between each intervals release.
+0.9.1 (2020-12-31)
+^^^^^^^^^^^^^^^^^^
+
+- Fixed discrete interval length (#53)
+
+
0.9.0 (2020-07-16)
^^^^^^^^^^^^^^^^^^
diff --git a/intervals/__init__.py b/intervals/__init__.py
index 1636366..32d7508 100644
--- a/intervals/__init__.py
+++ b/intervals/__init__.py
@@ -32,4 +32,4 @@ __all__ = (
)
-__version__ = '0.9.0'
+__version__ = '0.9.1'
diff --git a/intervals/interval.py b/intervals/interval.py
index 81bb4b9..9d27a28 100644
--- a/intervals/interval.py
+++ b/intervals/interval.py
@@ -456,6 +456,11 @@ class AbstractInterval(object):
@property
def length(self):
+ if self.discrete:
+ if not self:
+ return 0
+ if not self.lower_inc or not self.upper_inc:
+ return canonicalize(self, lower_inc=True, upper_inc=True).length
return abs(self.upper - self.lower)
@property
|
kvesteri/intervals
|
f5fd76036ab02275dff79f025b7234fa942c01a6
|
diff --git a/tests/interval/test_properties.py b/tests/interval/test_properties.py
index d70a8e8..083c9a4 100644
--- a/tests/interval/test_properties.py
+++ b/tests/interval/test_properties.py
@@ -15,16 +15,20 @@ from intervals import (
class TestIntervalProperties(object):
@mark.parametrize(
- ('number_range', 'length'),
+ ('interval', 'length'),
(
- ([1, 4], 3),
- ([-1, 1], 2),
- ((-inf, inf), inf),
- ((1, inf), inf),
+ (IntInterval([1, 4]), 3),
+ (IntInterval([-1, 1]), 2),
+ (IntInterval([-inf, inf]), inf),
+ (IntInterval([1, inf]), inf),
+ (IntInterval.from_string('(0, 3)'), 1),
+ (IntInterval.from_string('[0, 3)'), 2),
+ (IntInterval.from_string('(0, 2)'), 0),
+ (IntInterval.from_string('(0, 1)'), 0)
)
)
- def test_length(self, number_range, length):
- assert IntInterval(number_range).length == length
+ def test_length(self, interval, length):
+ assert interval.length == length
@mark.parametrize(
('number_range', 'radius'),
|
length should return the actual length for discrete intervals
``IntInterval.from_string('[0, 2)').length`` should be `1`, not `2`.
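
A rough sketch of the idea behind such a fix: for a discrete (integer) interval, open bounds are first converted to their closed equivalents before measuring length. The helper below is illustrative, not the library's implementation:

```python
def discrete_length(lower, upper, lower_inc=True, upper_inc=True, step=1):
    """Length of a discrete interval after closing any open bounds."""
    if not lower_inc:
        lower += step   # (0, ...  becomes  [1, ...
    if not upper_inc:
        upper -= step   # ..., 2)  becomes  ..., 1]
    return max(upper - lower, 0)


assert discrete_length(0, 2, upper_inc=False) == 1                    # '[0, 2)' -> 1
assert discrete_length(0, 3, lower_inc=False, upper_inc=False) == 1   # '(0, 3)' -> 1
assert discrete_length(0, 1, lower_inc=False, upper_inc=False) == 0   # '(0, 1)' is empty
```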
|
0.0
|
f5fd76036ab02275dff79f025b7234fa942c01a6
|
[
"tests/interval/test_properties.py::TestIntervalProperties::test_length[interval4-1]",
"tests/interval/test_properties.py::TestIntervalProperties::test_length[interval5-2]",
"tests/interval/test_properties.py::TestIntervalProperties::test_length[interval6-0]",
"tests/interval/test_properties.py::TestIntervalProperties::test_length[interval7-0]"
] |
[
"tests/interval/test_properties.py::TestIntervalProperties::test_length[interval0-3]",
"tests/interval/test_properties.py::TestIntervalProperties::test_length[interval1-2]",
"tests/interval/test_properties.py::TestIntervalProperties::test_length[interval2-length2]",
"tests/interval/test_properties.py::TestIntervalProperties::test_length[interval3-length3]",
"tests/interval/test_properties.py::TestIntervalProperties::test_radius[number_range0-1.5]",
"tests/interval/test_properties.py::TestIntervalProperties::test_radius[number_range1-1.0]",
"tests/interval/test_properties.py::TestIntervalProperties::test_radius[number_range2-1.5]",
"tests/interval/test_properties.py::TestIntervalProperties::test_radius[number_range3-radius3]",
"tests/interval/test_properties.py::TestIntervalProperties::test_radius[number_range4-radius4]",
"tests/interval/test_properties.py::TestIntervalProperties::test_centre[number_range0-2.5]",
"tests/interval/test_properties.py::TestIntervalProperties::test_centre[number_range1-0]",
"tests/interval/test_properties.py::TestIntervalProperties::test_centre[number_range2--2.5]",
"tests/interval/test_properties.py::TestIntervalProperties::test_centre[number_range3-centre3]",
"tests/interval/test_properties.py::TestIntervalProperties::test_is_open[interval0-True]",
"tests/interval/test_properties.py::TestIntervalProperties::test_is_open[interval1-True]",
"tests/interval/test_properties.py::TestIntervalProperties::test_is_open[interval2-False]",
"tests/interval/test_properties.py::TestIntervalProperties::test_is_open[interval3-False]",
"tests/interval/test_properties.py::TestIntervalProperties::test_is_open[interval4-False]",
"tests/interval/test_properties.py::TestIntervalProperties::test_is_open[interval5-False]",
"tests/interval/test_properties.py::TestIntervalProperties::test_is_open[interval6-False]",
"tests/interval/test_properties.py::TestIntervalProperties::test_closed[interval0-False]",
"tests/interval/test_properties.py::TestIntervalProperties::test_closed[interval1-False]",
"tests/interval/test_properties.py::TestIntervalProperties::test_closed[interval2-False]",
"tests/interval/test_properties.py::TestIntervalProperties::test_closed[interval3-False]",
"tests/interval/test_properties.py::TestIntervalProperties::test_closed[interval4-True]",
"tests/interval/test_properties.py::TestIntervalProperties::test_closed[interval5-True]",
"tests/interval/test_properties.py::TestIntervalProperties::test_closed[interval6-True]",
"tests/interval/test_properties.py::TestIntervalProperties::test_empty[interval0-True]",
"tests/interval/test_properties.py::TestIntervalProperties::test_empty[interval1-False]",
"tests/interval/test_properties.py::TestIntervalProperties::test_empty[interval2-False]",
"tests/interval/test_properties.py::TestIntervalProperties::test_empty[interval3-True]",
"tests/interval/test_properties.py::TestIntervalProperties::test_empty[interval4-True]",
"tests/interval/test_properties.py::TestIntervalProperties::test_empty[interval5-False]",
"tests/interval/test_properties.py::TestIntervalProperties::test_empty[interval6-False]",
"tests/interval/test_properties.py::TestIntervalProperties::test_degenerate[interval0-False]",
"tests/interval/test_properties.py::TestIntervalProperties::test_degenerate[interval1-True]",
"tests/interval/test_properties.py::TestIntervalProperties::test_degenerate[interval2-True]",
"tests/interval/test_properties.py::TestIntervalProperties::test_discrete[interval0-True]",
"tests/interval/test_properties.py::TestIntervalProperties::test_discrete[interval1-True]",
"tests/interval/test_properties.py::TestIntervalProperties::test_discrete[interval2-False]",
"tests/interval/test_properties.py::TestIntervalProperties::test_discrete[interval3-False]",
"tests/interval/test_properties.py::TestIntervalProperties::test_discrete[interval4-False]",
"tests/interval/test_properties.py::TestIntervalProperties::test_discrete[interval5-True]"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-12-31 11:50:27+00:00
|
bsd-3-clause
| 3,468 |
|
kwzrd__pypopulation-16
|
diff --git a/README.md b/README.md
index b24bc4a..0fe7e4c 100644
--- a/README.md
+++ b/README.md
@@ -12,7 +12,7 @@ Lightweight population lookup using [ISO 3166](https://en.wikipedia.org/wiki/ISO
83132799
```
-The aim is to provide a minimalist package with no dependencies that does one thing only, as best as possible. Population figures are read from a JSON file into Python dictionaries on the first lookup, _not_ at import time. The API then only exposes the dictionaries.
+The aim is to provide a minimalist package with no dependencies that does one thing only, as best as possible. Population figures are read from a JSON file into Python dictionaries at import time. The API then only exposes the dictionaries.
**The given figures are estimates at best.** Read below for more details on the data source.
@@ -32,8 +32,6 @@ If you would like to build your own wrapper around the source JSON, you can do:
countries: t.List[t.Dict] = pypopulation._load_file()
```
-**Note**: This function is wrapped in `functools.lru_cache(max_size=1)`.
-
## Installation
With `pip` from [PyPI](https://pypi.org/):
diff --git a/pypopulation/implementation.py b/pypopulation/implementation.py
index 7f514f1..5f82fb8 100644
--- a/pypopulation/implementation.py
+++ b/pypopulation/implementation.py
@@ -1,4 +1,3 @@
-import functools
import json
import typing as t
from pathlib import Path
@@ -8,14 +7,12 @@ DATAFILE = Path(__file__).parent.joinpath("resources", "countries.json")
PopulationMap = t.Dict[str, int] # From country code to its population
[email protected]_cache(maxsize=1)
def _load_file() -> t.List[t.Dict]:
"""Load `DATAFILE` into a Python list object."""
with DATAFILE.open(mode="r", encoding="UTF-8") as datafile:
return json.load(datafile)
[email protected]_cache(maxsize=1)
def _initialize() -> t.Tuple[PopulationMap, PopulationMap]:
"""Init Alpha-2 and Alpha-3 maps from `DATAFILE`."""
country_list = _load_file()
@@ -31,6 +28,12 @@ def _initialize() -> t.Tuple[PopulationMap, PopulationMap]:
return alpha_2, alpha_3
+# The runtime maps get initialized the first time this module is imported,
+# which means that there is no overhead once a lookup is made, however it
+# slightly increases the cost of initial import
+_a2_map, _a3_map = _initialize()
+
+
def _normalize(country_code: str) -> str:
"""Normalize `country_code` casing."""
return country_code.upper()
@@ -51,8 +54,7 @@ def get_population_a2(country_code: str) -> t.Optional[int]:
None if `country_code` does not exist in the map.
"""
- a2_map, _ = _initialize()
- return a2_map.get(_normalize(country_code))
+ return _a2_map.get(_normalize(country_code))
def get_population_a3(country_code: str) -> t.Optional[int]:
@@ -61,5 +63,4 @@ def get_population_a3(country_code: str) -> t.Optional[int]:
None if `country_code` does not exist in the map.
"""
- _, a3_map = _initialize()
- return a3_map.get(_normalize(country_code))
+ return _a3_map.get(_normalize(country_code))
|
kwzrd/pypopulation
|
e082081a463ba2330a8779b1a0daeac7f1fef6ca
|
diff --git a/tests/test_implementation.py b/tests/test_implementation.py
index 9d2feec..913c02f 100644
--- a/tests/test_implementation.py
+++ b/tests/test_implementation.py
@@ -1,154 +1,91 @@
-import typing as t
import unittest
-from unittest.mock import MagicMock, patch
+from unittest.mock import patch
from pypopulation import implementation as imp
-mock_a2_map = {"AA": 1}
-mock_a3_map = {"BBB": 2}
-mock_initialize = MagicMock(return_value=(mock_a2_map, mock_a3_map))
+class TestImplementationHelpers(unittest.TestCase):
+ """
+ Test private helpers in the implementation module.
-
-class TestImplementation(unittest.TestCase):
- """Test the implementation module."""
-
- def setUp(self) -> None:
- """
- Clear LRU caches.
-
- Ensure that nothing is cached before each test.
- """
- imp._load_file.cache_clear()
- imp._initialize.cache_clear()
-
- # region: resource file
+ In short, this class tests private methods using the production resources file,
+ not mocked data. This therefore also makes weak assertions about the resources.
+ """
def test_file_exists(self):
- """Resource file exists and lives where expected."""
+ """Resource path exists and leads to a file."""
self.assertTrue(imp.DATAFILE.exists())
self.assertTrue(imp.DATAFILE.is_file())
- def test_file_loads(self):
- """Resource file is valid JSON and loads into a Python list."""
- obj = imp._load_file()
- self.assertIsInstance(obj, list)
+ def test_load_file(self):
+ """Resource file loads into a Python list of dicts."""
+ loaded = imp._load_file()
+ self.assertIsInstance(loaded, list)
+ for expected_dict in loaded:
+ self.assertIsInstance(expected_dict, dict)
- def test_file_is_cached(self):
- """Resource file is cached after first load."""
- obj_a = imp._load_file()
- obj_b = imp._load_file()
- self.assertIs(obj_a, obj_b)
-
- # endregion
- # region: map initialization
-
- def test_map_init(self):
- """Country maps initialize from the resource file."""
+ def test_initialize(self):
+ """Initialize produces two mappings as expected."""
a2_map, a3_map = imp._initialize()
- self.assertIsInstance(a2_map, dict)
- self.assertIsInstance(a3_map, dict)
-
- def test_map_is_cached(self):
- """Country maps are cached and do not re-build on re-query."""
- a2_map_a, a3_map_a = imp._initialize()
- a2_map_b, a3_map_b = imp._initialize()
- self.assertIs(a2_map_a, a2_map_b)
- self.assertIs(a3_map_a, a3_map_b)
- # endregion
- # region: lookup methodology (on mock data)
+ def check(dct, key_length):
+ """Perform a series of checks on `dct`."""
+ for key, value in dct.items():
+ self.assertIsInstance(key, str) # keys must be strings
+ self.assertIsInstance(value, int) # Values must be integers
+ self.assertEqual(len(key), key_length) # Keys must be exactly `key_length` long
+ self.assertTrue(key.isupper()) # Keys must be strictly upper-cased
- def check_pairs(self, pairs: t.Iterable[t.Tuple], func: t.Callable):
- """Run `pairs` of input, expected output and compare them against `func` result."""
- for code, expected_population in pairs:
- with self.subTest(code=code, expected_population=expected_population):
- self.assertEqual(expected_population, func(code))
+ check(a2_map, 2)
+ check(a3_map, 3)
def test_normalize(self):
- """The `_normalize_` functions makes all strings uppercase."""
- pairs = [("", ""), (" ", " "), ("a", "A"), ("A", "A"), ("aBc", "ABC")]
- self.check_pairs(pairs, imp._normalize)
-
- def test_general_lookup_not_a_country(self):
- """Check both maps and return None when queried code does not exist."""
- patch_a2 = patch("pypopulation.implementation.get_population_a2", MagicMock(return_value=None))
- patch_a3 = patch("pypopulation.implementation.get_population_a3", MagicMock(return_value=None))
-
- with patch_a2 as mock_a2, patch_a3 as mock_a3:
- out_value = imp.get_population("not_a_country")
-
- self.assertIsNone(out_value)
- mock_a2.assert_called_once_with("not_a_country")
- mock_a3.assert_called_once_with("not_a_country")
-
- def test_general_lookup_ask_a2_a3(self):
- """Check both maps and return population when queried code exists in a3 map."""
- patch_a2 = patch("pypopulation.implementation.get_population_a2", MagicMock(return_value=None))
- patch_a3 = patch("pypopulation.implementation.get_population_a3", MagicMock(return_value=1234))
-
- with patch_a2 as mock_a2, patch_a3 as mock_a3:
- out_value = imp.get_population("alpha_3")
-
- self.assertEqual(out_value, 1234)
- mock_a2.assert_called_once_with("alpha_3")
- mock_a3.assert_called_once_with("alpha_3")
-
- def test_general_lookup_ask_a2_only(self):
- """Check first map only and return population when queried code exists in a2 map."""
- patch_a2 = patch("pypopulation.implementation.get_population_a2", MagicMock(return_value=1234))
- patch_a3 = patch("pypopulation.implementation.get_population_a3", MagicMock(return_value=5678))
-
- with patch_a2 as mock_a2, patch_a3 as mock_a3:
- out_value = imp.get_population("alpha_2")
-
- self.assertEqual(out_value, 1234)
- mock_a2.assert_called_once_with("alpha_2")
- mock_a3.assert_not_called() # In this case, the A3 map is never called
-
- @patch("pypopulation.implementation._initialize", mock_initialize)
- def test_alpha_2_lookup(self):
- """Find populations for 'AA' but not 'BBB' using `get_population_a2`."""
- none_pairs = [(code, None) for code in ("", "a", "A", "b", "B")]
- good_pairs = [(code, 1) for code in ("aa", "aA", "AA")]
- self.check_pairs(none_pairs + good_pairs, imp.get_population_a2)
-
- @patch("pypopulation.implementation._initialize", mock_initialize)
- def test_alpha_3_lookup(self):
- """Find populations for 'BBB' but not 'AA' using `get_population_a3`."""
- none_pairs = [(code, None) for code in ("", "a", "A", "b", "B")]
- good_pairs = [(code, 2) for code in ("bbb", "bbB", "BBB")]
- self.check_pairs(none_pairs + good_pairs, imp.get_population_a3)
-
- # endregion
- # region: lookups on the actual data
-
- def check_type(self, codes: t.Iterable[str], type_: t.Any, func: t.Callable):
- """Check that all `codes` produce an instance of `type_` when passed to `func`."""
- for country_code in codes:
- with self.subTest(country_code=country_code, expected_type=type_):
- self.assertIsInstance(func(country_code), type_)
-
- def check_values(self, codes: t.Iterable[str], func: t.Callable):
- """Check that all `codes` produce the same result when passed to `func`."""
- values = set(func(code) for code in codes)
- self.assertEqual(len(values), 1)
-
- def test_germany(self):
- """The same integer is returned for alternatives of Germany's country code."""
- a2_codes = ["de", "DE", "dE"]
- self.check_type(a2_codes, int, imp.get_population_a2)
- self.check_values(a2_codes, imp.get_population_a2)
-
- a3_codes = ["deu", "DEU", "dEu"]
- self.check_type(a3_codes, int, imp.get_population_a3)
- self.check_values(a3_codes, imp.get_population_a3)
-
- all_codes = a2_codes + a3_codes
- self.check_type(all_codes, int, imp.get_population)
- self.check_values(all_codes, imp.get_population)
-
- def test_non_country(self):
- """None is returned for non-existing countries."""
- non_country_codes = "abcde", "", "e"
- self.check_type(non_country_codes, type(None), imp.get_population)
+ """Normalization returns uppercase strings."""
+ cases = [
+ ("", ""),
+ (" ", " "),
+ ("a", "A"),
+ ("1a", "1A"),
+ ("aAa", "AAA"),
+ ]
+ for before, after in cases:
+ self.assertEqual(imp._normalize(before), after)
+
+
+@patch("pypopulation.implementation._a2_map", {"AA": 1})
+@patch("pypopulation.implementation._a3_map", {"BBB": 2})
+class TestImplementationLookups(unittest.TestCase):
+ """
+ Test public API lookup methods against mocked data.
+
+ This class contains test for the public functions that expose the internal data.
+ All cases are ran against mocked data. These tests are completely disconnected
+ from the resource file that is used in production.
+ """
+
+ def test_get_population(self):
+ """Get population fetches population for both A2 and A3 codes."""
+ self.assertEqual(imp.get_population("AA"), 1)
+ self.assertEqual(imp.get_population("aa"), 1)
+ self.assertEqual(imp.get_population("BBB"), 2)
+ self.assertEqual(imp.get_population("bbb"), 2)
+ self.assertEqual(imp.get_population("CCC"), None)
+ self.assertEqual(imp.get_population("ccc"), None)
+
+ def test_get_population_a2(self):
+ """Get population A2 fetches population A2 codes only."""
+ self.assertEqual(imp.get_population_a2("AA"), 1)
+ self.assertEqual(imp.get_population_a2("aa"), 1)
+ self.assertEqual(imp.get_population_a2("BBB"), None)
+ self.assertEqual(imp.get_population_a2("bbb"), None)
+ self.assertEqual(imp.get_population_a2("CCC"), None)
+ self.assertEqual(imp.get_population_a2("ccc"), None)
+
+ def test_get_population_a3(self):
+ """Get population A2 fetches population A2 codes only."""
+ self.assertEqual(imp.get_population_a3("AA"), None)
+ self.assertEqual(imp.get_population_a3("aa"), None)
+ self.assertEqual(imp.get_population_a3("BBB"), 2)
+ self.assertEqual(imp.get_population_a3("bbb"), 2)
+ self.assertEqual(imp.get_population_a3("CCC"), None)
+ self.assertEqual(imp.get_population_a3("ccc"), None)
|
Load resource file at import time
The current methodology is to delay loading the resource file until the first lookup is made, and then to cache the loaded map using a 1-size LRU cache. This was done in an effort to make the import faster and to avoid the price of loading the file should the module be imported but never used. However, it adds some unnecessary complexity to both the implementation and the tests, and the advantages are debatable at best. Let's load the file at import time instead.
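A minimal sketch of the proposed approach, assuming a JSON resource of per-country records (the `DATAFILE`, `_load_file`, `_initialize`, `_a2_map` and `_a3_map` names follow the patch above; the record field names here are illustrative assumptions): the maps are built once at module import, so the lookup functions read plain module-level dicts and no `lru_cache` is needed.

```python
# Hedged sketch of import-time loading; the field names "Alpha_2", "Alpha_3"
# and "Population" are assumptions, not necessarily the real resource schema.
import json
import pathlib
import typing as t

DATAFILE = pathlib.Path(__file__).parent / "resources" / "countries.json"


def _load_file() -> t.List[dict]:
    """Read the bundled country records from disk."""
    with DATAFILE.open(encoding="utf-8") as file:
        return json.load(file)


def _initialize() -> t.Tuple[t.Dict[str, int], t.Dict[str, int]]:
    """Build the alpha-2 and alpha-3 population maps from the raw records."""
    a2_map, a3_map = {}, {}
    for record in _load_file():
        a2_map[record["Alpha_2"]] = record["Population"]
        a3_map[record["Alpha_3"]] = record["Population"]
    return a2_map, a3_map


# Runs once at import; lookups such as get_population_a3() then just call
# _a3_map.get(_normalize(country_code)) with no caching machinery involved.
_a2_map, _a3_map = _initialize()
```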
|
0.0
|
e082081a463ba2330a8779b1a0daeac7f1fef6ca
|
[
"tests/test_implementation.py::TestImplementationLookups::test_get_population",
"tests/test_implementation.py::TestImplementationLookups::test_get_population_a2",
"tests/test_implementation.py::TestImplementationLookups::test_get_population_a3"
] |
[
"tests/test_implementation.py::TestImplementationHelpers::test_file_exists",
"tests/test_implementation.py::TestImplementationHelpers::test_initialize",
"tests/test_implementation.py::TestImplementationHelpers::test_load_file",
"tests/test_implementation.py::TestImplementationHelpers::test_normalize"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-10-11 12:25:26+00:00
|
mit
| 3,469 |
|
kyan001__ping3-39
|
diff --git a/UPDATES.md b/UPDATES.md
index f51a2bf..71722b1 100644
--- a/UPDATES.md
+++ b/UPDATES.md
@@ -1,4 +1,6 @@
# UPDATES
+* 2.9.2:
+ * Converted to a proper package
* 2.9.1:
* Bug Fix: macOS is not treated as Linux now.
* 2.9.0:
diff --git a/ping3.py b/ping3/__init__.py
similarity index 98%
rename from ping3.py
rename to ping3/__init__.py
index 95cb529..1b15ba9 100644
--- a/ping3.py
+++ b/ping3/__init__.py
@@ -12,8 +12,8 @@ import logging
import functools
import errno
-import errors
-from enums import ICMP_DEFAULT_CODE, IcmpType, IcmpTimeExceededCode, IcmpDestinationUnreachableCode
+from . import errors
+from .enums import ICMP_DEFAULT_CODE, IcmpType, IcmpTimeExceededCode, IcmpDestinationUnreachableCode
__version__ = "2.9.1"
DEBUG = False # DEBUG: Show debug info for developers. (default False)
@@ -349,7 +349,3 @@ def verbose_ping(dest_addr: str, count: int = 4, interval: float = 0, *args, **k
print("{value}{unit}".format(value=int(delay), unit=unit))
i += 1
-
-if __name__ == "__main__":
- import command_line_ping3
- command_line_ping3.main()
diff --git a/command_line_ping3.py b/ping3/command_line.py
similarity index 98%
rename from command_line_ping3.py
rename to ping3/command_line.py
index e48bba6..8243653 100644
--- a/command_line_ping3.py
+++ b/ping3/command_line.py
@@ -30,3 +30,6 @@ def main(assigned_args: list = None):
for addr in args.dest_addr:
ping3.verbose_ping(addr, count=args.count, ttl=args.ttl, timeout=args.timeout, size=args.size, interval=args.interval, interface=args.interface)
+
+if __name__ == "__main__":
+ main()
diff --git a/enums.py b/ping3/enums.py
similarity index 100%
rename from enums.py
rename to ping3/enums.py
diff --git a/errors.py b/ping3/errors.py
similarity index 100%
rename from errors.py
rename to ping3/errors.py
diff --git a/setup.py b/setup.py
index f50f621..03c6aae 100644
--- a/setup.py
+++ b/setup.py
@@ -21,8 +21,7 @@ setuptools.setup(
'Programming Language :: Python :: 3',
],
keywords='python3 ping icmp socket tool',
- packages=setuptools.find_packages(exclude=['contrib', 'docs', 'tests']),
- py_modules=["ping3", "command_line_ping3", "errors", "enums"],
+ packages=["ping3"],
python_requires='>=3',
install_requires=[],
extras_require={
@@ -31,6 +30,6 @@ setuptools.setup(
package_data={},
data_files=[],
entry_points={
- 'console_scripts': ['ping3=command_line_ping3:main'],
+ 'console_scripts': ['ping3=ping3.command_line:main'],
},
)
|
kyan001/ping3
|
741874246c509051ed9c1d11e64397bb93661d55
|
diff --git a/tests/test_command_line_ping3.py b/tests/test_command_line.py
similarity index 76%
rename from tests/test_command_line_ping3.py
rename to tests/test_command_line.py
index 267d0d6..48bd53c 100644
--- a/tests/test_command_line_ping3.py
+++ b/tests/test_command_line.py
@@ -7,8 +7,8 @@ import socket
from unittest.mock import patch
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-import command_line_ping3 # noqa: linter (pycodestyle) should not lint this line.
-import errors # noqa: linter (pycodestyle) should not lint this line.
+from ping3 import command_line # noqa: linter (pycodestyle) should not lint this line.
+from ping3 import errors # noqa: linter (pycodestyle) should not lint this line.
class test_ping3(unittest.TestCase):
@@ -22,46 +22,46 @@ class test_ping3(unittest.TestCase):
def test_dest_addr_0(self):
with patch("sys.stdout", new=io.StringIO()) as fake_out:
- command_line_ping3.main()
+ command_line.main()
self.assertRegex(fake_out.getvalue(), r".*[0-9]+ms.*")
def test_dest_addr_1(self):
with patch("sys.stdout", new=io.StringIO()) as fake_out:
- command_line_ping3.main(["127.0.0.1"])
+ command_line.main(["127.0.0.1"])
self.assertTrue("127.0.0.1" in fake_out.getvalue())
def test_dest_addr_2(self):
with patch("sys.stdout", new=io.StringIO()) as fake_out:
- command_line_ping3.main(["127.0.0.1", "8.8.8.8"])
+ command_line.main(["127.0.0.1", "8.8.8.8"])
self.assertTrue("127.0.0.1" in fake_out.getvalue())
self.assertTrue("8.8.8.8" in fake_out.getvalue())
def test_count(self):
with patch("sys.stdout", new=io.StringIO()) as fake_out:
- command_line_ping3.main(['-c', '1', 'example.com'])
+ command_line.main(['-c', '1', 'example.com'])
self.assertEqual(fake_out.getvalue().count("\n"), 1)
def test_timeout(self):
with patch("sys.stdout", new=io.StringIO()) as fake_out:
- command_line_ping3.main(['-w', '0.0001', 'example.com'])
+ command_line.main(['-w', '0.0001', 'example.com'])
self.assertRegex(fake_out.getvalue(), r".*Timeout \> [0-9\.]+s.*")
def test_ttl(self):
with patch("sys.stdout", new=io.StringIO()) as fake_out:
- command_line_ping3.main(['-t', '1', 'example.com'])
+ command_line.main(['-t', '1', 'example.com'])
self.assertRegex(fake_out.getvalue(), r".*Timeout.*")
def test_size(self):
with patch("sys.stdout", new=io.StringIO()) as fake_out:
- command_line_ping3.main(['-l', '100', 'example.com'])
+ command_line.main(['-l', '100', 'example.com'])
self.assertRegex(fake_out.getvalue(), r".*[0-9]+ms.*")
with self.assertRaises(OSError):
- command_line_ping3.main(['-l', '99999', 'example.com'])
+ command_line.main(['-l', '99999', 'example.com'])
def test_interval(self):
with patch("sys.stdout", new=io.StringIO()) as fake_out:
start_time = time.time()
- command_line_ping3.main(['-i', '1.7', 'example.com'])
+ command_line.main(['-i', '1.7', 'example.com'])
end_time = time.time()
self.assertTrue((end_time - start_time) >= 5.1) # time_expect = (count - 1) * interval
self.assertFalse('Timeout' in fake_out.getvalue())
@@ -79,18 +79,18 @@ class test_ping3(unittest.TestCase):
socket.if_nametoindex(my_interface) # test if the interface exists.
except OSError:
self.fail('Interface Name Error: {}'.format(my_interface))
- command_line_ping3.main(['-I', my_interface, 'example.com'])
+ command_line.main(['-I', my_interface, 'example.com'])
self.assertRegex(fake_out.getvalue(), r".*[0-9]+ms.*")
def test_debug(self):
with patch("sys.stdout", new=io.StringIO()), patch("sys.stderr", new=io.StringIO()) as fake_err:
- command_line_ping3.main(['--debug', '-c', '1', 'example.com'])
+ command_line.main(['--debug', '-c', '1', 'example.com'])
self.assertIn("[DEBUG]", fake_err.getvalue())
def test_exceptions(self):
with patch("sys.stdout", new=io.StringIO()) as fake_out:
with self.assertRaises(errors.Timeout):
- command_line_ping3.main(['--exceptions', '-w', '0.0001', 'example.com'])
+ command_line.main(['--exceptions', '-w', '0.0001', 'example.com'])
if __name__ == "__main__":
diff --git a/tests/test_ping3.py b/tests/test_ping3.py
index a1cfed4..b9f648c 100644
--- a/tests/test_ping3.py
+++ b/tests/test_ping3.py
@@ -8,7 +8,7 @@ import socket
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import ping3 # noqa: linter (pycodestyle) should not lint this line.
-import errors # noqa: linter (pycodestyle) should not lint this line.
+from ping3 import errors # noqa: linter (pycodestyle) should not lint this line.
class test_ping3(unittest.TestCase):
|
Packaging
This should be a proper package. Top-level "errors" and "enums" modules make no sense whatsoever.
|
0.0
|
741874246c509051ed9c1d11e64397bb93661d55
|
[
"tests/test_command_line.py::test_ping3::test_count",
"tests/test_command_line.py::test_ping3::test_debug",
"tests/test_command_line.py::test_ping3::test_dest_addr_1",
"tests/test_command_line.py::test_ping3::test_dest_addr_2",
"tests/test_command_line.py::test_ping3::test_exceptions",
"tests/test_command_line.py::test_ping3::test_interval",
"tests/test_command_line.py::test_ping3::test_size",
"tests/test_command_line.py::test_ping3::test_timeout",
"tests/test_command_line.py::test_ping3::test_ttl",
"tests/test_ping3.py::test_ping3::test_DEBUG",
"tests/test_ping3.py::test_ping3::test_ping_bind",
"tests/test_ping3.py::test_ping3::test_ping_hostunknown",
"tests/test_ping3.py::test_ping3::test_ping_hostunknown_exception",
"tests/test_ping3.py::test_ping3::test_ping_normal",
"tests/test_ping3.py::test_ping3::test_ping_seq",
"tests/test_ping3.py::test_ping3::test_ping_size",
"tests/test_ping3.py::test_ping3::test_ping_timeout",
"tests/test_ping3.py::test_ping3::test_ping_timeout_exception",
"tests/test_ping3.py::test_ping3::test_ping_ttl",
"tests/test_ping3.py::test_ping3::test_ping_ttl_exception",
"tests/test_ping3.py::test_ping3::test_ping_unit",
"tests/test_ping3.py::test_ping3::test_verbose_ping_bind",
"tests/test_ping3.py::test_ping3::test_verbose_ping_count",
"tests/test_ping3.py::test_ping3::test_verbose_ping_interval",
"tests/test_ping3.py::test_ping3::test_verbose_ping_normal",
"tests/test_ping3.py::test_ping3::test_verbose_ping_size",
"tests/test_ping3.py::test_ping3::test_verbose_ping_timeout",
"tests/test_ping3.py::test_ping3::test_verbose_ping_timeout_exception",
"tests/test_ping3.py::test_ping3::test_verbose_ping_ttl",
"tests/test_ping3.py::test_ping3::test_verbose_ping_ttl_exception",
"tests/test_ping3.py::test_ping3::test_verbose_ping_unit",
"tests/test_ping3.py::test_ping3::test_version"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-07-20 17:10:09+00:00
|
mit
| 3,470 |
|
kylef__goji-19
|
diff --git a/README.md b/README.md
index fa3f47d..a617e78 100644
--- a/README.md
+++ b/README.md
@@ -9,6 +9,17 @@ goji is a minimal command line client for JIRA.
## Usage
+### login
+
+Authenticate with a JIRA server.
+
+```bash
+$ goji login
+
+Email: [email protected]
+Password:
+```
+
### show
Show detailed information about an issue.
diff --git a/goji/auth.py b/goji/auth.py
new file mode 100644
index 0000000..c764a88
--- /dev/null
+++ b/goji/auth.py
@@ -0,0 +1,45 @@
+from netrc import netrc
+from os import path, chmod
+import re
+from stat import S_IRUSR, S_IWUSR
+from textwrap import dedent
+
+from requests.compat import urlparse
+
+
+def get_credentials(base_url):
+ hostname = urlparse(base_url).hostname
+ try:
+ hosts = netrc().hosts
+ if hostname in hosts:
+ return (hosts[hostname][0], hosts[hostname][2])
+ except:
+ pass
+
+ return (None, None)
+
+
+def set_credentials(base_url, email, password):
+ hostname = urlparse(base_url).hostname
+ filepath = path.expanduser('~/.netrc')
+ if path.isfile(filepath):
+ rcfile = open(filepath)
+ contents = rcfile.read()
+ rcfile.close()
+ pattern = r'machine {}\n(\s+(login|password).*)+\n?'
+ matcher = re.compile(pattern.format(re.escape(hostname)), re.MULTILINE)
+ contents = matcher.sub('', contents)
+ with open(filepath, 'w') as rcfile:
+ rcfile.write(contents)
+ rcfile.write(dedent("""\n\
+ machine {}
+ login {}
+ password {}""").format(hostname, email, password))
+
+ else:
+ with open(filepath, 'w') as rcfile:
+ rcfile.write(dedent("""\
+ machine {}
+ login {}
+ password {}""").format(hostname, email, password))
+ chmod(filepath, S_IWUSR | S_IRUSR)
diff --git a/goji/client.py b/goji/client.py
index 0ba61e8..6733ac3 100644
--- a/goji/client.py
+++ b/goji/client.py
@@ -1,24 +1,22 @@
-from netrc import netrc
import json
import requests
-from requests.compat import urljoin, urlparse
+from requests.compat import urljoin
from goji.models import Issue
+from goji.auth import get_credentials
class JIRAClient(object):
def __init__(self, base_url):
- self.base_url = base_url
- self.rest_base_url = urljoin(self.base_url, '/rest/api/2/')
+ email, password = get_credentials(base_url)
- hostname = urlparse(self.base_url).hostname
- hosts = netrc().hosts
-
- if hostname in hosts:
- self.auth = (hosts[hostname][0], hosts[hostname][2])
+ if email is not None and password is not None:
+ self.auth = (email, password)
+ self.base_url = base_url
+ self.rest_base_url = urljoin(self.base_url, '/rest/api/2/')
else:
- print('== Hostname %s not found in .netrc.' % hostname)
+ print('== Authentication not configured. Run `goji login`')
exit()
@property
diff --git a/goji/commands.py b/goji/commands.py
index 672df40..047d0c8 100644
--- a/goji/commands.py
+++ b/goji/commands.py
@@ -1,9 +1,8 @@
-from os import environ
-
import click
from requests.compat import urljoin
from goji.client import JIRAClient
+from goji.auth import get_credentials, set_credentials
@click.group()
@@ -11,13 +10,16 @@ from goji.client import JIRAClient
@click.pass_context
def cli(ctx, base_url):
if not ctx.obj:
- ctx.obj = JIRAClient(base_url)
+ if ctx.invoked_subcommand == 'login':
+ ctx.obj = base_url
+ else:
+ ctx.obj = JIRAClient(base_url)
@click.argument('issue_key')
[email protected]()
[email protected]('open')
@click.pass_obj
-def open(client, issue_key):
+def open_command(client, issue_key):
"""Open issue in a web browser"""
url = urljoin(client.base_url, 'browse/%s' % issue_key)
click.launch(url)
@@ -111,6 +113,23 @@ def edit(client, issue_key):
print(description)
[email protected]()
[email protected]_obj
+def login(base_url):
+ """Authenticate with JIRA server"""
+ email, password = get_credentials(base_url)
+ if email is not None:
+ if not click.confirm('This server is already configured. Override?'):
+ return
+
+ click.echo('Enter your JIRA credentials')
+
+ email = click.prompt('Email', type=str)
+ password = click.prompt('Password', type=str, hide_input=True)
+
+ set_credentials(base_url, email, password)
+
+
@click.argument('query')
@cli.command()
@click.pass_obj
|
kylef/goji
|
4846242eef679b70ae00ae8ffb55daf46f50cd07
|
diff --git a/tests/test_auth.py b/tests/test_auth.py
new file mode 100644
index 0000000..712b22e
--- /dev/null
+++ b/tests/test_auth.py
@@ -0,0 +1,97 @@
+import unittest
+import os
+from stat import S_IRUSR, S_IWUSR
+from textwrap import dedent
+
+from click.testing import CliRunner
+
+from goji.auth import get_credentials, set_credentials
+
+
+class AuthTests(unittest.TestCase):
+ def test_empty_get_credentials(self):
+ base_url = 'https://goji.example.com/'
+ runner = CliRunner()
+ with runner.isolated_filesystem():
+ os.environ['HOME'] = './'
+ login, password = get_credentials(base_url)
+ self.assertIsNone(login)
+ self.assertIsNone(password)
+
+ def test_preset_get_credentials(self):
+ base_url = 'https://goji.example.com/'
+ runner = CliRunner()
+ with runner.isolated_filesystem():
+ os.environ['HOME'] = './'
+ with open('.netrc', 'w') as rcfile:
+ rcfile.write(dedent("""\
+ machine goji.example.com
+ login delisa
+ password foober_1-"""))
+ os.chmod('.netrc', S_IWUSR | S_IRUSR)
+ login, password = get_credentials(base_url)
+ self.assertEqual(login, 'delisa')
+ self.assertEqual(password, 'foober_1-')
+
+ def test_new_set_credentials(self):
+ base_url = 'https://goji.example.com/'
+ runner = CliRunner()
+ with runner.isolated_filesystem():
+ os.environ['HOME'] = './'
+ with open('.netrc', 'w') as rcfile:
+ rcfile.write(dedent("""\
+ machine goji2.example.com
+ login delisa
+ password foober_1-"""))
+ os.chmod('.netrc', S_IWUSR | S_IRUSR)
+ set_credentials(base_url, 'kylef', '39481-a')
+ with open('.netrc', 'r') as rcfile:
+ self.assertEqual(dedent("""\
+ machine goji2.example.com
+ login delisa
+ password foober_1-
+ machine goji.example.com
+ login kylef
+ password 39481-a"""), rcfile.read())
+
+ def test_override_set_credentials(self):
+ base_url = 'https://goji.example.com/'
+ runner = CliRunner()
+ with runner.isolated_filesystem():
+ os.environ['HOME'] = './'
+ with open('.netrc', 'w') as rcfile:
+ rcfile.write(dedent("""\
+ machine goji3.example.com
+ login df
+ password mypassword
+ machine goji.example.com
+ login delisa
+ password foobar+1
+ machine goji2.example.com
+ login delisa
+ password foober_1-"""))
+ os.chmod('.netrc', S_IWUSR | S_IRUSR)
+ set_credentials(base_url, 'kylef', '39481-a')
+ with open('.netrc', 'r') as rcfile:
+ self.assertEqual(dedent("""\
+ machine goji3.example.com
+ login df
+ password mypassword
+ machine goji2.example.com
+ login delisa
+ password foober_1-
+ machine goji.example.com
+ login kylef
+ password 39481-a"""), rcfile.read())
+
+ def test_create_file_set_credentials(self):
+ base_url = 'https://goji.example.com/'
+ runner = CliRunner()
+ with runner.isolated_filesystem():
+ os.environ['HOME'] = './'
+ set_credentials(base_url, 'kylef', '39481-a')
+ with open('.netrc', 'r') as rcfile:
+ self.assertEqual(dedent("""\
+ machine goji.example.com
+ login kylef
+ password 39481-a"""), rcfile.read())
|
Implement login
This is the first step, including creating the whole application structure
|
0.0
|
4846242eef679b70ae00ae8ffb55daf46f50cd07
|
[
"tests/test_auth.py::AuthTests::test_create_file_set_credentials",
"tests/test_auth.py::AuthTests::test_empty_get_credentials",
"tests/test_auth.py::AuthTests::test_new_set_credentials",
"tests/test_auth.py::AuthTests::test_override_set_credentials",
"tests/test_auth.py::AuthTests::test_preset_get_credentials"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-05-24 20:57:59+00:00
|
bsd-2-clause
| 3,471 |
|
kymatio__kymatio-863
|
diff --git a/kymatio/scattering1d/filter_bank.py b/kymatio/scattering1d/filter_bank.py
index 003cfba..090e48d 100644
--- a/kymatio/scattering1d/filter_bank.py
+++ b/kymatio/scattering1d/filter_bank.py
@@ -287,7 +287,7 @@ def compute_params_filterbank(sigma_min, Q, alpha, r_psi=math.sqrt(0.5)):
----------
sigma_min : float
This acts as a lower bound on the frequential widths of the band-pass
- filters. The low-pass filter may be wider (if T < 2**J_scattering), making
+ filters. The low-pass filter may be wider (if T < _N_padded), making
invariants over shorter time scales than longest band-pass filter.
Q : int
number of wavelets per octave.
@@ -337,7 +337,7 @@ def compute_params_filterbank(sigma_min, Q, alpha, r_psi=math.sqrt(0.5)):
return xis, sigmas, js
-def scattering_filter_factory(J_support, J_scattering, Q, T, r_psi=math.sqrt(0.5),
+def scattering_filter_factory(N, J, Q, T, r_psi=math.sqrt(0.5),
max_subsampling=None, sigma0=0.1, alpha=5., **kwargs):
"""
Builds in Fourier the Morlet filters used for the scattering transform.
@@ -354,11 +354,6 @@ def scattering_filter_factory(J_support, J_scattering, Q, T, r_psi=math.sqrt(0.5
Parameters
----------
- J_support : int
- 2**J_support is the desired support size of the filters
- J_scattering : int
- parameter for the scattering transform (2**J_scattering
- corresponds to maximal temporal support of any filter)
Q : tuple
number of wavelets per octave at the first and second order
Q = (Q1, Q2). Q1 and Q2 are both int >= 1.
@@ -374,9 +369,8 @@ def scattering_filter_factory(J_support, J_scattering, Q, T, r_psi=math.sqrt(0.5
to save computation time if it is not required. Defaults to None, in
which case this value is dynamically adjusted depending on the filters.
sigma0 : float, optional
- parameter controlling the frequential width of the
- low-pass filter at J_scattering=0; at a an absolute J_scattering, it
- is equal to sigma0 / 2**J_scattering. Defaults to 1e-1
+ parameter controlling the frequential width of the low-pass filter at
+ j=0; at a an absolute J, it is equal to sigma0 / 2**J. Defaults to 0.1
alpha : float, optional
tolerance factor for the aliasing after subsampling.
The larger alpha, the more conservative the value of maximal
@@ -416,7 +410,7 @@ def scattering_filter_factory(J_support, J_scattering, Q, T, r_psi=math.sqrt(0.5
https://tel.archives-ouvertes.fr/tel-01559667
"""
# compute the spectral parameters of the filters
- sigma_min = sigma0 / math.pow(2, J_scattering)
+ sigma_min = sigma0 / math.pow(2, J)
Q1, Q2 = Q
xi1s, sigma1s, j1s = compute_params_filterbank(sigma_min, Q1, alpha, r_psi)
xi2s, sigma2s, j2s = compute_params_filterbank(sigma_min, Q2, alpha, r_psi)
@@ -435,8 +429,7 @@ def scattering_filter_factory(J_support, J_scattering, Q, T, r_psi=math.sqrt(0.5
# compute the current value for the max_subsampling,
# which depends on the input it can accept.
if max_subsampling is None:
- possible_subsamplings_after_order1 = [
- j1 for j1 in j1s if j2 > j1]
+ possible_subsamplings_after_order1 = [j1 for j1 in j1s if j2 > j1]
if len(possible_subsamplings_after_order1) > 0:
max_sub_psi2 = max(possible_subsamplings_after_order1)
else:
@@ -444,7 +437,6 @@ def scattering_filter_factory(J_support, J_scattering, Q, T, r_psi=math.sqrt(0.5
else:
max_sub_psi2 = max_subsampling
# We first compute the filter without subsampling
- N = 2**J_support
psi_levels = [morlet_1d(N, xi2, sigma2)]
# compute the filter after subsampling at all other subsamplings
@@ -457,7 +449,6 @@ def scattering_filter_factory(J_support, J_scattering, Q, T, r_psi=math.sqrt(0.5
# for the 1st order filters, the input is not subsampled so we
# can only compute them with N=2**J_support
for (xi1, sigma1, j1) in zip(xi1s, sigma1s, j1s):
- N = 2**J_support
psi_levels = [morlet_1d(N, xi1, sigma1)]
psi1_f.append({'levels': psi_levels, 'xi': xi1, 'sigma': sigma1, 'j': j1})
diff --git a/kymatio/scattering1d/frontend/base_frontend.py b/kymatio/scattering1d/frontend/base_frontend.py
index 436cd40..a20059d 100644
--- a/kymatio/scattering1d/frontend/base_frontend.py
+++ b/kymatio/scattering1d/frontend/base_frontend.py
@@ -1,8 +1,8 @@
from ...frontend.base_frontend import ScatteringBase
import math
import numbers
-
import numpy as np
+from warnings import warn
from ..filter_bank import compute_temporal_support, gauss_1d, scattering_filter_factory
from ..utils import (compute_border_indices, compute_padding,
@@ -52,46 +52,48 @@ class ScatteringBase1D(ScatteringBase):
else:
raise ValueError("Q must be an integer or a tuple")
- # check the shape
+ # check input length
if isinstance(self.shape, numbers.Integral):
- self.N = self.shape
+ self.shape = (self.shape,)
elif isinstance(self.shape, tuple):
- self.N = self.shape[0]
if len(self.shape) > 1:
raise ValueError("If shape is specified as a tuple, it must "
"have exactly one element")
else:
raise ValueError("shape must be an integer or a 1-tuple")
+ N_input = self.shape[0]
# check T or set default
if self.T is None:
self.T = 2**(self.J)
- elif self.T > self.N:
+ elif self.T > N_input:
raise ValueError("The temporal support T of the low-pass filter "
"cannot exceed input length (got {} > {})".format(
- self.T, self.N))
+ self.T, N_input))
self.log2_T = math.floor(math.log2(self.T))
# Compute the minimum support to pad (ideally)
- phi_f = gauss_1d(self.N, self.sigma0/self.T)
+ phi_f = gauss_1d(N_input, self.sigma0/self.T)
min_to_pad = 3 * compute_temporal_support(
phi_f.reshape(1, -1), criterion_amplitude=1e-3)
# to avoid padding more than N - 1 on the left and on the right,
# since otherwise torch sends nans
- J_max_support = int(np.floor(np.log2(3 * self.N - 2)))
- self.J_pad = min(int(np.ceil(np.log2(self.N + 2 * min_to_pad))),
- J_max_support)
+ J_max_support = int(np.floor(np.log2(3 * N_input - 2)))
+ J_pad = min(int(np.ceil(np.log2(N_input + 2 * min_to_pad))),
+ J_max_support)
+ self._N_padded = 2**J_pad
+
# compute the padding quantities:
- self.pad_left, self.pad_right = compute_padding(self.J_pad, self.N)
+ self.pad_left, self.pad_right = compute_padding(self._N_padded, N_input)
# compute start and end indices
self.ind_start, self.ind_end = compute_border_indices(
- self.log2_T, self.J, self.pad_left, self.pad_left + self.N)
+ self.log2_T, self.J, self.pad_left, self.pad_left + N_input)
def create_filters(self):
# Create the filters
self.phi_f, self.psi1_f, self.psi2_f = scattering_filter_factory(
- self.J_pad, self.J, self.Q, self.T,
+ self._N_padded, self.J, self.Q, self.T,
r_psi=self.r_psi, sigma0=self.sigma0, alpha=self.alpha)
def meta(self):
@@ -147,6 +149,20 @@ class ScatteringBase1D(ScatteringBase):
'Input tensor x should have at least one axis, got {}'.format(
len(x.shape)))
+ @property
+ def J_pad(self):
+ warn("The attribute J_pad is deprecated and will be removed in v0.4. "
+ "Measure len(self.phi_f[0]) for the padded length (previously 2**J_pad) "
+ "or access shape[0] for the unpadded length (previously N).", DeprecationWarning)
+ return int(np.log2(self._N_padded))
+
+ @property
+ def N(self):
+ warn("The attribute N is deprecated and will be removed in v0.4. "
+ "Measure len(self.phi_f[0]) for the padded length (previously 2**J_pad) "
+ "or access shape[0] for the unpadded length (previously N).", DeprecationWarning)
+ return int(self.shape[0])
+
_doc_shape = 'N'
_doc_instantiation_shape = {True: 'S = Scattering1D(J, N, Q)',
@@ -158,9 +174,7 @@ class ScatteringBase1D(ScatteringBase):
"""
_doc_attrs_shape = \
- r"""J_pad : int
- The logarithm of the padded length of the signals.
- pad_left : int
+ r"""pad_left : int
The amount of padding to the left of the signal.
pad_right : int
The amount of padding to the right of the signal.
diff --git a/kymatio/scattering1d/utils.py b/kymatio/scattering1d/utils.py
index 6261436..6bddf3e 100644
--- a/kymatio/scattering1d/utils.py
+++ b/kymatio/scattering1d/utils.py
@@ -39,33 +39,32 @@ def compute_border_indices(log2_T, J, i0, i1):
ind_end[j] = (ind_end[j - 1] // 2) + (ind_end[j - 1] % 2)
return ind_start, ind_end
-def compute_padding(J_pad, N):
+def compute_padding(N, N_input):
"""
Computes the padding to be added on the left and on the right
of the signal.
- It should hold that 2**J_pad >= N
+ It should hold that N >= N_input
Parameters
----------
- J_pad : int
- 2**J_pad is the support of the padded signal
N : int
- original signal support size
+ support of the padded signal
+ N_input : int
+ support of the unpadded signal
Returns
-------
pad_left: amount to pad on the left ("beginning" of the support)
pad_right: amount to pad on the right ("end" of the support)
"""
- N_pad = 2**J_pad
- if N_pad < N:
+ if N < N_input:
raise ValueError('Padding support should be larger than the original' +
'signal size!')
- to_add = 2**J_pad - N
+ to_add = N - N_input
pad_left = to_add // 2
pad_right = to_add - pad_left
- if max(pad_left, pad_right) >= N:
+ if max(pad_left, pad_right) >= N_input:
raise ValueError('Too large padding value, will lead to NaN errors')
return pad_left, pad_right
@@ -95,9 +94,8 @@ def precompute_size_scattering(J, Q, T, max_order, r_psi, sigma0, alpha):
Should be >0 and <1. Controls the redundancy of the filters
(the larger r_psi, the larger the overlap between adjacent wavelets).
sigma0 : float
- parameter controlling the frequential width of the
- low-pass filter at J_scattering=0; at a an absolute J_scattering, it
- is equal to sigma0 / 2**J_scattering.
+ parameter controlling the frequential width of the low-pass filter at
+ j=0; at a an absolute J, it is equal to sigma0 / 2**J.
alpha : float, optional
tolerance factor for the aliasing after subsampling.
The larger alpha, the more conservative the value of maximal
@@ -150,9 +148,8 @@ def compute_meta_scattering(J, Q, T, max_order, r_psi, sigma0, alpha):
Should be >0 and <1. Controls the redundancy of the filters
(the larger r_psi, the larger the overlap between adjacent wavelets).
sigma0 : float
- parameter controlling the frequential width of the
- low-pass filter at J_scattering=0; at a an absolute J_scattering, it
- is equal to sigma0 / 2**J_scattering.
+ parameter controlling the frequential width of the low-pass filter at
+ j=0; at a an absolute J, it is equal to sigma0 / 2**J.
alpha : float, optional
tolerance factor for the aliasing after subsampling.
The larger alpha, the more conservative the value of maximal
|
kymatio/kymatio
|
a776d575aeff2bcefe2b8c002a8da9b492158303
|
diff --git a/tests/scattering1d/test_numpy_scattering1d.py b/tests/scattering1d/test_numpy_scattering1d.py
index 9de28cb..042a050 100644
--- a/tests/scattering1d/test_numpy_scattering1d.py
+++ b/tests/scattering1d/test_numpy_scattering1d.py
@@ -96,7 +96,7 @@ class TestScattering1DNumpy:
default_str = ' (default)'
else:
default_str = ''
- phi_f, psi1_f, psi2_f = scattering_filter_factory(np.log2(N), J, Q, T)
+ phi_f, psi1_f, psi2_f = scattering_filter_factory(N, J, Q, T)
assert(phi_f['sigma']==0.1/T)
frontends = ['numpy', 'sklearn']
diff --git a/tests/scattering1d/test_utils_scattering1d.py b/tests/scattering1d/test_utils_scattering1d.py
index 3ff8235..2bbd33c 100644
--- a/tests/scattering1d/test_utils_scattering1d.py
+++ b/tests/scattering1d/test_utils_scattering1d.py
@@ -10,15 +10,15 @@ def test_compute_padding():
Test the compute_padding function
"""
- pad_left, pad_right = compute_padding(5, 16)
+ pad_left, pad_right = compute_padding(32, 16)
assert pad_left == 8 and pad_right == 8
with pytest.raises(ValueError) as ve:
- _, _ = compute_padding(3, 16)
+ _, _ = compute_padding(8, 16)
assert "should be larger" in ve.value.args[0]
with pytest.raises(ValueError) as ve:
- _, _ = compute_padding(6, 16)
+ _, _ = compute_padding(64, 16)
assert "Too large padding value" in ve.value.args[0]
|
redefine 1D `self.N`, deprecate `self.J_pad`?
In 1D, `self.N` is the unpadded length of the input signal. It is defined in `ScatteringBase1D` as some derivative of `self.shape` and is not used anywhere else.
Same for 2D where `self.shape` unpacks as `self.M`, `self.N`.
I see no reason to keep these values as attributes in the object when:
* we have `self.shape` for that
* these values are only being used transiently, in `self.build()`
* our filterbank construction relies on `N_padded`, not `N`
* these values are trivially available in the forward via `self.N = len(x)`
In the spirit of preparing for scattering on variable-length inputs (`shape=None`, a long-requested feature!), I ask permission to remove these attributes from `self`.
I am open to introducing a deprecation cycle where we convert these attributes to read-only properties and print a `DeprecationWarning` in v0.3 if anyone tries to access these numbers (NB: none of our examples access `self.N` and friends).
@janden @eickenberg
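A small usage sketch of the deprecation-cycle option, assuming the read-only properties described above are in place (the frontend import and the shape value are only illustrative):

```python
# Accessing the old attributes still works but now emits DeprecationWarning.
import warnings
from kymatio.numpy import Scattering1D

sc = Scattering1D(J=6, shape=(1024,))

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    unpadded = sc.N        # previously the unpadded input length
    padded_log = sc.J_pad  # previously log2 of the padded length

print(unpadded, padded_log)
print([w.category.__name__ for w in caught])  # expected: two DeprecationWarning entries
```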
|
0.0
|
a776d575aeff2bcefe2b8c002a8da9b492158303
|
[
"tests/scattering1d/test_numpy_scattering1d.py::TestScattering1DNumpy::test_Scattering1D_filter_factory_T[NumpyBackend1D]",
"tests/scattering1d/test_utils_scattering1d.py::test_compute_padding"
] |
[
"tests/scattering1d/test_numpy_scattering1d.py::TestScattering1DNumpy::test_Scattering1D[NumpyBackend1D]",
"tests/scattering1d/test_numpy_scattering1d.py::TestScattering1DNumpy::test_Scattering1D_T[NumpyBackend1D]",
"tests/scattering1d/test_numpy_scattering1d.py::test_Q[numpy-NumpyBackend1D]",
"tests/scattering1d/test_utils_scattering1d.py::test_border_indices",
"tests/scattering1d/test_utils_scattering1d.py::test_scattering1d_frontend"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-06-09 23:29:11+00:00
|
bsd-3-clause
| 3,472 |
|
kymatio__kymatio-982
|
diff --git a/kymatio/scattering1d/filter_bank.py b/kymatio/scattering1d/filter_bank.py
index 63e7dd5..b1d69ea 100644
--- a/kymatio/scattering1d/filter_bank.py
+++ b/kymatio/scattering1d/filter_bank.py
@@ -377,8 +377,8 @@ def scattering_filter_factory(N, J, Q, T, filterbank):
j = get_max_dyadic_subsampling(xi, sigma, **filterbank_kwargs)
# Resample to smaller resolutions if necessary (beyond 1st layer)
- # The idiom min(previous_J, j, log2_T) implements "j1 < j2"
- for level in range(1, min(previous_J, j, log2_T)):
+ # The idiom min(previous_J, j, 1+log2_T) implements "j1 < j2"
+ for level in range(1, min(previous_J, j, 1+log2_T)):
psi_level = psi_levels[0].reshape(2 ** level, -1).mean(axis=0)
psi_levels.append(psi_level)
psi_f.append({'levels': psi_levels, 'xi': xi, 'sigma': sigma, 'j': j})
|
kymatio/kymatio
|
954c44990ca2edcd9b4cc12c20321dd20d27cce6
|
diff --git a/tests/scattering1d/test_numpy_scattering1d.py b/tests/scattering1d/test_numpy_scattering1d.py
index 8c84f7f..9908bff 100644
--- a/tests/scattering1d/test_numpy_scattering1d.py
+++ b/tests/scattering1d/test_numpy_scattering1d.py
@@ -114,6 +114,16 @@ class TestScattering1DNumpy:
Sx = sc(x)
assert Sx.shape[-1] == 1
+ def test_981(self, backend):
+ """
+ Tests against bug #981, in which some low values of T triggered an
+ `IndexError` in second-order backend.cdgmm
+ """
+ N = 1024
+ x = np.zeros(N)
+ sc = Scattering1D(J=8, shape=(N,), T=2)
+ Sx = sc(x)
+ assert Sx.shape[-1] == N/sc.T
frontends = ['numpy', 'sklearn']
|
BUG missing one second-order wavelet for low-T Sc1D and JTFS
MWE
```python
import numpy as np
from kymatio.numpy import Scattering1D
N = 1024
x = np.zeros(N)
S = Scattering1D(J=8, shape=(N,), T=2)
Sx = S(x)
```
Traceback:
```
IndexError Traceback (most recent call last)
Cell In [5], line 7
4 kwargs = dict(J=8, shape=(N,))
6 S = Scattering1D(**kwargs, T=2)
----> 7 S(x)
File ~/kymatio/kymatio/frontend/numpy_frontend.py:10, in ScatteringNumPy.__call__(self, x)
7 def __call__(self, x):
8 """This method is an alias for `scattering`."""
---> 10 return self.scattering(x)
File ~/kymatio/kymatio/scattering1d/frontend/base_frontend.py:116, in ScatteringBase1D.scattering(self, x)
113 elif self.out_type == 'dict':
114 S = dict()
--> 116 for path in S_gen:
117 path['order'] = len(path['n'])
118 if self.average == 'local':
File ~/kymatio/kymatio/scattering1d/core/scattering1d.py:97, in scattering1d(U_0, backend, filters, oversampling, average_local)
94 sub2_adj = min(j2, log2_T) if average_local else j2
95 k2 = max(sub2_adj - k1 - oversampling, 0)
---> 97 U_2_c = backend.cdgmm(U_1_hat, psi2[n2]['levels'][k1])
98 U_2_hat = backend.subsample_fourier(U_2_c, 2**k2)
99 # take the modulus
IndexError: list index out of range
```
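A small illustration of the off-by-one behind this traceback, under the assumption that `T=2` gives `log2_T = 1` and that a second-order filter with `j = 3` must be available at first-order subsampling level `k1 = 1` (names mirror the patch; the values are made up):

```python
# With the old bound, no subsampled level beyond 0 is built for low T, so
# psi2[n2]['levels'][k1] with k1 = 1 raises IndexError; the +1 restores it.
log2_T, j, previous_J = 1, 3, 8

old_extra_levels = list(range(1, min(previous_J, j, log2_T)))      # []
new_extra_levels = list(range(1, min(previous_J, j, 1 + log2_T)))  # [1]

print(old_extra_levels, new_extra_levels)
```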
|
0.0
|
954c44990ca2edcd9b4cc12c20321dd20d27cce6
|
[
"tests/scattering1d/test_numpy_scattering1d.py::TestScattering1DNumpy::test_981[NumpyBackend1D]"
] |
[
"tests/scattering1d/test_numpy_scattering1d.py::TestScattering1DNumpy::test_Scattering1D[NumpyBackend1D]",
"tests/scattering1d/test_numpy_scattering1d.py::TestScattering1DNumpy::test_Scattering1D_T[NumpyBackend1D]",
"tests/scattering1d/test_numpy_scattering1d.py::TestScattering1DNumpy::test_Scattering1D_filter_factory_T[NumpyBackend1D]",
"tests/scattering1d/test_numpy_scattering1d.py::TestScattering1DNumpy::test_Scattering1D_average_global[NumpyBackend1D]",
"tests/scattering1d/test_numpy_scattering1d.py::test_Q[numpy-NumpyBackend1D]"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2023-01-02 19:57:26+00:00
|
bsd-3-clause
| 3,473 |
|
kytos__kytos-1146
|
diff --git a/kytos/core/api_server.py b/kytos/core/api_server.py
index f10c3b0..ccc0c80 100644
--- a/kytos/core/api_server.py
+++ b/kytos/core/api_server.py
@@ -200,7 +200,10 @@ class APIServer:
def web_ui(self):
"""Serve the index.html page for the admin-ui."""
- return send_file(f"{self.flask_dir}/index.html")
+ index_path = f"{self.flask_dir}/index.html"
+ if os.path.exists(index_path):
+ return send_file(index_path)
+ return f"File '{index_path}' not found.", HTTPStatus.NOT_FOUND.value
def update_web_ui(self, version='latest', force=True):
"""Update the static files for the Web UI.
@@ -227,6 +230,9 @@ class APIServer:
package = urlretrieve(uri)[0]
except HTTPError:
return f"Uri not found {uri}."
+ except URLError:
+ self.log.warning("Error accessing URL %s.", uri)
+ return f"Error accessing URL {uri}."
# test downloaded zip file
zip_ref = zipfile.ZipFile(package, 'r')
|
kytos/kytos
|
b4d1010f90e3a36a8caa646362d1bd8646589691
|
diff --git a/tests/unit/test_core/test_api_server.py b/tests/unit/test_core/test_api_server.py
index 1d98e71..20ff2bd 100644
--- a/tests/unit/test_core/test_api_server.py
+++ b/tests/unit/test_core/test_api_server.py
@@ -113,13 +113,23 @@ class TestAPIServer(unittest.TestCase):
self.assertEqual(response.json, expected_json)
self.assertEqual(response.status_code, 200)
+ @patch('os.path')
@patch('kytos.core.api_server.send_file')
- def test_web_ui(self, mock_send_file):
+ def test_web_ui__success(self, mock_send_file, ospath_mock):
"""Test web_ui method."""
+ ospath_mock.exists.return_value = True
self.api_server.web_ui()
mock_send_file.assert_called_with('flask_dir/index.html')
+ @patch('os.path')
+ def test_web_ui__error(self, ospath_mock):
+ """Test web_ui method."""
+ ospath_mock.exists.return_value = False
+ _, error = self.api_server.web_ui()
+
+ self.assertEqual(error, 404)
+
@patch('kytos.core.api_server.urlretrieve')
@patch('kytos.core.api_server.urlopen')
@patch('zipfile.ZipFile')
|
Remove hard dependency on github for UI
# The Issue
In order to execute `kytosd`, kytos needs to be able to download the UI from GitHub. This download may fail, whether because GitHub is unavailable, a firewall blocks it, or DNS cannot resolve it.
# Proposed Solution
- Allow for executing kytos without running the web server for the UI, while still running the web server for the API (see the sketch after this list).
- Provide a different mechanism to couple the web server for the UI to the controller.
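A minimal sketch of the first bullet, not the actual kytos `APIServer`: the UI route degrades to a 404 when the static bundle was never downloaded, while the API routes keep serving. The route paths and the `flask_dir` location are placeholders.

```python
# Hedged sketch: keep the REST API up and answer 404 for the web UI when its
# static bundle is missing, instead of requiring a GitHub download at startup.
import os
from http import HTTPStatus

from flask import Flask, send_file

app = Flask(__name__)
FLASK_DIR = "flask_dir"  # placeholder for the directory holding the UI bundle


@app.route("/ui/")
def web_ui():
    index_path = f"{FLASK_DIR}/index.html"
    if os.path.exists(index_path):
        return send_file(index_path)
    # The API endpoints below keep working even though the UI was never fetched.
    return f"File '{index_path}' not found.", HTTPStatus.NOT_FOUND.value


@app.route("/api/status/")
def status_api():
    return '{"response": "running"}', HTTPStatus.OK.value
```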
|
0.0
|
b4d1010f90e3a36a8caa646362d1bd8646589691
|
[
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_deprecation_warning",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_disable_napp__error_not_enabling",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_disable_napp__error_not_installed",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_disable_napp__success",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_enable_napp__error_not_enabling",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_enable_napp__error_not_installed",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_enable_napp__success",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_get_napp_metadata",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_get_napp_metadata__invalid_key",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_get_napp_metadata__not_installed",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_get_ui_components",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_install_napp__error_not_installing",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_install_napp__http_error",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_install_napp__success",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_install_napp__success_is_installed",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_list_enabled_napps",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_list_installed_napps",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_run",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_run_error",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_static_web_ui__error",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_static_web_ui__success",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_status_api",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_stop_api_server",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_uninstall_napp__error_not_uninstalling",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_uninstall_napp__success",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_uninstall_napp__success_not_installed",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_update_web_ui",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_update_web_ui__http_error",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_update_web_ui__zip_error",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_web_ui__error",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_web_ui__success",
"tests/unit/test_core/test_api_server.py::TestAPIDecorator::test_flask_call",
"tests/unit/test_core/test_api_server.py::TestAPIDecorator::test_remove_napp_endpoints",
"tests/unit/test_core/test_api_server.py::TestAPIDecorator::test_rule_from_classmethod",
"tests/unit/test_core/test_api_server.py::TestAPIDecorator::test_rule_from_staticmethod",
"tests/unit/test_core/test_api_server.py::TestAPIDecorator::test_rule_with_slash"
] |
[] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2020-09-01 17:43:48+00:00
|
mit
| 3,474 |
|
kytos__kytos-1163
|
diff --git a/kytos/lib/helpers.py b/kytos/lib/helpers.py
index 1f9b771..5c12f86 100644
--- a/kytos/lib/helpers.py
+++ b/kytos/lib/helpers.py
@@ -36,7 +36,7 @@ def get_link_mock(endpoint_a, endpoint_b):
link = create_autospec(Link)
link.endpoint_a = endpoint_a
link.endpoint_b = endpoint_b
- link.metadata = {"A": 0}
+ link.metadata = {"A": 0, "BB": 0.0, "CCC": "test"}
return link
|
kytos/kytos
|
c67933ccecd8c87eab9a7d527312956841959b97
|
diff --git a/tests/unit/test_lib/test_helpers.py b/tests/unit/test_lib/test_helpers.py
index 253fe54..48b339d 100644
--- a/tests/unit/test_lib/test_helpers.py
+++ b/tests/unit/test_lib/test_helpers.py
@@ -41,7 +41,8 @@ class TestHelpers(TestCase):
self.assertEqual(link_mock.endpoint_a, endpoint_a)
self.assertEqual(link_mock.endpoint_b, endpoint_b)
- self.assertEqual(link_mock.metadata, {"A": 0})
+ self.assertEqual(link_mock.metadata, {"A": 0, "BB": 0.0,
+ "CCC": "test"})
def test_switch_mock(self):
"""Test switch mock."""
|
Improve key generation for mock `Link.metadata`
The following test code does not sufficiently expose kytos/pathfinder#64:
https://github.com/kytos/kytos/blob/c67933ccecd8c87eab9a7d527312956841959b97/kytos/lib/helpers.py#L34-L40
This change is to address the problem of the referenced bug not showing up for single-letter link attribute names.
|
0.0
|
c67933ccecd8c87eab9a7d527312956841959b97
|
[
"tests/unit/test_lib/test_helpers.py::TestHelpers::test_link_mock"
] |
[
"tests/unit/test_lib/test_helpers.py::TestHelpers::test_connection_mock",
"tests/unit/test_lib/test_helpers.py::TestHelpers::test_interface_mock",
"tests/unit/test_lib/test_helpers.py::TestHelpers::test_kytos_event_mock",
"tests/unit/test_lib/test_helpers.py::TestHelpers::test_switch_mock"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-09-11 01:37:08+00:00
|
mit
| 3,475 |
|
kytos__kytos-1249
|
diff --git a/kytos/core/api_server.py b/kytos/core/api_server.py
index a18e15e..2682535 100644
--- a/kytos/core/api_server.py
+++ b/kytos/core/api_server.py
@@ -12,7 +12,7 @@ from http import HTTPStatus
from urllib.error import HTTPError, URLError
from urllib.request import urlopen, urlretrieve
-from flask import Blueprint, Flask, jsonify, send_file
+from flask import Blueprint, Flask, jsonify, request, send_file
from flask_cors import CORS
from flask_socketio import SocketIO, join_room, leave_room
from werkzeug.exceptions import HTTPException
@@ -107,9 +107,11 @@ class APIServer:
def start_api(self):
"""Start this APIServer instance API.
- Start /api/kytos/core/shutdown/ and status/ endpoints, web UI.
+ Start /api/kytos/core/_shutdown/ and status/ endpoints, web UI.
+ The '_shutdown' endpoint should not be public and is intended to
+ shutdown the APIServer.
"""
- self.register_core_endpoint('shutdown/', self.shutdown_api)
+ self.register_core_endpoint('_shutdown/', self.shutdown_api)
self.register_core_endpoint('status/', self.status_api)
self.register_core_endpoint('web/update/<version>/',
self.update_web_ui,
@@ -146,23 +148,27 @@ class APIServer:
return '{"response": "running"}', HTTPStatus.OK.value
def stop_api_server(self):
- """Send a shutdown request to stop Api Server."""
+ """Send a shutdown request to stop API Server."""
try:
- url = f'http://127.0.0.1:{self.port}/api/kytos/core/shutdown'
+ url = f'http://127.0.0.1:{self.port}/api/kytos/core/_shutdown'
urlopen(url)
except URLError:
pass
- @authenticated
def shutdown_api(self):
- """Handle shutdown requests received by Api Server.
+ """Handle requests received to shutdown the API Server.
This method must be called by kytos using the method
stop_api_server, otherwise this request will be ignored.
"""
+ allowed_host = ['127.0.0.1:'+str(self.port),
+ 'localhost:'+str(self.port)]
+ if request.host not in allowed_host:
+ return "", HTTPStatus.FORBIDDEN.value
+
self.server.stop()
- return 'Server shutting down...', HTTPStatus.OK.value
+ return 'API Server shutting down...', HTTPStatus.OK.value
def static_web_ui(self, username, napp_name, filename):
"""Serve static files from installed napps."""
|
kytos/kytos
|
8c8456626a73ba0112680447b1fcfcabb00a3a37
|
diff --git a/tests/unit/test_core/test_api_server.py b/tests/unit/test_core/test_api_server.py
index 109aa09..85b23bd 100644
--- a/tests/unit/test_core/test_api_server.py
+++ b/tests/unit/test_core/test_api_server.py
@@ -54,29 +54,21 @@ class TestAPIServer(unittest.TestCase):
mock_exit.assert_called()
- @patch('kytos.core.auth.request')
- @patch('kytos.core.auth.jwt.decode', return_value=True)
- def test_shutdown_api(self, _, mock_request):
+ @patch('kytos.core.api_server.request')
+ def test_shutdown_api(self, mock_request):
"""Test shutdown_api method."""
+ mock_request.host = 'localhost:8181'
- mock_request.headers = {'Authorization': 'Bearer 123'}
self.api_server.shutdown_api()
self.api_server.server.stop.assert_called()
- @patch('kytos.core.auth.jsonify')
- @patch('kytos.core.auth.request')
- def test_shutdown_api__error(self, mock_request, mock_jsonify):
+ @patch('kytos.core.api_server.request')
+ def test_shutdown_api__error(self, mock_request):
"""Test shutdown_api method to error case."""
-
- mock_request.headers = {'Authorization': None}
+ mock_request.host = 'any:port'
self.api_server.shutdown_api()
- exc_msg = "The attribute 'content' has an invalid value 'None'."
- msg = f"Token not sent or expired: {exc_msg}"
-
- mock_jsonify.assert_called_with({"error": msg})
-
self.api_server.server.stop.assert_not_called()
def test_status_api(self):
@@ -89,7 +81,7 @@ class TestAPIServer(unittest.TestCase):
"""Test stop_api_server method."""
self.api_server.stop_api_server()
- url = "%s/shutdown" % API_URI
+ url = "%s/_shutdown" % API_URI
mock_urlopen.assert_called_with(url)
@patch('kytos.core.api_server.send_file')
|
Kytos running in background only stops after two pkill
Hi, even though this seems to be similar to #1236 and #1235, I've decided to create this new issue because it just started happening after PR #1231. The problem is: when you start kytos in the background, you have to issue pkill kytos twice before being able to stop the process.
This only happens to me if I install kytos from the master branch. The pre-release 2021.1rc1 does not include this bug.
Steps to reproduce:
1. Run kytos base docker image: `docker run -d --name k3 kytos/base:latest tail -f /dev/null`
2. Install the basic kytos packages:
```
python3.6 -m pip install --upgrade pip setuptools wheel
python3.6 -m pip install https://github.com/kytos/python-openflow/archive/master.zip
python3.6 -m pip install https://github.com/kytos/kytos-utils/archive/master.zip
python3.6 -m pip install https://github.com/kytos/kytos/archive/master.zip
```
3. Start kytos in background: `kytosd`
4. Try to shut down kytos: `pkill kytosd`
Expected result: kytos should shut down
Actual result: kytos remains running
If I revert PR #1231, the issue stops happening. So it looks like some internal command is using the shutdown API endpoint but does not satisfy the authenticated decorator, for example stop_api_server() (https://github.com/kytos/kytos/blob/master/kytos/core/api_server.py#L148), which is called by kytos.controller.stop_controller() (https://github.com/kytos/kytos/blob/master/kytos/core/controller.py#L399).
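A reduced sketch of the fix direction, assuming a plain Flask app rather than the real kytos `APIServer`: the shutdown endpoint stays unauthenticated but only answers requests whose `Host` header points at the local API port, so the internal `stop_api_server()` call keeps working while remote callers get 403.

```python
# Hedged sketch of a loopback-only shutdown route; the port and route path are
# illustrative, and the real implementation calls self.server.stop() instead.
from http import HTTPStatus

from flask import Flask, request

app = Flask(__name__)
PORT = 8181


@app.route("/api/kytos/core/_shutdown/")
def shutdown_api():
    allowed_hosts = [f"127.0.0.1:{PORT}", f"localhost:{PORT}"]
    if request.host not in allowed_hosts:
        # Requests arriving via any other host name or interface are refused.
        return "", HTTPStatus.FORBIDDEN.value
    return "API Server shutting down...", HTTPStatus.OK.value
```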
|
0.0
|
8c8456626a73ba0112680447b1fcfcabb00a3a37
|
[
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_stop_api_server"
] |
[
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_deprecation_warning",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_disable_napp__error_not_enabling",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_disable_napp__error_not_installed",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_disable_napp__success",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_enable_napp__error_not_enabling",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_enable_napp__error_not_installed",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_enable_napp__success",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_get_napp_metadata",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_get_napp_metadata__invalid_key",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_get_napp_metadata__not_installed",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_get_ui_components",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_install_napp__error_not_installing",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_install_napp__http_error",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_install_napp__success",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_install_napp__success_is_installed",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_list_enabled_napps",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_list_installed_napps",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_run",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_run_error",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_static_web_ui__error",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_static_web_ui__success",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_status_api",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_uninstall_napp__error_not_uninstalling",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_uninstall_napp__success",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_uninstall_napp__success_not_installed",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_update_web_ui",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_update_web_ui__http_error",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_update_web_ui__zip_error",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_web_ui__error",
"tests/unit/test_core/test_api_server.py::TestAPIServer::test_web_ui__success",
"tests/unit/test_core/test_api_server.py::TestAPIDecorator::test_flask_call",
"tests/unit/test_core/test_api_server.py::TestAPIDecorator::test_remove_napp_endpoints",
"tests/unit/test_core/test_api_server.py::TestAPIDecorator::test_rule_from_classmethod",
"tests/unit/test_core/test_api_server.py::TestAPIDecorator::test_rule_from_staticmethod",
"tests/unit/test_core/test_api_server.py::TestAPIDecorator::test_rule_with_slash"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_issue_reference"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-05-18 18:54:21+00:00
|
mit
| 3,476 |
|
kytos__python-openflow-193
|
diff --git a/pyof/foundation/basic_types.py b/pyof/foundation/basic_types.py
index 297baa2..a3b5461 100644
--- a/pyof/foundation/basic_types.py
+++ b/pyof/foundation/basic_types.py
@@ -130,10 +130,11 @@ class DPID(GenericType):
self._value = ':'.join(bytes)
def pack(self, value=None):
- buffer = b''
- for value in self._value.split(":"):
- buffer += struct.pack('!B', int(value, 16))
- return buffer
+ if isinstance(value, type(self)):
+ return value.pack()
+ if value is None:
+ value = self._value
+ return struct.pack('!8B', *[int(v, 16) for v in value.split(':')])
class Char(GenericType):
diff --git a/pyof/v0x01/common/utils.py b/pyof/v0x01/common/utils.py
index 81ded8f..274130c 100644
--- a/pyof/v0x01/common/utils.py
+++ b/pyof/v0x01/common/utils.py
@@ -11,7 +11,7 @@ from pyof.v0x01.asynchronous.flow_removed import FlowRemoved
from pyof.v0x01.asynchronous.packet_in import PacketIn
from pyof.v0x01.asynchronous.port_status import PortStatus
# Importing controller2switch messages
-from pyof.v0x01.common.header import Type
+from pyof.v0x01.common.header import Header, Type
from pyof.v0x01.controller2switch.barrier_reply import BarrierReply
from pyof.v0x01.controller2switch.barrier_request import BarrierRequest
from pyof.v0x01.controller2switch.features_reply import FeaturesReply
@@ -34,7 +34,8 @@ from pyof.v0x01.symmetric.echo_request import EchoRequest
from pyof.v0x01.symmetric.hello import Hello
from pyof.v0x01.symmetric.vendor_header import VendorHeader
-__all__ = ('new_message_from_header', 'new_message_from_message_type')
+__all__ = ('new_message_from_header', 'new_message_from_message_type',
+ 'unpack_message')
def new_message_from_message_type(message_type):
@@ -116,3 +117,14 @@ def new_message_from_header(header):
message.header.length = header.length
return message
+
+
+def unpack_message(buffer):
+ """Unpack the whole buffer, including header pack."""
+ hdr_size = Header().get_size()
+ hdr_buff, msg_buff = buffer[:hdr_size], buffer[hdr_size:]
+ header = Header()
+ header.unpack(hdr_buff)
+ message = new_message_from_header(header)
+ message.unpack(msg_buff)
+ return message
diff --git a/pyof/v0x01/controller2switch/features_reply.py b/pyof/v0x01/controller2switch/features_reply.py
index 8dff843..33f5541 100644
--- a/pyof/v0x01/controller2switch/features_reply.py
+++ b/pyof/v0x01/controller2switch/features_reply.py
@@ -62,7 +62,7 @@ class SwitchFeatures(GenericMessage):
Args:
xid (int): xid to be used on the message header.
- datapath_id (int): UBInt64 datapath unique ID.
+ datapath_id (str or :class:`.DPID`): datapath unique ID.
The lower 48-bits are for MAC address, while
the upper 16-bits are implementer-defined.
n_buffers (int): UBInt32 max packets buffered at once.
diff --git a/pyof/v0x01/controller2switch/stats_reply.py b/pyof/v0x01/controller2switch/stats_reply.py
index 1383aca..795ecce 100644
--- a/pyof/v0x01/controller2switch/stats_reply.py
+++ b/pyof/v0x01/controller2switch/stats_reply.py
@@ -1,14 +1,9 @@
"""Response the stat request packet from the controller."""
-
-# System imports
-
-# Third-party imports
-
from pyof.foundation.base import GenericMessage
-from pyof.foundation.basic_types import BinaryData, UBInt16
-# Local imports
+from pyof.foundation.basic_types import BinaryData, FixedTypeList, UBInt16
from pyof.v0x01.common.header import Header, Type
-from pyof.v0x01.controller2switch.common import FlowStats, StatsTypes
+from pyof.v0x01.controller2switch.common import (DescStats, PortStats,
+ StatsTypes)
__all__ = ('StatsReply',)
@@ -35,7 +30,7 @@ class StatsReply(GenericMessage):
self.flags = flags
self.body = body
- def unpack(self, buff, offset=0):
+ def unpack(self, buff):
"""Unpack a binary message into this object's attributes.
Unpack the binary value *buff* and update this object attributes based
@@ -43,29 +38,21 @@ class StatsReply(GenericMessage):
of the message **without the header**.
This class' unpack method is like the :meth:`.GenericMessage.unpack`
- one, except for the ``actions`` attribute which has a length determined
- by the ``actions_len`` attribute.
+ one, except for the ``body`` attribute which has its type determined
+ by the ``body_type`` attribute.
Args:
buff (bytes): Binary data package to be unpacked, without the
header.
- offset (int): Where to begin unpacking.
"""
- stats = []
- super().unpack(buff, offset)
- data = self.body.value
- if self.body_type == StatsTypes.OFPST_FLOW:
- ReplyClass = FlowStats
- else:
- # TODO: Implement other types
- return
+ super().unpack(buff)
- while len(data) > 0:
- length = UBInt16()
- length.unpack(data[:2])
- item = ReplyClass()
- item.unpack(data[0:length.value])
- stats.append(item)
- data = data[length.value:]
+ if self.body_type == StatsTypes.OFPST_PORT:
+ self._unpack_body(FixedTypeList(pyof_class=PortStats))
+ elif self.body_type == StatsTypes.OFPST_DESC:
+ self._unpack_body(DescStats())
- self.body = stats
+ def _unpack_body(self, obj):
+ """Unpack `body` using `obj` and replace it by the result."""
+ obj.unpack(self.body.value)
+ self.body = obj
diff --git a/pyof/v0x01/controller2switch/stats_request.py b/pyof/v0x01/controller2switch/stats_request.py
index be1a5cf..838dc41 100644
--- a/pyof/v0x01/controller2switch/stats_request.py
+++ b/pyof/v0x01/controller2switch/stats_request.py
@@ -8,7 +8,7 @@ from pyof.foundation.base import GenericMessage
from pyof.foundation.basic_types import BinaryData, UBInt16
# Local imports
from pyof.v0x01.common.header import Header, Type
-from pyof.v0x01.controller2switch.common import StatsTypes
+from pyof.v0x01.controller2switch.common import PortStatsRequest, StatsTypes
__all__ = ('StatsRequest',)
@@ -34,3 +34,26 @@ class StatsRequest(GenericMessage):
self.body_type = body_type
self.flags = flags
self.body = body
+
+ def pack(self):
+ """Pack according to :attr:`body_type`.
+
+ Make `body` a binary pack before packing this object. Then, restore
+ body.
+ """
+ if self.body_type == StatsTypes.OFPST_PORT:
+ backup = self.body
+ self.body = self.body.pack()
+ pack = super().pack()
+ self.body = backup
+ return pack
+ else:
+ return super().pack()
+
+ def unpack(self, buff):
+ """Unpack according to :attr:`body_type`."""
+ super().unpack(buff)
+ if self.body_type == StatsTypes.OFPST_PORT:
+ buff = self.body.value
+ self.body = PortStatsRequest()
+ self.body.unpack(buff)
diff --git a/setup.cfg b/setup.cfg
index b16a827..deec796 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -3,8 +3,9 @@ exclude = .eggs,ENV,build,docs/conf.py,venv
[pylama]
linters = pyflakes,isort,pep8,mccabe,pep257,pylint
-ignore = D105,D203,I0011
+ignore = D105,D203,D213,I0011
# D105: Missing docstring in magic method [pep257]
# D203: 1 blank line required before class docstring (found 0) [pep257]
# Conflicts with D211: No blank lines allowed before class docstring
+# D213: Should be ignored by default, but sometimes it is not
# I0011: I0011 Locally disabling no-member (E1101) [pylint]
|
kytos/python-openflow
|
013e9c24bd4c234f0bf39210686a58af4c586d7d
|
diff --git a/tests/v0x01/test_controller2switch/test_features_reply.py b/tests/v0x01/test_controller2switch/test_features_reply.py
index 2728ded..3f56325 100644
--- a/tests/v0x01/test_controller2switch/test_features_reply.py
+++ b/tests/v0x01/test_controller2switch/test_features_reply.py
@@ -1,5 +1,5 @@
"""Echo request message tests."""
-from pyof.foundation.basic_types import HWAddress
+from pyof.foundation.basic_types import HWAddress, DPID
from pyof.v0x01.common.phy_port import PhyPort, PortConfig, PortState
from pyof.v0x01.controller2switch.features_reply import FeaturesReply
from tests.test_struct import TestStruct
@@ -19,8 +19,9 @@ class TestFeaturesReply(TestStruct):
def _get_kwargs():
- return {'xid': 2, 'datapath_id': 1, 'n_buffers': 256, 'n_tables': 254,
- 'capabilities': 0x000000c7, 'actions': 4095, 'ports': _get_ports()}
+ return {'xid': 2, 'datapath_id': DPID('00:00:00:00:00:00:00:01'),
+ 'n_buffers': 256, 'n_tables': 254, 'capabilities': 0x000000c7,
+ 'actions': 4095, 'ports': _get_ports()}
def _get_ports():
diff --git a/tests/v0x01/test_controller2switch/test_stats_request.py b/tests/v0x01/test_controller2switch/test_stats_request.py
index 1891f66..087afa6 100644
--- a/tests/v0x01/test_controller2switch/test_stats_request.py
+++ b/tests/v0x01/test_controller2switch/test_stats_request.py
@@ -1,7 +1,10 @@
"""Test for StatsRequest message."""
import unittest
-from pyof.v0x01.controller2switch import common, stats_request
+from pyof.v0x01.common.phy_port import Port
+from pyof.v0x01.common.utils import unpack_message
+from pyof.v0x01.controller2switch.common import PortStatsRequest
+from pyof.v0x01.controller2switch.stats_request import StatsRequest, StatsTypes
class TestStatsRequest(unittest.TestCase):
@@ -9,9 +12,9 @@ class TestStatsRequest(unittest.TestCase):
def setUp(self):
"""Basic test setup."""
- self.message = stats_request.StatsRequest()
+ self.message = StatsRequest()
self.message.header.xid = 1
- self.message.type = common.StatsTypes.OFPST_FLOW
+ self.message.type = StatsTypes.OFPST_FLOW
self.message.flags = 1
self.message.body = []
@@ -19,6 +22,15 @@ class TestStatsRequest(unittest.TestCase):
"""[Controller2Switch/StatsRequest] - size 12."""
self.assertEqual(self.message.get_size(), 12)
+ def test_pack_unpack_port_stats(self):
+ """Pack and unpack PortStatsRequest."""
+ body = PortStatsRequest(Port.OFPP_NONE)
+ req = StatsRequest(16909060, body_type=StatsTypes.OFPST_PORT,
+ body=body)
+ pack = req.pack()
+ unpacked = unpack_message(pack)
+ self.assertEqual(req, unpacked)
+
@unittest.skip('Not yet implemented')
def test_pack(self):
"""[Controller2Switch/StatsRequest] - packing."""
|
Review body attribute
Review the `body` attribute of `...controller2switch.stats_request` and `...controller2switch.stats_reply` to evaluate whether its current type is appropriate. It has been defined as a `ConstantTypeList`, but maybe it could be a `UBInt8` (to act as a pointer) or a `BinaryData` generic type field.
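A simplified sketch of the direction the patch above takes (these are not the actual pyof classes): keep `body` as opaque bytes and convert it only after the generic unpack, once `body_type` is known.
```
class ReplySketch:
    # toy stand-in for StatsReply: body stays raw until body_type is consulted
    def __init__(self, body_type, body=b""):
        self.body_type = body_type
        self.body = body                      # raw bytes by default

    def unpack_body(self, decoders):
        decode = decoders.get(self.body_type)
        if decode is not None:
            self.body = decode(self.body)     # replace bytes with a typed value

reply = ReplySketch("OFPST_DESC", b"example switch description")
reply.unpack_body({"OFPST_DESC": lambda raw: raw.decode()})
print(reply.body)                             # example switch description
```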
|
0.0
|
013e9c24bd4c234f0bf39210686a58af4c586d7d
|
[
"tests/v0x01/test_controller2switch/test_features_reply.py::TestFeaturesReply::test_minimum_size",
"tests/v0x01/test_controller2switch/test_features_reply.py::TestFeaturesReply::test_pack",
"tests/v0x01/test_controller2switch/test_features_reply.py::TestFeaturesReply::test_unpack",
"tests/v0x01/test_controller2switch/test_stats_request.py::TestStatsRequest::test_get_size",
"tests/v0x01/test_controller2switch/test_stats_request.py::TestStatsRequest::test_pack_unpack_port_stats"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2016-10-05 14:55:29+00:00
|
mit
| 3,477 |
|
kytos__python-openflow-201
|
diff --git a/pyof/foundation/basic_types.py b/pyof/foundation/basic_types.py
index 297baa2..a3b5461 100644
--- a/pyof/foundation/basic_types.py
+++ b/pyof/foundation/basic_types.py
@@ -130,10 +130,11 @@ class DPID(GenericType):
self._value = ':'.join(bytes)
def pack(self, value=None):
- buffer = b''
- for value in self._value.split(":"):
- buffer += struct.pack('!B', int(value, 16))
- return buffer
+ if isinstance(value, type(self)):
+ return value.pack()
+ if value is None:
+ value = self._value
+ return struct.pack('!8B', *[int(v, 16) for v in value.split(':')])
class Char(GenericType):
diff --git a/pyof/v0x01/controller2switch/features_reply.py b/pyof/v0x01/controller2switch/features_reply.py
index 8dff843..33f5541 100644
--- a/pyof/v0x01/controller2switch/features_reply.py
+++ b/pyof/v0x01/controller2switch/features_reply.py
@@ -62,7 +62,7 @@ class SwitchFeatures(GenericMessage):
Args:
xid (int): xid to be used on the message header.
- datapath_id (int): UBInt64 datapath unique ID.
+ datapath_id (str or :class:`.DPID`): datapath unique ID.
The lower 48-bits are for MAC address, while
the upper 16-bits are implementer-defined.
n_buffers (int): UBInt32 max packets buffered at once.
|
kytos/python-openflow
|
013e9c24bd4c234f0bf39210686a58af4c586d7d
|
diff --git a/tests/v0x01/test_controller2switch/test_features_reply.py b/tests/v0x01/test_controller2switch/test_features_reply.py
index 2728ded..3f56325 100644
--- a/tests/v0x01/test_controller2switch/test_features_reply.py
+++ b/tests/v0x01/test_controller2switch/test_features_reply.py
@@ -1,5 +1,5 @@
"""Echo request message tests."""
-from pyof.foundation.basic_types import HWAddress
+from pyof.foundation.basic_types import HWAddress, DPID
from pyof.v0x01.common.phy_port import PhyPort, PortConfig, PortState
from pyof.v0x01.controller2switch.features_reply import FeaturesReply
from tests.test_struct import TestStruct
@@ -19,8 +19,9 @@ class TestFeaturesReply(TestStruct):
def _get_kwargs():
- return {'xid': 2, 'datapath_id': 1, 'n_buffers': 256, 'n_tables': 254,
- 'capabilities': 0x000000c7, 'actions': 4095, 'ports': _get_ports()}
+ return {'xid': 2, 'datapath_id': DPID('00:00:00:00:00:00:00:01'),
+ 'n_buffers': 256, 'n_tables': 254, 'capabilities': 0x000000c7,
+ 'actions': 4095, 'ports': _get_ports()}
def _get_ports():
|
Fix tests
I ran a bisect, assigning zeros to the default DPID value, and it led to d207690facd4844557fb7d53aebbd5d2fb66a414 (which added a new basic type: dpid).
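As a standalone illustration of the pack logic in the patch above, a DPID string such as '00:00:00:00:00:00:00:01' becomes eight big-endian bytes:
```
import struct

def pack_dpid(value):
    # same expression the patch uses in DPID.pack()
    return struct.pack('!8B', *[int(v, 16) for v in value.split(':')])

print(pack_dpid('00:00:00:00:00:00:00:01'))
# b'\x00\x00\x00\x00\x00\x00\x00\x01'
```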
|
0.0
|
013e9c24bd4c234f0bf39210686a58af4c586d7d
|
[
"tests/v0x01/test_controller2switch/test_features_reply.py::TestFeaturesReply::test_pack",
"tests/v0x01/test_controller2switch/test_features_reply.py::TestFeaturesReply::test_unpack"
] |
[
"tests/v0x01/test_controller2switch/test_features_reply.py::TestFeaturesReply::test_minimum_size"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_git_commit_hash",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2016-10-11 23:39:44+00:00
|
mit
| 3,478 |
|
kytos__python-openflow-202
|
diff --git a/pyof/foundation/basic_types.py b/pyof/foundation/basic_types.py
index 297baa2..219a823 100644
--- a/pyof/foundation/basic_types.py
+++ b/pyof/foundation/basic_types.py
@@ -130,10 +130,11 @@ class DPID(GenericType):
self._value = ':'.join(bytes)
def pack(self, value=None):
- buffer = b''
- for value in self._value.split(":"):
- buffer += struct.pack('!B', int(value, 16))
- return buffer
+ if isinstance(value, type(self)):
+ return value.pack()
+ if value is None:
+ value = self._value
+ return struct.pack('!8B', *[int(v, 16) for v in value.split(':')])
class Char(GenericType):
@@ -217,15 +218,6 @@ class IPAddress(GenericType):
super().__init__(address)
self.netmask = int(netmask)
- @property
- def wildcard_netmask(self):
- """Calculate a wildcard to openflow netmask.
-
- Returns:
- netmask (bits): Wildcarded bits for netmask
- """
- return self.max_prefix - self.netmask
-
def pack(self, value=None):
"""Pack the value as a binary representation.
diff --git a/pyof/v0x01/common/flow_match.py b/pyof/v0x01/common/flow_match.py
index 79e7556..5940bec 100644
--- a/pyof/v0x01/common/flow_match.py
+++ b/pyof/v0x01/common/flow_match.py
@@ -160,7 +160,7 @@ class Match(GenericStruct):
else:
self.wildcards |= FlowWildCards.OFPFW_NW_SRC_MASK
shift = FlowWildCards.OFPFW_NW_SRC_SHIFT
- wildcard = value.wildcard_netmask << shift
+ wildcard = (value.max_prefix - value.netmask) << shift
self.wildcards -= wildcard
else:
wildcard_field = "OFPFW_{}".format(field.upper())
diff --git a/pyof/v0x01/common/utils.py b/pyof/v0x01/common/utils.py
index 81ded8f..274130c 100644
--- a/pyof/v0x01/common/utils.py
+++ b/pyof/v0x01/common/utils.py
@@ -11,7 +11,7 @@ from pyof.v0x01.asynchronous.flow_removed import FlowRemoved
from pyof.v0x01.asynchronous.packet_in import PacketIn
from pyof.v0x01.asynchronous.port_status import PortStatus
# Importing controller2switch messages
-from pyof.v0x01.common.header import Type
+from pyof.v0x01.common.header import Header, Type
from pyof.v0x01.controller2switch.barrier_reply import BarrierReply
from pyof.v0x01.controller2switch.barrier_request import BarrierRequest
from pyof.v0x01.controller2switch.features_reply import FeaturesReply
@@ -34,7 +34,8 @@ from pyof.v0x01.symmetric.echo_request import EchoRequest
from pyof.v0x01.symmetric.hello import Hello
from pyof.v0x01.symmetric.vendor_header import VendorHeader
-__all__ = ('new_message_from_header', 'new_message_from_message_type')
+__all__ = ('new_message_from_header', 'new_message_from_message_type',
+ 'unpack_message')
def new_message_from_message_type(message_type):
@@ -116,3 +117,14 @@ def new_message_from_header(header):
message.header.length = header.length
return message
+
+
+def unpack_message(buffer):
+ """Unpack the whole buffer, including header pack."""
+ hdr_size = Header().get_size()
+ hdr_buff, msg_buff = buffer[:hdr_size], buffer[hdr_size:]
+ header = Header()
+ header.unpack(hdr_buff)
+ message = new_message_from_header(header)
+ message.unpack(msg_buff)
+ return message
diff --git a/pyof/v0x01/controller2switch/features_reply.py b/pyof/v0x01/controller2switch/features_reply.py
index 8dff843..33f5541 100644
--- a/pyof/v0x01/controller2switch/features_reply.py
+++ b/pyof/v0x01/controller2switch/features_reply.py
@@ -62,7 +62,7 @@ class SwitchFeatures(GenericMessage):
Args:
xid (int): xid to be used on the message header.
- datapath_id (int): UBInt64 datapath unique ID.
+ datapath_id (str or :class:`.DPID`): datapath unique ID.
The lower 48-bits are for MAC address, while
the upper 16-bits are implementer-defined.
n_buffers (int): UBInt32 max packets buffered at once.
diff --git a/pyof/v0x01/controller2switch/stats_reply.py b/pyof/v0x01/controller2switch/stats_reply.py
index 1383aca..795ecce 100644
--- a/pyof/v0x01/controller2switch/stats_reply.py
+++ b/pyof/v0x01/controller2switch/stats_reply.py
@@ -1,14 +1,9 @@
"""Response the stat request packet from the controller."""
-
-# System imports
-
-# Third-party imports
-
from pyof.foundation.base import GenericMessage
-from pyof.foundation.basic_types import BinaryData, UBInt16
-# Local imports
+from pyof.foundation.basic_types import BinaryData, FixedTypeList, UBInt16
from pyof.v0x01.common.header import Header, Type
-from pyof.v0x01.controller2switch.common import FlowStats, StatsTypes
+from pyof.v0x01.controller2switch.common import (DescStats, PortStats,
+ StatsTypes)
__all__ = ('StatsReply',)
@@ -35,7 +30,7 @@ class StatsReply(GenericMessage):
self.flags = flags
self.body = body
- def unpack(self, buff, offset=0):
+ def unpack(self, buff):
"""Unpack a binary message into this object's attributes.
Unpack the binary value *buff* and update this object attributes based
@@ -43,29 +38,21 @@ class StatsReply(GenericMessage):
of the message **without the header**.
This class' unpack method is like the :meth:`.GenericMessage.unpack`
- one, except for the ``actions`` attribute which has a length determined
- by the ``actions_len`` attribute.
+ one, except for the ``body`` attribute which has its type determined
+ by the ``body_type`` attribute.
Args:
buff (bytes): Binary data package to be unpacked, without the
header.
- offset (int): Where to begin unpacking.
"""
- stats = []
- super().unpack(buff, offset)
- data = self.body.value
- if self.body_type == StatsTypes.OFPST_FLOW:
- ReplyClass = FlowStats
- else:
- # TODO: Implement other types
- return
+ super().unpack(buff)
- while len(data) > 0:
- length = UBInt16()
- length.unpack(data[:2])
- item = ReplyClass()
- item.unpack(data[0:length.value])
- stats.append(item)
- data = data[length.value:]
+ if self.body_type == StatsTypes.OFPST_PORT:
+ self._unpack_body(FixedTypeList(pyof_class=PortStats))
+ elif self.body_type == StatsTypes.OFPST_DESC:
+ self._unpack_body(DescStats())
- self.body = stats
+ def _unpack_body(self, obj):
+ """Unpack `body` using `obj` and replace it by the result."""
+ obj.unpack(self.body.value)
+ self.body = obj
diff --git a/pyof/v0x01/controller2switch/stats_request.py b/pyof/v0x01/controller2switch/stats_request.py
index be1a5cf..838dc41 100644
--- a/pyof/v0x01/controller2switch/stats_request.py
+++ b/pyof/v0x01/controller2switch/stats_request.py
@@ -8,7 +8,7 @@ from pyof.foundation.base import GenericMessage
from pyof.foundation.basic_types import BinaryData, UBInt16
# Local imports
from pyof.v0x01.common.header import Header, Type
-from pyof.v0x01.controller2switch.common import StatsTypes
+from pyof.v0x01.controller2switch.common import PortStatsRequest, StatsTypes
__all__ = ('StatsRequest',)
@@ -34,3 +34,26 @@ class StatsRequest(GenericMessage):
self.body_type = body_type
self.flags = flags
self.body = body
+
+ def pack(self):
+ """Pack according to :attr:`body_type`.
+
+ Make `body` a binary pack before packing this object. Then, restore
+ body.
+ """
+ if self.body_type == StatsTypes.OFPST_PORT:
+ backup = self.body
+ self.body = self.body.pack()
+ pack = super().pack()
+ self.body = backup
+ return pack
+ else:
+ return super().pack()
+
+ def unpack(self, buff):
+ """Unpack according to :attr:`body_type`."""
+ super().unpack(buff)
+ if self.body_type == StatsTypes.OFPST_PORT:
+ buff = self.body.value
+ self.body = PortStatsRequest()
+ self.body.unpack(buff)
diff --git a/setup.cfg b/setup.cfg
index b16a827..deec796 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -3,8 +3,9 @@ exclude = .eggs,ENV,build,docs/conf.py,venv
[pylama]
linters = pyflakes,isort,pep8,mccabe,pep257,pylint
-ignore = D105,D203,I0011
+ignore = D105,D203,D213,I0011
# D105: Missing docstring in magic method [pep257]
# D203: 1 blank line required before class docstring (found 0) [pep257]
# Conflicts with D211: No blank lines allowed before class docstring
+# D213: Should be ignored by default, but sometimes it is not
# I0011: I0011 Locally disabling no-member (E1101) [pylint]
|
kytos/python-openflow
|
013e9c24bd4c234f0bf39210686a58af4c586d7d
|
diff --git a/tests/test_foundation/test_basic_types.py b/tests/test_foundation/test_basic_types.py
index 27b856e..af5329b 100644
--- a/tests/test_foundation/test_basic_types.py
+++ b/tests/test_foundation/test_basic_types.py
@@ -146,14 +146,22 @@ class TestIPAddress(unittest.TestCase):
unpacked.unpack(packed)
self.assertEqual(ip_addr.value, unpacked.value)
- def test_wildcard(self):
- """Testing get wildcard from IPAddress."""
+ def test_netmask(self):
+ """Testing get netmask from IPAddress."""
ip_addr = basic_types.IPAddress('192.168.0.1/24')
- self.assertEqual(ip_addr.wildcard_netmask, 8)
+ self.assertEqual(ip_addr.netmask, 24)
ip_addr = basic_types.IPAddress('192.168.0.1/16')
- self.assertEqual(ip_addr.wildcard_netmask, 16)
+ self.assertEqual(ip_addr.netmask, 16)
ip_addr = basic_types.IPAddress('192.168.0.1')
- self.assertEqual(ip_addr.wildcard_netmask, 0)
+ self.assertEqual(ip_addr.netmask, 32)
+
+ def test_max_prefix(self):
+ """Testing get max_prefix from IPAddress."""
+ ip_addr = basic_types.IPAddress()
+ self.assertEqual(ip_addr.max_prefix, 32)
+ ip_addr = basic_types.IPAddress('192.168.0.35/16')
+ self.assertEqual(ip_addr.max_prefix, 32)
+
def test_get_size(self):
"""Testing get_size from IPAddress."""
diff --git a/tests/v0x01/test_controller2switch/test_features_reply.py b/tests/v0x01/test_controller2switch/test_features_reply.py
index 2728ded..3f56325 100644
--- a/tests/v0x01/test_controller2switch/test_features_reply.py
+++ b/tests/v0x01/test_controller2switch/test_features_reply.py
@@ -1,5 +1,5 @@
"""Echo request message tests."""
-from pyof.foundation.basic_types import HWAddress
+from pyof.foundation.basic_types import HWAddress, DPID
from pyof.v0x01.common.phy_port import PhyPort, PortConfig, PortState
from pyof.v0x01.controller2switch.features_reply import FeaturesReply
from tests.test_struct import TestStruct
@@ -19,8 +19,9 @@ class TestFeaturesReply(TestStruct):
def _get_kwargs():
- return {'xid': 2, 'datapath_id': 1, 'n_buffers': 256, 'n_tables': 254,
- 'capabilities': 0x000000c7, 'actions': 4095, 'ports': _get_ports()}
+ return {'xid': 2, 'datapath_id': DPID('00:00:00:00:00:00:00:01'),
+ 'n_buffers': 256, 'n_tables': 254, 'capabilities': 0x000000c7,
+ 'actions': 4095, 'ports': _get_ports()}
def _get_ports():
diff --git a/tests/v0x01/test_controller2switch/test_stats_request.py b/tests/v0x01/test_controller2switch/test_stats_request.py
index 1891f66..087afa6 100644
--- a/tests/v0x01/test_controller2switch/test_stats_request.py
+++ b/tests/v0x01/test_controller2switch/test_stats_request.py
@@ -1,7 +1,10 @@
"""Test for StatsRequest message."""
import unittest
-from pyof.v0x01.controller2switch import common, stats_request
+from pyof.v0x01.common.phy_port import Port
+from pyof.v0x01.common.utils import unpack_message
+from pyof.v0x01.controller2switch.common import PortStatsRequest
+from pyof.v0x01.controller2switch.stats_request import StatsRequest, StatsTypes
class TestStatsRequest(unittest.TestCase):
@@ -9,9 +12,9 @@ class TestStatsRequest(unittest.TestCase):
def setUp(self):
"""Basic test setup."""
- self.message = stats_request.StatsRequest()
+ self.message = StatsRequest()
self.message.header.xid = 1
- self.message.type = common.StatsTypes.OFPST_FLOW
+ self.message.type = StatsTypes.OFPST_FLOW
self.message.flags = 1
self.message.body = []
@@ -19,6 +22,15 @@ class TestStatsRequest(unittest.TestCase):
"""[Controller2Switch/StatsRequest] - size 12."""
self.assertEqual(self.message.get_size(), 12)
+ def test_pack_unpack_port_stats(self):
+ """Pack and unpack PortStatsRequest."""
+ body = PortStatsRequest(Port.OFPP_NONE)
+ req = StatsRequest(16909060, body_type=StatsTypes.OFPST_PORT,
+ body=body)
+ pack = req.pack()
+ unpacked = unpack_message(pack)
+ self.assertEqual(req, unpacked)
+
@unittest.skip('Not yet implemented')
def test_pack(self):
"""[Controller2Switch/StatsRequest] - packing."""
|
Remove attribute wildcard_mask from IPAddress class
This is only used in the Match class, for now.
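A tiny check of the expression the patch above inlines into the Match class: the number of wildcarded netmask bits for an IPv4 /24 is max_prefix - netmask = 8 (the shift value below is illustrative only).
```
max_prefix, netmask, shift = 32, 24, 8
wildcard = (max_prefix - netmask) << shift
print(max_prefix - netmask, wildcard)   # 8 2048
```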
|
0.0
|
013e9c24bd4c234f0bf39210686a58af4c586d7d
|
[
"tests/test_foundation/test_basic_types.py::TestUBInt8::test_get_size",
"tests/test_foundation/test_basic_types.py::TestUBInt16::test_get_size",
"tests/test_foundation/test_basic_types.py::TestUBInt32::test_get_size",
"tests/test_foundation/test_basic_types.py::TestChar::test_get_size",
"tests/test_foundation/test_basic_types.py::TestChar::test_pack",
"tests/test_foundation/test_basic_types.py::TestChar::test_unpack",
"tests/test_foundation/test_basic_types.py::TestHWaddress::test_default_value",
"tests/test_foundation/test_basic_types.py::TestHWaddress::test_unpack_packed",
"tests/test_foundation/test_basic_types.py::TestIPAddress::test_get_size",
"tests/test_foundation/test_basic_types.py::TestIPAddress::test_max_prefix",
"tests/test_foundation/test_basic_types.py::TestIPAddress::test_netmask",
"tests/test_foundation/test_basic_types.py::TestIPAddress::test_unpack_packed",
"tests/test_foundation/test_basic_types.py::TestIPAddress::test_unpack_packed_with_netmask",
"tests/v0x01/test_controller2switch/test_features_reply.py::TestFeaturesReply::test_minimum_size",
"tests/v0x01/test_controller2switch/test_features_reply.py::TestFeaturesReply::test_pack",
"tests/v0x01/test_controller2switch/test_features_reply.py::TestFeaturesReply::test_unpack",
"tests/v0x01/test_controller2switch/test_stats_request.py::TestStatsRequest::test_get_size",
"tests/v0x01/test_controller2switch/test_stats_request.py::TestStatsRequest::test_pack_unpack_port_stats"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2016-10-13 13:47:35+00:00
|
mit
| 3,479 |
|
kytos__python-openflow-225
|
diff --git a/pyof/v0x01/common/header.py b/pyof/v0x01/common/header.py
index 1e62b87..c0489e5 100644
--- a/pyof/v0x01/common/header.py
+++ b/pyof/v0x01/common/header.py
@@ -73,7 +73,7 @@ class Header(GenericStruct):
length = UBInt16()
xid = UBInt32()
- def __init__(self, message_type=None, length=None, xid=randint(0, MAXID)):
+ def __init__(self, message_type=None, length=None, xid=None):
"""The constructor takes the optional parameters below.
Args:
@@ -84,4 +84,4 @@ class Header(GenericStruct):
super().__init__()
self.message_type = message_type
self.length = length
- self.xid = xid
+ self.xid = randint(0, MAXID) if xid is None else xid
|
kytos/python-openflow
|
7185bc685a1e11d159eb4aba5384b795c373567f
|
diff --git a/tests/v0x01/test_common/test_header.py b/tests/v0x01/test_common/test_header.py
index 003621c..3c01fd8 100644
--- a/tests/v0x01/test_common/test_header.py
+++ b/tests/v0x01/test_common/test_header.py
@@ -1,6 +1,7 @@
"""Testing Header structure."""
import os
import unittest
+from unittest.mock import patch
from pyof.v0x01.common.header import Header, Type
@@ -43,3 +44,9 @@ class TestHeader(unittest.TestCase):
self.assertEqual(self.message.version, 1)
f.close()
+
+ @patch('pyof.v0x01.common.header.randint')
+ def test_random_xid(self, m):
+ """Each Header instantiations without xid should call randint."""
+ Header(), Header() # noqa
+ self.assertEqual(m.call_count, 2)
|
Fix Header's random xid
A single random xid value is reused during the whole execution, because the default argument is evaluated only once, at function definition time.
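A minimal reproduction of the pitfall behind this issue, independent of the pyof classes: a default argument is evaluated once, when the function is defined, so every call that omits xid reuses the same "random" number. The fixed variant mirrors the approach in the patch above.
```
from random import randint

def make_header_old(xid=randint(0, 10**6)):   # default evaluated only once
    return xid

def make_header_fixed(xid=None):              # the patch's approach
    return randint(0, 10**6) if xid is None else xid

print(make_header_old() == make_header_old())       # True: same value every call
print(make_header_fixed() == make_header_fixed())   # almost always False
```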
|
0.0
|
7185bc685a1e11d159eb4aba5384b795c373567f
|
[
"tests/v0x01/test_common/test_header.py::TestHeader::test_random_xid"
] |
[
"tests/v0x01/test_common/test_header.py::TestHeader::test_pack",
"tests/v0x01/test_common/test_header.py::TestHeader::test_size",
"tests/v0x01/test_common/test_header.py::TestHeader::test_unpack"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2016-12-14 20:39:56+00:00
|
mit
| 3,480 |
|
kytos__python-openflow-226
|
diff --git a/README.rst b/README.rst
index 3e7eddc..4712d69 100644
--- a/README.rst
+++ b/README.rst
@@ -1,32 +1,40 @@
|Experimental| |Openflow| |Tag| |Release| |Pypi| |Tests| |License|
Overview
---------
+========
-*python-openflow* is a low level library to parse OpenFlow messages. If you
-want to read an OpenFlow packet from an open socket or send a message to an
-OpenFlow switch, this is your best friend. The main features are: high
+*python-openflow* is a low level library to parse and create OpenFlow messages.
+If you want to read an OpenFlow packet from an open socket or send a message to
+an OpenFlow switch, this is your best friend. The main features are: high
performance, short learning curve and free software license.
-This library is part of `Kytos <http://kytos.io>`_ project. *python-openflow*
-was developed to be used with *Kytos* controller, but feel free to use this
-simple and intuitive library in other projects.
+This library is part of `Kytos <http://kytos.io>`_ project, but feel free to
+use this simple and intuitive library in other projects.
-This is just an overview for you to check whether this project fit your needs.
-For a more detailed documentation, please check the :doc:`python-openflow API
-Reference Manual <pyof>`.
+.. attention::
+ *python-openflow* does not perform I/O operations. To communicate with a
+ switch, you can use, for example, `Kyco <http://docs.kytos.io/kyco>`_, the
+ Kytos Controller.
+
+A quick start follows for you to check whether this project fits your needs.
+For a more detailed documentation, please check the
+`python-openflow API Reference Manual <http://docs.kytos.io/python-openflow/pyof/>`_.
+
+Quick Start
+-----------
Installing
^^^^^^^^^^
For now, you can install this package from source (if you have cloned this
-repository) or via pip.
+repository) or via pip. If you are a more experienced Python user, you can
+also install it without root permissions.
.. note:: We are improving this and soon you will be able to install from the
- major distros repositories.
+ major distros' repositories.
From PyPI
-=========
+"""""""""
*python-openflow* is in PyPI, so you can easily install it via `pip3` (`pip`
for Python 3) or include this project in your `requirements.txt`. To install it
@@ -37,7 +45,7 @@ with `pip3`, run the following command:
$ sudo pip3 install python-openflow
From source code
-================
+""""""""""""""""
First you need to clone `python-openflow` repository:
@@ -56,7 +64,7 @@ install procedure:
Basic Usage Example
^^^^^^^^^^^^^^^^^^^
-See how easy is the creation of a features request message with this library.
+See how it is easy to create a feature request message with this library.
You can use ipython3 to get the advantages of autocompletion:
.. code-block:: python
@@ -67,19 +75,33 @@ You can use ipython3 to get the advantages of autocompletion:
Type.OFPT_FEATURES_REQUEST
If you need to send this message via socket, call the ``pack()`` method to get
-its binary representation that should be used to be sent throught the network:
+its binary representation to be sent through the network:
-.. code:: python3
+.. code:: python
>>> binary_msg = request.pack()
+ >>> print(binary_msg)
+ b"\x01\x05\x00\x08\x14\xad'\x8d"
+ >>> # Use a controller (e.g. Kytos Controller) to send "binary_msg"
-Please note that this library do not send or receive messages via socket. You
-have to create your own server to receive messages from switches. This library
-only helps you to handle OpenFlow messages on a more pythonic way.
+To parse a message, use ``unpack_message()``:
-.. seealso::
+.. code:: python
+
+ >>> from pyof.v0x01.common.utils import unpack_message
+ >>> binary_msg = b"\x01\x05\x00\x08\x14\xad'\x8d"
+ >>> msg = unpack_message(binary_msg)
+ >>> print(msg.header.message_type)
+ Type.OFPT_FEATURES_REQUEST
+
+Please, note that this library do not send or receive messages via socket. You
+have to create your own server to receive messages from switches. This library
+only helps you to handle OpenFlow messages in a more pythonic way.
+To communicate with switches, we also develop *Kyco*, the Kytos Controller.
- To see more examples, please visit our :doc:`examples/index` chapter.
+.. hint::
+ To see more examples, please visit our
+ `Examples <http://docs.kytos.io/python-openflow/examples>`_ section.
.. |Experimental| image:: https://img.shields.io/badge/stability-experimental-orange.svg
.. |Openflow| image:: https://img.shields.io/badge/Openflow-1.0.0-brightgreen.svg
diff --git a/pyof/v0x01/common/header.py b/pyof/v0x01/common/header.py
index 1e62b87..c0489e5 100644
--- a/pyof/v0x01/common/header.py
+++ b/pyof/v0x01/common/header.py
@@ -73,7 +73,7 @@ class Header(GenericStruct):
length = UBInt16()
xid = UBInt32()
- def __init__(self, message_type=None, length=None, xid=randint(0, MAXID)):
+ def __init__(self, message_type=None, length=None, xid=None):
"""The constructor takes the optional parameters below.
Args:
@@ -84,4 +84,4 @@ class Header(GenericStruct):
super().__init__()
self.message_type = message_type
self.length = length
- self.xid = xid
+ self.xid = randint(0, MAXID) if xid is None else xid
|
kytos/python-openflow
|
7185bc685a1e11d159eb4aba5384b795c373567f
|
diff --git a/tests/v0x01/test_common/test_header.py b/tests/v0x01/test_common/test_header.py
index 003621c..3c01fd8 100644
--- a/tests/v0x01/test_common/test_header.py
+++ b/tests/v0x01/test_common/test_header.py
@@ -1,6 +1,7 @@
"""Testing Header structure."""
import os
import unittest
+from unittest.mock import patch
from pyof.v0x01.common.header import Header, Type
@@ -43,3 +44,9 @@ class TestHeader(unittest.TestCase):
self.assertEqual(self.message.version, 1)
f.close()
+
+ @patch('pyof.v0x01.common.header.randint')
+ def test_random_xid(self, m):
+ """Each Header instantiations without xid should call randint."""
+ Header(), Header() # noqa
+ self.assertEqual(m.call_count, 2)
|
Improve Introduction and first sections on sphinx documentation
|
0.0
|
7185bc685a1e11d159eb4aba5384b795c373567f
|
[
"tests/v0x01/test_common/test_header.py::TestHeader::test_random_xid"
] |
[
"tests/v0x01/test_common/test_header.py::TestHeader::test_pack",
"tests/v0x01/test_common/test_header.py::TestHeader::test_size",
"tests/v0x01/test_common/test_header.py::TestHeader::test_unpack"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2016-12-15 01:13:42+00:00
|
mit
| 3,481 |
|
kytos__python-openflow-240
|
diff --git a/.travis.yml b/.travis.yml
index d0ab470..2c8a88d 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,17 +1,14 @@
language: python
python:
- - "3.4"
- - "3.5"
-# - "3.5-dev" # 3.5 development branch
-# - "nightly" # currently points to 3.6-dev
+ - "3.6"
# command to install dependencies
install:
- - pip install -r requirements.txt
- - pip install -r requirements-dev.txt
- - pip install -r requirements-docs.txt
+ - pip install -r requirements.txt -r requirements-dev.txt
+ - pip install coveralls
# command to run tests
script:
- python setup.py test
+ - coverage run -m unittest discover -s tests -p "test*.py"
notifications:
irc:
channels: "irc.freenode.org#kytos"
@@ -19,3 +16,5 @@ notifications:
- "%{repository}@%{branch}: %{message} (%{build_url})"
on_success: change
on_failure: change
+after_success:
+ coveralls
diff --git a/docs/.gitignore b/docs/.gitignore
index 6b8dea0..5ce97da 100644
--- a/docs/.gitignore
+++ b/docs/.gitignore
@@ -1,1 +1,3 @@
+_rebuild
+modules.rst
python.inv
diff --git a/pyof/foundation/network_types.py b/pyof/foundation/network_types.py
index a538631..d91847c 100644
--- a/pyof/foundation/network_types.py
+++ b/pyof/foundation/network_types.py
@@ -67,7 +67,7 @@ class GenericTLV:
"""Create an instance and set its attributes."""
#: type (int): The Type of the TLV Structure
self.type = tlv_type
- #: value (int): The value of the TLV Structure
+ #: value (BinaryData): The value of the TLV Structure
self._value = value
@property
@@ -78,7 +78,7 @@ class GenericTLV:
@property
def length(self):
"""Struct length in bytes."""
- return len(self.value.pack())
+ return len(self._value.pack())
@property
def header(self):
@@ -101,7 +101,7 @@ class GenericTLV:
"""
if value is None:
output = self.header.pack()
- output += self.value.pack()
+ output += self._value.pack()
return output
elif isinstance(value, type(self)):
@@ -129,7 +129,7 @@ class GenericTLV:
self.type = header.value >> 9
length = header.value & 511
begin, end = offset + 2, offset + 2 + length
- self.value = BinaryData(buffer[begin:end])
+ self._value = BinaryData(buffer[begin:end])
def get_size(self, value=None):
"""Return struct size."""
diff --git a/requirements-dev.txt b/requirements-dev.txt
index e8aacce..816c26a 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -2,6 +2,7 @@
# For running doctests (during "python setup.py test")
Sphinx >= 1.4.5
+sphinx_bootstrap_theme
# Linters
git+git://github.com/cemsbr/pylama_pylint.git@master
diff --git a/setup.py b/setup.py
index ca5e1f0..b6a20ef 100644
--- a/setup.py
+++ b/setup.py
@@ -5,7 +5,7 @@ descriptions.
"""
import sys
from abc import abstractmethod
-from subprocess import call, check_call
+from subprocess import CalledProcessError, call, check_call
from setuptools import Command, find_packages, setup
@@ -15,11 +15,13 @@ from pyof import __version__
def lint():
"""Run pylama and radon."""
files = 'tests setup.py pyof'
- print('Pylama is running. It may take a while...')
+ print('Pylama is running. It may take several seconds...')
cmd = 'pylama {}'.format(files)
- check_call(cmd, shell=True)
- print('Low grades (<= C) for Maintainability Index (if any):')
- check_call('radon mi --min=C ' + files, shell=True)
+ try:
+ check_call(cmd, shell=True)
+ except CalledProcessError as e:
+ print('Please, fix linter errors above.')
+ sys.exit(e.returncode)
class SimpleCommand(Command):
|
kytos/python-openflow
|
558038f25ef5e85cf46dfa395c1c5262919b2850
|
diff --git a/tests/test_foundation/test_network_types.py b/tests/test_foundation/test_network_types.py
new file mode 100644
index 0000000..747a862
--- /dev/null
+++ b/tests/test_foundation/test_network_types.py
@@ -0,0 +1,17 @@
+"""Test Python-openflow network types."""
+import unittest
+
+from pyof.foundation.basic_types import BinaryData
+from pyof.foundation.network_types import GenericTLV
+
+
+class TestNetworkTypes(unittest.TestCase):
+ """Reproduce bugs found."""
+
+ def test_GenTLV_value_unpack(self):
+ """Value attribute should be the same after unpacking."""
+ value = BinaryData(b'test')
+ tlv = GenericTLV(value=value)
+ tlv_unpacked = GenericTLV()
+ tlv_unpacked.unpack(tlv.pack())
+ self.assertEqual(tlv.value.value, tlv_unpacked.value.value)
diff --git a/tests/v0x01/test_asynchronous/test_error_msg.py b/tests/v0x01/test_asynchronous/test_error_msg.py
index 92a6436..589c429 100644
--- a/tests/v0x01/test_asynchronous/test_error_msg.py
+++ b/tests/v0x01/test_asynchronous/test_error_msg.py
@@ -3,7 +3,7 @@ from pyof.v0x01.asynchronous.error_msg import (BadRequestCode, ErrorMsg,
ErrorType)
from pyof.v0x01.symmetric.hello import Hello
-from ...test_struct import TestStruct
+from tests.test_struct import TestStruct
class TestErrorMsg(TestStruct):
|
Error while migrating to python 3.6
While testing the full project on Python 3.6 (running kyco against a mininet instance with 3 hosts and 3 switches, no loop version), the error below was thrown on the kytos console:
> Exception in thread Thread-198:
> Traceback (most recent call last):
> File "/usr/local/lib/python3.6/threading.py", line 916, in _bootstrap_inner
> self.run()
> File "/usr/local/lib/python3.6/threading.py", line 864, in run
> self._target(*self._args, **self._kwargs)
> File "/home/diraol/devel/kyco/kyco/utils.py", line 70, in threaded_handler
> handler(*args)
> File "/home/diraol/.virtualenvs/python-openflow/var/lib/kytos/napps/kytos/of_lldp/main.py", line 78, in update_links
> lldp.unpack(ethernet.data.value)
> File "/home/diraol/devel/python-openflow/pyof/foundation/base.py", line 431, in unpack
> size = self._unpack_attribute(name, value, buff, begin)
> File "/home/diraol/devel/python-openflow/pyof/foundation/base.py", line 441, in _unpack_attribute
> attribute.unpack(buff, begin)
> File "/home/diraol/devel/python-openflow/pyof/foundation/network_types.py", line 132, in unpack
> self.value = BinaryData(buffer[begin:end])
> AttributeError: can't set attribute
@cemsbr, please, address this error as soon as possible so we can have the project working on Python 3.6 =)
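A minimal reproduction of the traceback above, with an illustrative class rather than the real GenericTLV: assigning to a property that has no setter raises this AttributeError, which is why the patch writes to the backing `self._value` instead.
```
class TLVSketch:
    def __init__(self, value=b""):
        self._value = value

    @property
    def value(self):          # read-only: no setter defined
        return self._value

tlv = TLVSketch()
try:
    tlv.value = b"test"       # what the old unpack() effectively did
except AttributeError as err:
    print(err)                # "can't set attribute" on Python 3.6
tlv._value = b"test"          # what the patched unpack() does instead
```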
|
0.0
|
558038f25ef5e85cf46dfa395c1c5262919b2850
|
[
"tests/test_foundation/test_network_types.py::TestNetworkTypes::test_GenTLV_value_unpack"
] |
[
"tests/v0x01/test_asynchronous/test_error_msg.py::TestErrorMsg::test_minimum_size",
"tests/v0x01/test_asynchronous/test_error_msg.py::TestErrorMsg::test_pack_unpack_with_empty_data",
"tests/v0x01/test_asynchronous/test_error_msg.py::TestErrorMsg::test_pack_unpack_with_hello"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-01-18 13:33:44+00:00
|
mit
| 3,482 |
|
kytos__python-openflow-278
|
diff --git a/pyof/v0x04/common/action.py b/pyof/v0x04/common/action.py
index 9b550a2..dba4ad0 100644
--- a/pyof/v0x04/common/action.py
+++ b/pyof/v0x04/common/action.py
@@ -72,7 +72,6 @@ class ActionType(Enum):
OFPAT_POP_PBB = 27
#: Experimenter type
OFPAT_EXPERIMENTER = 0xffff
- OFPAT_VENDOR = 0xffff
class ControllerMaxLen(Enum):
diff --git a/pyof/v0x04/common/port.py b/pyof/v0x04/common/port.py
index db3b41c..41b5be3 100644
--- a/pyof/v0x04/common/port.py
+++ b/pyof/v0x04/common/port.py
@@ -44,7 +44,7 @@ class PortNo(Enum):
#: Wildcard port used only for flow mod (delete) and flow stats requests.
#: Selects all flows regardless of output port (including flows with no
#: output port).
- OFPP_NONE = 0xffffffff
+ OFPP_ANY = 0xffffffff
class PortConfig(GenericBitMask):
diff --git a/pyof/v0x04/controller2switch/packet_out.py b/pyof/v0x04/controller2switch/packet_out.py
index e23fc87..1520f85 100644
--- a/pyof/v0x04/controller2switch/packet_out.py
+++ b/pyof/v0x04/controller2switch/packet_out.py
@@ -13,7 +13,7 @@ __all__ = ('PacketOut',)
# Classes
#: in_port valid virtual port values, for validation
-_VIRT_IN_PORTS = (PortNo.OFPP_LOCAL, PortNo.OFPP_CONTROLLER, PortNo.OFPP_NONE)
+_VIRT_IN_PORTS = (PortNo.OFPP_LOCAL, PortNo.OFPP_CONTROLLER, PortNo.OFPP_ANY)
class PacketOut(GenericMessage):
|
kytos/python-openflow
|
7c6e4aba1043f29a35a464a1cea02faa7a141a49
|
diff --git a/tests/v0x04/test_controller2switch/test_packet_out.py b/tests/v0x04/test_controller2switch/test_packet_out.py
index 733fff2..803eb1a 100644
--- a/tests/v0x04/test_controller2switch/test_packet_out.py
+++ b/tests/v0x04/test_controller2switch/test_packet_out.py
@@ -23,7 +23,7 @@ class TestPacketOut(TestStruct):
super().setUpClass()
super().set_raw_dump_file('v0x04', 'ofpt_packet_out')
super().set_raw_dump_object(PacketOut, xid=80, buffer_id=5,
- in_port=PortNo.OFPP_NONE)
+ in_port=PortNo.OFPP_ANY)
super().set_minimum_size(24)
def test_valid_virtual_in_ports(self):
@@ -34,7 +34,7 @@ class TestPacketOut(TestStruct):
raise self.skipTest(NO_RAW)
else:
valid = (PortNo.OFPP_LOCAL, PortNo.OFPP_CONTROLLER,
- PortNo.OFPP_NONE)
+ PortNo.OFPP_ANY)
msg = self.get_raw_object()
for in_port in valid:
msg.in_port = in_port
|
OF 1.3 Compliance: additional ENUM attribute
File: `v0x04/common/action.py`
`class ActionType(Enum):`
There is an enum attribute that is not present in the 1.3 spec and has a duplicate value. Remove `OFPAT_EXPERIMENTER` and keep only `OFPAT_VENDOR`:
```
OFPAT_EXPERIMENTER = 0xffff
OFPAT_VENDOR = 0xffff
```
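A side note on why the duplicate value matters, shown with an illustrative enum: in Python's Enum, a second member with the same value becomes an alias of the first, so only one of the two names appears when iterating.
```
from enum import Enum

class ActionTypeSketch(Enum):
    OFPAT_EXPERIMENTER = 0xffff
    OFPAT_VENDOR = 0xffff     # silently becomes an alias

print(list(ActionTypeSketch))
# [<ActionTypeSketch.OFPAT_EXPERIMENTER: 65535>]
print(ActionTypeSketch.OFPAT_VENDOR is ActionTypeSketch.OFPAT_EXPERIMENTER)
# True
```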
|
0.0
|
7c6e4aba1043f29a35a464a1cea02faa7a141a49
|
[
"tests/v0x04/test_controller2switch/test_packet_out.py::TestPacketOut::test_minimum_size"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-03-07 14:55:09+00:00
|
mit
| 3,483 |
|
kytos__python-openflow-279
|
diff --git a/pyof/v0x04/common/action.py b/pyof/v0x04/common/action.py
index 9b550a2..78d6004 100644
--- a/pyof/v0x04/common/action.py
+++ b/pyof/v0x04/common/action.py
@@ -72,7 +72,6 @@ class ActionType(Enum):
OFPAT_POP_PBB = 27
#: Experimenter type
OFPAT_EXPERIMENTER = 0xffff
- OFPAT_VENDOR = 0xffff
class ControllerMaxLen(Enum):
@@ -254,6 +253,8 @@ class ActionPopMPLS(GenericStruct):
length = UBInt16(8)
#: Ethertype
ethertype = UBInt16()
+ #: Padding
+ pad = Pad(2)
def __init__(self, ethertype=None):
"""Action structure for OFPAT_POP_MPLS.
diff --git a/pyof/v0x04/common/port.py b/pyof/v0x04/common/port.py
index db3b41c..41b5be3 100644
--- a/pyof/v0x04/common/port.py
+++ b/pyof/v0x04/common/port.py
@@ -44,7 +44,7 @@ class PortNo(Enum):
#: Wildcard port used only for flow mod (delete) and flow stats requests.
#: Selects all flows regardless of output port (including flows with no
#: output port).
- OFPP_NONE = 0xffffffff
+ OFPP_ANY = 0xffffffff
class PortConfig(GenericBitMask):
diff --git a/pyof/v0x04/controller2switch/packet_out.py b/pyof/v0x04/controller2switch/packet_out.py
index e23fc87..1520f85 100644
--- a/pyof/v0x04/controller2switch/packet_out.py
+++ b/pyof/v0x04/controller2switch/packet_out.py
@@ -13,7 +13,7 @@ __all__ = ('PacketOut',)
# Classes
#: in_port valid virtual port values, for validation
-_VIRT_IN_PORTS = (PortNo.OFPP_LOCAL, PortNo.OFPP_CONTROLLER, PortNo.OFPP_NONE)
+_VIRT_IN_PORTS = (PortNo.OFPP_LOCAL, PortNo.OFPP_CONTROLLER, PortNo.OFPP_ANY)
class PacketOut(GenericMessage):
|
kytos/python-openflow
|
7c6e4aba1043f29a35a464a1cea02faa7a141a49
|
diff --git a/tests/v0x04/test_controller2switch/test_packet_out.py b/tests/v0x04/test_controller2switch/test_packet_out.py
index 733fff2..803eb1a 100644
--- a/tests/v0x04/test_controller2switch/test_packet_out.py
+++ b/tests/v0x04/test_controller2switch/test_packet_out.py
@@ -23,7 +23,7 @@ class TestPacketOut(TestStruct):
super().setUpClass()
super().set_raw_dump_file('v0x04', 'ofpt_packet_out')
super().set_raw_dump_object(PacketOut, xid=80, buffer_id=5,
- in_port=PortNo.OFPP_NONE)
+ in_port=PortNo.OFPP_ANY)
super().set_minimum_size(24)
def test_valid_virtual_in_ports(self):
@@ -34,7 +34,7 @@ class TestPacketOut(TestStruct):
raise self.skipTest(NO_RAW)
else:
valid = (PortNo.OFPP_LOCAL, PortNo.OFPP_CONTROLLER,
- PortNo.OFPP_NONE)
+ PortNo.OFPP_ANY)
msg = self.get_raw_object()
for in_port in valid:
msg.in_port = in_port
|
OF 1.3 Compliance: Missing attribute
File: `v0x04/common/action.py`
`class ActionPopMPLS(GenericStruct):`
The attribute `pad[2]` is missing:
`pad = Pad(2)`
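A quick size check behind the missing pad, assuming the 8-byte ofp_action_pop_mpls layout (type, len, ethertype, pad[2]) from the spec: without the 2-byte pad the packed struct only adds up to 6 bytes.
```
import struct

print(struct.calcsize('!HHH2x'))   # 8 with the pad
print(struct.calcsize('!HHH'))     # 6 without it
```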
|
0.0
|
7c6e4aba1043f29a35a464a1cea02faa7a141a49
|
[
"tests/v0x04/test_controller2switch/test_packet_out.py::TestPacketOut::test_minimum_size"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-03-07 18:32:34+00:00
|
mit
| 3,484 |
|
kytos__python-openflow-280
|
diff --git a/pyof/v0x04/common/action.py b/pyof/v0x04/common/action.py
index 9b550a2..78d6004 100644
--- a/pyof/v0x04/common/action.py
+++ b/pyof/v0x04/common/action.py
@@ -72,7 +72,6 @@ class ActionType(Enum):
OFPAT_POP_PBB = 27
#: Experimenter type
OFPAT_EXPERIMENTER = 0xffff
- OFPAT_VENDOR = 0xffff
class ControllerMaxLen(Enum):
@@ -254,6 +253,8 @@ class ActionPopMPLS(GenericStruct):
length = UBInt16(8)
#: Ethertype
ethertype = UBInt16()
+ #: Padding
+ pad = Pad(2)
def __init__(self, ethertype=None):
"""Action structure for OFPAT_POP_MPLS.
diff --git a/pyof/v0x04/common/port.py b/pyof/v0x04/common/port.py
index db3b41c..41b5be3 100644
--- a/pyof/v0x04/common/port.py
+++ b/pyof/v0x04/common/port.py
@@ -44,7 +44,7 @@ class PortNo(Enum):
#: Wildcard port used only for flow mod (delete) and flow stats requests.
#: Selects all flows regardless of output port (including flows with no
#: output port).
- OFPP_NONE = 0xffffffff
+ OFPP_ANY = 0xffffffff
class PortConfig(GenericBitMask):
diff --git a/pyof/v0x04/controller2switch/common.py b/pyof/v0x04/controller2switch/common.py
index bebb8b2..42a4a46 100644
--- a/pyof/v0x04/controller2switch/common.py
+++ b/pyof/v0x04/controller2switch/common.py
@@ -251,9 +251,11 @@ class DescStats(GenericStruct):
sw_desc = Char(length=DESC_STR_LEN)
#: Serial number
serial_num = Char(length=SERIAL_NUM_LEN)
+ #: Datapath description
+ dp_desc = Char(length=DESC_STR_LEN)
def __init__(self, mfr_desc=None, hw_desc=None, sw_desc=None,
- serial_num=None):
+ serial_num=None, dp_desc=None):
"""The constructor just assings parameters to object attributes.
Args:
@@ -261,12 +263,14 @@ class DescStats(GenericStruct):
hw_desc (str): Hardware description
sw_desc (str): Software description
serial_num (str): Serial number
+ dp_desc (str): Datapath description
"""
super().__init__()
self.mfr_desc = mfr_desc
self.hw_desc = hw_desc
self.sw_desc = sw_desc
self.serial_num = serial_num
+ self.dp_desc = dp_desc
class FlowStats(GenericStruct):
diff --git a/pyof/v0x04/controller2switch/packet_out.py b/pyof/v0x04/controller2switch/packet_out.py
index e23fc87..1520f85 100644
--- a/pyof/v0x04/controller2switch/packet_out.py
+++ b/pyof/v0x04/controller2switch/packet_out.py
@@ -13,7 +13,7 @@ __all__ = ('PacketOut',)
# Classes
#: in_port valid virtual port values, for validation
-_VIRT_IN_PORTS = (PortNo.OFPP_LOCAL, PortNo.OFPP_CONTROLLER, PortNo.OFPP_NONE)
+_VIRT_IN_PORTS = (PortNo.OFPP_LOCAL, PortNo.OFPP_CONTROLLER, PortNo.OFPP_ANY)
class PacketOut(GenericMessage):
|
kytos/python-openflow
|
7c6e4aba1043f29a35a464a1cea02faa7a141a49
|
diff --git a/tests/v0x04/test_controller2switch/test_packet_out.py b/tests/v0x04/test_controller2switch/test_packet_out.py
index 733fff2..803eb1a 100644
--- a/tests/v0x04/test_controller2switch/test_packet_out.py
+++ b/tests/v0x04/test_controller2switch/test_packet_out.py
@@ -23,7 +23,7 @@ class TestPacketOut(TestStruct):
super().setUpClass()
super().set_raw_dump_file('v0x04', 'ofpt_packet_out')
super().set_raw_dump_object(PacketOut, xid=80, buffer_id=5,
- in_port=PortNo.OFPP_NONE)
+ in_port=PortNo.OFPP_ANY)
super().set_minimum_size(24)
def test_valid_virtual_in_ports(self):
@@ -34,7 +34,7 @@ class TestPacketOut(TestStruct):
raise self.skipTest(NO_RAW)
else:
valid = (PortNo.OFPP_LOCAL, PortNo.OFPP_CONTROLLER,
- PortNo.OFPP_NONE)
+ PortNo.OFPP_ANY)
msg = self.get_raw_object()
for in_port in valid:
msg.in_port = in_port
|
OF 1.3 Compliance: Missing attribute
File: `v0x04/controller2switch/common.py`
`class DescStats(GenericStruct):`
Following attribute is missing:
`dp_desc = Char(length=DESC_STR_LEN)`
|
0.0
|
7c6e4aba1043f29a35a464a1cea02faa7a141a49
|
[
"tests/v0x04/test_controller2switch/test_packet_out.py::TestPacketOut::test_minimum_size"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-03-07 19:39:27+00:00
|
mit
| 3,485 |
|
kytos__python-openflow-281
|
diff --git a/pyof/v0x04/common/action.py b/pyof/v0x04/common/action.py
index 9b550a2..78d6004 100644
--- a/pyof/v0x04/common/action.py
+++ b/pyof/v0x04/common/action.py
@@ -72,7 +72,6 @@ class ActionType(Enum):
OFPAT_POP_PBB = 27
#: Experimenter type
OFPAT_EXPERIMENTER = 0xffff
- OFPAT_VENDOR = 0xffff
class ControllerMaxLen(Enum):
@@ -254,6 +253,8 @@ class ActionPopMPLS(GenericStruct):
length = UBInt16(8)
#: Ethertype
ethertype = UBInt16()
+ #: Padding
+ pad = Pad(2)
def __init__(self, ethertype=None):
"""Action structure for OFPAT_POP_MPLS.
diff --git a/pyof/v0x04/common/port.py b/pyof/v0x04/common/port.py
index db3b41c..41b5be3 100644
--- a/pyof/v0x04/common/port.py
+++ b/pyof/v0x04/common/port.py
@@ -44,7 +44,7 @@ class PortNo(Enum):
#: Wildcard port used only for flow mod (delete) and flow stats requests.
#: Selects all flows regardless of output port (including flows with no
#: output port).
- OFPP_NONE = 0xffffffff
+ OFPP_ANY = 0xffffffff
class PortConfig(GenericBitMask):
diff --git a/pyof/v0x04/controller2switch/common.py b/pyof/v0x04/controller2switch/common.py
index bebb8b2..1bcc883 100644
--- a/pyof/v0x04/controller2switch/common.py
+++ b/pyof/v0x04/controller2switch/common.py
@@ -251,9 +251,11 @@ class DescStats(GenericStruct):
sw_desc = Char(length=DESC_STR_LEN)
#: Serial number
serial_num = Char(length=SERIAL_NUM_LEN)
+ #: Datapath description
+ dp_desc = Char(length=DESC_STR_LEN)
def __init__(self, mfr_desc=None, hw_desc=None, sw_desc=None,
- serial_num=None):
+ serial_num=None, dp_desc=None):
"""The constructor just assings parameters to object attributes.
Args:
@@ -261,12 +263,14 @@ class DescStats(GenericStruct):
hw_desc (str): Hardware description
sw_desc (str): Software description
serial_num (str): Serial number
+ dp_desc (str): Datapath description
"""
super().__init__()
self.mfr_desc = mfr_desc
self.hw_desc = hw_desc
self.sw_desc = sw_desc
self.serial_num = serial_num
+ self.dp_desc = dp_desc
class FlowStats(GenericStruct):
@@ -454,9 +458,9 @@ class PortStats(GenericStruct):
class PortStatsRequest(GenericStruct):
"""Body for ofp_stats_request of type OFPST_PORT."""
- port_no = UBInt16()
+ port_no = UBInt32()
#: Align to 64-bits.
- pad = Pad(6)
+ pad = Pad(4)
def __init__(self, port_no=None):
"""The constructor just assings parameters to object attributes.
diff --git a/pyof/v0x04/controller2switch/packet_out.py b/pyof/v0x04/controller2switch/packet_out.py
index e23fc87..1520f85 100644
--- a/pyof/v0x04/controller2switch/packet_out.py
+++ b/pyof/v0x04/controller2switch/packet_out.py
@@ -13,7 +13,7 @@ __all__ = ('PacketOut',)
# Classes
#: in_port valid virtual port values, for validation
-_VIRT_IN_PORTS = (PortNo.OFPP_LOCAL, PortNo.OFPP_CONTROLLER, PortNo.OFPP_NONE)
+_VIRT_IN_PORTS = (PortNo.OFPP_LOCAL, PortNo.OFPP_CONTROLLER, PortNo.OFPP_ANY)
class PacketOut(GenericMessage):
|
kytos/python-openflow
|
7c6e4aba1043f29a35a464a1cea02faa7a141a49
|
diff --git a/tests/v0x04/test_controller2switch/test_packet_out.py b/tests/v0x04/test_controller2switch/test_packet_out.py
index 733fff2..803eb1a 100644
--- a/tests/v0x04/test_controller2switch/test_packet_out.py
+++ b/tests/v0x04/test_controller2switch/test_packet_out.py
@@ -23,7 +23,7 @@ class TestPacketOut(TestStruct):
super().setUpClass()
super().set_raw_dump_file('v0x04', 'ofpt_packet_out')
super().set_raw_dump_object(PacketOut, xid=80, buffer_id=5,
- in_port=PortNo.OFPP_NONE)
+ in_port=PortNo.OFPP_ANY)
super().set_minimum_size(24)
def test_valid_virtual_in_ports(self):
@@ -34,7 +34,7 @@ class TestPacketOut(TestStruct):
raise self.skipTest(NO_RAW)
else:
valid = (PortNo.OFPP_LOCAL, PortNo.OFPP_CONTROLLER,
- PortNo.OFPP_NONE)
+ PortNo.OFPP_ANY)
msg = self.get_raw_object()
for in_port in valid:
msg.in_port = in_port
|
OF 1.3 Compliance: Wrong pad attribute size
File: `v0x04/controller2switch/common.py`
`class PortStatsRequest(GenericStruct):`
In the source code the pad attribute size is set to 6, but in the 1.3 spec it is 4.
Spec 1.3: `pad[4]`
Source Code: `pad = Pad(6)`
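A quick, self-contained way to check the corrected layout (a sketch using only Python's standard library, not the pyof types): a 32-bit `port_no` followed by 4 pad bytes yields the 8-byte, 64-bit-aligned request body the spec describes.
```
import struct

# ofp_port_stats_request body in OF 1.3: uint32 port_no + uint8 pad[4].
OFPP_ANY = 0xFFFFFFFF
body = struct.pack("!I4x", OFPP_ANY)
assert len(body) == 8  # 64-bit aligned, per the spec
```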
|
0.0
|
7c6e4aba1043f29a35a464a1cea02faa7a141a49
|
[
"tests/v0x04/test_controller2switch/test_packet_out.py::TestPacketOut::test_minimum_size"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-03-08 12:04:33+00:00
|
mit
| 3,486 |
|
kytos__python-openflow-287
|
diff --git a/pyof/v0x04/controller2switch/common.py b/pyof/v0x04/controller2switch/common.py
index d4f740b..048d5f2 100644
--- a/pyof/v0x04/controller2switch/common.py
+++ b/pyof/v0x04/controller2switch/common.py
@@ -14,6 +14,9 @@ from pyof.v0x04.asynchronous.port_status import PortReason
from pyof.v0x04.common.action import ActionHeader
from pyof.v0x04.common.flow_match import Match
from pyof.v0x04.common.header import Header
+from pyof.v0x04.controller2switch.meter_mod import (Meter, MeterFlags,
+ MeterBandHeader,
+ ListOfMeterBandHeader)
# Third-party imports
@@ -23,7 +26,8 @@ __all__ = ('AggregateStatsReply', 'AggregateStatsRequest', 'Bucket',
'GroupDescStats', 'GroupFeatures', 'GroupStats',
'GroupStatsRequest', 'ListOfActions', 'MultipartTypes', 'PortStats',
'PortStatsRequest', 'QueueStats', 'QueueStatsRequest', 'StatsTypes',
- 'TableStats')
+ 'TableStats', 'MeterMultipartRequest', 'MeterConfig')
+
# Enums
@@ -857,3 +861,89 @@ class SwitchConfig(GenericMessage):
super().__init__(xid)
self.flags = flags
self.miss_send_len = miss_send_len
+
+
+class MeterMultipartRequest(GenericStruct):
+ """MeterMultipartRequest structure.
+
+ This class represents the structure for ofp_meter_multipart_request.
+ This structure is a body of OFPMP_METER and OFPMP_METER_CONFIG requests.
+ """
+
+ # Meter instance, or OFPM_ALL.
+ meter_id = UBInt32(enum_ref=Meter)
+
+ # Align to 64 bits.
+ pad = Pad(4)
+
+ def __init__(self, meter_id=Meter.OFPM_ALL):
+        """The constructor of MeterMultipartRequest receives the parameters
+ below.
+
+ Args:
+            meter_id(Meter): Meter identifier. The value Meter.OFPM_ALL is used
+ to refer to all Meters on the switch.
+ """
+
+ super().__init__()
+ self.meter_id = meter_id
+
+
+class MeterConfig(GenericStruct):
+    """MeterConfig is a class that represents the ofp_meter_config structure.
+
+ Body of reply to OFPMP_METER_CONFIG request.
+ """
+ # Length of this entry.
+ length = UBInt16()
+ # All OFPMC_* that apply.
+ flags = UBInt16(enum_ref=MeterFlags)
+ # Meter instance.
+ meter_id = UBInt32(enum_ref=Meter)
+ # The bands length is inferred from the length field.
+ bands = ListOfMeterBandHeader()
+
+ def __init__(self, flags=MeterFlags.OFPMF_STATS, meter_id=Meter.OFPM_ALL,
+ bands=[]):
+ """The Constructor of MeterConfig receives the parameters below.
+
+ Args:
+            flags(MeterFlags): Meter configuration flags. The default value is
+ MeterFlags.OFPMF_STATS
+            meter_id(Meter): Meter identifier. The value Meter.OFPM_ALL is used
+ to refer to all Meters on the switch.
+ bands(list): List of MeterBandHeader instances.
+ """
+ super().__init__()
+ self.flags = flags
+ self.meter_id = meter_id
+ self.bands = bands
+ self.update_length()
+
+ def update_length(self):
+ self.length = self.get_size()
+
+
+ def pack(self, value=None):
+        """Update the length of this instance and pack it.
+
+ Args:
+ value: Structure to be packed.
+ """
+ self.update_length()
+ return super().pack(value)
+
+ def unpack(self, buff=None, offset=0):
+ """Unpack *buff* into this object.
+        This method will convert binary data into a readable value according
+ to the attribute format.
+ Args:
+ buff (bytes): Binary buffer.
+ offset (int): Where to begin unpacking.
+ Raises:
+ :exc:`~.exceptions.UnpackException`: If unpack fails.
+ """
+ length = UBInt16()
+        length.unpack(buff, offset)
+
+        super().unpack(buff[:offset + length.value], offset)
diff --git a/pyof/v0x04/controller2switch/meter_mod.py b/pyof/v0x04/controller2switch/meter_mod.py
index 1d2786a..ea88335 100644
--- a/pyof/v0x04/controller2switch/meter_mod.py
+++ b/pyof/v0x04/controller2switch/meter_mod.py
@@ -60,6 +60,11 @@ class MeterBandType(Enum):
#: Experimenter meter band.
OFPMBT_EXPERIMENTER = 0xFFFF
+ def find_class(self):
+ """Method used to return a class related with this type."""
+ types = {1: MeterBandDrop, 2: MeterBandDscpRemark,
+ 3: MeterBandExperimenter}
+ return types[self.value]
class MeterBandHeader(GenericStruct):
"""Common header for all meter bands."""
@@ -69,21 +74,45 @@ class MeterBandHeader(GenericStruct):
rate = UBInt32()
burst_size = UBInt32()
- def __init__(self, band_type=None, length=None, rate=None,
- burst_size=None):
+ def __init__(self, band_type=None, rate=None, burst_size=None):
"""Instance attributes assignments.
Args:
band_type (MeterBandType): One of OFPMBT_*.
- length (int): Length in bytes of this band.
rate (int): Rate for this band.
burst_size (int): Size of bursts.
"""
super().__init__()
self.band_type = band_type
- self.length = length
self.rate = rate
self.burst_size = burst_size
+ self.update_length()
+
+ def update_length(self):
+ """Update the length attribute of current instance."""
+ self.length = self.get_size()
+
+ def unpack(self, buff=None, offset=0):
+ """Unpack *buff* into this object.
+
+        This method will convert binary data into a readable value according
+ to the attribute format.
+
+ Args:
+ buff (bytes): Binary buffer.
+ offset (int): Where to begin unpacking.
+
+ Raises:
+ :exc:`~.exceptions.UnpackException`: If unpack fails.
+ """
+ band_type = UBInt16(enum_ref=MeterBandType)
+ band_type.unpack(buff, offset)
+ self.__class__ = MeterBandType(band_type.value).find_class()
+
+ length = UBInt16()
+ length.unpack(buff, offset=offset+2)
+
+        super().unpack(buff[:offset + length.value], offset)
class MeterMod(GenericMessage):
@@ -114,80 +143,67 @@ class MeterMod(GenericMessage):
self.bands = bands
-class MeterBandDrop(GenericStruct):
+class MeterBandDrop(MeterBandHeader):
"""OFPMBT_DROP band - drop packets."""
- band_type = UBInt16(MeterBandType.OFPMBT_DROP, enum_ref=MeterBandType)
- length = UBInt16()
- rate = UBInt32()
- burst_size = UBInt32()
pad = Pad(4)
- def __init__(self, length=None, rate=None, burst_size=None):
+ def __init__(self, rate=None, burst_size=None):
"""Instance attributes assignment.
Args:
- length (int): Length in bytes of this band.
rate (int): Rate for dropping packets.
burst_size (int): Size of bursts.
"""
- super().__init__()
- self.length = length
- self.rate = rate
- self.burst_size = burst_size
+ super().__init__(MeterBandType.OFPMBT_DROP, rate, burst_size)
-class MeterBandDscpRemark(GenericStruct):
+class MeterBandDscpRemark(MeterBandHeader):
"""OFPMBT_DSCP_REMARK band - Remark DSCP in the IP header."""
- band_type = UBInt16(MeterBandType.OFPMBT_DSCP_REMARK,
- enum_ref=MeterBandType)
- length = UBInt16()
- rate = UBInt32()
- burst_size = UBInt32()
prec_level = UBInt8()
pad = Pad(3)
- def __init__(self, length=None, rate=None, burst_size=None,
- prec_level=None):
+ def __init__(self, rate=None, burst_size=None, prec_level=None):
"""Instance attributes assignment.
Args:
- length (int): Length in bytes of this band.
rate (int): Rate for remarking packets.
burst_size (int): Size of bursts.
prec_level (int): Number of precendence level to substract.
"""
- super().__init__()
- self.length = length
- self.rate = rate
- self.burst_size = burst_size
+ super().__init__(MeterBandType.OFPMBT_DSCP_REMARK, rate, burst_size)
self.prec_level = prec_level
-class MeterBandExperimenter(GenericStruct):
+class MeterBandExperimenter(MeterBandHeader):
"""OFPMBT_EXPERIMENTER band - Write actions in action set."""
- band_type = UBInt16(MeterBandType.OFPMBT_EXPERIMENTER,
- enum_ref=MeterBandType)
- length = UBInt16()
- rate = UBInt32()
- burst_size = UBInt32()
experimenter = UBInt32()
- def __init__(self, length=None, rate=None, burst_size=None,
- experimenter=None):
+ def __init__(self, rate=None, burst_size=None, experimenter=None):
"""Instance attributes assignment.
Args:
- length (int): Length in bytes of this band.
rate (int): Rate for remarking packets.
burst_size (int): Size of bursts.
experimenter (int): Experimenter ID which takes the same form as in
:class:`.ExperimenterHeader`.
"""
- super().__init__()
- self.length = length
- self.rate = rate
- self.burst_size = burst_size
+ super().__init__(MeterBandType.OFPMBT_EXPERIMENTER, rate, burst_size)
self.experimenter = experimenter
+
+
+class ListOfMeterBandHeader(FixedTypeList):
+ """List of MeterBandHeader.
+
+ Represented by instances of MeterBandHeader.
+ """
+
+ def __init__(self, items=[]):
+        """The constructor just assigns parameters to object attributes.
+
+ Args:
+ items (MeterBandHeader): Instance or a list of instances.
+ """
+        super().__init__(pyof_class=MeterBandHeader, items=items)
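The `find_class`/`unpack` pair above dispatches on the leading 16-bit `band_type` field to choose the concrete band class before unpacking the rest. A standalone sketch of that idea (illustrative names, not the pyof API):
```
import struct

class Drop:
    """Stand-in for the OFPMBT_DROP band."""

class DscpRemark:
    """Stand-in for the OFPMBT_DSCP_REMARK band."""

# Map band_type codes to classes, analogous to MeterBandType.find_class().
BAND_CLASSES = {1: Drop, 2: DscpRemark}

def band_from_bytes(buff):
    (code,) = struct.unpack_from("!H", buff, 0)
    return BAND_CLASSES[code]()

assert isinstance(band_from_bytes(struct.pack("!H", 2)), DscpRemark)
```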
|
kytos/python-openflow
|
7ea588608c063b876d933618c5d8a075b401404a
|
diff --git a/tests/v0x04/test_controller2switch/test_multipart_reply.py b/tests/v0x04/test_controller2switch/test_multipart_reply.py
index 814fccd..3b28cf1 100644
--- a/tests/v0x04/test_controller2switch/test_multipart_reply.py
+++ b/tests/v0x04/test_controller2switch/test_multipart_reply.py
@@ -1,16 +1,36 @@
"""MultipartReply message test."""
-from pyof.v0x04.controller2switch.multipart_reply import MultipartReply
-from tests.test_struct import TestStruct
+from pyof.v0x04.controller2switch.common import (MeterConfig, MultipartTypes)
+from pyof.v0x04.controller2switch.meter_mod import (MeterFlags, Meter,
+ MeterBandDrop,
+ MeterBandDscpRemark,
+ ListOfMeterBandHeader)
+from pyof.v0x04.controller2switch.multipart_reply import (MultipartReply,
+ MultipartReplyFlags)
+from tests.v0x04.test_struct import TestStruct
-class TestMultipartReply(TestStruct):
- """Test the MultipartReply message."""
+class TestTableFeatures(TestStruct):
+    """Test a MultipartReply message carrying a MeterConfig body."""
@classmethod
def setUpClass(cls):
"""Configure raw file and its object in parent class (TestDump)."""
super().setUpClass()
- super().set_raw_dump_file('v0x04', 'ofpt_multipart_reply')
- super().set_raw_dump_object(MultipartReply, xid=3, multipart_type=0,
- flags=1, body=0)
+ super().set_message(MultipartReply, xid=16,
+ multipart_type=MultipartTypes.OFPMP_METER_CONFIG,
+ flags=MultipartReplyFlags.OFPMPF_REPLY_MORE,
+ body=cls.meter_config_instance())
super().set_minimum_size(16)
+
+ @classmethod
+ def meter_config_instance(cls):
+ """Method used to create a MeterConfig instance."""
+ return MeterConfig(bands=cls.list_of_meters())
+
+ @staticmethod
+ def list_of_meters():
+ """Method used to instantiate a ListOfMeterBandHeader with some instances."""
+ meters = [MeterBandDrop(rate=6, burst_size=3),
+ MeterBandDscpRemark(rate=1,burst_size=4,prec_level=2),
+ MeterBandDrop(rate=9, burst_size=1)]
+ return ListOfMeterBandHeader(items=[meters])
|
OF 1.3 Compliance: Missing class ofp_meter_config
Class `ofp_meter_config` is missing.
Page 73 of 1.3 Spec.
```
struct ofp_meter_config {
uint16_t length;
uint16_t flags;
uint32_t meter_id;
struct ofp_meter_band_header bands[0];
};
OFP_ASSERT(sizeof(struct ofp_meter_config) == 8);
```
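For reference, the fixed 8-byte prefix of that struct can be packed and unpacked with the standard library alone (a sketch, assuming `OFPM_ALL = 0xffffffff` as in the spec); the variable-length `bands` array follows it on the wire.
```
import struct

OFPM_ALL = 0xFFFFFFFF
# uint16 length, uint16 flags, uint32 meter_id
prefix = struct.pack("!HHI", 8, 0, OFPM_ALL)
assert len(prefix) == 8  # matches OFP_ASSERT(sizeof(struct ofp_meter_config) == 8)
assert struct.unpack("!HHI", prefix) == (8, 0, OFPM_ALL)
```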
|
0.0
|
7ea588608c063b876d933618c5d8a075b401404a
|
[
"tests/v0x04/test_controller2switch/test_multipart_reply.py::TestTableFeatures::test_minimum_size"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-03-13 17:04:39+00:00
|
mit
| 3,487 |
|
kytos__python-openflow-288
|
diff --git a/pyof/v0x04/controller2switch/common.py b/pyof/v0x04/controller2switch/common.py
index 048d5f2..eb546cb 100644
--- a/pyof/v0x04/controller2switch/common.py
+++ b/pyof/v0x04/controller2switch/common.py
@@ -8,6 +8,7 @@ from pyof.foundation.base import GenericBitMask, GenericMessage, GenericStruct
from pyof.foundation.basic_types import (Char, FixedTypeList, Pad, UBInt8,
UBInt16, UBInt32, UBInt64)
from pyof.foundation.constants import DESC_STR_LEN, SERIAL_NUM_LEN
+from pyof.v0x04.controller2switch.meter_mod import MeterBandType, MeterFlags
from pyof.v0x04.asynchronous.flow_removed import FlowRemovedReason
from pyof.v0x04.asynchronous.packet_in import PacketInReason
from pyof.v0x04.asynchronous.port_status import PortReason
@@ -26,8 +27,8 @@ __all__ = ('AggregateStatsReply', 'AggregateStatsRequest', 'Bucket',
'GroupDescStats', 'GroupFeatures', 'GroupStats',
'GroupStatsRequest', 'ListOfActions', 'MultipartTypes', 'PortStats',
'PortStatsRequest', 'QueueStats', 'QueueStatsRequest', 'StatsTypes',
- 'TableStats', 'MeterMultipartRequest', 'MeterConfig')
-
+ 'TableStats', 'MeterMultipartRequest', 'MeterConfig',
+ 'MeterFeatures')
# Enums
@@ -947,3 +948,31 @@ class MeterConfig(GenericStruct):
length.unpack(buff,offset)
super().unpack(buff[:offset+length.value],offset)
+
+
+class MeterFeatures(GenericStruct):
+ """Body of reply to OFPMP_METER_FEATURES request. Meter features."""
+
+ max_meter = UBInt32()
+ band_types = UBInt32(enum_ref=MeterBandType)
+ capabilities = UBInt32(enum_ref=MeterFlags)
+ max_bands = UBInt8()
+ max_color = UBInt8()
+ pad = Pad(2)
+
+ def __init__(self, max_meter=None, band_types=None, capabilities=None,
+ max_bands=None, max_color=None):
+ """The Constructor of MeterFeatures receives the parameters below.
+
+ Args:
+ max_meter(int): Maximum number of meters.
+ band_types(Meter): Bitmaps of OFPMBT_* values supported.
+ capabilities(MeterFlags): Bitmaps of "ofp_meter_flags".
+ max_bands(int): Maximum bands per meters
+ max_color(int): Maximum color value
+ """
+ self.max_meter = max_meter
+ self.band_types = band_types
+ self.capabilities = capabilities
+ self.max_bands = max_bands
+ self.max_color = max_color
|
kytos/python-openflow
|
422a8c1d28bc1f9848319e47c33e1019f9b33765
|
diff --git a/tests/v0x04/test_controller2switch/test_meter_features.py b/tests/v0x04/test_controller2switch/test_meter_features.py
new file mode 100644
index 0000000..db5a08b
--- /dev/null
+++ b/tests/v0x04/test_controller2switch/test_meter_features.py
@@ -0,0 +1,28 @@
+"""Test of v0x04 meter features module."""
+
+from pyof.v0x04.controller2switch.common import MeterFeatures, MultipartTypes
+from pyof.v0x04.controller2switch.meter_mod import MeterBandType, MeterFlags
+from pyof.v0x04.controller2switch.multipart_reply import (MultipartReply,
+ MultipartReplyFlags)
+
+from tests.v0x04.test_struct import TestStruct
+
+class TestMeterFeatures(TestStruct):
+ """Class to test MeterFeatures structures."""
+
+ @classmethod
+ def setUpClass(cls):
+ """Configure raw file and its object in parent class (TestDump)."""
+ super().setUpClass()
+ super().set_message(MultipartReply, xid=17,
+ multipart_type=MultipartTypes.OFPMP_METER_FEATURES,
+ flags=MultipartReplyFlags.OFPMPF_REPLY_MORE,
+ body=cls.meter_feature_instance())
+ super().set_minimum_size(16)
+
+ @classmethod
+ def meter_feature_instance(cls):
+ """Method used to create a MeterFeature instance."""
+ return MeterFeatures(max_meter=200,max_bands=20, max_color=4,
+ band_types=MeterBandType.OFPMBT_DROP,
+ capabilities=MeterFlags.OFPMF_KBPS)
|
OF 1.3 Compliance: Missing class ofp_meter_features
Class `ofp_meter_features` is missing.
Page 74 of 1.3 Spec.
```
struct ofp_meter_features {
uint32_t max_meter;
uint32_t band_types;
uint32_t capabilities;
uint8_t max_bands;
uint8_t max_color;
uint8_t pad[2];
};
OFP_ASSERT(sizeof(struct ofp_meter_features) == 16);
```
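The same 16-byte layout, sketched with the standard library and illustrative values only:
```
import struct

# uint32 max_meter, uint32 band_types, uint32 capabilities,
# uint8 max_bands, uint8 max_color, uint8 pad[2]
body = struct.pack("!IIIBB2x", 200, 0x2, 0x1, 20, 4)
assert len(body) == 16  # matches OFP_ASSERT(sizeof(struct ofp_meter_features) == 16)
```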
|
0.0
|
422a8c1d28bc1f9848319e47c33e1019f9b33765
|
[
"tests/v0x04/test_controller2switch/test_meter_features.py::TestMeterFeatures::test_minimum_size"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-03-13 17:09:12+00:00
|
mit
| 3,488 |
|
kytos__python-openflow-343
|
diff --git a/pyof/v0x01/common/queue.py b/pyof/v0x01/common/queue.py
index 8bb6fd0..b33d776 100644
--- a/pyof/v0x01/common/queue.py
+++ b/pyof/v0x01/common/queue.py
@@ -49,21 +49,21 @@ class ListOfProperties(FixedTypeList):
class QueuePropHeader(GenericStruct):
"""Describe the header of each queue property."""
- property = UBInt16(enum_ref=QueueProperties)
- len = UBInt16()
+ queue_property = UBInt16(enum_ref=QueueProperties)
+ length = UBInt16()
#: 64-bit alignment
pad = Pad(4)
- def __init__(self, prop=None, length=None):
+ def __init__(self, queue_property=None, length=None):
"""The contructor takes the paremeters below.
Args:
- property (QueueProperties): The queue property.
- len (int): Length of property, including this header.
+ queue_property (QueueProperties): The queue property.
+ length (int): Length of property, including this header.
"""
super().__init__()
- self.property = prop
- self.len = length
+ self.queue_property = queue_property
+ self.length = length
class PacketQueue(GenericStruct):
@@ -93,8 +93,8 @@ class PacketQueue(GenericStruct):
class QueuePropMinRate(GenericStruct):
"""Define the minimum-rate type queue."""
- prop_header = QueuePropHeader(prop=QueueProperties.OFPQT_MIN_RATE,
- length=16)
+ prop_header = QueuePropHeader(
+ queue_property=QueueProperties.OFPQT_MIN_RATE, length=16)
rate = UBInt16()
#: 64-bit alignmet.
pad = Pad(6)
diff --git a/pyof/v0x04/common/queue.py b/pyof/v0x04/common/queue.py
index 56b3f37..c48f592 100644
--- a/pyof/v0x04/common/queue.py
+++ b/pyof/v0x04/common/queue.py
@@ -35,22 +35,22 @@ class QueuePropHeader(GenericStruct):
"""Describe the header of each queue property."""
#: One of OFPQT_*
- property = UBInt16(enum_ref=QueueProperties)
+ queue_property = UBInt16(enum_ref=QueueProperties)
#: Length of property, including this header
length = UBInt16()
#: 64-bit alignment
pad = Pad(4)
# pylint: disable=redefined-builtin
- def __init__(self, property=None, length=None):
+ def __init__(self, queue_property=None, length=None):
"""The contructor takes the paremeters below.
Args:
- property (QueueProperties): The queue property.
- len (int): Length of property, including this header.
+ queue_property (QueueProperties): The queue property.
+ length (int): Length of property, including this header.
"""
super().__init__()
- self.property = property
+ self.queue_property = queue_property
self.length = length
@@ -124,8 +124,8 @@ class ListOfQueues(FixedTypeList):
class QueuePropExperimenter(GenericStruct):
"""Experimenter queue property uses the following structure and fields."""
- prop_header = QueuePropHeader(property=QueueProperties.OFPQT_EXPERIMENTER,
- length=16)
+ prop_header = QueuePropHeader(
+ queue_property=QueueProperties.OFPQT_EXPERIMENTER, length=16)
#: Experimenter ID which takes the same form as in struct
#: ofp_experimenter_header
experimenter = UBInt32()
@@ -150,8 +150,8 @@ class QueuePropExperimenter(GenericStruct):
class QueuePropMaxRate(GenericStruct):
"""Maximum-rate queue property uses the following structure and fields."""
- prop_header = QueuePropHeader(property=QueueProperties.OFPQT_MAX_RATE,
- length=16)
+ prop_header = QueuePropHeader(
+ queue_property=QueueProperties.OFPQT_MAX_RATE, length=16)
#: In 1/10 of a percent; >1000 -> disabled.
rate = UBInt16()
#: 64-bit alignmet.
@@ -170,8 +170,8 @@ class QueuePropMaxRate(GenericStruct):
class QueuePropMinRate(GenericStruct):
"""Minimum-rate queue property uses the following structure and fields."""
- prop_header = QueuePropHeader(property=QueueProperties.OFPQT_MIN_RATE,
- length=16)
+ prop_header = QueuePropHeader(
+ queue_property=QueueProperties.OFPQT_MIN_RATE, length=16)
#: In 1/10 of a percent; >1000 -> disabled.
rate = UBInt16()
#: 64-bit alignmet.
|
kytos/python-openflow
|
e221a33f932eb6aa7ceeb45d6bc8130baac0b7f7
|
diff --git a/tests/v0x01/test_common/test_queue.py b/tests/v0x01/test_common/test_queue.py
index 89ae5ed..6054e5b 100644
--- a/tests/v0x01/test_common/test_queue.py
+++ b/tests/v0x01/test_common/test_queue.py
@@ -10,8 +10,8 @@ class TestQueuePropHeader(unittest.TestCase):
def setUp(self):
"""Basic setup for test."""
self.message = queue.QueuePropHeader()
- self.message.property = queue.QueueProperties.OFPQT_MIN_RATE
- self.message.len = 12
+ self.message.queue_property = queue.QueueProperties.OFPQT_MIN_RATE
+ self.message.length = 12
def test_get_size(self):
"""[Common/QueuePropHeader] - size 8."""
diff --git a/tests/v0x01/test_controller2switch/test_queue_get_config_reply.py b/tests/v0x01/test_controller2switch/test_queue_get_config_reply.py
index 1d7ad38..46db67f 100644
--- a/tests/v0x01/test_controller2switch/test_queue_get_config_reply.py
+++ b/tests/v0x01/test_controller2switch/test_queue_get_config_reply.py
@@ -32,6 +32,6 @@ def _get_packet_queue():
def _get_queue_properties():
"""Function used to return a list of queue properties."""
properties = []
- properties.append(QueuePropHeader(prop=QueueProperties.OFPQT_MIN_RATE,
- length=12))
+ properties.append(QueuePropHeader(
+ queue_property=QueueProperties.OFPQT_MIN_RATE, length=12))
return properties
|
Attribute with builtin name
In `/v0x04/common/queue.py`, `class QueuePropHeader` has an attribute called `property`, which is a Python builtin name. Is this needed somehow?
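The name itself is never serialized, so renaming it should not change the wire format; the practical problem is that it shadows the builtin inside the class body. A hypothetical illustration (not the pyof code):
```
class BadHeader:
    property = 0x0001  # shadows builtins.property for the rest of the class body

    # rate = property(lambda self: self._rate)  # would raise:
    # TypeError: 'int' object is not callable

class GoodHeader:
    queue_property = 0x0001  # renamed, as the patch does
```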
|
0.0
|
e221a33f932eb6aa7ceeb45d6bc8130baac0b7f7
|
[
"tests/v0x01/test_controller2switch/test_queue_get_config_reply.py::TestQueueGetConfigReply::test_minimum_size",
"tests/v0x01/test_controller2switch/test_queue_get_config_reply.py::TestQueueGetConfigReply::test_pack",
"tests/v0x01/test_controller2switch/test_queue_get_config_reply.py::TestQueueGetConfigReply::test_raw_dump_size",
"tests/v0x01/test_controller2switch/test_queue_get_config_reply.py::TestQueueGetConfigReply::test_unpack"
] |
[
"tests/v0x01/test_common/test_queue.py::TestQueuePropHeader::test_get_size",
"tests/v0x01/test_common/test_queue.py::TestPacketQueue::test_get_size",
"tests/v0x01/test_common/test_queue.py::TestQueuePropMinRate::test_get_size"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-03-31 20:31:56+00:00
|
mit
| 3,489 |
|
kytos__python-openflow-412
|
diff --git a/pyof/foundation/constants.py b/pyof/foundation/constants.py
index 966305c..b799b0e 100644
--- a/pyof/foundation/constants.py
+++ b/pyof/foundation/constants.py
@@ -11,3 +11,5 @@ OFP_MAX_PORT_NAME_LEN = 16
OFP_MAX_TABLE_NAME_LEN = 32
SERIAL_NUM_LEN = 32
DESC_STR_LEN = 256
+
+VLAN_TPID = 33024
diff --git a/pyof/foundation/network_types.py b/pyof/foundation/network_types.py
index 9c31fb4..1158903 100644
--- a/pyof/foundation/network_types.py
+++ b/pyof/foundation/network_types.py
@@ -6,9 +6,98 @@ Defines and Implements Basic Network packet types , such as Ethertnet and LLDP.
from pyof.foundation.base import GenericStruct
from pyof.foundation.basic_types import (
BinaryData, HWAddress, IPAddress, UBInt8, UBInt16)
-from pyof.foundation.exceptions import PackException
+from pyof.foundation.constants import VLAN_TPID
+from pyof.foundation.exceptions import PackException, UnpackException
-__all__ = ('Ethernet', 'GenericTLV', 'IPv4', 'TLVWithSubType', 'LLDP')
+__all__ = ('Ethernet', 'GenericTLV', 'IPv4', 'VLAN', 'TLVWithSubType', 'LLDP')
+
+
+class VLAN(GenericStruct):
+ """802.1q VLAN header."""
+
+ #: tpid (:class:`UBInt16`): Tag Protocol Identifier
+ tpid = UBInt16(VLAN_TPID)
+ #: _tci (:class:`UBInt16`): Tag Control Information - has the
+ #: Priority Code Point, DEI/CFI bit and the VLAN ID
+ _tci = UBInt16()
+
+ def __init__(self, pcp=None, cfi=None, vid=None):
+ """The constructor receives the parameters below.
+
+ If no arguments are set for a particular instance, it is interpreted as
+        absence of VLAN information, and the pack() method will return an
+ empty binary string.
+
+ Args:
+ tpid (int): Tag Protocol Identifier. Defaults to 0x8100 for 802.1q.
+ pcp (int): 802.1p Priority Code Point. Defaults to 0 for Best
+ Effort Queue.
+ cfi (int): Canonical Format Indicator. Defaults to 0 for Ethernet.
+ vid (int): VLAN ID. If no VLAN is specified, value is 0.
+ """
+ super().__init__()
+ self.tpid = VLAN_TPID
+ self.pcp = pcp
+ self.cfi = cfi
+ self.vid = vid
+
+ def pack(self, value=None):
+ """Pack the struct in a binary representation.
+
+ Merge some fields to ensure correct packing.
+
+ If no arguments are set for a particular instance, it is interpreted as
+        absence of VLAN information, and the pack() method will return an
+ empty binary string.
+
+ Returns:
+ bytes: Binary representation of this instance.
+ """
+ if isinstance(value, type(self)):
+ return value.pack()
+
+ if self.pcp is None and self.cfi is None and self.vid is None:
+ return b''
+ self.pcp = self.pcp if self.pcp is not None else 0
+ self.cfi = self.cfi if self.cfi is not None else 0
+ self.vid = self.vid if self.vid is not None else 0
+ self._tci = self.pcp << 13 | self.cfi << 12 | self.vid
+ return super().pack()
+
+ def _validate(self):
+ """Assure this is a 802.1q VLAN header instance."""
+ if self.tpid.value != VLAN_TPID:
+ raise UnpackException
+ return
+
+ def unpack(self, buff, offset=0):
+ """Unpack a binary struct into this object's attributes.
+
+ Return the values instead of the lib's basic types.
+
+        After unpacking, the absence of a `tpid` value causes the assignment
+ of None to the field values to indicate that there is no VLAN
+ information.
+
+ Args:
+ buff (bytes): Binary buffer.
+ offset (int): Where to begin unpacking.
+
+ Raises:
+ :exc:`~.exceptions.UnpackException`: If unpack fails.
+ """
+ super().unpack(buff, offset)
+ if self.tpid.value:
+ self._validate()
+ self.tpid = self.tpid.value
+ self.pcp = self._tci.value >> 13
+ self.cfi = (self._tci.value >> 12) & 1
+ self.vid = self._tci.value & 4095
+ else:
+ self.tpid = VLAN_TPID
+ self.pcp = None
+ self.cfi = None
+ self.vid = None
class Ethernet(GenericStruct):
@@ -30,11 +119,12 @@ class Ethernet(GenericStruct):
destination = HWAddress()
source = HWAddress()
+ vlan = VLAN()
ether_type = UBInt16()
data = BinaryData()
- def __init__(self, destination=None, source=None, ether_type=None,
- data=b''):
+ def __init__(self, destination=None, source=None, vlan=VLAN(),
+ ether_type=None, data=b''):
"""Create an instance and set its attributes.
Args:
@@ -50,6 +140,7 @@ class Ethernet(GenericStruct):
super().__init__()
self.destination = destination
self.source = source
+ self.vlan = vlan
self.ether_type = ether_type
self.data = data
@@ -61,6 +152,31 @@ class Ethernet(GenericStruct):
"""
return hash(self.pack())
+ def unpack(self, buff, offset=0):
+ """Unpack a binary message into this object's attributes.
+
+ Unpack the binary value *buff* and update this object attributes based
+ on the results.
+
+ Ethernet headers may have VLAN tags. If no VLAN tag is found, a
+ 'wildcard VLAN tag' is inserted to assure correct unpacking.
+
+ Args:
+ buff (bytes): Binary data package to be unpacked.
+ offset (int): Where to begin unpacking.
+
+ Raises:
+ UnpackException: If there is a struct unpacking error.
+ """
+ # Checking if the EtherType bytes are actually equal to VLAN_TPID -
+ # indicating that the packet is tagged. If it is not, we insert the
+ # equivalent to 'NULL VLAN data' (\x00\x00\x00\x00) to enable the
+ # correct unpacking process.
+ if buff[12:16] != VLAN_TPID.to_bytes(2, 'big'):
+ buff = buff[0:12] + b'\x00\x00\x00\x00' + buff[12:]
+
+ super().unpack(buff, offset)
+
class GenericTLV(GenericStruct):
"""TLV structure of LLDP packets.
@@ -217,7 +333,7 @@ class IPv4(GenericStruct):
identification=0, flags=0, offset=0, ttl=255, protocol=0,
checksum=0, source="0.0.0.0", destination="0.0.0.0",
options=b'', data=b''):
- """The contructor receives the parameters below.
+ """The constructor receives the parameters below.
Args:
version (int): IP protocol version. Defaults to 4.
diff --git a/pyof/v0x01/common/action.py b/pyof/v0x01/common/action.py
index 7041dfd..9e595e4 100644
--- a/pyof/v0x01/common/action.py
+++ b/pyof/v0x01/common/action.py
@@ -10,9 +10,9 @@ from pyof.foundation.constants import UBINT16_MAX_VALUE
# Third-party imports
-__all__ = ('ActionType', 'ActionHeader', 'ActionOutput', 'ActionEnqueue',
- 'ActionVlanVid', 'ActionVlanPCP', 'ActionDLAddr', 'ActionNWAddr',
- 'ActionNWTos', 'ActionTPPort', 'ActionVendorHeader',
+__all__ = ('ActionType', 'ActionHeader', 'ActionOutput', 'ActionStripVlan',
+ 'ActionEnqueue', 'ActionVlanVid', 'ActionVlanPCP', 'ActionDLAddr',
+ 'ActionNWAddr', 'ActionNWTos', 'ActionTPPort', 'ActionVendorHeader',
'ListOfActions')
# Enums
|
kytos/python-openflow
|
9eabc4a8fbf8de2cca853ab51f29de7a770e1b67
|
diff --git a/tests/test_foundation/test_network_types.py b/tests/test_foundation/test_network_types.py
index a72fd42..ce7b910 100644
--- a/tests/test_foundation/test_network_types.py
+++ b/tests/test_foundation/test_network_types.py
@@ -2,7 +2,8 @@
import unittest
from pyof.foundation.basic_types import BinaryData
-from pyof.foundation.network_types import GenericTLV, IPv4
+from pyof.foundation.exceptions import UnpackException
+from pyof.foundation.network_types import VLAN, Ethernet, GenericTLV, IPv4
class TestNetworkTypes(unittest.TestCase):
@@ -17,6 +18,79 @@ class TestNetworkTypes(unittest.TestCase):
self.assertEqual(tlv.value.value, tlv_unpacked.value.value)
+class TestEthernet(unittest.TestCase):
+ """Test Ethernet frames."""
+
+ def test_Ethernet_pack(self):
+ """Test pack method of Ethernet class without VLAN tag."""
+ ethernet = Ethernet(destination='00:1f:3a:3e:9a:cf',
+ source='00:15:af:d5:38:98', ether_type=0x800,
+ data=b'testdata')
+ packed = ethernet.pack()
+ expected = b'\x00\x1f:>\x9a\xcf\x00\x15\xaf\xd58\x98\x08\x00testdata'
+ self.assertEqual(packed, expected)
+
+ def test_Ethernet_unpack(self):
+ """Test pack method of Ethernet class without VLAN tag."""
+ raw = b'\x00\x15\xaf\xd58\x98\x00\x1f:>\x9a\xcf\x08\x00testdata'
+ expected = Ethernet(destination='00:15:af:d5:38:98',
+ source='00:1f:3a:3e:9a:cf', ether_type=0x800,
+ data=b'testdata')
+ expected.pack()
+ unpacked = Ethernet()
+ unpacked.unpack(raw)
+ self.assertEqual(unpacked, expected)
+
+ def test_Tagged_Ethernet_pack(self):
+ """Test pack method of Ethernet class including VLAN tag."""
+ ethernet = Ethernet(destination='00:1f:3a:3e:9a:cf',
+ source='00:15:af:d5:38:98', vlan=VLAN(vid=200),
+ ether_type=0x800, data=b'testdata')
+ packed = ethernet.pack()
+ expected = b'\x00\x1f:>\x9a\xcf\x00\x15\xaf\xd58'
+ expected += b'\x98\x81\x00\x00\xc8\x08\x00testdata'
+ self.assertEqual(packed, expected)
+
+ def test_Tagged_Ethernet_unpack(self):
+ """Test pack method of Ethernet class including VLAN tag."""
+ raw = b'\x00\x15\xaf\xd58\x98\x00\x1f:>'
+ raw += b'\x9a\xcf\x81\x00!^\x08\x00testdata'
+ expected = Ethernet(destination='00:15:af:d5:38:98',
+ source='00:1f:3a:3e:9a:cf', vlan=VLAN(pcp=1,
+ vid=350),
+ ether_type=0x800, data=b'testdata')
+ expected.pack()
+ unpacked = Ethernet()
+ unpacked.unpack(raw)
+ self.assertEqual(unpacked, expected)
+
+
+class TestVLAN(unittest.TestCase):
+ """Test VLAN headers."""
+
+ def test_VLAN_pack(self):
+ """Test pack method of VLAN class."""
+ vlan = VLAN(pcp=3, vid=20)
+ packed = vlan.pack()
+ expected = b'\x81\x00`\x14'
+ self.assertEqual(packed, expected)
+
+ def test_VLAN_unpack(self):
+ """Test unpack method of VLAN class."""
+ raw = b'\x81\x00\xa0{'
+ expected = VLAN(pcp=5, vid=123)
+ unpacked = VLAN()
+ unpacked.unpack(raw)
+ self.assertEqual(unpacked, expected)
+
+ def test_unpack_wrong_tpid(self):
+ """Raise UnpackException if the tpid is not VLAN_TPID."""
+ raw = b'\x12\x34\xa0{'
+ vlan = VLAN()
+ with self.assertRaises(UnpackException):
+ vlan.unpack(raw)
+
+
class TestIPv4(unittest.TestCase):
"""Test IPv4 packets."""
|
Implement VLAN Class as Network Types
@jab1982 will start this implementation, but @renanrodrigo, please feel free to start it instead.
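The heart of the patch above is the 802.1Q TCI bit layout merged in `pack()`: a 3-bit PCP, a 1-bit CFI/DEI flag and a 12-bit VID behind the 0x8100 TPID. A standalone sketch of that arithmetic using only the standard library:
```
import struct

VLAN_TPID = 0x8100  # 33024, the 802.1q Tag Protocol Identifier

def pack_vlan(pcp=0, cfi=0, vid=0):
    tci = (pcp << 13) | (cfi << 12) | vid
    return struct.pack("!HH", VLAN_TPID, tci)

def unpack_vlan(raw):
    tpid, tci = struct.unpack("!HH", raw[:4])
    return tpid, tci >> 13, (tci >> 12) & 1, tci & 0x0FFF

assert pack_vlan(pcp=3, vid=20) == b"\x81\x00\x60\x14"
assert unpack_vlan(b"\x81\x00\xa0\x7b") == (0x8100, 5, 0, 123)
```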
|
0.0
|
9eabc4a8fbf8de2cca853ab51f29de7a770e1b67
|
[
"tests/test_foundation/test_network_types.py::TestNetworkTypes::test_GenTLV_value_unpack",
"tests/test_foundation/test_network_types.py::TestEthernet::test_Ethernet_pack",
"tests/test_foundation/test_network_types.py::TestEthernet::test_Ethernet_unpack",
"tests/test_foundation/test_network_types.py::TestEthernet::test_Tagged_Ethernet_pack",
"tests/test_foundation/test_network_types.py::TestEthernet::test_Tagged_Ethernet_unpack",
"tests/test_foundation/test_network_types.py::TestVLAN::test_VLAN_pack",
"tests/test_foundation/test_network_types.py::TestVLAN::test_VLAN_unpack",
"tests/test_foundation/test_network_types.py::TestVLAN::test_unpack_wrong_tpid",
"tests/test_foundation/test_network_types.py::TestIPv4::test_IPv4_checksum",
"tests/test_foundation/test_network_types.py::TestIPv4::test_IPv4_pack",
"tests/test_foundation/test_network_types.py::TestIPv4::test_IPv4_size",
"tests/test_foundation/test_network_types.py::TestIPv4::test_IPv4_unpack"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-07-20 19:46:03+00:00
|
mit
| 3,490 |
|
kytos__python-openflow-452
|
diff --git a/pyof/v0x01/controller2switch/packet_out.py b/pyof/v0x01/controller2switch/packet_out.py
index 5996d47..ad906e3 100644
--- a/pyof/v0x01/controller2switch/packet_out.py
+++ b/pyof/v0x01/controller2switch/packet_out.py
@@ -112,13 +112,20 @@ class PacketOut(GenericMessage):
self.actions_len = ListOfActions(self.actions).get_size()
def _validate_in_port(self):
- port = self.in_port
- valid = True
- if isinstance(port, Port):
- if port not in _VIRT_IN_PORTS:
- valid = False
- elif isinstance(port, int) and (port < 1 or port >=
- Port.OFPP_MAX.value):
- valid = False
- if not valid:
- raise ValidationError('{} is not a valid input port.'.format(port))
+ """Validate in_port attribute.
+
+ A valid port is either:
+
+        * Greater than 0 and less than or equal to Port.OFPP_MAX
+ * One of the valid virtual ports: Port.OFPP_LOCAL,
+ Port.OFPP_CONTROLLER or Port.OFPP_NONE
+
+ Raises:
+ ValidationError: If in_port is an invalid port.
+
+ """
+ is_valid_range = self.in_port > 0 and self.in_port <= Port.OFPP_MAX
+ is_valid_virtual_in_ports = self.in_port in _VIRT_IN_PORTS
+
+ if (is_valid_range or is_valid_virtual_in_ports) is False:
+ raise ValidationError(f'{self.in_port} is not a valid input port.')
|
kytos/python-openflow
|
fcdfda718dd8716ff8f5f41cc26eac9c74862054
|
diff --git a/tests/v0x01/test_controller2switch/test_packet_out.py b/tests/v0x01/test_controller2switch/test_packet_out.py
index 00ea797..7a277ee 100644
--- a/tests/v0x01/test_controller2switch/test_packet_out.py
+++ b/tests/v0x01/test_controller2switch/test_packet_out.py
@@ -45,14 +45,14 @@ class TestPacketOut(TestStruct):
def test_valid_physical_in_ports(self):
"""Physical port limits from 1.0.0 spec."""
- max_valid = int(Port.OFPP_MAX.value) - 1
+ max_valid = int(Port.OFPP_MAX.value)
for in_port in (1, max_valid):
self.message.in_port = in_port
self.assertTrue(self.message.is_valid())
def test_invalid_physical_in_port(self):
"""Physical port limits from 1.0.0 spec."""
- max_valid = int(Port.OFPP_MAX.value) - 1
+ max_valid = int(Port.OFPP_MAX.value)
for in_port in (-1, 0, max_valid + 1, max_valid + 2):
self.message.in_port = in_port
self.assertFalse(self.message.is_valid())
|
packetOut validation needs fix
PacketOut validation of in_port is not working correctly and needs a fix so it can pass the tests properly.
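The rule the patch encodes: `in_port` is valid when it lies in 1..OFPP_MAX inclusive or is one of the allowed virtual ports. A minimal sketch with hard-coded OF 1.0 port constants (illustrative, not the pyof enums):
```
OFPP_MAX = 0xFF00
VIRTUAL_IN_PORTS = {0xFFFD, 0xFFFE, 0xFFFF}  # CONTROLLER, LOCAL, NONE

def is_valid_in_port(port):
    return 0 < port <= OFPP_MAX or port in VIRTUAL_IN_PORTS

assert is_valid_in_port(1) and is_valid_in_port(OFPP_MAX)
assert not is_valid_in_port(0) and not is_valid_in_port(OFPP_MAX + 1)
```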
|
0.0
|
fcdfda718dd8716ff8f5f41cc26eac9c74862054
|
[
"tests/v0x01/test_controller2switch/test_packet_out.py::TestPacketOut::test_valid_physical_in_ports"
] |
[
"tests/v0x01/test_controller2switch/test_packet_out.py::TestPacketOut::test_invalid_physical_in_port",
"tests/v0x01/test_controller2switch/test_packet_out.py::TestPacketOut::test_invalid_virtual_in_ports",
"tests/v0x01/test_controller2switch/test_packet_out.py::TestPacketOut::test_minimum_size",
"tests/v0x01/test_controller2switch/test_packet_out.py::TestPacketOut::test_raw_dump_file",
"tests/v0x01/test_controller2switch/test_packet_out.py::TestPacketOut::test_valid_virtual_in_ports"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-09-13 16:19:58+00:00
|
mit
| 3,491 |
|
l-vo__photos-picker-49
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index b17ec72..b760c15 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,6 +3,7 @@
* Add CHANGELOG.md
* Add SmartPicker
* Take care that photos to retrieve count is not greater than total photos count (at AbstractPicker level)
+* On FilesystemUploader, check that target directory exists and is empty before scanning
# 0.3.1
* Allow to use a custom target directory in Dropbox
diff --git a/photospicker/uploader/filesystem_uploader.py b/photospicker/uploader/filesystem_uploader.py
index 05af0f9..a2277ae 100644
--- a/photospicker/uploader/filesystem_uploader.py
+++ b/photospicker/uploader/filesystem_uploader.py
@@ -6,7 +6,14 @@ import os
class FilesystemUploader(AbstractUploader):
"""Copy picked photo to a filesystem empty directory"""
- def initialize(self): # pragma: no cover
+ def __init__(self, folder_path):
+ """
+ Constructor
+
+ :param str folder_path: target folder path
+ """
+ super(FilesystemUploader, self).__init__(folder_path)
+
"""Check target directory"""
if not os.path.isdir(self._path):
raise UploaderException(
@@ -20,6 +27,9 @@ class FilesystemUploader(AbstractUploader):
"Directory {path} not empty".format(path=self._path)
)
+ def initialize(self):
+ pass
+
def upload(self, binary, original_filename):
"""
Upload or copy files to destination
|
l-vo/photos-picker
|
3ed989be337cfcfd97d926cdfad8573fe5db8f14
|
diff --git a/tests/uploader/test_filesystem_uploader.py b/tests/uploader/test_filesystem_uploader.py
index 9e7045c..ae278db 100644
--- a/tests/uploader/test_filesystem_uploader.py
+++ b/tests/uploader/test_filesystem_uploader.py
@@ -10,7 +10,7 @@ class TestFilesystemUploader(TestCase):
@mock.patch('os.listdir')
@mock.patch('os.path.isdir')
- def test_initialize_directory_not_found(self, is_dir_mock, listdir_mock):
+ def test_constructor_directory_not_found(self, is_dir_mock, listdir_mock):
"""
        Test that an exception is raised if the directory is not found
@@ -20,8 +20,7 @@ class TestFilesystemUploader(TestCase):
is_dir_mock.return_value = False
with self.assertRaises(UploaderException) as cm:
- sut = FilesystemUploader('/root/myfolder')
- sut.initialize()
+ FilesystemUploader('/root/myfolder')
is_dir_mock.assert_called_with('/root/myfolder')
@@ -29,7 +28,7 @@ class TestFilesystemUploader(TestCase):
@mock.patch('os.listdir')
@mock.patch('os.path.isdir')
- def test_initialize_directory_not_empty(self, is_dir_mock, listdir_mock):
+ def test_constructor_directory_not_empty(self, is_dir_mock, listdir_mock):
"""
        Test that an exception is raised if the directory is not empty
@@ -40,8 +39,7 @@ class TestFilesystemUploader(TestCase):
listdir_mock.return_value = ['myfile']
with self.assertRaises(UploaderException) as cm:
- sut = FilesystemUploader('/root/myfolder')
- sut.initialize()
+ FilesystemUploader('/root/myfolder')
is_dir_mock.assert_called_with('/root/myfolder')
listdir_mock.assert_called_with('/root/myfolder')
|
FilesystemUploader: check that directory exists and is empty in the constructor
So that the error is raised before the file scan starts.
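A minimal sketch of the requested behaviour outside the real uploader hierarchy (plain class and a generic exception, purely illustrative): fail in the constructor, before any photo scanning starts.
```
import os

class FolderTarget:
    def __init__(self, folder_path):
        if not os.path.isdir(folder_path):
            raise ValueError("Directory {path} not found".format(path=folder_path))
        if os.listdir(folder_path):
            raise ValueError("Directory {path} not empty".format(path=folder_path))
        self._path = folder_path
```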
|
0.0
|
3ed989be337cfcfd97d926cdfad8573fe5db8f14
|
[
"tests/uploader/test_filesystem_uploader.py::TestFilesystemUploader::test_constructor_directory_not_empty",
"tests/uploader/test_filesystem_uploader.py::TestFilesystemUploader::test_constructor_directory_not_found"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-11-17 08:27:23+00:00
|
mit
| 3,492 |
|
labd__commercetools-python-sdk-71
|
diff --git a/CHANGES b/CHANGES
index 8c2726a..3ab1120 100644
--- a/CHANGES
+++ b/CHANGES
@@ -1,3 +1,7 @@
+5.0.1 (2019-11-18)
+------------------
+ - Allow passing in the base auth URL when creating a Client
+
5.0.0 (2019-11-05)
------------------
This is breaking change since the commercetools api specification is moving
diff --git a/README.rst b/README.rst
index 99c3176..ea2c4d3 100644
--- a/README.rst
+++ b/README.rst
@@ -42,7 +42,7 @@ Example
client_secret="<your-client-secret>",
scope=["<scopes>"],
url="https://api.sphere.io",
- token_url="https://auth.sphere.io/oauth/token",
+ token_url="https://auth.sphere.io",
)
product = client.products.get_by_id("00633d11-c5bb-434e-b132-73f7e130b4e3")
diff --git a/src/commercetools/client.py b/src/commercetools/client.py
index 0765893..971323f 100644
--- a/src/commercetools/client.py
+++ b/src/commercetools/client.py
@@ -39,7 +39,7 @@ from commercetools.services.stores import StoreService
from commercetools.services.subscriptions import SubscriptionService
from commercetools.services.tax_categories import TaxCategoryService
from commercetools.services.types import TypeService
-from commercetools.utils import BaseTokenSaver, DefaultTokenSaver
+from commercetools.utils import BaseTokenSaver, DefaultTokenSaver, fix_token_url
class RefreshingOAuth2Session(OAuth2Session):
@@ -112,6 +112,7 @@ class Client:
del project_key, client_id, client_secret, url, token_url, scope
self._config = self._read_env_vars(config)
+ self._config["token_url"] = fix_token_url(self._config["token_url"])
self._token_saver = token_saver or DefaultTokenSaver()
self._url = self._config["url"]
self._base_url = f"{self._config['url']}/{self._config['project_key']}/"
@@ -282,16 +283,6 @@ class Client:
if not config.get("token_url"):
config["token_url"] = os.environ.get("CTP_AUTH_URL")
- # When the token_url is passed via environment variables we
- # check if we need to append /oauth/token to the url. This is
- # required since commercetools doesn't do this when outputting
- # the settings when you create an API Client.
- parts = urllib.parse.urlparse(config["token_url"])
- if parts.path == "":
- config["token_url"] = urllib.parse.urlunparse(
- (*parts[:2], "/oauth/token", *parts[3:])
- )
-
if not config["scope"]:
config["scope"] = os.environ.get("CTP_SCOPES")
if config["scope"]:
diff --git a/src/commercetools/utils.py b/src/commercetools/utils.py
index bdf2c09..51caf85 100644
--- a/src/commercetools/utils.py
+++ b/src/commercetools/utils.py
@@ -1,4 +1,5 @@
import threading
+import urllib.parse
tls = threading.local()
@@ -36,3 +37,18 @@ class DefaultTokenSaver(BaseTokenSaver):
def clear_cache(cls):
items = getattr(tls, "tokens", {})
items.clear()
+
+
+def fix_token_url(token_url: str) -> str:
+ """
+ Ensure the token url has the right format.
+
+ Often clients only pass the base url instead of the complete
+ token url, which gets confusing for users.
+ """
+ parts = urllib.parse.urlparse(token_url)
+ if parts.path == "":
+ token_url = urllib.parse.urlunparse(
+ (*parts[:2], "/oauth/token", *parts[3:])
+ )
+ return token_url
|
labd/commercetools-python-sdk
|
7f7a193fdaea3d632bdda662127e68f60995af40
|
diff --git a/tests/test_utils.py b/tests/test_utils.py
new file mode 100644
index 0000000..41ebbdb
--- /dev/null
+++ b/tests/test_utils.py
@@ -0,0 +1,15 @@
+import pytest
+
+from commercetools.utils import fix_token_url
+
+
[email protected](
+ "token_url,expected_url", [
+ ("https://auth.sphere.io", "https://auth.sphere.io/oauth/token"),
+ ("https://auth.sphere.io/oauth/token", "https://auth.sphere.io/oauth/token"),
+ ("https://auth.commercetools.co", "https://auth.commercetools.co/oauth/token"),
+ ("https://auth.sphere.io?test=123", "https://auth.sphere.io/oauth/token?test=123"),
+ ]
+)
+def test_fix_token_url(token_url, expected_url):
+ assert fix_token_url(token_url) == expected_url
|
BUG: broken auth, missing token
commercetools==5.0.0
Got valid credentials (they work with curl).
The credentials are present.
Always dies with `*** oauthlib.oauth2.rfc6749.errors.MissingTokenError: (missing_token) Missing access token parameter.`
Seems related to https://github.com/requests/requests-oauthlib/issues/324
The same happens with 4.1.0, so I believe it's the interface to oauthlib that has changed.
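The failure pattern is consistent with passing the bare auth host as `token_url` while the token endpoint actually lives under `/oauth/token`. A sketch of the normalisation the patch introduces (same logic as the new `fix_token_url` helper):
```
import urllib.parse

def normalize_token_url(token_url):
    parts = urllib.parse.urlparse(token_url)
    if parts.path == "":
        token_url = urllib.parse.urlunparse((*parts[:2], "/oauth/token", *parts[3:]))
    return token_url

assert normalize_token_url("https://auth.sphere.io") == "https://auth.sphere.io/oauth/token"
assert normalize_token_url("https://auth.sphere.io/oauth/token") == "https://auth.sphere.io/oauth/token"
```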
|
0.0
|
7f7a193fdaea3d632bdda662127e68f60995af40
|
[
"tests/test_utils.py::test_fix_token_url[https://auth.sphere.io-https://auth.sphere.io/oauth/token]",
"tests/test_utils.py::test_fix_token_url[https://auth.sphere.io/oauth/token-https://auth.sphere.io/oauth/token]",
"tests/test_utils.py::test_fix_token_url[https://auth.commercetools.co-https://auth.commercetools.co/oauth/token]",
"tests/test_utils.py::test_fix_token_url[https://auth.sphere.io?test=123-https://auth.sphere.io/oauth/token?test=123]"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-11-18 16:03:41+00:00
|
mit
| 3,493 |
|
lace__polliwog-127
|
diff --git a/polliwog/plane/plane.py b/polliwog/plane/plane.py
index fc35b8b..c115070 100644
--- a/polliwog/plane/plane.py
+++ b/polliwog/plane/plane.py
@@ -141,7 +141,6 @@ class Plane(object):
the plane (away from the normal), and 0 for points on the plane.
"""
- vg.shape.check(locals(), "points", (-1, 3))
return np.sign(self.signed_distance(points))
def points_in_front(self, points, inverted=False, ret_indices=False):
@@ -180,7 +179,6 @@ class Plane(object):
return functions.signed_distance_to_plane(points, self.equation)
def distance(self, points):
- vg.shape.check(locals(), "points", (-1, 3))
return np.absolute(self.signed_distance(points))
def project_point(self, points):
diff --git a/polliwog/segment/segment.py b/polliwog/segment/segment.py
index 49ddb24..ef6600f 100644
--- a/polliwog/segment/segment.py
+++ b/polliwog/segment/segment.py
@@ -69,40 +69,6 @@ def partition_segment(p1, p2, n_samples, endpoint=True):
] + p1
-def partition_segment_old(p1, p2, partition_size=5):
- """
- Deprecated. Please use partition_segment.
-
- For two points in n-space, return an np.ndarray of partition points at equal widths
- determined by 'partition_size' on the interior of the segment determined by p1 & p2.
-
- Accomplished by partitioning the segment into 'partition_size' sub-intervals.
-
- Partition order is oriented from p1 to p2.
-
- Args:
- p1, p2:
- 1 x N vectors
-
- partition_size:
- size of partition. should be > 1.
- """
-
- if not isinstance(partition_size, int):
- raise TypeError("partition_size should be an int.")
- elif partition_size < 2:
- raise ValueError("partition_size should be bigger than 1.")
-
- dist = np.linalg.norm(p1 - p2)
-
- unit_direction = (p2 - p1) / dist
- partition_width = dist / partition_size
-
- domain = partition_width * np.arange(1, partition_size)
-
- return p1 + unit_direction * domain[:, np.newaxis]
-
-
def closest_point_of_line_segment(points, start_points, segment_vectors):
# Adapted from public domain algorithm
# https://gdbooks.gitbooks.io/3dcollisions/content/Chapter1/closest_point_on_line.html
|
lace/polliwog
|
3953122d5f309753bddd7c1df38afe5a877d3ba9
|
diff --git a/polliwog/plane/test_plane.py b/polliwog/plane/test_plane.py
index 79a8269..4c4ae95 100644
--- a/polliwog/plane/test_plane.py
+++ b/polliwog/plane/test_plane.py
@@ -51,6 +51,7 @@ def test_returns_unsigned_distances_for_xz_plane_at_origin():
expected = np.array([502.0, 501.0])
np.testing.assert_array_equal(expected, plane.distance(pts))
+ np.testing.assert_array_equal(expected[0], plane.distance(pts[0]))
def test_returns_signed_distances_for_diagonal_plane():
@@ -103,10 +104,9 @@ def test_returns_sign_for_diagonal_plane():
pts = np.array([[425.0, 425.0, 25.0], [-500.0, -500.0, 25.0]])
- sign = plane.sign(pts)
-
expected = np.array([1.0, -1.0])
- np.testing.assert_array_equal(sign, expected)
+ np.testing.assert_array_equal(plane.sign(pts), expected)
+ np.testing.assert_array_equal(plane.sign(pts[0]), expected[0])
def test_points_in_front():
diff --git a/polliwog/segment/test_segment.py b/polliwog/segment/test_segment.py
index efe231e..5a8e17f 100644
--- a/polliwog/segment/test_segment.py
+++ b/polliwog/segment/test_segment.py
@@ -4,99 +4,9 @@ from .segment import (
closest_point_of_line_segment,
partition,
partition_segment,
- partition_segment_old,
)
-def test_partition_segment_old_raises_exception_for_invalid_partition_size_type():
- p1 = np.array([0.0, 0.0, 0.0])
- p2 = np.array([1.0, 0.0, 0.0])
-
- with pytest.raises(TypeError):
- partition_segment_old(p1, p2, "foobar")
-
-
-def test_partition_segment_old_raises_exception_for_invalid_partition_size_value():
- p1 = np.array([0.0, 0.0, 0.0])
- p2 = np.array([1.0, 0.0, 0.0])
-
- with pytest.raises(ValueError):
- partition_segment_old(p1, p2, 1)
-
-
-def test_partition_segment_old_returns_partition_for_odd_partition_size():
- p1 = np.array([0.0, 0.0, 0.0])
- p2 = np.array([2.0, 0.0, 0.0])
-
- partition_size = 4
-
- expected_partition_points = np.array(
- [[0.5, 0.0, 0.0], [1.0, 0.0, 0.0], [1.5, 0.0, 0.0]]
- )
-
- np.testing.assert_array_almost_equal(
- partition_segment_old(p1, p2, partition_size),
- expected_partition_points,
- decimal=7,
- )
-
-
-def test_partition_segment_old_returns_partition_points_for_even_partition_size():
- p1 = np.array([0.0, 0.0, 0.0])
- p2 = np.array([1.0, 0.0, 0.0])
-
- partition_size = 5
-
- expected_partition_points = np.array(
- [[0.2, 0.0, 0.0], [0.4, 0.0, 0.0], [0.6, 0.0, 0.0], [0.8, 0.0, 0.0]]
- )
-
- np.testing.assert_array_almost_equal(
- partition_segment_old(p1, p2, partition_size),
- expected_partition_points,
- decimal=7,
- )
-
-
-def test_partition_segment_old_returns_partition_points_in_oriented_order():
- p1 = np.array([0.0, 0.0, 0.0])
- p2 = np.array([1.0, 0.0, 0.0])
-
- partition_size = 5
-
- expected_partition_points = np.array(
- [[0.8, 0.0, 0.0], [0.6, 0.0, 0.0], [0.4, 0.0, 0.0], [0.2, 0.0, 0.0]]
- )
-
- np.testing.assert_array_almost_equal(
- partition_segment_old(p2, p1, partition_size),
- expected_partition_points,
- decimal=7,
- )
-
-
-def test_partition_segment_old_returns_partition_points_for_diagonal_segment():
- p1 = np.array([0.0, 0.0, 0.0])
- p2 = np.array([1.0, 1.0, 0.0])
-
- partition_size = 3
-
- dist = np.linalg.norm(p2 - p1)
- domain = [(1 / 3.0) * dist, (2 / 3.0) * dist]
-
- unit_direction = (p2 - p1) / dist
-
- expected_partition_points = np.array(
- [p1 + scalar * unit_direction for scalar in domain]
- )
-
- np.testing.assert_array_almost_equal(
- partition_segment_old(p1, p2, partition_size),
- expected_partition_points,
- decimal=7,
- )
-
-
def test_partition_segment_raises_exception_for_invalid_partition_size_type():
p1 = np.array([0.0, 0.0, 0.0])
p2 = np.array([1.0, 0.0, 0.0])
|
Remove partition_segment_old
Seems a safe bet that we'd want to remove a function with this name.
|
0.0
|
3953122d5f309753bddd7c1df38afe5a877d3ba9
|
[
"polliwog/plane/test_plane.py::test_returns_unsigned_distances_for_xz_plane_at_origin",
"polliwog/plane/test_plane.py::test_returns_sign_for_diagonal_plane"
] |
[
"polliwog/plane/test_plane.py::test_validation",
"polliwog/plane/test_plane.py::test_repr",
"polliwog/plane/test_plane.py::test_flipped",
"polliwog/plane/test_plane.py::test_returns_signed_distances_for_xz_plane_at_origin",
"polliwog/plane/test_plane.py::test_returns_signed_distances_for_diagonal_plane",
"polliwog/plane/test_plane.py::test_returns_unsigned_distances_for_diagonal_plane_at_origin",
"polliwog/plane/test_plane.py::test_signed_distance_validation",
"polliwog/plane/test_plane.py::test_points_in_front",
"polliwog/plane/test_plane.py::test_canonical_point",
"polliwog/plane/test_plane.py::test_project_point",
"polliwog/plane/test_plane.py::test_project_point_vectorized",
"polliwog/plane/test_plane.py::test_plane_from_points",
"polliwog/plane/test_plane.py::test_plane_from_points_and_vector",
"polliwog/plane/test_plane.py::test_fit_from_points",
"polliwog/plane/test_plane.py::test_line_plane_intersection",
"polliwog/plane/test_plane.py::test_line_plane_intersections",
"polliwog/plane/test_plane.py::test_line_segment_plane_intersection",
"polliwog/plane/test_plane.py::test_line_segment_plane_intersections",
"polliwog/segment/test_segment.py::test_partition_segment_raises_exception_for_invalid_partition_size_type",
"polliwog/segment/test_segment.py::test_partition_segment_raises_exception_for_invalid_partition_size_value",
"polliwog/segment/test_segment.py::test_partition_segment_returns_partition_for_odd_partition_size",
"polliwog/segment/test_segment.py::test_partition_segment_returns_partition_points_for_even_partition_size",
"polliwog/segment/test_segment.py::test_partition_segment_returns_partition_omitting_endpoint",
"polliwog/segment/test_segment.py::test_partition_adds_points_for_equal_length_line_segments",
"polliwog/segment/test_segment.py::test_partition_adds_points_for_nonequal_arbitrarily_oriented_line",
"polliwog/segment/test_segment.py::test_closest_point_of_line_segment"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-12-02 19:24:03+00:00
|
bsd-2-clause
| 3,494 |
|
lace__polliwog-128
|
diff --git a/polliwog/transform/affine_transform.py b/polliwog/transform/affine_transform.py
index 4c25c0d..feab638 100644
--- a/polliwog/transform/affine_transform.py
+++ b/polliwog/transform/affine_transform.py
@@ -18,3 +18,116 @@ def apply_affine_transform(points, transform_matrix):
transformed_points = np.delete(transformed_padded_points, 3, axis=1)
return maybe_decolumnize(transformed_points)
+
+
+def _convert_33_to_44(matrix):
+ """
+ Transform from:
+ array([[1., 2., 3.],
+ [2., 3., 4.],
+ [5., 6., 7.]])
+ to:
+ array([[1., 2., 3., 0.],
+ [2., 3., 4., 0.],
+ [5., 6., 7., 0.],
+ [0., 0., 0., 1.]])
+
+ """
+ vg.shape.check(locals(), "matrix", (3, 3))
+ result = np.pad(matrix, ((0, 1), (0, 1)), mode="constant")
+ result[3][3] = 1
+ return result
+
+
+def transform_matrix_for_rotation(rotation, ret_inverse_matrix=False):
+ """
+ Create a transformation matrix from the given 3x3 rotation matrix or a
+ Rodrigues vector.
+
+ With `ret_inverse_matrix=True`, also returns a matrix which provides
+ the reverse transform.
+ """
+ from .rodrigues import as_rotation_matrix
+
+ if rotation.shape == (3, 3):
+ forward3 = rotation
+ else:
+ vg.shape.check(locals(), "rotation", (3,))
+ forward3 = as_rotation_matrix(rotation)
+
+ forward = _convert_33_to_44(forward3)
+
+ if not ret_inverse_matrix:
+ return forward
+
+ # The inverse of a rotation matrix is its transpose.
+ inverse = forward.T
+ return forward, inverse
+
+
+def transform_matrix_for_translation(translation, ret_inverse_matrix=False):
+ """
+ Create a transformation matrix which translates by the provided
+ displacement vector.
+
+ Forward:
+
+ [[ 1, 0, 0, v_0 ],
+ [ 0, 1, 0, v_1 ],
+ [ 0, 0, 1, v_2 ],
+ [ 0, 0, 0, 1 ]]
+
+ Reverse:
+
+ [[ 1, 0, 0, -v_0 ],
+ [ 0, 1, 0, -v_1 ],
+ [ 0, 0, 1, -v_2 ],
+ [ 0, 0, 0, 1 ]]
+
+ Args:
+        translation (np.arraylike): A 3x1 vector.
+ """
+ vg.shape.check(locals(), "translation", (3,))
+
+ forward = np.eye(4)
+ forward[:, -1][:-1] = translation
+
+ if not ret_inverse_matrix:
+ return forward
+
+ inverse = np.eye(4)
+ inverse[:, -1][:-1] = -translation
+ return forward, inverse
+
+
+def transform_matrix_for_scale(scale_factor, ret_inverse_matrix=False):
+ """
+ Create a transformation matrix that scales by the given factor.
+
+ Forward:
+ [[ s_0, 0, 0, 0 ],
+ [ 0, s_1, 0, 0 ],
+ [ 0, 0, s_2, 0 ],
+ [ 0, 0, 0, 1 ]]
+
+ Reverse:
+ [[ 1/s_0, 0, 0, 0 ],
+ [ 0, 1/s_1, 0, 0 ],
+ [ 0, 0, 1/s_2, 0 ],
+ [ 0, 0, 0, 1 ]]
+
+ Args:
+        scale_factor (float): The scale factor.
+ ret_inverse_matrix (bool): When `True`, also returns a matrix which
+ provides the inverse transform.
+ """
+ if scale_factor <= 0:
+ raise ValueError("Scale factor should be greater than zero")
+
+ forward = _convert_33_to_44(np.diag(np.repeat(scale_factor, 3)))
+
+ if not ret_inverse_matrix:
+ return forward
+
+ inverse = _convert_33_to_44(np.diag(np.repeat(1.0 / scale_factor, 3)))
+ return forward, inverse
diff --git a/polliwog/transform/composite.py b/polliwog/transform/composite.py
index e0090d8..17a3c99 100644
--- a/polliwog/transform/composite.py
+++ b/polliwog/transform/composite.py
@@ -1,25 +1,5 @@
import numpy as np
import vg
-from .affine_transform import apply_affine_transform
-
-
-def _convert_33_to_44(matrix):
- """
- Transform from:
- array([[1., 2., 3.],
- [2., 3., 4.],
- [5., 6., 7.]])
- to:
- array([[1., 2., 3., 0.],
- [2., 3., 4., 0.],
- [5., 6., 7., 0.],
- [0., 0., 0., 1.]])
-
- """
- vg.shape.check(locals(), "matrix", (3, 3))
- result = np.pad(matrix, ((0, 1), (0, 1)), mode="constant")
- result[3][3] = 1
- return result
class CompositeTransform(object):
@@ -59,6 +39,8 @@ class CompositeTransform(object):
or reverse mode.
"""
+ from .affine_transform import apply_affine_transform
+
transform_matrix = self.transform_matrix_for(
from_range=from_range, reverse=reverse
)
@@ -97,7 +79,7 @@ class CompositeTransform(object):
matrix = reduce(np.dot, matrices)
return matrix if reverse else matrix.T
- def append_transform4(self, forward, reverse=None):
+ def append_transform(self, forward, reverse=None):
"""
Append an arbitrary transformation, defined by 4x4 forward and reverse
matrices.
@@ -105,56 +87,27 @@ class CompositeTransform(object):
The new transformation is added to the end. Return its index.
"""
+ vg.shape.check(locals(), "forward", (4, 4))
if reverse is None:
reverse = np.linalg.inv(forward)
+ else:
+ vg.shape.check(locals(), "reverse", (4, 4))
new_index = len(self.transforms)
self.transforms.append((forward, reverse))
return new_index
- def append_transform3(self, forward, reverse=None):
- """
- Append an arbitrary transformation, defined by 3x3 forward and reverse
- matrices.
-
- The new transformation is added to the end. Return its index.
-
- """
- vg.shape.check(locals(), "forward", (3, 3))
- forward4 = _convert_33_to_44(forward)
- if reverse is None:
- reverse4 = None
- else:
- vg.shape.check(locals(), "reverse", (3, 3))
- reverse4 = _convert_33_to_44(reverse)
- return self.append_transform4(forward4, reverse4)
-
def scale(self, factor):
"""
Scale by the given factor.
- Forward:
- [[ s_0, 0, 0, 0 ],
- [ 0, s_1, 0, 0 ],
- [ 0, 0, s_2, 0 ],
- [ 0, 0, 0, 1 ]]
-
- Reverse:
- [[ 1/s_0, 0, 0, 0 ],
- [ 0, 1/s_1, 0, 0 ],
- [ 0, 0, 1/s_2, 0 ],
- [ 0, 0, 0, 1 ]]
-
Args:
factor (float): The scale factor.
"""
- if factor <= 0:
- raise ValueError("Scale factor should be greater than zero")
-
- forward3 = np.diag(np.repeat(factor, 3))
- reverse3 = np.diag(np.repeat(1.0 / factor, 3))
+ from .affine_transform import transform_matrix_for_scale
- return self.append_transform3(forward3, reverse3)
+ forward, inverse = transform_matrix_for_scale(factor, ret_inverse_matrix=True)
+ return self.append_transform(forward, inverse)
def convert_units(self, from_units, to_units):
"""
@@ -171,61 +124,37 @@ class CompositeTransform(object):
import ounce
factor = ounce.factor(from_units, to_units)
- self.scale(factor)
+ return self.scale(factor)
def translate(self, translation):
"""
Translate by the vector provided.
- Forward:
-
- [[ 1, 0, 0, v_0 ],
- [ 0, 1, 0, v_1 ],
- [ 0, 0, 1, v_2 ],
- [ 0, 0, 0, 1 ]]
-
- Reverse:
-
- [[ 1, 0, 0, -v_0 ],
- [ 0, 1, 0, -v_1 ],
- [ 0, 0, 1, -v_2 ],
- [ 0, 0, 0, 1 ]]
-
Args:
vector (np.arraylike): A 3x1 vector.
"""
- vg.shape.check(locals(), "translation", (3,))
+ from .affine_transform import transform_matrix_for_translation
- forward = np.eye(4)
- forward[:, -1][:-1] = translation
-
- reverse = np.eye(4)
- reverse[:, -1][:-1] = -translation
-
- return self.append_transform4(forward, reverse)
+ forward, inverse = transform_matrix_for_translation(
+ translation, ret_inverse_matrix=True
+ )
+ return self.append_transform(forward, inverse)
def reorient(self, up, look):
"""
Reorient using up and look.
-
"""
from .rotation import rotation_from_up_and_look
- forward3 = rotation_from_up_and_look(up, look)
- # The inverse of a rotation matrix is its transpose.
- return self.append_transform3(forward3, forward3.T)
+ return self.rotate(rotation_from_up_and_look(up, look))
def rotate(self, rotation):
"""
- Rotate by either an explicit matrix or a rodrigues vector
+ Rotate by the given 3x3 rotation matrix or a Rodrigues vector.
"""
- from .rodrigues import as_rotation_matrix
+ from .affine_transform import transform_matrix_for_rotation
- if rotation.shape == (3, 3):
- forward3 = rotation
- else:
- vg.shape.check(locals(), "rotation", (3,))
- forward3 = as_rotation_matrix(rotation)
-
- # The inverse of a rotation matrix is its transpose.
- return self.append_transform3(forward3, forward3.T)
+ forward, inverse = transform_matrix_for_rotation(
+ rotation, ret_inverse_matrix=True
+ )
+ return self.append_transform(forward, inverse)
diff --git a/polliwog/transform/coordinate_manager.py b/polliwog/transform/coordinate_manager.py
index e301e8d..5761e56 100644
--- a/polliwog/transform/coordinate_manager.py
+++ b/polliwog/transform/coordinate_manager.py
@@ -33,11 +33,8 @@ class CoordinateManager(object):
}
)
- def append_transform4(self, *args, **kwargs):
- self._transform.append_transform4(*args, **kwargs)
-
- def append_transform3(self, *args, **kwargs):
- self._transform.append_transform3(*args, **kwargs)
+ def append_transform(self, *args, **kwargs):
+ self._transform.append_transform(*args, **kwargs)
def scale(self, *args, **kwargs):
self._transform.scale(*args, **kwargs)
|
lace/polliwog
|
b4399acbe78b92924f98d395135a2e95fd30033a
|
diff --git a/polliwog/transform/test_affine_transform.py b/polliwog/transform/test_affine_transform.py
index f97d6ea..730089e 100644
--- a/polliwog/transform/test_affine_transform.py
+++ b/polliwog/transform/test_affine_transform.py
@@ -1,28 +1,105 @@
import numpy as np
-from .affine_transform import apply_affine_transform
-
-scale_factor = np.array([3.0, 0.5, 2.0])
-transform = np.array(
- [
- [scale_factor[0], 0, 0, 0],
- [0, scale_factor[1], 0, 0],
- [0, 0, scale_factor[2], 0],
- [0, 0, 0, 1],
- ]
+import pytest
+from .affine_transform import (
+ apply_affine_transform,
+ transform_matrix_for_rotation,
+ transform_matrix_for_scale,
+ transform_matrix_for_translation,
)
-def test_apply_homogeneous():
- point = np.array([5.0, 0.0, 1.0])
- expected_point = np.array([15.0, 0.0, 2.0])
- np.testing.assert_array_equal(
- apply_affine_transform(point, transform), expected_point
+def create_cube_verts(origin, size):
+    # Create a cube. Since CompositeTransform just works on vertices,
+ # we don't need a full lace.mesh object.
+ origin = np.array(origin)
+ size = np.repeat(size, 3)
+ lower_base_plane = np.array(
+ [
+ # Lower base plane
+ origin,
+ origin + np.array([size[0], 0, 0]),
+ origin + np.array([size[0], 0, size[2]]),
+ origin + np.array([0, 0, size[2]]),
+ ]
)
+ upper_base_plane = lower_base_plane + np.array([0, size[1], 0])
+ return np.vstack([lower_base_plane, upper_base_plane])
+
+
+def create_default_cube_verts():
+ return create_cube_verts([1.0, 0.0, 0.0], 4.0)
-def test_apply_homogeneous_stacked():
+def test_apply_affine_transform():
+ scale_factor = np.array([3.0, 0.5, 2.0])
+ transform = np.array(
+ [
+ [scale_factor[0], 0, 0, 0],
+ [0, scale_factor[1], 0, 0],
+ [0, 0, scale_factor[2], 0],
+ [0, 0, 0, 1],
+ ]
+ )
+
points = np.array([[1.0, 2.0, 3.0], [5.0, 0.0, 1.0]])
expected_points = np.array([[3.0, 1.0, 6.0], [15.0, 0.0, 2.0]])
np.testing.assert_array_equal(
apply_affine_transform(points, transform), expected_points
)
+ np.testing.assert_array_equal(
+ apply_affine_transform(points[1], transform), expected_points[1]
+ )
+
+
+def test_rotate():
+ cube_v = create_default_cube_verts()
+ ways_to_rotate_around_y_a_quarter_turn = [
+ np.array([[0, 0, 1], [0, 1, 0], [-1, 0, 0]]),
+ np.array([0, np.pi / 2, 0]),
+ ]
+ for rot in ways_to_rotate_around_y_a_quarter_turn:
+ # Confidence check.
+ np.testing.assert_array_equal(cube_v[0], [1.0, 0.0, 0.0])
+ np.testing.assert_array_equal(cube_v[6], [5.0, 4.0, 4.0])
+
+ transformed_cube_v = apply_affine_transform(
+ cube_v, transform_matrix_for_rotation(rot)
+ )
+
+ np.testing.assert_array_almost_equal(transformed_cube_v[0], [0.0, 0.0, -1.0])
+ np.testing.assert_array_almost_equal(transformed_cube_v[6], [4, 4.0, -5.0])
+
+
+def test_translate():
+ cube_v = create_default_cube_verts()
+
+ # Confidence check.
+ np.testing.assert_array_equal(cube_v[0], [1.0, 0.0, 0.0])
+ np.testing.assert_array_equal(cube_v[6], [5.0, 4.0, 4.0])
+
+ transformed_cube_v = apply_affine_transform(
+ cube_v, transform_matrix_for_translation(np.array([8.0, 6.0, 7.0]))
+ )
+
+ np.testing.assert_array_equal(transformed_cube_v[0], [9.0, 6.0, 7.0])
+ np.testing.assert_array_equal(transformed_cube_v[6], [13.0, 10.0, 11.0])
+
+
+def test_scale():
+ cube_v = create_default_cube_verts()
+
+ # Confidence check.
+ np.testing.assert_array_equal(cube_v[0], [1.0, 0.0, 0.0])
+ np.testing.assert_array_equal(cube_v[6], [5.0, 4.0, 4.0])
+
+ transformed_cube_v = apply_affine_transform(
+ cube_v, transform_matrix_for_scale(10.0)
+ )
+
+ np.testing.assert_array_equal(transformed_cube_v[0], [10.0, 0.0, 0.0])
+ np.testing.assert_array_equal(transformed_cube_v[6], [50.0, 40.0, 40.0])
+
+
+def test_scale_error():
+ with pytest.raises(ValueError, match="Scale factor should be greater than zero"):
+ transform_matrix_for_scale(-1)
diff --git a/polliwog/transform/test_composite.py b/polliwog/transform/test_composite.py
index 53569b5..a6fe883 100644
--- a/polliwog/transform/test_composite.py
+++ b/polliwog/transform/test_composite.py
@@ -1,67 +1,7 @@
import numpy as np
-import pytest
import vg
from .composite import CompositeTransform
-
-
-def create_cube_verts(origin, size):
- # Create a cube. Since CompositeTransform just works on verticies,
- # we don't need a full lace.mesh object.
- origin = np.asarray(origin)
- size = np.repeat(size, 3)
- lower_base_plane = np.array(
- [
- # Lower base plane
- origin,
- origin + np.array([size[0], 0, 0]),
- origin + np.array([size[0], 0, size[2]]),
- origin + np.array([0, 0, size[2]]),
- ]
- )
- upper_base_plane = lower_base_plane + np.array([0, size[1], 0])
- return np.vstack([lower_base_plane, upper_base_plane])
-
-
-def create_default_cube_verts():
- return create_cube_verts([1.0, 0.0, 0.0], 4.0)
-
-
-def test_translate():
- transform = CompositeTransform()
- transform.translate(np.array([8.0, 6.0, 7.0]))
-
- cube_v = create_default_cube_verts()
-
- # Confidence check.
- np.testing.assert_array_equal(cube_v[0], [1.0, 0.0, 0.0])
- np.testing.assert_array_equal(cube_v[6], [5.0, 4.0, 4.0])
-
- transformed_cube_v = transform(cube_v)
-
- np.testing.assert_array_equal(transformed_cube_v[0], [9.0, 6.0, 7.0])
- np.testing.assert_array_equal(transformed_cube_v[6], [13.0, 10.0, 11.0])
-
-
-def test_scale():
- transform = CompositeTransform()
- transform.scale(10.0)
-
- cube_v = create_default_cube_verts()
-
- # Confidence check.
- np.testing.assert_array_equal(cube_v[0], [1.0, 0.0, 0.0])
- np.testing.assert_array_equal(cube_v[6], [5.0, 4.0, 4.0])
-
- transformed_cube_v = transform(cube_v)
-
- np.testing.assert_array_equal(transformed_cube_v[0], [10.0, 0.0, 0.0])
- np.testing.assert_array_equal(transformed_cube_v[6], [50.0, 40.0, 40.0])
-
-
-def test_scale_error():
- transform = CompositeTransform()
- with pytest.raises(ValueError):
- transform.scale(-1)
+from .test_affine_transform import create_default_cube_verts
def test_convert_units():
@@ -150,26 +90,6 @@ def test_reorient():
np.testing.assert_array_equal(transformed_cube_v[6], [4, 4.0, -5.0])
-def test_rotate():
- ways_to_rotate_around_y_a_quarter_turn = [
- np.array([[0, 0, 1], [0, 1, 0], [-1, 0, 0]]),
- np.array([0, np.pi / 2, 0]),
- ]
- for rot in ways_to_rotate_around_y_a_quarter_turn:
- transform = CompositeTransform()
- transform.rotate(rot)
- cube_v = create_default_cube_verts()
-
- # Confidence check.
- np.testing.assert_array_equal(cube_v[0], [1.0, 0.0, 0.0])
- np.testing.assert_array_equal(cube_v[6], [5.0, 4.0, 4.0])
-
- transformed_cube_v = transform(cube_v)
-
- np.testing.assert_array_almost_equal(transformed_cube_v[0], [0.0, 0.0, -1.0])
- np.testing.assert_array_almost_equal(transformed_cube_v[6], [4, 4.0, -5.0])
-
-
def test_reverse_transforms():
transforms = [CompositeTransform() for _ in range(5)]
diff --git a/polliwog/transform/test_coordinate_manager.py b/polliwog/transform/test_coordinate_manager.py
index 3cecb81..414f2f0 100644
--- a/polliwog/transform/test_coordinate_manager.py
+++ b/polliwog/transform/test_coordinate_manager.py
@@ -2,7 +2,7 @@ import numpy as np
import pytest
import vg
from .coordinate_manager import CoordinateManager
-from .test_composite import create_cube_verts
+from .test_affine_transform import create_cube_verts
def perform_transform_test(apply_transform_fn, expected_v0, expected_v6):
@@ -132,19 +132,10 @@ def test_coordinate_manager_invalid_tag():
def test_coordinate_manager_custom_transform():
- scale4 = np.array([[3, 0, 0, 0], [0, 3, 0, 0], [0, 0, 3, 0], [0, 0, 0, 1]])
+ scale = np.array([[3, 0, 0, 0], [0, 3, 0, 0], [0, 0, 3, 0], [0, 0, 0, 1]])
perform_transform_test(
- apply_transform_fn=lambda coordinate_manager: coordinate_manager.append_transform4(
- scale4
- ),
- expected_v0=np.array([3.0, 0.0, 0.0]),
- expected_v6=np.array([15.0, 12.0, 12.0]),
- )
-
- scale3 = np.array([[3, 0, 0], [0, 3, 0], [0, 0, 3]])
- perform_transform_test(
- apply_transform_fn=lambda coordinate_manager: coordinate_manager.append_transform3(
- scale3
+ apply_transform_fn=lambda coordinate_manager: coordinate_manager.append_transform(
+ scale
),
expected_v0=np.array([3.0, 0.0, 0.0]),
expected_v6=np.array([15.0, 12.0, 12.0]),
|
Clean up matrix functions
- [x] Stop using `matrix` functions from vg (opened lace/vg#95 about removing them)
- [ ] Add `affine_transform`
- [x] Add `apply_affine_transform` (from `vg.matrix.transform`)
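A minimal usage sketch of how the new helpers compose, assuming the `polliwog.transform.affine_transform` module layout and signatures from the patch above (not a released API reference):

```python
import numpy as np
from polliwog.transform.affine_transform import (
    apply_affine_transform,
    transform_matrix_for_rotation,
    transform_matrix_for_translation,
)

points = np.array([[1.0, 0.0, 0.0], [5.0, 4.0, 4.0]])

# Build a 4x4 matrix from a Rodrigues vector (quarter turn about +y),
# then apply it to a stack of points.
rotated = apply_affine_transform(
    points, transform_matrix_for_rotation(np.array([0.0, np.pi / 2, 0.0]))
)

# Forward/inverse pairs round-trip the points.
forward, inverse = transform_matrix_for_translation(
    np.array([8.0, 6.0, 7.0]), ret_inverse_matrix=True
)
round_tripped = apply_affine_transform(apply_affine_transform(points, forward), inverse)
np.testing.assert_array_almost_equal(round_tripped, points)
```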
|
0.0
|
b4399acbe78b92924f98d395135a2e95fd30033a
|
[
"polliwog/transform/test_affine_transform.py::test_apply_affine_transform",
"polliwog/transform/test_affine_transform.py::test_rotate",
"polliwog/transform/test_affine_transform.py::test_translate",
"polliwog/transform/test_affine_transform.py::test_scale",
"polliwog/transform/test_affine_transform.py::test_scale_error",
"polliwog/transform/test_composite.py::test_convert_units",
"polliwog/transform/test_composite.py::test_translate_then_scale",
"polliwog/transform/test_composite.py::test_scale_then_translate",
"polliwog/transform/test_composite.py::test_rotate_then_translate",
"polliwog/transform/test_composite.py::test_reorient",
"polliwog/transform/test_composite.py::test_reverse_transforms",
"polliwog/transform/test_composite.py::test_forward_reverse_equivalence",
"polliwog/transform/test_coordinate_manager.py::test_coordinate_manager_forward",
"polliwog/transform/test_coordinate_manager.py::test_coordinate_manager_forward_with_attrs",
"polliwog/transform/test_coordinate_manager.py::test_coordinate_manager_out_of_order",
"polliwog/transform/test_coordinate_manager.py::test_coordinate_manager_invalid_tag",
"polliwog/transform/test_coordinate_manager.py::test_coordinate_manager_custom_transform",
"polliwog/transform/test_coordinate_manager.py::test_coordinate_manager_convert_units",
"polliwog/transform/test_coordinate_manager.py::test_coordinate_manager_reorient",
"polliwog/transform/test_coordinate_manager.py::test_coordinate_manager_rotate"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-12-02 20:03:33+00:00
|
bsd-2-clause
| 3,495 |
|
lace__polliwog-151
|
diff --git a/polliwog/polyline/polyline.py b/polliwog/polyline/polyline.py
index 2f79230..a9fe270 100644
--- a/polliwog/polyline/polyline.py
+++ b/polliwog/polyline/polyline.py
@@ -285,15 +285,16 @@ class Polyline(object):
else:
return result
- def partition_by_length(self, max_length, ret_indices=False):
+ def subdivided_by_length(self, max_length, ret_indices=False):
"""
- Subdivide each line segment longer than max_length with
- equal-length segments, such that none of the new segments
- are longer than max_length.
-
- ret_indices: If True, return the indices of the original vertices.
- Otherwise return self for chaining.
+ Subdivide each line segment longer than `max_length` with
+ equal-length segments, such that none of the new segments are longer
+ than `max_length`. Returns a new Polyline.
+ Args:
+            max_length (float): The maximum length of a segment.
+ ret_indices (bool): When `True`, also returns the indices of the
+ original vertices.
"""
import itertools
from ..segment.segment_functions import subdivide_segment
@@ -319,45 +320,45 @@ class Polyline(object):
]
splits_of_original_vs = np.vsplit(self.v, es_to_subdivide + 1)
- self.v = np.concatenate(
- list(
- itertools.chain(
- *zip(
- splits_of_original_vs,
- vs_to_insert + [np.empty((0, 3), dtype=np.float64)],
+ new_polyline = Polyline(
+ v=np.concatenate(
+ list(
+ itertools.chain(
+ *zip(
+ splits_of_original_vs,
+ vs_to_insert + [np.empty((0, 3), dtype=np.float64)],
+ )
)
)
- )
+ ),
+ is_closed=self.is_closed,
)
- if ret_indices:
- # In a degenerate case, `partition_segment()` may return fewer than
- # the requested number of indices. So, recompute the actual number of
- # segments inserted.
- num_segments_inserted = np.zeros(old_num_e, dtype=np.int64)
- num_segments_inserted[es_to_subdivide] = [len(vs) for vs in vs_to_insert]
- stepwise_index_offsets = np.concatenate(
- [
- # The first vertex is never moved.
- np.zeros(1, dtype=np.int64),
- # In a closed polyline, the last edge goes back to vertex
- # 0. Subdivisions of that segment do not affect indexing of
- # any of the vertices (since the original end vertex is
- # still at index 0).
- num_segments_inserted[:-1]
- if self.is_closed
- else num_segments_inserted,
- ]
- )
- cumulative_index_offsets = np.sum(
- np.tril(
- np.broadcast_to(stepwise_index_offsets, (old_num_v, old_num_v))
- ),
- axis=1,
- )
- return np.arange(old_num_v) + cumulative_index_offsets
- else:
- return self
+ if not ret_indices:
+ return new_polyline
+
+    # In a degenerate case, `subdivide_segment()` may return fewer than
+ # the requested number of indices. So, recompute the actual number of
+ # segments inserted.
+ num_segments_inserted = np.zeros(old_num_e, dtype=np.int64)
+ num_segments_inserted[es_to_subdivide] = [len(vs) for vs in vs_to_insert]
+ stepwise_index_offsets = np.concatenate(
+ [
+ # The first vertex is never moved.
+ np.zeros(1, dtype=np.int64),
+ # In a closed polyline, the last edge goes back to vertex
+ # 0. Subdivisions of that segment do not affect indexing of
+ # any of the vertices (since the original end vertex is
+ # still at index 0).
+ num_segments_inserted[:-1] if self.is_closed else num_segments_inserted,
+ ]
+ )
+ cumulative_index_offsets = np.sum(
+ np.tril(np.broadcast_to(stepwise_index_offsets, (old_num_v, old_num_v))),
+ axis=1,
+ )
+ indices_of_original_vertices = np.arange(old_num_v) + cumulative_index_offsets
+ return new_polyline, indices_of_original_vertices
def with_segments_bisected(self, segment_indices, ret_new_indices=False):
"""
|
lace/polliwog
|
1c2291f417ea6a563b59c479703779d146f546a8
|
diff --git a/polliwog/polyline/test_polyline.py b/polliwog/polyline/test_polyline.py
index 0fa38c3..9952359 100644
--- a/polliwog/polyline/test_polyline.py
+++ b/polliwog/polyline/test_polyline.py
@@ -67,6 +67,19 @@ def test_to_dict():
np.testing.assert_array_equal(expected_dict["edges"], actual_dict["edges"])
+def test_copy():
+ original_vs = np.array(
+ [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [1.0, 2.0, 0.0]]
+ )
+ polyline = Polyline(v=original_vs.copy(), is_closed=False)
+ copy_of_polyline = polyline.copy()
+ assert polyline is not copy_of_polyline
+ assert polyline.is_closed == copy_of_polyline.is_closed
+ np.testing.assert_array_equal(polyline.v, copy_of_polyline.v)
+ polyline.v[0] = np.array([2.0, 3.0, 4.0])
+ np.testing.assert_array_equal(copy_of_polyline.v, original_vs)
+
+
def test_bounding_box():
bounding_box = Polyline(
np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [1.0, 2.0, 0.0]])
@@ -233,7 +246,7 @@ def test_length_of_empty_polyline():
assert polyline.total_length == 0
-def test_partition_by_length_noop():
+def test_subdivided_by_length_noop():
original = Polyline(
np.array(
[
@@ -246,8 +259,7 @@ def test_partition_by_length_noop():
)
)
- result = original.copy()
- indices = result.partition_by_length(1.0, ret_indices=True)
+ result, indices = original.subdivided_by_length(1.0, ret_indices=True)
expected_indices = np.array([0, 1, 2, 3, 4])
@@ -257,15 +269,14 @@ def test_partition_by_length_noop():
np.testing.assert_array_equal(result.v[indices], original.v)
-def test_partition_by_length_degenerate():
+def test_subdivided_by_length_degenerate():
"""
This covers a bug that arose from a numerical stability issue in
measurement on EC2 / MKL.
"""
original = Polyline(np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 0.0, 0.0]]))
- result = original.copy()
- indices = result.partition_by_length(1.0, ret_indices=True)
+ result, indices = original.subdivided_by_length(1.0, ret_indices=True)
expected_indices = np.array([0, 1, 2])
@@ -275,7 +286,7 @@ def test_partition_by_length_degenerate():
np.testing.assert_array_equal(result.v[indices], original.v)
-def test_partition_by_length_divide_by_two():
+def test_subdivided_by_length_divide_by_two():
original = Polyline(
np.array(
[
@@ -307,17 +318,15 @@ def test_partition_by_length_divide_by_two():
expected_indices = np.array([0, 2, 4, 6, 8])
for max_length in (0.99, 0.75, 0.5):
- result = original.copy()
- indices = result.partition_by_length(max_length, ret_indices=True)
+ result, indices = original.subdivided_by_length(max_length, ret_indices=True)
np.testing.assert_array_almost_equal(result.v, expected.v)
np.testing.assert_array_equal(result.e, expected.e)
np.testing.assert_array_equal(indices, expected_indices)
- result_2 = original.copy()
- ret = result_2.partition_by_length(max_length, ret_indices=False)
- np.testing.assert_array_almost_equal(result.v, expected.v)
- assert ret is result_2
+ result_2 = result.subdivided_by_length(max_length, ret_indices=False)
+ np.testing.assert_array_almost_equal(result_2.v, expected.v)
+ assert result_2 is not result
np.testing.assert_array_equal(result.v[indices], original.v)
@@ -365,8 +374,7 @@ def test_partition_length_divide_by_five():
expected_indices = np.array([0, 5, 10, 15, 20])
for max_length in (0.2, 0.24):
- result = original.copy()
- indices = result.partition_by_length(max_length, ret_indices=True)
+ result, indices = original.subdivided_by_length(max_length, ret_indices=True)
np.testing.assert_array_almost_equal(result.v, expected.v)
np.testing.assert_array_equal(result.e, expected.e)
@@ -374,7 +382,7 @@ def test_partition_length_divide_by_five():
np.testing.assert_array_equal(result.v[indices], original.v)
-def test_partition_by_length_divide_some_leave_some():
+def test_subdivided_by_length_divide_some_leave_some():
original = Polyline(
np.array(
[
@@ -404,8 +412,7 @@ def test_partition_by_length_divide_some_leave_some():
expected_indices = np.array([0, 1, 2, 5, 6])
for max_length in (2.0, 2.99):
- result = original.copy()
- indices = result.partition_by_length(max_length, ret_indices=True)
+ result, indices = original.subdivided_by_length(max_length, ret_indices=True)
np.testing.assert_array_almost_equal(result.v, expected.v)
np.testing.assert_array_equal(result.e, expected.e)
@@ -413,7 +420,7 @@ def test_partition_by_length_divide_some_leave_some():
np.testing.assert_array_equal(result.v[indices], original.v)
-def test_partition_by_length_closed():
+def test_subdivided_by_length_closed():
original = Polyline(
np.array(
[
@@ -450,8 +457,7 @@ def test_partition_by_length_closed():
expected_indices = np.array([0, 1, 2, 5, 6, 7])
for max_length in (2.0, 2.5, 2.6):
- result = original.copy()
- indices = result.partition_by_length(max_length, ret_indices=True)
+ result, indices = original.subdivided_by_length(max_length, ret_indices=True)
np.testing.assert_array_almost_equal(result.v, expected.v)
np.testing.assert_array_equal(result.e, expected.e)
|
Convert `Polyline.partition_by_length()` to work on a copy
This follows the pattern of most of the other mutating functions.
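A short sketch of the renamed, non-mutating call, based on the signature in the patch above (the top-level `Polyline` import is an assumption; the class is defined in `polliwog/polyline/polyline.py`):

```python
import numpy as np
from polliwog import Polyline  # import path assumed

original = Polyline(
    v=np.array([[0.0, 0.0, 0.0], [2.0, 0.0, 0.0], [2.0, 2.0, 0.0]]),
    is_closed=False,
)

# The new method returns a fresh Polyline instead of mutating `original`.
subdivided, indices = original.subdivided_by_length(1.0, ret_indices=True)

assert subdivided is not original
assert len(original.v) == 3  # the original polyline is untouched
# `indices` maps each original vertex to its position in the new polyline.
np.testing.assert_array_equal(subdivided.v[indices], original.v)
```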
|
0.0
|
1c2291f417ea6a563b59c479703779d146f546a8
|
[
"polliwog/polyline/test_polyline.py::test_subdivided_by_length_noop",
"polliwog/polyline/test_polyline.py::test_subdivided_by_length_degenerate"
] |
[
"polliwog/polyline/test_polyline.py::test_join",
"polliwog/polyline/test_polyline.py::test_repr",
"polliwog/polyline/test_polyline.py::test_to_dict",
"polliwog/polyline/test_polyline.py::test_copy",
"polliwog/polyline/test_polyline.py::test_bounding_box",
"polliwog/polyline/test_polyline.py::test_bounding_box_degnerate",
"polliwog/polyline/test_polyline.py::test_index_of_vertex",
"polliwog/polyline/test_polyline.py::test_with_insertions",
"polliwog/polyline/test_polyline.py::test_update_is_closed",
"polliwog/polyline/test_polyline.py::test_num_v_num_e",
"polliwog/polyline/test_polyline.py::test_edges",
"polliwog/polyline/test_polyline.py::test_segments",
"polliwog/polyline/test_polyline.py::test_segment_vectors",
"polliwog/polyline/test_polyline.py::test_length_of_empty_polyline",
"polliwog/polyline/test_polyline.py::test_with_segments_bisected",
"polliwog/polyline/test_polyline.py::test_flipped",
"polliwog/polyline/test_polyline.py::test_aligned_with",
"polliwog/polyline/test_polyline.py::test_aligned_with_closed",
"polliwog/polyline/test_polyline.py::test_aligned_with_degenerate",
"polliwog/polyline/test_polyline.py::test_reindexed",
"polliwog/polyline/test_polyline.py::test_intersect_plane",
"polliwog/polyline/test_polyline.py::test_sliced_by_plane_closed",
"polliwog/polyline/test_polyline.py::test_sliced_by_plane_closed_on_vertex",
"polliwog/polyline/test_polyline.py::test_sliced_by_plane_closed_one_vertex",
"polliwog/polyline/test_polyline.py::test_sliced_by_plane_open",
"polliwog/polyline/test_polyline.py::test_apex",
"polliwog/polyline/test_polyline.py::test_sliced_at_indices",
"polliwog/polyline/test_polyline.py::test_polyline_nearest",
"polliwog/polyline/test_polyline.py::test_slice_at_points"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-12-11 20:46:19+00:00
|
bsd-2-clause
| 3,496 |
|
lace__polliwog-182
|
diff --git a/polliwog/plane/_plane_object.py b/polliwog/plane/_plane_object.py
index 60eb7cc..3c67e1d 100644
--- a/polliwog/plane/_plane_object.py
+++ b/polliwog/plane/_plane_object.py
@@ -148,6 +148,34 @@ class Plane(object):
return np.sign(self.signed_distance(points))
def points_in_front(self, points, inverted=False, ret_indices=False):
+ """
+ Given an array of points, return the points which lie in the
+ half-space in front of it (i.e. in the direction of the plane
+ normal).
+
+ Args:
+            points (np.arraylike): An array of points.
+ inverted (bool): When `True`, return the points which lie on or
+ behind the plane instead.
+ ret_indices (bool): When `True`, return the indices instead of the
+ points themselves.
+
+ Note:
+ Use `points_on_or_in_front()` for points which lie either on the
+ plane or in front of it.
+ """
+ sign = self.sign(points)
+
+ if inverted:
+ mask = np.less(sign, 0)
+ else:
+ mask = np.greater(sign, 0)
+
+ indices = np.flatnonzero(mask)
+
+ return indices if ret_indices else points[indices]
+
+ def points_on_or_in_front(self, points, inverted=False, ret_indices=False):
"""
Given an array of points, return the points which lie either on the
plane or in the half-space in front of it (i.e. in the direction of
@@ -155,10 +183,14 @@ class Plane(object):
Args:
            points (np.arraylike): An array of points.
- inverted (bool): When `True`, effectively invert the plane. Return
- the points that lie on or behind the plane instead.
+ inverted (bool): When `True`, return the points behind the plane
+ instead.
ret_indices (bool): When `True`, return the indices instead of the
points themselves.
+
+ Note:
+ Use `points_in_front()` to get points which lie only in front of
+ the plane.
"""
sign = self.sign(points)
diff --git a/polliwog/pointcloud/__init__.py b/polliwog/pointcloud/__init__.py
index 7b816e6..d9ea056 100644
--- a/polliwog/pointcloud/__init__.py
+++ b/polliwog/pointcloud/__init__.py
@@ -2,6 +2,6 @@
Functions for working with point clouds (i.e. unstructured sets of 3D points).
"""
-from ._pointcloud_functions import percentile
+from ._pointcloud_functions import extent, percentile
-__all__ = ["percentile"]
+__all__ = ["extent", "percentile"]
diff --git a/polliwog/pointcloud/_pointcloud_functions.py b/polliwog/pointcloud/_pointcloud_functions.py
index 5393544..9767839 100644
--- a/polliwog/pointcloud/_pointcloud_functions.py
+++ b/polliwog/pointcloud/_pointcloud_functions.py
@@ -28,3 +28,39 @@ def percentile(points, axis, percentile):
selected_coord_on_axis = np.percentile(coords_on_axis, percentile)
centroid = np.average(points, axis=0)
return vg.reject(centroid, axis) + selected_coord_on_axis * axis
+
+
+def extent(points, ret_indices=False):
+ """
+ Find the distance between the two farthest-most points.
+
+ Args:
+ points (np.arraylike): A `kx3` stack of points.
+ ret_indices (bool): When `True`, return the indices along with the
+ distance.
+
+ Returns:
+ object: With `ret_indices=False`, the distance; with
+ `ret_indices=True` a tuple `(distance, first_index, second_index)`.
+
+ Note:
+ This is implemented using a brute-force method.
+ """
+ k = vg.shape.check(locals(), "points", (-1, 3))
+ if k < 2:
+ raise ValueError("At least two points are required")
+
+ farthest_i = -1
+ farthest_j = -1
+ farthest_distance = -1
+ for i, probe in enumerate(points):
+ distances = vg.euclidean_distance(points, probe)
+ this_farthest_j = np.argmax(distances)
+ if distances[this_farthest_j] > farthest_distance:
+ farthest_i = i
+ farthest_j = this_farthest_j
+ farthest_distance = distances[this_farthest_j]
+ if ret_indices:
+ return farthest_distance, farthest_i, farthest_j
+ else:
+ return farthest_distance
diff --git a/polliwog/shapes/_shapes.py b/polliwog/shapes/_shapes.py
index a21adcc..8c60dd4 100644
--- a/polliwog/shapes/_shapes.py
+++ b/polliwog/shapes/_shapes.py
@@ -1,10 +1,10 @@
import numpy as np
+import vg
__all__ = [
- "create_rectangular_prism",
- "create_cube",
- "create_triangular_prism",
- "create_rectangle",
+ "rectangular_prism",
+ "cube",
+ "triangular_prism",
]
@@ -15,7 +15,7 @@ def _maybe_flatten(vertices, faces, ret_unique_vertices_and_faces):
return vertices[faces]
-def create_rectangular_prism(origin, size, ret_unique_vertices_and_faces=False):
+def rectangular_prism(origin, size, ret_unique_vertices_and_faces=False):
"""
Tesselate an axis-aligned rectangular prism. One vertex is `origin`. The
diametrically opposite vertex is `origin + size`.
@@ -40,6 +40,9 @@ def create_rectangular_prism(origin, size, ret_unique_vertices_and_faces=False):
"""
from ..tri import quads_to_tris
+ vg.shape.check(locals(), "origin", (3,))
+ vg.shape.check(locals(), "size", (3,))
+
lower_base_plane = np.array(
[
# Lower base plane
@@ -73,7 +76,7 @@ def create_rectangular_prism(origin, size, ret_unique_vertices_and_faces=False):
return _maybe_flatten(vertices, faces, ret_unique_vertices_and_faces)
-def create_cube(origin, size, ret_unique_vertices_and_faces=False):
+def cube(origin, size, ret_unique_vertices_and_faces=False):
"""
Tesselate an axis-aligned cube. One vertex is `origin`. The diametrically
opposite vertex is `size` units along `+x`, `+y`, and `+z`.
@@ -96,14 +99,18 @@ def create_cube(origin, size, ret_unique_vertices_and_faces=False):
- With `ret_unique_vertices_and_faces=False`: a `12x3x3` matrix of
flattened triangle coordinates.
"""
- return create_rectangular_prism(
+ vg.shape.check(locals(), "origin", (3,))
+ if not isinstance(size, float):
+ raise ValueError("`size` should be a number")
+
+ return rectangular_prism(
origin,
np.repeat(size, 3),
ret_unique_vertices_and_faces=ret_unique_vertices_and_faces,
)
-def create_triangular_prism(p1, p2, p3, height, ret_unique_vertices_and_faces=False):
+def triangular_prism(p1, p2, p3, height, ret_unique_vertices_and_faces=False):
"""
Tesselate a triangular prism whose base is the triangle `p1`, `p2`, `p3`.
If the vertices are oriented in a counterclockwise direction, the prism
@@ -129,6 +136,12 @@ def create_triangular_prism(p1, p2, p3, height, ret_unique_vertices_and_faces=Fa
"""
from .. import Plane
+ vg.shape.check(locals(), "p1", (3,))
+ vg.shape.check(locals(), "p2", (3,))
+ vg.shape.check(locals(), "p3", (3,))
+ if not isinstance(height, float):
+ raise ValueError("`height` should be a number")
+
base_plane = Plane.from_points(p1, p2, p3)
lower_base_to_upper_base = height * -base_plane.normal
vertices = np.vstack(([p1, p2, p3], [p1, p2, p3] + lower_base_to_upper_base))
@@ -148,28 +161,3 @@ def create_triangular_prism(p1, p2, p3, height, ret_unique_vertices_and_faces=Fa
)
return _maybe_flatten(vertices, faces, ret_unique_vertices_and_faces)
-
-
-def create_rectangle(ret_unique_vertices_and_faces=False):
- """
- Create a rectangle.
-
- Args:
- ret_unique_vertices_and_faces (bool): When `True` return a vertex
- array containing the unique vertices and an array of faces (i.e.
- vertex indices). When `False`, return a flattened array of
- triangle coordinates.
-
- Returns:
- object:
-
- - With `ret_unique_vertices_and_faces=True`: a tuple containing
- an `4x3` array of vertices and a `2x3` array of triangle faces.
- - With `ret_unique_vertices_and_faces=False`: a `16x3x3` matrix of
- flattened triangle coordinates.
- """
- vertices = np.array(
- [[1.0, 0.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, -1.0]]
- )
- faces = np.array([[0, 1, 2], [3, 1, 0]], dtype=np.uint64)
- return _maybe_flatten(vertices, faces, ret_unique_vertices_and_faces)
|
lace/polliwog
|
dcd092dd3084da689029f2cb6bf48a1c38858959
|
diff --git a/polliwog/plane/test_functions.py b/polliwog/plane/test_functions.py
index 895155a..e29934e 100644
--- a/polliwog/plane/test_functions.py
+++ b/polliwog/plane/test_functions.py
@@ -20,7 +20,7 @@ def assert_plane_equation_satisfies_points(plane_equation, points):
def test_plane_normal_from_points_parity():
- from ..shapes import create_triangular_prism
+ from ..shapes import triangular_prism
from ..tri import surface_normals
points = np.array([[3.0, 0.0, 0.0], [0.0, 3.0, 0.0], [0.0, 0.0, 3.0]])
@@ -37,7 +37,7 @@ def test_plane_normal_from_points_parity():
p1 = np.array([3.0, 0.0, 0.0])
p2 = np.array([0.0, 3.0, 0.0])
p3 = np.array([0.0, 0.0, 3.0])
- vertices = create_triangular_prism(p1, p2, p3, 1.0)
+ vertices = triangular_prism(p1, p2, p3, 1.0)
np.testing.assert_allclose(
plane_normal_from_points(vertices), surface_normals(vertices)
diff --git a/polliwog/plane/test_plane.py b/polliwog/plane/test_plane.py
index 8c86bd3..5fc77c7 100644
--- a/polliwog/plane/test_plane.py
+++ b/polliwog/plane/test_plane.py
@@ -117,15 +117,48 @@ def test_points_in_front():
plane = Plane(sample, normal)
- pts = np.array([[425.0, 425.0, 25.0], [-500.0, -500.0, 25.0]])
+ pts = np.array([[425.0, 425.0, 25.0], [-500.0, -500.0, 25.0], [1.0, 1.0, 0.0]])
+
+ expected_indices = np.array([0])
+ np.testing.assert_array_equal(plane.points_in_front(pts), pts[expected_indices])
+ np.testing.assert_array_equal(
+ plane.points_in_front(pts, ret_indices=True), expected_indices
+ )
+
+ expected_indices = np.array([1])
+ np.testing.assert_array_equal(
+ plane.points_in_front(pts, inverted=True), pts[expected_indices]
+ )
+ np.testing.assert_array_equal(
+ plane.points_in_front(pts, inverted=True, ret_indices=True), expected_indices
+ )
+
+
+def test_points_on_or_in_front():
+ # diagonal plane @ origin - draw a picture!
+ normal = np.array([1.0, 1.0, 0.0])
+ normal /= np.linalg.norm(normal)
+ sample = np.array([1.0, 1.0, 0.0])
+
+ plane = Plane(sample, normal)
+
+ pts = np.array([[425.0, 425.0, 25.0], [-500.0, -500.0, 25.0], [1.0, 1.0, 0.0]])
+
+ expected_indices = np.array([0, 2])
+ np.testing.assert_array_equal(
+ plane.points_on_or_in_front(pts), pts[expected_indices]
+ )
+ np.testing.assert_array_equal(
+ plane.points_on_or_in_front(pts, ret_indices=True), expected_indices
+ )
- np.testing.assert_array_equal(plane.points_in_front(pts), pts[0:1])
+ expected_indices = np.array([1, 2])
np.testing.assert_array_equal(
- plane.points_in_front(pts, ret_indices=True), np.array([0])
+ plane.points_on_or_in_front(pts, inverted=True), pts[expected_indices]
)
- np.testing.assert_array_equal(plane.points_in_front(pts, inverted=True), pts[1:2])
np.testing.assert_array_equal(
- plane.points_in_front(pts, inverted=True, ret_indices=True), np.array([1])
+ plane.points_on_or_in_front(pts, inverted=True, ret_indices=True),
+ expected_indices,
)
diff --git a/polliwog/pointcloud/test_pointcloud_functions.py b/polliwog/pointcloud/test_pointcloud_functions.py
index cae1a55..6a1722f 100644
--- a/polliwog/pointcloud/test_pointcloud_functions.py
+++ b/polliwog/pointcloud/test_pointcloud_functions.py
@@ -2,7 +2,7 @@ import math
import numpy as np
import pytest
import vg
-from ._pointcloud_functions import percentile
+from ._pointcloud_functions import extent, percentile
def random_points_along_side_of_cylinder(radius=1.0, height=3.0, n_samples=1):
@@ -30,3 +30,18 @@ def test_percentile():
with pytest.raises(ValueError, match="Axis must be non-zero"):
percentile(points=points, axis=np.array([0, 0, 0]), percentile=75)
+
+
+def test_extent():
+ points = np.array(
+ [[0, 0, 0], [10, 10, 0], [10, 0, 0], [0, 11, 0]], dtype=np.float64
+ )
+ np.testing.assert_almost_equal(extent(points), 14.87, decimal=2)
+
+ distance, i, j = extent(points, ret_indices=True)
+ np.testing.assert_almost_equal(distance, 14.87, decimal=2)
+ assert i == 2
+ assert j == 3
+
+ with pytest.raises(ValueError, match="At least two points are required"):
+ extent(np.array([[0, 0, 0]]))
diff --git a/polliwog/shapes/test_shapes.py b/polliwog/shapes/test_shapes.py
index 2dd4d7a..09d04e1 100644
--- a/polliwog/shapes/test_shapes.py
+++ b/polliwog/shapes/test_shapes.py
@@ -1,8 +1,9 @@
import numpy as np
-from ._shapes import create_cube, create_rectangle, create_rectangular_prism
+import pytest
+from ._shapes import cube, rectangular_prism, triangular_prism
-def test_create_rectangular_prism():
+def test_rectangular_prism():
origin = np.array([3.0, 4.0, 5.0])
size = np.array([2.0, 10.0, 20.0])
@@ -35,36 +36,42 @@ def test_create_rectangular_prism():
]
)
- vertices, faces = create_rectangular_prism(
+ vertices, faces = rectangular_prism(
origin=origin, size=size, ret_unique_vertices_and_faces=True
)
np.testing.assert_array_equal(faces, expected_faces)
np.testing.assert_array_equal(vertices, expected_vertices)
- flattened_vertices = create_rectangular_prism(
+ flattened_vertices = rectangular_prism(
origin=origin, size=size, ret_unique_vertices_and_faces=False
)
np.testing.assert_array_equal(flattened_vertices, expected_vertices[expected_faces])
-def test_create_cube():
+def test_cube():
origin = np.array([3.0, 4.0, 5.0])
size = 2.0
- flattened_vertices = create_cube(origin=origin, size=size)
+ flattened_vertices = cube(origin=origin, size=size)
expected_first_triangle = np.array(
[[3.0, 4.0, 5.0], [5.0, 4.0, 5.0], [5.0, 4.0, 7.0]]
)
np.testing.assert_array_equal(flattened_vertices[0], expected_first_triangle)
+ with pytest.raises(ValueError, match="`size` should be a number"):
+ cube(origin=origin, size="not a number")
-def test_create_rectangle():
- expected_vertices = np.array(
- [[1.0, 0.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, -1.0]]
- )
- expected_faces = np.array([[0, 1, 2], [3, 1, 0]])
- vertices, faces = create_rectangle(ret_unique_vertices_and_faces=True)
- np.testing.assert_array_equal(faces, expected_faces)
- np.testing.assert_array_equal(vertices, expected_vertices)
+def test_triangular_prism():
+ p1 = np.array([3.0, 0.0, 0.0])
+ p2 = np.array([0.0, 3.0, 0.0])
+ p3 = np.array([0.0, 0.0, 3.0])
+
+ flattened_vertices = triangular_prism(p1, p2, p3, 1.0)
+
+ expected_first_triangle = np.array([p1, p2, p3])
+ np.testing.assert_array_equal(flattened_vertices[0], expected_first_triangle)
+
+ with pytest.raises(ValueError, match="`height` should be a number"):
+ triangular_prism(p1, p2, p3, "not-a-number")
diff --git a/polliwog/tri/test_functions.py b/polliwog/tri/test_functions.py
index 74337ca..3f4a5e9 100644
--- a/polliwog/tri/test_functions.py
+++ b/polliwog/tri/test_functions.py
@@ -21,12 +21,12 @@ def test_surface_normals_from_points_single():
def test_surface_normals_from_points_vectorized():
- from ..shapes import create_triangular_prism
+ from ..shapes import triangular_prism
p1 = np.array([3.0, 0.0, 0.0])
p2 = np.array([0.0, 3.0, 0.0])
p3 = np.array([0.0, 0.0, 3.0])
- vertices = create_triangular_prism(p1, p2, p3, 1.0)
+ vertices = triangular_prism(p1, p2, p3, 1.0)
expected_normals = vg.normalize(
np.array(
|
Consider renaming Plane `points_in_front()` method to `points_on_or_in_front()`
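A small sketch of the distinction, mirroring the values in the test above (the top-level `Plane` import is an assumption):

```python
import numpy as np
from polliwog import Plane  # import path assumed

normal = np.array([1.0, 1.0, 0.0])
normal /= np.linalg.norm(normal)
plane = Plane(np.array([1.0, 1.0, 0.0]), normal)

pts = np.array([
    [425.0, 425.0, 25.0],    # strictly in front of the plane
    [-500.0, -500.0, 25.0],  # behind the plane
    [1.0, 1.0, 0.0],         # exactly on the plane
])

# `points_in_front` keeps only strictly-in-front points...
np.testing.assert_array_equal(plane.points_in_front(pts, ret_indices=True), [0])
# ...while `points_on_or_in_front` also keeps the on-plane point.
np.testing.assert_array_equal(plane.points_on_or_in_front(pts, ret_indices=True), [0, 2])
```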
|
0.0
|
dcd092dd3084da689029f2cb6bf48a1c38858959
|
[
"polliwog/plane/test_functions.py::test_plane_normal_from_points_parity",
"polliwog/plane/test_functions.py::test_plane_equation_from_points",
"polliwog/plane/test_functions.py::test_plane_equation_from_points_is_in_expected_orientation",
"polliwog/plane/test_functions.py::test_plane_equation_from_points_stacked",
"polliwog/plane/test_functions.py::test_plane_normal_from_points",
"polliwog/plane/test_functions.py::test_plane_normal_from_points_stacked",
"polliwog/plane/test_functions.py::test_normal_and_offset_from_plane_equations",
"polliwog/plane/test_functions.py::test_signed_distances_for_xz_plane_at_origin",
"polliwog/plane/test_functions.py::test_signed_distances_for_diagonal_plane",
"polliwog/plane/test_functions.py::test_signed_distance_validation",
"polliwog/plane/test_functions.py::test_project_point_to_plane",
"polliwog/plane/test_functions.py::test_project_point_to_plane_vectorized_points",
"polliwog/plane/test_functions.py::test_project_point_to_plane_vectorized_planes",
"polliwog/plane/test_functions.py::test_project_point_to_plane_vectorized_both",
"polliwog/plane/test_functions.py::test_project_point_to_plane_validation",
"polliwog/plane/test_functions.py::test_mirror_point_across_plane_vectorized_points",
"polliwog/plane/test_plane.py::test_validation",
"polliwog/plane/test_plane.py::test_repr",
"polliwog/plane/test_plane.py::test_flipped",
"polliwog/plane/test_plane.py::test_returns_signed_distances_for_xz_plane_at_origin",
"polliwog/plane/test_plane.py::test_returns_unsigned_distances_for_xz_plane_at_origin",
"polliwog/plane/test_plane.py::test_returns_signed_distances_for_diagonal_plane",
"polliwog/plane/test_plane.py::test_returns_unsigned_distances_for_diagonal_plane_at_origin",
"polliwog/plane/test_plane.py::test_signed_distance_validation",
"polliwog/plane/test_plane.py::test_returns_sign_for_diagonal_plane",
"polliwog/plane/test_plane.py::test_points_in_front",
"polliwog/plane/test_plane.py::test_points_on_or_in_front",
"polliwog/plane/test_plane.py::test_canonical_point",
"polliwog/plane/test_plane.py::test_project_point",
"polliwog/plane/test_plane.py::test_project_point_vectorized",
"polliwog/plane/test_plane.py::test_mirror_point",
"polliwog/plane/test_plane.py::test_plane_from_points",
"polliwog/plane/test_plane.py::test_plane_from_points_and_vector",
"polliwog/plane/test_plane.py::test_fit_from_points",
"polliwog/plane/test_plane.py::test_line_plane_intersection",
"polliwog/plane/test_plane.py::test_line_plane_intersections",
"polliwog/plane/test_plane.py::test_line_segment_plane_intersection",
"polliwog/plane/test_plane.py::test_line_segment_plane_intersections",
"polliwog/plane/test_plane.py::test_constants",
"polliwog/pointcloud/test_pointcloud_functions.py::test_percentile",
"polliwog/pointcloud/test_pointcloud_functions.py::test_extent",
"polliwog/shapes/test_shapes.py::test_rectangular_prism",
"polliwog/shapes/test_shapes.py::test_cube",
"polliwog/shapes/test_shapes.py::test_triangular_prism",
"polliwog/tri/test_functions.py::test_surface_normals_from_points_single",
"polliwog/tri/test_functions.py::test_surface_normals_from_points_vectorized",
"polliwog/tri/test_functions.py::test_tri_contains_coplanar_point",
"polliwog/tri/test_functions.py::test_barycentric"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-04-22 17:29:52+00:00
|
bsd-2-clause
| 3,497 |
|
lace__polliwog-183
|
diff --git a/polliwog/pointcloud/__init__.py b/polliwog/pointcloud/__init__.py
index 7b816e6..d9ea056 100644
--- a/polliwog/pointcloud/__init__.py
+++ b/polliwog/pointcloud/__init__.py
@@ -2,6 +2,6 @@
Functions for working with point clouds (i.e. unstructured sets of 3D points).
"""
-from ._pointcloud_functions import percentile
+from ._pointcloud_functions import extent, percentile
-__all__ = ["percentile"]
+__all__ = ["extent", "percentile"]
diff --git a/polliwog/pointcloud/_pointcloud_functions.py b/polliwog/pointcloud/_pointcloud_functions.py
index 5393544..9767839 100644
--- a/polliwog/pointcloud/_pointcloud_functions.py
+++ b/polliwog/pointcloud/_pointcloud_functions.py
@@ -28,3 +28,39 @@ def percentile(points, axis, percentile):
selected_coord_on_axis = np.percentile(coords_on_axis, percentile)
centroid = np.average(points, axis=0)
return vg.reject(centroid, axis) + selected_coord_on_axis * axis
+
+
+def extent(points, ret_indices=False):
+ """
+ Find the distance between the two farthest-most points.
+
+ Args:
+ points (np.arraylike): A `kx3` stack of points.
+ ret_indices (bool): When `True`, return the indices along with the
+ distance.
+
+ Returns:
+ object: With `ret_indices=False`, the distance; with
+ `ret_indices=True` a tuple `(distance, first_index, second_index)`.
+
+ Note:
+ This is implemented using a brute-force method.
+ """
+ k = vg.shape.check(locals(), "points", (-1, 3))
+ if k < 2:
+ raise ValueError("At least two points are required")
+
+ farthest_i = -1
+ farthest_j = -1
+ farthest_distance = -1
+ for i, probe in enumerate(points):
+ distances = vg.euclidean_distance(points, probe)
+ this_farthest_j = np.argmax(distances)
+ if distances[this_farthest_j] > farthest_distance:
+ farthest_i = i
+ farthest_j = this_farthest_j
+ farthest_distance = distances[this_farthest_j]
+ if ret_indices:
+ return farthest_distance, farthest_i, farthest_j
+ else:
+ return farthest_distance
diff --git a/polliwog/shapes/_shapes.py b/polliwog/shapes/_shapes.py
index a21adcc..8c60dd4 100644
--- a/polliwog/shapes/_shapes.py
+++ b/polliwog/shapes/_shapes.py
@@ -1,10 +1,10 @@
import numpy as np
+import vg
__all__ = [
- "create_rectangular_prism",
- "create_cube",
- "create_triangular_prism",
- "create_rectangle",
+ "rectangular_prism",
+ "cube",
+ "triangular_prism",
]
@@ -15,7 +15,7 @@ def _maybe_flatten(vertices, faces, ret_unique_vertices_and_faces):
return vertices[faces]
-def create_rectangular_prism(origin, size, ret_unique_vertices_and_faces=False):
+def rectangular_prism(origin, size, ret_unique_vertices_and_faces=False):
"""
Tesselate an axis-aligned rectangular prism. One vertex is `origin`. The
diametrically opposite vertex is `origin + size`.
@@ -40,6 +40,9 @@ def create_rectangular_prism(origin, size, ret_unique_vertices_and_faces=False):
"""
from ..tri import quads_to_tris
+ vg.shape.check(locals(), "origin", (3,))
+ vg.shape.check(locals(), "size", (3,))
+
lower_base_plane = np.array(
[
# Lower base plane
@@ -73,7 +76,7 @@ def create_rectangular_prism(origin, size, ret_unique_vertices_and_faces=False):
return _maybe_flatten(vertices, faces, ret_unique_vertices_and_faces)
-def create_cube(origin, size, ret_unique_vertices_and_faces=False):
+def cube(origin, size, ret_unique_vertices_and_faces=False):
"""
Tesselate an axis-aligned cube. One vertex is `origin`. The diametrically
opposite vertex is `size` units along `+x`, `+y`, and `+z`.
@@ -96,14 +99,18 @@ def create_cube(origin, size, ret_unique_vertices_and_faces=False):
- With `ret_unique_vertices_and_faces=False`: a `12x3x3` matrix of
flattened triangle coordinates.
"""
- return create_rectangular_prism(
+ vg.shape.check(locals(), "origin", (3,))
+ if not isinstance(size, float):
+ raise ValueError("`size` should be a number")
+
+ return rectangular_prism(
origin,
np.repeat(size, 3),
ret_unique_vertices_and_faces=ret_unique_vertices_and_faces,
)
-def create_triangular_prism(p1, p2, p3, height, ret_unique_vertices_and_faces=False):
+def triangular_prism(p1, p2, p3, height, ret_unique_vertices_and_faces=False):
"""
Tesselate a triangular prism whose base is the triangle `p1`, `p2`, `p3`.
If the vertices are oriented in a counterclockwise direction, the prism
@@ -129,6 +136,12 @@ def create_triangular_prism(p1, p2, p3, height, ret_unique_vertices_and_faces=Fa
"""
from .. import Plane
+ vg.shape.check(locals(), "p1", (3,))
+ vg.shape.check(locals(), "p2", (3,))
+ vg.shape.check(locals(), "p3", (3,))
+ if not isinstance(height, float):
+ raise ValueError("`height` should be a number")
+
base_plane = Plane.from_points(p1, p2, p3)
lower_base_to_upper_base = height * -base_plane.normal
vertices = np.vstack(([p1, p2, p3], [p1, p2, p3] + lower_base_to_upper_base))
@@ -148,28 +161,3 @@ def create_triangular_prism(p1, p2, p3, height, ret_unique_vertices_and_faces=Fa
)
return _maybe_flatten(vertices, faces, ret_unique_vertices_and_faces)
-
-
-def create_rectangle(ret_unique_vertices_and_faces=False):
- """
- Create a rectangle.
-
- Args:
- ret_unique_vertices_and_faces (bool): When `True` return a vertex
- array containing the unique vertices and an array of faces (i.e.
- vertex indices). When `False`, return a flattened array of
- triangle coordinates.
-
- Returns:
- object:
-
- - With `ret_unique_vertices_and_faces=True`: a tuple containing
- an `4x3` array of vertices and a `2x3` array of triangle faces.
- - With `ret_unique_vertices_and_faces=False`: a `16x3x3` matrix of
- flattened triangle coordinates.
- """
- vertices = np.array(
- [[1.0, 0.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, -1.0]]
- )
- faces = np.array([[0, 1, 2], [3, 1, 0]], dtype=np.uint64)
- return _maybe_flatten(vertices, faces, ret_unique_vertices_and_faces)
|
lace/polliwog
|
dcd092dd3084da689029f2cb6bf48a1c38858959
|
diff --git a/polliwog/plane/test_functions.py b/polliwog/plane/test_functions.py
index 895155a..e29934e 100644
--- a/polliwog/plane/test_functions.py
+++ b/polliwog/plane/test_functions.py
@@ -20,7 +20,7 @@ def assert_plane_equation_satisfies_points(plane_equation, points):
def test_plane_normal_from_points_parity():
- from ..shapes import create_triangular_prism
+ from ..shapes import triangular_prism
from ..tri import surface_normals
points = np.array([[3.0, 0.0, 0.0], [0.0, 3.0, 0.0], [0.0, 0.0, 3.0]])
@@ -37,7 +37,7 @@ def test_plane_normal_from_points_parity():
p1 = np.array([3.0, 0.0, 0.0])
p2 = np.array([0.0, 3.0, 0.0])
p3 = np.array([0.0, 0.0, 3.0])
- vertices = create_triangular_prism(p1, p2, p3, 1.0)
+ vertices = triangular_prism(p1, p2, p3, 1.0)
np.testing.assert_allclose(
plane_normal_from_points(vertices), surface_normals(vertices)
diff --git a/polliwog/pointcloud/test_pointcloud_functions.py b/polliwog/pointcloud/test_pointcloud_functions.py
index cae1a55..6a1722f 100644
--- a/polliwog/pointcloud/test_pointcloud_functions.py
+++ b/polliwog/pointcloud/test_pointcloud_functions.py
@@ -2,7 +2,7 @@ import math
import numpy as np
import pytest
import vg
-from ._pointcloud_functions import percentile
+from ._pointcloud_functions import extent, percentile
def random_points_along_side_of_cylinder(radius=1.0, height=3.0, n_samples=1):
@@ -30,3 +30,18 @@ def test_percentile():
with pytest.raises(ValueError, match="Axis must be non-zero"):
percentile(points=points, axis=np.array([0, 0, 0]), percentile=75)
+
+
+def test_extent():
+ points = np.array(
+ [[0, 0, 0], [10, 10, 0], [10, 0, 0], [0, 11, 0]], dtype=np.float64
+ )
+ np.testing.assert_almost_equal(extent(points), 14.87, decimal=2)
+
+ distance, i, j = extent(points, ret_indices=True)
+ np.testing.assert_almost_equal(distance, 14.87, decimal=2)
+ assert i == 2
+ assert j == 3
+
+ with pytest.raises(ValueError, match="At least two points are required"):
+ extent(np.array([[0, 0, 0]]))
diff --git a/polliwog/shapes/test_shapes.py b/polliwog/shapes/test_shapes.py
index 2dd4d7a..09d04e1 100644
--- a/polliwog/shapes/test_shapes.py
+++ b/polliwog/shapes/test_shapes.py
@@ -1,8 +1,9 @@
import numpy as np
-from ._shapes import create_cube, create_rectangle, create_rectangular_prism
+import pytest
+from ._shapes import cube, rectangular_prism, triangular_prism
-def test_create_rectangular_prism():
+def test_rectangular_prism():
origin = np.array([3.0, 4.0, 5.0])
size = np.array([2.0, 10.0, 20.0])
@@ -35,36 +36,42 @@ def test_create_rectangular_prism():
]
)
- vertices, faces = create_rectangular_prism(
+ vertices, faces = rectangular_prism(
origin=origin, size=size, ret_unique_vertices_and_faces=True
)
np.testing.assert_array_equal(faces, expected_faces)
np.testing.assert_array_equal(vertices, expected_vertices)
- flattened_vertices = create_rectangular_prism(
+ flattened_vertices = rectangular_prism(
origin=origin, size=size, ret_unique_vertices_and_faces=False
)
np.testing.assert_array_equal(flattened_vertices, expected_vertices[expected_faces])
-def test_create_cube():
+def test_cube():
origin = np.array([3.0, 4.0, 5.0])
size = 2.0
- flattened_vertices = create_cube(origin=origin, size=size)
+ flattened_vertices = cube(origin=origin, size=size)
expected_first_triangle = np.array(
[[3.0, 4.0, 5.0], [5.0, 4.0, 5.0], [5.0, 4.0, 7.0]]
)
np.testing.assert_array_equal(flattened_vertices[0], expected_first_triangle)
+ with pytest.raises(ValueError, match="`size` should be a number"):
+ cube(origin=origin, size="not a number")
-def test_create_rectangle():
- expected_vertices = np.array(
- [[1.0, 0.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, -1.0]]
- )
- expected_faces = np.array([[0, 1, 2], [3, 1, 0]])
- vertices, faces = create_rectangle(ret_unique_vertices_and_faces=True)
- np.testing.assert_array_equal(faces, expected_faces)
- np.testing.assert_array_equal(vertices, expected_vertices)
+def test_triangular_prism():
+ p1 = np.array([3.0, 0.0, 0.0])
+ p2 = np.array([0.0, 3.0, 0.0])
+ p3 = np.array([0.0, 0.0, 3.0])
+
+ flattened_vertices = triangular_prism(p1, p2, p3, 1.0)
+
+ expected_first_triangle = np.array([p1, p2, p3])
+ np.testing.assert_array_equal(flattened_vertices[0], expected_first_triangle)
+
+ with pytest.raises(ValueError, match="`height` should be a number"):
+ triangular_prism(p1, p2, p3, "not-a-number")
diff --git a/polliwog/tri/test_functions.py b/polliwog/tri/test_functions.py
index 74337ca..3f4a5e9 100644
--- a/polliwog/tri/test_functions.py
+++ b/polliwog/tri/test_functions.py
@@ -21,12 +21,12 @@ def test_surface_normals_from_points_single():
def test_surface_normals_from_points_vectorized():
- from ..shapes import create_triangular_prism
+ from ..shapes import triangular_prism
p1 = np.array([3.0, 0.0, 0.0])
p2 = np.array([0.0, 3.0, 0.0])
p3 = np.array([0.0, 0.0, 3.0])
- vertices = create_triangular_prism(p1, p2, p3, 1.0)
+ vertices = triangular_prism(p1, p2, p3, 1.0)
expected_normals = vg.normalize(
np.array(
|
Rework `create_rectangle()`
This function should be parameterized.
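One way the parameterized version could look (the `rectangle` name and signature below are hypothetical, not from the library), following the `ret_unique_vertices_and_faces` convention of the other shape helpers:

```python
import numpy as np

def rectangle(origin, edge_1, edge_2, ret_unique_vertices_and_faces=False):
    # Hypothetical sketch: tessellate the rectangle spanned by `edge_1` and
    # `edge_2`, with one corner at `origin`.
    origin, edge_1, edge_2 = (
        np.asarray(a, dtype=np.float64) for a in (origin, edge_1, edge_2)
    )
    vertices = np.array(
        [origin, origin + edge_1, origin + edge_1 + edge_2, origin + edge_2]
    )
    faces = np.array([[0, 1, 2], [0, 2, 3]], dtype=np.uint64)
    return (vertices, faces) if ret_unique_vertices_and_faces else vertices[faces]

# e.g. a 2x3 rectangle in the x-y plane, flattened to two triangles:
tris = rectangle([0.0, 0.0, 0.0], [2.0, 0.0, 0.0], [0.0, 3.0, 0.0])
assert tris.shape == (2, 3, 3)
```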
|
0.0
|
dcd092dd3084da689029f2cb6bf48a1c38858959
|
[
"polliwog/plane/test_functions.py::test_plane_normal_from_points_parity",
"polliwog/plane/test_functions.py::test_plane_equation_from_points",
"polliwog/plane/test_functions.py::test_plane_equation_from_points_is_in_expected_orientation",
"polliwog/plane/test_functions.py::test_plane_equation_from_points_stacked",
"polliwog/plane/test_functions.py::test_plane_normal_from_points",
"polliwog/plane/test_functions.py::test_plane_normal_from_points_stacked",
"polliwog/plane/test_functions.py::test_normal_and_offset_from_plane_equations",
"polliwog/plane/test_functions.py::test_signed_distances_for_xz_plane_at_origin",
"polliwog/plane/test_functions.py::test_signed_distances_for_diagonal_plane",
"polliwog/plane/test_functions.py::test_signed_distance_validation",
"polliwog/plane/test_functions.py::test_project_point_to_plane",
"polliwog/plane/test_functions.py::test_project_point_to_plane_vectorized_points",
"polliwog/plane/test_functions.py::test_project_point_to_plane_vectorized_planes",
"polliwog/plane/test_functions.py::test_project_point_to_plane_vectorized_both",
"polliwog/plane/test_functions.py::test_project_point_to_plane_validation",
"polliwog/plane/test_functions.py::test_mirror_point_across_plane_vectorized_points",
"polliwog/pointcloud/test_pointcloud_functions.py::test_percentile",
"polliwog/pointcloud/test_pointcloud_functions.py::test_extent",
"polliwog/shapes/test_shapes.py::test_rectangular_prism",
"polliwog/shapes/test_shapes.py::test_cube",
"polliwog/shapes/test_shapes.py::test_triangular_prism",
"polliwog/tri/test_functions.py::test_surface_normals_from_points_single",
"polliwog/tri/test_functions.py::test_surface_normals_from_points_vectorized",
"polliwog/tri/test_functions.py::test_tri_contains_coplanar_point",
"polliwog/tri/test_functions.py::test_barycentric"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-04-22 17:47:44+00:00
|
bsd-2-clause
| 3,498 |
|
lace__vg-105
|
diff --git a/vg/core.py b/vg/core.py
index 0c3867b..273ddc9 100644
--- a/vg/core.py
+++ b/vg/core.py
@@ -503,14 +503,14 @@ def apex(points, along):
interest.
Returns:
- np.ndarray: A `3x1` point taken from `points`.
+ np.ndarray: A copy of a point taken from `points`.
"""
if points.ndim != 2 or points.shape[1] != 3:
raise ValueError("Invalid shape %s: apex expects nx3" % (points.shape,))
if along.shape != (3,):
raise ValueError("along should be a 3x1 vector")
coords_on_axis = points.dot(along)
- return points[np.argmax(coords_on_axis)]
+ return points[np.argmax(coords_on_axis)].copy()
def nearest(from_points, to_point, ret_index=False):
|
lace/vg
|
4bbfb179da51a24f23bcb48c57f22b19154b3fe1
|
diff --git a/vg/test_apex.py b/vg/test_apex.py
index 478430c..605aa67 100644
--- a/vg/test_apex.py
+++ b/vg/test_apex.py
@@ -50,3 +50,23 @@ def test_apex():
with pytest.raises(ValueError, match="along should be a 3x1 vector"):
vg.apex(points, along=points)
+
+
+def test_apex_returns_a_copy():
+ points = np.array(
+ [
+ [-0.97418884, -0.79808404, -0.18545491],
+ [0.60675227, 0.32673201, -0.20369793],
+ [0.67040405, 0.19267665, -0.56983579],
+ [-0.68038753, -0.90011588, 0.4649872],
+ [-0.62813991, -0.23947753, 0.07933854],
+ [0.26348356, 0.23701114, -0.38230596],
+ [0.08302473, 0.2784907, 0.09308946],
+ [0.58695587, -0.33253376, -0.33493078],
+ [-0.39221704, -0.45240036, 0.25284163],
+ [0.46270635, -0.3865265, -0.98106526],
+ ]
+ )
+ result = vg.apex(points, along=vg.basis.x)
+ result[1] = 5.0
+ np.testing.assert_array_equal(points[2], [0.67040405, 0.19267665, -0.56983579])
|
`vg.apex()` should copy instead of returning a view
|
0.0
|
4bbfb179da51a24f23bcb48c57f22b19154b3fe1
|
[
"vg/test_apex.py::test_apex_returns_a_copy"
] |
[
"vg/test_apex.py::test_apex"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-01-22 21:22:40+00:00
|
bsd-2-clause
| 3,499 |
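The record above boils down to NumPy's view semantics: basic integer indexing of a 2-D array hands back a row that shares memory with the input, so a caller can accidentally mutate the original data. A minimal standalone sketch of that behaviour (arbitrary values, not the vg test fixture) and of the `.copy()` fix used in the patch:

```python
import numpy as np

points = np.array([
    [0.0, 1.0, 2.0],
    [9.0, 4.0, 5.0],   # row with the largest x coordinate
    [3.0, 0.0, 1.0],
])

# Basic integer indexing returns a view that shares memory with `points`,
# so mutating the result silently mutates the caller's array.
view = points[np.argmax(points[:, 0])]
view[1] = -1.0
print(points[1])   # [ 9. -1.  5.] -- the original row changed

# Taking an explicit copy decouples the result from the input,
# which is what the patch above does for vg.apex().
points[1, 1] = 4.0   # restore the original value
safe = points[np.argmax(points[:, 0])].copy()
safe[1] = -1.0
print(points[1])   # [9. 4. 5.] -- unchanged
```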
|
lace__vg-110
|
diff --git a/vg/core.py b/vg/core.py
index 9b81c34..df58395 100644
--- a/vg/core.py
+++ b/vg/core.py
@@ -62,8 +62,8 @@ def perpendicular(v1, v2, normalized=True):
are both stacked, `result[k]` is perpendicular to `v1[k]` and `v2[k]`.)
Args:
- v1 (np.arraylike): A `3x1` vector or a `kx3` stack of vectors.
- v2 (np.arraylike): A `3x1 vector or a `kx3` stack of vectors. If
+ v1 (np.arraylike): A `(3,)` vector or a `kx3` stack of vectors.
+ v2 (np.arraylike): A `(3,)` vector or a `kx3` stack of vectors. If
stacked, the shape must be the same as `v1`.
normalized (bool): When `True`, the result vector is guaranteed to be
unit length.
@@ -167,15 +167,15 @@ def reject_axis(vector, axis, squash=False):
def magnitude(vector):
"""
- Compute the magnitude of `vector`. For stacked inputs, compute the magnitude
- of each one.
+ Compute the magnitude of `vector`. For a stacked input, compute the
+ magnitude of each one.
Args:
- vector (np.arraylike): A `3x1` vector or a `kx3` stack of vectors.
+ vector (np.arraylike): A `(3,)` vector or a `kx3` stack of vectors.
Returns:
- object: For `3x1` inputs, a `float` with the magnitude. For `kx1`
- inputs, a `kx1` array.
+ object: For a `(3,)` input, a `float` with the magnitude. For a `kx3`
+ input, a `(k,)` array.
"""
if vector.ndim == 1:
return np.linalg.norm(vector)
@@ -197,14 +197,14 @@ def euclidean_distance(v1, v2):
points.
Args:
- v1 (np.arraylike): A `3x1` vector or a `kx3` stack of vectors.
- v2 (np.arraylike): A `3x1` vector or a `kx3` stack of vectors. If
+ v1 (np.arraylike): A `(3,)` vector or a `kx3` stack of vectors.
+ v2 (np.arraylike): A `(3,)` vector or a `kx3` stack of vectors. If
stacks are provided for both `v1` and `v2` they must have the
same shape.
Returns:
- object: When both inputs are `3x1`, a `float` with the distance. Otherwise
- a `kx1` array.
+ object: When both inputs are `(3,)`, a `float` with the distance.
+ Otherwise a `(k,)` array.
"""
k = check_value_any(v1, (3,), (-1, 3), name="v1")
check_value_any(
@@ -219,17 +219,17 @@ def euclidean_distance(v1, v2):
def angle(v1, v2, look=None, assume_normalized=False, units="deg"):
"""
- Compute the unsigned angle between two vectors. For stacked inputs, the
+ Compute the unsigned angle between two vectors. For a stacked input, the
angle is computed pairwise.
When `look` is provided, the angle is computed in that viewing plane
(`look` is the normal). Otherwise the angle is computed in 3-space.
Args:
- v1 (np.arraylike): A `3x1` vector or a `kx3` stack of vectors.
+ v1 (np.arraylike): A `(3,)` vector or a `kx3` stack of vectors.
v2 (np.arraylike): A vector or stack of vectors with the same shape as
`v1`.
- look (np.arraylike): A `3x1` vector specifying the normal of a viewing
+ look (np.arraylike): A `(3,)` vector specifying the normal of a viewing
plane, or `None` to compute the angle in 3-space.
assume_normalized (bool): When `True`, assume the input vectors
are unit length. This improves performance, however when the inputs
@@ -237,8 +237,8 @@ def angle(v1, v2, look=None, assume_normalized=False, units="deg"):
units (str): `'deg'` to return degrees or `'rad'` to return radians.
Return:
- object: For `3x1` inputs, a `float` with the angle. For `kx1` inputs,
- a `kx1` array.
+ object: For a `(3,)` input, a `float` with the angle. For a `kx3`
+ input, a `(k,)` array.
"""
if units not in ["deg", "rad"]:
raise ValueError("Unrecognized units {}; expected deg or rad".format(units))
@@ -266,7 +266,7 @@ def angle(v1, v2, look=None, assume_normalized=False, units="deg"):
def signed_angle(v1, v2, look, units="deg"):
"""
- Compute the signed angle between two vectors. For stacked inputs, the
+ Compute the signed angle between two vectors. For a stacked input, the
angle is computed pairwise.
Results are in the range -180 and 180 (or `-math.pi` and `math.pi`). A
@@ -274,16 +274,16 @@ def signed_angle(v1, v2, look, units="deg"):
number is counterclockwise.
Args:
- v1 (np.arraylike): A `3x1` vector or a `kx3` stack of vectors.
+ v1 (np.arraylike): A `(3,)` vector or a `kx3` stack of vectors.
v2 (np.arraylike): A vector or stack of vectors with the same shape as
`v1`.
- look (np.arraylike): A `3x1` vector specifying the normal of the
+ look (np.arraylike): A `(3,)` vector specifying the normal of the
viewing plane.
units (str): `'deg'` to return degrees or `'rad'` to return radians.
Returns:
- object: For `3x1` inputs, a `float` with the angle. For `kx1` inputs,
- a `kx1` array.
+ object: For a `(3,)` input, a `float` with the angle. For a `kx3`
+ input, a `(k,)` array.
"""
# The sign of (A x B) dot look gives the sign of the angle.
# > 0 means clockwise, < 0 is counterclockwise.
@@ -301,8 +301,8 @@ def rotate(vector, around_axis, angle, units="deg", assume_normalized=False):
around `around_axis` is determined by the right-hand rule.
Args:
- vector (np.arraylike): A `3x1` vector or a `kx3` stack of vectors.
- around_axis (np.arraylike): A `3x1` vector specifying the axis of rotation.
+ vector (np.arraylike): A `(3,)` vector or a `kx3` stack of vectors.
+ around_axis (np.arraylike): A `(3,)` vector specifying the axis of rotation.
assume_normalized (bool): When `True`, assume `around_axis` is unit
length. This improves performance marginally, however
when the inputs are not normalized, setting this will cause an
@@ -412,14 +412,15 @@ def almost_zero(v, atol=1e-08):
def almost_unit_length(vector, atol=1e-08):
"""
- Test if the `vector` has almost unit length. For stacked inputs, test each
+ Test if the `vector` has almost unit length. For a stacked input, test each
one.
Args:
- vector (np.arraylike): A `3x1` vector or a `kx3` stack of vectors.
+ vector (np.arraylike): A `(3,)` vector or a `kx3` stack of vectors.
Returns:
- object: For `3x1` inputs, a `bool`. For `kx1` inputs, a `kx1` array.
+ object: For a `(3,)` input, a `bool`. For a `kx3` input, a `(k,)`
+ array.
"""
return np.isclose(magnitude(vector), 1.0, rtol=0, atol=atol)
@@ -481,7 +482,7 @@ def major_axis(coords):
coords (np.arraylike): A `nxk` stack of coordinates.
Returns:
- np.ndarray: A `kx1` vector.
+ np.ndarray: A `(k,)` vector.
See also:
- http://setosa.io/ev/principal-component-analysis/
@@ -497,7 +498,7 @@ def apex(points, along):
Args:
points (np.arraylike): A `kx3` stack of points in R^3.
- along (np.arraylike): A `3x1` vector specifying the direction of
+ along (np.arraylike): A `(3,)` vector specifying the direction of
interest.
Returns:
@@ -506,7 +507,7 @@ def apex(points, along):
if points.ndim != 2 or points.shape[1] != 3:
raise ValueError("Invalid shape %s: apex expects nx3" % (points.shape,))
if along.shape != (3,):
- raise ValueError("along should be a 3x1 vector")
+ raise ValueError("along should be a (3,) vector")
coords_on_axis = points.dot(along)
return points[np.argmax(coords_on_axis)].copy()
@@ -517,11 +518,11 @@ def nearest(from_points, to_point, ret_index=False):
Args:
from_points (np.arraylike): A `kx3` stack of points in R^3.
- to_point (np.arraylike): A `3x1` point of interest.
+ to_point (np.arraylike): A `(3,)` point of interest.
ret_index (bool): When `True`, return both the point and its index.
Returns:
- np.ndarray: A `3x1` vector taken from `from_points`.
+ np.ndarray: A `(3,)` vector taken from `from_points`.
"""
check(locals(), "from_points", (-1, 3))
check(locals(), "to_point", (3,))
@@ -543,18 +544,18 @@ def farthest(from_points, to_point, ret_index=False):
Args:
from_points (np.arraylike): A `kx3` stack of points in R^3.
- to_point (np.arraylike): A `3x1` point of interest.
+ to_point (np.arraylike): A `(3,)` point of interest.
ret_index (bool): When `True`, return both the point and its index.
Returns:
- np.ndarray: A `3x1` vector taken from `from_points`.
+ np.ndarray: A `(3,)` vector taken from `from_points`.
"""
if from_points.ndim != 2 or from_points.shape[1] != 3:
raise ValueError(
"Invalid shape %s: farthest expects nx3" % (from_points.shape,)
)
if to_point.shape != (3,):
- raise ValueError("to_point should be 3x1")
+ raise ValueError("to_point should be (3,)")
absolute_distances = magnitude(from_points - to_point)
@@ -575,21 +576,21 @@ def within(points, radius, of_point, atol=1e-08, ret_indices=False):
points (np.arraylike): A `kx3` stack of points in R^3.
radius (float): The radius of the sphere of interest centered on
`of_point`.
- of_point (np.arraylike): The `3x1` point of interest.
+ of_point (np.arraylike): The `(3,)` point of interest.
atol (float): The distance tolerance. Points within `radius + atol`
of `of_point` are selected.
ret_indexes (bool): When `True`, return both the points and their
indices.
Returns:
- np.ndarray: A `3x1` vector taken from `points`.
+ np.ndarray: A `(3,)` vector taken from `points`.
"""
if points.ndim != 2 or points.shape[1] != 3:
raise ValueError("Invalid shape %s: within expects nx3" % (points.shape,))
if not isinstance(radius, float):
raise ValueError("radius should be a float")
if of_point.shape != (3,):
- raise ValueError("to_point should be 3x1")
+ raise ValueError("to_point should be (3,)")
absolute_distances = magnitude(points - of_point)
(indices_within_radius,) = (absolute_distances < radius + atol).nonzero()
@@ -632,8 +633,8 @@ def dot(v1, v2):
Compute individual or pairwise dot products.
Args:
- v1 (np.arraylike): A `3x1` vector or a `kx3` stack of vectors.
- v2 (np.arraylike): A `3x1` vector or a `kx3` stack of vectors. If
+ v1 (np.arraylike): A `(3,)` vector or a `kx3` stack of vectors.
+ v2 (np.arraylike): A `(3,)` vector or a `kx3` stack of vectors. If
stacks are provided for both `v1` and `v2` they must have the
same shape.
"""
@@ -651,8 +652,8 @@ def cross(v1, v2):
Compute individual or pairwise cross products.
Args:
- v1 (np.arraylike): A `3x1` vector or a `kx3` stack of vectors.
- v2 (np.arraylike): A `3x1` vector or a `kx3` stack of vectors. If
+ v1 (np.arraylike): A `(3,)` vector or a `kx3` stack of vectors.
+ v2 (np.arraylike): A `(3,)` vector or a `kx3` stack of vectors. If
stacks are provided for both `v1` and `v2` they must have the
same shape.
"""
diff --git a/vg/matrix.py b/vg/matrix.py
index db3cefb..4356fc0 100644
--- a/vg/matrix.py
+++ b/vg/matrix.py
@@ -54,7 +54,7 @@ def transform(vertices, transform):
raise_dimension_error(vertices)
if matrix.shape[1] != 3:
- raise ValueError("Vertices should be 3x1 or Nx3")
+ raise ValueError("Vertices should be (3,) or Nx3")
result = unpad(np.dot(transform, pad_with_ones(matrix).T).T)
return result[0] if vertices.ndim == 1 else result
diff --git a/vg/shape.py b/vg/shape.py
index ffdd88b..004c001 100644
--- a/vg/shape.py
+++ b/vg/shape.py
@@ -157,15 +157,18 @@ def check(locals_namespace, name, shape):
def columnize(arr, shape=(-1, 3), name=None):
"""
- Helper for functions which may accept many stacks of three points (kx3)
- returning a stack of results, or a single set of three points (3x1)
- returning a single result.
-
- Returns the points as kx3, and a `transform_result` function which can
- be applied to the result. It picks off the first result in the 3x1 case.
-
- Not limited to kx3; this can be used different dimensional shapes like
- kx4, or higher dimensional shapes like kx3x3.
+ Helper for functions which may accept a stack of points (`kx3`) returning
+ a stack of results, or a single set of three points `(3,)` returning a
+ single result.
+
+ For either kind of input, it returns the points as `kx3`, a boolean
+ `is_columnized`, and a `maybe_decolumnized` function which can be applied
+ to the result before returning it. For a columnized input this function
+ does nothing, and for a non-columnized input, it decolumnizes it,
+ producing the desired return value.
+
+ This is not limited to `kx3`. It can be used for different dimensional
+ shapes like `kx4`, and even higher dimensional shapes like `kx3x3`.
"""
if not isinstance(shape, tuple):
raise ValueError("shape should be a tuple")
|
lace/vg
|
2c35fa53761c354d2758996af9e0f32a2af5a083
|
diff --git a/vg/test_apex.py b/vg/test_apex.py
index 605aa67..d42f5f9 100644
--- a/vg/test_apex.py
+++ b/vg/test_apex.py
@@ -45,10 +45,10 @@ def test_apex():
# Test non-normalized too.
np.testing.assert_array_equal(vg.apex(points, along=np.array([1, 1, 1])), expected)
- with pytest.raises(ValueError, match="Invalid shape \\(3,\\): apex expects nx3"):
+ with pytest.raises(ValueError, match=r"Invalid shape \(3,\): apex expects nx3"):
vg.apex(vg.basis.x, along=vg.basis.x)
- with pytest.raises(ValueError, match="along should be a 3x1 vector"):
+ with pytest.raises(ValueError, match=r"along should be a \(3,\) vector"):
vg.apex(points, along=points)
diff --git a/vg/test_farthest.py b/vg/test_farthest.py
index a9bba6c..6af5f4f 100644
--- a/vg/test_farthest.py
+++ b/vg/test_farthest.py
@@ -17,10 +17,8 @@ def test_farthest():
vg.farthest(from_points, to_point, ret_index=False), from_points[1]
)
- with pytest.raises(
- ValueError, match="Invalid shape \\(3,\\): farthest expects nx3"
- ):
+ with pytest.raises(ValueError, match=r"Invalid shape \(3,\): farthest expects nx3"):
vg.farthest(to_point, to_point)
- with pytest.raises(ValueError, match="to_point should be 3x1"):
+ with pytest.raises(ValueError, match=r"to_point should be \(3,\)"):
vg.farthest(from_points, from_points)
diff --git a/vg/test_matrix_transform.py b/vg/test_matrix_transform.py
index 5e6908c..9ead30e 100644
--- a/vg/test_matrix_transform.py
+++ b/vg/test_matrix_transform.py
@@ -28,7 +28,7 @@ def test_apply_homogeneous_stacked():
def test_apply_homogeneous_error():
with pytest.raises(ValueError, match="Transformation matrix should be 4x4"):
apply_transform(np.array([1.0, 2.0, 3.0]), np.array([1.0]))
- with pytest.raises(ValueError, match="Vertices should be 3x1 or Nx3"):
+ with pytest.raises(ValueError, match=r"Vertices should be \(3,\) or Nx3"):
apply_transform(np.array([1.0, 2.0]), transform)
with pytest.raises(ValueError, match="Not sure what to do with 3 dimensions"):
apply_transform(np.array([[[1.0, 2.0, 3.0]]]), transform)
diff --git a/vg/test_within.py b/vg/test_within.py
index e310b17..e84ee3b 100644
--- a/vg/test_within.py
+++ b/vg/test_within.py
@@ -66,7 +66,7 @@ def test_within_error():
radius=False,
of_point=np.array([0.0, 1.0, 0.0]),
)
- with pytest.raises(ValueError, match="to_point should be 3x1"):
+ with pytest.raises(ValueError, match=r"to_point should be \(3,\)"):
vg.within(
np.array([[2.0, 4.0, 0.0]]), radius=4.0, of_point=np.array([0.0, 1.0])
)
|
Error using signed_angle with vector array
Hello!
I am calling
```python
angles = vg.signed_angle(vec1_array.T, points.T, look, units='rad')
```
with shapes
```
('points', (3, 8))
('vec1_array', (3, 8))
('look', (3, 1))
```
However, I get the error:
```
Traceback (most recent call last):
File "./extract_fe_pictures.py", line 113, in <module>
test_code('skywalker_2013_mod', plot=False, interactive=False, export_path='.', verbose=False)
File "./extract_fe_pictures.py", line 28, in test_code
safe_poly.plot()
File "/home/george/ros_workspaces/uav_ftc/src/uav_ftc/src/uav_ftc/polytope_utils.py", line 1271, in plot
face_points = self._get_face_points(temp_polytope)
File "/home/george/ros_workspaces/uav_ftc/src/uav_ftc/src/uav_ftc/polytope_utils.py", line 853, in _get_face_points
angles = vg.signed_angle(vec1_array.T, points.T, look, units='rad')
File "/usr/local/lib/python2.7/dist-packages/vg/core.py", line 297, in signed_angle
return sign * angle(v1, v2, look, units=units)
File "/usr/local/lib/python2.7/dist-packages/vg/core.py", line 251, in angle
v1, v2 = [reject(v, from_v=look) for v in (v1, v2)]
File "/usr/local/lib/python2.7/dist-packages/vg/core.py", line 125, in reject
return vector - project(vector, onto=from_v)
File "/usr/local/lib/python2.7/dist-packages/vg/core.py", line 92, in project
return scalar_projection(vector, onto=onto)[:, np.newaxis] * normalize(onto)
File "/usr/local/lib/python2.7/dist-packages/vg/core.py", line 112, in scalar_projection
check(locals(), "onto", (k, 3))
File "/usr/local/lib/python2.7/dist-packages/vg/shape.py", line 93, in check
return check_value(locals_namespace[name], shape, name=name)
File "/usr/local/lib/python2.7/dist-packages/vg/shape.py", line 54, in check_value
raise ValueError("{} with shape {}; got {}".format(preamble, shape, a.shape))
ValueError: onto must be an array with shape (8, 3); got (3, 1)
```
I believe I have set the dimensions correctly.
Am I missing something?
Thanks!
|
0.0
|
2c35fa53761c354d2758996af9e0f32a2af5a083
|
[
"vg/test_apex.py::test_apex",
"vg/test_farthest.py::test_farthest",
"vg/test_matrix_transform.py::test_apply_homogeneous_error",
"vg/test_within.py::test_within_error"
] |
[
"vg/test_apex.py::test_apex_returns_a_copy",
"vg/test_matrix_transform.py::test_apply_homogeneous",
"vg/test_matrix_transform.py::test_apply_homogeneous_stacked",
"vg/test_within.py::test_within"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-04-22 19:15:20+00:00
|
bsd-2-clause
| 3,500 |
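The traceback in this record comes from passing a `(3, 1)` column vector where the library expects a flat `(3,)` vector, which is exactly the convention the docstring patch spells out. A small caller-side sketch of the workaround (made-up data, and it assumes a vg release whose `signed_angle` accepts `kx3` stacks):

```python
import numpy as np
import vg

vec1_array = np.random.rand(8, 3)          # kx3 stack of vectors
points = np.random.rand(8, 3)              # kx3 stack of vectors
look = np.array([[0.0], [0.0], [1.0]])     # (3, 1) column vector: rejected by vg

# vg expects a plain (3,) vector for the viewing-plane normal, so flatten it.
angles = vg.signed_angle(vec1_array, points, look.reshape(-1), units="rad")
print(angles.shape)                        # (8,)
```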
|
lace__vg-158
|
diff --git a/README.md b/README.md
index 37a6e9c..60a4081 100644
--- a/README.md
+++ b/README.md
@@ -13,7 +13,7 @@ A **v**ery **g**ood vector-geometry toolbelt for dealing with 3D points and
vectors. These are simple [NumPy][] operations made readable, built to scale
from prototyping to production.
-See the complete API reference: https://vgpy.readthedocs.io/en/latest/
+:book: See the complete documentation: https://vgpy.readthedocs.io/en/latest/
[pypi]: https://pypi.org/project/vg/
[coverage]: https://github.com/lace/vg/blob/master/.coveragerc
@@ -70,14 +70,6 @@ angles = np.arccos(np.clip(cosines, -1.0, 1.0))
angles = vg.angle(v1s, v2s)
```
-Features
---------
-
-All functions are optionally vectorized, meaning they accept single inputs and
-stacks of inputs interchangeably. They return The Right Thing – a single
-result or a stack of results – without the need to reshape inputs or outputs.
-With the power of NumPy, the vectorized functions are fast.
-
Installation
------------
@@ -100,47 +92,17 @@ projected = vg.scalar_projection(
```
-Design principles
------------------
-
-Linear algebra is useful and it doesn't have to be dificult to use. With the
-power of abstractions, simple operations can be made simple, without poring
-through lecture slides, textbooks, inscrutable Stack Overflow answers, or
-dense NumPy docs. Code that uses linear algebra and geometric transformation
-should be readable like English, without compromising efficiency.
-
-These common operations should be abstracted for a few reasons:
-
-1. If a developer is not programming linalg every day, they might forget the
- underlying formula. These forms are easier to remember and more easily
- referenced.
-
-2. These forms tend to be self-documenting in a way that the NumPy forms are
- not. If a developer is not programming linalg every day, this will again
- come in handy.
-
-3. These implementations are more robust. They automatically inspect `ndim`
- on their arguments, so they work equally well if the argument is a vector
- or a stack of vectors. They are more careful about checking edge cases
- like a zero norm or zero cross product and returning a correct result
- or raising an appropriate error.
-
-
Development
-----------
+First, [install Poetry][].
+
After cloning the repo, run `./bootstrap.zsh` to initialize a virtual
environment with the project's dependencies.
Subsequently, run `./dev.py install` to update the dependencies.
-
-Versioning
-----------
-
-This library adheres to [Semantic Versioning][semver].
-
-[semver]: https://semver.org/
+[install poetry]: https://python-poetry.org/docs/#installation
Acknowledgements
diff --git a/doc/index.md b/doc/index.md
index 94503b2..84d8292 100644
--- a/doc/index.md
+++ b/doc/index.md
@@ -112,13 +112,57 @@ These common operations should be abstracted for a few reasons:
or raising an appropriate error.
-Versioning
-----------
+Future-proofing your application or library
+-------------------------------------------
This library adheres to [Semantic Versioning][semver].
[semver]: https://semver.org/
+Since Python can accommodate only one installation of a package, using a
+toolbelt like `vg` as a transitive dependency can be a particular challenge, as
+various dependencies in the tree may rely on different versions of vg.
+
+One option would be to avoid making breaking changes forever. However, this is
+antithetical to one of the goals of the project, which is to make a friendly
+interface for doing linear algebra. Experience has shown that over the years,
+we get clearer about what does and doesn't belong in this library, and what ways
+of exposing this functionality are easiest to learn. We want to continue to
+improve the interface over time, even if it means small breaking changes.
+
+As a result, we provide a forward compatibility layer, which all libraries
+depending on `vg` are encouraged to use. Replace `import vg` with
+`from vg.compat import v1 as vg` and use `>=1.11` as your dependency specifier.
+You can also replace 1.11 with a later version which includes a feature you
+need. The important thing is not to use `>=1.11,<2`. Since this project
+guarantees that `from vg.compat import v1 as vg` will continue to work the same
+in 2.0+, the `<2` constraint provides no stability value – and it makes
+things unnecessarily difficult for consumers who use multiple dependencies with
+`vg`.
+
+Applications have two options:
+
+1. Follow the recommendation for libraries: specify `>=1.11` and import using
+ `from vg.compat import v1 as vg`. This option provides better code stability
+ and makes upgrades seamless.
+2. Specify `>=1.11,<2` and use `import vg` directly, and when upgrading to
+ `>=2,<3`, review the changelog and modify the calling code if necessary.
+ This option ensures you stay up to date with the recommended, friendliest
+ interface for calling into `vg`.
+
+### Breaking changes
+
+The project's goal is to limit breaking changes to the API to every one to two
+years. This means breaking changes must be batched. Typically such features are
+first made available under the `vg.experimental` module, and then moved into
+`vg` upon the next major version release. Such experimental features may change
+in any subsequent minor release.
+
+### Deprecations
+
+Deprecated features will emit deprecation warnings in a minor version and cause
+errors or incorrect behavior in the next major version.
+
If you like vg you might also like …
-------------------------------------------
diff --git a/vg/compat/v1.py b/vg/compat/v1.py
new file mode 100644
index 0000000..31d9d3e
--- /dev/null
+++ b/vg/compat/v1.py
@@ -0,0 +1,1 @@
+from ..core import * # noqa: F401, F403
|
lace/vg
|
cf7295e8750e725bb9bf952a4412ea214f90d08b
|
diff --git a/vg/compat/__init__.py b/vg/compat/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/vg/compat/test_v1.py b/vg/compat/test_v1.py
new file mode 100644
index 0000000..d22eb4e
--- /dev/null
+++ b/vg/compat/test_v1.py
@@ -0,0 +1,21 @@
+import numpy as np
+import pytest
+import vg.compat.v1 as vg
+
+
+def test_v1_has_functions():
+ np.testing.assert_array_equal(
+ vg.normalize(np.array([5, 0, 0])), np.array([1, 0, 0])
+ )
+
+
+def test_v1_has_constants():
+ np.testing.assert_array_equal(vg.basis.x, np.array([1, 0, 0]))
+
+
+def test_v1_orient_is_alias_for_aligned_with():
+ v1 = np.array([1.0, 2.0, 3.0])
+ with pytest.deprecated_call():
+ np.testing.assert_array_equal(
+ vg.orient(v1, along=vg.basis.z), vg.aligned_with(v1, along=vg.basis.z)
+ )
|
Add a compatibility layer
Since Python modules are installed in a global namespace, breaking changes to APIs can be very difficult to deal with, especially in a toolbelt like this one, since it's intended to be used by whole ecosystems of packages. If a breaking change is introduced here it needs to be updated at every point in the dependency tree, all at once, which is challenging.
I don't want to freeze the API forever, so instead the solution should be to provide a stable compatibility layer for libraries. For example, instead of `import vg`, a library would use `from vg import v1 as vg`. That way, libraries will be inured to future breaking changes, so long as a 1.x or later version is used.
Convenience for application code means that `import vg` will keep working, and use the latest version.
It could also be possible to use `from vg import next as vg` to provide experimental APIs before they've reached a point of stability.
I'd like to include this before the next major version is released.
|
0.0
|
cf7295e8750e725bb9bf952a4412ea214f90d08b
|
[
"vg/compat/test_v1.py::test_v1_has_functions",
"vg/compat/test_v1.py::test_v1_has_constants",
"vg/compat/test_v1.py::test_v1_orient_is_alias_for_aligned_with"
] |
[] |
{
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-07-10 19:29:37+00:00
|
bsd-2-clause
| 3,501 |
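The compatibility layer added in this record is essentially a frozen re-export module that downstream libraries opt into instead of the top-level package. A short usage sketch (it assumes vg 1.11 or later, where `vg.compat.v1` exists):

```python
import numpy as np

# Libraries pin ">=1.11" and import the frozen v1 surface, so future major
# versions of vg cannot break them.
from vg.compat import v1 as vg

print(vg.normalize(np.array([5.0, 0.0, 0.0])))   # [1. 0. 0.]
print(vg.basis.x)                                # [1 0 0]
```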
|
lapix-ufsc__lapixdl-43
|
diff --git a/lapixdl/formats/annotation.py b/lapixdl/formats/annotation.py
index 919f70a..a0e2eb1 100644
--- a/lapixdl/formats/annotation.py
+++ b/lapixdl/formats/annotation.py
@@ -26,6 +26,16 @@ class BBox:
cls: int
score: float | None = None
+ def __post_init__(self):
+ if self.upper_left_x < 0 or self.upper_left_y < 0:
+ raise ValueError(f'The upper left (x, y) should be positive values. Got ({self.upper_left_x}, {self.upper_left_y})')
+
+ if self.width <= 0:
+ raise ValueError(f'The width should be bigger than zero. Got {self.width}')
+
+ if self.height <= 0:
+ raise ValueError(f'The height should be bigger than zero. Got {self.height}')
+
@property
def upper_left_point(self) -> tuple[int, int]:
"""Tuple[int, int]: (X,Y) of the upper left point of the Bounding Box."""
|
lapix-ufsc/lapixdl
|
354ad05a93680744e0c0d3a9345fc05d23ca6f79
|
diff --git a/tests/formats/annotation_test.py b/tests/formats/annotation_test.py
index aa25429..202ba3a 100644
--- a/tests/formats/annotation_test.py
+++ b/tests/formats/annotation_test.py
@@ -28,6 +28,20 @@ def test_bbox():
assert bbox.slice_y == slice(0, 14)
+def test_invalid_bbox():
+ with pytest.raises(ValueError):
+ BBox(0, -1, 1, 1, 0)
+
+ with pytest.raises(ValueError):
+ BBox(-1, 0, 1, 1, 0)
+
+ with pytest.raises(ValueError):
+ BBox(0, 0, 0, 1, 0)
+
+ with pytest.raises(ValueError):
+ BBox(0, 0, 1, 0, 0)
+
+
def test_bbox_intersection_and_union_area_with():
bbox_A = BBox(0, 0, 10, 15, 0)
bbox_B = BBox(5, 5, 20, 25, 0)
|
Division by zero at calculate_bbox_iou
I can't reproduce the error with tests, but sometimes, at https://github.com/lapix-ufsc/lapixdl/blob/ee3faf20b2beab7bbb794328f724b7b8044ac1b1/lapixdl/evaluation/evaluate.py#L211,
the union equals zero and the error is raised. To fix it, we just need:
```python
if union_area == 0:
    return 0
```
I haven't opened a PR for this because I can't reproduce it when I try.
|
0.0
|
354ad05a93680744e0c0d3a9345fc05d23ca6f79
|
[
"tests/formats/annotation_test.py::test_invalid_bbox"
] |
[
"tests/formats/annotation_test.py::test_bbox",
"tests/formats/annotation_test.py::test_bbox_intersection_and_union_area_with",
"tests/formats/annotation_test.py::test_bbox_to_polygon",
"tests/formats/annotation_test.py::test_bounds_to_bbox",
"tests/formats/annotation_test.py::test_annotation_bbox",
"tests/formats/annotation_test.py::test_annotation_geo_type",
"tests/formats/annotation_test.py::test_annotation_xywh_bbox",
"tests/formats/annotation_test.py::test_annotation_iter",
"tests/formats/annotation_test.py::test_annotation_iter_wrong_geo",
"tests/formats/annotation_test.py::test_annotation_copy"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2022-10-26 13:21:23+00:00
|
mit
| 3,502 |
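The fix in this record validates the bounding box in `__post_init__`, which a dataclass runs right after the generated `__init__` assigns the fields, so invalid boxes (and the zero-area unions they cause downstream) are rejected at construction time. A standalone sketch of the pattern with a hypothetical `Box` class, not the lapixdl `BBox`:

```python
from dataclasses import dataclass


@dataclass
class Box:
    x: int
    y: int
    width: int
    height: int

    def __post_init__(self):
        # Runs immediately after the generated __init__, so every instance
        # that survives construction satisfies the invariants.
        if self.x < 0 or self.y < 0:
            raise ValueError(f"Upper-left corner must be non-negative, got ({self.x}, {self.y})")
        if self.width <= 0 or self.height <= 0:
            raise ValueError(f"Width and height must be positive, got {self.width}x{self.height}")


Box(0, 0, 10, 5)        # fine
try:
    Box(0, 0, 0, 5)     # zero width is rejected up front
except ValueError as err:
    print(err)
```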
|
lark-parser__lark-1003
|
diff --git a/lark/lark.py b/lark/lark.py
index 0d143df..f29d444 100644
--- a/lark/lark.py
+++ b/lark/lark.py
@@ -3,7 +3,7 @@ import sys, os, pickle, hashlib
import tempfile
from typing import (
TypeVar, Type, List, Dict, Iterator, Callable, Union, Optional,
- Tuple, Iterable, IO, Any, TYPE_CHECKING
+ Tuple, Iterable, IO, Any, TYPE_CHECKING, Collection
)
if TYPE_CHECKING:
from .parsers.lalr_interactive_parser import InteractiveParser
@@ -416,7 +416,7 @@ class Lark(Serialize):
assert cache_md5 is not None
f.write(cache_md5.encode('utf8') + b'\n')
pickle.dump(used_files, f)
- self.save(f)
+ self.save(f, _LOAD_ALLOWED_OPTIONS)
if __doc__:
__doc__ += "\n\n" + LarkOptions.OPTIONS_DOC
@@ -451,12 +451,14 @@ class Lark(Serialize):
parser_conf = ParserConf(self.rules, self._callbacks, self.options.start)
return parser_class(self.lexer_conf, parser_conf, options=self.options)
- def save(self, f):
+ def save(self, f, exclude_options: Collection[str] = ()):
"""Saves the instance into the given file object
Useful for caching and multiprocessing.
"""
data, m = self.memo_serialize([TerminalDef, Rule])
+ if exclude_options:
+ data["options"] = {n: v for n, v in data["options"].items() if n not in exclude_options}
pickle.dump({'data': data, 'memo': m}, f, protocol=pickle.HIGHEST_PROTOCOL)
@classmethod
diff --git a/lark/parser_frontends.py b/lark/parser_frontends.py
index 06533e0..d0703f2 100644
--- a/lark/parser_frontends.py
+++ b/lark/parser_frontends.py
@@ -42,7 +42,7 @@ class MakeParsingFrontend:
class ParsingFrontend(Serialize):
- __serialize_fields__ = 'lexer_conf', 'parser_conf', 'parser', 'options'
+ __serialize_fields__ = 'lexer_conf', 'parser_conf', 'parser'
def __init__(self, lexer_conf, parser_conf, options, parser=None):
self.parser_conf = parser_conf
|
lark-parser/lark
|
2335aa63e183c9182d6f8554b0d2d3714fd2286b
|
diff --git a/tests/test_cache.py b/tests/test_cache.py
index 9f71552..5778632 100644
--- a/tests/test_cache.py
+++ b/tests/test_cache.py
@@ -52,6 +52,9 @@ class InlineTestT(Transformer):
def NUM(self, token):
return int(token)
+ def __reduce__(self):
+ raise TypeError("This Transformer should not be pickled.")
+
def append_zero(t):
return t.update(value=t.value + '0')
@@ -107,6 +110,8 @@ class TestCache(TestCase):
def test_inline(self):
# Test inline transformer (tree-less) & lexer_callbacks
+ # Note: the Transformer should not be saved to the file,
+ # and is made unpicklable to check for that
g = """
start: add+
add: NUM "+" NUM
@@ -134,7 +139,7 @@ class TestCache(TestCase):
assert len(self.mock_fs.files) == 1
res = parser.parse("ab")
self.assertEqual(res, Tree('startab', [Tree('expr', ['a', 'b'])]))
-
+
diff --git a/tests/test_nearley/nearley b/tests/test_nearley/nearley
index 3268316..a46b374 160000
--- a/tests/test_nearley/nearley
+++ b/tests/test_nearley/nearley
@@ -1,1 +1,1 @@
-Subproject commit 326831689826cb1b9a4d21d1ce0d5db9278e9636
+Subproject commit a46b37471db486db0f6e1ce6a2934fb238346b44
|
failure to load grammar from cache
(this is a follow-up on an issue encountered in #992)
```python
import logging
from lark import Lark, logger
from lark.visitors import Transformer, merge_transformers, v_args
logger.setLevel(logging.ERROR)
@v_args(inline=True)
class MergedTransformer(Transformer):
    SOMETHING = str


class MainTransformer(Transformer):
    pass


transformer = merge_transformers(
    MainTransformer(),
    merged=MergedTransformer()
)

parser = Lark(
    'start: "a"',
    parser='lalr',
    transformer=transformer,
    cache='cached_grammar',
    debug=True
)
```
in this minimal example, running the code for a second time (first we need to create the cached grammar) will result in this error being raised:
```python
Failed to load Lark from cache: 'cached_grammar'. We will try to carry on.
Traceback (most recent call last):
File "C:\Python39\lib\site-packages\lark-1.0.0a0-py3.9.egg\lark\lark.py", line 319, in __init__
cached_parser_data = pickle.load(f)
AttributeError: 'MergedTransformer' object has no attribute 'str'
```
|
0.0
|
2335aa63e183c9182d6f8554b0d2d3714fd2286b
|
[
"tests/test_cache.py::TestCache::test_inline"
] |
[
"tests/test_cache.py::TestCache::test_automatic_naming",
"tests/test_cache.py::TestCache::test_custom_lexer",
"tests/test_cache.py::TestCache::test_imports",
"tests/test_cache.py::TestCache::test_options",
"tests/test_cache.py::TestCache::test_simple"
] |
{
"failed_lite_validators": [
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-09-18 20:14:56+00:00
|
mit
| 3,503 |
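The fix in this record amounts to filtering run-time-only options (such as the transformer) out of the payload before it is pickled into the cache file. A generic sketch of that idea, with a made-up exclusion set rather than lark's actual internals:

```python
import io
import pickle

options = {
    "start": "start",
    "parser": "lalr",
    "transformer": lambda children: children,   # runtime object: not needed in the cache
}

# Hypothetical exclusion set; the real library keeps its own list of options
# that must be re-supplied at load time instead of being unpickled.
EXCLUDE_FROM_CACHE = {"transformer", "lexer_callbacks", "edit_terminals"}

to_save = {k: v for k, v in options.items() if k not in EXCLUDE_FROM_CACHE}

buf = io.BytesIO()
pickle.dump(to_save, buf, protocol=pickle.HIGHEST_PROTOCOL)
print(pickle.loads(buf.getvalue()))   # {'start': 'start', 'parser': 'lalr'}
```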
|
lark-parser__lark-1013
|
diff --git a/lark/visitors.py b/lark/visitors.py
index 03dc740..ae7930c 100644
--- a/lark/visitors.py
+++ b/lark/visitors.py
@@ -277,9 +277,15 @@ class Transformer_NonRecursive(Transformer):
del stack[-size:]
else:
args = []
- stack.append(self._call_userfunc(x, args))
+ try:
+ stack.append(self._call_userfunc(x, args))
+ except Discard:
+ pass
elif self.__visit_tokens__ and isinstance(x, Token):
- stack.append(self._call_userfunc_token(x))
+ try:
+ stack.append(self._call_userfunc_token(x))
+ except Discard:
+ pass
else:
stack.append(x)
|
lark-parser/lark
|
1f56497de8d90c6136dcda0ce71f4de3b298a07e
|
diff --git a/tests/test_trees.py b/tests/test_trees.py
index 36be73f..7d2cc7f 100644
--- a/tests/test_trees.py
+++ b/tests/test_trees.py
@@ -375,7 +375,11 @@ class TestTrees(TestCase):
self.assertEqual(x, t2)
def test_transformer_variants(self):
- tree = Tree('start', [Tree('add', [Token('N', '1'), Token('N', '2')]), Tree('add', [Token('N', '3'), Token('N', '4')])])
+ tree = Tree('start', [
+ Tree('add', [Token('N', '1'), Token('N', '2'), Token('IGNORE_TOKEN', '4')]),
+ Tree('add', [Token('N', '3'), Token('N', '4')]),
+ Tree('ignore_tree', [Token('DO', 'NOT PANIC')]),
+ ])
for base in (Transformer, Transformer_InPlace, Transformer_NonRecursive, Transformer_InPlaceRecursive):
class T(base):
def add(self, children):
@@ -384,6 +388,12 @@ class TestTrees(TestCase):
def N(self, token):
return int(token)
+ def ignore_tree(self, children):
+ raise Discard
+
+ def IGNORE_TOKEN(self, token):
+ raise Discard
+
copied = copy.deepcopy(tree)
result = T().transform(copied)
self.assertEqual(result, Tree('start', [3, 7]))
|
Transformer_NonRecursive fails on Discard
**Describe the bug**
As documented, `Transformer` catches any `Discard` exceptions and removes the node from the tree.
I expected `Transformer_NonRecursive` to behave the same way; instead it lets the exception propagate, eventually crashing the program.
**To Reproduce**
```
tree = Tree('start', [
    Tree('add', [Token('N', '1'), Token('N', '2'), Token('token_ignore', '4')]),
    Tree('add', [Token('N', '3'), Token('N', '4')]),
    Tree('ignore_tree', [Token('DO', 'NOT PANIC')]),
])

for base in (Transformer, Transformer_InPlace, Transformer_NonRecursive, Transformer_InPlaceRecursive):
    class T(base):
        def add(self, children):
            return sum(children)

        def N(self, token):
            return int(token)

        def ignore_tree(self, children):
            raise Discard

        def token_ignore(self, token):
            raise Discard
```
|
0.0
|
1f56497de8d90c6136dcda0ce71f4de3b298a07e
|
[
"tests/test_trees.py::TestTrees::test_transformer_variants"
] |
[
"tests/test_trees.py::TestTrees::test_deepcopy",
"tests/test_trees.py::TestTrees::test_discard",
"tests/test_trees.py::TestTrees::test_inline_static",
"tests/test_trees.py::TestTrees::test_interp",
"tests/test_trees.py::TestTrees::test_iter_subtrees",
"tests/test_trees.py::TestTrees::test_iter_subtrees_topdown",
"tests/test_trees.py::TestTrees::test_merge_transformers",
"tests/test_trees.py::TestTrees::test_partial",
"tests/test_trees.py::TestTrees::test_pickle",
"tests/test_trees.py::TestTrees::test_repr_runnable",
"tests/test_trees.py::TestTrees::test_smart_decorator",
"tests/test_trees.py::TestTrees::test_transformer",
"tests/test_trees.py::TestTrees::test_vargs",
"tests/test_trees.py::TestTrees::test_vargs_override",
"tests/test_trees.py::TestTrees::test_vargs_set_name",
"tests/test_trees.py::TestTrees::test_visitor"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2021-10-04 16:28:40+00:00
|
mit
| 3,504 |
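The patch in this record makes the non-recursive transformer treat a `Discard` raised inside a callback the same way the recursive one does: the node simply vanishes from the output. A reduced sketch of that control flow with hypothetical names, not lark's actual classes:

```python
class Discard(Exception):
    """Raised by a callback to signal that the current node should be dropped."""


def transform_children(children, callback):
    out = []
    for child in children:
        try:
            out.append(callback(child))
        except Discard:
            # The callback asked for this node to be removed from the result.
            pass
    return out


def drop_comments(token):
    if token.startswith("#"):
        raise Discard
    return token.upper()


print(transform_children(["a", "# note", "b"], drop_comments))   # ['A', 'B']
```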
|
lark-parser__lark-650
|
diff --git a/lark/common.py b/lark/common.py
index cc8c73c..714399a 100644
--- a/lark/common.py
+++ b/lark/common.py
@@ -17,9 +17,6 @@ class LexerConf(Serialize):
self.skip_validation = skip_validation
self.use_bytes = use_bytes
- def _deserialize(self):
- self.callbacks = {} # TODO
-
###}
class ParserConf:
diff --git a/lark/lark.py b/lark/lark.py
index daab45b..3ed96d7 100644
--- a/lark/lark.py
+++ b/lark/lark.py
@@ -344,7 +344,14 @@ class Lark(Serialize):
self.rules = [Rule.deserialize(r, memo) for r in data['rules']]
self.source = '<deserialized>'
self._prepare_callbacks()
- self.parser = self.parser_class.deserialize(data['parser'], memo, self._callbacks, self.options.postlex, re_module)
+ self.parser = self.parser_class.deserialize(
+ data['parser'],
+ memo,
+ self._callbacks,
+ self.options.postlex,
+ self.options.transformer,
+ re_module
+ )
return self
@classmethod
diff --git a/lark/parser_frontends.py b/lark/parser_frontends.py
index 33ad9bc..a45bf9c 100644
--- a/lark/parser_frontends.py
+++ b/lark/parser_frontends.py
@@ -1,6 +1,6 @@
from .utils import get_regexp_width, Serialize
from .parsers.grammar_analysis import GrammarAnalyzer
-from .lexer import TraditionalLexer, ContextualLexer, Lexer, Token
+from .lexer import TraditionalLexer, ContextualLexer, Lexer, Token, TerminalDef
from .parsers import earley, xearley, cyk
from .parsers.lalr_parser import LALR_Parser
from .grammar import Rule
@@ -58,6 +58,16 @@ class _ParserFrontend(Serialize):
return self.parser.parse(input, start, *args)
+def _recreate_lexer_callbacks(memo, transformer):
+ result = {}
+ terminals = [item for item in memo.values() if isinstance(item, TerminalDef)]
+ for terminal in terminals:
+ callback = getattr(transformer, terminal.name, None)
+ if callback is not None:
+ result[terminal.name] = callback
+ return result
+
+
class WithLexer(_ParserFrontend):
lexer = None
parser = None
@@ -73,10 +83,11 @@ class WithLexer(_ParserFrontend):
self.postlex = lexer_conf.postlex
@classmethod
- def deserialize(cls, data, memo, callbacks, postlex, re_module):
+ def deserialize(cls, data, memo, callbacks, postlex, transformer, re_module):
inst = super(WithLexer, cls).deserialize(data, memo)
inst.postlex = postlex
inst.parser = LALR_Parser.deserialize(inst.parser, memo, callbacks)
+ inst.lexer_conf.callbacks = _recreate_lexer_callbacks(memo, transformer)
inst.lexer_conf.re_module = re_module
inst.lexer_conf.skip_validation=True
inst.init_lexer()
@@ -229,4 +240,3 @@ class CYK(WithLexer):
def _apply_callback(self, tree):
return self.callbacks[tree.rule](tree.children)
-
|
lark-parser/lark
|
c9ca287e9e2ae2a3f5ad7cf028fdabf408f053a1
|
diff --git a/tests/test_tools.py b/tests/test_tools.py
index 1e0d78e..e691237 100644
--- a/tests/test_tools.py
+++ b/tests/test_tools.py
@@ -106,6 +106,33 @@ class TestStandalone(TestCase):
x = l.parse('(\n)\n')
self.assertEqual(x, Tree('start', []))
+ def test_transformer(self):
+ grammar = r"""
+ start: some_rule "(" SOME_TERMINAL ")"
+ some_rule: SOME_TERMINAL
+ SOME_TERMINAL: /[A-Za-z_][A-Za-z0-9_]*/
+ """
+ context = self._create_standalone(grammar)
+ _Lark = context["Lark_StandAlone"]
+
+ _Token = context["Token"]
+ _Tree = context["Tree"]
+
+ class MyTransformer(context["Transformer"]):
+ def SOME_TERMINAL(self, token):
+ return _Token("SOME_TERMINAL", "token is transformed")
+
+ def some_rule(self, children):
+ return _Tree("rule_is_transformed", [])
+
+ parser = _Lark(transformer=MyTransformer())
+ self.assertEqual(
+ parser.parse("FOO(BAR)"),
+ _Tree("start", [
+ _Tree("rule_is_transformed", []),
+ _Token("SOME_TERMINAL", "token is transformed")
+ ])
+ )
if __name__ == '__main__':
|
Transformer is not applied for tokens in standalone parser
**Describe the bug**
Standalone parser doesn't invoke methods on tokens defined in supplied transformer
**To Reproduce**
Given:
- grammar `example.lark`:
```
start: IDENTIFIER "(" text ")"
IDENTIFIER: /[A-Za-z_][A-Za-z0-9_]*/
text: IDENTIFIER
```
- standalone parser generated with: `python3 -m lark.tools.standalone example.lark > example.py`
Then:
- this program:
```python
import lark
class DowncaseIdentifiers(lark.Transformer):
def IDENTIFIER(self, token):
return lark.Token("IDENTIFIER", token.lower())
def text(self, children):
return lark.Tree("text", [self.IDENTIFIER(children[0])])
parser = lark.Lark.open(
grammar_filename="example.lark", transformer=DowncaseIdentifiers(), parser="lalr"
)
to_parse = "FOO(BAR)"
result = parser.parse(to_parse)
print(result.pretty())
```
produces:
```
start
foo
text bar
```
- this program:
```python
import example
class DowncaseIdentifiers(example.Transformer):
    def IDENTIFIER(self, token):
        return example.Token("IDENTIFIER", token.lower())

    def text(self, children):
        return example.Tree("text", [self.IDENTIFIER(children[0])])
parser = example.Lark_StandAlone(transformer=DowncaseIdentifiers())
to_parse = "FOO(BAR)"
result = parser.parse(to_parse)
print(result.pretty())
```
produces:
```
start
FOO
text bar
```
Expected:
Both programs produce the same output, namely:
```
start
foo
text bar
```
|
0.0
|
c9ca287e9e2ae2a3f5ad7cf028fdabf408f053a1
|
[
"tests/test_tools.py::TestStandalone::test_transformer"
] |
[
"tests/test_tools.py::TestStandalone::test_contextual",
"tests/test_tools.py::TestStandalone::test_postlex",
"tests/test_tools.py::TestStandalone::test_simple"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-08-13 16:13:07+00:00
|
mit
| 3,505 |
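The fix in this record rebuilds the lexer's terminal callbacks after deserialization by looking up each terminal name as an attribute on the user's transformer. A standalone sketch of that dispatch pattern (hypothetical terminal names, no lark imports):

```python
class DowncaseTransformer:
    # Methods named after terminals act as token callbacks.
    def IDENTIFIER(self, token):
        return token.lower()


terminal_names = ["IDENTIFIER", "NUMBER", "STRING"]
transformer = DowncaseTransformer()

# Build a callback table containing only the terminals the transformer handles.
callbacks = {
    name: getattr(transformer, name)
    for name in terminal_names
    if getattr(transformer, name, None) is not None
}

print(callbacks["IDENTIFIER"]("FOO"))   # foo
print(sorted(callbacks))                # ['IDENTIFIER']
```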
|
lark-parser__lark-916
|
diff --git a/lark-stubs/reconstruct.pyi b/lark-stubs/reconstruct.pyi
index 9826428..a8d39e3 100644
--- a/lark-stubs/reconstruct.pyi
+++ b/lark-stubs/reconstruct.pyi
@@ -34,5 +34,6 @@ class Reconstructor:
def __init__(self, parser: Lark, term_subs: Dict[str, Callable[[Symbol], str]] = ...):
...
- def reconstruct(self, tree: Tree, postproc: Callable[[Iterable[str]], Iterable[str]]) -> str:
+ def reconstruct(self, tree: Tree, postproc: Callable[[Iterable[str]], Iterable[str]]=None,
+ insert_spaces: bool = True) -> str:
...
diff --git a/lark-stubs/tree.pyi b/lark-stubs/tree.pyi
index 98aadff..ea99ff6 100644
--- a/lark-stubs/tree.pyi
+++ b/lark-stubs/tree.pyi
@@ -40,7 +40,7 @@ class Tree:
def expand_kids_by_index(self, *indices: int) -> None:
...
- def scan_values(self, pred: Callable[[Union[str, Tree]], bool]) -> List[str]:
+ def scan_values(self, pred: Callable[[Union[str, Tree]], bool]) -> Iterator[str]:
...
def iter_subtrees(self) -> Iterator[Tree]:
diff --git a/lark/load_grammar.py b/lark/load_grammar.py
index dcb4c81..7b38a74 100644
--- a/lark/load_grammar.py
+++ b/lark/load_grammar.py
@@ -448,6 +448,9 @@ def _literal_to_pattern(literal):
s = eval_escaping(x)
+ if s == "":
+ raise GrammarError("Can't have empty terminals (offending literal: %s)" % literal.value)
+
if literal.type == 'STRING':
s = s.replace('\\\\', '\\')
return PatternStr(s, flags, raw=literal.value)
diff --git a/lark/reconstruct.py b/lark/reconstruct.py
index 2efc0ae..ab2fb38 100644
--- a/lark/reconstruct.py
+++ b/lark/reconstruct.py
@@ -87,14 +87,14 @@ class Reconstructor(TreeMatcher):
else:
yield item
- def reconstruct(self, tree, postproc=None):
+ def reconstruct(self, tree, postproc=None, insert_spaces=True):
x = self._reconstruct(tree)
if postproc:
x = postproc(x)
y = []
prev_item = ''
for item in x:
- if prev_item and item and is_id_continue(prev_item[-1]) and is_id_continue(item[0]):
+ if insert_spaces and prev_item and item and is_id_continue(prev_item[-1]) and is_id_continue(item[0]):
y.append(' ')
y.append(item)
prev_item = item
|
lark-parser/lark
|
e4904b32da24cf37f04b79672953e6ce3351bb67
|
diff --git a/tests/test_grammar.py b/tests/test_grammar.py
index 6a1aefa..a643117 100644
--- a/tests/test_grammar.py
+++ b/tests/test_grammar.py
@@ -22,6 +22,10 @@ class TestGrammar(TestCase):
else:
assert False, "example did not raise an error"
+ def test_empty_literal(self):
+ # Issues #888
+ self.assertRaises(GrammarError, Lark, "start: \"\"")
+
def test_override_rule(self):
# Overrides the 'sep' template in existing grammar to add an optional terminating delimiter
# Thus extending it beyond its original capacity
|
Generic exception with (seemingly) valid grammar input
**Describe the bug**
When using Lark, I keep getting generic exceptions instead of any detailed report. I am unsure whether this is caused by bad grammar input or by a bug in Lark (I'm guessing the former), but I'd love it if we could get to the bottom of this.
```
Traceback (most recent call last):
File "lark_test\venv\lib\site-packages\lark\load_grammar.py", line 327, in pattern
term_name = self.term_reverse[p].name
KeyError: ''
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "lark_test\venv\lib\site-packages\lark\load_grammar.py", line 331, in pattern
term_name = _TERMINAL_NAMES[value]
KeyError: ''
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "lark_test\venv\lib\site-packages\lark\visitors.py", line 91, in _call_userfunc
return f.visit_wrapper(f, tree.data, children, tree.meta)
File "lark_test\venv\lib\site-packages\lark\visitors.py", line 390, in _vargs_inline
return f(*children)
File "lark_test\venv\lib\site-packages\lark\visitors.py", line 374, in f
return _f(self, *args, **kwargs)
File "lark_test\venv\lib\site-packages\lark\load_grammar.py", line 333, in pattern
if is_id_continue(value) and is_id_start(value[0]) and value.upper() not in self.term_set:
IndexError: string index out of range
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "lark_test\main.py", line 5, in <module>
parser = Lark(grammar.read(), start="PROGRAM")
File "lark_test\venv\lib\site-packages\lark\lark.py", line 348, in __init__
self.terminals, self.rules, self.ignore_tokens = self.grammar.compile(self.options.start, terminals_to_keep)
File "lark_test\venv\lib\site-packages\lark\load_grammar.py", line 604, in compile
tree = transformer.transform(rule_tree)
File "lark_test\venv\lib\site-packages\lark\visitors.py", line 170, in transform
tree = t.transform(tree)
File "lark_test\venv\lib\site-packages\lark\visitors.py", line 187, in transform
subtree.children = list(self._transform_children(subtree.children))
File "lark_test\venv\lib\site-packages\lark\visitors.py", line 116, in _transform_children
yield self._transform_tree(c)
File "lark_test\venv\lib\site-packages\lark\visitors.py", line 183, in _transform_tree
return self._call_userfunc(tree)
File "lark_test\venv\lib\site-packages\lark\visitors.py", line 97, in _call_userfunc
raise VisitError(tree.data, tree, e)
lark.exceptions.VisitError: Error trying to process rule "pattern":
string index out of range
Process finished with exit code 1
```
**To Reproduce**
main.py
```
from lark import Lark
if __name__ == "__main__":
    with open('grammar.ebnf', 'r') as grammar:
        parser = Lark(grammar.read(), start="PROGRAM")
```
grammar.ebnf
```
program : statement_or_func ("\n" statement_or_func)*
statement_or_func : sequence | func_stat
sequence : statement (le statement)*
le : "\n" | ";"
brackets_seq : (sequence)
brack_or_stat : brackets_seq | statement
statement : ""
| decl_stat assignment_stat
| if_stat
| do_until_stat
| while_stat
| return_stat
| print_stat
| input_stat
| func_stat
| call_stat
| comment_stat
decl_stat : LET assignment_stat ("," declaration)*
declaration : ID | assignment_stat
assignment_stat : ID "=" expression
if_stat : IF (condition) THEN brack_or_stat elsepart
elsepart : "" | ELSE brack_or_stat
do_until_stat : DO brack_or_stat LOOP UNTIL (condition)
while_stat : WHILE (condition) brack_or_stat WEND
return_stat : RETURN (expression)
print_stat : PRINT MESSAGE"," expression_list
expression_list : expression ("," expression)*
input_stat : INPUT MESSAGE"," ID ("," ID)*
func_stat : SUB ID funcbody
funcbody : formalpars brackets_seq
formalpars : ("" | formalparlist)
formalparlist : ID ("," ID)*
call_stat : CALL ID actualpars
actualpars : ("" | actualparlist)
actualparlist : actualparitem ("," actualparitem)*
actualparitem : expression | ID
comment_stat : REM MESSAGE
condition : boolterm (OR boolterm)*
boolterm : boolfactor (AND boolfactor)*
boolfactor : NOT [condition] | [condition] | expression RELATIONAL_OPER expression
expression : OPTIONAL_SIGN term (ADD_OPER term)*
term : factor (MUL_OPER factor)*
factor : CONSTANT | (expression) | ID | call_stat
RELATIONAL_OPER : "==" | "<" | "<=" | "<>" | ">=" | "<"
ADD_OPER : "+" | "-"
MUL_OPER : "*" | "/"
OPTIONAL_SIGN : "" | ADD_OPER
ID : (ALPHA)(ALPHA|DIGIT)*
LET : "LET"
DO : "DO"
LOOP: "LOOP"
UNTIL: "UNTIL"
WHILE: "WHILE"
WEND : "WEND"
SUB : "SUB"
CALL : "CALL"
INPUT : "INPUT"
PRINT : "PRINT"
IF : "IF"
THEN : "THEN"
ELSE : "ELSE"
AND : "AND"
NOT : "NOT"
OR : "OR"
RETURN : "RETURN"
REM : "REM"
MULTI_LINE_COMMENT_START : "%*"
MULTI_LINE_COMMENT_END : "*%"
ALPHA : "a".."z" | "A".."Z"
DIGIT : "0".."9"
INTEGER : /-?{DIGIT}{1,5}/
MESSAGE : /"(\\.|[^"\\])*"/
CONSTANT : INTEGER | MESSAGE
```
I'm using the latest version of Lark installed from PIP, with python 3.9.0
|
0.0
|
e4904b32da24cf37f04b79672953e6ce3351bb67
|
[
"tests/test_grammar.py::TestGrammar::test_empty_literal"
] |
[
"tests/test_grammar.py::TestGrammar::test_token_multiline_only_works_with_x_flag",
"tests/test_grammar.py::TestGrammar::test_extend_term",
"tests/test_grammar.py::TestGrammar::test_errors",
"tests/test_grammar.py::TestGrammar::test_alias_in_terminal",
"tests/test_grammar.py::TestGrammar::test_extend_twice",
"tests/test_grammar.py::TestGrammar::test_undefined_rule",
"tests/test_grammar.py::TestGrammar::test_override_terminal",
"tests/test_grammar.py::TestGrammar::test_find_grammar_errors",
"tests/test_grammar.py::TestGrammar::test_import_custom_sources3",
"tests/test_grammar.py::TestGrammar::test_import_custom_sources2",
"tests/test_grammar.py::TestGrammar::test_undefined_term",
"tests/test_grammar.py::TestGrammar::test_extend_rule",
"tests/test_grammar.py::TestGrammar::test_undefined_ignore",
"tests/test_grammar.py::TestGrammar::test_import_custom_sources",
"tests/test_grammar.py::TestGrammar::test_override_rule"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-06-22 20:23:55+00:00
|
mit
| 3,506 |
|
larq__zookeeper-124
|
diff --git a/zookeeper/core/component.py b/zookeeper/core/component.py
index 8ce0517..7944fa6 100644
--- a/zookeeper/core/component.py
+++ b/zookeeper/core/component.py
@@ -106,7 +106,7 @@ def _type_check_and_cache(instance, field: Field, result: Any) -> None:
raise TypeError(
f"Field '{field.name}' of component '{instance.__component_name__}' is "
f"annotated with type '{field.type}', which is not satisfied by "
- f"default value {repr(result)}."
+ f"value {repr(result)}."
)
object.__setattr__(instance, field.name, result)
@@ -186,7 +186,7 @@ def _wrap_getattribute(component_cls: Type) -> None:
utils.generate_component_ancestors_with_field(instance, name), None
)
try:
- result = getattr(parent_instance, name)
+ result = parent_instance.__base_getattribute__(name) # type: ignore
except AttributeError:
# From here we raise the original exception instead because it
# will correctly refer to this component rather than some parent
@@ -655,6 +655,9 @@ def configure(
continue
raise e from None
+ if not utils.is_component_instance(sub_component_instance):
+ continue
+
full_name = f"{instance.__component_name__}.{field.name}"
if not sub_component_instance.__component_configured__:
diff --git a/zookeeper/tf/dataset.py b/zookeeper/tf/dataset.py
index fb79156..2a1053a 100644
--- a/zookeeper/tf/dataset.py
+++ b/zookeeper/tf/dataset.py
@@ -4,6 +4,7 @@ from typing import Dict, Optional, Tuple
import tensorflow as tf
import tensorflow_datasets as tfds
+from zookeeper.core import utils
from zookeeper.core.field import Field
@@ -61,6 +62,9 @@ class TFDSDataset(Dataset):
# The directory that the dataset is stored in.
data_dir: Optional[str] = Field(None)
+ # Whether or not to download the dataset (if it's not already downloaded).
+ download: bool = Field(False)
+
# Train and validation splits. A validation split is not required.
train_split: str = Field()
validation_split: Optional[str] = Field(None)
@@ -97,13 +101,23 @@ class TFDSDataset(Dataset):
def load(self, split, decoders, shuffle) -> tf.data.Dataset:
"""Return a `tf.data.Dataset` object representing the requested split."""
- return tfds.load(
- name=self.name,
- split=split,
- data_dir=self.data_dir,
- decoders=decoders,
- as_dataset_kwargs={"shuffle_files": shuffle},
- )
+ try:
+ return tfds.load(
+ name=self.name,
+ split=split,
+ data_dir=self.data_dir,
+ download=self.download,
+ decoders=decoders,
+ as_dataset_kwargs={"shuffle_files": shuffle},
+ )
+ except AssertionError as e:
+ if not self.download:
+ utils.warn(
+ f"Field 'download' of component {self.__class__.__name__} is False. "
+ "If the TFDS dataset is not downloaded, set 'download' to True to "
+ "call `download_and_prepare()` automatically."
+ )
+ raise e from None
def train(self, decoders=None) -> Tuple[tf.data.Dataset, int]:
return (
@@ -129,6 +143,12 @@ class MultiTFDSDataset(Dataset):
to be trained on data that is combined from multiple datasets.
"""
+ # The directory that the dataset is stored in.
+ data_dir: Optional[str] = Field(None)
+
+ # Whether or not to download the dataset (if it's not already downloaded).
+ download: bool = Field(False)
+
# A non-empty mapping from dataset names as keys to splits as values. The
# training data will be the concatenation of the datasets loaded from each
# (key, value) pair.
@@ -138,9 +158,6 @@ class MultiTFDSDataset(Dataset):
# empty, indicating no validation data.
validation_split: Dict[str, str] = Field(lambda: {})
- # The directory that the dataset is stored in.
- data_dir: Optional[str] = Field(None)
-
def num_examples(self, splits) -> int:
"""
Compute the total number of examples in the splits specified by the
@@ -156,13 +173,23 @@ class MultiTFDSDataset(Dataset):
def load(self, splits, decoders, shuffle) -> tf.data.Dataset:
result = None
for name, split in splits.items():
- dataset = tfds.load(
- name=name,
- split=split,
- data_dir=self.data_dir,
- decoders=decoders,
- as_dataset_kwargs={"shuffle_files": shuffle},
- )
+ try:
+ dataset = tfds.load(
+ name=name,
+ split=split,
+ data_dir=self.data_dir,
+ download=self.download,
+ decoders=decoders,
+ as_dataset_kwargs={"shuffle_files": shuffle},
+ )
+ except AssertionError as e:
+ if not self.download:
+ utils.warn(
+ f"Field 'download' of component {self.__class__.__name__} is "
+ "False. If the TFDS dataset is not downloaded, set 'download' "
+ "to True to call `download_and_prepare()` automatically."
+ )
+ raise e from None
result = result.concatenate(dataset) if result is not None else dataset
return result
|
larq/zookeeper
|
f29443225770ab3582bfb9d461f5b77c303b3d84
|
diff --git a/.github/workflows/unittest.yml b/.github/workflows/unittest.yml
index 3da10d9..489e3fe 100644
--- a/.github/workflows/unittest.yml
+++ b/.github/workflows/unittest.yml
@@ -31,7 +31,7 @@ jobs:
- name: Test with pytest
run: pytest . -vv --cov=zookeeper --cov-report=xml --cov-config=.coveragerc
- name: Test example script # Test that the model successfully compiles.
- run: python examples/larq_experiment.py BinaryNetMnist epochs=0
+ run: python examples/larq_experiment.py BinaryNetMnist epochs=0 --dataset.download
- name: Upload code-coverage report to Codecov
if: matrix.python-version == '3.7' && matrix.tf-version == '2.0.1'
run: curl -s https://codecov.io/bash | bash -s -- -t $token -f ./coverage.xml -F unittests
diff --git a/zookeeper/core/component_test.py b/zookeeper/core/component_test.py
index 8495571..0d188ba 100644
--- a/zookeeper/core/component_test.py
+++ b/zookeeper/core/component_test.py
@@ -326,7 +326,7 @@ def test_type_check(ExampleComponentClass):
# Attempting to access the field should now raise a type error.
with pytest.raises(
TypeError,
- match="Field 'a' of component 'x' is annotated with type '<class 'int'>', which is not satisfied by default value 4.5.",
+ match="Field 'a' of component 'x' is annotated with type '<class 'int'>', which is not satisfied by value 4.5.",
):
instance.a
@@ -417,6 +417,34 @@ def test_component_getattr_value_via_factory_parent():
assert f.build() == 5
+def test_component_inherited_factory_value():
+ """https://github.com/larq/zookeeper/issues/123."""
+
+ @factory
+ class IntFactory:
+ def build(self) -> int:
+ return 5
+
+ @component
+ class Child:
+ x: int = ComponentField()
+
+ @component
+ class Parent:
+ child: Child = ComponentField(Child)
+ x: int = ComponentField(IntFactory)
+
+ p = Parent()
+ configure(p, {})
+ assert p.x == 5
+ assert p.child.x == 5
+
+ p = Parent()
+ configure(p, {"child.x": 7})
+ assert p.x == 5
+ assert p.child.x == 7
+
+
def test_component_post_configure():
with pytest.raises(
TypeError,
|
Error when configuring components with values inherited from factories
The following fails:
```python
@factory
class IntFactory:
def build(self) -> int:
return 5
@component
class Child:
x: int = ComponentField()
@component
class Parent:
child: Child = ComponentField(Child)
x: int = ComponentField(IntFactory)
p = Parent()
configure(p, {})
assert p.x == 5
assert p.child.x == 5
```
with error `AttributeError: 'int' object has no attribute '__component_configured__'`.
This happens because `configure()` tries to recursively configure `p.child.x`, as it is a `ComponentField` (shadowing `p.x`, which is also a `ComponentField`). But `p.child.x` resolves to an integer. We can solve this by checking that the thing we try to recursively configure is a component instance (this will not break type-checking, which happens separately).
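For illustration, a minimal sketch of such a guard (the helper name and structure here are assumptions for this example, not the project's actual code):
```python
def is_component_instance(obj):
    # Hypothetical stand-in for the real check; component instances carry
    # attributes such as `__component_configured__` set by the decorators.
    return hasattr(obj, "__component_configured__")

def maybe_configure(value, conf):
    # Only recurse into genuine component instances; a plain value such as
    # the int produced by `IntFactory.build()` is left untouched.
    if is_component_instance(value):
        configure(value, conf)  # delegate to the library's configure()
```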
|
0.0
|
f29443225770ab3582bfb9d461f5b77c303b3d84
|
[
"zookeeper/core/component_test.py::test_type_check"
] |
[
"zookeeper/core/component_test.py::test_non_class_decorate_error",
"zookeeper/core/component_test.py::test_abstract_class_decorate_error",
"zookeeper/core/component_test.py::test_init_decorate_error",
"zookeeper/core/component_test.py::test_configure_non_interactive_missing_field_value",
"zookeeper/core/component_test.py::test_error_if_field_overwritten_in_subclass"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-02-26 14:43:39+00:00
|
apache-2.0
| 3,507 |
|
larq__zookeeper-131
|
diff --git a/zookeeper/core/component.py b/zookeeper/core/component.py
index 7944fa6..d5f0f7a 100644
--- a/zookeeper/core/component.py
+++ b/zookeeper/core/component.py
@@ -474,6 +474,13 @@ def configure(
overwrite any values already set on the instance - either class defaults
or those set in `__init__`.
"""
+ # Only component instances can be configured.
+ if not utils.is_component_instance(instance):
+ raise TypeError(
+ "Only @component, @factory, and @task instances can be configured. "
+ f"Received: {instance}."
+ )
+
# Configuration can only happen once.
if instance.__component_configured__:
raise ValueError(
|
larq/zookeeper
|
e41661a9a067cd5b684a60b67c49175536f22371
|
diff --git a/zookeeper/core/component_test.py b/zookeeper/core/component_test.py
index 0d188ba..6c4764a 100644
--- a/zookeeper/core/component_test.py
+++ b/zookeeper/core/component_test.py
@@ -523,6 +523,40 @@ def test_component_configure_error_non_existant_key():
configure(GrandParent(), {"parent.non_existent_field": "bar"})
+def test_component_configure_error_non_component_instance():
+ class A:
+ a: int = Field()
+
+ with pytest.raises(
+ TypeError,
+ match="Only @component, @factory, and @task instances can be configured.",
+ ):
+ configure(A(), conf={"a": 5})
+
+ @component
+ class B:
+ b: int = Field()
+
+ with pytest.raises(
+ TypeError,
+ match="Only @component, @factory, and @task instances can be configured.",
+ ):
+ # The following we expect to fail because it is a component class, not
+ # an instance.
+ configure(B, conf={"b": 3})
+
+ class C(B):
+ c: int = Field()
+
+ with pytest.raises(
+ TypeError,
+ match="Only @component, @factory, and @task instances can be configured.",
+ ):
+ # Even the an instance of a class that subclasses a component class
+ # should fail.
+ configure(C(), conf={"b": 3, "c": 42})
+
+
def test_component_configure_field_allow_missing():
@component
class A:
|
Non-components should raise an error when being configured
### Describe the bug
A subclass of a component that itself does not have an `@component` decorator can be initialized with `configure` without any errors. It will then not act as expected, since it is not a component.
### To Reproduce
```python
@component
class BaseComponent:
value: int = Field(1)
class SubclassedComponent(BaseComponent):
value = Field(2)
component = SubclassedComponent()
configure(component, {})
print(component.value) # prints 1
assert component.value == 2 # fails
```
### Expected behavior
During the call to `configure`, an error should be thrown indicating that the object being configured is not a component.
### Environment
Zookeeper version: 1.0b7
|
0.0
|
e41661a9a067cd5b684a60b67c49175536f22371
|
[
"zookeeper/core/component_test.py::test_component_configure_error_non_component_instance"
] |
[
"zookeeper/core/component_test.py::test_non_class_decorate_error",
"zookeeper/core/component_test.py::test_abstract_class_decorate_error",
"zookeeper/core/component_test.py::test_init_decorate_error",
"zookeeper/core/component_test.py::test_configure_non_interactive_missing_field_value",
"zookeeper/core/component_test.py::test_type_check",
"zookeeper/core/component_test.py::test_error_if_field_overwritten_in_subclass"
] |
{
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-03-18 21:32:19+00:00
|
apache-2.0
| 3,508 |
|
larq__zookeeper-48
|
diff --git a/zookeeper/hparam.py b/zookeeper/hparam.py
index 709f2ba..5416e0e 100644
--- a/zookeeper/hparam.py
+++ b/zookeeper/hparam.py
@@ -11,14 +11,21 @@ except ImportError: # pragma: no cover
SPLIT_REGEX = re.compile(r",?(\w+)=")
+INDENT = " "
+
def group(sequence):
return zip(*[iter(sequence)] * 2)
-def str_key_val(key, value, color=True):
+def str_key_val(key, value, color=True, single_line=False):
if callable(value):
value = "<callable>"
+ if isinstance(value, HParams):
+ if single_line:
+ value = repr(value)
+ else:
+ value = f"\n{INDENT}".join(str(value).split("\n"))
return f"{BLUE}{key}{RESET}={YELLOW}{value}{RESET}" if color else f"{key}={value}"
@@ -103,11 +110,16 @@ class HParams(collections.abc.Mapping):
raise AttributeError("Hyperparameters are immutable, cannot assign to field.")
def __str__(self):
- params = ",\n ".join([str_key_val(k, v) for k, v in sorted(self.items())])
- return f"{self.__class__.__name__}(\n {params}\n)"
+ params = f",\n{INDENT}".join(
+ [str_key_val(k, v) for k, v in sorted(self.items())]
+ )
+ return f"{self.__class__.__name__}(\n{INDENT}{params}\n)"
def __repr__(self):
params = ",".join(
- [str_key_val(k, v, color=False) for k, v in sorted(self.items())]
+ [
+ str_key_val(k, v, color=False, single_line=True)
+ for k, v in sorted(self.items())
+ ]
)
return f"{self.__class__.__name__}({params})"
|
larq/zookeeper
|
eb5f593db0922bde8a2a49cad35b8dbf11be390a
|
diff --git a/zookeeper/hparam_test.py b/zookeeper/hparam_test.py
index fc97ed8..91e25ea 100644
--- a/zookeeper/hparam_test.py
+++ b/zookeeper/hparam_test.py
@@ -22,6 +22,20 @@ def hyper():
return Hyper()
[email protected]
+def hyper_with_nested():
+ class Child(HParams):
+ c = -1.5
+ d = "aeiou"
+
+ class Parent(HParams):
+ a = 4.9
+ b = "some string"
+ child = Child()
+
+ return Parent()
+
+
def test_defaults(hyper):
assert hyper.foo == [1, 2, 3]
assert hyper.bar == 0.5
@@ -72,6 +86,11 @@ def test_repr(hyper):
assert repr(hyper) == output
+def test_repr_nested(hyper_with_nested):
+ output = "Parent(a=4.9,b=some string,child=Child(c=-1.5,d=aeiou))"
+ assert repr(hyper_with_nested) == output
+
+
def test_str(hyper):
output = """Hyper(
bar=0.5,
@@ -81,3 +100,15 @@ def test_str(hyper):
foo=[1, 2, 3]
)"""
assert click.unstyle(str(hyper)) == output
+
+
+def test_str_nested(hyper_with_nested):
+ output = """Parent(
+ a=4.9,
+ b=some string,
+ child=Child(
+ c=-1.5,
+ d=aeiou
+ )
+)"""
+ assert click.unstyle(str(hyper_with_nested)) == output
|
Support nested HParams
If we have a model with multiple independent components, e.g. an encoder and a decoder, or a base network and some auxilliary structure, it would be nice if the `HParams` class could nest other `HParams` classes, one for each of the components.
This would facilitate easy model modularity, e.g. a `build_model` function could internally call `build_encoder(hparams.encoder_hparams)` and `build_decoder(hparams.decoder_hparams)`.
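As a rough sketch of the kind of nesting being requested (all class and attribute names below are made up for illustration):
```python
class EncoderHParams(HParams):
    hidden_units = 256
    dropout = 0.1

class DecoderHParams(HParams):
    hidden_units = 128
    dropout = 0.1

class ModelHParams(HParams):
    learning_rate = 1e-3
    encoder_hparams = EncoderHParams()
    decoder_hparams = DecoderHParams()

hparams = ModelHParams()
# build_model(hparams) could then call
# build_encoder(hparams.encoder_hparams) and build_decoder(hparams.decoder_hparams)
```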
|
0.0
|
eb5f593db0922bde8a2a49cad35b8dbf11be390a
|
[
"zookeeper/hparam_test.py::test_repr_nested",
"zookeeper/hparam_test.py::test_str_nested"
] |
[
"zookeeper/hparam_test.py::test_defaults",
"zookeeper/hparam_test.py::test_parse",
"zookeeper/hparam_test.py::test_spaced_parse",
"zookeeper/hparam_test.py::test_parse_fail",
"zookeeper/hparam_test.py::test_immutability",
"zookeeper/hparam_test.py::test_key_error",
"zookeeper/hparam_test.py::test_repr",
"zookeeper/hparam_test.py::test_str"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2019-09-18 14:14:21+00:00
|
apache-2.0
| 3,509 |
|
larsoner__flake8-array-spacing-3
|
diff --git a/README.rst b/README.rst
index fa55f59..3487b30 100644
--- a/README.rst
+++ b/README.rst
@@ -1,2 +1,4 @@
-# flake8-array-spacing
+flake8-array-spacing
+====================
+
Recast some E2XX errors as A2XX with exceptions for array-like variables
diff --git a/flake8_array_spacing/__init__.py b/flake8_array_spacing/__init__.py
index b615b4f..052b4b0 100644
--- a/flake8_array_spacing/__init__.py
+++ b/flake8_array_spacing/__init__.py
@@ -1,9 +1,9 @@
import re
-from pycodestyle import (
- extraneous_whitespace, # 201, 202
- whitespace_around_operator, # 221, 222
- whitespace_around_comma, # 241
-)
+import tokenize
+
+from pycodestyle import (extraneous_whitespace, whitespace_around_operator,
+ whitespace_around_comma)
+from flake8.defaults import NOQA_INLINE_REGEXP
__version__ = '0.1.dev0'
@@ -12,30 +12,47 @@ __version__ = '0.1.dev0'
ARRAY_LIKE_REGEX = re.compile(
r'[\[(][0-9 ij,+\-*^\/&|\[\]]*[\]),;]'
)
+FUNC_KINDS = (
+ (extraneous_whitespace, ('E201', 'E202')),
+ (whitespace_around_operator, ('E221', 'E222')),
+ (whitespace_around_comma, ('E241',)),
+)
class ArraySpacing(object):
+ """Checker for E2XX variants, ignoring array-like."""
+
name = 'array-spacing'
version = __version__
- def __init__(self, logical_line):
+ def __init__(self, logical_line, tokens):
self.logical_line = logical_line
+ self.tokens = tokens
self._array_ranges = None # only compute if necessary
def in_array_like(self, idx):
+ """Determine if in array like range."""
if self._array_ranges is None:
self._array_ranges = [
(match.start() + 1, match.end() + 1)
for match in ARRAY_LIKE_REGEX.finditer(self.logical_line)]
return any(start <= idx <= end for start, end in self._array_ranges)
+ def ignoring(self, kind):
+ """Determine if a kind is being ignored."""
+ for token in self.tokens:
+ if token.type == tokenize.COMMENT:
+ codes = NOQA_INLINE_REGEXP.match(token.string).group('codes')
+ if codes is not None and kind in codes:
+ return True
+ return False
+
def __iter__(self):
- for found, msg in extraneous_whitespace(self.logical_line):
- if msg[:4] in ('E201', 'E202') and not self.in_array_like(found):
- yield found, 'A' + msg[1:]
- for found, msg in whitespace_around_operator(self.logical_line):
- if msg[:4] in ('E221', 'E222') and not self.in_array_like(found):
- yield found, 'A' + msg[1:]
- for found, msg in whitespace_around_comma(self.logical_line):
- if msg[:4] == 'E241' and not self.in_array_like(found):
- yield found, 'A' + msg[1:]
+ """Iterate over errors."""
+ for func, kinds in FUNC_KINDS:
+ for found, msg in func(self.logical_line):
+ found_kind = msg[:4]
+ if found_kind in kinds and \
+ (not self.in_array_like(found)) and \
+ (not self.ignoring(found_kind)):
+ yield found, 'A' + msg[1:]
|
larsoner/flake8-array-spacing
|
772e3f1ea84eee89068cc8a775d74cd059e353e3
|
diff --git a/flake8_array_spacing/tests/test_array_spacing.py b/flake8_array_spacing/tests/test_array_spacing.py
index a0a2e01..d411f3f 100644
--- a/flake8_array_spacing/tests/test_array_spacing.py
+++ b/flake8_array_spacing/tests/test_array_spacing.py
@@ -10,6 +10,8 @@ def flake8(path, *args):
['flake8', '--select', 'A2',
'--ignore', 'E201,E202,E203,E221,E222,E241', '.'] + list(args),
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stderr = proc.stderr.decode().strip()
+ assert stderr == ''
return proc.stdout.decode().strip(), proc.returncode
@@ -31,6 +33,8 @@ a_b_pre = 'a = 1\nb = 2\n'
(a_b_pre + '[a + b, b]', "./bad.py:3:3: A221 multiple spaces before operator", 1), # noqa: E501
(a_b_pre + '[a + b, b]', "./bad.py:3:5: A222 multiple spaces after operator", 1), # noqa: E501
(a_b_pre + '[a, b]', "./bad.py:3:4: A241 multiple spaces after ','", 1),
+ (a_b_pre + '[a, b] # noqa', '', 0),
+ (a_b_pre + '[a, b] # noqa: E241', '', 0),
])
def test_array_spacing(line, want_output, want_code, tmpdir):
"""Test some cases."""
|
BUG: Installing for one repo breaks others
I installed this to make things work for SciPy, now in MNE-Python I get a bunch of A241's even though we have E241 disabled for MNE-Python. I wonder if we need an explicit `--array-spacing` or so. It's trivial to add but seems like a bit of a hack...
|
0.0
|
772e3f1ea84eee89068cc8a775d74cd059e353e3
|
[
"flake8_array_spacing/tests/test_array_spacing.py::test_array_spacing[a"
] |
[
"flake8_array_spacing/tests/test_array_spacing.py::test_array_spacing[["
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-07-09 19:02:13+00:00
|
bsd-3-clause
| 3,510 |
|
larsoner__flake8-array-spacing-4
|
diff --git a/flake8_array_spacing/__init__.py b/flake8_array_spacing/__init__.py
index 052b4b0..b19694d 100644
--- a/flake8_array_spacing/__init__.py
+++ b/flake8_array_spacing/__init__.py
@@ -9,9 +9,50 @@ __version__ = '0.1.dev0'
# This could almost certainly be better, it just checks for [...]
# (no pairing within, or checking that i/j are attached to a number, etc.)
-ARRAY_LIKE_REGEX = re.compile(
- r'[\[(][0-9 ij,+\-*^\/&|\[\]]*[\]),;]'
+_FMT = dict(
+ VARIABLE=r'[_a-z]+[_a-z0-9]*',
+ OPERATORS=r'[+\-*\/^\|&]',
+ SEPARATORS=r'[+\-*\/^\|& ,\[\]()]',
+ NUMERICAL=r"""
+(
+[0-9.]+(e[+-]?[0-9.]+)?j?|
+(np\.|numpy\.)?(nan|inf)|
)
+""")
+
+_FMT['NUMERICAL_LIST'] = r"""
+[\[(]+ # >= 1 left bracket or paren
+({SEPARATORS}*{NUMERICAL}{SEPARATORS}*)+ # >= 1 numerical
+[\])]+ # >= 1 right bracket or paren
+""".format(**_FMT)
+
+_FMT['ARRAY_WITH_BINARY_OPS'] = r"""
+array\([\[(]+ # open array
+(
+ {SEPARATORS}*
+ (
+ {NUMERICAL}|
+ (
+ ({VARIABLE}|{NUMERICAL})
+ \ *
+ {OPERATORS}
+ \ *
+ ({VARIABLE}|{NUMERICAL})
+ )
+
+ )
+ {SEPARATORS}*
+)+ # at least one numerical-or-variable-with-binary-op
+[\])]+\) # close array
+""".format(**_FMT)
+
+ARRAY_LIKE_REGEXP = re.compile("""(?x)
+(
+{NUMERICAL_LIST}
+|
+{ARRAY_WITH_BINARY_OPS}
+)""".format(**_FMT))
+
FUNC_KINDS = (
(extraneous_whitespace, ('E201', 'E202')),
(whitespace_around_operator, ('E221', 'E222')),
@@ -35,7 +76,7 @@ class ArraySpacing(object):
if self._array_ranges is None:
self._array_ranges = [
(match.start() + 1, match.end() + 1)
- for match in ARRAY_LIKE_REGEX.finditer(self.logical_line)]
+ for match in ARRAY_LIKE_REGEXP.finditer(self.logical_line)]
return any(start <= idx <= end for start, end in self._array_ranges)
def ignoring(self, kind):
|
larsoner/flake8-array-spacing
|
7dd499e8413ca4a354aed926e337e05434b9b6fb
|
diff --git a/flake8_array_spacing/tests/test_array_spacing.py b/flake8_array_spacing/tests/test_array_spacing.py
index d411f3f..269699e 100644
--- a/flake8_array_spacing/tests/test_array_spacing.py
+++ b/flake8_array_spacing/tests/test_array_spacing.py
@@ -1,4 +1,5 @@
import subprocess
+from textwrap import dedent
import pytest
@@ -7,8 +8,8 @@ def flake8(path, *args):
import os
os.chdir(str(path))
proc = subprocess.run(
- ['flake8', '--select', 'A2',
- '--ignore', 'E201,E202,E203,E221,E222,E241', '.'] + list(args),
+ ['flake8', '--select', 'F,A2',
+ '--ignore', 'E201,E202,E203,E221,E222,E241,F821', '.'] + list(args),
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stderr = proc.stderr.decode().strip()
assert stderr == ''
@@ -22,25 +23,45 @@ def test_installed():
assert 'array-spacing' in out
-a_b_pre = 'a = 1\nb = 2\n'
-
-
[email protected]('line, want_output, want_code', [
- ('[ -3 + 4j , -10 + 20j ]', '', 0),
- (a_b_pre + '[a, b]', '', 0),
- (a_b_pre + '[ a, b]', "./bad.py:3:2: A201 whitespace after '['", 1),
- (a_b_pre + '[a, b ]', "./bad.py:3:6: A202 whitespace before ']'", 1),
- (a_b_pre + '[a + b, b]', "./bad.py:3:3: A221 multiple spaces before operator", 1), # noqa: E501
- (a_b_pre + '[a + b, b]', "./bad.py:3:5: A222 multiple spaces after operator", 1), # noqa: E501
- (a_b_pre + '[a, b]', "./bad.py:3:4: A241 multiple spaces after ','", 1),
- (a_b_pre + '[a, b] # noqa', '', 0),
- (a_b_pre + '[a, b] # noqa: E241', '', 0),
[email protected]('content, output', [
+ ("""
+ array([[x / 2.0, 0.],
+ [0., -y / 2.0]])'""", ''),
+ ("""
+ np.array([[x / 2.0, 0.],
+ [0., -y / 2.0]])'""", ''),
+ ('[[x / 2.0, 0.], [0., -y / 2.0]]', """\
+ ./bad.py:1:11: A241 multiple spaces after ','
+ ./bad.py:1:27: A241 multiple spaces after ','"""),
+ ('[ -3. + 4.1j + 2, -10e-2 + 20e+2j ]', ''),
+ ('[[ 1 , 2 ] , [3, 4]]', ''),
+ ('[[ a , b ] , [a, b]]', """\
+ ./bad.py:1:3: A201 whitespace after '['
+ ./bad.py:1:10: A202 whitespace before ']'
+ ./bad.py:1:15: A241 multiple spaces after ','"""),
+ ('[ np.inf , 1 , 2 , numpy.nan , -inf ]', ''),
+ ('[a, b]', ''),
+ ('[ a, b]', "./bad.py:1:2: A201 whitespace after '['"),
+ ('[a, b ]', "./bad.py:1:6: A202 whitespace before ']'"),
+ ('[a + b, b]',
+ "./bad.py:1:3: A221 multiple spaces before operator"),
+ ('[a + b, b]',
+ "./bad.py:1:5: A222 multiple spaces after operator"), # noqa: E501
+ ('[ a, b ]', """\
+ ./bad.py:1:2: A201 whitespace after '['
+ ./bad.py:1:5: A241 multiple spaces after ','
+ ./bad.py:1:8: A202 whitespace before ']'"""),
+ ('[a, b]', "./bad.py:1:4: A241 multiple spaces after ','"),
+ ('[a, b] # noqa', ''),
+ ('[a, b] # noqa: E241', ''),
])
-def test_array_spacing(line, want_output, want_code, tmpdir):
+def test_array_spacing(content, output, tmpdir):
"""Test some cases."""
+ content = dedent(content)
+ output = dedent(output)
fname = tmpdir.join('bad.py')
with open(fname, 'w') as fid:
- fid.write(line)
+ fid.write(content)
got_output, got_code = flake8(tmpdir)
- assert got_output == want_output
- assert got_code == want_code
+ assert got_output == output
+ assert got_code == (1 if output else 0)
|
BUG: Regex should be more complete/selective
It should be more selective (matching `[` and maybe ensuring any `i`/`j` are immediately preceded by numbers); currently it's pretty basic:
```
ARRAY_LIKE_REGEX = re.compile(
r'[\[(][0-9 ij,+\-*^\/&|\[\]]*[\]),;]'
)
```
@jnothman I know you work on sklearn, @agramfort mentioned this might be useful for you all, too. Any interest in improving the regex [here](https://github.com/larsoner/flake8-array-spacing/blob/master/flake8_array_spacing/__init__.py#L12-L14), and maybe adopting this for sklearn?
Basically this plugin will replace `E` error variants:
```
extraneous_whitespace, # 201, 202
whitespace_around_operator, # 221, 222
whitespace_around_comma, # 241
```
With `A2XX` versions, where the ones above are ignored if they occur within an array-like list of (list of) numerical values. So you basically run an equivalent of `flake8 --ignore E201,E202,E203,E221,E222,E241 --select=A` and it should work. This came up originally for [scipy](https://github.com/scipy/scipy/issues/12367) (WIP PR to add this plugin [here](https://github.com/scipy/scipy/pull/12516/files)), but I imagine you all might have been annoyed about this at some point, too, so I wanted to loop you in.
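As a concrete (made-up) illustration, this is the kind of aligned array literal the plugin is meant to tolerate, while the same spacing outside array-like context keeps being flagged:
```python
import numpy as np

# E201/E202/E241 would normally fire on the aligned spacing below; with the
# plugin, the corresponding A2XX checks skip array-like numerical ranges.
transform = np.array([[ 1, 0,  0],
                      [ 0, 1, -2],
                      [ 0, 0,  1]])
```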
|
0.0
|
7dd499e8413ca4a354aed926e337e05434b9b6fb
|
[
"flake8_array_spacing/tests/test_array_spacing.py::test_array_spacing[["
] |
[
"flake8_array_spacing/tests/test_array_spacing.py::test_array_spacing[\\n",
"flake8_array_spacing/tests/test_array_spacing.py::test_array_spacing[[[x",
"flake8_array_spacing/tests/test_array_spacing.py::test_array_spacing[[[",
"flake8_array_spacing/tests/test_array_spacing.py::test_array_spacing[[a,",
"flake8_array_spacing/tests/test_array_spacing.py::test_array_spacing[[a"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-07-10 20:28:41+00:00
|
bsd-3-clause
| 3,511 |
|
laterpay__laterpay-client-python-103
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0ba7c00..5ad4dde 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,9 @@
## 5.3.1 (under development)
+* Only passed one of `lptoken` and `muid` to `/access` calls. Passing both is
+ not supported.
+
## 5.3.0
* Added explicit support for the `muid` argument to `get_add_url()`,
diff --git a/laterpay/__init__.py b/laterpay/__init__.py
index 1a39bfc..af44ce6 100644
--- a/laterpay/__init__.py
+++ b/laterpay/__init__.py
@@ -360,16 +360,34 @@ class LaterPayClient(object):
params = {
'cp': self.cp_key,
'ts': str(int(time.time())),
- 'lptoken': str(lptoken or self.lptoken),
'article_id': article_ids,
}
- if muid:
- # TODO: The behavior when lptoken and muid are given is not yet
- # defined. Thus we'll allow both at the same time for now. It might
- # be that in the end only one is allowed or one is prefered over
- # the other.
+ """
+ l = lptoken
+ s = self.lptoken
+ m = muid
+ x = error
+
+ | L | not L | L | not L
+ -------+-------+-------+-------+-------
+ l | x | x | l | l
+ -------+-------+-------+-------+-------
+ not l | m | m | s | x
+ -------+-------+-------+-------+-------
+ | m | m | not m | not m
+ """
+ if lptoken is None and muid is not None:
params['muid'] = muid
+ elif lptoken is not None and muid is None:
+ params['lptoken'] = lptoken
+ elif lptoken is None and muid is None and self.lptoken is not None:
+ params['lptoken'] = self.lptoken
+ else:
+ raise AssertionError(
+ 'Either lptoken, self.lptoken or muid has to be passed. '
+ 'Passing neither or both lptoken and muid is not allowed.',
+ )
params['hmac'] = signing.sign(
secret=self.shared_secret,
|
laterpay/laterpay-client-python
|
397afe02a9ddec00096da24febb8f37d60658f26
|
diff --git a/tests/test_client.py b/tests/test_client.py
index 7ffe43a..bc9e37f 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -326,7 +326,6 @@ class TestLaterPayClient(unittest.TestCase):
data = client.get_access_data(
['article-1', 'article-2'],
lptoken='fake-lptoken',
- muid='some-user',
)
self.assertEqual(data, {
@@ -349,7 +348,7 @@ class TestLaterPayClient(unittest.TestCase):
self.assertEqual(qd['cp'], ['fake-cp-key'])
self.assertEqual(qd['article_id'], ['article-1', 'article-2'])
self.assertEqual(qd['hmac'], ['fake-signature'])
- self.assertEqual(qd['muid'], ['some-user'])
+ self.assertNotIn('muid', 'qd')
sign_mock.assert_called_once_with(
secret='fake-shared-secret',
@@ -358,6 +357,70 @@ class TestLaterPayClient(unittest.TestCase):
'article_id': ['article-1', 'article-2'],
'ts': '123',
'lptoken': 'fake-lptoken',
+ },
+ url='http://example.net/access',
+ method='GET',
+ )
+
+ @mock.patch('laterpay.signing.sign')
+ @mock.patch('time.time')
+ @responses.activate
+ def test_get_access_data_success_muid(self, time_time_mock, sign_mock):
+ time_time_mock.return_value = 123
+ sign_mock.return_value = 'fake-signature'
+ responses.add(
+ responses.GET,
+ 'http://example.net/access',
+ body=json.dumps({
+ "status": "ok",
+ "articles": {
+ "article-1": {"access": True},
+ "article-2": {"access": False},
+ },
+ }),
+ status=200,
+ content_type='application/json',
+ )
+
+ client = LaterPayClient(
+ 'fake-cp-key',
+ 'fake-shared-secret',
+ api_root='http://example.net',
+ )
+
+ data = client.get_access_data(
+ ['article-1', 'article-2'],
+ muid='some-user',
+ )
+
+ self.assertEqual(data, {
+ "status": "ok",
+ "articles": {
+ "article-1": {"access": True},
+ "article-2": {"access": False},
+ },
+ })
+ self.assertEqual(len(responses.calls), 1)
+
+ call = responses.calls[0]
+
+ self.assertEqual(call.request.headers['X-LP-APIVersion'], '2')
+
+ qd = parse_qs(urlparse(call.request.url).query)
+
+ self.assertEqual(qd['ts'], ['123'])
+ self.assertEqual(qd['cp'], ['fake-cp-key'])
+ self.assertEqual(qd['article_id'], ['article-1', 'article-2'])
+ self.assertEqual(qd['hmac'], ['fake-signature'])
+ self.assertEqual(qd['muid'], ['some-user'])
+ self.assertNotIn('lptoken', 'qd')
+
+ sign_mock.assert_called_once_with(
+ secret='fake-shared-secret',
+ params={
+ 'cp': 'fake-cp-key',
+ 'article_id': ['article-1', 'article-2'],
+ 'ts': '123',
'muid': 'some-user',
},
url='http://example.net/access',
@@ -379,16 +442,31 @@ class TestLaterPayClient(unittest.TestCase):
'hmac': 'fake-signature',
})
- params = self.lp.get_access_params('article-1', lptoken='fake-lptoken', muid='some-user')
+ params = self.lp.get_access_params('article-1', muid='some-user')
self.assertEqual(params, {
'cp': '1',
'ts': '123',
- 'lptoken': 'fake-lptoken',
'article_id': ['article-1'],
'hmac': 'fake-signature',
'muid': 'some-user',
})
+ lpclient = LaterPayClient('1', 'some-secret', lptoken='instance-lptoken')
+ params = lpclient.get_access_params('article-1')
+ self.assertEqual(params, {
+ 'cp': '1',
+ 'ts': '123',
+ 'lptoken': 'instance-lptoken',
+ 'article_id': ['article-1'],
+ 'hmac': 'fake-signature',
+ })
+
+ with self.assertRaises(AssertionError):
+ self.lp.get_access_params('article-1', lptoken='fake-lptoken', muid='some-user')
+
+ with self.assertRaises(AssertionError):
+ self.lp.get_access_params('article-1')
+
@mock.patch('time.time')
def test_get_gettoken_redirect(self, time_mock):
time_mock.return_value = 12345678
|
We must only pass lptoken or muid to /access
Only one of `lptoken` and `muid` may be passed to the API's `/access` calls, never both.
|
0.0
|
397afe02a9ddec00096da24febb8f37d60658f26
|
[
"tests/test_client.py::TestLaterPayClient::test_get_access_data_success_muid",
"tests/test_client.py::TestLaterPayClient::test_get_access_params"
] |
[
"tests/test_client.py::TestItemDefinition::test_invalid_sub_id",
"tests/test_client.py::TestItemDefinition::test_invalid_period",
"tests/test_client.py::TestItemDefinition::test_invalid_pricing",
"tests/test_client.py::TestItemDefinition::test_item_definition",
"tests/test_client.py::TestItemDefinition::test_sub_id",
"tests/test_client.py::TestItemDefinition::test_invalid_expiry",
"tests/test_client.py::TestLaterPayClient::test_get_controls_links_url_all_set_long",
"tests/test_client.py::TestLaterPayClient::test_get_web_url_itemdefinition_value_none",
"tests/test_client.py::TestLaterPayClient::test_web_url_transaction_reference",
"tests/test_client.py::TestLaterPayClient::test_get_access_data_success",
"tests/test_client.py::TestLaterPayClient::test_get_add_url",
"tests/test_client.py::TestLaterPayClient::test_get_gettoken_redirect",
"tests/test_client.py::TestLaterPayClient::test_get_login_dialog_url_with_use_dialog_api_false",
"tests/test_client.py::TestLaterPayClient::test_get_controls_balance_url_all_defaults",
"tests/test_client.py::TestLaterPayClient::test_web_url_muid",
"tests/test_client.py::TestLaterPayClient::test_get_logout_dialog_url_with_use_dialog_api_false",
"tests/test_client.py::TestLaterPayClient::test_web_url_consumable",
"tests/test_client.py::TestLaterPayClient::test_get_controls_links_url_all_set_short",
"tests/test_client.py::TestLaterPayClient::test_web_url_return_url",
"tests/test_client.py::TestLaterPayClient::test_web_url_product_key",
"tests/test_client.py::TestLaterPayClient::test_web_url_dialog",
"tests/test_client.py::TestLaterPayClient::test_get_buy_url",
"tests/test_client.py::TestLaterPayClient::test_has_token",
"tests/test_client.py::TestLaterPayClient::test_get_controls_balance_url_all_set",
"tests/test_client.py::TestLaterPayClient::test_web_url_jsevents",
"tests/test_client.py::TestLaterPayClient::test_web_url_failure_url",
"tests/test_client.py::TestLaterPayClient::test_get_subscribe_url",
"tests/test_client.py::TestLaterPayClient::test_get_controls_links_url_all_defaults",
"tests/test_client.py::TestLaterPayClient::test_get_signup_dialog_url"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-05-15 15:09:08+00:00
|
mit
| 3,512 |
|
laterpay__laterpay-client-python-108
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9257c1d..5da76e2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,9 @@
## 5.4.0 (under development)
+* Added the `connection_handler` to `LaterPayClient` to simplify connection
+ pooling.
+
## 5.3.1
* Only passed one of `lptoken` and `muid` to `/access` calls. Passing both is
diff --git a/laterpay/__init__.py b/laterpay/__init__.py
index 6fb563a..45417c3 100644
--- a/laterpay/__init__.py
+++ b/laterpay/__init__.py
@@ -112,7 +112,8 @@ class LaterPayClient(object):
api_root='https://api.laterpay.net',
web_root='https://web.laterpay.net',
lptoken=None,
- timeout_seconds=10):
+ timeout_seconds=10,
+ connection_handler=None):
"""
Instantiate a LaterPay API client.
@@ -122,6 +123,9 @@ class LaterPayClient(object):
:param timeout_seconds: number of seconds after which backend api
requests (e.g. /access) will time out (10 by default).
+ :param connection_handler: Defaults to Python requests. Set it to
+ ``requests.Session()`` to use a `Python requests Session object
+ <http://docs.python-requests.org/en/master/user/advanced/#session-objects>`_.
"""
self.cp_key = cp_key
@@ -130,6 +134,7 @@ class LaterPayClient(object):
self.shared_secret = shared_secret
self.lptoken = lptoken
self.timeout_seconds = timeout_seconds
+ self.connection_handler = connection_handler or requests
def get_gettoken_redirect(self, return_to):
"""
@@ -408,9 +413,9 @@ class LaterPayClient(object):
"""
Perform a request to /access API and return obtained data.
- This method uses ``requests.get`` to fetch the data and then calls
- ``.raise_for_status()`` on the response. It does not handle any errors
- raised by ``requests`` API.
+ This method uses ``requests.get`` or ``requests.Session().get`` to
+ fetch the data and then calls ``.raise_for_status()`` on the response.
+ It does not handle any errors raised by ``requests`` API.
:param article_ids: list of article ids or a single article id as a
string
@@ -421,7 +426,7 @@ class LaterPayClient(object):
url = self.get_access_url()
headers = self.get_request_headers()
- response = requests.get(
+ response = self.connection_handler.get(
url,
params=params,
headers=headers,
|
laterpay/laterpay-client-python
|
87100aa64f880f61f9cdda81f1675c7650fc7c2e
|
diff --git a/tests/test_client.py b/tests/test_client.py
index bc9e37f..f59f98b 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -427,6 +427,34 @@ class TestLaterPayClient(unittest.TestCase):
method='GET',
)
+ @mock.patch('time.time')
+ def test_get_access_data_connection_handler(self, time_time_mock):
+ time_time_mock.return_value = 123
+ connection_handler = mock.Mock()
+ client = LaterPayClient(
+ 'fake-cp-key',
+ 'fake-shared-secret',
+ connection_handler=connection_handler,
+ )
+
+ client.get_access_data(
+ ['article-1', 'article-2'],
+ lptoken='fake-lptoken',
+ )
+
+ connection_handler.get.assert_called_once_with(
+ 'https://api.laterpay.net/access',
+ headers=client.get_request_headers(),
+ params={
+ 'article_id': ['article-1', 'article-2'],
+ 'ts': '123',
+ 'hmac': '198717d5c98b89ec3b509784758a98323f167ca6d42c363672169cfc',
+ 'cp': 'fake-cp-key',
+ 'lptoken': 'fake-lptoken',
+ },
+ timeout=10,
+ )
+
@mock.patch('laterpay.signing.sign')
@mock.patch('time.time')
def test_get_access_params(self, time_time_mock, sign_mock):
|
Added support for `requests.Session`
Sometimes it's desirable to use [`requests.Session()`](http://docs.python-requests.org/en/master/user/advanced/#session-objects) over `requests.get()`.
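A rough usage sketch of what that could look like with the `connection_handler` parameter added in the patch above (credentials and article ids are placeholders):
```python
import requests
from laterpay import LaterPayClient

session = requests.Session()  # one shared connection pool
client = LaterPayClient(
    'cp-key',            # placeholder merchant key
    'shared-secret',     # placeholder secret
    connection_handler=session,
)
# Subsequent /access calls reuse the pooled connections:
data = client.get_access_data(['article-1'], lptoken='some-lptoken')
```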
|
0.0
|
87100aa64f880f61f9cdda81f1675c7650fc7c2e
|
[
"tests/test_client.py::TestLaterPayClient::test_get_access_data_connection_handler"
] |
[
"tests/test_client.py::TestLaterPayClient::test_get_controls_links_url_all_set_short",
"tests/test_client.py::TestLaterPayClient::test_get_controls_balance_url_all_defaults",
"tests/test_client.py::TestLaterPayClient::test_get_controls_balance_url_all_set",
"tests/test_client.py::TestLaterPayClient::test_web_url_transaction_reference",
"tests/test_client.py::TestLaterPayClient::test_web_url_dialog",
"tests/test_client.py::TestLaterPayClient::test_get_logout_dialog_url_with_use_dialog_api_false",
"tests/test_client.py::TestLaterPayClient::test_get_access_params",
"tests/test_client.py::TestLaterPayClient::test_web_url_return_url",
"tests/test_client.py::TestLaterPayClient::test_get_add_url",
"tests/test_client.py::TestLaterPayClient::test_get_login_dialog_url_with_use_dialog_api_false",
"tests/test_client.py::TestLaterPayClient::test_get_access_data_success_muid",
"tests/test_client.py::TestLaterPayClient::test_get_web_url_itemdefinition_value_none",
"tests/test_client.py::TestLaterPayClient::test_has_token",
"tests/test_client.py::TestLaterPayClient::test_web_url_jsevents",
"tests/test_client.py::TestLaterPayClient::test_web_url_consumable",
"tests/test_client.py::TestLaterPayClient::test_web_url_product_key",
"tests/test_client.py::TestLaterPayClient::test_get_subscribe_url",
"tests/test_client.py::TestLaterPayClient::test_get_gettoken_redirect",
"tests/test_client.py::TestLaterPayClient::test_get_controls_links_url_all_defaults",
"tests/test_client.py::TestLaterPayClient::test_get_signup_dialog_url",
"tests/test_client.py::TestLaterPayClient::test_get_access_data_success",
"tests/test_client.py::TestLaterPayClient::test_web_url_failure_url",
"tests/test_client.py::TestLaterPayClient::test_web_url_muid",
"tests/test_client.py::TestLaterPayClient::test_get_controls_links_url_all_set_long",
"tests/test_client.py::TestLaterPayClient::test_get_buy_url",
"tests/test_client.py::TestItemDefinition::test_sub_id",
"tests/test_client.py::TestItemDefinition::test_invalid_expiry",
"tests/test_client.py::TestItemDefinition::test_item_definition",
"tests/test_client.py::TestItemDefinition::test_invalid_period",
"tests/test_client.py::TestItemDefinition::test_invalid_sub_id",
"tests/test_client.py::TestItemDefinition::test_invalid_pricing"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-09-20 14:33:27+00:00
|
mit
| 3,513 |
|
laterpay__laterpay-client-python-82
|
diff --git a/laterpay/__init__.py b/laterpay/__init__.py
index c92f556..f2df4a4 100644
--- a/laterpay/__init__.py
+++ b/laterpay/__init__.py
@@ -421,10 +421,7 @@ class LaterPayClient(object):
"""
params = self._sign_and_encode(params=params, url=url, method=method)
- headers = {
- 'X-LP-APIVersion': 2,
- 'User-Agent': 'LaterPay Client - Python - v0.2'
- }
+ headers = self.get_request_headers()
if method == 'POST':
req = Request(url, data=params, headers=headers)
@@ -503,7 +500,7 @@ class LaterPayClient(object):
Return a ``dict`` of request headers to be sent to the API.
"""
return {
- 'X-LP-APIVersion': 2,
+ 'X-LP-APIVersion': '2',
# TODO: Add client version information.
'User-Agent': 'LaterPay Client Python',
}
|
laterpay/laterpay-client-python
|
3744237922be5c422568b701a6bf930418581be8
|
diff --git a/tests/test_client.py b/tests/test_client.py
index e6ebc1c..f3069e7 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -302,7 +302,7 @@ class TestLaterPayClient(unittest.TestCase):
call = responses.calls[0]
- self.assertEqual(call.request.headers['X-LP-APIVersion'], 2)
+ self.assertEqual(call.request.headers['X-LP-APIVersion'], '2')
qd = parse_qs(urlparse(call.request.url).query)
|
python-requests requires string only headers
As of python-requests 2.11, headers that are neither strings nor bytes are not accepted anymore. See kennethreitz/requests#3477
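A minimal illustration of the constraint (the header name is the one this client sends):
```python
bad_headers = {'X-LP-APIVersion': 2}     # rejected since requests 2.11
good_headers = {'X-LP-APIVersion': '2'}  # header values must be str or bytes

# requests.get('https://api.laterpay.net/access', headers=good_headers)
```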
|
0.0
|
3744237922be5c422568b701a6bf930418581be8
|
[
"tests/test_client.py::TestLaterPayClient::test_get_access_data_success"
] |
[
"tests/test_client.py::TestLaterPayClient::test_get_add_url_with_use_dialog_api_false",
"tests/test_client.py::TestLaterPayClient::test_get_login_dialog_url_without_use_dialog_api",
"tests/test_client.py::TestLaterPayClient::test_get_logout_dialog_url_without_use_dialog_api",
"tests/test_client.py::TestLaterPayClient::test_get_buy_url_with_use_dialog_api_false",
"tests/test_client.py::TestLaterPayClient::test_get_controls_balance_url_all_set",
"tests/test_client.py::TestLaterPayClient::test_get_controls_links_url_all_set_short",
"tests/test_client.py::TestLaterPayClient::test_get_add_url_product_key_param",
"tests/test_client.py::TestLaterPayClient::test_get_controls_links_url_all_defaults",
"tests/test_client.py::TestLaterPayClient::test_transaction_reference",
"tests/test_client.py::TestLaterPayClient::test_get_logout_dialog_url_with_use_dialog_api_false",
"tests/test_client.py::TestLaterPayClient::test_get_signup_dialog_url_with_use_dialog_api_false",
"tests/test_client.py::TestLaterPayClient::test_get_login_dialog_url_with_use_dialog_api_false",
"tests/test_client.py::TestLaterPayClient::test_has_token",
"tests/test_client.py::TestLaterPayClient::test_get_add_url_no_product_key_param",
"tests/test_client.py::TestLaterPayClient::test_get_web_url_extra_kwargs",
"tests/test_client.py::TestLaterPayClient::test_log_warning_for_skip_add_to_invoice_deprecation",
"tests/test_client.py::TestLaterPayClient::test_get_buy_url_product_key_param",
"tests/test_client.py::TestLaterPayClient::test_get_buy_url_no_product_key_param",
"tests/test_client.py::TestLaterPayClient::test_get_signup_dialog_url_without_use_dialog_api",
"tests/test_client.py::TestLaterPayClient::test_get_access_params",
"tests/test_client.py::TestLaterPayClient::test_get_controls_links_url_all_set_long",
"tests/test_client.py::TestLaterPayClient::test_failure_url_param",
"tests/test_client.py::TestLaterPayClient::test_get_web_url_has_no_none_params",
"tests/test_client.py::TestLaterPayClient::test_get_controls_balance_url_all_defaults",
"tests/test_client.py::TestLaterPayClient::test_get_gettoken_redirect",
"tests/test_client.py::TestItemDefinition::test_item_definition"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2016-08-22 06:03:06+00:00
|
mit
| 3,514 |
|
laterpay__laterpay-client-python-85
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8484c44..532fd0e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,7 +1,11 @@
# Changelog
-## Unreleased
+## 4.6.0 (under development)
+
+* Fixed encoding issues when passing byte string parameters on Python 3
+ ([#84](https://github.com/laterpay/laterpay-client-python/issues/84))
+
## 4.5.0
diff --git a/laterpay/signing.py b/laterpay/signing.py
index 24b1bf2..347aa31 100644
--- a/laterpay/signing.py
+++ b/laterpay/signing.py
@@ -53,12 +53,8 @@ def sort_params(param_dict):
for name, value_list in six.iteritems(param_dict):
if isinstance(value_list, (list, tuple)):
for value in value_list:
- if not isinstance(value, six.string_types):
- value = str(value)
param_list.append((name, value))
else:
- if not isinstance(value_list, six.string_types):
- value_list = str(value_list)
param_list.append((name, value_list))
return sorted(param_list)
@@ -127,10 +123,13 @@ def create_base_message(params, url, method='POST'):
values_str = []
- # If any non six.string_types objects, ``str()`` them.
for value in values:
- if not isinstance(value, six.string_types):
+ if not isinstance(value, (six.string_types, six.binary_type)):
+ # If any non-string or non-bytes like objects, ``str()`` them.
value = str(value)
+ if six.PY3 and isinstance(value, six.binary_type):
+ # Issue #84, decode byte strings before using them on Python 3
+ value = value.decode()
values_str.append(value)
data[key] = [quote(compat.encode_if_unicode(value_str), safe='') for value_str in values_str]
|
laterpay/laterpay-client-python
|
a4738be03b9cc0680d24466b7eefe5fdde6b7d2d
|
diff --git a/tests/test_signing.py b/tests/test_signing.py
index ecbff1f..2f0fe02 100644
--- a/tests/test_signing.py
+++ b/tests/test_signing.py
@@ -25,7 +25,7 @@ class TestSigningHelper(unittest.TestCase):
)
def test_create_message_bytestrings(self):
- params = {'parĄm1': 'valuĘ'}
+ params = {b'par\xc4\x84m1': b'valu\xc4\x98'}
url = 'https://endpoint.com/ąpi'
msg = signing.create_base_message(params, url)
|
Decode byte strings before using them during signing
On Python 3, when signing a request containing a byte string (`b'foo'`) the value is cast into a string, resulting in `"b'foo'"`, instead of properly decoding the string to `'foo'`.
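A quick illustration of the difference in plain Python (using the same bytes as the test suite, not the library's signing code itself):
```python
value = b'valu\xc4\x98'   # UTF-8 bytes for 'valuĘ'

str(value)       # "b'valu\\xc4\\x98'" -- the repr, which then ends up in the signature
value.decode()   # 'valuĘ'             -- the intended text
```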
|
0.0
|
a4738be03b9cc0680d24466b7eefe5fdde6b7d2d
|
[
"tests/test_signing.py::TestSigningHelper::test_create_message_bytestrings"
] |
[
"tests/test_signing.py::TestSigningHelper::test_create_message_sorting_and_combining_params",
"tests/test_signing.py::TestSigningHelper::test_create_message_unicode",
"tests/test_signing.py::TestSigningHelper::test_create_message_wrong_method",
"tests/test_signing.py::TestSigningHelper::test_normalise_param_structure",
"tests/test_signing.py::TestSigningHelper::test_sign",
"tests/test_signing.py::TestSigningHelper::test_sign_and_encode",
"tests/test_signing.py::TestSigningHelper::test_signing_with_item",
"tests/test_signing.py::TestSigningHelper::test_url_verification",
"tests/test_signing.py::TestSigningHelper::test_verify_invalid_unicode_signature",
"tests/test_signing.py::TestSigningHelper::test_verify_str_signature",
"tests/test_signing.py::TestSigningHelper::test_verify_unicode_signature"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2016-10-12 13:56:38+00:00
|
mit
| 3,515 |
|
laterpay__laterpay-client-python-94
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index edb66fd..2e263cb 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,9 @@
## 5.1.0 (under development)
+* Ignored HMAC character capitalization
+ ([#93](https://github.com/laterpay/laterpay-client-python/issues/93))
+
## 5.0.0
* Removed the following long deprecated methods from the
diff --git a/laterpay/signing.py b/laterpay/signing.py
index 863fa05..164cba9 100644
--- a/laterpay/signing.py
+++ b/laterpay/signing.py
@@ -27,6 +27,7 @@ def time_independent_HMAC_compare(a, b):
if len(a) != len(b):
return False
result = 0
+ a, b = a.lower(), b.lower()
for x, y in zip(a, b):
result |= ord(x) ^ ord(y)
return result == 0
|
laterpay/laterpay-client-python
|
efe5ec2ff1a6b4020ae081c7491c7f9eca22cc4e
|
diff --git a/tests/test_signing.py b/tests/test_signing.py
index ce4742d..7874b13 100644
--- a/tests/test_signing.py
+++ b/tests/test_signing.py
@@ -138,8 +138,9 @@ class TestSigningHelper(unittest.TestCase):
secret = 'secret'
+ # Use some upper case characters to test for issue #93
verified = signing.verify(
- b'346f3d53ad762f3ed3fb7f2427dec2bbfaf0338bb7f91f0460aff15c',
+ b'346f3d53ad762f3ed3fb7f2427deC2BBFAF0338BB7F91F0460AFF15C',
secret,
params,
url,
@@ -153,8 +154,9 @@ class TestSigningHelper(unittest.TestCase):
'param2': ['value2', 'value3'],
}
url = u'https://endpoint.com/api'
+ # Use some upper case characters to test for issue #93
verified = signing.verify(
- u'346f3d53ad762f3ed3fb7f2427dec2bbfaf0338bb7f91f0460aff15c',
+ u'346F3D53AD762F3ED3FB7F2427DEc2bbfaf0338bb7f91f0460aff15c',
u'secret',
params,
url,
|
Ignore capitalization when checking HMACs
`laterpay.signing.time_independent_HMAC_compare()` treats hashes `12ab3` and `12AB3` as different. Let's not do that.
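A sketch of the case-insensitive variant, mirroring the one-line fix in the patch above (lowercase both hex digests before the constant-time loop):
```python
def time_independent_HMAC_compare(a, b):
    if len(a) != len(b):
        return False
    a, b = a.lower(), b.lower()   # hex digests only differ in case
    result = 0
    for x, y in zip(a, b):
        result |= ord(x) ^ ord(y)
    return result == 0

assert time_independent_HMAC_compare('12ab3', '12AB3')
```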
|
0.0
|
efe5ec2ff1a6b4020ae081c7491c7f9eca22cc4e
|
[
"tests/test_signing.py::TestSigningHelper::test_verify_byte_signature",
"tests/test_signing.py::TestSigningHelper::test_verify_unicode_signature"
] |
[
"tests/test_signing.py::TestSigningHelper::test_create_message_sorting_and_combining_params_omdict",
"tests/test_signing.py::TestSigningHelper::test_create_message_unicode",
"tests/test_signing.py::TestSigningHelper::test_normalise_param_structure",
"tests/test_signing.py::TestSigningHelper::test_sign",
"tests/test_signing.py::TestSigningHelper::test_create_message_bytestrings",
"tests/test_signing.py::TestSigningHelper::test_sort_params",
"tests/test_signing.py::TestSigningHelper::test_sign_unicode_secret",
"tests/test_signing.py::TestSigningHelper::test_create_message_sorting_and_combining_params",
"tests/test_signing.py::TestSigningHelper::test_url_verification",
"tests/test_signing.py::TestSigningHelper::test_verify_iterable_signature",
"tests/test_signing.py::TestSigningHelper::test_verify_invalid_unicode_signature",
"tests/test_signing.py::TestSigningHelper::test_sort_params_public_deprecation",
"tests/test_signing.py::TestSigningHelper::test_create_message_wrong_method"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-01-04 14:44:16+00:00
|
mit
| 3,516 |
|
lavr__python-emails-151
|
diff --git a/emails/backend/smtp/backend.py b/emails/backend/smtp/backend.py
index 558e75d..56ed3e6 100644
--- a/emails/backend/smtp/backend.py
+++ b/emails/backend/smtp/backend.py
@@ -26,7 +26,7 @@ class SMTPBackend(object):
connection_ssl_cls = SMTPClientWithResponse_SSL
response_cls = SMTPResponse
- def __init__(self, ssl=False, fail_silently=True, **kwargs):
+ def __init__(self, ssl=False, fail_silently=True, mail_options=None, **kwargs):
self.smtp_cls = ssl and self.connection_ssl_cls or self.connection_cls
@@ -46,6 +46,7 @@ class SMTPBackend(object):
self.host = kwargs.get('host')
self.port = kwargs.get('port')
self.fail_silently = fail_silently
+ self.mail_options = mail_options or []
self._client = None
@@ -119,7 +120,7 @@ class SMTPBackend(object):
response = send(from_addr=from_addr,
to_addrs=to_addrs,
msg=msg.as_string(),
- mail_options=mail_options,
+ mail_options=mail_options or self.mail_options,
rcpt_options=rcpt_options)
if not self.fail_silently:
diff --git a/emails/compat/__init__.py b/emails/compat/__init__.py
index 9f301e1..d4a10f8 100644
--- a/emails/compat/__init__.py
+++ b/emails/compat/__init__.py
@@ -160,7 +160,10 @@ elif is_py3:
Does not encode non-ascii realname.
Python3 email.utils.formataddr do encode realname.
+
+ TODO: switch to email.headerregistry.AddressHeader ?
"""
+
name, address = pair
if name:
quotes = ''
diff --git a/emails/utils.py b/emails/utils.py
index 46a5abd..cb90a78 100644
--- a/emails/utils.py
+++ b/emails/utils.py
@@ -12,7 +12,8 @@ from email import generator
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header, decode_header as decode_header_
-from email.utils import formataddr, parseaddr, formatdate
+from email.utils import parseaddr, formatdate
+from emails.compat import formataddr
import requests
|
lavr/python-emails
|
eecc9ed30b3130e6675c9d9df5046312d97cd12f
|
diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml
index e508d08..871bb40 100644
--- a/.github/workflows/tests.yaml
+++ b/.github/workflows/tests.yaml
@@ -22,7 +22,7 @@ jobs:
- {name: '2.7', python: '2.7', os: ubuntu-16.04, tox: py27}
services:
postfix:
- image: juanluisbaptiste/postfix
+ image: lavr/docker-postfix
env:
SMTP_SERVER: smtp.gmail.com
SMTP_PORT: 587
@@ -53,11 +53,13 @@ jobs:
- name: run rests
env:
SMTP_TEST_SUBJECT_SUFFIX: "github-actions sha:${{ github.sha }} run_id:${{ github.run_id }}"
+ SMTP_TEST_MAIL_FROM: [email protected]
+ SMTP_TEST_MAIL_TO: [email protected]
SMTP_TEST_SETS: LOCAL
- SMTP_TEST_LOCAL_TO: [email protected]
SMTP_TEST_LOCAL_WITHOUT_TLS: true
SMTP_TEST_LOCAL_HOST: 127.0.0.1
SMTP_TEST_LOCAL_PORT: 2525
+
SMTP_TEST_GMAIL_TO: [email protected]
SMTP_TEST_GMAIL_USER: ${{ secrets.SMTP_TEST_GMAIL_USER }}
SMTP_TEST_GMAIL_PASSWORD: ${{ secrets.SMTP_TEST_GMAIL_PASSWORD }}
diff --git a/emails/testsuite/message/helpers.py b/emails/testsuite/message/helpers.py
index 6fe2144..9050bce 100644
--- a/emails/testsuite/message/helpers.py
+++ b/emails/testsuite/message/helpers.py
@@ -5,10 +5,11 @@ import os
import emails
from emails.template import JinjaTemplate
-TO_EMAIL = os.environ.get('TEST_TO_EMAIL') or '[email protected]'
-FROM_EMAIL = os.environ.get('TEST_FROM_EMAIL') or '[email protected]'
+TO_EMAIL = os.environ.get('SMTP_TEST_MAIL_TO') or '[email protected]'
+FROM_EMAIL = os.environ.get('SMTP_TEST_MAIL_FROM') or '[email protected]'
ROOT = os.path.dirname(__file__)
+
def common_email_data(**kw):
T = JinjaTemplate
data = {'charset': 'utf-8',
diff --git a/emails/testsuite/message/test_message.py b/emails/testsuite/message/test_message.py
index 55a838d..51fead3 100644
--- a/emails/testsuite/message/test_message.py
+++ b/emails/testsuite/message/test_message.py
@@ -147,6 +147,13 @@ def test_message_addresses():
assert m.mail_to == [("웃", "[email protected]"), (None, "[email protected]")]
+def test_rfc6532_address():
+ m = Message()
+ m.mail_to = "anaï[email protected]"
+ m.html = 'X'
+ assert m.as_string()
+
+
def test_message_policy():
if is_py34_plus:
diff --git a/emails/testsuite/message/test_send.py b/emails/testsuite/message/test_send.py
index 2493211..d3be34b 100644
--- a/emails/testsuite/message/test_send.py
+++ b/emails/testsuite/message/test_send.py
@@ -26,6 +26,9 @@ def get_letters():
del data['html']
yield emails.loader.from_url(url=url, message_params=data, images_inline=True), None
+ # Email with utf-8 "to"
+ yield emails.Message(**common_email_data(mail_to="anaï[email protected]", subject="UTF-8 To")), None
+
def test_send_letters():
@@ -33,10 +36,9 @@ def test_send_letters():
for tag, server in get_servers():
server.patch_message(m)
print(tag, server.params)
- response = m.send(smtp=server.params, render=render)
- print(server.params)
- assert response.success or response.status_code in (421, 451) # gmail not always like test emails
- server.sleep()
+ response = m.send(smtp=server.params, render=render, smtp_mail_options=['smtputf8'])
+ assert response.success
+ # server.sleep()
def test_send_with_context_manager():
diff --git a/emails/testsuite/smtp_servers.py b/emails/testsuite/smtp_servers.py
index ba6e981..4b0afd2 100644
--- a/emails/testsuite/smtp_servers.py
+++ b/emails/testsuite/smtp_servers.py
@@ -41,7 +41,7 @@ def smtp_server_from_env(name='GMAIL'):
return v
def _valid_smtp(data):
- return data['to_email'] and data['host']
+ return data['host']
smtp_info = dict(
from_email=_var("FROM", default=DEFAULT_FROM),
|
Library does not support RFC 6532 (non ascii characters in local part of the email address)
The `sanitize_address` function in the utils file imports and makes use of the `formataddr` function from the stdlib.
According to [this thread ](https://bugs.python.org/issue25955), the `formataddr` should no longer be used:
> formataddr is part of the legacy interface and has no knowledge of the current policy. So it doesn't support RFC 6532. For that you need to use the new API: just assign your address to the appropriate field, or create a headerregistry.Address object.
Because of this, it is impossible to send emails to addresses such as **`anaï[email protected]`** with this package, although it should be possible to send an email to such an address using an SMTP server supporting SMTPUTF8. (At least according to [RFC 6532](https://tools.ietf.org/html/rfc6532).)
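For reference, a small sketch of the modern stdlib API the quoted thread recommends (the address below is hypothetical; actually delivering such mail still requires an SMTP server that advertises SMTPUTF8):
```python
from email.headerregistry import Address
from email.message import EmailMessage

msg = EmailMessage()
msg['From'] = Address(username='sender', domain='example.com')
msg['To'] = Address(username='anaïs', domain='example.org')  # non-ASCII local part
msg.set_content('Bonjour')
```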
|
0.0
|
eecc9ed30b3130e6675c9d9df5046312d97cd12f
|
[
"emails/testsuite/message/test_message.py::test_rfc6532_address"
] |
[
"emails/testsuite/message/test_message.py::test_date",
"emails/testsuite/message/test_message.py::test_sanitize_header",
"emails/testsuite/message/test_message.py::test_headers_not_double_encoded",
"emails/testsuite/message/test_message.py::test_headers_ascii_encoded",
"emails/testsuite/message/test_message.py::test_message_addresses",
"emails/testsuite/message/test_message.py::test_message_id",
"emails/testsuite/message/test_message.py::test_several_recipients",
"emails/testsuite/message/test_message.py::test_transform",
"emails/testsuite/message/test_send.py::test_send_with_context_manager"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-06-20 18:10:37+00:00
|
apache-2.0
| 3,517 |
|
lbl-srg__BuildingsPy-181
|
diff --git a/buildingspy/development/error_dictionary.py b/buildingspy/development/error_dictionary.py
index 2304f63..4a3c8d3 100644
--- a/buildingspy/development/error_dictionary.py
+++ b/buildingspy/development/error_dictionary.py
@@ -138,6 +138,13 @@ class ErrorDictionary(object):
'model_message': "\"inner Modelica.StateGraph.StateGraphRoot\" is missing in '{}'.\n",
'summary_message': "Number of models with missing StateGraphRoot : {}\n"}
+ self._error_dict["mismatched displayUnits"] = {
+ 'tool_message': "Mismatched displayUnit",
+ 'counter': 0,
+ 'buildingspy_var': "iMisDisUni",
+ 'model_message': "\"Mismatched displayUnit in '{}'.\n",
+ 'summary_message': "Number of models with mismatched displayUnit : {}\n"}
+
def get_dictionary(self):
""" Return the dictionary with all error data
"""
diff --git a/buildingspy/development/refactor.py b/buildingspy/development/refactor.py
index ba4d363..2c064b0 100644
--- a/buildingspy/development/refactor.py
+++ b/buildingspy/development/refactor.py
@@ -637,6 +637,10 @@ def move_class(source, target):
"""
##############################################################
+ # First, remove empty subdirectories
+ _remove_empty_folders(source.replace(".", os.path.sep),
+ removeRoot=False)
+ ##############################################################
# Check if it is a directory with a package.mo file
if os.path.isdir(source.replace(".", os.path.sep)):
_move_class_directory(source, target)
@@ -665,6 +669,26 @@ def move_class(source, target):
_update_all_references(source, target)
+def _remove_empty_folders(path, removeRoot=True):
+ ''' Remove empty directories
+ '''
+ if not os.path.isdir(path):
+ return
+
+ # remove empty subfolders
+ files = os.listdir(path)
+ if len(files):
+ for f in files:
+ fullpath = os.path.join(path, f)
+ if os.path.isdir(fullpath):
+ _remove_empty_folders(fullpath)
+
+ # if folder empty, delete it
+ files = os.listdir(path)
+ if len(files) == 0 and removeRoot:
+ os.rmdir(path)
+
+
def _update_all_references(source, target):
""" Updates all references in `.mo` and `.mos` files.
|
lbl-srg/BuildingsPy
|
ad2f3e7ffb0a01117e5f09ac498a87b5c02ca158
|
diff --git a/buildingspy/tests/test_development_error_dictionary.py b/buildingspy/tests/test_development_error_dictionary.py
index 393e2cc..ee9d12b 100644
--- a/buildingspy/tests/test_development_error_dictionary.py
+++ b/buildingspy/tests/test_development_error_dictionary.py
@@ -39,7 +39,8 @@ class Test_development_error_dictionary(unittest.TestCase):
'type inconsistent definition equations',
'unspecified initial conditions',
'unused connector',
- 'stateGraphRoot missing'])
+ 'stateGraphRoot missing',
+ 'mismatched displayUnits'])
self.assertEqual(len(k), len(k_expected), "Wrong number of keys.")
for i in range(len(k)):
@@ -63,7 +64,8 @@ class Test_development_error_dictionary(unittest.TestCase):
'Type inconsistent definition equation',
'Dymola has selected default initial condition',
'Warning: The following connector variables are not used in the model',
- "A \\\"stateGraphRoot\\\" component was automatically introduced."])
+ "A \\\"stateGraphRoot\\\" component was automatically introduced.",
+ "Mismatched displayUnit"])
self.assertEqual(len(k), len(k_expected), "Wrong number of tool messages.")
for i in range(len(k)):
|
add test for Mismatched displayUnit
Add a test for `Mismatched displayUnit` to the regression tests.
|
0.0
|
ad2f3e7ffb0a01117e5f09ac498a87b5c02ca158
|
[
"buildingspy/tests/test_development_error_dictionary.py::Test_development_error_dictionary::test_keys",
"buildingspy/tests/test_development_error_dictionary.py::Test_development_error_dictionary::test_tool_messages"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-11-27 20:38:23+00:00
|
bsd-3-clause
| 3,518 |
|
lbl-srg__BuildingsPy-183
|
diff --git a/buildingspy/development/error_dictionary.py b/buildingspy/development/error_dictionary.py
index 2304f63..4a3c8d3 100644
--- a/buildingspy/development/error_dictionary.py
+++ b/buildingspy/development/error_dictionary.py
@@ -138,6 +138,13 @@ class ErrorDictionary(object):
'model_message': "\"inner Modelica.StateGraph.StateGraphRoot\" is missing in '{}'.\n",
'summary_message': "Number of models with missing StateGraphRoot : {}\n"}
+ self._error_dict["mismatched displayUnits"] = {
+ 'tool_message': "Mismatched displayUnit",
+ 'counter': 0,
+ 'buildingspy_var': "iMisDisUni",
+ 'model_message': "\"Mismatched displayUnit in '{}'.\n",
+ 'summary_message': "Number of models with mismatched displayUnit : {}\n"}
+
def get_dictionary(self):
""" Return the dictionary with all error data
"""
diff --git a/buildingspy/development/refactor.py b/buildingspy/development/refactor.py
index ba4d363..2c064b0 100644
--- a/buildingspy/development/refactor.py
+++ b/buildingspy/development/refactor.py
@@ -637,6 +637,10 @@ def move_class(source, target):
"""
##############################################################
+ # First, remove empty subdirectories
+ _remove_empty_folders(source.replace(".", os.path.sep),
+ removeRoot=False)
+ ##############################################################
# Check if it is a directory with a package.mo file
if os.path.isdir(source.replace(".", os.path.sep)):
_move_class_directory(source, target)
@@ -665,6 +669,26 @@ def move_class(source, target):
_update_all_references(source, target)
+def _remove_empty_folders(path, removeRoot=True):
+ ''' Remove empty directories
+ '''
+ if not os.path.isdir(path):
+ return
+
+ # remove empty subfolders
+ files = os.listdir(path)
+ if len(files):
+ for f in files:
+ fullpath = os.path.join(path, f)
+ if os.path.isdir(fullpath):
+ _remove_empty_folders(fullpath)
+
+ # if folder empty, delete it
+ files = os.listdir(path)
+ if len(files) == 0 and removeRoot:
+ os.rmdir(path)
+
+
def _update_all_references(source, target):
""" Updates all references in `.mo` and `.mos` files.
diff --git a/buildingspy/fmi/__init__.py b/buildingspy/fmi/__init__.py
index 4efbae7..7bb1d9d 100644
--- a/buildingspy/fmi/__init__.py
+++ b/buildingspy/fmi/__init__.py
@@ -58,8 +58,6 @@ def get_dependencies(fmu_file_name):
]
},
"InitialUnknowns": {
- "CPUtime": [],
- "EventCounter": [],
"der(x)": [
"u"
],
@@ -72,8 +70,6 @@ def get_dependencies(fmu_file_name):
]
},
"Outputs": {
- "CPUtime": [],
- "EventCounter": [],
"y1": [
"x"
],
@@ -120,8 +116,13 @@ def get_dependencies(fmu_file_name):
#this_root = outputs
for child in children:
variable = variable_names[int(child.attrib['index'])]
- dependencies[typ][variable] = []
- for ind_var in child.attrib['dependencies'].split(' '):
- if ind_var.strip() != "": # If variables depend on nothing, there will be an empty string
- dependencies[typ][variable].append(variable_names[int(ind_var)])
+ # Exclude CPUtime and EventCounter, which are written
+ # depending on the Dymola 2018FD01 configuration.
+ if variable not in ["CPUtime", "EventCounter"]:
+ dependencies[typ][variable] = []
+ for ind_var in child.attrib['dependencies'].split(' '):
+ # If variables depend on nothing, there will be an empty string, these
+ # are therefore excluded.
+ if ind_var.strip() != "":
+ dependencies[typ][variable].append(variable_names[int(ind_var)])
return dependencies
|
lbl-srg/BuildingsPy
|
ad2f3e7ffb0a01117e5f09ac498a87b5c02ca158
|
diff --git a/buildingspy/tests/test_development_error_dictionary.py b/buildingspy/tests/test_development_error_dictionary.py
index 393e2cc..ee9d12b 100644
--- a/buildingspy/tests/test_development_error_dictionary.py
+++ b/buildingspy/tests/test_development_error_dictionary.py
@@ -39,7 +39,8 @@ class Test_development_error_dictionary(unittest.TestCase):
'type inconsistent definition equations',
'unspecified initial conditions',
'unused connector',
- 'stateGraphRoot missing'])
+ 'stateGraphRoot missing',
+ 'mismatched displayUnits'])
self.assertEqual(len(k), len(k_expected), "Wrong number of keys.")
for i in range(len(k)):
@@ -63,7 +64,8 @@ class Test_development_error_dictionary(unittest.TestCase):
'Type inconsistent definition equation',
'Dymola has selected default initial condition',
'Warning: The following connector variables are not used in the model',
- "A \\\"stateGraphRoot\\\" component was automatically introduced."])
+ "A \\\"stateGraphRoot\\\" component was automatically introduced.",
+ "Mismatched displayUnit"])
self.assertEqual(len(k), len(k_expected), "Wrong number of tool messages.")
for i in range(len(k)):
|
In reference results, exclude CPUtime and EventCounter in FMI dependencies
These are only written depending on the configuration of Dymola. As they are only outputs of the solver, they should be excluded from the reference results.
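A minimal sketch of the exclusion the fix applies when collecting dependencies from the FMU model description; `children`, `variable_names` and the attribute names follow the patch above, while the wrapper function itself is illustrative and not a buildingspy API:
```python
# Sketch: skip solver bookkeeping variables when collecting FMI dependencies.
SOLVER_INTERNAL = {"CPUtime", "EventCounter"}

def collect_dependencies(children, variable_names):
    """Return {variable: [dependencies]}, excluding solver-internal outputs."""
    deps = {}
    for child in children:
        variable = variable_names[int(child.attrib['index'])]
        if variable in SOLVER_INTERNAL:
            continue  # written only under certain Dymola configurations
        deps[variable] = [
            variable_names[int(ind)]
            for ind in child.attrib['dependencies'].split(' ')
            if ind.strip() != ""  # empty string means "depends on nothing"
        ]
    return deps
```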
|
0.0
|
ad2f3e7ffb0a01117e5f09ac498a87b5c02ca158
|
[
"buildingspy/tests/test_development_error_dictionary.py::Test_development_error_dictionary::test_keys",
"buildingspy/tests/test_development_error_dictionary.py::Test_development_error_dictionary::test_tool_messages"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-12-05 15:34:13+00:00
|
bsd-3-clause
| 3,519 |
|
lbl-srg__BuildingsPy-249
|
diff --git a/buildingspy/development/error_dictionary_dymola.py b/buildingspy/development/error_dictionary_dymola.py
index c163591..6e24d20 100644
--- a/buildingspy/development/error_dictionary_dymola.py
+++ b/buildingspy/development/error_dictionary_dymola.py
@@ -26,7 +26,8 @@ class ErrorDictionary(ed.ErrorDictionary):
# Set the error dictionaries.
# Note that buildingspy_var needs to be a unique variable name.
self._error_dict["numerical Jacobians"] = {
- 'tool_message': "Number of numerical Jacobians:",
+ 'tool_message': r"Number of numerical Jacobians: (\d*)",
+ 'is_regex': True,
'counter': 0,
'buildingspy_var': "lJac",
'model_message': "Numerical Jacobian in '{}'.",
|
lbl-srg/BuildingsPy
|
22db44ff2a3b4d4bf6e2233c50d1cbf186bc8db4
|
diff --git a/buildingspy/development/regressiontest.py b/buildingspy/development/regressiontest.py
index 660f3fd..2adf08a 100644
--- a/buildingspy/development/regressiontest.py
+++ b/buildingspy/development/regressiontest.py
@@ -58,8 +58,8 @@ def runSimulation(worDir, cmd):
else:
return 0
except OSError as e:
- sys.stderr.write("Execution of '" + " ".join(map(str, cmd)) + " failed.\n"
- + "Working directory is '" + worDir + "'.")
+ sys.stderr.write("Execution of '" + " ".join(map(str, cmd)) + " failed.\n" +
+ "Working directory is '" + worDir + "'.")
raise(e)
except KeyboardInterrupt as e:
pro.kill()
@@ -1291,9 +1291,9 @@ len(yNew) = %d.""" % (filNam, varNam, len(tGriOld), len(tGriNew), len(yNew))
for i in range(len(yInt)):
errAbs[i] = abs(yOld[i] - yInt[i])
if np.isnan(errAbs[i]):
- raise ValueError('NaN in errAbs ' + varNam + " " + str(yOld[i])
- + " " + str(yInt[i]) + " i, N " + str(i) + " --:" + str(yInt[i - 1])
- + " ++:", str(yInt[i + 1]))
+ raise ValueError('NaN in errAbs ' + varNam + " " + str(yOld[i]) +
+ " " + str(yInt[i]) + " i, N " + str(i) + " --:" + str(yInt[i - 1]) +
+ " ++:", str(yInt[i + 1]))
if (abs(yOld[i]) > 10 * tol):
errRel[i] = errAbs[i] / abs(yOld[i])
else:
@@ -1324,8 +1324,8 @@ len(yNew) = %d.""" % (filNam, varNam, len(tGriOld), len(tGriNew), len(yNew))
"""
import numpy as np
if not (isinstance(dataSeries, np.ndarray) or isinstance(dataSeries, list)):
- raise TypeError("Program error: dataSeries must be a numpy.ndarr or a list. Received type "
- + str(type(dataSeries)) + ".\n")
+ raise TypeError("Program error: dataSeries must be a numpy.ndarr or a list. Received type " +
+ str(type(dataSeries)) + ".\n")
return (len(dataSeries) == 2)
def format_float(self, value):
@@ -1958,11 +1958,11 @@ len(yNew) = %d.""" % (filNam, varNam, len(tGriOld), len(tGriNew), len(yNew))
if counter > 0:
print(v['summary_message'].format(counter))
- self._reporter.writeOutput("Script that runs unit tests had "
- + str(self._reporter.getNumberOfWarnings())
- + " warnings and "
- + str(self._reporter.getNumberOfErrors())
- + " errors.\n")
+ self._reporter.writeOutput("Script that runs unit tests had " +
+ str(self._reporter.getNumberOfWarnings()) +
+ " warnings and " +
+ str(self._reporter.getNumberOfErrors()) +
+ " errors.\n")
sys.stdout.write("See '{}' for details.\n".format(self._simulator_log_file))
if self._reporter.getNumberOfErrors() > 0:
@@ -2065,6 +2065,26 @@ len(yNew) = %d.""" % (filNam, varNam, len(tGriOld), len(tGriNew), len(yNew))
"Output file of " + data['ScriptFile'] + " is excluded from result test.")
return ret_val
+ def _performTranslationErrorChecks(self, logFil, stat):
+ with open(logFil, mode="rt", encoding="utf-8-sig") as fil:
+ lines = fil.readlines()
+
+ for k, v in list(self._error_dict.get_dictionary().items()):
+ stat[k] = 0
+ for line in lines:
+ # use regex to extract first group and sum them in stat
+ if 'is_regex' in v and v['is_regex']:
+ import re
+ m = re.search(v["tool_message"], line)
+ if m is not None:
+ stat[k] = stat[k] + int(m.group(1))
+ # otherwise, default: count the number of line occurences
+ else:
+ if v["tool_message"] in line:
+ stat[k] = stat[k] + 1
+
+ return stat
+
def _checkSimulationError(self, errorFile):
""" Check whether the simulation had any errors, and
write the error messages to ``self._reporter``.
@@ -2107,11 +2127,14 @@ len(yNew) = %d.""" % (filNam, varNam, len(tGriOld), len(tGriNew), len(yNew))
else:
key = 'FMUExport'
- for k, v in list(self._error_dict.get_dictionary().items()):
- # For JModelica, we neither have simulate nor FMUExport
- if key in ele and ele[key][k] > 0:
- self._reporter.writeWarning(v["model_message"].format(ele[key]["command"]))
- self._error_dict.increment_counter(k)
+ if key in ele:
+ logFil = ele[key]["translationLog"]
+ ele[key] = self._performTranslationErrorChecks(logFil, ele[key])
+ for k, v in list(self._error_dict.get_dictionary().items()):
+ # For JModelica, we neither have simulate nor FMUExport
+ if ele[key][k] > 0:
+ self._reporter.writeWarning(v["model_message"].format(ele[key]["command"]))
+ self._error_dict.increment_counter(k)
if iChe > 0:
print("Number of models that failed check : {}".format(iChe))
@@ -2126,11 +2149,11 @@ len(yNew) = %d.""" % (filNam, varNam, len(tGriOld), len(tGriNew), len(yNew))
if counter > 0:
print(v['summary_message'].format(counter))
- self._reporter.writeOutput("Script that runs unit tests had "
- + str(self._reporter.getNumberOfWarnings())
- + " warnings and "
- + str(self._reporter.getNumberOfErrors())
- + " errors.\n")
+ self._reporter.writeOutput("Script that runs unit tests had " +
+ str(self._reporter.getNumberOfWarnings()) +
+ " warnings and " +
+ str(self._reporter.getNumberOfErrors()) +
+ " errors.\n")
sys.stdout.write("See '{}' for details.\n".format(self._simulator_log_file))
if self._reporter.getNumberOfErrors() > 0:
@@ -2237,44 +2260,11 @@ len(yNew) = %d.""" % (filNam, varNam, len(tGriOld), len(tGriNew), len(yNew))
The commands in the script depend on the tool: 'dymola', 'jmodelica' or 'omc'
"""
- def _write_translation_checks(runFil, values):
- template = r"""
-if Modelica.Utilities.Files.exist("{model_name}.translation.log") then
- lines=Modelica.Utilities.Streams.readFile("{model_name}.translation.log");
-else
- Modelica.Utilities.Streams.print("{model_name}.translation.log was not generated.", "{model_name}.log");
- lines=String();
-end if;
-
-// Count the zero numerical Jacobians separately
-iJac=sum(Modelica.Utilities.Strings.count(lines, "Number of numerical Jacobians: 0"));
-"""
- runFil.write(template.format(**values))
-
- # Do the other tests
- for _, v in list(self._error_dict.get_dictionary().items()):
- template = r""" {}=sum(Modelica.Utilities.Strings.count(lines, "{}"));
-"""
- runFil.write(template.format(v["buildingspy_var"], v["tool_message"]))
-
def _write_translation_stats(runFil, values):
- for k, v in list(self._error_dict.get_dictionary().items()):
- if k != "numerical Jacobians":
- template = r"""
-Modelica.Utilities.Streams.print(" \"{}\" : " + String({}) + ",", "{}");"""
- runFil.write(template.format(k, v["buildingspy_var"], values['statisticsLog']))
-
- # Write the numerical Jacobians separately as this requires subtraction of two counters.
- # As this is the last entry, there also is no terminating comma.
- template = r"""
-Modelica.Utilities.Streams.print(" \"numerical Jacobians\" : " + String(lJac-iJac), "{statisticsLog}");
-"""
- runFil.write(template.format(**values))
-
# Close the bracket for the JSON object
- runFil.write("""Modelica.Utilities.Streams.print(" }", """
- + '"' + values['statisticsLog'] + '"' + ");\n")
+ runFil.write("""Modelica.Utilities.Streams.print(" }", """ +
+ '"' + values['statisticsLog'] + '"' + ");\n")
def _print_end_of_json(isLastItem, fileHandle, logFileName):
if isLastItem:
@@ -2393,6 +2383,13 @@ Modelica.Utilities.Streams.print(" \"numerical Jacobians\" : " + String(
"statisticsLog": self._statistics_log.replace(
"\\",
"/"),
+ "translationLog": os.path.join(
+ self._temDir[iPro],
+ self.getLibraryName(),
+ self._data[i]['model_name'] +
+ ".translation.log").replace(
+ "\\",
+ "/"),
"simulatorLog": self._simulator_log_file.replace(
"\\",
"/")}
@@ -2459,12 +2456,11 @@ Modelica.Utilities.Streams.print(" \"numerical Jacobians\" : " + String(
"""
runFil.write(template.format(**values))
- _write_translation_checks(runFil, values)
-
template = r"""
Modelica.Utilities.Streams.print(" \"simulate\" : {{", "{statisticsLog}");
Modelica.Utilities.Streams.print(" \"command\" : \"RunScript(\\\"Resources/Scripts/Dymola/{scriptFile}\\\");\",", "{statisticsLog}");
- Modelica.Utilities.Streams.print(" \"result\" : " + String(iSuc > 0) + ",", "{statisticsLog}");
+ Modelica.Utilities.Streams.print(" \"translationLog\" : \"{translationLog}\",", "{statisticsLog}");
+ Modelica.Utilities.Streams.print(" \"result\" : " + String(iSuc > 0), "{statisticsLog}");
"""
runFil.write(template.format(**values))
@@ -2500,12 +2496,11 @@ Modelica.Utilities.Streams.print(" \"numerical Jacobians\" : " + String(
"""
runFil.write(template.format(**values))
- _write_translation_checks(runFil, values)
-
template = r"""
Modelica.Utilities.Streams.print(" \"FMUExport\" : {{", "{statisticsLog}");
Modelica.Utilities.Streams.print(" \"command\" :\"RunScript(\\\"Resources/Scripts/Dymola/{scriptFile}\\\");\",", "{statisticsLog}");
- Modelica.Utilities.Streams.print(" \"result\" : " + String(iSuc > 0) + ",", "{statisticsLog}");
+ Modelica.Utilities.Streams.print(" \"translationLog\" : \"{translationLog}\",", "{statisticsLog}");
+ Modelica.Utilities.Streams.print(" \"result\" : " + String(iSuc > 0), "{statisticsLog}");
"""
runFil.write(template.format(**values))
@@ -2831,6 +2826,13 @@ Modelica.Utilities.Streams.print(" \"numerical Jacobians\" : " + String(
else:
self._check_jmodelica_runs()
+ # Check for errors
+ if self._modelica_tool == 'dymola':
+ if retVal == 0:
+ retVal = self._checkSimulationError(self._simulator_log_file)
+ else:
+ self._checkSimulationError(self._simulator_log_file)
+
# Delete temporary directories, or write message that they are not deleted
for d in self._temDir:
@@ -2839,13 +2841,6 @@ Modelica.Utilities.Streams.print(" \"numerical Jacobians\" : " + String(
else:
print("Did not delete temporary directory {}".format(d))
- # Check for errors
- if self._modelica_tool == 'dymola':
- if retVal == 0:
- retVal = self._checkSimulationError(self._simulator_log_file)
- else:
- self._checkSimulationError(self._simulator_log_file)
-
# Print list of files that may be excluded from unit tests
if len(self._exclude_tests) > 0:
print("*** Warning: The following files may be excluded from the regression tests:\n")
@@ -3041,8 +3036,8 @@ Modelica.Utilities.Streams.print(" \"numerical Jacobians\" : " + String(
return retcode
except OSError as e:
- raise OSError("Execution of omc +d=initialization " + mosfile + " failed.\n"
- + "Working directory is '" + worDir + "'.")
+ raise OSError("Execution of omc +d=initialization " + mosfile + " failed.\n" +
+ "Working directory is '" + worDir + "'.")
else:
# process the log file
print("Logfile created: {}".format(logFilNam))
diff --git a/buildingspy/tests/test_development_error_dictionary.py b/buildingspy/tests/test_development_error_dictionary.py
index 5c2b000..b8a62ee 100644
--- a/buildingspy/tests/test_development_error_dictionary.py
+++ b/buildingspy/tests/test_development_error_dictionary.py
@@ -55,7 +55,7 @@ class Test_development_error_dictionary(unittest.TestCase):
'Warning: Failed to interpret experiment annotation',
'which was not found',
'The model contained invalid connect statements.',
- 'Number of numerical Jacobians:',
+ r'Number of numerical Jacobians: (\d*)',
"Warning: The following parameters don't have any value, only a start value",
"Redundant consistent initial conditions:",
"Redundant connection",
|
slow parsing using Modelica.Utilities.Strings.Count
BuildingsPy relies on the use of `Modelica.Utilities.Strings.Count` for counting the number of occurrences of specific substrings in the translation log. This implementation seems quite inefficient, which is a problem for large models that have a translation log of ~100 kB.
To illustrate the problem:
For a specific file that I am working on, executing `iJac=sum(Modelica.Utilities.Strings.count(lines, "Number of numerical Jacobians: 0"));` takes about one second.
Using python
```
with open("IDEAS.Examples.PPD12.Ventilation.translation.log") as f:
test=0
for lin in f.readlines():
test+=lin.count("Number of numerical Jacobians: 0")
print test
```
this takes 0.01 seconds. When I modify the file such that all newlines are removed, the computation time increases to 13 s in Modelica while staying at 0.01 seconds in Python.
Longer lines are thus treated less efficiently, which is problematic in our models since we often get long output in the form of
```
The model has
33036+2*valBed1.val.filter.order+valBat2.val.filter.order+valBat2.val.filter.order +valBat1.val.filter.order+valBat1.val.filter.order+valBed2.val.filter.order+ valBed2.val.filter.order+valBed3.val.filter.order+valBed3.val.filter.order+ valGnd.val.filter.order+valGnd.val.filter.order+max(com1.layMul.monLay[1].monLayDyn.nStaMin, 2)+(if com1.layMul.monLay[1].monLayDyn.addRes_b then max(com1.layMul.monLay[1].monLayDyn.nStaMin, 2) else max(max(com1.layMul.monLay[1].monLayDyn.nStaMin, 2)-1, 1))+max( com1.layMul.monLay[2].monLayDyn.nStaMin, 2)+(if com1.layMul.monLay[2].monLayDyn.addRes_b then max(com1.layMul.monLay[2].monLayDyn.nStaMin, 2) else max(max( com1.layMul.monLay[2].monLayDyn.nStaMin, 2)-1, 1))+max(out1.layMul.monLay[1].monLayDyn.nStaMin, 5)+(if out1.layMul.monLay[1].monLayDyn.addRes_b then max(out1.layMul.monLay[1].monLayDyn.nStaMin, 5) else max(max(out1.layMul.monLay[1].monLayDyn.nStaMin, 5)-1, 1))+max( out1.layMul.monLay[2].monLayDyn.nStaMin, 2)+(if out1.layMul.monLay[2].monLayDyn.addRes_b then max(out1.layMul.monLay[2].monLayDyn.nStaMin, 2) else max(max( out1.layMul.monLay[2].monLayDyn.nStaMin, 2)-1, 1))+max(cei2.layMul.monLay[1].monLayDyn.nStaMin, 2)+(if cei2.layMul.monLay[1].monLayDyn.addRes_b then max(cei2.layMul.monLay[1].monLayDyn.nStaMin, 2) else max
...
...
scalar equations.
```
Looking at the Modelica code, this may be caused by the implementation of `Find()`, which creates a substring of the checked line and then verifies whether the substring and checked string are equal. This is clearly not the most efficient implementation and it causes a lot of overhead to allocate memory and copy strings.
If we fix this then the unit tests may complete a lot faster, but I'm not sure what is the most appropriate way:
1) Get MSL to fix this (propose a new implementation for `Find()`).
2) Implement our own function and include it in Buildingspy. I think that this would require the unit test script to load an additional package that includes the new function.
3) Do the post-processing of the log file directly in python.
I think we should do 3)?
Edit: I removed the occurrences of `Count()` from buildingspy, which reduced the unit test time for my model from 18m to 3.5m. The other unit tests are not affected as much.
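A minimal sketch of option 3 — post-processing the translation log in Python in a single pass; the log file name and the Jacobian message come from this issue, and the pattern dictionary is illustrative (the actual fix lives in `_performTranslationErrorChecks` in the patch above):
```python
import re

# Sketch: count each tool message while reading the translation log only once.
patterns = {
    "numerical Jacobians": re.compile(r"Number of numerical Jacobians: (\d+)"),
}

counts = {key: 0 for key in patterns}
with open("IDEAS.Examples.PPD12.Ventilation.translation.log") as log:
    for line in log:
        for key, pattern in patterns.items():
            match = pattern.search(line)
            if match:
                counts[key] += int(match.group(1))
print(counts)
```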
|
0.0
|
22db44ff2a3b4d4bf6e2233c50d1cbf186bc8db4
|
[
"buildingspy/tests/test_development_error_dictionary.py::Test_development_error_dictionary::test_tool_messages"
] |
[
"buildingspy/tests/test_development_error_dictionary.py::Test_development_error_dictionary::test_keys"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2019-02-20 01:47:59+00:00
|
bsd-3-clause
| 3,520 |
|
lbl-srg__BuildingsPy-377
|
diff --git a/buildingspy/CHANGES.txt b/buildingspy/CHANGES.txt
index ebf666b..32cc572 100644
--- a/buildingspy/CHANGES.txt
+++ b/buildingspy/CHANGES.txt
@@ -3,7 +3,7 @@ BuildingsPy Changelog
Version 2.2.0, xxx, 2020 -- Release 2.2
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-- xxx
+- Added check for wrong derivative implementation that is reported by Dymola 2021x (https://github.com/lbl-srg/BuildingsPy/issues/376).
Version 2.1.0, May 28, 2020 -- Release 2.1
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/buildingspy/development/error_dictionary_dymola.py b/buildingspy/development/error_dictionary_dymola.py
index 1fea2f4..3579230 100644
--- a/buildingspy/development/error_dictionary_dymola.py
+++ b/buildingspy/development/error_dictionary_dymola.py
@@ -24,68 +24,58 @@ class ErrorDictionary(ed.ErrorDictionary):
"""
self._error_dict = dict()
# Set the error dictionaries.
- # Note that buildingspy_var needs to be a unique variable name.
self._error_dict["numerical Jacobians"] = {
'tool_message': r"Number of numerical Jacobians: (\d*)",
'is_regex': True,
'counter': 0,
- 'buildingspy_var': "lJac",
'model_message': "Numerical Jacobian in '{}'.",
'summary_message': "Number of models with numerical Jacobian : {}\n"}
self._error_dict["unused connector"] = {
'tool_message': "Warning: The following connector variables are not used in the model",
'counter': 0,
- 'buildingspy_var': "iUnuCon",
'model_message': "Unused connector variables in '{}'.\n",
'summary_message': "Number of models with unused connector variables : {}\n"}
self._error_dict["parameter with start value only"] = {
'tool_message': "Warning: The following parameters don't have any value, only a start value",
'counter': 0,
- 'buildingspy_var': "iParNoSta",
'model_message': "Parameter with start value only in '{}'.\n",
'summary_message': "Number of models with parameters that only have a start value: {}\n"}
self._error_dict["redundant consistent initial conditions"] = {
'tool_message': "Redundant consistent initial conditions:",
'counter': 0,
- 'buildingspy_var': "iRedConIni",
'model_message': "Redundant consistent initial conditions in '{}'.\n",
'summary_message': "Number of models with redundant consistent initial conditions: {}\n"}
self._error_dict["redundant connection"] = {
'tool_message': "Redundant connection",
'counter': 0,
- 'buildingspy_var': "iRedConSta",
'model_message': "Redundant connections in '{}'.\n",
'summary_message': "Number of models with redundant connections : {}\n"}
self._error_dict["type inconsistent definition equations"] = {
'tool_message': "Type inconsistent definition equation",
'counter': 0,
- 'buildingspy_var': "iTypIncDef",
'model_message': "Type inconsistent definition equations in '{}'.\n",
'summary_message': "Number of models with type inconsistent definition equations : {}\n"}
self._error_dict["type incompatibility"] = {
'tool_message': "but they must be compatible",
'counter': 0,
- 'buildingspy_var': "iTypInc",
'model_message': "Type incompatibility in '{}'.\n",
'summary_message': "Number of models with incompatible types : {}\n"}
self._error_dict["unspecified initial conditions"] = {
'tool_message': "Dymola has selected default initial condition",
'counter': 0,
- 'buildingspy_var': "iUnsIni",
'model_message': "Unspecified initial conditions in '{}'.\n",
'summary_message': "Number of models with unspecified initial conditions : {}\n"}
self._error_dict["invalid connect"] = {
'tool_message': "The model contained invalid connect statements.",
'counter': 0,
- 'buildingspy_var': "iInvCon",
'model_message': "Invalid connect statements in '{}'.\n",
'summary_message': "Number of models with invalid connect statements : {}\n"}
@@ -95,21 +85,18 @@ class ErrorDictionary(ed.ErrorDictionary):
self._error_dict["differentiated if"] = {
'tool_message': "Differentiating (if",
'counter': 0,
- 'buildingspy_var': "iDiffIf",
'model_message': "Differentiated if-expression under assumption it is smooth in '{}'.\n",
'summary_message': "Number of models with differentiated if-expression : {}\n"}
self._error_dict["redeclare non-replaceable"] = \
{'tool_message': "Warning: Redeclaration of non-replaceable requires type equivalence",
'counter': 0,
- 'buildingspy_var': "iRedNon",
'model_message': "Redeclaration of non-replaceable class in '{}'.\n",
'summary_message': "Number of models with redeclaration of non-replaceable class : {}\n"}
self._error_dict["experiment annotation"] = {
'tool_message': "Warning: Failed to interpret experiment annotation",
'counter': 0,
- 'buildingspy_var': "iExpAnn",
'model_message': "Failed to interpret experiment annotation in '{}'.\n",
'summary_message': "Number of models with wrong experiment annotation : {}\n"}
@@ -119,27 +106,33 @@ class ErrorDictionary(ed.ErrorDictionary):
self._error_dict["file not found"] = {
'tool_message': "which was not found",
'counter': 0,
- 'buildingspy_var': "iFilNotFou",
'model_message': "File not found in '{}'.\n",
'summary_message': "Number of models with file not found : {}\n"}
self._error_dict["stateGraphRoot missing"] = {
'tool_message': "A \\\"stateGraphRoot\\\" component was automatically introduced.",
'counter': 0,
- 'buildingspy_var': "iStaGraRooMis",
'model_message': "\"inner Modelica.StateGraph.StateGraphRoot\" is missing in '{}'.\n",
'summary_message': "Number of models with missing StateGraphRoot : {}\n"}
self._error_dict["mismatched displayUnits"] = {
'tool_message': "Mismatched displayUnit",
'counter': 0,
- 'buildingspy_var': "iMisDisUni",
'model_message': "Mismatched displayUnit in '{}'.\n",
'summary_message': "Number of models with mismatched displayUnit : {}\n"}
self._error_dict["suspicious attributes"] = {
'tool_message': "which is suspicious",
'counter': 0,
- 'buildingspy_var': "iSus",
'model_message': "Check min and max attributes in '{}'.\n",
'summary_message': "Number of models with suspicious attributes (likely min/max) : {}\n"}
+
+ # This captures
+ # Warning: function Buildings.Utilities.Psychrometrics.Functions.saturationPressureLiquid
+ # specified derivative Buildings.Utilities.Psychrometrics.Functions.BaseClasses.der_saturationPressureLiquid,
+ # but argument TSat function did not match argument Tsat of derivative
+ self._error_dict["wrong derivative specification"] = {
+ 'tool_message': "did not match argument",
+ 'counter': 0,
+ 'model_message': "Check specification of derivative of '{}'.\n",
+ 'summary_message': "Number of models with wrong derivative specification : {}\n"}
|
lbl-srg/BuildingsPy
|
a27e52fd0339a57d87290abef8878300b1e74931
|
diff --git a/buildingspy/tests/test_development_error_dictionary.py b/buildingspy/tests/test_development_error_dictionary.py
index 73b7aa1..d2ae84f 100644
--- a/buildingspy/tests/test_development_error_dictionary.py
+++ b/buildingspy/tests/test_development_error_dictionary.py
@@ -41,7 +41,8 @@ class Test_development_error_dictionary(unittest.TestCase):
'unused connector',
'stateGraphRoot missing',
'mismatched displayUnits',
- 'suspicious attributes'])
+ 'suspicious attributes',
+ 'wrong derivative specification'])
self.assertEqual(len(k), len(k_expected), "Wrong number of keys.")
for i in range(len(k)):
@@ -67,7 +68,8 @@ class Test_development_error_dictionary(unittest.TestCase):
'Warning: The following connector variables are not used in the model',
"A \\\"stateGraphRoot\\\" component was automatically introduced.",
"Mismatched displayUnit",
- "which is suspicious"])
+ "which is suspicious",
+ "did not match argument"])
self.assertEqual(len(k), len(k_expected), "Wrong number of tool messages.")
for i in range(len(k)):
|
check for wrong derivative implementation
Dymola 2021x beta2 issues warnings such as
```
Warning: function Buildings.Utilities.Psychrometrics.Functions.saturationPressureLiquid specified derivative Buildings.Utilities.Psychrometrics.Functions.BaseClasses.der_saturationPressureLiquid, but
argument TSat function did not match argument Tsat of derivative.
```
This issue is to catch these warnings and fail the unit tests.
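A minimal sketch of how such a warning can be counted by scanning the translation log for the quoted substring; the tool message is the one added in the patch above, the helper function is illustrative and not part of buildingspy:
```python
# Sketch: count Dymola warnings about mismatched derivative arguments.
TOOL_MESSAGE = "did not match argument"

def count_wrong_derivative_warnings(log_file):
    with open(log_file, mode="rt", encoding="utf-8-sig") as fil:
        return sum(1 for line in fil if TOOL_MESSAGE in line)
```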
|
0.0
|
a27e52fd0339a57d87290abef8878300b1e74931
|
[
"buildingspy/tests/test_development_error_dictionary.py::Test_development_error_dictionary::test_keys",
"buildingspy/tests/test_development_error_dictionary.py::Test_development_error_dictionary::test_tool_messages"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-09-12 15:30:50+00:00
|
bsd-3-clause
| 3,521 |
|
lbl-srg__BuildingsPy-383
|
diff --git a/buildingspy/development/refactor.py b/buildingspy/development/refactor.py
index a4cb721..216d16c 100644
--- a/buildingspy/development/refactor.py
+++ b/buildingspy/development/refactor.py
@@ -16,6 +16,7 @@
"""
import os
+import re
__all__ = ["create_modelica_package", "move_class", "write_package_order"]
@@ -266,20 +267,22 @@ def replace_text_in_file(file_name, old, new, isRegExp=False):
If `isRegExp==True`, then old must be a regular expression, and
`re.sub(old, new, ...)` is called where `...` is each line of the file.
"""
- import re
# Read source file, store the lines and update the content of the lines
+ modified = False
with open(file_name, mode="r", encoding="utf-8-sig") as f_sou:
lines = list()
- for _, lin in enumerate(f_sou):
+ for lin in f_sou:
if isRegExp:
- lin = re.sub(old, new, lin)
+ lin1 = re.sub(old, new, lin)
else:
- lin = lin.replace(old, new)
- lines.append(lin)
-
+ lin1 = lin.replace(old, new)
+ lines.append(lin1)
+ if lin1 != lin:
+ modified = True
# Write the lines to the new file
- with open(file_name, mode="w", encoding="utf-8") as f_des:
- f_des.writelines(lines)
+ if modified:
+ with open(file_name, mode="w", encoding="utf-8") as f_des:
+ f_des.writelines(lines)
def _move_mo_file(source, target):
@@ -491,7 +494,6 @@ def write_package_order(directory=".", recursive=False):
def _get_package_list_for_file(directory, file_name):
""" Gets the package list for the file `directory/file_name`
"""
- import re
pacLis = list()
@@ -739,13 +741,48 @@ def _update_all_references(source, target):
_updateFile(ele)
+def _getShortName(filePath, classPath):
+ """Returns the shortest reference to a class within a file.
+
+ Args:
+ filePath: file path relative to the library root path (e.g., `Buildings/package.mo`).
+ classPath: full library path of the class to be shortened (e.g., `Buildings.Class`).
+ """
+
+ pos = re.search(r'\w', filePath).start()
+ splFil = filePath[pos:].split(os.path.sep)
+ splCla = classPath.split(".")
+ shortSource = None
+ for i in range(min(len(splFil), len(splCla))):
+ if splFil[i] != splCla[i]:
+ # See https://github.com/lbl-srg/BuildingsPy/issues/382 for the rationale
+ # behind the code below.
+ idx_start = i
+ if i > 0:
+ for k in range(i + 1, len(splFil)):
+ lookup_path = os.path.sep.join(splFil[:k])
+ if splCla[i] in [re.sub(r'\.mo', '', el) for el in os.listdir(lookup_path)]:
+ idx_start = i - 1
+ break
+ shortSource = '.'.join(splCla[idx_start:len(splCla)])
+ # shortSource starts with a space as instance names are
+ # preceded with a space.
+ shortSource = ' ' + shortSource
+ break
+ return shortSource
+
+
def _updateFile(arg):
""" Update all `.mo`, `package.order` and reference result file
- The argument `arg` is a list where the first item is
- the relative file name (e.g., `./Buildings/package.mo`),
- the second element is the class name of the source and
- the third element is the class name of the target.
+ The argument `arg` is a list providing
+ [
+ the path of the package directory where the file is located, relative
+ to the current working directory (e.g., `./Buildings` when working from `~/modelica-buildings/.`),
+ the file name (e.g., `package.mo`),
+ the full library path of the source class (e.g., `Buildings.SourceClass`),
+ the full library path of the target class (e.g., `Buildings.TargetClass`),
+ ]
This function has been implemented as doing the text replace is time
consuming and hence this is done in parallel.
@@ -753,25 +790,6 @@ def _updateFile(arg):
:param arg: A list with the arguments.
"""
- def _getShortName(fileName, className):
- import re
-
- pos = re.search(r'\w', fileName).start()
- splFil = fileName[pos:].split(os.path.sep)
- splCla = className.split(".")
- shortSource = None
- for i in range(min(len(splFil), len(splCla))):
- if splFil[i] != splCla[i]:
- # shortSource starts with a space as instance names are
- # preceeded with a space
- shortSource = " "
- for j in range(i, len(splCla)):
- shortSource += splCla[j] + "."
- # Remove last dot
- shortSource = shortSource[:-1]
- break
- return shortSource
-
root = arg[0]
fil = arg[1]
source = arg[2]
@@ -803,6 +821,7 @@ def _updateFile(arg):
# with the new name.
# The same is done with the target name so that short instance names
# remain short instance names.
+
shortSource = _getShortName(srcFil, source)
shortTarget = _getShortName(srcFil, target)
if shortSource is None or shortTarget is None:
@@ -810,13 +829,16 @@ def _updateFile(arg):
# If shortSource is only one class (e.g., "xx" and not "xx.yy",
# then this is also used in constructs such as "model xx" and "end xx;"
- # Hence, we only replace it if it is proceeded only by empty characters, and nothing else.
+ # Hence, we only replace it if it is
+ # . preceded by empty characters, and
+ # . followed by some optional empty characters and \s or [ or , or ;.
+ # (We use a "negative lookbehind assertion" to do so.)
if "." in shortSource:
replace_text_in_file(srcFil, shortSource, shortTarget, isRegExp=False)
else:
- regExp = r"(?!\w)" + shortTarget
- replace_text_in_file(srcFil, regExp, shortTarget, isRegExp=True)
-
+ regExpSource = r'(?<!\w)' + shortSource + r'(\s*(\s|\[|,|;))'
+ regExpTarget = shortTarget + r'\1'
+ replace_text_in_file(srcFil, regExpSource, regExpTarget, isRegExp=True)
# Replace the hyperlinks, without the top-level library name.
# This updates for example the RunScript command that points to
# "....Dymola/Fluid/..."
|
lbl-srg/BuildingsPy
|
1a1967f51e19cd0ae88e766b28681ecfbf5ac263
|
diff --git a/buildingspy/development/regressiontest.py b/buildingspy/development/regressiontest.py
index 171ee2e..dbc0a12 100644
--- a/buildingspy/development/regressiontest.py
+++ b/buildingspy/development/regressiontest.py
@@ -1409,7 +1409,8 @@ class Tester(object):
errAbs[i] = abs(yOld[i] - yInt[i])
if np.isnan(errAbs[i]):
raise ValueError('NaN in errAbs ' + varNam + " " + str(yOld[i])
- + " " + str(yInt[i]) + " i, N " + str(i) + " --:" + str(yInt[i - 1])
+ + " " + str(yInt[i]) + " i, N " + str(i) +
+ " --:" + str(yInt[i - 1])
+ " ++:", str(yInt[i + 1]))
if (abs(yOld[i]) > 10 * tol):
errRel[i] = errAbs[i] / abs(yOld[i])
diff --git a/buildingspy/tests/test_development_refactor.py b/buildingspy/tests/test_development_refactor.py
index 11c13c4..7e15a5b 100644
--- a/buildingspy/tests/test_development_refactor.py
+++ b/buildingspy/tests/test_development_refactor.py
@@ -75,6 +75,36 @@ class Test_development_refactor(unittest.TestCase):
self.assertEqual(r.get_modelica_file_name("Buildings.Rooms.MixedAir"),
os.path.join("Buildings", "Rooms", "MixedAir.mo"))
+ def test_getShortName(self):
+ import os
+ import buildingspy.development.refactor as r
+
+ workdir = os.getcwd()
+ os.chdir(os.path.join("buildingspy", "tests"))
+ filePath = 'MyModelicaLibrary/Examples/FMUs/Gain.mo'
+ self.assertEqual(
+ r._getShortName(
+ filePath,
+ 'MyModelicaLibrary.Examples.IntegratorGain'
+ ),
+ ' Examples.IntegratorGain'
+ )
+ self.assertEqual(
+ r._getShortName(
+ filePath,
+ 'MyModelicaLibrary.Examples.Test'
+ ),
+ ' Test'
+ )
+ self.assertEqual(
+ r._getShortName(
+ filePath,
+ 'MyModelicaLibrary.Examples.FMUs.IntegratorGain'
+ ),
+ ' IntegratorGain'
+ )
+ os.chdir(workdir)
+
if __name__ == '__main__':
unittest.main()
|
Bug in development.refactor.move_class when updating class path
The function `_getShortName` converts the class reference `A.B.C.X.Y` in the file `A/B/C/D/E.mo` into `X.Y`.
This is incorrect in the case where a subpackage `X` also exists under `D` (typically `BaseClasses`).
In that case the function should return `C.X.Y`.
However, the class reference `A.B.C.D.X.Y` should indeed be converted into `X.Y`.
So the logic is as follows.
1. Find the first element of the path that differs between the class and the file (`X` in the above example).
2. Test if a class with that name (`X`) exists at any level below, in reference to the file path (under `A/B/C/D/.` in the above example).
- `false`: the short class name should start with that element (`X`).
- `true`: the short class name should start with the prior element (`C`).
In addition, in the function `_updateFile`
```
if "." in shortSource:
replace_text_in_file(srcFil, shortSource, shortTarget, isRegExp=False)
else:
regExp = r"(?!\w)" + shortTarget
replace_text_in_file(srcFil, regExp, shortTarget, isRegExp=True)
```
`regExp = r"(?!\w)" + shortTarget` should be `regExp = r"(?!\w)" + shortSource`.
EDIT: Also the _negative lookahead assertion_ `(?!\w)` has no effect if not preceded with `^` for instance. So
```
re.sub(r'(?!\w) Class', ' Test', 'model Class')
```
returns `model Test` which is the undesired behavior. Better use the _negative lookbehind assertion_ `(?<!\w)`.
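A short demonstration of the difference described in the EDIT, using only the standard `re` module and the strings from the example:
```python
import re

# Negative lookahead: checked before the leading space; a space is not a word
# character, so the assertion always holds and 'model Class' is (wrongly) rewritten.
print(re.sub(r'(?!\w) Class', ' Test', 'model Class'))   # -> 'model Test'

# Negative lookbehind: the character before the match is the 'l' of 'model',
# a word character, so the unwanted substitution is suppressed.
print(re.sub(r'(?<!\w) Class', ' Test', 'model Class'))  # -> 'model Class'
```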
|
0.0
|
1a1967f51e19cd0ae88e766b28681ecfbf5ac263
|
[
"buildingspy/tests/test_development_refactor.py::Test_development_refactor::test_getShortName",
"buildingspy/tests/test_development_refactor.py::Test_development_refactor::test_write_package_order"
] |
[
"buildingspy/tests/test_development_refactor.py::Test_development_refactor::test_get_modelica_file_name",
"buildingspy/tests/test_development_refactor.py::Test_development_refactor::test_sort_package_order"
] |
{
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-10-30 23:44:45+00:00
|
bsd-3-clause
| 3,522 |
|
lbl-srg__BuildingsPy-388
|
diff --git a/buildingspy/CHANGES.txt b/buildingspy/CHANGES.txt
index ac2406f..e4bb144 100644
--- a/buildingspy/CHANGES.txt
+++ b/buildingspy/CHANGES.txt
@@ -4,6 +4,7 @@ BuildingsPy Changelog
Version 2.2.0, xxx, 2020 -- Release 2.2
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- xxx
+- Corrected generation of package.order for constant arrays (https://github.com/lbl-srg/BuildingsPy/issues/387)
- Refactored class buildingspy.simulate.Dymola, and added buildingspy.simulate.Optimica
- Added check for wrong derivative implementation that is reported by Dymola 2021x (https://github.com/lbl-srg/BuildingsPy/issues/376).
diff --git a/buildingspy/development/refactor.py b/buildingspy/development/refactor.py
index 216d16c..314f131 100644
--- a/buildingspy/development/refactor.py
+++ b/buildingspy/development/refactor.py
@@ -491,6 +491,25 @@ def write_package_order(directory=".", recursive=False):
filPac.write(p[1] + "\n")
+def _get_constants(lines):
+ """ Get a list with all constants.
+
+ :param: lines All lines of the Modelica file.
+ """
+ import re
+ # Constants can be "constant Real n = ..." or "constant someClass n(..."
+ # or "constant Real n[:] = ..." or or "constant Real n[4] = ..."
+ # See also https://regex101.com/r/cD5nE0/2 for testing
+ f = re.findall(
+ r"\s*constant\s+[\w+\.]+\s+(\w+)(\[\w+\]|\[\s*[\w:]\s*(,\s*[\w:]\s*)*\])?\s*[=\(]",
+ lines,
+ re.MULTILINE)
+ r = []
+ for ele in f:
+ r.append(ele[0])
+ return r
+
+
def _get_package_list_for_file(directory, file_name):
""" Gets the package list for the file `directory/file_name`
"""
@@ -509,10 +528,7 @@ def _get_package_list_for_file(directory, file_name):
# They need to be added to the package.order as well.
with open(os.path.join(directory, file_name), mode="r", encoding="utf-8-sig") as fil:
lines = fil.read()
- # Constants can be 'constant Real n = ..." or "constant someClass n(..."
- con = re.findall(
- r"\s*constant\s+[a-zA-Z0-9_\.]+\s+(\w+)\s*[=\(]", lines, re.MULTILINE)
-# con=re.search(r"constant\s+\w+\s+(\w+)\s*=", lines, re.MULTILINE);
+ con = _get_constants(lines)
for ele in con:
# Found a constant whose name is in con.group(1)
pacLis.append([__CON, ele])
|
lbl-srg/BuildingsPy
|
c047e118c177d637f1729c2fa8240ac7e649154a
|
diff --git a/buildingspy/tests/test_development_refactor.py b/buildingspy/tests/test_development_refactor.py
index 7e15a5b..0d81f4f 100644
--- a/buildingspy/tests/test_development_refactor.py
+++ b/buildingspy/tests/test_development_refactor.py
@@ -69,6 +69,36 @@ class Test_development_refactor(unittest.TestCase):
self.assertEqual(pac_lis, correct, "Parsing package.order failed.")
+ def test_get_constants_non_empty(self):
+ import buildingspy.development.refactor as r
+
+ lines = """
+ constant Real a = 1 "some text";
+ constant Real b = 1;
+ constant Real A = 1;
+ constant Real B[2] = {1, 2};
+ constant Real C[:] = {1, 2};
+ constant Real D[1,2] = {{1}, {1, 2}};
+ constant Real E[:,:] = {{1}, {1, 2}};
+ not_a_constant f = 1;
+ """
+ con = r._get_constants(lines)
+ self.assertEqual(con, ['a', 'b', 'A', 'B', 'C', 'D', 'E'], "Failed to get all constants.")
+
+ def test_get_constants_empty(self):
+ import buildingspy.development.refactor as r
+
+ lines = """
+
+ """
+ con = r._get_constants(lines)
+ for ele in con:
+ print(f"--{ele}--")
+ self.assertEqual(
+ con,
+ [],
+ "Failed to get all constants for a file content with no constants.")
+
def test_get_modelica_file_name(self):
import os
import buildingspy.development.refactor as r
|
Package.order misses constants that are arrays
This issue is to correct the generation of `package.order` for packages that have constant arrays, such as
```modelica
final constant Real A[:] = {-1.721781e2, 2.381558e-1, -4.329207e-4, -6.241072e-7}
"Coefficients A for Martin-Hou equation of state";
```
This is required for https://github.com/ibpsa/modelica-ibpsa/pull/1415 to be loaded in OMEdit without a warning.
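A small illustrative check of the pattern introduced in the patch above; the Modelica snippet below, including the type name, is made up for the example:
```python
import re

lines = """
  constant Real a = 1 "some text";
  final constant Real A[:] = {-1.721781e2, 2.381558e-1};
  constant Modelica.SIunits.Temperature T0 = 273.15;
"""

# Matches scalar constants as well as array constants such as 'A[:]' or 'D[1,2]'.
pattern = r"\s*constant\s+[\w+\.]+\s+(\w+)(\[\w+\]|\[\s*[\w:]\s*(,\s*[\w:]\s*)*\])?\s*[=\(]"
print([m[0] for m in re.findall(pattern, lines, re.MULTILINE)])  # -> ['a', 'A', 'T0']
```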
|
0.0
|
c047e118c177d637f1729c2fa8240ac7e649154a
|
[
"buildingspy/tests/test_development_refactor.py::Test_development_refactor::test_get_constants_empty",
"buildingspy/tests/test_development_refactor.py::Test_development_refactor::test_get_constants_non_empty"
] |
[
"buildingspy/tests/test_development_refactor.py::Test_development_refactor::test_getShortName",
"buildingspy/tests/test_development_refactor.py::Test_development_refactor::test_get_modelica_file_name",
"buildingspy/tests/test_development_refactor.py::Test_development_refactor::test_sort_package_order",
"buildingspy/tests/test_development_refactor.py::Test_development_refactor::test_write_package_order"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-11-10 13:15:20+00:00
|
bsd-3-clause
| 3,523 |
|
learningequality__kolibri-5951
|
diff --git a/kolibri/utils/cli.py b/kolibri/utils/cli.py
index 43743384c9..6c1a9098e2 100644
--- a/kolibri/utils/cli.py
+++ b/kolibri/utils/cli.py
@@ -334,6 +334,20 @@ def update(old_version, new_version):
logger.info("Running update routines for new version...")
+ try:
+ # Check if there are other kolibri instances running
+ # If there are, then we need to stop users from starting kolibri again.
+ server.get_status()
+ logger.error(
+ "There is a Kolibri server running."
+ "Running updates now could cause a database error."
+ "Please use `kolibri stop` and try again."
+ )
+ sys.exit(1)
+
+ except server.NotRunning:
+ pass
+
# Need to do this here, before we run any Django management commands that
# import settings. Otherwise the updated configuration will not be used
# during this runtime.
|
learningequality/kolibri
|
99ba95d7b0e26095292bbe1bcd382ea634927967
|
diff --git a/kolibri/utils/tests/test_cli.py b/kolibri/utils/tests/test_cli.py
index 6ef76aed0f..5296fa5f55 100755
--- a/kolibri/utils/tests/test_cli.py
+++ b/kolibri/utils/tests/test_cli.py
@@ -228,6 +228,20 @@ def test_update(update, get_version):
update.assert_called_once()
[email protected]_db
+@patch("kolibri.utils.cli.get_version", return_value="0.0.1")
+def test_update_exits_if_running(get_version):
+ """
+ Tests that update() function performs as expected
+ """
+ with patch("kolibri.utils.cli.server.get_status"):
+ try:
+ cli.initialize()
+ pytest.fail("Update did not exit when Kolibri was already running")
+ except SystemExit:
+ pass
+
+
@pytest.mark.django_db
def test_version_updated():
"""
|
Migrations / Initialization happens without checking if another instance is running
### Observed behavior
Kolibri 0.13.0.dev runs migrations even though another instance is running, which can potentially lead to database corruption.
CC: @rtibbles
### Expected behavior
Kolibri should immediately quit.
### User-facing consequences
Almost certainly bad.
### Errors and logs
```
➜ kolibri git:(develop) kolibri start --foreground
WARNING:root:No C Extensions available for this platform.
INFO Option RUN_MODE in section [Deployment] being overridden by environment variable KOLIBRI_RUN_MODE
INFO Running Kolibri with the following settings: kolibri.deployment.default.settings.base
INFO Version was 0.13.0.dev0+git.20190716150511, new version: 0.13.0.dev0+git.20190822215051
INFO Running update routines for new version...
Operations to perform:
Apply all migrations: admin, analytics, auth, content, contenttypes, device, discovery, exams, kolibriauth, lessons, logger, morango, notifications, sessions
Running migrations:
Applying content.0019_contentnode_slideshow_options... OK
Applying morango.0013_auto_20190627_1513... OK
Operations to perform:
Apply all migrations: admin, analytics, auth, content, contenttypes, device, discovery, exams, kolibriauth, lessons, logger, morango, notifications, sessions
Running migrations:
Applying content.0019_contentnode_slideshow_options... OK
Applying morango.0013_auto_20190627_1513... OK
Installed 2 object(s) from 1 fixture(s)
INFO Sqlite database Vacuum finished.
INFO Running Kolibri
ERROR Port 8080 is occupied.
Please check that you do not have other processes running on this port and try again.
```
### Steps to reproduce
Run Kolibri 0.12 so that it shares its directory with the current `develop`. Then run `kolibri start --foreground`.
### Context
Development env. for current `develop` branch on a system with 0.12.8 installed from .deb.
### Related
The otherwise great #5494
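A minimal sketch of the guard introduced by the fix: abort the update routines when another instance is reported as running. `server.get_status()` and `server.NotRunning` are the Kolibri helpers used in the patch; the wrapper function itself is illustrative:
```python
import sys

def guard_against_running_server(server, logger):
    """Refuse to run update routines while another Kolibri instance is up."""
    try:
        server.get_status()  # raises server.NotRunning when no instance is running
    except server.NotRunning:
        return  # safe to proceed with migrations
    logger.error(
        "There is a Kolibri server running. "
        "Running updates now could cause a database error. "
        "Please use `kolibri stop` and try again."
    )
    sys.exit(1)
```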
|
0.0
|
99ba95d7b0e26095292bbe1bcd382ea634927967
|
[
"kolibri/utils/tests/test_cli.py::test_update_exits_if_running"
] |
[
"kolibri/utils/tests/test_cli.py::test_real_plugin_disable",
"kolibri/utils/tests/test_cli.py::test_bogus_plugin_disable",
"kolibri/utils/tests/test_cli.py::test_plugin_with_no_plugin_class",
"kolibri/utils/tests/test_cli.py::test_bogus_plugin_autoremove",
"kolibri/utils/tests/test_cli.py::test_real_plugin_disable_twice",
"kolibri/utils/tests/test_cli.py::test_bogus_plugin_autoremove_no_path",
"kolibri/utils/tests/test_cli.py::test_cli_usage",
"kolibri/utils/tests/test_cli.py::test_version_updated",
"kolibri/utils/tests/test_cli.py::test_update_no_version_change",
"kolibri/utils/tests/test_cli.py::test_first_run",
"kolibri/utils/tests/test_cli.py::test_update",
"kolibri/utils/tests/test_cli.py::test_plugin_cannot_be_imported_disable"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2019-09-21 06:59:10+00:00
|
mit
| 3,524 |
|
learningequality__kolibri-6089
|
diff --git a/kolibri/core/discovery/utils/network/search.py b/kolibri/core/discovery/utils/network/search.py
index b07f8aff3d..6bb6a2ba59 100644
--- a/kolibri/core/discovery/utils/network/search.py
+++ b/kolibri/core/discovery/utils/network/search.py
@@ -11,8 +11,7 @@ from zeroconf import ServiceInfo
from zeroconf import USE_IP_OF_OUTGOING_INTERFACE
from zeroconf import Zeroconf
-from kolibri.core.auth.models import Facility
-from kolibri.core.content.models import ChannelMetadata
+import kolibri
logger = logging.getLogger(__name__)
@@ -106,13 +105,17 @@ class KolibriZeroconfListener(object):
info = zeroconf.get_service_info(type, name)
id = _id_from_name(name)
ip = socket.inet_ntoa(info.address)
+
self.instances[id] = {
"id": id,
"ip": ip,
"local": ip in get_all_addresses(),
"port": info.port,
"host": info.server.strip("."),
- "data": {key: json.loads(val) for (key, val) in info.properties.items()},
+ "data": {
+ bytes.decode(key): json.loads(val)
+ for (key, val) in info.properties.items()
+ },
"base_url": "http://{ip}:{port}/".format(ip=ip, port=info.port),
}
logger.info(
@@ -150,12 +153,7 @@ def register_zeroconf_service(port, id):
if ZEROCONF_STATE["service"] is not None:
unregister_zeroconf_service()
logger.info("Registering ourselves to zeroconf network with id '%s'..." % id)
- data = {
- "facilities": list(Facility.objects.values("id", "dataset_id", "name")),
- "channels": list(
- ChannelMetadata.objects.filter(root__available=True).values("id", "name")
- ),
- }
+ data = {"version": kolibri.VERSION}
ZEROCONF_STATE["service"] = KolibriZeroconfService(id=id, port=port, data=data)
ZEROCONF_STATE["service"].register()
diff --git a/kolibri/plugins/device/assets/src/views/ManageContentPage/SelectNetworkAddressModal/SelectAddressForm.vue b/kolibri/plugins/device/assets/src/views/ManageContentPage/SelectNetworkAddressModal/SelectAddressForm.vue
index a7f1db2f67..fd75f58c46 100644
--- a/kolibri/plugins/device/assets/src/views/ManageContentPage/SelectNetworkAddressModal/SelectAddressForm.vue
+++ b/kolibri/plugins/device/assets/src/views/ManageContentPage/SelectNetworkAddressModal/SelectAddressForm.vue
@@ -68,7 +68,7 @@
</div>
</template>
- <template>
+ <template v-if="false">
{{ $tr('searchingText') }}
</template>
|
learningequality/kolibri
|
3e122db94be8d65814337984e37ff87b9fa8662b
|
diff --git a/kolibri/core/discovery/test/test_network_search.py b/kolibri/core/discovery/test/test_network_search.py
index 27ee4f71dd..e8b27d6f54 100644
--- a/kolibri/core/discovery/test/test_network_search.py
+++ b/kolibri/core/discovery/test/test_network_search.py
@@ -21,6 +21,7 @@ from ..utils.network.search import ZEROCONF_STATE
MOCK_INTERFACE_IP = "111.222.111.222"
MOCK_PORT = 555
MOCK_ID = "abba"
+MOCK_PROPERTIES = {b"version": '[0, 13, 0, "alpha", 0]'}
class MockServiceBrowser(object):
@@ -48,8 +49,9 @@ class MockZeroconf(Zeroconf):
server=".".join([id, LOCAL_DOMAIN, ""]),
address=socket.inet_aton(MOCK_INTERFACE_IP),
port=MOCK_PORT,
- properties={"facilities": "[]", "channels": "[]"},
+ properties=MOCK_PROPERTIES,
)
+
return info
def add_service_listener(self, type_, listener):
@@ -114,7 +116,7 @@ class TestNetworkSearch(TestCase):
"self": True,
"port": MOCK_PORT,
"host": ".".join([MOCK_ID, LOCAL_DOMAIN]),
- "data": {"facilities": [], "channels": []},
+ "data": {"version": [0, 13, 0, "alpha", 0]},
"base_url": "http://{ip}:{port}/".format(
ip=MOCK_INTERFACE_IP, port=MOCK_PORT
),
|
zeroconf: Crashes when starting devserver
### Observed behavior
Tried to run devserver.
```
Traceback (most recent call last):
File "/home/richard/.virtualenvs/kolibri/bin/kolibri", line 11, in <module>
load_entry_point('kolibri', 'console_scripts', 'kolibri')()
File "/home/richard/.virtualenvs/kolibri/local/lib/python2.7/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/home/richard/.virtualenvs/kolibri/local/lib/python2.7/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/home/richard/.virtualenvs/kolibri/local/lib/python2.7/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/richard/github/kolibri/kolibri/utils/cli.py", line 237, in invoke
return super(KolibriDjangoCommand, self).invoke(ctx)
File "/home/richard/.virtualenvs/kolibri/local/lib/python2.7/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/richard/.virtualenvs/kolibri/local/lib/python2.7/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/richard/github/kolibri/kolibri/utils/cli.py", line 610, in services
server.services()
File "/home/richard/github/kolibri/kolibri/utils/server.py", line 167, in services
run_services(port=port)
File "/home/richard/github/kolibri/kolibri/utils/server.py", line 121, in run_services
register_zeroconf_service(port=port, id=instance.id[:4])
File "/home/richard/github/kolibri/kolibri/core/discovery/utils/network/search.py", line 160, in register_zeroconf_service
ZEROCONF_STATE["service"].register()
File "/home/richard/github/kolibri/kolibri/core/discovery/utils/network/search.py", line 68, in register
properties=self.data,
File "/home/richard/.virtualenvs/kolibri/local/lib/python2.7/site-packages/zeroconf.py", line 1485, in __init__
self._set_properties(properties)
File "/home/richard/.virtualenvs/kolibri/local/lib/python2.7/site-packages/zeroconf.py", line 1516, in _set_properties
result = b"".join((result, int2byte(len(item)), item))
ValueError: chr() arg not in range(256)
```
### Expected behavior
Devserver should run :)
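A sketch of the two sides of the fix, based on the patch and the traceback: advertise only a small, fixed-size payload and decode the byte keys on the listener. The reading that each TXT entry is length-prefixed with a single byte (hence `chr() arg not in range(256)` for oversized facility/channel listings) is inferred from the `int2byte(len(item))` line in the traceback:
```python
import json

# Keep the advertised zeroconf properties small; long JSON blobs overflow the
# one-byte length prefix of a TXT record entry.
data = {"version": json.dumps([0, 13, 0, "alpha", 0])}

# On the listener side, property keys arrive as bytes and values as JSON text.
def decode_properties(properties):
    return {bytes.decode(key): json.loads(val) for key, val in properties.items()}

print(decode_properties({b"version": '[0, 13, 0, "alpha", 0]'}))
# -> {'version': [0, 13, 0, 'alpha', 0]}
```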
|
0.0
|
3e122db94be8d65814337984e37ff87b9fa8662b
|
[
"kolibri/core/discovery/test/test_network_search.py::TestNetworkSearch::test_irreconcilable_naming_conflict",
"kolibri/core/discovery/test/test_network_search.py::TestNetworkSearch::test_naming_conflict",
"kolibri/core/discovery/test/test_network_search.py::TestNetworkSearch::test_excluding_local",
"kolibri/core/discovery/test/test_network_search.py::TestNetworkSearch::test_register_zeroconf_service",
"kolibri/core/discovery/test/test_network_search.py::TestNetworkSearch::test_initialize_zeroconf_listener"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-11-19 01:35:02+00:00
|
mit
| 3,525 |
|
learningequality__kolibri-7396
|
diff --git a/kolibri/core/tasks/utils.py b/kolibri/core/tasks/utils.py
index 2150613d96..6df4b0d13f 100644
--- a/kolibri/core/tasks/utils.py
+++ b/kolibri/core/tasks/utils.py
@@ -4,7 +4,7 @@ import time
import uuid
from kolibri.core.tasks import compat
-from kolibri.core.utils.cache import get_process_lock
+from kolibri.core.utils.cache import ProcessLock
# An object on which to store data about the current job
@@ -120,4 +120,4 @@ class InfiniteLoopThread(compat.Thread):
self.stop()
-db_task_write_lock = get_process_lock("db_task_write_lock")
+db_task_write_lock = ProcessLock("db_task_write_lock")
diff --git a/kolibri/core/utils/cache.py b/kolibri/core/utils/cache.py
index 43c6bf9a5a..110faba1de 100644
--- a/kolibri/core/utils/cache.py
+++ b/kolibri/core/utils/cache.py
@@ -22,32 +22,52 @@ def __get_process_cache():
process_cache = SimpleLazyObject(__get_process_cache)
-def get_process_lock(key, expire=None):
- """
- Return Lock object that's appropriate given current cache backend
-
- :param key: The lock key
- :param expire: The cache key expiration in seconds
- :type key: str
- :type expire: int
- :rtype: redis.lock.Lock|diskcache.recipes.RLock
- """
- if OPTIONS["Cache"]["CACHE_BACKEND"] == "redis":
- expire = expire * 1000 if expire is not None else None
- # if we're using Redis, be sure we use Redis' locking mechanism which uses
- # `SET NX` under the hood. See redis.lock.Lock
- return process_cache.lock(
- key,
- timeout=expire, # milliseconds
- sleep=0.01, # seconds
- blocking_timeout=100, # seconds
- thread_local=True,
- )
- else:
- # we can't pass in the `process_cache` because it's an instance of DjangoCache
- # and we need a Cache instance
- cache = process_cache.cache("locks")
- return RLock(cache, key, expire=expire)
+class ProcessLock(object):
+ def __init__(self, key, expire=None):
+ """
+ :param key: The lock key
+ :param expire: The cache key expiration in seconds
+ :type key: str
+ :type expire: int
+ """
+ self.key = key
+ self.expire = expire
+
+ self._lock_object = None
+
+ @property
+ def _lock(self):
+ if self._lock_object is None:
+ if OPTIONS["Cache"]["CACHE_BACKEND"] == "redis":
+ expire = self.expire * 1000 if self.expire is not None else None
+ # if we're using Redis, be sure we use Redis' locking mechanism which uses
+ # `SET NX` under the hood. See redis.lock.Lock
+ # The Django RedisCache backend provide the lock method to proxy this
+ self._lock_object = process_cache.lock(
+ self.key,
+ timeout=expire, # milliseconds
+ sleep=0.01, # seconds
+ blocking_timeout=100, # seconds
+ thread_local=True,
+ )
+ else:
+ # we can't pass in the `process_cache` because it's an instance of DjangoCache
+ # and we need a DiskCache Cache instance
+ cache = process_cache.cache("locks")
+ self._lock_object = RLock(cache, self.key, expire=self.expire)
+ return self._lock_object
+
+ def acquire(self):
+ self._lock.acquire()
+
+ def release(self):
+ self._lock.release()
+
+ def __enter__(self):
+ self.acquire()
+
+ def __exit__(self, *exc_info):
+ self.release()
class NamespacedCacheProxy(BaseCache):
@@ -64,7 +84,7 @@ class NamespacedCacheProxy(BaseCache):
params.update(KEY_PREFIX=namespace)
super(NamespacedCacheProxy, self).__init__(params)
self.cache = cache
- self._lock = get_process_lock("namespaced_cache_{}".format(namespace))
+ self._lock = ProcessLock("namespaced_cache_{}".format(namespace))
def _get_keys(self):
"""
diff --git a/kolibri/plugins/__init__.py b/kolibri/plugins/__init__.py
index 7fbf8c6831..95dfa117e9 100644
--- a/kolibri/plugins/__init__.py
+++ b/kolibri/plugins/__init__.py
@@ -242,7 +242,15 @@ class KolibriPluginBase(with_metaclass(SingletonMeta)):
def _return_module(self, module_name):
if module_has_submodule(sys.modules[self.module_path], module_name):
models_module_name = "%s.%s" % (self.module_path, module_name)
- return import_module(models_module_name)
+ try:
+ return import_module(models_module_name)
+ except Exception as e:
+ logging.warn(
+ "Tried to import module {module_name} from {plugin} but an error was raised".format(
+ plugin=self.module_path, module_name=module_name,
+ )
+ )
+ logging.exception(e)
return None
diff --git a/kolibri/utils/server.py b/kolibri/utils/server.py
index cd7f28a54b..86ebd1ca68 100644
--- a/kolibri/utils/server.py
+++ b/kolibri/utils/server.py
@@ -11,7 +11,6 @@ from django.conf import settings
from zeroconf import get_all_addresses
import kolibri
-from .conf import OPTIONS
from .system import kill_pid
from .system import pid_exists
from kolibri.core.content.utils import paths
@@ -87,7 +86,7 @@ class ServicesPlugin(SimplePlugin):
# Initialize the iceqube scheduler to handle scheduled tasks
scheduler.clear_scheduler()
- if not OPTIONS["Deployment"]["DISABLE_PING"]:
+ if not conf.OPTIONS["Deployment"]["DISABLE_PING"]:
# schedule the pingback job
from kolibri.core.analytics.utils import schedule_ping
|
learningequality/kolibri
|
e23f73ae60ff787f2e42fae3f50b1ff8dcaec9a0
|
diff --git a/kolibri/core/test/test_utils.py b/kolibri/core/test/test_utils.py
index e83d427301..acaa654430 100644
--- a/kolibri/core/test/test_utils.py
+++ b/kolibri/core/test/test_utils.py
@@ -4,8 +4,8 @@ from django.core.cache.backends.base import BaseCache
from django.test import TestCase
from redis import Redis
-from kolibri.core.utils.cache import get_process_lock
from kolibri.core.utils.cache import NamespacedCacheProxy
+from kolibri.core.utils.cache import ProcessLock
from kolibri.core.utils.cache import RedisSettingsHelper
@@ -21,7 +21,7 @@ class GetProcessLockTestCase(TestCase):
self.setup_opts(options, redis=True)
lock = mock.Mock()
process_cache.lock.return_value = lock
- self.assertEqual(lock, get_process_lock("test_key", expire=2))
+ self.assertEqual(lock, ProcessLock("test_key", expire=2)._lock)
process_cache.lock.assert_called_once_with(
"test_key",
timeout=2000,
@@ -34,15 +34,15 @@ class GetProcessLockTestCase(TestCase):
self.setup_opts(options, redis=False)
sub_cache = mock.Mock()
process_cache.cache.return_value = sub_cache
- lock = get_process_lock("test_key", expire=2)
- self.assertIsInstance(lock, RLock)
+ lock = ProcessLock("test_key", expire=2)
+ self.assertIsInstance(lock._lock, RLock)
process_cache.cache.assert_called_once_with("locks")
- self.assertEqual(sub_cache, lock._cache)
- self.assertEqual("test_key", lock._key)
- self.assertEqual(2, lock._expire)
+ self.assertEqual(sub_cache, lock._lock._cache)
+ self.assertEqual("test_key", lock._lock._key)
+ self.assertEqual(2, lock._lock._expire)
[email protected]("kolibri.core.utils.cache.get_process_lock")
[email protected]("kolibri.core.utils.cache.ProcessLock")
class NamespacedCacheProxyTestCase(TestCase):
def setUp(self):
self.lock = mock.MagicMock(spec=RLock)
|
incompatible code between the kolibri source code and kolibri sentry plugin
### Observed behavior
On a kolibri-beta server that uses the sentry plugin, the upgrade to the latest beta failed
### Expected behavior
The upgrade should complete successfully.
### User-facing consequences
The beta server cannot be upgraded to the latest beta.
### Errors and logs
```
Traceback (most recent call last):
File "/usr/local/bin/kolibri", line 7, in <module>
from kolibri.utils.cli import main
File "/usr/local/lib/python2.7/dist-packages/kolibri/utils/cli.py", line 29, in <module>
from . import sanity_checks
File "/usr/local/lib/python2.7/dist-packages/kolibri/utils/sanity_checks.py", line 14, in <module>
from .server import get_status
File "/usr/local/lib/python2.7/dist-packages/kolibri/utils/server.py", line 18, in <module>
from kolibri.core.deviceadmin.utils import schedule_vacuum
File "/usr/local/lib/python2.7/dist-packages/kolibri/core/deviceadmin/utils.py", line 13, in <module>
from kolibri.core.tasks.main import scheduler
File "/usr/local/lib/python2.7/dist-packages/kolibri/core/tasks/main.py", line 11, in <module>
from kolibri.core.tasks.queue import Queue
File "/usr/local/lib/python2.7/dist-packages/kolibri/core/tasks/queue.py", line 1, in <module>
from kolibri.core.tasks.job import Job
File "/usr/local/lib/python2.7/dist-packages/kolibri/core/tasks/job.py", line 7, in <module>
from kolibri.core.tasks.utils import current_state_tracker
File "/usr/local/lib/python2.7/dist-packages/kolibri/core/tasks/utils.py", line 123, in <module>
db_task_write_lock = get_process_lock("db_task_write_lock")
File "/usr/local/lib/python2.7/dist-packages/kolibri/core/utils/cache.py", line 49, in get_process_lock
cache = process_cache.cache("locks")
File "/usr/local/lib/python2.7/dist-packages/kolibri/dist/django/utils/functional.py", line 238, in inner
self._setup()
File "/usr/local/lib/python2.7/dist-packages/kolibri/dist/django/utils/functional.py", line 386, in _setup
self._wrapped = self._setupfunc()
File "/usr/local/lib/python2.7/dist-packages/kolibri/core/utils/cache.py", line 17, in __get_process_cache
return caches["process_cache"]
File "/usr/local/lib/python2.7/dist-packages/kolibri/dist/django/core/cache/__init__.py", line 75, in __getitem__
if alias not in settings.CACHES:
File "/usr/local/lib/python2.7/dist-packages/kolibri/dist/django/conf/__init__.py", line 56, in __getattr__
self._setup(name)
File "/usr/local/lib/python2.7/dist-packages/kolibri/dist/django/conf/__init__.py", line 41, in _setup
self._wrapped = Settings(settings_module)
File "/usr/local/lib/python2.7/dist-packages/kolibri/dist/django/conf/__init__.py", line 110, in __init__
mod = importlib.import_module(self.SETTINGS_MODULE)
File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/usr/local/lib/python2.7/dist-packages/kolibri/deployment/default/settings/base.py", line 357, in <module>
apply_settings(sys.modules[__name__])
File "/usr/local/lib/python2.7/dist-packages/kolibri/plugins/utils/settings.py", line 133, in apply_settings
plugin_settings_module = plugin_instance.settings_module
File "/usr/local/lib/python2.7/dist-packages/kolibri/plugins/__init__.py", line 345, in settings_module
module = self._return_module(self.django_settings)
File "/usr/local/lib/python2.7/dist-packages/kolibri/plugins/__init__.py", line 245, in _return_module
return import_module(models_module_name)
File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/usr/local/lib/python2.7/dist-packages/kolibri_sentry_plugin/settings.py", line 6, in <module>
from kolibri.utils.server import installation_type
ImportError: cannot import name installation_type
```
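The `ProcessLock` change in the patch above addresses the module-level `db_task_write_lock = get_process_lock(...)` call that appears in this traceback: constructing the lock no longer touches the Django cache (and therefore settings) at import time. Below is a minimal, hypothetical sketch of that lazy-initialization pattern; `make_backend_lock` is a placeholder stand-in, not Kolibri's real Redis/DiskCache lookup.
```python
import threading


def make_backend_lock(key, expire=None):
    # Placeholder for the real backend-specific lock construction (hypothetical).
    return threading.Lock()


class LazyProcessLock(object):
    def __init__(self, key, expire=None):
        # Only remember the parameters; no cache or settings access happens here,
        # so this is safe to run at module import time.
        self.key = key
        self.expire = expire
        self._lock_object = None

    @property
    def _lock(self):
        if self._lock_object is None:
            # Deferred until the lock is actually used.
            self._lock_object = make_backend_lock(self.key, self.expire)
        return self._lock_object

    def __enter__(self):
        self._lock.acquire()

    def __exit__(self, *exc_info):
        self._lock.release()


# Safe at import time; the backend is only consulted once the lock is used.
db_task_write_lock = LazyProcessLock("db_task_write_lock")
```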
### Steps to reproduce
…
### Context
0.14.0b14
|
0.0
|
e23f73ae60ff787f2e42fae3f50b1ff8dcaec9a0
|
[
"kolibri/core/test/test_utils.py::GetProcessLockTestCase::test_redis",
"kolibri/core/test/test_utils.py::GetProcessLockTestCase::test_not_redis",
"kolibri/core/test/test_utils.py::RedisSettingsHelperTestCase::test_save",
"kolibri/core/test/test_utils.py::RedisSettingsHelperTestCase::test_get_used_memory",
"kolibri/core/test/test_utils.py::RedisSettingsHelperTestCase::test_setters",
"kolibri/core/test/test_utils.py::RedisSettingsHelperTestCase::test_getters",
"kolibri/core/test/test_utils.py::NamespacedCacheProxyTestCase::test_get_keys",
"kolibri/core/test/test_utils.py::NamespacedCacheProxyTestCase::test_clear",
"kolibri/core/test/test_utils.py::NamespacedCacheProxyTestCase::test_set_keys",
"kolibri/core/test/test_utils.py::NamespacedCacheProxyTestCase::test_add__failed",
"kolibri/core/test/test_utils.py::NamespacedCacheProxyTestCase::test_set",
"kolibri/core/test/test_utils.py::NamespacedCacheProxyTestCase::test_add",
"kolibri/core/test/test_utils.py::NamespacedCacheProxyTestCase::test_delete"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-07-28 17:34:48+00:00
|
mit
| 3,526 |
|
learningequality__kolibri-8753
|
diff --git a/kolibri/core/assets/src/views/TextTruncatorCss.vue b/kolibri/core/assets/src/views/TextTruncatorCss.vue
index 05eb673054..47d75aab01 100644
--- a/kolibri/core/assets/src/views/TextTruncatorCss.vue
+++ b/kolibri/core/assets/src/views/TextTruncatorCss.vue
@@ -1,24 +1,28 @@
<template>
<!--
- Text is wrapped in two `div`s to allow parent components adding
+ Text is wrapped in two `spans`s to allow parent components adding
padding style directly on `<TextTruncatorCss>` component no matter
of what truncating technique is used. Otherwise adding padding directly
would break when using technique (B) since text that should be truncated
would show in padding area.
- Attributes are inherited by the inner `div` to emulate the same behavior
+ Attributes are inherited by the inner `span` to emulate the same behavior
like if only one element would wrap the text to allow attributes be applied
as close as possible to the text element.
+
+ Some width information need to be provided to `<span>s` to allow `text-overflow`
+ calculate properly when ellipsis should be added.
-->
- <div>
- <div
+ <span :style="{ display: 'inline-block', maxWidth: '100%' }">
+ <span
v-bind="$attrs"
+ :style="{ display: 'inline-block', maxWidth: '100%' }"
:class="$computedClass(truncate)"
>
{{ text }}
- </div>
- </div>
+ </span>
+ </span>
</template>
diff --git a/kolibri/plugins/facility/assets/src/views/DataPage/SyncInterface/index.vue b/kolibri/plugins/facility/assets/src/views/DataPage/SyncInterface/index.vue
index 58282151cc..757428af80 100644
--- a/kolibri/plugins/facility/assets/src/views/DataPage/SyncInterface/index.vue
+++ b/kolibri/plugins/facility/assets/src/views/DataPage/SyncInterface/index.vue
@@ -28,17 +28,49 @@
<td class="button-col">
<KButtonGroup style="margin-top: 8px; overflow: visible">
<KButton
+ v-if="!theFacility.dataset.registered"
appearance="raised-button"
:text="$tr('register')"
- :disabled="Boolean(syncTaskId) || theFacility.dataset.registered"
@click="displayModal(Modals.REGISTER_FACILITY)"
/>
<KButton
+ v-else-if="!Boolean(syncTaskId)"
appearance="raised-button"
:text="$tr('sync')"
- :disabled="Boolean(syncTaskId)"
@click="displayModal(Modals.SYNC_FACILITY)"
/>
+ <KIconButton
+ ref="moreOptionsButton"
+ data-test="moreOptionsButton"
+ icon="optionsHorizontal"
+ :tooltip="coreString('optionsLabel')"
+ :ariaLabel="coreString('optionsLabel')"
+ @click="toggleMenu"
+ />
+ <CoreMenu
+ v-show="isMenuOpen"
+ ref="menu"
+ class="menu"
+ :raised="true"
+ :isOpen="isMenuOpen"
+ :containFocus="true"
+ @close="closeMenu"
+ >
+ <template #options>
+ <CoreMenuOption
+ v-if="theFacility.dataset.registered"
+ :style="{ 'cursor': 'pointer', textAlign: 'left' }"
+ :label="$tr('register')"
+ @select="displayModal(Modals.REGISTER_FACILITY)"
+ />
+ <CoreMenuOption
+ v-else
+ :style="{ 'cursor': 'pointer', textAlign: 'left' }"
+ :label="$tr('sync')"
+ @select="displayModal(Modals.SYNC_FACILITY)"
+ />
+ </template>
+ </CoreMenu>
</KButtonGroup>
</td>
</tr>
@@ -89,6 +121,9 @@
} from 'kolibri.coreVue.componentSets.sync';
import commonSyncElements from 'kolibri.coreVue.mixins.commonSyncElements';
import { FacilityTaskResource, FacilityResource } from 'kolibri.resources';
+ import commonCoreStrings from 'kolibri.coreVue.mixins.commonCoreStrings';
+ import CoreMenu from 'kolibri.coreVue.components.CoreMenu';
+ import CoreMenuOption from 'kolibri.coreVue.components.CoreMenuOption';
import { TaskStatuses } from '../../../constants';
import PrivacyModal from './PrivacyModal';
@@ -108,8 +143,10 @@
RegisterFacilityModal,
ConfirmationRegisterModal,
SyncFacilityModalGroup,
+ CoreMenu,
+ CoreMenuOption,
},
- mixins: [commonSyncElements],
+ mixins: [commonSyncElements, commonCoreStrings],
data() {
return {
theFacility: null,
@@ -119,6 +156,7 @@
isSyncing: false,
syncHasFailed: false,
Modals,
+ isMenuOpen: false,
};
},
beforeMount() {
@@ -178,6 +216,24 @@
this.syncHasFailed = true;
this.closeModal();
},
+ closeMenu({ focusMoreOptionsButton = true } = {}) {
+ this.isMenuOpen = false;
+ if (!focusMoreOptionsButton) {
+ return;
+ }
+ this.$nextTick(() => {
+ this.$refs.moreOptionsButton.$el.focus();
+ });
+ },
+ toggleMenu() {
+ this.isMenuOpen = !this.isMenuOpen;
+ if (!this.isMenuOpen) {
+ return;
+ }
+ this.$nextTick(() => {
+ this.$refs.menu.$el.focus();
+ });
+ },
},
$trs: {
syncData: {
diff --git a/kolibri/plugins/learn/assets/src/views/LearningActivityBar.vue b/kolibri/plugins/learn/assets/src/views/LearningActivityBar.vue
index 3351c67567..97aa3e9ddb 100644
--- a/kolibri/plugins/learn/assets/src/views/LearningActivityBar.vue
+++ b/kolibri/plugins/learn/assets/src/views/LearningActivityBar.vue
@@ -5,7 +5,7 @@
:value="isCoachContent"
style="margin-top: 8px; width: auto;"
/>
- <KLabeledIcon :style="{ 'margin-top': '8px', 'width': 'auto' }">
+ <KLabeledIcon :style="{ 'margin-top': '8px' }">
<template #icon>
<LearningActivityIcon
data-test="learningActivityIcon"
@@ -13,14 +13,12 @@
:shaded="true"
/>
</template>
- <TextTruncator
+ <TextTruncatorCss
:text="resourceTitle"
- :maxHeight="26"
+ :maxLines="1"
/>
- <template #iconAfter>
- <ProgressIcon :progress="contentProgress" class="progress-icon" />
- </template>
</KLabeledIcon>
+ <ProgressIcon :progress="contentProgress" class="progress-icon" />
<template #icon>
<KIconButton
@@ -107,7 +105,7 @@
import CoreMenuOption from 'kolibri.coreVue.components.CoreMenuOption';
import ProgressIcon from 'kolibri.coreVue.components.ProgressIcon';
import UiToolbar from 'kolibri.coreVue.components.UiToolbar';
- import TextTruncator from 'kolibri.coreVue.components.TextTruncator';
+ import TextTruncatorCss from 'kolibri.coreVue.components.TextTruncatorCss';
import { validateLearningActivity } from 'kolibri.utils.validators';
import commonCoreStrings from 'kolibri.coreVue.mixins.commonCoreStrings';
import LearningActivityIcon from './LearningActivityIcon.vue';
@@ -120,7 +118,7 @@
CoachContentLabel,
CoreMenu,
CoreMenuOption,
- TextTruncator,
+ TextTruncatorCss,
LearningActivityIcon,
MarkAsCompleteModal,
ProgressIcon,
@@ -361,8 +359,33 @@
transform: translateY(16px);
}
+ /*
+ Make truncation via text ellipsis work well in UIToolbar's body flex item:
+ By default, `min-width` is `auto` for a flex item which means it
+ cannot be smaller than the size of its content which causes the whole
+ title being visible even in cases when it should be already truncated.
+ Overriding it to `0` allows the title to be shrinked and then truncated
+ properly. Labeled icon wrapper needs to have this set too for its parent
+ flex item to shrink.
+ */
+ /deep/ .ui-toolbar__body,
+ /deep/ .labeled-icon-wrapper {
+ min-width: 0;
+ }
+
+ /deep/ .ui-toolbar__body {
+ flex-grow: 0; // make sure that the completion icon is right next to the title
+ align-items: center;
+ }
+
+ /deep/ .ui-toolbar__right {
+ // never shrink controls on the right side of the toolbar
+ flex-shrink: 0;
+ }
+
/deep/ .progress-icon .ui-icon {
margin-top: -2px;
+ margin-left: 16px;
svg {
width: 18px;
diff --git a/kolibri/utils/constants/__init__.py b/kolibri/utils/constants/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/kolibri/utils/constants/installation_types.py b/kolibri/utils/constants/installation_types.py
new file mode 100644
index 0000000000..29e6c273e1
--- /dev/null
+++ b/kolibri/utils/constants/installation_types.py
@@ -0,0 +1,28 @@
+"""
+This module contains constants representing the type of "installers" used to install Kolibri.
+"""
+from __future__ import unicode_literals
+
+APK = "apk"
+DEB = "deb"
+FLATPAK = "flatpak"
+GNOME = "gnome"
+KOLIBRI_SERVER = "kolibriserver"
+MACOS = "mac"
+PEX = "pex"
+WHL = "whl"
+WINDOWS = "windows"
+WINDOWS_APP = "windowsapp"
+
+install_type_map = {
+ APK: "apk - {}",
+ DEB: "deb kolibri - {}",
+ FLATPAK: "Flatpak - {}",
+ GNOME: "GNOME - {}",
+ KOLIBRI_SERVER: "deb kolibri-server - {}",
+ MACOS: "Mac - {}",
+ PEX: "pex",
+ WHL: "whl",
+ WINDOWS: "Windows - {}",
+ WINDOWS_APP: "Windows App - {}",
+}
diff --git a/kolibri/utils/server.py b/kolibri/utils/server.py
index 4f4bd079bd..227cd9c346 100644
--- a/kolibri/utils/server.py
+++ b/kolibri/utils/server.py
@@ -25,6 +25,7 @@ from zeroconf import get_all_addresses
from zeroconf import InterfaceChoice
import kolibri
+from .constants import installation_types
from .system import become_daemon
from .system import pid_exists
from kolibri.utils import conf
@@ -913,77 +914,116 @@ def get_urls(listen_port=None):
return e.status_code, []
+def get_installer_version(installer_type): # noqa: C901
+ def get_debian_pkg_version(package):
+ """
+ In case we want to distinguish between dpkg and apt installations
+ we can use apt-cache show madison and compare versions with dpkg
+ if dpkg > madison, it's dpkg otherwise it's apt
+ """
+ try:
+ output = check_output(["dpkg", "-s", package])
+ if hasattr(output, "decode"): # needed in python 2.x
+ output = output.decode("utf-8")
+ package_info = output.split("\n")
+ version_info = [output for output in package_info if "Version" in output]
+ if version_info:
+ version = version_info[0].split(":")[1].strip()
+ return version
+ except CalledProcessError: # package not installed!
+ pass # will return None
+ return None
+
+ def get_deb_kolibriserver_version():
+ return get_debian_pkg_version("kolibri-server")
+
+ def get_deb_version():
+ return get_debian_pkg_version("kolibri")
+
+ def get_apk_version():
+ return os.environ.get("KOLIBRI_APK_VERSION_NAME")
+
+ installer_version = os.environ.get("KOLIBRI_INSTALLER_VERSION")
+ if installer_version:
+ return installer_version
+
+ version_funcs = {
+ installation_types.DEB: get_deb_version,
+ installation_types.KOLIBRI_SERVER: get_deb_kolibriserver_version,
+ installation_types.APK: get_apk_version,
+ }
+
+ if installer_type in version_funcs:
+ return version_funcs[installer_type]()
+ else:
+ return None
+
+
def installation_type(cmd_line=None): # noqa:C901
"""
Tries to guess how the running kolibri server was installed
:returns: install_type is the type of detected installation
"""
+
+ install_type = os.environ.get("KOLIBRI_INSTALLATION_TYPE", "Unknown")
+
if cmd_line is None:
cmd_line = sys.argv
- install_type = "Unknown"
def is_debian_package():
# find out if this is from the debian package
- install_type = "dpkg"
+ install_type = installation_types.DEB
try:
- check_output(["apt-cache", "show", "kolibri"])
- apt_repo = str(check_output(["apt-cache", "madison", "kolibri"]))
- if len(apt_repo) > 4: # repo will have at least http
- install_type = "apt"
+ check_output(["dpkg", "-s", "kolibri"])
except (
CalledProcessError,
FileNotFoundError,
): # kolibri package not installed!
if sys.path[-1] != "/usr/lib/python3/dist-packages":
- install_type = "whl"
+ install_type = installation_types.WHL
return install_type
def is_kolibri_server():
# running under uwsgi, finding out if we are using kolibri-server
- install_type = ""
+ install_type = "Unknown"
try:
- package_info = (
- check_output(["apt-cache", "show", "kolibri-server"])
- .decode("utf-8")
- .split("\n")
- )
- version = [output for output in package_info if "Version" in output]
- install_type = "kolibri-server {}".format(version[0])
+ check_output(["dpkg", "-s", "kolibri-server"])
+ install_type = installation_types.KOLIBRI_SERVER
except CalledProcessError: # kolibri-server package not installed!
- install_type = "uwsgi"
+ install_type = installation_types.WHL
return install_type
- if len(cmd_line) > 1 or "uwsgi" in cmd_line:
- launcher = cmd_line[0]
- if launcher.endswith(".pex"):
- install_type = "pex"
- elif "runserver" in cmd_line:
- install_type = "devserver"
- elif launcher == "/usr/bin/kolibri":
- install_type = is_debian_package()
- elif launcher == "uwsgi":
- package = is_debian_package()
- if package != "whl":
- kolibri_server = is_kolibri_server()
- install_type = "kolibri({kolibri_type}) with {kolibri_server}".format(
- kolibri_type=package, kolibri_server=kolibri_server
- )
- elif "\\Scripts\\kolibri" in launcher:
- paths = sys.path
- for path in paths:
- if "kolibri.exe" in path:
- install_type = "Windows"
- break
- elif "start" in cmd_line:
- install_type = "whl"
- if on_android():
-
- version_name = os.environ.get("KOLIBRI_APK_VERSION_NAME")
-
- if version_name:
- install_type = "apk - {}".format(version_name)
+ # in case the KOLIBRI_INSTALLATION_TYPE is not set, let's use the old method:
+ if install_type == "Unknown":
+ if on_android():
+ install_type = installation_types.APK
+ elif len(cmd_line) > 1 or "uwsgi" in cmd_line:
+ launcher = cmd_line[0]
+ if launcher.endswith(".pex"):
+ install_type = installation_types.PEX
+ elif "runserver" in cmd_line:
+ install_type = "devserver"
+ elif launcher == "/usr/bin/kolibri":
+ install_type = is_debian_package()
+ elif launcher == "uwsgi":
+ package = is_debian_package()
+ if package != "whl":
+ install_type = is_kolibri_server()
+ elif "\\Scripts\\kolibri" in launcher:
+ paths = sys.path
+ for path in paths:
+ if "kolibri.exe" in path:
+ install_type = installation_types.WINDOWS
+ break
+ elif "start" in cmd_line:
+ install_type = installation_types.WHL
+
+ if install_type in installation_types.install_type_map:
+ version = get_installer_version(install_type)
+ if version:
+ return installation_types.install_type_map[install_type].format(version)
else:
- install_type = "apk"
+ return installation_types.install_type_map[install_type].split(" - ")[0]
return install_type
|
learningequality/kolibri
|
387eaedc906ad86f6fb3f462f462b06679ed4427
|
diff --git a/kolibri/utils/tests/test_server.py b/kolibri/utils/tests/test_server.py
index 02e815eb5e..4884ddb380 100755
--- a/kolibri/utils/tests/test_server.py
+++ b/kolibri/utils/tests/test_server.py
@@ -14,13 +14,14 @@ import pytest
from kolibri.core.tasks.scheduler import Scheduler
from kolibri.core.tasks.test.base import connection
from kolibri.utils import server
+from kolibri.utils.constants import installation_types
class TestServerInstallation(object):
@mock.patch("sys.argv", ["kolibri-0.9.3.pex", "start"])
def test_pex(self):
install_type = server.installation_type()
- assert install_type == "pex"
+ assert install_type == installation_types.PEX
def test_dev(self):
sys_args = [
@@ -36,27 +37,38 @@ class TestServerInstallation(object):
assert install_type == "devserver"
@mock.patch("sys.argv", ["/usr/bin/kolibri", "start"])
+ @mock.patch("os.environ", {"KOLIBRI_INSTALLER_VERSION": "1.0"})
def test_dpkg(self):
with mock.patch("kolibri.utils.server.check_output", return_value=""):
install_type = server.installation_type()
- assert install_type == "dpkg"
+ assert install_type == installation_types.install_type_map[
+ installation_types.DEB
+ ].format("1.0")
@mock.patch("sys.argv", ["/usr/bin/kolibri", "start"])
+ @mock.patch("os.environ", {"KOLIBRI_INSTALLER_VERSION": "1.0"})
def test_apt(apt):
with mock.patch("kolibri.utils.server.check_output", return_value="any repo"):
install_type = server.installation_type()
- assert install_type == "apt"
+ assert install_type == installation_types.install_type_map[
+ installation_types.DEB
+ ].format("1.0")
@mock.patch("sys.argv", ["C:\\Python34\\Scripts\\kolibri", "start"])
@mock.patch("sys.path", ["", "C:\\Program Files\\Kolibri\\kolibri.exe"])
+ @mock.patch("os.environ", {"KOLIBRI_INSTALLER_VERSION": "1.0"})
def test_windows(self):
install_type = server.installation_type()
- assert install_type == "Windows"
+ assert install_type == installation_types.install_type_map[
+ installation_types.WINDOWS
+ ].format("1.0")
@mock.patch("sys.argv", ["/usr/local/bin/kolibri", "start"])
def test_whl(self):
install_type = server.installation_type()
- assert install_type == "whl"
+ assert (
+ install_type == installation_types.install_type_map[installation_types.WHL]
+ )
@pytest.fixture
|
Facilities should have option to sync KDP even if already registered, and if device does not know it's registered
### Observed behavior
1. After a Facility is registered to a KDP project, its `FacilityDataset.registered` value is set to `true`, which then blocks the user from registering it again with a different token.
1. If a facility is not registered to a project, it can only be synced to other peer devices, but not KDP
### Expected behavior
1. After a Facility is registered to a KDP project, the user can then register it again using a different (or the same) token.
1. If a facility is not registered to a project, it can only be synced to other peer devices, and not KDP
### User-facing consequences
…
### Errors and logs
…
### Steps to reproduce
…
### Context
…
|
0.0
|
387eaedc906ad86f6fb3f462f462b06679ed4427
|
[
"kolibri/utils/tests/test_server.py::ServerInitializationTestCase::test_unclean_shutdown",
"kolibri/utils/tests/test_server.py::ServerInitializationTestCase::test_port_occupied",
"kolibri/utils/tests/test_server.py::ServerInitializationTestCase::test_server_running",
"kolibri/utils/tests/test_server.py::ServerInitializationTestCase::test_port_occupied_socket_activation",
"kolibri/utils/tests/test_server.py::ServerInitializationTestCase::test_port_zero_zip_port_zero",
"kolibri/utils/tests/test_server.py::TestServerInstallation::test_dev",
"kolibri/utils/tests/test_server.py::TestServerInstallation::test_windows",
"kolibri/utils/tests/test_server.py::TestServerInstallation::test_pex",
"kolibri/utils/tests/test_server.py::TestServerInstallation::test_apt",
"kolibri/utils/tests/test_server.py::TestServerInstallation::test_dpkg",
"kolibri/utils/tests/test_server.py::TestServerInstallation::test_whl",
"kolibri/utils/tests/test_server.py::ServerSignalHandlerTestCase::test_signal_different_pid",
"kolibri/utils/tests/test_server.py::ServerSignalHandlerTestCase::test_signal_subscribe",
"kolibri/utils/tests/test_server.py::ServerSignalHandlerTestCase::test_signal_same_pid",
"kolibri/utils/tests/test_server.py::TestZeroConfPlugin::test_required_services_initiate_on_start",
"kolibri/utils/tests/test_server.py::TestZeroConfPlugin::test_services_shutdown_on_stop",
"kolibri/utils/tests/test_server.py::TestServerServices::test_services_shutdown_on_stop",
"kolibri/utils/tests/test_server.py::TestServerServices::test_scheduled_jobs_persist_on_restart",
"kolibri/utils/tests/test_server.py::TestServerServices::test_required_services_initiate_on_start"
] |
[] |
{
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-11-19 18:17:54+00:00
|
mit
| 3,527 |
|
learningequality__kolibri-8883
|
diff --git a/kolibri/utils/server.py b/kolibri/utils/server.py
index 3c9d41d86a..8451fcffdf 100644
--- a/kolibri/utils/server.py
+++ b/kolibri/utils/server.py
@@ -116,7 +116,7 @@ class Server(BaseServer):
return logger.log(level, msg)
-def check_port_availability(host, port):
+def port_is_available_on_host(host, port):
"""
Make sure the port is available for the server to start.
"""
@@ -152,7 +152,7 @@ class PortCache:
if not self.values[p] and p not in self.occupied_ports
)
if port:
- if check_port_availability(host, port):
+ if port_is_available_on_host(host, port):
self.values[port] = True
return port
except StopIteration:
@@ -603,11 +603,10 @@ class KolibriProcessBus(ProcessBus):
if sys.platform == "darwin":
self.background = False
- # Check if there are other kolibri instances running
- # If there are, then we need to stop users from starting kolibri again.
- pid, _, _, status = _read_pid_file(self.pid_file)
-
- if status in IS_RUNNING and pid_exists(pid):
+ if (
+ self._kolibri_appears_to_be_running()
+ and self._kolibri_main_port_is_occupied()
+ ):
logger.error(
"There is another Kolibri server running. "
"Please use `kolibri stop` and try again."
@@ -662,6 +661,17 @@ class KolibriProcessBus(ProcessBus):
reload_plugin = ProcessControlPlugin(self)
reload_plugin.subscribe()
+ def _kolibri_appears_to_be_running(self):
+ # Check if there are other kolibri instances running
+ # If there are, then we need to stop users from starting kolibri again.
+ pid, _, _, status = _read_pid_file(self.pid_file)
+ return status in IS_RUNNING and pid_exists(pid)
+
+ def _kolibri_main_port_is_occupied(self):
+ if not self.serve_http:
+ return False
+ return not port_is_available_on_host(self.listen_address, self.port)
+
def _port_check(self, port):
# In case that something other than Kolibri occupies the port,
# check the port's availability.
@@ -672,7 +682,7 @@ class KolibriProcessBus(ProcessBus):
if (
not os.environ.get("LISTEN_PID", None)
and port
- and not check_port_availability(self.listen_address, port)
+ and not port_is_available_on_host(self.listen_address, port)
):
# Port is occupied
logger.error(
|
learningequality/kolibri
|
a56ff920278f47d83b4a7d362c67e355b835e000
|
diff --git a/kolibri/utils/tests/test_server.py b/kolibri/utils/tests/test_server.py
index f55d44512c..45aef08567 100755
--- a/kolibri/utils/tests/test_server.py
+++ b/kolibri/utils/tests/test_server.py
@@ -288,11 +288,15 @@ class ServerInitializationTestCase(TestCase):
run_mock.assert_called()
@mock.patch("kolibri.utils.server.pid_exists")
- def test_server_running(self, pid_exists_mock, read_pid_file_mock):
+ @mock.patch("kolibri.utils.server.wait_for_free_port")
+ def test_server_running(
+ self, wait_for_port_mock, pid_exists_mock, read_pid_file_mock
+ ):
+ wait_for_port_mock.side_effect = OSError
pid_exists_mock.return_value = True
read_pid_file_mock.return_value = (1000, 8000, 8001, server.STATUS_RUNNING)
with self.assertRaises(SystemExit):
- server.start()
+ server.start(port=8000)
class ServerSignalHandlerTestCase(TestCase):
|
Windows app won't start up again if there's a leftover server.pid file
https://www.notion.so/learningequality/BUG-Windows-app-won-t-start-up-again-if-there-s-a-leftover-server-pid-file-b11d3d7450c14844bda03ee81f06f373
There is another Kolibri server running. Please use `kolibri stop` and try again.
Even though the PID recorded in the `server.pid` file doesn't exist anymore:
```
42412
8080
0
4
```
UPDATE: it seems that `_windows_pid_exists` is still returning `True` for the PID even though it no longer exists.
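The patch above loosens the startup guard so a stale `server.pid` alone no longer blocks startup: Kolibri only refuses to start when the pid file claims a running server *and* the HTTP port is actually occupied. A rough, hypothetical sketch of that combined check (not Kolibri's exact API):
```python
import socket


def port_is_available(host, port):
    """Return True if nothing is currently bound to (host, port)."""
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.bind((host, port))
        return True
    except OSError:
        return False
    finally:
        s.close()


def should_refuse_start(pid_file_says_running, host="127.0.0.1", port=8080):
    # A leftover server.pid is only trusted if something really occupies the port.
    return pid_file_says_running and not port_is_available(host, port)
```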
|
0.0
|
a56ff920278f47d83b4a7d362c67e355b835e000
|
[
"kolibri/utils/tests/test_server.py::TestServerServices::test_required_services_initiate_on_start",
"kolibri/utils/tests/test_server.py::TestServerServices::test_scheduled_jobs_persist_on_restart",
"kolibri/utils/tests/test_server.py::TestZeroConfPlugin::test_services_shutdown_on_stop",
"kolibri/utils/tests/test_server.py::TestZeroConfPlugin::test_required_services_initiate_on_start"
] |
[
"kolibri/utils/tests/test_server.py::ServerInitializationTestCase::test_port_zero_zip_port_zero",
"kolibri/utils/tests/test_server.py::ServerInitializationTestCase::test_unclean_shutdown",
"kolibri/utils/tests/test_server.py::ServerInitializationTestCase::test_port_occupied",
"kolibri/utils/tests/test_server.py::ServerInitializationTestCase::test_server_running",
"kolibri/utils/tests/test_server.py::ServerInitializationTestCase::test_port_occupied_socket_activation",
"kolibri/utils/tests/test_server.py::TestServerServices::test_services_shutdown_on_stop",
"kolibri/utils/tests/test_server.py::ServerSignalHandlerTestCase::test_signal_same_pid",
"kolibri/utils/tests/test_server.py::ServerSignalHandlerTestCase::test_signal_subscribe",
"kolibri/utils/tests/test_server.py::ServerSignalHandlerTestCase::test_signal_different_pid",
"kolibri/utils/tests/test_server.py::TestServerInstallation::test_apt",
"kolibri/utils/tests/test_server.py::TestServerInstallation::test_pex",
"kolibri/utils/tests/test_server.py::TestServerInstallation::test_whl",
"kolibri/utils/tests/test_server.py::TestServerInstallation::test_dpkg_version",
"kolibri/utils/tests/test_server.py::TestServerInstallation::test_dpkg",
"kolibri/utils/tests/test_server.py::TestServerInstallation::test_dev",
"kolibri/utils/tests/test_server.py::TestServerInstallation::test_windows"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-12-09 17:28:06+00:00
|
mit
| 3,528 |
|
learningequality__kolibri-8950
|
diff --git a/kolibri/core/tasks/job.py b/kolibri/core/tasks/job.py
index 766601a856..233c17094a 100644
--- a/kolibri/core/tasks/job.py
+++ b/kolibri/core/tasks/job.py
@@ -249,6 +249,8 @@ class Job(object):
raise ReferenceError(
"storage is not defined on this job, cannot update progress"
)
+ self.progress = progress
+ self.total_progress = total_progress
self.storage.update_job_progress(self.job_id, progress, total_progress)
def check_for_cancel(self):
@@ -268,6 +270,7 @@ class Job(object):
raise ReferenceError(
"storage is not defined on this job, cannot save as cancellable"
)
+ self.cancellable = cancellable
self.storage.save_job_as_cancellable(self.job_id, cancellable=cancellable)
@property
|
learningequality/kolibri
|
121a67267c35023bfc3350d411a3a06356bb8bd5
|
diff --git a/kolibri/core/tasks/test/test_job.py b/kolibri/core/tasks/test/test_job.py
index 692d84459f..39937231a3 100644
--- a/kolibri/core/tasks/test/test_job.py
+++ b/kolibri/core/tasks/test/test_job.py
@@ -7,7 +7,7 @@ from kolibri.core.tasks.job import RegisteredJob
class JobTest(TestCase):
def setUp(self):
- self.job = Job(id)
+ self.job = Job(id, track_progress=True)
self.job.storage = mock.MagicMock()
def test_job_save_as_cancellable(self):
@@ -18,6 +18,23 @@ class JobTest(TestCase):
self.job.job_id, cancellable=cancellable
)
+ def test_job_save_as_cancellable_sets_cancellable(self):
+ cancellable = not self.job.cancellable
+
+ self.job.save_as_cancellable(cancellable=cancellable)
+ self.assertEqual(self.job.cancellable, cancellable)
+
+ def test_job_update_progress_saves_progress_to_storage(self):
+ self.job.update_progress(0.5, 1.5)
+ self.job.storage.update_job_progress.assert_called_once_with(
+ self.job.job_id, 0.5, 1.5
+ )
+
+ def test_job_update_progress_sets_progress(self):
+ self.job.update_progress(0.5, 1.5)
+ self.assertEqual(self.job.progress, 0.5)
+ self.assertEqual(self.job.total_progress, 1.5)
+
def test_job_save_as_cancellable__skip(self):
cancellable = self.job.cancellable
self.job.save_as_cancellable(cancellable=cancellable)
|
Wrong progress on channel update
### Observed behavior
While trying to update the ASB channel on kolibridemo, I went through the wizard that showed me the version "changelog"; then, when I started the update task, I saw the progress jump to 100% right away:
<img width="786" alt="shows as 100pct right away" src="https://user-images.githubusercontent.com/163966/77856401-d20fc900-71c4-11ea-8cd1-c27d6fd72a66.png">
### Expected behavior
Progress bar steadily growing from 0 to 100%.
### User-facing consequences
Confusing, since the UI says 100% but the task is still running.
### Errors and logs
From the network tab, I saw polling of the [task api url](https://kolibridemo.learningequality.org/api/tasks/tasks/?1585503395967=1585503395967) with the following response:
```json
[
{
"status": "COMPLETED",
"exception": "None",
"cancellable": false,
"traceback": "",
"percentage": 0,
"id": "0a0a6712ad624b7ea563a220e4d3a16e"
},
{
"status": "COMPLETED",
"exception": "None",
"cancellable": false,
"traceback": "",
"percentage": 0,
"id": "ec68f2bf7e354406a20f4d981ae36945"
},
{
"status": "RUNNING",
"exception": "None",
"started_by_username": "admin",
"new_version": 24,
"node_ids": [
"529175b4246e525e8d709c66e5c4982c",
"a2cd0fded0245abba2075b4ec226fd7d",
"a6dbf2bf66155fc0afe256a891ad001d",
"a1f99346ef2854368f28085ca04e17be",
">>>>>> 6000+ more rows go here (redacted for brevity..... LOTS A JSON) <<<<<<<<<<<<<<<<<<<<",
"9e8b227b665c50ad9ec95521654e41c5",
"cc0695941b3654468fefdbdb396d07ae",
"b5a13b940a5e5407a32901875a548917"
],
"cancellable": true,
"traceback": "",
"baseurl": "https://studio.learningequality.org",
"exclude_node_ids": null,
"started_by": "8b4c6655776a9ab94f560c5748803a93",
"channel_id": "f9d3e0e46ea25789bbed672ff6a399ed",
"percentage": 1,
"peer_id": null,
"type": "UPDATECHANNEL",
"id": "2927be07345d44119e67bdebf666e1c9",
"channel_name": "African Storybook"
}
]
```
I'm not sure what the COMPLETED tasks are, but the `UPDATECHANNEL` channel task percentage shouldn't be 1.0.
Also, the `"node_ids"` field was a huge list of 6000+ entries, which seems like an odd thing to include in the status progress polling response, but it did not cause any noticeable slowdown. It's just text.
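For reference, the patch above (kolibri/core/tasks/job.py) makes `Job.update_progress` keep the in-memory attributes in sync with what it writes to storage. A small, hypothetical sketch of that pattern (simplified, not the real Job class):
```python
class JobSketch(object):
    def __init__(self, job_id, storage):
        self.job_id = job_id
        self.storage = storage
        self.progress = 0
        self.total_progress = 0

    def update_progress(self, progress, total_progress):
        # Mirror the values locally *and* persist them, so the in-memory job
        # and the stored job never report different numbers.
        self.progress = progress
        self.total_progress = total_progress
        self.storage.update_job_progress(self.job_id, progress, total_progress)
```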
### Steps to reproduce
- Given Kolibri with channel vN installed
- Publish v(N+1) of the channel on Studio with changed files
- Go through the Kolibri update channel wizard (should result in similar list of node_ids to update for all the changed files)
### Context
- https://kolibridemo.learningequality.org/
- Kolibri 0.13.1
- Chrome
|
0.0
|
121a67267c35023bfc3350d411a3a06356bb8bd5
|
[
"kolibri/core/tasks/test/test_job.py::JobTest::test_job_save_as_cancellable_sets_cancellable",
"kolibri/core/tasks/test/test_job.py::JobTest::test_job_update_progress_sets_progress"
] |
[
"kolibri/core/tasks/test/test_job.py::JobTest::test_job_save_as_cancellable__skip",
"kolibri/core/tasks/test/test_job.py::JobTest::test_job_save_as_cancellable",
"kolibri/core/tasks/test/test_job.py::JobTest::test_job_update_progress_saves_progress_to_storage",
"kolibri/core/tasks/test/test_job.py::JobTest::test_job_save_as_cancellable__no_storage",
"kolibri/core/tasks/test/test_job.py::TestRegisteredJob::test_enqueue_at",
"kolibri/core/tasks/test/test_job.py::TestRegisteredJob::test_enqueue_in",
"kolibri/core/tasks/test/test_job.py::TestRegisteredJob::test__ready_job",
"kolibri/core/tasks/test/test_job.py::TestRegisteredJob::test_enqueue",
"kolibri/core/tasks/test/test_job.py::TestRegisteredJob::test_constructor_sets_required_params"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_media"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-12-17 18:11:00+00:00
|
mit
| 3,529 |
|
learningequality__ricecooker-394
|
diff --git a/Makefile b/Makefile
index aabbbbd..4a0a854 100644
--- a/Makefile
+++ b/Makefile
@@ -59,6 +59,14 @@ test: clean-test ## run tests quickly with the default Python
test-all: clean-test ## run tests on every Python version with tox
tox
+integration-test:
+ echo "Testing against hotfixes"
+ CONTENTWORKSHOP_URL=https://hotfixes.studio.learningequality.org python tests/test_chef_integration.py
+ echo "Testing against unstable"
+ CONTENTWORKSHOP_URL=https://unstable.studio.learningequality.org python tests/test_chef_integration.py
+ echo "Testing against production"
+ CONTENTWORKSHOP_URL=https://studio.learningequality.org python tests/test_chef_integration.py
+
coverage: ## check code coverage quickly with the default Python
pip install coverage pytest
coverage run --source ricecooker -m pytest
diff --git a/ricecooker/classes/nodes.py b/ricecooker/classes/nodes.py
index 3f6e794..2f28609 100644
--- a/ricecooker/classes/nodes.py
+++ b/ricecooker/classes/nodes.py
@@ -463,6 +463,12 @@ class TreeNode(Node):
provider="",
tags=None,
domain_ns=None,
+ grade_levels=None,
+ resource_types=None,
+ learning_activities=None,
+ accessibility_labels=None,
+ categories=None,
+ learner_needs=None,
**kwargs
):
# Map parameters to model variables
@@ -478,6 +484,13 @@ class TreeNode(Node):
self.questions if hasattr(self, "questions") else []
) # Needed for to_dict method
+ self.grade_levels = grade_levels or []
+ self.resource_types = resource_types or []
+ self.learning_activities = learning_activities or []
+ self.accessibility_labels = accessibility_labels or []
+ self.categories = categories or []
+ self.learner_needs = learner_needs or []
+
super(TreeNode, self).__init__(title, **kwargs)
def get_domain_namespace(self):
@@ -569,12 +582,12 @@ class TreeNode(Node):
"copyright_holder": "",
"questions": [],
"extra_fields": json.dumps(self.extra_fields),
- "grade_levels": None,
- "resource_types": None,
- "learning_activities": None,
- "accessibility_categories": None,
- "subjects": None,
- "needs": None,
+ "grade_levels": self.grade_levels,
+ "resource_types": self.resource_types,
+ "learning_activities": self.learning_activities,
+ "accessibility_labels": self.accessibility_labels,
+ "categories": self.categories,
+ "learner_needs": self.learner_needs,
}
def validate(self):
@@ -686,12 +699,6 @@ class ContentNode(TreeNode):
**kwargs
):
self.role = role
- self.grade_levels = grade_levels
- self.resource_types = resource_types
- self.learning_activities = learning_activities
- self.accessibility_labels = accessibility_labels
- self.categories = categories
- self.learner_needs = learner_needs
self.set_license(
license, copyright_holder=copyright_holder, description=license_description
@@ -826,9 +833,9 @@ class ContentNode(TreeNode):
"grade_levels": self.grade_levels,
"resource_types": self.resource_types,
"learning_activities": self.learning_activities,
- "accessibility_categories": self.accessibility_labels,
- "subjects": self.categories,
- "needs": self.learner_needs,
+ "accessibility_labels": self.accessibility_labels,
+ "categories": self.categories,
+ "learner_needs": self.learner_needs,
}
|
learningequality/ricecooker
|
ecea76069def01bae2aff9a3656d5715d85144e2
|
diff --git a/tests/conftest.py b/tests/conftest.py
index aa15b77..a0c5f30 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -171,6 +171,12 @@ def base_data(channel_domain_namespace, title):
"license_description": None,
"aggregator": "", # New in ricecooker 0.6.20
"provider": "", # New in ricecooker 0.6.20
+ "grade_levels": [],
+ "resource_types": [],
+ "learning_activities": [],
+ "accessibility_labels": [],
+ "categories": [],
+ "learner_needs": [],
}
@@ -259,6 +265,12 @@ def contentnode_base_data(base_data):
"copyright_holder": "Copyright Holder",
"license_description": None,
"role": roles.LEARNER,
+ "grade_levels": [],
+ "resource_types": [],
+ "learning_activities": [],
+ "accessibility_labels": [],
+ "categories": [],
+ "learner_needs": [],
}
)
return data
diff --git a/tests/test_chef_integration.py b/tests/test_chef_integration.py
new file mode 100644
index 0000000..1d4d9ad
--- /dev/null
+++ b/tests/test_chef_integration.py
@@ -0,0 +1,132 @@
+#!/usr/bin/env python
+import random
+import string
+
+from le_utils.constants import licenses
+
+from ricecooker.chefs import SushiChef
+from ricecooker.classes.files import AudioFile
+from ricecooker.classes.files import DocumentFile
+from ricecooker.classes.files import VideoFile
+from ricecooker.classes.licenses import get_license
+from ricecooker.classes.nodes import AudioNode
+from ricecooker.classes.nodes import DocumentNode
+from ricecooker.classes.nodes import TopicNode
+from ricecooker.classes.nodes import VideoNode
+
+
+class TestChef(SushiChef):
+ """
+ Used as an integration test by actually using Ricecooker to chef local test content into Studio.
+
+ For anything you need to test, add it to the channel created in the `construct_channel`.
+
+ Copied from examples/tutorial/sushichef.py
+ """
+
+ # Be sure we don't conflict with a channel someone else pushed before us when running this test
+ # as the channel source domain and ID determine which Channel is updated on Studio and since
+ # you'll run this with your own API key we can use this random (enough) string generator (thanks SO)
+ # to append a random set of characters to the two values.
+ def randomstring():
+ return "".join(
+ random.choice(string.ascii_uppercase + string.digits) for _ in range(8)
+ )
+
+ channel_info = {
+ "CHANNEL_SOURCE_DOMAIN": "RicecookerIntegrationTest.{}".format(
+ randomstring()
+ ), # who is providing the content (e.g. learningequality.org)
+ "CHANNEL_SOURCE_ID": "RicecookerTests.{}".format(
+ randomstring()
+ ), # channel's unique id
+ "CHANNEL_TITLE": "Ricecooker Testing!",
+ "CHANNEL_LANGUAGE": "en",
+ }
+
+ # CONSTRUCT CHANNEL
+ def construct_channel(self, *args, **kwargs):
+ """
+ This method is reponsible for creating a `ChannelNode` object and
+ populating it with `TopicNode` and `ContentNode` children.
+ """
+ # Create channel
+ ########################################################################
+ channel = self.get_channel(*args, **kwargs) # uses self.channel_info
+
+ # Create topics to add to your channel
+ ########################################################################
+ # Here we are creating a topic named 'Example Topic'
+ exampletopic = TopicNode(source_id="topic-1", title="Example Topic")
+
+ # Now we are adding 'Example Topic' to our channel
+ channel.add_child(exampletopic)
+
+ # You can also add subtopics to topics
+ # Here we are creating a subtopic named 'Example Subtopic'
+ examplesubtopic = TopicNode(source_id="topic-1a", title="Example Subtopic")
+
+ # Now we are adding 'Example Subtopic' to our 'Example Topic'
+ exampletopic.add_child(examplesubtopic)
+
+ # Content
+ # You can add documents (pdfs and ePubs), videos, audios, and other content
+ # let's create a document file called 'Example PDF'
+ document_file = DocumentFile(path="http://www.pdf995.com/samples/pdf.pdf")
+ examplepdf = DocumentNode(
+ title="Example PDF",
+ source_id="example-pdf",
+ files=[document_file],
+ license=get_license(licenses.PUBLIC_DOMAIN),
+ )
+
+ # We are also going to add a video file called 'Example Video'
+ video_file = VideoFile(
+ path="https://ia600209.us.archive.org/27/items/RiceChef/Rice Chef.mp4"
+ )
+ fancy_license = get_license(
+ licenses.SPECIAL_PERMISSIONS,
+ description="Special license for ricecooker fans only.",
+ copyright_holder="The chef video makers",
+ )
+ examplevideo = VideoNode(
+ title="Example Video",
+ source_id="example-video",
+ files=[video_file],
+ license=fancy_license,
+ )
+
+ # Finally, we are creating an audio file called 'Example Audio'
+ audio_file = AudioFile(
+ path="https://ia802508.us.archive.org/5/items/testmp3testfile/mpthreetest.mp3"
+ )
+ exampleaudio = AudioNode(
+ title="Example Audio",
+ source_id="example-audio",
+ files=[audio_file],
+ license=get_license(licenses.PUBLIC_DOMAIN),
+ )
+
+ # Now that we have our files, let's add them to our channel
+ channel.add_child(examplepdf) # Adding 'Example PDF' to your channel
+ exampletopic.add_child(
+ examplevideo
+ ) # Adding 'Example Video' to 'Example Topic'
+ examplesubtopic.add_child(
+ exampleaudio
+ ) # Adding 'Example Audio' to 'Example Subtopic'
+
+ # the `construct_channel` method returns a ChannelNode that will be
+ # processed by the ricecooker framework
+ return channel
+
+
+if __name__ == "__main__":
+ """
+ This code will run when the sushi chef is called from the command line.
+ """
+ chef = TestChef()
+ print(
+ "Note that you will need your Studio API key for this. It will upload to your account."
+ )
+ chef.main()
diff --git a/tests/test_data.py b/tests/test_data.py
index d50d03b..4816941 100644
--- a/tests/test_data.py
+++ b/tests/test_data.py
@@ -145,10 +145,26 @@ def test_video_to_dict(video, video_data):
video_dict.pop("files")
expected_files = video_data.pop("files")
video_data["extra_fields"] = json.dumps(video_data["extra_fields"])
+
assert video.files == expected_files, "Video files do not match"
+
for key, _ in video_data.items():
assert key in video_dict, "Key {} is not found in to_dict method".format(key)
+
+ list_type_keys = [
+ "grade_levels",
+ "learner_needs",
+ "accessibility_labels",
+ "categories",
+ "learning_activities",
+ "resource_types",
+ ]
for key, value in video_dict.items():
+ if key in list_type_keys:
+ assert isinstance(value, list), "{} should be a list, but it's {}".format(
+ key, value
+ )
+
assert value == video_data.get(key), "Mismatched {}: {} != {}".format(
key, value, video_data[key]
)
|
TreeNode to_dict is incompatible with Studio unstable branch
* ricecooker version: v0.7.0-beta6
* Python version: 3.7
* Operating System: Fedora Silverblue 36
### Description
I am creating a ricecooker script which makes use of the new structured metadata fields in the `develop` branch. This script creates a tree of TopicNode and VideoNode objects.
This is all working almost correctly.
The video nodes end up with attributes like `node.grade_levels = [levels.LOWER_PRIMARY]`, and the [`ContentNode.to_dict()`](https://github.com/learningequality/ricecooker/blob/aa857d102fc19a066931a8fe97e4d869e6d0d606/ricecooker/classes/nodes.py#L797-L832) function for each video node produces the expected schema, with something like `{…, "grade_levels": "wnarlxKo", …}`.
The topic nodes do not have the structured metadata attributes. However, the [`TreeNode.to_dict()`](https://github.com/learningequality/ricecooker/blob/aa857d102fc19a066931a8fe97e4d869e6d0d606/ricecooker/classes/nodes.py#L545-L578) function produces output like `{…, "grade_levels": null, …}`.
This is incompatible with the code in Studio which validates attributes for nodes: https://github.com/learningequality/studio/blob/269e7e0b677c569c3a68c0a30a0d0fa342f190c0/contentcuration/contentcuration/views/internal.py#L667-L675. If a structured metadata attribute is included in the body, it _must_ be a list.
The result is a failed upload, with an error like the following:
```
(319)add_nodes()
318 ]
--> 319 for chunk in chunks:
320 payload_children = []
ipdb> response.content
b"['grade_levels must pass a list of values']"
ipdb> response.request.body
'{"root_id": "db6de12407f24f1c9e36752abd49ef2f", "content_data": [{"title": "Basketball", "language": "en", "description": "", "node_id": "8ed9e0083dc45373a385d2ab2d001132", "content_id": "ce34868e07ec5404a35de380ccd502dd", "source_domain": "379151f326fd5a0080473baf3c4c396e", "source_id": "basketball", "author": "", "aggregator": "", "provider": "", "files": [], "tags": [], "kind": "topic", "license": null, "license_description": null, "copyright_holder": "", "questions": [], "extra_fields": "{}", "grade_levels": null, "resource_types": null, "learning_activities": null, "accessibility_categories": null, "subjects": null, "needs": null}, {"title": "Soccer", "language": "en", "description": "", "node_id": "357293c10e8b59e5b383da07d62c6c90", "content_id": "030cbf9bbc8353bbbbb91d641b6318e9", "source_domain": "379151f326fd5a0080473baf3c4c396e", "source_id": "soccer", "author": "", "aggregator": "", "provider": "", "files": [], "tags": [], "kind": "topic", "license": null, "license_description": null, "copyright_holder": "", "questions": [], "extra_fields": "{}", "grade_levels": null, "resource_types": null, "learning_activities": null, "accessibility_categories": null, "subjects": null, "needs": null}, {"title": "Juggling", "language": "en", "description": "", "node_id": "a5f005393b7b5841a6b067a99779d7b5", "content_id": "ef04f09ef399570aba145495ddcce80a", "source_domain": "379151f326fd5a0080473baf3c4c396e", "source_id": "juggling", "author": "", "aggregator": "", "provider": "", "files": [], "tags": [], "kind": "topic", "license": null, "license_description": null, "copyright_holder": "", "questions": [], "extra_fields": "{}", "grade_levels": null, "resource_types": null, "learning_activities": null, "accessibility_categories": null, "subjects": null, "needs": null}]}'
```
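The patch above fixes this by defaulting the structured metadata fields to empty lists on `TreeNode`, so `to_dict()` never emits `null` for them. A minimal, hypothetical sketch of that defaulting pattern (simplified field set, not the full ricecooker node API):
```python
class TreeNodeSketch(object):
    def __init__(self, grade_levels=None, categories=None):
        # None (or any falsy value) becomes an empty list, which is what
        # Studio's "must pass a list of values" validation expects.
        self.grade_levels = grade_levels or []
        self.categories = categories or []

    def to_dict(self):
        return {
            "grade_levels": self.grade_levels,  # [] instead of null in the payload
            "categories": self.categories,
        }


assert TreeNodeSketch().to_dict() == {"grade_levels": [], "categories": []}
```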
|
0.0
|
ecea76069def01bae2aff9a3656d5715d85144e2
|
[
"tests/test_data.py::test_init",
"tests/test_data.py::test_validate",
"tests/test_data.py::test_topic_to_dict",
"tests/test_data.py::test_video_to_dict",
"tests/test_data.py::test_audio_to_dict",
"tests/test_data.py::test_document_to_dict",
"tests/test_data.py::test_html_to_dict",
"tests/test_data.py::test_exercise_to_dict",
"tests/test_data.py::test_slideshow_to_dict"
] |
[
"tests/test_data.py::test_alternative_domain_namespace",
"tests/test_data.py::test_channel_to_dict"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-09-22 19:13:03+00:00
|
mit
| 3,530 |
|
lebedov__msgpack-numpy-43
|
diff --git a/msgpack_numpy.py b/msgpack_numpy.py
index 48e6df5..38d89a2 100644
--- a/msgpack_numpy.py
+++ b/msgpack_numpy.py
@@ -88,11 +88,11 @@ def decode(obj, chain=None):
else:
descr = obj[b'type']
return np.frombuffer(obj[b'data'],
- dtype=np.dtype(descr)).reshape(obj[b'shape'])
+ dtype=_unpack_dtype(descr)).reshape(obj[b'shape'])
else:
descr = obj[b'type']
return np.frombuffer(obj[b'data'],
- dtype=np.dtype(descr))[0]
+ dtype=_unpack_dtype(descr))[0]
elif b'complex' in obj:
return complex(tostr(obj[b'data']))
else:
@@ -100,6 +100,18 @@ def decode(obj, chain=None):
except KeyError:
return obj if chain is None else chain(obj)
+def _unpack_dtype(dtype):
+ """
+ Unpack dtype descr, recursively unpacking nested structured dtypes.
+ """
+
+ if isinstance(dtype, (list, tuple)):
+ dtype = [
+ (name, _unpack_dtype(subdtype)) + tuple(rest)
+ for name, subdtype, *rest in dtype
+ ]
+ return np.dtype(dtype)
+
if msgpack.version < (1, 0, 0):
warnings.warn('support for msgpack < 1.0.0 will be removed in a future release',
DeprecationWarning)
|
lebedov/msgpack-numpy
|
aa480f65fdbfb006b86622f40748e60977fa104d
|
diff --git a/tests.py b/tests.py
index cb0ad55..f84876b 100644
--- a/tests.py
+++ b/tests.py
@@ -243,5 +243,44 @@ class test_numpy_msgpack(TestCase):
x_rec = self.encode_decode_thirdparty(x)
self.assertEqual(x, x_rec)
+ def test_numpy_structured_array(self):
+ structured_dtype = np.dtype([("a", float), ("b", int)])
+
+ x = np.empty((10,), dtype=structured_dtype)
+ x["a"] = np.arange(10)
+ x["b"] = np.arange(10)
+
+ x_rec = self.encode_decode(x)
+
+ assert_array_equal(x, x_rec)
+ self.assertEqual(x.dtype, x_rec.dtype)
+
+ def test_numpy_shaped_structured_array(self):
+ shaped_structured_dtype = np.dtype([("a", float, 3), ("b", int)])
+
+ x = np.empty((10,), dtype=shaped_structured_dtype)
+ x["a"] = np.arange(30).reshape(10, 3)
+ x["b"] = np.arange(10)
+
+ x_rec = self.encode_decode(x)
+
+ assert_array_equal(x, x_rec)
+ self.assertEqual(x.dtype, x_rec.dtype)
+
+ def test_numpy_nested_structured_array(self):
+ structured_dtype = np.dtype([("a", float), ("b", int)])
+ nested_dtype = np.dtype([("foo", structured_dtype), ("bar", structured_dtype)])
+
+ x = np.empty((10,), dtype=nested_dtype)
+ x["foo"]["a"] = np.arange(10)
+ x["foo"]["b"] = np.arange(10)
+ x["bar"]["a"] = np.arange(10) + 10
+ x["bar"]["b"] = np.arange(10) + 10
+
+ x_rec = self.encode_decode(x)
+
+ assert_array_equal(x, x_rec)
+ self.assertEqual(x.dtype, x_rec.dtype)
+
if __name__ == '__main__':
main()
|
Failures unpacking nested, structured dtypes.
`msgpack_numpy` fails when unpacking nested, structured dtypes.
See https://numpy.org/doc/stable/user/basics.rec.html for a description.
Example:
```python
structured_dtype = np.dtype([("a", float), ("b", int)])
nested_dtype = np.dtype([("foo", structured_dtype), ("bar", structured_dtype)])
x = np.empty((10,), dtype=nested_dtype)
x["foo"]["a"] = np.arange(10)
x["foo"]["b"] = np.arange(10)
x["bar"]["a"] = np.arange(10) + 10
x["bar"]["b"] = np.arange(10) + 10
unpackb(packb(x))
```
Fails with:
```
File "/home/alexford/asford/msgpack_numpy/msgpack-numpy/msgpack_numpy.py", line 260, in unpackb
return _unpackb(packed, **kwargs)
File "msgpack/_unpacker.pyx", line 195, in msgpack._cmsgpack.unpackb
File "/home/alexford/asford/msgpack_numpy/msgpack-numpy/msgpack_numpy.py", line 91, in decode
dtype=np.dtype(descr)).reshape(obj[b'shape'])
TypeError: Field elements must be 2- or 3-tuples, got '['a', '<f8']'
```
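The error happens because msgpack serializes the tuples inside `dtype.descr` as lists, and `np.dtype()` rejects lists for structured fields. The patch above rebuilds the tuples recursively before calling `np.dtype()`; here is a small self-contained sketch of the same idea, using a JSON round trip to stand in for msgpack's tuple-to-list conversion:
```python
import json

import numpy as np

structured = np.dtype([("a", float), ("b", int)])
nested = np.dtype([("foo", structured), ("bar", structured)])

# A serialization round trip turns the descr tuples into nested lists;
# np.dtype(descr_after_roundtrip) would raise the TypeError shown above.
descr_after_roundtrip = json.loads(json.dumps(nested.descr))


def rebuild(descr):
    # Recursively convert nested lists back into (name, subdtype[, shape]) tuples.
    if isinstance(descr, (list, tuple)):
        return np.dtype([(name, rebuild(sub), *rest) for name, sub, *rest in descr])
    return np.dtype(descr)


assert rebuild(descr_after_roundtrip) == nested
```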
|
0.0
|
aa480f65fdbfb006b86622f40748e60977fa104d
|
[
"tests.py::test_numpy_msgpack::test_numpy_nested_structured_array"
] |
[
"tests.py::test_numpy_msgpack::test_bin",
"tests.py::test_numpy_msgpack::test_chain",
"tests.py::test_numpy_msgpack::test_dict_complex",
"tests.py::test_numpy_msgpack::test_dict_float",
"tests.py::test_numpy_msgpack::test_dict_numpy_complex",
"tests.py::test_numpy_msgpack::test_dict_numpy_float",
"tests.py::test_numpy_msgpack::test_dict_str",
"tests.py::test_numpy_msgpack::test_list_float",
"tests.py::test_numpy_msgpack::test_list_float_complex",
"tests.py::test_numpy_msgpack::test_list_mixed",
"tests.py::test_numpy_msgpack::test_list_numpy_float",
"tests.py::test_numpy_msgpack::test_list_numpy_float_complex",
"tests.py::test_numpy_msgpack::test_list_str",
"tests.py::test_numpy_msgpack::test_numpy_array_complex",
"tests.py::test_numpy_msgpack::test_numpy_array_float",
"tests.py::test_numpy_msgpack::test_numpy_array_float_2d",
"tests.py::test_numpy_msgpack::test_numpy_array_float_2d_macos",
"tests.py::test_numpy_msgpack::test_numpy_array_mixed",
"tests.py::test_numpy_msgpack::test_numpy_array_noncontiguous",
"tests.py::test_numpy_msgpack::test_numpy_array_str",
"tests.py::test_numpy_msgpack::test_numpy_scalar_bool",
"tests.py::test_numpy_msgpack::test_numpy_scalar_complex",
"tests.py::test_numpy_msgpack::test_numpy_scalar_float",
"tests.py::test_numpy_msgpack::test_numpy_shaped_structured_array",
"tests.py::test_numpy_msgpack::test_numpy_structured_array",
"tests.py::test_numpy_msgpack::test_scalar_complex",
"tests.py::test_numpy_msgpack::test_scalar_float",
"tests.py::test_numpy_msgpack::test_str"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-09-16 02:04:27+00:00
|
bsd-3-clause
| 3,531 |
|
lebrice__SimpleParsing-164
|
diff --git a/simple_parsing/docstring.py b/simple_parsing/docstring.py
index 81fb1c2..81105c1 100644
--- a/simple_parsing/docstring.py
+++ b/simple_parsing/docstring.py
@@ -1,10 +1,11 @@
"""Utility for retrieveing the docstring of a dataclass's attributes
@author: Fabrice Normandin
"""
+from __future__ import annotations
+
import inspect
from dataclasses import dataclass
from logging import getLogger
-from typing import List, Optional, Type
logger = getLogger(__name__)
@@ -18,7 +19,9 @@ class AttributeDocString:
docstring_below: str = ""
-def get_attribute_docstring(some_dataclass: Type, field_name: str) -> AttributeDocString:
+def get_attribute_docstring(
+ dataclass: type, field_name: str, accumulate_from_bases: bool = True
+) -> AttributeDocString:
"""Returns the docstrings of a dataclass field.
NOTE: a docstring can either be:
- An inline comment, starting with <#>
@@ -26,58 +29,96 @@ def get_attribute_docstring(some_dataclass: Type, field_name: str) -> AttributeD
- A docstring on the following line, starting with either <\"\"\"> or <'''>
Arguments:
- some_dataclass {type} -- a dataclass
- field_name {str} -- the name of the field.
-
+ some_dataclass: a dataclass
+ field_name: the name of the field.
+ accumulate_from_bases: Whether to accumulate the docstring components by looking through the
+ base classes. When set to `False`, whenever one of the classes has a definition for the
+ field, it is directly returned. Otherwise, we accumulate the parts of the dodc
Returns:
AttributeDocString -- an object holding the three possible comments
"""
- try:
- source = inspect.getsource(some_dataclass)
- except (TypeError, OSError) as e:
- logger.debug(f"Couldn't find the attribute docstring: {e}")
+ created_docstring: AttributeDocString | None = None
+
+ mro = inspect.getmro(dataclass)
+ assert mro[0] is dataclass
+ assert mro[-1] is object
+ mro = mro[:-1]
+ for base_class in mro:
+ attribute_docstring = _get_attribute_docstring(base_class, field_name)
+ if not attribute_docstring:
+ continue
+ if not created_docstring:
+ created_docstring = attribute_docstring
+ if not accumulate_from_bases:
+ # We found a definition for that field in that class, so return it directly.
+ return created_docstring
+ else:
+ # Update the fields.
+ created_docstring.comment_above = (
+ created_docstring.comment_above or attribute_docstring.comment_above
+ )
+ created_docstring.comment_inline = (
+ created_docstring.comment_inline or attribute_docstring.comment_inline
+ )
+ created_docstring.docstring_below = (
+ created_docstring.docstring_below or attribute_docstring.docstring_below
+ )
+ if not created_docstring:
+ logger.debug(
+ RuntimeWarning(
+ f"Couldn't find the definition for field '{field_name}' within the dataclass "
+ f"{dataclass} or any of its base classes {','.join(t.__name__ for t in mro[1:])}."
+ )
+ )
return AttributeDocString()
+ return created_docstring
- code_lines: List[str] = source.splitlines()
- # the first line is the class definition, we skip it.
+
+def _get_attribute_docstring(dataclass: type, field_name: str) -> AttributeDocString | None:
+ """Gets the AttributeDocString of the given field in the given dataclass.
+ Doesn't inspect base classes.
+ """
+ try:
+ source = inspect.getsource(dataclass)
+ except (TypeError, OSError) as e:
+ logger.debug(
+ UserWarning(
+ f"Couldn't retrieve the source code of class {dataclass} "
+ f"(in order to retrieve the docstring of field {field_name}): {e}"
+ )
+ )
+ return None
+ # NOTE: We want to skip the docstring lines.
+ # NOTE: Currently, we just remove the __doc__ from the source. It's perhaps a bit crude,
+ # but it works.
+ if dataclass.__doc__ and dataclass.__doc__ in source:
+ source = source.replace(dataclass.__doc__, "\n", 1)
+ # note: does this remove the whitespace though?
+
+ code_lines: list[str] = source.splitlines()
+ # the first line is the class definition (OR the decorator!), we skip it.
start_line_index = 1
# starting at the second line, there might be the docstring for the class.
# We want to skip over that until we reach an attribute definition.
while start_line_index < len(code_lines):
- if _contains_attribute_definition(code_lines[start_line_index]):
+ if _contains_field_definition(code_lines[start_line_index]):
break
start_line_index += 1
- lines_with_attribute_defs = [
- (index, line)
- for index, line in enumerate(code_lines)
- if _contains_attribute_definition(line)
+ lines_with_field_defs = [
+ (index, line) for index, line in enumerate(code_lines) if _contains_field_definition(line)
]
- for i, line in lines_with_attribute_defs:
- parts: List[str] = line.split(":", maxsplit=1)
- if parts[0].strip() == field_name:
+ for i, line in lines_with_field_defs:
+ if _line_contains_definition_for(line, field_name):
# we found the line with the definition of this field.
comment_above = _get_comment_ending_at_line(code_lines, i - 1)
comment_inline = _get_inline_comment_at_line(code_lines, i)
docstring_below = _get_docstring_starting_at_line(code_lines, i + 1)
- complete_docstring = AttributeDocString(comment_above, comment_inline, docstring_below)
- return complete_docstring
-
- # we didn't find the attribute.
- mro = inspect.getmro(some_dataclass)
- if len(mro) == 1:
- raise RuntimeWarning(
- f"Couldn't find the given attribute name {field_name}' within the " "given class."
- )
- base_class = mro[1]
- try:
- return get_attribute_docstring(base_class, field_name)
- except OSError as e:
- logger.warning(UserWarning(f"Couldn't find the docstring: {e}"))
- return AttributeDocString()
+ return AttributeDocString(comment_above, comment_inline, docstring_below)
+ return None
-def _contains_attribute_definition(line_str: str) -> bool:
+def _contains_field_definition(line: str) -> bool:
"""Returns whether or not a line contains a an dataclass field definition.
Arguments:
@@ -85,18 +126,57 @@ def _contains_attribute_definition(line_str: str) -> bool:
Returns:
bool -- True if there is an attribute definition in the line.
+
+ >>> _contains_field_definition("a: int = 0")
+ True
+ >>> _contains_field_definition("a: int")
+ True
+ >>> _contains_field_definition("a: int # comment")
+ True
+ >>> _contains_field_definition("a: int = 0 # comment")
+ True
+ >>> _contains_field_definition("class FooBaz(Foo, Baz):")
+ False
+ >>> _contains_field_definition("a = 4")
+ False
+ >>> _contains_field_definition("fooooooooobar.append(123)")
+ False
+ >>> _contains_field_definition("{a: int}")
+ False
+ >>> _contains_field_definition(" foobaz: int = 123 #: The foobaz property")
+ True
+ >>> _contains_field_definition("a #:= 3")
+ False
"""
- parts = line_str.split("#", maxsplit=1)
- before_comment = parts[0].strip()
+ # Get rid of any comments first.
+ line, _, _ = line.partition("#")
- before_first_equal = before_comment.split("=", maxsplit=1)[0]
- parts = before_first_equal.split(":")
- if len(parts) != 2:
- # For now, I don't think it's possible to have a type annotation contain :
+ if ":" not in line:
return False
- attr_name = parts[0]
- attr_type = parts[1]
- return not attr_name.isspace() and not attr_type.isspace()
+
+ if "=" in line:
+ attribute_and_type, _, _ = line.partition("=")
+ else:
+ attribute_and_type = line
+
+ field_name, _, type = attribute_and_type.partition(":")
+ field_name = field_name.strip()
+ if ":" in type:
+ # weird annotation or dictionary?
+ return False
+ if not field_name:
+ # Empty attribute name?
+ return False
+ return field_name.isidentifier()
+
+
+def _line_contains_definition_for(line: str, field_name: str) -> bool:
+ line = line.strip()
+ if not _contains_field_definition(line):
+ return False
+ attribute, _, type_and_value_assignment = line.partition(":")
+ attribute = attribute.strip() # remove any whitespace after the attribute name.
+ return attribute.isidentifier() and attribute == field_name
def _is_empty(line_str: str) -> bool:
@@ -107,7 +187,7 @@ def _is_comment(line_str: str) -> bool:
return line_str.strip().startswith("#")
-def _get_comment_at_line(code_lines: List[str], line: int) -> str:
+def _get_comment_at_line(code_lines: list[str], line: int) -> str:
"""Gets the comment at line `line` in `code_lines`.
Arguments:
@@ -117,7 +197,7 @@ def _get_comment_at_line(code_lines: List[str], line: int) -> str:
str -- the comment at the given line. empty string if not present.
"""
line_str = code_lines[line]
- assert not _contains_attribute_definition(line_str)
+ assert not _contains_field_definition(line_str)
if "#" not in line_str:
return ""
parts = line_str.split("#", maxsplit=1)
@@ -125,7 +205,7 @@ def _get_comment_at_line(code_lines: List[str], line: int) -> str:
return comment
-def _get_inline_comment_at_line(code_lines: List[str], line: int) -> str:
+def _get_inline_comment_at_line(code_lines: list[str], line: int) -> str:
"""Gets the inline comment at line `line`.
Arguments:
@@ -135,7 +215,7 @@ def _get_inline_comment_at_line(code_lines: List[str], line: int) -> str:
str -- the inline comment at the given line, else an empty string.
"""
assert 0 <= line < len(code_lines)
- assert _contains_attribute_definition(code_lines[line])
+ assert _contains_field_definition(code_lines[line])
line_str = code_lines[line]
parts = line_str.split("#", maxsplit=1)
if len(parts) != 2:
@@ -144,18 +224,14 @@ def _get_inline_comment_at_line(code_lines: List[str], line: int) -> str:
return comment
-def _get_comment_ending_at_line(code_lines: List[str], line: int) -> str:
+def _get_comment_ending_at_line(code_lines: list[str], line: int) -> str:
start_line = line
end_line = line
- # print(f"Get comment ending at line {line}")
- # for i, l in enumerate(code_lines):
- # print(f"line {i}: {l}")
-
# move up the code, one line at a time, while we don't hit the start,
# an attribute definition, or the end of a docstring.
while start_line > 0:
line_str = code_lines[start_line]
- if _contains_attribute_definition(line_str):
+ if _contains_field_definition(line_str):
break # previous line is an assignment
if '"""' in line_str or "'''" in line_str:
break # previous line has a docstring
@@ -167,15 +243,15 @@ def _get_comment_ending_at_line(code_lines: List[str], line: int) -> str:
# print(f"line {i}: {code_lines[i]}")
if _is_empty(code_lines[i]):
continue
- assert not _contains_attribute_definition(code_lines[i])
+ assert not _contains_field_definition(code_lines[i])
comment = _get_comment_at_line(code_lines, i)
lines.append(comment)
- return "\n".join(lines)
+ return "\n".join(lines).strip()
-def _get_docstring_starting_at_line(code_lines: List[str], line: int) -> str:
+def _get_docstring_starting_at_line(code_lines: list[str], line: int) -> str:
i = line
- token: Optional[str] = None
+ token: str | None = None
triple_single = "'''"
triple_double = '"""'
# print("finding docstring starting from line", line)
@@ -185,9 +261,9 @@ def _get_docstring_starting_at_line(code_lines: List[str], line: int) -> str:
if line >= len(code_lines):
return ""
# the list of lines making up the docstring.
- docstring_contents: List[str] = []
+ docstring_contents: list[str] = []
- while i <= len(code_lines):
+ while i < len(code_lines):
line_str = code_lines[i]
# print(f"(docstring) line {line}: {line_str}")
@@ -197,7 +273,7 @@ def _get_docstring_starting_at_line(code_lines: List[str], line: int) -> str:
i += 1
continue
- elif _contains_attribute_definition(line_str) or _is_comment(line_str):
+ elif _contains_field_definition(line_str) or _is_comment(line_str):
# we haven't reached the start of a docstring yet (since token
# is None), and we reached a line with an attribute definition,
# or a comment, hence the docstring is empty.
|
lebrice/SimpleParsing
|
6fca2e9322f73af7fca0e39978619b1286438def
|
diff --git a/simple_parsing/helpers/hparams/hyperparameters_test.py b/simple_parsing/helpers/hparams/hyperparameters_test.py
index 23ac082..06d7c9c 100644
--- a/simple_parsing/helpers/hparams/hyperparameters_test.py
+++ b/simple_parsing/helpers/hparams/hyperparameters_test.py
@@ -265,6 +265,6 @@ def test_field_types():
assert C.get_priors()["f"].discrete is True
if numpy_installed:
- assert all(c.f.dtype == np.int for c in cs)
+ assert all(c.f.dtype == int for c in cs)
else:
assert all(all(isinstance(v, int) for v in c.f) for c in cs)
diff --git a/test/test_conflicts.py b/test/test_conflicts.py
index c2059ce..d1e5cab 100644
--- a/test/test_conflicts.py
+++ b/test/test_conflicts.py
@@ -74,10 +74,10 @@ def test_parent_child_conflict():
batch_size: int = 32
@dataclass
- class Parent(TestSetup):
+ class Parent2(TestSetup):
batch_size: int = 48
child: HParams = HParams()
- p: Parent = Parent.setup()
+ p: Parent2 = Parent2.setup()
assert p.child.batch_size == 32
assert p.batch_size == 48
diff --git a/test/test_docstrings.py b/test/test_docstrings.py
index e91ca5f..a4d5fa6 100644
--- a/test/test_docstrings.py
+++ b/test/test_docstrings.py
@@ -2,7 +2,7 @@ from dataclasses import dataclass
from typing import List
from simple_parsing import field
-from simple_parsing.docstring import get_attribute_docstring
+from simple_parsing.docstring import AttributeDocString, get_attribute_docstring
from .testutils import TestSetup
@@ -84,14 +84,95 @@ def test_docstring_parsing_works_on_extended():
def test_docstring_works_with_field_function():
@dataclass
- class Foo(TestSetup):
+ class UniqueFoo(TestSetup):
"""Some class Foo"""
# A sequence of tasks.
task_sequence: List[str] = field(choices=["train", "test", "ood"]) # side
"""Below"""
- docstring = get_attribute_docstring(Foo, "task_sequence")
+ docstring = get_attribute_docstring(UniqueFoo, "task_sequence")
assert docstring.comment_above == "A sequence of tasks."
assert docstring.comment_inline == "side"
assert docstring.docstring_below == "Below"
+
+
+def test_docstrings_with_multiple_inheritance():
+ """Test to reproduce issue 162: https://github.com/lebrice/SimpleParsing/issues/162"""
+
+ @dataclass
+ class Fooz:
+ bar: int = 123 #: The bar property
+
+ @dataclass
+ class Baz:
+ bat: int = 123 #: The bat property
+
+ @dataclass
+ class FooBaz(Fooz, Baz):
+ foobaz: int = 123 #: The foobaz property
+
+ assert get_attribute_docstring(FooBaz, "bar") == AttributeDocString(
+ comment_inline=": The bar property"
+ )
+ assert get_attribute_docstring(FooBaz, "bat") == AttributeDocString(
+ comment_inline=": The bat property"
+ )
+ assert get_attribute_docstring(FooBaz, "foobaz") == AttributeDocString(
+ comment_inline=": The foobaz property"
+ )
+
+
+def test_weird_docstring_with_field_like():
+ @dataclass
+ class FooA:
+ """
+ @dataclass
+ class weird:
+ bar: int = 123 # WRONG DOCSTRING
+ """
+
+ bar: int = 123 # The bar property
+
+ assert get_attribute_docstring(FooA, "bar") == AttributeDocString(
+ comment_inline="The bar property"
+ )
+
+
+def test_docstring_builds_upon_bases():
+ @dataclass
+ class Base2(TestSetup):
+ """
+ # WRONG ABOVE
+ bar: int = 333 # WRONG INLINE
+ '''WRONG DOCSTRING'''
+ """
+
+ bar: int = 123 # inline
+ """field docstring from base class"""
+
+ @dataclass
+ class FooB(Base2):
+ # Above
+ bar: int = 123 # The bar property
+
+ assert get_attribute_docstring(FooB, "bar") == AttributeDocString(
+ comment_inline="The bar property",
+ comment_above="Above",
+ docstring_below="field docstring from base class",
+ )
+
+ assert "field docstring from base class" in FooB.get_help_text()
+
+
+def test_getdocstring_bug():
+ @dataclass
+ class HParams:
+ batch_size: int = 32
+
+ @dataclass
+ class Parent(TestSetup):
+ batch_size: int = 48
+ child: HParams = HParams()
+
+ assert get_attribute_docstring(Parent, "child") == AttributeDocString()
|
Missing descriptions with multiple inheritance
**Describe the bug**
Some argument descriptions are missing when using multiple inheritance.
**To Reproduce**
```python
from simple_parsing import ArgumentParser
from dataclasses import dataclass
@dataclass
class Foo:
bar: int = 123 #: The bar property
@dataclass
class Baz:
bat: int = 123 #: The bat property
@dataclass
class FooBaz(Foo, Baz):
foobaz: int = 123 #: The foobaz property
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_arguments(FooBaz, "foobaz")
args = parser.parse_args()
foobaz: FooBaz = args.foobaz
print(foobaz)
```
**Expected behavior**
The help string for "bat" contains "The bat property".
**Actual behavior**
The help string for "bat" is empty.
```console
usage: foo.py [-h] [--bat int] [--bar int] [--foobaz int]
optional arguments:
-h, --help show this help message and exit
FooBaz ['foobaz']:
FooBaz(bat: int = 123, bar: int = 123, foobaz: int = 123)
--bat int (default: 123)
--bar int : The bar property (default: 123)
--foobaz int : The foobaz property (default: 123)
```
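The gap traces back to how the docstring lookup walked the class hierarchy: it recursed only into the first base class, so with multiple inheritance it followed FooBaz -> Foo -> object and never inspected Baz, where `bat` is defined. The fix iterates over the full method resolution order instead, which a quick check on the classes from the report illustrates (illustrative snippet, not library code):
```python
import inspect
from dataclasses import dataclass


@dataclass
class Foo:
    bar: int = 123  #: The bar property


@dataclass
class Baz:
    bat: int = 123  #: The bat property


@dataclass
class FooBaz(Foo, Baz):
    foobaz: int = 123  #: The foobaz property


# Every class that can define a field shows up in the MRO (object excluded);
# scanning all of them is what lets `bat` pick up its inline comment.
print([cls.__name__ for cls in inspect.getmro(FooBaz)[:-1]])
# -> ['FooBaz', 'Foo', 'Baz']
```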
**Desktop (please complete the following information):**
- Version 0.0.20
- Python version: 3.8.9
**Additional context**
None.
|
0.0
|
6fca2e9322f73af7fca0e39978619b1286438def
|
[
"test/test_docstrings.py::test_docstrings_with_multiple_inheritance",
"test/test_docstrings.py::test_weird_docstring_with_field_like",
"test/test_docstrings.py::test_docstring_builds_upon_bases"
] |
[
"simple_parsing/helpers/hparams/hyperparameters_test.py::test_clip_within_bounds",
"simple_parsing/helpers/hparams/hyperparameters_test.py::test_strict_bounds",
"simple_parsing/helpers/hparams/hyperparameters_test.py::test_nesting",
"simple_parsing/helpers/hparams/hyperparameters_test.py::test_choice_field",
"simple_parsing/helpers/hparams/hyperparameters_test.py::test_choice_field_with_values_of_a_weird_type",
"simple_parsing/helpers/hparams/hyperparameters_test.py::test_priors_with_shape",
"simple_parsing/helpers/hparams/hyperparameters_test.py::test_field_types",
"test/test_conflicts.py::test_arg_and_dataclass_with_same_name",
"test/test_conflicts.py::test_arg_and_dataclass_with_same_name_after_prefixing",
"test/test_conflicts.py::test_weird_hierarchy",
"test/test_conflicts.py::test_parent_child_conflict",
"test/test_docstrings.py::test_docstring_parsing_work_on_base",
"test/test_docstrings.py::test_docstring_parsing_works_on_extended",
"test/test_docstrings.py::test_docstring_works_with_field_function",
"test/test_docstrings.py::test_getdocstring_bug"
] |
{
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-09-22 20:08:32+00:00
|
mit
| 3,532 |
|
lebrice__SimpleParsing-220
|
diff --git a/simple_parsing/helpers/serialization/serializable.py b/simple_parsing/helpers/serialization/serializable.py
index 22bfb42..a3c15f3 100644
--- a/simple_parsing/helpers/serialization/serializable.py
+++ b/simple_parsing/helpers/serialization/serializable.py
@@ -361,6 +361,20 @@ def load(
) -> Dataclass:
"""Loads an instance of `cls` from the given file.
+ First, `load_fn` is used to get a potentially nested dictionary of python primitives from a
+ file. Then, a decoding function is applied to each value, based on the type annotation of the
+ corresponding field. Finally, the resulting dictionary is used to instantiate an instance of
+ the dataclass `cls`.
+
+ - string -> `load_fn` (json/yaml/etc) -> dict with "raw" python values -> decode -> \
+ dict with constructor arguments -> `cls`(**dict) -> instance of `cls`
+
+ NOTE: This does not save the types of the dataclass fields. This is usually not an issue, since
+ we can recover the right type to use by looking at subclasses of the annotated type. However,
+ in some cases (e.g. subgroups), it might be useful to save all the types of all the
+ fields, in which case you should probably use something like `yaml.dump`, directly passing it
+ the dataclass, instead of this.
+
Args:
cls (Type[D]): A dataclass type to load.
path (Path | str): Path or Path string or open file.
@@ -529,7 +543,12 @@ def read_file(path: str | Path) -> dict:
def save(obj: Any, path: str | Path, dump_fn: Callable[[dict, IO], None] | None = None) -> None:
- """Save the given dataclass or dictionary to the given file."""
+ """Save the given dataclass or dictionary to the given file.
+
+ Note: The `encode` function is applied to all the object fields to get serializable values,
+ like so:
+ - obj -> encode -> "raw" values (dicts, strings, ints, etc) -> `dump_fn` ([json/yaml/etc].dumps) -> string
+ """
path = Path(path)
if not isinstance(obj, dict):
|
lebrice/SimpleParsing
|
10495b71f3c974ce76eb01fd4bc18deeb99e2c4f
|
diff --git a/test/helpers/test_enum_serialization.py b/test/helpers/test_enum_serialization.py
new file mode 100644
index 0000000..8ae3b11
--- /dev/null
+++ b/test/helpers/test_enum_serialization.py
@@ -0,0 +1,138 @@
+import textwrap
+from dataclasses import dataclass, field
+from enum import Enum
+from pathlib import Path
+from typing import List, Optional
+
+import pytest
+import yaml
+
+from simple_parsing import Serializable
+from simple_parsing.helpers.serialization.serializable import dumps_yaml, loads_yaml
+
+
+class LoggingTypes(Enum):
+ JSONL = "jsonl"
+ PRINT = "print"
+
+
+@dataclass
+class Hparams:
+ seed: int = 13
+ xyz: List[LoggingTypes] = field(default_factory=list)
+
+
+@dataclass
+class Parameters(Serializable):
+ hparams: Hparams = field(default_factory=Hparams)
+ p: Optional[Path] = None
+
+
[email protected](autouse=True)
+def clear_get_decoding_fn_cache():
+ """Clears the lru_cache of the `get_decoding_fn` function between tests.
+ This avoids any interference between runs."""
+ from simple_parsing.helpers.serialization.decoding import get_decoding_fn
+
+ get_decoding_fn.cache_clear()
+
+
[email protected](
+ raises=KeyError, match="'jsonl'", strict=True, reason="Enums are saved by name, not by value."
+)
+def test_decode_enum_saved_by_value_doesnt_work():
+ """Test to reproduce https://github.com/lebrice/SimpleParsing/issues/219#issuecomment-1437817369"""
+ with open("conf.yaml", "w") as f:
+ f.write(
+ textwrap.dedent(
+ """\
+ p: /tmp
+ hparams:
+ xyz:
+ - jsonl
+ """
+ )
+ )
+
+ file_config = Parameters.load_yaml("conf.yaml")
+ assert file_config == Parameters(hparams=Hparams(xyz=[LoggingTypes.JSONL]), p=Path("/tmp"))
+
+
+def test_decode_enum_saved_by_name():
+ with open("conf.yaml", "w") as f:
+ f.write(
+ textwrap.dedent(
+ """\
+ p: /tmp
+ hparams:
+ xyz:
+ - JSONL
+ """
+ )
+ )
+ file_config = Parameters.load("conf.yaml", load_fn=yaml.safe_load)
+ assert file_config == Parameters(hparams=Hparams(xyz=[LoggingTypes.JSONL]), p=Path("/tmp"))
+
+
+def test_round_trip():
+ p = Parameters(hparams=Hparams(xyz=[LoggingTypes.JSONL]), p=Path("/tmp"))
+ assert loads_yaml(Parameters, dumps_yaml(p)) == p
+ assert dumps_yaml(loads_yaml(Parameters, dumps_yaml(p))) == dumps_yaml(p)
+
+
+def test_decode_enum_saved_by_value_using_register():
+ from simple_parsing.helpers.serialization.decoding import register_decoding_fn
+ from simple_parsing.helpers.serialization.encoding import encode
+
+ register_decoding_fn(LoggingTypes, LoggingTypes)
+ encode.register(LoggingTypes, lambda x: x.value)
+
+ with open("conf.yaml", "w") as f:
+ f.write(
+ textwrap.dedent(
+ """\
+ p: /tmp
+ hparams:
+ xyz:
+ - jsonl
+ """
+ )
+ )
+
+ file_config = Parameters.load_yaml("conf.yaml")
+ assert file_config == Parameters(hparams=Hparams(xyz=[LoggingTypes.JSONL]), p=Path("/tmp"))
+
+
+def test_decode_enum_saved_by_value_using_field():
+ from simple_parsing.helpers import field
+
+ @dataclass
+ class HparamsWithField:
+ seed: int = 13
+ xyz: List[LoggingTypes] = field(
+ encoding_fn=lambda x: [e.value for e in x],
+ decoding_fn=lambda str_list: [LoggingTypes(e) for e in str_list],
+ default_factory=list,
+ )
+
+ @dataclass
+ class ParametersWithField(Serializable):
+ hparams: HparamsWithField = field(default_factory=HparamsWithField)
+ p: Optional[Path] = None
+
+ with open("conf.yaml", "w") as f:
+ f.write(
+ textwrap.dedent(
+ """\
+ p: /tmp
+ hparams:
+ xyz:
+ - jsonl
+ """
+ )
+ )
+
+ file_config = ParametersWithField.load_yaml("conf.yaml")
+ assert file_config == ParametersWithField(
+ hparams=HparamsWithField(xyz=[LoggingTypes.JSONL]), p=Path("/tmp")
+ )
|
How to get rid of a warning for each used `dataclass`
**Describe the bug**
For each dataclass I use I get a warning like:
> .../python3.8/site-packages/simple_parsing/helpers/serialization/decoding.py:249: UserWarning: Unable to find a decoding function for the annotation <class '__main__.Hparams'> (of type <class 'type'>). Will try to use the type as a constructor. Consider registering a decoding function using `register_decoding_fn`, or posting an issue on GitHub.
> warnings.warn(
**To Reproduce**
I reduced the problem to just this:
```python
from dataclasses import dataclass
import yaml
from simple_parsing import ArgumentParser, Serializable
@dataclass
class Hparams:
seed: int = 13
@dataclass
class Parameters(Serializable):
hparams: Hparams = Hparams()
if __name__ == "__main__":
with open("conf.yaml", "w") as f: f.write("hparams:")
file_config = Parameters.load("conf.yaml", load_fn=yaml.safe_load)
```
Run:
```
$ python config.py
/home/stas/anaconda3/envs/py38-pt113/lib/python3.8/site-packages/simple_parsing/helpers/serialization/decoding.py:249:
UserWarning: Unable to find a decoding function for the annotation <class '__main__.Hparams'> (of type <class 'type'>). Will try to
use the type as a constructor. Consider registering a decoding function using `register_decoding_fn`, or posting an issue on GitHub.
warnings.warn(
```
It'd be awesome to receive advice on how to resolve this warning as we have a whole slew of those.
I think it has to be one of:
```
register_decoding_fn(Hparams, type)
register_decoding_fn(Hparams, Hparams)
```
may be `dataclasses.asdict`?
Additionally a few issues that you might kindly consider:
1. I think this library surely should know how to decode a `dataclass` type of class, since it's used everywhere in the documentation and there is not a single `register_decoding_fn` used for such classes. i.e. is this a documentation issue, or the authors don't mind the warnings?
I dug a bit into `dataclasses` and the check is:
```
dataclasses.is_dataclass(Hparams)
```
so auto-recognizing `dataclass` objects should be very helpful, no?
2. I'd also like to highlight that this warnings isn't very actionable as the user doesn't know what `<class 'type'>` means or if they do how to act on it. Surely, it comes from `type(Hparams)` - but how does it help to the user to fix the problem? i.e. would it be possible to guide the user from the problem to the solution w/o needing to file an issue? Perhaps writing a small section with examples? I diligently spent an hour reading your docs and a bit the code but I am still not sure if I'm doing it right.
3. The real complex program (not the repro) works despite those warnings. Is it possible that these warnings are misplaced and shouldn't be emitted in the first place?
If it's a problem please assert, if it is not a problem then why emit a warning? Perhaps it'd help to understand the situation better with an example where `type` as a fallback doesn't work. But please feel free to ignore this curiosity question and let's focus on the actual solving of the problem first if this doesn't resonate.
---------------------
now let's go to a non-dataclass classes:
The real program also emitted this warning about `pathlib` (not in the repro example), and this code removed the warning
```
register_decoding_fn(Path, str)
```
But I'm not at all sure if I did it correctly, does it want a constructor? `str` would be more of an encoding_fn - does it want:
```
register_decoding_fn(Path, Path)
```
I think it'd also help the user to have the context, what are you decoding from - is it a pickled string to object situation? or object to string?
Again, having a section with examples of common and custom classes would be very helpful and the warning could send the user to that document.
Thank you!
**Desktop (please complete the following information):**
- Version: 0.0.20 / 0.0.21.post1
- Python version: 3.8
|
0.0
|
10495b71f3c974ce76eb01fd4bc18deeb99e2c4f
|
[
"test/helpers/test_enum_serialization.py::test_decode_enum_saved_by_name[verbose]"
] |
[
"test/helpers/test_enum_serialization.py::test_round_trip[simple]",
"test/helpers/test_enum_serialization.py::test_decode_enum_saved_by_value_using_field[simple]",
"test/helpers/test_enum_serialization.py::test_decode_enum_saved_by_value_using_register[verbose]",
"test/helpers/test_enum_serialization.py::test_decode_enum_saved_by_value_using_field[verbose]",
"test/helpers/test_enum_serialization.py::test_round_trip[verbose]",
"test/helpers/test_enum_serialization.py::test_decode_enum_saved_by_value_using_register[simple]"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2023-02-24 04:51:01+00:00
|
mit
| 3,533 |
|
lebrice__SimpleParsing-50
|
diff --git a/simple_parsing/conflicts.py b/simple_parsing/conflicts.py
index 3d7af91..8efd80c 100644
--- a/simple_parsing/conflicts.py
+++ b/simple_parsing/conflicts.py
@@ -119,6 +119,8 @@ class ConflictResolver:
else:
field_wrappers.extend(w.fields)
+ # TODO: #49: Also consider the conflicts with regular argparse arguments.
+
conflicts: Dict[str, List[FieldWrapper]] = defaultdict(list)
for field_wrapper in field_wrappers:
for option_string in field_wrapper.option_strings:
diff --git a/simple_parsing/wrappers/dataclass_wrapper.py b/simple_parsing/wrappers/dataclass_wrapper.py
index 5cc9785..62b3826 100644
--- a/simple_parsing/wrappers/dataclass_wrapper.py
+++ b/simple_parsing/wrappers/dataclass_wrapper.py
@@ -24,7 +24,8 @@ class DataclassWrapper(Wrapper[Dataclass]):
# super().__init__(dataclass, name)
self.dataclass = dataclass
self._name = name
- self.default = default
+ self.default = default
+ self.prefix = prefix
self.fields: List[FieldWrapper] = []
self._destinations: List[str] = []
@@ -75,7 +76,7 @@ class DataclassWrapper(Wrapper[Dataclass]):
else:
# a normal attribute
- field_wrapper = FieldWrapper(field, parent=self)
+ field_wrapper = FieldWrapper(field, parent=self, prefix=self.prefix)
logger.debug(f"wrapped field at {field_wrapper.dest} has a default value of {field_wrapper.default}")
self.fields.append(field_wrapper)
|
lebrice/SimpleParsing
|
5aa7bb01e12308ddfa68f306c25fb20dfe7ac972
|
diff --git a/test/conftest.py b/test/conftest.py
index d821c60..3fe8689 100644
--- a/test/conftest.py
+++ b/test/conftest.py
@@ -48,6 +48,15 @@ def simple_attribute(request):
logging.debug(f"Attribute type: {some_type}, passed value: '{passed_value}', expected: '{expected_value}'")
return request.param
[email protected]
+def assert_equals_stdout(capsys):
+ def strip(string): return "".join(string.split())
+
+ def should_equal(expected: str, file_path: str=None):
+ out = capsys.readouterr().out
+ assert strip(out) == strip(expected), file_path
+ return should_equal
+
@pytest.fixture(scope="module")
def parser():
diff --git a/test/test_issue_46.py b/test/test_issue_46.py
new file mode 100644
index 0000000..5538fff
--- /dev/null
+++ b/test/test_issue_46.py
@@ -0,0 +1,99 @@
+from dataclasses import dataclass
+import simple_parsing
+import textwrap
+import pytest
+
+
+@dataclass
+class JBuildRelease:
+ id: int
+ url: str
+ docker_image: str
+
+
+def test_issue_46(assert_equals_stdout):
+ parser = simple_parsing.ArgumentParser()
+ parser.add_argument('--run_id', type=str)
+ parser.add_arguments(JBuildRelease, dest="jbuild", prefix="jbuild")
+
+ parser.print_help()
+
+ assert_equals_stdout(textwrap.dedent("""\
+ usage: pytest [-h] [--run_id str] --jbuildid int --jbuildurl str
+ --jbuilddocker_image str
+
+ optional arguments:
+ -h, --help show this help message and exit
+ --run_id str
+
+ JBuildRelease ['jbuild']:
+ JBuildRelease(id:int, url:str, docker_image:str)
+
+ --jbuildid int
+ --jbuildurl str
+ --jbuilddocker_image str
+ """
+ ))
+ from .testutils import raises_missing_required_arg
+ with raises_missing_required_arg():
+ args = parser.parse_args("--id 123 --jbuild.id 456 --jbuild.url bob --jbuild.docker_image foo".split())
+
+
+def test_issue_46_solution2(assert_equals_stdout):
+ # This (now) works:
+ parser = simple_parsing.ArgumentParser(add_dest_to_option_strings=True)
+ parser.add_argument('--run_id', type=str)
+ parser.add_arguments(JBuildRelease, dest="jbuild", prefix="jbuild.")
+
+ parser.print_help()
+ assert_equals_stdout(textwrap.dedent("""\
+ usage: pytest [-h] [--run_id str] --jbuild.id int --jbuild.url str
+ --jbuild.docker_image str
+
+ optional arguments:
+ -h, --help show this help message and exit
+ --run_id str
+
+ JBuildRelease ['jbuild']:
+ JBuildRelease(id:int, url:str, docker_image:str)
+
+ --jbuild.id int
+ --jbuild.url str
+ --jbuild.docker_image str
+ """
+ ))
+
+
[email protected](reason="TODO: Issue #49")
+def test_conflict_with_regular_argparse_arg():
+ # This _should_ work, but it doesn't, adding a new issue for this:
+ # the problem: SimpleParsing doesn't yet detect
+ # conflicts between arguments added the usual way with `add_argument` and those
+ # added through `add_arguments`.
+ parser = simple_parsing.ArgumentParser()
+ parser.add_argument('--id', type=str)
+ parser.add_arguments(JBuildRelease, dest="jbuild")
+ args = parser.parse_args("--id 123 --jbuild.id 456 --jbuild.url bob --jbuild.docker_image foo".split())
+ assert args.id == 123
+ assert args.jbuild.id == 456
+
+
[email protected](reason="TODO: Issue #49")
+def test_workaround():
+ from simple_parsing import mutable_field, ConflictResolution
+ # This also doesn't work, since the prefix is only added to the 'offending'
+ # argument, rather than to all the args in that group.
+ @dataclass
+ class Main:
+ id: int
+ jbuild: JBuildRelease
+
+ parser = simple_parsing.ArgumentParser()
+ parser.add_arguments(Main, "main")
+ args = parser.parse_args("--id 123 --jbuild.id 456 --jbuild.url bob --jbuild.docker_image foo".split())
+ args = args.main
+ assert args.id == 123
+ assert args.jbuild.id == 456
+
+
+
\ No newline at end of file
|
Nested parameter name with dataclass
Hi, great library, thanks!
I have an issue with nested parser from dataclass.
When trying to create a parser from a dataclass
```
@dataclass
class JBuildRelease:
id: int
url: str
docker_image: str
parser = simple_parsing.ArgumentParser()
parser.add_argument('--run_id', type=str)
parser.add_arguments(JBuildRelease, dest='jbuild', prefix='jbuild')
args = parser.parse_args()
```
I get the following:
> usage: release.py [-h] [--run_id str] --id int --url str --docker_image str
> release.py: error: the following arguments are required: --id, --url, --docker_image
>
I wish the arguments would be prefixed like so: --jbuild.id 1 --jbuild.url url --jbuild.docker_image image
Is this flow supported?
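Judging by the fix (which now passes `prefix` down to every `FieldWrapper`) and by `test_issue_46_solution2` in the accompanying test patch, it is; the dotted flavour of the flow looks like the sketch below (names taken from the report, behaviour as exercised by that test):
```python
from dataclasses import dataclass

import simple_parsing


@dataclass
class JBuildRelease:
    id: int
    url: str
    docker_image: str


# With the prefix propagated to each field and the dest added to the option
# strings, the help lists --jbuild.id, --jbuild.url and --jbuild.docker_image
# instead of the bare --id / --url / --docker_image.
parser = simple_parsing.ArgumentParser(add_dest_to_option_strings=True)
parser.add_argument("--run_id", type=str)
parser.add_arguments(JBuildRelease, dest="jbuild", prefix="jbuild.")
parser.print_help()
```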
|
0.0
|
5aa7bb01e12308ddfa68f306c25fb20dfe7ac972
|
[
"test/test_issue_46.py::test_issue_46",
"test/test_issue_46.py::test_issue_46_solution2"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-03-03 01:44:37+00:00
|
mit
| 3,534 |