instance_id (string, 10–57 chars) | patch (string, 261–37.7k chars) | repo (string, 7–53 chars) | base_commit (string, 40 chars) | hints_text (string, 301 classes) | test_patch (string, 212–2.22M chars) | problem_statement (string, 23–37.7k chars) | version (string, 1 class) | environment_setup_commit (string, 40 chars) | FAIL_TO_PASS (list, 1–4.94k items) | PASS_TO_PASS (list, 0–7.82k items) | meta (dict) | created_at (string, 25 chars) | license (string, 8 classes) | __index_level_0__ (int64, 0–6.41k) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
mosquito__aiohttp-xmlrpc-38
|
diff --git a/aiohttp_xmlrpc/__init__.py b/aiohttp_xmlrpc/__init__.py
index 9aa8c62..8aa33bb 100644
--- a/aiohttp_xmlrpc/__init__.py
+++ b/aiohttp_xmlrpc/__init__.py
@@ -11,7 +11,7 @@ author_info = [
("Dmitry Orlov", "[email protected]"),
]
-version_info = (0, 9, 0)
+version_info = (1, 0, 0)
description = "aiohttp XML-RPC server handler and client"
__version__ = ".".join(map(str, version_info))
diff --git a/aiohttp_xmlrpc/handler.py b/aiohttp_xmlrpc/handler.py
index 0be170e..f0d55db 100644
--- a/aiohttp_xmlrpc/handler.py
+++ b/aiohttp_xmlrpc/handler.py
@@ -1,4 +1,8 @@
+import asyncio
+import inspect
import logging
+from abc import ABCMeta
+from types import MappingProxyType
from aiohttp.web import HTTPBadRequest, HTTPError, Response, View
from lxml import etree
@@ -10,9 +14,54 @@ from .common import awaitable, py2xml, schema, xml2py
log = logging.getLogger(__name__)
-class XMLRPCView(View):
+# noinspection PyUnresolvedReferences
+class XMLRPCViewMeta(ABCMeta):
+ def __new__(cls, clsname, superclasses, attributedict):
+ mapping_key = "__method_arg_mapping__"
+ allowed_key = "__allowed_methods__"
+ attributedict[mapping_key] = dict()
+ attributedict[allowed_key] = dict()
+
+ for superclass in superclasses:
+ attributedict[mapping_key].update(
+ getattr(superclass, mapping_key, {}),
+ )
+
+ instance = super(XMLRPCViewMeta, cls).__new__(
+ cls, clsname, superclasses, attributedict,
+ )
+
+ argmapping = getattr(instance, mapping_key)
+ allowed_methods = getattr(instance, allowed_key)
+
+ for key in attributedict.keys():
+ if not key.startswith(instance.METHOD_PREFIX):
+ continue
+
+ value = getattr(instance, key)
+ method_name = key.replace(instance.METHOD_PREFIX, "", 1)
+ allowed_methods[method_name] = key
+ argmapping[method_name] = inspect.getargspec(value)
+
+ setattr(
+ instance,
+ mapping_key,
+ MappingProxyType(argmapping),
+ )
+
+ setattr(
+ instance,
+ allowed_key,
+ MappingProxyType(allowed_methods),
+ )
+
+ return instance
+
+
+class XMLRPCView(View, metaclass=XMLRPCViewMeta):
METHOD_PREFIX = "rpc_"
DEBUG = False
+ THREAD_POOL_EXECUTOR = None
async def post(self, *args, **kwargs):
try:
@@ -37,25 +86,25 @@ class XMLRPCView(View):
response.body = xml_data
return response
- def _parse_body(self, body):
+ async def _parse_body(self, body):
+ loop = asyncio.get_event_loop()
try:
- return self._parse_xml(body)
+ return await loop.run_in_executor(
+ self.THREAD_POOL_EXECUTOR,
+ self._parse_xml,
+ body,
+ )
except etree.DocumentInvalid:
raise HTTPBadRequest
+ # noinspection PyUnresolvedReferences
def _lookup_method(self, method_name):
- method = getattr(self, "{0}{1}".format(self.METHOD_PREFIX, method_name), None)
-
- if not callable(method):
- log.warning(
- "Can't find method %s%s in %r",
- self.METHOD_PREFIX,
- method_name,
- self.__class__.__name__,
+ if method_name not in self.__allowed_methods__:
+ raise exceptions.ApplicationError(
+ "Method %r not found" % method_name,
)
- raise exceptions.ApplicationError("Method %r not found" % method_name)
- return method
+ return awaitable(getattr(self, self.__allowed_methods__[method_name]))
def _check_request(self):
if "xml" not in self.request.headers.get("Content-Type", ""):
@@ -65,7 +114,7 @@ class XMLRPCView(View):
self._check_request()
body = await self.request.read()
- xml_request = self._parse_body(body)
+ xml_request = await self._parse_body(body)
method_name = xml_request.xpath("//methodName[1]")[0].text
method = self._lookup_method(method_name)
@@ -87,12 +136,12 @@ class XMLRPCView(View):
),
)
- if args and isinstance(args[-1], dict):
+ kwargs = {}
+ argspec = self.__method_arg_mapping__[method_name]
+ if argspec.keywords and isinstance(args[-1], dict):
kwargs = args.pop(-1)
- else:
- kwargs = {}
- result = await awaitable(method)(*args, **kwargs)
+ result = await method(*args, **kwargs)
return self._format_success(result)
@staticmethod
|
mosquito/aiohttp-xmlrpc
|
4bcaed5c463a0f3f338b6282f9f299094b25440c
|
diff --git a/tests/test_handler.py b/tests/test_handler.py
index 9bc8ef4..741adc5 100644
--- a/tests/test_handler.py
+++ b/tests/test_handler.py
@@ -43,6 +43,12 @@ class XMLRPCMain(handler.XMLRPCView):
loop.call_soon(f.set_result, 42)
return f
+ def rpc_dict_args(self, a, b, d):
+ return (a, b, d)
+
+ def rpc_dict_kwargs(self, d, **kw):
+ return (d, kw)
+
def create_app(loop):
app = web.Application()
@@ -174,3 +180,11 @@ async def test_9_datetime(test_client):
async def test_10_future(client):
result = await client.future()
assert result == 42
+
+
+async def test_11_dict_args(client):
+ result = await client.dict_args(41, 42, {"foo": "bar"})
+ assert result == [41, 42, {"foo": "bar"}]
+
+ result = await client.dict_kwargs({"foo": "bar"}, spam="egg")
+ assert result == [{"foo": "bar"}, {"spam": "egg"}]
|
kwargs parsing seems incorrect
Hi,
This is a great project and exactly what I am looking for. However, when I ran some tests against the server code, it responded inconsistently with Python's native xmlrpc library. Here are my parameters:
```
<param>
<value>
<i4>100</i4>
</value>
</param>
<param>
<value>
<i4>200</i4>
</value>
</param>
<param>
<value>
<struct>
<member>
<name>key1</name>
<value>
<i4>1</i4>
</value>
</member>
<member>
<name>key2</name>
<value>
<i4>2</i4>
</value>
</member>
</struct>
</value>
</param>
```
aiohttp-xmlrpc parses this as a positional argument list with 2 elements (100 and 200) plus one keyword-argument dict with 2 keys (`key1` and `key2`), while Python 3's native xmlrpc parses it as a single argument list with 3 elements (`(100, 200, {'key1': 1, 'key2': 2})`).
Here is the code I use for native xmlrpc:
```
from xmlrpc.server import SimpleXMLRPCServer
def ping():
return 'Pong'
def echo( *args, **kwargs):
return (args, kwargs)
def test():
return None
def args(*args):
return len(args)
def kwargs(**kwargs):
return len(kwargs)
def args_kwargs(*args, **kwargs):
print(args)
print(kwargs)
return len(args) + len(kwargs)
def exception():
raise Exception("YEEEEEE!!!")
server = SimpleXMLRPCServer(("localhost", 8080),allow_none=True)
print("Listening on port 8080...")
server.register_function(ping, "ping")
server.register_function(echo, "echo")
server.register_function(test, "test")
server.register_function(args, "args")
server.register_function(kwargs, "kwargs")
server.register_function(args_kwargs, "args_kwargs")
server.serve_forever()
```
server on aiohttp-xmlrpc one:
```
from aiohttp import web
from aiohttp_xmlrpc import handler
class XMLRPCExample(handler.XMLRPCView):
def rpc_ping(self):
return 'Pong'
def rpc_echo(self, *args, **kwargs):
return (args, kwargs)
def rpc_test(self):
return None
def rpc_args(self, *args):
return len(args)
def rpc_kwargs(self, **kwargs):
return len(kwargs)
def rpc_args_kwargs(self, *args, **kwargs):
print(args)
print(kwargs)
return len(args) + len(kwargs)
def rpc_exception(self):
raise Exception("YEEEEEE!!!")
def start_server():
app = web.Application()
app.router.add_route('*', '/', XMLRPCExample)
web.run_app(app)
if __name__ == "__main__":
start_server()
```
I called the same `args_kwargs` method from the client side, but the results are 4 for aiohttp-xmlrpc and 3 for xmlrpc.
(BTW, I use the https://xmlrpc.devzing.com/ tool to call the RPC service, which runs locally and is tunnelled through ngrok.)
I think `xmlrpc` is doing it the right way. Maybe there is some configuration I missed, or some test I did incorrectly?
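For comparison, here is a minimal client sketch (assuming the `SimpleXMLRPCServer` example above is running on localhost:8080) showing why the native library counts 3: every value, including the trailing dict, is marshalled as an ordinary positional `<param>`:
```
# Hedged sketch: assumes the native-xmlrpc server above is serving on port 8080.
import xmlrpc.client

proxy = xmlrpc.client.ServerProxy("http://localhost:8080/")
# All three values travel as positional <param> elements; the dict becomes a
# <struct> and is never unpacked into **kwargs by the native dispatcher.
print(proxy.args_kwargs(100, 200, {"key1": 1, "key2": 2}))  # -> 3
```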
|
0.0
|
4bcaed5c463a0f3f338b6282f9f299094b25440c
|
[
"tests/test_handler.py::test_11_dict_args[pyloop]"
] |
[
"tests/test_handler.py::test_1_test[pyloop]",
"tests/test_handler.py::test_2_args[pyloop]",
"tests/test_handler.py::test_3_kwargs[pyloop]",
"tests/test_handler.py::test_4_kwargs[pyloop]",
"tests/test_handler.py::test_5_exception[pyloop]",
"tests/test_handler.py::test_6_unknown_method[pyloop]",
"tests/test_handler.py::test_7_strings[pyloop]",
"tests/test_handler.py::test_8_strings_pretty[pyloop]",
"tests/test_handler.py::test_9_datetime[pyloop]",
"tests/test_handler.py::test_10_future[pyloop]"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-08-30 12:17:27+00:00
|
mit
| 4,035 |
|
mozilla__bleach-205
|
diff --git a/bleach/__init__.py b/bleach/__init__.py
index 3092cb7..bf67bf3 100644
--- a/bleach/__init__.py
+++ b/bleach/__init__.py
@@ -2,6 +2,12 @@
from __future__ import unicode_literals
import logging
+try: # Python 2.7+
+ from logging import NullHandler
+except ImportError:
+ class NullHandler(logging.Handler):
+ def emit(self, record):
+ pass
import re
import html5lib
@@ -15,7 +21,8 @@ from .version import __version__, VERSION # flake8: noqa
__all__ = ['clean', 'linkify']
-log = logging.getLogger('bleach')
+log = logging.getLogger(__name__)
+log.addHandler(NullHandler())
ALLOWED_TAGS = [
'a',
@@ -315,7 +322,7 @@ def linkify(text, callbacks=DEFAULT_CALLBACKS, skip_pre=False,
if node.tag == ETREE_TAG('pre') and skip_pre:
linkify_nodes(node, False)
elif not (node in _seen):
- linkify_nodes(node, True)
+ linkify_nodes(node, parse_text)
current_child += 1
@@ -344,6 +351,14 @@ def linkify(text, callbacks=DEFAULT_CALLBACKS, skip_pre=False,
if url.startswith('('):
_wrapping = strip_wrapping_parentheses(url)
url, open_brackets, close_brackets = _wrapping
+ if url.endswith(')') and '(' not in url:
+ # This is a clumsy handling for the case where we have something
+ # like (foo http://example.com) and the ) gets picked up by the
+ # url_re but we don't want it part of the link.
+ new_url = url.rstrip(')')
+ close_brackets += len(url) - len(new_url)
+ url = new_url
+
end = ''
m = re.search(punct_re, url)
if m:
|
mozilla/bleach
|
2235b8fcadc8abef3a2845bb0ce67206982f3489
|
diff --git a/bleach/tests/test_links.py b/bleach/tests/test_links.py
index 62da8d1..20d50ac 100644
--- a/bleach/tests/test_links.py
+++ b/bleach/tests/test_links.py
@@ -314,6 +314,13 @@ def test_skip_pre():
eq_(nofollowed, linkify(already_linked))
eq_(nofollowed, linkify(already_linked, skip_pre=True))
+ eq_(
+ linkify('<pre><code>http://example.com</code></pre>http://example.com',
+ skip_pre=True),
+ ('<pre><code>http://example.com</code></pre>'
+ '<a href="http://example.com" rel="nofollow">http://example.com</a>')
+ )
+
def test_libgl():
"""libgl.so.1 should not be linkified."""
@@ -360,11 +367,16 @@ def test_wrapping_parentheses():
tests = (
('(example.com)', ('(', 'example.com', 'example.com', ')')),
('(example.com/)', ('(', 'example.com/', 'example.com/', ')')),
- ('(example.com/foo)', ('(', 'example.com/foo',
- 'example.com/foo', ')')),
- ('(((example.com/))))', ('(((', 'example.com/)',
- 'example.com/)', ')))')),
- ('example.com/))', ('', 'example.com/))', 'example.com/))', '')),
+ ('(example.com/foo)',
+ ('(', 'example.com/foo', 'example.com/foo', ')')),
+ ('(((example.com/))))',
+ ('(((', 'example.com/', 'example.com/', '))))')),
+ ('example.com/))',
+ ('', 'example.com/', 'example.com/', '))')),
+ ('(foo http://example.com/)',
+ ('(foo ', 'example.com/', 'http://example.com/', ')')),
+ ('(foo http://example.com)',
+ ('(foo ', 'example.com', 'http://example.com', ')')),
('http://en.wikipedia.org/wiki/Test_(assessment)',
('', 'en.wikipedia.org/wiki/Test_(assessment)',
'http://en.wikipedia.org/wiki/Test_(assessment)', '')),
|
Children of <pre> tags should not be linkified when skip_pre=True (patch attached)
The children of `pre` tags should not be linkified when `skip_pre` is on
```
diff --git a/bleach/__init__.py b/bleach/__init__.py
index 48b6512..4c2dd1b 100644
--- a/bleach/__init__.py
+++ b/bleach/__init__.py
@@ -300,7 +300,7 @@ def linkify(text, callbacks=DEFAULT_CALLBACKS, skip_pre=False,
if node.tag == ETREE_TAG('pre') and skip_pre:
linkify_nodes(node, False)
elif not (node in _seen):
- linkify_nodes(node, True)
+ linkify_nodes(node, parse_text)
current_child += 1
diff --git a/bleach/tests/test_links.py b/bleach/tests/test_links.py
index 62da8d1..ae0fba7 100644
--- a/bleach/tests/test_links.py
+++ b/bleach/tests/test_links.py
@@ -314,6 +314,13 @@ def test_skip_pre():
eq_(nofollowed, linkify(already_linked))
eq_(nofollowed, linkify(already_linked, skip_pre=True))
+def test_skip_pre_child():
+ # Don't linkify the children of pre tags.
+ intext = '<pre><code>http://foo.com</code></pre>http://bar.com'
+ expect = '<pre><code>http://foo.com</code></pre><a href="http://bar.com" rel="nofollow">http://bar.com</a>'
+ output = linkify(intext, skip_pre=True)
+ eq_(expect, output)
+
def test_libgl():
"""libgl.so.1 should not be linkified."""
```
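A minimal usage sketch of the behavior the attached test expects (assuming the 1.x-era bleach API, where `linkify` still accepts `skip_pre`):
```
# Sketch: mirrors test_skip_pre_child from the attached patch.
import bleach

intext = '<pre><code>http://foo.com</code></pre>http://bar.com'
print(bleach.linkify(intext, skip_pre=True))
# expected: '<pre><code>http://foo.com</code></pre>'
#           '<a href="http://bar.com" rel="nofollow">http://bar.com</a>'
```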
|
0.0
|
2235b8fcadc8abef3a2845bb0ce67206982f3489
|
[
"bleach/tests/test_links.py::test_skip_pre"
] |
[
"bleach/tests/test_links.py::test_empty",
"bleach/tests/test_links.py::test_simple_link",
"bleach/tests/test_links.py::test_trailing_slash",
"bleach/tests/test_links.py::test_mangle_link",
"bleach/tests/test_links.py::test_mangle_text",
"bleach/tests/test_links.py::test_set_attrs",
"bleach/tests/test_links.py::test_only_proto_links",
"bleach/tests/test_links.py::test_stop_email",
"bleach/tests/test_links.py::test_tlds",
"bleach/tests/test_links.py::test_escaping",
"bleach/tests/test_links.py::test_nofollow_off",
"bleach/tests/test_links.py::test_link_in_html",
"bleach/tests/test_links.py::test_links_https",
"bleach/tests/test_links.py::test_add_rel_nofollow",
"bleach/tests/test_links.py::test_url_with_path",
"bleach/tests/test_links.py::test_link_ftp",
"bleach/tests/test_links.py::test_link_query",
"bleach/tests/test_links.py::test_link_fragment",
"bleach/tests/test_links.py::test_link_entities",
"bleach/tests/test_links.py::test_escaped_html",
"bleach/tests/test_links.py::test_link_http_complete",
"bleach/tests/test_links.py::test_non_url",
"bleach/tests/test_links.py::test_javascript_url",
"bleach/tests/test_links.py::test_unsafe_url",
"bleach/tests/test_links.py::test_libgl",
"bleach/tests/test_links.py::test_end_of_clause",
"bleach/tests/test_links.py::test_sarcasm",
"bleach/tests/test_links.py::test_parentheses_with_removing",
"bleach/tests/test_links.py::test_tokenizer",
"bleach/tests/test_links.py::test_ignore_bad_protocols",
"bleach/tests/test_links.py::test_max_recursion_depth",
"bleach/tests/test_links.py::test_link_emails_and_urls",
"bleach/tests/test_links.py::test_links_case_insensitive",
"bleach/tests/test_links.py::test_elements_inside_links",
"bleach/tests/test_links.py::test_remove_first_childlink"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2016-06-14 16:16:47+00:00
|
apache-2.0
| 4,036 |
|
mozilla__bleach-394
|
diff --git a/bleach/html5lib_shim.py b/bleach/html5lib_shim.py
index 58e2e78..5ede0d4 100644
--- a/bleach/html5lib_shim.py
+++ b/bleach/html5lib_shim.py
@@ -15,11 +15,10 @@ from bleach._vendor.html5lib import (
HTMLParser,
getTreeWalker,
)
+from bleach._vendor.html5lib import constants
from bleach._vendor.html5lib.constants import (
- entities,
namespaces,
prefixes,
- tokenTypes,
)
from bleach._vendor.html5lib.constants import _ReparseException as ReparseException
from bleach._vendor.html5lib.filters.base import Filter
@@ -32,15 +31,31 @@ from bleach._vendor.html5lib._trie import Trie
#: Map of entity name to expanded entity
-ENTITIES = entities
+ENTITIES = constants.entities
#: Trie of html entity string -> character representation
ENTITIES_TRIE = Trie(ENTITIES)
#: Token type constants--these never change
-START_TAG_TYPE = tokenTypes['StartTag']
-END_TAG_TYPE = tokenTypes['EndTag']
-CHARACTERS_TYPE = tokenTypes['Characters']
+TAG_TOKEN_TYPES = set([
+ constants.tokenTypes['StartTag'],
+ constants.tokenTypes['EndTag'],
+ constants.tokenTypes['EmptyTag']
+])
+CHARACTERS_TYPE = constants.tokenTypes['Characters']
+
+
+#: List of HTML tags
+HTML_TAGS = [
+ tag for namespace, tag in
+ (
+ list(constants.scopingElements) +
+ list(constants.formattingElements) +
+ list(constants.specialElements) +
+ list(constants.htmlIntegrationPointElements) +
+ list(constants.mathmlTextIntegrationPointElements)
+ )
+]
class InputStreamWithMemory(object):
@@ -99,8 +114,10 @@ class InputStreamWithMemory(object):
class BleachHTMLTokenizer(HTMLTokenizer):
"""Tokenizer that doesn't consume character entities"""
- def __init__(self, *args, **kwargs):
- super(BleachHTMLTokenizer, self).__init__(*args, **kwargs)
+ def __init__(self, consume_entities=False, **kwargs):
+ super(BleachHTMLTokenizer, self).__init__(**kwargs)
+
+ self.consume_entities = consume_entities
# Wrap the stream with one that remembers the history
self.stream = InputStreamWithMemory(self.stream)
@@ -139,17 +156,23 @@ class BleachHTMLTokenizer(HTMLTokenizer):
# If the token is a ParseError, we hold on to it so we can get the
# next token and potentially fix it.
- if token['type'] == tokenTypes['ParseError']:
+ if token['type'] == constants.tokenTypes['ParseError']:
last_error_token = token
continue
yield token
def consumeEntity(self, allowedChar=None, fromAttribute=False):
- # We don't want to consume and convert entities, so this overrides the
- # html5lib tokenizer's consumeEntity so that it's now a no-op.
+ # If this tokenizer is set to consume entities, then we can let the
+ # superclass do its thing.
+ if self.consume_entities:
+ return super(BleachHTMLTokenizer, self).consumeEntity(allowedChar, fromAttribute)
+
+ # If this tokenizer is set to not consume entities, then we don't want
+ # to consume and convert them, so this overrides the html5lib tokenizer's
+ # consumeEntity so that it's now a no-op.
#
- # However, when that gets called, it's consumed an &, so we put that in
+ # However, when that gets called, it's consumed an &, so we put that back in
# the stream.
if fromAttribute:
self.currentToken['data'][-1][1] += '&'
@@ -158,10 +181,10 @@ class BleachHTMLTokenizer(HTMLTokenizer):
self.tokenQueue.append({"type": CHARACTERS_TYPE, "data": '&'})
def tagOpenState(self):
- # This state marks a < that is either a StartTag, EndTag, or ParseError.
- # In all cases, we want to drop any stream history we've collected
- # so far and we do that by calling start_tag() on the input stream
- # wrapper.
+ # This state marks a < that is either a StartTag, EndTag, EmptyTag,
+ # or ParseError. In all cases, we want to drop any stream history
+ # we've collected so far and we do that by calling start_tag() on
+ # the input stream wrapper.
self.stream.start_tag()
return super(BleachHTMLTokenizer, self).tagOpenState()
@@ -169,11 +192,11 @@ class BleachHTMLTokenizer(HTMLTokenizer):
token = self.currentToken
if ((self.parser.tags is not None and
- token['type'] in (START_TAG_TYPE, END_TAG_TYPE) and
+ token['type'] in TAG_TOKEN_TYPES and
token['name'].lower() not in self.parser.tags)):
- # If this is a start/end tag for a tag that's not in our allowed
- # list, then it gets stripped or escaped. In both of these cases
- # it gets converted to a Characters token.
+ # If this is a start/end/empty tag for a tag that's not in our
+ # allowed list, then it gets stripped or escaped. In both of these
+ # cases it gets converted to a Characters token.
if self.parser.strip:
# If we're stripping the token, we just throw in an empty
# string token.
@@ -202,16 +225,19 @@ class BleachHTMLTokenizer(HTMLTokenizer):
class BleachHTMLParser(HTMLParser):
"""Parser that uses BleachHTMLTokenizer"""
- def __init__(self, tags, strip, **kwargs):
+ def __init__(self, tags, strip, consume_entities, **kwargs):
"""
:arg tags: list of allowed tags--everything else is either stripped or
escaped; if None, then this doesn't look at tags at all
:arg strip: whether to strip disallowed tags (True) or escape them (False);
if tags=None, then this doesn't have any effect
+ :arg consume_entities: whether to consume entities (default behavior) or
+ leave them as is when tokenizing (BleachHTMLTokenizer-added behavior)
"""
self.tags = [tag.lower() for tag in tags] if tags is not None else None
self.strip = strip
+ self.consume_entities = consume_entities
super(BleachHTMLParser, self).__init__(**kwargs)
def _parse(self, stream, innerHTML=False, container='div', scripting=False, **kwargs):
@@ -219,7 +245,12 @@ class BleachHTMLParser(HTMLParser):
self.innerHTMLMode = innerHTML
self.container = container
self.scripting = scripting
- self.tokenizer = BleachHTMLTokenizer(stream, parser=self, **kwargs)
+ self.tokenizer = BleachHTMLTokenizer(
+ stream=stream,
+ consume_entities=self.consume_entities,
+ parser=self,
+ **kwargs
+ )
self.reset()
try:
diff --git a/bleach/linkifier.py b/bleach/linkifier.py
index 3c8c3ee..6394c03 100644
--- a/bleach/linkifier.py
+++ b/bleach/linkifier.py
@@ -110,9 +110,16 @@ class Linker(object):
self.url_re = url_re
self.email_re = email_re
- self.parser = html5lib_shim.HTMLParser(namespaceHTMLElements=False)
+ # Create a parser/tokenizer that allows all HTML tags and escapes
+ # anything not in that list.
+ self.parser = html5lib_shim.BleachHTMLParser(
+ tags=html5lib_shim.HTML_TAGS,
+ strip=False,
+ consume_entities=True,
+ namespaceHTMLElements=False,
+ )
self.walker = html5lib_shim.getTreeWalker('etree')
- self.serializer = html5lib_shim.HTMLSerializer(
+ self.serializer = html5lib_shim.BleachHTMLSerializer(
quote_attr_values='always',
omit_optional_tags=False,
diff --git a/bleach/sanitizer.py b/bleach/sanitizer.py
index de82027..262915a 100644
--- a/bleach/sanitizer.py
+++ b/bleach/sanitizer.py
@@ -126,6 +126,7 @@ class Cleaner(object):
self.parser = html5lib_shim.BleachHTMLParser(
tags=self.tags,
strip=self.strip,
+ consume_entities=False,
namespaceHTMLElements=False
)
self.walker = html5lib_shim.getTreeWalker('etree')
diff --git a/docs/linkify.rst b/docs/linkify.rst
index b0ede1a..6665300 100644
--- a/docs/linkify.rst
+++ b/docs/linkify.rst
@@ -5,12 +5,20 @@
Linkifying text fragments
=========================
-:py:func:`bleach.linkify` searches text for links, URLs, and email addresses and
-lets you control how and when those links are rendered.
+Bleach comes with several tools for searching text for links, URLs, and email
+addresses and letting you specify how those links are rendered in HTML.
-It works by building a document tree, so it's guaranteed never to do weird
-things to URLs in attribute values, can modify the value of attributes on
-``<a>`` tags and can even do things like skip ``<pre>`` sections.
+For example, you could pass in text and have all URL things converted into
+HTML links.
+
+It works by parsing the text as HTML and building a document tree. In this
+way, it's guaranteed never to do weird things to URLs in attribute values,
+can modify the value of attributes on ``<a>`` tags and can even do things
+like skip ``<pre>`` sections.
+
+If you plan to sanitize/clean the text and linkify it, you should do that
+in a single pass using :ref:`LinkifyFilter <linkify-LinkifyFilter>`. This
+is faster and it'll use the list of allowed tags from clean.
.. note::
@@ -308,6 +316,7 @@ instance.
.. versionadded:: 2.0
+.. _linkify-LinkifyFilter:
Using ``bleach.linkifier.LinkifyFilter``
========================================
|
mozilla/bleach
|
c27512d20b48b7901687b62d15c91be1de856f89
|
diff --git a/tests/test_html5lib_shim.py b/tests/test_html5lib_shim.py
index d122fa0..5712d33 100644
--- a/tests/test_html5lib_shim.py
+++ b/tests/test_html5lib_shim.py
@@ -62,6 +62,7 @@ def test_serializer(data, expected):
parser = html5lib_shim.BleachHTMLParser(
tags=None,
strip=True,
+ consume_entities=False,
namespaceHTMLElements=False
)
walker = html5lib_shim.getTreeWalker('etree')
diff --git a/tests/test_linkify.py b/tests/test_linkify.py
index 4fa4512..876cb84 100644
--- a/tests/test_linkify.py
+++ b/tests/test_linkify.py
@@ -407,7 +407,6 @@ def test_end_of_clause():
)
[email protected](reason='html5lib >= 0.99999999: changed API')
def test_sarcasm():
"""Jokes should crash.<sarcasm/>"""
assert linkify('Yeah right <sarcasm/>') == 'Yeah right <sarcasm/>'
@@ -581,7 +580,7 @@ def test_hang():
"""This string would hang linkify. Issue #200"""
assert (
linkify("[email protected]<mailto:[email protected]>", parse_email=True) ==
- '<a href="mailto:[email protected]">[email protected]</a><mailto:[email protected]></mailto:[email protected]>'
+ '<a href="mailto:[email protected]">[email protected]</a><mailto:<a href="mailto:[email protected]">[email protected]</a>>' # noqa
)
|
linkify() "fixes" HTML by adding or removing tags
Similar to the issue in https://github.com/mozilla/bleach/issues/280, `bleach.linkify()` seems to be "fixing" HTML by adding or removing tags if it interprets something as an HTML tag.
For example:
```
>>> bleach.linkify("insert <name> here")
'insert <name> here</name>'
>>> bleach.linkify("Well that's just great</sarcasm>")
"Well that's just great"
```
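For reference, a sketch of how the patched `Linker` constructs its parser (names taken directly from the diff above; this is internal bleach API, shown only for illustration and assuming a version that contains this patch):
```
# Sketch: the Linker now parses with every known HTML tag allowed, so
# unrecognized tags get escaped instead of "fixed".
from bleach import html5lib_shim

parser = html5lib_shim.BleachHTMLParser(
    tags=html5lib_shim.HTML_TAGS,   # recognize the full set of HTML tags
    strip=False,                    # escape, rather than strip, anything else
    consume_entities=True,          # the linkifier wants entities resolved
    namespaceHTMLElements=False,
)
```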
|
0.0
|
c27512d20b48b7901687b62d15c91be1de856f89
|
[
"tests/test_html5lib_shim.py::test_serializer[-]",
"tests/test_html5lib_shim.py::test_serializer[text-text]",
"tests/test_html5lib_shim.py::test_serializer[&-&]",
"tests/test_html5lib_shim.py::test_serializer[a",
"tests/test_html5lib_shim.py::test_serializer[<a",
"tests/test_linkify.py::test_sarcasm",
"tests/test_linkify.py::test_hang"
] |
[
"tests/test_html5lib_shim.py::test_convert_entities[-]",
"tests/test_html5lib_shim.py::test_convert_entities[abc-abc]",
"tests/test_html5lib_shim.py::test_convert_entities[ -\\xa0]",
"tests/test_html5lib_shim.py::test_convert_entities[ -",
"tests/test_html5lib_shim.py::test_convert_entities[ -",
"tests/test_html5lib_shim.py::test_convert_entities[&xx;-&xx;]",
"tests/test_html5lib_shim.py::test_convert_entities[this",
"tests/test_linkify.py::test_empty",
"tests/test_linkify.py::test_simple_link",
"tests/test_linkify.py::test_trailing_slash",
"tests/test_linkify.py::test_mangle_link",
"tests/test_linkify.py::test_mangle_text",
"tests/test_linkify.py::test_email_link[a",
"tests/test_linkify.py::test_email_link[aussie",
"tests/test_linkify.py::test_email_link[email",
"tests/test_linkify.py::test_email_link[<br>[email protected]<br><a",
"tests/test_linkify.py::test_email_link[mailto",
"tests/test_linkify.py::test_email_link[\"\\\\\\n\"@opa.ru-True-\"\\\\\\n\"@opa.ru]",
"tests/test_linkify.py::test_email_link_escaping[\"james\"@example.com-<a",
"tests/test_linkify.py::test_email_link_escaping[\"j'ames\"@example.com-<a",
"tests/test_linkify.py::test_email_link_escaping[\"ja>mes\"@example.com-<a",
"tests/test_linkify.py::test_prevent_links[callback0-a",
"tests/test_linkify.py::test_prevent_links[callback1-a",
"tests/test_linkify.py::test_prevent_links[callback2-a",
"tests/test_linkify.py::test_prevent_links[callback3-a",
"tests/test_linkify.py::test_prevent_links[callback4-a",
"tests/test_linkify.py::test_prevent_links[callback5-a",
"tests/test_linkify.py::test_set_attrs",
"tests/test_linkify.py::test_only_proto_links",
"tests/test_linkify.py::test_stop_email",
"tests/test_linkify.py::test_tlds[example.com-<a",
"tests/test_linkify.py::test_tlds[example.co-<a",
"tests/test_linkify.py::test_tlds[example.co.uk-<a",
"tests/test_linkify.py::test_tlds[example.edu-<a",
"tests/test_linkify.py::test_tlds[example.xxx-<a",
"tests/test_linkify.py::test_tlds[bit.ly/fun-<a",
"tests/test_linkify.py::test_tlds[example.yyy-example.yyy]",
"tests/test_linkify.py::test_tlds[brie-brie]",
"tests/test_linkify.py::test_escaping",
"tests/test_linkify.py::test_nofollow_off",
"tests/test_linkify.py::test_link_in_html",
"tests/test_linkify.py::test_links_https",
"tests/test_linkify.py::test_add_rel_nofollow",
"tests/test_linkify.py::test_url_with_path",
"tests/test_linkify.py::test_link_ftp",
"tests/test_linkify.py::test_link_query",
"tests/test_linkify.py::test_link_fragment",
"tests/test_linkify.py::test_link_entities",
"tests/test_linkify.py::test_escaped_html",
"tests/test_linkify.py::test_link_http_complete",
"tests/test_linkify.py::test_non_url",
"tests/test_linkify.py::test_javascript_url",
"tests/test_linkify.py::test_unsafe_url",
"tests/test_linkify.py::test_skip_tags",
"tests/test_linkify.py::test_libgl",
"tests/test_linkify.py::test_end_of_sentence[example.com-.]",
"tests/test_linkify.py::test_end_of_sentence[example.com-...]",
"tests/test_linkify.py::test_end_of_sentence[ex.com/foo-.]",
"tests/test_linkify.py::test_end_of_sentence[ex.com/foo-....]",
"tests/test_linkify.py::test_end_of_clause",
"tests/test_linkify.py::test_wrapping_parentheses[(example.com)-expected_data0]",
"tests/test_linkify.py::test_wrapping_parentheses[(example.com/)-expected_data1]",
"tests/test_linkify.py::test_wrapping_parentheses[(example.com/foo)-expected_data2]",
"tests/test_linkify.py::test_wrapping_parentheses[(((example.com/))))-expected_data3]",
"tests/test_linkify.py::test_wrapping_parentheses[example.com/))-expected_data4]",
"tests/test_linkify.py::test_wrapping_parentheses[(foo",
"tests/test_linkify.py::test_wrapping_parentheses[http://en.wikipedia.org/wiki/Test_(assessment)-expected_data7]",
"tests/test_linkify.py::test_wrapping_parentheses[(http://en.wikipedia.org/wiki/Test_(assessment))-expected_data8]",
"tests/test_linkify.py::test_wrapping_parentheses[((http://en.wikipedia.org/wiki/Test_(assessment))-expected_data9]",
"tests/test_linkify.py::test_wrapping_parentheses[(http://en.wikipedia.org/wiki/Test_(assessment)))-expected_data10]",
"tests/test_linkify.py::test_wrapping_parentheses[(http://en.wikipedia.org/wiki/)Test_(assessment-expected_data11]",
"tests/test_linkify.py::test_wrapping_parentheses[hello",
"tests/test_linkify.py::test_parentheses_with_removing",
"tests/test_linkify.py::test_ports[http://foo.com:8000-expected_data0]",
"tests/test_linkify.py::test_ports[http://foo.com:8000/-expected_data1]",
"tests/test_linkify.py::test_ports[http://bar.com:xkcd-expected_data2]",
"tests/test_linkify.py::test_ports[http://foo.com:81/bar-expected_data3]",
"tests/test_linkify.py::test_ports[http://foo.com:-expected_data4]",
"tests/test_linkify.py::test_ports[http://foo.com:\\u0663\\u0669/-expected_data5]",
"tests/test_linkify.py::test_ports[http://foo.com:\\U0001d7e0\\U0001d7d8/-expected_data6]",
"tests/test_linkify.py::test_ignore_bad_protocols",
"tests/test_linkify.py::test_link_emails_and_urls",
"tests/test_linkify.py::test_links_case_insensitive",
"tests/test_linkify.py::test_elements_inside_links",
"tests/test_linkify.py::test_drop_link_tags",
"tests/test_linkify.py::test_naughty_unescaping[<br>-<br>]",
"tests/test_linkify.py::test_naughty_unescaping[<br>",
"tests/test_linkify.py::test_hyphen_in_mail",
"tests/test_linkify.py::test_url_re_arg",
"tests/test_linkify.py::test_email_re_arg",
"tests/test_linkify.py::test_linkify_idempotent",
"tests/test_linkify.py::TestLinkify::test_no_href_links",
"tests/test_linkify.py::TestLinkify::test_rel_already_there",
"tests/test_linkify.py::TestLinkify::test_only_text_is_linkified"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-10-02 14:44:48+00:00
|
apache-2.0
| 4,037 |
|
mozilla__bleach-410
|
diff --git a/bleach/linkifier.py b/bleach/linkifier.py
index 6394c03..5d815f8 100644
--- a/bleach/linkifier.py
+++ b/bleach/linkifier.py
@@ -499,13 +499,11 @@ class LinkifyFilter(html5lib_shim.Filter):
# the tokens we're going to yield
in_a = False
token_buffer = []
- continue
-
else:
token_buffer.append(token)
- continue
+ continue
- elif token['type'] in ['StartTag', 'EmptyTag']:
+ if token['type'] in ['StartTag', 'EmptyTag']:
if token['name'] in self.skip_tags:
# Skip tags start a "special mode" where we don't linkify
# anything until the end tag.
diff --git a/bleach/sanitizer.py b/bleach/sanitizer.py
index 262915a..9ba4c57 100644
--- a/bleach/sanitizer.py
+++ b/bleach/sanitizer.py
@@ -267,8 +267,8 @@ class BleachSanitizerFilter(html5lib_shim.SanitizerFilter):
return super(BleachSanitizerFilter, self).__init__(source, **kwargs)
- def __iter__(self):
- for token in html5lib_shim.Filter.__iter__(self):
+ def sanitize_stream(self, token_iterator):
+ for token in token_iterator:
ret = self.sanitize_token(token)
if not ret:
@@ -280,6 +280,40 @@ class BleachSanitizerFilter(html5lib_shim.SanitizerFilter):
else:
yield ret
+ def merge_characters(self, token_iterator):
+ """Merge consecutive Characters tokens in a stream"""
+ characters_buffer = []
+
+ for token in token_iterator:
+ if characters_buffer:
+ if token['type'] == 'Characters':
+ characters_buffer.append(token)
+ continue
+ else:
+ # Merge all the characters tokens together into one and then
+ # operate on it.
+ new_token = {
+ 'data': ''.join([char_token['data'] for char_token in characters_buffer]),
+ 'type': 'Characters'
+ }
+ characters_buffer = []
+ yield new_token
+
+ elif token['type'] == 'Characters':
+ characters_buffer.append(token)
+ continue
+
+ yield token
+
+ new_token = {
+ 'data': ''.join([char_token['data'] for char_token in characters_buffer]),
+ 'type': 'Characters'
+ }
+ yield new_token
+
+ def __iter__(self):
+ return self.merge_characters(self.sanitize_stream(html5lib_shim.Filter.__iter__(self)))
+
def sanitize_token(self, token):
"""Sanitize a token either by HTML-encoding or dropping.
|
mozilla/bleach
|
a39f7d6742cca84a4bb095e097c47b1d3770e58b
|
diff --git a/tests/test_clean.py b/tests/test_clean.py
index b543cdf..5322767 100644
--- a/tests/test_clean.py
+++ b/tests/test_clean.py
@@ -58,6 +58,7 @@ def test_html_is_lowercased():
'<a href="http://example.com">foo</a>'
)
+
def test_invalid_uri_does_not_raise_error():
assert clean('<a href="http://example.com]">text</a>') == '<a>text</a>'
diff --git a/tests/test_linkify.py b/tests/test_linkify.py
index 876cb84..eeea3e3 100644
--- a/tests/test_linkify.py
+++ b/tests/test_linkify.py
@@ -4,7 +4,8 @@ import pytest
from six.moves.urllib_parse import quote_plus
from bleach import linkify, DEFAULT_CALLBACKS as DC
-from bleach.linkifier import Linker
+from bleach.linkifier import Linker, LinkifyFilter
+from bleach.sanitizer import Cleaner
def test_empty():
@@ -656,3 +657,20 @@ class TestLinkify:
with pytest.raises(TypeError):
linkify(no_type)
+
+
[email protected]('text, expected', [
+ ('abc', 'abc'),
+ ('example.com', '<a href="http://example.com">example.com</a>'),
+ (
+ 'http://example.com?b=1&c=2',
+ '<a href="http://example.com?b=1&c=2">http://example.com?b=1&c=2</a>'
+ ),
+ (
+ 'link: https://example.com/watch#anchor',
+ 'link: <a href="https://example.com/watch#anchor">https://example.com/watch#anchor</a>'
+ )
+])
+def test_linkify_filter(text, expected):
+ cleaner = Cleaner(filters=[LinkifyFilter])
+ assert cleaner.clean(text) == expected
|
LinkifyFilter not working for URLs with ampersands
Example with bleach 2.1.3:
```
from bleach import Cleaner
from bleach.linkifier import LinkifyFilter
url1 = 'http://a.co?b=1&c=2'
url2 = 'http://a.co?b=1&c=2'
cleaner = Cleaner(filters=[LinkifyFilter])
cleaner.clean(url1)
cleaner.clean(url2)
```
Both result in `<a href="http://a.co?b=1">http://a.co?b=1</a>&c=2`
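A sketch of the behavior the new parametrized `test_linkify_filter` above asserts once consecutive Characters tokens are merged (assuming a bleach version containing this patch):
```
# Sketch: mirrors the querystring case from test_linkify_filter.
from bleach import Cleaner
from bleach.linkifier import LinkifyFilter

cleaner = Cleaner(filters=[LinkifyFilter])
print(cleaner.clean('http://example.com?b=1&c=2'))
# '<a href="http://example.com?b=1&amp;c=2">http://example.com?b=1&amp;c=2</a>'
```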
|
0.0
|
a39f7d6742cca84a4bb095e097c47b1d3770e58b
|
[
"tests/test_linkify.py::test_linkify_filter[http://example.com?b=1&c=2-<a"
] |
[
"tests/test_clean.py::test_clean_idempotent",
"tests/test_clean.py::test_only_text_is_cleaned",
"tests/test_clean.py::test_empty",
"tests/test_clean.py::test_content_has_no_html",
"tests/test_clean.py::test_content_has_allowed_html[an",
"tests/test_clean.py::test_content_has_allowed_html[another",
"tests/test_clean.py::test_html_is_lowercased",
"tests/test_clean.py::test_invalid_uri_does_not_raise_error",
"tests/test_clean.py::test_comments[<!--",
"tests/test_clean.py::test_comments[<!--open",
"tests/test_clean.py::test_comments[<!--comment-->text-True-text]",
"tests/test_clean.py::test_comments[<!--comment-->text-False-<!--comment-->text]",
"tests/test_clean.py::test_comments[text<!--",
"tests/test_clean.py::test_comments[text<!--comment-->-True-text]",
"tests/test_clean.py::test_comments[text<!--comment-->-False-text<!--comment-->]",
"tests/test_clean.py::test_invalid_char_in_tag",
"tests/test_clean.py::test_unclosed_tag",
"tests/test_clean.py::test_nested_script_tag",
"tests/test_clean.py::test_bare_entities_get_escaped_correctly[an",
"tests/test_clean.py::test_bare_entities_get_escaped_correctly[tag",
"tests/test_clean.py::test_character_entities_handling[&-&]",
"tests/test_clean.py::test_character_entities_handling[ - ]",
"tests/test_clean.py::test_character_entities_handling[ ",
"tests/test_clean.py::test_character_entities_handling[<em>strong</em>-<em>strong</em>]",
"tests/test_clean.py::test_character_entities_handling[&is",
"tests/test_clean.py::test_character_entities_handling[cool",
"tests/test_clean.py::test_character_entities_handling[&&",
"tests/test_clean.py::test_character_entities_handling[&",
"tests/test_clean.py::test_character_entities_handling[this",
"tests/test_clean.py::test_character_entities_handling[http://example.com?active=true¤t=true-http://example.com?active=true&current=true]",
"tests/test_clean.py::test_character_entities_handling[<a",
"tests/test_clean.py::test_character_entities_handling[&xx;-&xx;]",
"tests/test_clean.py::test_character_entities_handling['-']",
"tests/test_clean.py::test_character_entities_handling["-"]",
"tests/test_clean.py::test_character_entities_handling[{-{]",
"tests/test_clean.py::test_character_entities_handling[{-{]",
"tests/test_clean.py::test_character_entities_handling[{-{]",
"tests/test_clean.py::test_character_entities_handling[&#-&#]",
"tests/test_clean.py::test_character_entities_handling[&#<-&#<]",
"tests/test_clean.py::test_character_entities_handling['"-'"]",
"tests/test_clean.py::test_stripping_tags[a",
"tests/test_clean.py::test_stripping_tags[<p><a",
"tests/test_clean.py::test_stripping_tags[<p><span>multiply",
"tests/test_clean.py::test_stripping_tags[<ul><li><script></li></ul>-kwargs4-<ul><li></li></ul>]",
"tests/test_clean.py::test_stripping_tags[<isindex>-kwargs6-]",
"tests/test_clean.py::test_stripping_tags[Yeah",
"tests/test_clean.py::test_stripping_tags[<sarcasm>-kwargs8-]",
"tests/test_clean.py::test_stripping_tags[</sarcasm>-kwargs9-]",
"tests/test_clean.py::test_stripping_tags[</",
"tests/test_clean.py::test_stripping_tags[Foo",
"tests/test_clean.py::test_stripping_tags[Favorite",
"tests/test_clean.py::test_stripping_tags[</3-kwargs14-</3]",
"tests/test_clean.py::test_escaping_tags[<img",
"tests/test_clean.py::test_escaping_tags[<script>safe()</script>-<script>safe()</script>]",
"tests/test_clean.py::test_escaping_tags[<style>body{}</style>-<style>body{}</style>]",
"tests/test_clean.py::test_escaping_tags[<ul><li><script></li></ul>-<ul><li><script></li></ul>]",
"tests/test_clean.py::test_escaping_tags[<isindex>-<isindex>]",
"tests/test_clean.py::test_escaping_tags[<sarcasm/>-<sarcasm/>]",
"tests/test_clean.py::test_escaping_tags[<sarcasm>-<sarcasm>]",
"tests/test_clean.py::test_escaping_tags[</sarcasm>-</sarcasm>]",
"tests/test_clean.py::test_escaping_tags[</",
"tests/test_clean.py::test_escaping_tags[</3-</3]",
"tests/test_clean.py::test_escaping_tags[<[email protected]>-<[email protected]>]",
"tests/test_clean.py::test_escaping_tags[Favorite",
"tests/test_clean.py::test_stripping_tags_is_safe[<scri<script>pt>alert(1)</scr</script>ipt>-pt>alert(1)ipt>]",
"tests/test_clean.py::test_stripping_tags_is_safe[<scri<scri<script>pt>pt>alert(1)</script>-pt>pt>alert(1)]",
"tests/test_clean.py::test_allowed_styles",
"tests/test_clean.py::test_href_with_wrong_tag",
"tests/test_clean.py::test_disallowed_attr",
"tests/test_clean.py::test_unquoted_attr_values_are_quoted",
"tests/test_clean.py::test_unquoted_event_handler_attr_value",
"tests/test_clean.py::test_invalid_filter_attr",
"tests/test_clean.py::test_poster_attribute",
"tests/test_clean.py::test_attributes_callable",
"tests/test_clean.py::test_attributes_wildcard",
"tests/test_clean.py::test_attributes_wildcard_callable",
"tests/test_clean.py::test_attributes_tag_callable",
"tests/test_clean.py::test_attributes_tag_list",
"tests/test_clean.py::test_attributes_list",
"tests/test_clean.py::test_svg_attr_val_allows_ref",
"tests/test_clean.py::test_svg_allow_local_href[<svg><pattern",
"tests/test_clean.py::test_svg_allow_local_href_nonlocal[<svg><pattern",
"tests/test_clean.py::test_invisible_characters[1\\x0723-1?23]",
"tests/test_clean.py::test_invisible_characters[1\\x0823-1?23]",
"tests/test_clean.py::test_invisible_characters[1\\x0b23-1?23]",
"tests/test_clean.py::test_invisible_characters[1\\x0c23-1?23]",
"tests/test_clean.py::test_invisible_characters[import",
"tests/test_clean.py::test_nonexistent_namespace",
"tests/test_clean.py::test_regressions[1.test]",
"tests/test_clean.py::test_regressions[2.test]",
"tests/test_clean.py::test_regressions[3.test]",
"tests/test_clean.py::test_regressions[4.test]",
"tests/test_clean.py::test_regressions[5.test]",
"tests/test_clean.py::test_regressions[6.test]",
"tests/test_clean.py::test_regressions[7.test]",
"tests/test_clean.py::test_regressions[8.test]",
"tests/test_clean.py::test_regressions[9.test]",
"tests/test_clean.py::test_regressions[10.test]",
"tests/test_clean.py::test_regressions[11.test]",
"tests/test_clean.py::test_regressions[12.test]",
"tests/test_clean.py::test_regressions[13.test]",
"tests/test_clean.py::test_regressions[14.test]",
"tests/test_clean.py::test_regressions[15.test]",
"tests/test_clean.py::test_regressions[16.test]",
"tests/test_clean.py::test_regressions[17.test]",
"tests/test_clean.py::test_regressions[18.test]",
"tests/test_clean.py::test_regressions[19.test]",
"tests/test_clean.py::test_regressions[20.test]",
"tests/test_clean.py::TestCleaner::test_basics",
"tests/test_clean.py::TestCleaner::test_filters",
"tests/test_linkify.py::test_empty",
"tests/test_linkify.py::test_simple_link",
"tests/test_linkify.py::test_trailing_slash",
"tests/test_linkify.py::test_mangle_link",
"tests/test_linkify.py::test_mangle_text",
"tests/test_linkify.py::test_email_link[a",
"tests/test_linkify.py::test_email_link[aussie",
"tests/test_linkify.py::test_email_link[email",
"tests/test_linkify.py::test_email_link[<br>[email protected]<br><a",
"tests/test_linkify.py::test_email_link[mailto",
"tests/test_linkify.py::test_email_link[\"\\\\\\n\"@opa.ru-True-\"\\\\\\n\"@opa.ru]",
"tests/test_linkify.py::test_email_link_escaping[\"james\"@example.com-<a",
"tests/test_linkify.py::test_email_link_escaping[\"j'ames\"@example.com-<a",
"tests/test_linkify.py::test_email_link_escaping[\"ja>mes\"@example.com-<a",
"tests/test_linkify.py::test_prevent_links[callback0-a",
"tests/test_linkify.py::test_prevent_links[callback1-a",
"tests/test_linkify.py::test_prevent_links[callback2-a",
"tests/test_linkify.py::test_prevent_links[callback3-a",
"tests/test_linkify.py::test_prevent_links[callback4-a",
"tests/test_linkify.py::test_prevent_links[callback5-a",
"tests/test_linkify.py::test_set_attrs",
"tests/test_linkify.py::test_only_proto_links",
"tests/test_linkify.py::test_stop_email",
"tests/test_linkify.py::test_tlds[example.com-<a",
"tests/test_linkify.py::test_tlds[example.co-<a",
"tests/test_linkify.py::test_tlds[example.co.uk-<a",
"tests/test_linkify.py::test_tlds[example.edu-<a",
"tests/test_linkify.py::test_tlds[example.xxx-<a",
"tests/test_linkify.py::test_tlds[bit.ly/fun-<a",
"tests/test_linkify.py::test_tlds[example.yyy-example.yyy]",
"tests/test_linkify.py::test_tlds[brie-brie]",
"tests/test_linkify.py::test_escaping",
"tests/test_linkify.py::test_nofollow_off",
"tests/test_linkify.py::test_link_in_html",
"tests/test_linkify.py::test_links_https",
"tests/test_linkify.py::test_add_rel_nofollow",
"tests/test_linkify.py::test_url_with_path",
"tests/test_linkify.py::test_link_ftp",
"tests/test_linkify.py::test_link_query",
"tests/test_linkify.py::test_link_fragment",
"tests/test_linkify.py::test_link_entities",
"tests/test_linkify.py::test_escaped_html",
"tests/test_linkify.py::test_link_http_complete",
"tests/test_linkify.py::test_non_url",
"tests/test_linkify.py::test_javascript_url",
"tests/test_linkify.py::test_unsafe_url",
"tests/test_linkify.py::test_skip_tags",
"tests/test_linkify.py::test_libgl",
"tests/test_linkify.py::test_end_of_sentence[example.com-.]",
"tests/test_linkify.py::test_end_of_sentence[example.com-...]",
"tests/test_linkify.py::test_end_of_sentence[ex.com/foo-.]",
"tests/test_linkify.py::test_end_of_sentence[ex.com/foo-....]",
"tests/test_linkify.py::test_end_of_clause",
"tests/test_linkify.py::test_sarcasm",
"tests/test_linkify.py::test_wrapping_parentheses[(example.com)-expected_data0]",
"tests/test_linkify.py::test_wrapping_parentheses[(example.com/)-expected_data1]",
"tests/test_linkify.py::test_wrapping_parentheses[(example.com/foo)-expected_data2]",
"tests/test_linkify.py::test_wrapping_parentheses[(((example.com/))))-expected_data3]",
"tests/test_linkify.py::test_wrapping_parentheses[example.com/))-expected_data4]",
"tests/test_linkify.py::test_wrapping_parentheses[(foo",
"tests/test_linkify.py::test_wrapping_parentheses[http://en.wikipedia.org/wiki/Test_(assessment)-expected_data7]",
"tests/test_linkify.py::test_wrapping_parentheses[(http://en.wikipedia.org/wiki/Test_(assessment))-expected_data8]",
"tests/test_linkify.py::test_wrapping_parentheses[((http://en.wikipedia.org/wiki/Test_(assessment))-expected_data9]",
"tests/test_linkify.py::test_wrapping_parentheses[(http://en.wikipedia.org/wiki/Test_(assessment)))-expected_data10]",
"tests/test_linkify.py::test_wrapping_parentheses[(http://en.wikipedia.org/wiki/)Test_(assessment-expected_data11]",
"tests/test_linkify.py::test_wrapping_parentheses[hello",
"tests/test_linkify.py::test_parentheses_with_removing",
"tests/test_linkify.py::test_ports[http://foo.com:8000-expected_data0]",
"tests/test_linkify.py::test_ports[http://foo.com:8000/-expected_data1]",
"tests/test_linkify.py::test_ports[http://bar.com:xkcd-expected_data2]",
"tests/test_linkify.py::test_ports[http://foo.com:81/bar-expected_data3]",
"tests/test_linkify.py::test_ports[http://foo.com:-expected_data4]",
"tests/test_linkify.py::test_ports[http://foo.com:\\u0663\\u0669/-expected_data5]",
"tests/test_linkify.py::test_ports[http://foo.com:\\U0001d7e0\\U0001d7d8/-expected_data6]",
"tests/test_linkify.py::test_ignore_bad_protocols",
"tests/test_linkify.py::test_link_emails_and_urls",
"tests/test_linkify.py::test_links_case_insensitive",
"tests/test_linkify.py::test_elements_inside_links",
"tests/test_linkify.py::test_drop_link_tags",
"tests/test_linkify.py::test_naughty_unescaping[<br>-<br>]",
"tests/test_linkify.py::test_naughty_unescaping[<br>",
"tests/test_linkify.py::test_hang",
"tests/test_linkify.py::test_hyphen_in_mail",
"tests/test_linkify.py::test_url_re_arg",
"tests/test_linkify.py::test_email_re_arg",
"tests/test_linkify.py::test_linkify_idempotent",
"tests/test_linkify.py::TestLinkify::test_no_href_links",
"tests/test_linkify.py::TestLinkify::test_rel_already_there",
"tests/test_linkify.py::TestLinkify::test_only_text_is_linkified",
"tests/test_linkify.py::test_linkify_filter[abc-abc]",
"tests/test_linkify.py::test_linkify_filter[example.com-<a",
"tests/test_linkify.py::test_linkify_filter[link:"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-10-10 18:58:35+00:00
|
apache-2.0
| 4,038 |
|
mozilla__bleach-423
|
diff --git a/bleach/linkifier.py b/bleach/linkifier.py
index 5d815f8..95baba1 100644
--- a/bleach/linkifier.py
+++ b/bleach/linkifier.py
@@ -85,7 +85,7 @@ class Linker(object):
"""
def __init__(self, callbacks=DEFAULT_CALLBACKS, skip_tags=None, parse_email=False,
- url_re=URL_RE, email_re=EMAIL_RE):
+ url_re=URL_RE, email_re=EMAIL_RE, recognized_tags=html5lib_shim.HTML_TAGS):
"""Creates a Linker instance
:arg list callbacks: list of callbacks to run when adjusting tag attributes;
@@ -101,6 +101,9 @@ class Linker(object):
:arg re email_re: email matching regex
+ :arg list-of-strings recognized_tags: the list of tags that linkify knows about;
+ everything else gets escaped
+
:returns: linkified text as unicode
"""
@@ -113,7 +116,7 @@ class Linker(object):
# Create a parser/tokenizer that allows all HTML tags and escapes
# anything not in that list.
self.parser = html5lib_shim.BleachHTMLParser(
- tags=html5lib_shim.HTML_TAGS,
+ tags=recognized_tags,
strip=False,
consume_entities=True,
namespaceHTMLElements=False,
diff --git a/docs/linkify.rst b/docs/linkify.rst
index d60e17b..b8e7884 100644
--- a/docs/linkify.rst
+++ b/docs/linkify.rst
@@ -12,13 +12,14 @@ For example, you could pass in text and have all URL things converted into
HTML links.
It works by parsing the text as HTML and building a document tree. In this
-way, it's guaranteed never to do weird things to URLs in attribute values,
-can modify the value of attributes on ``<a>`` tags and can even do things
-like skip ``<pre>`` sections.
+way, you're guaranteed to get valid HTML back without weird things like
+having URLs in tag attributes getting linkified.
-If you plan to sanitize/clean the text and linkify it, you should do that
-in a single pass using :ref:`LinkifyFilter <linkify-LinkifyFilter>`. This
-is faster and it'll use the list of allowed tags from clean.
+.. note::
+
+ If you plan to sanitize/clean the text and linkify it, you should do that
+ in a single pass using :ref:`LinkifyFilter <linkify-LinkifyFilter>`. This
+ is faster and it'll use the list of allowed tags from clean.
.. note::
@@ -297,8 +298,8 @@ writing callbacks that may need to behave differently if the protocol is
Using ``bleach.linkifier.Linker``
=================================
-If you're linking a lot of text and passing the same argument values or you want
-more configurability, consider using a :py:class:`bleach.linkifier.Linker`
+If you're linking a lot of text and passing the same argument values or you
+need more configurability, consider using a :py:class:`bleach.linkifier.Linker`
instance.
.. doctest::
@@ -325,8 +326,8 @@ Using ``bleach.linkifier.LinkifyFilter``
the ``bleach.linkifier.LinkifyFilter`` when walking the tree and serializing it
back into text.
-You can use this filter wherever you can use an html5lib Filter. For example, you
-could use it with ``bleach.Cleaner`` to clean and linkify in one step.
+You can use this filter wherever you can use an html5lib Filter. This lets you
+use it with ``bleach.Cleaner`` to clean and linkify in one step.
For example, using all the defaults:
|
mozilla/bleach
|
26fb1da0472ac76a37e0f5eb91f4017f29350a46
|
diff --git a/tests/test_linkify.py b/tests/test_linkify.py
index eeea3e3..d29a5c8 100644
--- a/tests/test_linkify.py
+++ b/tests/test_linkify.py
@@ -625,6 +625,23 @@ def test_email_re_arg():
)
+def test_recognized_tags_arg():
+ """Verifies that recognized_tags works"""
+ # The html parser doesn't recognize "sarcasm" as a tag, so it escapes it
+ linker = Linker(recognized_tags=['p'])
+ assert (
+ linker.linkify('<p>http://example.com/</p><sarcasm>') ==
+ '<p><a href="http://example.com/" rel="nofollow">http://example.com/</a></p><sarcasm>' # noqa
+ )
+
+ # The html parser recognizes "sarcasm" as a tag and fixes it
+ linker = Linker(recognized_tags=['p', 'sarcasm'])
+ assert (
+ linker.linkify('<p>http://example.com/</p><sarcasm>') ==
+ '<p><a href="http://example.com/" rel="nofollow">http://example.com/</a></p><sarcasm></sarcasm>' # noqa
+ )
+
+
def test_linkify_idempotent():
dirty = '<span>invalid & </span> < extra http://link.com<em>'
assert linkify(linkify(dirty)) == linkify(dirty)
|
linkify is escaping valid HTML tags
Originally noticed here in relation to the `<abbr>` tag: https://github.com/mozilla/bleach/issues/400
linkify is now using a list of valid HTML tags that is largely built up from constants in html5lib, here: https://github.com/mozilla/bleach/blob/master/bleach/html5lib_shim.py#L49
However, these constants don't seem to make up a complete list of valid tags—in addition to `<abbr>` (which is now being appended to the list by Bleach), there are also multiple other tags missing, including `del`, `ins`, `sub`, `sup`, and `span`.
It would probably be best to replace this combination of html5lib constants with a full list of valid tags. I can do a PR for this.
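A usage sketch of the `recognized_tags` argument this patch introduces (mirrors `test_recognized_tags_arg` above; assumes a bleach version containing this patch):
```
# Sketch: with 'sarcasm' in recognized_tags the parser treats it as a real
# tag and balances it; without it, the tag would be escaped instead.
from bleach.linkifier import Linker

linker = Linker(recognized_tags=['p', 'sarcasm'])
print(linker.linkify('<p>http://example.com/</p><sarcasm>'))
# '<p><a href="http://example.com/" rel="nofollow">http://example.com/</a></p><sarcasm></sarcasm>'
```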
|
0.0
|
26fb1da0472ac76a37e0f5eb91f4017f29350a46
|
[
"tests/test_linkify.py::test_recognized_tags_arg"
] |
[
"tests/test_linkify.py::test_empty",
"tests/test_linkify.py::test_simple_link",
"tests/test_linkify.py::test_trailing_slash",
"tests/test_linkify.py::test_mangle_link",
"tests/test_linkify.py::test_mangle_text",
"tests/test_linkify.py::test_email_link[a",
"tests/test_linkify.py::test_email_link[aussie",
"tests/test_linkify.py::test_email_link[email",
"tests/test_linkify.py::test_email_link[<br>[email protected]<br><a",
"tests/test_linkify.py::test_email_link[mailto",
"tests/test_linkify.py::test_email_link[\"\\\\\\n\"@opa.ru-True-\"\\\\\\n\"@opa.ru]",
"tests/test_linkify.py::test_email_link_escaping[\"james\"@example.com-<a",
"tests/test_linkify.py::test_email_link_escaping[\"j'ames\"@example.com-<a",
"tests/test_linkify.py::test_email_link_escaping[\"ja>mes\"@example.com-<a",
"tests/test_linkify.py::test_prevent_links[callback0-a",
"tests/test_linkify.py::test_prevent_links[callback1-a",
"tests/test_linkify.py::test_prevent_links[callback2-a",
"tests/test_linkify.py::test_prevent_links[callback3-a",
"tests/test_linkify.py::test_prevent_links[callback4-a",
"tests/test_linkify.py::test_prevent_links[callback5-a",
"tests/test_linkify.py::test_set_attrs",
"tests/test_linkify.py::test_only_proto_links",
"tests/test_linkify.py::test_stop_email",
"tests/test_linkify.py::test_tlds[example.com-<a",
"tests/test_linkify.py::test_tlds[example.co-<a",
"tests/test_linkify.py::test_tlds[example.co.uk-<a",
"tests/test_linkify.py::test_tlds[example.edu-<a",
"tests/test_linkify.py::test_tlds[example.xxx-<a",
"tests/test_linkify.py::test_tlds[bit.ly/fun-<a",
"tests/test_linkify.py::test_tlds[example.yyy-example.yyy]",
"tests/test_linkify.py::test_tlds[brie-brie]",
"tests/test_linkify.py::test_escaping",
"tests/test_linkify.py::test_nofollow_off",
"tests/test_linkify.py::test_link_in_html",
"tests/test_linkify.py::test_links_https",
"tests/test_linkify.py::test_add_rel_nofollow",
"tests/test_linkify.py::test_url_with_path",
"tests/test_linkify.py::test_link_ftp",
"tests/test_linkify.py::test_link_query",
"tests/test_linkify.py::test_link_fragment",
"tests/test_linkify.py::test_link_entities",
"tests/test_linkify.py::test_escaped_html",
"tests/test_linkify.py::test_link_http_complete",
"tests/test_linkify.py::test_non_url",
"tests/test_linkify.py::test_javascript_url",
"tests/test_linkify.py::test_unsafe_url",
"tests/test_linkify.py::test_skip_tags",
"tests/test_linkify.py::test_libgl",
"tests/test_linkify.py::test_end_of_sentence[example.com-.]",
"tests/test_linkify.py::test_end_of_sentence[example.com-...]",
"tests/test_linkify.py::test_end_of_sentence[ex.com/foo-.]",
"tests/test_linkify.py::test_end_of_sentence[ex.com/foo-....]",
"tests/test_linkify.py::test_end_of_clause",
"tests/test_linkify.py::test_sarcasm",
"tests/test_linkify.py::test_wrapping_parentheses[(example.com)-expected_data0]",
"tests/test_linkify.py::test_wrapping_parentheses[(example.com/)-expected_data1]",
"tests/test_linkify.py::test_wrapping_parentheses[(example.com/foo)-expected_data2]",
"tests/test_linkify.py::test_wrapping_parentheses[(((example.com/))))-expected_data3]",
"tests/test_linkify.py::test_wrapping_parentheses[example.com/))-expected_data4]",
"tests/test_linkify.py::test_wrapping_parentheses[(foo",
"tests/test_linkify.py::test_wrapping_parentheses[http://en.wikipedia.org/wiki/Test_(assessment)-expected_data7]",
"tests/test_linkify.py::test_wrapping_parentheses[(http://en.wikipedia.org/wiki/Test_(assessment))-expected_data8]",
"tests/test_linkify.py::test_wrapping_parentheses[((http://en.wikipedia.org/wiki/Test_(assessment))-expected_data9]",
"tests/test_linkify.py::test_wrapping_parentheses[(http://en.wikipedia.org/wiki/Test_(assessment)))-expected_data10]",
"tests/test_linkify.py::test_wrapping_parentheses[(http://en.wikipedia.org/wiki/)Test_(assessment-expected_data11]",
"tests/test_linkify.py::test_wrapping_parentheses[hello",
"tests/test_linkify.py::test_parentheses_with_removing",
"tests/test_linkify.py::test_ports[http://foo.com:8000-expected_data0]",
"tests/test_linkify.py::test_ports[http://foo.com:8000/-expected_data1]",
"tests/test_linkify.py::test_ports[http://bar.com:xkcd-expected_data2]",
"tests/test_linkify.py::test_ports[http://foo.com:81/bar-expected_data3]",
"tests/test_linkify.py::test_ports[http://foo.com:-expected_data4]",
"tests/test_linkify.py::test_ports[http://foo.com:\\u0663\\u0669/-expected_data5]",
"tests/test_linkify.py::test_ports[http://foo.com:\\U0001d7e0\\U0001d7d8/-expected_data6]",
"tests/test_linkify.py::test_ignore_bad_protocols",
"tests/test_linkify.py::test_link_emails_and_urls",
"tests/test_linkify.py::test_links_case_insensitive",
"tests/test_linkify.py::test_elements_inside_links",
"tests/test_linkify.py::test_drop_link_tags",
"tests/test_linkify.py::test_naughty_unescaping[<br>-<br>]",
"tests/test_linkify.py::test_naughty_unescaping[<br>",
"tests/test_linkify.py::test_hang",
"tests/test_linkify.py::test_hyphen_in_mail",
"tests/test_linkify.py::test_url_re_arg",
"tests/test_linkify.py::test_email_re_arg",
"tests/test_linkify.py::test_linkify_idempotent",
"tests/test_linkify.py::TestLinkify::test_no_href_links",
"tests/test_linkify.py::TestLinkify::test_rel_already_there",
"tests/test_linkify.py::TestLinkify::test_only_text_is_linkified",
"tests/test_linkify.py::test_linkify_filter[abc-abc]",
"tests/test_linkify.py::test_linkify_filter[example.com-<a",
"tests/test_linkify.py::test_linkify_filter[http://example.com?b=1&c=2-<a",
"tests/test_linkify.py::test_linkify_filter[link:"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-12-12 22:04:41+00:00
|
apache-2.0
| 4,039 |
|
mozilla__bleach-429
|
diff --git a/bleach/sanitizer.py b/bleach/sanitizer.py
index c60c26b..79b80f5 100644
--- a/bleach/sanitizer.py
+++ b/bleach/sanitizer.py
@@ -395,7 +395,18 @@ class BleachSanitizerFilter(html5lib_shim.SanitizerFilter):
if part.startswith('&'):
entity = html5lib_shim.match_entity(part)
if entity is not None:
- new_tokens.append({'type': 'Entity', 'name': entity})
+ if entity == 'amp':
+ # LinkifyFilter can't match urls across token boundaries
+ # which is problematic with &amp; since that shows up in
+ # querystrings all the time. This special-cases &amp;
+ # and converts it to a & and sticks it in as a
+ # Characters token. It'll get merged with surrounding
+ # tokens in the BleachSanitizerfilter.__iter__ and
+ # escaped in the serializer.
+ new_tokens.append({'type': 'Characters', 'data': '&'})
+ else:
+ new_tokens.append({'type': 'Entity', 'name': entity})
+
# Length of the entity plus 2--one for & at the beginning
# and one for ; at the end
remainder = part[len(entity) + 2:]
|
mozilla/bleach
|
3097fd3466118539a30d2e3a2a469113aa007cbe
|
diff --git a/tests/test_linkify.py b/tests/test_linkify.py
index 584a5b0..ab1c513 100644
--- a/tests/test_linkify.py
+++ b/tests/test_linkify.py
@@ -694,6 +694,10 @@ class TestLinkify:
'http://example.com?b=1&c=2',
'<a href="http://example.com?b=1&amp;c=2">http://example.com?b=1&amp;c=2</a>'
),
+ (
+ 'http://example.com?b=1&amp;c=2',
+ '<a href="http://example.com?b=1&amp;c=2">http://example.com?b=1&amp;c=2</a>'
+ ),
(
'link: https://example.com/watch#anchor',
'link: <a href="https://example.com/watch#anchor">https://example.com/watch#anchor</a>'
|
LinkifyFilter with clean breaks links at &amp;
(Split out to a new issue from [this comment](https://github.com/mozilla/bleach/issues/419#issuecomment-445056894))
When using `LinkifyFilter` instead of the stand-alone `linkify`, links are broken at any `&amp;`, but not at bare `&` characters.
Here's a pretty minimal demonstration:
```
import bleach
bleach.linkify("https://www.youtube.com/watch?v=SkcucKDrbOI&feature=youtu.be")
```
Result (expected): `<a href="https://www.youtube.com/watch?v=SkcucKDrbOI&amp;feature=youtu.be" rel="nofollow">https://www.youtube.com/watch?v=SkcucKDrbOI&amp;feature=youtu.be</a>`
```
import bleach
from functools import partial
bleach_linkifier = partial(bleach.linkifier.LinkifyFilter)
cleaner = bleach.Cleaner(filters=[bleach_linkifier])
cleaner.clean("https://www.youtube.com/watch?v=SkcucKDrbOI&feature=youtu.be")
```
Result (not expected, link stops at the `&amp;`): `<a href="https://www.youtube.com/watch?v=SkcucKDrbOI">https://www.youtube.com/watch?v=SkcucKDrbOI</a>&amp;feature=<a href="http://youtu.be">youtu.be</a>`
It works with just a bare `&` instead of the `&amp;`, like `https://www.youtube.com/watch?v=SkcucKDrbOI&feature=youtu.be`. In that case, the output of both methods is:
```
<a href="https://www.youtube.com/watch?v=SkcucKDrbOI&feature=youtu.be">https://www.youtube.com/watch?v=SkcucKDrbOI&feature=youtu.be</a>
```
(with an additional `rel="nofollow"` attr added by `linkify()` by default)
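For reference, a quick check of the patched behavior (a sketch assuming bleach with the fix above applied; not part of the original report):
```
import bleach
from functools import partial

# With the fix, the 'amp' entity is converted back into a plain '&'
# Characters token, so LinkifyFilter sees the URL as one unbroken run
# of text in both code paths.
bleach_linkifier = partial(bleach.linkifier.LinkifyFilter)
cleaner = bleach.Cleaner(filters=[bleach_linkifier])
print(cleaner.clean("https://www.youtube.com/watch?v=SkcucKDrbOI&amp;feature=youtu.be"))
# expected: a single <a> wrapping the whole URL, with the ampersand
# re-escaped to &amp; by the serializer
```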
|
0.0
|
3097fd3466118539a30d2e3a2a469113aa007cbe
|
[
"tests/test_linkify.py::test_linkify_filter[http://example.com?b=1&c=2-<a"
] |
[
"tests/test_linkify.py::test_empty",
"tests/test_linkify.py::test_simple_link",
"tests/test_linkify.py::test_trailing_slash",
"tests/test_linkify.py::test_mangle_link",
"tests/test_linkify.py::test_mangle_text",
"tests/test_linkify.py::test_invalid_attribute_names",
"tests/test_linkify.py::test_email_link[a",
"tests/test_linkify.py::test_email_link[aussie",
"tests/test_linkify.py::test_email_link[email",
"tests/test_linkify.py::test_email_link[<br>[email protected]<br><a",
"tests/test_linkify.py::test_email_link[mailto",
"tests/test_linkify.py::test_email_link[\"\\\\\\n\"@opa.ru-True-\"\\\\\\n\"@opa.ru]",
"tests/test_linkify.py::test_email_link_escaping[\"james\"@example.com-<a",
"tests/test_linkify.py::test_email_link_escaping[\"j'ames\"@example.com-<a",
"tests/test_linkify.py::test_email_link_escaping[\"ja>mes\"@example.com-<a",
"tests/test_linkify.py::test_prevent_links[callback0-a",
"tests/test_linkify.py::test_prevent_links[callback1-a",
"tests/test_linkify.py::test_prevent_links[callback2-a",
"tests/test_linkify.py::test_prevent_links[callback3-a",
"tests/test_linkify.py::test_prevent_links[callback4-a",
"tests/test_linkify.py::test_prevent_links[callback5-a",
"tests/test_linkify.py::test_set_attrs",
"tests/test_linkify.py::test_only_proto_links",
"tests/test_linkify.py::test_stop_email",
"tests/test_linkify.py::test_tlds[example.com-<a",
"tests/test_linkify.py::test_tlds[example.co-<a",
"tests/test_linkify.py::test_tlds[example.co.uk-<a",
"tests/test_linkify.py::test_tlds[example.edu-<a",
"tests/test_linkify.py::test_tlds[example.xxx-<a",
"tests/test_linkify.py::test_tlds[bit.ly/fun-<a",
"tests/test_linkify.py::test_tlds[example.yyy-example.yyy]",
"tests/test_linkify.py::test_tlds[brie-brie]",
"tests/test_linkify.py::test_escaping",
"tests/test_linkify.py::test_nofollow_off",
"tests/test_linkify.py::test_link_in_html",
"tests/test_linkify.py::test_links_https",
"tests/test_linkify.py::test_add_rel_nofollow",
"tests/test_linkify.py::test_url_with_path",
"tests/test_linkify.py::test_link_ftp",
"tests/test_linkify.py::test_link_query",
"tests/test_linkify.py::test_link_fragment",
"tests/test_linkify.py::test_link_entities",
"tests/test_linkify.py::test_escaped_html",
"tests/test_linkify.py::test_link_http_complete",
"tests/test_linkify.py::test_non_url",
"tests/test_linkify.py::test_javascript_url",
"tests/test_linkify.py::test_unsafe_url",
"tests/test_linkify.py::test_skip_tags",
"tests/test_linkify.py::test_libgl",
"tests/test_linkify.py::test_end_of_sentence[example.com-.]",
"tests/test_linkify.py::test_end_of_sentence[example.com-...]",
"tests/test_linkify.py::test_end_of_sentence[ex.com/foo-.]",
"tests/test_linkify.py::test_end_of_sentence[ex.com/foo-....]",
"tests/test_linkify.py::test_end_of_clause",
"tests/test_linkify.py::test_sarcasm",
"tests/test_linkify.py::test_wrapping_parentheses[(example.com)-expected_data0]",
"tests/test_linkify.py::test_wrapping_parentheses[(example.com/)-expected_data1]",
"tests/test_linkify.py::test_wrapping_parentheses[(example.com/foo)-expected_data2]",
"tests/test_linkify.py::test_wrapping_parentheses[(((example.com/))))-expected_data3]",
"tests/test_linkify.py::test_wrapping_parentheses[example.com/))-expected_data4]",
"tests/test_linkify.py::test_wrapping_parentheses[(foo",
"tests/test_linkify.py::test_wrapping_parentheses[http://en.wikipedia.org/wiki/Test_(assessment)-expected_data7]",
"tests/test_linkify.py::test_wrapping_parentheses[(http://en.wikipedia.org/wiki/Test_(assessment))-expected_data8]",
"tests/test_linkify.py::test_wrapping_parentheses[((http://en.wikipedia.org/wiki/Test_(assessment))-expected_data9]",
"tests/test_linkify.py::test_wrapping_parentheses[(http://en.wikipedia.org/wiki/Test_(assessment)))-expected_data10]",
"tests/test_linkify.py::test_wrapping_parentheses[(http://en.wikipedia.org/wiki/)Test_(assessment-expected_data11]",
"tests/test_linkify.py::test_wrapping_parentheses[hello",
"tests/test_linkify.py::test_parentheses_with_removing",
"tests/test_linkify.py::test_ports[http://foo.com:8000-expected_data0]",
"tests/test_linkify.py::test_ports[http://foo.com:8000/-expected_data1]",
"tests/test_linkify.py::test_ports[http://bar.com:xkcd-expected_data2]",
"tests/test_linkify.py::test_ports[http://foo.com:81/bar-expected_data3]",
"tests/test_linkify.py::test_ports[http://foo.com:-expected_data4]",
"tests/test_linkify.py::test_ports[http://foo.com:\\u0663\\u0669/-expected_data5]",
"tests/test_linkify.py::test_ports[http://foo.com:\\U0001d7e0\\U0001d7d8/-expected_data6]",
"tests/test_linkify.py::test_ignore_bad_protocols",
"tests/test_linkify.py::test_link_emails_and_urls",
"tests/test_linkify.py::test_links_case_insensitive",
"tests/test_linkify.py::test_elements_inside_links",
"tests/test_linkify.py::test_drop_link_tags",
"tests/test_linkify.py::test_naughty_unescaping[<br>-<br>]",
"tests/test_linkify.py::test_naughty_unescaping[<br>",
"tests/test_linkify.py::test_hang",
"tests/test_linkify.py::test_hyphen_in_mail",
"tests/test_linkify.py::test_url_re_arg",
"tests/test_linkify.py::test_email_re_arg",
"tests/test_linkify.py::test_recognized_tags_arg",
"tests/test_linkify.py::test_linkify_idempotent",
"tests/test_linkify.py::TestLinkify::test_no_href_links",
"tests/test_linkify.py::TestLinkify::test_rel_already_there",
"tests/test_linkify.py::TestLinkify::test_only_text_is_linkified",
"tests/test_linkify.py::test_linkify_filter[abc-abc]",
"tests/test_linkify.py::test_linkify_filter[example.com-<a",
"tests/test_linkify.py::test_linkify_filter[http://example.com?b=1&c=2-<a",
"tests/test_linkify.py::test_linkify_filter[link:"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-01-03 23:07:58+00:00
|
apache-2.0
| 4,040 |
|
mozilla__bleach-432
|
diff --git a/CHANGES b/CHANGES
index eb3dc23..4fe065e 100644
--- a/CHANGES
+++ b/CHANGES
@@ -1,7 +1,7 @@
Bleach changes
==============
-Version 3.0.3 (In development)
+Version 3.1.0 (In development)
------------------------------
**Security fixes**
@@ -25,6 +25,12 @@ None
* Fix cases where attribute names could have invalid characters in them.
(#419)
+* Fix problems with ``LinkifyFilter`` not being able to match links
+ across ``&amp;``. (#422)
+
+* Fix ``InputStreamWithMemory`` when the ``BleachHTMLParser`` is
+ parsing ``meta`` tags. (#431)
+
Version 3.0.2 (October 11th, 2018)
----------------------------------
diff --git a/bleach/__init__.py b/bleach/__init__.py
index a6445d0..6249bf8 100644
--- a/bleach/__init__.py
+++ b/bleach/__init__.py
@@ -20,7 +20,7 @@ from bleach.sanitizer import (
# yyyymmdd
__releasedate__ = ''
# x.y.z or x.y.z.dev0 -- semver
-__version__ = '3.0.3.dev0'
+__version__ = '3.1.0.dev0'
VERSION = parse_version(__version__)
diff --git a/bleach/html5lib_shim.py b/bleach/html5lib_shim.py
index 8887667..25e3e95 100644
--- a/bleach/html5lib_shim.py
+++ b/bleach/html5lib_shim.py
@@ -181,6 +181,14 @@ class InputStreamWithMemory(object):
def errors(self):
return self._inner_stream.errors
+ @property
+ def charEncoding(self):
+ return self._inner_stream.charEncoding
+
+ @property
+ def changeEncoding(self):
+ return self._inner_stream.changeEncoding
+
def char(self):
c = self._inner_stream.char()
# char() can return None if EOF, so ignore that
|
mozilla/bleach
|
93a060e12138e5aeaf8627b305a918c4207b9c02
|
diff --git a/tests/test_html5lib_shim.py b/tests/test_html5lib_shim.py
index 5712d33..ce15de7 100644
--- a/tests/test_html5lib_shim.py
+++ b/tests/test_html5lib_shim.py
@@ -80,3 +80,65 @@ def test_serializer(data, expected):
serialized = serializer.render(walker(dom))
assert serialized == expected
+
+
+@pytest.mark.parametrize('parser_args, data, expected', [
+ # Make sure InputStreamWithMemory has charEncoding and changeEncoding
+ (
+ {},
+ '<meta charset="utf-8">',
+ '<meta charset="utf-8">'
+ ),
+ # Handle consume entities False--all entities are passed along and then
+ # escaped when serialized
+ (
+ {'consume_entities': False},
+ 'text &amp;&gt;&quot;',
+ 'text &amp;amp;&amp;gt;&amp;quot;'
+ ),
+ # Handle consume entities True--all entities are consumed and converted
+ # to their character equivalents and then &, <, and > are escaped when
+ # serialized
+ (
+ {'consume_entities': True},
+ 'text &amp;&gt;&quot;',
+ 'text &amp;&gt;&quot;'
+ ),
+ # Test that "invalid-character-in-attribute-name" errors in tokenizing
+ # result in attributes with invalid names getting dropped
+ (
+ {},
+ '<a href="http://example.com"">',
+ '<a href="http://example.com"></a>'
+ ),
+ (
+ {},
+ '<a href=\'http://example.com\'\'>',
+ '<a href="http://example.com"></a>'
+ )
+])
+def test_bleach_html_parser(parser_args, data, expected):
+ args = {
+ 'tags': None,
+ 'strip': True,
+ 'consume_entities': True
+ }
+ args.update(parser_args)
+
+ # Build a parser, walker, and serializer just like we do in clean()
+ parser = html5lib_shim.BleachHTMLParser(**args)
+ walker = html5lib_shim.getTreeWalker('etree')
+ serializer = html5lib_shim.BleachHTMLSerializer(
+ quote_attr_values='always',
+ omit_optional_tags=False,
+ escape_lt_in_attrs=True,
+ resolve_entities=False,
+ sanitize=False,
+ alphabetical_attributes=False,
+ )
+
+ # Parse, walk, and then serialize the output
+ dom = parser.parseFragment(data)
+ serialized = serializer.render(walker(dom))
+
+ assert serialized == expected
diff --git a/tests/test_linkify.py b/tests/test_linkify.py
index ab1c513..f121189 100644
--- a/tests/test_linkify.py
+++ b/tests/test_linkify.py
@@ -69,17 +69,6 @@ def test_mangle_text():
)
-def test_invalid_attribute_names():
- """Test that "invalid-character-in-attribute-name" errors in tokenizing
- result in attributes with invalid names get dropped.
-
- """
- assert (
- linkify('<a href="http://example.com/"">') ==
- '<a href="http://example.com/" rel="nofollow"></a>'
- )
-
-
@pytest.mark.parametrize('data,parse_email,expected', [
(
'a [email protected] mailto',
|
AttributeError: 'InputStreamWithMemory' object has no attribute 'charEncoding'
Using this code:
```
import bleach
TEXT = """\
<html>
<head>
<meta charset="utf-8">
</head>
<body>
</body>
</html>
"""
print(bleach.clean(TEXT, tags=['html', 'head', 'meta', 'body']))
```
we get this traceback:
```
Traceback (most recent call last):
File "s.py", line 12, in <module>
print(bleach.clean(TEXT, tags=['html', 'head', 'meta', 'body']))
File "/home/willkg/mozilla/bleach/bleach/__init__.py", line 84, in clean
return cleaner.clean(text)
File "/home/willkg/mozilla/bleach/bleach/sanitizer.py", line 169, in clean
dom = self.parser.parseFragment(text)
File "/home/willkg/mozilla/bleach/bleach/_vendor/html5lib/html5parser.py", line 317, in parseFragment
self._parse(stream, True, *args, **kwargs)
File "/home/willkg/mozilla/bleach/bleach/html5lib_shim.py", line 385, in _parse
self.mainLoop()
File "/home/willkg/mozilla/bleach/bleach/_vendor/html5lib/html5parser.py", line 241, in mainLoop
new_token = phase.processStartTag(new_token)
File "/home/willkg/mozilla/bleach/bleach/_vendor/html5lib/html5parser.py", line 472, in processStartTag
return self.startTagHandler[token["name"]](token)
File "/home/willkg/mozilla/bleach/bleach/_vendor/html5lib/html5parser.py", line 1067, in startTagProcessInHead
return self.parser.phases["inHead"].processStartTag(token)
File "/home/willkg/mozilla/bleach/bleach/_vendor/html5lib/html5parser.py", line 472, in processStartTag
return self.startTagHandler[token["name"]](token)
File "/home/willkg/mozilla/bleach/bleach/_vendor/html5lib/html5parser.py", line 745, in startTagMeta
if self.parser.tokenizer.stream.charEncoding[1] == "tentative":
AttributeError: 'InputStreamWithMemory' object has no attribute 'charEncoding'
```
This is with Bleach from master tip (3.0.3.dev0) and Python 2.7.15.
This issue covers fixing that.
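The patch above fixes this by delegating the missing attributes to the wrapped stream. A simplified sketch of the pattern (not the real class):
```
class InputStreamWithMemory:
    # Sketch: a wrapper around an html5lib input stream has to mirror the
    # parts of the stream API that the parser pokes at.
    def __init__(self, inner_stream):
        self._inner_stream = inner_stream

    @property
    def charEncoding(self):
        # html5lib's startTagMeta handler reads stream.charEncoding when it
        # sees <meta charset>, so the wrapper must expose it.
        return self._inner_stream.charEncoding

    @property
    def changeEncoding(self):
        return self._inner_stream.changeEncoding
```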
|
0.0
|
93a060e12138e5aeaf8627b305a918c4207b9c02
|
[
"tests/test_html5lib_shim.py::test_bleach_html_parser[parser_args0-<meta"
] |
[
"tests/test_html5lib_shim.py::test_convert_entities[-]",
"tests/test_html5lib_shim.py::test_convert_entities[abc-abc]",
"tests/test_html5lib_shim.py::test_convert_entities[ -\\xa0]",
"tests/test_html5lib_shim.py::test_convert_entities[ -",
"tests/test_html5lib_shim.py::test_convert_entities[ -",
"tests/test_html5lib_shim.py::test_convert_entities[&xx;-&xx;]",
"tests/test_html5lib_shim.py::test_convert_entities[this",
"tests/test_html5lib_shim.py::test_serializer[-]",
"tests/test_html5lib_shim.py::test_serializer[text-text]",
"tests/test_html5lib_shim.py::test_serializer[&-&]",
"tests/test_html5lib_shim.py::test_serializer[a",
"tests/test_html5lib_shim.py::test_serializer[<a",
"tests/test_html5lib_shim.py::test_bleach_html_parser[parser_args1-text",
"tests/test_html5lib_shim.py::test_bleach_html_parser[parser_args2-text",
"tests/test_html5lib_shim.py::test_bleach_html_parser[parser_args3-<a",
"tests/test_html5lib_shim.py::test_bleach_html_parser[parser_args4-<a",
"tests/test_linkify.py::test_empty",
"tests/test_linkify.py::test_simple_link",
"tests/test_linkify.py::test_trailing_slash",
"tests/test_linkify.py::test_mangle_link",
"tests/test_linkify.py::test_mangle_text",
"tests/test_linkify.py::test_email_link[a",
"tests/test_linkify.py::test_email_link[aussie",
"tests/test_linkify.py::test_email_link[email",
"tests/test_linkify.py::test_email_link[<br>[email protected]<br><a",
"tests/test_linkify.py::test_email_link[mailto",
"tests/test_linkify.py::test_email_link[\"\\\\\\n\"@opa.ru-True-\"\\\\\\n\"@opa.ru]",
"tests/test_linkify.py::test_email_link_escaping[\"james\"@example.com-<a",
"tests/test_linkify.py::test_email_link_escaping[\"j'ames\"@example.com-<a",
"tests/test_linkify.py::test_email_link_escaping[\"ja>mes\"@example.com-<a",
"tests/test_linkify.py::test_prevent_links[callback0-a",
"tests/test_linkify.py::test_prevent_links[callback1-a",
"tests/test_linkify.py::test_prevent_links[callback2-a",
"tests/test_linkify.py::test_prevent_links[callback3-a",
"tests/test_linkify.py::test_prevent_links[callback4-a",
"tests/test_linkify.py::test_prevent_links[callback5-a",
"tests/test_linkify.py::test_set_attrs",
"tests/test_linkify.py::test_only_proto_links",
"tests/test_linkify.py::test_stop_email",
"tests/test_linkify.py::test_tlds[example.com-<a",
"tests/test_linkify.py::test_tlds[example.co-<a",
"tests/test_linkify.py::test_tlds[example.co.uk-<a",
"tests/test_linkify.py::test_tlds[example.edu-<a",
"tests/test_linkify.py::test_tlds[example.xxx-<a",
"tests/test_linkify.py::test_tlds[bit.ly/fun-<a",
"tests/test_linkify.py::test_tlds[example.yyy-example.yyy]",
"tests/test_linkify.py::test_tlds[brie-brie]",
"tests/test_linkify.py::test_escaping",
"tests/test_linkify.py::test_nofollow_off",
"tests/test_linkify.py::test_link_in_html",
"tests/test_linkify.py::test_links_https",
"tests/test_linkify.py::test_add_rel_nofollow",
"tests/test_linkify.py::test_url_with_path",
"tests/test_linkify.py::test_link_ftp",
"tests/test_linkify.py::test_link_query",
"tests/test_linkify.py::test_link_fragment",
"tests/test_linkify.py::test_link_entities",
"tests/test_linkify.py::test_escaped_html",
"tests/test_linkify.py::test_link_http_complete",
"tests/test_linkify.py::test_non_url",
"tests/test_linkify.py::test_javascript_url",
"tests/test_linkify.py::test_unsafe_url",
"tests/test_linkify.py::test_skip_tags",
"tests/test_linkify.py::test_libgl",
"tests/test_linkify.py::test_end_of_sentence[example.com-.]",
"tests/test_linkify.py::test_end_of_sentence[example.com-...]",
"tests/test_linkify.py::test_end_of_sentence[ex.com/foo-.]",
"tests/test_linkify.py::test_end_of_sentence[ex.com/foo-....]",
"tests/test_linkify.py::test_end_of_clause",
"tests/test_linkify.py::test_sarcasm",
"tests/test_linkify.py::test_wrapping_parentheses[(example.com)-expected_data0]",
"tests/test_linkify.py::test_wrapping_parentheses[(example.com/)-expected_data1]",
"tests/test_linkify.py::test_wrapping_parentheses[(example.com/foo)-expected_data2]",
"tests/test_linkify.py::test_wrapping_parentheses[(((example.com/))))-expected_data3]",
"tests/test_linkify.py::test_wrapping_parentheses[example.com/))-expected_data4]",
"tests/test_linkify.py::test_wrapping_parentheses[(foo",
"tests/test_linkify.py::test_wrapping_parentheses[http://en.wikipedia.org/wiki/Test_(assessment)-expected_data7]",
"tests/test_linkify.py::test_wrapping_parentheses[(http://en.wikipedia.org/wiki/Test_(assessment))-expected_data8]",
"tests/test_linkify.py::test_wrapping_parentheses[((http://en.wikipedia.org/wiki/Test_(assessment))-expected_data9]",
"tests/test_linkify.py::test_wrapping_parentheses[(http://en.wikipedia.org/wiki/Test_(assessment)))-expected_data10]",
"tests/test_linkify.py::test_wrapping_parentheses[(http://en.wikipedia.org/wiki/)Test_(assessment-expected_data11]",
"tests/test_linkify.py::test_wrapping_parentheses[hello",
"tests/test_linkify.py::test_parentheses_with_removing",
"tests/test_linkify.py::test_ports[http://foo.com:8000-expected_data0]",
"tests/test_linkify.py::test_ports[http://foo.com:8000/-expected_data1]",
"tests/test_linkify.py::test_ports[http://bar.com:xkcd-expected_data2]",
"tests/test_linkify.py::test_ports[http://foo.com:81/bar-expected_data3]",
"tests/test_linkify.py::test_ports[http://foo.com:-expected_data4]",
"tests/test_linkify.py::test_ports[http://foo.com:\\u0663\\u0669/-expected_data5]",
"tests/test_linkify.py::test_ports[http://foo.com:\\U0001d7e0\\U0001d7d8/-expected_data6]",
"tests/test_linkify.py::test_ignore_bad_protocols",
"tests/test_linkify.py::test_link_emails_and_urls",
"tests/test_linkify.py::test_links_case_insensitive",
"tests/test_linkify.py::test_elements_inside_links",
"tests/test_linkify.py::test_drop_link_tags",
"tests/test_linkify.py::test_naughty_unescaping[<br>-<br>]",
"tests/test_linkify.py::test_naughty_unescaping[<br>",
"tests/test_linkify.py::test_hang",
"tests/test_linkify.py::test_hyphen_in_mail",
"tests/test_linkify.py::test_url_re_arg",
"tests/test_linkify.py::test_email_re_arg",
"tests/test_linkify.py::test_recognized_tags_arg",
"tests/test_linkify.py::test_linkify_idempotent",
"tests/test_linkify.py::TestLinkify::test_no_href_links",
"tests/test_linkify.py::TestLinkify::test_rel_already_there",
"tests/test_linkify.py::TestLinkify::test_only_text_is_linkified",
"tests/test_linkify.py::test_linkify_filter[abc-abc]",
"tests/test_linkify.py::test_linkify_filter[example.com-<a",
"tests/test_linkify.py::test_linkify_filter[http://example.com?b=1&c=2-<a",
"tests/test_linkify.py::test_linkify_filter[http://example.com?b=1&c=2-<a",
"tests/test_linkify.py::test_linkify_filter[link:"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-01-08 15:35:30+00:00
|
apache-2.0
| 4,041 |
|
mozilla__bleach-434
|
diff --git a/bleach/sanitizer.py b/bleach/sanitizer.py
index 79b80f5..6ccd78c 100644
--- a/bleach/sanitizer.py
+++ b/bleach/sanitizer.py
@@ -593,7 +593,8 @@ class BleachSanitizerFilter(html5lib_shim.SanitizerFilter):
# the whole thing.
parts = style.split(';')
gauntlet = re.compile(
- r"""^([-/:,#%.'"\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'\s*|"[\s\w]+"|\([\d,%\.\s]+\))*$"""
+ r"""^([-/:,#%.'"\s!\w]|\w-\w|'[\s\w]+'\s*|"[\s\w]+"|\([\d,%\.\s]+\))*$""",
+ flags=re.U
)
for part in parts:
|
mozilla/bleach
|
2f210e06baacb1015bdde9896ad465dab0ccc378
|
diff --git a/tests/test_css.py b/tests/test_css.py
index 12f27f3..7e34027 100644
--- a/tests/test_css.py
+++ b/tests/test_css.py
@@ -1,6 +1,8 @@
+# -*- coding: utf-8 -*-
from functools import partial
import pytest
+import six
from bleach import clean
@@ -14,6 +16,11 @@ clean = partial(clean, tags=['p'], attributes=['style'])
['color'],
'color: red;'
),
+ (
+ u'font-family: メイリオ; color: red; float: left; background-color: red;',
+ [u'color'],
+ u'color: red;'
+ ),
(
'border: 1px solid blue; color: red; float: left;',
['color'],
@@ -70,11 +77,19 @@ def test_allowed_css(data, styles, expected):
p_double = "<p style='{0!s}'>bar</p>"
if '"' in data:
+ if is_python2_unicode(data):
+ p_double = unicode(p_double)
assert clean(p_double.format(data), styles=styles) == p_double.format(expected)
else:
+ if is_python2_unicode(data):
+ p_single = unicode(p_single)
assert clean(p_single.format(data), styles=styles) == p_single.format(expected)
+def is_python2_unicode(data):
+ return six.PY2 and isinstance(data, unicode)
+
+
def test_valid_css():
"""The sanitizer should fix missing CSS values."""
styles = ['color', 'float']
|
Style filter does not work correctly when supplied non-ASCII style attribute
# Environment
Windows 10 Pro
Python 3.7.1
bleach 3.0.2
I've set things up so that the `color` style survives the clean() method, like below:
```
>>> bleach.clean('<span style="font-family:consolas;color:red">text</span>', tags = ['span'], attributes = {'span': ['style']}, styles = ['color'])
'<span style="color: red;">text</span>'
```
# Problem
When the style attribute contains non-ASCII text, such as a Japanese font name, the `color` style is dropped as well.
```
>>> bleach.clean('<span style="font-family:メイリオ;color:red">text</span>', tags = ['span'], attributes = {'span': ['style']}, styles = ['color'])
'<span style="">text</span>'
```
It should retain the `color` style like below:
```
'<span style="color: red;">text</span>'
```
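The root cause is the gauntlet regex shown in the patch above: its character class only admits ASCII letters and digits, so a style part containing non-ASCII text fails to match and the whole style attribute is emptied. A rough sketch of the check (not the full sanitizer):
```
import re

# Pre-fix gauntlet: 'a-zA-Z0-9' is ASCII-only, so the Japanese part fails
gauntlet = re.compile(
    r"""^([-/:,#%.'"\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'\s*|"[\s\w]+"|\([\d,%\.\s]+\))*$"""
)
print(bool(gauntlet.match("color: red")))           # True
print(bool(gauntlet.match("font-family: メイリオ")))  # False -> style dropped
```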
|
0.0
|
2f210e06baacb1015bdde9896ad465dab0ccc378
|
[
"tests/test_css.py::test_allowed_css[font-family:"
] |
[
"tests/test_css.py::test_allowed_css[border:",
"tests/test_css.py::test_allowed_css[color:",
"tests/test_css.py::test_allowed_css[cursor:",
"tests/test_css.py::test_allowed_css[text-overflow:",
"tests/test_css.py::test_valid_css",
"tests/test_css.py::test_urls[<p",
"tests/test_css.py::test_style_hang",
"tests/test_css.py::test_css_parsing_with_entities[<p"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2019-01-09 05:44:16+00:00
|
apache-2.0
| 4,042 |
|
mozilla__bleach-647
|
diff --git a/bleach/html5lib_shim.py b/bleach/html5lib_shim.py
index d97d5c4..3ebfc8a 100644
--- a/bleach/html5lib_shim.py
+++ b/bleach/html5lib_shim.py
@@ -512,13 +512,13 @@ def convert_entities(text):
def match_entity(stream):
"""Returns first entity in stream or None if no entity exists
- Note: For Bleach purposes, entities must start with a "&" and end with
- a ";". This ignoresambiguous character entities that have no ";" at the
- end.
+ Note: For Bleach purposes, entities must start with a "&" and end with a
+ ";". This ignores ambiguous character entities that have no ";" at the end.
:arg stream: the character stream
- :returns: ``None`` or the entity string without "&" or ";"
+ :returns: the entity string without "&" or ";" if it's a valid character
+ entity; ``None`` otherwise
"""
# Nix the & at the beginning
@@ -557,9 +557,11 @@ def match_entity(stream):
# Handle character entities
while stream and stream[0] not in end_characters:
c = stream.pop(0)
- if not ENTITIES_TRIE.has_keys_with_prefix(possible_entity):
- break
possible_entity += c
+ if not ENTITIES_TRIE.has_keys_with_prefix(possible_entity):
+ # If it's not a prefix, then it's not an entity and we're
+ # out
+ return None
if possible_entity and stream and stream[0] == ";":
return possible_entity
|
mozilla/bleach
|
78d311c8f39ca8a8137f3070e4b0d0ff2a096815
|
diff --git a/tests/test_clean.py b/tests/test_clean.py
index 9aca28a..c22ffa3 100644
--- a/tests/test_clean.py
+++ b/tests/test_clean.py
@@ -11,11 +11,14 @@ from bleach._vendor.html5lib.constants import rcdataElements
@pytest.mark.parametrize(
"data",
[
- "<span>text & </span>",
"a < b",
"link http://link.com",
"text<em>",
+ # Verify idempotentcy with character entity handling
+ "<span>text & </span>",
"jim ¤t joe",
+ "& &",
+ "jim &xx; joe",
# Link with querystring items
'<a href="http://example.com?foo=bar&bar=foo&biz=bash">',
],
@@ -156,41 +159,47 @@ def test_bare_entities_get_escaped_correctly(text, expected):
@pytest.mark.parametrize(
"text, expected",
[
- # Test character entities
+ # Test character entities in text don't get escaped
("&", "&"),
(" ", " "),
(" test string ", " test string "),
("<em>strong</em>", "<em>strong</em>"),
- # Test character entity at beginning of string
+ # Test character entity at beginning of string doesn't get escaped
("&is cool", "&is cool"),
- # Test it at the end of the string
+ # Test character entity at end of the string doesn't get escaped
("cool &", "cool &"),
- # Test bare ampersands and entities at beginning
+ # Test bare ampersands before an entity at the beginning of the string
+ # gets escaped
("&& is cool", "&& is cool"),
- # Test entities and bare ampersand at end
+ # Test ampersand after an entity at the end of the string gets escaped
("& is cool &&", "& is cool &&"),
- # Test missing semi-colon means we don't treat it like an entity
+ # Test missing semi-colons mean we don't treat the thing as an entity--Bleach
+ # only recognizes character entities that start with & and end with ;
("this & that", "this &amp that"),
- # Test a thing that looks like a character entity, but isn't because it's
- # missing a ; (¤t)
(
"http://example.com?active=true¤t=true",
"http://example.com?active=true&current=true",
),
- # Test character entities in attribute values are left alone
+ # Test character entities in attribute values are not escaped
('<a href="?art&copy">foo</a>', '<a href="?art&copy">foo</a>'),
('<a href="?this=>that">foo</a>', '<a href="?this=>that">foo</a>'),
- # Ambiguous ampersands get escaped in attributes
+ # Things in attributes that aren't character entities get escaped
(
'<a href="http://example.com/&xx;">foo</a>',
'<a href="http://example.com/&xx;">foo</a>',
),
+ (
+ '<a href="http://example.com?&adp;">foo</a>',
+ '<a href="http://example.com?&adp;">foo</a>',
+ ),
(
'<a href="http://example.com?active=true¤t=true">foo</a>',
'<a href="http://example.com?active=true&current=true">foo</a>',
),
- # Ambiguous ampersands in text are not escaped
- ("&xx;", "&xx;"),
+ # Things in text that aren't character entities get escaped
+ ("&xx;", "&xx;"),
+ ("&adp;", "&adp;"),
+ ("&currdupe;", "&currdupe;"),
# Test numeric entities
("'", "'"),
(""", """),
|
bleach.clean method returns unexpected value for string with unknown entities
For strings like "&adp;" and "&*!;" the actual output is "&ad;;" and "&*;;".
But I expect such values to keep their original form ("&adp;" and "&*!;" respectively).
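A minimal repro sketch (not part of the original report; strings as described above):
```
import bleach

# Unknown character entity: per this report the output is mangled
print(bleach.clean("&adp;"))
# reported: '&ad;;'
# after the fix, the text is kept and escaped: '&amp;adp;'
```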
|
0.0
|
78d311c8f39ca8a8137f3070e4b0d0ff2a096815
|
[
"tests/test_clean.py::test_character_entities_handling[&xx;-&xx;]",
"tests/test_clean.py::test_character_entities_handling[&adp;-&adp;]"
] |
[
"tests/test_clean.py::test_clean_idempotent[a",
"tests/test_clean.py::test_clean_idempotent[link",
"tests/test_clean.py::test_clean_idempotent[text<em>]",
"tests/test_clean.py::test_clean_idempotent[<span>text",
"tests/test_clean.py::test_clean_idempotent[jim",
"tests/test_clean.py::test_clean_idempotent[& ",
"tests/test_clean.py::test_clean_idempotent[<a",
"tests/test_clean.py::test_clean_idempotent_img",
"tests/test_clean.py::test_only_text_is_cleaned",
"tests/test_clean.py::test_empty",
"tests/test_clean.py::test_content_has_no_html",
"tests/test_clean.py::test_content_has_allowed_html[an",
"tests/test_clean.py::test_content_has_allowed_html[another",
"tests/test_clean.py::test_html_is_lowercased",
"tests/test_clean.py::test_comments[<!--",
"tests/test_clean.py::test_comments[<!--open",
"tests/test_clean.py::test_comments[<!--comment-->text-True-text]",
"tests/test_clean.py::test_comments[<!--comment-->text-False-<!--comment-->text]",
"tests/test_clean.py::test_comments[text<!--",
"tests/test_clean.py::test_comments[text<!--comment-->-True-text]",
"tests/test_clean.py::test_comments[text<!--comment-->-False-text<!--comment-->]",
"tests/test_clean.py::test_invalid_char_in_tag",
"tests/test_clean.py::test_unclosed_tag",
"tests/test_clean.py::test_nested_script_tag",
"tests/test_clean.py::test_bare_entities_get_escaped_correctly[an",
"tests/test_clean.py::test_bare_entities_get_escaped_correctly[tag",
"tests/test_clean.py::test_character_entities_handling[&-&]",
"tests/test_clean.py::test_character_entities_handling[ - ]",
"tests/test_clean.py::test_character_entities_handling[ ",
"tests/test_clean.py::test_character_entities_handling[<em>strong</em>-<em>strong</em>]",
"tests/test_clean.py::test_character_entities_handling[&is",
"tests/test_clean.py::test_character_entities_handling[cool",
"tests/test_clean.py::test_character_entities_handling[&&",
"tests/test_clean.py::test_character_entities_handling[&",
"tests/test_clean.py::test_character_entities_handling[this",
"tests/test_clean.py::test_character_entities_handling[http://example.com?active=true¤t=true-http://example.com?active=true&current=true]",
"tests/test_clean.py::test_character_entities_handling[<a",
"tests/test_clean.py::test_character_entities_handling[&currdupe;-&currdupe;]",
"tests/test_clean.py::test_character_entities_handling['-']",
"tests/test_clean.py::test_character_entities_handling["-"]",
"tests/test_clean.py::test_character_entities_handling[{-{]",
"tests/test_clean.py::test_character_entities_handling[{-{]",
"tests/test_clean.py::test_character_entities_handling[{-{]",
"tests/test_clean.py::test_character_entities_handling[&#-&#]",
"tests/test_clean.py::test_character_entities_handling[&#<-&#<]",
"tests/test_clean.py::test_character_entities_handling['"-'"]",
"tests/test_clean.py::test_stripping_tags[a",
"tests/test_clean.py::test_stripping_tags[<p><a",
"tests/test_clean.py::test_stripping_tags[<p><span>multiply",
"tests/test_clean.py::test_stripping_tags[<ul><li><script></li></ul>-kwargs4-<ul><li></li></ul>]",
"tests/test_clean.py::test_stripping_tags[<isindex>-kwargs6-]",
"tests/test_clean.py::test_stripping_tags[Yeah",
"tests/test_clean.py::test_stripping_tags[<sarcasm>-kwargs8-]",
"tests/test_clean.py::test_stripping_tags[</sarcasm>-kwargs9-]",
"tests/test_clean.py::test_stripping_tags[</",
"tests/test_clean.py::test_stripping_tags[Foo",
"tests/test_clean.py::test_stripping_tags[Favorite",
"tests/test_clean.py::test_stripping_tags[</3-kwargs14-</3]",
"tests/test_clean.py::test_escaping_tags[<img",
"tests/test_clean.py::test_escaping_tags[<script>safe()</script>-<script>safe()</script>]",
"tests/test_clean.py::test_escaping_tags[<style>body{}</style>-<style>body{}</style>]",
"tests/test_clean.py::test_escaping_tags[<ul><li><script></li></ul>-<ul><li><script></li></ul>]",
"tests/test_clean.py::test_escaping_tags[<isindex>-<isindex>]",
"tests/test_clean.py::test_escaping_tags[<sarcasm/>-<sarcasm/>]",
"tests/test_clean.py::test_escaping_tags[<sarcasm>-<sarcasm>]",
"tests/test_clean.py::test_escaping_tags[</sarcasm>-</sarcasm>]",
"tests/test_clean.py::test_escaping_tags[</",
"tests/test_clean.py::test_escaping_tags[</3-</3]",
"tests/test_clean.py::test_escaping_tags[<[email protected]>-<[email protected]>]",
"tests/test_clean.py::test_escaping_tags[Favorite",
"tests/test_clean.py::test_stripping_tags_is_safe[<scri<script>pt>alert(1)</scr</script>ipt>-pt>alert(1)ipt>]",
"tests/test_clean.py::test_stripping_tags_is_safe[<scri<scri<script>pt>pt>alert(1)</script>-pt>pt>alert(1)]",
"tests/test_clean.py::test_allowed_styles",
"tests/test_clean.py::test_href_with_wrong_tag",
"tests/test_clean.py::test_disallowed_attr",
"tests/test_clean.py::test_unquoted_attr_values_are_quoted",
"tests/test_clean.py::test_unquoted_event_handler_attr_value",
"tests/test_clean.py::test_invalid_filter_attr",
"tests/test_clean.py::test_poster_attribute",
"tests/test_clean.py::test_attributes_callable",
"tests/test_clean.py::test_attributes_wildcard",
"tests/test_clean.py::test_attributes_wildcard_callable",
"tests/test_clean.py::test_attributes_tag_callable",
"tests/test_clean.py::test_attributes_tag_list",
"tests/test_clean.py::test_attributes_list",
"tests/test_clean.py::test_uri_value_allowed_protocols[<a",
"tests/test_clean.py::test_svg_attr_val_allows_ref",
"tests/test_clean.py::test_svg_allow_local_href[<svg><pattern",
"tests/test_clean.py::test_svg_allow_local_href_nonlocal[<svg><pattern",
"tests/test_clean.py::test_invisible_characters[1\\x0723-1?23]",
"tests/test_clean.py::test_invisible_characters[1\\x0823-1?23]",
"tests/test_clean.py::test_invisible_characters[1\\x0b23-1?23]",
"tests/test_clean.py::test_invisible_characters[1\\x0c23-1?23]",
"tests/test_clean.py::test_invisible_characters[import",
"tests/test_clean.py::test_nonexistent_namespace",
"tests/test_clean.py::test_self_closing_tags_self_close[area]",
"tests/test_clean.py::test_self_closing_tags_self_close[base]",
"tests/test_clean.py::test_self_closing_tags_self_close[br]",
"tests/test_clean.py::test_self_closing_tags_self_close[embed]",
"tests/test_clean.py::test_self_closing_tags_self_close[hr]",
"tests/test_clean.py::test_self_closing_tags_self_close[img]",
"tests/test_clean.py::test_self_closing_tags_self_close[input]",
"tests/test_clean.py::test_self_closing_tags_self_close[link]",
"tests/test_clean.py::test_self_closing_tags_self_close[meta]",
"tests/test_clean.py::test_self_closing_tags_self_close[param]",
"tests/test_clean.py::test_self_closing_tags_self_close[source]",
"tests/test_clean.py::test_self_closing_tags_self_close[track]",
"tests/test_clean.py::test_noscript_rawtag_[title-<noscript><title></noscript><img",
"tests/test_clean.py::test_noscript_rawtag_[textarea-<noscript><textarea></noscript><img",
"tests/test_clean.py::test_noscript_rawtag_[script-<noscript><script></noscript><img",
"tests/test_clean.py::test_noscript_rawtag_[style-<noscript><style></noscript><img",
"tests/test_clean.py::test_noscript_rawtag_[noembed-<noscript><noembed></noscript><img",
"tests/test_clean.py::test_noscript_rawtag_[noframes-<noscript><noframes></noscript><img",
"tests/test_clean.py::test_noscript_rawtag_[iframe-<noscript><iframe></noscript><img",
"tests/test_clean.py::test_noscript_rawtag_[xmp-<noscript><xmp></noscript><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[math-noframes-<math><noframes><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[math-noscript-<math><noscript><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[math-style-<math><style><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[math-noembed-<math><noembed><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[math-script-<math><script><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[math-xmp-<math><xmp><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[math-iframe-<math><iframe><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[svg-noframes-<svg><noframes><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[svg-noscript-<svg><noscript><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[svg-style-<svg><style><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[svg-noembed-<svg><noembed><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[svg-script-<svg><script><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[svg-xmp-<svg><xmp><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[svg-iframe-<svg><iframe><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-style-<math></p><style><!--</style><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-style-<math></br><style><!--</style><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-style-<svg></p><style><!--</style><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-style-<svg></br><style><!--</style><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-title-<math></p><title><!--</title><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-title-<math></br><title><!--</title><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-title-<svg></p><title><!--</title><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-title-<svg></br><title><!--</title><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-noscript-<math></p><noscript><!--</noscript><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-noscript-<math></br><noscript><!--</noscript><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-noscript-<svg></p><noscript><!--</noscript><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-noscript-<svg></br><noscript><!--</noscript><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-script-<math></p><script><!--</script><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-script-<math></br><script><!--</script><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-script-<svg></p><script><!--</script><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-script-<svg></br><script><!--</script><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-noembed-<math></p><noembed><!--</noembed><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-noembed-<math></br><noembed><!--</noembed><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-noembed-<svg></p><noembed><!--</noembed><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-noembed-<svg></br><noembed><!--</noembed><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-textarea-<math></p><textarea><!--</textarea><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-textarea-<math></br><textarea><!--</textarea><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-textarea-<svg></p><textarea><!--</textarea><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-textarea-<svg></br><textarea><!--</textarea><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-noframes-<math></p><noframes><!--</noframes><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-noframes-<math></br><noframes><!--</noframes><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-noframes-<svg></p><noframes><!--</noframes><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-noframes-<svg></br><noframes><!--</noframes><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-iframe-<math></p><iframe><!--</iframe><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-iframe-<math></br><iframe><!--</iframe><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-iframe-<svg></p><iframe><!--</iframe><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-iframe-<svg></br><iframe><!--</iframe><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-xmp-<math></p><xmp><!--</xmp><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-xmp-<math></br><xmp><!--</xmp><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-xmp-<svg></p><xmp><!--</xmp><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-xmp-<svg></br><xmp><!--</xmp><img",
"tests/test_clean.py::test_regressions[1.test]",
"tests/test_clean.py::test_regressions[2.test]",
"tests/test_clean.py::test_regressions[3.test]",
"tests/test_clean.py::test_regressions[4.test]",
"tests/test_clean.py::test_regressions[5.test]",
"tests/test_clean.py::test_regressions[6.test]",
"tests/test_clean.py::test_regressions[7.test]",
"tests/test_clean.py::test_regressions[8.test]",
"tests/test_clean.py::test_regressions[9.test]",
"tests/test_clean.py::test_regressions[10.test]",
"tests/test_clean.py::test_regressions[11.test]",
"tests/test_clean.py::test_regressions[12.test]",
"tests/test_clean.py::test_regressions[13.test]",
"tests/test_clean.py::test_regressions[14.test]",
"tests/test_clean.py::test_regressions[15.test]",
"tests/test_clean.py::test_regressions[16.test]",
"tests/test_clean.py::test_regressions[17.test]",
"tests/test_clean.py::test_regressions[18.test]",
"tests/test_clean.py::test_regressions[19.test]",
"tests/test_clean.py::test_regressions[20.test]",
"tests/test_clean.py::test_preserves_attributes_order",
"tests/test_clean.py::TestCleaner::test_basics",
"tests/test_clean.py::TestCleaner::test_filters"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-03-01 13:38:35+00:00
|
apache-2.0
| 4,043 |
|
mozilla__bleach-659
|
diff --git a/bleach/linkifier.py b/bleach/linkifier.py
index 68a4042..df56f3c 100644
--- a/bleach/linkifier.py
+++ b/bleach/linkifier.py
@@ -1,5 +1,7 @@
import re
+from urllib.parse import quote
+
from bleach import callbacks as linkify_callbacks
from bleach import html5lib_shim
@@ -298,10 +300,15 @@ class LinkifyFilter(html5lib_shim.Filter):
{"type": "Characters", "data": text[end : match.start()]}
)
+ # URL-encode the "local-part" according to RFC6068
+ parts = match.group(0).split("@")
+ parts[0] = quote(parts[0])
+ address = "@".join(parts)
+
# Run attributes through the callbacks to see what we
# should do with this match
attrs = {
- (None, "href"): "mailto:%s" % match.group(0),
+ (None, "href"): "mailto:%s" % address,
"_text": match.group(0),
}
attrs = self.apply_callbacks(attrs, True)
|
mozilla/bleach
|
481b146b074ed004eab39abf8f9b964fcd61c408
|
diff --git a/tests/test_linkify.py b/tests/test_linkify.py
index 69181ca..fbabf12 100644
--- a/tests/test_linkify.py
+++ b/tests/test_linkify.py
@@ -104,6 +104,17 @@ def test_mangle_text():
),
# Incorrect email
('"\\\n"@opa.ru', True, '"\\\n"@opa.ru'),
+ # RFC6068 special characters
+ (
+ "gorby%[email protected]",
+ True,
+ '<a href="mailto:gorby%[email protected]">gorby%[email protected]</a>',
+ ),
+ (
+ "[email protected]",
+ True,
+ '<a href="mailto:unlikely%[email protected]">[email protected]</a>',
+ ),
],
)
def test_email_link(data, parse_email, expected):
@@ -115,15 +126,15 @@ def test_email_link(data, parse_email, expected):
[
(
'"james"@example.com',
- """<a href='mailto:"james"@example.com'>"james"@example.com</a>""",
+ """<a href="mailto:%22james%[email protected]">"james"@example.com</a>""",
),
(
'"j\'ames"@example.com',
- """<a href="mailto:"j'ames"@example.com">"j'ames"@example.com</a>""",
+ """<a href="mailto:%22j%27ames%[email protected]">"j'ames"@example.com</a>""",
),
(
'"ja>mes"@example.com',
- """<a href='mailto:"ja>mes"@example.com'>"ja>mes"@example.com</a>""",
+ """<a href="mailto:%22ja%3Emes%[email protected]">"ja>mes"@example.com</a>""",
),
],
)
|
bug: `linkify` with `parse_email=True` doesn't handle "%" and "?" in `addr-specs`
**Describe the bug**
bug: `linkify` with `parse_email=True` doesn't handle "%" and "?", which may occur in RFC822 addr-specs (see https://datatracker.ietf.org/doc/html/rfc2368#section-6)
- Python Version: 3.10.4
- Bleach Version: 5.0.0
**To Reproduce**
Steps to reproduce the behavior:
```python
>>> bleach.linkify("gorby%[email protected]", parse_email=True)
'<a href="mailto:gorby%[email protected]">gorby%[email protected]</a>'
```
**Expected behavior**
I expected RFC822 special characters to be percent-encoded according to RFC2368:
```python
>>> bleach.linkify("gorby%[email protected]", parse_email=True)
'<a href="mailto:gorby%[email protected]">gorby%[email protected]</a>'
```
**Additional context**
Same issue exists with "?"; I didn't test other RFC822 special characters but suspect they are similarly left unquoted.
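The patch above addresses this by percent-encoding the local-part with `urllib.parse.quote`; roughly:
```python
from urllib.parse import quote

def mailto_href(address):
    # Mirror the patch: split on '@', quote the local-part, rejoin.
    parts = address.split("@")
    parts[0] = quote(parts[0])
    return "mailto:%s" % "@".join(parts)

print(mailto_href("gorby%[email protected]"))
# -> mailto:gorby%[email protected]
```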
|
0.0
|
481b146b074ed004eab39abf8f9b964fcd61c408
|
[
"tests/test_linkify.py::test_email_link[gorby%[email protected]<a",
"tests/test_linkify.py::test_email_link[[email protected]<a",
"tests/test_linkify.py::test_email_link_escaping[\"james\"@example.com-<a",
"tests/test_linkify.py::test_email_link_escaping[\"j'ames\"@example.com-<a",
"tests/test_linkify.py::test_email_link_escaping[\"ja>mes\"@example.com-<a"
] |
[
"tests/test_linkify.py::test_empty",
"tests/test_linkify.py::test_simple_link",
"tests/test_linkify.py::test_trailing_slash",
"tests/test_linkify.py::test_mangle_link",
"tests/test_linkify.py::test_mangle_text",
"tests/test_linkify.py::test_email_link[a",
"tests/test_linkify.py::test_email_link[aussie",
"tests/test_linkify.py::test_email_link[email",
"tests/test_linkify.py::test_email_link[<br>[email protected]<br><a",
"tests/test_linkify.py::test_email_link[mailto",
"tests/test_linkify.py::test_email_link[\"\\\\\\n\"@opa.ru-True-\"\\\\\\n\"@opa.ru]",
"tests/test_linkify.py::test_prevent_links[callback0-a",
"tests/test_linkify.py::test_prevent_links[callback1-a",
"tests/test_linkify.py::test_prevent_links[callback2-a",
"tests/test_linkify.py::test_prevent_links[callback3-a",
"tests/test_linkify.py::test_prevent_links[callback4-a",
"tests/test_linkify.py::test_prevent_links[callback5-a",
"tests/test_linkify.py::test_set_attrs",
"tests/test_linkify.py::test_only_proto_links",
"tests/test_linkify.py::test_stop_email",
"tests/test_linkify.py::test_tlds[example.com-<a",
"tests/test_linkify.py::test_tlds[example.co-<a",
"tests/test_linkify.py::test_tlds[example.co.uk-<a",
"tests/test_linkify.py::test_tlds[example.edu-<a",
"tests/test_linkify.py::test_tlds[example.xxx-<a",
"tests/test_linkify.py::test_tlds[bit.ly/fun-<a",
"tests/test_linkify.py::test_tlds[example.yyy-example.yyy]",
"tests/test_linkify.py::test_tlds[brie-brie]",
"tests/test_linkify.py::test_escaping[<",
"tests/test_linkify.py::test_escaping[<U",
"tests/test_linkify.py::test_nofollow_off",
"tests/test_linkify.py::test_link_in_html",
"tests/test_linkify.py::test_links_https",
"tests/test_linkify.py::test_add_rel_nofollow",
"tests/test_linkify.py::test_url_with_path",
"tests/test_linkify.py::test_link_ftp",
"tests/test_linkify.py::test_link_query",
"tests/test_linkify.py::test_link_fragment",
"tests/test_linkify.py::test_link_entities",
"tests/test_linkify.py::test_escaped_html",
"tests/test_linkify.py::test_link_http_complete",
"tests/test_linkify.py::test_non_url",
"tests/test_linkify.py::test_javascript_url",
"tests/test_linkify.py::test_unsafe_url",
"tests/test_linkify.py::test_skip_tags",
"tests/test_linkify.py::test_libgl",
"tests/test_linkify.py::test_end_of_sentence[example.com-.]",
"tests/test_linkify.py::test_end_of_sentence[example.com-...]",
"tests/test_linkify.py::test_end_of_sentence[ex.com/foo-.]",
"tests/test_linkify.py::test_end_of_sentence[ex.com/foo-....]",
"tests/test_linkify.py::test_end_of_clause",
"tests/test_linkify.py::test_sarcasm",
"tests/test_linkify.py::test_wrapping_parentheses[(example.com)-expected_data0]",
"tests/test_linkify.py::test_wrapping_parentheses[(example.com/)-expected_data1]",
"tests/test_linkify.py::test_wrapping_parentheses[(example.com/foo)-expected_data2]",
"tests/test_linkify.py::test_wrapping_parentheses[(((example.com/))))-expected_data3]",
"tests/test_linkify.py::test_wrapping_parentheses[example.com/))-expected_data4]",
"tests/test_linkify.py::test_wrapping_parentheses[(foo",
"tests/test_linkify.py::test_wrapping_parentheses[http://en.wikipedia.org/wiki/Test_(assessment)-expected_data7]",
"tests/test_linkify.py::test_wrapping_parentheses[(http://en.wikipedia.org/wiki/Test_(assessment))-expected_data8]",
"tests/test_linkify.py::test_wrapping_parentheses[((http://en.wikipedia.org/wiki/Test_(assessment))-expected_data9]",
"tests/test_linkify.py::test_wrapping_parentheses[(http://en.wikipedia.org/wiki/Test_(assessment)))-expected_data10]",
"tests/test_linkify.py::test_wrapping_parentheses[(http://en.wikipedia.org/wiki/)Test_(assessment-expected_data11]",
"tests/test_linkify.py::test_wrapping_parentheses[hello",
"tests/test_linkify.py::test_parentheses_with_removing",
"tests/test_linkify.py::test_ports[http://foo.com:8000-expected_data0]",
"tests/test_linkify.py::test_ports[http://foo.com:8000/-expected_data1]",
"tests/test_linkify.py::test_ports[http://bar.com:xkcd-expected_data2]",
"tests/test_linkify.py::test_ports[http://foo.com:81/bar-expected_data3]",
"tests/test_linkify.py::test_ports[http://foo.com:-expected_data4]",
"tests/test_linkify.py::test_ports[http://foo.com:\\u0663\\u0669/-expected_data5]",
"tests/test_linkify.py::test_ports[http://foo.com:\\U0001d7e0\\U0001d7d8/-expected_data6]",
"tests/test_linkify.py::test_ignore_bad_protocols",
"tests/test_linkify.py::test_link_emails_and_urls",
"tests/test_linkify.py::test_links_case_insensitive",
"tests/test_linkify.py::test_elements_inside_links",
"tests/test_linkify.py::test_drop_link_tags",
"tests/test_linkify.py::test_naughty_unescaping[<br>-<br>]",
"tests/test_linkify.py::test_naughty_unescaping[<br>",
"tests/test_linkify.py::test_hang",
"tests/test_linkify.py::test_hyphen_in_mail",
"tests/test_linkify.py::test_url_re_arg",
"tests/test_linkify.py::test_email_re_arg",
"tests/test_linkify.py::test_recognized_tags_arg",
"tests/test_linkify.py::test_linkify_idempotent[<span>text",
"tests/test_linkify.py::test_linkify_idempotent[a",
"tests/test_linkify.py::test_linkify_idempotent[link",
"tests/test_linkify.py::test_linkify_idempotent[text<em>]",
"tests/test_linkify.py::test_linkify_idempotent[jim",
"tests/test_linkify.py::test_linkify_idempotent[<a",
"tests/test_linkify.py::TestLinkify::test_no_href_links",
"tests/test_linkify.py::TestLinkify::test_rel_already_there",
"tests/test_linkify.py::TestLinkify::test_only_text_is_linkified",
"tests/test_linkify.py::test_linkify_filter[abc-abc]",
"tests/test_linkify.py::test_linkify_filter[example.com-<a",
"tests/test_linkify.py::test_linkify_filter[http://example.com?b=1&c=2-<a",
"tests/test_linkify.py::test_linkify_filter[http://example.com?b=1&c=2-<a",
"tests/test_linkify.py::test_linkify_filter[link:"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-05-03 05:29:14+00:00
|
apache-2.0
| 4,044 |
|
mozilla__bleach-667
|
diff --git a/bleach/html5lib_shim.py b/bleach/html5lib_shim.py
index 6fc9048..d121953 100644
--- a/bleach/html5lib_shim.py
+++ b/bleach/html5lib_shim.py
@@ -385,7 +385,17 @@ class BleachHTMLTokenizer(HTMLTokenizer):
yield token
if last_error_token:
- yield last_error_token
+ if last_error_token["data"] == "eof-in-tag-name":
+ # Handle the case where the text being parsed ends with <
+ # followed by a series of characters. It's treated as a tag
+ # name that abruptly ends, but we should treat that like
+ # character data
+ yield {
+ "type": TAG_TOKEN_TYPE_CHARACTERS,
+ "data": "<" + self.currentToken["name"],
+ }
+ else:
+ yield last_error_token
def consumeEntity(self, allowedChar=None, fromAttribute=False):
# If this tokenizer is set to consume entities, then we can let the
|
mozilla/bleach
|
c5c3f50d01967feb1f67e55ad33a6880d9dc2e73
|
diff --git a/tests/test_clean.py b/tests/test_clean.py
index b9c262a..ab11253 100644
--- a/tests/test_clean.py
+++ b/tests/test_clean.py
@@ -156,6 +156,22 @@ def test_bare_entities_get_escaped_correctly(text, expected):
assert clean(text) == expected
+@pytest.mark.parametrize(
+ "text, expected",
+ [
+ ("x<y", "x<y"),
+ ("<y", "<y"),
+ ("x < y", "x < y"),
+ ("<y>", "<y>"),
+ ],
+)
+def test_lessthan_escaping(text, expected):
+ # Tests whether < gets escaped correctly in a series of edge cases where
+ # the html5lib tokenizer hits an error because it's not the beginning of a
+ # tag.
+ assert clean(text) == expected
+
+
@pytest.mark.parametrize(
"text, expected",
[
|
Left angle bracket '<' with any characters (tag or not) and no closing right angle bracket prevents remaining text from being returned
Given some text that contains a left angle bracket and no closing right angle bracket, the `bleach.clean` method returns nothing after the occurrence of the left angle bracket.
Consider the following example strings
`example_str_1 = 'random prefix text <anything any amount of suffix text'`
`example_str_2 = '<e any amount of text here is gone'`
`example_str_3 = '<it works when there is a closing right bracket>'`
Expected output
example_str_1 `random prefix text <anything any amount of suffix text`
example_str_2 `<e any amount of text here is gone`
example_str_3 `<it works when there is a closing right bracket>`
Actual output
example_str_1 `random prefix text `
example_str_2 _empty string_
example_str_3 `<it works when there is a closing right bracket>`
Python version 3.6.8
bleach 3.1.5
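A quick repro (bleach 3.1.5, per the report):
```
import bleach

print(repr(bleach.clean("random prefix text <anything any amount of suffix text")))
# reported: 'random prefix text ' -- everything from the bare '<' onward is lost
# with the fix, the '<' run is treated as character data and escaped instead
```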
|
0.0
|
c5c3f50d01967feb1f67e55ad33a6880d9dc2e73
|
[
"tests/test_clean.py::test_lessthan_escaping[x<y-x<y]",
"tests/test_clean.py::test_lessthan_escaping[<y-<y]"
] |
[
"tests/test_clean.py::test_clean_idempotent[a",
"tests/test_clean.py::test_clean_idempotent[link",
"tests/test_clean.py::test_clean_idempotent[text<em>]",
"tests/test_clean.py::test_clean_idempotent[<span>text",
"tests/test_clean.py::test_clean_idempotent[jim",
"tests/test_clean.py::test_clean_idempotent[& ",
"tests/test_clean.py::test_clean_idempotent[<a",
"tests/test_clean.py::test_clean_idempotent_img",
"tests/test_clean.py::test_only_text_is_cleaned",
"tests/test_clean.py::test_empty",
"tests/test_clean.py::test_content_has_no_html",
"tests/test_clean.py::test_content_has_allowed_html[an",
"tests/test_clean.py::test_content_has_allowed_html[another",
"tests/test_clean.py::test_html_is_lowercased",
"tests/test_clean.py::test_comments[<!--",
"tests/test_clean.py::test_comments[<!--open",
"tests/test_clean.py::test_comments[<!--comment-->text-True-text]",
"tests/test_clean.py::test_comments[<!--comment-->text-False-<!--comment-->text]",
"tests/test_clean.py::test_comments[text<!--",
"tests/test_clean.py::test_comments[text<!--comment-->-True-text]",
"tests/test_clean.py::test_comments[text<!--comment-->-False-text<!--comment-->]",
"tests/test_clean.py::test_invalid_char_in_tag",
"tests/test_clean.py::test_unclosed_tag",
"tests/test_clean.py::test_nested_script_tag",
"tests/test_clean.py::test_bare_entities_get_escaped_correctly[an",
"tests/test_clean.py::test_bare_entities_get_escaped_correctly[tag",
"tests/test_clean.py::test_lessthan_escaping[x",
"tests/test_clean.py::test_lessthan_escaping[<y>-<y>]",
"tests/test_clean.py::test_character_entities_handling[&-&]",
"tests/test_clean.py::test_character_entities_handling[ - ]",
"tests/test_clean.py::test_character_entities_handling[ ",
"tests/test_clean.py::test_character_entities_handling[<em>strong</em>-<em>strong</em>]",
"tests/test_clean.py::test_character_entities_handling[&is",
"tests/test_clean.py::test_character_entities_handling[cool",
"tests/test_clean.py::test_character_entities_handling[&&",
"tests/test_clean.py::test_character_entities_handling[&",
"tests/test_clean.py::test_character_entities_handling[this",
"tests/test_clean.py::test_character_entities_handling[http://example.com?active=true¤t=true-http://example.com?active=true&current=true]",
"tests/test_clean.py::test_character_entities_handling[<a",
"tests/test_clean.py::test_character_entities_handling[&xx;-&xx;]",
"tests/test_clean.py::test_character_entities_handling[&adp;-&adp;]",
"tests/test_clean.py::test_character_entities_handling[&currdupe;-&currdupe;]",
"tests/test_clean.py::test_character_entities_handling['-']",
"tests/test_clean.py::test_character_entities_handling["-"]",
"tests/test_clean.py::test_character_entities_handling[{-{]",
"tests/test_clean.py::test_character_entities_handling[{-{]",
"tests/test_clean.py::test_character_entities_handling[{-{]",
"tests/test_clean.py::test_character_entities_handling[&#-&#]",
"tests/test_clean.py::test_character_entities_handling[&#<-&#<]",
"tests/test_clean.py::test_character_entities_handling['"-'"]",
"tests/test_clean.py::test_stripping_tags[a",
"tests/test_clean.py::test_stripping_tags[<p><a",
"tests/test_clean.py::test_stripping_tags[<p><span>multiply",
"tests/test_clean.py::test_stripping_tags[<ul><li><script></li></ul>-kwargs4-<ul><li></li></ul>]",
"tests/test_clean.py::test_stripping_tags[<isindex>-kwargs6-]",
"tests/test_clean.py::test_stripping_tags[Yeah",
"tests/test_clean.py::test_stripping_tags[<sarcasm>-kwargs8-]",
"tests/test_clean.py::test_stripping_tags[</sarcasm>-kwargs9-]",
"tests/test_clean.py::test_stripping_tags[</",
"tests/test_clean.py::test_stripping_tags[Foo",
"tests/test_clean.py::test_stripping_tags[Favorite",
"tests/test_clean.py::test_stripping_tags[</3-kwargs14-</3]",
"tests/test_clean.py::test_escaping_tags[<img",
"tests/test_clean.py::test_escaping_tags[<script>safe()</script>-<script>safe()</script>]",
"tests/test_clean.py::test_escaping_tags[<style>body{}</style>-<style>body{}</style>]",
"tests/test_clean.py::test_escaping_tags[<ul><li><script></li></ul>-<ul><li><script></li></ul>]",
"tests/test_clean.py::test_escaping_tags[<isindex>-<isindex>]",
"tests/test_clean.py::test_escaping_tags[<sarcasm/>-<sarcasm/>]",
"tests/test_clean.py::test_escaping_tags[<sarcasm>-<sarcasm>]",
"tests/test_clean.py::test_escaping_tags[</sarcasm>-</sarcasm>]",
"tests/test_clean.py::test_escaping_tags[</",
"tests/test_clean.py::test_escaping_tags[</3-</3]",
"tests/test_clean.py::test_escaping_tags[<[email protected]>-<[email protected]>]",
"tests/test_clean.py::test_escaping_tags[Favorite",
"tests/test_clean.py::test_stripping_tags_is_safe[<scri<script>pt>alert(1)</scr</script>ipt>-pt>alert(1)ipt>]",
"tests/test_clean.py::test_stripping_tags_is_safe[<scri<scri<script>pt>pt>alert(1)</script>-pt>pt>alert(1)]",
"tests/test_clean.py::test_href_with_wrong_tag",
"tests/test_clean.py::test_disallowed_attr",
"tests/test_clean.py::test_unquoted_attr_values_are_quoted",
"tests/test_clean.py::test_unquoted_event_handler_attr_value",
"tests/test_clean.py::test_invalid_filter_attr",
"tests/test_clean.py::test_poster_attribute",
"tests/test_clean.py::test_attributes_callable",
"tests/test_clean.py::test_attributes_wildcard",
"tests/test_clean.py::test_attributes_wildcard_callable",
"tests/test_clean.py::test_attributes_tag_callable",
"tests/test_clean.py::test_attributes_tag_list",
"tests/test_clean.py::test_attributes_list",
"tests/test_clean.py::test_uri_value_allowed_protocols[<a",
"tests/test_clean.py::test_svg_attr_val_allows_ref",
"tests/test_clean.py::test_svg_allow_local_href[<svg><pattern",
"tests/test_clean.py::test_svg_allow_local_href_nonlocal[<svg><pattern",
"tests/test_clean.py::test_invisible_characters[1\\x0723-1?23]",
"tests/test_clean.py::test_invisible_characters[1\\x0823-1?23]",
"tests/test_clean.py::test_invisible_characters[1\\x0b23-1?23]",
"tests/test_clean.py::test_invisible_characters[1\\x0c23-1?23]",
"tests/test_clean.py::test_invisible_characters[import",
"tests/test_clean.py::test_nonexistent_namespace",
"tests/test_clean.py::test_self_closing_tags_self_close[area]",
"tests/test_clean.py::test_self_closing_tags_self_close[base]",
"tests/test_clean.py::test_self_closing_tags_self_close[br]",
"tests/test_clean.py::test_self_closing_tags_self_close[embed]",
"tests/test_clean.py::test_self_closing_tags_self_close[hr]",
"tests/test_clean.py::test_self_closing_tags_self_close[img]",
"tests/test_clean.py::test_self_closing_tags_self_close[input]",
"tests/test_clean.py::test_self_closing_tags_self_close[link]",
"tests/test_clean.py::test_self_closing_tags_self_close[meta]",
"tests/test_clean.py::test_self_closing_tags_self_close[param]",
"tests/test_clean.py::test_self_closing_tags_self_close[source]",
"tests/test_clean.py::test_self_closing_tags_self_close[track]",
"tests/test_clean.py::test_noscript_rawtag_[title-<noscript><title></noscript><img",
"tests/test_clean.py::test_noscript_rawtag_[textarea-<noscript><textarea></noscript><img",
"tests/test_clean.py::test_noscript_rawtag_[script-<noscript><script></noscript><img",
"tests/test_clean.py::test_noscript_rawtag_[style-<noscript><style></noscript><img",
"tests/test_clean.py::test_noscript_rawtag_[noembed-<noscript><noembed></noscript><img",
"tests/test_clean.py::test_noscript_rawtag_[noframes-<noscript><noframes></noscript><img",
"tests/test_clean.py::test_noscript_rawtag_[iframe-<noscript><iframe></noscript><img",
"tests/test_clean.py::test_noscript_rawtag_[xmp-<noscript><xmp></noscript><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[math-iframe-<math><iframe><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[math-noembed-<math><noembed><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[math-script-<math><script><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[math-noframes-<math><noframes><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[math-xmp-<math><xmp><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[math-style-<math><style><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[math-noscript-<math><noscript><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[svg-iframe-<svg><iframe><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[svg-noembed-<svg><noembed><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[svg-script-<svg><script><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[svg-noframes-<svg><noframes><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[svg-xmp-<svg><xmp><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[svg-style-<svg><style><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[svg-noscript-<svg><noscript><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-style-<math></p><style><!--</style><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-style-<math></br><style><!--</style><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-style-<svg></p><style><!--</style><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-style-<svg></br><style><!--</style><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-title-<math></p><title><!--</title><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-title-<math></br><title><!--</title><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-title-<svg></p><title><!--</title><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-title-<svg></br><title><!--</title><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-noscript-<math></p><noscript><!--</noscript><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-noscript-<math></br><noscript><!--</noscript><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-noscript-<svg></p><noscript><!--</noscript><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-noscript-<svg></br><noscript><!--</noscript><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-script-<math></p><script><!--</script><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-script-<math></br><script><!--</script><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-script-<svg></p><script><!--</script><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-script-<svg></br><script><!--</script><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-noembed-<math></p><noembed><!--</noembed><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-noembed-<math></br><noembed><!--</noembed><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-noembed-<svg></p><noembed><!--</noembed><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-noembed-<svg></br><noembed><!--</noembed><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-textarea-<math></p><textarea><!--</textarea><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-textarea-<math></br><textarea><!--</textarea><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-textarea-<svg></p><textarea><!--</textarea><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-textarea-<svg></br><textarea><!--</textarea><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-noframes-<math></p><noframes><!--</noframes><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-noframes-<math></br><noframes><!--</noframes><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-noframes-<svg></p><noframes><!--</noframes><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-noframes-<svg></br><noframes><!--</noframes><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-iframe-<math></p><iframe><!--</iframe><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-iframe-<math></br><iframe><!--</iframe><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-iframe-<svg></p><iframe><!--</iframe><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-iframe-<svg></br><iframe><!--</iframe><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-xmp-<math></p><xmp><!--</xmp><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-xmp-<math></br><xmp><!--</xmp><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-xmp-<svg></p><xmp><!--</xmp><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-xmp-<svg></br><xmp><!--</xmp><img",
"tests/test_clean.py::test_strip_respects_block_level_elements[<p>Te<b>st</b>!</p><p>Hello</p>-Test!\\nHello]",
"tests/test_clean.py::test_strip_respects_block_level_elements[<p>This",
"tests/test_clean.py::test_strip_respects_block_level_elements[<div><p>This",
"tests/test_clean.py::test_regressions[1.test]",
"tests/test_clean.py::test_regressions[2.test]",
"tests/test_clean.py::test_regressions[3.test]",
"tests/test_clean.py::test_regressions[4.test]",
"tests/test_clean.py::test_regressions[5.test]",
"tests/test_clean.py::test_regressions[6.test]",
"tests/test_clean.py::test_regressions[7.test]",
"tests/test_clean.py::test_regressions[8.test]",
"tests/test_clean.py::test_regressions[9.test]",
"tests/test_clean.py::test_regressions[10.test]",
"tests/test_clean.py::test_regressions[11.test]",
"tests/test_clean.py::test_regressions[12.test]",
"tests/test_clean.py::test_regressions[13.test]",
"tests/test_clean.py::test_regressions[14.test]",
"tests/test_clean.py::test_regressions[15.test]",
"tests/test_clean.py::test_regressions[16.test]",
"tests/test_clean.py::test_regressions[17.test]",
"tests/test_clean.py::test_regressions[18.test]",
"tests/test_clean.py::test_regressions[19.test]",
"tests/test_clean.py::test_regressions[20.test]",
"tests/test_clean.py::test_preserves_attributes_order",
"tests/test_clean.py::TestCleaner::test_basics",
"tests/test_clean.py::TestCleaner::test_filters"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2022-06-01 17:26:53+00:00
|
apache-2.0
| 4,045 |
|
mozilla__bleach-669
|
diff --git a/bleach/sanitizer.py b/bleach/sanitizer.py
index 51ec562..3a222a1 100644
--- a/bleach/sanitizer.py
+++ b/bleach/sanitizer.py
@@ -488,9 +488,9 @@ class BleachSanitizerFilter(html5lib_shim.SanitizerFilter):
if ":" in new_value and new_value.split(":")[0] in allowed_protocols:
return value
- # If there's no protocol/scheme specified, then assume it's "http"
- # and see if that's allowed
- if "http" in allowed_protocols:
+ # If there's no protocol/scheme specified, then assume it's "http" or
+ # "https" and see if that's allowed
+ if "http" in allowed_protocols or "https" in allowed_protocols:
return value
return None
|
mozilla/bleach
|
ed06d4e56b70e08fae2dd8f13b6a1955cf106029
|
diff --git a/tests/test_clean.py b/tests/test_clean.py
index ab11253..b8e9cf1 100644
--- a/tests/test_clean.py
+++ b/tests/test_clean.py
@@ -542,12 +542,17 @@ def test_attributes_list():
{"protocols": []},
'<a href="#example.com">foo</a>',
),
- # Allow implicit http if allowed
+ # Allow implicit http/https if allowed
(
'<a href="/path">valid</a>',
{"protocols": ["http"]},
'<a href="/path">valid</a>',
),
+ (
+ '<a href="/path">valid</a>',
+ {"protocols": ["https"]},
+ '<a href="/path">valid</a>',
+ ),
(
'<a href="example.com">valid</a>',
{"protocols": ["http"]},
@@ -586,7 +591,7 @@ def test_attributes_list():
),
marks=pytest.mark.xfail,
),
- # Disallow implicit http if disallowed
+ # Disallow implicit http/https if disallowed
('<a href="example.com">foo</a>', {"protocols": []}, "<a>foo</a>"),
('<a href="example.com:8000">foo</a>', {"protocols": []}, "<a>foo</a>"),
('<a href="localhost">foo</a>', {"protocols": []}, "<a>foo</a>"),
|
bug: Relative url is removed when the allowed protocol is https
**Describe the bug**
`bleach.clean` does not keep the relative URL when the allowed protocol is https.
**Python and bleach versions (please complete the following information):**
- Python Version: 2.7.18
- Bleach Version: 3.3.1
**To Reproduce**
Steps to reproduce the behavior:
```
from bleach.sanitizer import Cleaner
BleachCleaner = Cleaner(
tags=['a', 'br'],
attributes={'a': 'href'},
styles=[],
protocols=['https'],
strip=True,
strip_comments=True
)
description = 'create new study <a href="/path/to/study">Mental study</a>'
BleachCleaner.clean(description)
```
**Expected behavior**
In bleach version 2.0.0, the relative url is not removed from the result:
**'create new study <a href="/path/to/study">Mental study</a>'**
**Actual behavior**
In bleach version 3.3.1, the relative url is removed from the result:
**'create new study <a>Mental study</a>'**
**Additional context**
<img width="721" alt="Screen Shot 2022-05-09 at 11 14 27" src="https://user-images.githubusercontent.com/98076735/167339622-a242d63d-934d-4c67-a2a1-582a6e62a280.png">
<img width="721" alt="Screen Shot 2022-05-09 at 11 14 50" src="https://user-images.githubusercontent.com/98076735/167339629-1aa1e03c-86a7-4049-898d-f4a67ad965d7.png">
|
0.0
|
ed06d4e56b70e08fae2dd8f13b6a1955cf106029
|
[
"tests/test_clean.py::test_uri_value_allowed_protocols[<a"
] |
[
"tests/test_clean.py::test_clean_idempotent[a",
"tests/test_clean.py::test_clean_idempotent[link",
"tests/test_clean.py::test_clean_idempotent[text<em>]",
"tests/test_clean.py::test_clean_idempotent[<span>text",
"tests/test_clean.py::test_clean_idempotent[jim",
"tests/test_clean.py::test_clean_idempotent[& ",
"tests/test_clean.py::test_clean_idempotent[<a",
"tests/test_clean.py::test_clean_idempotent_img",
"tests/test_clean.py::test_only_text_is_cleaned",
"tests/test_clean.py::test_empty",
"tests/test_clean.py::test_content_has_no_html",
"tests/test_clean.py::test_content_has_allowed_html[an",
"tests/test_clean.py::test_content_has_allowed_html[another",
"tests/test_clean.py::test_html_is_lowercased",
"tests/test_clean.py::test_comments[<!--",
"tests/test_clean.py::test_comments[<!--open",
"tests/test_clean.py::test_comments[<!--comment-->text-True-text]",
"tests/test_clean.py::test_comments[<!--comment-->text-False-<!--comment-->text]",
"tests/test_clean.py::test_comments[text<!--",
"tests/test_clean.py::test_comments[text<!--comment-->-True-text]",
"tests/test_clean.py::test_comments[text<!--comment-->-False-text<!--comment-->]",
"tests/test_clean.py::test_invalid_char_in_tag",
"tests/test_clean.py::test_unclosed_tag",
"tests/test_clean.py::test_nested_script_tag",
"tests/test_clean.py::test_bare_entities_get_escaped_correctly[an",
"tests/test_clean.py::test_bare_entities_get_escaped_correctly[tag",
"tests/test_clean.py::test_lessthan_escaping[x<y-x<y]",
"tests/test_clean.py::test_lessthan_escaping[<y-<y]",
"tests/test_clean.py::test_lessthan_escaping[x",
"tests/test_clean.py::test_lessthan_escaping[<y>-<y>]",
"tests/test_clean.py::test_character_entities_handling[&-&]",
"tests/test_clean.py::test_character_entities_handling[ - ]",
"tests/test_clean.py::test_character_entities_handling[ ",
"tests/test_clean.py::test_character_entities_handling[<em>strong</em>-<em>strong</em>]",
"tests/test_clean.py::test_character_entities_handling[&is",
"tests/test_clean.py::test_character_entities_handling[cool",
"tests/test_clean.py::test_character_entities_handling[&&",
"tests/test_clean.py::test_character_entities_handling[&",
"tests/test_clean.py::test_character_entities_handling[this",
"tests/test_clean.py::test_character_entities_handling[http://example.com?active=true¤t=true-http://example.com?active=true&current=true]",
"tests/test_clean.py::test_character_entities_handling[<a",
"tests/test_clean.py::test_character_entities_handling[&xx;-&xx;]",
"tests/test_clean.py::test_character_entities_handling[&adp;-&adp;]",
"tests/test_clean.py::test_character_entities_handling[&currdupe;-&currdupe;]",
"tests/test_clean.py::test_character_entities_handling['-']",
"tests/test_clean.py::test_character_entities_handling["-"]",
"tests/test_clean.py::test_character_entities_handling[{-{]",
"tests/test_clean.py::test_character_entities_handling[{-{]",
"tests/test_clean.py::test_character_entities_handling[{-{]",
"tests/test_clean.py::test_character_entities_handling[&#-&#]",
"tests/test_clean.py::test_character_entities_handling[&#<-&#<]",
"tests/test_clean.py::test_character_entities_handling['"-'"]",
"tests/test_clean.py::test_stripping_tags[a",
"tests/test_clean.py::test_stripping_tags[<p><a",
"tests/test_clean.py::test_stripping_tags[<p><span>multiply",
"tests/test_clean.py::test_stripping_tags[<ul><li><script></li></ul>-kwargs4-<ul><li></li></ul>]",
"tests/test_clean.py::test_stripping_tags[<isindex>-kwargs6-]",
"tests/test_clean.py::test_stripping_tags[Yeah",
"tests/test_clean.py::test_stripping_tags[<sarcasm>-kwargs8-]",
"tests/test_clean.py::test_stripping_tags[</sarcasm>-kwargs9-]",
"tests/test_clean.py::test_stripping_tags[</",
"tests/test_clean.py::test_stripping_tags[Foo",
"tests/test_clean.py::test_stripping_tags[Favorite",
"tests/test_clean.py::test_stripping_tags[</3-kwargs14-</3]",
"tests/test_clean.py::test_escaping_tags[<img",
"tests/test_clean.py::test_escaping_tags[<script>safe()</script>-<script>safe()</script>]",
"tests/test_clean.py::test_escaping_tags[<style>body{}</style>-<style>body{}</style>]",
"tests/test_clean.py::test_escaping_tags[<ul><li><script></li></ul>-<ul><li><script></li></ul>]",
"tests/test_clean.py::test_escaping_tags[<isindex>-<isindex>]",
"tests/test_clean.py::test_escaping_tags[<sarcasm/>-<sarcasm/>]",
"tests/test_clean.py::test_escaping_tags[<sarcasm>-<sarcasm>]",
"tests/test_clean.py::test_escaping_tags[</sarcasm>-</sarcasm>]",
"tests/test_clean.py::test_escaping_tags[</",
"tests/test_clean.py::test_escaping_tags[</3-</3]",
"tests/test_clean.py::test_escaping_tags[<[email protected]>-<[email protected]>]",
"tests/test_clean.py::test_escaping_tags[Favorite",
"tests/test_clean.py::test_stripping_tags_is_safe[<scri<script>pt>alert(1)</scr</script>ipt>-pt>alert(1)ipt>]",
"tests/test_clean.py::test_stripping_tags_is_safe[<scri<scri<script>pt>pt>alert(1)</script>-pt>pt>alert(1)]",
"tests/test_clean.py::test_href_with_wrong_tag",
"tests/test_clean.py::test_disallowed_attr",
"tests/test_clean.py::test_unquoted_attr_values_are_quoted",
"tests/test_clean.py::test_unquoted_event_handler_attr_value",
"tests/test_clean.py::test_invalid_filter_attr",
"tests/test_clean.py::test_poster_attribute",
"tests/test_clean.py::test_attributes_callable",
"tests/test_clean.py::test_attributes_wildcard",
"tests/test_clean.py::test_attributes_wildcard_callable",
"tests/test_clean.py::test_attributes_tag_callable",
"tests/test_clean.py::test_attributes_tag_list",
"tests/test_clean.py::test_attributes_list",
"tests/test_clean.py::test_svg_attr_val_allows_ref",
"tests/test_clean.py::test_svg_allow_local_href[<svg><pattern",
"tests/test_clean.py::test_svg_allow_local_href_nonlocal[<svg><pattern",
"tests/test_clean.py::test_invisible_characters[1\\x0723-1?23]",
"tests/test_clean.py::test_invisible_characters[1\\x0823-1?23]",
"tests/test_clean.py::test_invisible_characters[1\\x0b23-1?23]",
"tests/test_clean.py::test_invisible_characters[1\\x0c23-1?23]",
"tests/test_clean.py::test_invisible_characters[import",
"tests/test_clean.py::test_nonexistent_namespace",
"tests/test_clean.py::test_self_closing_tags_self_close[area]",
"tests/test_clean.py::test_self_closing_tags_self_close[base]",
"tests/test_clean.py::test_self_closing_tags_self_close[br]",
"tests/test_clean.py::test_self_closing_tags_self_close[embed]",
"tests/test_clean.py::test_self_closing_tags_self_close[hr]",
"tests/test_clean.py::test_self_closing_tags_self_close[img]",
"tests/test_clean.py::test_self_closing_tags_self_close[input]",
"tests/test_clean.py::test_self_closing_tags_self_close[link]",
"tests/test_clean.py::test_self_closing_tags_self_close[meta]",
"tests/test_clean.py::test_self_closing_tags_self_close[param]",
"tests/test_clean.py::test_self_closing_tags_self_close[source]",
"tests/test_clean.py::test_self_closing_tags_self_close[track]",
"tests/test_clean.py::test_noscript_rawtag_[title-<noscript><title></noscript><img",
"tests/test_clean.py::test_noscript_rawtag_[textarea-<noscript><textarea></noscript><img",
"tests/test_clean.py::test_noscript_rawtag_[script-<noscript><script></noscript><img",
"tests/test_clean.py::test_noscript_rawtag_[style-<noscript><style></noscript><img",
"tests/test_clean.py::test_noscript_rawtag_[noembed-<noscript><noembed></noscript><img",
"tests/test_clean.py::test_noscript_rawtag_[noframes-<noscript><noframes></noscript><img",
"tests/test_clean.py::test_noscript_rawtag_[iframe-<noscript><iframe></noscript><img",
"tests/test_clean.py::test_noscript_rawtag_[xmp-<noscript><xmp></noscript><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[math-noembed-<math><noembed><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[math-noframes-<math><noframes><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[math-script-<math><script><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[math-iframe-<math><iframe><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[math-noscript-<math><noscript><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[math-style-<math><style><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[math-xmp-<math><xmp><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[svg-noembed-<svg><noembed><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[svg-noframes-<svg><noframes><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[svg-script-<svg><script><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[svg-iframe-<svg><iframe><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[svg-noscript-<svg><noscript><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[svg-style-<svg><style><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[svg-xmp-<svg><xmp><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-style-<math></p><style><!--</style><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-style-<math></br><style><!--</style><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-style-<svg></p><style><!--</style><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-style-<svg></br><style><!--</style><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-title-<math></p><title><!--</title><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-title-<math></br><title><!--</title><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-title-<svg></p><title><!--</title><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-title-<svg></br><title><!--</title><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-noscript-<math></p><noscript><!--</noscript><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-noscript-<math></br><noscript><!--</noscript><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-noscript-<svg></p><noscript><!--</noscript><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-noscript-<svg></br><noscript><!--</noscript><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-script-<math></p><script><!--</script><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-script-<math></br><script><!--</script><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-script-<svg></p><script><!--</script><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-script-<svg></br><script><!--</script><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-noembed-<math></p><noembed><!--</noembed><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-noembed-<math></br><noembed><!--</noembed><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-noembed-<svg></p><noembed><!--</noembed><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-noembed-<svg></br><noembed><!--</noembed><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-textarea-<math></p><textarea><!--</textarea><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-textarea-<math></br><textarea><!--</textarea><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-textarea-<svg></p><textarea><!--</textarea><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-textarea-<svg></br><textarea><!--</textarea><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-noframes-<math></p><noframes><!--</noframes><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-noframes-<math></br><noframes><!--</noframes><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-noframes-<svg></p><noframes><!--</noframes><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-noframes-<svg></br><noframes><!--</noframes><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-iframe-<math></p><iframe><!--</iframe><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-iframe-<math></br><iframe><!--</iframe><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-iframe-<svg></p><iframe><!--</iframe><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-iframe-<svg></br><iframe><!--</iframe><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-xmp-<math></p><xmp><!--</xmp><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-xmp-<math></br><xmp><!--</xmp><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-xmp-<svg></p><xmp><!--</xmp><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-xmp-<svg></br><xmp><!--</xmp><img",
"tests/test_clean.py::test_strip_respects_block_level_elements[<p>Te<b>st</b>!</p><p>Hello</p>-Test!\\nHello]",
"tests/test_clean.py::test_strip_respects_block_level_elements[<p>This",
"tests/test_clean.py::test_strip_respects_block_level_elements[<div><p>This",
"tests/test_clean.py::test_regressions[1.test]",
"tests/test_clean.py::test_regressions[2.test]",
"tests/test_clean.py::test_regressions[3.test]",
"tests/test_clean.py::test_regressions[4.test]",
"tests/test_clean.py::test_regressions[5.test]",
"tests/test_clean.py::test_regressions[6.test]",
"tests/test_clean.py::test_regressions[7.test]",
"tests/test_clean.py::test_regressions[8.test]",
"tests/test_clean.py::test_regressions[9.test]",
"tests/test_clean.py::test_regressions[10.test]",
"tests/test_clean.py::test_regressions[11.test]",
"tests/test_clean.py::test_regressions[12.test]",
"tests/test_clean.py::test_regressions[13.test]",
"tests/test_clean.py::test_regressions[14.test]",
"tests/test_clean.py::test_regressions[15.test]",
"tests/test_clean.py::test_regressions[16.test]",
"tests/test_clean.py::test_regressions[17.test]",
"tests/test_clean.py::test_regressions[18.test]",
"tests/test_clean.py::test_regressions[19.test]",
"tests/test_clean.py::test_regressions[20.test]",
"tests/test_clean.py::test_preserves_attributes_order",
"tests/test_clean.py::TestCleaner::test_basics",
"tests/test_clean.py::TestCleaner::test_filters"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_media"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-06-02 14:10:22+00:00
|
apache-2.0
| 4,046 |
|
mozilla__bleach-691
|
diff --git a/bleach/sanitizer.py b/bleach/sanitizer.py
index 6527ac0..da9637c 100644
--- a/bleach/sanitizer.py
+++ b/bleach/sanitizer.py
@@ -48,6 +48,10 @@ INVISIBLE_CHARACTERS_RE = re.compile("[" + INVISIBLE_CHARACTERS + "]", re.UNICOD
INVISIBLE_REPLACEMENT_CHAR = "?"
+class NoCssSanitizerWarning(UserWarning):
+ pass
+
+
class Cleaner:
"""Cleaner for cleaning HTML fragments of malicious content
@@ -143,6 +147,25 @@ class Cleaner:
alphabetical_attributes=False,
)
+ if css_sanitizer is None:
+ # FIXME(willkg): this doesn't handle when attributes or an
+ # attributes value is a callable
+ attributes_values = []
+ if isinstance(attributes, list):
+ attributes_values = attributes
+
+ elif isinstance(attributes, dict):
+ attributes_values = []
+ for values in attributes.values():
+ if isinstance(values, (list, tuple)):
+ attributes_values.extend(values)
+
+ if "style" in attributes_values:
+ warnings.warn(
+ "'style' attribute specified, but css_sanitizer not set.",
+ category=NoCssSanitizerWarning,
+ )
+
def clean(self, text):
"""Cleans text and returns sanitized result as unicode
diff --git a/docs/clean.rst b/docs/clean.rst
index 7cfa757..9ebf74b 100644
--- a/docs/clean.rst
+++ b/docs/clean.rst
@@ -79,6 +79,12 @@ The default value is also a conservative dict found in
``bleach.sanitizer.ALLOWED_ATTRIBUTES``.
+.. Note::
+
+ If you allow ``style``, you need to also sanitize css. See
+ :ref:`clean-chapter-sanitizing-css` for details.
+
+
.. autodata:: bleach.sanitizer.ALLOWED_ATTRIBUTES
.. versionchanged:: 2.0
@@ -280,6 +286,8 @@ By default, Bleach will strip out HTML comments. To disable this behavior, set
'my<!-- commented --> html'
+.. _clean-chapter-sanitizing-css:
+
Sanitizing CSS
==============
diff --git a/setup.cfg b/setup.cfg
index aafdd5c..f631ecc 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -15,4 +15,6 @@ ignore =
max-line-length = 88
[tool:pytest]
-addopts = -W error:html5lib:DeprecationWarning
+filterwarnings =
+ error
+ ignore::bleach.sanitizer.NoCssSanitizerWarning
|
mozilla/bleach
|
11d756b3cd92408e3324c93371245bb9fb0f565d
|
diff --git a/tests/test_clean.py b/tests/test_clean.py
index 4abe991..10a91fd 100644
--- a/tests/test_clean.py
+++ b/tests/test_clean.py
@@ -4,7 +4,7 @@ import pytest
from bleach import clean
from bleach.html5lib_shim import Filter
-from bleach.sanitizer import ALLOWED_PROTOCOLS, Cleaner
+from bleach.sanitizer import ALLOWED_PROTOCOLS, Cleaner, NoCssSanitizerWarning
from bleach._vendor.html5lib.constants import rcdataElements
@@ -1176,6 +1176,20 @@ def test_preserves_attributes_order():
assert cleaned_html == html
[email protected](
+ "attr",
+ (
+ ["style"],
+ {"*": ["style"]},
+ ),
+)
+def test_css_sanitizer_warning(attr):
+ # If you have "style" in attributes, but don't set a css_sanitizer, it
+ # should raise a warning.
+ with pytest.warns(NoCssSanitizerWarning):
+ clean("foo", attributes=attr)
+
+
class TestCleaner:
def test_basics(self):
TAGS = ["span", "br"]
|
bug: bleach truncates Katex style attributes
**Bleach truncates a lot of Katex style attributes**
Basic example: a markdown_katex output may contain a span like so: `<span class="vlist" style="height:1.0697em;">`. It contains a `style` attribute, and when passed through bleach (allowing the `style` attribute), I get this:
```
<span class="vlist" style=""></span>
```
While the desired output would be:
```
<span class="vlist" style="height:1.0697em;"></span>
```
As a result, actual Katex math doesn't render properly.
**Python and bleach versions:**
- Python Version: 3.9.2
- Bleach Version: 5.0.1
- Markdown-katex Version: 202112.1034
**To Reproduce**
Steps to reproduce the behavior:
```python
from bleach import Cleaner
cleaner = Cleaner(tags = ['span'],
attributes = {'span': ['class', 'style']})
minimal_katex_span = '<span class="vlist" style="height:1.0697em;">'
res = cleaner.clean(minimal_katex_span)
print(res)
```
**Additional context**
I am unsure if this is actually a bug or intended behavior in some way. The more general problem I face is: how to correctly use bleach after user input is transformed through markdown with the markdown_katex extension?
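For context, bleach 5.x only keeps `style` values when a CSS sanitizer is configured, which is what the warning added by the patch above points at. A minimal sketch (assuming the optional `bleach[css]` extra providing `bleach.css_sanitizer` is installed):
```python
from bleach.sanitizer import Cleaner
from bleach.css_sanitizer import CSSSanitizer  # from the bleach[css] extra

css_sanitizer = CSSSanitizer(allowed_css_properties=['height'])
cleaner = Cleaner(
    tags=['span'],
    attributes={'span': ['class', 'style']},
    css_sanitizer=css_sanitizer,
)

# The allowed 'height' declaration is preserved instead of being stripped
print(cleaner.clean('<span class="vlist" style="height:1.0697em;">'))
# expected: '<span class="vlist" style="height:1.0697em;"></span>'
```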
|
0.0
|
11d756b3cd92408e3324c93371245bb9fb0f565d
|
[
"tests/test_clean.py::test_clean_idempotent[a",
"tests/test_clean.py::test_clean_idempotent[link",
"tests/test_clean.py::test_clean_idempotent[text<em>]",
"tests/test_clean.py::test_clean_idempotent[<span>text",
"tests/test_clean.py::test_clean_idempotent[jim",
"tests/test_clean.py::test_clean_idempotent[& ",
"tests/test_clean.py::test_clean_idempotent[<a",
"tests/test_clean.py::test_clean_idempotent_img",
"tests/test_clean.py::test_only_text_is_cleaned",
"tests/test_clean.py::test_empty",
"tests/test_clean.py::test_content_has_no_html",
"tests/test_clean.py::test_content_has_allowed_html[an",
"tests/test_clean.py::test_content_has_allowed_html[another",
"tests/test_clean.py::test_html_is_lowercased",
"tests/test_clean.py::test_comments[<!--",
"tests/test_clean.py::test_comments[<!--open",
"tests/test_clean.py::test_comments[<!--comment-->text-True-text]",
"tests/test_clean.py::test_comments[<!--comment-->text-False-<!--comment-->text]",
"tests/test_clean.py::test_comments[text<!--",
"tests/test_clean.py::test_comments[text<!--comment-->-True-text]",
"tests/test_clean.py::test_comments[text<!--comment-->-False-text<!--comment-->]",
"tests/test_clean.py::test_invalid_char_in_tag",
"tests/test_clean.py::test_unclosed_tag",
"tests/test_clean.py::test_nested_script_tag",
"tests/test_clean.py::test_bare_entities_get_escaped_correctly[an",
"tests/test_clean.py::test_bare_entities_get_escaped_correctly[tag",
"tests/test_clean.py::test_lessthan_escaping[x<y-x<y]",
"tests/test_clean.py::test_lessthan_escaping[<y-<y]",
"tests/test_clean.py::test_lessthan_escaping[x",
"tests/test_clean.py::test_lessthan_escaping[<y>-<y>]",
"tests/test_clean.py::test_character_entities_handling[&-&]",
"tests/test_clean.py::test_character_entities_handling[ - ]",
"tests/test_clean.py::test_character_entities_handling[ ",
"tests/test_clean.py::test_character_entities_handling[<em>strong</em>-<em>strong</em>]",
"tests/test_clean.py::test_character_entities_handling[&is",
"tests/test_clean.py::test_character_entities_handling[cool",
"tests/test_clean.py::test_character_entities_handling[&&",
"tests/test_clean.py::test_character_entities_handling[&",
"tests/test_clean.py::test_character_entities_handling[this",
"tests/test_clean.py::test_character_entities_handling[http://example.com?active=true¤t=true-http://example.com?active=true&current=true]",
"tests/test_clean.py::test_character_entities_handling[<a",
"tests/test_clean.py::test_character_entities_handling[&xx;-&xx;]",
"tests/test_clean.py::test_character_entities_handling[&adp;-&adp;]",
"tests/test_clean.py::test_character_entities_handling[&currdupe;-&currdupe;]",
"tests/test_clean.py::test_character_entities_handling['-']",
"tests/test_clean.py::test_character_entities_handling["-"]",
"tests/test_clean.py::test_character_entities_handling[{-{]",
"tests/test_clean.py::test_character_entities_handling[{-{]",
"tests/test_clean.py::test_character_entities_handling[{-{]",
"tests/test_clean.py::test_character_entities_handling[&#-&#]",
"tests/test_clean.py::test_character_entities_handling[&#<-&#<]",
"tests/test_clean.py::test_character_entities_handling['"-'"]",
"tests/test_clean.py::test_stripping_tags[a",
"tests/test_clean.py::test_stripping_tags[<p><a",
"tests/test_clean.py::test_stripping_tags[<p><span>multiply",
"tests/test_clean.py::test_stripping_tags[<ul><li><script></li></ul>-kwargs4-<ul><li></li></ul>]",
"tests/test_clean.py::test_stripping_tags[<isindex>-kwargs6-]",
"tests/test_clean.py::test_stripping_tags[Yeah",
"tests/test_clean.py::test_stripping_tags[<sarcasm>-kwargs8-]",
"tests/test_clean.py::test_stripping_tags[</sarcasm>-kwargs9-]",
"tests/test_clean.py::test_stripping_tags[</",
"tests/test_clean.py::test_stripping_tags[Foo",
"tests/test_clean.py::test_stripping_tags[Favorite",
"tests/test_clean.py::test_stripping_tags[</3-kwargs14-</3]",
"tests/test_clean.py::test_escaping_tags[<img",
"tests/test_clean.py::test_escaping_tags[<script>safe()</script>-<script>safe()</script>]",
"tests/test_clean.py::test_escaping_tags[<style>body{}</style>-<style>body{}</style>]",
"tests/test_clean.py::test_escaping_tags[<ul><li><script></li></ul>-<ul><li><script></li></ul>]",
"tests/test_clean.py::test_escaping_tags[<isindex>-<isindex>]",
"tests/test_clean.py::test_escaping_tags[<sarcasm/>-<sarcasm/>]",
"tests/test_clean.py::test_escaping_tags[<sarcasm>-<sarcasm>]",
"tests/test_clean.py::test_escaping_tags[</sarcasm>-</sarcasm>]",
"tests/test_clean.py::test_escaping_tags[</",
"tests/test_clean.py::test_escaping_tags[</3-</3]",
"tests/test_clean.py::test_escaping_tags[<[email protected]>-<[email protected]>]",
"tests/test_clean.py::test_escaping_tags[Favorite",
"tests/test_clean.py::test_stripping_tags_is_safe[<scri<script>pt>alert(1)</scr</script>ipt>-pt>alert(1)ipt>]",
"tests/test_clean.py::test_stripping_tags_is_safe[<scri<scri<script>pt>pt>alert(1)</script>-pt>pt>alert(1)]",
"tests/test_clean.py::test_href_with_wrong_tag",
"tests/test_clean.py::test_disallowed_attr",
"tests/test_clean.py::test_unquoted_attr_values_are_quoted",
"tests/test_clean.py::test_unquoted_event_handler_attr_value",
"tests/test_clean.py::test_invalid_filter_attr",
"tests/test_clean.py::test_poster_attribute",
"tests/test_clean.py::test_attributes_callable",
"tests/test_clean.py::test_attributes_wildcard",
"tests/test_clean.py::test_attributes_wildcard_callable",
"tests/test_clean.py::test_attributes_tag_callable",
"tests/test_clean.py::test_attributes_tag_list",
"tests/test_clean.py::test_attributes_list",
"tests/test_clean.py::test_uri_value_allowed_protocols[<a",
"tests/test_clean.py::test_svg_attr_val_allows_ref",
"tests/test_clean.py::test_svg_allow_local_href[<svg><pattern",
"tests/test_clean.py::test_svg_allow_local_href_nonlocal[<svg><pattern",
"tests/test_clean.py::test_invisible_characters[1\\x0723-1?23]",
"tests/test_clean.py::test_invisible_characters[1\\x0823-1?23]",
"tests/test_clean.py::test_invisible_characters[1\\x0b23-1?23]",
"tests/test_clean.py::test_invisible_characters[1\\x0c23-1?23]",
"tests/test_clean.py::test_invisible_characters[import",
"tests/test_clean.py::test_nonexistent_namespace",
"tests/test_clean.py::test_self_closing_tags_self_close[area]",
"tests/test_clean.py::test_self_closing_tags_self_close[base]",
"tests/test_clean.py::test_self_closing_tags_self_close[br]",
"tests/test_clean.py::test_self_closing_tags_self_close[embed]",
"tests/test_clean.py::test_self_closing_tags_self_close[hr]",
"tests/test_clean.py::test_self_closing_tags_self_close[img]",
"tests/test_clean.py::test_self_closing_tags_self_close[input]",
"tests/test_clean.py::test_self_closing_tags_self_close[link]",
"tests/test_clean.py::test_self_closing_tags_self_close[meta]",
"tests/test_clean.py::test_self_closing_tags_self_close[param]",
"tests/test_clean.py::test_self_closing_tags_self_close[source]",
"tests/test_clean.py::test_self_closing_tags_self_close[track]",
"tests/test_clean.py::test_noscript_rawtag_[title-<noscript><title></noscript><img",
"tests/test_clean.py::test_noscript_rawtag_[textarea-<noscript><textarea></noscript><img",
"tests/test_clean.py::test_noscript_rawtag_[script-<noscript><script></noscript><img",
"tests/test_clean.py::test_noscript_rawtag_[style-<noscript><style></noscript><img",
"tests/test_clean.py::test_noscript_rawtag_[noembed-<noscript><noembed></noscript><img",
"tests/test_clean.py::test_noscript_rawtag_[noframes-<noscript><noframes></noscript><img",
"tests/test_clean.py::test_noscript_rawtag_[iframe-<noscript><iframe></noscript><img",
"tests/test_clean.py::test_noscript_rawtag_[xmp-<noscript><xmp></noscript><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[math-style-<math><style><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[math-noembed-<math><noembed><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[math-iframe-<math><iframe><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[math-xmp-<math><xmp><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[math-noframes-<math><noframes><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[math-noscript-<math><noscript><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[math-script-<math><script><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[svg-style-<svg><style><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[svg-noembed-<svg><noembed><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[svg-iframe-<svg><iframe><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[svg-xmp-<svg><xmp><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[svg-noframes-<svg><noframes><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[svg-noscript-<svg><noscript><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[svg-script-<svg><script><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-style-<math></p><style><!--</style><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-style-<math></br><style><!--</style><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-style-<svg></p><style><!--</style><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-style-<svg></br><style><!--</style><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-title-<math></p><title><!--</title><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-title-<math></br><title><!--</title><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-title-<svg></p><title><!--</title><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-title-<svg></br><title><!--</title><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-noscript-<math></p><noscript><!--</noscript><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-noscript-<math></br><noscript><!--</noscript><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-noscript-<svg></p><noscript><!--</noscript><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-noscript-<svg></br><noscript><!--</noscript><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-script-<math></p><script><!--</script><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-script-<math></br><script><!--</script><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-script-<svg></p><script><!--</script><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-script-<svg></br><script><!--</script><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-noembed-<math></p><noembed><!--</noembed><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-noembed-<math></br><noembed><!--</noembed><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-noembed-<svg></p><noembed><!--</noembed><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-noembed-<svg></br><noembed><!--</noembed><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-textarea-<math></p><textarea><!--</textarea><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-textarea-<math></br><textarea><!--</textarea><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-textarea-<svg></p><textarea><!--</textarea><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-textarea-<svg></br><textarea><!--</textarea><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-noframes-<math></p><noframes><!--</noframes><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-noframes-<math></br><noframes><!--</noframes><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-noframes-<svg></p><noframes><!--</noframes><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-noframes-<svg></br><noframes><!--</noframes><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-iframe-<math></p><iframe><!--</iframe><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-iframe-<math></br><iframe><!--</iframe><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-iframe-<svg></p><iframe><!--</iframe><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-iframe-<svg></br><iframe><!--</iframe><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-xmp-<math></p><xmp><!--</xmp><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-xmp-<math></br><xmp><!--</xmp><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-xmp-<svg></p><xmp><!--</xmp><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-xmp-<svg></br><xmp><!--</xmp><img",
"tests/test_clean.py::test_strip_respects_block_level_elements[<p>Te<b>st</b>!</p><p>Hello</p>-Test!\\nHello]",
"tests/test_clean.py::test_strip_respects_block_level_elements[<p>This",
"tests/test_clean.py::test_strip_respects_block_level_elements[<div><p>This",
"tests/test_clean.py::test_regressions[1.test]",
"tests/test_clean.py::test_regressions[2.test]",
"tests/test_clean.py::test_regressions[3.test]",
"tests/test_clean.py::test_regressions[4.test]",
"tests/test_clean.py::test_regressions[5.test]",
"tests/test_clean.py::test_regressions[6.test]",
"tests/test_clean.py::test_regressions[7.test]",
"tests/test_clean.py::test_regressions[8.test]",
"tests/test_clean.py::test_regressions[9.test]",
"tests/test_clean.py::test_regressions[10.test]",
"tests/test_clean.py::test_regressions[11.test]",
"tests/test_clean.py::test_regressions[12.test]",
"tests/test_clean.py::test_regressions[13.test]",
"tests/test_clean.py::test_regressions[14.test]",
"tests/test_clean.py::test_regressions[15.test]",
"tests/test_clean.py::test_regressions[16.test]",
"tests/test_clean.py::test_regressions[17.test]",
"tests/test_clean.py::test_regressions[18.test]",
"tests/test_clean.py::test_regressions[19.test]",
"tests/test_clean.py::test_regressions[20.test]",
"tests/test_clean.py::test_preserves_attributes_order",
"tests/test_clean.py::test_css_sanitizer_warning[attr0]",
"tests/test_clean.py::test_css_sanitizer_warning[attr1]",
"tests/test_clean.py::TestCleaner::test_basics",
"tests/test_clean.py::TestCleaner::test_filters"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-12-23 19:21:53+00:00
|
apache-2.0
| 4,047 |
|
mozilla__bleach-692
|
diff --git a/bleach/linkifier.py b/bleach/linkifier.py
index b3b83e6..343f374 100644
--- a/bleach/linkifier.py
+++ b/bleach/linkifier.py
@@ -147,13 +147,16 @@ class Linker:
self.parser = html5lib_shim.BleachHTMLParser(
tags=recognized_tags,
strip=False,
- consume_entities=True,
+ consume_entities=False,
namespaceHTMLElements=False,
)
self.walker = html5lib_shim.getTreeWalker("etree")
self.serializer = html5lib_shim.BleachHTMLSerializer(
quote_attr_values="always",
omit_optional_tags=False,
+ # We want to leave entities as they are without escaping or
+ # resolving or expanding
+ resolve_entities=False,
# linkify does not sanitize
sanitize=False,
# linkify preserves attr order
@@ -510,6 +513,62 @@ class LinkifyFilter(html5lib_shim.Filter):
yield {"type": "Characters", "data": str(new_text)}
yield token_buffer[-1]
+ def extract_entities(self, token):
+ """Handles Characters tokens with entities
+
+ Our overridden tokenizer doesn't do anything with entities. However,
+ that means that the serializer will convert all ``&`` in Characters
+ tokens to ``&amp;``.
+
+ Since we don't want that, we extract entities here and convert them to
+ Entity tokens so the serializer will let them be.
+
+ :arg token: the Characters token to work on
+
+ :returns: generator of tokens
+
+ """
+ data = token.get("data", "")
+
+ # If there isn't a & in the data, we can return now
+ if "&" not in data:
+ yield token
+ return
+
+ new_tokens = []
+
+ # For each possible entity that starts with a "&", we try to extract an
+ # actual entity and re-tokenize accordingly
+ for part in html5lib_shim.next_possible_entity(data):
+ if not part:
+ continue
+
+ if part.startswith("&"):
+ entity = html5lib_shim.match_entity(part)
+ if entity is not None:
+ if entity == "amp":
+ # LinkifyFilter can't match urls across token boundaries
+ # which is problematic with &amp; since that shows up in
+ # querystrings all the time. This special-cases &amp;
+ # and converts it to a & and sticks it in as a
+ # Characters token. It'll get merged with surrounding
+ # tokens in the BleachSanitizerfilter.__iter__ and
+ # escaped in the serializer.
+ new_tokens.append({"type": "Characters", "data": "&"})
+ else:
+ new_tokens.append({"type": "Entity", "name": entity})
+
+ # Length of the entity plus 2--one for & at the beginning
+ # and one for ; at the end
+ remainder = part[len(entity) + 2 :]
+ if remainder:
+ new_tokens.append({"type": "Characters", "data": remainder})
+ continue
+
+ new_tokens.append({"type": "Characters", "data": part})
+
+ yield from new_tokens
+
def __iter__(self):
in_a = False
in_skip_tag = None
@@ -564,8 +623,8 @@ class LinkifyFilter(html5lib_shim.Filter):
new_stream = self.handle_links(new_stream)
- for token in new_stream:
- yield token
+ for new_token in new_stream:
+ yield from self.extract_entities(new_token)
# We've already yielded this token, so continue
continue
|
mozilla/bleach
|
459d370ce6e27e5e82e72bfec92f95976df9a02f
|
diff --git a/tests/test_linkify.py b/tests/test_linkify.py
index c05bb30..a6d96e2 100644
--- a/tests/test_linkify.py
+++ b/tests/test_linkify.py
@@ -324,13 +324,17 @@ def test_link_fragment():
)
-def test_link_entities():
+def test_link_entities_in_qs():
assert (
linkify("http://xx.com/?a=1&b=2")
== '<a href="http://xx.com/?a=1&b=2" rel="nofollow">http://xx.com/?a=1&b=2</a>'
)
+def test_link_entities_in_characters_token():
+ assert linkify("foo bar") == "foo bar"
+
+
def test_escaped_html():
"""If I pass in escaped HTML, it should probably come out escaped."""
s = "<em>strong</em>"
|
Bug relating to nbsp?
This is with bleach 3.1:
```
>>> import bleach
>>> bleach.clean(u'foo.&nbsp; bar.')
'foo.&nbsp; bar.'
>>> bleach.linkify(u'foo.&nbsp; bar.')
'foo.\xa0 bar.'
```
Odd that `linkify` would convert `&nbsp;` to `\xa0`. Maybe related to #143?
Seems like a bug but I'd expect that someone else would have come across this already.
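A quick check of the behavior targeted by the patch above (sketch; with the fix, entities in Characters tokens pass through `linkify` unchanged):
```python
import bleach

# Previously, consume_entities=True let the tokenizer resolve '&nbsp;'
# into '\xa0'; with resolve_entities=False plus the entity extraction
# above, the entity is preserved verbatim.
print(bleach.linkify('foo.&nbsp; bar.'))
# expected after the fix: 'foo.&nbsp; bar.'
```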
|
0.0
|
459d370ce6e27e5e82e72bfec92f95976df9a02f
|
[
"tests/test_linkify.py::test_link_entities_in_characters_token"
] |
[
"tests/test_linkify.py::test_empty",
"tests/test_linkify.py::test_simple_link",
"tests/test_linkify.py::test_trailing_slash",
"tests/test_linkify.py::test_mangle_link",
"tests/test_linkify.py::test_mangle_text",
"tests/test_linkify.py::test_email_link[a",
"tests/test_linkify.py::test_email_link[aussie",
"tests/test_linkify.py::test_email_link[email",
"tests/test_linkify.py::test_email_link[<br>[email protected]<br><a",
"tests/test_linkify.py::test_email_link[mailto",
"tests/test_linkify.py::test_email_link[\"\\\\\\n\"@opa.ru-True-\"\\\\\\n\"@opa.ru]",
"tests/test_linkify.py::test_email_link[gorby%[email protected]<a",
"tests/test_linkify.py::test_email_link[[email protected]<a",
"tests/test_linkify.py::test_email_link_escaping[\"james\"@example.com-<a",
"tests/test_linkify.py::test_email_link_escaping[\"j'ames\"@example.com-<a",
"tests/test_linkify.py::test_email_link_escaping[\"ja>mes\"@example.com-<a",
"tests/test_linkify.py::test_prevent_links[callback0-a",
"tests/test_linkify.py::test_prevent_links[callback1-a",
"tests/test_linkify.py::test_prevent_links[callback2-a",
"tests/test_linkify.py::test_prevent_links[callback3-a",
"tests/test_linkify.py::test_prevent_links[callback4-a",
"tests/test_linkify.py::test_prevent_links[callback5-a",
"tests/test_linkify.py::test_set_attrs",
"tests/test_linkify.py::test_only_proto_links",
"tests/test_linkify.py::test_stop_email",
"tests/test_linkify.py::test_tlds[example.com-<a",
"tests/test_linkify.py::test_tlds[example.co-<a",
"tests/test_linkify.py::test_tlds[example.co.uk-<a",
"tests/test_linkify.py::test_tlds[example.edu-<a",
"tests/test_linkify.py::test_tlds[example.xxx-<a",
"tests/test_linkify.py::test_tlds[bit.ly/fun-<a",
"tests/test_linkify.py::test_tlds[example.yyy-example.yyy]",
"tests/test_linkify.py::test_tlds[brie-brie]",
"tests/test_linkify.py::test_escaping[<",
"tests/test_linkify.py::test_escaping[<U",
"tests/test_linkify.py::test_nofollow_off",
"tests/test_linkify.py::test_link_in_html",
"tests/test_linkify.py::test_links_https",
"tests/test_linkify.py::test_add_rel_nofollow",
"tests/test_linkify.py::test_url_with_path",
"tests/test_linkify.py::test_link_ftp",
"tests/test_linkify.py::test_link_query",
"tests/test_linkify.py::test_link_fragment",
"tests/test_linkify.py::test_link_entities_in_qs",
"tests/test_linkify.py::test_escaped_html",
"tests/test_linkify.py::test_link_http_complete",
"tests/test_linkify.py::test_non_url",
"tests/test_linkify.py::test_javascript_url",
"tests/test_linkify.py::test_unsafe_url",
"tests/test_linkify.py::test_skip_tags",
"tests/test_linkify.py::test_libgl",
"tests/test_linkify.py::test_end_of_sentence[example.com-.]",
"tests/test_linkify.py::test_end_of_sentence[example.com-...]",
"tests/test_linkify.py::test_end_of_sentence[ex.com/foo-.]",
"tests/test_linkify.py::test_end_of_sentence[ex.com/foo-....]",
"tests/test_linkify.py::test_end_of_clause",
"tests/test_linkify.py::test_sarcasm",
"tests/test_linkify.py::test_wrapping_parentheses[(example.com)-expected_parts0]",
"tests/test_linkify.py::test_wrapping_parentheses[(example.com/)-expected_parts1]",
"tests/test_linkify.py::test_wrapping_parentheses[(example.com/foo)-expected_parts2]",
"tests/test_linkify.py::test_wrapping_parentheses[(((example.com/))))-expected_parts3]",
"tests/test_linkify.py::test_wrapping_parentheses[example.com/))-expected_parts4]",
"tests/test_linkify.py::test_wrapping_parentheses[(foo",
"tests/test_linkify.py::test_wrapping_parentheses[http://en.wikipedia.org/wiki/Test_(assessment)-expected_parts7]",
"tests/test_linkify.py::test_wrapping_parentheses[(http://en.wikipedia.org/wiki/Test_(assessment))-expected_parts8]",
"tests/test_linkify.py::test_wrapping_parentheses[((http://en.wikipedia.org/wiki/Test_(assessment))-expected_parts9]",
"tests/test_linkify.py::test_wrapping_parentheses[(http://en.wikipedia.org/wiki/Test_(assessment)))-expected_parts10]",
"tests/test_linkify.py::test_wrapping_parentheses[(http://en.wikipedia.org/wiki/)Test_(assessment-expected_parts11]",
"tests/test_linkify.py::test_wrapping_parentheses[hello",
"tests/test_linkify.py::test_parentheses_with_removing",
"tests/test_linkify.py::test_ports[http://foo.com:8000-http://foo.com:8000-]",
"tests/test_linkify.py::test_ports[http://foo.com:8000/-http://foo.com:8000/-]",
"tests/test_linkify.py::test_ports[http://bar.com:xkcd-http://bar.com-:xkcd]",
"tests/test_linkify.py::test_ports[http://foo.com:81/bar-http://foo.com:81/bar-]",
"tests/test_linkify.py::test_ports[http://foo.com:-http://foo.com-:]",
"tests/test_linkify.py::test_ports[http://foo.com:\\u0663\\u0669/-http://foo.com-:\\u0663\\u0669/]",
"tests/test_linkify.py::test_ports[http://foo.com:\\U0001d7e0\\U0001d7d8/-http://foo.com-:\\U0001d7e0\\U0001d7d8/]",
"tests/test_linkify.py::test_ignore_bad_protocols",
"tests/test_linkify.py::test_link_emails_and_urls",
"tests/test_linkify.py::test_links_case_insensitive",
"tests/test_linkify.py::test_elements_inside_links",
"tests/test_linkify.py::test_drop_link_tags",
"tests/test_linkify.py::test_naughty_unescaping[<br>-<br>]",
"tests/test_linkify.py::test_naughty_unescaping[<br>",
"tests/test_linkify.py::test_hang",
"tests/test_linkify.py::test_hyphen_in_mail",
"tests/test_linkify.py::test_url_re_arg",
"tests/test_linkify.py::test_email_re_arg",
"tests/test_linkify.py::test_recognized_tags_arg",
"tests/test_linkify.py::test_linkify_idempotent[<span>text",
"tests/test_linkify.py::test_linkify_idempotent[a",
"tests/test_linkify.py::test_linkify_idempotent[link",
"tests/test_linkify.py::test_linkify_idempotent[text<em>]",
"tests/test_linkify.py::test_linkify_idempotent[jim",
"tests/test_linkify.py::test_linkify_idempotent[<a",
"tests/test_linkify.py::TestLinkify::test_no_href_links",
"tests/test_linkify.py::TestLinkify::test_rel_already_there",
"tests/test_linkify.py::TestLinkify::test_only_text_is_linkified",
"tests/test_linkify.py::test_linkify_filter[abc-abc]",
"tests/test_linkify.py::test_linkify_filter[example.com-<a",
"tests/test_linkify.py::test_linkify_filter[http://example.com?b=1&c=2-<a",
"tests/test_linkify.py::test_linkify_filter[http://example.com?b=1&c=2-<a",
"tests/test_linkify.py::test_linkify_filter[link:"
] |
{
"failed_lite_validators": [
"has_issue_reference"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-01-11 18:42:44+00:00
|
apache-2.0
| 4,048 |
|
mozilla__bleach-720
|
diff --git a/bleach/linkifier.py b/bleach/linkifier.py
index 679d7ea..54a432e 100644
--- a/bleach/linkifier.py
+++ b/bleach/linkifier.py
@@ -591,7 +591,7 @@ class LinkifyFilter(html5lib_shim.Filter):
in_a = False
token_buffer = []
else:
- token_buffer.append(token)
+ token_buffer.extend(list(self.extract_entities(token)))
continue
if token["type"] in ["StartTag", "EmptyTag"]:
|
mozilla/bleach
|
6f0aaaaa98ff355a1207e7c1c77d3eb0c063a9b1
|
diff --git a/tests/test_linkify.py b/tests/test_linkify.py
index bd83813..4cc01a6 100644
--- a/tests/test_linkify.py
+++ b/tests/test_linkify.py
@@ -323,6 +323,7 @@ def test_link_fragment():
def test_link_entities_in_qs():
+ """Entities in the querystring get escaped"""
assert (
linkify("http://xx.com/?a=1&b=2")
== '<a href="http://xx.com/?a=1&amp;b=2" rel="nofollow">http://xx.com/?a=1&amp;b=2</a>'
@@ -330,9 +331,18 @@ def test_link_entities_in_qs():
def test_link_entities_in_characters_token():
+ """Entitites in a Characters token don't get escaped"""
assert linkify("foo bar") == "foo bar"
+def test_link_entities_in_a_tag():
+ """Entitites between an a start tag and an a end tag don't get escaped"""
+ assert (
+ linkify('<a href="/">Some entity’s</a>', callbacks=[])
+ == '<a href="/">Some entity’s</a>'
+ )
+
+
def test_escaped_html():
"""If I pass in escaped HTML, it should probably come out escaped."""
s = "<em>strong</em>"
|
bug: linkify with entities inside anchor strings are incorrectly escaped
**Describe the bug**
linkify on a string with entities inside anchor element text results in the `&` character of the entity being incorrectly escaped to `&amp;`,
e.g. `&nbsp;` -> `&amp;nbsp;`
**python and bleach versions (please complete the following information):**
- Python Version: 3.9.5
- Bleach Version: 6.0.0
**To Reproduce**
A simple test to reproduce the behavior:
```python
>>> from bleach import linkify
text = r'<p><a href="/">Some entity’s</a>More entity’s</p>'
expected = r'<p><a href="/" rel="nofollow">Some entity’s</a>More entity’s</p>'
assert linkify(text) == expected
```
**Expected behavior**
```python
linkify(r'<a href="/">Some entity’s</a>')
'<a href="/" rel="nofollow">Some entity’s</a>'
```
**Actual behavior**
```python
linkify(r'<a href="/">Some entity’s</a>')
'<a href="/" rel="nofollow">Some&nbsp;entity&rsquo;s</a>'
```
**Additional context**
This bug was introduced in 6.0.0 with the fix for #501 and #692: https://github.com/mozilla/bleach/pull/692
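For reference, a minimal sketch of the expected behavior once the fix lands, mirroring the test case added above (it assumes a bleach build that contains the patch):
```python
from bleach import linkify

# entities between an <a> start tag and its end tag must survive untouched
assert (
    linkify('<a href="/">Some&nbsp;entity&rsquo;s</a>', callbacks=[])
    == '<a href="/">Some&nbsp;entity&rsquo;s</a>'
)
```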
|
0.0
|
6f0aaaaa98ff355a1207e7c1c77d3eb0c063a9b1
|
[
"tests/test_linkify.py::test_link_entities_in_a_tag"
] |
[
"tests/test_linkify.py::test_empty",
"tests/test_linkify.py::test_simple_link",
"tests/test_linkify.py::test_trailing_slash",
"tests/test_linkify.py::test_mangle_link",
"tests/test_linkify.py::test_mangle_text",
"tests/test_linkify.py::test_email_link[a",
"tests/test_linkify.py::test_email_link[aussie",
"tests/test_linkify.py::test_email_link[email",
"tests/test_linkify.py::test_email_link[<br>[email protected]<br><a",
"tests/test_linkify.py::test_email_link[mailto",
"tests/test_linkify.py::test_email_link[\"\\\\\\n\"@opa.ru-True-\"\\\\\\n\"@opa.ru]",
"tests/test_linkify.py::test_email_link[gorby%[email protected]<a",
"tests/test_linkify.py::test_email_link[[email protected]<a",
"tests/test_linkify.py::test_email_link_escaping[\"james\"@example.com-<a",
"tests/test_linkify.py::test_email_link_escaping[\"j'ames\"@example.com-<a",
"tests/test_linkify.py::test_email_link_escaping[\"ja>mes\"@example.com-<a",
"tests/test_linkify.py::test_prevent_links[callback0-a",
"tests/test_linkify.py::test_prevent_links[callback1-a",
"tests/test_linkify.py::test_prevent_links[callback2-a",
"tests/test_linkify.py::test_prevent_links[callback3-a",
"tests/test_linkify.py::test_prevent_links[callback4-a",
"tests/test_linkify.py::test_prevent_links[callback5-a",
"tests/test_linkify.py::test_set_attrs",
"tests/test_linkify.py::test_only_proto_links",
"tests/test_linkify.py::test_stop_email",
"tests/test_linkify.py::test_tlds[example.com-<a",
"tests/test_linkify.py::test_tlds[example.co-<a",
"tests/test_linkify.py::test_tlds[example.co.uk-<a",
"tests/test_linkify.py::test_tlds[example.edu-<a",
"tests/test_linkify.py::test_tlds[example.xxx-<a",
"tests/test_linkify.py::test_tlds[bit.ly/fun-<a",
"tests/test_linkify.py::test_tlds[example.yyy-example.yyy]",
"tests/test_linkify.py::test_tlds[brie-brie]",
"tests/test_linkify.py::test_escaping[<",
"tests/test_linkify.py::test_escaping[<U",
"tests/test_linkify.py::test_nofollow_off",
"tests/test_linkify.py::test_link_in_html",
"tests/test_linkify.py::test_links_https",
"tests/test_linkify.py::test_add_rel_nofollow",
"tests/test_linkify.py::test_url_with_path",
"tests/test_linkify.py::test_link_ftp",
"tests/test_linkify.py::test_link_query",
"tests/test_linkify.py::test_link_fragment",
"tests/test_linkify.py::test_link_entities_in_qs",
"tests/test_linkify.py::test_link_entities_in_characters_token",
"tests/test_linkify.py::test_escaped_html",
"tests/test_linkify.py::test_link_http_complete",
"tests/test_linkify.py::test_non_url",
"tests/test_linkify.py::test_javascript_url",
"tests/test_linkify.py::test_unsafe_url",
"tests/test_linkify.py::test_skip_tags",
"tests/test_linkify.py::test_libgl",
"tests/test_linkify.py::test_end_of_sentence[example.com-.]",
"tests/test_linkify.py::test_end_of_sentence[example.com-...]",
"tests/test_linkify.py::test_end_of_sentence[ex.com/foo-.]",
"tests/test_linkify.py::test_end_of_sentence[ex.com/foo-....]",
"tests/test_linkify.py::test_end_of_clause",
"tests/test_linkify.py::test_sarcasm",
"tests/test_linkify.py::test_wrapping_parentheses[(example.com)-expected_parts0]",
"tests/test_linkify.py::test_wrapping_parentheses[(example.com/)-expected_parts1]",
"tests/test_linkify.py::test_wrapping_parentheses[(example.com/foo)-expected_parts2]",
"tests/test_linkify.py::test_wrapping_parentheses[(((example.com/))))-expected_parts3]",
"tests/test_linkify.py::test_wrapping_parentheses[example.com/))-expected_parts4]",
"tests/test_linkify.py::test_wrapping_parentheses[(foo",
"tests/test_linkify.py::test_wrapping_parentheses[http://en.wikipedia.org/wiki/Test_(assessment)-expected_parts7]",
"tests/test_linkify.py::test_wrapping_parentheses[(http://en.wikipedia.org/wiki/Test_(assessment))-expected_parts8]",
"tests/test_linkify.py::test_wrapping_parentheses[((http://en.wikipedia.org/wiki/Test_(assessment))-expected_parts9]",
"tests/test_linkify.py::test_wrapping_parentheses[(http://en.wikipedia.org/wiki/Test_(assessment)))-expected_parts10]",
"tests/test_linkify.py::test_wrapping_parentheses[(http://en.wikipedia.org/wiki/)Test_(assessment-expected_parts11]",
"tests/test_linkify.py::test_wrapping_parentheses[hello",
"tests/test_linkify.py::test_parentheses_with_removing",
"tests/test_linkify.py::test_ports[http://foo.com:8000-http://foo.com:8000-]",
"tests/test_linkify.py::test_ports[http://foo.com:8000/-http://foo.com:8000/-]",
"tests/test_linkify.py::test_ports[http://bar.com:xkcd-http://bar.com-:xkcd]",
"tests/test_linkify.py::test_ports[http://foo.com:81/bar-http://foo.com:81/bar-]",
"tests/test_linkify.py::test_ports[http://foo.com:-http://foo.com-:]",
"tests/test_linkify.py::test_ports[http://foo.com:\\u0663\\u0669/-http://foo.com-:\\u0663\\u0669/]",
"tests/test_linkify.py::test_ports[http://foo.com:\\U0001d7e0\\U0001d7d8/-http://foo.com-:\\U0001d7e0\\U0001d7d8/]",
"tests/test_linkify.py::test_ignore_bad_protocols",
"tests/test_linkify.py::test_link_emails_and_urls",
"tests/test_linkify.py::test_links_case_insensitive",
"tests/test_linkify.py::test_elements_inside_links",
"tests/test_linkify.py::test_drop_link_tags",
"tests/test_linkify.py::test_naughty_unescaping[<br>-<br>]",
"tests/test_linkify.py::test_naughty_unescaping[<br>",
"tests/test_linkify.py::test_hang",
"tests/test_linkify.py::test_hyphen_in_mail",
"tests/test_linkify.py::test_url_re_arg",
"tests/test_linkify.py::test_email_re_arg",
"tests/test_linkify.py::test_recognized_tags_arg",
"tests/test_linkify.py::test_linkify_idempotent[<span>text",
"tests/test_linkify.py::test_linkify_idempotent[a",
"tests/test_linkify.py::test_linkify_idempotent[link",
"tests/test_linkify.py::test_linkify_idempotent[text<em>]",
"tests/test_linkify.py::test_linkify_idempotent[jim",
"tests/test_linkify.py::test_linkify_idempotent[<a",
"tests/test_linkify.py::TestLinkify::test_no_href_links",
"tests/test_linkify.py::TestLinkify::test_rel_already_there",
"tests/test_linkify.py::TestLinkify::test_only_text_is_linkified",
"tests/test_linkify.py::test_linkify_filter[abc-abc]",
"tests/test_linkify.py::test_linkify_filter[example.com-<a",
"tests/test_linkify.py::test_linkify_filter[http://example.com?b=1&c=2-<a",
"tests/test_linkify.py::test_linkify_filter[http://example.com?b=1&c=2-<a",
"tests/test_linkify.py::test_linkify_filter[link:"
] |
{
"failed_lite_validators": [
"has_issue_reference"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-10-06 17:09:48+00:00
|
apache-2.0
| 4,049 |
|
mozilla__bleach-721
|
diff --git a/bleach/html5lib_shim.py b/bleach/html5lib_shim.py
index aa5189b..ca1cc8c 100644
--- a/bleach/html5lib_shim.py
+++ b/bleach/html5lib_shim.py
@@ -395,10 +395,17 @@ class BleachHTMLTokenizer(HTMLTokenizer):
# followed by a series of characters. It's treated as a tag
# name that abruptly ends, but we should treat that like
# character data
- yield {
- "type": TAG_TOKEN_TYPE_CHARACTERS,
- "data": "<" + self.currentToken["name"],
- }
+ yield {"type": TAG_TOKEN_TYPE_CHARACTERS, "data": self.stream.get_tag()}
+ elif last_error_token["data"] in (
+ "eof-in-attribute-name",
+ "eof-in-attribute-value-no-quotes",
+ ):
+ # Handle the case where the text being parsed ends with <
+ # followed by a series of characters and then space and then
+ # more characters. It's treated as a tag name followed by an
+ # attribute that abruptly ends, but we should treat that like
+ # character data.
+ yield {"type": TAG_TOKEN_TYPE_CHARACTERS, "data": self.stream.get_tag()}
else:
yield last_error_token
|
mozilla/bleach
|
b56aa7c8052fed01774c6ef8bac947b6dea2f551
|
diff --git a/tests/test_clean.py b/tests/test_clean.py
index 73946a1..dc129d0 100644
--- a/tests/test_clean.py
+++ b/tests/test_clean.py
@@ -163,6 +163,10 @@ def test_bare_entities_get_escaped_correctly(text, expected):
("<y", "<y"),
("x < y", "x < y"),
("<y>", "<y>"),
+ # this is an eof-in-attribute-name parser error
+ ("<some thing", "<some thing"),
+ # this is an eof-in-attribute-value-no-quotes parser error
+ ("<some thing=foo", "<some thing=foo"),
],
)
def test_lessthan_escaping(text, expected):
|
Open angle bracket '<' followed by a few words is cleaned away if there's no closing bracket
**Describe the bug**
After https://github.com/mozilla/bleach/issues/544 was fixed, the issue seems to persist, but it is reproducible in another way
- Python Version: 3.8.13
- Bleach Version: 6.0.0
**To Reproduce**
Steps to reproduce the behavior:
```python
# Fixed!
In [2]: bleach.clean("<random")
Out[2]: '&lt;random'
# Fixed!
In [3]: bleach.clean("random<text")
Out[3]: 'random&lt;text'
# Problem!
In [4]: bleach.clean("<random text")
Out[4]: ''
```
**Expected behavior**
```python
In [4]: bleach.clean("<random text")
Out[4]: '&lt;random text'
```
**Additional context**
Previously this was fixed by https://github.com/mozilla/bleach/pull/667, so that `<` without `>` is treated as `eof-in-tag-name`, but in the case above it is treated as EOF in the attribute name -- `'eof-in-attribute-name'`:
```python
392 if last_error_token:
393 B-> if last_error_token["data"] == "eof-in-tag-name":
394 # Handle the case where the text being parsed ends with <
395 # followed by a series of characters. It's treated as a tag
396 # name that abruptly ends, but we should treat that like
397 # character data
398 yield {
(Pdb)
399 "type": TAG_TOKEN_TYPE_CHARACTERS,
400 "data": "<" + self.currentToken["name"],
401 }
402 else:
403 yield last_error_token
404
405 def consumeEntity(self, allowedChar=None, fromAttribute=False):
406 # If this tokenizer is set to consume entities, then we can let the
407 # superclass do its thing.
408 if self.consume_entities:
409 return super().consumeEntity(allowedChar, fromAttribute)
(Pdb) last_error_token
{'type': 7, 'data': 'eof-in-attribute-name'}
```
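A minimal sketch of the expected behavior after the patch above, mirroring the two added test cases (assumes a bleach build that contains the fix):
```python
import bleach

# eof-in-attribute-name: unclosed tag with a bare attribute
assert bleach.clean("<some thing") == "&lt;some thing"
# eof-in-attribute-value-no-quotes: unclosed tag with an unquoted value
assert bleach.clean("<some thing=foo") == "&lt;some thing=foo"
```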
|
0.0
|
b56aa7c8052fed01774c6ef8bac947b6dea2f551
|
[
"tests/test_clean.py::test_lessthan_escaping[<some"
] |
[
"tests/test_clean.py::test_clean_idempotent[a",
"tests/test_clean.py::test_clean_idempotent[link",
"tests/test_clean.py::test_clean_idempotent[text<em>]",
"tests/test_clean.py::test_clean_idempotent[<span>text",
"tests/test_clean.py::test_clean_idempotent[jim",
"tests/test_clean.py::test_clean_idempotent[& ",
"tests/test_clean.py::test_clean_idempotent[<a",
"tests/test_clean.py::test_clean_idempotent_img",
"tests/test_clean.py::test_only_text_is_cleaned",
"tests/test_clean.py::test_empty",
"tests/test_clean.py::test_content_has_no_html",
"tests/test_clean.py::test_content_has_allowed_html[an",
"tests/test_clean.py::test_content_has_allowed_html[another",
"tests/test_clean.py::test_html_is_lowercased",
"tests/test_clean.py::test_comments[<!--",
"tests/test_clean.py::test_comments[<!--open",
"tests/test_clean.py::test_comments[<!--comment-->text-True-text]",
"tests/test_clean.py::test_comments[<!--comment-->text-False-<!--comment-->text]",
"tests/test_clean.py::test_comments[text<!--",
"tests/test_clean.py::test_comments[text<!--comment-->-True-text]",
"tests/test_clean.py::test_comments[text<!--comment-->-False-text<!--comment-->]",
"tests/test_clean.py::test_invalid_char_in_tag",
"tests/test_clean.py::test_unclosed_tag",
"tests/test_clean.py::test_nested_script_tag",
"tests/test_clean.py::test_bare_entities_get_escaped_correctly[an",
"tests/test_clean.py::test_bare_entities_get_escaped_correctly[tag",
"tests/test_clean.py::test_lessthan_escaping[x<y-x<y]",
"tests/test_clean.py::test_lessthan_escaping[<y-<y]",
"tests/test_clean.py::test_lessthan_escaping[x",
"tests/test_clean.py::test_lessthan_escaping[<y>-<y>]",
"tests/test_clean.py::test_character_entities_handling[&-&]",
"tests/test_clean.py::test_character_entities_handling[ - ]",
"tests/test_clean.py::test_character_entities_handling[ ",
"tests/test_clean.py::test_character_entities_handling[<em>strong</em>-<em>strong</em>]",
"tests/test_clean.py::test_character_entities_handling[&is",
"tests/test_clean.py::test_character_entities_handling[cool",
"tests/test_clean.py::test_character_entities_handling[&&",
"tests/test_clean.py::test_character_entities_handling[&",
"tests/test_clean.py::test_character_entities_handling[this",
"tests/test_clean.py::test_character_entities_handling[http://example.com?active=true¤t=true-http://example.com?active=true&current=true]",
"tests/test_clean.py::test_character_entities_handling[<a",
"tests/test_clean.py::test_character_entities_handling[&xx;-&xx;]",
"tests/test_clean.py::test_character_entities_handling[&adp;-&adp;]",
"tests/test_clean.py::test_character_entities_handling[&currdupe;-&currdupe;]",
"tests/test_clean.py::test_character_entities_handling['-']",
"tests/test_clean.py::test_character_entities_handling["-"]",
"tests/test_clean.py::test_character_entities_handling[{-{]",
"tests/test_clean.py::test_character_entities_handling[{-{]",
"tests/test_clean.py::test_character_entities_handling[{-{]",
"tests/test_clean.py::test_character_entities_handling[&#-&#]",
"tests/test_clean.py::test_character_entities_handling[&#<-&#<]",
"tests/test_clean.py::test_character_entities_handling['"-'"]",
"tests/test_clean.py::test_stripping_tags[a",
"tests/test_clean.py::test_stripping_tags[<p><a",
"tests/test_clean.py::test_stripping_tags[<p><span>multiply",
"tests/test_clean.py::test_stripping_tags[<ul><li><script></li></ul>-kwargs4-<ul><li></li></ul>]",
"tests/test_clean.py::test_stripping_tags[<isindex>-kwargs6-]",
"tests/test_clean.py::test_stripping_tags[Yeah",
"tests/test_clean.py::test_stripping_tags[<sarcasm>-kwargs8-]",
"tests/test_clean.py::test_stripping_tags[</sarcasm>-kwargs9-]",
"tests/test_clean.py::test_stripping_tags[</",
"tests/test_clean.py::test_stripping_tags[Foo",
"tests/test_clean.py::test_stripping_tags[Favorite",
"tests/test_clean.py::test_stripping_tags[</3-kwargs14-</3]",
"tests/test_clean.py::test_escaping_tags[<img",
"tests/test_clean.py::test_escaping_tags[<script>safe()</script>-<script>safe()</script>]",
"tests/test_clean.py::test_escaping_tags[<style>body{}</style>-<style>body{}</style>]",
"tests/test_clean.py::test_escaping_tags[<ul><li><script></li></ul>-<ul><li><script></li></ul>]",
"tests/test_clean.py::test_escaping_tags[<isindex>-<isindex>]",
"tests/test_clean.py::test_escaping_tags[<sarcasm/>-<sarcasm/>]",
"tests/test_clean.py::test_escaping_tags[<sarcasm>-<sarcasm>]",
"tests/test_clean.py::test_escaping_tags[</sarcasm>-</sarcasm>]",
"tests/test_clean.py::test_escaping_tags[</",
"tests/test_clean.py::test_escaping_tags[</3-</3]",
"tests/test_clean.py::test_escaping_tags[<[email protected]>-<[email protected]>]",
"tests/test_clean.py::test_escaping_tags[Favorite",
"tests/test_clean.py::test_stripping_tags_is_safe[<scri<script>pt>alert(1)</scr</script>ipt>-pt>alert(1)ipt>]",
"tests/test_clean.py::test_stripping_tags_is_safe[<scri<scri<script>pt>pt>alert(1)</script>-pt>pt>alert(1)]",
"tests/test_clean.py::test_href_with_wrong_tag",
"tests/test_clean.py::test_disallowed_attr",
"tests/test_clean.py::test_unquoted_attr_values_are_quoted",
"tests/test_clean.py::test_unquoted_event_handler_attr_value",
"tests/test_clean.py::test_invalid_filter_attr",
"tests/test_clean.py::test_poster_attribute",
"tests/test_clean.py::test_attributes_callable",
"tests/test_clean.py::test_attributes_wildcard",
"tests/test_clean.py::test_attributes_wildcard_callable",
"tests/test_clean.py::test_attributes_tag_callable",
"tests/test_clean.py::test_attributes_tag_list",
"tests/test_clean.py::test_attributes_list",
"tests/test_clean.py::test_uri_value_allowed_protocols[<a",
"tests/test_clean.py::test_svg_attr_val_allows_ref",
"tests/test_clean.py::test_svg_allow_local_href[<svg><pattern",
"tests/test_clean.py::test_svg_allow_local_href_nonlocal[<svg><pattern",
"tests/test_clean.py::test_invisible_characters[1\\x0723-1?23]",
"tests/test_clean.py::test_invisible_characters[1\\x0823-1?23]",
"tests/test_clean.py::test_invisible_characters[1\\x0b23-1?23]",
"tests/test_clean.py::test_invisible_characters[1\\x0c23-1?23]",
"tests/test_clean.py::test_invisible_characters[import",
"tests/test_clean.py::test_nonexistent_namespace",
"tests/test_clean.py::test_self_closing_tags_self_close[area]",
"tests/test_clean.py::test_self_closing_tags_self_close[base]",
"tests/test_clean.py::test_self_closing_tags_self_close[br]",
"tests/test_clean.py::test_self_closing_tags_self_close[embed]",
"tests/test_clean.py::test_self_closing_tags_self_close[hr]",
"tests/test_clean.py::test_self_closing_tags_self_close[img]",
"tests/test_clean.py::test_self_closing_tags_self_close[input]",
"tests/test_clean.py::test_self_closing_tags_self_close[link]",
"tests/test_clean.py::test_self_closing_tags_self_close[meta]",
"tests/test_clean.py::test_self_closing_tags_self_close[param]",
"tests/test_clean.py::test_self_closing_tags_self_close[source]",
"tests/test_clean.py::test_self_closing_tags_self_close[track]",
"tests/test_clean.py::test_noscript_rawtag_[title-<noscript><title></noscript><img",
"tests/test_clean.py::test_noscript_rawtag_[textarea-<noscript><textarea></noscript><img",
"tests/test_clean.py::test_noscript_rawtag_[script-<noscript><script></noscript><img",
"tests/test_clean.py::test_noscript_rawtag_[style-<noscript><style></noscript><img",
"tests/test_clean.py::test_noscript_rawtag_[noembed-<noscript><noembed></noscript><img",
"tests/test_clean.py::test_noscript_rawtag_[noframes-<noscript><noframes></noscript><img",
"tests/test_clean.py::test_noscript_rawtag_[iframe-<noscript><iframe></noscript><img",
"tests/test_clean.py::test_noscript_rawtag_[xmp-<noscript><xmp></noscript><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[math-style-<math><style><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[math-iframe-<math><iframe><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[math-xmp-<math><xmp><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[math-noembed-<math><noembed><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[math-script-<math><script><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[math-noscript-<math><noscript><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[math-noframes-<math><noframes><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[svg-style-<svg><style><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[svg-iframe-<svg><iframe><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[svg-xmp-<svg><xmp><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[svg-noembed-<svg><noembed><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[svg-script-<svg><script><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[svg-noscript-<svg><noscript><img",
"tests/test_clean.py::test_namespace_rc_data_element_strip_false[svg-noframes-<svg><noframes><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-style-<math></p><style><!--</style><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-style-<math></br><style><!--</style><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-style-<svg></p><style><!--</style><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-style-<svg></br><style><!--</style><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-title-<math></p><title><!--</title><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-title-<math></br><title><!--</title><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-title-<svg></p><title><!--</title><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-title-<svg></br><title><!--</title><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-noscript-<math></p><noscript><!--</noscript><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-noscript-<math></br><noscript><!--</noscript><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-noscript-<svg></p><noscript><!--</noscript><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-noscript-<svg></br><noscript><!--</noscript><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-script-<math></p><script><!--</script><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-script-<math></br><script><!--</script><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-script-<svg></p><script><!--</script><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-script-<svg></br><script><!--</script><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-noembed-<math></p><noembed><!--</noembed><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-noembed-<math></br><noembed><!--</noembed><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-noembed-<svg></p><noembed><!--</noembed><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-noembed-<svg></br><noembed><!--</noembed><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-textarea-<math></p><textarea><!--</textarea><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-textarea-<math></br><textarea><!--</textarea><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-textarea-<svg></p><textarea><!--</textarea><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-textarea-<svg></br><textarea><!--</textarea><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-noframes-<math></p><noframes><!--</noframes><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-noframes-<math></br><noframes><!--</noframes><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-noframes-<svg></p><noframes><!--</noframes><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-noframes-<svg></br><noframes><!--</noframes><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-iframe-<math></p><iframe><!--</iframe><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-iframe-<math></br><iframe><!--</iframe><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-iframe-<svg></p><iframe><!--</iframe><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-iframe-<svg></br><iframe><!--</iframe><img",
"tests/test_clean.py::test_html_comments_escaped[math-p-xmp-<math></p><xmp><!--</xmp><img",
"tests/test_clean.py::test_html_comments_escaped[math-br-xmp-<math></br><xmp><!--</xmp><img",
"tests/test_clean.py::test_html_comments_escaped[svg-p-xmp-<svg></p><xmp><!--</xmp><img",
"tests/test_clean.py::test_html_comments_escaped[svg-br-xmp-<svg></br><xmp><!--</xmp><img",
"tests/test_clean.py::test_strip_respects_block_level_elements[<p>Te<b>st</b>!</p><p>Hello</p>-Test!\\nHello]",
"tests/test_clean.py::test_strip_respects_block_level_elements[<p>This",
"tests/test_clean.py::test_strip_respects_block_level_elements[<div><p>This",
"tests/test_clean.py::test_regressions[1.test]",
"tests/test_clean.py::test_regressions[2.test]",
"tests/test_clean.py::test_regressions[3.test]",
"tests/test_clean.py::test_regressions[4.test]",
"tests/test_clean.py::test_regressions[5.test]",
"tests/test_clean.py::test_regressions[6.test]",
"tests/test_clean.py::test_regressions[7.test]",
"tests/test_clean.py::test_regressions[8.test]",
"tests/test_clean.py::test_regressions[9.test]",
"tests/test_clean.py::test_regressions[10.test]",
"tests/test_clean.py::test_regressions[11.test]",
"tests/test_clean.py::test_regressions[12.test]",
"tests/test_clean.py::test_regressions[13.test]",
"tests/test_clean.py::test_regressions[14.test]",
"tests/test_clean.py::test_regressions[15.test]",
"tests/test_clean.py::test_regressions[16.test]",
"tests/test_clean.py::test_regressions[17.test]",
"tests/test_clean.py::test_regressions[18.test]",
"tests/test_clean.py::test_regressions[19.test]",
"tests/test_clean.py::test_regressions[20.test]",
"tests/test_clean.py::test_preserves_attributes_order",
"tests/test_clean.py::test_css_sanitizer_warning[attr0]",
"tests/test_clean.py::test_css_sanitizer_warning[attr1]",
"tests/test_clean.py::TestCleaner::test_basics",
"tests/test_clean.py::TestCleaner::test_filters"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2023-10-06 17:42:52+00:00
|
apache-2.0
| 4,050 |
|
mozilla__bleach-722
|
diff --git a/bleach/linkifier.py b/bleach/linkifier.py
index 54a432e..8fcefb2 100644
--- a/bleach/linkifier.py
+++ b/bleach/linkifier.py
@@ -45,8 +45,8 @@ def build_url_re(tlds=TLDS, protocols=html5lib_shim.allowed_protocols):
r"""\(* # Match any opening parentheses.
\b(?<![@.])(?:(?:{0}):/{{0,3}}(?:(?:\w+:)?\w+@)?)? # http://
([\w-]+\.)+(?:{1})(?:\:[0-9]+)?(?!\.\w)\b # xx.yy.tld(:##)?
- (?:[/?][^\s\{{\}}\|\\\^\[\]`<>"]*)?
- # /path/zz (excluding "unsafe" chars from RFC 1738,
+ (?:[/?][^\s\{{\}}\|\\\^`<>"]*)?
+ # /path/zz (excluding "unsafe" chars from RFC 3986,
# except for # and ~, which happen in practice)
""".format(
"|".join(sorted(protocols)), "|".join(sorted(tlds))
|
mozilla/bleach
|
11d8c9bbcba659af99d89785ae0d137ea36d1263
|
diff --git a/tests/test_linkify.py b/tests/test_linkify.py
index 4cc01a6..0920c47 100644
--- a/tests/test_linkify.py
+++ b/tests/test_linkify.py
@@ -300,6 +300,14 @@ def test_link_ftp():
)
+def test_link_with_qs_with_array():
+ """Test that urls pick up [] in querystring"""
+ assert linkify("http://test.com?array[]=1¶ms_in[]=2") == (
+ '<a href="http://test.com?array[]=1&params_in[]=2" '
+ + 'rel="nofollow">http://test.com?array[]=1&params_in[]=2</a>'
+ )
+
+
def test_link_query():
assert (
linkify("http://xx.com/?test=win")
|
Linkify incorrectly parses array arguments
Hi.
Library versions up to 3.1.0 incorrectly parse array and object URL parameters:
```python
from bleach import DEFAULT_CALLBACKS, Linker
text = 'http://test.com?array[]=1&params_in[]=2'
linker = Linker(url_re=linkifier.URL_RE, callbacks=DEFAULT_CALLBACKS, skip_tags=None, parse_email=False)
print(linker.linkify(text))
# prints: <a href="http://test.com?array" rel="nofollow">http://test.com?array</a>[]=1&params_in[]=2
```
As you can see, the URL is split at `[]`, losing part of the link.
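With the widened path/query character class from the patch above, the full querystring should survive; a sketch mirroring the added test (assumes a bleach build with the fix):
```python
from bleach import linkify

assert linkify("http://test.com?array[]=1&params_in[]=2") == (
    '<a href="http://test.com?array[]=1&amp;params_in[]=2" '
    'rel="nofollow">http://test.com?array[]=1&amp;params_in[]=2</a>'
)
```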
|
0.0
|
11d8c9bbcba659af99d89785ae0d137ea36d1263
|
[
"tests/test_linkify.py::test_link_with_qs_with_array"
] |
[
"tests/test_linkify.py::test_empty",
"tests/test_linkify.py::test_simple_link",
"tests/test_linkify.py::test_trailing_slash",
"tests/test_linkify.py::test_mangle_link",
"tests/test_linkify.py::test_mangle_text",
"tests/test_linkify.py::test_email_link[a",
"tests/test_linkify.py::test_email_link[aussie",
"tests/test_linkify.py::test_email_link[email",
"tests/test_linkify.py::test_email_link[<br>[email protected]<br><a",
"tests/test_linkify.py::test_email_link[mailto",
"tests/test_linkify.py::test_email_link[\"\\\\\\n\"@opa.ru-True-\"\\\\\\n\"@opa.ru]",
"tests/test_linkify.py::test_email_link[gorby%[email protected]<a",
"tests/test_linkify.py::test_email_link[[email protected]<a",
"tests/test_linkify.py::test_email_link_escaping[\"james\"@example.com-<a",
"tests/test_linkify.py::test_email_link_escaping[\"j'ames\"@example.com-<a",
"tests/test_linkify.py::test_email_link_escaping[\"ja>mes\"@example.com-<a",
"tests/test_linkify.py::test_prevent_links[callback0-a",
"tests/test_linkify.py::test_prevent_links[callback1-a",
"tests/test_linkify.py::test_prevent_links[callback2-a",
"tests/test_linkify.py::test_prevent_links[callback3-a",
"tests/test_linkify.py::test_prevent_links[callback4-a",
"tests/test_linkify.py::test_prevent_links[callback5-a",
"tests/test_linkify.py::test_set_attrs",
"tests/test_linkify.py::test_only_proto_links",
"tests/test_linkify.py::test_stop_email",
"tests/test_linkify.py::test_tlds[example.com-<a",
"tests/test_linkify.py::test_tlds[example.co-<a",
"tests/test_linkify.py::test_tlds[example.co.uk-<a",
"tests/test_linkify.py::test_tlds[example.edu-<a",
"tests/test_linkify.py::test_tlds[example.xxx-<a",
"tests/test_linkify.py::test_tlds[bit.ly/fun-<a",
"tests/test_linkify.py::test_tlds[example.yyy-example.yyy]",
"tests/test_linkify.py::test_tlds[brie-brie]",
"tests/test_linkify.py::test_escaping[<",
"tests/test_linkify.py::test_escaping[<U",
"tests/test_linkify.py::test_nofollow_off",
"tests/test_linkify.py::test_link_in_html",
"tests/test_linkify.py::test_links_https",
"tests/test_linkify.py::test_add_rel_nofollow",
"tests/test_linkify.py::test_url_with_path",
"tests/test_linkify.py::test_link_ftp",
"tests/test_linkify.py::test_link_query",
"tests/test_linkify.py::test_link_fragment",
"tests/test_linkify.py::test_link_entities_in_qs",
"tests/test_linkify.py::test_link_entities_in_characters_token",
"tests/test_linkify.py::test_link_entities_in_a_tag",
"tests/test_linkify.py::test_escaped_html",
"tests/test_linkify.py::test_link_http_complete",
"tests/test_linkify.py::test_non_url",
"tests/test_linkify.py::test_javascript_url",
"tests/test_linkify.py::test_unsafe_url",
"tests/test_linkify.py::test_skip_tags",
"tests/test_linkify.py::test_libgl",
"tests/test_linkify.py::test_end_of_sentence[example.com-.]",
"tests/test_linkify.py::test_end_of_sentence[example.com-...]",
"tests/test_linkify.py::test_end_of_sentence[ex.com/foo-.]",
"tests/test_linkify.py::test_end_of_sentence[ex.com/foo-....]",
"tests/test_linkify.py::test_end_of_clause",
"tests/test_linkify.py::test_sarcasm",
"tests/test_linkify.py::test_wrapping_parentheses[(example.com)-expected_parts0]",
"tests/test_linkify.py::test_wrapping_parentheses[(example.com/)-expected_parts1]",
"tests/test_linkify.py::test_wrapping_parentheses[(example.com/foo)-expected_parts2]",
"tests/test_linkify.py::test_wrapping_parentheses[(((example.com/))))-expected_parts3]",
"tests/test_linkify.py::test_wrapping_parentheses[example.com/))-expected_parts4]",
"tests/test_linkify.py::test_wrapping_parentheses[(foo",
"tests/test_linkify.py::test_wrapping_parentheses[http://en.wikipedia.org/wiki/Test_(assessment)-expected_parts7]",
"tests/test_linkify.py::test_wrapping_parentheses[(http://en.wikipedia.org/wiki/Test_(assessment))-expected_parts8]",
"tests/test_linkify.py::test_wrapping_parentheses[((http://en.wikipedia.org/wiki/Test_(assessment))-expected_parts9]",
"tests/test_linkify.py::test_wrapping_parentheses[(http://en.wikipedia.org/wiki/Test_(assessment)))-expected_parts10]",
"tests/test_linkify.py::test_wrapping_parentheses[(http://en.wikipedia.org/wiki/)Test_(assessment-expected_parts11]",
"tests/test_linkify.py::test_wrapping_parentheses[hello",
"tests/test_linkify.py::test_parentheses_with_removing",
"tests/test_linkify.py::test_ports[http://foo.com:8000-http://foo.com:8000-]",
"tests/test_linkify.py::test_ports[http://foo.com:8000/-http://foo.com:8000/-]",
"tests/test_linkify.py::test_ports[http://bar.com:xkcd-http://bar.com-:xkcd]",
"tests/test_linkify.py::test_ports[http://foo.com:81/bar-http://foo.com:81/bar-]",
"tests/test_linkify.py::test_ports[http://foo.com:-http://foo.com-:]",
"tests/test_linkify.py::test_ports[http://foo.com:\\u0663\\u0669/-http://foo.com-:\\u0663\\u0669/]",
"tests/test_linkify.py::test_ports[http://foo.com:\\U0001d7e0\\U0001d7d8/-http://foo.com-:\\U0001d7e0\\U0001d7d8/]",
"tests/test_linkify.py::test_ignore_bad_protocols",
"tests/test_linkify.py::test_link_emails_and_urls",
"tests/test_linkify.py::test_links_case_insensitive",
"tests/test_linkify.py::test_elements_inside_links",
"tests/test_linkify.py::test_drop_link_tags",
"tests/test_linkify.py::test_naughty_unescaping[<br>-<br>]",
"tests/test_linkify.py::test_naughty_unescaping[<br>",
"tests/test_linkify.py::test_hang",
"tests/test_linkify.py::test_hyphen_in_mail",
"tests/test_linkify.py::test_url_re_arg",
"tests/test_linkify.py::test_email_re_arg",
"tests/test_linkify.py::test_recognized_tags_arg",
"tests/test_linkify.py::test_linkify_idempotent[<span>text",
"tests/test_linkify.py::test_linkify_idempotent[a",
"tests/test_linkify.py::test_linkify_idempotent[link",
"tests/test_linkify.py::test_linkify_idempotent[text<em>]",
"tests/test_linkify.py::test_linkify_idempotent[jim",
"tests/test_linkify.py::test_linkify_idempotent[<a",
"tests/test_linkify.py::TestLinkify::test_no_href_links",
"tests/test_linkify.py::TestLinkify::test_rel_already_there",
"tests/test_linkify.py::TestLinkify::test_only_text_is_linkified",
"tests/test_linkify.py::test_linkify_filter[abc-abc]",
"tests/test_linkify.py::test_linkify_filter[example.com-<a",
"tests/test_linkify.py::test_linkify_filter[http://example.com?b=1&c=2-<a",
"tests/test_linkify.py::test_linkify_filter[http://example.com?b=1&c=2-<a",
"tests/test_linkify.py::test_linkify_filter[link:"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-10-06 18:08:42+00:00
|
apache-2.0
| 4,051 |
|
mozman__ezdxf-246
|
diff --git a/src/ezdxf/addons/drawing/matplotlib.py b/src/ezdxf/addons/drawing/matplotlib.py
index 9337dc888..881e8bac5 100644
--- a/src/ezdxf/addons/drawing/matplotlib.py
+++ b/src/ezdxf/addons/drawing/matplotlib.py
@@ -244,11 +244,12 @@ class TextRenderer:
upper_x = self.get_text_path('X').vertices[:, 1].tolist()
lower_x = self.get_text_path('x').vertices[:, 1].tolist()
lower_p = self.get_text_path('p').vertices[:, 1].tolist()
+ baseline = min(lower_x)
return FontMeasurements(
- baseline=min(lower_x),
- cap_top=max(upper_x),
- x_top=max(lower_x),
- bottom=min(lower_p)
+ baseline=baseline,
+ cap_height=max(upper_x) - baseline,
+ x_height=max(lower_x) - baseline,
+ descender_height=baseline - min(lower_p)
)
def get_text_path(self, text: str) -> TextPath:
diff --git a/src/ezdxf/addons/drawing/pyqt.py b/src/ezdxf/addons/drawing/pyqt.py
index 6c00fedcd..42fc3eed3 100644
--- a/src/ezdxf/addons/drawing/pyqt.py
+++ b/src/ezdxf/addons/drawing/pyqt.py
@@ -250,11 +250,12 @@ class TextRenderer:
upper_x = self.get_text_rect('X')
lower_x = self.get_text_rect('x')
lower_p = self.get_text_rect('p')
+ baseline = lower_x.bottom()
return FontMeasurements(
- baseline=lower_x.bottom(),
- cap_top=upper_x.top(),
- x_top=lower_x.top(),
- bottom=lower_p.bottom(),
+ baseline=baseline,
+ cap_height=upper_x.top() - baseline,
+ x_height=lower_x.top() - baseline,
+ descender_height=baseline - lower_p.bottom(),
)
def get_text_path(self, text: str) -> qg.QPainterPath:
diff --git a/src/ezdxf/addons/drawing/text.py b/src/ezdxf/addons/drawing/text.py
index e42823d89..e4127069f 100644
--- a/src/ezdxf/addons/drawing/text.py
+++ b/src/ezdxf/addons/drawing/text.py
@@ -57,40 +57,39 @@ assert len(DXF_MTEXT_ALIGNMENT_TO_ALIGNMENT) == len(DXFConstants.MTEXT_ALIGN_FLA
class FontMeasurements:
- def __init__(self, baseline: float, cap_top: float, x_top: float, bottom: float):
+ def __init__(self, baseline: float, cap_height: float, x_height: float, descender_height: float):
self.baseline = baseline
- self.cap_top = cap_top
- self.x_top = x_top
- self.bottom = bottom
+ self.cap_height = cap_height
+ self.x_height = x_height
+ self.descender_height = descender_height
def __eq__(self, other):
return (isinstance(other, FontMeasurements) and
self.baseline == other.baseline and
- self.cap_top == other.cap_top and
- self.x_top == other.x_top and
- self.bottom == other.bottom)
+ self.cap_height == other.cap_height and
+ self.x_height == other.x_height and
+ self.descender_height == other.descender_height)
def scale_from_baseline(self, desired_cap_height: float) -> "FontMeasurements":
scale = desired_cap_height / self.cap_height
- assert math.isclose(self.baseline, 0.0)
return FontMeasurements(
baseline=self.baseline,
- cap_top=desired_cap_height,
- x_top=self.x_height * scale,
- bottom=self.bottom * scale,
+ cap_height=desired_cap_height,
+ x_height=self.x_height * scale,
+ descender_height=self.descender_height * scale,
)
@property
- def cap_height(self) -> float:
- return abs(self.cap_top - self.baseline)
+ def cap_top(self) -> float:
+ return self.baseline + self.cap_height
@property
- def x_height(self) -> float:
- return abs(self.x_top - self.baseline)
+ def x_top(self) -> float:
+ return self.baseline + self.x_height
@property
- def descender_height(self) -> float:
- return abs(self.baseline - self.bottom)
+ def bottom(self) -> float:
+ return self.baseline - self.descender_height
def _get_rotation(text: AnyText) -> Matrix44:
@@ -211,7 +210,6 @@ def _get_extra_transform(text: AnyText) -> Matrix44:
def _apply_alignment(alignment: Alignment,
line_widths: List[float],
- cap_height: float,
line_spacing: float,
box_width: Optional[float],
font_measurements: FontMeasurements) -> Tuple[Tuple[float, float], List[float], List[float]]:
@@ -219,7 +217,9 @@ def _apply_alignment(alignment: Alignment,
return (0, 0), [], []
halign, valign = alignment
- line_ys = [-(cap_height + i * line_spacing) for i in range(len(line_widths))]
+ line_ys = [-font_measurements.baseline -
+ (font_measurements.cap_height + i * line_spacing)
+ for i in range(len(line_widths))]
if box_width is None:
box_width = max(line_widths)
@@ -278,7 +278,7 @@ def simplified_text_chunks(text: AnyText, out: Backend,
line_widths = [out.get_text_line_width(line, cap_height, font=font) for line in lines]
font_measurements = out.get_font_measurements(cap_height, font=font)
anchor, line_xs, line_ys = \
- _apply_alignment(alignment, line_widths, cap_height, line_spacing, box_width, font_measurements)
+ _apply_alignment(alignment, line_widths, line_spacing, box_width, font_measurements)
rotation = _get_rotation(text)
extra_transform = _get_extra_transform(text)
insert = _get_wcs_insert(text)
|
mozman/ezdxf
|
bdf607742fa8e70ad27b1ff474359be432692418
|
diff --git a/tests/test_08_addons/test_811_drawing_frontend.py b/tests/test_08_addons/test_811_drawing_frontend.py
index 9bbaf7346..0cb07db65 100644
--- a/tests/test_08_addons/test_811_drawing_frontend.py
+++ b/tests/test_08_addons/test_811_drawing_frontend.py
@@ -39,8 +39,8 @@ class BasicBackend(Backend):
def get_font_measurements(self, cap_height: float,
font=None) -> FontMeasurements:
- return FontMeasurements(baseline=0.0, cap_top=1.0, x_top=0.5,
- bottom=-0.2)
+ return FontMeasurements(baseline=0.0, cap_height=1.0, x_height=0.5,
+ descender_height=0.2)
def set_background(self, color: str) -> None:
self.collector.append(('bgcolor', color))
|
Matplotlib font properties
Hi again!
In dxf-to-png conversion, I ran into text rendering issues for DXF files containing CJK (Chinese, Japanese, Korean)
characters.
For instance, I have a `simple_text.dxf` containing Korean characters, which is visualized with `cad_viewer.py` with no problem:
<img width="1595" alt="simpletest_cadviewer" src="https://user-images.githubusercontent.com/24424825/94133541-bfd4c600-fe9b-11ea-9ad3-ec85eb13f2c4.png">
When the DXF file is rasterized with the dxf2png tool, the output image displays Korean characters as squares:
<img width="1387" alt="initial" src="https://user-images.githubusercontent.com/24424825/94133722-ff031700-fe9b-11ea-8d34-e414a300eb84.png">
When I added a new Korean font (i.e., `NanumBarunGothic.otf`) by passing its filename to `FontProperties` and handing that to the backend, the characters are displayed properly.
```
... (omitted for brevity)
from matplotlib.font_manager import FontProperties
myfont = FontProperties(fname='/Library/Fonts/NanumBarunGothic.otf', size=15)
doc = ezdxf.readfile('./simple_text.dxf')
msp = doc.modelspace()
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1])
ctx = RenderContext(doc)
ctx.set_current_layout(msp)
out = MatplotlibBackend(ax, font=myfont)
Frontend(ctx, out).draw_layout(msp, finalize=True)
fig.savefig('simple_text.png', dpi=300, transparent=True)
plt.close(fig)
```
<img width="1394" alt="fontadded" src="https://user-images.githubusercontent.com/24424825/94133939-4c7f8400-fe9c-11ea-91ee-6c069f7f197f.png">
However, there were some cases in which the following error message was returned:
```
Frontend(ctx, out).draw_layout(msp, finalize=True)
File "/usr/local/lib/python3.8/dist-packages/ezdxf/addons/drawing/frontend.py", line 93, in draw_layout
self.draw_entities(reorder.ascending(layout, handle_mapping))
File "/usr/local/lib/python3.8/dist-packages/ezdxf/addons/drawing/frontend.py", line 113, in draw_entities
self.draw_entity(entity, properties)
File "/usr/local/lib/python3.8/dist-packages/ezdxf/addons/drawing/frontend.py", line 133, in draw_entity
self.draw_text_entity_2d(entity, properties)
File "/usr/local/lib/python3.8/dist-packages/ezdxf/addons/drawing/frontend.py", line 180, in draw_text_entity_2d
for line, transform, cap_height in simplified_text_chunks(
File "/usr/local/lib/python3.8/dist-packages/ezdxf/addons/drawing/text.py", line 279, in simplified_text_chunks
font_measurements = out.get_font_measurements(cap_height, font=font)
File "/usr/local/lib/python3.8/dist-packages/ezdxf/addons/drawing/matplotlib.py", line 186, in get_font_measurements
return self._text.font_measurements.scale_from_baseline(
File "/usr/local/lib/python3.8/dist-packages/ezdxf/addons/drawing/text.py", line 75, in scale_from_baseline
assert math.isclose(self.baseline, 0.0)
```
I'm not sure if this error stems from the font measurements of a particular font or something else. From the error message, I would infer that the problem is the font's baseline not being 0? I would appreciate an explanation.
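For reference, a small sketch of the refactored `FontMeasurements` semantics from the patch above: heights are now stored relative to the baseline, so `scale_from_baseline()` no longer needs to assert that the baseline is 0 (the constructor signature shown is the patched one):
```python
import math
from ezdxf.addons.drawing.text import FontMeasurements

fm = FontMeasurements(baseline=0.1, cap_height=1.0, x_height=0.5,
                      descender_height=0.2)
scaled = fm.scale_from_baseline(desired_cap_height=2.0)
assert math.isclose(scaled.cap_top, 0.1 + 2.0)  # baseline + cap_height
assert math.isclose(scaled.bottom, 0.1 - 0.4)   # baseline - scaled descender
```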
|
0.0
|
bdf607742fa8e70ad27b1ff474359be432692418
|
[
"tests/test_08_addons/test_811_drawing_frontend.py::test_2d_text",
"tests/test_08_addons/test_811_drawing_frontend.py::test_mtext",
"tests/test_08_addons/test_811_drawing_frontend.py::test_visibility_insert_0",
"tests/test_08_addons/test_811_drawing_frontend.py::test_visibility_insert_2",
"tests/test_08_addons/test_811_drawing_frontend.py::test_override_filter"
] |
[
"tests/test_08_addons/test_811_drawing_frontend.py::test_basic_frontend_init",
"tests/test_08_addons/test_811_drawing_frontend.py::test_backend_default_draw_path",
"tests/test_08_addons/test_811_drawing_frontend.py::test_draw_layout",
"tests/test_08_addons/test_811_drawing_frontend.py::test_draw_entities",
"tests/test_08_addons/test_811_drawing_frontend.py::test_point_and_layers",
"tests/test_08_addons/test_811_drawing_frontend.py::test_line",
"tests/test_08_addons/test_811_drawing_frontend.py::test_lwpolyline_basic",
"tests/test_08_addons/test_811_drawing_frontend.py::test_lwpolyline_path",
"tests/test_08_addons/test_811_drawing_frontend.py::test_banded_lwpolyline",
"tests/test_08_addons/test_811_drawing_frontend.py::test_polyline_2d",
"tests/test_08_addons/test_811_drawing_frontend.py::test_banded_polyline_2d",
"tests/test_08_addons/test_811_drawing_frontend.py::test_polyline_3d_basic",
"tests/test_08_addons/test_811_drawing_frontend.py::test_polyline_3d_path",
"tests/test_08_addons/test_811_drawing_frontend.py::test_2d_arc_basic",
"tests/test_08_addons/test_811_drawing_frontend.py::test_3d_circle_basic",
"tests/test_08_addons/test_811_drawing_frontend.py::test_3d_circle_path",
"tests/test_08_addons/test_811_drawing_frontend.py::test_3d_arc_basic",
"tests/test_08_addons/test_811_drawing_frontend.py::test_3d_arc_path",
"tests/test_08_addons/test_811_drawing_frontend.py::test_3d_ellipse_basic",
"tests/test_08_addons/test_811_drawing_frontend.py::test_3d_ellipse_path",
"tests/test_08_addons/test_811_drawing_frontend.py::test_ignore_3d_text",
"tests/test_08_addons/test_811_drawing_frontend.py::test_hatch",
"tests/test_08_addons/test_811_drawing_frontend.py::test_basic_spline",
"tests/test_08_addons/test_811_drawing_frontend.py::test_mesh",
"tests/test_08_addons/test_811_drawing_frontend.py::test_polyface"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_media",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-10-05 19:28:41+00:00
|
mit
| 4,052 |
|
mozman__steputils-3
|
diff --git a/src/steputils/p21.py b/src/steputils/p21.py
index c8dd28b..a7f3de1 100644
--- a/src/steputils/p21.py
+++ b/src/steputils/p21.py
@@ -33,6 +33,7 @@ FIRST_KEYWORD_CHAR = ascii_uppercase + '_'
KEYWORD_CHARS = ascii_letters + digits + '_-' # should accept ISO-10303-21 and lower case letters
STRING_CHARS = ascii_letters + digits + ' _' + SPECIAL + BACKSLASH
+IGNORE_DELIMETERS = "\n\r\f\t"
FIRST_NUMBER_CHARS = '-+01234567890'
NUMBER_CHARS = FIRST_NUMBER_CHARS + '.eE'
FIRST_ENUM_CHARS = ascii_uppercase + '_.'
@@ -553,6 +554,8 @@ class Lexer:
return ''.join(s)
elif current in STRING_CHARS:
s.append(b.get())
+ elif current in IGNORE_DELIMETERS:
+ b.skip()
else:
raise ParseError(f'Found invalid character in string "{current}".')
|
mozman/steputils
|
86fd714d6e2919970c674ef744dc04c0bb09241d
|
diff --git a/tests/p21/test_p21_lexer.py b/tests/p21/test_p21_lexer.py
index 53cda6c..787b453 100644
--- a/tests/p21/test_p21_lexer.py
+++ b/tests/p21/test_p21_lexer.py
@@ -63,9 +63,7 @@ def test_lexer_string_with_enclosed_comments():
def test_lexer_string_across_lines():
- # strings across lines are not allowed
- with pytest.raises(p21.ParseError):
- list(p21.Lexer("'\n',"))
+ assert list(p21.Lexer("'multi\nline',")) == ['multiline', ',']
def test_lexer_empty_string():
|
.step file parsing error
I am unable to read a STEP file.
These .step files come from the Rhino & Creo professional tools.
I keep getting the following error.
```
File "D:\Progam Files\Python\python v3.7.0\lib\site-packages\steputils\p21.py", line 1025, in readfile
return load(fp)
File "D:\Progam Files\Python\python v3.7.0\lib\site-packages\steputils\p21.py", line 1019, in load
return loads(content)
File "D:\Progam Files\Python\python v3.7.0\lib\site-packages\steputils\p21.py", line 1003, in loads
return Parser(lexer).parse()
File "D:\Progam Files\Python\python v3.7.0\lib\site-packages\steputils\p21.py", line 633, in __init__
self.tokens = list(lexer.parse())
File "D:\Progam Files\Python\python v3.7.0\lib\site-packages\steputils\p21.py", line 505, in parse
yield step_decoder(self.string()) # str
File "D:\Progam Files\Python\python v3.7.0\lib\site-packages\steputils\p21.py", line 557, in string
raise ParseError(f'Found invalid character in string "{current}".')
steputils.exceptions.ParseError: Found invalid character in string "
".
```
When I open the .stp files in the Notepad++ text editor, the encoding is UTF-8.
I tried changing the file encoding to ISO 8859-1 without any success.
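For reference, a minimal reproduction sketch; with the `IGNORE_DELIMETERS` change above, line breaks inside a string are skipped instead of raising `ParseError`:
```python
from steputils import p21

# before the fix this raised ParseError('Found invalid character in string ...')
assert list(p21.Lexer("'multi\nline',")) == ['multiline', ',']
```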
|
0.0
|
86fd714d6e2919970c674ef744dc04c0bb09241d
|
[
"tests/p21/test_p21_lexer.py::test_lexer_string_across_lines"
] |
[
"tests/p21/test_p21_lexer.py::test_lexer_skip_whitespace",
"tests/p21/test_p21_lexer.py::test_lexer_skip_comments",
"tests/p21/test_p21_lexer.py::test_lexer_skip_comments_nl",
"tests/p21/test_p21_lexer.py::test_lexer_missing_end_of_comment",
"tests/p21/test_p21_lexer.py::test_lexer_invalid_keywords",
"tests/p21/test_p21_lexer.py::test_lexer_simple_string",
"tests/p21/test_p21_lexer.py::test_lexer_string_with_special_chars",
"tests/p21/test_p21_lexer.py::test_lexer_string_with_escaped_apostrophe",
"tests/p21/test_p21_lexer.py::test_lexer_string_with_enclosed_comments",
"tests/p21/test_p21_lexer.py::test_lexer_empty_string",
"tests/p21/test_p21_lexer.py::test_lexer_binary",
"tests/p21/test_p21_lexer.py::test_lexer_binary_error",
"tests/p21/test_p21_lexer.py::test_lexer_number",
"tests/p21/test_p21_lexer.py::test_lexer_number_error",
"tests/p21/test_p21_lexer.py::test_lexer_enum",
"tests/p21/test_p21_lexer.py::test_lexer_enum_error",
"tests/p21/test_p21_lexer.py::test_lexer_keyword",
"tests/p21/test_p21_lexer.py::test_lexer_keyword_error",
"tests/p21/test_p21_lexer.py::test_lexer_reference",
"tests/p21/test_p21_lexer.py::test_lexer_reference_error",
"tests/p21/test_p21_lexer.py::test_keyword_matcher"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2022-03-28 12:57:06+00:00
|
mit
| 4,053 |
|
mozman__svgwrite-109
|
diff --git a/svgwrite/data/svgparser.py b/svgwrite/data/svgparser.py
index 6a79dfa..adeaa0a 100644
--- a/svgwrite/data/svgparser.py
+++ b/svgwrite/data/svgparser.py
@@ -19,6 +19,7 @@ event_names = [
]
c = r"\s*[, ]\s*"
+s = r"\s*[; ]\s*"
integer_constant = r"\d+"
exponent = r"([eE][+-]?\d+)"
nonnegative_number = fr"(\d+\.?\d*|\.\d+){exponent}?"
@@ -125,7 +126,7 @@ def build_animation_timing_parser():
begin_value = "(" + "|".join((f"({reg})" for reg in (
offset_value, syncbase_value, event_value, repeat_value,
accesskey_value, wallclock_sync_value, "indefinite"))) + ")"
- return fr"{begin_value}({c}{begin_value})*"
+ return fr"{begin_value}({s}{begin_value})*"
is_valid_animation_timing = is_valid(build_animation_timing_parser())
|
mozman/svgwrite
|
c8cbf6f615910b3818ccf939fce0e407c9c789cb
|
diff --git a/.github/workflows/testrunner.yml b/.github/workflows/testrunner.yml
index 6ea7fac..12fedb6 100644
--- a/.github/workflows/testrunner.yml
+++ b/.github/workflows/testrunner.yml
@@ -10,7 +10,7 @@ jobs:
strategy:
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
- python-version: [3.6, 3.7, 3.8, 3.9, pypy-3.6]
+ python-version: [3.7, 3.8, 3.9, pypy-3.7]
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
diff --git a/tests/test_animation_timing_parser.py b/tests/test_animation_timing_parser.py
index 2be1ff5..32e24d6 100644
--- a/tests/test_animation_timing_parser.py
+++ b/tests/test_animation_timing_parser.py
@@ -51,6 +51,12 @@ class TestAnimationTimingParser(unittest.TestCase):
self.assertTrue(is_valid_animation_timing("wallclock(1997-07-16T19:20:30)"))
self.assertTrue(is_valid_animation_timing("wallclock(1997-07-16T19:20)"))
+ def test_list(self):
+ self.assertTrue(is_valid_animation_timing("0s;foo.end"))
+ self.assertTrue(is_valid_animation_timing("foo.end;0s"))
+ self.assertTrue(is_valid_animation_timing("0s;foo.end;bar.end"))
+ self.assertTrue(is_valid_animation_timing("foo.end;bar.end;0s"))
+
def test_invalid_value(self):
self.assertFalse(is_valid_animation_timing("xyz"))
self.assertFalse(is_valid_animation_timing("repeat(0"))
@@ -61,6 +67,10 @@ class TestAnimationTimingParser(unittest.TestCase):
self.assertFalse(is_valid_animation_timing("wallclock(1997-07-16T19:2)"))
self.assertFalse(is_valid_animation_timing("wallclock(1997-07-16T19:)"))
self.assertFalse(is_valid_animation_timing("wallclock(1997-07-16T19)"))
+ self.assertFalse(is_valid_animation_timing("0s,foo.end"))
+ self.assertFalse(is_valid_animation_timing("foo.end,0s"))
+ self.assertFalse(is_valid_animation_timing("0s,foo.end;bar.end"))
+ self.assertFalse(is_valid_animation_timing("foo.end,bar.end;0s"))
if __name__ == '__main__':
unittest.main()
|
build_animation_timing_parser uses wrong separator
The regex generated for `timing-value-list` uses `,` as the separator, while this list is separated by `;`. It thus does not validate lists of multiple values correctly (but in exchange allows invalid values).
`0s;foo.end` is rejected while being valid; `0s,foo.end` is accepted while being invalid.
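A short sketch of the intended behavior (mirrors the added tests above; `is_valid_animation_timing` is the validator built from this parser):
```python
from svgwrite.data.svgparser import is_valid_animation_timing

assert is_valid_animation_timing("0s;foo.end")      # ';' separates list items
assert not is_valid_animation_timing("0s,foo.end")  # ',' must be rejected
```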
|
0.0
|
c8cbf6f615910b3818ccf939fce0e407c9c789cb
|
[
"tests/test_animation_timing_parser.py::TestAnimationTimingParser::test_invalid_value",
"tests/test_animation_timing_parser.py::TestAnimationTimingParser::test_list"
] |
[
"tests/test_animation_timing_parser.py::TestAnimationTimingParser::test_accessKey_value",
"tests/test_animation_timing_parser.py::TestAnimationTimingParser::test_event_value",
"tests/test_animation_timing_parser.py::TestAnimationTimingParser::test_offset_value",
"tests/test_animation_timing_parser.py::TestAnimationTimingParser::test_repeat_value",
"tests/test_animation_timing_parser.py::TestAnimationTimingParser::test_syncbase_value",
"tests/test_animation_timing_parser.py::TestAnimationTimingParser::test_wallclock"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2022-03-22 10:59:46+00:00
|
mit
| 4,054 |
|
mpdavis__python-jose-136
|
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 4d7654d..c9df8d2 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -17,6 +17,8 @@ Major
`#121 <https://github.com/mpdavis/python-jose/pull/121>`_
* Make pyca/cryptography backend the preferred backend if multiple backends are present.
`#122 <https://github.com/mpdavis/python-jose/pull/122>`_
+* Allow for headless JWT by sorting headers when serializing.
+ `#136 <https://github.com/mpdavis/python-jose/pull/136>`_
Bugfixes
""""""""
diff --git a/jose/jws.py b/jose/jws.py
index b2a75fd..9d2fc0d 100644
--- a/jose/jws.py
+++ b/jose/jws.py
@@ -144,6 +144,7 @@ def _encode_header(algorithm, additional_headers=None):
json_header = json.dumps(
header,
separators=(',', ':'),
+ sort_keys=True,
).encode('utf-8')
return base64url_encode(json_header)
|
mpdavis/python-jose
|
aac4c32df210fce3da69e98822e298b3f014fe13
|
diff --git a/tests/test_jwt.py b/tests/test_jwt.py
index 0140954..9ceb239 100644
--- a/tests/test_jwt.py
+++ b/tests/test_jwt.py
@@ -107,6 +107,33 @@ class TestJWT:
for k, v in headers.items():
assert all_headers[k] == v
+ def test_deterministic_headers(self):
+ from collections import OrderedDict
+ from jose.utils import base64url_decode
+
+ claims = {"a": "b"}
+ key = "secret"
+
+ headers1 = OrderedDict((
+ ('kid', 'my-key-id'),
+ ('another_key', 'another_value'),
+ ))
+ encoded1 = jwt.encode(claims, key, algorithm='HS256', headers=headers1)
+ encoded_headers1 = encoded1.split('.', 1)[0]
+
+ headers2 = OrderedDict((
+ ('another_key', 'another_value'),
+ ('kid', 'my-key-id'),
+ ))
+ encoded2 = jwt.encode(claims, key, algorithm='HS256', headers=headers2)
+ encoded_headers2 = encoded2.split('.', 1)[0]
+
+ assert encoded_headers1 == encoded_headers2
+
+ # manually decode header to compare it to known good
+ decoded_headers1 = base64url_decode(encoded_headers1.encode('utf-8'))
+ assert decoded_headers1 == b"""{"alg":"HS256","another_key":"another_value","kid":"my-key-id","typ":"JWT"}"""
+
def test_encode(self, claims, key):
expected = (
|
Use OrderedDict for headers in jwt.decode and jws._encode_header
Hello,
I would like to implement the "headless JWT" idea I read in this article https://dev.to/neilmadden/7-best-practices-for-json-web-tokens, section 4, "Consider using "headless" JWTs".
But the fact that python-jose builds the headers from a dict makes this impossible: keys don't have a guaranteed order in a classic dict. Could you consider using OrderedDict instead, in jws.py l.131 and jwt.py l.114?
If this idea interests you, I could make a pull request.
Best,
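A minimal sketch of the deterministic serialization the patch introduces: with `sort_keys=True` the encoded header is byte-for-byte identical regardless of dict insertion order, which is what makes stripping and re-attaching the header safe:
```python
import json

def encode_header(header):
    # Same serialization as the patch: compact separators plus sorted keys.
    return json.dumps(header, separators=(",", ":"), sort_keys=True)

h1 = {"kid": "my-key-id", "another_key": "another_value"}
h2 = {"another_key": "another_value", "kid": "my-key-id"}
assert encode_header(h1) == encode_header(h2)
```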
|
0.0
|
aac4c32df210fce3da69e98822e298b3f014fe13
|
[
"tests/test_jwt.py::TestJWT::test_deterministic_headers"
] |
[
"tests/test_jwt.py::TestJWT::test_no_alg",
"tests/test_jwt.py::TestJWT::test_non_default_alg",
"tests/test_jwt.py::TestJWT::test_non_default_alg_positional_bwcompat",
"tests/test_jwt.py::TestJWT::test_no_alg_default_headers",
"tests/test_jwt.py::TestJWT::test_non_default_headers",
"tests/test_jwt.py::TestJWT::test_encode",
"tests/test_jwt.py::TestJWT::test_decode",
"tests/test_jwt.py::TestJWT::test_leeway_is_int",
"tests/test_jwt.py::TestJWT::test_leeway_is_timedelta",
"tests/test_jwt.py::TestJWT::test_iat_not_int",
"tests/test_jwt.py::TestJWT::test_nbf_not_int",
"tests/test_jwt.py::TestJWT::test_nbf_datetime",
"tests/test_jwt.py::TestJWT::test_nbf_with_leeway",
"tests/test_jwt.py::TestJWT::test_nbf_in_future",
"tests/test_jwt.py::TestJWT::test_nbf_skip",
"tests/test_jwt.py::TestJWT::test_exp_not_int",
"tests/test_jwt.py::TestJWT::test_exp_datetime",
"tests/test_jwt.py::TestJWT::test_exp_with_leeway",
"tests/test_jwt.py::TestJWT::test_exp_in_past",
"tests/test_jwt.py::TestJWT::test_exp_skip",
"tests/test_jwt.py::TestJWT::test_aud_string",
"tests/test_jwt.py::TestJWT::test_aud_list",
"tests/test_jwt.py::TestJWT::test_aud_list_multiple",
"tests/test_jwt.py::TestJWT::test_aud_list_is_strings",
"tests/test_jwt.py::TestJWT::test_aud_case_sensitive",
"tests/test_jwt.py::TestJWT::test_aud_empty_claim",
"tests/test_jwt.py::TestJWT::test_aud_not_string_or_list",
"tests/test_jwt.py::TestJWT::test_aud_given_number",
"tests/test_jwt.py::TestJWT::test_iss_string",
"tests/test_jwt.py::TestJWT::test_iss_list",
"tests/test_jwt.py::TestJWT::test_iss_tuple",
"tests/test_jwt.py::TestJWT::test_iss_invalid",
"tests/test_jwt.py::TestJWT::test_sub_string",
"tests/test_jwt.py::TestJWT::test_sub_invalid",
"tests/test_jwt.py::TestJWT::test_sub_correct",
"tests/test_jwt.py::TestJWT::test_sub_incorrect",
"tests/test_jwt.py::TestJWT::test_jti_string",
"tests/test_jwt.py::TestJWT::test_jti_invalid",
"tests/test_jwt.py::TestJWT::test_at_hash",
"tests/test_jwt.py::TestJWT::test_at_hash_invalid",
"tests/test_jwt.py::TestJWT::test_at_hash_missing_access_token",
"tests/test_jwt.py::TestJWT::test_at_hash_missing_claim",
"tests/test_jwt.py::TestJWT::test_at_hash_unable_to_calculate",
"tests/test_jwt.py::TestJWT::test_bad_claims",
"tests/test_jwt.py::TestJWT::test_unverified_claims_string",
"tests/test_jwt.py::TestJWT::test_unverified_claims_list",
"tests/test_jwt.py::TestJWT::test_unverified_claims_object"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-04-09 19:41:55+00:00
|
mit
| 4,055 |
|
mpdavis__python-jose-165
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 79fd0d6..71aa19a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,9 @@
# Changelog #
+## Development ##
+
+* Fix `to_dict` output, which should always be JSON encodeable. #139 (fixes #127 and #137)
+
## 3.1.0 -- 2019-12-10 ##
This is a greatly overdue release.
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
new file mode 100644
index 0000000..e69de29
diff --git a/jose/backends/cryptography_backend.py b/jose/backends/cryptography_backend.py
index fb6205f..4e8ff7d 100644
--- a/jose/backends/cryptography_backend.py
+++ b/jose/backends/cryptography_backend.py
@@ -183,15 +183,15 @@ class CryptographyECKey(Key):
'alg': self._algorithm,
'kty': 'EC',
'crv': crv,
- 'x': long_to_base64(public_key.public_numbers().x, size=key_size),
- 'y': long_to_base64(public_key.public_numbers().y, size=key_size),
+ 'x': long_to_base64(public_key.public_numbers().x, size=key_size).decode('ASCII'),
+ 'y': long_to_base64(public_key.public_numbers().y, size=key_size).decode('ASCII'),
}
if not self.is_public():
data['d'] = long_to_base64(
self.prepared_key.private_numbers().private_value,
size=key_size
- )
+ ).decode('ASCII')
return data
@@ -354,18 +354,18 @@ class CryptographyRSAKey(Key):
data = {
'alg': self._algorithm,
'kty': 'RSA',
- 'n': long_to_base64(public_key.public_numbers().n),
- 'e': long_to_base64(public_key.public_numbers().e),
+ 'n': long_to_base64(public_key.public_numbers().n).decode('ASCII'),
+ 'e': long_to_base64(public_key.public_numbers().e).decode('ASCII'),
}
if not self.is_public():
data.update({
- 'd': long_to_base64(self.prepared_key.private_numbers().d),
- 'p': long_to_base64(self.prepared_key.private_numbers().p),
- 'q': long_to_base64(self.prepared_key.private_numbers().q),
- 'dp': long_to_base64(self.prepared_key.private_numbers().dmp1),
- 'dq': long_to_base64(self.prepared_key.private_numbers().dmq1),
- 'qi': long_to_base64(self.prepared_key.private_numbers().iqmp),
+ 'd': long_to_base64(self.prepared_key.private_numbers().d).decode('ASCII'),
+ 'p': long_to_base64(self.prepared_key.private_numbers().p).decode('ASCII'),
+ 'q': long_to_base64(self.prepared_key.private_numbers().q).decode('ASCII'),
+ 'dp': long_to_base64(self.prepared_key.private_numbers().dmp1).decode('ASCII'),
+ 'dq': long_to_base64(self.prepared_key.private_numbers().dmq1).decode('ASCII'),
+ 'qi': long_to_base64(self.prepared_key.private_numbers().iqmp).decode('ASCII'),
})
return data
diff --git a/jose/backends/ecdsa_backend.py b/jose/backends/ecdsa_backend.py
index dc9ebe7..73b3eab 100644
--- a/jose/backends/ecdsa_backend.py
+++ b/jose/backends/ecdsa_backend.py
@@ -131,14 +131,14 @@ class ECDSAECKey(Key):
'alg': self._algorithm,
'kty': 'EC',
'crv': crv,
- 'x': long_to_base64(public_key.pubkey.point.x(), size=key_size),
- 'y': long_to_base64(public_key.pubkey.point.y(), size=key_size),
+ 'x': long_to_base64(public_key.pubkey.point.x(), size=key_size).decode('ASCII'),
+ 'y': long_to_base64(public_key.pubkey.point.y(), size=key_size).decode('ASCII'),
}
if not self.is_public():
data['d'] = long_to_base64(
self.prepared_key.privkey.secret_multiplier,
size=key_size
- )
+ ).decode('ASCII')
return data
diff --git a/jose/backends/pycrypto_backend.py b/jose/backends/pycrypto_backend.py
index c002102..cf270a8 100644
--- a/jose/backends/pycrypto_backend.py
+++ b/jose/backends/pycrypto_backend.py
@@ -185,8 +185,8 @@ class RSAKey(Key):
data = {
'alg': self._algorithm,
'kty': 'RSA',
- 'n': long_to_base64(self.prepared_key.n),
- 'e': long_to_base64(self.prepared_key.e),
+ 'n': long_to_base64(self.prepared_key.n).decode('ASCII'),
+ 'e': long_to_base64(self.prepared_key.e).decode('ASCII'),
}
if not self.is_public():
@@ -201,12 +201,12 @@ class RSAKey(Key):
dp = self.prepared_key.d % (self.prepared_key.p - 1)
dq = self.prepared_key.d % (self.prepared_key.q - 1)
data.update({
- 'd': long_to_base64(self.prepared_key.d),
- 'p': long_to_base64(self.prepared_key.q),
- 'q': long_to_base64(self.prepared_key.p),
- 'dp': long_to_base64(dq),
- 'dq': long_to_base64(dp),
- 'qi': long_to_base64(self.prepared_key.u),
+ 'd': long_to_base64(self.prepared_key.d).decode('ASCII'),
+ 'p': long_to_base64(self.prepared_key.q).decode('ASCII'),
+ 'q': long_to_base64(self.prepared_key.p).decode('ASCII'),
+ 'dp': long_to_base64(dq).decode('ASCII'),
+ 'dq': long_to_base64(dp).decode('ASCII'),
+ 'qi': long_to_base64(self.prepared_key.u).decode('ASCII'),
})
return data
diff --git a/jose/backends/rsa_backend.py b/jose/backends/rsa_backend.py
index ca9e956..38e42bb 100644
--- a/jose/backends/rsa_backend.py
+++ b/jose/backends/rsa_backend.py
@@ -246,18 +246,18 @@ class RSAKey(Key):
data = {
'alg': self._algorithm,
'kty': 'RSA',
- 'n': long_to_base64(public_key.n),
- 'e': long_to_base64(public_key.e),
+ 'n': long_to_base64(public_key.n).decode('ASCII'),
+ 'e': long_to_base64(public_key.e).decode('ASCII'),
}
if not self.is_public():
data.update({
- 'd': long_to_base64(self._prepared_key.d),
- 'p': long_to_base64(self._prepared_key.p),
- 'q': long_to_base64(self._prepared_key.q),
- 'dp': long_to_base64(self._prepared_key.exp1),
- 'dq': long_to_base64(self._prepared_key.exp2),
- 'qi': long_to_base64(self._prepared_key.coef),
+ 'd': long_to_base64(self._prepared_key.d).decode('ASCII'),
+ 'p': long_to_base64(self._prepared_key.p).decode('ASCII'),
+ 'q': long_to_base64(self._prepared_key.q).decode('ASCII'),
+ 'dp': long_to_base64(self._prepared_key.exp1).decode('ASCII'),
+ 'dq': long_to_base64(self._prepared_key.exp2).decode('ASCII'),
+ 'qi': long_to_base64(self._prepared_key.coef).decode('ASCII'),
})
return data
diff --git a/jose/jwk.py b/jose/jwk.py
index ad89e6d..b2c1113 100644
--- a/jose/jwk.py
+++ b/jose/jwk.py
@@ -137,5 +137,5 @@ class HMACKey(Key):
return {
'alg': self._algorithm,
'kty': 'oct',
- 'k': base64url_encode(self.prepared_key),
+ 'k': base64url_encode(self.prepared_key).decode('ASCII'),
}
|
mpdavis/python-jose
|
ba738d9b55aca679ce484629546886e2ed6f3269
|
diff --git a/tests/algorithms/test_EC.py b/tests/algorithms/test_EC.py
index 7f012af..62b952c 100644
--- a/tests/algorithms/test_EC.py
+++ b/tests/algorithms/test_EC.py
@@ -1,3 +1,4 @@
+import json
from jose.constants import ALGORITHMS
from jose.exceptions import JOSEError, JWKError
@@ -194,6 +195,9 @@ class TestECAlgorithm:
# Private parameters should be absent
assert 'd' not in as_dict
+ # as_dict should be serializable to JSON
+ json.dumps(as_dict)
+
def test_to_dict(self):
key = ECKey(private_key, ALGORITHMS.ES256)
self.assert_parameters(key.to_dict(), private=True)
diff --git a/tests/algorithms/test_HMAC.py b/tests/algorithms/test_HMAC.py
index e84c2c0..843d3a2 100644
--- a/tests/algorithms/test_HMAC.py
+++ b/tests/algorithms/test_HMAC.py
@@ -1,3 +1,4 @@
+import json
from jose.constants import ALGORITHMS
from jose.exceptions import JOSEError
@@ -31,7 +32,7 @@ class TestHMACAlgorithm:
def test_to_dict(self):
passphrase = 'The quick brown fox jumps over the lazy dog'
- encoded = b'VGhlIHF1aWNrIGJyb3duIGZveCBqdW1wcyBvdmVyIHRoZSBsYXp5IGRvZw'
+ encoded = 'VGhlIHF1aWNrIGJyb3duIGZveCBqdW1wcyBvdmVyIHRoZSBsYXp5IGRvZw'
key = HMACKey(passphrase, ALGORITHMS.HS256)
as_dict = key.to_dict()
@@ -43,3 +44,6 @@ class TestHMACAlgorithm:
assert 'k' in as_dict
assert as_dict['k'] == encoded
+
+ # as_dict should be serializable to JSON
+ json.dumps(as_dict)
diff --git a/tests/algorithms/test_RSA.py b/tests/algorithms/test_RSA.py
index 97aeb20..cdcb1da 100644
--- a/tests/algorithms/test_RSA.py
+++ b/tests/algorithms/test_RSA.py
@@ -1,4 +1,5 @@
import base64
+import json
import sys
try:
@@ -370,6 +371,9 @@ class TestRSAAlgorithm:
assert 'dq' not in as_dict
assert 'qi' not in as_dict
+ # as_dict should be serializable to JSON
+ json.dumps(as_dict)
+
def assert_roundtrip(self, key):
assert RSAKey(
key.to_dict(),
|
JWK cannot be dumped to JSON
JOSE has functionality that could be helpful for `.well-known/jwks.json` generation: the ability to convert PEMs to a dictionary compatible with the JWK specification. However, the produced dictionary uses `bytes` instead of `str` in Python 3, which crashes `json.dumps`.
For example:
```python3
import json
from jose import jwk
key_pem = """-----BEGIN PUBLIC KEY-----
MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEU7X2p6GjDeHWgvDkWZHcjKLXdNwM
ua9kolwyy3nv7k3xiRgX/jfX1QbXaQuvqr40PhoWjtJR2C9uTvyP6AMHkw==
-----END PUBLIC KEY-----"""
key = jwk.construct(key_pem, algorithm="ES256").to_dict()
json.dumps({"keys": [key]})
```
This will crash with `TypeError: Object of type bytes is not JSON serializable` due to all base64-encoded values being stored as `bytes`, not `str`.
The solution to circumvent the issue is:
```python3
import json
from jose import jwk
class ByteEncoder(json.JSONEncoder):
#pylint: disable=method-hidden
def default(self, x):
if isinstance(x, bytes):
return str(x, "UTF-8")
else:
super().default(x)
key_pem = """-----BEGIN PUBLIC KEY-----
MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEU7X2p6GjDeHWgvDkWZHcjKLXdNwM
ua9kolwyy3nv7k3xiRgX/jfX1QbXaQuvqr40PhoWjtJR2C9uTvyP6AMHkw==
-----END PUBLIC KEY-----"""
key = jwk.construct(key_pem, algorithm="ES256").to_dict()
json.dumps({"keys": [key]}, cls=ByteEncoder)
```
However, is there a better way?
|
0.0
|
ba738d9b55aca679ce484629546886e2ed6f3269
|
[
"tests/algorithms/test_HMAC.py::TestHMACAlgorithm::test_to_dict",
"tests/algorithms/test_RSA.py::TestRSAAlgorithm::test_to_dict[RSA_2048_PKCS1]",
"tests/algorithms/test_RSA.py::TestRSAAlgorithm::test_to_dict[RSA_4096_PKCS1]"
] |
[
"tests/algorithms/test_EC.py::test_key_from_ecdsa",
"tests/algorithms/test_EC.py::TestECAlgorithm::test_key_from_pem",
"tests/algorithms/test_EC.py::TestECAlgorithm::test_get_public_key",
"tests/algorithms/test_EC.py::TestECAlgorithm::test_string_secret",
"tests/algorithms/test_EC.py::TestECAlgorithm::test_object",
"tests/algorithms/test_EC.py::TestECAlgorithm::test_invalid_algorithm",
"tests/algorithms/test_EC.py::TestECAlgorithm::test_EC_jwk",
"tests/algorithms/test_EC.py::TestECAlgorithm::test_verify",
"tests/algorithms/test_HMAC.py::TestHMACAlgorithm::test_non_string_key",
"tests/algorithms/test_HMAC.py::TestHMACAlgorithm::test_RSA_key",
"tests/algorithms/test_RSA.py::TestPurePythonRsa::test_python_rsa_legacy_pem_read",
"tests/algorithms/test_RSA.py::TestPurePythonRsa::test_python_rsa_legacy_pem_invalid",
"tests/algorithms/test_RSA.py::TestPurePythonRsa::test_python_rsa_legacy_private_key_pkcs8_to_pkcs1",
"tests/algorithms/test_RSA.py::TestPurePythonRsa::test_python_rsa_legacy_private_key_pkcs8_to_pkcs1_invalid",
"tests/algorithms/test_RSA.py::test_pycrypto_RSA_key_instance",
"tests/algorithms/test_RSA.py::test_pycrypto_unencoded_cleartext[RSA_2048_PKCS1]",
"tests/algorithms/test_RSA.py::test_pycrypto_unencoded_cleartext[RSA_4096_PKCS1]",
"tests/algorithms/test_RSA.py::TestRSAAlgorithm::test_RSA_key",
"tests/algorithms/test_RSA.py::TestRSAAlgorithm::test_string_secret",
"tests/algorithms/test_RSA.py::TestRSAAlgorithm::test_object",
"tests/algorithms/test_RSA.py::TestRSAAlgorithm::test_bad_cert",
"tests/algorithms/test_RSA.py::TestRSAAlgorithm::test_invalid_algorithm",
"tests/algorithms/test_RSA.py::TestRSAAlgorithm::test_RSA_jwk",
"tests/algorithms/test_RSA.py::TestRSAAlgorithm::test_get_public_key[RSA_2048_PKCS1]",
"tests/algorithms/test_RSA.py::TestRSAAlgorithm::test_get_public_key[RSA_4096_PKCS1]",
"tests/algorithms/test_RSA.py::TestRSAAlgorithm::test_to_pem[RSA_2048_PKCS1]",
"tests/algorithms/test_RSA.py::TestRSAAlgorithm::test_to_pem[RSA_4096_PKCS1]"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-12-17 23:53:27+00:00
|
mit
| 4,056 |
|
mpdavis__python-jose-185
|
diff --git a/jose/jws.py b/jose/jws.py
index 293b32a..8d7bce6 100644
--- a/jose/jws.py
+++ b/jose/jws.py
@@ -225,28 +225,27 @@ def _sig_matches_keys(keys, signing_input, signature, alg):
def _get_keys(key):
try:
- key = json.loads(key)
+ key = json.loads(key, parse_int=str, parse_float=str)
except Exception:
pass
- # JWK Set per RFC 7517
- if 'keys' in key:
- return key['keys']
-
- # Individual JWK per RFC 7517
- elif 'kty' in key:
- return (key,)
-
- # Some other mapping. Firebase uses just dict of kid, cert pairs
- elif isinstance(key, Mapping):
- values = key.values()
- if values:
- return values
- return (key,)
+ if isinstance(key, Mapping):
+ if 'keys' in key:
+ # JWK Set per RFC 7517
+ return key['keys']
+ elif 'kty' in key:
+ # Individual JWK per RFC 7517
+ return (key,)
+ else:
+ # Some other mapping. Firebase uses just dict of kid, cert pairs
+ values = key.values()
+ if values:
+ return values
+ return (key,)
# Iterable but not text or mapping => list- or tuple-like
elif (isinstance(key, Iterable) and
- not (isinstance(key, six.string_types) or isinstance(key, Mapping))):
+ not (isinstance(key, six.string_types) or isinstance(key, six.binary_type))):
return key
# Scalar value, wrap in tuple.
|
mpdavis/python-jose
|
99ec142374a6eb98e32be5b8cdfd72508fd404d4
|
diff --git a/tests/test_jws.py b/tests/test_jws.py
index 2282c5f..4ca57a4 100644
--- a/tests/test_jws.py
+++ b/tests/test_jws.py
@@ -77,6 +77,17 @@ class TestJWS(object):
with pytest.raises(JWSError):
jws.sign(payload, 'secret', algorithm='RS256')
+ @pytest.mark.parametrize("key", [
+ b'key',
+ 'key',
+ ])
+ def test_round_trip_with_different_key_types(self, key):
+ signed_data = jws.sign({'testkey': 'testvalue'}, key, algorithm=ALGORITHMS.HS256)
+ verified_bytes = jws.verify(signed_data, key, algorithms=[ALGORITHMS.HS256])
+ verified_data = json.loads(verified_bytes.decode('utf-8'))
+ assert 'testkey' in verified_data.keys()
+ assert verified_data['testkey'] == 'testvalue'
+
class TestHMAC(object):
diff --git a/tests/test_jwt.py b/tests/test_jwt.py
index 03f5c28..03783ef 100644
--- a/tests/test_jwt.py
+++ b/tests/test_jwt.py
@@ -52,6 +52,13 @@ class TestJWT:
key=key,
algorithms=[])
+ @pytest.mark.parametrize("key, token",
+ [('1234567890', u'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJuYW1lIjoidGVzdCJ9.aNBlulVhiYSCzvsh1rTzXZC2eWJmNrMBjINT-0wQz4k'),
+ ('123456789.0',u'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJuYW1lIjoidGVzdCJ9.D8WLFPMi3yKgua2jm3BKThFsParXpgxhIbsUc39zJDw')])
+ def test_numeric_key(self, key, token):
+ token_info = jwt.decode(token, key)
+ assert token_info == {"name": "test"}
+
def test_invalid_claims_json(self):
old_jws_verify = jws.verify
try:
@@ -165,6 +172,16 @@ class TestJWT:
assert decoded == claims
+ @pytest.mark.parametrize('key', [
+ b'key',
+ 'key',
+ ])
+ def test_round_trip_with_different_key_types(self, key):
+ token = jwt.encode({'testkey': 'testvalue'}, key, algorithm='HS256')
+ verified_data = jwt.decode(token, key, algorithms=['HS256'])
+ assert 'testkey' in verified_data.keys()
+ assert verified_data['testkey'] == 'testvalue'
+
def test_leeway_is_int(self):
pass
|
Various issues in jwt.decode / jws._get_keys
I've had a couple of issues (figuring out how to use jwt.decode) which stem from the jws._get_keys implementation; a short repro sketch follows the list.
1. the key argument must be iterable; an exception is raised otherwise
2. a string key argument must not contain 'keys' (i.e. if a PEM base64 segment or HS secret segment contains 'keys', it'll break)
3. the key can't be the result of calling jwk.construct (usability issue)
4. attempting json.loads on anything that is not a string seems weird
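A minimal repro sketch for the numeric-key case, built from the row's test data (assumes python-jose is installed):
```python
from jose import jwt

# Before the fix, json.loads turned the purely numeric secret into an int
# during key lookup, so verification raised instead of succeeding.
token = (
    "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9"
    ".eyJuYW1lIjoidGVzdCJ9"
    ".aNBlulVhiYSCzvsh1rTzXZC2eWJmNrMBjINT-0wQz4k"
)
assert jwt.decode(token, "1234567890") == {"name": "test"}
```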
|
0.0
|
99ec142374a6eb98e32be5b8cdfd72508fd404d4
|
[
"tests/test_jws.py::TestJWS::test_round_trip_with_different_key_types[key0]",
"tests/test_jwt.py::TestJWT::test_numeric_key[1234567890-eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJuYW1lIjoidGVzdCJ9.aNBlulVhiYSCzvsh1rTzXZC2eWJmNrMBjINT-0wQz4k]",
"tests/test_jwt.py::TestJWT::test_numeric_key[123456789.0-eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJuYW1lIjoidGVzdCJ9.D8WLFPMi3yKgua2jm3BKThFsParXpgxhIbsUc39zJDw]",
"tests/test_jwt.py::TestJWT::test_round_trip_with_different_key_types[key0]"
] |
[
"tests/test_jws.py::TestJWS::test_unicode_token",
"tests/test_jws.py::TestJWS::test_multiple_keys",
"tests/test_jws.py::TestJWS::test_invalid_algorithm",
"tests/test_jws.py::TestJWS::test_not_enough_segments",
"tests/test_jws.py::TestJWS::test_header_invalid_padding",
"tests/test_jws.py::TestJWS::test_header_not_json",
"tests/test_jws.py::TestJWS::test_claims_invalid_padding",
"tests/test_jws.py::TestJWS::test_claims_not_json",
"tests/test_jws.py::TestJWS::test_invalid_key",
"tests/test_jws.py::TestJWS::test_round_trip_with_different_key_types[key1]",
"tests/test_jws.py::TestHMAC::testHMAC256",
"tests/test_jws.py::TestHMAC::testHMAC384",
"tests/test_jws.py::TestHMAC::testHMAC512",
"tests/test_jws.py::TestHMAC::test_wrong_alg",
"tests/test_jws.py::TestHMAC::test_wrong_key",
"tests/test_jws.py::TestHMAC::test_unsupported_alg",
"tests/test_jws.py::TestHMAC::test_add_headers",
"tests/test_jws.py::TestGetKeys::test_dict",
"tests/test_jws.py::TestGetKeys::test_custom_object",
"tests/test_jws.py::TestGetKeys::test_RFC7517_string",
"tests/test_jws.py::TestGetKeys::test_RFC7517_jwk",
"tests/test_jws.py::TestGetKeys::test_RFC7517_mapping",
"tests/test_jws.py::TestGetKeys::test_string",
"tests/test_jws.py::TestGetKeys::test_tuple",
"tests/test_jws.py::TestGetKeys::test_list",
"tests/test_jws.py::TestRSA::test_jwk_set",
"tests/test_jws.py::TestRSA::test_jwk_set_failure",
"tests/test_jws.py::TestRSA::test_RSA256",
"tests/test_jws.py::TestRSA::test_RSA384",
"tests/test_jws.py::TestRSA::test_RSA512",
"tests/test_jws.py::TestRSA::test_wrong_alg",
"tests/test_jws.py::TestRSA::test_wrong_key",
"tests/test_jws.py::TestRSA::test_private_verify_raises_warning",
"tests/test_jws.py::TestEC::test_EC256",
"tests/test_jws.py::TestEC::test_EC384",
"tests/test_jws.py::TestEC::test_EC512",
"tests/test_jws.py::TestEC::test_wrong_alg",
"tests/test_jws.py::TestLoad::test_header_not_mapping",
"tests/test_jws.py::TestLoad::test_claims_not_mapping",
"tests/test_jws.py::TestLoad::test_signature_padding",
"tests/test_jwt.py::TestJWT::test_no_alg",
"tests/test_jwt.py::TestJWT::test_invalid_claims_json",
"tests/test_jwt.py::TestJWT::test_invalid_claims",
"tests/test_jwt.py::TestJWT::test_non_default_alg",
"tests/test_jwt.py::TestJWT::test_non_default_alg_positional_bwcompat",
"tests/test_jwt.py::TestJWT::test_no_alg_default_headers",
"tests/test_jwt.py::TestJWT::test_non_default_headers",
"tests/test_jwt.py::TestJWT::test_deterministic_headers",
"tests/test_jwt.py::TestJWT::test_encode",
"tests/test_jwt.py::TestJWT::test_decode",
"tests/test_jwt.py::TestJWT::test_round_trip_with_different_key_types[key1]",
"tests/test_jwt.py::TestJWT::test_leeway_is_int",
"tests/test_jwt.py::TestJWT::test_leeway_is_timedelta",
"tests/test_jwt.py::TestJWT::test_iat_not_int",
"tests/test_jwt.py::TestJWT::test_nbf_not_int",
"tests/test_jwt.py::TestJWT::test_nbf_datetime",
"tests/test_jwt.py::TestJWT::test_nbf_with_leeway",
"tests/test_jwt.py::TestJWT::test_nbf_in_future",
"tests/test_jwt.py::TestJWT::test_nbf_skip",
"tests/test_jwt.py::TestJWT::test_exp_not_int",
"tests/test_jwt.py::TestJWT::test_exp_datetime",
"tests/test_jwt.py::TestJWT::test_exp_with_leeway",
"tests/test_jwt.py::TestJWT::test_exp_in_past",
"tests/test_jwt.py::TestJWT::test_exp_skip",
"tests/test_jwt.py::TestJWT::test_aud_string",
"tests/test_jwt.py::TestJWT::test_aud_list",
"tests/test_jwt.py::TestJWT::test_aud_list_multiple",
"tests/test_jwt.py::TestJWT::test_aud_list_is_strings",
"tests/test_jwt.py::TestJWT::test_aud_case_sensitive",
"tests/test_jwt.py::TestJWT::test_aud_empty_claim",
"tests/test_jwt.py::TestJWT::test_aud_not_string_or_list",
"tests/test_jwt.py::TestJWT::test_aud_given_number",
"tests/test_jwt.py::TestJWT::test_iss_string",
"tests/test_jwt.py::TestJWT::test_iss_list",
"tests/test_jwt.py::TestJWT::test_iss_tuple",
"tests/test_jwt.py::TestJWT::test_iss_invalid",
"tests/test_jwt.py::TestJWT::test_sub_string",
"tests/test_jwt.py::TestJWT::test_sub_invalid",
"tests/test_jwt.py::TestJWT::test_sub_correct",
"tests/test_jwt.py::TestJWT::test_sub_incorrect",
"tests/test_jwt.py::TestJWT::test_jti_string",
"tests/test_jwt.py::TestJWT::test_jti_invalid",
"tests/test_jwt.py::TestJWT::test_at_hash",
"tests/test_jwt.py::TestJWT::test_at_hash_invalid",
"tests/test_jwt.py::TestJWT::test_at_hash_missing_access_token",
"tests/test_jwt.py::TestJWT::test_at_hash_missing_claim",
"tests/test_jwt.py::TestJWT::test_at_hash_unable_to_calculate",
"tests/test_jwt.py::TestJWT::test_bad_claims",
"tests/test_jwt.py::TestJWT::test_unverified_claims_string",
"tests/test_jwt.py::TestJWT::test_unverified_claims_list",
"tests/test_jwt.py::TestJWT::test_unverified_claims_object",
"tests/test_jwt.py::TestJWT::test_require[aud-aud]",
"tests/test_jwt.py::TestJWT::test_require[ait-ait]",
"tests/test_jwt.py::TestJWT::test_require[exp-value2]",
"tests/test_jwt.py::TestJWT::test_require[nbf-value3]",
"tests/test_jwt.py::TestJWT::test_require[iss-iss]",
"tests/test_jwt.py::TestJWT::test_require[sub-sub]",
"tests/test_jwt.py::TestJWT::test_require[jti-jti]"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2020-09-03 23:47:40+00:00
|
mit
| 4,057 |
|
mpetazzoni__sseclient-13
|
diff --git a/sseclient/__init__.py b/sseclient/__init__.py
index 3bd32e0..4fb502b 100644
--- a/sseclient/__init__.py
+++ b/sseclient/__init__.py
@@ -8,7 +8,6 @@ Provides a generator of SSE received through an existing HTTP response.
import logging
-
__author__ = 'Maxime Petazzoni <[email protected]>'
__email__ = '[email protected]'
__copyright__ = 'Copyright (C) 2016-2017 SignalFx, Inc. All rights reserved.'
@@ -32,8 +31,9 @@ class SSEClient(object):
method. That would usually be something that implements
io.BinaryIOBase, like an httplib or urllib3 HTTPResponse object.
"""
- logging.debug('Initialized SSE client from event source %s',
- event_source)
+ self._logger = logging.getLogger(self.__class__.__module__)
+ self._logger.debug('Initialized SSE client from event source %s',
+ event_source)
self._event_source = event_source
self._char_enc = char_enc
@@ -47,10 +47,10 @@ class SSEClient(object):
data = b''
for chunk in self._event_source:
for line in chunk.splitlines(True):
- if not line.strip():
+ data += line
+ if data.endswith((b'\r\r', b'\n\n', b'\r\n\r\n')):
yield data
data = b''
- data += line
if data:
yield data
@@ -72,13 +72,22 @@ class SSEClient(object):
# Ignore unknown fields.
if field not in event.__dict__:
- logging.debug('Saw invalid field %s while parsing '
- 'Server Side Event', field)
+ self._logger.debug('Saw invalid field %s while parsing '
+ 'Server Side Event', field)
continue
- # Spaces may occur before the value; strip them. If no value is
- # present after the separator, assume an empty value.
- value = data[1].lstrip() if len(data) > 1 else ''
+ if len(data) > 1:
+ # From the spec:
+ # "If value starts with a single U+0020 SPACE character,
+ # remove it from value."
+ if data[1].startswith(' '):
+ value = data[1][1:]
+ else:
+ value = data[1]
+ else:
+ # If no value is present after the separator,
+ # assume an empty value.
+ value = ''
# The data field may come over multiple lines and their values
# are concatenated with each other.
@@ -95,8 +104,11 @@ class SSEClient(object):
if event.data.endswith('\n'):
event.data = event.data[0:-1]
+ # Empty event names default to 'message'
+ event.event = event.event or 'message'
+
# Dispatch the event
- logging.debug('Dispatching %s...', event)
+ self._logger.debug('Dispatching %s...', event)
yield event
def close(self):
diff --git a/sseclient/version.py b/sseclient/version.py
index 273eb95..4c6e6f8 100644
--- a/sseclient/version.py
+++ b/sseclient/version.py
@@ -1,2 +1,2 @@
name = 'sseclient-py'
-version = '1.5'
+version = '1.7'
|
mpetazzoni/sseclient
|
41c0015fb0ef29b22d48b119a82b9e2d858c7d36
|
diff --git a/test.py b/test.py
new file mode 100644
index 0000000..ab0dc07
--- /dev/null
+++ b/test.py
@@ -0,0 +1,248 @@
+# coding: utf-8
+"""
+Tests mosly copied from:
+
+https://github.com/EventSource/eventsource/blob/master/test/eventsource_test.js
+"""
+
+import os
+import sys
+import struct
+import unittest
+sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
+import sseclient
+
+def parse(content):
+ return [{ 'id': ev.id,
+ 'event': ev.event,
+ 'data': ev.data }
+ for ev in sseclient.SSEClient(content).events()]
+
+class Parser(unittest.TestCase):
+ def test_multibyte_characters(self):
+ self.assertEqual(parse([u'id: 1\ndata: €豆腐\n\n'.encode('utf-8')]),
+ [{
+ 'id': '1',
+ 'event': 'message',
+ 'data': u'€豆腐'
+ }])
+
+ def test_parses_empty_lines_with_multibyte_characters(self):
+ self.assertEqual(parse([u'\n\n\n\nid: 1\ndata: 我現在都看實況不玩遊戲\n\n'.encode('utf-8')]),
+ [{
+ 'id': '1',
+ 'event': 'message',
+ 'data': u'我現在都看實況不玩遊戲'
+ }])
+
+ def test_one_one_line_message_in_one_chunk(self):
+ self.assertEqual(parse([b'data: Hello\n\n']),
+ [{
+ 'id': None,
+ 'event': 'message',
+ 'data': 'Hello'
+ }])
+
+ def test_one_one_line_message_in_two_chunks(self):
+ self.assertEqual(parse([b'data: Hel', b'lo\n\n']),
+ [{
+ 'id': None,
+ 'event': 'message',
+ 'data': 'Hello'
+ }])
+
+ def test_two_one_line_messages_in_one_chunk(self):
+ self.assertEqual(parse([b'data: Hello\n\n', b'data: World\n\n']),
+ [{
+ 'id': None,
+ 'event': 'message',
+ 'data': 'Hello'
+ }, {
+ 'id': None,
+ 'event': 'message',
+ 'data': 'World'
+ }])
+
+ def test_one_two_line_message_in_one_chunk(self):
+ self.assertEqual(parse([b'data: Hello\ndata:World\n\n']),
+ [{
+ 'id': None,
+ 'event': 'message',
+ 'data': 'Hello\nWorld'
+ }])
+
+ # Issue #8
+ def test_really_chopped_up_unicode_data(self):
+ self.assertEqual(parse([struct.pack('B', b) for b in bytearray(u'data: Aslak\n\ndata: Hellesøy\n\n'.encode('utf-8'))]),
+ [{
+ 'id': None,
+ 'event': 'message',
+ 'data': 'Aslak'
+ }, {
+ 'id': None,
+ 'event': 'message',
+ 'data': u'Hellesøy'
+ }])
+
+ def test_accepts_CRLF_as_separator(self):
+ self.assertEqual(parse([struct.pack('B', b) for b in bytearray(u'data: Aslak\r\n\r\ndata: Hellesøy\r\n\r\n'.encode('utf-8'))]),
+ [{
+ 'id': None,
+ 'event': 'message',
+ 'data': 'Aslak'
+ }, {
+ 'id': None,
+ 'event': 'message',
+ 'data': u'Hellesøy'
+ }])
+
+ def test_accepts_CR_as_separator(self):
+ self.assertEqual(parse([struct.pack('B', b) for b in bytearray(u'data: Aslak\r\rdata: Hellesøy\r\r'.encode('utf-8'))]),
+ [{
+ 'id': None,
+ 'event': 'message',
+ 'data': 'Aslak'
+ }, {
+ 'id': None,
+ 'event': 'message',
+ 'data': u'Hellesøy'
+ }])
+
+ def test_delivers_message_with_explicit_event(self):
+ self.assertEqual(parse([b'event: greeting\ndata: Hello\n\n']),
+ [{
+ 'id': None,
+ 'event': 'greeting',
+ 'data': 'Hello'
+ }])
+
+ def test_delivers_two_messages_with_same_explicit_event(self):
+ self.assertEqual(parse([b'event: greeting\ndata: Hello\n\n', b'event: greeting\ndata: World\n\n']),
+ [{
+ 'id': None,
+ 'event': 'greeting',
+ 'data': 'Hello'
+ },
+ {
+ 'id': None,
+ 'event': 'greeting',
+ 'data': 'World'
+ }])
+
+ def test_delivers_two_messages_with_different_explicit_events(self):
+ self.assertEqual(parse([b'event: greeting\ndata: Hello\n\n', b'event: salutation\ndata: World\n\n']),
+ [{
+ 'id': None,
+ 'event': 'greeting',
+ 'data': 'Hello'
+ },
+ {
+ 'id': None,
+ 'event': 'salutation',
+ 'data': 'World'
+ }])
+
+ def test_ignores_comments(self):
+ self.assertEqual(parse([b'data: Hello\n\n:nothing to see here\n\ndata: World\n\n']),
+ [{
+ 'id': None,
+ 'event': 'message',
+ 'data': 'Hello'
+ },
+ {
+ 'id': None,
+ 'event': 'message',
+ 'data': 'World'
+ }])
+
+ def test_ignores_empty_comments(self):
+ self.assertEqual(parse([b'data: Hello\n\n:\n\ndata: World\n\n']),
+ [{
+ 'id': None,
+ 'event': 'message',
+ 'data': 'Hello'
+ },
+ {
+ 'id': None,
+ 'event': 'message',
+ 'data': 'World'
+ }])
+
+ def test_does_not_ignore_multiline_strings(self):
+ self.assertEqual(parse([b'data: line one\ndata:\ndata: line two\n\n']),
+ [{
+ 'id': None,
+ 'event': 'message',
+ 'data': 'line one\n\nline two'
+ }])
+
+ def test_does_not_ignore_multiline_strings_even_in_data_beginning(self):
+ self.assertEqual(parse([b'data:\ndata:line one\ndata: line two\n\n']),
+ [{
+ 'id': None,
+ 'event': 'message',
+ 'data': '\nline one\nline two'
+ }])
+
+ def test_should_regard_empty_event_as_message(self):
+ self.assertEqual(parse([b'event:\ndata: Hello\n\n']),
+ [{
+ 'id': None,
+ 'event': 'message',
+ 'data': 'Hello'
+ }])
+
+ def test_should_ignore_message_with_no_data(self):
+ self.assertEqual(parse([b'event: greeting\n\n']), [])
+
+ def test_preserves_whitespace_at_end_of_lines(self):
+ self.assertEqual(parse([b'event: greeting \ndata: Hello \n\n']),
+ [{
+ 'id': None,
+ 'event': 'greeting ',
+ 'data': 'Hello '
+ }])
+
+ def test_parses_relatively_huge_messages_efficiently(self):
+ self.assertEqual(parse([b'data: ' + b'a'*10000 + b'\n\n']),
+ [{
+ 'id': None,
+ 'event': 'message',
+ 'data': 'a'*10000
+ }])
+
+ def test_ID(self):
+ self.assertEqual(parse([b'id: 90\ndata: Hello\n\n']),
+ [{
+ 'id': '90',
+ 'event': 'message',
+ 'data': 'Hello'
+ }])
+
+ # Issue #9
+ def test_does_not_split_on_universal_newlines(self):
+ self.assertEqual(parse([u'data: Hello\x0b\x0c\x1c\x1d\x1e\x85\u2028\u2029\n\n'.encode('utf-8')]),
+ [{
+ 'id': None,
+ 'event': 'message',
+ 'data': u'Hello\x0b\x0c\x1c\x1d\x1e\x85\u2028\u2029'
+ }])
+
+ # Issue #11 and PR #12
+ def test_empty_line_at_start_of_chunk(self):
+ self.assertEqual(parse([
+ b'event: test event\r\ndata: {\r\ndata: "terribly_split": "json_objects in SSE",',
+ b'\r\ndata: "which_should_probably": "be on a single line",\r\ndata: "but_oh_well": 1\r\ndata: }\r\n\r\n']),
+ [{
+ 'id': None,
+ 'event': 'test event',
+ # From the spec https://www.w3.org/TR/2009/WD-eventsource-20091029/
+ # "If the field name is "data"
+ # Append the field value to the data buffer, then append a single
+ # U+000A LINE FEED (LF) character to the data buffer."
+ # So the lines should be separated by \n
+ 'data': '{\n "terribly_split": "json_objects in SSE",\n "which_should_probably": "be on a single line",\n "but_oh_well": 1\n}'
+ }])
+
+if __name__ == '__main__':
+ unittest.main()
|
Invalid json when newline is at the beginning of a chunk
Hi, if the response chunk happens to have a newline character at the beginning, it is incorrectly parsed as an SSE delimiter, resulting in invalid JSON in event.data.
Example:
```
chunk = b'\ndata: "logicalTimestampMs" : 1497016850000,\ndata: "maxDelayMs" : 12000\ndata: }\n\nevent: data\nid: data-1497016860000\ndata: {\n'
chunk.splitlines(True)
['\n', 'data: "logicalTimestampMs" : 1497016850000,\n', 'data: "maxDelayMs" : 12000\n', 'data: }\n', '\n', 'event: data\n', 'id: data-1497016860000\n', 'data: {\n']
```
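A sketch of the fixed framing logic from the patch: every line is accumulated, and an event is only emitted once the buffer ends with a blank-line separator, so a newline at the start of a chunk completes the previous event instead of splitting it:
```python
def read_events(chunks):
    # Mirrors the patched event-framing loop: buffer lines until the data
    # ends with one of the SSE event separators.
    data = b""
    for chunk in chunks:
        for line in chunk.splitlines(True):
            data += line
            if data.endswith((b"\r\r", b"\n\n", b"\r\n\r\n")):
                yield data
                data = b""
    if data:
        yield data

assert list(read_events([b"data: a\n", b"\ndata: b\n\n"])) == [
    b"data: a\n\n",
    b"data: b\n\n",
]
```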
|
0.0
|
41c0015fb0ef29b22d48b119a82b9e2d858c7d36
|
[
"test.py::Parser::test_accepts_CRLF_as_separator",
"test.py::Parser::test_accepts_CR_as_separator",
"test.py::Parser::test_empty_line_at_start_of_chunk",
"test.py::Parser::test_really_chopped_up_unicode_data",
"test.py::Parser::test_should_regard_empty_event_as_message"
] |
[
"test.py::Parser::test_ID",
"test.py::Parser::test_delivers_message_with_explicit_event",
"test.py::Parser::test_delivers_two_messages_with_different_explicit_events",
"test.py::Parser::test_delivers_two_messages_with_same_explicit_event",
"test.py::Parser::test_does_not_ignore_multiline_strings",
"test.py::Parser::test_does_not_ignore_multiline_strings_even_in_data_beginning",
"test.py::Parser::test_does_not_split_on_universal_newlines",
"test.py::Parser::test_ignores_comments",
"test.py::Parser::test_ignores_empty_comments",
"test.py::Parser::test_multibyte_characters",
"test.py::Parser::test_one_one_line_message_in_one_chunk",
"test.py::Parser::test_one_one_line_message_in_two_chunks",
"test.py::Parser::test_one_two_line_message_in_one_chunk",
"test.py::Parser::test_parses_empty_lines_with_multibyte_characters",
"test.py::Parser::test_parses_relatively_huge_messages_efficiently",
"test.py::Parser::test_preserves_whitespace_at_end_of_lines",
"test.py::Parser::test_should_ignore_message_with_no_data",
"test.py::Parser::test_two_one_line_messages_in_one_chunk"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-07-22 07:50:20+00:00
|
apache-2.0
| 4,058 |
|
mplanchard__pydecor-20
|
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 0b7b8da..5c67b4e 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -10,7 +10,9 @@ pylint;implementation_name=="cpython"
coverage
mock;python_version<"3.0"
-pytest
-pytest-cov
+pytest>=4.6
+pytest-cov;implementation_name=="cpython"
+pytest-cov==2.8.1;implementation_name=="pypy"
+pytest-cov==2.8.1;python_version<="3.7"
tox
wheel
diff --git a/src/pydecor/decorators/_utility.py b/src/pydecor/decorators/_utility.py
index cd04ffb..300d28b 100644
--- a/src/pydecor/decorators/_utility.py
+++ b/src/pydecor/decorators/_utility.py
@@ -59,7 +59,7 @@ def get_fn_args(decorated, args):
# types)
if decor_name in cls_dict and isfunction(cls_dict[decor_name]):
# The first argument is probably a "self" variable
- fn_args = args[1:]
+ fn_args = args
return fn_args
|
mplanchard/pydecor
|
b7554cfa64f62e5619511438a821e0fae0a51a19
|
diff --git a/tests/decorators/test_generics.py b/tests/decorators/test_generics.py
index 6340048..af0e412 100644
--- a/tests/decorators/test_generics.py
+++ b/tests/decorators/test_generics.py
@@ -317,12 +317,13 @@ class TestBefore:
def to_call_static(*args):
tracker.append({4: args})
- _ToDecorate().to_call(1, 2)
+ instance = _ToDecorate()
+ instance.to_call(1, 2)
_ToDecorate().to_call_cls(3, 4)
_ToDecorate().to_call_static(5, 6)
assert len(tracker) == 6
- assert tracker[0] == {1: (1, 2)}
+ assert tracker[0] == {1: (instance, 1, 2)}
assert tracker[1] == {2: (1, 2)}
assert tracker[2] == {1: (3, 4)}
assert tracker[3] == {3: (3, 4)}
@@ -606,13 +607,14 @@ class TestAfter:
def to_call_static(*args):
tracker.append({4: args})
- _ToDecorate().to_call(1, 2)
+ instance = _ToDecorate()
+ instance.to_call(1, 2)
_ToDecorate().to_call_cls(3, 4)
_ToDecorate().to_call_static(5, 6)
assert len(tracker) == 6
assert tracker[0] == {2: (1, 2)}
- assert tracker[1] == {1: (1, 2)}
+ assert tracker[1] == {1: (instance, 1, 2)}
assert tracker[2] == {3: (3, 4)}
assert tracker[3] == {1: (3, 4)}
assert tracker[4] == {4: (5, 6)}
@@ -890,12 +892,13 @@ class TestInstead:
def to_call_static(*args):
tracker.append({4: args})
- _ToDecorate().to_call(1, 2)
+ instance = _ToDecorate()
+ instance.to_call(1, 2)
_ToDecorate().to_call_cls(3, 4)
_ToDecorate().to_call_static(5, 6)
assert len(tracker) == 3
- assert tracker[0] == {1: (1, 2)}
+ assert tracker[0] == {1: (instance, 1, 2)}
assert tracker[1] == {1: (3, 4)}
assert tracker[2] == {1: (5, 6)}
diff --git a/tests/decorators/test_ready_to_wear.py b/tests/decorators/test_ready_to_wear.py
index b5cd57d..876b4a8 100644
--- a/tests/decorators/test_ready_to_wear.py
+++ b/tests/decorators/test_ready_to_wear.py
@@ -68,7 +68,25 @@ def test_intercept(raises, catch, reraise, include_handler):
if handler is not None and not will_catch:
handler.assert_not_called()
- wrapped.assert_called_once_with(*(), **{})
+ wrapped.assert_called_once_with(*(), **{}) # type: ignore
+
+
+def test_intercept_method():
+ """Test decorating an instance method with intercept."""
+
+ calls = []
+
+ def _handler(exc):
+ calls.append(exc)
+
+ class SomeClass:
+ @intercept(handler=_handler)
+ def it_raises(self, val):
+ raise ValueError(val)
+
+ SomeClass().it_raises("a")
+ assert len(calls) == 1
+ assert isinstance(calls[0], ValueError)
def test_log_call():
@@ -89,7 +107,7 @@ def test_log_call():
name="func", args=call_args, kwargs=call_kwargs, result=call_res
)
- exp_logger.debug.assert_called_once_with(exp_msg) # type: ignore
+ exp_logger.debug.assert_called_once_with(exp_msg)
class TestMemoization:
diff --git a/tests/test_functions.py b/tests/test_functions.py
index 87015ec..ede8133 100644
--- a/tests/test_functions.py
+++ b/tests/test_functions.py
@@ -66,7 +66,7 @@ def test_interceptor(raises, catch, reraise, include_handler):
if handler is not None and not will_catch:
handler.assert_not_called()
- wrapped.assert_called_once_with(*(), **{})
+ wrapped.assert_called_once_with(*(), **{}) # type: ignore
def test_log_call():
@@ -88,4 +88,4 @@ def test_log_call():
name="func", args=call_args, kwargs=call_kwargs, result=call_res
)
- exp_logger.debug.assert_called_once_with(exp_msg) # type: ignore
+ exp_logger.debug.assert_called_once_with(exp_msg)
|
Support for decorating methods
Hello,
I just came across this package, and it contains several facilities that I find very useful for creating expressive code, so I wanted to give it a try. However, when trying to use the "intercept" decorator on an instance method, it seems that the 'self' parameter gets swallowed, raising an error:
```
{TypeError}myfunction() missing 1 required positional argument: 'x'
```
Code example:
```
def _handle_exception(exception):
exception_name = type(exception).__name__
if exception_name == 'NoSuchKey':
raise EntityNotFound('The specified key does not exist')
else:
raise exception
... skipping class definition and details...
@intercept(handler=_handle_exception)
def get_node_information(self, key):
return self._key_to_meta(key)
```
Any workarounds for this case? Thanks!
|
0.0
|
b7554cfa64f62e5619511438a821e0fae0a51a19
|
[
"tests/decorators/test_generics.py::TestBefore::test_before_direct_method_decoration_equivalent",
"tests/decorators/test_generics.py::TestAfter::test_after_direct_method_decoration_equivalent",
"tests/decorators/test_generics.py::TestInstead::test_instead_direct_method_decoration_equivalent",
"tests/decorators/test_ready_to_wear.py::test_intercept_method"
] |
[
"tests/decorators/test_generics.py::TestDecorated::test_str",
"tests/decorators/test_generics.py::TestDecorated::test_call",
"tests/decorators/test_generics.py::TestDecorated::test_call_sets_result",
"tests/decorators/test_generics.py::TestDecorated::test_immutable",
"tests/decorators/test_generics.py::TestBefore::test_before_no_ret",
"tests/decorators/test_generics.py::TestBefore::test_before_ret",
"tests/decorators/test_generics.py::TestBefore::test_before_receives_kwargs",
"tests/decorators/test_generics.py::TestBefore::test_before_implicit_instancemethod",
"tests/decorators/test_generics.py::TestBefore::test_before_implicit_classmethod",
"tests/decorators/test_generics.py::TestBefore::test_before_implicit_staticmethod",
"tests/decorators/test_generics.py::TestBefore::test_before_implicit_instancemethod_instace_only",
"tests/decorators/test_generics.py::TestBefore::test_before_implicit_classmethod_instance_only",
"tests/decorators/test_generics.py::TestBefore::test_before_implicit_staticmethod_instance_only",
"tests/decorators/test_generics.py::TestBefore::test_before_method_decorates_class_if_not_implicit",
"tests/decorators/test_generics.py::TestBefore::test_before_decorates_on_class_references",
"tests/decorators/test_generics.py::TestAfter::test_after_no_ret",
"tests/decorators/test_generics.py::TestAfter::test_after_ret",
"tests/decorators/test_generics.py::TestAfter::test_after_receives_kwargs",
"tests/decorators/test_generics.py::TestAfter::test_after_implicit_instancemethod",
"tests/decorators/test_generics.py::TestAfter::test_after_implicit_classmethod",
"tests/decorators/test_generics.py::TestAfter::test_after_implicit_staticmethod",
"tests/decorators/test_generics.py::TestAfter::test_after_implicit_instancemethod_instace_only",
"tests/decorators/test_generics.py::TestAfter::test_after_implicit_classmethod_instance_only",
"tests/decorators/test_generics.py::TestAfter::test_after_implicit_staticmethod_instance_only",
"tests/decorators/test_generics.py::TestAfter::test_after_method_decorates_class_if_not_implicit",
"tests/decorators/test_generics.py::TestAfter::test_after_decorates_on_class_references",
"tests/decorators/test_generics.py::TestInstead::test_instead_no_call",
"tests/decorators/test_generics.py::TestInstead::test_instead_calls",
"tests/decorators/test_generics.py::TestInstead::test_instead_receives_kwargs",
"tests/decorators/test_generics.py::TestInstead::test_instead_implicit_instancemethod",
"tests/decorators/test_generics.py::TestInstead::test_instead_implicit_classmethod",
"tests/decorators/test_generics.py::TestInstead::test_instead_implicit_staticmethod",
"tests/decorators/test_generics.py::TestInstead::test_instead_implicit_instancemethod_instace_only",
"tests/decorators/test_generics.py::TestInstead::test_instead_implicit_classmethod_instance_only",
"tests/decorators/test_generics.py::TestInstead::test_instead_implicit_staticmethod_instance_only",
"tests/decorators/test_generics.py::TestInstead::test_instead_method_decorates_class_if_not_implicit",
"tests/decorators/test_generics.py::TestInstead::test_instead_decorates_on_class_references",
"tests/decorators/test_generics.py::TestDecorator::test_all_decorators",
"tests/decorators/test_generics.py::TestDecorator::test_all_decorators_constructed",
"tests/decorators/test_generics.py::TestDecorator::test_all_callables_get_extras",
"tests/decorators/test_generics.py::TestDecorator::test_all_callables_get_specific_extras",
"tests/decorators/test_generics.py::TestDecorator::test_all_callables_specific_extras_overridden",
"tests/decorators/test_generics.py::TestDecorator::test_just_before",
"tests/decorators/test_generics.py::TestDecorator::test_just_after",
"tests/decorators/test_generics.py::TestDecorator::test_just_instead",
"tests/decorators/test_generics.py::TestDecorator::test_all_decorators_implicit_class",
"tests/decorators/test_generics.py::TestDecorator::test_at_least_one_callable_must_be_specified",
"tests/decorators/test_generics.py::test_extras_persistence[before]",
"tests/decorators/test_generics.py::test_extras_persistence[after]",
"tests/decorators/test_generics.py::test_extras_persistence[instead]",
"tests/decorators/test_generics.py::test_extras_persistence_class[before]",
"tests/decorators/test_generics.py::test_extras_persistence_class[after]",
"tests/decorators/test_generics.py::test_extras_persistence_class[instead]",
"tests/decorators/test_generics.py::test_extras_persistence_class_inst_only[before]",
"tests/decorators/test_generics.py::test_extras_persistence_class_inst_only[after]",
"tests/decorators/test_generics.py::test_extras_persistence_class_inst_only[instead]",
"tests/decorators/test_ready_to_wear.py::test_intercept[Exception-Exception-ValueError-False]",
"tests/decorators/test_ready_to_wear.py::test_intercept[Exception-Exception-ValueError-True]",
"tests/decorators/test_ready_to_wear.py::test_intercept[Exception-Exception-True-True]",
"tests/decorators/test_ready_to_wear.py::test_intercept[Exception-Exception-True-False]",
"tests/decorators/test_ready_to_wear.py::test_intercept[None-Exception-ValueError-False]",
"tests/decorators/test_ready_to_wear.py::test_intercept[None-Exception-ValueError-True]",
"tests/decorators/test_ready_to_wear.py::test_intercept[Exception-Exception-None-False]",
"tests/decorators/test_ready_to_wear.py::test_intercept[Exception-Exception-None-True]",
"tests/decorators/test_ready_to_wear.py::test_intercept[Exception-RuntimeError-ValueError-False]",
"tests/decorators/test_ready_to_wear.py::test_intercept[Exception-RuntimeError-ValueError-True]",
"tests/decorators/test_ready_to_wear.py::test_log_call",
"tests/decorators/test_ready_to_wear.py::TestMemoization::test_memoize_basic[args0-kwargs0]",
"tests/decorators/test_ready_to_wear.py::TestMemoization::test_memoize_basic[args1-kwargs1]",
"tests/decorators/test_ready_to_wear.py::TestMemoization::test_memoize_basic[args2-kwargs2]",
"tests/decorators/test_ready_to_wear.py::TestMemoization::test_memoize_basic[args3-kwargs3]",
"tests/decorators/test_ready_to_wear.py::TestMemoization::test_memoize_basic[args4-kwargs4]",
"tests/decorators/test_ready_to_wear.py::TestMemoization::test_memoize_basic[args5-kwargs5]",
"tests/decorators/test_ready_to_wear.py::TestMemoization::test_memoize_basic[args6-kwargs6]",
"tests/decorators/test_ready_to_wear.py::TestMemoization::test_memoize_lru",
"tests/decorators/test_ready_to_wear.py::TestMemoization::test_memoize_fifo",
"tests/decorators/test_ready_to_wear.py::TestMemoization::test_memoization_timed",
"tests/test_functions.py::test_interceptor[Exception-Exception-ValueError-False]",
"tests/test_functions.py::test_interceptor[Exception-Exception-ValueError-True]",
"tests/test_functions.py::test_interceptor[None-Exception-ValueError-False]",
"tests/test_functions.py::test_interceptor[None-Exception-ValueError-True]",
"tests/test_functions.py::test_interceptor[Exception-Exception-None-False]",
"tests/test_functions.py::test_interceptor[Exception-Exception-None-True]",
"tests/test_functions.py::test_interceptor[Exception-RuntimeError-ValueError-False]",
"tests/test_functions.py::test_interceptor[Exception-RuntimeError-ValueError-True]",
"tests/test_functions.py::test_log_call"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-07-01 19:44:18+00:00
|
mit
| 4,059 |
|
mps-gmbh__hl7-parser-22
|
diff --git a/hl7parser/hl7_data_types.py b/hl7parser/hl7_data_types.py
index a0a4726..2a8ba81 100644
--- a/hl7parser/hl7_data_types.py
+++ b/hl7parser/hl7_data_types.py
@@ -173,7 +173,7 @@ class HL7Datetime(HL7DataType):
"""
component_map = ['datetime']
- def __init__(self, composite, delimiter):
+ def __init__(self, composite, delimiter, use_delimiter="subcomponent_separator"):
if len(composite) == 0:
self.datetime = ""
self.isNull = True
|
mps-gmbh/hl7-parser
|
30d03f9dbd51866f0217f098db73b3ae5bfb4950
|
diff --git a/tests/test_parse.py b/tests/test_parse.py
index d13898b..9ed57c6 100644
--- a/tests/test_parse.py
+++ b/tests/test_parse.py
@@ -64,7 +64,7 @@ class TestParsing(unittest.TestCase):
msg_mrg = (
"MSH|^~\&|HNAM_PM|HNA500|AIG||20131016140148||ADT^A34|Q150084042T145948315C489644\n"
"PID|1||3790333^^^MSH_MRN^MRN^MTSN~2175611^^^MSH_EMPI^CMRN^UPLOAD|195511^^^IID^DONOR ID^MTSN~Q3790333^^^MSQ_MRN^KMRN|EVERYMAN^ADAM^J^^^^CURRENT||19580321|M|||77 CRANFORD COURT^^NEW YORK^NY^10038^USA^HOME^^040|040|(212)555-1212^HOM\n"
- "MRG|3150123^^^MSH_MRN^MRN^MTSN|Q3150123"
+ "MRG|3150123^^^MSH_MRN^MRN^MTSN^20131016140148^^^^|Q3150123"
)
successful_query_result = "\n".join((
@@ -269,7 +269,7 @@ class TestParsing(unittest.TestCase):
def test_in1_segment():
message_data = (
- "IN1|1:1|McDH||McDonalds Health||||||"
+ "IN1|1:1|McDH|123456789^^^^^^^^^McDonalds Health|McDonalds Health||||||"
"|||||||||"
"||||||||||"
"||||||1|12345|||||||||||||"
|
Parser fails if a datetime is provided in extended composite id block
The following message part contains an extended composite id block but cannot be parsed:
```
IN1|1:1|McDH|123456789^^^^^^^^^McDonalds Health|McDonalds Health||||||
```
This causes the parser to fail with a `TypeError`.
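A hedged repro sketch (the `HL7Segment` import path is assumed from the project's tests, not confirmed by this row):
```python
from hl7parser.hl7 import HL7Segment  # import path assumed, not confirmed

# Field 3 is an extended composite id; its datetime component is handed to
# HL7Datetime with an extra delimiter keyword, which raised TypeError
# before the fix and parses cleanly after it.
segment = HL7Segment(
    "IN1|1:1|McDH|123456789^^^^^^^^^McDonalds Health|McDonalds Health||||||"
)
print(segment)
```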
|
0.0
|
30d03f9dbd51866f0217f098db73b3ae5bfb4950
|
[
"tests/test_parse.py::TestParsing::test_mrg_message_parse",
"tests/test_parse.py::test_in1_segment"
] |
[
"tests/test_parse.py::TestParsing::test_address",
"tests/test_parse.py::TestParsing::test_datetime",
"tests/test_parse.py::TestParsing::test_len",
"tests/test_parse.py::TestParsing::test_message_parse",
"tests/test_parse.py::TestParsing::test_multiple_segments_parse",
"tests/test_parse.py::TestParsing::test_non_zero",
"tests/test_parse.py::TestParsing::test_pv1_segment",
"tests/test_parse.py::TestParsing::test_simple_segments_parse",
"tests/test_parse.py::TestParsing::test_successful_query_status",
"tests/test_parse.py::TestParsing::test_trailing_segment_fields",
"tests/test_parse.py::TestParsing::test_unknown_message_parse",
"tests/test_parse.py::TestParsing::test_unsuccessful_query_status"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2019-06-28 09:15:52+00:00
|
bsd-3-clause
| 4,060 |
|
mps-gmbh__hl7-parser-24
|
diff --git a/hl7parser/hl7_data_types.py b/hl7parser/hl7_data_types.py
index 2a8ba81..6969871 100644
--- a/hl7parser/hl7_data_types.py
+++ b/hl7parser/hl7_data_types.py
@@ -168,12 +168,22 @@ class HL7Datetime(HL7DataType):
"""
HL7 datetime data type
+ Complete Supports the DTM format and only partial support for the older TM format,
+ which is deprecated since hl7 2.6.
+ If the datetime is given in TM format the second component is ignored, because its not
+ reliable and the first component is treated like the DTM formatted datetime.
+
example input:
198808181126
"""
component_map = ['datetime']
- def __init__(self, composite, delimiter, use_delimiter="subcomponent_separator"):
+ def __init__(self, composite, delimiters, use_delimiter="component_separator"):
+
+ delimiter = getattr(delimiters, use_delimiter)
+ composite = composite.split(delimiter)
+ composite = composite[0]
+
if len(composite) == 0:
self.datetime = ""
self.isNull = True
|
mps-gmbh/hl7-parser
|
dec00031b505de2b38b6270ac40b0fff266ef2fa
|
diff --git a/tests/data_types.py b/tests/data_types.py
index d5afa72..97e0d16 100644
--- a/tests/data_types.py
+++ b/tests/data_types.py
@@ -3,6 +3,7 @@ from __future__ import unicode_literals
import pytest
+from hl7parser.hl7 import HL7Delimiters
from hl7parser.hl7_data_types import HL7Datetime
@@ -13,9 +14,12 @@ from hl7parser.hl7_data_types import HL7Datetime
("", "", ""),
("2010", "2010-01-01T00:00:00", "2010"),
("-200", "", ""),
+ ("20190924143134^YYYYMMDDHHMMSS", "2019-09-24T14:31:34", "20190924143134")
]
)
def test_datetime(input_string, isoformat, string_repr):
- dt = HL7Datetime(input_string, "|")
+
+ delimiters = HL7Delimiters(*"|^~\&")
+ dt = HL7Datetime(input_string, delimiters)
assert dt.isoformat() == isoformat
assert str(dt) == string_repr
diff --git a/tests/test_parse.py b/tests/test_parse.py
index 9ed57c6..82163da 100644
--- a/tests/test_parse.py
+++ b/tests/test_parse.py
@@ -10,6 +10,8 @@ import unittest
import six
import sys
import pytest
+import datetime
+
@pytest.mark.skipif(sys.version_info.major > 2, reason="not relevant for python 3")
def test_bytestring_segment():
@@ -241,7 +243,8 @@ class TestParsing(unittest.TestCase):
def test_pv1_segment(self):
segment = HL7Segment(
- "PV1|1|I|2000^2012^01||||004777^ATTEND^AARON^A|||SUR||||ADM|A0|"
+ "PV1|1|I|2000^2012^01||123^^^^^^20190924143134&YYYYMMDDHHMMSS"
+ "||004777^ATTEND^AARON^A|||SUR||||ADM|A0|"
)
self.assertEqual(six.text_type(segment.patient_class), "I")
@@ -266,6 +269,9 @@ class TestParsing(unittest.TestCase):
self.assertEqual(
six.text_type(segment.hospital_service), "SUR")
+ self.assertEqual(
+ six.text_type(segment.preadmit_number.effective_date.datetime), "2019-09-24 14:31:34")
+
def test_in1_segment():
message_data = (
|
HL7 Datetime fails to parse DTM with precision field
If the optional precision component of the DTM data field is not empty, the given datetime (DTM) cannot be parsed; instead, a ValueError is raised.
***Example HL7 message to reproduce the error:***
```
MSH|^~\&|||||||ADT^A01|12346789|P|2.7|
EVN|A01|20190924143134^YYYYMMDDHHMMSS||
PID|||59wrtt0w9suJ-^foo^bar||Hilde^Held||19660119|F|||||||||||||
PV1||0|7697^69^^MED2||||||||||||||||744784|||||||||||||||||||||||||20190916143134920350||
IN1|1||00818903|TKK||||4||||||||||||||||||||||||||||00597522|
```
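A usage sketch mirroring the row's new test: the precision component after the `^` separator is split off before the DTM value is parsed.
```python
from hl7parser.hl7 import HL7Delimiters
from hl7parser.hl7_data_types import HL7Datetime

# Mirrors tests/data_types.py: "^YYYYMMDDHHMMSS" is stripped before parsing.
delimiters = HL7Delimiters(*"|^~\\&")
dt = HL7Datetime("20190924143134^YYYYMMDDHHMMSS", delimiters)
assert dt.isoformat() == "2019-09-24T14:31:34"
assert str(dt) == "20190924143134"
```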
|
0.0
|
dec00031b505de2b38b6270ac40b0fff266ef2fa
|
[
"tests/data_types.py::test_datetime[20190924143134^YYYYMMDDHHMMSS-2019-09-24T14:31:34-20190924143134]",
"tests/test_parse.py::TestParsing::test_pv1_segment"
] |
[
"tests/data_types.py::test_datetime[198808181126-1988-08-18T11:26:00-198808181126]",
"tests/data_types.py::test_datetime[--]",
"tests/data_types.py::test_datetime[2010-2010-01-01T00:00:00-2010]",
"tests/data_types.py::test_datetime[-200--]",
"tests/test_parse.py::TestParsing::test_address",
"tests/test_parse.py::TestParsing::test_datetime",
"tests/test_parse.py::TestParsing::test_len",
"tests/test_parse.py::TestParsing::test_message_parse",
"tests/test_parse.py::TestParsing::test_mrg_message_parse",
"tests/test_parse.py::TestParsing::test_multiple_segments_parse",
"tests/test_parse.py::TestParsing::test_non_zero",
"tests/test_parse.py::TestParsing::test_simple_segments_parse",
"tests/test_parse.py::TestParsing::test_successful_query_status",
"tests/test_parse.py::TestParsing::test_trailing_segment_fields",
"tests/test_parse.py::TestParsing::test_unknown_message_parse",
"tests/test_parse.py::TestParsing::test_unsuccessful_query_status",
"tests/test_parse.py::test_in1_segment"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2019-10-01 12:41:02+00:00
|
bsd-3-clause
| 4,061 |
|
mps-gmbh__hl7-parser-27
|
diff --git a/hl7parser/hl7_segments.py b/hl7parser/hl7_segments.py
index 096da68..7effa36 100644
--- a/hl7parser/hl7_segments.py
+++ b/hl7parser/hl7_segments.py
@@ -251,7 +251,7 @@ segment_maps = {
"insureds_address",
options={"type": HL7_ExtendedAddress, "required": False, "repeats": False},
),
- make_cell_type("policy_number", index=36)
+ make_cell_type("policy_number", index=35)
# NOTE: standard defines more fields which can be added if needed in
# the future
],
|
mps-gmbh/hl7-parser
|
1bcd959dd74ce1d4fbeeefb7b0d4c966f4ff53b9
|
diff --git a/tests/test_parse.py b/tests/test_parse.py
index 8b285fb..e0ae193 100644
--- a/tests/test_parse.py
+++ b/tests/test_parse.py
@@ -288,7 +288,7 @@ def test_in1_segment():
"IN1|1:1|McDH|123456789^^^^^^^^^McDonalds Health|McDonalds Health||||||"
"||||||Musterfrau^Gertrud^^^^Dr.^L^A^|SEL|19700101|Königstr. 1B^^Stuttgart^^70173|"
"||||||||||"
- "||||||1|12345|||||||||||||"
+ "|||||1|12345||||||||||||||"
)
in1 = HL7Segment(message_data)
@@ -302,9 +302,9 @@ def test_in1_segment():
assert str(name_of_insured) == "Musterfrau^Gertrud^^^^Dr.^L^A^"
assert str(name_of_insured.family_name) == "Musterfrau"
assert str(name_of_insured.given_name) == "Gertrud"
- assert str(name_of_insured.middle_name) == ''
- assert str(name_of_insured.suffix) == ''
- assert str(name_of_insured.prefix) == ''
+ assert str(name_of_insured.middle_name) == ""
+ assert str(name_of_insured.suffix) == ""
+ assert str(name_of_insured.prefix) == ""
assert str(name_of_insured.degree) == "Dr."
assert str(name_of_insured.name_type_code) == "L"
assert str(name_of_insured.name_representation_code) == "A"
|
Wrong index for IN1 cell `policy_number`
Introduced by #26
The index for the IN1 cell `policy_number` was unfortunately changed from 35 to 36. The correct index is 35.
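For illustration, a tiny standalone sketch of the zero-based indexing convention involved (the assumption that cells are counted from the first field after the segment name follows the repo's `make_cell_type` usage):
```python
# The 36th pipe-delimited field after the segment name sits at zero-based index 35.
segment = "IN1" + "|" * 36 + "00597522"   # policy number in the 36th field
cells = segment.split("|")[1:]            # drop the "IN1" segment name
assert cells[35] == "00597522"
```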
|
0.0
|
1bcd959dd74ce1d4fbeeefb7b0d4c966f4ff53b9
|
[
"tests/test_parse.py::test_in1_segment"
] |
[
"tests/test_parse.py::TestParsing::test_address",
"tests/test_parse.py::TestParsing::test_datetime",
"tests/test_parse.py::TestParsing::test_len",
"tests/test_parse.py::TestParsing::test_message_parse",
"tests/test_parse.py::TestParsing::test_mrg_message_parse",
"tests/test_parse.py::TestParsing::test_multiple_segments_parse",
"tests/test_parse.py::TestParsing::test_non_zero",
"tests/test_parse.py::TestParsing::test_pv1_segment",
"tests/test_parse.py::TestParsing::test_simple_segments_parse",
"tests/test_parse.py::TestParsing::test_successful_query_status",
"tests/test_parse.py::TestParsing::test_trailing_segment_fields",
"tests/test_parse.py::TestParsing::test_unknown_message_parse",
"tests/test_parse.py::TestParsing::test_unsuccessful_query_status"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-04-28 15:06:15+00:00
|
bsd-3-clause
| 4,062 |
|
mrf345__flask_minify-80
|
diff --git a/Makefile b/Makefile
index 608c314..1227923 100644
--- a/Makefile
+++ b/Makefile
@@ -6,9 +6,9 @@ c ?= 1
test: install
test -f .venv/bin/activate && source .venv/bin/activate && python -m bandit -c bandit.yml -r . && python -m pytest --count=$(c)
lint: install
- source .venv/bin/activate && python -m isort --profile black --check . && python -m black --check .
+ source .venv/bin/activate && python -m isort -sg "**/.venv*" --profile black --check . && python -m black --check .
format: install
- test -f .venv/bin/activate && source .venv/bin/activate && python -m isort --profile black . && python -m black .
+ test -f .venv/bin/activate && source .venv/bin/activate && python -m isort -sg "**/.venv*" --profile black . && python -m black .
run: install
python tests/integration.py
release: install-dev clean
diff --git a/README.md b/README.md
index 1d17a44..29fdd1b 100644
--- a/README.md
+++ b/README.md
@@ -149,6 +149,10 @@ the **default** parsers are set to `{"html": Html, "script": Jsmin, "style": Rcs
## Breaking changes
+#### `0.40`
+
+Due to a future deprecation in Flask 2.3, the extension is no longer going to fallback to `Flask._app_ctx_stack`, it will raise an exception instead (`flask_minify.exceptions.MissingApp`)
+
#### `0.33`
introduces a breaking change to the expected output, in this release `lesscpy` will be replaced by `cssmin` as
diff --git a/flask_minify/about.py b/flask_minify/about.py
index 7071428..ef4cfb0 100644
--- a/flask_minify/about.py
+++ b/flask_minify/about.py
@@ -1,4 +1,4 @@
-__version__ = "0.39"
+__version__ = "0.40"
__doc__ = "Flask extension to minify html, css, js and less."
__license__ = "MIT"
__author__ = "Mohamed Feddad"
diff --git a/flask_minify/exceptions.py b/flask_minify/exceptions.py
new file mode 100644
index 0000000..3c99cd0
--- /dev/null
+++ b/flask_minify/exceptions.py
@@ -0,0 +1,10 @@
+class FlaskMinifyException(Exception):
+ """FlaskMinify base exception"""
+
+ pass
+
+
+class MissingApp(FlaskMinifyException):
+ """Raised when the flask app is accessed before it's set"""
+
+ pass
diff --git a/flask_minify/main.py b/flask_minify/main.py
index 9dba299..8fc30fe 100644
--- a/flask_minify/main.py
+++ b/flask_minify/main.py
@@ -1,9 +1,10 @@
from itertools import tee
from re import compile as compile_re
-from flask import _app_ctx_stack, request
+from flask import request
from flask_minify.cache import MemoryCache
+from flask_minify.exceptions import MissingApp
from flask_minify.parsers import Parser
from flask_minify.utils import does_content_type_match
@@ -112,14 +113,24 @@ class Minify:
@property
def app(self):
- """If app was passed take it, if not get the one on top.
+ """If app was passed take it, otherwise raise an exception.
Returns
-------
Flask App
The current Flask application.
+
+ Raises
+ ------
+ MissingApp
"""
- return self._app or (_app_ctx_stack.top and _app_ctx_stack.top.app)
+ if not self._app:
+ raise MissingApp(
+ "Flask app has not been passed to the extension `Minify(app=None)`, "
+ "nor lazy initialized with `.init_app(app)`"
+ )
+
+ return self._app
def init_app(self, app):
"""Handle initiation of multiple apps NOTE:Factory Method"""
|
mrf345/flask_minify
|
4b48318cb3a6eac55b76f59d197dc8c895b00392
|
diff --git a/tests/units.py b/tests/units.py
index ddd1aa5..97606da 100644
--- a/tests/units.py
+++ b/tests/units.py
@@ -1,8 +1,10 @@
-from random import randint
from unittest import mock
+import pytest
+
from flask_minify import minify, parsers
from flask_minify.cache import MemoryCache
+from flask_minify.exceptions import MissingApp
from flask_minify.utils import does_content_type_match, is_empty
from .constants import (
@@ -87,6 +89,14 @@ class TestMinifyRequest:
assert (list(matches), exists) == ([], False)
+ def test_access_missing_app_raises_exception(self):
+ """test accessing a missing flask app raises an exception"""
+ self.mock_app = None
+ ext = self.minify_defaults
+
+ with pytest.raises(MissingApp):
+ ext.app
+
class TestParsers:
def test_css_edge_cases_with_rcssmin(self):
|
DeprecationWarning: `_app_ctx_stack` is deprecated and will be removed in Flask `2.3`. Use `g` to store data, or `app_ctx` to access the current context.
Please address this deprecation warning in the next release.
```
/venv/lib/python3.9/site-packages/flask_minify/main.py:122: DeprecationWarning: '_app_ctx_stack' is deprecated and will be removed in Flask 2.3. Use 'g' to store data, or 'app_ctx' to access the current context.
return self._app or (_app_ctx_stack.top and _app_ctx_stack.top.app)
```
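A sketch of the behavior after the patch above (the lowercase `minify` alias and its `app=None` default are assumptions taken from the repo's tests):
```python
from flask_minify import minify
from flask_minify.exceptions import MissingApp

ext = minify()  # neither minify(app=app) nor ext.init_app(app) was called
try:
    ext.app     # the property no longer falls back to _app_ctx_stack
except MissingApp as exc:
    print(exc)  # explains that the extension was never bound to a Flask app
```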
|
0.0
|
4b48318cb3a6eac55b76f59d197dc8c895b00392
|
[
"tests/units.py::TestUtils::test_is_empty",
"tests/units.py::TestUtils::test_is_html",
"tests/units.py::TestUtils::test_is_js",
"tests/units.py::TestUtils::test_is_cssless",
"tests/units.py::TestParsers::test_css_edge_cases_with_rcssmin",
"tests/units.py::TestParsers::test_overriding_parser_options"
] |
[] |
{
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-10-02 20:04:41+00:00
|
mit
| 4,063 |
|
mroberge__hydrofunctions-61
|
diff --git a/hydrofunctions/charts.py b/hydrofunctions/charts.py
index 8e41052..aba1b8e 100644
--- a/hydrofunctions/charts.py
+++ b/hydrofunctions/charts.py
@@ -20,7 +20,7 @@ from matplotlib.ticker import NullFormatter
import numpy as np
-def flow_duration(Qdf, xscale='logit', yscale='log', ylabel='Stream Discharge (m³/s)', symbol='.', legend=True):
+def flow_duration(Qdf, xscale='logit', yscale='log', ylabel='Stream Discharge (m³/s)', symbol='.', legend=True, legend_loc='best', title=''):
"""Creates a flow duration chart from a dataframe of discharges.
Args:
@@ -56,6 +56,17 @@ def flow_duration(Qdf, xscale='logit', yscale='log', ylabel='Stream Discharge (m
legend (bool, default: True): whether the legend should be plotted.
+ legend_loc (str, default: 'best'): the location of the legend.
+
+ * 'best': Automatically choose the option below with the least overlap.
+ * 'upper left', 'upper right', 'lower left', 'lower right': place the legend at the corresponding corner of the axes/figure.
+ * 'upper center', 'lower center', 'center left', 'center right': place the legend at the center of the corresponding edge of the axes/figure.
+ * 'center': place the legend at the center of the axes/figure.
+ * The location can also be a 2-tuple giving the coordinates of the lower-left corner of the legend in axes coordinates.
+
+ title (str, default: ''): text to use as a figure title. If no text
+ is provided, no title will be created (default).
+
Returns:
fig (matplotlib.figure.Figure):
a matplotlib figure. This will plot immediately in a Jupyter
@@ -76,14 +87,16 @@ def flow_duration(Qdf, xscale='logit', yscale='log', ylabel='Stream Discharge (m
ax.set_yscale(yscale)
ax.set_ylabel(ylabel)
if legend:
- ax.legend()
+ ax.legend(loc=legend_loc)
+ if title:
+ ax.title.set_text(title)
# A pyplot bug causes a valueError value if the xlabel is set.
#ax.set_xlabel('Probability of Exceedence')
ax.xaxis.set_minor_formatter(NullFormatter())
return fig, ax
-def cycleplot(Qseries, cycle='diurnal', compare=None, y_label='Discharge (ft³/s)'):
+def cycleplot(Qseries, cycle='diurnal', compare=None, y_label='Discharge (ft³/s)', legend=True, legend_loc='best', title=''):
"""Creates a chart to illustrate annual and diurnal cycles.
This chart will use the pandas groupby method to plot the mean and median
@@ -138,6 +151,19 @@ def cycleplot(Qseries, cycle='diurnal', compare=None, y_label='Discharge (ft³/s
y_label (str): The label for the y axis.
+ legend (bool, default: True): whether the legend should be plotted.
+
+ legend_loc (str, default: 'best'): the location of the legend.
+
+ * 'best': Automatically choose the option below with the least overlap.
+ * 'upper left', 'upper right', 'lower left', 'lower right': place the legend at the corresponding corner of the axes/figure.
+ * 'upper center', 'lower center', 'center left', 'center right': place the legend at the center of the corresponding edge of the axes/figure.
+ * 'center': place the legend at the center of the axes/figure.
+ * The location can also be a 2-tuple giving the coordinates of the lower-left corner of the legend in axes coordinates.
+
+ title (str, default: ''): text to use as a figure title. If no text
+ is provided, no title will be created (default).
+
Returns:
fig (matplotlib.figure.Figure):
a matplotlib figure. This will plot immediately in a Jupyter
@@ -187,9 +213,7 @@ def cycleplot(Qseries, cycle='diurnal', compare=None, y_label='Discharge (ft³/s
cycleby = Qseries.index.hour
x_label = ' (hour of the day)'
else:
- print("The cycle label '", cycle, "' is not recognized as an option. Using cycle='diurnal' instead.")
- cycleby = Qseries.index.hour
- x_label = ' (hour of the day)'
+ raise ValueError("The cycle label '%s' is not recognized as an option." % cycle)
if compare is None:
# Don't make a comparison plot.
@@ -269,7 +293,8 @@ def cycleplot(Qseries, cycle='diurnal', compare=None, y_label='Discharge (ft³/s
# axs[i].xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H'))
# Set the legend on either the ax or fig.
- axs[0].legend(loc='best', fancybox=True, framealpha=0.5)
+ if legend:
+ axs[0].legend(loc=legend_loc, fancybox=True, framealpha=0.5)
# fig.legend(loc='upper center', shadow=True, frameon=True, fancybox=True, framealpha=0.5)
# Get the yaxis limits, set bottom to zero.
@@ -278,5 +303,7 @@ def cycleplot(Qseries, cycle='diurnal', compare=None, y_label='Discharge (ft³/s
axs[0].set_ylabel(y_label)
axs[0].set_xlabel('Time' + x_label)
plt.tight_layout()
+ if title:
+ fig.suptitle(title)
return fig, axs
|
mroberge/hydrofunctions
|
bf1f296f4a2e31a0df9bd07860bbfa90e71e7890
|
diff --git a/tests/test_charts.py b/tests/test_charts.py
index 9219578..a74174d 100644
--- a/tests/test_charts.py
+++ b/tests/test_charts.py
@@ -34,18 +34,28 @@ class TestFlowDuration(unittest.TestCase):
actual_yscale = actual_ax.yaxis.get_scale()
actual_ylabel = actual_ax.yaxis.get_label_text()
actual_marker = actual_ax.get_lines()[0].get_marker()
+ actual_legend = actual_ax.get_legend()
+ actual_legend_loc = actual_legend._loc
+ actual_title = actual_ax.get_title()
self.assertEqual(actual_xscale, 'logit')
self.assertEqual(actual_yscale, 'log')
self.assertEqual(actual_ylabel, 'Stream Discharge (m³/s)')
self.assertEqual(actual_marker, '.')
+ self.assertTrue(actual_legend)
+ self.assertEqual(actual_legend_loc, 0) # '0' is internal code for 'best'.
+ self.assertEqual(actual_title, '')
def test_charts_flowduration_accepts_params(self):
expected = pd.DataFrame(data=dummy)
params = {'xscale': 'linear',
'yscale': 'linear',
'ylabel': 'test value',
- 'symbol': ','}
+ 'symbol': ',',
+ 'legend': False,
+ 'legend_loc': 'center',
+ 'title': 'Test Title',
+ }
actual_fig, actual_ax = charts.flow_duration(expected, **params)
@@ -53,12 +63,19 @@ class TestFlowDuration(unittest.TestCase):
actual_yscale = actual_ax.yaxis.get_scale()
actual_ylabel = actual_ax.yaxis.get_label_text()
actual_marker = actual_ax.get_lines()[0].get_marker()
+ actual_legend = actual_ax.get_legend()
+ # There is no legend in this test, so there is no legend property.
+ #actual_legend_loc = actual_legend._loc
+ actual_title = actual_ax.get_title()
self.assertEqual(actual_xscale, 'linear')
self.assertEqual(actual_yscale, 'linear')
self.assertEqual(actual_ylabel, 'test value')
self.assertEqual(actual_marker, ',')
-
+ self.assertIsNone(actual_legend)
+ # There is no legend, so there is no legend location property.
+ #self.assertEqual(actual_legend_loc, 10) # 'center' is equal to 10.
+ self.assertEqual(actual_title, 'Test Title')
class TestCyclePlot(unittest.TestCase):
@@ -80,15 +97,21 @@ class TestCyclePlot(unittest.TestCase):
def test_charts_cycleplot_parts(self):
expected_df, expected_dict = hf.extract_nwis_df(test_json, interpolate=False)
- actual_fig, actual_ax = charts.cycleplot(expected_df)
+ actual_fig, actual_ax = charts.cycleplot(expected_df, legend_loc='center', title='test title')
actual_xscale = actual_ax[0].xaxis.get_scale()
actual_yscale = actual_ax[0].yaxis.get_scale()
actual_ylabel = actual_ax[0].yaxis.get_label_text()
+ actual_legend = actual_ax[0].get_legend()
+ actual_legend_loc = actual_legend._loc
+ actual_title = actual_fig._suptitle.get_text() # unofficial title accessor! A little wonky.
self.assertEqual(actual_xscale, 'linear')
self.assertEqual(actual_yscale, 'linear')
self.assertEqual(actual_ylabel, 'Discharge (ft³/s)')
+ self.assertTrue(actual_legend)
+ self.assertEqual(actual_legend_loc, 10) # '10' is internal code for legend(loc = 'center')
+ self.assertEqual(actual_title, 'test title')
def test_charts_cycleplot_compare_month(self):
expected_df, expected_dict = hf.extract_nwis_df(test_json, interpolate=False)
@@ -102,5 +125,46 @@ class TestCyclePlot(unittest.TestCase):
self.assertIsInstance(actual_fig, matplotlib.figure.Figure)
self.assertIsInstance(actual_ax[0], matplotlib.axes.Axes)
+ def test_charts_cycleplot_cycle_annual(self):
+ expected_df, expected_dict = hf.extract_nwis_df(test_json, interpolate=False)
+ actual_fig, actual_ax = charts.cycleplot(expected_df, 'annual')
+ self.assertIsInstance(actual_fig, matplotlib.figure.Figure)
+ self.assertIsInstance(actual_ax[0], matplotlib.axes.Axes)
+
+ def test_charts_cycleplot_cycle_annualdate(self):
+ expected_df, expected_dict = hf.extract_nwis_df(test_json, interpolate=False)
+ actual_fig, actual_ax = charts.cycleplot(expected_df, 'annual-date')
+ self.assertIsInstance(actual_fig, matplotlib.figure.Figure)
+ self.assertIsInstance(actual_ax[0], matplotlib.axes.Axes)
+
+ def test_charts_cycleplot_cycle_annualmonth(self):
+ expected_df, expected_dict = hf.extract_nwis_df(test_json, interpolate=False)
+ actual_fig, actual_ax = charts.cycleplot(expected_df, 'annual-month')
+ self.assertIsInstance(actual_fig, matplotlib.figure.Figure)
+ self.assertIsInstance(actual_ax[0], matplotlib.axes.Axes)
+
+ def test_charts_cycleplot_cycle_weekly(self):
+ expected_df, expected_dict = hf.extract_nwis_df(test_json, interpolate=False)
+ actual_fig, actual_ax = charts.cycleplot(expected_df, 'weekly')
+ self.assertIsInstance(actual_fig, matplotlib.figure.Figure)
+ self.assertIsInstance(actual_ax[0], matplotlib.axes.Axes)
+
+ def test_charts_cycleplot_cycle_diurnalsmallest(self):
+ expected_df, expected_dict = hf.extract_nwis_df(test_json, interpolate=False)
+ actual_fig, actual_ax = charts.cycleplot(expected_df, 'diurnal-smallest')
+ self.assertIsInstance(actual_fig, matplotlib.figure.Figure)
+ self.assertIsInstance(actual_ax[0], matplotlib.axes.Axes)
+
+ def test_charts_cycleplot_cycle_diurnalhour(self):
+ expected_df, expected_dict = hf.extract_nwis_df(test_json, interpolate=False)
+ actual_fig, actual_ax = charts.cycleplot(expected_df, 'diurnal-hour')
+ self.assertIsInstance(actual_fig, matplotlib.figure.Figure)
+ self.assertIsInstance(actual_ax[0], matplotlib.axes.Axes)
+
+ def test_charts_cycleplot_cycle_nonsense_raises_ValueError(self):
+ expected_df, expected_dict = hf.extract_nwis_df(test_json, interpolate=False)
+ with self.assertRaises(ValueError):
+ actual_fig, actual_ax = charts.cycleplot(expected_df, 'nonsense')
+
if __name__ == '__main__':
unittest.main(verbosity=2)
|
Add ability to modify the legend and titles
* HydroFunctions version: 0.1.8dev
* Python version: 3.7
### Description
HydroFunctions' built-in charts should have the ability to set a title or create a legend.
### Potential Solutions:
1) add new parameters `legend` and `title` (see the usage sketch after this list)
- `legend` default could be `False`; otherwise you could provide a value for legend.
- This approach might require a `legend_loc` parameter too.
- `title` could be set to `False` or text.
2) Use **kwargs and pass these on
3) What does Pandas do??
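A usage sketch of option 1 with the parameter names the patch above settles on (the dummy DataFrame is only for illustration):
```python
import pandas as pd
from hydrofunctions import charts

df = pd.DataFrame({'discharge': [1.0, 5.0, 2.0, 8.0, 3.0]})
fig, ax = charts.flow_duration(
    df, xscale='linear', legend=True, legend_loc='upper right',
    title='Example flow duration')
```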
|
0.0
|
bf1f296f4a2e31a0df9bd07860bbfa90e71e7890
|
[
"tests/test_charts.py::TestFlowDuration::test_charts_flowduration_accepts_params"
] |
[
"tests/test_charts.py::TestFlowDuration::test_charts_flowduration_defaults",
"tests/test_charts.py::TestFlowDuration::test_charts_flowduration_exists"
] |
{
"failed_lite_validators": [
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-11-28 03:59:33+00:00
|
mit
| 4,064 |
|
mscroggs__symfem-208
|
diff --git a/CHANGELOG_SINCE_LAST_VERSION.md b/CHANGELOG_SINCE_LAST_VERSION.md
index ab4a35a..80efcc1 100644
--- a/CHANGELOG_SINCE_LAST_VERSION.md
+++ b/CHANGELOG_SINCE_LAST_VERSION.md
@@ -1,3 +1,4 @@
- Added enriched Galerkin element
- Improved plotting
- Added enriched vector Galerkin element
+- Fixed bug in integration of piecewise functions
diff --git a/symfem/basis_functions.py b/symfem/basis_functions.py
index 1c7a053..6c0a7c5 100644
--- a/symfem/basis_functions.py
+++ b/symfem/basis_functions.py
@@ -10,7 +10,7 @@ import sympy
from .functions import AnyFunction, FunctionInput, ScalarFunction, SympyFormat, ValuesToSubstitute
from .geometry import PointType
from .references import Reference
-from .symbols import AxisVariables, AxisVariablesNotSingle, t
+from .symbols import AxisVariables, AxisVariablesNotSingle, t, x
class BasisFunction(AnyFunction):
@@ -282,17 +282,21 @@ class BasisFunction(AnyFunction):
"""
raise self.get_function().norm()
- def integral(self, domain: Reference, vars: AxisVariablesNotSingle = t) -> AnyFunction:
+ def integral(
+ self, domain: Reference, vars: AxisVariablesNotSingle = x,
+ dummy_vars: AxisVariablesNotSingle = t
+ ) -> ScalarFunction:
"""Compute the integral of the function.
Args:
- domain: The domain to integrate over
- vars: The variables to integrate over
+ domain: The domain of the integral
+ vars: The variables to integrate with respect to
+ dummy_vars: The dummy variables to use inside the integral
Returns:
The integral
"""
- return self.get_function().integral(domain, vars)
+ return self.get_function().integral(domain, vars, dummy_vars)
def subs(self, vars: AxisVariables, values: ValuesToSubstitute) -> BasisFunction:
"""Substitute values into the function.
diff --git a/symfem/functions.py b/symfem/functions.py
index d7f45ef..209b4b4 100644
--- a/symfem/functions.py
+++ b/symfem/functions.py
@@ -314,12 +314,16 @@ class AnyFunction(ABC):
pass
@abstractmethod
- def integral(self, domain: Reference, vars: AxisVariablesNotSingle = t):
+ def integral(
+ self, domain: Reference, vars: AxisVariablesNotSingle = x,
+ dummy_vars: AxisVariablesNotSingle = t
+ ) -> ScalarFunction:
"""Compute the integral of the function.
Args:
domain: The domain of the integral
vars: The variables to integrate with respect to
+ dummy_vars: The dummy variables to use inside the integral
Returns:
The integral
@@ -758,22 +762,27 @@ class ScalarFunction(AnyFunction):
"""
return ScalarFunction(abs(self._f))
- def integral(self, domain: Reference, vars: AxisVariablesNotSingle = t) -> ScalarFunction:
+ def integral(
+ self, domain: Reference, vars: AxisVariablesNotSingle = x,
+ dummy_vars: AxisVariablesNotSingle = t
+ ) -> ScalarFunction:
"""Compute the integral of the function.
Args:
domain: The domain of the integral
vars: The variables to integrate with respect to
+ dummy_vars: The dummy variables to use inside the integral
Returns:
The integral
"""
- limits = domain.integration_limits(vars)
-
+ limits = domain.integration_limits(dummy_vars)
point = VectorFunction(domain.origin)
- for ti, a in zip(t, domain.axes):
+ for ti, a in zip(dummy_vars, domain.axes):
point += ti * VectorFunction(a)
- out = self._f.subs(x, point)
+ out = self._f * 1
+ for v, p in zip(vars, point):
+ out = out.subs(v, p)
if len(limits[0]) == 2:
for i in limits:
@@ -1159,12 +1168,16 @@ class VectorFunction(AnyFunction):
a += i._f ** 2
return ScalarFunction(sympy.sqrt(a))
- def integral(self, domain: Reference, vars: AxisVariablesNotSingle = t):
+ def integral(
+ self, domain: Reference, vars: AxisVariablesNotSingle = x,
+ dummy_vars: AxisVariablesNotSingle = t
+ ) -> ScalarFunction:
"""Compute the integral of the function.
Args:
domain: The domain of the integral
vars: The variables to integrate with respect to
+ dummy_vars: The dummy variables to use inside the integral
Returns:
The integral
@@ -1587,12 +1600,16 @@ class MatrixFunction(AnyFunction):
"""
raise NotImplementedError()
- def integral(self, domain: Reference, vars: AxisVariablesNotSingle = t):
+ def integral(
+ self, domain: Reference, vars: AxisVariablesNotSingle = x,
+ dummy_vars: AxisVariablesNotSingle = t
+ ) -> ScalarFunction:
"""Compute the integral of the function.
Args:
domain: The domain of the integral
vars: The variables to integrate with respect to
+ dummy_vars: The dummy variables to use inside the integral
Returns:
The integral
diff --git a/symfem/piecewise_functions.py b/symfem/piecewise_functions.py
index c714e04..ac8ddb2 100644
--- a/symfem/piecewise_functions.py
+++ b/symfem/piecewise_functions.py
@@ -6,8 +6,8 @@ import typing
import sympy
-from .functions import (AnyFunction, FunctionInput, SympyFormat, ValuesToSubstitute, VectorFunction,
- _to_sympy_format, parse_function_input)
+from .functions import (AnyFunction, FunctionInput, ScalarFunction, SympyFormat, ValuesToSubstitute,
+ VectorFunction, _to_sympy_format, parse_function_input)
from .geometry import (PointType, SetOfPoints, SetOfPointsInput, parse_set_of_points_input,
point_in_quadrilateral, point_in_tetrahedron, point_in_triangle)
from .references import Reference
@@ -422,20 +422,27 @@ class PiecewiseFunction(AnyFunction):
return PiecewiseFunction(
{shape: f.norm() for shape, f in self._pieces.items()}, self.tdim)
- def integral(self, domain: Reference, vars: AxisVariablesNotSingle = t) -> AnyFunction:
+ def integral(
+ self, domain: Reference, vars: AxisVariablesNotSingle = x,
+ dummy_vars: AxisVariablesNotSingle = t
+ ) -> ScalarFunction:
"""Compute the integral of the function.
Args:
domain: The domain of the integral
vars: The variables to integrate with respect to
+ dummy_vars: The dummy variables to use inside the integral
Returns:
The integral
"""
- # TODO: Add check that the domain is a subset of one piece
- # TODO: Add integral over multiple pieces
- p = self.get_piece(domain.midpoint())
- return p.integral(domain, vars)
+ result = ScalarFunction(0)
+ for shape, f in self._pieces.items():
+ ref = _piece_reference(self.tdim, shape)
+ sub_domain = ref.intersection(domain)
+ if sub_domain is not None:
+ result += f.integral(sub_domain, vars, dummy_vars)
+ return result
def det(self) -> PiecewiseFunction:
"""Compute the determinant.
@@ -500,25 +507,11 @@ class PiecewiseFunction(AnyFunction):
value_scale: The scale factor for the function values
n: The number of points per side for plotting
"""
- from .create import create_reference
from .plotting import Picture
assert isinstance(img, Picture)
for shape, f in self._pieces.items():
- if self.tdim == 2:
- if len(shape) == 3:
- ref = create_reference("triangle", shape)
- elif len(shape) == 4:
- ref = create_reference("quadrilateral", shape)
- else:
- raise ValueError("Unsupported cell type")
- elif self.tdim == 3:
- if len(shape) == 4:
- ref = create_reference("tetrahedron", shape)
- else:
- raise ValueError("Unsupported cell type")
- else:
- raise ValueError("Unsupported tdim")
+ ref = _piece_reference(self.tdim, shape)
f.plot_values(ref, img, value_scale, n // 2)
def with_floats(self) -> AnyFunction:
@@ -529,3 +522,22 @@ class PiecewiseFunction(AnyFunction):
"""
return PiecewiseFunction(
{shape: f.with_floats() for shape, f in self._pieces.items()}, self.tdim)
+
+
+def _piece_reference(tdim, shape):
+ """Create a reference element for a single piece."""
+ from .create import create_reference
+ if tdim == 2:
+ if len(shape) == 3:
+ return create_reference("triangle", shape)
+ elif len(shape) == 4:
+ return create_reference("quadrilateral", shape)
+ else:
+ raise ValueError("Unsupported cell type")
+ elif tdim == 3:
+ if len(shape) == 4:
+ return create_reference("tetrahedron", shape)
+ else:
+ raise ValueError("Unsupported cell type")
+ else:
+ raise ValueError("Unsupported tdim")
diff --git a/symfem/polynomials.py b/symfem/polynomials.py
index bf6d99b..b6fef54 100644
--- a/symfem/polynomials.py
+++ b/symfem/polynomials.py
@@ -1076,7 +1076,7 @@ def orthonormal_basis(
ref = create_reference(cell)
if variables is None:
variables = x
- norms = [sympy.sqrt((f ** 2).integral(ref, variables)) for f in poly[0]]
+ norms = [sympy.sqrt((f ** 2).integral(ref, dummy_vars=variables)) for f in poly[0]]
for i, n in enumerate(norms):
for j in range(len(poly)):
poly[j][i] /= n
diff --git a/symfem/references.py b/symfem/references.py
index bfa2a3a..3d70d22 100644
--- a/symfem/references.py
+++ b/symfem/references.py
@@ -17,6 +17,49 @@ IntLimits = typing.List[typing.Union[
typing.Tuple[sympy.core.symbol.Symbol, sympy.core.expr.Expr]]]
+def _which_side(vs: SetOfPoints, p: PointType, q: PointType) -> typing.Optional[int]:
+ """Check which side of a line or plane a set of points are.
+
+ Args:
+ vs: The set of points
+ p: A point on the line or plane
+ q: Another point on the line (2D) or the normal to the plane (3D)
+
+ Returns:
+ 2 if the points are all to the left, 1 if the points are all to the left or on the line,
+ 0 if the points are all on the line, -1 if the points are all to the right or on the line,
+ -2 if the points are all to the right, None if there are some points on either side.
+ """
+ sides = []
+ for v in vs:
+ if len(q) == 2:
+ cross = (v[0] - p[0]) * (q[1] - p[1]) - (v[1] - p[1]) * (q[0] - p[0])
+ elif len(q) == 3:
+ cross = (v[0] - p[0]) * q[0] + (v[1] - p[1]) * q[1] + (v[2] - p[2]) * q[2]
+ else:
+ return None
+ if cross == 0:
+ sides.append(0)
+ elif cross > 0:
+ sides.append(1)
+ else:
+ sides.append(-1)
+
+ if -1 in sides and 1 in sides:
+ return None
+ if 1 in sides:
+ if 0 in sides:
+ return 1
+ else:
+ return 2
+ if -1 in sides:
+ if 0 in sides:
+ return -1
+ else:
+ return -2
+ return 0
+
+
def _vsub(v: PointTypeInput, w: PointTypeInput) -> PointType:
"""Subtract.
@@ -155,6 +198,45 @@ class Reference(ABC):
"""
return self.vertices
+ def intersection(self, other: Reference) -> typing.Optional[Reference]:
+ """Get the intersection of two references.
+
+ Returns:
+ A reference element that is the intersection
+ """
+ if self.gdim != other.gdim:
+ raise ValueError("Incompatible cell dimensions")
+
+ for cell1, cell2 in [(self, other), (other, self)]:
+ try:
+ for v in cell1.vertices:
+ if not cell2.contains(v):
+ break
+ else:
+ return cell1
+ except NotImplementedError:
+ pass
+ for cell1, cell2 in [(self, other), (other, self)]:
+ if cell1.gdim == 2:
+ for e in cell1.edges:
+ p = cell1.vertices[e[0]]
+ q = cell1.vertices[e[1]]
+ dir1 = _which_side(cell1.vertices, p, q)
+ dir2 = _which_side(cell2.vertices, p, q)
+ if dir1 is not None and dir2 is not None and dir1 * dir2 < 0:
+ return None
+ if cell1.gdim == 3:
+ for i in range(cell1.sub_entity_count(2)):
+ face = cell1.sub_entity(2, i)
+ p = face.midpoint()
+ n = face.normal()
+ dir1 = _which_side(cell1.vertices, p, n)
+ dir2 = _which_side(cell2.vertices, p, n)
+ if dir1 is not None and dir2 is not None and dir1 * dir2 < 0:
+ return None
+
+ raise NotImplementedError("Intersection of these elements is not yet supported")
+
@abstractmethod
def default_reference(self) -> Reference:
"""Get the default reference for this cell type.
@@ -1028,9 +1110,17 @@ class Triangle(Reference):
Returns:
Is the point contained in the reference?
"""
- if self.vertices != self.reference_vertices:
- raise NotImplementedError()
- return 0 <= point[0] and 0 <= point[1] and sum(point) <= 1
+ if self.vertices == self.reference_vertices:
+ return 0 <= point[0] and 0 <= point[1] and sum(point) <= 1
+ elif self.gdim == 2:
+ po = _vsub(point, self.origin)
+ det = self.axes[0][0] * self.axes[1][1] - self.axes[0][1] * self.axes[1][0]
+ t0 = (self.axes[1][1] * po[0] - self.axes[1][0] * po[1]) / det
+ t1 = (self.axes[0][0] * po[1] - self.axes[0][1] * po[0]) / det
+ print(self.origin, self.axes, point)
+ print(t0, t1)
+ return 0 <= t0 and 0 <= t1 and t0 + t1 <= 1
+ raise NotImplementedError()
class Tetrahedron(Reference):
@@ -1203,9 +1293,15 @@ class Tetrahedron(Reference):
Returns:
Is the point contained in the reference?
"""
- if self.vertices != self.reference_vertices:
- raise NotImplementedError()
- return 0 <= point[0] and 0 <= point[1] and 0 <= point[2] and sum(point) <= 1
+ if self.vertices == self.reference_vertices:
+ return 0 <= point[0] and 0 <= point[1] and 0 <= point[2] and sum(point) <= 1
+ else:
+ po = _vsub(point, self.origin)
+ minv = sympy.Matrix([[a[i] for a in self.axes] for i in range(3)]).inv()
+ t0 = (minv[0, 0] * po[0] + minv[0, 1] * po[1] + minv[0, 2] * po[2])
+ t1 = (minv[1, 0] * po[0] + minv[1, 1] * po[1] + minv[1, 2] * po[2])
+ t2 = (minv[2, 0] * po[0] + minv[2, 1] * po[1] + minv[2, 2] * po[2])
+ return 0 <= t0 and 0 <= t1 and 0 >= t2 and t0 + t1 + t2 <= 1
class Quadrilateral(Reference):
|
mscroggs/symfem
|
43fbadd94cbb4698058997fd8cccb1f76ee42bd7
|
diff --git a/test/test_hct.py b/test/test_hct.py
index fd38eb7..eeadde8 100644
--- a/test/test_hct.py
+++ b/test/test_hct.py
@@ -78,3 +78,20 @@ def test_rhct():
assert f1.diff(x[0]).diff(x[0]) == 0
assert f2.diff(x[0]).diff(x[0]) == 0
assert f3.diff(x[1]).diff(x[1]) == 0
+
+
+def test_rhct_integral():
+ element = symfem.create_element("triangle", "rHCT", 3)
+ ref = element.reference
+ f1 = element.get_basis_function(1).directional_derivative((1, 0))
+ f2 = element.get_basis_function(6).directional_derivative((1, 0))
+ integrand = f1 * f2
+
+ third = sympy.Rational(1, 3)
+ expr = (f1*f2).pieces[((0, 1), (0, 0), (third, third))].as_sympy()
+ assert len((f1*f2).pieces) == 3
+ assert (f1*f2).pieces[((0, 0), (1, 0), (third, third))] == 0
+ assert (f1*f2).pieces[((1, 0), (0, 1), (third, third))] == 0
+
+ assert sympy.integrate(sympy.integrate(
+ expr, (x[1], x[0], 1 - 2 * x[0])), (x[0], 0, third)) == integrand.integral(ref, x)
|
Fix rHCT element
**Describe the bug**
The rHCT element seems to be wrong once integrated. I noticed it when I tried to build the stiffness matrix for the Poisson equation using these elements.
It is clear when one tries to integrate it using symfem or sympy.
**To Reproduce**
Minimal code to reproduce the behavior:
```python
import symfem
from symfem.symbols import x
from sympy import Symbol, integrate, Rational
#Compute integral using symfem
element = symfem.create_element("triangle", "rHCT", 3)
ref = element.reference
f1 = element.get_basis_function(1).directional_derivative((1,0))
f2 = element.get_basis_function(6).directional_derivative((1,0))
integrand = f1*f2
#f1*f2 is 0 everywhere, except on triangle ((0, 1), (0, 0), (1/3, 1/3))
#where it equals (42*x**2 - 24*x*y - 6*x)*(18*x**2 + 6*x*y - 10*x - y + 1)
print(integrand.integral(ref, x)) #=0
#Compute it using sympy
z = Symbol("x")
y = Symbol("y")
expr = (42*z**2 - 24*z*y - 6*z)*(18*z**2 + 6*z*y - 10*z - y + 1) #Copied from what is given by symfem
int_expr = integrate(expr, (y, z, 1-2*z)) #parametrization of triangle ((0, 1), (0, 0), (1/3, 1/3)) : x<y<1-2*x
print(integrate(int_expr, (z,0, Rational(1,3))))#=1/108
```
**Expected behavior**
The integral should be correct in this example
**Screenshots**
Not applicable.
**Additional context**
No other context needed (I guess?)
|
0.0
|
43fbadd94cbb4698058997fd8cccb1f76ee42bd7
|
[
"test/test_hct.py::test_rhct_integral"
] |
[
"test/test_hct.py::test_hct",
"test/test_hct.py::test_rhct"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-04-26 09:14:32+00:00
|
mit
| 4,065 |
|
mscroggs__symfem-212
|
diff --git a/++version.py b/++version.py
index 6a1f942..c1a2906 100644
--- a/++version.py
+++ b/++version.py
@@ -20,10 +20,16 @@ with open("VERSION") as f:
now = datetime.now()
if now.year == version[0] and now.month == version[1]:
- new_version = (now.year, now.month, version[2] + 1)
+ if len(version) == 2:
+ new_version = (now.year, now.month, 1)
+ else:
+ new_version = (now.year, now.month, version[2] + 1)
else:
- new_version = (now.year, now.month, 1)
-new_version_str = ".".join([f"{i}" for i in new_version])
+ new_version = (now.year, now.month, 0)
+if len(new_version) == 2:
+ new_version_str = f"{new_version[0]}.{new_version[1]}"
+else:
+ new_version_str = f"{new_version[0]}.{new_version[1]}.{new_version[2]}"
# VERSION file
with open("VERSION", "w") as f:
diff --git a/CHANGELOG_SINCE_LAST_VERSION.md b/CHANGELOG_SINCE_LAST_VERSION.md
index e69de29..75d7d2c 100644
--- a/CHANGELOG_SINCE_LAST_VERSION.md
+++ b/CHANGELOG_SINCE_LAST_VERSION.md
@@ -0,0 +1,1 @@
+- Corrected HCT and rHCT elements
diff --git a/symfem/elements/hct.py b/symfem/elements/hct.py
index b184e08..17218b5 100644
--- a/symfem/elements/hct.py
+++ b/symfem/elements/hct.py
@@ -8,14 +8,13 @@ import typing
import sympy
-from ..basis_functions import BasisFunction
from ..finite_element import CiarletElement
from ..functionals import (DerivativePointEvaluation, ListOfFunctionals, PointEvaluation,
PointNormalDerivativeEvaluation)
-from ..functions import FunctionInput, VectorFunction
+from ..functions import FunctionInput, ScalarFunction
from ..piecewise_functions import PiecewiseFunction
from ..references import Reference
-from .hermite import Hermite
+from ..symbols import x
class HsiehCloughTocher(CiarletElement):
@@ -28,7 +27,6 @@ class HsiehCloughTocher(CiarletElement):
reference: The reference element
order: The polynomial order
"""
- from symfem import create_reference
assert order == 3
assert reference.name == "triangle"
dofs: ListOfFunctionals = []
@@ -48,52 +46,25 @@ class HsiehCloughTocher(CiarletElement):
(reference.vertices[1], reference.vertices[2], mid),
(reference.vertices[2], reference.vertices[0], mid)]
- refs = [create_reference("triangle", vs) for vs in subs]
-
- hermite_spaces = [Hermite(ref, 3) for ref in refs]
-
- piece_list: typing.List[typing.Tuple[typing.Union[int, BasisFunction], ...]] = []
- piece_list.append((hermite_spaces[0].get_basis_function(0), 0,
- hermite_spaces[2].get_basis_function(3)))
- piece_list.append((hermite_spaces[0].get_basis_function(1), 0,
- hermite_spaces[2].get_basis_function(4)))
- piece_list.append((hermite_spaces[0].get_basis_function(2), 0,
- hermite_spaces[2].get_basis_function(5)))
- piece_list.append((hermite_spaces[0].get_basis_function(3),
- hermite_spaces[1].get_basis_function(0), 0))
- piece_list.append((hermite_spaces[0].get_basis_function(4),
- hermite_spaces[1].get_basis_function(1), 0))
- piece_list.append((hermite_spaces[0].get_basis_function(5),
- hermite_spaces[1].get_basis_function(2), 0))
- piece_list.append((hermite_spaces[0].get_basis_function(6),
- hermite_spaces[1].get_basis_function(6),
- hermite_spaces[2].get_basis_function(6)))
- piece_list.append((hermite_spaces[0].get_basis_function(7),
- hermite_spaces[1].get_basis_function(7),
- hermite_spaces[2].get_basis_function(7)))
- piece_list.append((hermite_spaces[0].get_basis_function(8),
- hermite_spaces[1].get_basis_function(8),
- hermite_spaces[2].get_basis_function(8)))
- piece_list.append((0, hermite_spaces[1].get_basis_function(3),
- hermite_spaces[2].get_basis_function(0)))
- piece_list.append((0, hermite_spaces[1].get_basis_function(4),
- hermite_spaces[2].get_basis_function(1)))
- piece_list.append((0, hermite_spaces[1].get_basis_function(5),
- hermite_spaces[2].get_basis_function(2)))
-
- # TODO: are these right to remove??
- # piece_list.append((hermite_spaces[0].get_basis_function(9), 0, 0))
- # piece_list.append((0, hermite_spaces[1].get_basis_function(9), 0))
- # piece_list.append((0, 0, hermite_spaces[2].get_basis_function(9)))
-
- piece_list2: typing.List[VectorFunction] = []
- for i in piece_list:
- piece_list2.append(VectorFunction(i))
+ piece_list = [tuple(ScalarFunction(p) for _ in range(3))
+ for p in [1, x[0], x[1], x[0]**2, x[0]*x[1], x[1]**2,
+ x[0]**3, x[0]**2*x[1], x[0]*x[1]**2, x[1]**3]]
+ piece_list.append((
+ ScalarFunction(-23*x[0]**3 + 24*x[0]**2*x[1] - 12*x[0]*x[1]**2 + 36*x[1]**2),
+ ScalarFunction(
+ -28*x[0]**3 + 12*x[0]**2*x[1] + 9*x[0]**2 - 3*x[0] + 32*x[1]**3 + 12*x[1] - 1),
+ ScalarFunction(-15*x[0]**2 - 33*x[0]*x[1]**2 + 30*x[0]*x[1] + 22*x[1]**3 + 21*x[1]**2)))
+ piece_list.append((
+ ScalarFunction(
+ 22*x[0]**3 - 21*x[0]**2*x[1] - 12*x[0]*x[1]**2 + 30*x[0]*x[1] - 24*x[1]**2),
+ ScalarFunction(
+ 32*x[0]**3 + 12*x[0]**2*x[1] - 21*x[0]**2 + 12*x[0] - 28*x[1]**3 - 3*x[1] - 1),
+ ScalarFunction(15*x[0]**2 + 12*x[0]*x[1]**2 - 23*x[1]**3 - 9*x[1]**2)))
poly: typing.List[FunctionInput] = []
poly += [
PiecewiseFunction({i: j for i, j in zip(subs, p)}, 2)
- for p in piece_list2]
+ for p in piece_list]
super().__init__(
reference, order, poly, dofs, reference.tdim, 1
diff --git a/symfem/elements/rhct.py b/symfem/elements/rhct.py
index 395cd71..bddbfa3 100644
--- a/symfem/elements/rhct.py
+++ b/symfem/elements/rhct.py
@@ -10,47 +10,12 @@ import sympy
from ..finite_element import CiarletElement
from ..functionals import DerivativePointEvaluation, ListOfFunctionals, PointEvaluation
-from ..functions import FunctionInput
+from ..functions import FunctionInput, ScalarFunction
from ..piecewise_functions import PiecewiseFunction
from ..references import Reference
from ..symbols import x
-class P1Hermite(CiarletElement):
- """P1Hermite finite element."""
-
- def __init__(self, reference: Reference, order: int, poly: typing.List[FunctionInput]):
- """Create the element.
-
- Args:
- reference: The reference element
- order: The polynomial order
- poly: The polynomial basis
- """
- assert order == 3
- dofs: ListOfFunctionals = []
- for v_n, vs in enumerate(reference.vertices):
- dofs.append(PointEvaluation(reference, vs, entity=(0, v_n)))
- dofs.append(DerivativePointEvaluation(reference, vs, (1, 0), entity=(0, v_n)))
- dofs.append(DerivativePointEvaluation(reference, vs, (0, 1), entity=(0, v_n)))
-
- super().__init__(reference, order, poly, dofs, reference.tdim, 1)
-
- def init_kwargs(self) -> typing.Dict[str, typing.Any]:
- """Return the kwargs used to create this element.
-
- Returns:
- Keyword argument dictionary
- """
- return {"poly": self._basis}
-
- names: typing.List[str] = []
- references = ["triangle"]
- min_order = 3
- max_order = 3
- continuity = "C0"
-
-
class ReducedHsiehCloughTocher(CiarletElement):
"""Reduced Hsieh-Clough-Tocher finite element."""
@@ -61,7 +26,6 @@ class ReducedHsiehCloughTocher(CiarletElement):
reference: The reference element
order: The polynomial order
"""
- from symfem import create_reference
assert order == 3
assert reference.name == "triangle"
dofs: ListOfFunctionals = []
@@ -77,48 +41,24 @@ class ReducedHsiehCloughTocher(CiarletElement):
(reference.vertices[1], reference.vertices[2], mid),
(reference.vertices[2], reference.vertices[0], mid)]
- refs = [create_reference("triangle", vs) for vs in subs]
-
- polys: typing.List[typing.List[FunctionInput]] = [[], [], []]
- for i in range(order + 1):
- for j in range(order + 1 - i):
- if i != 2 or j != 1:
- polys[0].append(x[0] ** i * x[1] ** j)
- polys[1] += [1, x[0], x[0] ** 2, x[1], x[0] * x[1], x[1] ** 2,
- x[0] * x[1] ** 2 - x[0] ** 2 * x[1],
- x[0] ** 3 - x[1] ** 3, x[0] ** 3 + 3 * x[0] * x[1] ** 2]
- for i in range(order + 1):
- for j in range(order + 1 - i):
- if i != 1 or j != 2:
- polys[2].append(x[0] ** i * x[1] ** j)
-
- bases = []
- for r, p in zip(refs, polys):
- bf = []
- for f in P1Hermite(r, 3, p).get_basis_functions():
- bf.append(f)
- bases.append(bf)
-
- piece_list: typing.List[typing.Tuple[FunctionInput, ...]] = []
- piece_list.append((bases[0][0], 0, bases[2][3]))
- piece_list.append((bases[0][1], 0, bases[2][4]))
- piece_list.append((bases[0][2], 0, bases[2][5]))
- piece_list.append((bases[0][3], bases[1][0], 0))
- piece_list.append((bases[0][4], bases[1][1], 0))
- piece_list.append((bases[0][5], bases[1][2], 0))
- # TODO: are these right to remove??
- # piece_list.append((bases[0][6], bases[1][6], bases[2][6]))
- # piece_list.append((bases[0][7], bases[1][7], bases[2][7]))
- # piece_list.append((bases[0][8], bases[1][8], bases[2][8]))
- piece_list.append((0, bases[1][3], bases[2][0]))
- piece_list.append((0, bases[1][4], bases[2][1]))
- piece_list.append((0, bases[1][5], bases[2][2]))
+ piece_list = [tuple(ScalarFunction(p) for _ in range(3))
+ for p in [1, x[0], x[1], x[0]**2, x[0]*x[1], x[1]**2,
+ x[0]**3 - x[1]**3]]
+ piece_list.append((
+ ScalarFunction(4*x[0]**3 - 3*x[0]*x[1]**2 + 2*x[0]*x[1] + 4*x[1]**2),
+ ScalarFunction(7*x[0]**3 + 12*x[0]**2*x[1] - 7*x[0]**2 + 9*x[0]*x[1]**2
+ - 14*x[0]*x[1] + 5*x[0] + 4*x[1] - 1),
+ ScalarFunction(3*x[0]**3 + x[0]**2 - 2*x[1]**3 + 5*x[1]**2)))
+ piece_list.append((
+ ScalarFunction(25*x[0]**3 - 24*x[0]*x[1]**2 + 30*x[0]*x[1] - 24*x[1]**2),
+ ScalarFunction(35*x[0]**3 + 33*x[0]**2*x[1] - 21*x[0]**2 - 12*x[0]*x[1]**2
+ + 12*x[0] - 28*x[1]**3 - 3*x[1] - 1),
+ ScalarFunction(3*x[0]**3 + 21*x[0]**2*x[1] + 15*x[0]**2 - 23*x[1]**3 - 9*x[1]**2)))
poly: typing.List[FunctionInput] = []
poly += [
PiecewiseFunction({i: j for i, j in zip(subs, p)}, 2)
- for p in piece_list
- ]
+ for p in piece_list]
super().__init__(
reference, order, poly, dofs, reference.tdim, 1
diff --git a/symfem/elements/vector_enriched_galerkin.py b/symfem/elements/vector_enriched_galerkin.py
index d7f5bf7..0bf3110 100644
--- a/symfem/elements/vector_enriched_galerkin.py
+++ b/symfem/elements/vector_enriched_galerkin.py
@@ -32,7 +32,7 @@ class Enrichment(CiarletElement):
super().__init__(reference, 1, poly, dofs, reference.tdim, reference.tdim)
- names = []
+ names: typing.List[str] = []
references = ["triangle", "quadrilateral", "tetrahedron", "hexahedron"]
min_order = 1
max_order = 1
|
mscroggs/symfem
|
47ae563072bc327fe0bb71e27086aef458cf0832
|
diff --git a/test/test_hct.py b/test/test_hct.py
index eeadde8..58009b5 100644
--- a/test/test_hct.py
+++ b/test/test_hct.py
@@ -1,80 +1,69 @@
"""Test Hsieh-Clough-Tocher elements."""
+import pytest
import sympy
import symfem
+from symfem.functions import ScalarFunction
from symfem.symbols import t, x
from symfem.utils import allequal
half = sympy.Rational(1, 2)
-def test_hct():
- e = symfem.create_element("triangle", "HCT", 3)
+@pytest.mark.parametrize("family", ["HCT", "rHCT"])
+def test_c1_continuity(family):
+ e = symfem.create_element("triangle", family, 3)
for f in e.get_polynomial_basis():
# edge from (1,0) to (1/3,1/3)
f1 = f.get_piece((half, 0))
f2 = f.get_piece((half, half))
+ grad_f1 = f1.grad(2)
+ grad_f2 = f2.grad(2)
line = ((1 - 2 * t[0], t[0]))
f1 = f1.subs(x[:2], line)
f2 = f2.subs(x[:2], line)
+ grad_f1 = grad_f1.subs(x[:2], line)
+ grad_f2 = grad_f2.subs(x[:2], line)
assert allequal(f1, f2)
- assert allequal(f1.grad(2), f2.grad(2))
+ assert allequal(grad_f1, grad_f2)
# edge from (0,1) to (1/3,1/3)
f1 = f.get_piece((half, half))
f2 = f.get_piece((0, half))
+ grad_f1 = f1.grad(2)
+ grad_f2 = f2.grad(2)
line = ((t[0], 1 - 2 * t[0]))
f1 = f1.subs(x[:2], line)
f2 = f2.subs(x[:2], line)
+ grad_f1 = grad_f1.subs(x[:2], line)
+ grad_f2 = grad_f2.subs(x[:2], line)
assert allequal(f1, f2)
- assert allequal(f1.grad(2), f2.grad(2))
+ assert allequal(grad_f1, grad_f2)
# edge from (0,0) to (1/3,1/3)
f1 = f.get_piece((0, half))
f2 = f.get_piece((half, 0))
+ grad_f1 = f1.grad(2)
+ grad_f2 = f2.grad(2)
line = ((t[0], t[0]))
f1 = f1.subs(x[:2], line)
f2 = f2.subs(x[:2], line)
+ grad_f1 = grad_f1.subs(x[:2], line)
+ grad_f2 = grad_f2.subs(x[:2], line)
assert allequal(f1, f2)
- assert allequal(f1.grad(2), f2.grad(2))
+ assert allequal(grad_f1, grad_f2)
-def test_rhct():
+def test_rcht_linear_normal_derivatices():
e = symfem.create_element("triangle", "rHCT", 3)
for f in e.get_polynomial_basis():
- # edge from (1,0) to (1/3,1/3)
- f1 = f.get_piece((half, 0))
- f2 = f.get_piece((half, half))
- line = ((1 - 2 * t[0], t[0]))
- f1 = f1.subs(x[:2], line)
- f2 = f2.subs(x[:2], line)
- assert allequal(f1, f2)
- assert allequal(f1.grad(2), f2.grad(2))
-
- # edge from (0,1) to (1/3,1/3)
- f1 = f.get_piece((half, half))
- f2 = f.get_piece((0, half))
- line = ((t[0], 1 - 2 * t[0]))
- f1 = f1.subs(x[:2], line)
- f2 = f2.subs(x[:2], line)
- assert allequal(f1, f2)
- assert allequal(f1.grad(2), f2.grad(2))
-
- # edge from (0,0) to (1/3,1/3)
- f1 = f.get_piece((0, half))
- f2 = f.get_piece((half, 0))
- line = ((t[0], t[0]))
- f1 = f1.subs(x[:2], line)
- f2 = f2.subs(x[:2], line)
- assert allequal(f1, f2)
- assert allequal(f1.grad(2), f2.grad(2))
-
# Check that normal derivatives are linear
f1 = f.get_piece((half, 0)).diff(x[1]).subs(x[1], 0)
f2 = f.get_piece((half, half))
f2 = (f2.diff(x[0]) + f2.diff(x[1])).subs(x[1], 1 - x[0])
f3 = f.get_piece((0, half)).diff(x[0]).subs(x[0], 0)
+ print(f)
assert f1.diff(x[0]).diff(x[0]) == 0
assert f2.diff(x[0]).diff(x[0]) == 0
assert f3.diff(x[1]).diff(x[1]) == 0
@@ -85,13 +74,14 @@ def test_rhct_integral():
ref = element.reference
f1 = element.get_basis_function(1).directional_derivative((1, 0))
f2 = element.get_basis_function(6).directional_derivative((1, 0))
- integrand = f1 * f2
+ integrand = f1 * f2
third = sympy.Rational(1, 3)
- expr = (f1*f2).pieces[((0, 1), (0, 0), (third, third))].as_sympy()
- assert len((f1*f2).pieces) == 3
- assert (f1*f2).pieces[((0, 0), (1, 0), (third, third))] == 0
- assert (f1*f2).pieces[((1, 0), (0, 1), (third, third))] == 0
+ integrand.pieces[((0, 0), (1, 0), (third, third))] = ScalarFunction(0)
+ integrand.pieces[((1, 0), (0, 1), (third, third))] = ScalarFunction(0)
+
+ expr = integrand.pieces[((0, 1), (0, 0), (third, third))].as_sympy()
+ assert len(integrand.pieces) == 3
assert sympy.integrate(sympy.integrate(
expr, (x[1], x[0], 1 - 2 * x[0])), (x[0], 0, third)) == integrand.integral(ref, x)
|
HCT element is incorrect
**Describe the bug**
The basis functions should be C1 continuous inside the cell. They currently are not.
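A minimal check of that property (a sketch mirroring the updated tests above; the `get_piece`, `grad`, and `allequal` usage is taken from them):
```python
import sympy
import symfem
from symfem.symbols import t, x
from symfem.utils import allequal

half = sympy.Rational(1, 2)
e = symfem.create_element("triangle", "HCT", 3)
for f in e.get_polynomial_basis():
    # pieces on either side of the internal edge from (1, 0) to (1/3, 1/3)
    f1 = f.get_piece((half, 0))
    f2 = f.get_piece((half, half))
    grad_f1, grad_f2 = f1.grad(2), f2.grad(2)
    line = (1 - 2 * t[0], t[0])  # parametrise the shared edge
    assert allequal(f1.subs(x[:2], line), f2.subs(x[:2], line))
    assert allequal(grad_f1.subs(x[:2], line), grad_f2.subs(x[:2], line))
```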
|
0.0
|
47ae563072bc327fe0bb71e27086aef458cf0832
|
[
"test/test_hct.py::test_c1_continuity[HCT]",
"test/test_hct.py::test_c1_continuity[rHCT]"
] |
[
"test/test_hct.py::test_rcht_linear_normal_derivatices",
"test/test_hct.py::test_rhct_integral"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-05-21 09:08:33+00:00
|
mit
| 4,066 |
|
mscroggs__symfem-229
|
diff --git a/CHANGELOG_SINCE_LAST_VERSION.md b/CHANGELOG_SINCE_LAST_VERSION.md
index 1ccf031..74e4f28 100644
--- a/CHANGELOG_SINCE_LAST_VERSION.md
+++ b/CHANGELOG_SINCE_LAST_VERSION.md
@@ -4,3 +4,4 @@
- Added Alfeld-Sorokina element
- Corrected C1 and higher order C tests
- Allow element creation on non-default references
+- Corrected Bell element
diff --git a/symfem/elements/bell.py b/symfem/elements/bell.py
index 11c91b9..6c4bba8 100644
--- a/symfem/elements/bell.py
+++ b/symfem/elements/bell.py
@@ -6,13 +6,11 @@ This element's definition is given in https://doi.org/10.1002/nme.1620010108 (Be
import typing
from ..finite_element import CiarletElement
-from ..functionals import (DerivativePointEvaluation, ListOfFunctionals,
- NormalDerivativeIntegralMoment, PointEvaluation)
+from ..functionals import DerivativePointEvaluation, ListOfFunctionals, PointEvaluation
from ..functions import FunctionInput
-from ..moments import make_integral_moment_dofs
from ..polynomials import polynomial_set_1d
from ..references import Reference
-from .lagrange import Lagrange
+from ..symbols import x
class Bell(CiarletElement):
@@ -36,14 +34,13 @@ class Bell(CiarletElement):
dofs.append(DerivativePointEvaluation(reference, v, (2, 0), entity=(0, v_n)))
dofs.append(DerivativePointEvaluation(reference, v, (1, 1), entity=(0, v_n)))
dofs.append(DerivativePointEvaluation(reference, v, (0, 2), entity=(0, v_n)))
- dofs += make_integral_moment_dofs(
- reference,
- edges=(NormalDerivativeIntegralMoment, Lagrange, 0, {"variant": variant}),
- )
self.variant = variant
poly: typing.List[FunctionInput] = []
- poly += polynomial_set_1d(reference.tdim, order)
+ poly += polynomial_set_1d(reference.tdim, 4)
+ poly.append(x[0]**5 - x[1]**5)
+ poly.append(x[0]**3*x[1]**2 - x[0]**2*x[1]**3)
+ poly.append(5*x[0]**2*x[1]**3 - x[0]**5)
super().__init__(reference, order, poly, dofs, reference.tdim, 1)
|
mscroggs/symfem
|
9c47d528ef4c1d08f9520ae0a8d946ced92bf0dd
|
diff --git a/test/test_bell.py b/test/test_bell.py
new file mode 100644
index 0000000..ced4516
--- /dev/null
+++ b/test/test_bell.py
@@ -0,0 +1,18 @@
+"""Test Bell elements."""
+
+import symfem
+from symfem.symbols import t, x
+
+
+def test_bell_polyset():
+ b = symfem.create_element("triangle", "Bell", 5)
+ for p in b.get_polynomial_basis():
+ gradp = [p.diff(x[0]), p.diff(x[1])]
+ for en in range(b.reference.sub_entity_count(1)):
+ edge = b.reference.sub_entity(1, en)
+ variables = [o + sum(a[i] * t[0] for a in edge.axes)
+ for i, o in enumerate(edge.origin)]
+ n = edge.normal()
+ normal_deriv = gradp[0] * n[0] + gradp[1] * n[1]
+ normal_deriv = normal_deriv.subs(x, variables)
+ assert normal_deriv.diff(t[0]).diff(t[0]).diff(t[0]).diff(t[0]) == 0
|
Bell element is incorrect
see https://github.com/mscroggs/defelement.com/issues/224
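For a concrete handle on what "incorrect" means here: the Bell space is a reduced quintic whose normal derivative along each edge must be cubic, which is what the test above verifies for the whole polyset. A quick standalone sympy check of that property (a sketch) for one of the degree-5 polynomials the patch adds, on the diagonal edge of the reference triangle:
```python
import sympy

xs, ys, ts = sympy.symbols("x y t")
p = xs**5 - ys**5                                 # one of the added polyset members
nd = (p.diff(xs) + p.diff(ys)) / sympy.sqrt(2)    # derivative along the (1, 1)/sqrt(2) normal
on_edge = nd.subs({xs: ts, ys: 1 - ts}).expand()  # parametrise the edge x + y = 1
assert sympy.degree(on_edge, ts) <= 3             # cubic, as required
```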
|
0.0
|
9c47d528ef4c1d08f9520ae0a8d946ced92bf0dd
|
[
"test/test_bell.py::test_bell_polyset"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-06-22 07:11:34+00:00
|
mit
| 4,067 |
|
msgpack__msgpack-python-331
|
diff --git a/ChangeLog.rst b/ChangeLog.rst
index cc6b5e4..d39e76b 100644
--- a/ChangeLog.rst
+++ b/ChangeLog.rst
@@ -5,21 +5,25 @@ Release Date: TBD
Important changes
-------------------
+-----------------
-Extension modules are merged. There is ``msgpack._msgpack`` instead of
-``msgpack._packer`` and ``msgpack._unpacker``. (#314)
+* unpacker: Default size limits is smaller than before to avoid DoS attack.
+ If you need to handle large data, you need to specify limits manually. (#319)
-unpacker: Default size limits is smaller than before to avoid DoS attack.
-If you need to handle large data, you need to specify limits manually.
+Other changes
+-------------
+* Extension modules are merged. There is ``msgpack._msgpack`` instead of
+ ``msgpack._packer`` and ``msgpack._unpacker``. (#314)
-Other changes
---------------
+* Add ``Unpacker.getbuffer()`` method. (#320)
-Add ``Unpacker.getbuffer()`` method.
+* unpacker: ``msgpack.StackError`` is raised when input data contains too
+ nested data. (#331)
+* unpacker: ``msgpack.FormatError`` is raised when input data is not valid
+ msgpack format. (#331)
0.5.6
diff --git a/Makefile b/Makefile
index b65aa85..5828ed4 100644
--- a/Makefile
+++ b/Makefile
@@ -7,7 +7,8 @@ cython:
cython --cplus msgpack/_cmsgpack.pyx
.PHONY: test
-test:
+test: cython
+ pip install -e .
pytest -v test
MSGPACK_PUREPYTHON=1 pytest -v test
diff --git a/msgpack/_unpacker.pyx b/msgpack/_unpacker.pyx
index aeebe2a..69330d3 100644
--- a/msgpack/_unpacker.pyx
+++ b/msgpack/_unpacker.pyx
@@ -16,6 +16,8 @@ from msgpack.exceptions import (
BufferFull,
OutOfData,
ExtraData,
+ FormatError,
+ StackError,
)
from msgpack import ExtType
@@ -149,7 +151,11 @@ def unpackb(object packed, object object_hook=None, object list_hook=None,
"""
Unpack packed_bytes to object. Returns an unpacked object.
- Raises `ValueError` when `packed` contains extra bytes.
+ Raises ``ExtraData`` when *packed* contains extra bytes.
+ Raises ``ValueError`` when *packed* is incomplete.
+ Raises ``FormatError`` when *packed* is not valid msgpack.
+ Raises ``StackError`` when *packed* contains too nested.
+ Other exceptions can be raised during unpacking.
See :class:`Unpacker` for options.
"""
@@ -187,6 +193,12 @@ def unpackb(object packed, object object_hook=None, object list_hook=None,
raise ExtraData(obj, PyBytes_FromStringAndSize(buf+off, buf_len-off))
return obj
unpack_clear(&ctx)
+ if ret == 0:
+ raise ValueError("Unpack failed: incomplete input")
+ elif ret == -2:
+ raise FormatError
+ elif ret == -3:
+ raise StackError
raise ValueError("Unpack failed: error = %d" % (ret,))
@@ -201,7 +213,7 @@ def unpack(object stream, **kwargs):
cdef class Unpacker(object):
"""Streaming unpacker.
- arguments:
+ Arguments:
:param file_like:
File-like object having `.read(n)` method.
@@ -279,6 +291,12 @@ cdef class Unpacker(object):
unpacker.feed(buf)
for o in unpacker:
process(o)
+
+ Raises ``ExtraData`` when *packed* contains extra bytes.
+ Raises ``OutOfData`` when *packed* is incomplete.
+ Raises ``FormatError`` when *packed* is not valid msgpack.
+ Raises ``StackError`` when *packed* contains too nested.
+ Other exceptions can be raised during unpacking.
"""
cdef unpack_context ctx
cdef char* buf
@@ -451,6 +469,10 @@ cdef class Unpacker(object):
raise StopIteration("No more data to unpack.")
else:
raise OutOfData("No more data to unpack.")
+ elif ret == -2:
+ raise FormatError
+ elif ret == -3:
+ raise StackError
else:
raise ValueError("Unpack failed: error = %d" % (ret,))
diff --git a/msgpack/exceptions.py b/msgpack/exceptions.py
index 5bee5b2..d6d2615 100644
--- a/msgpack/exceptions.py
+++ b/msgpack/exceptions.py
@@ -6,6 +6,7 @@ class UnpackException(Exception):
Exception instead.
"""
+
class BufferFull(UnpackException):
pass
@@ -14,6 +15,14 @@ class OutOfData(UnpackException):
pass
+class FormatError(ValueError, UnpackException):
+ """Invalid msgpack format"""
+
+
+class StackError(ValueError, UnpackException):
+ """Too nested"""
+
+
# Deprecated. Use ValueError instead
UnpackValueError = ValueError
@@ -24,6 +33,7 @@ class ExtraData(UnpackValueError):
This exception is raised while only one-shot (not streaming)
unpack.
"""
+
def __init__(self, unpacked, extra):
self.unpacked = unpacked
self.extra = extra
@@ -32,7 +42,7 @@ class ExtraData(UnpackValueError):
return "unpack(b) received extra data."
-#Deprecated. Use Exception instead to catch all exception during packing.
+# Deprecated. Use Exception instead to catch all exception during packing.
PackException = Exception
PackValueError = ValueError
PackOverflowError = OverflowError
diff --git a/msgpack/fallback.py b/msgpack/fallback.py
index 04fb5b9..9c767a7 100644
--- a/msgpack/fallback.py
+++ b/msgpack/fallback.py
@@ -18,6 +18,16 @@ else:
def dict_iteritems(d):
return d.iteritems()
+if sys.version_info < (3, 5):
+ # Ugly hack...
+ RecursionError = RuntimeError
+
+ def _is_recursionerror(e):
+ return len(e.args) == 1 and isinstance(e.args[0], str) and \
+ e.args[0].startswith('maximum recursion depth exceeded')
+else:
+ def _is_recursionerror(e):
+ return True
if hasattr(sys, 'pypy_version_info'):
# cStringIO is slow on PyPy, StringIO is faster. However: PyPy's own
@@ -52,7 +62,10 @@ else:
from msgpack.exceptions import (
BufferFull,
OutOfData,
- ExtraData)
+ ExtraData,
+ FormatError,
+ StackError,
+)
from msgpack import ExtType
@@ -109,7 +122,12 @@ def unpackb(packed, **kwargs):
"""
Unpack an object from `packed`.
- Raises `ExtraData` when `packed` contains extra bytes.
+ Raises ``ExtraData`` when *packed* contains extra bytes.
+ Raises ``ValueError`` when *packed* is incomplete.
+ Raises ``FormatError`` when *packed* is not valid msgpack.
+ Raises ``StackError`` when *packed* contains too nested.
+ Other exceptions can be raised during unpacking.
+
See :class:`Unpacker` for options.
"""
unpacker = Unpacker(None, **kwargs)
@@ -117,7 +135,11 @@ def unpackb(packed, **kwargs):
try:
ret = unpacker._unpack()
except OutOfData:
- raise ValueError("Data is not enough.")
+ raise ValueError("Unpack failed: incomplete input")
+ except RecursionError as e:
+ if _is_recursionerror(e):
+ raise StackError
+ raise
if unpacker._got_extradata():
raise ExtraData(ret, unpacker._get_extradata())
return ret
@@ -211,6 +233,12 @@ class Unpacker(object):
unpacker.feed(buf)
for o in unpacker:
process(o)
+
+ Raises ``ExtraData`` when *packed* contains extra bytes.
+ Raises ``OutOfData`` when *packed* is incomplete.
+ Raises ``FormatError`` when *packed* is not valid msgpack.
+ Raises ``StackError`` when *packed* contains too nested.
+ Other exceptions can be raised during unpacking.
"""
def __init__(self, file_like=None, read_size=0, use_list=True, raw=True,
@@ -561,7 +589,7 @@ class Unpacker(object):
raise ValueError("%s exceeds max_map_len(%s)", n, self._max_map_len)
typ = TYPE_MAP
else:
- raise ValueError("Unknown header: 0x%x" % b)
+ raise FormatError("Unknown header: 0x%x" % b)
return typ, n, obj
def _unpack(self, execute=EX_CONSTRUCT):
@@ -637,6 +665,8 @@ class Unpacker(object):
except OutOfData:
self._consume()
raise StopIteration
+ except RecursionError:
+ raise StackError
next = __next__
@@ -645,7 +675,10 @@ class Unpacker(object):
self._consume()
def unpack(self):
- ret = self._unpack(EX_CONSTRUCT)
+ try:
+ ret = self._unpack(EX_CONSTRUCT)
+ except RecursionError:
+ raise StackError
self._consume()
return ret
diff --git a/msgpack/unpack_template.h b/msgpack/unpack_template.h
index 525dea2..a78b7fa 100644
--- a/msgpack/unpack_template.h
+++ b/msgpack/unpack_template.h
@@ -123,7 +123,7 @@ static inline int unpack_execute(unpack_context* ctx, const char* data, Py_ssize
goto _fixed_trail_again
#define start_container(func, count_, ct_) \
- if(top >= MSGPACK_EMBED_STACK_SIZE) { goto _failed; } /* FIXME */ \
+ if(top >= MSGPACK_EMBED_STACK_SIZE) { ret = -3; goto _end; } \
if(construct_cb(func)(user, count_, &stack[top].obj) < 0) { goto _failed; } \
if((count_) == 0) { obj = stack[top].obj; \
if (construct_cb(func##_end)(user, &obj) < 0) { goto _failed; } \
@@ -132,27 +132,6 @@ static inline int unpack_execute(unpack_context* ctx, const char* data, Py_ssize
stack[top].size = count_; \
stack[top].count = 0; \
++top; \
- /*printf("container %d count %d stack %d\n",stack[top].obj,count_,top);*/ \
- /*printf("stack push %d\n", top);*/ \
- /* FIXME \
- if(top >= stack_size) { \
- if(stack_size == MSGPACK_EMBED_STACK_SIZE) { \
- size_t csize = sizeof(unpack_stack) * MSGPACK_EMBED_STACK_SIZE; \
- size_t nsize = csize * 2; \
- unpack_stack* tmp = (unpack_stack*)malloc(nsize); \
- if(tmp == NULL) { goto _failed; } \
- memcpy(tmp, ctx->stack, csize); \
- ctx->stack = stack = tmp; \
- ctx->stack_size = stack_size = MSGPACK_EMBED_STACK_SIZE * 2; \
- } else { \
- size_t nsize = sizeof(unpack_stack) * ctx->stack_size * 2; \
- unpack_stack* tmp = (unpack_stack*)realloc(ctx->stack, nsize); \
- if(tmp == NULL) { goto _failed; } \
- ctx->stack = stack = tmp; \
- ctx->stack_size = stack_size = stack_size * 2; \
- } \
- } \
- */ \
goto _header_again
#define NEXT_CS(p) ((unsigned int)*p & 0x1f)
@@ -229,7 +208,8 @@ static inline int unpack_execute(unpack_context* ctx, const char* data, Py_ssize
case 0xdf: // map 32
again_fixed_trail(NEXT_CS(p), 2 << (((unsigned int)*p) & 0x01));
default:
- goto _failed;
+ ret = -2;
+ goto _end;
}
SWITCH_RANGE(0xa0, 0xbf) // FixRaw
again_fixed_trail_if_zero(ACS_RAW_VALUE, ((unsigned int)*p & 0x1f), _raw_zero);
@@ -239,7 +219,8 @@ static inline int unpack_execute(unpack_context* ctx, const char* data, Py_ssize
start_container(_map, ((unsigned int)*p) & 0x0f, CT_MAP_KEY);
SWITCH_RANGE_DEFAULT
- goto _failed;
+ ret = -2;
+ goto _end;
SWITCH_RANGE_END
// end CS_HEADER
|
msgpack/msgpack-python
|
8b6ce53cce40e528af7cce89f358f7dde1a09289
|
diff --git a/test/test_except.py b/test/test_except.py
index 361d4ea..626c8be 100644
--- a/test/test_except.py
+++ b/test/test_except.py
@@ -2,7 +2,7 @@
# coding: utf-8
from pytest import raises
-from msgpack import packb, unpackb
+from msgpack import packb, unpackb, Unpacker, FormatError, StackError, OutOfData
import datetime
@@ -19,13 +19,34 @@ def test_raise_on_find_unsupported_value():
def test_raise_from_object_hook():
def hook(obj):
raise DummyException
+
raises(DummyException, unpackb, packb({}), object_hook=hook)
- raises(DummyException, unpackb, packb({'fizz': 'buzz'}), object_hook=hook)
- raises(DummyException, unpackb, packb({'fizz': 'buzz'}), object_pairs_hook=hook)
- raises(DummyException, unpackb, packb({'fizz': {'buzz': 'spam'}}), object_hook=hook)
- raises(DummyException, unpackb, packb({'fizz': {'buzz': 'spam'}}), object_pairs_hook=hook)
+ raises(DummyException, unpackb, packb({"fizz": "buzz"}), object_hook=hook)
+ raises(DummyException, unpackb, packb({"fizz": "buzz"}), object_pairs_hook=hook)
+ raises(DummyException, unpackb, packb({"fizz": {"buzz": "spam"}}), object_hook=hook)
+ raises(
+ DummyException,
+ unpackb,
+ packb({"fizz": {"buzz": "spam"}}),
+ object_pairs_hook=hook,
+ )
def test_invalidvalue():
+ incomplete = b"\xd9\x97#DL_" # raw8 - length=0x97
with raises(ValueError):
- unpackb(b'\xd9\x97#DL_')
+ unpackb(incomplete)
+
+ with raises(OutOfData):
+ unpacker = Unpacker()
+ unpacker.feed(incomplete)
+ unpacker.unpack()
+
+ with raises(FormatError):
+ unpackb(b"\xc1") # (undefined tag)
+
+ with raises(FormatError):
+ unpackb(b"\x91\xc1") # fixarray(len=1) [ (undefined tag) ]
+
+ with raises(StackError):
+ unpackb(b"\x91" * 3000) # nested fixarray(len=1)
|
Add msgpack-specific exception types
### Unpacker
InvalidSequence -- raised when the input is not valid msgpack.
NestLimitExceeded -- raised when the input is too deeply nested.
Any more?
### Packer
Any?
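A minimal sketch, based on the test patch above and assuming the patched msgpack build, of how callers can catch the exception types this change introduces:

```python
from msgpack import unpackb, FormatError, StackError

payloads = {
    "undefined tag": b"\xc1",       # 0xc1 is not valid msgpack -> FormatError
    "too nested": b"\x91" * 3000,   # fixarray(len=1) nested 3000 deep -> StackError
}

for label, packed in payloads.items():
    try:
        unpackb(packed)
    except (FormatError, StackError) as exc:
        print(f"{label}: {type(exc).__name__}")

# Both new types subclass ValueError, so pre-existing `except ValueError`
# handlers keep catching them unchanged.
```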
|
0.0
|
8b6ce53cce40e528af7cce89f358f7dde1a09289
|
[
"test/test_except.py::test_raise_on_find_unsupported_value",
"test/test_except.py::test_raise_from_object_hook",
"test/test_except.py::test_invalidvalue"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-11-17 05:34:35+00:00
|
apache-2.0
| 4,068 |
|
msgpack__msgpack-python-334
|
diff --git a/msgpack/_unpacker.pyx b/msgpack/_unpacker.pyx
index a5403d8..2163425 100644
--- a/msgpack/_unpacker.pyx
+++ b/msgpack/_unpacker.pyx
@@ -27,6 +27,7 @@ cdef extern from "unpack.h":
bint use_list
bint raw
bint has_pairs_hook # call object_hook with k-v pairs
+ bint strict_map_key
PyObject* object_hook
PyObject* list_hook
PyObject* ext_hook
@@ -56,7 +57,7 @@ cdef extern from "unpack.h":
cdef inline init_ctx(unpack_context *ctx,
object object_hook, object object_pairs_hook,
object list_hook, object ext_hook,
- bint use_list, bint raw,
+ bint use_list, bint raw, bint strict_map_key,
const char* encoding, const char* unicode_errors,
Py_ssize_t max_str_len, Py_ssize_t max_bin_len,
Py_ssize_t max_array_len, Py_ssize_t max_map_len,
@@ -64,6 +65,7 @@ cdef inline init_ctx(unpack_context *ctx,
unpack_init(ctx)
ctx.user.use_list = use_list
ctx.user.raw = raw
+ ctx.user.strict_map_key = strict_map_key
ctx.user.object_hook = ctx.user.list_hook = <PyObject*>NULL
ctx.user.max_str_len = max_str_len
ctx.user.max_bin_len = max_bin_len
@@ -140,7 +142,7 @@ cdef inline int get_data_from_buffer(object obj,
return 1
def unpackb(object packed, object object_hook=None, object list_hook=None,
- bint use_list=True, bint raw=True,
+ bint use_list=True, bint raw=True, bint strict_map_key=False,
encoding=None, unicode_errors=None,
object_pairs_hook=None, ext_hook=ExtType,
Py_ssize_t max_str_len=1024*1024,
@@ -180,7 +182,7 @@ def unpackb(object packed, object object_hook=None, object list_hook=None,
get_data_from_buffer(packed, &view, &buf, &buf_len, &new_protocol)
try:
init_ctx(&ctx, object_hook, object_pairs_hook, list_hook, ext_hook,
- use_list, raw, cenc, cerr,
+ use_list, raw, strict_map_key, cenc, cerr,
max_str_len, max_bin_len, max_array_len, max_map_len, max_ext_len)
ret = unpack_construct(&ctx, buf, buf_len, &off)
finally:
@@ -236,6 +238,11 @@ cdef class Unpacker(object):
*encoding* option which is deprecated overrides this option.
+ :param bool strict_map_key:
+ If true, only str or bytes are accepted for map (dict) keys.
+ It's False by default for backward-compatibility.
+ But it will be True from msgpack 1.0.
+
:param callable object_hook:
When specified, it should be callable.
Unpacker calls it with a dict argument after unpacking msgpack map.
@@ -318,7 +325,7 @@ cdef class Unpacker(object):
self.buf = NULL
def __init__(self, file_like=None, Py_ssize_t read_size=0,
- bint use_list=True, bint raw=True,
+ bint use_list=True, bint raw=True, bint strict_map_key=False,
object object_hook=None, object object_pairs_hook=None, object list_hook=None,
encoding=None, unicode_errors=None, Py_ssize_t max_buffer_size=0,
object ext_hook=ExtType,
@@ -366,7 +373,7 @@ cdef class Unpacker(object):
cerr = unicode_errors
init_ctx(&self.ctx, object_hook, object_pairs_hook, list_hook,
- ext_hook, use_list, raw, cenc, cerr,
+ ext_hook, use_list, raw, strict_map_key, cenc, cerr,
max_str_len, max_bin_len, max_array_len,
max_map_len, max_ext_len)
diff --git a/msgpack/fallback.py b/msgpack/fallback.py
index 9c767a7..ae2fcfc 100644
--- a/msgpack/fallback.py
+++ b/msgpack/fallback.py
@@ -179,6 +179,11 @@ class Unpacker(object):
*encoding* option which is deprecated overrides this option.
+ :param bool strict_map_key:
+ If true, only str or bytes are accepted for map (dict) keys.
+ It's False by default for backward-compatibility.
+ But it will be True from msgpack 1.0.
+
:param callable object_hook:
When specified, it should be callable.
Unpacker calls it with a dict argument after unpacking msgpack map.
@@ -241,7 +246,7 @@ class Unpacker(object):
Other exceptions can be raised during unpacking.
"""
- def __init__(self, file_like=None, read_size=0, use_list=True, raw=True,
+ def __init__(self, file_like=None, read_size=0, use_list=True, raw=True, strict_map_key=False,
object_hook=None, object_pairs_hook=None, list_hook=None,
encoding=None, unicode_errors=None, max_buffer_size=0,
ext_hook=ExtType,
@@ -286,6 +291,7 @@ class Unpacker(object):
raise ValueError("read_size must be smaller than max_buffer_size")
self._read_size = read_size or min(self._max_buffer_size, 16*1024)
self._raw = bool(raw)
+ self._strict_map_key = bool(strict_map_key)
self._encoding = encoding
self._unicode_errors = unicode_errors
self._use_list = use_list
@@ -633,6 +639,8 @@ class Unpacker(object):
ret = {}
for _ in xrange(n):
key = self._unpack(EX_CONSTRUCT)
+ if self._strict_map_key and type(key) not in (Unicode, bytes):
+ raise ValueError("%s is not allowed for map key" % str(type(key)))
ret[key] = self._unpack(EX_CONSTRUCT)
if self._object_hook is not None:
ret = self._object_hook(ret)
diff --git a/msgpack/unpack.h b/msgpack/unpack.h
index 63e5543..85dbbed 100644
--- a/msgpack/unpack.h
+++ b/msgpack/unpack.h
@@ -23,6 +23,7 @@ typedef struct unpack_user {
bool use_list;
bool raw;
bool has_pairs_hook;
+ bool strict_map_key;
PyObject *object_hook;
PyObject *list_hook;
PyObject *ext_hook;
@@ -188,6 +189,10 @@ static inline int unpack_callback_map(unpack_user* u, unsigned int n, msgpack_un
static inline int unpack_callback_map_item(unpack_user* u, unsigned int current, msgpack_unpack_object* c, msgpack_unpack_object k, msgpack_unpack_object v)
{
+ if (u->strict_map_key && !PyUnicode_CheckExact(k) && !PyBytes_CheckExact(k)) {
+ PyErr_Format(PyExc_ValueError, "%.100s is not allowed for map key", Py_TYPE(k)->tp_name);
+ return -1;
+ }
if (u->has_pairs_hook) {
msgpack_unpack_object item = PyTuple_Pack(2, k, v);
if (!item)
|
msgpack/msgpack-python
|
3c9c6edbc88908fceb3c69ff3d6455be8b5914c8
|
diff --git a/test/test_except.py b/test/test_except.py
index 626c8be..5544f2b 100644
--- a/test/test_except.py
+++ b/test/test_except.py
@@ -50,3 +50,14 @@ def test_invalidvalue():
with raises(StackError):
unpackb(b"\x91" * 3000) # nested fixarray(len=1)
+
+
+def test_strict_map_key():
+ valid = {u"unicode": 1, b"bytes": 2}
+ packed = packb(valid, use_bin_type=True)
+ assert valid == unpackb(packed, raw=False, strict_map_key=True)
+
+ invalid = {42: 1}
+ packed = packb(invalid, use_bin_type=True)
+ with raises(ValueError):
+ unpackb(packed, raw=False, strict_map_key=True)
|
Option for restricting map keys
While the msgpack spec doesn't prohibit any map key type, some languages allow only str as map keys, so restricting keys is recommended for cross-language compatibility.
For the packer, it is useful for detecting (unexpected) bin keys.
For the unpacker, it is also important for preventing a "hashdos" attack (see the sketch after this list).
* 0.6: Add `strict_map_key` option to packer and unpacker. False by default.
* 1.0: Make it True by default.
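A minimal usage sketch of the requested option, mirroring the test patch for this change (the exact error message differs between the C and pure-Python implementations):

```python
from msgpack import packb, unpackb

valid = {u"unicode": 1, b"bytes": 2}
packed = packb(valid, use_bin_type=True)
assert valid == unpackb(packed, raw=False, strict_map_key=True)

# Non-str/bytes keys are rejected, which blocks "hashdos" payloads built
# from attacker-chosen integer keys.
packed = packb({42: 1}, use_bin_type=True)
try:
    unpackb(packed, raw=False, strict_map_key=True)
except ValueError as exc:
    print(exc)  # e.g. "int is not allowed for map key"
```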
|
0.0
|
3c9c6edbc88908fceb3c69ff3d6455be8b5914c8
|
[
"test/test_except.py::test_strict_map_key"
] |
[
"test/test_except.py::test_raise_on_find_unsupported_value",
"test/test_except.py::test_raise_from_object_hook",
"test/test_except.py::test_invalidvalue"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-11-29 13:36:11+00:00
|
apache-2.0
| 4,069 |
|
msgpack__msgpack-python-394
|
diff --git a/Makefile b/Makefile
index f8971cc..e2f25cf 100644
--- a/Makefile
+++ b/Makefile
@@ -4,7 +4,7 @@ all: cython
.PHONY: black
black:
- black msgpack/ test/
+ black msgpack/ test/ setup.py
.PHONY: cython
cython:
diff --git a/msgpack/_cmsgpack.pyx b/msgpack/_cmsgpack.pyx
index 8ebdbf5..1faaac3 100644
--- a/msgpack/_cmsgpack.pyx
+++ b/msgpack/_cmsgpack.pyx
@@ -1,4 +1,11 @@
# coding: utf-8
#cython: embedsignature=True, c_string_encoding=ascii, language_level=3
+from cpython.datetime cimport import_datetime, datetime_new
+import_datetime()
+
+import datetime
+cdef object utc = datetime.timezone.utc
+cdef object epoch = datetime_new(1970, 1, 1, 0, 0, 0, 0, tz=utc)
+
include "_packer.pyx"
include "_unpacker.pyx"
diff --git a/msgpack/_packer.pyx b/msgpack/_packer.pyx
index 1426439..b470646 100644
--- a/msgpack/_packer.pyx
+++ b/msgpack/_packer.pyx
@@ -2,6 +2,10 @@
from cpython cimport *
from cpython.bytearray cimport PyByteArray_Check, PyByteArray_CheckExact
+from cpython.datetime cimport (
+ PyDateTime_CheckExact, PyDelta_CheckExact,
+ datetime_tzinfo, timedelta_days, timedelta_seconds, timedelta_microseconds,
+)
cdef ExtType
cdef Timestamp
@@ -99,8 +103,9 @@ cdef class Packer(object):
cdef object _berrors
cdef const char *unicode_errors
cdef bint strict_types
- cdef bool use_float
+ cdef bint use_float
cdef bint autoreset
+ cdef bint datetime
def __cinit__(self):
cdef int buf_size = 1024*1024
@@ -110,12 +115,13 @@ cdef class Packer(object):
self.pk.buf_size = buf_size
self.pk.length = 0
- def __init__(self, *, default=None, unicode_errors=None,
+ def __init__(self, *, default=None,
bint use_single_float=False, bint autoreset=True, bint use_bin_type=True,
- bint strict_types=False):
+ bint strict_types=False, bint datetime=False, unicode_errors=None):
self.use_float = use_single_float
self.strict_types = strict_types
self.autoreset = autoreset
+ self.datetime = datetime
self.pk.use_bin_type = use_bin_type
if default is not None:
if not PyCallable_Check(default):
@@ -262,6 +268,13 @@ cdef class Packer(object):
if ret == 0:
ret = msgpack_pack_raw_body(&self.pk, <char*>view.buf, L)
PyBuffer_Release(&view);
+ elif self.datetime and PyDateTime_CheckExact(o) and datetime_tzinfo(o) is not None:
+ delta = o - epoch
+ if not PyDelta_CheckExact(delta):
+ raise ValueError("failed to calculate delta")
+ llval = timedelta_days(delta) * <long long>(24*60*60) + timedelta_seconds(delta)
+ ulval = timedelta_microseconds(delta) * 1000
+ ret = msgpack_pack_timestamp(&self.pk, llval, ulval)
elif not default_used and self._default:
o = self._default(o)
default_used = 1
diff --git a/msgpack/_unpacker.pyx b/msgpack/_unpacker.pyx
index 0ff633b..43c93a2 100644
--- a/msgpack/_unpacker.pyx
+++ b/msgpack/_unpacker.pyx
@@ -1,7 +1,6 @@
# coding: utf-8
from cpython cimport *
-
cdef extern from "Python.h":
ctypedef struct PyObject
cdef int PyObject_AsReadBuffer(object o, const void** buff, Py_ssize_t* buf_len) except -1
@@ -21,6 +20,8 @@ from .exceptions import (
)
from .ext import ExtType, Timestamp
+cdef object giga = 1_000_000_000
+
cdef extern from "unpack.h":
ctypedef struct msgpack_user:
@@ -28,10 +29,13 @@ cdef extern from "unpack.h":
bint raw
bint has_pairs_hook # call object_hook with k-v pairs
bint strict_map_key
+ int timestamp
PyObject* object_hook
PyObject* list_hook
PyObject* ext_hook
PyObject* timestamp_t
+ PyObject *giga;
+ PyObject *utc;
char *unicode_errors
Py_ssize_t max_str_len
Py_ssize_t max_bin_len
@@ -57,7 +61,8 @@ cdef extern from "unpack.h":
cdef inline init_ctx(unpack_context *ctx,
object object_hook, object object_pairs_hook,
object list_hook, object ext_hook,
- bint use_list, bint raw, bint strict_map_key,
+ bint use_list, bint raw, int timestamp,
+ bint strict_map_key,
const char* unicode_errors,
Py_ssize_t max_str_len, Py_ssize_t max_bin_len,
Py_ssize_t max_array_len, Py_ssize_t max_map_len,
@@ -99,8 +104,14 @@ cdef inline init_ctx(unpack_context *ctx,
raise TypeError("ext_hook must be a callable.")
ctx.user.ext_hook = <PyObject*>ext_hook
+ if timestamp < 0 or 3 < timestamp:
+ raise ValueError("timestamp must be 0..3")
+
# Add Timestamp type to the user object so it may be used in unpack.h
+ ctx.user.timestamp = timestamp
ctx.user.timestamp_t = <PyObject*>Timestamp
+ ctx.user.giga = <PyObject*>giga
+ ctx.user.utc = <PyObject*>utc
ctx.user.unicode_errors = unicode_errors
def default_read_extended_type(typecode, data):
@@ -131,7 +142,7 @@ cdef inline int get_data_from_buffer(object obj,
def unpackb(object packed, *, object object_hook=None, object list_hook=None,
- bint use_list=True, bint raw=False, bint strict_map_key=True,
+ bint use_list=True, bint raw=False, int timestamp=0, bint strict_map_key=True,
unicode_errors=None,
object_pairs_hook=None, ext_hook=ExtType,
Py_ssize_t max_str_len=-1,
@@ -179,7 +190,7 @@ def unpackb(object packed, *, object object_hook=None, object list_hook=None,
try:
init_ctx(&ctx, object_hook, object_pairs_hook, list_hook, ext_hook,
- use_list, raw, strict_map_key, cerr,
+ use_list, raw, timestamp, strict_map_key, cerr,
max_str_len, max_bin_len, max_array_len, max_map_len, max_ext_len)
ret = unpack_construct(&ctx, buf, buf_len, &off)
finally:
@@ -304,7 +315,7 @@ cdef class Unpacker(object):
self.buf = NULL
def __init__(self, file_like=None, *, Py_ssize_t read_size=0,
- bint use_list=True, bint raw=False, bint strict_map_key=True,
+ bint use_list=True, bint raw=False, int timestamp=0, bint strict_map_key=True,
object object_hook=None, object object_pairs_hook=None, object list_hook=None,
unicode_errors=None, Py_ssize_t max_buffer_size=100*1024*1024,
object ext_hook=ExtType,
@@ -359,7 +370,7 @@ cdef class Unpacker(object):
cerr = unicode_errors
init_ctx(&self.ctx, object_hook, object_pairs_hook, list_hook,
- ext_hook, use_list, raw, strict_map_key, cerr,
+ ext_hook, use_list, raw, timestamp, strict_map_key, cerr,
max_str_len, max_bin_len, max_array_len,
max_map_len, max_ext_len)
diff --git a/msgpack/ext.py b/msgpack/ext.py
index c7efff6..09adb34 100644
--- a/msgpack/ext.py
+++ b/msgpack/ext.py
@@ -1,12 +1,18 @@
# coding: utf-8
from collections import namedtuple
+import datetime
import sys
import struct
PY2 = sys.version_info[0] == 2
+
if not PY2:
long = int
+ try:
+ _utc = datetime.timezone.utc
+ except AttributeError:
+ _utc = datetime.timezone(datetime.timedelta(0))
class ExtType(namedtuple("ExtType", "code data")):
@@ -131,7 +137,7 @@ class Timestamp(object):
data = struct.pack("!Iq", self.nanoseconds, self.seconds)
return data
- def to_float_s(self):
+ def to_float(self):
"""Get the timestamp as a floating-point value.
:returns: posix timestamp
@@ -139,6 +145,12 @@ class Timestamp(object):
"""
return self.seconds + self.nanoseconds / 1e9
+ @staticmethod
+ def from_float(unix_float):
+ seconds = int(unix_float)
+ nanoseconds = int((unix_float % 1) * 1000000000)
+ return Timestamp(seconds, nanoseconds)
+
def to_unix_ns(self):
"""Get the timestamp as a unixtime in nanoseconds.
@@ -146,3 +158,16 @@ class Timestamp(object):
:rtype: int
"""
return int(self.seconds * 1e9 + self.nanoseconds)
+
+ if not PY2:
+
+ def to_datetime(self):
+ """Get the timestamp as a UTC datetime.
+
+ :rtype: datetime.
+ """
+ return datetime.datetime.fromtimestamp(self.to_float(), _utc)
+
+ @staticmethod
+ def from_datetime(dt):
+ return Timestamp.from_float(dt.timestamp())
diff --git a/msgpack/fallback.py b/msgpack/fallback.py
index f6ba424..9ba98bf 100644
--- a/msgpack/fallback.py
+++ b/msgpack/fallback.py
@@ -1,5 +1,6 @@
"""Fallback pure Python implementation of msgpack"""
+from datetime import datetime as _DateTime
import sys
import struct
@@ -174,6 +175,14 @@ class Unpacker(object):
If true, unpack msgpack raw to Python bytes.
Otherwise, unpack to Python str by decoding with UTF-8 encoding (default).
+ :param int timestamp:
+ Control how timestamp type is unpacked:
+
+ 0 - Tiemstamp
+ 1 - float (Seconds from the EPOCH)
+ 2 - int (Nanoseconds from the EPOCH)
+ 3 - datetime.datetime (UTC). Python 2 is not supported.
+
:param bool strict_map_key:
If true (default), only str or bytes are accepted for map (dict) keys.
@@ -248,6 +257,7 @@ class Unpacker(object):
read_size=0,
use_list=True,
raw=False,
+ timestamp=0,
strict_map_key=True,
object_hook=None,
object_pairs_hook=None,
@@ -307,6 +317,9 @@ class Unpacker(object):
self._strict_map_key = bool(strict_map_key)
self._unicode_errors = unicode_errors
self._use_list = use_list
+ if not (0 <= timestamp <= 3):
+ raise ValueError("timestamp must be 0..3")
+ self._timestamp = timestamp
self._list_hook = list_hook
self._object_hook = object_hook
self._object_pairs_hook = object_pairs_hook
@@ -672,10 +685,21 @@ class Unpacker(object):
else:
obj = obj.decode("utf_8", self._unicode_errors)
return obj
- if typ == TYPE_EXT:
- return self._ext_hook(n, bytes(obj))
if typ == TYPE_BIN:
return bytes(obj)
+ if typ == TYPE_EXT:
+ if n == -1: # timestamp
+ ts = Timestamp.from_bytes(bytes(obj))
+ if self._timestamp == 1:
+ return ts.to_float()
+ elif self._timestamp == 2:
+ return ts.to_unix_ns()
+ elif self._timestamp == 3:
+ return ts.to_datetime()
+ else:
+ return ts
+ else:
+ return self._ext_hook(n, bytes(obj))
assert typ == TYPE_IMMEDIATE
return obj
@@ -756,6 +780,12 @@ class Packer(object):
This is useful when trying to implement accurate serialization
for python types.
+ :param bool datetime:
+ If set to true, datetime with tzinfo is packed into Timestamp type.
+ Note that the tzinfo is stripped in the timestamp.
+ You can get UTC datetime with `timestamp=3` option of the Unapcker.
+ (Python 2 is not supported).
+
:param str unicode_errors:
The error handler for encoding unicode. (default: 'strict')
DO NOT USE THIS!! This option is kept for very specific usage.
@@ -764,18 +794,22 @@ class Packer(object):
def __init__(
self,
default=None,
- unicode_errors=None,
use_single_float=False,
autoreset=True,
use_bin_type=True,
strict_types=False,
+ datetime=False,
+ unicode_errors=None,
):
self._strict_types = strict_types
self._use_float = use_single_float
self._autoreset = autoreset
self._use_bin_type = use_bin_type
- self._unicode_errors = unicode_errors or "strict"
self._buffer = StringIO()
+ if PY2 and datetime:
+ raise ValueError("datetime is not supported in Python 2")
+ self._datetime = bool(datetime)
+ self._unicode_errors = unicode_errors or "strict"
if default is not None:
if not callable(default):
raise TypeError("default must be callable")
@@ -891,6 +925,12 @@ class Packer(object):
return self._pack_map_pairs(
len(obj), dict_iteritems(obj), nest_limit - 1
)
+
+ if self._datetime and check(obj, _DateTime):
+ obj = Timestamp.from_datetime(obj)
+ default_used = 1
+ continue
+
if not default_used and self._default is not None:
obj = self._default(obj)
default_used = 1
diff --git a/msgpack/unpack.h b/msgpack/unpack.h
index 4380ec5..debdf71 100644
--- a/msgpack/unpack.h
+++ b/msgpack/unpack.h
@@ -24,10 +24,13 @@ typedef struct unpack_user {
bool raw;
bool has_pairs_hook;
bool strict_map_key;
+ int timestamp;
PyObject *object_hook;
PyObject *list_hook;
PyObject *ext_hook;
PyObject *timestamp_t;
+ PyObject *giga;
+ PyObject *utc;
const char *unicode_errors;
Py_ssize_t max_str_len, max_bin_len, max_array_len, max_map_len, max_ext_len;
} unpack_user;
@@ -268,7 +271,7 @@ typedef struct msgpack_timestamp {
/*
* Unpack ext buffer to a timestamp. Pulled from msgpack-c timestamp.h.
*/
-static inline int unpack_timestamp(const char* buf, unsigned int buflen, msgpack_timestamp* ts) {
+static int unpack_timestamp(const char* buf, unsigned int buflen, msgpack_timestamp* ts) {
switch (buflen) {
case 4:
ts->tv_nsec = 0;
@@ -292,10 +295,11 @@ static inline int unpack_timestamp(const char* buf, unsigned int buflen, msgpack
}
}
-static inline int unpack_callback_ext(unpack_user* u, const char* base, const char* pos,
- unsigned int length, msgpack_unpack_object* o)
+#include "datetime.h"
+
+static int unpack_callback_ext(unpack_user* u, const char* base, const char* pos,
+ unsigned int length, msgpack_unpack_object* o)
{
- PyObject *py;
int8_t typecode = (int8_t)*pos++;
if (!u->ext_hook) {
PyErr_SetString(PyExc_AssertionError, "u->ext_hook cannot be NULL");
@@ -305,13 +309,67 @@ static inline int unpack_callback_ext(unpack_user* u, const char* base, const ch
PyErr_Format(PyExc_ValueError, "%u exceeds max_ext_len(%zd)", length, u->max_ext_len);
return -1;
}
+
+ PyObject *py = NULL;
// length also includes the typecode, so the actual data is length-1
if (typecode == -1) {
msgpack_timestamp ts;
- if (unpack_timestamp(pos, length-1, &ts) == 0) {
+ if (unpack_timestamp(pos, length-1, &ts) < 0) {
+ return -1;
+ }
+
+ if (u->timestamp == 2) { // int
+ PyObject *a = PyLong_FromLongLong(ts.tv_sec);
+ if (a == NULL) return -1;
+
+ PyObject *c = PyNumber_Multiply(a, u->giga);
+ Py_DECREF(a);
+ if (c == NULL) {
+ return -1;
+ }
+
+ PyObject *b = PyLong_FromUnsignedLong(ts.tv_nsec);
+ if (b == NULL) {
+ Py_DECREF(c);
+ return -1;
+ }
+
+ py = PyNumber_Add(c, b);
+ Py_DECREF(c);
+ Py_DECREF(b);
+ }
+ else if (u->timestamp == 0) { // Timestamp
py = PyObject_CallFunction(u->timestamp_t, "(Lk)", ts.tv_sec, ts.tv_nsec);
- } else {
- py = NULL;
+ }
+ else { // float or datetime
+ PyObject *a = PyFloat_FromDouble((double)ts.tv_nsec);
+ if (a == NULL) return -1;
+
+ PyObject *b = PyNumber_TrueDivide(a, u->giga);
+ Py_DECREF(a);
+ if (b == NULL) return -1;
+
+ PyObject *c = PyLong_FromLongLong(ts.tv_sec);
+ if (c == NULL) {
+ Py_DECREF(b);
+ return -1;
+ }
+
+ a = PyNumber_Add(b, c);
+ Py_DECREF(b);
+ Py_DECREF(c);
+
+ if (u->timestamp == 3) { // datetime
+ PyObject *t = PyTuple_Pack(2, a, u->utc);
+ Py_DECREF(a);
+ if (t == NULL) {
+ return -1;
+ }
+ py = PyDateTime_FromTimestamp(t);
+ Py_DECREF(t);
+ } else { // float
+ py = a;
+ }
}
} else {
py = PyObject_CallFunction(u->ext_hook, "(iy#)", (int)typecode, pos, (Py_ssize_t)length-1);
|
msgpack/msgpack-python
|
5fd611909319d03200774ea3c7a6ae16dbd26c12
|
diff --git a/test/test_timestamp.py b/test/test_timestamp.py
index 1348e69..822994c 100644
--- a/test/test_timestamp.py
+++ b/test/test_timestamp.py
@@ -1,5 +1,11 @@
+import pytest
+import sys
+import datetime
import msgpack
-from msgpack import Timestamp
+from msgpack.ext import Timestamp
+
+if sys.version_info[0] > 2:
+ from msgpack.ext import _utc
def test_timestamp():
@@ -42,5 +48,43 @@ def test_timestamp():
def test_timestamp_to():
t = Timestamp(42, 14)
- assert t.to_float_s() == 42.000000014
+ assert t.to_float() == 42.000000014
assert t.to_unix_ns() == 42000000014
+
+
[email protected](sys.version_info[0] == 2, reason="datetime support is PY3+ only")
+def test_timestamp_datetime():
+ t = Timestamp(42, 14)
+ assert t.to_datetime() == datetime.datetime(1970, 1, 1, 0, 0, 42, 0, tzinfo=_utc)
+
+
[email protected](sys.version_info[0] == 2, reason="datetime support is PY3+ only")
+def test_unpack_datetime():
+ t = Timestamp(42, 14)
+ packed = msgpack.packb(t)
+ unpacked = msgpack.unpackb(packed, timestamp=3)
+ assert unpacked == datetime.datetime(1970, 1, 1, 0, 0, 42, 0, tzinfo=_utc)
+
+
[email protected](sys.version_info[0] == 2, reason="datetime support is PY3+ only")
+def test_pack_datetime():
+ t = Timestamp(42, 14000)
+ dt = t.to_datetime()
+ assert dt == datetime.datetime(1970, 1, 1, 0, 0, 42, 14, tzinfo=_utc)
+
+ packed = msgpack.packb(dt, datetime=True)
+ packed2 = msgpack.packb(t)
+ assert packed == packed2
+
+ unpacked = msgpack.unpackb(packed)
+ print(packed, unpacked)
+ assert unpacked == t
+
+ unpacked = msgpack.unpackb(packed, timestamp=3)
+ assert unpacked == dt
+
+ x = []
+ packed = msgpack.packb(dt, datetime=False, default=x.append)
+ assert x
+ assert x[0] == dt
+ assert msgpack.unpackb(packed) is None
|
Support serializing datetime objects
Since timestamps are now a part of the msgpack spec, shouldn't this library support serializing them?
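A minimal round-trip sketch of what the patch enables, with values mirroring the test patch; it assumes a timezone-aware datetime, since naive datetimes are still routed through `default=`:

```python
import datetime
import msgpack

utc = datetime.timezone.utc
dt = datetime.datetime(1970, 1, 1, 0, 0, 42, 14, tzinfo=utc)

packed = msgpack.packb(dt, datetime=True)          # packed as the Timestamp ext type
assert msgpack.unpackb(packed, timestamp=3) == dt  # timestamp=3 -> UTC datetime
ts = msgpack.unpackb(packed)                       # default timestamp=0 -> Timestamp
print(ts.to_float())                               # 42.000014 seconds since the epoch
```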
|
0.0
|
5fd611909319d03200774ea3c7a6ae16dbd26c12
|
[
"test/test_timestamp.py::test_timestamp",
"test/test_timestamp.py::test_timestamp_to",
"test/test_timestamp.py::test_timestamp_datetime"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-12-11 13:12:28+00:00
|
apache-2.0
| 4,070 |
|
mtgjson__mtgjson-1148
|
diff --git a/mtgjson5/classes/mtgjson_card.py b/mtgjson5/classes/mtgjson_card.py
index 2af2e93..bc31bc7 100644
--- a/mtgjson5/classes/mtgjson_card.py
+++ b/mtgjson5/classes/mtgjson_card.py
@@ -238,14 +238,16 @@ class MtgjsonCardObject:
if self.number == other.number:
return self_side < other_side
- self_number_clean = "".join(filter(str.isdigit, self.number)) or 100_000
+ self_number_clean = "".join(filter(str.isdigit, self.number)) or "100000"
self_number_clean_int = int(self_number_clean)
- other_number_clean = "".join(filter(str.isdigit, other.number)) or 100_000
+ other_number_clean = "".join(filter(str.isdigit, other.number)) or "100000"
other_number_clean_int = int(other_number_clean)
if self.number == self_number_clean and other.number == other_number_clean:
if self_number_clean_int == other_number_clean_int:
+ if len(self_number_clean) != len(other_number_clean):
+ return len(self_number_clean) < len(other_number_clean)
return self_side < other_side
return self_number_clean_int < other_number_clean_int
@@ -260,6 +262,8 @@ class MtgjsonCardObject:
return self_number_clean_int < other_number_clean_int
if self_number_clean == other_number_clean:
+ if not self_side and not other_side:
+ return bool(self.number < other.number)
return self_side < other_side
return self_number_clean_int < other_number_clean_int
|
mtgjson/mtgjson
|
141e2c73f005c0e8827584eb672f2a5a9abc2aa3
|
diff --git a/tests/mtgjson5/test_card_sorting.py b/tests/mtgjson5/test_card_sorting.py
index 61b02c1..39c92a3 100644
--- a/tests/mtgjson5/test_card_sorting.py
+++ b/tests/mtgjson5/test_card_sorting.py
@@ -5,30 +5,34 @@ from mtgjson5.classes.mtgjson_card import MtgjsonCardObject
def test_card_sorting():
correct_order = [
- ("A", "1", None),
- ("B", "2", None),
- ("C1", "2a", "a"),
- ("C2", "2b", "b"),
- ("D", "3", None),
- ("E", "10", None),
- ("F1", "10a", "a"),
- ("F2", "10b", "b"),
- ("G", "11", None),
- ("H", "20", None),
- ("I", "", None),
+ ("0", None),
+ ("00", None),
+ ("ap0a", None),
+ ("gn0a", None),
+ ("1", None),
+ ("2", None),
+ ("2a", "a"),
+ ("2b", "b"),
+ ("3", None),
+ ("10", None),
+ ("10a", "a"),
+ ("10b", "b"),
+ ("11", None),
+ ("20", None),
+ ("", None),
]
test_group = []
- for name, number, side in correct_order:
+ for number, side in correct_order:
card = MtgjsonCardObject()
- card.name = name
card.number = number
card.side = side
test_group.append(card)
- random.shuffle(test_group)
- test_group.sort()
+ for _ in range(0, 500):
+ random.shuffle(test_group)
+ test_group.sort()
- test_group_order = list(map(lambda x: (x.name, x.number, x.side), test_group))
+ test_group_order = list(map(lambda x: (x.number, x.side), test_group))
- assert correct_order == test_group_order
+ assert correct_order == test_group_order
|
Card lists with equal (or similar) collector numbers are not sorted
Following up from https://github.com/mtgjson/mtgjson/issues/1127, it seems the card lists of WCD sets come out unsorted depending on the card number -- I caught one example, but I think the numbering scheme of these sets confuses whatever keeps them sorted.
```diff
- "name": "Blank Card",
- "number": "00",
+ "name": "2004 World Championships Ad",
+ "number": "0",
```
Edit: confirmed the problem is the numbering scheme
```diff
- "name": "Aeo Paquette Bio",
- "number": "ap0a",
+ "name": "Gabriel Nassif Bio",
+ "number": "gn0a",
```
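A small sketch of why stripping non-digits alone cannot order these numbers — several WCD collector numbers normalize to the same digit string, which is what the added tie-breaks (digit-string length for pure-numeric numbers, the raw string otherwise) resolve:

```python
def clean(number):
    # Normalization used by the comparison; "100000" is the sentinel
    # that pushes empty collector numbers to the end.
    return "".join(filter(str.isdigit, number)) or "100000"

for n in ["0", "00", "ap0a", "gn0a", ""]:
    print(repr(n), "->", repr(clean(n)))
# '0' -> '0', '00' -> '00', 'ap0a' -> '0', 'gn0a' -> '0', '' -> '100000'
# int("0") == int("00") and both bios reduce to "0", so integer comparison
# alone leaves their relative order to chance.
```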
|
0.0
|
141e2c73f005c0e8827584eb672f2a5a9abc2aa3
|
[
"tests/mtgjson5/test_card_sorting.py::test_card_sorting"
] |
[] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2023-11-01 02:54:05+00:00
|
mit
| 4,071 |
|
mtgjson__mtgjson-469
|
diff --git a/.travis.yml b/.travis.yml
index 50bff20..7335254 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -15,5 +15,4 @@ script:
- PYTHONPATH=. pytest
- black --check --diff .
- pylint mtgjson4 tests
- - mypy .
- npx eclint check $(git ls-files)
diff --git a/mtgjson4/__init__.py b/mtgjson4/__init__.py
index 338757a..18949b7 100644
--- a/mtgjson4/__init__.py
+++ b/mtgjson4/__init__.py
@@ -49,7 +49,7 @@ CARD_MARKET_BUFFER: str = "10101"
ALL_CARDS_OUTPUT: str = "AllCards"
ALL_DECKS_DIR_OUTPUT: str = "AllDeckFiles"
ALL_SETS_DIR_OUTPUT: str = "AllSetFiles"
-ALL_SETS_OUTPUT: str = "AllSets"
+ALL_SETS_OUTPUT: str = "AllPrintings"
CARD_TYPES_OUTPUT: str = "CardTypes"
COMPILED_LIST_OUTPUT: str = "CompiledList"
DECK_LISTS_OUTPUT: str = "DeckLists"
@@ -59,12 +59,14 @@ REFERRAL_DB_OUTPUT: str = "ReferralMap"
SET_LIST_OUTPUT: str = "SetList"
VERSION_OUTPUT: str = "version"
-STANDARD_OUTPUT: str = "Standard"
-MODERN_OUTPUT: str = "Modern"
-VINTAGE_OUTPUT: str = "Vintage"
-LEGACY_OUTPUT: str = "Legacy"
+STANDARD_OUTPUT: str = "StandardPrintings"
+PIONEER_OUTPUT: str = "PioneerPrintings"
+MODERN_OUTPUT: str = "ModernPrintings"
+VINTAGE_OUTPUT: str = "VintagePrintings"
+LEGACY_OUTPUT: str = "LegacyPrintings"
STANDARD_CARDS_OUTPUT: str = "StandardCards"
+PIONEER_CARDS_OUTPUT: str = "PioneerCards"
MODERN_CARDS_OUTPUT: str = "ModernCards"
VINTAGE_CARDS_OUTPUT: str = "VintageCards"
LEGACY_CARDS_OUTPUT: str = "LegacyCards"
@@ -72,6 +74,7 @@ PAUPER_CARDS_OUTPUT: str = "PauperCards"
SUPPORTED_FORMAT_OUTPUTS: Set[str] = {
"standard",
+ "pioneer",
"modern",
"legacy",
"vintage",
@@ -92,10 +95,12 @@ OUTPUT_FILES: List[str] = [
SET_LIST_OUTPUT,
VERSION_OUTPUT,
STANDARD_OUTPUT,
+ PIONEER_OUTPUT,
MODERN_OUTPUT,
VINTAGE_OUTPUT,
LEGACY_OUTPUT,
STANDARD_CARDS_OUTPUT,
+ PIONEER_CARDS_OUTPUT,
MODERN_CARDS_OUTPUT,
VINTAGE_CARDS_OUTPUT,
LEGACY_CARDS_OUTPUT,
diff --git a/mtgjson4/outputter.py b/mtgjson4/outputter.py
index 6107451..209656f 100644
--- a/mtgjson4/outputter.py
+++ b/mtgjson4/outputter.py
@@ -401,30 +401,38 @@ def create_set_centric_outputs(sets: Dict[str, Any]) -> None:
format_map = util.build_format_map(sets)
LOGGER.info(f"Format Map: {format_map}")
- # Standard.json
+ # Standard
write_to_file(
mtgjson4.STANDARD_OUTPUT,
__handle_compiling_sets(format_map["standard"], "Standard"),
)
- # Modern.json
+ # Pioneer
+ write_to_file(
+ mtgjson4.PIONEER_OUTPUT,
+ __handle_compiling_sets(format_map["pioneer"], "Pioneer"),
+ )
+
+ # Modern
write_to_file(
mtgjson4.MODERN_OUTPUT, __handle_compiling_sets(format_map["modern"], "Modern")
)
- # Legacy.json
+ # Legacy
write_to_file(
mtgjson4.LEGACY_OUTPUT, __handle_compiling_sets(format_map["legacy"], "Legacy")
)
- # Vintage.json
+ # Vintage
write_to_file(
mtgjson4.VINTAGE_OUTPUT, create_vintage_only_output(mtgjson4.OUTPUT_FILES)
)
- # Prices.json
+ # Prices
output_price_file(
- MtgjsonPrice(mtgjson4.COMPILED_OUTPUT_DIR.joinpath(mtgjson4.ALL_SETS_OUTPUT + ".json"))
+ MtgjsonPrice(
+ mtgjson4.COMPILED_OUTPUT_DIR.joinpath(mtgjson4.ALL_SETS_OUTPUT + ".json")
+ )
)
@@ -449,19 +457,22 @@ def create_card_centric_outputs(cards: Dict[str, Any]) -> None:
# Create format-specific subsets of AllCards.json
all_cards_subsets = create_all_cards_subsets(cards, SUPPORTED_FORMAT_OUTPUTS)
- # StandardCards.json
+ # StandardCards
write_to_file(mtgjson4.STANDARD_CARDS_OUTPUT, all_cards_subsets.get("standard"))
- # ModernCards.json
+ # PioneerCards
+ write_to_file(mtgjson4.PIONEER_CARDS_OUTPUT, all_cards_subsets.get("pioneer"))
+
+ # ModernCards
write_to_file(mtgjson4.MODERN_CARDS_OUTPUT, all_cards_subsets.get("modern"))
- # VintageCards.json
+ # VintageCards
write_to_file(mtgjson4.VINTAGE_CARDS_OUTPUT, all_cards_subsets.get("vintage"))
- # LegacyCards.json
+ # LegacyCards
write_to_file(mtgjson4.LEGACY_CARDS_OUTPUT, all_cards_subsets.get("legacy"))
- # PauperCards.json
+ # PauperCards
write_to_file(mtgjson4.PAUPER_CARDS_OUTPUT, all_cards_subsets.get("pauper"))
diff --git a/tox.ini b/tox.ini
index c95d5b3..6b4f036 100644
--- a/tox.ini
+++ b/tox.ini
@@ -12,7 +12,7 @@ deps = -r {toxinidir}/requirements.txt
description = Run black and edit all files in place
skip_install = True
deps = black
-commands = black mtgjson4/
+commands = black mtgjson4/ tests/
# Active Tests
[testenv:yapf-inplace]
@@ -24,12 +24,12 @@ commands = yapf --in-place --recursive --parallel mtgjson4/ tests/
[testenv:mypy]
description = mypy static type checking only
deps = mypy
-commands = mypy {posargs:mtgjson4/}
+commands = mypy {posargs:mtgjson4/ tests/}
[testenv:lint]
description = Run linting tools
deps = pylint
-commands = pylint mtgjson4/ --rcfile=.pylintrc
+commands = pylint mtgjson4/ tests/ --rcfile=.pylintrc
# Inactive Tests
[testenv:yapf-check]
@@ -47,7 +47,7 @@ commands = isort --check-only
[testenv:isort-inplace]
description = Sort imports
deps = isort
-commands = isort -rc mtgjson4/
+commands = isort -rc mtgjson4/ tests/
[testenv:unit]
description = Run unit tests with coverage and mypy type checking
|
mtgjson/mtgjson
|
b3d7bc4531bdca514dc1cf9f4ea5f6eac1104f89
|
diff --git a/tests/mtgjson4/test_format.py b/tests/mtgjson4/test_format.py
index e0eb474..fb87c54 100644
--- a/tests/mtgjson4/test_format.py
+++ b/tests/mtgjson4/test_format.py
@@ -3,7 +3,14 @@ import pytest
from mtgjson4.util import build_format_map
-NULL_OUTPUT = {"standard": [], "modern": [], "legacy": [], "vintage": [], "pauper": []}
+NULL_OUTPUT = {
+ "standard": [],
+ "pioneer": [],
+ "modern": [],
+ "legacy": [],
+ "vintage": [],
+ "pauper": [],
+}
@pytest.mark.parametrize(
@@ -45,11 +52,15 @@ NULL_OUTPUT = {"standard": [], "modern": [], "legacy": [], "vintage": [], "paupe
},
]
},
+ "TS5": {
+ "cards": [{"legalities": {"standard": "Legal", "pioneer": "Legal"}}]
+ },
},
{
**NULL_OUTPUT,
**{
- "standard": ["TS1", "TS2", "TS4"],
+ "standard": ["TS1", "TS2", "TS4", "TS5"],
+ "pioneer": ["TS5"],
"modern": ["TS3", "TS4"],
"legacy": ["TS4"],
"vintage": ["TS4"],
|
Add Pioneer support
Pioneer.json and PioneerCards.json
|
0.0
|
b3d7bc4531bdca514dc1cf9f4ea5f6eac1104f89
|
[
"tests/mtgjson4/test_format.py::test_build_format_map[all_sets0-expected0]",
"tests/mtgjson4/test_format.py::test_build_format_map[all_sets1-expected1]"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-10-27 18:05:53+00:00
|
mit
| 4,072 |
|
mtgjson__mtgjson-470
|
diff --git a/.travis.yml b/.travis.yml
index 50bff20..7335254 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -15,5 +15,4 @@ script:
- PYTHONPATH=. pytest
- black --check --diff .
- pylint mtgjson4 tests
- - mypy .
- npx eclint check $(git ls-files)
diff --git a/mtgjson4/__init__.py b/mtgjson4/__init__.py
index 338757a..18949b7 100644
--- a/mtgjson4/__init__.py
+++ b/mtgjson4/__init__.py
@@ -49,7 +49,7 @@ CARD_MARKET_BUFFER: str = "10101"
ALL_CARDS_OUTPUT: str = "AllCards"
ALL_DECKS_DIR_OUTPUT: str = "AllDeckFiles"
ALL_SETS_DIR_OUTPUT: str = "AllSetFiles"
-ALL_SETS_OUTPUT: str = "AllSets"
+ALL_SETS_OUTPUT: str = "AllPrintings"
CARD_TYPES_OUTPUT: str = "CardTypes"
COMPILED_LIST_OUTPUT: str = "CompiledList"
DECK_LISTS_OUTPUT: str = "DeckLists"
@@ -59,12 +59,14 @@ REFERRAL_DB_OUTPUT: str = "ReferralMap"
SET_LIST_OUTPUT: str = "SetList"
VERSION_OUTPUT: str = "version"
-STANDARD_OUTPUT: str = "Standard"
-MODERN_OUTPUT: str = "Modern"
-VINTAGE_OUTPUT: str = "Vintage"
-LEGACY_OUTPUT: str = "Legacy"
+STANDARD_OUTPUT: str = "StandardPrintings"
+PIONEER_OUTPUT: str = "PioneerPrintings"
+MODERN_OUTPUT: str = "ModernPrintings"
+VINTAGE_OUTPUT: str = "VintagePrintings"
+LEGACY_OUTPUT: str = "LegacyPrintings"
STANDARD_CARDS_OUTPUT: str = "StandardCards"
+PIONEER_CARDS_OUTPUT: str = "PioneerCards"
MODERN_CARDS_OUTPUT: str = "ModernCards"
VINTAGE_CARDS_OUTPUT: str = "VintageCards"
LEGACY_CARDS_OUTPUT: str = "LegacyCards"
@@ -72,6 +74,7 @@ PAUPER_CARDS_OUTPUT: str = "PauperCards"
SUPPORTED_FORMAT_OUTPUTS: Set[str] = {
"standard",
+ "pioneer",
"modern",
"legacy",
"vintage",
@@ -92,10 +95,12 @@ OUTPUT_FILES: List[str] = [
SET_LIST_OUTPUT,
VERSION_OUTPUT,
STANDARD_OUTPUT,
+ PIONEER_OUTPUT,
MODERN_OUTPUT,
VINTAGE_OUTPUT,
LEGACY_OUTPUT,
STANDARD_CARDS_OUTPUT,
+ PIONEER_CARDS_OUTPUT,
MODERN_CARDS_OUTPUT,
VINTAGE_CARDS_OUTPUT,
LEGACY_CARDS_OUTPUT,
diff --git a/mtgjson4/outputter.py b/mtgjson4/outputter.py
index 6107451..209656f 100644
--- a/mtgjson4/outputter.py
+++ b/mtgjson4/outputter.py
@@ -401,30 +401,38 @@ def create_set_centric_outputs(sets: Dict[str, Any]) -> None:
format_map = util.build_format_map(sets)
LOGGER.info(f"Format Map: {format_map}")
- # Standard.json
+ # Standard
write_to_file(
mtgjson4.STANDARD_OUTPUT,
__handle_compiling_sets(format_map["standard"], "Standard"),
)
- # Modern.json
+ # Pioneer
+ write_to_file(
+ mtgjson4.PIONEER_OUTPUT,
+ __handle_compiling_sets(format_map["pioneer"], "Pioneer"),
+ )
+
+ # Modern
write_to_file(
mtgjson4.MODERN_OUTPUT, __handle_compiling_sets(format_map["modern"], "Modern")
)
- # Legacy.json
+ # Legacy
write_to_file(
mtgjson4.LEGACY_OUTPUT, __handle_compiling_sets(format_map["legacy"], "Legacy")
)
- # Vintage.json
+ # Vintage
write_to_file(
mtgjson4.VINTAGE_OUTPUT, create_vintage_only_output(mtgjson4.OUTPUT_FILES)
)
- # Prices.json
+ # Prices
output_price_file(
- MtgjsonPrice(mtgjson4.COMPILED_OUTPUT_DIR.joinpath(mtgjson4.ALL_SETS_OUTPUT + ".json"))
+ MtgjsonPrice(
+ mtgjson4.COMPILED_OUTPUT_DIR.joinpath(mtgjson4.ALL_SETS_OUTPUT + ".json")
+ )
)
@@ -449,19 +457,22 @@ def create_card_centric_outputs(cards: Dict[str, Any]) -> None:
# Create format-specific subsets of AllCards.json
all_cards_subsets = create_all_cards_subsets(cards, SUPPORTED_FORMAT_OUTPUTS)
- # StandardCards.json
+ # StandardCards
write_to_file(mtgjson4.STANDARD_CARDS_OUTPUT, all_cards_subsets.get("standard"))
- # ModernCards.json
+ # PioneerCards
+ write_to_file(mtgjson4.PIONEER_CARDS_OUTPUT, all_cards_subsets.get("pioneer"))
+
+ # ModernCards
write_to_file(mtgjson4.MODERN_CARDS_OUTPUT, all_cards_subsets.get("modern"))
- # VintageCards.json
+ # VintageCards
write_to_file(mtgjson4.VINTAGE_CARDS_OUTPUT, all_cards_subsets.get("vintage"))
- # LegacyCards.json
+ # LegacyCards
write_to_file(mtgjson4.LEGACY_CARDS_OUTPUT, all_cards_subsets.get("legacy"))
- # PauperCards.json
+ # PauperCards
write_to_file(mtgjson4.PAUPER_CARDS_OUTPUT, all_cards_subsets.get("pauper"))
diff --git a/mtgjson4/provider/wizards.py b/mtgjson4/provider/wizards.py
index 0acde8d..31733c7 100644
--- a/mtgjson4/provider/wizards.py
+++ b/mtgjson4/provider/wizards.py
@@ -104,12 +104,14 @@ def get_ability_words(comp_rules: str) -> List[str]:
for line in comp_rules.split("\r\r"):
if "Ability words" in line:
# Isolate all of the ability words, capitalize the words,
- # and remove the . from the end of the string
line = unidecode.unidecode(
- line.split("The ability words are")[1].strip()[:-1]
- )
+ line.split("The ability words are")[1].strip()
+ ).split("\r\n")[0]
+
result = [x.strip().lower() for x in line.split(",")]
- result[-1] = result[-1][4:] # Address the "and" bit of the last element
+
+ # Address the "and" bit of the last element, and the period
+ result[-1] = result[-1][4:-1]
return result
return []
@@ -130,7 +132,7 @@ def parse_comp_internal(
comp_rules = comp_rules.split(top_delim)[2].split(bottom_delim)[0]
# Windows line endings... yuck
- valid_line_segments = comp_rules.split("\r\r")
+ valid_line_segments = comp_rules.split("\r\n")
# XXX.1 is just a description of what rule XXX includes.
# XXX.2 starts the action for _most_ sections
@@ -140,10 +142,10 @@ def parse_comp_internal(
for line in valid_line_segments:
# Keywords are defined as "XXX.# Name"
# We will want to ignore subset lines like "XXX.#a"
- if f"{rule_start}.{keyword_index}" in line:
+ regex_search = re.findall(f"{rule_start}.{keyword_index}. (.*)", line)
+ if regex_search:
# Break the line into "Rule Number | Keyword"
- keyword = line.split(" ", 1)[1].lower()
- return_list.append(keyword)
+ return_list.append(regex_search[0].lower())
# Get next keyword, so we can pass over the non-relevant lines
keyword_index += 1
diff --git a/tox.ini b/tox.ini
index c95d5b3..6b4f036 100644
--- a/tox.ini
+++ b/tox.ini
@@ -12,7 +12,7 @@ deps = -r {toxinidir}/requirements.txt
description = Run black and edit all files in place
skip_install = True
deps = black
-commands = black mtgjson4/
+commands = black mtgjson4/ tests/
# Active Tests
[testenv:yapf-inplace]
@@ -24,12 +24,12 @@ commands = yapf --in-place --recursive --parallel mtgjson4/ tests/
[testenv:mypy]
description = mypy static type checking only
deps = mypy
-commands = mypy {posargs:mtgjson4/}
+commands = mypy {posargs:mtgjson4/ tests/}
[testenv:lint]
description = Run linting tools
deps = pylint
-commands = pylint mtgjson4/ --rcfile=.pylintrc
+commands = pylint mtgjson4/ tests/ --rcfile=.pylintrc
# Inactive Tests
[testenv:yapf-check]
@@ -47,7 +47,7 @@ commands = isort --check-only
[testenv:isort-inplace]
description = Sort imports
deps = isort
-commands = isort -rc mtgjson4/
+commands = isort -rc mtgjson4/ tests/
[testenv:unit]
description = Run unit tests with coverage and mypy type checking
|
mtgjson/mtgjson
|
b3d7bc4531bdca514dc1cf9f4ea5f6eac1104f89
|
diff --git a/tests/mtgjson4/test_format.py b/tests/mtgjson4/test_format.py
index e0eb474..fb87c54 100644
--- a/tests/mtgjson4/test_format.py
+++ b/tests/mtgjson4/test_format.py
@@ -3,7 +3,14 @@ import pytest
from mtgjson4.util import build_format_map
-NULL_OUTPUT = {"standard": [], "modern": [], "legacy": [], "vintage": [], "pauper": []}
+NULL_OUTPUT = {
+ "standard": [],
+ "pioneer": [],
+ "modern": [],
+ "legacy": [],
+ "vintage": [],
+ "pauper": [],
+}
@pytest.mark.parametrize(
@@ -45,11 +52,15 @@ NULL_OUTPUT = {"standard": [], "modern": [], "legacy": [], "vintage": [], "paupe
},
]
},
+ "TS5": {
+ "cards": [{"legalities": {"standard": "Legal", "pioneer": "Legal"}}]
+ },
},
{
**NULL_OUTPUT,
**{
- "standard": ["TS1", "TS2", "TS4"],
+ "standard": ["TS1", "TS2", "TS4", "TS5"],
+ "pioneer": ["TS5"],
"modern": ["TS3", "TS4"],
"legacy": ["TS4"],
"vintage": ["TS4"],
|
Keyword is broken #2
Follow-up to "The game continue!" (#434).
Data seems broken after the 38th abilityWord.
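A rough sketch of the core of the patched `get_ability_words`, run against made-up rules text (the excerpt is illustrative, not the real Comprehensive Rules). Splitting at the `\r\n` line break keeps the following paragraph out of the ability-word list, which is the failure described here:

```python
import unidecode  # third-party: pip install Unidecode

comp_rules = (
    "Ability words\r\nThe ability words are adamant, addendum, "
    "and battalion.\r\nAn ability word has no rules meaning.\r\r"
)

line = unidecode.unidecode(
    comp_rules.split("The ability words are")[1].strip()
).split("\r\n")[0]             # stop at the line break, not the paragraph break
result = [x.strip().lower() for x in line.split(",")]
result[-1] = result[-1][4:-1]  # strip the leading "and " and the trailing "."
print(result)                  # ['adamant', 'addendum', 'battalion']
```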
|
0.0
|
b3d7bc4531bdca514dc1cf9f4ea5f6eac1104f89
|
[
"tests/mtgjson4/test_format.py::test_build_format_map[all_sets0-expected0]",
"tests/mtgjson4/test_format.py::test_build_format_map[all_sets1-expected1]"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-10-27 18:37:54+00:00
|
mit
| 4,073 |
|
mtik00__yamicache-9
|
diff --git a/yamicache/yamicache.py b/yamicache/yamicache.py
index 1fcde36..aa8d775 100644
--- a/yamicache/yamicache.py
+++ b/yamicache/yamicache.py
@@ -12,6 +12,7 @@ function calls.
from __future__ import print_function
import json
import time
+import inspect
import contextlib
import collections
from hashlib import sha224
@@ -227,9 +228,28 @@ class Cache(collections.MutableMapping):
key = cached_key
if not key:
- key = dict(kwargs)
+ key = {}
+ # We need to grab the default arguments. `inspect.getargspec()`
+ # returns the function argument names, and any defaults. The
+ # defaults are always the last args. For example:
+ # `args=['arg1', 'arg2'], defaults=(4,)` means that `arg2` has a
+ # default of 4.
+ fargs, _, _, defaults = inspect.getargspec(func)
+
+ # Load the defaults first, since they may not be in the calling
+ # spec.
+ if defaults:
+ key = dict(zip(fargs[-len(defaults):], defaults))
+
+ # Now load in the arguments.
+ key.update(kwargs)
key.update(dict(zip(func.__code__.co_varnames, args)))
+ # This next issue is that Python may re-order the keys when we go
+ # to repr them. This will cause invalid cache misses. We can fix
+ # this by recreating a dictionary with a 'known' algorithm.
+ key = repr(dict(sorted(key.items())))
+
return "{prefix}{name}{join}{formatted_key}".format(
join=self._key_join,
prefix=(self._prefix + self._key_join) if self._prefix else '',
|
mtik00/yamicache
|
abfc36588970293492bbe28e035dfc2dca497a7a
|
diff --git a/tests/test_default_args.py b/tests/test_default_args.py
new file mode 100644
index 0000000..68274fe
--- /dev/null
+++ b/tests/test_default_args.py
@@ -0,0 +1,34 @@
+from __future__ import print_function
+from pprint import pprint as pp
+from yamicache import Cache
+
+
+c = Cache(hashing=False)
+
+
[email protected]()
+def function1(argument, power=4, addition=0, division=2):
+ return argument ** power + addition / division
+
+
+def test_main():
+ '''use default args'''
+
+ # `function1` uses default arguments. These calls are all equivalent, so
+ # there should only be 1 item in cache.
+ function1(1)
+ function1(1, 4)
+ function1(1, 4, 0)
+ function1(1, 4, addition=0, division=2)
+
+ assert len(c) == 1
+
+ pp(c._data_store)
+
+
+def main():
+ test_main()
+
+
+if __name__ == '__main__':
+ main()
|
default arguments are not handled
* yamicache version: 0.5.0
* Python version: 2.7
* Operating System: Windows 10
### Description
This code produces two different cache entries. The first call should incorporate the default value; it doesn't, so the wrapped function is called twice.
```python
from yamicache import Cache
c = Cache()
@c.cached()
def function1(argument, power=4):
return argument ** power
function1(2)
function1(2, 4)
assert len(c) == 1
```
### What I Did
```
> python junk.py
Traceback (most recent call last):
File "junk.py", line 9, in <module>
assert len(c) == 1
AssertionError
```
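A standalone sketch of the keying strategy the fix adopts: fold the function's defaults into the key, then serialize it with a deterministic ordering so equivalent calls collide. The patch uses `inspect.getargspec`; `inspect.getfullargspec` below is its non-deprecated Python 3 equivalent:

```python
import inspect

def make_key(func, args, kwargs):
    spec = inspect.getfullargspec(func)
    key = {}
    if spec.defaults:  # defaults always map onto the last argument names
        key = dict(zip(spec.args[-len(spec.defaults):], spec.defaults))
    key.update(kwargs)                # explicit keyword arguments
    key.update(zip(spec.args, args))  # positional arguments
    return repr(dict(sorted(key.items())))  # sorted -> order-independent key

def function1(argument, power=4):
    return argument ** power

assert make_key(function1, (2,), {}) == make_key(function1, (2, 4), {})
assert make_key(function1, (2,), {}) == make_key(function1, (2,), {"power": 4})
```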
|
0.0
|
abfc36588970293492bbe28e035dfc2dca497a7a
|
[
"tests/test_default_args.py::test_main"
] |
[] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2018-04-11 05:41:13+00:00
|
mit
| 4,074 |
|
multiply-org__multiply-ui-13
|
diff --git a/multiply_ui/server/cli.py b/multiply_ui/server/cli.py
index 0e45d2e..5be1f02 100644
--- a/multiply_ui/server/cli.py
+++ b/multiply_ui/server/cli.py
@@ -1,20 +1,32 @@
-import logging
import signal
import sys
-import tornado.ioloop
-import tornado.log
-import tornado.web
+import click
-from multiply_ui.server.app import new_application
-from multiply_ui.server.context import ServiceContext
+DEFAULT_SERVER_PORT = 9090
+DEFAULT_SERVER_ADDRESS = '0.0.0.0'
-PORT = 9090
-LOGGER = logging.getLogger('multiply_ui')
[email protected]('mui-server')
[email protected]('--port', '-p',
+ type=int, default=DEFAULT_SERVER_PORT,
+ help=f'Set service port number. Defaults to {DEFAULT_SERVER_PORT}.')
[email protected]('--address', '-a',
+ type=str, default=DEFAULT_SERVER_ADDRESS,
+ help=f'Set service IP address. Defaults to "{DEFAULT_SERVER_ADDRESS}".')
+def mui_server(port, address):
+ """
+ Starts a service which exposes a RESTful API to the Multiply UI.
+ """
+ from multiply_ui.server.app import new_application
+ from multiply_ui.server.config import LOGGER
+ from multiply_ui.server.context import ServiceContext
+
+ import tornado.ioloop
+ import tornado.log
+ import tornado.web
-def main():
def shut_down():
LOGGER.info(f"Shutting down...")
tornado.ioloop.IOLoop.current().stop()
@@ -25,17 +37,25 @@ def main():
LOGGER.warning(f'Caught signal {sig}')
tornado.ioloop.IOLoop.current().add_callback_from_signal(shut_down)
- signal.signal(signal.SIGINT, sig_handler)
- signal.signal(signal.SIGTERM, sig_handler)
+ def register_termination_handlers():
+ signal.signal(signal.SIGINT, sig_handler)
+ signal.signal(signal.SIGTERM, sig_handler)
tornado.log.enable_pretty_logging()
+
application = new_application()
application._ctx = ServiceContext()
- application.listen(PORT)
+ application.listen(port, address)
+
+ tornado.ioloop.IOLoop.current().add_callback_from_signal(register_termination_handlers)
- LOGGER.info(f"Server listening on port {PORT}...")
+ LOGGER.info(f"Server listening on port {port} at address {address}...")
tornado.ioloop.IOLoop.current().start()
-if __name__ == "__main__":
+def main(args=None):
+ mui_server.main(args=args)
+
+
+if __name__ == '__main__':
main()
|
multiply-org/multiply-ui
|
98b7018db9022c62e816aba3a8781ebc4e2418a7
|
diff --git a/test/server/test_cli.py b/test/server/test_cli.py
new file mode 100644
index 0000000..2abab7e
--- /dev/null
+++ b/test/server/test_cli.py
@@ -0,0 +1,28 @@
+import unittest
+
+import click.testing
+
+from multiply_ui.server.cli import mui_server
+
+
+class CliTest(unittest.TestCase):
+ @classmethod
+ def invoke_cli(cls, *args):
+ runner = click.testing.CliRunner()
+ return runner.invoke(mui_server, args, catch_exceptions=False)
+
+ def test_help_option(self):
+ result = self.invoke_cli('--help')
+ self.assertEqual(0, result.exit_code)
+ self.assertEqual(
+ (
+ 'Usage: mui-server [OPTIONS]\n'
+ '\n'
+ ' Starts a service which exposes a RESTful API to the Multiply UI.\n'
+ '\n'
+ 'Options:\n'
+ ' -p, --port INTEGER Set service port number. Defaults to 9090.\n'
+ ' -a, --address TEXT Set service IP address. Defaults to "0.0.0.0".\n'
+ ' --help Show this message and exit.\n'
+ ),
+ result.stdout)
diff --git a/test/server/test_controller.py b/test/server/test_controller.py
index d8fb105..5562556 100644
--- a/test/server/test_controller.py
+++ b/test/server/test_controller.py
@@ -1,9 +1,10 @@
-import multiply_ui.server.controller as controller
import json
-import multiply_ui.server.context as context
import os
import unittest
+from multiply_ui.server import controller
+
+
class ControllerTest(unittest.TestCase):
def test_get_parameters(self):
@@ -13,6 +14,7 @@ class ControllerTest(unittest.TestCase):
@unittest.skipIf(os.environ.get('MULTIPLY_DISABLE_WEB_TESTS') == '1', 'MULTIPLY_DISABLE_WEB_TESTS = 1')
def test_get_inputs(self):
+ from multiply_ui.server import context
with open(os.path.join(os.path.dirname(__file__), '..', 'test_data', 'example_request_parameters.json')) as fp:
json_text = fp.read()
parameters = json.loads(json_text)
|
Add server options (see the sketch after this list):
- [ ] port
- [ ] address
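A trimmed sketch of the click interface the patch introduces, with the tornado wiring replaced by an echo so the option handling can be tried in isolation:

```python
import click

DEFAULT_SERVER_PORT = 9090
DEFAULT_SERVER_ADDRESS = '0.0.0.0'

@click.command('mui-server')
@click.option('--port', '-p', type=int, default=DEFAULT_SERVER_PORT,
              help=f'Set service port number. Defaults to {DEFAULT_SERVER_PORT}.')
@click.option('--address', '-a', type=str, default=DEFAULT_SERVER_ADDRESS,
              help=f'Set service IP address. Defaults to "{DEFAULT_SERVER_ADDRESS}".')
def mui_server(port, address):
    """Starts a service which exposes a RESTful API to the Multiply UI."""
    click.echo(f'Would listen on {address}:{port}')

if __name__ == '__main__':
    mui_server()
```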
|
0.0
|
98b7018db9022c62e816aba3a8781ebc4e2418a7
|
[
"test/server/test_cli.py::CliTest::test_help_option",
"test/server/test_controller.py::ControllerTest::test_get_parameters"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-05-24 15:57:48+00:00
|
mit
| 4,075 |
|
munichpavel__clovek-ne-jezi-se-14
|
diff --git a/clovek_ne_jezi_se/agent.py b/clovek_ne_jezi_se/agent.py
index 715db04..49cd9ea 100644
--- a/clovek_ne_jezi_se/agent.py
+++ b/clovek_ne_jezi_se/agent.py
@@ -1,7 +1,7 @@
-from random import randint
+
import attr
-from .consts import EMPTY_SYMBOL, NR_OF_DICE_FACES
+from .consts import EMPTY_SYMBOL
@attr.s
@@ -37,22 +37,6 @@ class Player:
def get_prehome_position(self):
return self._prehome_position
- def roll(self):
- res = Player._get_roll_value()
- if Player.roll_is_valid(res):
- print("Player {} rolls a {}".format(self.symbol, res))
- return res
- else:
- raise ValueError('Roll value must be between 1 and 6')
-
- @staticmethod
- def _get_roll_value():
- return randint(1, NR_OF_DICE_FACES)
-
- @staticmethod
- def roll_is_valid(roll_value):
- return 1 <= roll_value <= NR_OF_DICE_FACES
-
class FurthestAlongAgent(Player):
"""Agent who always moves the game piece furthest along"""
diff --git a/clovek_ne_jezi_se/game.py b/clovek_ne_jezi_se/game.py
index 25b3a4e..ad7cb84 100644
--- a/clovek_ne_jezi_se/game.py
+++ b/clovek_ne_jezi_se/game.py
@@ -1,5 +1,6 @@
"""Clovek ne jezi se game board and plays"""
from math import floor
+from random import randint
import attr
@@ -232,7 +233,10 @@ class Game:
self._waiting_count[private_symbol] = count
def initialize_spaces_array(self):
- res = [self.board.get_private_symbol(symbol) for symbol in self.board.spaces]
+ res = [
+ self.board.get_private_symbol(symbol)
+ for symbol in self.board.spaces
+ ]
self._spaces_array = np.array(res)
def get_spaces_array(self):
@@ -282,6 +286,20 @@ class Game:
"""Convenience function. TODO: Deprecate or make private?"""
self._spaces_array[idx] = self.board.get_private_symbol(symbol)
+ def roll(self):
+ res = self._get_roll_value()
+ if self.roll_is_valid(res):
+ return res
+ else:
+ raise ValueError(f'Invalid roll value: {res}')
+
+ def _get_roll_value(self):
+ return randint(1, NR_OF_DICE_FACES)
+
+ def roll_is_valid(self, value):
+ return 1 <= value and value <= NR_OF_DICE_FACES
+
+ # Game moves
def get_moves_of(self, symbol, kind, roll):
res = []
starts = self.get_move_starts(symbol, kind)
|
munichpavel/clovek-ne-jezi-se
|
5c6ec835076b946987ce6c28390ee5414bc86659
|
diff --git a/tests/test_clovek.py b/tests/test_clovek.py
index efd5754..329551d 100644
--- a/tests/test_clovek.py
+++ b/tests/test_clovek.py
@@ -25,14 +25,6 @@ class TestPlayer:
with pytest.raises(TypeError):
Player(1, number_of_players=4)
- def test_dice_roll_monkeypatch(self, monkeypatch):
-
- monkeypatch.setattr(self.player, 'roll', lambda: monkey_roll(1))
- assert self.player.roll_is_valid(self.player.roll())
-
- monkeypatch.setattr(self.player, 'roll', lambda: monkey_roll(0))
- assert ~self.player.roll_is_valid(self.player.roll())
-
@pytest.fixture
def small_initial_board():
@@ -73,7 +65,7 @@ class TestBoard:
) == symbol
-class TestGame:
+class TestGameSetup:
players = []
for symbol in ['1', '2', '3', '4']:
player = Player(symbol=symbol, number_of_players=4)
@@ -155,7 +147,7 @@ class TestGame:
('4', 30)
]
)
- def test_player_normal_start(
+ def test_player_normal_leave_waiting(
self, symbol, expected_position
):
# Normal board
@@ -227,6 +219,13 @@ class TestGame:
expected
)
+ def test_dice_roll_monkeypatch(self, monkeypatch):
+ monkeypatch.setattr(self.mini_game, 'roll', lambda: monkey_roll(1))
+ assert self.mini_game.roll_is_valid(self.mini_game.roll())
+
+ monkeypatch.setattr(self.mini_game, 'roll', lambda: monkey_roll(0))
+ assert ~self.mini_game.roll_is_valid(self.mini_game.roll())
+
class TestMoves:
def test_validators(self):
@@ -243,7 +242,7 @@ class TestMoves:
Move('1', 'leave_waiting', roll=NR_OF_DICE_FACES, start=0)
-class TestGameAction:
+class TestGameMoves:
symbols = ['1', '2', '3', '4']
players = []
for symbol in symbols:
|
Move dice roll from agent / player to game
Not sure best place to have dice roll, but there is no reason to have it along with agent definitions.
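
A runnable distillation of the move (a standalone stand-in for the real `Game`, with the roll logic taken from the patch above), showing why a game-level die simplifies tests:

```python
from random import randint

NR_OF_DICE_FACES = 6

class Game:
    """Stand-in for the real Game class; roll logic mirrors the patch."""
    def roll(self):
        res = self._get_roll_value()
        if self.roll_is_valid(res):
            return res
        raise ValueError(f'Invalid roll value: {res}')

    def _get_roll_value(self):
        return randint(1, NR_OF_DICE_FACES)

    def roll_is_valid(self, value):
        return 1 <= value <= NR_OF_DICE_FACES

game = Game()
assert game.roll_is_valid(game.roll())

# For deterministic tests, only one object needs patching now:
game._get_roll_value = lambda: 6
assert game.roll() == 6
```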
|
0.0
|
5c6ec835076b946987ce6c28390ee5414bc86659
|
[
"tests/test_clovek.py::TestGameSetup::test_dice_roll_monkeypatch"
] |
[
"tests/test_clovek.py::TestPlayer::test_home",
"tests/test_clovek.py::TestBoard::test_spaces_setup",
"tests/test_clovek.py::TestBoard::test_homes_setup",
"tests/test_clovek.py::TestBoard::test_player_representation",
"tests/test_clovek.py::TestGameSetup::test_game_setup",
"tests/test_clovek.py::TestGameSetup::test_initializtion_errors",
"tests/test_clovek.py::TestGameSetup::test_wins",
"tests/test_clovek.py::TestGameSetup::test_player_mini_start[1-0]",
"tests/test_clovek.py::TestGameSetup::test_player_mini_start[2-4]",
"tests/test_clovek.py::TestGameSetup::test_player_mini_start[3-8]",
"tests/test_clovek.py::TestGameSetup::test_player_mini_start[4-12]",
"tests/test_clovek.py::TestGameSetup::test_player_normal_leave_waiting[1-0]",
"tests/test_clovek.py::TestGameSetup::test_player_normal_leave_waiting[2-10]",
"tests/test_clovek.py::TestGameSetup::test_player_normal_leave_waiting[3-20]",
"tests/test_clovek.py::TestGameSetup::test_player_normal_leave_waiting[4-30]",
"tests/test_clovek.py::TestGameSetup::test_player_mini_pre_home_position[1-15]",
"tests/test_clovek.py::TestGameSetup::test_player_mini_pre_home_position[2-3]",
"tests/test_clovek.py::TestGameSetup::test_player_mini_pre_home_position[3-7]",
"tests/test_clovek.py::TestGameSetup::test_player_mini_pre_home_position[4-11]",
"tests/test_clovek.py::TestGameSetup::test_player_normal_pre_home_position[1-39]",
"tests/test_clovek.py::TestGameSetup::test_player_normal_pre_home_position[2-9]",
"tests/test_clovek.py::TestGameSetup::test_player_normal_pre_home_position[3-19]",
"tests/test_clovek.py::TestGameSetup::test_player_normal_pre_home_position[4-29]",
"tests/test_clovek.py::TestGameSetup::test_get_initial_arrays[get_waiting_count_array-expected0]",
"tests/test_clovek.py::TestGameSetup::test_get_initial_arrays[get_spaces_array-expected1]",
"tests/test_clovek.py::TestGameSetup::test_get_initial_arrays[get_homes_array-expected2]",
"tests/test_clovek.py::TestGameSetup::test_assignments[0-0]",
"tests/test_clovek.py::TestGameSetup::test_assignments[1-0]",
"tests/test_clovek.py::TestGameSetup::test_assignments[3-1]",
"tests/test_clovek.py::TestMoves::test_validators",
"tests/test_clovek.py::TestGameMoves::test_get_symbol_waiting_count",
"tests/test_clovek.py::TestGameMoves::test_get_symbol_space_position_array",
"tests/test_clovek.py::TestGameMoves::test_get_symbol_home_array",
"tests/test_clovek.py::TestGameMoves::test_get_home_positions[2_0]",
"tests/test_clovek.py::TestGameMoves::test_get_home_positions[2_1]",
"tests/test_clovek.py::TestGameMoves::test_get_home_positions[3]",
"tests/test_clovek.py::TestGameMoves::test_get_space_occupier[0-1]",
"tests/test_clovek.py::TestGameMoves::test_get_space_occupier[1--]",
"tests/test_clovek.py::TestGameMoves::test_get_zeroed_position[1-0-0]",
"tests/test_clovek.py::TestGameMoves::test_get_zeroed_position[1-15-15]",
"tests/test_clovek.py::TestGameMoves::test_get_zeroed_position[2-4-0]",
"tests/test_clovek.py::TestGameMoves::test_get_zeroed_position[2-3-15]",
"tests/test_clovek.py::TestGameMoves::test_get_space_to_home_position[1-1-15-0]",
"tests/test_clovek.py::TestGameMoves::test_get_space_to_home_position[1-4-15-3]",
"tests/test_clovek.py::TestGameMoves::test_move_factory[arg_dict0-expected0]",
"tests/test_clovek.py::TestGameMoves::test_move_factory[arg_dict1-expected1]",
"tests/test_clovek.py::TestGameMoves::test_move_factory[arg_dict2-expected2]",
"tests/test_clovek.py::TestGameMoves::test_move_factory[arg_dict3-expected3]",
"tests/test_clovek.py::TestGameMoves::test_move_factory[arg_dict4-expected4]",
"tests/test_clovek.py::TestGameMoves::test_move_factory_errors[1-leave_waiting-6-None]",
"tests/test_clovek.py::TestGameMoves::test_move_factory_errors[1-leave_waiting-1-None]",
"tests/test_clovek.py::TestGameMoves::test_move_factory_errors[2-leave_waiting-6-None]",
"tests/test_clovek.py::TestGameMoves::test_move_factory_errors[1-space_advance-0-1]",
"tests/test_clovek.py::TestGameMoves::test_move_factory_errors[1-space_advance-1-3]",
"tests/test_clovek.py::TestGameMoves::test_move_factory_errors[1-space_advance-1-15]",
"tests/test_clovek.py::TestGameMoves::test_move_factory_errors[1-space_to_home-5-15]",
"tests/test_clovek.py::TestGameMoves::test_move_factory_errors[3-space_to_home-1-7]",
"tests/test_clovek.py::TestGameMoves::test_move_factory_errors[4-home_advance-1-1]",
"tests/test_clovek.py::TestGameMoves::test_move_factory_errors[4-home_advance-4-0]",
"tests/test_clovek.py::TestGameMoves::test_get_moves_of[1-leave_waiting-6-expected0]",
"tests/test_clovek.py::TestGameMoves::test_get_moves_of[2-leave_waiting-6-expected1]",
"tests/test_clovek.py::TestGameMoves::test_get_moves_of[2-space_advance-1-expected2]",
"tests/test_clovek.py::TestGameMoves::test_get_moves_of[1-space_advance-1-expected3]",
"tests/test_clovek.py::TestGameMoves::test_get_moves_of[3-space_to_home-1-expected4]",
"tests/test_clovek.py::TestGameMoves::test_get_moves_of[1-space_to_home-1-expected5]",
"tests/test_clovek.py::TestGameMoves::test_get_moves_of[4-home_advance-1-expected6]",
"tests/test_clovek.py::TestGameMoves::test_get_moves_of[4-home_advance-2-expected7]",
"tests/test_clovek.py::TestGameMoves::test_game_state_is_equal",
"tests/test_clovek.py::TestGameMoves::test_do",
"tests/test_clovek.py::TestGameMoves::test_update_board_spaces",
"tests/test_clovek.py::TestGameMoves::test_update_board_homes"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-10-01 16:58:08+00:00
|
mit
| 4,076 |
|
munichpavel__clovek-ne-jezi-se-33
|
diff --git a/clovek_ne_jezi_se/agents.py b/clovek_ne_jezi_se/agents.py
index 33be7c5..8c011d8 100644
--- a/clovek_ne_jezi_se/agents.py
+++ b/clovek_ne_jezi_se/agents.py
@@ -32,5 +32,4 @@ class HumanPlayer(Player):
chosen_move_idx = int(input('Enter chosen move index: '))
res = allowed_moves[chosen_move_idx]
- print(f'\nYou selected move {res}')
return res
diff --git a/clovek_ne_jezi_se/game_state.py b/clovek_ne_jezi_se/game_state.py
index 740270f..7b6bcd7 100644
--- a/clovek_ne_jezi_se/game_state.py
+++ b/clovek_ne_jezi_se/game_state.py
@@ -486,16 +486,19 @@ class GameState:
advance_edges = list(nx.dfs_edges(
player_subgraph_view, source=from_node_name, depth_limit=roll+1
))
- to_node_name = advance_edges[roll-1][1]
- to_node = self._graph.nodes[to_node_name]
- to_space = BoardSpace(
- kind=to_node['kind'],
- idx=to_node['idx'],
- occupied_by=to_node['occupied_by'],
- allowed_occupants=to_node['allowed_occupants']
- )
+ if roll > len(advance_edges):
+ return None
+ else:
+ to_node_name = advance_edges[roll-1][1]
+ to_node = self._graph.nodes[to_node_name]
+ to_space = BoardSpace(
+ kind=to_node['kind'],
+ idx=to_node['idx'],
+ occupied_by=to_node['occupied_by'],
+ allowed_occupants=to_node['allowed_occupants']
+ )
- return to_space
+ return to_space
def _get_player_subgraph_query_paramses(
self, player_name: str
|
munichpavel/clovek-ne-jezi-se
|
7e115382bc6b9b56d0173bc5a1645c4158ee84f3
|
diff --git a/tests/test_state.py b/tests/test_state.py
index e65ddb0..60257e1 100644
--- a/tests/test_state.py
+++ b/tests/test_state.py
@@ -199,6 +199,7 @@ class TestGameState:
player_prehome_indices[player_name] = \
player_subgraph.nodes[player_prehome_node_name]['idx']
+ # Move tests
@pytest.mark.parametrize(
"roll,from_space,expected_to_space_kwargs",
[
@@ -209,6 +210,21 @@ class TestGameState:
),
None
),
+ (
+ 1, BoardSpace(
+ kind='home', idx=3, occupied_by='red',
+ allowed_occupants=['red', EMPTY_SYMBOL]
+ ),
+ None
+ ),
+ (
+ 5, BoardSpace(
+ kind='main', idx=player_prehome_indices['red'],
+ occupied_by='red',
+ allowed_occupants=player_names + [EMPTY_SYMBOL]
+ ),
+ None
+ ),
(
6, BoardSpace(
kind='waiting', idx=0, occupied_by='red',
@@ -271,34 +287,6 @@ class TestGameState:
from_space=from_space, to_space=expected_to_space
)
- @pytest.mark.parametrize(
- 'roll,from_space,Error',
- [
- (
- 1, BoardSpace(
- kind='home', idx=3, occupied_by='red',
- allowed_occupants=['red', EMPTY_SYMBOL]
- ),
- IndexError
- ),
- (
- 5, BoardSpace(
- kind='main', idx=player_prehome_indices['red'],
- occupied_by='red',
- allowed_occupants=player_names + [EMPTY_SYMBOL]
- ),
- IndexError
- ),
-
- ]
- )
- def test_move_factory_exceptions(
- self, roll, from_space, Error
- ):
- with pytest.raises(Error):
- self.game_state.move_factory(from_space, roll)
-
- # Move tests
@pytest.mark.parametrize(
'roll,from_space,to_space,post_do_from_space,post_do_to_space',
[
|
IndexError during game play
My best guess is that the computed `advance_edges` below is empty. We need some logic to return `None` for `to_space` in this case. It would also be good to have play logging for situations like this, e.g. #16
```
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-34-a3bf77e003f6> in <module>
----> 1 client.play()
2 client.get_game_state().draw(color_map=color_map)
~/delo/projects/clovek-ne-jezi-se/clovek_ne_jezi_se/client.py in play(self)
52
53 moves = self._game_state.get_player_moves(
---> 54 roll_value, current_player.name
55 )
56 print(f'Player {current_player.name} rolls a {roll_value}')
~/delo/projects/clovek-ne-jezi-se/clovek_ne_jezi_se/game_state.py in get_player_moves(self, roll, player_name)
372 )
373 primary_moves = self._get_primary_moves(
--> 374 roll, player_occupied_node_names
375 )
376
~/delo/projects/clovek-ne-jezi-se/clovek_ne_jezi_se/game_state.py in _get_primary_moves(self, roll, node_names)
402 )
403 primary_move_candidate = self.move_factory(
--> 404 from_space=board_space, roll=roll
405 )
406 if primary_move_candidate.to_space is not None:
~/delo/projects/clovek-ne-jezi-se/clovek_ne_jezi_se/game_state.py in move_factory(self, from_space, roll)
447 """
448
--> 449 to_space = self._get_to_space(from_space, roll)
450 return MoveContainer(
451 from_space=from_space,
~/delo/projects/clovek-ne-jezi-se/clovek_ne_jezi_se/game_state.py in _get_to_space(self, from_space, roll)
487 player_subgraph_view, source=from_node_name, depth_limit=roll+1
488 ))
--> 489 to_node_name = advance_edges[roll-1][1]
490 to_node = self._graph.nodes[to_node_name]
491 to_space = BoardSpace(
IndexError: list index out of range
```
Game state drawing of error:

|
0.0
|
7e115382bc6b9b56d0173bc5a1645c4158ee84f3
|
[
"tests/test_state.py::TestGameState::test_move_factory[1-from_space1-None]",
"tests/test_state.py::TestGameState::test_move_factory[5-from_space2-None]"
] |
[
"tests/test_state.py::test_board_space_errors",
"tests/test_state.py::TestGameState::test_game_state_initialization_errors[init_args_dict0-ValueError]",
"tests/test_state.py::TestGameState::test_game_state_initialization_errors[init_args_dict1-TypeError]",
"tests/test_state.py::TestGameState::test_game_state_initialization_errors[init_args_dict2-ValueError]",
"tests/test_state.py::TestGameState::test_game_state_initialization_errors[init_args_dict3-TypeError]",
"tests/test_state.py::TestGameState::test_game_state_initialization_errors[init_args_dict4-ValueError]",
"tests/test_state.py::TestGameState::test_game_state_initialization_errors[init_args_dict5-TypeError]",
"tests/test_state.py::TestGameState::test_get_main_entry_index[red-0]",
"tests/test_state.py::TestGameState::test_get_main_entry_index[blue-4]",
"tests/test_state.py::TestGameState::test_get_main_entry_index[green-8]",
"tests/test_state.py::TestGameState::test_get_main_entry_index[yellow-12]",
"tests/test_state.py::TestGameState::test_get_main_board_space[0]",
"tests/test_state.py::TestGameState::test_get_main_board_space[1]",
"tests/test_state.py::TestGameState::test_get_main_board_space[2]",
"tests/test_state.py::TestGameState::test_get_main_board_space[3]",
"tests/test_state.py::TestGameState::test_get_main_board_space[4]",
"tests/test_state.py::TestGameState::test_get_main_board_space[5]",
"tests/test_state.py::TestGameState::test_get_main_board_space[6]",
"tests/test_state.py::TestGameState::test_get_main_board_space[7]",
"tests/test_state.py::TestGameState::test_get_main_board_space[8]",
"tests/test_state.py::TestGameState::test_get_main_board_space[9]",
"tests/test_state.py::TestGameState::test_get_main_board_space[10]",
"tests/test_state.py::TestGameState::test_get_main_board_space[11]",
"tests/test_state.py::TestGameState::test_get_main_board_space[12]",
"tests/test_state.py::TestGameState::test_get_main_board_space[13]",
"tests/test_state.py::TestGameState::test_get_main_board_space[14]",
"tests/test_state.py::TestGameState::test_get_main_board_space[15]",
"tests/test_state.py::TestGameState::test_get_board_space_returns_none[yadda-0]",
"tests/test_state.py::TestGameState::test_get_board_space_returns_none[main-42]",
"tests/test_state.py::TestGameState::test_get_waiting_space[0-red]",
"tests/test_state.py::TestGameState::test_get_waiting_space[0-blue]",
"tests/test_state.py::TestGameState::test_get_waiting_space[0-green]",
"tests/test_state.py::TestGameState::test_get_waiting_space[0-yellow]",
"tests/test_state.py::TestGameState::test_get_waiting_space[1-red]",
"tests/test_state.py::TestGameState::test_get_waiting_space[1-blue]",
"tests/test_state.py::TestGameState::test_get_waiting_space[1-green]",
"tests/test_state.py::TestGameState::test_get_waiting_space[1-yellow]",
"tests/test_state.py::TestGameState::test_get_waiting_space[2-red]",
"tests/test_state.py::TestGameState::test_get_waiting_space[2-blue]",
"tests/test_state.py::TestGameState::test_get_waiting_space[2-green]",
"tests/test_state.py::TestGameState::test_get_waiting_space[2-yellow]",
"tests/test_state.py::TestGameState::test_get_waiting_space[3-red]",
"tests/test_state.py::TestGameState::test_get_waiting_space[3-blue]",
"tests/test_state.py::TestGameState::test_get_waiting_space[3-green]",
"tests/test_state.py::TestGameState::test_get_waiting_space[3-yellow]",
"tests/test_state.py::TestGameState::test_get_home_space[0-red]",
"tests/test_state.py::TestGameState::test_get_home_space[0-blue]",
"tests/test_state.py::TestGameState::test_get_home_space[0-green]",
"tests/test_state.py::TestGameState::test_get_home_space[0-yellow]",
"tests/test_state.py::TestGameState::test_get_home_space[1-red]",
"tests/test_state.py::TestGameState::test_get_home_space[1-blue]",
"tests/test_state.py::TestGameState::test_get_home_space[1-green]",
"tests/test_state.py::TestGameState::test_get_home_space[1-yellow]",
"tests/test_state.py::TestGameState::test_get_home_space[2-red]",
"tests/test_state.py::TestGameState::test_get_home_space[2-blue]",
"tests/test_state.py::TestGameState::test_get_home_space[2-green]",
"tests/test_state.py::TestGameState::test_get_home_space[2-yellow]",
"tests/test_state.py::TestGameState::test_get_home_space[3-red]",
"tests/test_state.py::TestGameState::test_get_home_space[3-blue]",
"tests/test_state.py::TestGameState::test_get_home_space[3-green]",
"tests/test_state.py::TestGameState::test_get_home_space[3-yellow]",
"tests/test_state.py::TestGameState::test_waiting_areas_to_dict",
"tests/test_state.py::TestGameState::test_home_areas_to_dict",
"tests/test_state.py::TestGameState::test_main_spaces_to_list",
"tests/test_state.py::TestGameState::test_move_factory[5-from_space0-None]",
"tests/test_state.py::TestGameState::test_move_factory[6-from_space3-expected_to_space_kwargs3]",
"tests/test_state.py::TestGameState::test_move_factory[1-from_space4-expected_to_space_kwargs4]",
"tests/test_state.py::TestGameState::test_move_factory[1-from_space5-expected_to_space_kwargs5]",
"tests/test_state.py::TestGameState::test_move_factory[1-from_space6-expected_to_space_kwargs6]",
"tests/test_state.py::TestGameState::test_do[6-from_space0-to_space0-post_do_from_space0-post_do_to_space0]",
"tests/test_state.py::TestGameState::test_do[1-from_space1-to_space1-post_do_from_space1-post_do_to_space1]",
"tests/test_state.py::TestGameState::test_do[1-from_space2-to_space2-post_do_from_space2-post_do_to_space2]",
"tests/test_state.py::TestGameState::test_get_player_moves[red-1-expected0]",
"tests/test_state.py::TestGameState::test_get_player_moves[blue-6-expected1]",
"tests/test_state.py::TestGameState::test_get_player_moves[yellow-1-expected2]",
"tests/test_state.py::TestGameState::test_get_player_moves[red-6-expected3]",
"tests/test_state.py::TestGameState::test_get_player_moves[green-1-expected4]"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_issue_reference",
"has_media",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-01-03 14:58:59+00:00
|
mit
| 4,077 |
|
munichpavel__clovek-ne-jezi-se-43
|
diff --git a/clovek_ne_jezi_se/agents.py b/clovek_ne_jezi_se/agents.py
index e079f75..bf602f4 100644
--- a/clovek_ne_jezi_se/agents.py
+++ b/clovek_ne_jezi_se/agents.py
@@ -7,6 +7,7 @@ from random import randint
import attr
import matplotlib.pyplot as plt
+import numpy as np
from clovek_ne_jezi_se.game_state import GameState, MoveContainer
from clovek_ne_jezi_se.log_handler import handler
@@ -54,7 +55,7 @@ class Player:
@abc.abstractmethod
def choose_move_idx(
self, game_state: 'GameState',
- allowed_moves: Sequence['MoveContainer']
+ allowed_moves: Sequence[Sequence['MoveContainer']]
) -> int:
return
@@ -86,8 +87,33 @@ class RandomPlayer(Player):
"""Player that selects uniformly randomly from allowed moves"""
def choose_move_idx(
self, game_state: 'GameState',
- allowed_moves: Sequence['MoveContainer']
+ allowed_moves: Sequence[Sequence['MoveContainer']]
) -> int:
- """TODO: Test???"""
+ """TODO: Test me???"""
idx = randint(0, len(allowed_moves)-1)
return idx
+
+
[email protected]
+class FurthestAlongPlayer(Player):
+ def choose_move_idx(
+ self, game_state: 'GameState',
+ allowed_moves: Sequence[Sequence['MoveContainer']]
+ ) -> int:
+ """
+        Return index for the move that is closest to the player's last home space
+ """
+ player_from_moves = []
+ for move_components in allowed_moves:
+ for move_component in move_components:
+ if move_component.from_space.occupied_by == self.name:
+ player_from_moves.append(move_component)
+
+ player_from_spaces = [move.from_space for move in player_from_moves]
+
+ distances_to_end = [
+ game_state.distance_to_end(space) for space in player_from_spaces
+ ]
+ idx_furthest_along = np.argmin(distances_to_end)
+
+ return idx_furthest_along
diff --git a/clovek_ne_jezi_se/game_state.py b/clovek_ne_jezi_se/game_state.py
index bfcf59b..f50c4ff 100644
--- a/clovek_ne_jezi_se/game_state.py
+++ b/clovek_ne_jezi_se/game_state.py
@@ -556,6 +556,23 @@ class GameState:
)
return home_count == self.pieces_per_player
+ def distance_to_end(self, board_space: 'BoardSpace') -> int:
+
+ # Get player subgraph
+ player_subgraph_query_paramses = \
+ self._get_player_subgraph_query_paramses(board_space.occupied_by)
+
+ player_subgraph_view = get_filtered_subgraph_view(
+ self._graph, player_subgraph_query_paramses
+ )
+ space_node_name = self._get_board_space_node_name(board_space)
+
+ successor_nodes = nx.dfs_successors(
+ player_subgraph_view, source=space_node_name
+ )
+
+ return len(successor_nodes)
+
# Visualization
def draw(
self, figsize=(8, 6), with_labels=False,
diff --git a/docs/source/index.rst b/docs/source/index.rst
index f599165..9edd497 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -11,6 +11,7 @@ Welcome to clovek-ne-jezi-se's documentation!
:caption: Contents:
INSTALL
+ EXPERIMENTS
api
diff --git a/notebooks/evaluate-agents.ipynb b/notebooks/evaluate-agents.ipynb
index 3438699..58f3bea 100644
--- a/notebooks/evaluate-agents.ipynb
+++ b/notebooks/evaluate-agents.ipynb
@@ -19,11 +19,14 @@
"import os\n",
"from pathlib import Path\n",
"\n",
+ "import pandas as pd\n",
+ "import numpy as np\n",
+ "\n",
"import mlflow\n",
"\n",
"import matplotlib.pyplot as plt\n",
"\n",
- "from clovek_ne_jezi_se.agents import RandomPlayer"
+ "from clovek_ne_jezi_se.agents import FurthestAlongPlayer, RandomPlayer"
]
},
{
@@ -62,6 +65,15 @@
" \" and params.main_board_section_length = 1\"\n",
" )\n",
"\n",
+ "\n",
+ "def get_int_value_counts_from_floats(series_of_floats: pd.Series) -> pd.Series:\n",
+ " counts = series_of_floats.value_counts(dropna=True)\n",
+ " # Convert index to ints\n",
+ " counts.index = counts.index.astype(int)\n",
+ " return counts\n",
+ "\n",
+ "# Tests\n",
+ "\n",
"def test_dict_to_conjunctive_filter_equality_string():\n",
" player_names = ['red', 'blue', 'green', 'yellow']\n",
" players = [RandomPlayer(name=name, print_game_state=False) for name in player_names]\n",
@@ -83,8 +95,17 @@
" assert filter_string == expected\n",
" except AssertionError as err:\n",
" print(err, filter_string, ' is not ', expected)\n",
+ " \n",
+ "\n",
+ "def test_get_int_value_counts_from_floats():\n",
+ " series = pd.Series([np.nan, 3.0, 0.0, 0.0])\n",
+ " expected = pd.Series(data=[2, 1], index=[0, 3])\n",
+ " res = get_int_value_counts_from_floats(series)\n",
+ " pd.testing.assert_series_equal(res, expected)\n",
" \n",
- "test_dict_to_conjunctive_filter_equality_string()"
+ " \n",
+ "test_dict_to_conjunctive_filter_equality_string()\n",
+ "test_get_int_value_counts_from_floats()"
]
},
{
@@ -101,32 +122,22 @@
{
"cell_type": "code",
"execution_count": 3,
- "metadata": {
- "scrolled": true
- },
- "outputs": [
- {
- "data": {
- "text/plain": [
- "1 31\n",
- "0 24\n",
- "3 24\n",
- "2 21\n",
- "Name: metrics.winner_idx, dtype: int64"
- ]
- },
- "execution_count": 3,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
+ "metadata": {},
+ "outputs": [],
"source": [
- "player_names = ['red', 'blue', 'green', 'yellow']\n",
- "players = [RandomPlayer(name=name, print_game_state=False) for name in player_names]\n",
- "\n",
"main_board_section_length = 1\n",
"pieces_per_player = 4\n",
- "number_of_dice_faces = 6\n",
+ "number_of_dice_faces = 6"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "player_names = ['red', 'blue', 'green', 'yellow']\n",
+ "players = [RandomPlayer(name=name, print_game_state=False) for name in player_names]\n",
"agents = [player.__class__.__name__ for player in players]\n",
"\n",
"run_params = dict(\n",
@@ -141,21 +152,14 @@
"os.chdir(os.environ['RUNS_PARENT_DIR'])\n",
"runs = mlflow.search_runs(filter_string=filter_string)\n",
"\n",
- "# Remove runs with no winner\n",
- "mask_winner_isna = runs['metrics.winner_idx'].isna()\n",
- "runs = runs[~mask_winner_isna]\n",
- "winner_idxes = runs['metrics.winner_idx'].astype(int)\n",
- "\n",
- "# Calculate winner counts\n",
- "winner_counts = winner_idxes.value_counts()\n",
+ "winner_counts = get_int_value_counts_from_floats(runs['metrics.winner_idx'])\n",
"names = [agents[idx] + '_' + str(idx) for idx in winner_counts.index]\n",
- "counts = winner_counts.values.astype(int)\n",
- "winner_counts"
+ "counts = winner_counts.values"
]
},
{
"cell_type": "code",
- "execution_count": 4,
+ "execution_count": 5,
"metadata": {},
"outputs": [
{
@@ -177,6 +181,56 @@
"plt.title(f'Win count per agent after {winner_counts.sum()} games')\n",
"plt.xticks(names, names);"
]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAeAAAADTCAYAAABZTcAiAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8QVMy6AAAACXBIWXMAAAsTAAALEwEAmpwYAAAaIUlEQVR4nO3deZhcVZ3/8feHJEiAyJaeGEKgR0EdhCFoRB3FiT8RAR0Bh8WMCFExMA8M6IAjw4wDLqMZfoO4MC5shgFc2AXJCBlIBCQsCcQsRAlCEELIIvsikOQ7f5zTUimquqq7q/t0Vz6v56mnb527fe89t+p7z7m3bisiMDMzs4G1SekAzMzMNkZOwGZmZgU4AZuZmRXgBGxmZlaAE7CZmVkBTsBmZmYFOAFby0naW9JvS8dh/U/SSEnXSnpK0mWl4zEbSpyArSFJ/yzpf6rKltYp+1hE3BIRbxrYKHtP0hRJt5aOowRJsyUd3YdFHAKMAbaLiEMlTZf01RaFB4Ck90malZP8shrjO/P45yX9RtI+FeN2k3S9pDWS/NADG1ScgK0ZNwN/JWkYgKSxwAhgz6qynfO0VoeSdvrc7QTcFxFrW7EwScNrFD8HXAB8vs5sPwbuAbYD/gW4XFJHHvcycCnw6VbEZ9ZSEeGXX92+gE2B54G35feHAT8EfllVdn8engQ8UjH/MuBkYAHwFPBTYLNu1vcZYAnwDHAv8NZc/hfAbOBJYDHwkYp5ZgNHV7yfAtxa8T6AY4Glef7/ApSX+UdgHfAs8GSdmGYDXwfuBJ4GfgZsWzH+ncBtedm/BiZVzfvvwK+AF4Cdayz/FOB3Fdt8cMW4YcCZwBrgQeD4vD3D8/itgPOBFcBy4KvAsMr9APwn8ESef/887t/zdv8xb/vZdbb9MuCxXHc3A2/J5V8CXiIluWeBY/LwS/n9tXm67YErgNV5/SdULPt04HLg4rxfj64VQ552H2BZVdkbgReBURVltwDHVk23MxBNHOv7Ar/N2/pd0jF+dB73BuAm4A+5Li4Btq46zj9POs6fy3UyBvifXK//C2zT5DEzBXggz/cg8PHS3wN+tf5VPAC/hsYLmAV8Lg+fDXwqf4FXll2Qhyfx6gR8Z/4i3paUXI+ts55DcxJ5OylB7kxqZY0A7gdOJZ0Q/L/85fSmPN9sGifgnwNbAzvmZLBfrWnrxDU7x7UbsEVOKBfncePyl/IBpF6lD+T3HRXz/h54CzAcGFFnu7fP8x+ev8DH5nHHkpLyDsA2+Yu8MgFfBfwgx/VneV8fU7FtL5NOaoYBfw88CqjWfquz7Z8CRgGvAb4JzK8Yd3rXfsjvpwNfrXi/CTAP+Ldcb68nJZYPVsz/MnBQnnZkN3HUSsAHA0uqys4GvlNV1jABA6NJJwEfzfV0Yo7t6IplfCDvhw7Sycg3q47z20lJdxywCrgb2BPYjJS8T2t0zOR6fJpXju2x5JMev9rr1U5dYda/fgm8Nw/vTWpl3FJV9stu5v92RDwaEY8D1wIT6kx3NHBGRNwVyf0R8RCptbAlMC0iXoqIm0gJdXIPtmFaRDwZEb8nnVDUi6GeiyJiUUQ8B3wROCx3wR8BzIiIGRGxPiJmAnNJX65dpkfE4ohYGxEvVy84Ii7L+2d9RPyU1FLfK48+DPhWRDwSEU8A07rmkzQmr+ezEfFcRKwCzgI+VrH4hyLi3IhYB1xI+kIf0+xGR8QFEfFMRLxISph7SNqqydnfTjoR+XKutweAc6vimxMRV+dtf6HZuLItSa3VSk+RThh66gBgcURcGalL/duklj8A+VicGREvRsRq4BvAX1ct4zsRsTIilpM+H3dExD0R8UfSidKeebpGx8x6YDdJIyNiRUQs7sX22CDnBGzNuhl4j6RtSV+oS0ndZ3+Vy3aj++u/j1UMP0/64qxlPKkrttr2wMMRsb6i7CFSS6JZzcZQz8NV6x5BajXtBBwq6cmuF/AeUqKrNe+rSDpS0vyK+XfLy4a87XWW1dU7sKJi3h+QWsJdKpPI83mwqW2XNEzSNEm/k/Q0qZVHRWyN7ARsX7VvTmXDE4Bu900DzwKvrSp7Lal3pKc22M8REcAjXe8ljZH0E0nL8764mFfvh5UVwy/UeN+13+seM/kE73BSz8cKSddJenMvtscGuVo3PJjVMod0rfEzpGuZRMTTkh7NZY9GxIMtWM/DpGtt1R4FxkvapCIJ7wjcl4efAzavmP51PVhns3fHjq8Y3pHUPbmGFPNFEfGZ3qxD0k6kVuH7Sa3BdZLmk7rgIV3b3aFOHA+TroGOjt7dCNVo2/8OOJDc/Us6Bp6oiK3R8h4GHoyIXfoQQ3cWA6+XNCoiupLuHsCPerGsDfazJLHhfv8aKdbdI+JxSQeRurt7o9tjJiKuB66XNJJ0Tf9cUi+TtRG3gK0puWtwLvCPpK61Lrfmslbd/XwecLKkt+U7hnfOCeoOUqv1nySNkDQJ+BvgJ3m++cBHJW0uaWd6dtfrSmAHSZs2mO4ISbtK2hz4MnB57ta9GPgbSR/MLcbNJE2StEP3i/uTLUhf7KsBJH2S1ALucilwoqRxkrYGvtA1IiJWADcAZ0p6raRNJL1BUnXXaD0rSddl6xlFSvB/IJ3gfK2Hy7sTeEbSF/Jvhoflnwa9vcn4yNu0Gamlr7x/NwWIiPtIdX9aLj8Y+EvSNfquu843I11/Jk/zmjqrug7YXdJB+W7s49jwRG4UqcX9lKRx1L8ruxl1j5nc0j5Q0hakff8sqUva2owTsPXEL0ldm5W/mb0ll7UkAUfEZaSbu35E6ka8mnS38UukhLs/qdX5XeDIiPhNnvUs0t23K0nXOS/pwWpvIrWkHpO0ppvpLiLdZPQY6aaaE3LMD5NaiaeSkujDpC/npj5fEXEv6S7nOTn+3cm9DNm5pCS7gPRzmxnAWtIdzABHkhLMvaTW6eVs2P3dnW8Bh0h6QtK3a4z/b1J3+/K8/NsbLO98YNfcrXp1PkH5MOl6+4OkujuP1JJu1ntJ3bczSD0PL5D2R5ePARNJ2z4NOCRfo4XU1fsCqX7JwzUfEhMRa0g3w51BOuHYlXTS+WKe5EvAW0nXmK8DruzBNlSvq7tjZhPSSe2jwOOk68x/39t12eDVdSekmXVD0mzS3b7nDYJY9ge+HxE7lY6lneXfaz9C+gnQrNLxWPtxC9hskMtdtwdIGp67Pk8j3VFrLZa7hLfO3dSnkq51N2r1m/WKE7DZ4CdS9+cTpC7oJaTf1VrrvYt0F/4a0iWPg3rx0yizprgL2szMrAC3gM3MzApwAjYzMytgQB/EMXr06Ojs7BzIVZqZmRUzb968NRHRUWvcgCbgzs5O5s6dO5CrNDMzK0bSQ/XGuQvazMysACdgMzOzApyAzczMCnACNjMzK8AJ2MzMrAAnYDMzswIG9GdIrdZ5ynWlQ2hLy6Z9qHQIZmZtzy1gMzOzApyAzczMCnACNjMzK8AJ2MzMrAAnYDMzswKcgM3MzApwAjYzMyvACdjMzKwAJ2AzM7MCnIDNzMwKGNKPorShw48NbT0
/MtRsaHML2MzMrICGCVjSeEmzJN0rabGkE3P56ZKWS5qfXwf0f7hmZmbtoZku6LXASRFxt6RRwDxJM/O4syLiP/svPDMzs/bUMAFHxApgRR5+RtISYFx/B2ZmZtbOenQTlqROYE/gDuDdwPGSjgTmklrJT9SYZyowFWDHHXfsa7xm1s98w1zr+YY5q6Xpm7AkbQlcAXw2Ip4Gvge8AZhAaiGfWWu+iDgnIiZGxMSOjo6+R2xmZtYGmkrAkkaQku8lEXElQESsjIh1EbEeOBfYq//CNDMzay/N3AUt4HxgSUR8o6J8bMVkBwOLWh+emZlZe2rmGvC7gU8ACyXNz2WnApMlTQACWAYc0w/xmZmZtaVm7oK+FVCNUTNaH46ZmdnGwU/CMjMzK8AJ2MzMrAAnYDMzswKcgM3MzApwAjYzMyvACdjMzKwAJ2AzM7MCnIDNzMwKcAI2MzMrwAnYzMysACdgMzOzApr5ZwxmZjYIdZ5yXekQ2s6yaR8asHW5BWxmZlaAE7CZmVkBTsBmZmYFOAGbmZkV4ARsZmZWgBOwmZlZAQ0TsKTxkmZJulfSYkkn5vJtJc2UtDT/3ab/wzUzM2sPzbSA1wInRcSuwDuB4yTtCpwC3BgRuwA35vdmZmbWhIYJOCJWRMTdefgZYAkwDjgQuDBPdiFwUD/FaGZm1nZ6dA1YUiewJ3AHMCYiVuRRjwFjWhuamZlZ+2o6AUvaErgC+GxEPF05LiICiDrzTZU0V9Lc1atX9ylYMzOzdtFUApY0gpR8L4mIK3PxSklj8/ixwKpa80bEORExMSImdnR0tCJmMzOzIa+Zu6AFnA8siYhvVIy6BjgqDx8F/Kz14ZmZmbWnZv4b0ruBTwALJc3PZacC04BLJX0aeAg4rF8iNDMza0MNE3BE3Aqozuj3tzYcMzOzjYOfhGVmZlaAE7CZmVkBTsBmZmYFOAGbmZkV4ARsZmZWgBOwmZlZAU7AZmZmBTgBm5mZFeAEbGZmVoATsJmZWQFOwGZmZgU4AZuZmRXgBGxmZlaAE7CZmVkBTsBmZmYFOAGbmZkV4ARsZmZWgBOwmZlZAU7AZmZmBTRMwJIukLRK0qKKstMlLZc0P78O6N8wzczM2kszLeDpwH41ys+KiAn5NaO1YZmZmbW3hgk4Im4GHh+AWMzMzDYafbkGfLykBbmLepuWRWRmZrYR6G0C/h7wBmACsAI4s96EkqZKmitp7urVq3u5OjMzs/bSqwQcESsjYl1ErAfOBfbqZtpzImJiREzs6OjobZxmZmZtpVcJWNLYircHA4vqTWtmZmavNrzRBJJ+DEwCRkt6BDgNmCRpAhDAMuCY/gvRzMys/TRMwBExuUbx+f0Qi5mZ2UbDT8IyMzMrwAnYzMysACdgMzOzApyAzczMCnACNjMzK8AJ2MzMrAAnYDMzswKcgM3MzApwAjYzMyvACdjMzKwAJ2AzM7MCnIDNzMwKcAI2MzMrwAnYzMysACdgMzOzApyAzczMCnACNjMzK8AJ2MzMrAAnYDMzswIaJmBJF0haJWlRRdm2kmZKWpr/btO/YZqZmbWXZlrA04H9qspOAW6MiF2AG/N7MzMza1LDBBwRNwOPVxUfCFyYhy8EDmptWGZmZu2tt9eAx0TEijz8GDCm3oSSpkqaK2nu6tWre7k6MzOz9tLnm7AiIoDoZvw5ETExIiZ2dHT0dXVmZmZtobcJeKWksQD576rWhWRmZtb+epuArwGOysNHAT9rTThmZmYbh2Z+hvRjYA7wJkmPSPo0MA34gKSlwD75vZmZmTVpeKMJImJynVHvb3EsZmZmGw0/CcvMzKwAJ2AzM7MCnIDNzMwKcAI2MzMrwAnYzMysACdgMzOzApyAzczMCnACNjMzK8AJ2MzMrAAnYDMzswKcgM3MzApwAjYzMyvACdjMzKwAJ2AzM7MCnIDNzMwKcAI2MzMrwAnYzMysACdgMzOzApyAzczMChjel5klLQOeAdYBayNiYiuCMjMza3d9SsDZ+yJiTQuWY2ZmttFwF7SZmVkBfU3AAdwgaZ6kqbUmkDRV0lxJc1evXt3H1ZmZmbWHvibg90TEW4H9geMkvbd6gog4JyImRsTEjo6OPq7OzMysPfQpAUfE8vx3FXAVsFcrgjIzM2t3vU7AkraQNKprGNgXWNSqwMzMzNpZX+6CHgNcJalrOT+KiF+0JCozM7M21+sEHBEPAHu0MBYzM7ONhn+GZGZmVoATsJmZWQFOwGZmZgU4AZuZmRXgBGxmZlaAE7CZmVkBTsBmZmYFOAGbmZkV4ARsZmZWgBOwmZlZAU7AZmZmBTgBm5mZFeAEbGZmVoATsJmZWQFOwGZmZgU4AZuZmRXgBGxmZlaAE7CZmVkBfUrAkvaT9FtJ90s6pVVBmZmZtbteJ2BJw4D/AvYHdgUmS9q1VYGZmZm1s760gPcC7o+IByLiJeAnwIGtCcvMzKy99SUBjwMernj/SC4zMzOzBob39wokTQWm5rfPSvptf69zkBoNrCkdRDP0H6UjKG5I1JXrCXBdDRVDop6gX+pqp3oj+pKAlwPjK97vkMs2EBHnAOf0YT1tQdLciJhYOg5rzHU1dLiuhgbXU2196YK+C9hF0p9L2hT4GHBNa8IyMzNrb71uAUfEWknHA9cDw4ALImJxyyIzMzNrY326BhwRM4AZLYql3W303fBDiOtq6HBdDQ2upxoUEaVjMDMz2+j4UZRmZmYFOAGbmZkVMKgSsKR1kuZXvDp7MO8ESQdUvD9d0sktiGmKpO2rykZLelnSsVXlyySN7us6q5Y5SdJTeX8skXRaRfnPW7muHsR0lKSl+XVUD+ftquNFkq6VtHWLYpoi6ewWLWuZpIWSFki6QdLrKspbWr9NxnN8ft56DOT6XVe9iud8Sb/O8VwuacsBWq/rqufxXJL/l8EiSRdIGjHQMQyqBAy8EBETKl7LmplJ0nBgAnBAg0l7YwqwfVXZocDtwOR+WF8tt0TEBGAicISkt/b3CvM+rVW+LXAa8A7S40hPk7RNDxbdVce7AY8Dx/U52P7xvoj4S2AucOpArDA/X72WXwH7AA8NRBwVXFd1dFNXn4uIPXI8vweOH4h4cF3V1U1dXQK8GdgdGAkcPRDxVBpsCfhVKs+OJE2UNDsPny7pIkm/Ai4Cvgwcns8CD8+z7ypptqQHJJ1QscwjJN2Zp/2BpGH5NT2fDS2U9DlJh5CS3iV52pF5EZOBk4BxknaoE/c/5mUtkvTZXNaZW7HnSlqczwJH5nFvz2eG8yX9f0mLqpcZEc8B84Cdq9a1l6Q5ku6RdJukN+XymyVNqJjuVkl7SNoin/Hdmec5MI+fIukaSTcBN9apkg8CMyPi8Yh4ApgJ7Fdn2kbmkB9f2s02TJF0paRfKLW4z6jYnk9Kuk/SncC7K8o7Jd2U9+eNknbM5dMlfU/S7fmYmJT3wxJJ0+vEeDNV+zsv62pJ83I9Ts1ln5L0zYppPiPprDz8qmMulz8r6UxJvwbeVSuAiLin2ZPRfuS6aq6uns7TivSlXuIuV9dVc3U1IzLgTtLDpA
ZWRAyaF7AOmJ9fV+WyZcDoPDwRmJ2HTyclo5H5/RTg7IplnQ7cBryG9Bi0PwAjgL8ArgVG5Om+CxwJvI2UWLrm3zr/nQ1MrCgfDyzNw18DTqoYtyyv623AQmALYEtgMbAn0AmsBSbk6S8FjsjDi4B35eFpwKI8PAn4eR7eLq/jLVXlrwWG5+F9gCvy8FHAN/PwG4G5FXF3rXdr4L4c6xTSM7237aaOTgb+teL9F4GTe1DHz+a/w4DLgP0abMMU4AFgK2AzUitwPDCW1MLoADYltRLPzvNcCxyVhz8FXJ2Hp5P+aYhI/zjkadLZ7yakY6mrXpbxyjF3NvAfNcq3zX9H5rrbLtf173jl2LotL7/mMZeHAzisyX33p/UP0OfRddWLugJ+CKwEZgGbu64Gb13laUcAdwN7D9Rnq+vV78+C7qEXInW1NuuaiHihm/HXRcSLwIuSVgFjgPeTEuRd6SSVkcAqUkW+XtJ3gOuAG+os83BS4oR00F0AnFk1zXtIJxDPAUi6Etib9KSwByNifp5uHtCpdL1mVETMyeU/Aj5csby9Jd0DrAemRcRiSZMqxm8FXChpF9KB13Ut4zLgi5I+T/rATM/l+wIf0SvXyDcDdszDMyPi8Trb3gojJc0nnaEvIbWgu9sGgBsj4ikASfeSnq06mnQytjqX/5R0kgHpjPejefgi4IyKZV0bESFpIbAyIhbm+ReTTpDm5+lmSVoHLAD+tcZ2nCDp4Dw8HtglIm5X6j34sKQlpC+GhUoPrKl1zEE66byi+11WjOuqF3UVEZ/MLbHvkL4vfthonhZwXfX+c/Vd4OaIuKXJ6VtmsCXgWtbySlf5ZlXjnmsw74sVw+tI2yvgwoj45+qJJe1B6mI9FjiMlLSqTQZeJ+nj+f32knaJiKUNYqkX08h6E1a4JSI+3M34rwCzIuJgpRvXZgNExPOSZpLOSg8jHayQ9sHfRsQG/xhD0jtovE+Xk1rfXXboWl+TXoiICZI2Jz1F7Tjg2/W2IatVj73Vtaz1VctdX7Xc90VEzYfH55OffUg9Fs8rXRbpOjbPI13b+g2vfPHWPeaAP0bEup5vxoBwXW2o6bqKiHWSfgL8EwOTgF1XG2qqrpRuau0Ajmk0bX8Y9NeASd0TXYnjb7uZ7hlgVBPLuxE4RNKfQbqpSNJOSteZN4mIK0hnZl03Ov1puZLeCGwZEeMiojMiOoGv8+qbsW4BDpK0uaQtgINzWU0R8STwTE6AkJ6r3RNb8co/wphSNe480gfxrkjXbCF9QP9B+bRR0p49WNf1wL6StlG6+WrfXNYjEfE8cAJwktINX91tQy13AH8taTuluxcPrRh3G6/sw4/Tzb7vpa2AJ/KXxJuBd3aNiIg7SGfufwf8OBfXPOZaHFO/cV01V1dKdu4aBj5CShgDxnXV/OdK0tGkBtfkiFjfom3okaGQgL8EfEvSXNJZWj2zSDddVd6E9SoRcS8pwd4gaQGpq2Ysqetmdu7GuRjoOquaDnw/l08Grqpa5BVUJeCIuDvPdyfpgD4vIu5psJ2fBs7N69kCeKrB9JXOAL6eu6k3OIuNiHmkazKVZ+FfIXVFLchdRF9pdkW5e/orpH/GcRfw5d52Wed9soC0/+puQ515V5Cu888hXadaUjH6H4BP5vr9BHBib+Lrxi+A4bk7bBrpjvhKlwK/6jrh6eaYa4qkEyQ9QuptWCDpvBZsQ4+4rpoiUnfvQtI9IGNJN4cOKNdV075Puiw5J+eNf+vrBvSUH0U5SEjaMiKezcOnAGMjos8HuNJvmGcDby51lrexUfp99lkRUe9OchskXFdDRzvW1VBoAW8sPpTPwhaRbtj6al8XKOlIUgv8X5x8+5+krSXdR7oe1zZfEu3IdTV0tHNduQVsNUnanXSnY6UXI+Idtaa3vpN0FfDnVcVfiIgeX2O3/uW6GjoGc105AZuZmRXgLmgzM7MCnIDNzMwKcAI2MzMrwAnYzMysACdgMzOzAv4PR3Fxr29LqMEAAAAASUVORK5CYII=\n",
+ "text/plain": [
+ "<Figure size 576x216 with 1 Axes>"
+ ]
+ },
+ "metadata": {
+ "needs_background": "light"
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "player_names = ['blue', 'green', 'yellow']\n",
+ "players = (\n",
+ " [FurthestAlongPlayer(name='red')] + \n",
+ " [RandomPlayer(name=name, print_game_state=False) for name in player_names]\n",
+ ")\n",
+ "\n",
+ "agents = [player.__class__.__name__ for player in players]\n",
+ "\n",
+ "run_params = dict(\n",
+ " agents=','.join(agents),\n",
+ " main_board_section_length=main_board_section_length,\n",
+ " pieces_per_player=pieces_per_player,\n",
+ " number_of_dice_faces=number_of_dice_faces\n",
+ ")\n",
+ "filter_string = dict_to_conjunctive_filter_equality_string(run_params, 'params')\n",
+ "\n",
+ "# mlflow.search_runs seemingly must be run from parent directory of mlruns\n",
+ "os.chdir(os.environ['RUNS_PARENT_DIR'])\n",
+ "runs = mlflow.search_runs(filter_string=filter_string)\n",
+ "\n",
+ "winner_counts = get_int_value_counts_from_floats(runs['metrics.winner_idx'])\n",
+ "names = [agents[idx] + '_' + str(idx) for idx in winner_counts.index]\n",
+ "counts = winner_counts.values\n",
+ "\n",
+ "# Plot\n",
+ "plt.figure(figsize=(8,3))\n",
+ "plt.bar(names, counts)\n",
+ "plt.title(f'Win count per agent after {winner_counts.sum()} games')\n",
+ "plt.xticks(names, names);"
+ ]
}
],
"metadata": {
diff --git a/scripts/play.py b/scripts/play.py
index 397a750..2998a94 100644
--- a/scripts/play.py
+++ b/scripts/play.py
@@ -1,19 +1,19 @@
import mlflow
-from mlflow.tracking import MlflowClient
-
from clovek_ne_jezi_se.client import Client
-from clovek_ne_jezi_se.agents import HumanPlayer, RandomPlayer
+from clovek_ne_jezi_se.agents import RandomPlayer, FurthestAlongPlayer
+
-# TODO put in config file (json)
-player_names = ['red', 'blue', 'green', 'yellow']
+furthest_along_player = FurthestAlongPlayer(name='red')
+random_player_names = ['blue', 'green', 'yellow']
random_players = [
- RandomPlayer(name=name, print_game_state=False) for name in player_names
+ RandomPlayer(name=name, print_game_state=False)
+ for name in random_player_names
]
-players = random_players
+players = [furthest_along_player] + random_players
-n_runs = 96
+n_runs = 100
main_board_section_length = 1
pieces_per_player = 4
number_of_dice_faces = 6
|
munichpavel/clovek-ne-jezi-se
|
0b256275eafdcc64db299d39dfa94873e864c172
|
diff --git a/tests/test_agents.py b/tests/test_agents.py
new file mode 100644
index 0000000..e840379
--- /dev/null
+++ b/tests/test_agents.py
@@ -0,0 +1,182 @@
+"""Tests for agents, if not already tested in test_client."""
+from copy import deepcopy
+
+import pytest
+
+from clovek_ne_jezi_se.client import Client
+from clovek_ne_jezi_se.game_state import (
+ GameState, MoveContainer, BoardSpace, EMPTY_SYMBOL
+)
+from clovek_ne_jezi_se.agents import FurthestAlongPlayer
+
+
+def assert_game_states_equal(
+ game_state: 'GameState', other: 'GameState'
+) -> bool:
+ waiting = game_state.waiting_areas_to_dict()
+ other_waiting = other.waiting_areas_to_dict()
+
+ assert waiting == other_waiting
+
+ main = game_state.main_spaces_to_list()
+ other_main = other.main_spaces_to_list()
+
+ assert main == other_main
+
+ home = game_state.home_areas_to_dict()
+ other_home = other.home_areas_to_dict()
+
+ assert home == other_home
+
+
+def test_assert_game_states_equal():
+ # Set attributes for repeated use below
+ player_names = ['red', 'blue', 'green', 'yellow']
+ pieces_per_player = 4
+ section_length = 4
+ game_state = GameState(
+ player_names=player_names, pieces_per_player=pieces_per_player,
+ section_length=section_length
+ )
+ game_state.initialize()
+
+ assert_game_states_equal(game_state, game_state)
+
+ other = deepcopy(game_state)
+ other.do(MoveContainer(
+ from_space=BoardSpace(
+ kind='waiting', idx=0,
+ occupied_by='red',
+ allowed_occupants=['red', EMPTY_SYMBOL]
+ ),
+ to_space=BoardSpace(
+ kind='main', idx=0,
+ occupied_by=EMPTY_SYMBOL,
+ allowed_occupants=player_names + [EMPTY_SYMBOL]
+ )
+ ))
+
+ with pytest.raises(AssertionError):
+ assert_game_states_equal(game_state, other)
+
+
+class TestAgents:
+ player_names = ['red']
+ players = [
+ FurthestAlongPlayer(name=name, print_game_state=False)
+ for name in player_names
+ ]
+
+ client = Client(players=players)
+ client.initialize()
+
+ def test_furthest_along_choose_move(self, monkeypatch):
+ played_client = deepcopy(self.client)
+ expected_client = deepcopy(self.client)
+
+ # Move red players to main board
+ played_game_state = played_client.get_game_state()
+ idx_main_ahead = 3
+ idx_main_behind = 1
+ idx_home = played_game_state.pieces_per_player-2
+ played_game_state.do(MoveContainer(
+ from_space=BoardSpace(
+ kind='waiting', idx=0,
+ occupied_by='red',
+ allowed_occupants=['red', EMPTY_SYMBOL]
+ ),
+ to_space=BoardSpace(
+ kind='main', idx=idx_main_ahead,
+ occupied_by=EMPTY_SYMBOL,
+ allowed_occupants=self.player_names + [EMPTY_SYMBOL]
+ )
+ ))
+ played_game_state.do(MoveContainer(
+ from_space=BoardSpace(
+ kind='waiting', idx=1,
+ occupied_by='red',
+ allowed_occupants=['red', EMPTY_SYMBOL]
+ ),
+ to_space=BoardSpace(
+ kind='main', idx=idx_main_behind,
+ occupied_by=EMPTY_SYMBOL,
+ allowed_occupants=self.player_names + [EMPTY_SYMBOL]
+ )
+ ))
+ played_game_state.do(MoveContainer(
+ from_space=BoardSpace(
+ kind='waiting', idx=2,
+ occupied_by='red',
+ allowed_occupants=['red', EMPTY_SYMBOL]
+ ),
+ to_space=BoardSpace(
+ kind='home', idx=idx_home,
+ occupied_by=EMPTY_SYMBOL,
+ allowed_occupants=['red', EMPTY_SYMBOL]
+ )
+ ))
+
+ # Set roll value to 1
+ roll = 1
+ monkeypatch.setattr(played_client, 'roll', lambda: roll)
+
+ # Play once (red) with fixed (monkeypatched) dice
+ played_client.take_turn()
+
+ expected_game_state = expected_client.get_game_state()
+
+ expected_game_state.do(MoveContainer(
+ from_space=BoardSpace(
+ kind='waiting', idx=0, occupied_by='red',
+ allowed_occupants=['red', EMPTY_SYMBOL]
+ ),
+ to_space=BoardSpace(
+ kind='main', idx=idx_main_ahead,
+ occupied_by='red',
+ allowed_occupants=self.player_names + [EMPTY_SYMBOL]
+ )
+ ))
+ expected_game_state.do(MoveContainer(
+ from_space=BoardSpace(
+ kind='waiting', idx=1, occupied_by='red',
+ allowed_occupants=['red', EMPTY_SYMBOL]
+ ),
+ to_space=BoardSpace(
+ kind='main', idx=idx_main_behind,
+ occupied_by='red',
+ allowed_occupants=self.player_names + [EMPTY_SYMBOL]
+
+ )
+ ))
+ expected_game_state.do(MoveContainer(
+ from_space=BoardSpace(
+ kind='waiting', idx=2,
+ occupied_by='red',
+ allowed_occupants=['red', EMPTY_SYMBOL]
+ ),
+ to_space=BoardSpace(
+ kind='home', idx=idx_home + roll,
+ occupied_by=EMPTY_SYMBOL,
+ allowed_occupants=['red', EMPTY_SYMBOL]
+ )
+ ))
+
+ assert_game_states_equal(played_game_state, expected_game_state)
+
+ # Play again red with fixed (monkeypatched) dice
+ played_client.take_turn()
+
+ expected_game_state.do(MoveContainer(
+ from_space=BoardSpace(
+ kind='main', idx=idx_main_ahead,
+ occupied_by='red',
+ allowed_occupants=self.player_names + [EMPTY_SYMBOL]
+ ),
+ to_space=BoardSpace(
+ kind='main', idx=idx_main_ahead + roll,
+ occupied_by='red',
+ allowed_occupants=self.player_names + [EMPTY_SYMBOL]
+ )
+ ))
+
+ assert_game_states_equal(played_game_state, expected_game_state)
diff --git a/tests/test_state.py b/tests/test_state.py
index 063339f..6e06b56 100644
--- a/tests/test_state.py
+++ b/tests/test_state.py
@@ -7,12 +7,6 @@ from clovek_ne_jezi_se.game_state import (
EMPTY_SYMBOL, GameState, BoardSpace, MoveContainer
)
-from clovek_ne_jezi_se.utils import (
- GraphQueryParams,
- get_filtered_subgraph_view,
- get_filtered_node_names
-)
-
def test_board_space_errors():
with pytest.raises(ValueError):
@@ -379,7 +373,6 @@ class TestGameState:
kind=to_space.kind, idx=to_space.idx, player_name='red'
) == post_do_to_space
-
@pytest.mark.parametrize(
"roll,from_space",
[
@@ -608,3 +601,34 @@ class TestGameState:
))
assert modified_game_state.is_winner(player_name) == expected
+
+ @pytest.mark.parametrize(
+ 'board_space,expected',
+ [
+ (
+ BoardSpace(
+ kind='main', idx=player_prehome_indices['red'],
+ occupied_by='red',
+ allowed_occupants=player_names + [EMPTY_SYMBOL]
+ ), 4
+ ),
+ (
+ BoardSpace(
+ kind='home', idx=pieces_per_player-1,
+ occupied_by='red',
+ allowed_occupants=['red', EMPTY_SYMBOL]
+ ), 0
+ ),
+ (
+ BoardSpace(
+ kind='waiting', idx=0,
+ occupied_by='red',
+ allowed_occupants=['red', EMPTY_SYMBOL]
+ ), pieces_per_player * (section_length + 1)
+ )
+ ]
+
+ )
+ def test_distance_to_end(self, board_space, expected):
+ assert self.game_state.distance_to_end(board_space) \
+ == expected
|
Add choose-furthest-along agent
TODOs
- [ ] Add distance ~~to home~~ (WRONG: distance to last node on player subgraph) method
If I had distance to home, then I would only advance players in the home area after all players on the main board are already advanced.
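
A toy illustration (assumed directed path graph, not the project's board) of the metric the patch above uses: `len(nx.dfs_successors(...))` counts the steps remaining to the last node, and the agent then takes `np.argmin` over these distances:

```python
import networkx as nx

# Directed chain 0 -> 1 -> 2 -> 3 -> 4, where 4 plays the last home space.
path = nx.DiGraph([(0, 1), (1, 2), (2, 3), (3, 4)])

assert len(nx.dfs_successors(path, source=2)) == 2  # {2: [3], 3: [4]}
assert len(nx.dfs_successors(path, source=4)) == 0  # already at the end
```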
|
0.0
|
0b256275eafdcc64db299d39dfa94873e864c172
|
[
"tests/test_agents.py::test_assert_game_states_equal",
"tests/test_agents.py::TestAgents::test_furthest_along_choose_move",
"tests/test_state.py::test_board_space_errors",
"tests/test_state.py::TestGameState::test_game_state_initialization_errors[init_args_dict0-ValueError]",
"tests/test_state.py::TestGameState::test_game_state_initialization_errors[init_args_dict1-TypeError]",
"tests/test_state.py::TestGameState::test_game_state_initialization_errors[init_args_dict2-ValueError]",
"tests/test_state.py::TestGameState::test_game_state_initialization_errors[init_args_dict3-TypeError]",
"tests/test_state.py::TestGameState::test_game_state_initialization_errors[init_args_dict4-ValueError]",
"tests/test_state.py::TestGameState::test_game_state_initialization_errors[init_args_dict5-TypeError]",
"tests/test_state.py::TestGameState::test_get_main_entry_index[red-0]",
"tests/test_state.py::TestGameState::test_get_main_entry_index[blue-4]",
"tests/test_state.py::TestGameState::test_get_main_entry_index[green-8]",
"tests/test_state.py::TestGameState::test_get_main_entry_index[yellow-12]",
"tests/test_state.py::TestGameState::test_get_main_board_space[0]",
"tests/test_state.py::TestGameState::test_get_main_board_space[1]",
"tests/test_state.py::TestGameState::test_get_main_board_space[2]",
"tests/test_state.py::TestGameState::test_get_main_board_space[3]",
"tests/test_state.py::TestGameState::test_get_main_board_space[4]",
"tests/test_state.py::TestGameState::test_get_main_board_space[5]",
"tests/test_state.py::TestGameState::test_get_main_board_space[6]",
"tests/test_state.py::TestGameState::test_get_main_board_space[7]",
"tests/test_state.py::TestGameState::test_get_main_board_space[8]",
"tests/test_state.py::TestGameState::test_get_main_board_space[9]",
"tests/test_state.py::TestGameState::test_get_main_board_space[10]",
"tests/test_state.py::TestGameState::test_get_main_board_space[11]",
"tests/test_state.py::TestGameState::test_get_main_board_space[12]",
"tests/test_state.py::TestGameState::test_get_main_board_space[13]",
"tests/test_state.py::TestGameState::test_get_main_board_space[14]",
"tests/test_state.py::TestGameState::test_get_main_board_space[15]",
"tests/test_state.py::TestGameState::test_get_board_space_returns_none[yadda-0]",
"tests/test_state.py::TestGameState::test_get_board_space_returns_none[main-42]",
"tests/test_state.py::TestGameState::test_get_waiting_space[0-red]",
"tests/test_state.py::TestGameState::test_get_waiting_space[0-blue]",
"tests/test_state.py::TestGameState::test_get_waiting_space[0-green]",
"tests/test_state.py::TestGameState::test_get_waiting_space[0-yellow]",
"tests/test_state.py::TestGameState::test_get_waiting_space[1-red]",
"tests/test_state.py::TestGameState::test_get_waiting_space[1-blue]",
"tests/test_state.py::TestGameState::test_get_waiting_space[1-green]",
"tests/test_state.py::TestGameState::test_get_waiting_space[1-yellow]",
"tests/test_state.py::TestGameState::test_get_waiting_space[2-red]",
"tests/test_state.py::TestGameState::test_get_waiting_space[2-blue]",
"tests/test_state.py::TestGameState::test_get_waiting_space[2-green]",
"tests/test_state.py::TestGameState::test_get_waiting_space[2-yellow]",
"tests/test_state.py::TestGameState::test_get_waiting_space[3-red]",
"tests/test_state.py::TestGameState::test_get_waiting_space[3-blue]",
"tests/test_state.py::TestGameState::test_get_waiting_space[3-green]",
"tests/test_state.py::TestGameState::test_get_waiting_space[3-yellow]",
"tests/test_state.py::TestGameState::test_get_home_space[0-red]",
"tests/test_state.py::TestGameState::test_get_home_space[0-blue]",
"tests/test_state.py::TestGameState::test_get_home_space[0-green]",
"tests/test_state.py::TestGameState::test_get_home_space[0-yellow]",
"tests/test_state.py::TestGameState::test_get_home_space[1-red]",
"tests/test_state.py::TestGameState::test_get_home_space[1-blue]",
"tests/test_state.py::TestGameState::test_get_home_space[1-green]",
"tests/test_state.py::TestGameState::test_get_home_space[1-yellow]",
"tests/test_state.py::TestGameState::test_get_home_space[2-red]",
"tests/test_state.py::TestGameState::test_get_home_space[2-blue]",
"tests/test_state.py::TestGameState::test_get_home_space[2-green]",
"tests/test_state.py::TestGameState::test_get_home_space[2-yellow]",
"tests/test_state.py::TestGameState::test_get_home_space[3-red]",
"tests/test_state.py::TestGameState::test_get_home_space[3-blue]",
"tests/test_state.py::TestGameState::test_get_home_space[3-green]",
"tests/test_state.py::TestGameState::test_get_home_space[3-yellow]",
"tests/test_state.py::TestGameState::test_waiting_areas_to_dict",
"tests/test_state.py::TestGameState::test_home_areas_to_dict",
"tests/test_state.py::TestGameState::test_main_spaces_to_list",
"tests/test_state.py::TestGameState::test_move_factory_initial_board[5-from_space0-None]",
"tests/test_state.py::TestGameState::test_move_factory_initial_board[1-from_space1-None]",
"tests/test_state.py::TestGameState::test_move_factory_initial_board[5-from_space2-None]",
"tests/test_state.py::TestGameState::test_move_factory_initial_board[6-from_space3-expected_to_space_kwargs3]",
"tests/test_state.py::TestGameState::test_move_factory_initial_board[1-from_space4-expected_to_space_kwargs4]",
"tests/test_state.py::TestGameState::test_move_factory_initial_board[1-from_space5-expected_to_space_kwargs5]",
"tests/test_state.py::TestGameState::test_move_factory_initial_board[1-from_space6-expected_to_space_kwargs6]",
"tests/test_state.py::TestGameState::test_do[6-from_space0-to_space0-post_do_from_space0-post_do_to_space0]",
"tests/test_state.py::TestGameState::test_do[1-from_space1-to_space1-post_do_from_space1-post_do_to_space1]",
"tests/test_state.py::TestGameState::test_do[1-from_space2-to_space2-post_do_from_space2-post_do_to_space2]",
"tests/test_state.py::TestGameState::test_do[1-from_space3-to_space3-post_do_from_space3-post_do_to_space3]",
"tests/test_state.py::TestGameState::test_move_factory_blocked_by_own_piece[1-from_space0]",
"tests/test_state.py::TestGameState::test_get_player_moves[red-1-expected0]",
"tests/test_state.py::TestGameState::test_get_player_moves[blue-6-expected1]",
"tests/test_state.py::TestGameState::test_get_player_moves[yellow-1-expected2]",
"tests/test_state.py::TestGameState::test_get_player_moves[red-6-expected3]",
"tests/test_state.py::TestGameState::test_get_player_moves[green-1-expected4]",
"tests/test_state.py::TestGameState::test_is_winner[red-False]",
"tests/test_state.py::TestGameState::test_is_winner[blue-False]",
"tests/test_state.py::TestGameState::test_is_winner[green-False]",
"tests/test_state.py::TestGameState::test_is_winner[yellow-True]",
"tests/test_state.py::TestGameState::test_distance_to_end[board_space0-4]",
"tests/test_state.py::TestGameState::test_distance_to_end[board_space1-0]",
"tests/test_state.py::TestGameState::test_distance_to_end[board_space2-20]"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-02-07 11:41:56+00:00
|
mit
| 4,078 |
|
mwouts__jupytext-368
|
diff --git a/HISTORY.rst b/HISTORY.rst
index 9508d62..2cc996b 100644
--- a/HISTORY.rst
+++ b/HISTORY.rst
@@ -28,6 +28,7 @@ Release History
- Fixed an inconsistent round trip (code cell with ``"cat"`` being converted to a markdown cell) in the ``py:light`` format (#339)
- ``jupytext --test textfile.ext`` now really compares the text file to its round trip (rather than the corresponding notebook) (#339)
- Markdown cells that contain code are now preserved in a round trip through the Markdown and R Markdown formats (#361)
+- Code cells with a ``%%python3`` cell magic are now preserved in a round trip through the Markdown format (#365)
1.2.4 (2019-09-19)
diff --git a/jupytext/cell_reader.py b/jupytext/cell_reader.py
index 307a966..b8154ae 100644
--- a/jupytext/cell_reader.py
+++ b/jupytext/cell_reader.py
@@ -262,7 +262,7 @@ class BaseCellReader(object):
class MarkdownCellReader(BaseCellReader):
"""Read notebook cells from Markdown documents"""
comment = ''
- start_code_re = re.compile(r"^```({})(.*)".format('|'.join(
+ start_code_re = re.compile(r"^```({})($|\s(.*)$)".format('|'.join(
_JUPYTER_LANGUAGES.union({str.upper(lang) for lang in _JUPYTER_LANGUAGES})).replace('+', '\\+')))
non_jupyter_code_re = re.compile(r"^```")
end_code_re = re.compile(r"^```\s*$")
|
mwouts/jupytext
|
4d6bef1c6cc78111ce1b3560b07965332c0c951c
|
diff --git a/tests/test_read_simple_markdown.py b/tests/test_read_simple_markdown.py
index 7a074b8..8e9c38e 100644
--- a/tests/test_read_simple_markdown.py
+++ b/tests/test_read_simple_markdown.py
@@ -590,3 +590,34 @@ def test_two_markdown_cell_with_no_language_code_works(nb=new_notebook(cells=[
text = jupytext.writes(nb, 'md')
nb2 = jupytext.reads(text, 'md')
compare_notebooks(nb2, nb)
+
+
+def test_notebook_with_python3_magic(no_jupytext_version_number,
+ nb=new_notebook(metadata={
+ 'kernelspec': {'display_name': 'Python 3', 'language': 'python',
+ 'name': 'python3'}},
+ cells=[new_code_cell('%%python2\na = 1\nprint a'),
+ new_code_cell('%%python3\nb = 2\nprint(b)')]),
+ text="""---
+jupyter:
+ kernelspec:
+ display_name: Python 3
+ language: python
+ name: python3
+---
+
+```python2
+a = 1
+print a
+```
+
+```python3
+b = 2
+print(b)
+```
+"""):
+ md = jupytext.writes(nb, 'md')
+ compare(md, text)
+
+ nb2 = jupytext.reads(md, 'md')
+ compare_notebooks(nb2, nb)
|
.md round-tripping broken on `%%python3` magic
Just noticed this in a notebook that uses `%%python3` magic to demonstrate code execution in separate cells.
If I'm editing a Markdown file in the Jupyter notebook UI under Jupytext and I have a code cell:
```
%%python3
a = 1
print(a)
```
if I save the file and then reopen it, the `%%python3` cell magic has been stripped.
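
A self-contained illustration of the likely root cause and of the shape of the fix in `cell_reader.py` above (the real alternation is built from `_JUPYTER_LANGUAGES`; `python`/`python3` here are stand-ins, ordered to mimic the unlucky match):

````python
import re

# Old shape: a greedy trailer lets "```python3" match as language
# "python" with options "3", so the %%python3 magic is dropped.
loose = re.compile(r"^```(python|python3)(.*)")
# Fixed shape: the language must be followed by end-of-line or whitespace.
strict = re.compile(r"^```(python|python3)($|\s(.*)$)")

assert loose.match("```python3").group(1) == "python"    # magic lost
assert strict.match("```python3").group(1) == "python3"  # magic kept
````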
|
0.0
|
4d6bef1c6cc78111ce1b3560b07965332c0c951c
|
[
"tests/test_read_simple_markdown.py::test_notebook_with_python3_magic"
] |
[
"tests/test_read_simple_markdown.py::test_read_markdown_file_no_language",
"tests/test_read_simple_markdown.py::test_read_julia_notebook",
"tests/test_read_simple_markdown.py::test_split_on_header",
"tests/test_read_simple_markdown.py::test_split_on_header_after_two_blank_lines",
"tests/test_read_simple_markdown.py::test_combine_md_version_one",
"tests/test_read_simple_markdown.py::test_jupyter_cell_is_not_split",
"tests/test_read_simple_markdown.py::test_indented_code_is_not_split",
"tests/test_read_simple_markdown.py::test_non_jupyter_code_is_not_split",
"tests/test_read_simple_markdown.py::test_read_markdown_idl",
"tests/test_read_simple_markdown.py::test_read_markdown_IDL",
"tests/test_read_simple_markdown.py::test_inactive_cell",
"tests/test_read_simple_markdown.py::test_inactive_cell_using_tag",
"tests/test_read_simple_markdown.py::test_inactive_cell_using_noeval",
"tests/test_read_simple_markdown.py::test_noeval_followed_by_code_works",
"tests/test_read_simple_markdown.py::test_markdown_cell_with_code_works",
"tests/test_read_simple_markdown.py::test_markdown_cell_with_noeval_code_works",
"tests/test_read_simple_markdown.py::test_two_markdown_cell_with_code_works",
"tests/test_read_simple_markdown.py::test_two_markdown_cell_with_no_language_code_works"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-10-29 23:58:10+00:00
|
mit
| 4,079 |
|
mwouts__jupytext-380
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8194baf..832e40b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -13,8 +13,9 @@
- The light format uses `# + [markdown]` rather than the previous `cell_type` metadata to identify markdown cells with metadata (#356)
- Explicit Markdown cells in the light format `# + [markdown]` can use triple quotes (#356)
- IPython magic help commands like `float?` are classified as magics, and thus commented in Python scripts (#297)
-- Cell metadata can be encoded as either key=value (the new default) or in JSON. An automatic option `cell_metadata_json` should help minimize the impact on existing files (#344)
+- Cell metadata can be encoded as either `key=value` (the new default) or in JSON. An automatic option `cell_metadata_json` should help minimize the impact on existing files (#344)
- R Markdown hidden inputs, outputs, or cells are now mapped to the corresponding Jupyter Book tags by default (#337)
+- The notebook metadata filter is automatically updated when extra keys are added to the YAML header (#376)
- The Jupyter Notebook extension for Jupytext is compatible with Jupyter Notebook 6.0 (#346)
- `jupytext notebook.py --to ipynb` updates the timestamp of `notebook.py` so that the paired notebook still works in Jupyter (#335, #254)
- `jupytext --check pytest notebook.ipynb` can be used to run test functions in a notebook (#286)
diff --git a/jupytext/header.py b/jupytext/header.py
index c2933f3..b477b55 100644
--- a/jupytext/header.py
+++ b/jupytext/header.py
@@ -8,7 +8,7 @@ import nbformat
from nbformat.v4.nbbase import new_raw_cell
from .version import __version__
from .languages import _SCRIPT_EXTENSIONS, comment_lines
-from .metadata_filter import filter_metadata
+from .metadata_filter import filter_metadata, _DEFAULT_NOTEBOOK_METADATA
from .pep8 import pep8_lines_between_cells
SafeRepresenter.add_representer(nbformat.NotebookNode, SafeRepresenter.represent_dict)
@@ -19,14 +19,6 @@ _JUPYTER_RE = re.compile(r"^jupyter\s*:\s*$")
_LEFTSPACE_RE = re.compile(r"^\s")
_UTF8_HEADER = ' -*- coding: utf-8 -*-'
-_DEFAULT_NOTEBOOK_METADATA = ','.join([
- # Preserve Jupytext section
- 'jupytext',
- # Preserve kernel specs
- 'kernelspec',
- # Kernel_info found in Nteract notebooks
- 'kernel_info'])
-
# Change this to False in tests
INSERT_AND_CHECK_VERSION_NUMBER = True
diff --git a/jupytext/metadata_filter.py b/jupytext/metadata_filter.py
index 989f567..fecb28a 100644
--- a/jupytext/metadata_filter.py
+++ b/jupytext/metadata_filter.py
@@ -2,6 +2,14 @@
from .cell_metadata import _JUPYTEXT_CELL_METADATA
+_DEFAULT_NOTEBOOK_METADATA = ','.join([
+ # Preserve Jupytext section
+ 'jupytext',
+ # Preserve kernel specs
+ 'kernelspec',
+ # Kernel_info found in Nteract notebooks
+ 'kernel_info'])
+
def metadata_filter_as_dict(metadata_config):
"""Return the metadata filter represented as either None (no filter),
@@ -84,6 +92,18 @@ def update_metadata_filters(metadata, jupyter_md, cell_metadata):
cell_metadata = {'additional': cell_metadata, 'excluded': 'all'}
metadata.setdefault('jupytext', {})['notebook_metadata_filter'] = '-all'
metadata.setdefault('jupytext', {})['cell_metadata_filter'] = metadata_filter_as_string(cell_metadata)
+ else:
+ # Update the notebook metadata filter to include existing entries 376
+ nb_md_filter = metadata.get('jupytext', {}).get('notebook_metadata_filter', '').split(',')
+ nb_md_filter = [key for key in nb_md_filter if key]
+ if 'all' in nb_md_filter or '-all' in nb_md_filter:
+ return
+ for key in metadata:
+ if key in _DEFAULT_NOTEBOOK_METADATA or key in nb_md_filter or ('-' + key) in nb_md_filter:
+ continue
+ nb_md_filter.append(key)
+ if nb_md_filter:
+ metadata.setdefault('jupytext', {})['notebook_metadata_filter'] = ','.join(nb_md_filter)
def apply_metadata_filters(user_filter, default_filter, actual_keys):
|
mwouts/jupytext
|
c189035ff88a02408a512115bd589b2c28606ba8
|
diff --git a/tests/test_read_simple_markdown.py b/tests/test_read_simple_markdown.py
index 8e9c38e..711206b 100644
--- a/tests/test_read_simple_markdown.py
+++ b/tests/test_read_simple_markdown.py
@@ -595,9 +595,9 @@ def test_two_markdown_cell_with_no_language_code_works(nb=new_notebook(cells=[
def test_notebook_with_python3_magic(no_jupytext_version_number,
nb=new_notebook(metadata={
'kernelspec': {'display_name': 'Python 3', 'language': 'python',
- 'name': 'python3'}},
- cells=[new_code_cell('%%python2\na = 1\nprint a'),
- new_code_cell('%%python3\nb = 2\nprint(b)')]),
+ 'name': 'python3'}}, cells=[
+ new_code_cell('%%python2\na = 1\nprint a'),
+ new_code_cell('%%python3\nb = 2\nprint(b)')]),
text="""---
jupyter:
kernelspec:
@@ -621,3 +621,59 @@ print(b)
nb2 = jupytext.reads(md, 'md')
compare_notebooks(nb2, nb)
+
+
+def test_update_metadata_filter(
+ no_jupytext_version_number,
+ org="""---
+jupyter:
+ kernelspec:
+ display_name: Python 3
+ language: python
+ name: python3
+ extra:
+ key: value
+---
+""", target="""---
+jupyter:
+ extra:
+ key: value
+ jupytext:
+ notebook_metadata_filter: extra
+ kernelspec:
+ display_name: Python 3
+ language: python
+ name: python3
+---
+"""):
+ nb = jupytext.reads(org, 'md')
+ text = jupytext.writes(nb, 'md')
+ compare(text, target)
+
+
+def test_update_metadata_filter_2(
+ no_jupytext_version_number,
+ org="""---
+jupyter:
+ jupytext:
+ notebook_metadata_filter: -extra
+ kernelspec:
+ display_name: Python 3
+ language: python
+ name: python3
+ extra:
+ key: value
+---
+""", target="""---
+jupyter:
+ jupytext:
+ notebook_metadata_filter: -extra
+ kernelspec:
+ display_name: Python 3
+ language: python
+ name: python3
+---
+"""):
+ nb = jupytext.reads(org, 'md')
+ text = jupytext.writes(nb, 'md')
+ compare(text, target)
|
Jupytext should not remove metadata from the YAML header in text files
The file `funnel-charts.md` contains a `plotly` entry in the YAML header, but a round trip in Jupytext [removes it](https://github.com/mwouts/plotly.py-docs/commit/335c5d1ceea263d36da2cfe895fcf3c6b901feeb#diff-79a654256c6b0d4336e702730725dc0c).
It would be cleaner if Jupytext could add a filter to make sure that the `plotly` section remains there.
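A hedged sketch of the expected behaviour after the fix, mirroring the tests above (the `plotly` content is illustrative):
```python
import jupytext

org = """---
jupyter:
  kernelspec:
    display_name: Python 3
    language: python
    name: python3
  plotly:
    key: value
---
"""

nb = jupytext.reads(org, 'md')
text = jupytext.writes(nb, 'md')

# With the fix, the written header keeps the plotly section and the
# jupytext section gains notebook_metadata_filter: plotly.
print(text)
```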
|
0.0
|
c189035ff88a02408a512115bd589b2c28606ba8
|
[
"tests/test_read_simple_markdown.py::test_update_metadata_filter"
] |
[
"tests/test_read_simple_markdown.py::test_read_markdown_file_no_language",
"tests/test_read_simple_markdown.py::test_read_julia_notebook",
"tests/test_read_simple_markdown.py::test_split_on_header",
"tests/test_read_simple_markdown.py::test_split_on_header_after_two_blank_lines",
"tests/test_read_simple_markdown.py::test_combine_md_version_one",
"tests/test_read_simple_markdown.py::test_jupyter_cell_is_not_split",
"tests/test_read_simple_markdown.py::test_indented_code_is_not_split",
"tests/test_read_simple_markdown.py::test_non_jupyter_code_is_not_split",
"tests/test_read_simple_markdown.py::test_read_markdown_idl",
"tests/test_read_simple_markdown.py::test_read_markdown_IDL",
"tests/test_read_simple_markdown.py::test_inactive_cell",
"tests/test_read_simple_markdown.py::test_inactive_cell_using_tag",
"tests/test_read_simple_markdown.py::test_inactive_cell_using_noeval",
"tests/test_read_simple_markdown.py::test_noeval_followed_by_code_works",
"tests/test_read_simple_markdown.py::test_markdown_cell_with_code_works",
"tests/test_read_simple_markdown.py::test_markdown_cell_with_noeval_code_works",
"tests/test_read_simple_markdown.py::test_two_markdown_cell_with_code_works",
"tests/test_read_simple_markdown.py::test_two_markdown_cell_with_no_language_code_works",
"tests/test_read_simple_markdown.py::test_notebook_with_python3_magic",
"tests/test_read_simple_markdown.py::test_update_metadata_filter_2"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-11-09 17:08:05+00:00
|
mit
| 4,080 |
|
mwouts__jupytext-405
|
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 9c10543..42f41c6 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -37,14 +37,13 @@ A pull request for which you do not need to contact us in advance is the additio
Most of Jupytext's code is written in Python. To develop the Python part of Jupytext, you should clone Jupytext, then create a dedicated Python env:
```
cd jupytext
-conda create -n jupytext-dev jupyterlab pyyaml
+conda env create --file environment.yml
conda activate jupytext-dev
-pip install -r requirements-dev.txt
+pip install -e .
```
-Tests are executed with `pytest`. Install `pytest-xdist` to allow running the tests in parallel:
+Tests are executed with `pytest`. You can run them in parallel with for instance
```
-pip install pytest-xdist
pytest -n 5
```
diff --git a/environment.yml b/environment.yml
new file mode 100644
index 0000000..5d3cff2
--- /dev/null
+++ b/environment.yml
@@ -0,0 +1,21 @@
+name: jupytext-dev
+channels:
+ - default
+ - conda-forge
+dependencies:
+ - python==3.7
+ - jupyter
+ - pyyaml
+ - nbconvert
+ - jupyter_contrib_nbextensions
+ - pytest
+ - pytest-xdist
+ - pylint
+ - flake8
+ - black
+ - autopep8
+ - sphinx-gallery
+ - nodejs
+ - pip
+ - setuptools
+ - pandoc
diff --git a/jupytext/header.py b/jupytext/header.py
index b477b55..c23ff72 100644
--- a/jupytext/header.py
+++ b/jupytext/header.py
@@ -173,7 +173,7 @@ def header_to_metadata_and_cell(lines, header_prefix, ext=None):
if _JUPYTER_RE.match(line):
injupyter = True
- elif not _LEFTSPACE_RE.match(line):
+ elif line and not _LEFTSPACE_RE.match(line):
injupyter = False
if injupyter:
|
mwouts/jupytext
|
8cc0d8348f6cf989a1f4f354bda4bf2beafdd90d
|
diff --git a/tests/test_header.py b/tests/test_header.py
index 762456d..9f9c86d 100644
--- a/tests/test_header.py
+++ b/tests/test_header.py
@@ -99,3 +99,24 @@ def test_notebook_from_plain_script_has_metadata_filter(script="""print('Hello w
script2 = jupytext.writes(nb, '.py')
compare(script2, script)
+
+
+def test_multiline_metadata(
+ no_jupytext_version_number,
+ notebook=new_notebook(metadata={'multiline': """A multiline string
+
+with a blank line""", 'jupytext': {'notebook_metadata_filter': 'all'}}),
+ markdown="""---
+jupyter:
+ jupytext:
+ notebook_metadata_filter: all
+ multiline: 'A multiline string
+
+
+ with a blank line'
+---
+"""):
+ actual = jupytext.writes(notebook, '.md')
+ compare(actual, markdown)
+ nb2 = jupytext.reads(markdown, '.md')
+ compare(nb2, notebook)
|
Empty line in notebook metadata section prevents it from opening
Jupytext seems to remove the indentation spaces on empty lines when syncing the metadata from the ipynb to the Python file. An empty line in the YAML, while legal, prevents Jupytext from working, with this error message:
```
Unreadable Notebook: c.py ScannerError('while scanning a quoted scalar', <yaml.error.Mark object at 0x7f9f90243190>, 'found unexpected end of stream', <yaml.error.Mark object at 0x7f9f90535be0>)
```
Reproduction steps:
1. create a new Python file with the following content:
```python
# ---
# jupyter:
# jupytext:
# cell_markers: '"""
#
# ,
#
# """'
# formats: .notebook//ipynb,py:percent
# notebook_metadata_filter: -all,jupytext
# text_representation:
# extension: .py
# format_name: percent
# ---
```
This configures Jupytext to use the percent format and `"""\n \n"""` as the markdown cell marker.
Note that there are **indentation spaces on the two empty lines**.
2. open the Python file in Jupyter's notebook mode; it opens successfully
3. press the save button, which triggers Jupytext to write the metadata back to the Python file
4. notice that **Jupytext removed the indentation spaces on the two empty lines**
5. refresh the notebook, or reopen it; there's an error
```
Unreadable Notebook: c.py ScannerError('while scanning a quoted scalar', <yaml.error.Mark object at 0x7f9f90243190>, 'found unexpected end of stream', <yaml.error.Mark object at 0x7f9f90535be0>)
```
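The one-line change in `header.py` above points at the root cause: the header parser treated any line without leading whitespace, including an empty line, as the end of the `jupyter:` section. A minimal sketch of that logic, simplified from the patched code (regexes as in the patch):
```python
import re

_JUPYTER_RE = re.compile(r"^jupyter\s*:\s*$")
_LEFTSPACE_RE = re.compile(r"^\s")

lines = ["jupyter:", "  multiline: 'first part", "", "  second part'"]

injupyter = False
kept = []
for line in lines:
    if _JUPYTER_RE.match(line):
        injupyter = True
    elif line and not _LEFTSPACE_RE.match(line):  # "line and" is the fix
        injupyter = False
    if injupyter:
        kept.append(line)

# Without the "line and" guard, the empty line ends the jupyter section,
# the quoted scalar is truncated, and YAML raises the ScannerError above.
print(kept)
```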
|
0.0
|
8cc0d8348f6cf989a1f4f354bda4bf2beafdd90d
|
[
"tests/test_header.py::test_multiline_metadata"
] |
[
"tests/test_header.py::test_uncomment",
"tests/test_header.py::test_header_to_metadata_and_cell_blank_line",
"tests/test_header.py::test_header_to_metadata_and_cell_no_blank_line",
"tests/test_header.py::test_header_to_metadata_and_cell_metadata",
"tests/test_header.py::test_metadata_and_cell_to_header",
"tests/test_header.py::test_metadata_and_cell_to_header2",
"tests/test_header.py::test_notebook_from_plain_script_has_metadata_filter"
] |
{
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-12-20 08:41:32+00:00
|
mit
| 4,081 |
|
mwouts__jupytext-472
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 45aa942..f22230e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,7 @@
**Fixed**
- Fixed the CLI example for not commenting out magic commands: `--opt comment_magics=false`. In addition, most of the `jupytext` commands in `using-cli.md` are now tested! (#465)
- `jupytext.read` and `jupytext.write` now give more meaningful errors when the format information is incorrect (#462)
+- Multiline comments starting with quadruple quotes should not cause issues anymore (#460)
1.4.1 (2020-03-19)
------------------
diff --git a/jupytext/stringparser.py b/jupytext/stringparser.py
index 17b28fa..1b3c618 100644
--- a/jupytext/stringparser.py
+++ b/jupytext/stringparser.py
@@ -8,6 +8,7 @@ class StringParser:
is quoted or not"""
single = None
triple = None
+ triple_start = None
def __init__(self, language):
self.ignore = language is None
@@ -29,6 +30,8 @@ class StringParser:
if not self.is_quoted() and self.comment is not None and line.startswith(self.comment):
return
+ self.triple_start = -1
+
for i, char in enumerate(line):
if char not in ['"', "'"]:
continue
@@ -46,13 +49,14 @@ class StringParser:
continue
if self.triple == char:
- if line[i - 2:i + 1] == 3 * char:
+ if line[i - 2:i + 1] == 3 * char and i >= self.triple_start + 3:
self.triple = None
continue
if self.triple is not None:
continue
if line[i - 2:i + 1] == 3 * char:
self.triple = char
+ self.triple_start = i
continue
self.single = char
|
mwouts/jupytext
|
271fa1cf8b17a8b3a62dccca8bb8124e755449a0
|
diff --git a/tests/test_read_simple_percent.py b/tests/test_read_simple_percent.py
index 42af388..1a2fa1b 100644
--- a/tests/test_read_simple_percent.py
+++ b/tests/test_read_simple_percent.py
@@ -405,3 +405,21 @@ def test_single_triple_quote_works(no_jupytext_version_number, text='''# ---
print("hello")
''', notebook=new_notebook(cells=[new_code_cell('print("hello")')])):
compare_notebooks(jupytext.reads(text, 'py'), notebook)
+
+
+def test_docstring_with_quadruple_quote(nb=new_notebook(cells=[
+ new_code_cell('''def fun_1(df):
+ """"
+ docstring starting with 4 double quotes and ending with 3
+ """
+ return df'''),
+ new_code_cell('''def fun_2(df):
+ """
+ docstring
+ """
+ return df''')
+])):
+ """Reproduces https://github.com/mwouts/jupytext/issues/460"""
+ py = jupytext.writes(nb, 'py:percent')
+ nb2 = jupytext.reads(py, 'py')
+ compare_notebooks(nb2, nb)
diff --git a/tests/test_stringparser.py b/tests/test_stringparser.py
index 985b31d..36307dc 100644
--- a/tests/test_stringparser.py
+++ b/tests/test_stringparser.py
@@ -37,3 +37,19 @@ def test_single_chars(text="""'This is a single line comment'''
for line in text.splitlines():
assert not sp.is_quoted()
sp.read_line(line)
+
+
+def test_long_string_with_four_quotes(text="""''''This is a multiline
+comment that starts with four quotes
+'''
+
+1 + 1
+"""):
+ quoted = []
+ sp = StringParser('python')
+ for i, line in enumerate(text.splitlines()):
+ if sp.is_quoted():
+ quoted.append(i)
+ sp.read_line(line)
+
+ assert quoted == [1, 2]
|
Wrong cell separation when creating .ipynb from .py
Let's say I work in a notebook file, paired to a `.py` file, and write one function per cell.
When I re-generate the `.ipynb` notebook from the `.py` file, I notice that some functions now live in the same cell.
This is best illustrated by the images below. The following code:

becomes, after regenerating the `.ipynb` from the paired `.py`:

The problem does not impact all functions/cells. I've inspected the code in the `.py` file and cannot find any obvious difference between the impacted cells/functions and the non-impacted ones.
I'm using jupytext version `1.4.0` but had noticed this behaviour on previous versions too.
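It later turned out (see the `StringParser` fix and tests above) that the trigger is a docstring opening with four quotes: the parser saw the overlapping `"""` at the end of `""""` as an immediate close, so the rest of the docstring was treated as code and the cell boundaries shifted. A minimal reproducer, taken from the tests:
```python
def fun_1(df):
    """"
    docstring starting with 4 double quotes and ending with 3
    """
    return df
```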
|
0.0
|
271fa1cf8b17a8b3a62dccca8bb8124e755449a0
|
[
"tests/test_read_simple_percent.py::test_docstring_with_quadruple_quote",
"tests/test_stringparser.py::test_long_string_with_four_quotes"
] |
[
"tests/test_read_simple_percent.py::test_read_simple_file",
"tests/test_read_simple_percent.py::test_read_cell_with_metadata",
"tests/test_read_simple_percent.py::test_read_nbconvert_script",
"tests/test_read_simple_percent.py::test_read_remove_blank_lines",
"tests/test_read_simple_percent.py::test_no_crash_on_square_bracket",
"tests/test_read_simple_percent.py::test_nbconvert_cell",
"tests/test_read_simple_percent.py::test_nbformat_v3_nbpy_cell",
"tests/test_read_simple_percent.py::test_first_cell_markdown_191",
"tests/test_read_simple_percent.py::test_multiline_comments_in_markdown_1",
"tests/test_read_simple_percent.py::test_multiline_comments_in_markdown_2",
"tests/test_read_simple_percent.py::test_multiline_comments_format_option",
"tests/test_read_simple_percent.py::test_multiline_comments_in_raw_cell",
"tests/test_read_simple_percent.py::test_multiline_comments_in_markdown_cell_no_line_return",
"tests/test_read_simple_percent.py::test_multiline_comments_in_markdown_cell_is_robust_to_additional_cell_marker",
"tests/test_read_simple_percent.py::test_single_triple_quote_works",
"tests/test_stringparser.py::test_long_string",
"tests/test_stringparser.py::test_single_chars"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_media",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-03-28 17:29:37+00:00
|
mit
| 4,082 |
|
mwouts__world_trade_data-4
|
diff --git a/.travis.yml b/.travis.yml
index 3ac5038..5fc3b45 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,12 +1,11 @@
-dist: xenial # required for Python >= 3.7
+dist: focal # required for Python >= 3.10
language: python
python:
- "3.6"
- "3.7"
- - "2.7"
- - "3.4"
- - "3.5"
- - "3.8-dev"
+ - "3.8"
+ - "3.9"
+ - "3.10"
install:
# command to install dependencies
- pip install -r requirements-dev.txt
diff --git a/CHANGELOG.md b/CHANGELOG.md
index b9a414a..f46993a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,13 @@
+0.1.1 (2022-08-15)
+==================
+
+**Fixed**
+- Fixed an IndexError when calling `wits.get_tariff_reported` ([#3](https://github.com/mwouts/world_trade_data/issues/3))
+
+**Changed**
+- Versions of Python supported are 3.6 to 3.10.
+
+
0.1.0 (2019-11-25)
==================
diff --git a/README.md b/README.md
index 80b6bcc..90188d8 100644
--- a/README.md
+++ b/README.md
@@ -58,6 +58,15 @@ The nomenclature, and data availability, are accessible with `get_nomenclatures(
Indicators are available with `get_indicator`. Tariff rates can be loaded with `get_tariff_reported` and `get_tariff_estimated`.
+## Working with codes rather than with category names
+
+The three functions above accept a `name_or_id` argument that defaults to `'name'`. Use `name_or_id='id'` to
+get codes rather than full description for products and countries:
+
+```python
+wits.get_indicator('MPRT-TRD-VL', reporter='usa', year='2017', name_or_id='id')
+```
+
## Sample use case
In the below we show how to collect and plot the Import and Export data for the USA in 2017.
@@ -135,4 +144,4 @@ fig.show(renderer='notebook_connected')
- The WITS data can be accessed in R with the [tradestatistics](https://tradestatistics.io/) library.
- An alternative way to access the WITS data is to use [pandasdmx](https://pandasdmx.readthedocs.io/).
-<script async defer src="https://buttons.github.io/buttons.js"></script>
\ No newline at end of file
+<script async defer src="https://buttons.github.io/buttons.js"></script>
diff --git a/setup.py b/setup.py
index fda326c..8ffdabb 100644
--- a/setup.py
+++ b/setup.py
@@ -34,10 +34,9 @@ setup(
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
- 'Programming Language :: Python :: 2.7',
- 'Programming Language :: Python :: 3.4',
- 'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
- 'Programming Language :: Python :: 3.8']
+ 'Programming Language :: Python :: 3.8',
+ 'Programming Language :: Python :: 3.9',
+ 'Programming Language :: Python :: 3.10']
)
diff --git a/world_trade_data/data.py b/world_trade_data/data.py
index 43a324d..aa6e849 100644
--- a/world_trade_data/data.py
+++ b/world_trade_data/data.py
@@ -1,6 +1,8 @@
"""WITS Data: indicators and tariffs"""
import logging
+import warnings
+
import requests
import pandas as pd
import world_trade_data.defaults
@@ -33,7 +35,7 @@ def get_tariff_reported(reporter,
year=world_trade_data.defaults.DEFAULT_YEAR,
name_or_id='name'):
"""Tariffs (reported)"""
- return _get_data(reporter, partner, product, year,
+ return _get_data(reporter, partner, product, year, is_tariff=True,
datatype='reported', datasource='trn', name_or_id=name_or_id)
@@ -43,7 +45,7 @@ def get_tariff_estimated(reporter,
year=world_trade_data.defaults.DEFAULT_YEAR,
name_or_id='name'):
"""Tariffs (estimated)"""
- return _get_data(reporter, partner, product, year,
+ return _get_data(reporter, partner, product, year, is_tariff=True,
datatype='aveestimated', datasource='trn', name_or_id=name_or_id)
@@ -59,7 +61,7 @@ def get_indicator(indicator,
indicator=indicator, datasource=datasource, name_or_id=name_or_id)
-def _get_data(reporter, partner, product, year, datasource, name_or_id, **kwargs):
+def _get_data(reporter, partner, product, year, datasource, name_or_id, is_tariff=False, **kwargs):
args = {'reporter': reporter,
'partner': partner,
'product': product,
@@ -83,16 +85,22 @@ def _get_data(reporter, partner, product, year, datasource, name_or_id, **kwargs
.format('/'.join(list_args)))
response.raise_for_status()
data = response.json()
- return _wits_data_to_df(data, name_or_id=name_or_id)
+ df = _wits_data_to_df(data, name_or_id=name_or_id, is_tariff=is_tariff)
+ if is_tariff and not len(df):
+ warnings.warn("""Did you know? The reporter-partner combination only yields results
+ if the two countries have a preferential trade agreement (PTA).
+ Otherwise, all other tariffs to all non-PTA countries
+ are found if one enters "000" in partner.""")
+ return df
-def _wits_data_to_df(data, value_name='Value', name_or_id='id'):
+def _wits_data_to_df(data, value_name='Value', is_tariff=False, name_or_id='id'):
observation = data['structure']['attributes']['observation']
levels = data['structure']['dimensions']['series']
obs_levels = data['structure']['dimensions']['observation']
series = data['dataSets'][0]['series']
- index_names = [l['name'] for l in levels] + [l['name'] for l in obs_levels]
+ index_names = [level['name'] for level in levels] + [obs_level['name'] for obs_level in obs_levels]
column_names = [value_name] + [o['name'] for o in observation]
all_observations = {value_name: []}
@@ -106,7 +114,7 @@ def _wits_data_to_df(data, value_name='Value', name_or_id='id'):
# When loading tariffs, product is at depth 3, but levels say it's at depth 4
# - So we invert the two levels
- if value_name == 'Rate':
+ if is_tariff:
loc[2], loc[3] = loc[3], loc[2]
observations = series[i]['observations']
diff --git a/world_trade_data/version.py b/world_trade_data/version.py
index 973e52c..7076bb0 100644
--- a/world_trade_data/version.py
+++ b/world_trade_data/version.py
@@ -1,3 +1,3 @@
"""version number"""
-__version__ = '0.1.0'
+__version__ = '0.1.1'
|
mwouts/world_trade_data
|
efde8000e428310eeada81131c3c4d006ace305f
|
diff --git a/tests/test_data.py b/tests/test_data.py
index b416864..c14f499 100644
--- a/tests/test_data.py
+++ b/tests/test_data.py
@@ -33,18 +33,30 @@ def test_get_tariff_reported():
assert df.Value.dtype == np.float64
+def test_get_tariff_reported_issue_3():
+ df = get_tariff_reported(reporter='840', partner='124', product='all', year='2012')
+ assert df.Value.dtype == np.float64
+ assert len(df.index) > 100
+
+
def test_get_tariff_estimated():
df = get_tariff_estimated(reporter='840', partner='000', product='970600')
assert len(df.index) == 1
assert df.Value.dtype == np.float64
+def test_get_tariff_estimated_issue_3():
+ df = get_tariff_estimated(reporter='840', partner='124', product='all', year='2012')
+ assert df.Value.dtype == np.float64
+ assert len(df.index) > 100
+
+
def test_tariff_data_to_df():
current_path = os.path.dirname(__file__)
sample_file = os.path.join(current_path, 'data', 'sample_tariff_data.json')
with open(sample_file) as fp:
data = json.load(fp)
- df = _wits_data_to_df(data, 'Rate')
+ df = _wits_data_to_df(data, is_tariff=True)
assert len(df.index) > 1
assert len(df.columns) > 1
|
IndexError with wits.get_tariff_reported
Hello,
I'm getting an IndexError with the following line(s) of code:
`wits.get_tariff_reported(reporter='840',partner='124',product='all',year='2012')` OR
`wits.get_tariff_reported(reporter='840',partner='124',product='all',year='2012', name_or_id = 'id')`
The output says, "IndexError: list index out of range."
Could you assist with this issue?
Thanks in advance - Eric
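For reference, the patch above fixes the index handling and also adds a warning: a reporter-partner query only yields rows when the two countries have a preferential trade agreement. A hedged sketch of the query that works regardless (requires network access; values as in the issue):
```python
import world_trade_data as wits

# Per the warning added in the patch, partner '000' returns the
# tariffs applied to all non-PTA countries.
df = wits.get_tariff_reported(reporter='840', partner='000',
                              product='all', year='2012')
print(df.head())
```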
|
0.0
|
efde8000e428310eeada81131c3c4d006ace305f
|
[
"tests/test_data.py::test_get_tariff_reported_issue_3",
"tests/test_data.py::test_get_tariff_estimated_issue_3"
] |
[
"tests/test_data.py::test_get_indicator",
"tests/test_data.py::test_get_indicator2",
"tests/test_data.py::test_get_tariff_reported",
"tests/test_data.py::test_get_tariff_estimated",
"tests/test_data.py::test_trade_data_to_df",
"tests/test_data.py::test_warning_on_request_all_reporter_partner",
"tests/test_data.py::test_warning_on_allx3"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-08-09 15:08:07+00:00
|
mit
| 4,083 |
|
mwtoews__gridit-18
|
diff --git a/gridit/classmethods.py b/gridit/classmethods.py
index ace5ef1..767c9d8 100644
--- a/gridit/classmethods.py
+++ b/gridit/classmethods.py
@@ -143,8 +143,11 @@ def from_vector(
Input file, such as a shapefile.
resolution : float
A grid resolution, e.g. 250.0 for 250m x 250m
- filter : dict, optional
- Property filter criteria.
+ filter : dict, str, optional
+ Property filter criteria. For example ``{"id": 4}`` to select one
+ feature with attribute "id" value 4. Or ``{"id": [4, 7, 19]}`` to
+ select features with several values. A SQL WHERE statement can also be
+ used if Fiona 1.9 or later is installed.
buffer : float, default 0.0
Add buffer to extents of vector data.
layer : int or str, default None
@@ -173,38 +176,13 @@ def from_vector(
with fiona.open(fname, "r", layer=layer) as ds:
projection = ds.crs_wkt
if filter:
- for f in ds:
- r = []
- for k in filter.keys():
- r.append(f["properties"].get(k, "") == filter[k])
- if len(r) > 0 and all(r):
- break
- else:
- raise ValueError(
- f"could not find {filter} in {fname} layer {layer}")
- geom_type = f["geometry"]["type"]
- if geom_type == "Polygon":
- ar = np.array(f["geometry"]["coordinates"])
- assert ar.ndim == 3
- assert ar.shape[2] >= 2
- xcs = ar[:, :, 0]
- ycs = ar[:, :, 1]
- elif geom_type == "LineString":
- ar = np.array(f["geometry"]["coordinates"])
- assert ar.ndim == 2
- assert ar.shape[1] >= 2
- xcs = ar[:, 0]
- ycs = ar[:, 1]
- elif geom_type == "Point":
- ar = np.array(f["geometry"]["coordinates"])
- assert ar.ndim == 1
- assert ar.shape[0] >= 2
- xcs = ar[0]
- ycs = ar[1]
- else:
- raise NotImplementedError(
- f"unexpected geometry type {geom_type}")
- bounds = xcs.min(), ycs.min(), xcs.max(), ycs.max()
+ from gridit.file import fiona_filter_collection
+
+ flt = fiona_filter_collection(ds, filter)
+ if len(flt) == 0:
+ logger.error("no features filtered with %s", filter)
+ bounds = flt.bounds
+ flt.close()
else: # full shapefile bounds
bounds = ds.bounds
shape, top_left = get_shape_top_left(bounds, resolution, buffer)
diff --git a/gridit/file.py b/gridit/file.py
index 58e0877..cf4b855 100644
--- a/gridit/file.py
+++ b/gridit/file.py
@@ -1,8 +1,16 @@
"""File methods."""
+from collections.abc import Iterable
from pathlib import Path
import numpy as np
+__all__ = [
+ "fiona_filter_collection",
+ "fiona_property_type",
+ "float32_is_also_float64",
+ "write_raster",
+ "write_vector",
+]
def float32_is_also_float64(val):
"""Return True if float32 and float64 values are the same."""
@@ -232,3 +240,69 @@ def write_vector(
with fiona.open(fname, "w", **kwargs) as ds:
ds.writerecords(recs)
grid.logger.info("wrote %d features", idxs.size)
+
+
+def fiona_filter_collection(ds, filter):
+ """Returns Fiona collection with applied filter.
+
+ Parameters
+ ----------
+ ds : fiona.Collection
+ Input data source
+ filter : dict, str
+ Property filter criteria. For example ``{"id": 4}`` to select one
+ feature with attribute "id" value 4. Or ``{"id": [4, 7, 19]}`` to
+ select features with several values. A SQL WHERE statement can also be
+ used if Fiona 1.9 or later is installed.
+
+ Returns
+ -------
+ fiona.Collection
+
+ Raises
+ ------
+ ModuleNotFoundError
+ If fiona is not installed.
+ """
+ try:
+ import fiona
+ except ModuleNotFoundError:
+ raise ModuleNotFoundError("fiona_filter_collection requires fiona")
+ if not isinstance(ds, fiona.Collection):
+ raise ValueError(f"ds must be fiona.Collection; found {type(ds)}")
+ elif ds.closed:
+ raise ValueError("ds is closed")
+ flt = fiona.io.MemoryFile().open(
+ driver=ds.driver, schema=ds.schema, crs=ds.crs)
+ if isinstance(filter, dict):
+ # check that keys are found in datasource
+ filter_keys = list(filter.keys())
+ ds_attrs = list(ds.schema["properties"].keys())
+ if not set(filter_keys).issubset(ds_attrs):
+ not_found = set(filter_keys).difference(ds_attrs)
+ raise KeyError(
+ f"cannot find filter keys: {not_found}; "
+ f"choose from data source attributes: {ds_attrs}")
+ found = 0
+ for feat in ds:
+ for attr, filt_val in filter.items():
+ feat_val = feat["properties"][attr]
+ if (isinstance(filt_val, Iterable)
+ and not isinstance(filt_val, str)):
+ for fv in filt_val:
+ if feat_val == fv:
+ found += 1
+ flt.write(feat)
+ else:
+ if feat_val == filt_val:
+ found += 1
+ flt.write(feat)
+ elif isinstance(filter, str):
+ if fiona.__version__[0:3] < "1.9":
+ raise ValueError(
+ "Fiona 1.9 or later required to use filter str as SQL WHERE")
+ for feat in ds.filter(where=filter):
+ flt.write(feat)
+ else:
+ raise ValueError("filter must be a dict or str")
+ return flt
diff --git a/pyproject.toml b/pyproject.toml
index 00d2fc7..c2a4617 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -27,6 +27,7 @@ dependencies = [
]
[project.optional-dependencies]
+dev = ["gridit[test,optional]"]
optional = [
"fiona",
"flopy",
|
mwtoews/gridit
|
c1fb5456108f274c63a9b31054303418e8d06d16
|
diff --git a/tests/test_classmethods.py b/tests/test_classmethods.py
index f0a93ee..869753c 100644
--- a/tests/test_classmethods.py
+++ b/tests/test_classmethods.py
@@ -74,10 +74,19 @@ def test_grid_from_vector_point():
assert grid == expected
# filter
+ grid = Grid.from_vector(points_path, 250, {"id": [5, 9]})
+ expected = Grid(250.0, (19, 7), (1810500.0, 5873750.0), grid.projection)
+ assert grid == expected
+
grid = Grid.from_vector(points_path, 250, {"id": 5})
expected = Grid(250.0, (1, 1), (1812000.0, 5869250.0), grid.projection)
assert grid == expected
+ # filter + buffer
+ grid = Grid.from_vector(points_path, 250, {"id": 5}, buffer=240)
+ expected = Grid(250.0, (2, 2), (1811750.0, 5869250.0), grid.projection)
+ assert grid == expected
+
@requires_pkg("fiona")
def test_grid_from_vector_polygon():
@@ -110,10 +119,27 @@ def test_grid_from_vector_line():
# filter
grid = Grid.from_vector(lines_path, 250, {"StreamOrde": 5})
- expected = Grid(250.0, (4, 3), (1811500.0, 5877000.0), grid.projection)
+ expected = Grid(250.0, (19, 14), (1808750.0, 5877000.0), grid.projection)
+ assert grid == expected
+
+ grid = Grid.from_vector(lines_path, 250, {"StreamOrde": [4, 5]})
+ expected = Grid(250.0, (28, 41), (1804750.0, 5877000.0), grid.projection)
assert grid == expected
# buffer
grid = Grid.from_vector(lines_path, 250, buffer=500)
expected = Grid(250.0, (70, 66), (1803000.0, 5878750.0), grid.projection)
assert grid == expected
+
+
+@requires_pkg("fiona")
+def test_grid_from_vector_filter_sql_where():
+ import fiona
+
+ if fiona.__version__[0:3] < "1.9":
+ pytest.skip("Fiona 1.9 or later required to use SQL WHERE")
+
+ # filter
+ grid = Grid.from_vector(lines_path, 250, "StreamOrde>=5")
+ expected = Grid(250.0, (19, 14), (1808750.0, 5877000.0), grid.projection)
+ assert grid == expected
diff --git a/tests/test_file.py b/tests/test_file.py
index efa3df8..88ed267 100644
--- a/tests/test_file.py
+++ b/tests/test_file.py
@@ -2,8 +2,15 @@
import pytest
import numpy as np
+from .conftest import datadir
from gridit import Grid
-from gridit.file import float32_is_also_float64, fiona_property_type
+from gridit.file import (
+ fiona_filter_collection,
+ fiona_property_type,
+ float32_is_also_float64,
+)
+
+points_path = datadir / "waitaku2_points.shp"
def test_float32_is_also_float64():
@@ -246,3 +253,60 @@ def test_write_vector(tmp_path, grid_basic, grid_projection):
grid_basic.write_vector(ar2d.T, "out.shp", "val")
with pytest.raises(ValueError, match="Unable to detect driver"):
grid_basic.write_vector(ar2d, "out.nope", "val")
+
+
+def test_fiona_filter_collection():
+ fiona = pytest.importorskip("fiona")
+ expected_schema = {
+ "geometry": "Point",
+ "properties": {"id": "int:10"},
+ }
+ with fiona.open(points_path) as ds:
+ flt = fiona_filter_collection(ds, filter={"id": 0})
+ assert flt.schema == expected_schema
+ assert len(flt) == 0
+ assert flt.bounds == (0.0, 0.0, 0.0, 0.0)
+
+ flt = fiona_filter_collection(ds, filter={"id": 1})
+ assert flt.schema == expected_schema
+ assert len(flt) == 1
+ np.testing.assert_array_almost_equal(
+ flt.bounds,
+ (1814758.4763, 5871013.6156, 1814758.4763, 5871013.6156),
+ decimal=4,
+ )
+
+ flt = fiona_filter_collection(ds, filter={"id": [1, 8]})
+ assert flt.schema == expected_schema
+ assert len(flt) == 2
+ np.testing.assert_array_almost_equal(
+ flt.bounds,
+ (1812243.7372, 5871013.6156, 1814758.4763, 5876813.8657),
+ decimal=4,
+ )
+
+ if fiona.__version__[0:3] >= "1.9":
+ flt = fiona_filter_collection(ds, filter="id=2")
+ assert len(flt) == 1
+ np.testing.assert_array_almost_equal(
+ flt.bounds,
+ (1812459.1405, 5875971.5429, 1812459.1405, 5875971.5429),
+ decimal=4,
+ )
+
+ # errors
+ with pytest.raises(ValueError, match="SQL Expression Parsing Err"):
+ fiona_filter_collection(ds, filter="id==2")
+
+ else:
+ with pytest.raises(ValueError, match="filter str as SQL WHERE"):
+ fiona_filter_collection(ds, filter="id=2")
+
+ with pytest.raises(ValueError, match=r"ds must be fiona\.Collection"):
+ fiona_filter_collection(None, filter={"id": 1})
+
+ with pytest.raises(KeyError, match="cannot find filter keys"):
+ fiona_filter_collection(ds, filter={"ID": 1})
+
+ with pytest.raises(ValueError, match="ds is closed"):
+ fiona_filter_collection(ds, filter={"id": 1})
|
Feature: Grid.from_array filter to accept other criteria
The request is to allow `filter` to accept more than one value from an iterable. This would allow a Grid definition to be created without needing to merge several features beforehand.
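Per the docstring added in the patch above, a hedged usage sketch (file and attribute names are illustrative, taken from the test data):
```python
from gridit import Grid

# Select features matching any of several attribute values:
grid = Grid.from_vector("waitaku2_points.shp", 250, {"id": [5, 9]})

# With Fiona 1.9+, a SQL WHERE string also works:
grid = Grid.from_vector("waitaku2_lines.shp", 250, "StreamOrde>=5")
```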
|
0.0
|
c1fb5456108f274c63a9b31054303418e8d06d16
|
[
"tests/test_classmethods.py::test_grid_from_bbox",
"tests/test_classmethods.py::test_grid_from_bbox_point",
"tests/test_classmethods.py::test_grid_from_bbox_buffer",
"tests/test_file.py::test_float32_is_also_float64",
"tests/test_file.py::test_fiona_property_type"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-03-24 02:41:18+00:00
|
bsd-3-clause
| 4,084 |
|
mystor__git-revise-53
|
diff --git a/gitrevise/merge.py b/gitrevise/merge.py
index 56caeb4..c0171d0 100644
--- a/gitrevise/merge.py
+++ b/gitrevise/merge.py
@@ -31,7 +31,7 @@ def rebase(commit: Commit, parent: Commit) -> Commit:
tree = merge_trees(
Path("/"),
- ("new parent", "old parent", "incoming"),
+ (parent.summary(), commit.parent().summary(), commit.summary()),
parent.tree(),
commit.parent().tree(),
commit.tree(),
@@ -185,9 +185,9 @@ def merge_blobs(
"merge-file",
"-q",
"-p",
- f"-L{path} ({labels[0]})",
- f"-L{path} ({labels[1]})",
- f"-L{path} ({labels[2]})",
+ f"-L{path} (new parent): {labels[0]}",
+ f"-L{path} (old parent): {labels[1]}",
+ f"-L{path} (current): {labels[2]}",
str(tmpdir / "current"),
str(tmpdir / "base"),
str(tmpdir / "other"),
@@ -201,7 +201,8 @@ def merge_blobs(
# At this point, we know that there are merge conflicts to resolve.
# Prompt to try and trigger manual resolution.
- print(f"Merge conflict for '{path}'")
+ print(f"Conflict in applying '{labels[2]}'")
+ print(f" Path: '{path}'")
if input(" Edit conflicted file? (Y/n) ").lower() == "n":
raise MergeConflict("user aborted")
|
mystor/git-revise
|
162ac90cba010a0f90f2290bf267d130466417d6
|
diff --git a/tests/test_fixup.py b/tests/test_fixup.py
index 8730f22..406d311 100644
--- a/tests/test_fixup.py
+++ b/tests/test_fixup.py
@@ -115,12 +115,12 @@ def test_fixup_nonhead_conflict(basic_repo):
with ed.next_file() as f:
assert f.equals_dedent(
f"""\
- <<<<<<< {os.sep}file1 (new parent)
+ <<<<<<< {os.sep}file1 (new parent): commit1
Hello, World!
How are things?
=======
conflict
- >>>>>>> {os.sep}file1 (incoming)
+ >>>>>>> {os.sep}file1 (current): <git index>
"""
)
f.replace_dedent("conflict1\n")
@@ -128,13 +128,13 @@ def test_fixup_nonhead_conflict(basic_repo):
with ed.next_file() as f:
assert f.equals_dedent(
f"""\
- <<<<<<< {os.sep}file1 (new parent)
+ <<<<<<< {os.sep}file1 (new parent): commit1
conflict1
=======
Hello, World!
Oops, gotta add a new line!
How are things?
- >>>>>>> {os.sep}file1 (incoming)
+ >>>>>>> {os.sep}file1 (current): commit2
"""
)
f.replace_dedent("conflict2\n")
|
Feature request: Say which patch failed when editing a conflict
A useful feature of `git rebase -i` is that it says which commit failed to apply when there is a conflict:
```
Could not apply badbeef... Commit bla bla
```
To me, this is essential information, because the editing is after all not of the end result, but some intermediate commit – you need to know which commit you're editing. Even if you can guess it, you don't want to take chances here, because of how easily this can make a tangled mess of the commits.
Git rebase also repeats the commit title inside the editable git conflict (whereas revise just puts "incoming" here). That would also suffice, though I really don't like the user-friendliness of standard git conflicts. That's why I suggest printing a message in plain English.
Perhaps the git conflict is the better place for this info, but then I would say the real problem is the readability of standard git conflicts: they look too much like a conflict between equals, presenting which two commits it came from, when what matters is which one commit you're editing. They really should just say `You are editing commit "Commit bla bla"`. If that is fixable, I would say you don't need the printed message, because a preview would then hint even better to the user whether to edit or abort.
|
0.0
|
162ac90cba010a0f90f2290bf267d130466417d6
|
[
"tests/test_fixup.py::test_fixup_nonhead_conflict"
] |
[
"tests/test_fixup.py::test_fixup_head",
"tests/test_fixup.py::test_fixup_nonhead",
"tests/test_fixup.py::test_fixup_head_msg",
"tests/test_fixup.py::test_fixup_nonhead_msg",
"tests/test_fixup.py::test_fixup_head_editor",
"tests/test_fixup.py::test_fixup_nonhead_editor",
"tests/test_fixup.py::test_autosquash_nonhead"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2019-12-01 19:07:56+00:00
|
mit
| 4,085 |
|
mystor__git-revise-64
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 466f4d6..ccdf42e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,7 @@
## vNEXT
+* Fixed handling of fixup-of-fixup commits (#58)
* Added support for `git add`'s `--patch` flag (#61)
* Manpage is now installed in `share/man/man1` instead of `man/man1` (#62)
* Which patch failed to apply is now included in the conflict editor (#53)
diff --git a/gitrevise/todo.py b/gitrevise/todo.py
index b2f262d..0c72394 100644
--- a/gitrevise/todo.py
+++ b/gitrevise/todo.py
@@ -107,7 +107,7 @@ def validate_todos(old: List[Step], new: List[Step]):
def autosquash_todos(todos: List[Step]) -> List[Step]:
new_todos = todos[:]
- for step in reversed(todos):
+ for step in todos:
# Check if this is a fixup! or squash! commit, and ignore it otherwise.
summary = step.commit.summary()
if summary.startswith("fixup! "):
|
mystor/git-revise
|
114a08196382fa37386da52bf58678819bdb9544
|
diff --git a/tests/test_fixup.py b/tests/test_fixup.py
index 406d311..c98c744 100644
--- a/tests/test_fixup.py
+++ b/tests/test_fixup.py
@@ -181,3 +181,56 @@ def test_autosquash_nonhead(repo):
assert new.persisted, "commit persisted to disk"
assert new.author == old.author, "author is unchanged"
assert new.committer == repo.default_committer, "committer is updated"
+
+ file1 = new.tree().entries[b"file1"].blob().body
+ assert file1 == b"hello, world\n"
+ file2 = new.tree().entries[b"file2"].blob().body
+ assert file2 == b"second file\nextra line\n"
+
+
+def test_fixup_of_fixup(repo):
+ bash(
+ """
+ echo "hello, world" > file1
+ git add file1
+ git commit -m "commit one"
+
+ echo "second file" > file2
+ git add file2
+ git commit -m "commit two"
+
+ echo "new line!" >> file1
+ git add file1
+ git commit -m "commit three"
+
+ echo "extra line" >> file2
+ git add file2
+ git commit --fixup=HEAD~
+
+ echo "even more" >> file2
+ git add file2
+ git commit --fixup=HEAD
+ """
+ )
+
+ old = repo.get_commit("HEAD~~~")
+ assert old.persisted
+
+ main(["--autosquash", str(old.parent().oid)])
+
+ new = repo.get_commit("HEAD~")
+ assert old != new, "commit was modified"
+ assert old.parents() == new.parents(), "parents are unchanged"
+
+ assert old.tree() != new.tree(), "tree is changed"
+
+ assert new.message == old.message, "message should not be changed"
+
+ assert new.persisted, "commit persisted to disk"
+ assert new.author == old.author, "author is unchanged"
+ assert new.committer == repo.default_committer, "committer is updated"
+
+ file1 = new.tree().entries[b"file1"].blob().body
+ assert file1 == b"hello, world\n"
+ file2 = new.tree().entries[b"file2"].blob().body
+ assert file2 == b"second file\nextra line\neven more\n"
|
Fixup of fixup not detected correctly
Hi,
I often tend to do a lot of patches and then "squash" them at once. This also often means doing fixup commits on top of fixup commits. This is completely fine with git rebase, which orders the commits correctly when using `--autosquash`. Revise, on the other hand, only moves the first fixup commit, not the subsequent one. I've put together a minimal example in a new repo; hope it helps with understanding the issue.
The git log after fixups
```
b87da89 (HEAD -> master) fixup! fixup! commit file
b6f2725 commit foo
30ef468 fixup! commit file
66129ee commit bar
8c62b80 commit file
```
What `git rebase -i --autosquash HEAD~5` shows:
```
pick 8c62b80 commit file
fixup 30ef468 fixup! commit file
fixup b87da89 fixup! fixup! commit file
pick 66129ee commit bar
pick b6f2725 commit foo
```
What `git revise -i --autosquash HEAD~5` shows:
```
pick 8c62b80f0846 commit file
fixup 30ef468dcb74 fixup! commit file
pick 66129ee8750d commit bar
fixup b87da89b57b3 fixup! fixup! commit file
pick b6f27254b70f commit foo
```
The revise command only reorders the first fixup, not the subsequent one.
My git version and git-revise version:
```
> git --version
git version 2.25.2
> git-revise --version
0.5.1
```
|
0.0
|
114a08196382fa37386da52bf58678819bdb9544
|
[
"tests/test_fixup.py::test_fixup_of_fixup"
] |
[
"tests/test_fixup.py::test_fixup_head",
"tests/test_fixup.py::test_fixup_nonhead",
"tests/test_fixup.py::test_fixup_head_msg",
"tests/test_fixup.py::test_fixup_nonhead_msg",
"tests/test_fixup.py::test_fixup_head_editor",
"tests/test_fixup.py::test_fixup_nonhead_editor",
"tests/test_fixup.py::test_fixup_nonhead_conflict",
"tests/test_fixup.py::test_autosquash_nonhead"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-05-16 18:02:00+00:00
|
mit
| 4,086 |
|
mystor__git-revise-72
|
diff --git a/gitrevise/merge.py b/gitrevise/merge.py
index 9e0348d..bfe6b09 100644
--- a/gitrevise/merge.py
+++ b/gitrevise/merge.py
@@ -204,7 +204,7 @@ def merge_blobs(
print(f"Conflict applying '{labels[2]}'")
print(f" Path: '{path}'")
if input(" Edit conflicted file? (Y/n) ").lower() == "n":
- raise MergeConflict("user aborted")
+ raise MergeConflict("user aborted") # pylint: disable=W0707
# Open the editor on the conflicted file. We ensure the relative path
# matches the path of the original file for a better editor experience.
@@ -222,6 +222,6 @@ def merge_blobs(
# Was the merge successful?
if input(" Merge successful? (y/N) ").lower() != "y":
- raise MergeConflict("user aborted")
+ raise MergeConflict("user aborted") # pylint: disable=W0707
return Blob(current.repo, merged)
diff --git a/gitrevise/todo.py b/gitrevise/todo.py
index 511010b..40f1b61 100644
--- a/gitrevise/todo.py
+++ b/gitrevise/todo.py
@@ -1,8 +1,8 @@
import re
from enum import Enum
-from typing import List, Optional
+from typing import Dict, List, Optional, Set
-from .odb import Commit, Repository
+from .odb import Commit, Oid, Repository
from .utils import run_editor, run_sequence_editor, edit_commit_message, cut_commit
@@ -104,8 +104,24 @@ def validate_todos(old: List[Step], new: List[Step]):
raise ValueError("'index' actions follow all non-index todo items")
+class CyclicFixupError(Exception):
+ pass
+
+
+def count_fixup_commits(
+ fixups: Dict[Oid, List[Oid]], visited: Set[Oid], node: Oid
+) -> int:
+ if node in visited:
+ raise CyclicFixupError(f"fixups would create cycle in {node}")
+ visited.add(node)
+ return 1 + sum(
+ count_fixup_commits(fixups, visited, fixup) for fixup in fixups.get(node, [])
+ )
+
+
def autosquash_todos(todos: List[Step]) -> List[Step]:
new_todos = todos[:]
+ fixups: Dict[Oid, List[Oid]] = {}
for step in todos:
# Check if this is a fixup! or squash! commit, and ignore it otherwise.
@@ -125,11 +141,19 @@ def autosquash_todos(todos: List[Step]) -> List[Step]:
needle
) or target.commit.oid.hex().startswith(needle):
found = idx
+ if target.commit.oid not in fixups:
+ fixups[target.commit.oid] = []
+ fixups[target.commit.oid] += [step.commit.oid]
+ number_of_transitive_fixup_commits = (
+ count_fixup_commits(fixups, set(), target.commit.oid) - 1
+ )
break
if found is not None:
# Insert a new `fixup` or `squash` step in the correct place.
- new_todos.insert(found + 1, Step(kind, step.commit))
+ new_todos.insert(
+ found + number_of_transitive_fixup_commits, Step(kind, step.commit)
+ )
# Remove the existing step.
new_todos.remove(step)
diff --git a/gitrevise/tui.py b/gitrevise/tui.py
index feaa94b..7942854 100644
--- a/gitrevise/tui.py
+++ b/gitrevise/tui.py
@@ -12,7 +12,13 @@ from .utils import (
cut_commit,
local_commits,
)
-from .todo import apply_todos, build_todos, edit_todos, autosquash_todos
+from .todo import (
+ CyclicFixupError,
+ apply_todos,
+ build_todos,
+ edit_todos,
+ autosquash_todos,
+)
from .merge import MergeConflict
from . import __version__
@@ -219,6 +225,9 @@ def main(argv: Optional[List[str]] = None):
except CalledProcessError as err:
print(f"subprocess exited with non-zero status: {err.returncode}")
sys.exit(1)
+ except CyclicFixupError as err:
+ print(f"todo error: {err}")
+ sys.exit(1)
except EditorError as err:
print(f"editor error: {err}")
sys.exit(1)
diff --git a/gitrevise/utils.py b/gitrevise/utils.py
index 6e290dc..7c9a9ad 100644
--- a/gitrevise/utils.py
+++ b/gitrevise/utils.py
@@ -70,7 +70,7 @@ def edit_file_with_editor(editor: str, path: Path) -> bytes:
cmd = ["sh", "-c", f'{editor} "$@"', editor, path.name]
run(cmd, check=True, cwd=path.parent)
except CalledProcessError as err:
- raise EditorError(f"Editor exited with status {err}")
+ raise EditorError(f"Editor exited with status {err}") from err
return path.read_bytes()
@@ -85,8 +85,10 @@ def get_commentchar(repo: Repository, text: bytes) -> bytes:
pass
try:
return chars[:1]
- except IndexError:
- raise EditorError("Unable to automatically select a comment character")
+ except IndexError as err:
+ raise EditorError(
+ "Unable to automatically select a comment character"
+ ) from err
if commentchar == b"":
raise EditorError("core.commentChar must not be empty")
return commentchar
|
mystor/git-revise
|
7176273c5d51a35f89bd33a00c033babd49e2d3b
|
diff --git a/tests/test_fixup.py b/tests/test_fixup.py
index e11203b..a4f59d9 100644
--- a/tests/test_fixup.py
+++ b/tests/test_fixup.py
@@ -1,6 +1,8 @@
# pylint: skip-file
from conftest import *
+from gitrevise.utils import commit_range
+from gitrevise.todo import CyclicFixupError, build_todos, autosquash_todos
import os
@@ -103,8 +105,6 @@ def test_fixup_nonhead_editor(basic_repo):
def test_fixup_nonhead_conflict(basic_repo):
- import textwrap
-
bash('echo "conflict" > file1')
bash("git add file1")
@@ -278,3 +278,74 @@ def test_fixup_by_id(repo):
assert file1 == b"hello, world\n"
file2 = new.tree().entries[b"file2"].blob().body
assert file2 == b"second file\nextra line\n"
+
+
+def test_fixup_order(repo):
+ bash(
+ """
+ git commit --allow-empty -m 'old'
+ git commit --allow-empty -m 'target commit'
+ git commit --allow-empty -m 'first fixup' --fixup=HEAD
+ git commit --allow-empty -m 'second fixup' --fixup=HEAD~
+ """
+ )
+
+ old = repo.get_commit("HEAD~3")
+ assert old.persisted
+ tip = repo.get_commit("HEAD")
+ assert tip.persisted
+
+ todos = build_todos(commit_range(old, tip), index=None)
+ [target, first, second] = autosquash_todos(todos)
+
+ assert b"target commit" in target.commit.message
+ assert b"first fixup" in first.commit.message
+ assert b"second fixup" in second.commit.message
+
+
+def test_fixup_order_transitive(repo):
+ bash(
+ """
+ git commit --allow-empty -m 'old'
+ git commit --allow-empty -m 'target commit'
+ git commit --allow-empty -m '1.0' --fixup=HEAD
+ git commit --allow-empty -m '1.1' --fixup=HEAD
+ git commit --allow-empty -m '2.0' --fixup=HEAD~2
+ """
+ )
+
+ old = repo.get_commit("HEAD~4")
+ assert old.persisted
+ tip = repo.get_commit("HEAD")
+ assert tip.persisted
+
+ todos = build_todos(commit_range(old, tip), index=None)
+ [target, a, b, c] = autosquash_todos(todos)
+
+ assert b"target commit" in target.commit.message
+ assert b"1.0" in a.commit.message
+ assert b"1.1" in b.commit.message
+ assert b"2.0" in c.commit.message
+
+
+def test_fixup_order_cycle(repo):
+ bash(
+ """
+ git commit --allow-empty -m 'old'
+ git commit --allow-empty -m 'target commit'
+ git commit --allow-empty -m 'fixup! fixup!'
+ """
+ )
+
+ old = repo.get_commit("HEAD~2")
+ assert old.persisted
+ tip = repo.get_commit("HEAD")
+ assert tip.persisted
+
+ todos = build_todos(commit_range(old, tip), index=None)
+
+ try:
+ autosquash_todos(todos)
+ assert False, "Should raise an error on cyclic fixup graphs"
+ except CyclicFixupError:
+ pass
|
fixup! commits are reversed during interactive revise calls
If I have a “base” commit and then several “fixup! base” commits, running an interactive revise will prepare the commit list in the wrong order.
For example, if the commit list is:
```
abc123 base commit
abc456 fixup! base commit
abc789 fixup! base commit
```
Then running `git revise -i abc123~` will open an editor with these contents:
```
pick abc123 base commit
fixup abc789 fixup! base commit
fixup abc456 fixup! base commit
```
It should have generated this instead:
```
pick abc123 base commit
fixup abc456 fixup! base commit
fixup abc789 fixup! base commit
```
I suspect the [this commit](https://github.com/mystor/git-revise/commit/837c779f17522fca0eaf4f8b59f4beefcef2496f) is the culprit.
|
0.0
|
7176273c5d51a35f89bd33a00c033babd49e2d3b
|
[
"tests/test_fixup.py::test_fixup_head",
"tests/test_fixup.py::test_fixup_nonhead",
"tests/test_fixup.py::test_fixup_head_msg",
"tests/test_fixup.py::test_fixup_nonhead_msg",
"tests/test_fixup.py::test_fixup_head_editor",
"tests/test_fixup.py::test_fixup_nonhead_editor",
"tests/test_fixup.py::test_fixup_nonhead_conflict",
"tests/test_fixup.py::test_autosquash_nonhead",
"tests/test_fixup.py::test_fixup_of_fixup",
"tests/test_fixup.py::test_fixup_by_id",
"tests/test_fixup.py::test_fixup_order",
"tests/test_fixup.py::test_fixup_order_transitive",
"tests/test_fixup.py::test_fixup_order_cycle"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-08-16 09:38:51+00:00
|
mit
| 4,087 |
|
nackjicholson__aiosql-25
|
diff --git a/aiosql/query_loader.py b/aiosql/query_loader.py
index 7a9efb6..d8ec807 100644
--- a/aiosql/query_loader.py
+++ b/aiosql/query_loader.py
@@ -18,7 +18,7 @@ class QueryLoader:
self.record_classes = record_classes if record_classes is not None else {}
def _make_query_datum(self, query_str: str):
- lines = query_str.strip().splitlines()
+ lines = [l.strip() for l in query_str.strip().splitlines()]
query_name = lines[0].replace("-", "_")
if query_name.endswith("<!"):
|
nackjicholson/aiosql
|
b9b5c03999106f5bdd6543f8c3e0a820dc338eeb
|
diff --git a/tests/test_loading.py b/tests/test_loading.py
index f861246..e0bfc22 100644
--- a/tests/test_loading.py
+++ b/tests/test_loading.py
@@ -4,6 +4,7 @@ from unittest import mock
import pytest
import aiosql
+from aiosql.exceptions import SQLParseException
from aiosql.queries import Queries
from aiosql.query_loader import QueryLoader
@@ -49,3 +50,14 @@ def test_fromstr_queryloader_cls(sql):
aiosql.from_str(sql, "aiosqlite", loader_cls=mock_loader)
assert mock_loader.called
+
+
+def test_trailing_space_on_lines_does_not_error():
+ # There is whitespace in this string after the line ends
+ sql_str = "-- name: trailing-space^ \n"
+ sql_str += "select * from test; \n"
+
+ try:
+ aiosql.from_str(sql_str, "aiosqlite")
+ except SQLParseException:
+ pytest.fail("Raised SQLParseException due to trailing space in query.")
|
Trailing whitespace on lines in SQL causes a parse exception.
```python
Python 3.8.5 (default, Jul 27 2020, 08:42:51)
[GCC 10.1.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import aiosql
>>> import psycopg2
>>> from psycopg2.extras import execute_values
>>> sql_str = """
... -- name: create_schema#
... create table test (id int primary key, v1 int, v2 int);
...
... -- name: insert!
... INSERT INTO test (id, v1, v2) VALUES %s;
...
... -- name: update!
... UPDATE test SET v1 = data.v1 FROM (VALUES %s) AS data (id, v1)
... WHERE test.id = data.id;
...
... -- name: getem
... select * from test order by id;
... """
>>>
>>> queries = aiosql.from_str(sql_str, "psycopg2")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/nackjicholson/repos/aiosql/aiosql/aiosql.py", line 83, in from_str
query_data = query_loader.load_query_data_from_sql(sql)
File "/home/nackjicholson/repos/aiosql/aiosql/query_loader.py", line 74, in load_query_data_from_sql
query_data.append(self._make_query_datum(query_sql_str))
File "/home/nackjicholson/repos/aiosql/aiosql/query_loader.py", line 43, in _make_query_datum
raise SQLParseException(
aiosql.exceptions.SQLParseException: name must convert to valid python variable, got "create_schema# ".
```
Kind of hard to tell from that, but the way I copy-pasted the lines introduced trailing whitespace at the end of the lines, which breaks our code. There probably needs to be a regex change, or maybe a `.strip()` call somewhere in `query_loader.py`.
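Indeed, the one-line fix above strips each line individually. A minimal sketch of the before/after behaviour, assuming `_make_query_datum` receives the text following `-- name:` as its first line (as the patched code suggests):
```python
query_str = "trailing-space^   \nselect * from test;   "

# Before: only the whole block was stripped, so line-level trailing
# whitespace survived and the parsed name kept its spaces.
lines = query_str.strip().splitlines()
print(repr(lines[0]))  # 'trailing-space^   '

# After: every line is stripped as well.
lines = [line.strip() for line in query_str.strip().splitlines()]
print(repr(lines[0]))  # 'trailing-space^'
```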
|
0.0
|
b9b5c03999106f5bdd6543f8c3e0a820dc338eeb
|
[
"tests/test_loading.py::test_trailing_space_on_lines_does_not_error"
] |
[
"tests/test_loading.py::test_frompath_queries_cls",
"tests/test_loading.py::test_frompath_queryloader_cls",
"tests/test_loading.py::test_fromstr_queries_cls",
"tests/test_loading.py::test_fromstr_queryloader_cls"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2020-08-07 05:37:00+00:00
|
bsd-2-clause
| 4,088 |
|
naftulikay__mutagen-tools-23
|
diff --git a/src/mutagentools/flac/convert.py b/src/mutagentools/flac/convert.py
index 0ce867e..8b0438a 100644
--- a/src/mutagentools/flac/convert.py
+++ b/src/mutagentools/flac/convert.py
@@ -46,8 +46,8 @@ def convert_flac_to_id3(flac):
result.append(convert_genre_to_tcon(tags.pop('genre'), tags.pop('style') if 'style' in tags.keys() else []))
if 'discnumber' in tags.keys():
- result.append(convert_disc_number_to_tpos(tags.pop('discnumber'),
- tags.pop('totaldiscs') if 'totaldiscs' in tags.keys() else None))
+ result.append(convert_disc_number_to_tpos(first_of_list(tags.pop('discnumber')),
+ first_of_list(first(pop_keys(tags, 'totaldiscs', 'disctotal')))))
if contains_any(tags.keys(), 'date', 'year'):
result.append(convert_date_to_tdrc(first(pop_keys(tags, 'date', 'year'))))
@@ -67,7 +67,7 @@ def convert_flac_to_id3(flac):
if 'tracknumber' in tags.keys():
tracknumber = first_of_list(tags.pop('tracknumber'))
- totaltracks = first_of_list(tags.pop('totaltracks')) if 'totaltracks' in tags.keys() else None
+ totaltracks = first_of_list(first(pop_keys(tags, 'totaltracks', 'tracktotal')))
if PART_OF_SET.match(tracknumber):
# it's a complicated dude
|
naftulikay/mutagen-tools
|
84a04ac4a422feefdb4e9e4dba82fbb7586b1148
|
diff --git a/src/mutagentools/flac/tests.py b/src/mutagentools/flac/tests.py
index d57b6e9..7f42ec8 100644
--- a/src/mutagentools/flac/tests.py
+++ b/src/mutagentools/flac/tests.py
@@ -242,6 +242,62 @@ class FullConversionTestCase(unittest.TestCase):
self.assertEqual(['Artist'], id3.get('TPE1'))
self.assertEqual(2017, int(str(id3.get('TDRC').text[0])))
+ def test_convert_tracktotal(self):
+ """Tests that converting a track number and total number of tracks is accomplished."""
+ tags = {
+ 'tracknumber': '1',
+ 'totaltracks': '3',
+ 'tracktotal': '5',
+ }
+
+ flac_mock = mock.MagicMock()
+ flac_mock.tags = tags
+
+ id3 = ID3()
+ list(map(lambda t: id3.add(t), convert_flac_to_id3(flac_mock)))
+ # make sure that no TXXX tags are created
+ self.assertEqual(0, len(list(
+ filter(lambda f: f.FrameID == 'TXXX', id3.values()
+ ))))
+
+ def test_convert_tracktotal_no_total(self):
+ """Tests that total track numbers are detected properly."""
+ # test that the track got populated singularly
+ flac_mock = mock.MagicMock()
+ flac_mock.tags = { 'tracknumber': '1' }
+
+ id3 = ID3()
+ list(map(lambda t: id3.add(t), convert_flac_to_id3(flac_mock)))
+ self.assertEqual('01', id3.get('TRCK'))
+
+ def test_convert_disctotal_no_total(self):
+ """Tests that total disc numbers something something."""
+ # test that the track got populated singularly
+ flac_mock = mock.MagicMock()
+ flac_mock.tags = { 'discnumber': '1' }
+
+ id3 = ID3()
+ list(map(lambda t: id3.add(t), convert_flac_to_id3(flac_mock)))
+ self.assertEqual('1', id3.get('TPOS'))
+
+ def test_convert_disctotal(self):
+ """Tests that total disc numbers something something."""
+ # test that the track got populated singularly
+ flac_mock = mock.MagicMock()
+ flac_mock.tags = {
+ 'discnumber': '1',
+ 'totaldiscs': '3',
+ 'disctotal': '5',
+ }
+
+ id3 = ID3()
+ list(map(lambda t: id3.add(t), convert_flac_to_id3(flac_mock)))
+ self.assertEqual('1/3', id3.get('TPOS'))
+ # make sure that no TXXX tags are created
+ self.assertEqual(0, len(list(
+ filter(lambda f: f.FrameID == 'TXXX', id3.values()
+ ))))
+
class IndividualConversionTestCase(unittest.TestCase):
|
Disc Count isn't Being Copied Properly from EasyTAG FLACs to MP3s
**TL;DR** EasyTAG removes the `encoder` tag :unamused: and screws things up with the tags it uses.
Three bugs have been identified in `flac2id3`, two of which are fixable in this project (a sketch of the helper pattern the fix relies on follows the list):
- Prevent EasyTag from removing the `encoder` tag.
- [ ] Correctly bring in `tracktotal` into `TRCK` conversions.
- [ ] Correctly bring in `disctotal` into `TPOS` conversions.
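The patch above leans on small helpers (`pop_keys`, `first`, `first_of_list`) to prefer whichever of the duplicate Vorbis keys is present while consuming both. A minimal sketch of how such helpers could behave — these definitions are assumptions for illustration, not the project's actual code:

```python
def pop_keys(mapping, *keys):
    """Pop every named key that exists, returning the popped values in order.

    Popping eagerly matters: both 'totaltracks' and 'tracktotal' get removed,
    so neither can later leak into a generic TXXX frame.
    """
    return [mapping.pop(key) for key in keys if key in mapping]

def first(iterable, default=None):
    """Return the first item of an iterable, or a default when it is empty."""
    return next(iter(iterable), default)

def first_of_list(value, default=None):
    """FLAC tag values are usually lists; unwrap the first element."""
    if isinstance(value, (list, tuple)):
        return value[0] if value else default
    return value

tags = {'tracknumber': ['1'], 'totaltracks': ['3'], 'tracktotal': ['5']}
total = first_of_list(first(pop_keys(tags, 'totaltracks', 'tracktotal')))
print(total)   # -> 3
print(tags)    # -> {'tracknumber': ['1']}  (both total keys consumed)
```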
|
0.0
|
84a04ac4a422feefdb4e9e4dba82fbb7586b1148
|
[
"src/mutagentools/flac/tests.py::FullConversionTestCase::test_convert_disctotal",
"src/mutagentools/flac/tests.py::FullConversionTestCase::test_convert_tracktotal"
] |
[
"src/mutagentools/flac/tests.py::FullConversionTestCase::test_convert_flac_to_id3_adds_tpos",
"src/mutagentools/flac/tests.py::FullConversionTestCase::test_convert_tracktotal_no_total",
"src/mutagentools/flac/tests.py::FullConversionTestCase::test_convert_disctotal_no_total",
"src/mutagentools/flac/tests.py::FullConversionTestCase::test_convert_flac_to_id3_track",
"src/mutagentools/flac/tests.py::FullConversionTestCase::test_convert_flac_to_id3_duplicates",
"src/mutagentools/flac/tests.py::FullConversionTestCase::test_convert_flac_to_id3",
"src/mutagentools/flac/tests.py::MainTestCase::test_to_json_dict_flatten",
"src/mutagentools/flac/tests.py::MainTestCase::test_to_json_dict",
"src/mutagentools/flac/tests.py::MainTestCase::test_to_json_dict_pictures",
"src/mutagentools/flac/tests.py::IndividualConversionTestCase::test_convert_composer_to_tcom",
"src/mutagentools/flac/tests.py::IndividualConversionTestCase::test_convert_tracknumber_to_trck",
"src/mutagentools/flac/tests.py::IndividualConversionTestCase::test_convert_picture_to_apic",
"src/mutagentools/flac/tests.py::IndividualConversionTestCase::test_convert_mbid_to_ufid",
"src/mutagentools/flac/tests.py::IndividualConversionTestCase::test_convert_encoded_by_to_txxx",
"src/mutagentools/flac/tests.py::IndividualConversionTestCase::test_convert_toc_to_mcdi_str",
"src/mutagentools/flac/tests.py::IndividualConversionTestCase::test_convert_encoder_settings_to_txxx",
"src/mutagentools/flac/tests.py::IndividualConversionTestCase::test_convert_length_to_tlen",
"src/mutagentools/flac/tests.py::IndividualConversionTestCase::test_convert_artist_to_tpe1",
"src/mutagentools/flac/tests.py::IndividualConversionTestCase::test_convert_disc_number_to_tpos",
"src/mutagentools/flac/tests.py::IndividualConversionTestCase::test_convert_date_to_tdrc",
"src/mutagentools/flac/tests.py::IndividualConversionTestCase::test_convert_encoder_to_txxx",
"src/mutagentools/flac/tests.py::IndividualConversionTestCase::test_convert_toc_to_mcdi_bytes",
"src/mutagentools/flac/tests.py::IndividualConversionTestCase::test_convert_organization_to_tpub",
"src/mutagentools/flac/tests.py::IndividualConversionTestCase::test_convert_generic_to_txxx",
"src/mutagentools/flac/tests.py::IndividualConversionTestCase::test_convert_album_to_talb",
"src/mutagentools/flac/tests.py::IndividualConversionTestCase::test_convert_title_to_tit2",
"src/mutagentools/flac/tests.py::IndividualConversionTestCase::test_convert_genre_to_tcon",
"src/mutagentools/flac/tests.py::IndividualConversionTestCase::test_convert_albumartist_to_tpe2"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2017-04-25 04:00:50+00:00
|
mit
| 4,089 |
|
nagisc007__storybuilder-181
|
diff --git a/builder/tools.py b/builder/tools.py
index d67e47e..119a5d3 100644
--- a/builder/tools.py
+++ b/builder/tools.py
@@ -298,5 +298,5 @@ def _story_title_of(act: TagAction, level: int) -> str:
assert isinstance(act, TagAction), "act Must be TagAction class!"
assert isinstance(level, int), "level Must be int!"
- return "{} {}\n".format("#" * level, act.note)
+ return "{}{} {}\n".format("\n" if level > 1 else "", "#" * level, act.note)
|
nagisc007/storybuilder
|
0beb7a2554cb09dcdc523aa63dbb6ffb4f58a007
|
diff --git a/tests/test_tools.py b/tests/test_tools.py
index a6530a4..90ca074 100644
--- a/tests/test_tools.py
+++ b/tests/test_tools.py
@@ -87,7 +87,7 @@ class PrivateMethodsTest(unittest.TestCase):
data = [
(self.ma.comment("test"), GroupType.STORY, 1, "<!--test-->"),
(self.ma.title("test"), GroupType.STORY, 1, "# test\n"),
- (self.ma.title("test"), GroupType.STORY, 2, "## test\n"),
+ (self.ma.title("test"), GroupType.STORY, 2, "\n## test\n"),
(self.ma.title("test"), GroupType.SCENE, 1, "**test**"),
(self.ma.hr(), GroupType.STORY, 1, "--------" * 9),
]
@@ -325,7 +325,7 @@ class PrivateMethodsTest(unittest.TestCase):
data = [
("a test", 0, " a test\n"),
("a test", 1, "# a test\n"),
- ("a test", 2, "## a test\n"),
+ ("a test", 2, "\n## a test\n"),
]
for title, lv, expected in data:
|
Need to fix: insert a blank line before scene headings
Insert a blank line before the start of each scene
===
**Description**
In the action output, leave one extra blank line before the Markdown heading at the start of each scene.
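A standalone sketch of the formatting rule the patch introduces (assuming `act.note` carries the title text):

```python
def story_title_of(note: str, level: int) -> str:
    # A level-1 title opens the document, so it needs no leading blank line;
    # deeper headings get one blank line above them to separate scenes.
    return "{}{} {}\n".format("\n" if level > 1 else "", "#" * level, note)

print(repr(story_title_of("test", 1)))  # '# test\n'
print(repr(story_title_of("test", 2)))  # '\n## test\n'
```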
|
0.0
|
0beb7a2554cb09dcdc523aa63dbb6ffb4f58a007
|
[
"tests/test_tools.py::PrivateMethodsTest::test_action_of_by_tag",
"tests/test_tools.py::PrivateMethodsTest::test_story_title_of"
] |
[
"tests/test_tools.py::PrivateMethodsTest::test_action_of_by_type",
"tests/test_tools.py::PrivateMethodsTest::test_action_with_obj_and_info_as_eng",
"tests/test_tools.py::PrivateMethodsTest::test_action_with_obj_and_info_as_jpn",
"tests/test_tools.py::PrivateMethodsTest::test_behavior_with_obj",
"tests/test_tools.py::PrivateMethodsTest::test_comment_of",
"tests/test_tools.py::PrivateMethodsTest::test_description_of_by_tag",
"tests/test_tools.py::PrivateMethodsTest::test_description_of_by_type",
"tests/test_tools.py::PrivateMethodsTest::test_flag_info_if",
"tests/test_tools.py::PrivateMethodsTest::test_hr_of",
"tests/test_tools.py::PrivateMethodsTest::test_list_head_inserted",
"tests/test_tools.py::PrivateMethodsTest::test_output_story_to_console",
"tests/test_tools.py::PrivateMethodsTest::test_output_story_to_file_as_action",
"tests/test_tools.py::PrivateMethodsTest::test_output_story_to_file_as_description",
"tests/test_tools.py::PrivateMethodsTest::test_output_with_linenumber",
"tests/test_tools.py::PrivateMethodsTest::test_scene_title_of",
"tests/test_tools.py::PrivateMethodsTest::test_story_converted_as_action",
"tests/test_tools.py::PrivateMethodsTest::test_story_converted_as_action_in_group",
"tests/test_tools.py::PrivateMethodsTest::test_story_converted_as_action_jpn",
"tests/test_tools.py::PrivateMethodsTest::test_story_converted_as_description",
"tests/test_tools.py::PrivateMethodsTest::test_story_converted_as_description_in_group",
"tests/test_tools.py::PrivateMethodsTest::test_story_data_converted"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-04-05 00:31:10+00:00
|
mit
| 4,090 |
|
nagisc007__storybuilder-196
|
diff --git a/builder/commons.py b/builder/commons.py
index 720a692..de6ab2d 100644
--- a/builder/commons.py
+++ b/builder/commons.py
@@ -74,7 +74,7 @@ def extraspace_chopped(target: str, lang: LangType) -> str:
assert_isclass(lang, LangType)
if lang is LangType.JPN:
- return re.sub(r'。 (.)', r'。\1', target)
+ return re.sub(r'。[ 、](.)', r'。\1', target)
else:
return re.sub(r'\s+', r' ', target)
|
nagisc007/storybuilder
|
2b764e3c3e949af354d6d32467e9f2780386e301
|
diff --git a/tests/test_commons.py b/tests/test_commons.py
index edb3166..d31cac1 100644
--- a/tests/test_commons.py
+++ b/tests/test_commons.py
@@ -134,6 +134,8 @@ class PublicMethodsTest(unittest.TestCase):
" これを。ただしくする。"),
(" This is a pen. the pen. ", LangType.ENG,
" This is a pen. the pen. "),
+ (" これを。、ただしくして。", LangType.JPN,
+ " これを。ただしくして。"),
]
for v, lng, expected in data:
|
Need to fix: unreplaced maru (。) and ten (、)
The sequence 「。、」 is being left in the output, so fix it
===
**Description**
A description that ends with 「。」 produces 「。、」 in the output, and that sequence is currently left as-is, so fix it.
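The one-character change to the regex character class can be sanity-checked in isolation:

```python
import re

def extraspace_chopped_jpn(target: str) -> str:
    # Remove either a full-width space or a stray ten (、) right after maru (。).
    return re.sub(r'。[ 、](.)', r'。\1', target)

print(extraspace_chopped_jpn("これを。 ただしくする。"))   # これを。ただしくする。
print(extraspace_chopped_jpn("これを。、ただしくして。"))  # これを。ただしくして。
```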
|
0.0
|
2b764e3c3e949af354d6d32467e9f2780386e301
|
[
"tests/test_commons.py::PublicMethodsTest::test_extraspace_chopped"
] |
[
"tests/test_commons.py::PublicMethodsTest::test_behavior_with_np_of",
"tests/test_commons.py::PublicMethodsTest::test_comma_of",
"tests/test_commons.py::PublicMethodsTest::test_description_of",
"tests/test_commons.py::PublicMethodsTest::test_description_of_if",
"tests/test_commons.py::PublicMethodsTest::test_dialogue_from_description",
"tests/test_commons.py::PublicMethodsTest::test_dialogue_from_description_if",
"tests/test_commons.py::PublicMethodsTest::test_dialogue_from_info",
"tests/test_commons.py::PublicMethodsTest::test_infos_of",
"tests/test_commons.py::PublicMethodsTest::test_object_names_of",
"tests/test_commons.py::PublicMethodsTest::test_object_names_of_without_something",
"tests/test_commons.py::PublicMethodsTest::test_objects_all_from",
"tests/test_commons.py::PublicMethodsTest::test_objects_from",
"tests/test_commons.py::PublicMethodsTest::test_objects_from_action_without_something",
"tests/test_commons.py::PublicMethodsTest::test_objects_from_without_something",
"tests/test_commons.py::PublicMethodsTest::test_sentence_from",
"tests/test_commons.py::PublicMethodsTest::test_something_name_if",
"tests/test_commons.py::PublicMethodsTest::test_subject_name_of",
"tests/test_commons.py::PrivateMethodsTest::test_endpoint_replaced_if_with_comma",
"tests/test_commons.py::PrivateMethodsTest::test_space_replaced_if_with_symbol"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-04-06 11:20:57+00:00
|
mit
| 4,091 |
|
nagisc007__storybuilder-199
|
diff --git a/builder/tools.py b/builder/tools.py
index 82180f7..cf97ad0 100644
--- a/builder/tools.py
+++ b/builder/tools.py
@@ -367,7 +367,7 @@ def _story_converted_as_description_in_group(group: ActionGroup, group_type: Gro
if val:
tmp.append(val)
if group_type is GroupType.COMBI:
- return extraspace_chopped("".join(tmp), group.lang)
+ return [extraspace_chopped("".join(tmp), group.lang),]
else:
return tmp
|
nagisc007/storybuilder
|
b7872d78caf82d0de954c029bad6838afd1a25d1
|
diff --git a/tests/test_tools.py b/tests/test_tools.py
index 0c003c1..bbbcc7c 100644
--- a/tests/test_tools.py
+++ b/tests/test_tools.py
@@ -340,23 +340,17 @@ class PrivateMethodsTest(unittest.TestCase):
def test_story_converted_as_description_in_group(self):
- self.assertEqual(tools._story_converted_as_description_in_group(self.story, self.story.group_type, 1, False),
- ["# Taro and Hanako\n",
- " a cute girl come in. ",
- ' "Nice to meet you" ',
- ' "I\'m not fine" '])
-
- def test_story_converted_as_description_in_group_with_combi(self):
- ma = Master('test')
data = [
- (ma.story(self.taro.talk().d("test"), self.hanako.talk().d("apple")),
+ (self.ma.story("Taro and Hanako", self.taro.talk().d("test"), self.hanako.talk().d("apple"), lang=LangType.ENG),
+ GroupType.STORY,
+ ["# Taro and Hanako\n", " test. ", " apple. "]),
+ (self.ma.story(self.taro.talk().d("test"), self.hanako.talk().d("apple")),
GroupType.COMBI,
- " test。apple。"),
- (ma.story(self.taro.talk().d("test"), self.hanako.talk().d("apple"), lang=LangType.ENG),
+ [" test。apple。"]),
+ (self.ma.story(self.taro.talk().d("test"), self.hanako.talk().d("apple"), lang=LangType.ENG),
GroupType.COMBI,
- " test. apple. "),
+ [" test. apple. "]),
]
-
for v, gtype, expected in data:
with self.subTest(v=v, gtype=gtype, expected=expected):
self.assertEqual(tools._story_converted_as_description_in_group(
|
Need to fix: combine bug
Combine bug: output shows one character per line
===
**Description**
When Combine is used, a line break is inserted after every single character, so the output looks like a vertical column with one character per line.
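The symptom is a classic Python gotcha: the COMBI branch returned a bare string where callers expect a list of lines, and iterating over a string yields one character at a time. A minimal reproduction, independent of storybuilder's own types:

```python
def render(lines):
    # Downstream code treats its input as a list of lines.
    return "\n".join(lines)

combined = "test. apple."           # bare string: iteration yields characters
print(render(combined))             # t / e / s / t / ... one character per line

combined_fixed = ["test. apple."]   # wrapping in a list keeps it as one line
print(render(combined_fixed))       # test. apple.
```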
|
0.0
|
b7872d78caf82d0de954c029bad6838afd1a25d1
|
[
"tests/test_tools.py::PrivateMethodsTest::test_story_converted_as_description_in_group"
] |
[
"tests/test_tools.py::PrivateMethodsTest::test_action_of_by_tag",
"tests/test_tools.py::PrivateMethodsTest::test_action_of_by_type",
"tests/test_tools.py::PrivateMethodsTest::test_action_with_obj_and_info_as_eng",
"tests/test_tools.py::PrivateMethodsTest::test_action_with_obj_and_info_as_jpn",
"tests/test_tools.py::PrivateMethodsTest::test_behavior_with_obj",
"tests/test_tools.py::PrivateMethodsTest::test_break_symbol_of",
"tests/test_tools.py::PrivateMethodsTest::test_comment_of",
"tests/test_tools.py::PrivateMethodsTest::test_desc_str_replaced_tag",
"tests/test_tools.py::PrivateMethodsTest::test_description_of_by_tag",
"tests/test_tools.py::PrivateMethodsTest::test_description_of_by_type",
"tests/test_tools.py::PrivateMethodsTest::test_flag_info_if",
"tests/test_tools.py::PrivateMethodsTest::test_hr_of",
"tests/test_tools.py::PrivateMethodsTest::test_list_head_inserted",
"tests/test_tools.py::PrivateMethodsTest::test_output_story_to_console",
"tests/test_tools.py::PrivateMethodsTest::test_output_story_to_file_as_action",
"tests/test_tools.py::PrivateMethodsTest::test_output_story_to_file_as_description",
"tests/test_tools.py::PrivateMethodsTest::test_output_with_linenumber",
"tests/test_tools.py::PrivateMethodsTest::test_scene_title_of",
"tests/test_tools.py::PrivateMethodsTest::test_story_converted_as_action",
"tests/test_tools.py::PrivateMethodsTest::test_story_converted_as_action_in_group",
"tests/test_tools.py::PrivateMethodsTest::test_story_converted_as_action_jpn",
"tests/test_tools.py::PrivateMethodsTest::test_story_converted_as_description",
"tests/test_tools.py::PrivateMethodsTest::test_story_data_converted",
"tests/test_tools.py::PrivateMethodsTest::test_story_title_of"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-04-06 12:58:12+00:00
|
mit
| 4,092 |
|
nagisc007__storybuilder-224
|
diff --git a/builder/master.py b/builder/master.py
index 3a72f96..00a1f2a 100644
--- a/builder/master.py
+++ b/builder/master.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
"""Story manager class.
"""
-from .sbutils import assert_isint, assert_isstr
+from .sbutils import assert_isint, assert_islist, assert_isstr
from .action import _BaseAction, ActionGroup, TagAction
from .enums import GroupType, LangType, TagType
from .subject import Subject, Day, Item, Person, Stage, Word
@@ -119,34 +119,44 @@ class Master(dict):
return ActionGroup(lang=lang, group_type=GroupType.SCENE, *tmp)
def set_days(self, li: list):
+ assert_islist(li)
+
for v in li:
self.append_day(v[0], v[1:])
return self
- def set_db(self, persons: list, stages: list, days: list, items: list, words: list):
- self.set_persons(persons)
- self.set_stages(stages)
- self.set_days(days)
- self.set_items(items)
- self.set_words(words)
+ def set_db(self, persons: list=None, stages: list=None, days: list=None, items: list=None, words: list=None):
+ self.set_persons(persons if persons else [])
+ self.set_stages(stages if stages else [])
+ self.set_days(days if days else [])
+ self.set_items(items if items else [])
+ self.set_words(words if words else [])
return self
def set_items(self, li: list):
+ assert_islist(li)
+
for v in li:
self.append_item(v[0], v[1:])
return self
def set_persons(self, li: list):
+ assert_islist(li)
+
for v in li:
self.append_person(v[0], v[1:])
return self
def set_stages(self, li: list):
+ assert_islist(li)
+
for v in li:
self.append_stage(v[0], v[1:])
return self
def set_words(self, li: list):
+ assert_islist(li)
+
for v in li:
self.append_word(v[0], v[1:])
return self
diff --git a/builder/sbutils.py b/builder/sbutils.py
index c5e1219..d05a510 100644
--- a/builder/sbutils.py
+++ b/builder/sbutils.py
@@ -37,6 +37,12 @@ def assert_isint(ins) -> bool:
def assert_islist(ins) -> bool:
+ assert isinstance(ins, list) or isinstance(ins, tuple), _ASSERT_MSG.format(_instance_name_if(ins), "list or tuple")
+
+ return True
+
+
+def assert_islist_strict(ins) -> bool:
assert isinstance(ins, list), _ASSERT_MSG.format(_instance_name_if(ins), "list")
return True
|
nagisc007/storybuilder
|
b0219f7a7883f24de99ee8269d1e60b83130deed
|
diff --git a/tests/test_master.py b/tests/test_master.py
index 5691f7b..86e4e64 100644
--- a/tests/test_master.py
+++ b/tests/test_master.py
@@ -289,6 +289,26 @@ class MasterTest(unittest.TestCase):
for k, exp in expected:
self.assertIsInstance(ma[k], exp)
+ def test_set_db_when_lacked(self):
+ data_p = (("p1", "Taro", 17, "male", "student"),)
+ data_s = (("s1", "stage1"),)
+ data_d = (("d1", "day1"),)
+ data_i = (("i1", "item1"),)
+ data_w = (("w1", "word1"),)
+ data = [
+ (data_p, data_s, data_d, data_i, data_w),
+ (data_p, data_s, data_d, data_i, None),
+ (data_p, data_s, data_d, None, None),
+ (data_p, data_s, None, None, None),
+ (data_p, None, None, None, None),
+ (None, None, None, None, None),
+ ]
+
+ for p, s, d, i, w in data:
+ with self.subTest(p=p, s=s, d=d, i=i, w=w):
+ ma = Master('test')
+ self.assertIsInstance(ma.set_db(p, s, d, i, w), Master)
+
def test_set_items(self):
data = (
("t1", "item1"),
|
Need to fix: error on empty elements in storyDB
Fails on empty elements
===
**Description**
An error occurs when the storyDB arguments contain empty tuples. Validate the input and handle it so those entries are not created.
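A simplified standalone sketch of the defaulting pattern the patch applies; the dict-based `set_db` below is illustrative, not Master's real API:

```python
def normalized(entries):
    """Treat None (or any other falsy value) as an empty collection."""
    return entries if entries else []

def set_db(db, persons=None, stages=None, days=None, items=None, words=None):
    # Missing tables simply become empty lists instead of raising on None.
    for name, entries in (("persons", persons), ("stages", stages),
                          ("days", days), ("items", items), ("words", words)):
        db[name] = list(normalized(entries))
    return db

print(set_db({}, persons=[("p1", "Taro", 17, "male", "student")]))
# {'persons': [...], 'stages': [], 'days': [], 'items': [], 'words': []}
```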
|
0.0
|
b0219f7a7883f24de99ee8269d1e60b83130deed
|
[
"tests/test_master.py::MasterTest::test_set_db_when_lacked"
] |
[
"tests/test_master.py::MasterTest::test_append_day",
"tests/test_master.py::MasterTest::test_append_item",
"tests/test_master.py::MasterTest::test_append_person",
"tests/test_master.py::MasterTest::test_append_stage",
"tests/test_master.py::MasterTest::test_append_word",
"tests/test_master.py::MasterTest::test_attributes",
"tests/test_master.py::MasterTest::test_br",
"tests/test_master.py::MasterTest::test_break_symbol",
"tests/test_master.py::MasterTest::test_combine",
"tests/test_master.py::MasterTest::test_comment",
"tests/test_master.py::MasterTest::test_hr",
"tests/test_master.py::MasterTest::test_scene",
"tests/test_master.py::MasterTest::test_set_days",
"tests/test_master.py::MasterTest::test_set_db",
"tests/test_master.py::MasterTest::test_set_items",
"tests/test_master.py::MasterTest::test_set_persons",
"tests/test_master.py::MasterTest::test_set_stages",
"tests/test_master.py::MasterTest::test_set_words",
"tests/test_master.py::MasterTest::test_story",
"tests/test_master.py::MasterTest::test_title"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-04-12 01:40:04+00:00
|
mit
| 4,093 |
|
nagisc007__storybuilder-231
|
diff --git a/builder/tools.py b/builder/tools.py
index 07349c4..f196f9a 100644
--- a/builder/tools.py
+++ b/builder/tools.py
@@ -66,7 +66,7 @@ def output_story(story: ActionGroup, filename: str, is_action_data: bool=False,
is_info_data: bool=False,
is_out_as_file: bool=False, is_out_chars: bool=False,
pri_filter: int=Action.MIN_PRIORITY,
- is_debug: bool=False):
+ is_debug: bool=False): # pragma: no cover
'''Output a story.
Args:
@@ -99,7 +99,8 @@ def output_story(story: ActionGroup, filename: str, is_action_data: bool=False,
ret = _output_story_to_file(
story_data, filename, suffix, is_debug)
if is_info_data:
- _output_story_to_file(["info data", "wip"],
+ _output_story_to_file(
+ _story_info_data_converted(story, is_debug),
filename, "_i", is_debug)
return ret
else:
@@ -108,7 +109,8 @@ def output_story(story: ActionGroup, filename: str, is_action_data: bool=False,
story_data += _story_flags_info_converted(story)
ret = _output_story_to_console(story_data, is_debug)
if is_info_data:
- _output_story_to_console(["info data", "wip"],
+ _output_story_to_console(
+ _story_info_data_converted(story, is_debug),
is_debug)
return ret
@@ -522,8 +524,9 @@ def _story_info_data_converted(story: ActionGroup, is_debug: bool) -> list:
assert_isclass(story, ActionGroup)
assert_isbool(is_debug)
- flags = _story_flags_info_converted(story)
- return ["## Flags\n"] + flags
+ chars = ["## Characters\n", "- Total: {}".format(_count_descriptions(story))]
+ flags = ["## Flags\n"] + _story_flags_info_converted(story)
+ return chars + flags
def _story_title_of(act: TagAction, level: int) -> str:
|
nagisc007/storybuilder
|
2a384c6fc03336ac50edb24e2281ed3b8529bab6
|
diff --git a/tests/test_tools.py b/tests/test_tools.py
index 0dbddb0..10f0880 100644
--- a/tests/test_tools.py
+++ b/tests/test_tools.py
@@ -486,6 +486,24 @@ class PrivateMethodsTest(unittest.TestCase):
with self.subTest(v=v, expected=expected):
self.assertEqual(tools._story_flags_info_converted(v), expected)
+ def test_story_info_data_converted(self):
+ data = [
+ (self.ma.story("test",
+ self.taro.be(Flag("apple"))),
+ False,
+ ["## Characters\n", "- Total: 0",
+ "## Flags\n", "[apple]:apple"]),
+ (self.ma.story("test",
+ self.taro.be().d("test apple")),
+ False,
+ ["## Characters\n", "- Total: 9",
+ "## Flags\n"]),
+ ]
+
+ for v, isdbg, expected in data:
+ with self.subTest(v=v, isdbg=isdbg, expected=expected):
+ self.assertEqual(tools._story_info_data_converted(v, isdbg), expected)
+
def test_story_title_of(self):
ma = Master('test')
|
Implement: info output
No output currently
===
**Description**
Currently the info output has no content, so fix that.
|
0.0
|
2a384c6fc03336ac50edb24e2281ed3b8529bab6
|
[
"tests/test_tools.py::PrivateMethodsTest::test_story_info_data_converted"
] |
[
"tests/test_tools.py::PublicMethodsTest::test_output_info",
"tests/test_tools.py::PrivateMethodsTest::test_action_info_as_eng",
"tests/test_tools.py::PrivateMethodsTest::test_action_info_as_jpn",
"tests/test_tools.py::PrivateMethodsTest::test_action_of_by_tag",
"tests/test_tools.py::PrivateMethodsTest::test_action_of_by_type",
"tests/test_tools.py::PrivateMethodsTest::test_break_symbol_of",
"tests/test_tools.py::PrivateMethodsTest::test_comment_of",
"tests/test_tools.py::PrivateMethodsTest::test_count_desc_at_action",
"tests/test_tools.py::PrivateMethodsTest::test_count_desc_in_group",
"tests/test_tools.py::PrivateMethodsTest::test_count_descriptions",
"tests/test_tools.py::PrivateMethodsTest::test_desc_excepted_symbols",
"tests/test_tools.py::PrivateMethodsTest::test_desc_head_of",
"tests/test_tools.py::PrivateMethodsTest::test_desc_str_replaced_tag",
"tests/test_tools.py::PrivateMethodsTest::test_description_of_by_tag",
"tests/test_tools.py::PrivateMethodsTest::test_description_of_by_type",
"tests/test_tools.py::PrivateMethodsTest::test_extra_chopped",
"tests/test_tools.py::PrivateMethodsTest::test_flag_info_of",
"tests/test_tools.py::PrivateMethodsTest::test_flags_if",
"tests/test_tools.py::PrivateMethodsTest::test_hr_of",
"tests/test_tools.py::PrivateMethodsTest::test_list_head_inserted",
"tests/test_tools.py::PrivateMethodsTest::test_output_story_to_console",
"tests/test_tools.py::PrivateMethodsTest::test_output_story_to_file",
"tests/test_tools.py::PrivateMethodsTest::test_output_with_linenumber",
"tests/test_tools.py::PrivateMethodsTest::test_scene_title_of",
"tests/test_tools.py::PrivateMethodsTest::test_story_converted_as_action",
"tests/test_tools.py::PrivateMethodsTest::test_story_converted_as_action_in_group",
"tests/test_tools.py::PrivateMethodsTest::test_story_converted_as_description",
"tests/test_tools.py::PrivateMethodsTest::test_story_converted_as_description_in_group",
"tests/test_tools.py::PrivateMethodsTest::test_story_data_converted",
"tests/test_tools.py::PrivateMethodsTest::test_story_flags_info_converted",
"tests/test_tools.py::PrivateMethodsTest::test_story_title_of",
"tests/test_tools.py::PrivateMethodsTest::test_test_head_if"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-04-13 01:30:58+00:00
|
mit
| 4,094 |
|
nagisc007__storybuilder-354
|
diff --git a/builder/buildtool.py b/builder/buildtool.py
index 5c005fe..08449e3 100644
--- a/builder/buildtool.py
+++ b/builder/buildtool.py
@@ -271,7 +271,7 @@ def _manupaper_rows_from_(val, lang: em.LangType, columns: int) -> int:
elif isinstance(val, act.TagAction):
return 0
elif isinstance(val, act.Action):
- return utl.int_ceiled(_descs_count_from_(val, lang), columns)
+ return _descs_count_from_(val, lang)
else:
return 0
@@ -279,7 +279,10 @@ def _manupaper_rows_from_(val, lang: em.LangType, columns: int) -> int:
def _manupaper_rows_from_in(vals: [act.ActionGroup, list, tuple],
lang: em.LangType, columns: int) -> int:
group = vals.actions if isinstance(vals, act.ActionGroup) else vals
- return sum([_manupaper_rows_from_(v, lang, columns) for v in group])
+ if isinstance(vals, act.ActionGroup) and vals.group_type is em.GroupType.COMBI:
+ return utl.int_ceiled(sum(_manupaper_rows_from_(v, lang, columns) for v in group), columns)
+ else:
+ return sum([utl.int_ceiled(_manupaper_rows_from_(v, lang, columns), columns) for v in group])
def _maintitle_from(story: list) -> list:
|
nagisc007/storybuilder
|
d711a90a6034bb61c176ccc809b367450c469fee
|
diff --git a/tests/test_buildtool.py b/tests/test_buildtool.py
index c794c9b..b873256 100644
--- a/tests/test_buildtool.py
+++ b/tests/test_buildtool.py
@@ -157,6 +157,25 @@ class PrivateMethodsTest(unittest.TestCase):
with self.subTest(v=v, lang=lang, expected=expected):
self.assertEqual(btl._estimated_description_count_from(v, lang), expected)
+ def test_manupaper_rows_from_in(self):
+ from builder import world
+ w = world.World("test")
+ data = [
+ ((self.taro.be().d("test"), self.taro.be().d("apple"),),
+ em.LangType.JPN, 20, 2),
+ ((w.combine(
+ self.taro.be().d("test"), self.taro.be().d("apple"),
+ ),),
+ em.LangType.JPN, 20, 1),
+ (w.combine(
+ self.taro.be().d("test apple"), self.taro.be().d("test orange"),
+ ),
+ em.LangType.JPN, 20, 2),
+ ]
+ for v, lang, columns, expected in data:
+ with self.subTest(v=v, lang=lang, columns=columns, expected=expected):
+ self.assertEqual(btl._manupaper_rows_from_in(v, lang, columns), expected)
+
def test_output_to_console(self):
data = [
(["test"], False,
|
Check: character count when combined
Verify that line and character counts are correct when combined
===
**Description**
Verify the counting behavior when combine is used.
Fix it if it is wrong.
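The counting rule that the patch encodes: within a COMBI group the description lengths are summed first and ceiled once, because the combined text flows as a single paragraph, while ordinary actions are each rounded up to whole manuscript rows. A sketch with a ceiling-division helper (the name mirrors `utl.int_ceiled`; its exact signature is an assumption here):

```python
def int_ceiled(value: int, columns: int) -> int:
    """Ceiling division: rows of `columns` characters needed for `value`."""
    return -(-value // columns)

columns = 20
descs = [4, 5]  # character counts of two descriptions ("test", "apple")

# Separate actions: each description starts a new manuscript row.
separate_rows = sum(int_ceiled(n, columns) for n in descs)  # 1 + 1 = 2

# Combined: the texts share rows, so sum first, then ceil once.
combined_rows = int_ceiled(sum(descs), columns)             # ceil(9 / 20) = 1

print(separate_rows, combined_rows)  # 2 1
```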
|
0.0
|
d711a90a6034bb61c176ccc809b367450c469fee
|
[
"tests/test_buildtool.py::PrivateMethodsTest::test_manupaper_rows_from_in"
] |
[
"tests/test_buildtool.py::PrivateMethodsTest::test_action_from",
"tests/test_buildtool.py::PrivateMethodsTest::test_acttypes_percents_from",
"tests/test_buildtool.py::PrivateMethodsTest::test_charcount_from",
"tests/test_buildtool.py::PrivateMethodsTest::test_contents_title_from",
"tests/test_buildtool.py::PrivateMethodsTest::test_descs_count_from",
"tests/test_buildtool.py::PrivateMethodsTest::test_descs_from",
"tests/test_buildtool.py::PrivateMethodsTest::test_estimated_description_count_from",
"tests/test_buildtool.py::PrivateMethodsTest::test_output_to_console",
"tests/test_buildtool.py::PrivateMethodsTest::test_output_to_file"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-06-17 00:05:20+00:00
|
mit
| 4,095 |
|
nagisc007__storybuilder-52
|
diff --git a/builder/tools.py b/builder/tools.py
index 406dfd6..2a990d8 100644
--- a/builder/tools.py
+++ b/builder/tools.py
@@ -17,15 +17,15 @@ def build_action_strings(story, is_debug=False):
for s in story:
if s.act_type is ActType.SYMBOL:
- act_str.append("\n## {}\n\n".format(s.action))
+ act_str.append("\n## {}\n\n".format(_action_with_act_word_if_selected(s)))
elif s.act_type is ActType.DESC or s.act_type is ActType.ACT:
- act_str.append("{}\n".format(s.action))
+ act_str.append("{}\n".format(_action_with_act_word_if_selected(s)))
elif s.act_type is ActType.TELL:
- act_str.append("「{}」\n".format(s.action))
+ act_str.append("「{}」{}\n".format(s.action, s.act_word))
elif s.act_type is ActType.THINK:
- act_str.append("{}\n".format(s.action))
+ act_str.append("{}\n".format(_action_with_act_word_if_selected(s)))
elif s.act_type is ActType.TEST and is_debug:
- act_str.append("> TEST:{}\n".format(s.action))
+ act_str.append("> TEST:{}\n".format(_action_with_act_word_if_selected(s)))
else:
pass
@@ -100,3 +100,12 @@ def _description_selected(act):
return act.description
return act.action
+
+def _action_with_act_word_if_selected(act):
+ '''Action string created with selecting act word.
+
+ Returns:
+ str: action string.
+ '''
+ return "{}{}".format(act.action, act.act_word) if act.with_act else act.action
+
|
nagisc007/storybuilder
|
04f2d2f06e4f6ef6468d45e327b94f21f01c1567
|
diff --git a/tests/test_tools.py b/tests/test_tools.py
index 9b6da20..aec9622 100644
--- a/tests/test_tools.py
+++ b/tests/test_tools.py
@@ -12,7 +12,7 @@ from builder.tools import build_description_strings
from builder.tools import output
from builder.tools import output_md
from builder.tools import _description_selected
-
+from builder.tools import _action_with_act_word_if_selected
class TestTools(unittest.TestCase):
@@ -41,6 +41,13 @@ class TestTools(unittest.TestCase):
self.assertEqual(_description_selected(normal_act), "go to room")
+ def test__action_with_act_word_if_selected(self):
+ noselected = Act(self.taro, ActType.ACT, Behavior.GO, "to room", " go", with_act=False)
+ selected = Act(self.taro, ActType.ACT, Behavior.GO, "to room", " go", with_act=True)
+
+ self.assertEqual(_action_with_act_word_if_selected(noselected), "to room")
+ self.assertEqual(_action_with_act_word_if_selected(selected), "to room go")
+
def test__description_selected_with_description(self):
normal_act = Act(self.taro, ActType.ACT, Behavior.GO, "go to room")
act_with_desc = normal_act.desc("walk so slowly to the room")
|
Need to fix: output actions with act word
The act word is not displayed
===
**Description**
Following the spec change, fix the action output to use act_word.
**To Reproduce**
nothing
**Expected behavior**
nothing
**Current behavior**
nothing
**Tasks**
- [ ] fix output with act_word
- [ ] fix output_md with act_word
- [ ] pass tests
**Note**
nothing
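A quick illustration of what the helper introduced in the patch produces, using a minimal stand-in for `Act` (the real class carries more fields):

```python
from collections import namedtuple

Act = namedtuple("Act", "action act_word with_act")

def action_with_act_word_if_selected(act):
    """Append the act word only when the action opted into it."""
    return "{}{}".format(act.action, act.act_word) if act.with_act else act.action

print(action_with_act_word_if_selected(Act("to room", " go", False)))  # to room
print(action_with_act_word_if_selected(Act("to room", " go", True)))   # to room go
```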
|
0.0
|
04f2d2f06e4f6ef6468d45e327b94f21f01c1567
|
[
"tests/test_tools.py::TestTools::test__action_with_act_word_if_selected",
"tests/test_tools.py::TestTools::test__description_selected",
"tests/test_tools.py::TestTools::test__description_selected_with_description",
"tests/test_tools.py::TestTools::test_build_action_strings",
"tests/test_tools.py::TestTools::test_build_description_strings",
"tests/test_tools.py::TestTools::test_output",
"tests/test_tools.py::TestTools::test_output_md"
] |
[] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2019-03-14 04:34:40+00:00
|
mit
| 4,096 |
|
nagisc007__storybuilder-54
|
diff --git a/builder/person.py b/builder/person.py
index aae74b0..9a6c312 100644
--- a/builder/person.py
+++ b/builder/person.py
@@ -85,47 +85,47 @@ class Person(BasePerson):
# sub actions
def agree(self, action, act_word="頷く", with_act=True, with_sub=False):
- return reply(action, act_word, with_act, with_sub)
+ return self.reply(action, act_word, with_act, with_sub)
def ask(self, action, act_word="尋ねる", with_act=True, with_sub=False):
- return hear(action, act_word, with_act, with_sub)
+ return self.hear(action, act_word, with_act, with_sub)
def brow(self, action, act_word="眉を顰める", with_act=True, with_sub=False):
- return feel(action, act_word, with_act, with_sub)
+ return self.feel(action, act_word, with_act, with_sub)
def call(self, action, act_word="呼ぶ", with_act=True, with_sub=False):
- return speak(action, act_word, with_act, with_sub)
+ return self.speak(action, act_word, with_act, with_sub)
def check(self, action, act_word="確認する", with_act=True, with_sub=False):
- return hear(action, act_word, with_act, with_sub)
+ return self.hear(action, act_word, with_act, with_sub)
def confuse(self, action, act_word="困惑する", with_act=True, with_sub=False):
- return feel(action, act_word, with_act, with_sub)
+ return self.feel(action, act_word, with_act, with_sub)
def explain(self, action, act_word="説明する", with_act=True, with_sub=False):
- return talk(action, act_word, with_act, with_sub)
+ return self.talk(action, act_word, with_act, with_sub)
def gaze(self, action, act_word="見つめる", with_act=True, with_sub=False):
- return see(action, act_word, with_act, with_sub)
+ return self.see(action, act_word, with_act, with_sub)
def growl(self, action, act_word="唸る", with_act=True, with_sub=False):
- return speak(action, act_word, with_act, with_sub)
+ return self.speak(action, act_word, with_act, with_sub)
def maon(self, action, act_word="呻く", with_act=True, with_sub=False):
- return speak(action, act_word, with_act, with_sub)
+ return self.speak(action, act_word, with_act, with_sub)
def oppose(self, action, act_word="反対する", with_act=True, with_sub=False):
- return reply(action, act_word, with_act, with_sub)
+ return self.reply(action, act_word, with_act, with_sub)
def surprise(self, action, act_word="驚く", with_act=True, with_sub=False):
- return feel(action, act_word, with_act, with_sub)
+ return self.feel(action, act_word, with_act, with_sub)
def stare(self, action, act_word="睨む", with_act=True, with_sub=False):
- return see(action, act_word, with_act, with_sub)
+ return self.see(action, act_word, with_act, with_sub)
def take(self, action, act_word="連れて行く", with_act=True, with_sub=False):
- return go(action, act_word, with_act, with_sub)
+ return self.go(action, act_word, with_act, with_sub)
def visit(self, action, act_word="訪れる", with_act=True, with_sub=False):
- return go(action, act_word, with_act, with_sub)
+ return self.go(action, act_word, with_act, with_sub)
|
nagisc007/storybuilder
|
aaaca7a475d697fa159dffff4bbe629a686577bb
|
diff --git a/tests/test_base.py b/tests/test_base.py
index be4be77..fe622ea 100644
--- a/tests/test_base.py
+++ b/tests/test_base.py
@@ -63,6 +63,9 @@ class TitleTest(unittest.TestCase):
class PersonTest(unittest.TestCase):
+ def setUp(self):
+ self.psn = Person("Taro", 15, "male", "student", "I am", "he is a man")
+
def test_attributes(self):
psn = Person("Taro", 15, "male", "student", "I am", "he is a man")
@@ -154,6 +157,14 @@ class PersonTest(unittest.TestCase):
self.assertEqual(acted.action, "forget his home work")
self.assertEqual(acted.behavior, Behavior.RESULT)
+ def test_sub_act_check(self):
+ acted = self.psn.check("note")
+
+ self.assertTrue(isinstance(acted, Act))
+ self.assertEqual(acted.act_type, ActType.ACT)
+ self.assertEqual(acted.action, "note")
+ self.assertEqual(acted.behavior, Behavior.HEAR)
+
class StageTest(unittest.TestCase):
|
Need to fix: missing "self" keyword in Person's sub acts
The self keyword was missing from the sub acts inside Person, so fix it.
===
**Description**
The sub acts call methods that are already defined, such as see, but because the self prefix is missing, those names are undefined.
**To Reproduce**
nothing
**Expected behavior**
nothing
**Current behavior**
nothing
**Tasks**
- [ ] fix all sub acts in Person class
- [ ] implement sub act test
- [ ] pass tests
**Note**
nothing
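The underlying mistake is easy to reproduce outside the project: calling another method of the same class without the `self.` prefix looks the name up at module level, which fails at call time:

```python
class Person:
    def hear(self, action):
        return f"hear: {action}"

    def check_broken(self, action):
        return hear(action)        # NameError: name 'hear' is not defined

    def check_fixed(self, action):
        return self.hear(action)   # resolves the method on the instance

p = Person()
print(p.check_fixed("note"))       # hear: note
try:
    p.check_broken("note")
except NameError as exc:
    print(exc)                     # name 'hear' is not defined
```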
|
0.0
|
aaaca7a475d697fa159dffff4bbe629a686577bb
|
[
"tests/test_base.py::PersonTest::test_sub_act_check"
] |
[
"tests/test_base.py::SubjectTest::test_feature_look",
"tests/test_base.py::ActTest::test_attributes",
"tests/test_base.py::ActTest::test_desc",
"tests/test_base.py::TitleTest::test_attributes",
"tests/test_base.py::PersonTest::test_act",
"tests/test_base.py::PersonTest::test_act_with_behavior",
"tests/test_base.py::PersonTest::test_attributes",
"tests/test_base.py::PersonTest::test_must",
"tests/test_base.py::PersonTest::test_reply",
"tests/test_base.py::PersonTest::test_result",
"tests/test_base.py::PersonTest::test_tell",
"tests/test_base.py::PersonTest::test_think",
"tests/test_base.py::PersonTest::test_want",
"tests/test_base.py::StageTest::test_attributes",
"tests/test_base.py::StageTest::test_look",
"tests/test_base.py::ItemTest::test_attributes",
"tests/test_base.py::ItemTest::test_look",
"tests/test_base.py::DayTimeTest::test_attributes",
"tests/test_base.py::DayTimeTest::test_look"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2019-03-14 12:22:57+00:00
|
mit
| 4,097 |
|
nagisc007__storybuilder-79
|
diff --git a/builder/tools.py b/builder/tools.py
index c36e60b..1c8ebbe 100644
--- a/builder/tools.py
+++ b/builder/tools.py
@@ -99,21 +99,33 @@ def _story_converted_as_action_in_group(group: ActionGroup, level: int, is_debug
if isinstance(a, ActionGroup):
tmp.extend(_story_converted_as_action_in_group(a, level + 1, is_debug))
else:
- tmp.append(_action_str_by_type(a, level, is_debug))
+ tmp.append(_action_str_by_type(a, group.lang, level, is_debug))
return tmp
-def _action_str_by_type(act: Action, level: int, is_debug: bool) -> str:
+def _action_str_by_type(act: Action, lang: LangType, level: int, is_debug: bool) -> str:
if act.act_type == ActType.ACT:
- return "{:8}:{:8}:{}:{}".format(act.subject.name, behavior_str_of(act.behavior), act.action, "" if act.note == "nothing" or not act.note else act.note)
+ if lang == LangType.JPN:
+ return "{: 8}{: 8}:{}:{}".format(act.subject.name, behavior_str_of(act.behavior), act.action, "" if act.note == "nothing" or not act.note else act.note)
+ else:
+ return "{:8}:{:8}:{}:{}".format(act.subject.name, behavior_str_of(act.behavior), act.action, "" if act.note == "nothing" or not act.note else act.note)
elif act.act_type == ActType.EXPLAIN:
- return "{:8}:{:8}:{}:{}".format(act.subject.name, behavior_str_of(act.behavior), act.action, "" if act.note == "nothing" or not act.note else act.note)
+ if lang == LangType.JPN:
+ return "{: 8}:{: 8}:{}:{}".format(act.subject.name, behavior_str_of(act.behavior), act.action, "" if act.note == "nothing" or not act.note else act.note)
+ else:
+ return "{:8}:{:8}:{}:{}".format(act.subject.name, behavior_str_of(act.behavior), act.action, "" if act.note == "nothing" or not act.note else act.note)
elif act.act_type == ActType.TAG:
return _action_str_by_tag(act, level)
elif act.act_type == ActType.TELL:
- return "{:8}:{:8}:「{}」:{}".format(act.subject.name, behavior_str_of(act.behavior), act.action, "" if act.note == "nothing" or act.note else act.note)
+ if lang == LangType.JPN:
+ return "{: 8}:{: 8}:「{}」:{}".format(act.subject.name, behavior_str_of(act.behavior), act.action, "" if act.note == "nothing" or act.note else act.note)
+ else:
+ return "{:8}:{:8}:「{}」:{}".format(act.subject.name, behavior_str_of(act.behavior), act.action, "" if act.note == "nothing" or act.note else act.note)
elif act.act_type == ActType.TEST and is_debug:
- return "> {:8}:{:8}:{}:{}".format(act.subject.name, behavior_str_of(act.behavior), act.action, "" if act.note == "nothing" or act.note else act.note)
+ if lang == LangType.JPN:
+ return "> {: 8}:{: 8}:{}:{}".format(act.subject.name, behavior_str_of(act.behavior), act.action, "" if act.note == "nothing" or act.note else act.note)
+ else:
+ return "> {:8}:{:8}:{}:{}".format(act.subject.name, behavior_str_of(act.behavior), act.action, "" if act.note == "nothing" or act.note else act.note)
else:
return ""
@@ -142,9 +154,9 @@ def _story_converted_as_description_in_group(group: ActionGroup, level: int, is_
def _description_str_by_type(act: Action, lang: LangType, level: int, is_debug: bool) -> str:
if act.act_type in (ActType.ACT, ActType.EXPLAIN):
- return "{}{}{}".format(" " if lang == LangType.JPN else " ", act.description, "。" if lang == LangType.JPN else ".")
+ return "{}{}{}".format(_paragraphtop_by_lang(lang), act.description, _period_by_lang(lang))
elif act.act_type == ActType.TELL:
- return "{}{}{}".format("「" if lang == LangType.JPN else '"', act.description, "」" if lang == LangType.JPN else '"')
+ return "{}{}{}".format(_double_quatation_by_lang(lang), act.description, _double_quatation_by_lang(lang, False))
elif act.act_type == ActType.TAG:
return _description_str_by_tag(act, level, is_debug)
elif act.act_type == ActType.TEST and is_debug:
@@ -161,3 +173,20 @@ def _description_str_by_tag(act: Action, level: int, is_debug: bool) -> str:
else:
return ""
+def _period_by_lang(lang: LangType) -> str:
+ return "。" if lang == LangType.JPN else ". "
+
+
+def _comma_by_lang(lang: LangType) -> str:
+ return "、" if lang == LangType.JPN else ", "
+
+
+def _paragraphtop_by_lang(lang: LangType) -> str:
+ return " " if lang == LangType.JPN else " "
+
+
+def _double_quatation_by_lang(lang: LangType, is_top: bool=True) -> str:
+ if is_top:
+ return "「" if lang == LangType.JPN else ' "'
+ else:
+ return "」" if lang == LangType.JPN else '" '
|
nagisc007/storybuilder
|
6f5020584b00ffd715c5a4728bf3488c7daec9c6
|
diff --git a/tests/test_tools.py b/tests/test_tools.py
index 6722214..837b1f2 100644
--- a/tests/test_tools.py
+++ b/tests/test_tools.py
@@ -84,15 +84,15 @@ class BasicMethodTest(unittest.TestCase):
def test__story_converted_as_description(self):
self.assertEqual(tools._story_converted_as_description(self.story, False),
["# Taro and Hanako",
- " a cute girl come in.",
- '"Nice to meet you"',
- '"I\'m not fine"'])
+ " a cute girl come in. ",
+ ' "Nice to meet you" ',
+ ' "I\'m not fine" '])
def test__story_converted_as_description_in_group(self):
self.assertEqual(tools._story_converted_as_description_in_group(self.story, 1, False),
["# Taro and Hanako",
- " a cute girl come in.",
- '"Nice to meet you"',
- '"I\'m not fine"'])
+ " a cute girl come in. ",
+ ' "Nice to meet you" ',
+ ' "I\'m not fine" '])
|
Need to fix: action output with Japanese spaces
Apply full-width spaces when the text is Japanese
===
**Description**
In the action output, half-width spaces throw the carefully aligned columns off for Japanese text, so detect the language and adjust the spacing accordingly.
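One way to express the idea behind the patch is via the format-spec fill character; this sketch is an alternative formulation, not the patch's exact format strings:

```python
def padded(name: str, lang_is_jpn: bool, width: int = 8) -> str:
    # Full-width text stays column-aligned only with full-width (U+3000)
    # padding; half-width spaces make Japanese rows drift out of line.
    fill = "\u3000" if lang_is_jpn else " "
    return f"{name:{fill}<{width}}"

print(repr(padded("Taro", lang_is_jpn=False)))  # 'Taro    '
print(repr(padded("太郎", lang_is_jpn=True)))    # '太郎' + six U+3000 spaces
```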
|
0.0
|
6f5020584b00ffd715c5a4728bf3488c7daec9c6
|
[
"tests/test_tools.py::BasicMethodTest::test__story_converted_as_description",
"tests/test_tools.py::BasicMethodTest::test__story_converted_as_description_in_group"
] |
[
"tests/test_tools.py::BasicMethodTest::test__output_story_to_console",
"tests/test_tools.py::BasicMethodTest::test__output_story_to_file_as_action",
"tests/test_tools.py::BasicMethodTest::test__output_story_to_file_as_description",
"tests/test_tools.py::BasicMethodTest::test__story_converted_as_action",
"tests/test_tools.py::BasicMethodTest::test__story_converted_as_action_in_group",
"tests/test_tools.py::BasicMethodTest::test__story_data_converted",
"tests/test_tools.py::BasicMethodTest::test_build_to_story",
"tests/test_tools.py::BasicMethodTest::test_options_parsed",
"tests/test_tools.py::BasicMethodTest::test_output_story"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-03-20 10:22:54+00:00
|
mit
| 4,098 |
|
nalepae__pandarallel-188
|
diff --git a/docs/docs/index.md b/docs/docs/index.md
index 5a3993c..32054cd 100644
--- a/docs/docs/index.md
+++ b/docs/docs/index.md
@@ -96,7 +96,7 @@ Computer used for this benchmark:
For those given examples, parallel operations run approximately 4x faster than the standard operations (except for `series.map` which runs only 3.2x faster).
-## When should I user `pandas`, `pandarallel` or `pyspark`?
+## When should I use `pandas`, `pandarallel` or `pyspark`?
According to [`pandas` documentation](https://pandas.pydata.org/):
diff --git a/pandarallel/core.py b/pandarallel/core.py
index f96359c..8eaaf12 100644
--- a/pandarallel/core.py
+++ b/pandarallel/core.py
@@ -235,7 +235,7 @@ def parallelize_with_memory_file_system(
progresses_length = [len(chunk_) * multiplicator_factor for chunk_ in chunks]
work_extra = data_type.get_work_extra(data)
- reduce_extra = data_type.get_reduce_extra(data)
+ reduce_extra = data_type.get_reduce_extra(data, user_defined_function_kwargs)
show_progress_bars = progress_bars_type != ProgressBarsType.No
@@ -376,7 +376,7 @@ def parallelize_with_pipe(
progresses_length = [len(chunk_) * multiplicator_factor for chunk_ in chunks]
work_extra = data_type.get_work_extra(data)
- reduce_extra = data_type.get_reduce_extra(data)
+ reduce_extra = data_type.get_reduce_extra(data, user_defined_function_kwargs)
show_progress_bars = progress_bars_type != ProgressBarsType.No
diff --git a/pandarallel/data_types/dataframe.py b/pandarallel/data_types/dataframe.py
index 5e2e067..4454c6f 100644
--- a/pandarallel/data_types/dataframe.py
+++ b/pandarallel/data_types/dataframe.py
@@ -4,6 +4,7 @@ import pandas as pd
from ..utils import chunk
from .generic import DataType
+from ..utils import _get_axis_int, _opposite_axis_int
class DataFrame:
@@ -13,13 +14,9 @@ class DataFrame:
nb_workers: int, data: pd.DataFrame, **kwargs
) -> Iterator[pd.DataFrame]:
user_defined_function_kwargs = kwargs["user_defined_function_kwargs"]
- axis = user_defined_function_kwargs.get("axis", 0)
- if axis not in {0, 1, "index", "columns"}:
- raise ValueError(f"No axis named {axis} for object type DataFrame")
-
- axis_int = {0: 0, 1: 1, "index": 0, "columns": 1}[axis]
- opposite_axis_int = 1 - axis_int
+ axis_int = _get_axis_int(user_defined_function_kwargs)
+ opposite_axis_int = _opposite_axis_int(axis_int)
for chunk_ in chunk(data.shape[opposite_axis_int], nb_workers):
yield data.iloc[chunk_] if axis_int == 1 else data.iloc[:, chunk_]
@@ -38,11 +35,19 @@ class DataFrame:
**user_defined_function_kwargs,
)
+ @staticmethod
+ def get_reduce_extra(data: Any, user_defined_function_kwargs) -> Dict[str, Any]:
+ return {"axis": _get_axis_int(user_defined_function_kwargs)}
+
@staticmethod
def reduce(
datas: Iterable[pd.DataFrame], extra: Dict[str, Any]
) -> pd.DataFrame:
- return pd.concat(datas, copy=False)
+ if isinstance(datas[0], pd.Series):
+ axis = 0
+ else:
+ axis = _opposite_axis_int(extra["axis"])
+ return pd.concat(datas, copy=False, axis=axis)
class ApplyMap(DataType):
@staticmethod
diff --git a/pandarallel/data_types/dataframe_groupby.py b/pandarallel/data_types/dataframe_groupby.py
index f6d119f..184088b 100644
--- a/pandarallel/data_types/dataframe_groupby.py
+++ b/pandarallel/data_types/dataframe_groupby.py
@@ -40,7 +40,7 @@ class DataFrameGroupBy:
return [compute_result(key, df) for key, df in data]
@staticmethod
- def get_reduce_extra(data: PandasDataFrameGroupBy) -> Dict[str, Any]:
+ def get_reduce_extra(data: PandasDataFrameGroupBy, user_defined_function_kwargs) -> Dict[str, Any]:
return {"df_groupby": data}
@staticmethod
diff --git a/pandarallel/data_types/generic.py b/pandarallel/data_types/generic.py
index 3b50383..34dfeb4 100644
--- a/pandarallel/data_types/generic.py
+++ b/pandarallel/data_types/generic.py
@@ -24,7 +24,7 @@ class DataType(ABC):
...
@staticmethod
- def get_reduce_extra(data: Any) -> Dict[str, Any]:
+ def get_reduce_extra(data: Any, user_defined_function_kwargs) -> Dict[str, Any]:
return dict()
@staticmethod
diff --git a/pandarallel/utils.py b/pandarallel/utils.py
index c83e93e..64c85b8 100644
--- a/pandarallel/utils.py
+++ b/pandarallel/utils.py
@@ -87,6 +87,18 @@ def get_pandas_version() -> Tuple[int, int]:
return int(major_str), int(minor_str)
+def _get_axis_int(user_defined_function_kwargs):
+ axis = user_defined_function_kwargs.get("axis", 0)
+
+ if axis not in {0, 1, "index", "columns"}:
+ raise ValueError(f"No axis named {axis} for object type DataFrame")
+
+ axis_int = {0: 0, 1: 1, "index": 0, "columns": 1}[axis]
+ return axis_int
+
+def _opposite_axis_int(axis: int):
+ return 1 - axis
+
class WorkerStatus(int, Enum):
Running = 0
Success = 1
|
nalepae/pandarallel
|
bb0f50faf9bd3e8e548736040d297613d4482eaa
|
diff --git a/tests/test_pandarallel.py b/tests/test_pandarallel.py
index 6cbdc8d..03d5cb4 100644
--- a/tests/test_pandarallel.py
+++ b/tests/test_pandarallel.py
@@ -135,6 +135,26 @@ def func_dataframe_groupby_expanding_apply(request):
)[request.param]
[email protected](params=("named", "anonymous"))
+def func_dataframe_apply_axis_0_no_reduce(request):
+ def func(x):
+ return x
+
+ return dict(
+ named=func, anonymous=lambda x: x
+ )[request.param]
+
+
[email protected](params=("named", "anonymous"))
+def func_dataframe_apply_axis_1_no_reduce(request):
+ def func(x):
+ return x**2
+
+ return dict(
+ named=func, anonymous=lambda x: x**2
+ )[request.param]
+
+
@pytest.fixture
def pandarallel_init(progress_bar, use_memory_fs):
pandarallel.initialize(
@@ -290,3 +310,29 @@ def test_dataframe_groupby_expanding_apply(
.parallel_apply(func_dataframe_groupby_expanding_apply, raw=False)
)
res.equals(res_parallel)
+
+def test_dataframe_axis_0_no_reduction(
+ pandarallel_init, func_dataframe_apply_axis_0_no_reduce, df_size
+):
+ df = pd.DataFrame(
+ dict(a=np.random.randint(1, 10, df_size), b=np.random.randint(1, 10, df_size), c=np.random.randint(1, 10, df_size))
+ )
+ res = df.apply(func_dataframe_apply_axis_0_no_reduce)
+
+ res_parallel = df.parallel_apply(func_dataframe_apply_axis_0_no_reduce)
+
+ assert res.equals(res_parallel)
+
+def test_dataframe_axis_1_no_reduction(
+ pandarallel_init, func_dataframe_apply_axis_1_no_reduce, df_size
+):
+ df = pd.DataFrame(
+ dict(a=np.random.randint(1, 10, df_size), b=np.random.randint(1, 10, df_size), c=np.random.randint(1, 10, df_size))
+ )
+
+ res = df.apply(func_dataframe_apply_axis_1_no_reduce, axis=1)
+
+ res_parallel = df.parallel_apply(func_dataframe_apply_axis_1_no_reduce, axis=1)
+
+ assert res.equals(res_parallel)
+
|
parallel_apply(..., axis=0) returns duplicated rows.
This behavior differs from pandas.
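The duplication comes from the reduce step: with `axis=0` the DataFrame is chunked column-wise, so the partial results must be concatenated back along `axis=1`; concatenating along the default `axis=0` stacks the column chunks vertically and repeats the index. A minimal reproduction with plain pandas:

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]})

# Simulate two workers, each applying an identity function to a column chunk.
chunks = [df.iloc[:, [0, 1]], df.iloc[:, [2]]]
results = [chunk.apply(lambda col: col) for chunk in chunks]

wrong = pd.concat(results, axis=0)  # rows duplicated, NaN-filled columns
right = pd.concat(results, axis=1)  # matches df.apply(lambda col: col)

print(wrong.shape, right.shape)     # (4, 3) (2, 3)
print(right.equals(df))             # True
```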
|
0.0
|
bb0f50faf9bd3e8e548736040d297613d4482eaa
|
[
"tests/test_pandarallel.py::test_dataframe_axis_0_no_reduction[named-1000-False-False]",
"tests/test_pandarallel.py::test_dataframe_axis_0_no_reduction[named-1000-True-False]",
"tests/test_pandarallel.py::test_dataframe_axis_0_no_reduction[named-1-False-False]",
"tests/test_pandarallel.py::test_dataframe_axis_0_no_reduction[named-1-True-False]",
"tests/test_pandarallel.py::test_dataframe_axis_0_no_reduction[anonymous-1000-False-False]",
"tests/test_pandarallel.py::test_dataframe_axis_0_no_reduction[anonymous-1000-True-False]",
"tests/test_pandarallel.py::test_dataframe_axis_0_no_reduction[anonymous-1-False-False]",
"tests/test_pandarallel.py::test_dataframe_axis_0_no_reduction[anonymous-1-True-False]"
] |
[
"tests/test_pandarallel.py::test_dataframe_apply_axis_0[named-1000-False-False]",
"tests/test_pandarallel.py::test_dataframe_apply_axis_0[named-1000-True-False]",
"tests/test_pandarallel.py::test_dataframe_apply_axis_0[named-1-False-False]",
"tests/test_pandarallel.py::test_dataframe_apply_axis_0[named-1-True-False]",
"tests/test_pandarallel.py::test_dataframe_apply_axis_0[anonymous-1000-False-False]",
"tests/test_pandarallel.py::test_dataframe_apply_axis_0[anonymous-1000-True-False]",
"tests/test_pandarallel.py::test_dataframe_apply_axis_0[anonymous-1-False-False]",
"tests/test_pandarallel.py::test_dataframe_apply_axis_0[anonymous-1-True-False]",
"tests/test_pandarallel.py::test_dataframe_apply_axis_1[named-1000-False-False]",
"tests/test_pandarallel.py::test_dataframe_apply_axis_1[named-1000-True-False]",
"tests/test_pandarallel.py::test_dataframe_apply_axis_1[named-1-False-False]",
"tests/test_pandarallel.py::test_dataframe_apply_axis_1[named-1-True-False]",
"tests/test_pandarallel.py::test_dataframe_apply_axis_1[anonymous-1000-False-False]",
"tests/test_pandarallel.py::test_dataframe_apply_axis_1[anonymous-1000-True-False]",
"tests/test_pandarallel.py::test_dataframe_apply_axis_1[anonymous-1-False-False]",
"tests/test_pandarallel.py::test_dataframe_apply_axis_1[anonymous-1-True-False]",
"tests/test_pandarallel.py::test_dataframe_apply_invalid_axis[False-None]",
"tests/test_pandarallel.py::test_dataframe_apply_invalid_axis[False-False]",
"tests/test_pandarallel.py::test_dataframe_apply_invalid_axis[True-None]",
"tests/test_pandarallel.py::test_dataframe_apply_invalid_axis[True-False]",
"tests/test_pandarallel.py::test_dataframe_applymap[named-1000-False-None]",
"tests/test_pandarallel.py::test_dataframe_applymap[named-1000-False-False]",
"tests/test_pandarallel.py::test_dataframe_applymap[named-1000-True-None]",
"tests/test_pandarallel.py::test_dataframe_applymap[named-1000-True-False]",
"tests/test_pandarallel.py::test_dataframe_applymap[named-1-False-None]",
"tests/test_pandarallel.py::test_dataframe_applymap[named-1-False-False]",
"tests/test_pandarallel.py::test_dataframe_applymap[named-1-True-None]",
"tests/test_pandarallel.py::test_dataframe_applymap[named-1-True-False]",
"tests/test_pandarallel.py::test_dataframe_applymap[anonymous-1000-False-None]",
"tests/test_pandarallel.py::test_dataframe_applymap[anonymous-1000-False-False]",
"tests/test_pandarallel.py::test_dataframe_applymap[anonymous-1000-True-None]",
"tests/test_pandarallel.py::test_dataframe_applymap[anonymous-1000-True-False]",
"tests/test_pandarallel.py::test_dataframe_applymap[anonymous-1-False-None]",
"tests/test_pandarallel.py::test_dataframe_applymap[anonymous-1-False-False]",
"tests/test_pandarallel.py::test_dataframe_applymap[anonymous-1-True-None]",
"tests/test_pandarallel.py::test_dataframe_applymap[anonymous-1-True-False]",
"tests/test_pandarallel.py::test_series_map[named-1000-False-None]",
"tests/test_pandarallel.py::test_series_map[named-1000-False-False]",
"tests/test_pandarallel.py::test_series_map[named-1000-True-None]",
"tests/test_pandarallel.py::test_series_map[named-1000-True-False]",
"tests/test_pandarallel.py::test_series_map[named-1-False-None]",
"tests/test_pandarallel.py::test_series_map[named-1-False-False]",
"tests/test_pandarallel.py::test_series_map[named-1-True-None]",
"tests/test_pandarallel.py::test_series_map[named-1-True-False]",
"tests/test_pandarallel.py::test_series_map[anonymous-1000-False-None]",
"tests/test_pandarallel.py::test_series_map[anonymous-1000-False-False]",
"tests/test_pandarallel.py::test_series_map[anonymous-1000-True-None]",
"tests/test_pandarallel.py::test_series_map[anonymous-1000-True-False]",
"tests/test_pandarallel.py::test_series_map[anonymous-1-False-None]",
"tests/test_pandarallel.py::test_series_map[anonymous-1-False-False]",
"tests/test_pandarallel.py::test_series_map[anonymous-1-True-None]",
"tests/test_pandarallel.py::test_series_map[anonymous-1-True-False]",
"tests/test_pandarallel.py::test_series_apply[named-1000-False-None]",
"tests/test_pandarallel.py::test_series_apply[named-1000-False-False]",
"tests/test_pandarallel.py::test_series_apply[named-1000-True-None]",
"tests/test_pandarallel.py::test_series_apply[named-1000-True-False]",
"tests/test_pandarallel.py::test_series_apply[named-1-False-None]",
"tests/test_pandarallel.py::test_series_apply[named-1-False-False]",
"tests/test_pandarallel.py::test_series_apply[named-1-True-None]",
"tests/test_pandarallel.py::test_series_apply[named-1-True-False]",
"tests/test_pandarallel.py::test_series_apply[anonymous-1000-False-None]",
"tests/test_pandarallel.py::test_series_apply[anonymous-1000-False-False]",
"tests/test_pandarallel.py::test_series_apply[anonymous-1000-True-None]",
"tests/test_pandarallel.py::test_series_apply[anonymous-1000-True-False]",
"tests/test_pandarallel.py::test_series_apply[anonymous-1-False-None]",
"tests/test_pandarallel.py::test_series_apply[anonymous-1-False-False]",
"tests/test_pandarallel.py::test_series_apply[anonymous-1-True-None]",
"tests/test_pandarallel.py::test_series_apply[anonymous-1-True-False]",
"tests/test_pandarallel.py::test_series_rolling_apply[named-1000-False-None]",
"tests/test_pandarallel.py::test_series_rolling_apply[named-1000-False-False]",
"tests/test_pandarallel.py::test_series_rolling_apply[named-1000-True-None]",
"tests/test_pandarallel.py::test_series_rolling_apply[named-1000-True-False]",
"tests/test_pandarallel.py::test_series_rolling_apply[named-1-False-None]",
"tests/test_pandarallel.py::test_series_rolling_apply[named-1-False-False]",
"tests/test_pandarallel.py::test_series_rolling_apply[named-1-True-None]",
"tests/test_pandarallel.py::test_series_rolling_apply[named-1-True-False]",
"tests/test_pandarallel.py::test_series_rolling_apply[anonymous-1000-False-None]",
"tests/test_pandarallel.py::test_series_rolling_apply[anonymous-1000-False-False]",
"tests/test_pandarallel.py::test_series_rolling_apply[anonymous-1000-True-None]",
"tests/test_pandarallel.py::test_series_rolling_apply[anonymous-1000-True-False]",
"tests/test_pandarallel.py::test_series_rolling_apply[anonymous-1-False-None]",
"tests/test_pandarallel.py::test_series_rolling_apply[anonymous-1-False-False]",
"tests/test_pandarallel.py::test_series_rolling_apply[anonymous-1-True-None]",
"tests/test_pandarallel.py::test_series_rolling_apply[anonymous-1-True-False]",
"tests/test_pandarallel.py::test_dataframe_groupby_rolling_apply[named-1000-False-None]",
"tests/test_pandarallel.py::test_dataframe_groupby_rolling_apply[named-1000-False-False]",
"tests/test_pandarallel.py::test_dataframe_groupby_rolling_apply[named-1000-True-None]",
"tests/test_pandarallel.py::test_dataframe_groupby_rolling_apply[named-1000-True-False]",
"tests/test_pandarallel.py::test_dataframe_groupby_rolling_apply[named-1-False-None]",
"tests/test_pandarallel.py::test_dataframe_groupby_rolling_apply[named-1-False-False]",
"tests/test_pandarallel.py::test_dataframe_groupby_rolling_apply[named-1-True-None]",
"tests/test_pandarallel.py::test_dataframe_groupby_rolling_apply[named-1-True-False]",
"tests/test_pandarallel.py::test_dataframe_groupby_rolling_apply[anonymous-1000-False-None]",
"tests/test_pandarallel.py::test_dataframe_groupby_rolling_apply[anonymous-1000-False-False]",
"tests/test_pandarallel.py::test_dataframe_groupby_rolling_apply[anonymous-1000-True-None]",
"tests/test_pandarallel.py::test_dataframe_groupby_rolling_apply[anonymous-1000-True-False]",
"tests/test_pandarallel.py::test_dataframe_groupby_rolling_apply[anonymous-1-False-None]",
"tests/test_pandarallel.py::test_dataframe_groupby_rolling_apply[anonymous-1-False-False]",
"tests/test_pandarallel.py::test_dataframe_groupby_rolling_apply[anonymous-1-True-None]",
"tests/test_pandarallel.py::test_dataframe_groupby_rolling_apply[anonymous-1-True-False]",
"tests/test_pandarallel.py::test_dataframe_groupby_expanding_apply[named-1000-False-None]",
"tests/test_pandarallel.py::test_dataframe_groupby_expanding_apply[named-1000-False-False]",
"tests/test_pandarallel.py::test_dataframe_groupby_expanding_apply[named-1000-True-None]",
"tests/test_pandarallel.py::test_dataframe_groupby_expanding_apply[named-1000-True-False]",
"tests/test_pandarallel.py::test_dataframe_groupby_expanding_apply[named-1-False-None]",
"tests/test_pandarallel.py::test_dataframe_groupby_expanding_apply[named-1-False-False]",
"tests/test_pandarallel.py::test_dataframe_groupby_expanding_apply[named-1-True-None]",
"tests/test_pandarallel.py::test_dataframe_groupby_expanding_apply[named-1-True-False]",
"tests/test_pandarallel.py::test_dataframe_groupby_expanding_apply[anonymous-1000-False-None]",
"tests/test_pandarallel.py::test_dataframe_groupby_expanding_apply[anonymous-1000-False-False]",
"tests/test_pandarallel.py::test_dataframe_groupby_expanding_apply[anonymous-1000-True-None]",
"tests/test_pandarallel.py::test_dataframe_groupby_expanding_apply[anonymous-1000-True-False]",
"tests/test_pandarallel.py::test_dataframe_groupby_expanding_apply[anonymous-1-False-None]",
"tests/test_pandarallel.py::test_dataframe_groupby_expanding_apply[anonymous-1-False-False]",
"tests/test_pandarallel.py::test_dataframe_groupby_expanding_apply[anonymous-1-True-None]",
"tests/test_pandarallel.py::test_dataframe_groupby_expanding_apply[anonymous-1-True-False]",
"tests/test_pandarallel.py::test_dataframe_axis_1_no_reduction[named-1000-False-False]",
"tests/test_pandarallel.py::test_dataframe_axis_1_no_reduction[named-1000-True-False]",
"tests/test_pandarallel.py::test_dataframe_axis_1_no_reduction[named-1-False-False]",
"tests/test_pandarallel.py::test_dataframe_axis_1_no_reduction[named-1-True-False]",
"tests/test_pandarallel.py::test_dataframe_axis_1_no_reduction[anonymous-1000-False-False]",
"tests/test_pandarallel.py::test_dataframe_axis_1_no_reduction[anonymous-1000-True-False]",
"tests/test_pandarallel.py::test_dataframe_axis_1_no_reduction[anonymous-1-False-False]",
"tests/test_pandarallel.py::test_dataframe_axis_1_no_reduction[anonymous-1-True-False]"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-07-21 17:16:22+00:00
|
bsd-3-clause
| 4,099 |
|
namaggarwal__splitwise-68
|
diff --git a/README.md b/README.md
index 41a155f..c80c8fd 100644
--- a/README.md
+++ b/README.md
@@ -395,6 +395,17 @@ print("content:", comment.getContent())
print("errors:", errors)
```
+### Get Notifications
+
+You can use ```getNotifications()``` to get recent notifications. It returns an array of ```Notification``` objects.
+
+```python
+sObj = Splitwise(Config.consumer_key,Config.consumer_secret)
+sObj.setAccessToken(session['access_token'])
+id = 982430660
+notifications = sObj.getNotifications()
+```
+
## Objects
### User
@@ -582,7 +593,27 @@ Methods:
7. getDeletedAt(id) - Returns datetime at which comment was deleted
8. getUser() - Returns a ```User``` object containing user details
+### Notification
+
+Methods:
+
+1. getId() - Returns the id
+2. getContent() - Returns message
+3. getImageShape() - Returns the shape of the notification image (e.g. square)
+4. getImageUrl() - Returns the url of the notification image
+5. source - Returns source object
+6. getCreatedAt() - Returns datetime at which notification was created
+7. getCreatedBy() - Returns id of user who created notification
+
+### Source
+
+Used with Notifications.
+
+Methods:
+1. getId() - Returns the id
+2. getType() - Returns type. Use in combination with ID to fetch structured data
+3. getUrl() - Returns url
## Sample Application
diff --git a/splitwise/__init__.py b/splitwise/__init__.py
index a3d4ed3..b2e1029 100644
--- a/splitwise/__init__.py
+++ b/splitwise/__init__.py
@@ -25,6 +25,7 @@ from splitwise.group import Group
from splitwise.category import Category
from splitwise.expense import Expense
from splitwise.comment import Comment
+from splitwise.notification import Notification
from splitwise.error import SplitwiseError
from requests_oauthlib import OAuth1, OAuth2Session, OAuth2
from requests import Request, sessions
@@ -103,6 +104,8 @@ class Splitwise(object):
"api/"+SPLITWISE_VERSION+"/get_comments"
CREATE_COMMENT_URL = SPLITWISE_BASE_URL + \
"api/"+SPLITWISE_VERSION+"/create_comment"
+ GET_NOTIFICATIONS_URL = SPLITWISE_BASE_URL + \
+ "api/"+SPLITWISE_VERSION+"/get_notifications"
debug = False
@@ -867,3 +870,30 @@ class Splitwise(object):
errors = SplitwiseError(content['errors'])
return comment, errors
+
+ # TODO: Implement optional args
+ def getNotifications(self, updated_since=None, limit=None):
+ """
+ Get notifications.
+
+ Args:
+ updated_since(string): Optional. ISO8601 Timestamp.
+ limit(long): Optional. Defaults to 0
+
+ Returns:
+ list of :obj:`splitwise.notification.Notification`: Objects representing notifications
+ """
+
+ try:
+ content = self.__makeRequest(Splitwise.GET_NOTIFICATIONS_URL)
+ except SplitwiseNotAllowedException as e:
+ e.setMessage("You are not allowed to fetch notifications")
+ raise
+
+ content = json.loads(content)
+ notifications = []
+ if "notifications" in content:
+ for n in content["notifications"]:
+ notifications.append(Notification(n))
+
+ return notifications
diff --git a/splitwise/notification.py b/splitwise/notification.py
new file mode 100644
index 0000000..156dc32
--- /dev/null
+++ b/splitwise/notification.py
@@ -0,0 +1,125 @@
+class Notification(object):
+ """
+ Contains notification details
+ Attributes:
+ id(long): Notification Id
+ content(string): Notification content
+ type(long): Notification type
+ image_url(string): Url
+ image_shape(string): Shape of image
+ created_at(datetime): Notification creation datetime
+ created_by(long): User Id of the notification creator
+ source: The related thing (ie, Expense)
+ """
+ def __init__(self, data=None):
+ """
+ Args:
+ data(:obj:`json`, optional): JSON object representing notification object
+ """
+
+ if data:
+ self.id = data["id"]
+ self.content = data["content"]
+ self.type = data["type"]
+ self.created_at = data["created_at"]
+ self.created_by = data["created_by"]
+ self.image_shape = data["image_shape"]
+ self.image_url = data["image_url"]
+ self.source = Source(data["source"])
+
+ def getId(self):
+ """ Returns Notification's Id
+ Returns:
+ long: Notification's Id
+ """
+
+ return self.id
+
+ def getContent(self):
+ """ Returns message
+ Returns:
+ str: Content of the notification - text and HTML.
+ """
+
+ return self.content
+
+ def getType(self):
+ """ Returns Notification type
+ Returns:
+ long: Notification type
+ """
+
+ return self.type
+
+ def getCreatedBy(self):
+ """ Returns id who triggered Notification was created
+ Returns:
+ long: Notification's creator id
+ """
+
+ return self.created_by
+
+ def getCreatedAt(self):
+ """ Returns datetime at which notification was created
+ Returns:
+ datetime: Notification's creation date
+ """
+
+ return self.created_at
+
+ def getImageShape(self):
+ """ Returns shape of image
+ Returns:
+ string: Image shape, ie square
+ """
+
+ return self.image_shape
+
+ def getImageUrl(self):
+ """ Returns url of image
+ Returns:
+ string: Image url
+ """
+
+ return self.image_url
+
+
+class Source(object):
+ """
+ Contains the source object of a notification
+ Attributes:
+ id(long): Notification Source Id
+ type(long): Notification Source type
+ url(string): Url
+ """
+ def __init__(self, data=None):
+ """
+ Args:
+ data(:obj:`json`, optional): JSON object representing source object
+ """
+
+ if data:
+ self.id = data["id"]
+ self.type = data["type"]
+ self.url = data["url"]
+
+ def getType(self):
+ """ Returns Notification Source's Type
+ Returns:
+ str: Notification Source's Type, ie Expense
+ """
+ return self.type
+
+ def getId(self):
+ """ Returns Notification Source's Id
+ Returns:
+ long: Notification Source's Id
+ """
+ return self.id
+
+ def getUrl(self):
+ """ Returns Notification Source's Url
+ Returns:
+ str: Notification Source's Url
+ """
+ return self.url
|
namaggarwal/splitwise
|
c87e120fed91b0994665182bac0447ccd62954ae
|
diff --git a/tests/test_getNotifications.py b/tests/test_getNotifications.py
new file mode 100755
index 0000000..100f426
--- /dev/null
+++ b/tests/test_getNotifications.py
@@ -0,0 +1,40 @@
+from splitwise import Splitwise
+import unittest
+try:
+ from unittest.mock import patch
+except ImportError: # Python 2
+ from mock import patch
+
+
+@patch('splitwise.Splitwise._Splitwise__makeRequest')
+class GetNotifications(unittest.TestCase):
+
+ def setUp(self):
+ self.sObj = Splitwise('consumerkey', 'consumersecret')
+
+ def test_getNotifications_success(self, mockMakeRequest):
+ mockMakeRequest.return_value = '{"notifications": [{"id": 32514315,"type": 0,"created_at": "2019-08-24T14:15:22Z","created_by": 2,"source": {"type": "Expense","id": 865077,"url": "string"},"image_url": "https://s3.amazonaws.com/splitwise/uploads/notifications/v2/0-venmo.png","image_shape": "square","content": "<strong>You</strong> paid <strong>Jon H.</strong>.<br><font color=\\\"#5bc5a7\\\">You paid $23.45</font>"}]}' # noqa: E501
+ notifications = self.sObj.getNotifications() # TODO: Coverage of updated_after, limit: 0 arguments
+ mockMakeRequest.assert_called_with("https://secure.splitwise.com/api/v3.0/get_notifications")
+
+ self.assertEqual(notifications[0].getId(), 32514315)
+ self.assertEqual(notifications[0].getType(), 0) # TODO: Constants?
+ self.assertEqual(notifications[0].getCreatedAt(), "2019-08-24T14:15:22Z")
+ self.assertEqual(notifications[0].getCreatedBy(), 2) # TODO: Users?
+ self.assertEqual(notifications[0].getImageUrl(),
+ "https://s3.amazonaws.com/splitwise/uploads/notifications/v2/0-venmo.png")
+ self.assertEqual(notifications[0].getImageShape(), "square")
+ self.assertEqual(notifications[0].getContent(),
+ "<strong>You</strong> paid <strong>Jon H.</strong>.<br><font color=\"#5bc5a7\">You paid $23.45</font>") # noqa: 501
+
+ self.assertEqual(notifications[0].source.getType(), "Expense")
+ self.assertEqual(notifications[0].source.getId(), 865077)
+ self.assertEqual(notifications[0].source.getUrl(), "string")
+
+ def test_getNotifications_exception(self, mockMakeRequest):
+ mockMakeRequest.side_effect = Exception(
+ "Invalid response %s. Please check your consumer key and secret." % 404)
+ with self.assertRaises(Exception):
+ self.sObj.getNotifications()
+ mockMakeRequest.assert_called_with(
+ "https://secure.splitwise.com/api/v3.0/get_notifications")
diff --git a/tests/test_updateExpense.py b/tests/test_updateExpense.py
index 5e64774..8736f78 100644
--- a/tests/test_updateExpense.py
+++ b/tests/test_updateExpense.py
@@ -93,7 +93,8 @@ class UpdateExpenseTestCase(unittest.TestCase):
expenseRes, errors = self.sObj.updateExpense(expense)
mockMakeRequest.assert_called_with(
"https://secure.splitwise.com/api/v3.0/update_expense/1010906976", "POST",
- {'cost': '13', 'description': 'Testing', 'details': 'Full details of the expense', 'group_id': 19433671, 'split_equally': True}, files=None)
+ {'cost': '13', 'description': 'Testing', 'details': 'Full details of the expense',
+ 'group_id': 19433671, 'split_equally': True}, files=None)
self.assertIsNone(errors)
self.assertEqual(expenseRes.getId(), 1010906976)
self.assertEqual(expenseRes.getGroupId(), 19433671)
@@ -318,4 +319,4 @@ Sorry for the trouble!'
users.append(user2)
expense.setUsers(users)
with self.assertRaises(SplitwiseBadRequestException):
- expenseRes, errors = self.sObj.updateExpense(expense)
\ No newline at end of file
+ expenseRes, errors = self.sObj.updateExpense(expense)
|
Add support for get_notifications
https://dev.splitwise.com/#tag/notifications
```
{
"notifications": [
{
"id": 32514315,
"type": 0,
"created_at": "2019-08-24T14:15:22Z",
"created_by": 2,
"source": {
"type": "Expense",
"id": 865077,
"url": "string"
},
"image_url": "https://s3.amazonaws.com/splitwise/uploads/notifications/v2/0-venmo.png",
"image_shape": "square",
"content": "<strong>You</strong> paid <strong>Jon H.</strong>.<br><font color=\\\"#5bc5a7\\\">You paid $23.45</font>"
}
]
}
```
Enables use cases like:
https://github.com/sriramsv/custom_component_splitwise/issues/4
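A minimal usage sketch under the proposed patch — `getNotifications()`, the `Notification` accessors, and the `source` attribute come from the diff above; the credentials and the follow-up `getExpense()` call are illustrative:
```python
from splitwise import Splitwise

# Placeholder credentials; the access token comes from the usual OAuth flow.
sObj = Splitwise("consumer_key", "consumer_secret")
sObj.setAccessToken(session["access_token"])

for n in sObj.getNotifications():
    print(n.getId(), n.getCreatedAt(), n.getContent())
    # The source identifies the related object, so structured data can be
    # fetched with the matching getter.
    if n.source.getType() == "Expense":
        expense = sObj.getExpense(n.source.getId())
```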
|
0.0
|
c87e120fed91b0994665182bac0447ccd62954ae
|
[
"tests/test_getNotifications.py::GetNotifications::test_getNotifications_exception",
"tests/test_getNotifications.py::GetNotifications::test_getNotifications_success"
] |
[
"tests/test_updateExpense.py::UpdateExpenseTestCase::test_updateExpense_error",
"tests/test_updateExpense.py::UpdateExpenseTestCase::test_updateExpense_exception",
"tests/test_updateExpense.py::UpdateExpenseTestCase::test_updateExpense_missingExpenseId_Exception",
"tests/test_updateExpense.py::UpdateExpenseTestCase::test_updateExpense_split_equally_success",
"tests/test_updateExpense.py::UpdateExpenseTestCase::test_updateExpense_split_equally_with_receipt_success",
"tests/test_updateExpense.py::UpdateExpenseTestCase::test_updateExpense_split_manually_success"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_media",
"has_added_files",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-03-20 06:20:08+00:00
|
mit
| 4,100 |
|
nanonyme__simplecpreprocessor-11
|
diff --git a/simplecpreprocessor.py b/simplecpreprocessor.py
index d6d025b..53da5fb 100644
--- a/simplecpreprocessor.py
+++ b/simplecpreprocessor.py
@@ -104,6 +104,7 @@ IFDEF = "ifdef"
IFNDEF = "ifndef"
ELSE = "else"
SKIP_FILE = object()
+TOKEN = re.compile(r"\b\w+\b")
class Preprocessor(object):
@@ -228,27 +229,22 @@ class Preprocessor(object):
self.process_define(define, old_line_num)
def process_source_line(self, line, line_num):
- matches = set()
- line = self._recursive_transform(line, matches)
+ line = self._recursive_transform(line, set())
return line + self.line_ending
- def _recursive_transform(self, line, matches):
- original_line = line
- new_matches = set()
+ def _recursive_transform(self, line, seen):
def transform_word(match):
word = match.group(0)
- if word in matches:
+ if word in seen:
return word
else:
- new_matches.add(word)
- return self.defines.get(word, word)
- line = re.sub(r"\b\w+\b", transform_word, line)
- matches.update(new_matches)
- if original_line == line:
- return line
- else:
- return self._recursive_transform(line, matches)
+ local_seen = {word}
+ local_seen.update(seen)
+ word = self.defines.get(word, word)
+ return self._recursive_transform(word, local_seen)
+
+ return TOKEN.sub(transform_word, line)
def skip_file(self, name):
item = self.include_once.get(name)
|
nanonyme/simplecpreprocessor
|
766d87b952e0ad6f573ce396aff41cc13521e79e
|
diff --git a/test_simplecpreprocessor.py b/test_simplecpreprocessor.py
index 71ae36d..8a4f33b 100644
--- a/test_simplecpreprocessor.py
+++ b/test_simplecpreprocessor.py
@@ -97,6 +97,14 @@ class TestSimpleCPreprocessor(ProfilerMixin, unittest.TestCase):
expected_list = ["(4 + (2 * x))\n", "(2 * (4 + y))\n"]
self.run_case(f_obj, expected_list)
+ def test_define_indirect_self_reference_multiple(self):
+ f_obj = FakeFile("header.h", ["#define I 1\n",
+ "#define J I + 2\n",
+ "#define K I + J\n",
+ "I\n", "J\n", "K\n"])
+ expected_list = ["1\n", "1 + 2\n", "1 + 1 + 2\n"]
+ self.run_case(f_obj, expected_list)
+
def test_partial_match(self):
f_obj = FakeFile("header.h", [
"#define FOO\n",
|
Macro expansion produces wrong results
See for details https://github.com/nanonyme/simplecpreprocessor/commit/c3a1d4617fa0977bbc0f6bd0098b2d74219489bf
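For context, a standalone sketch of the expansion strategy the fix adopts — each token is expanded against a per-path `seen` set, so direct self-references stop recursing while distinct macros on the same line still expand independently (this mirrors the patch above, not the project's actual class):
```python
import re

TOKEN = re.compile(r"\b\w+\b")

def expand(line, defines, seen=frozenset()):
    def transform(match):
        word = match.group(0)
        # A word already being expanded on this path must not recurse again.
        if word in seen or word not in defines:
            return word
        return expand(defines[word], defines, seen | {word})
    return TOKEN.sub(transform, line)

defines = {"I": "1", "J": "I + 2", "K": "I + J"}
assert expand("K\n", defines) == "1 + 1 + 2\n"
```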
|
0.0
|
766d87b952e0ad6f573ce396aff41cc13521e79e
|
[
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_define_indirect_self_reference_multiple"
] |
[
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_blank_define",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_complex_ignore",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_define",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_define_indirect_self_reference",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_define_inside_ifndef",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_define_parens",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_define_simple_self_referential",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_define_undefine",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_define_with_comment",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_expand_size_t",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_extra_endif_causes_error",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_fulfilled_ifdef_define_allowed",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_fullfile_guard_ifdef_noskip",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_fullfile_guard_ifdef_skip",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_fullfile_guard_ifndef_noskip",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_fullfile_guard_ifndef_skip",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_handler_existing_file",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_handler_missing_file",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_ifdef_else_defined",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_ifdef_else_undefined",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_ifdef_file_guard",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_ifdef_left_open_causes_error",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_ifdef_unfulfilled_define_ignored",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_ifdef_with_comment",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_ifndef_else_defined",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_ifndef_else_undefined",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_ifndef_fulfilled_define_allowed",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_ifndef_left_open_causes_error",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_ifndef_unfulfilled_define_ignored",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_ignore_include_path",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_include_local_file_with_subdirectory",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_include_missing_local_file",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_include_with_path_list",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_include_with_path_list_with_subdirectory",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_lines_normalize_custom",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_lines_normalized",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_multiline_define",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_no_fullfile_guard_ifdef",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_no_fullfile_guard_ifndef",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_partial_match",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_platform_constants",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_pragma_once",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_repeated_macro",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_space_macro_indentation",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_tab_macro_indentation",
"test_simplecpreprocessor.py::TestSimpleCPreprocessor::test_unexpected_macro_gives_parse_error"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-06-24 08:39:38+00:00
|
bsd-2-clause
| 4,101 |
|
napalm-automation-community__napalm-s350-39
|
diff --git a/napalm_s350/s350.py b/napalm_s350/s350.py
index 23c371a..206249f 100644
--- a/napalm_s350/s350.py
+++ b/napalm_s350/s350.py
@@ -406,14 +406,17 @@ class S350Driver(NetworkDriver):
match = re.match(r'^--------- -+ .*$', line)
if match:
header = False
+ fields_end = self._get_lldp_neighbors_fields_end(line)
continue
- line_elems = line.split()
+ line_elems = self._get_lldp_neighbors_line_to_fields(line, fields_end)
- # long system name owerflow to the other line
- if len(line_elems) == 1:
- # complete remote name
- remote_name = remote_name + line_elems[0]
+ # info overflows to the next line
+ if line_elems[0] == '' or line_elems[4] == '' or line_elems[5] == '':
+ # complete the overflowed fields
+ local_port = local_port + line_elems[0]
+ remote_port = remote_port + line_elems[2]
+ remote_name = remote_name + line_elems[3]
# then reuse old values and rewrite previous entry
else:
local_port = line_elems[0]
@@ -429,6 +432,26 @@ class S350Driver(NetworkDriver):
return neighbors
+ def _get_lldp_neighbors_line_to_fields(self, line, fields_end):
+ """ dynamic fields lenghts """
+ line_elems={}
+ index=0
+ f_start=0
+ for f_end in fields_end:
+ line_elems[index] = line[f_start:f_end].strip()
+ index += 1
+ f_start = f_end
+ return line_elems
+
+ def _get_lldp_neighbors_fields_end(self, dashline):
+ """ fields length are diferent device to device, detect them on horizontal lin """
+
+ fields_end=[m.start() for m in re.finditer(' ', dashline)]
+ #fields_position.insert(0,0)
+ fields_end.append(len(dashline))
+
+ return fields_end
+
def _get_lldp_line_value(self, line):
"""
Safe-ish method to get the value from an 'lldp neighbors $IF' line.
|
napalm-automation-community/napalm-s350
|
efc48421d4d975d790feaaa73a6151372ce5b10a
|
diff --git a/test/unit/mocked_data/test_get_lldp_neighbors/SG350X-48P-K9-long_systemname/expected_result.json b/test/unit/mocked_data/test_get_lldp_neighbors/SG350X-48P-K9-long_systemname/expected_result.json
index 942f033..591a9c5 100644
--- a/test/unit/mocked_data/test_get_lldp_neighbors/SG350X-48P-K9-long_systemname/expected_result.json
+++ b/test/unit/mocked_data/test_get_lldp_neighbors/SG350X-48P-K9-long_systemname/expected_result.json
@@ -4,7 +4,7 @@
"port" : "74:ac:aa:ff:1a:5f"
} ],
"gi1/0/35" : [ {
- "hostname" : "O",
+ "hostname" : "",
"port" : "34:e6:aa:ff:53:a8"
} ],
"te1/0/1" : [ {
diff --git a/test/unit/mocked_data/test_get_lldp_neighbors/SG350X-48P-K9/expected_result.json b/test/unit/mocked_data/test_get_lldp_neighbors/SG350X-48P-K9/expected_result.json
index 51fea79..34b31f9 100644
--- a/test/unit/mocked_data/test_get_lldp_neighbors/SG350X-48P-K9/expected_result.json
+++ b/test/unit/mocked_data/test_get_lldp_neighbors/SG350X-48P-K9/expected_result.json
@@ -4,7 +4,7 @@
"port" : "74:ac:aa:ff:1a:5f"
} ],
"gi1/0/35" : [ {
- "hostname" : "O",
+ "hostname" : "",
"port" : "34:e6:aa:ff:53:a8"
} ],
"te1/0/1" : [ {
diff --git a/test/unit/mocked_data/test_get_lldp_neighbors/SG3xx-long_port_and_sysname/expected_result.json b/test/unit/mocked_data/test_get_lldp_neighbors/SG3xx-long_port_and_sysname/expected_result.json
new file mode 100644
index 0000000..5683619
--- /dev/null
+++ b/test/unit/mocked_data/test_get_lldp_neighbors/SG3xx-long_port_and_sysname/expected_result.json
@@ -0,0 +1,10 @@
+{
+ "gi7" : [ {
+ "hostname" : "switchc23cf0",
+ "port" : "gi8"
+ } ],
+ "gi8" : [ {
+ "hostname" : "iai-sti0067.example.net",
+ "port" : "9c:eb:e8:52:bd:7c"
+ } ]
+}
diff --git a/test/unit/mocked_data/test_get_lldp_neighbors/SG3xx-long_port_and_sysname/show_lldp_neighbors.txt b/test/unit/mocked_data/test_get_lldp_neighbors/SG3xx-long_port_and_sysname/show_lldp_neighbors.txt
new file mode 100644
index 0000000..7da8dbe
--- /dev/null
+++ b/test/unit/mocked_data/test_get_lldp_neighbors/SG3xx-long_port_and_sysname/show_lldp_neighbors.txt
@@ -0,0 +1,12 @@
+
+System capability legend:
+B - Bridge; R - Router; W - Wlan Access Point; T - telephone;
+D - DOCSIS Cable Device; H - Host; r - Repeater;
+TP - Two Ports MAC Relay; S - S-VLAN; C - C-VLAN; O - Other
+
+ Port Device ID Port ID System Name Capabilities TTL
+--------- ----------------- ------------- ----------------- ------------ -----
+gi7 00:a5:bf:c2:3c:f0 gi8 switchc23cf0 B, R 91
+gi8 f2:18:98:43:be:9b 9c:eb:e8:52:b iai-sti0067.examp B, W 115
+ d:7c le.net
+
diff --git a/test/unit/mocked_data/test_get_lldp_neighbors/SG3xx-long_port_id/expected_result.json b/test/unit/mocked_data/test_get_lldp_neighbors/SG3xx-long_port_id/expected_result.json
new file mode 100644
index 0000000..8df5b44
--- /dev/null
+++ b/test/unit/mocked_data/test_get_lldp_neighbors/SG3xx-long_port_id/expected_result.json
@@ -0,0 +1,1 @@
+{"gi7": [{"hostname": "switchc23cf0", "port": "gi8"}], "gi8": [{"hostname": "iai-sti006", "port": "9c:eb:e8:52:bd:7c"}]}
diff --git a/test/unit/mocked_data/test_get_lldp_neighbors/SG3xx-long_port_id/show_lldp_neighbors.txt b/test/unit/mocked_data/test_get_lldp_neighbors/SG3xx-long_port_id/show_lldp_neighbors.txt
new file mode 100644
index 0000000..cc12440
--- /dev/null
+++ b/test/unit/mocked_data/test_get_lldp_neighbors/SG3xx-long_port_id/show_lldp_neighbors.txt
@@ -0,0 +1,12 @@
+
+System capability legend:
+B - Bridge; R - Router; W - Wlan Access Point; T - telephone;
+D - DOCSIS Cable Device; H - Host; r - Repeater;
+TP - Two Ports MAC Relay; S - S-VLAN; C - C-VLAN; O - Other
+
+ Port Device ID Port ID System Name Capabilities TTL
+--------- ----------------- ------------- ----------------- ------------ -----
+gi7 00:a5:bf:c2:3c:f0 gi8 switchc23cf0 B, R 91
+gi8 f2:18:98:43:be:9b 9c:eb:e8:52:b iai-sti006 B, W 115
+ d:7c
+
|
parsing show lldp neighbors
I am working my way through the development branch and came across the parser for lldp neigbors. I don't think it works reliably. I have seen the case that both the system name and the port ID can be wrapped.
Examples:
```
switchf5bcdb#show lldp neighbors
System capability legend:
B - Bridge; R - Router; W - Wlan Access Point; T - telephone;
D - DOCSIS Cable Device; H - Host; r - Repeater;
TP - Two Ports MAC Relay; S - S-VLAN; C - C-VLAN; O - Other
Port Device ID Port ID System Name Capabilities TTL
--------- ----------------- ------------- ----------------- ------------ -----
gi7 00:a5:bf:c2:3c:f0 gi8 switchc23cf0 B, R 91
gi8 f2:18:98:43:be:9b 9c:eb:e8:52:b iai-sti006 B, W 115
d:7c
```
```
System capability legend:
B - Bridge; R - Router; W - Wlan Access Point; T - telephone;
D - DOCSIS Cable Device; H - Host; r - Repeater;
TP - Two Ports MAC Relay; S - S-VLAN; C - C-VLAN; O - Other
Port Device ID Port ID System Name Capabilities TTL
--------- ----------------- ----------------- ----------------- ------------ -----
te1/0/1 a0:f8:49:f2:4b:b6 te1/0/22 xx-rackswitch9 B, R 100
te1/0/2 a0:f8:49:f2:4b:b6 te2/0/22 xx-rackswitch9 B, R 105
te1/0/22 00:25:45:03:a5:c0 Te1/2 scn-0123-r004-3.t B, R 98
mn.xxx.xxx.xxx
```
Lines: https://github.com/napalm-automation-community/napalm-s350/blob/657bcc570ddcb0115e017fc2819aa38f95bde177/napalm_s350/s350.py#L360-L396
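A minimal sketch of the fixed-width approach the patch takes — derive column end positions from the dashed ruler, then slice every row (including wrapped continuation rows) at those positions; the spacing of the sample rows is illustrative:
```python
import re

def fields_end(ruler):
    # Each single space in the dashed ruler marks a column boundary.
    ends = [m.start() for m in re.finditer(" ", ruler)]
    ends.append(len(ruler))
    return ends

def split_fields(line, ends):
    out, start = [], 0
    for end in ends:
        out.append(line[start:end].strip())
        start = end
    return out

ruler = "--------- ----------------- ------------- ----------------- ------------ -----"
row   = "gi8       f2:18:98:43:be:9b 9c:eb:e8:52:b iai-sti0067.examp B, W         115"
cont  = "                            d:7c          le.net"
ends = fields_end(ruler)
print(split_fields(row, ends))   # ['gi8', 'f2:18:98:43:be:9b', '9c:eb:e8:52:b', ...]
print(split_fields(cont, ends))  # empty Port field flags a continuation row
```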
|
0.0
|
efc48421d4d975d790feaaa73a6151372ce5b10a
|
[
"test/unit/test_getters.py::TestGetter::test_get_lldp_neighbors[SG3xx-long_port_and_sysname]",
"test/unit/test_getters.py::TestGetter::test_get_lldp_neighbors[SG350X-48P-K9-long_systemname]",
"test/unit/test_getters.py::TestGetter::test_get_lldp_neighbors[SG3xx-long_port_id]",
"test/unit/test_getters.py::TestGetter::test_get_lldp_neighbors[SG350X-48P-K9]"
] |
[
"test/unit/test_getters.py::TestGetter::test_is_alive[normal]",
"test/unit/test_getters.py::TestGetter::test_get_lldp_neighbors_detail[SG350X-48P-K9]",
"test/unit/test_getters.py::TestGetter::test_get_arp_table[SG300-10]",
"test/unit/test_getters.py::TestGetter::test_get_arp_table[SG350X-48P-K9]",
"test/unit/test_getters.py::TestGetter::test_get_arp_table_with_vrf[SG300-10]",
"test/unit/test_getters.py::TestGetter::test_get_arp_table_with_vrf[SG350X-48P-K9]",
"test/unit/test_getters.py::TestGetter::test_get_ntp_servers[SG350X-48P-K9]",
"test/unit/test_getters.py::TestGetter::test_get_interfaces_ip[SG350X-48P-K9]",
"test/unit/test_getters.py::TestGetter::test_get_config[SG300-10]",
"test/unit/test_getters.py::TestGetter::test_get_config[SG350X-48P-K9]",
"test/unit/test_getters.py::TestGetter::test_get_config_sanitized[SG300-10]",
"test/unit/test_getters.py::TestGetter::test_get_config_sanitized[SG350X-48P-K9]"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2020-11-05 16:24:08+00:00
|
apache-2.0
| 4,102 |
|
napjon__krisk-39
|
diff --git a/krisk/plot/__init__.py b/krisk/plot/__init__.py
index 14e24b8..e702c4a 100644
--- a/krisk/plot/__init__.py
+++ b/krisk/plot/__init__.py
@@ -8,7 +8,8 @@ def bar(df,
how='count',
stacked=False,
annotate=None,
- full=False):
+ full=False,
+ trendline=False):
"""
Parameters
----------
@@ -26,10 +27,13 @@ def bar(df,
stacked: Boolean, default to False.
Whether to stacked category on top of the other categories.
annotate: string, {'all',True} default to None
- if True, annotate value on top of the plot element. If stacked is also True, annotate the last
- category. if 'all' and stacked, annotate all category
+ if True, annotate value on top of the plot element. If stacked is also True, annotate the
+ last category. If 'all' and stacked, annotate all categories
full: boolean, default to False.
If true, set to full area stacked chart. Only work if stacked is True.
+ trendline: boolean, default to False.
+ If true, add a line connecting the bars. Only works when there is no category, or when
+ the category is stacked (but not full).
Returns
-------
@@ -39,6 +43,7 @@ def bar(df,
# TODO: add optional argument trendline
return make_chart(df,type='bar',x=x,y=y,c=c,how=how,stacked=stacked,full=full,
+ trendline=trendline,
annotate='top' if annotate == True else annotate)
diff --git a/krisk/plot/bar_line.py b/krisk/plot/bar_line.py
index 057ed1d..1101415 100644
--- a/krisk/plot/bar_line.py
+++ b/krisk/plot/bar_line.py
@@ -51,6 +51,20 @@ def set_bar_line_chart(chart, df, x, c, **kwargs):
if kwargs['annotate'] == 'top':
series[-1]['label'] = d_annotate
+ if kwargs['type'] == 'bar' and kwargs['trendline']:
+ trendline = {'name':'trendline', 'type': 'line'}
+
+ if c and kwargs['stacked']:
+ trendline['data'] = [0] * len(series[-1]['data'])
+ trendline['stack'] = c
+ elif c is None:
+ trendline['data'] = series[0]['data']
+ else:
+ raise AssertionError('Trendline requires either stacked categories or no category')
+
+ series.append(trendline)
+
+
# TODO: make annotate receive all kinds supported in echarts.
|
napjon/krisk
|
b7489f45df16b6805b2f576d696dabc1a3bc5235
|
diff --git a/krisk/tests/data/bar_year_pop_mean_continent_trendline.json b/krisk/tests/data/bar_year_pop_mean_continent_trendline.json
new file mode 100644
index 0000000..89aa040
--- /dev/null
+++ b/krisk/tests/data/bar_year_pop_mean_continent_trendline.json
@@ -0,0 +1,152 @@
+{
+ "legend": {
+ "data": [
+ "Africa",
+ "Americas",
+ "Asia",
+ "Europe",
+ "Oceania"
+ ]
+ },
+ "title": {
+ "text": ""
+ },
+ "yAxis": {},
+ "series": [
+ {
+ "stack": "continent",
+ "type": "bar",
+ "data": [
+ 9279525,
+ 10270856,
+ 11000948,
+ 12760499,
+ 14760787,
+ 17152804,
+ 20033753,
+ 23254956,
+ 26298373,
+ 29072015,
+ 31287142,
+ 33333216
+ ],
+ "name": "Africa"
+ },
+ {
+ "stack": "continent",
+ "type": "bar",
+ "data": [
+ 17876956,
+ 19610538,
+ 21283783,
+ 22934225,
+ 24779799,
+ 26983828,
+ 29341374,
+ 31620918,
+ 33958947,
+ 36203463,
+ 38331121,
+ 40301927
+ ],
+ "name": "Americas"
+ },
+ {
+ "stack": "continent",
+ "type": "bar",
+ "data": [
+ 8425333,
+ 9240934,
+ 10267083,
+ 11537966,
+ 13079460,
+ 14880372,
+ 12881816,
+ 13867957,
+ 16317921,
+ 22227415,
+ 25268405,
+ 31889923
+ ],
+ "name": "Asia"
+ },
+ {
+ "stack": "continent",
+ "type": "bar",
+ "data": [
+ 1282697,
+ 1476505,
+ 1728137,
+ 1984060,
+ 2263554,
+ 2509048,
+ 2780097,
+ 3075321,
+ 3326498,
+ 3428038,
+ 3508512,
+ 3600523
+ ],
+ "name": "Europe"
+ },
+ {
+ "stack": "continent",
+ "type": "bar",
+ "data": [
+ 8691212,
+ 9712569,
+ 10794968,
+ 11872264,
+ 13177000,
+ 14074100,
+ 15184200,
+ 16257249,
+ 17481977,
+ 18565243,
+ 19546792,
+ 20434176
+ ],
+ "name": "Oceania"
+ },
+ {
+ "stack": "continent",
+ "data": [
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0
+ ],
+ "type": "line",
+ "name": "trendline"
+ }
+ ],
+ "tooltip": {
+ "axisPointer": {
+ "type": ""
+ }
+ },
+ "xAxis": {
+ "data": [
+ 1952,
+ 1957,
+ 1962,
+ 1967,
+ 1972,
+ 1977,
+ 1982,
+ 1987,
+ 1992,
+ 1997,
+ 2002,
+ 2007
+ ]
+ }
+}
\ No newline at end of file
diff --git a/krisk/tests/data/bar_year_pop_mean_trendline.json b/krisk/tests/data/bar_year_pop_mean_trendline.json
new file mode 100644
index 0000000..15ef467
--- /dev/null
+++ b/krisk/tests/data/bar_year_pop_mean_trendline.json
@@ -0,0 +1,68 @@
+{
+ "legend": {
+ "data": []
+ },
+ "title": {
+ "text": ""
+ },
+ "yAxis": {},
+ "series": [
+ {
+ "type": "bar",
+ "data": [
+ 9111144.6,
+ 10062280.4,
+ 11014983.8,
+ 12217802.8,
+ 13612120.0,
+ 15120030.4,
+ 16044248.0,
+ 17615280.2,
+ 19476743.2,
+ 21899234.8,
+ 23588394.4,
+ 25911953.0
+ ],
+ "name": "year"
+ },
+ {
+ "data": [
+ 9111144.6,
+ 10062280.4,
+ 11014983.8,
+ 12217802.8,
+ 13612120.0,
+ 15120030.4,
+ 16044248.0,
+ 17615280.2,
+ 19476743.2,
+ 21899234.8,
+ 23588394.4,
+ 25911953.0
+ ],
+ "type": "line",
+ "name": "trendline"
+ }
+ ],
+ "tooltip": {
+ "axisPointer": {
+ "type": ""
+ }
+ },
+ "xAxis": {
+ "data": [
+ 1952,
+ 1957,
+ 1962,
+ 1967,
+ 1972,
+ 1977,
+ 1982,
+ 1987,
+ 1992,
+ 1997,
+ 2002,
+ 2007
+ ]
+ }
+}
\ No newline at end of file
diff --git a/krisk/tests/test_plot.py b/krisk/tests/test_plot.py
index 41aed67..bdce2e5 100644
--- a/krisk/tests/test_plot.py
+++ b/krisk/tests/test_plot.py
@@ -77,6 +77,20 @@ def test_full_bar_line(gapminder):
assert bar.option == line.option == true_option
+
+def test_trendline(gapminder):
+
+ p = kk.bar(gapminder,'year',how='mean',y='pop',trendline=True)
+ assert p.get_option() == read_option_tests('bar_year_pop_mean_trendline.json')
+
+ p = kk.bar(gapminder,'year',how='mean',y='pop',trendline=True,c='continent',stacked=True)
+ assert p.get_option() == read_option_tests('bar_year_pop_mean_continent_trendline.json')
+
+ try:
+ kk.bar(gapminder,'year',how='mean',y='pop',trendline=True,c='continent')
+ except AssertionError:
+ pass
+
def test_hist(gapminder):
true_option = read_option_tests('hist_x.json')
|
Add trendline parameter for bar chart
Add a trendline (a line connecting the bars) that shows change across a bar chart; a usage sketch follows.
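A usage sketch based on the signature added in this patch; `df` stands in for a DataFrame like the gapminder set used in the tests:
```python
import krisk.plot as kk

# Connect the bar tops with a line.
kk.bar(df, 'year', y='pop', how='mean', trendline=True)

# With a category, the trendline is only supported when the bars are stacked.
kk.bar(df, 'year', y='pop', how='mean', c='continent', stacked=True, trendline=True)
```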
|
0.0
|
b7489f45df16b6805b2f576d696dabc1a3bc5235
|
[
"krisk/tests/test_plot.py::test_trendline"
] |
[
"krisk/tests/test_plot.py::test_bar",
"krisk/tests/test_plot.py::test_line",
"krisk/tests/test_plot.py::test_full_bar_line",
"krisk/tests/test_plot.py::test_scatter"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2016-09-02 13:48:46+00:00
|
bsd-3-clause
| 4,103 |
|
napjon__krisk-53
|
diff --git a/krisk/plot/api.py b/krisk/plot/api.py
index 281688f..dd4437d 100644
--- a/krisk/plot/api.py
+++ b/krisk/plot/api.py
@@ -9,7 +9,10 @@ def bar(df,
stacked=False,
annotate=None,
full=False,
- trendline=False):
+ trendline=False,
+ sort_on='index',
+ sort_c_on=None,
+ ascending=True):
"""
Parameters
----------
@@ -17,33 +20,42 @@ def bar(df,
data to be used for the chart
x: string
columns to be used as category axis
- y: string, default to None
+ y: string, default None
if None, use count of category value. otherwise aggregate based on y columns
- category: string, default to None
+ category: string, default None
another grouping columns inside x-axis
- how: string, default to None
+ how: string, default None
to be passed to pd.group_by(x).aggregate(how). Can be mean,median, or any
reduced operations.
- stacked: Boolean, default to False.
+ stacked: Boolean, default False.
Whether to stacked category on top of the other categories.
- annotate: string, {'all',True} default to None
+ annotate: string, {'all',True} default None
if True, annotate value on top of the plot element. If stacked is also True, annotate the
last category. if 'all' and stacked, annotate all category
- full: boolean, default to False.
+ full: boolean, default False.
If true, set to full area stacked chart. Only work if stacked is True.
- trendline: boolean, default to False.
+ trendline: boolean, default False.
If true, add line that connected the bars. Only work if not category, category but stacked,
- or not full.
+ or not full.
+ sort_on: {'index', 'values', int, 'count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max'},
+ default 'index'.
+ Set the sort mode.
+ If 'index', sort the index in lexicographical order, as s.sort_index().
+ If 'values', sort on the values themselves, as s.sort_values().
+ If any other string, sort on the deviation from that pd.Series.describe() statistic.
+ If an integer, treat it as a value and sort on the deviation from it.
+ sort_c_on: string, default None.
+ specify the category whose values serve as the sort key when c is given. Must be
+ specified whenever sort_on is anything other than the default.
+ ascending: boolean, default True
+ sort ascending vs. descending
Returns
-------
Chart Object
"""
-
- # TODO: add optional argument trendline
-
return make_chart(df,type='bar',x=x,y=y,c=c,how=how,stacked=stacked,full=full,
- trendline=trendline,
+ trendline=trendline, sort_on=sort_on, sort_c_on=sort_c_on, ascending=ascending,
annotate='top' if annotate == True else annotate)
@@ -56,7 +68,10 @@ def line(df,
area=False,
annotate=None,
full=False,
- smooth=False):
+ smooth=False,
+ sort_on='index',
+ sort_c_on=None,
+ ascending=True):
"""
Parameters
----------
@@ -64,29 +79,41 @@ def line(df,
data to be used for the chart
x: string
columns to be used as category axis
- y: string, default to None
+ y: string, default None
if None, use count of category value. otherwise aggregate based on y columns
- c: string, default to None
+ c: string, default None
category column inside x-axis
- how: string, default to None
+ how: string, default None
to be passed to pd.group_by(x).aggregate(how). Can be mean,median, or any
reduced operations.
- stacked: Boolean, default to False.
+ stacked: Boolean, default False.
Whether to stacked category on top of the other categories.
- annotate: string, {'all',True} default to None
+ annotate: string, {'all',True} default None
if True, annotate value on top of the plot element. If stacked is also True, annotate the last
category. if 'all' and stacked, annotate all category
- full: boolean, default to False.
+ full: boolean, default False.
If true, set to full area stacked chart. Only work if stacked is True.
- smooth: boolean, default to False.
+ smooth: boolean, default False.
If true, smooth the line.
-
+ sort_on: {'index', 'values', int, 'count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max'},
+ default 'index'.
+ Set the sort mode.
+ If 'index', sort the index in lexicographical order, as s.sort_index().
+ If 'values', sort on the values themselves, as s.sort_values().
+ If any other string, sort on the deviation from that pd.Series.describe() statistic.
+ If an integer, treat it as a value and sort on the deviation from it.
+ sort_c_on: string, default None.
+ specify the category whose values serve as the sort key when c is given. Must be
+ specified whenever sort_on is anything other than the default.
+ ascending: boolean, default True
+ sort ascending vs. descending
+
Returns
-------
Chart Object
"""
return make_chart(df,type='line',x=x,y=y,c=c,how=how,stacked=stacked,area=area,full=full,
- smooth=smooth,
+ smooth=smooth, sort_on=sort_on, sort_c_on=sort_c_on, ascending=ascending,
annotate='top' if annotate == True else annotate)
@@ -105,18 +132,18 @@ def hist(df,
data to be used for the chart
x: string
columns to be used as category axis
- c: string, default to None
+ c: string, default None
another grouping columns inside x-axis
- bins: int, default to 10
+ bins: int, default 10
Set number of bins in histogram
- normed: boolean, default to False
+ normed: boolean, default False
Whether normalize the histogram
- stacked: Boolean, default to False.
+ stacked: Boolean, default False.
Whether to stacked category on top of the other categories.
- annotate: string, {'all',True} default to None
+ annotate: string, {'all',True} default None
if True, annotate value on top of the plot element. If stacked is also True, annotate the last
category. if 'all' and stacked, annotate all category
- density: boolean, default to False.
+ density: boolean, default False.
Whether to add density to the plot
Returns
@@ -136,12 +163,12 @@ def scatter(df, x, y, s=None, c=None, saturate=None, size_px=(10, 70)):
data to be used for the chart
x,y: string, columns in pd.DataFrame
Used as coordinate in scatter chart
- s: string, columns in pd.DataFrame default to None
+ s: string, columns in pd.DataFrame default None
Used as sizing value of the scatter points
- c: string, default to None
+ c: string, default None
column used as grouping color category
saturation
- size_px: tuple, default to (10,70)
+ size_px: tuple, default (10,70)
boundary size, lower and upper limit in pixel for min-max scatter points
@@ -149,5 +176,4 @@ def scatter(df, x, y, s=None, c=None, saturate=None, size_px=(10, 70)):
-------
Chart Object
"""
- #TODO add saturation
return make_chart(df,type='scatter',x=x,y=y,s=s,c=c,saturate=saturate,size_px=size_px)
diff --git a/krisk/plot/bar_line.py b/krisk/plot/bar_line.py
index 73ffa54..931a83e 100644
--- a/krisk/plot/bar_line.py
+++ b/krisk/plot/bar_line.py
@@ -95,11 +95,7 @@ def set_bar_line_chart(chart, df, x, c, **kwargs):
else:
raise AssertionError('Density must either stacked category, or not category')
- series.append(density)
-
-
-
-
+ series.append(density)
def get_bar_line_data(df, x, c, y, **kwargs):
@@ -119,11 +115,29 @@ def get_bar_line_data(df, x, c, y, **kwargs):
else:
data = df[x].value_counts()
+ #Specify sort_on and order method
+ sort_on = kwargs['sort_on']
+ descr_keys = pd.Series([0]).describe().keys().tolist()
+
+ if isinstance(sort_on, str):
+ assert sort_on in ['index','values'] + descr_keys
+
+ if sort_on == 'index':
+ data.sort_index(inplace=True, ascending=kwargs['ascending'])
+ else:
+ if sort_on != 'values':
+ val_deviation = data.describe().loc[sort_on] if isinstance(sort_on, str) else sort_on
+ data = data - val_deviation
+ if c:
+ assert kwargs['sort_c_on'] is not None
+ data.sort_values(kwargs['sort_c_on'], inplace=True, ascending=kwargs['ascending'])
+ else:
+ data.sort_values(inplace=True, ascending=kwargs['ascending'])
+ # Stacked when category
if c and kwargs['stacked'] and kwargs['full']:
data = data.div(data.sum(1),axis=0)
-
return data
|
napjon/krisk
|
19fb69026ff1339649fac5ad82548ccbdc7b8d19
|
diff --git a/krisk/tests/test_plot.py b/krisk/tests/test_plot.py
index ebbd7f5..26fc661 100644
--- a/krisk/tests/test_plot.py
+++ b/krisk/tests/test_plot.py
@@ -103,6 +103,25 @@ def test_full_bar_line(gapminder):
assert remove_name_label(bar).option == remove_name_label(line).option == true_option
+def test_sort_bar_line(gapminder):
+ p = kk.line(gapminder,'year', y='pop', how='mean',c='continent', sort_on='mean', sort_c_on='Americas')
+
+ assert p.option['xAxis']['data'] == [1952, 1957, 1962, 1967, 1972, 1977, 1982, 1987, 1992, 1997, 2002, 2007]
+ assert p.option['legend']['data'] == ['Africa', 'Americas', 'Asia', 'Europe', 'Oceania']
+ assert p.option['series'][0] == {'data': [-10595881.167,
+ -9604550.167,
+ -8874458.167,
+ -7114907.167,
+ -5114619.167,
+ -2722602.167,
+ 158346.833,
+ 3379549.833,
+ 6422966.833,
+ 9196608.833,
+ 11411735.833,
+ 13457809.833],
+ 'name': 'Africa',
+ 'type': 'line'}
def test_hist(gapminder):
|
Add Bar Mode for Sequential, Diverging, and Qualitative
add 'mode' as a keyword parameter for bar charts (see the usage sketch after this list).
* If None, treat the bars as "Qualitative" and sort them in lexicographical order.
* If a boolean, treat the bars as "Sequential"; the value controls whether the sort is ascending.
* If a string ('median' or 'mean'), sort on the deviation from that statistic.
* If an integer, treat it as a value and sort on the deviation from it.
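The merged patch ultimately exposes this as the `sort_on`, `sort_c_on`, and `ascending` keywords rather than a single `mode` argument; a minimal usage sketch, with `df` standing in for any aggregatable DataFrame:
```python
import krisk.plot as kk

# Qualitative (default): categories keep lexicographical index order.
kk.bar(df, 'continent', y='pop', how='mean')

# Sequential: sort on the aggregated values themselves.
kk.bar(df, 'continent', y='pop', how='mean', sort_on='values', ascending=False)

# Diverging: sort on the deviation from a describe() statistic (or a literal number).
kk.bar(df, 'continent', y='pop', how='mean', sort_on='mean')
```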
|
0.0
|
19fb69026ff1339649fac5ad82548ccbdc7b8d19
|
[
"krisk/tests/test_plot.py::test_sort_bar_line"
] |
[
"krisk/tests/test_plot.py::test_bar",
"krisk/tests/test_plot.py::test_trendline",
"krisk/tests/test_plot.py::test_line",
"krisk/tests/test_plot.py::test_smooth_line",
"krisk/tests/test_plot.py::test_full_bar_line",
"krisk/tests/test_plot.py::test_scatter"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2016-10-12 05:48:29+00:00
|
bsd-3-clause
| 4,104 |
|
napjon__krisk-58
|
diff --git a/krisk/plot/make_chart.py b/krisk/plot/make_chart.py
index 924e015..68e3c41 100644
--- a/krisk/plot/make_chart.py
+++ b/krisk/plot/make_chart.py
@@ -43,6 +43,9 @@ def make_chart(df, **kwargs):
if kwargs.get('y', None):
chart.set_ylabel(kwargs['y'])
+ if kwargs['type'] == 'line':
+ chart.set_tooltip_style(trigger='axis',axis_pointer='shadow')
+
if kwargs['type'] in ['bar', 'line', 'hist']:
set_bar_line_chart(chart, df, **kwargs)
|
napjon/krisk
|
8497da2333a8265b2e19c87dcec3bae20b8d4059
|
diff --git a/krisk/tests/test_plot.py b/krisk/tests/test_plot.py
index 50435da..13b78e3 100644
--- a/krisk/tests/test_plot.py
+++ b/krisk/tests/test_plot.py
@@ -92,6 +92,8 @@ def test_line(gapminder):
annotate='all')
opt = read_option_tests('line.json')
assert_barline_data(p, opt)
+ assert p.option['tooltip']['axisPointer']['type'] == 'shadow'
+ assert p.option['tooltip']['trigger'] == 'axis'
def test_smooth_line(gapminder):
@@ -134,6 +136,7 @@ def test_sort_bar_line(gapminder):
'name': 'Africa',
'type': 'line'}
+
def test_hist(gapminder):
p1 = kk.hist(gapminder,'lifeExp',bins=10)
opt1 = read_option_tests('hist_x.json')
|
Line plots should default to tooltip trigger 'axis' and axis pointer type 'shadow'; the expected defaults are sketched below.
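A sketch matching the assertions in the test patch (`df` stands in for any DataFrame with these columns):
```python
import krisk.plot as kk

p = kk.line(df, 'year', y='pop', how='mean')
assert p.option['tooltip']['trigger'] == 'axis'
assert p.option['tooltip']['axisPointer']['type'] == 'shadow'
```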
|
0.0
|
8497da2333a8265b2e19c87dcec3bae20b8d4059
|
[
"krisk/tests/test_plot.py::test_line"
] |
[
"krisk/tests/test_plot.py::test_bar",
"krisk/tests/test_plot.py::test_trendline",
"krisk/tests/test_plot.py::test_smooth_line",
"krisk/tests/test_plot.py::test_full_bar_line",
"krisk/tests/test_plot.py::test_sort_bar_line",
"krisk/tests/test_plot.py::test_scatter"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2016-10-17 08:32:31+00:00
|
bsd-3-clause
| 4,105 |
|
napjon__krisk-63
|
diff --git a/krisk/util.py b/krisk/util.py
index db8bcb3..f798d93 100644
--- a/krisk/util.py
+++ b/krisk/util.py
@@ -25,9 +25,9 @@ def init_notebook():
from IPython.display import Javascript
return Javascript("""
require.config({
- baseUrl : "//cdn.rawgit.com/napjon/krisk/master/krisk/static",
+ baseUrl : "https://cdn.rawgit.com/napjon/krisk/master/krisk/static",
paths: {
- echarts: "//cdnjs.cloudflare.com/ajax/libs/echarts/3.2.1/echarts.min"
+ echarts: "https://cdnjs.cloudflare.com/ajax/libs/echarts/3.2.1/echarts.min"
}
});
""")
\ No newline at end of file
diff --git a/notebooks/Intro.ipynb b/notebooks/Intro.ipynb
index fba28c7..792c2b8 100644
--- a/notebooks/Intro.ipynb
+++ b/notebooks/Intro.ipynb
@@ -9,7 +9,7 @@
},
{
"cell_type": "code",
- "execution_count": 2,
+ "execution_count": 1,
"metadata": {
"collapsed": false
},
@@ -19,9 +19,9 @@
"application/javascript": [
"\n",
" require.config({\n",
- " baseUrl : \"//cdn.rawgit.com/napjon/krisk/master/krisk/static\",\n",
+ " baseUrl : \"https://cdn.rawgit.com/napjon/krisk/master/krisk/static\",\n",
" paths: {\n",
- " echarts: \"//cdnjs.cloudflare.com/ajax/libs/echarts/3.2.1/echarts.min\"\n",
+ " echarts: \"https://cdnjs.cloudflare.com/ajax/libs/echarts/3.2.1/echarts.min\"\n",
" }\n",
" });\n",
" "
@@ -30,7 +30,7 @@
"<IPython.core.display.Javascript object>"
]
},
- "execution_count": 2,
+ "execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
@@ -38,7 +38,7 @@
"source": [
"import pandas as pd\n",
"import krisk.plot as kk\n",
- "# Only used this for nbviewer purposes (online js fetch). Don't use it.\n",
+ "# Use this when you want to nbconvert the notebook (used by nbviewer)\n",
"from krisk import init_notebook; init_notebook()"
]
},
@@ -2944,8 +2944,9 @@
}
],
"metadata": {
+ "anaconda-cloud": {},
"kernelspec": {
- "display_name": "Python 3",
+ "display_name": "Python [default]",
"language": "python",
"name": "python3"
},
@@ -2959,7 +2960,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.5.2"
+ "version": "3.5.1"
}
},
"nbformat": 4,
diff --git a/notebooks/declarative-visualization.ipynb b/notebooks/declarative-visualization.ipynb
index d59fa2f..d4bc7fb 100644
--- a/notebooks/declarative-visualization.ipynb
+++ b/notebooks/declarative-visualization.ipynb
@@ -9,7 +9,7 @@
},
{
"cell_type": "code",
- "execution_count": 3,
+ "execution_count": 1,
"metadata": {
"collapsed": false
},
@@ -19,9 +19,9 @@
"application/javascript": [
"\n",
" require.config({\n",
- " baseUrl : \"//cdn.rawgit.com/napjon/krisk/master/krisk/static\",\n",
+ " baseUrl : \"https://cdn.rawgit.com/napjon/krisk/master/krisk/static\",\n",
" paths: {\n",
- " echarts: \"//cdnjs.cloudflare.com/ajax/libs/echarts/3.2.1/echarts.min\"\n",
+ " echarts: \"https://cdnjs.cloudflare.com/ajax/libs/echarts/3.2.1/echarts.min\"\n",
" }\n",
" });\n",
" "
@@ -30,13 +30,13 @@
"<IPython.core.display.Javascript object>"
]
},
- "execution_count": 3,
+ "execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
- "# Only used this for nbviewer purposes (online js fetch). Don't use it.\n",
+ "# Use this when you want to nbconvert the notebook (used by nbviewer)\n",
"from krisk import init_notebook; init_notebook()"
]
},
diff --git a/notebooks/legend-title-toolbox.ipynb b/notebooks/legend-title-toolbox.ipynb
index 26df8c2..a12b0c4 100644
--- a/notebooks/legend-title-toolbox.ipynb
+++ b/notebooks/legend-title-toolbox.ipynb
@@ -2,7 +2,7 @@
"cells": [
{
"cell_type": "code",
- "execution_count": 2,
+ "execution_count": 1,
"metadata": {
"collapsed": false
},
@@ -12,9 +12,9 @@
"application/javascript": [
"\n",
" require.config({\n",
- " baseUrl : \"//cdn.rawgit.com/napjon/krisk/master/krisk/static\",\n",
+ " baseUrl : \"https://cdn.rawgit.com/napjon/krisk/master/krisk/static\",\n",
" paths: {\n",
- " echarts: \"//cdnjs.cloudflare.com/ajax/libs/echarts/3.2.1/echarts.min\"\n",
+ " echarts: \"https://cdnjs.cloudflare.com/ajax/libs/echarts/3.2.1/echarts.min\"\n",
" }\n",
" });\n",
" "
@@ -23,7 +23,7 @@
"<IPython.core.display.Javascript object>"
]
},
- "execution_count": 2,
+ "execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
@@ -31,7 +31,7 @@
"source": [
"import pandas as pd\n",
"import krisk.plot as kk\n",
- "# Only used this for nbviewer purposes (online js fetch). Don't use it.\n",
+ "# Use this when you want to nbconvert the notebook (used by nbviewer)\n",
"from krisk import init_notebook; init_notebook()"
]
},
@@ -1456,8 +1456,9 @@
}
],
"metadata": {
+ "anaconda-cloud": {},
"kernelspec": {
- "display_name": "Python 3",
+ "display_name": "Python [default]",
"language": "python",
"name": "python3"
},
@@ -1471,7 +1472,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.5.2"
+ "version": "3.5.1"
}
},
"nbformat": 4,
diff --git a/notebooks/resync-reproducible.ipynb b/notebooks/resync-reproducible.ipynb
index c52c887..61a760a 100644
--- a/notebooks/resync-reproducible.ipynb
+++ b/notebooks/resync-reproducible.ipynb
@@ -2,7 +2,7 @@
"cells": [
{
"cell_type": "code",
- "execution_count": 2,
+ "execution_count": 1,
"metadata": {
"collapsed": false
},
@@ -12,9 +12,9 @@
"application/javascript": [
"\n",
" require.config({\n",
- " baseUrl : \"//cdn.rawgit.com/napjon/krisk/master/krisk/static\",\n",
+ " baseUrl : \"https://cdn.rawgit.com/napjon/krisk/master/krisk/static\",\n",
" paths: {\n",
- " echarts: \"//cdnjs.cloudflare.com/ajax/libs/echarts/3.2.1/echarts.min\"\n",
+ " echarts: \"https://cdnjs.cloudflare.com/ajax/libs/echarts/3.2.1/echarts.min\"\n",
" }\n",
" });\n",
" "
@@ -23,7 +23,7 @@
"<IPython.core.display.Javascript object>"
]
},
- "execution_count": 2,
+ "execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
@@ -34,7 +34,7 @@
"\n",
"import pandas as pd\n",
"import krisk.plot as kk\n",
- "# Only used this for nbviewer purposes (online js fetch). Don't use it.\n",
+ "# Use this when you want to nbconvert the notebook (used by nbviewer)\n",
"from krisk import init_notebook; init_notebook()"
]
},
@@ -484,8 +484,9 @@
}
],
"metadata": {
+ "anaconda-cloud": {},
"kernelspec": {
- "display_name": "Python 3",
+ "display_name": "Python [default]",
"language": "python",
"name": "python3"
},
@@ -499,7 +500,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.5.2"
+ "version": "3.5.1"
}
},
"nbformat": 4,
diff --git a/notebooks/themes-colors.ipynb b/notebooks/themes-colors.ipynb
index d73dcfa..269afc5 100644
--- a/notebooks/themes-colors.ipynb
+++ b/notebooks/themes-colors.ipynb
@@ -19,9 +19,9 @@
"application/javascript": [
"\n",
" require.config({\n",
- " baseUrl : \"//cdn.rawgit.com/napjon/krisk/master/krisk/static\",\n",
+ " baseUrl : \"https://cdn.rawgit.com/napjon/krisk/master/krisk/static\",\n",
" paths: {\n",
- " echarts: \"//cdnjs.cloudflare.com/ajax/libs/echarts/3.2.1/echarts.min\"\n",
+ " echarts: \"https://cdnjs.cloudflare.com/ajax/libs/echarts/3.2.1/echarts.min\"\n",
" }\n",
" });\n",
" "
@@ -38,7 +38,7 @@
"source": [
"import krisk.plot as kk\n",
"import pandas as pd\n",
- "# Only used this for nbviewer purposes (online js fetch). Don't use it.\n",
+ "# Use this when you want to nbconvert the notebook (used by nbviewer)\n",
"from krisk import init_notebook; init_notebook()"
]
},
@@ -2394,8 +2394,9 @@
}
],
"metadata": {
+ "anaconda-cloud": {},
"kernelspec": {
- "display_name": "Python 3",
+ "display_name": "Python [default]",
"language": "python",
"name": "python3"
},
@@ -2409,7 +2410,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.5.2"
+ "version": "3.5.1"
}
},
"nbformat": 4,
|
napjon/krisk
|
5d5980a518bb36fd7b1624fc36c2fbaa5c247e9b
|
diff --git a/krisk/tests/test_template.py b/krisk/tests/test_template.py
index e5ef7e5..c25097f 100644
--- a/krisk/tests/test_template.py
+++ b/krisk/tests/test_template.py
@@ -21,9 +21,9 @@ def test_init_nb():
js_data = init_notebook().data
js_init_template = """
require.config({{
- baseUrl : "//cdn.rawgit.com/napjon/krisk/master/krisk/static",
+ baseUrl : "https://cdn.rawgit.com/napjon/krisk/master/krisk/static",
paths: {{
- echarts: "//cdnjs.cloudflare.com/ajax/libs/echarts/{VER}/echarts.min"
+ echarts: "https://cdnjs.cloudflare.com/ajax/libs/echarts/{VER}/echarts.min"
}}
}});
"""
|
Not support nbconvert
**Reproduce:**
1. Use Intro.ipynb
2. Remove `from krisk import init_notebook; init_notebook()` since the notebook is not going to be published to nbviewer.
3. Use nbconvert to export current page as html
**Output:**
Javascript part failed to print out.
Console error:
`require.min.js:8 Uncaught Error: Script error for: dark`
`require.min.js:8 Failed to load resource: net::ERR_FILE_NOT_FOUND`
...
**Expected Output:**
Charts should be shown after exporting just as they are when run in the notebook.
**Possible solution:**
It seems that after exporting, the page still tries to retrieve the source js files from the local server and the current folder. Is it possible to define the source files' location from Python?
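For context, `//cdn...` is a protocol-relative URL: it inherits the scheme of the hosting page, so an exported HTML file opened from disk resolves it against `file://` and the scripts fail to load — matching the `ERR_FILE_NOT_FOUND` in the console output above. A quick illustration using stdlib URL joining:
```python
from urllib.parse import urljoin

cdn = "//cdnjs.cloudflare.com/ajax/libs/echarts/3.2.1/echarts.min.js"
print(urljoin("https://nbviewer.org/notebook.html", cdn))  # https://cdnjs... (works)
print(urljoin("file:///home/user/notebook.html", cdn))     # file://cdnjs... (fails)
```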
**ENV:**
Python3.5
Jupyter Notebook 4.2.1
Echarts 3.2.1 (built-in)
|
0.0
|
5d5980a518bb36fd7b1624fc36c2fbaa5c247e9b
|
[
"krisk/tests/test_template.py::test_init_nb"
] |
[
"krisk/tests/test_template.py::test_html"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2016-11-05 07:19:17+00:00
|
bsd-3-clause
| 4,106 |
|
nathan-v__aws_okta_keyman-72
|
diff --git a/aws_okta_keyman/aws.py b/aws_okta_keyman/aws.py
index 80484db..91e961b 100644
--- a/aws_okta_keyman/aws.py
+++ b/aws_okta_keyman/aws.py
@@ -179,7 +179,6 @@ class Session(object):
multiple_accounts = False
first_account = ''
formatted_roles = []
- i = 0
for role in self.assertion.roles():
account = role['role'].split(':')[4]
role_name = role['role'].split(':')[5].split('/')[1]
@@ -187,20 +186,27 @@ class Session(object):
'account': account,
'role_name': role_name,
'arn': role['role'],
- 'principle': role['principle'],
- 'roleIdx': i
+ 'principle': role['principle']
})
if first_account == '':
first_account = account
elif first_account != account:
multiple_accounts = True
- i = i + 1
if multiple_accounts:
formatted_roles = self.account_ids_to_names(formatted_roles)
- self.roles = sorted(formatted_roles,
- key=lambda k: (k['account'], k['role_name']))
+ formatted_roles = sorted(formatted_roles,
+ key=lambda k: (k['account'], k['role_name']))
+
+ # set the role role index after sorting
+ i = 0
+ for role in formatted_roles:
+ role['roleIdx'] = i
+ i = i + 1
+
+ self.roles = formatted_roles
+
return self.roles
def assume_role(self, print_only=False):
|
nathan-v/aws_okta_keyman
|
9dee43b01e5650f12ba9fdc33ce911a10fa723ca
|
diff --git a/aws_okta_keyman/test/aws_test.py b/aws_okta_keyman/test/aws_test.py
index 60be405..f4f1de1 100644
--- a/aws_okta_keyman/test/aws_test.py
+++ b/aws_okta_keyman/test/aws_test.py
@@ -271,10 +271,15 @@ class TestSession(unittest.TestCase):
def test_assume_role_preset(self, mock_write):
mock_write.return_value = None
assertion = mock.Mock()
- assertion.roles.return_value = [{'arn': '', 'principle': ''}]
+
+ roles = [{'role': '::::1:role/role1', 'principle': '', 'arn': '1'},
+ {'role': '::::1:role/role2', 'principle': '', 'arn': '2'},
+ {'role': '::::1:role/role3', 'principle': '', 'arn': '3'}]
+
+ assertion.roles.return_value = roles
session = aws.Session('BogusAssertion')
- session.role = 0
- session.roles = [{'arn': '', 'principle': ''}]
+ session.role = 1
+ session.roles = roles
session.assertion = assertion
sts = {'Credentials':
{'AccessKeyId': 'AKI',
@@ -296,6 +301,13 @@ class TestSession(unittest.TestCase):
mock_write.assert_has_calls([
mock.call()
])
+ session.sts.assert_has_calls([
+ mock.call.assume_role_with_saml(
+ RoleArn='2',
+ PrincipalArn='',
+ SAMLAssertion=mock.ANY,
+ DurationSeconds=3600)
+ ])
@mock.patch('aws_okta_keyman.aws.Session._print_creds')
@mock.patch('aws_okta_keyman.aws.Session._write')
@@ -420,23 +432,29 @@ class TestSession(unittest.TestCase):
self.assertEqual(ret, expected)
def test_available_roles(self):
- roles = [{'role': '::::1:role/role', 'principle': ''},
- {'role': '::::1:role/role', 'principle': ''}]
+ roles = [{'role': '::::1:role/role1', 'principle': ''},
+ {'role': '::::1:role/role3', 'principle': ''},
+ {'role': '::::1:role/role2', 'principle': ''}]
session = aws.Session('BogusAssertion')
session.assertion = mock.MagicMock()
session.assertion.roles.return_value = roles
- expected = [
- {'account': '1', 'role_name': 'role',
- 'principle': '', 'arn': '::::1:role/role',
- 'roleIdx': 0},
- {'account': '1', 'role_name': 'role',
- 'principle': '', 'arn': '::::1:role/role',
- 'roleIdx': 1}
- ]
result = session.available_roles()
print(result)
+
+ expected = [
+ {'account': '1', 'role_name': 'role1',
+ 'principle': '', 'arn': '::::1:role/role1',
+ 'roleIdx': 0},
+ {'account': '1', 'role_name': 'role2',
+ 'principle': '', 'arn': '::::1:role/role2',
+ 'roleIdx': 1},
+ {'account': '1', 'role_name': 'role3',
+ 'principle': '', 'arn': '::::1:role/role3',
+ 'roleIdx': 2}
+ ]
+
self.assertEqual(expected, result)
def test_available_roles_multiple_accounts(self):
@@ -453,9 +471,9 @@ class TestSession(unittest.TestCase):
session.account_ids_to_names.return_value = roles_full
expected = [
{'account': '1', 'role_name': 'role',
- 'principle': '', 'arn': '::::1:role/role'},
+ 'principle': '', 'arn': '::::1:role/role', 'roleIdx': 0},
{'account': '2', 'role_name': 'role',
- 'principle': '', 'arn': '::::2:role/role'}
+ 'principle': '', 'arn': '::::2:role/role', 'roleIdx': 1}
]
result = session.available_roles()
|
Role Selection In Windows
I've attached the response i've received while choosing a role, the issue in this is it picks the wrong role , in the below output i chooses role 2 which is ideally **aws-exp-exp3** but it assumed the different one please check
12:56:37 (INFO) AWS Okta Keyman 🔐 v0.8.3
Password:
12:56:45 (WARNING) Okta Verify Push being sent...
12:56:45 (INFO) Waiting for MFA success...
12:56:48 (INFO) Waiting for MFA success...
12:56:50 (INFO) Successfully authed Rishabh Ag
12:56:51 (INFO) Getting SAML Assertion from example
12:56:51 (WARNING) Application-level MFA present; re-authenticating Okta
12:56:52 (WARNING) Okta Verify Push being sent...
12:56:52 (INFO) Waiting for MFA success...
12:56:55 (INFO) Waiting for MFA success...
12:56:57 (INFO) Getting SAML Assertion from example
12:56:59 (INFO) Starting AWS session for us-east-1
12:56:59 (WARNING) Multiple AWS roles found; please select one
Account Role
[0] 123456789009 aws-exp-exp1
[1] 123456789009 aws-exp-exp2
[2] 123456789009 aws-exp-exp3
Selection: 2
12:57:02 (INFO) Assuming role: arn:aws:iam::123456789009:role/aws-exp-exp2
12:57:04 (INFO) Wrote profile "default" to C:\Users\Rishabh/.aws\credentials 💾
12:57:04 (INFO) Current time is 2020-07-15 07:27:04.316262
12:57:04 (INFO) Session expires at 2020-07-15 08:27:03+00:00 ⏳
12:57:04 (INFO) All done! 👍
|
0.0
|
9dee43b01e5650f12ba9fdc33ce911a10fa723ca
|
[
"aws_okta_keyman/test/aws_test.py::TestSession::test_available_roles",
"aws_okta_keyman/test/aws_test.py::TestSession::test_available_roles_multiple_accounts"
] |
[
"aws_okta_keyman/test/aws_test.py::TestCredentials::test_add_profile",
"aws_okta_keyman/test/aws_test.py::TestCredentials::test_add_profile_missing_file_creates_new",
"aws_okta_keyman/test/aws_test.py::TestSession::test_account_ids_to_names_call_failed",
"aws_okta_keyman/test/aws_test.py::TestSession::test_account_ids_to_names_map",
"aws_okta_keyman/test/aws_test.py::TestSession::test_account_names_from_html",
"aws_okta_keyman/test/aws_test.py::TestSession::test_assume_role",
"aws_okta_keyman/test/aws_test.py::TestSession::test_assume_role_duration_rejected",
"aws_okta_keyman/test/aws_test.py::TestSession::test_assume_role_multiple",
"aws_okta_keyman/test/aws_test.py::TestSession::test_assume_role_preset",
"aws_okta_keyman/test/aws_test.py::TestSession::test_assume_role_print",
"aws_okta_keyman/test/aws_test.py::TestSession::test_export_creds_to_var_string",
"aws_okta_keyman/test/aws_test.py::TestSession::test_generate_aws_console_url",
"aws_okta_keyman/test/aws_test.py::TestSession::test_get_account_name_map",
"aws_okta_keyman/test/aws_test.py::TestSession::test_get_account_name_map_error",
"aws_okta_keyman/test/aws_test.py::TestSession::test_init_folder_exists",
"aws_okta_keyman/test/aws_test.py::TestSession::test_init_folder_missing",
"aws_okta_keyman/test/aws_test.py::TestSession::test_is_valid_false",
"aws_okta_keyman/test/aws_test.py::TestSession::test_is_valid_false_missing_expiration",
"aws_okta_keyman/test/aws_test.py::TestSession::test_is_valid_true",
"aws_okta_keyman/test/aws_test.py::TestSession::test_print_creds",
"aws_okta_keyman/test/aws_test.py::TestSession::test_write"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2020-07-15 22:16:14+00:00
|
apache-2.0
| 4,107 |
|
nathandines__SPF2IP-3
|
diff --git a/SPF2IP.py b/SPF2IP.py
index e6210f3..b95903e 100644
--- a/SPF2IP.py
+++ b/SPF2IP.py
@@ -29,14 +29,22 @@ def dns_request_unicode(hostname,record_type,*args,**kwargs):
value = value.decode('utf-8')
output.append(value)
elif record_type == "MX":
- value = entry.exchange
- if type(value) is not unicode:
- value = value.__str__().encode('utf-8').decode('utf-8')
+ try:
+ value = entry.exchange.decode('utf-8')
+ except AttributeError as err:
+ if err.args[0] == "'Name' object has no attribute 'decode'":
+ value = unicode(entry.exchange)
+ else:
+ raise
output.append(value)
elif record_type == "TXT":
- value = ''.join([str(ent) for ent in entry.strings])
- if type(value) is not unicode:
- value = value.decode('utf-8')
+ value_array = []
+ for ent in entry.strings:
+ if type(ent) is not unicode:
+ value_array.append(ent.decode('utf-8'))
+ else:
+ value_array.append(ent)
+ value = ''.join(value_array)
output.append(value)
return output
diff --git a/setup.py b/setup.py
index 3b958d6..3561be0 100755
--- a/setup.py
+++ b/setup.py
@@ -11,7 +11,7 @@ with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
setup(
name='SPF2IP',
- version='1.0.4',
+ version='1.0.5',
description='Python module to get IP addresses from an SPF record',
long_description=long_description,
|
nathandines/SPF2IP
|
7e3593a6f322c39a02c1c0f4a108b046ec6c1a20
|
diff --git a/test_SPF2IP.py b/test_SPF2IP.py
index 54caff5..6e88918 100644
--- a/test_SPF2IP.py
+++ b/test_SPF2IP.py
@@ -125,11 +125,13 @@ dns_records = {
class fakedns:
def __init__(self,value,record_type):
if record_type == 'TXT':
- self.strings = value
+ self.strings = []
+ for entry in value:
+ self.strings.append(entry.encode('utf-8'))
elif record_type == 'A' or record_type == 'AAAA':
- self.address = value
+ self.address = value.encode('utf-8')
elif record_type == 'MX':
- self.exchange = value
+ self.exchange = value.encode('utf-8')
def fake_dns_resolver(hostname,record_type):
try:
dns_records[hostname]
|
Broken on 3.6
The current code always returns an empty answer on Python 3.6. This fixes it for me:
```
diff --git a/SPF2IP.py b/SPF2IP.py
index e6210f3..84683ff 100644
--- a/SPF2IP.py
+++ b/SPF2IP.py
@@ -34,7 +34,7 @@ def dns_request_unicode(hostname,record_type,*args,**kwargs):
value = value.__str__().encode('utf-8').decode('utf-8')
output.append(value)
elif record_type == "TXT":
- value = ''.join([str(ent) for ent in entry.strings])
+ value = ''.join([str(ent, encoding='ascii') for ent in entry.strings])
if type(value) is not unicode:
value = value.decode('utf-8')
output.append(value)
```
I only know python3 so I can't give good advice on making code that works on both 2 and 3. But a friend of mine's package has this function that you might find useful:
```
def to_native_str(value, encoding='utf-8'):
if isinstance(value, str):
return value
if six.PY3 and isinstance(value, six.binary_type): #pragma: no cover
return value.decode(encoding)
elif six.PY2 and isinstance(value, six.text_type): #pragma: no cover
return value.encode(encoding)
```
|
0.0
|
7e3593a6f322c39a02c1c0f4a108b046ec6c1a20
|
[
"test_SPF2IP.py::SPF2IPTestCases::test_included_list_is_string_list",
"test_SPF2IP.py::SPF2IPTestCases::test_included_loop",
"test_SPF2IP.py::SPF2IPTestCases::test_ip4_results",
"test_SPF2IP.py::SPF2IPTestCases::test_ip6_results",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_ip4",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_ip6",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_a",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_a_external",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_a_external_slash",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_a_slash",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_aaaa",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_aaaa_external",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_aaaa_external_slash",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_aaaa_slash",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_mx_a",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_mx_a_external",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_mx_a_external_slash",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_mx_a_slash",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_mx_aaaa",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_mx_aaaa_external",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_mx_aaaa_external_longslash",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_mx_aaaa_slash",
"test_SPF2IP.py::SPF2IPTestCases::test_spf_list_is_string_list_with_prefix",
"test_SPF2IP.py::SPF2IPTestCases::test_spf_list_split_spf"
] |
[
"test_SPF2IP.py::SPF2IPTestCases::test_dns_query_method_output",
"test_SPF2IP.py::SPF2IPTestCases::test_domain_without_spf_results",
"test_SPF2IP.py::SPF2IPTestCases::test_included_invalid_spf",
"test_SPF2IP.py::SPF2IPTestCases::test_included_without_includes",
"test_SPF2IP.py::SPF2IPTestCases::test_included_without_spf",
"test_SPF2IP.py::SPF2IPTestCases::test_nonexistent_domain_results",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_empty",
"test_SPF2IP.py::SPF2IPTestCases::test_single_domain_with_mx_a_external_longslash",
"test_SPF2IP.py::SPF2IPTestCases::test_spf_list_invalid_spf",
"test_SPF2IP.py::SPF2IPTestCases::test_spf_list_without_spf"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-01-19 22:24:43+00:00
|
mit
| 4,108 |
|
ncclient__ncclient-314
|
diff --git a/ncclient/manager.py b/ncclient/manager.py
index 3618291..09a8aaf 100644
--- a/ncclient/manager.py
+++ b/ncclient/manager.py
@@ -89,32 +89,47 @@ def make_device_handler(device_params):
return handler_obj
+def _extract_device_params(kwds):
+ if "device_params" in kwds:
+ device_params = kwds["device_params"]
+ del kwds["device_params"]
+ else:
+ device_params = None
+ return device_params
+
+def _extract_manager_params(kwds):
+ if "manager_params" in kwds:
+ manager_params = kwds["manager_params"]
+ del kwds["manager_params"]
+ else:
+ manager_params = {}
+ return manager_params
+
def connect_ssh(*args, **kwds):
"""
Initialize a :class:`Manager` over the SSH transport.
For documentation of arguments see :meth:`ncclient.transport.SSHSession.connect`.
The underlying :class:`ncclient.transport.SSHSession` is created with
- :data:`CAPABILITIES`. It is first instructed to
- :meth:`~ncclient.transport.SSHSession.load_known_hosts` and then
- all the provided arguments are passed directly to its implementation
- of :meth:`~ncclient.transport.SSHSession.connect`.
+ :data:`CAPABILITIES`. It is first instructed to
+ :meth:`~ncclient.transport.SSHSession.load_known_hosts` and then
+ all the provided arguments are passed directly to its implementation
+ of :meth:`~ncclient.transport.SSHSession.connect`.
- To invoke advanced vendor related operation add device_params =
- {'name':'<vendor_alias>'} in connection paramerers. For the time,
- 'junos' and 'nexus' are supported for Juniper and Cisco Nexus respectively.
+ To customize the :class:`Manager`, add a `manager_params` dictionnary in connection
+ parameters (e.g. `manager_params={'timeout': 60}` for a bigger RPC timeout paramater)
- A custom device handler can be provided with device_params =
- {'handler':<handler class>} in connection paramerers.
+ To invoke advanced vendor related operation add
+ `device_params={'name': '<vendor_alias>'}` in connection parameters. For the time,
+ 'junos' and 'nexus' are supported for Juniper and Cisco Nexus respectively.
+
+ A custom device handler can be provided with
+ `device_params={'handler':<handler class>}` in connection parameters.
"""
- # Extract device parameter dict, if it was passed into this function. Need to
- # remove it from kwds, since the session.connect() doesn't like extra stuff in
- # there.
- if "device_params" in kwds:
- device_params = kwds["device_params"]
- del kwds["device_params"]
- else:
- device_params = None
+ # Extract device parameter and manager parameter dictionaries, if they were passed into this function.
+ # Remove them from kwds (which should keep only session.connect() parameters).
+ device_params = _extract_device_params(kwds)
+ manager_params = _extract_manager_params(kwds)
device_handler = make_device_handler(device_params)
device_handler.add_additional_ssh_connect_params(kwds)
@@ -130,17 +145,16 @@ def connect_ssh(*args, **kwds):
if session.transport:
session.close()
raise
- return Manager(session, device_handler, **kwds)
+ return Manager(session, device_handler, **manager_params)
def connect_ioproc(*args, **kwds):
- if "device_params" in kwds:
- device_params = kwds["device_params"]
- del kwds["device_params"]
+ device_params = _extract_device_params(kwds)
+ manager_params = _extract_manager_params(kwds)
+
+ if device_params:
import_string = 'ncclient.transport.third_party.'
import_string += device_params['name'] + '.ioproc'
third_party_import = __import__(import_string, fromlist=['IOProc'])
- else:
- device_params = None
device_handler = make_device_handler(device_params)
@@ -149,7 +163,7 @@ def connect_ioproc(*args, **kwds):
session = third_party_import.IOProc(device_handler)
session.connect()
- return Manager(session, device_handler, **kwds)
+ return Manager(session, device_handler, **manager_params)
def connect(*args, **kwds):
@@ -184,7 +198,7 @@ class Manager(object):
# __metaclass__ = OpExecutor
- def __init__(self, session, device_handler, timeout=30, *args, **kwargs):
+ def __init__(self, session, device_handler, timeout=30):
self._session = session
self._async_mode = False
self._timeout = timeout
diff --git a/ncclient/transport/ssh.py b/ncclient/transport/ssh.py
index cf911ec..00a72a5 100644
--- a/ncclient/transport/ssh.py
+++ b/ncclient/transport/ssh.py
@@ -349,7 +349,8 @@ class SSHSession(Session):
ssh_config = "~/.ssh/config" if sys.platform != "win32" else "~/ssh/config"
if ssh_config is not None:
config = paramiko.SSHConfig()
- config.parse(open(os.path.expanduser(ssh_config)))
+ with open(os.path.expanduser(ssh_config)) as ssh_config_file_obj:
+ config.parse(ssh_config_file_obj)
# Save default Paramiko SSH port so it can be reverted
paramiko_default_ssh_port = paramiko.config.SSH_PORT
|
ncclient/ncclient
|
2b75f2c6a06bd2a5d1be67b01bb65c5ffd2e2d7a
|
diff --git a/test/unit/test_manager.py b/test/unit/test_manager.py
index 8509379..55a9859 100644
--- a/test/unit/test_manager.py
+++ b/test/unit/test_manager.py
@@ -11,6 +11,12 @@ class TestManager(unittest.TestCase):
m = MagicMock()
mock_ssh.return_value = m
conn = self._mock_manager()
+ m.connect.assert_called_once_with(host='10.10.10.10',
+ port=22,
+ username='user',
+ password='password',
+ hostkey_verify=False, allow_agent=False,
+ timeout=3)
self.assertEqual(conn._session, m)
self.assertEqual(conn._timeout, 10)
@@ -62,7 +68,7 @@ class TestManager(unittest.TestCase):
username='user',
password='password',
timeout=10,
- hostkey_verify=False,
+ hostkey_verify=False,
allow_agent=False,
ssh_config=ssh_config_path)
@@ -90,9 +96,10 @@ class TestManager(unittest.TestCase):
port=22,
username='user',
password='password',
- timeout=10,
+ timeout=3,
+ hostkey_verify=False,
device_params={'local': True, 'name': 'junos'},
- hostkey_verify=False)
+ manager_params={'timeout': 10})
self.assertEqual(mock_connect.called, 1)
self.assertEqual(conn._timeout, 10)
self.assertEqual(conn._device_handler.device_params, {'local': True, 'name': 'junos'})
@@ -182,9 +189,10 @@ class TestManager(unittest.TestCase):
port=22,
username='user',
password='password',
- timeout=10,
+ timeout=3,
+ hostkey_verify=False, allow_agent=False,
device_params={'name': 'junos'},
- hostkey_verify=False, allow_agent=False)
+ manager_params={'timeout': 10})
return conn
@patch('socket.fromfd')
|
connect_ssh timeout parameter used for contradicting purposes
The `timeout` kwd parameter of `manager.connect_ssh` ends up being used for two rather contradicting purposes:
1. It is passed as a kwd to `transport.SSHSession.connect` , where it serves as a connection timeout. This matches the documentation of the manager.connect_ssh function
1. It is also passed as kwd to `manager.Manager()`, where it serves as synchronous RPC timeout
If one wants to set the ssh connection timeout to a small value (e.g. 3 seconds, a sensible option for not waiting long for a host that is down), this results in having a RPC timeout of 3 seconds too, which is not enough for retrieving large configurations.
I see two ways of fixing this:
* a) Discontinue the `timeout` parameter of `manager.Manager()` and rely solely on the `Manager.timeout` property for modifying its default value. This would be similar with how other Manager attributes are handled (e.g. `async_mode` or `raise_mode`).
* b) Change the `transport.SSHSession.connect` timeout parameter name to `connect_timeout`, and dispatch the parameters accordingly in `manager.connect_ssh`. This would however require extending the documentation of manager.connect, to explain that it receives arguments that are used by either `transport.SSHSession.connect` (e.g. connect_timeout, and all the others) or `Manager` (e.g. timeout).
IMHO option *(a)* is cleaner, as the `connect_ssh` method keeps its current behaviour of passing all non device_params to `transport.SSHSession.connect`.
I would be happy to contribute with a patch, once we agree on the approach.
|
0.0
|
2b75f2c6a06bd2a5d1be67b01bb65c5ffd2e2d7a
|
[
"test/unit/test_manager.py::TestManager::test_manager_connected",
"test/unit/test_manager.py::TestManager::test_ssh2",
"test/unit/test_manager.py::TestManager::test_manager_channel_id",
"test/unit/test_manager.py::TestManager::test_ioproc",
"test/unit/test_manager.py::TestManager::test_manager_channel_session_id",
"test/unit/test_manager.py::TestManager::test_manager_server_capability",
"test/unit/test_manager.py::TestManager::test_ssh",
"test/unit/test_manager.py::TestManager::test_manager_client_capability",
"test/unit/test_manager.py::TestManager::test_manager_channel_name"
] |
[
"test/unit/test_manager.py::TestManager::test_outbound_manager_connected",
"test/unit/test_manager.py::TestManager::test_connect_ssh_with_hostkey_ecdsa",
"test/unit/test_manager.py::TestManager::test_manager_locked",
"test/unit/test_manager.py::TestManager::test_connect_ioproc",
"test/unit/test_manager.py::TestManager::test_connect_ssh_with_hostkey_rsa",
"test/unit/test_manager.py::TestManager::test_connect_ssh_with_hostkey_ed25519",
"test/unit/test_manager.py::TestManager::test_connect_outbound_ssh",
"test/unit/test_manager.py::TestManager::test_make_device_handler",
"test/unit/test_manager.py::TestManager::test_connect_with_ssh_config",
"test/unit/test_manager.py::TestManager::test_connect_ssh",
"test/unit/test_manager.py::TestManager::test_make_device_handler_provided_handler"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-05-03 14:53:15+00:00
|
apache-2.0
| 4,109 |
|
ncclient__ncclient-420
|
diff --git a/examples/ericsson_nc_prefix_example.py b/examples/ericsson_nc_prefix_example.py
new file mode 100644
index 0000000..29e569a
--- /dev/null
+++ b/examples/ericsson_nc_prefix_example.py
@@ -0,0 +1,59 @@
+#! /usr/bin/env python
+#
+# Connect to the NETCONF server passed on the command line and
+# set a device_params to turn on/off the namespace prefix "nc".
+# if you want to verify the result, you can print the request that
+# was sent. For brevity and clarity of the examples, we omit proper
+# exception handling.
+#
+# $ ./ericsson_nc_prefix_example.py host username password
+
+import sys, os, warnings
+warnings.simplefilter("ignore", DeprecationWarning)
+from ncclient import manager
+
+
+def ericsson_connect(host, port, user, password, device_params):
+ return manager.connect(host=host,
+ port=port,
+ username=user,
+ password=password,
+ device_params=device_params,
+ hostkey_verify-false)
+
+
+def enable_nc_prefix(host, user, password):
+ # add a parameter 'with_ns' to turn on/off 'nc'
+ device_params = {'name': 'ericsson', 'with_ns': True}
+ with ericsson_connect(host,
+ port=22,
+ user=user,
+ password=password,
+ device_params=device_params) as m:
+
+ ret = m.get_config(source="running").data_xml
+ print(ret)
+
+
+def disable_nc_prefix(host, user, password):
+ # add a parameter 'with_ns' to turn on/off 'nc'
+ device_params = {'name': 'ericsson', 'with_ns': False}
+ with ericsson_connect(host,
+ port=22,
+ user=user,
+ password=password,
+ device_params=device_params) as m:
+
+ ret = m.get_config(source="running").data_xml
+ print(ret)
+
+
+def demo(host, user, password):
+ enable_nc_prefix(host, user, password)
+ print("#"*50)
+ disable_nc_prefix(host, user, password)
+
+
+if __name__ == '__main__':
+ demo(sys.argv[1], sys.argv[2], sys.argv[3])
+
diff --git a/ncclient/devices/ericsson.py b/ncclient/devices/ericsson.py
new file mode 100644
index 0000000..c161526
--- /dev/null
+++ b/ncclient/devices/ericsson.py
@@ -0,0 +1,45 @@
+"""
+Handler for Ericsson device specific information.
+
+Note that for proper import, the classname has to be:
+
+ "<Devicename>DeviceHandler"
+
+...where <Devicename> is something like "Default", "Ericsson", etc.
+
+All device-specific handlers derive from the DefaultDeviceHandler, which implements the
+generic information needed for interaction with a Netconf server.
+
+"""
+from ncclient.xml_ import BASE_NS_1_0
+from ncclient.operations.errors import OperationError
+from .default import DefaultDeviceHandler
+
+
+class EricssonDeviceHandler(DefaultDeviceHandler):
+ """
+ Ericsson handler for device specific information.
+
+ """
+ _EXEMPT_ERRORS = []
+
+ def __init__(self, device_params):
+ super(EricssonDeviceHandler, self).__init__(device_params)
+
+ def get_xml_base_namespace_dict(self):
+ return {None: BASE_NS_1_0}
+
+ def get_xml_extra_prefix_kwargs(self):
+ d = {}
+ if self.check_device_params() is False:
+ d.update(self.get_xml_base_namespace_dict())
+ return {"nsmap": d}
+
+ def check_device_params(self):
+ value = self.device_params.get('with_ns')
+ if value in [True, False]:
+ return value
+ elif value is None:
+ return False
+ else:
+ raise OperationError('Invalid "with_ns" value: %s' % value)
|
ncclient/ncclient
|
3380f1140791f4a8de5d303f919f1e4cc9532f32
|
diff --git a/test/unit/devices/test_ericsson.py b/test/unit/devices/test_ericsson.py
new file mode 100644
index 0000000..1970d5e
--- /dev/null
+++ b/test/unit/devices/test_ericsson.py
@@ -0,0 +1,72 @@
+import unittest
+from ncclient import manager
+from ncclient.devices.ericsson import *
+from ncclient.operations.rpc import *
+from ncclient.capabilities import Capabilities
+import ncclient.transport
+
+class TestEricssonDevice(unittest.TestCase):
+
+ def setUp(self):
+ self.device_handler = manager.make_device_handler({'name': 'ericsson'})
+
+ def test_rpc_default(self):
+ # It is a switch for user to turn on/off "nc" prefix, the "nc" prefix is disable by default
+ session = ncclient.transport.SSHSession(self.device_handler)
+ obj = RPC(session, self.device_handler, raise_mode=RaiseMode.ALL, timeout=0)
+
+ expected = """<?xml version="1.0" encoding="UTF-8"?><rpc xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="%s"><get-config><source><running/></source></get-config></rpc>""" % obj.id
+
+ node = new_ele("get-config")
+ child = sub_ele(node, "source")
+ sub_ele(child, "running")
+
+ rpc_node = obj._wrap(node)
+ self.assertEqual(rpc_node, expected)
+
+ def test_rpc_disable_nc_prefix(self):
+ # It is a switch for user to turn on/off "nc" prefix
+ self.device_handler = manager.make_device_handler({'name': 'ericsson', 'with_ns': False})
+ session = ncclient.transport.SSHSession(self.device_handler)
+ obj = RPC(session, self.device_handler, raise_mode=RaiseMode.ALL, timeout=0)
+
+ expected = """<?xml version="1.0" encoding="UTF-8"?><rpc xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="%s"><get-config><source><running/></source></get-config></rpc>""" % obj.id
+
+ node = new_ele("get-config")
+ child = sub_ele(node, "source")
+ sub_ele(child, "running")
+
+ # It is a switch for user to turn on/off "nc" prefix
+ rpc_node = obj._wrap(node)
+ self.assertEqual(rpc_node, expected)
+
+ def test_rpc_enable_nc_prefix(self):
+ # It is a switch for user to turn on/off "nc" prefix
+ self.device_handler = manager.make_device_handler({'name': 'ericsson', 'with_ns': True})
+ session = ncclient.transport.SSHSession(self.device_handler)
+ obj = RPC(session, self.device_handler, raise_mode=RaiseMode.ALL, timeout=0)
+
+ expected = """<?xml version="1.0" encoding="UTF-8"?><nc:rpc xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="%s"><nc:get-config><nc:source><nc:running/></nc:source></nc:get-config></nc:rpc>""" % obj.id
+
+ node = new_ele("get-config")
+ child = sub_ele(node, "source")
+ sub_ele(child, "running")
+
+ rpc_node = obj._wrap(node)
+ self.assertEqual(rpc_node, expected)
+
+ def test_rpc_enable_nc_prefix_exception(self):
+ # invalid value in "with_ns"
+ self.device_handler = manager.make_device_handler({'name': 'ericsson', 'with_ns': "Invalid_value"})
+ session = ncclient.transport.SSHSession(self.device_handler)
+ obj = RPC(session, self.device_handler, raise_mode=RaiseMode.ALL, timeout=0)
+
+ node = new_ele("get-config")
+ child = sub_ele(node, "source")
+ sub_ele(child, "running")
+
+ self.assertRaises(OperationError, obj._wrap, node)
+
+suite = unittest.TestSuite()
+unittest.TextTestRunner().run(suite)
+
|
XML namespace tag nc:
Hi, I dont think this is an issue with ncclient, but I cant find option to turn off xml namespace tagging.
ncclient sends this RPC,
`<?xml version="1.0" encoding="UTF-8"?>
<nc:rpc xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="urn:uuid:cdad1414-956b-47e7-8efc-fee92888475c">
<nc:get-config>
<nc:source>
<nc:running />
</nc:source>
</nc:get-config>
</nc:rpc>`
But,
my device (Ericsson Telecom Packet Core elements) do not support xml namespace tags,
and expect request to be formated as:
`<?xml version="1.0" encoding="UTF-8"?>
<rpc xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="urn:uuid:cdad1414-956b-47e7-8efc-fee92888475c">
<get-config>
<source>
<running />
</source>
</get-config>
</rpc>`
How can I turn off **nc:** ?
|
0.0
|
3380f1140791f4a8de5d303f919f1e4cc9532f32
|
[
"test/unit/devices/test_ericsson.py::TestEricssonDevice::test_rpc_default",
"test/unit/devices/test_ericsson.py::TestEricssonDevice::test_rpc_disable_nc_prefix",
"test/unit/devices/test_ericsson.py::TestEricssonDevice::test_rpc_enable_nc_prefix",
"test/unit/devices/test_ericsson.py::TestEricssonDevice::test_rpc_enable_nc_prefix_exception"
] |
[] |
{
"failed_lite_validators": [
"has_added_files",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-08-12 12:27:05+00:00
|
apache-2.0
| 4,110 |
|
ncclient__ncclient-485
|
diff --git a/ncclient/devices/default.py b/ncclient/devices/default.py
index f9107b4..bac3195 100644
--- a/ncclient/devices/default.py
+++ b/ncclient/devices/default.py
@@ -242,16 +242,16 @@ class DefaultDeviceHandler(object):
def handle_connection_exceptions(self, sshsession):
return False
- def handle_reply_parsing_error(self, root, reply):
+ def reply_parsing_error_transform(self, reply_cls):
"""
- Hook for working around bugs in replies from devices (the root emelent can be "fixed")
+ Hook for working around bugs in replies from devices (the root element can be "fixed")
- :param root: the rpc reply root element
- :param reply: the RPCReply object that is parsing 'root'
+ :param reply_cls: the RPCReply class that is parsing the reply 'root' xml element
- :return:
+ :return: transform function for the 'root' xml element of the RPC reply in case the normal parsing fails
"""
- pass
+ # No transformation by default
+ return None
def transform_reply(self):
return False
diff --git a/ncclient/devices/junos.py b/ncclient/devices/junos.py
index 01e7501..d751560 100644
--- a/ncclient/devices/junos.py
+++ b/ncclient/devices/junos.py
@@ -39,7 +39,7 @@ class JunosDeviceHandler(DefaultDeviceHandler):
def __init__(self, device_params):
super(JunosDeviceHandler, self).__init__(device_params)
- self.__reply_parsing_error_handler_by_cls = {
+ self.__reply_parsing_error_transform_by_cls = {
GetSchemaReply: fix_get_schema_reply
}
@@ -95,13 +95,9 @@ class JunosDeviceHandler(DefaultDeviceHandler):
c.exec_command("xml-mode netconf need-trailer")
return True
- def handle_reply_parsing_error(self, root, reply):
- reply_class = type(reply)
-
- # Apply transform if found
- transform_handler = self.__reply_parsing_error_handler_by_cls.get(reply_class)
- if transform_handler is not None:
- transform_handler(root)
+ def reply_parsing_error_transform(self, reply_cls):
+ # return transform function if found, else None
+ return self.__reply_parsing_error_transform_by_cls.get(reply_cls)
def transform_reply(self):
reply = '''<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
diff --git a/ncclient/operations/rpc.py b/ncclient/operations/rpc.py
index 2a6f32d..75f0839 100644
--- a/ncclient/operations/rpc.py
+++ b/ncclient/operations/rpc.py
@@ -144,9 +144,9 @@ class RPCReply(object):
ERROR_CLS = RPCError
"Subclasses can specify a different error class, but it should be a subclass of `RPCError`."
- def __init__(self, raw, device_handler, huge_tree=False):
+ def __init__(self, raw, huge_tree=False, parsing_error_transform=None):
self._raw = raw
- self._device_handler = device_handler
+ self._parsing_error_transform = parsing_error_transform
self._parsed = False
self._root = None
self._errors = []
@@ -171,8 +171,13 @@ class RPCReply(object):
try:
self._parsing_hook(root)
except Exception as e:
+ if self._parsing_error_transform is None:
+ # re-raise as we have no workaround
+ exc_type, exc_value, exc_traceback = sys.exc_info()
+ six.reraise(exc_type, exc_value, exc_traceback)
+
# Apply device specific workaround and try again
- self._device_handler.handle_reply_parsing_error(root, self)
+ self._parsing_error_transform(root)
self._parsing_hook(root)
self._parsed = True
@@ -181,6 +186,9 @@ class RPCReply(object):
"No-op by default. Gets passed the *root* element for the reply."
pass
+ def set_parsing_error_transform(self, transform_function):
+ self._parsing_error_transform = transform_function
+
@property
def xml(self):
"*rpc-reply* element as returned."
@@ -387,7 +395,14 @@ class RPC(object):
def deliver_reply(self, raw):
# internal use
- self._reply = self.REPLY_CLS(raw, self._device_handler, huge_tree=self._huge_tree)
+ self._reply = self.REPLY_CLS(raw, huge_tree=self._huge_tree)
+
+ # Set the reply_parsing_error transform outside the constructor, to keep compatibility for
+ # third party reply classes outside of ncclient
+ self._reply.set_parsing_error_transform(
+ self._device_handler.reply_parsing_error_transform(self.REPLY_CLS)
+ )
+
self._event.set()
def deliver_error(self, err):
|
ncclient/ncclient
|
7898dd9ec3404265418b05ef99e50bd670966084
|
diff --git a/test/unit/operations/test_rpc.py b/test/unit/operations/test_rpc.py
index 3267531..249e876 100644
--- a/test/unit/operations/test_rpc.py
+++ b/test/unit/operations/test_rpc.py
@@ -121,7 +121,7 @@ xml7 = """<rpc-error xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
class TestRPC(unittest.TestCase):
def test_rpc_reply(self):
- obj = RPCReply(xml4, self._mock_device_handler())
+ obj = RPCReply(xml4)
obj.parse()
self.assertTrue(obj.ok)
self.assertFalse(obj.error)
@@ -129,11 +129,11 @@ class TestRPC(unittest.TestCase):
self.assertTrue(obj._parsed)
def test_rpc_reply_huge_text_node_exception(self):
- obj = RPCReply(xml5_huge, self._mock_device_handler())
+ obj = RPCReply(xml5_huge)
self.assertRaises(etree.XMLSyntaxError, obj.parse)
def test_rpc_reply_huge_text_node_workaround(self):
- obj = RPCReply(xml5_huge, self._mock_device_handler(), huge_tree=True)
+ obj = RPCReply(xml5_huge, huge_tree=True)
obj.parse()
self.assertTrue(obj.ok)
self.assertFalse(obj.error)
@@ -145,7 +145,7 @@ class TestRPC(unittest.TestCase):
def test_rpc_send(self, mock_thread, mock_send):
device_handler, session = self._mock_device_handler_and_session()
obj = RPC(session, device_handler, raise_mode=RaiseMode.ALL, timeout=0)
- reply = RPCReply(xml1, device_handler)
+ reply = RPCReply(xml1)
obj._reply = reply
node = new_ele("commit")
sub_ele(node, "confirmed")
@@ -171,7 +171,7 @@ class TestRPC(unittest.TestCase):
def test_generic_rpc_send(self, mock_thread, mock_send):
device_handler, session = self._mock_device_handler_and_session()
obj = GenericRPC(session, device_handler, raise_mode=RaiseMode.ALL, timeout=0)
- reply = RPCReply(xml1, device_handler)
+ reply = RPCReply(xml1)
obj._reply = reply
rpc_command = 'edit-config'
filters = ('subtree', '<top xmlns="urn:mod1"/>')
@@ -206,7 +206,7 @@ class TestRPC(unittest.TestCase):
raise_mode=RaiseMode.ALL,
timeout=0,
async_mode=True)
- reply = RPCReply(xml1, device_handler)
+ reply = RPCReply(xml1)
obj._reply = reply
node = new_ele("commit")
result = obj._request(node)
@@ -217,7 +217,7 @@ class TestRPC(unittest.TestCase):
def test_rpc_timeout_error(self, mock_thread, mock_send):
device_handler, session = self._mock_device_handler_and_session()
obj = RPC(session, device_handler, raise_mode=RaiseMode.ALL, timeout=0)
- reply = RPCReply(xml1, device_handler)
+ reply = RPCReply(xml1)
obj.deliver_reply(reply)
node = new_ele("commit")
sub_ele(node, "confirmed")
@@ -229,7 +229,7 @@ class TestRPC(unittest.TestCase):
def test_rpc_rpcerror(self, mock_thread, mock_send):
device_handler, session = self._mock_device_handler_and_session()
obj = RPC(session, device_handler, raise_mode=RaiseMode.ALL, timeout=0)
- reply = RPCReply(xml1, device_handler)
+ reply = RPCReply(xml1)
obj._reply = reply
node = new_ele("commit")
sub_ele(node, "confirmed")
@@ -315,9 +315,6 @@ class TestRPC(unittest.TestCase):
obj.huge_tree = False
self.assertFalse(obj.huge_tree)
- def _mock_device_handler(self):
- return manager.make_device_handler({'name': 'default'})
-
def _mock_device_handler_and_session(self):
device_handler = manager.make_device_handler({'name': 'junos'})
capabilities = Capabilities(device_handler.get_capabilities())
|
TypeError: __init__() missing 1 required positional argument: 'device_handler'
Why an optional parameter was added in between (and not in the end)
https://github.com/ncclient/ncclient/pull/452/files#diff-9f0edad4d5a881f4165c86a0b3a9116fe67d9b7cdda2d9a888fb98bcb09311feR147
also as device handler is an optional parameter with None value, if a user doesnt pass, this line is bound to through NoneType exception
https://github.com/ncclient/ncclient/pull/452/files#diff-9f0edad4d5a881f4165c86a0b3a9116fe67d9b7cdda2d9a888fb98bcb09311feR175
```
File "/content/tmp/pyez/tests/unit/test_console.py", line 262, in test_load_console
op = cu.load(xml, format="xml")
File "/content/tmp/pyez/lib/jnpr/junos/utils/config.py", line 568, in load
return try_load(rpc_contents, rpc_xattrs, ignore_warning=ignore_warning)
File "/content/tmp/pyez/lib/jnpr/junos/utils/config.py", line 480, in try_load
rpc_contents, ignore_warning=ignore_warning, **rpc_xattrs
File "/content/tmp/pyez/lib/jnpr/junos/rpcmeta.py", line 288, in load_config
return self._junos.execute(rpc, ignore_warning=ignore_warning)
File "/content/tmp/pyez/lib/jnpr/junos/decorators.py", line 76, in wrapper
return function(*args, **kwargs)
File "/content/tmp/pyez/lib/jnpr/junos/decorators.py", line 31, in wrapper
return function(*args, **kwargs)
File "/content/tmp/pyez/lib/jnpr/junos/device.py", line 816, in execute
filter_xml=kvargs.get("filter_xml"),
File "/content/tmp/pyez/lib/jnpr/junos/decorators.py", line 117, in wrapper
rsp = function(self, *args, **kwargs)
File "/content/tmp/pyez/lib/jnpr/junos/console.py", line 279, in _rpc_reply
reply = self._tty.nc.rpc(rpc_cmd)
File "/content/tmp/pyez/lib/jnpr/junos/transport/tty_netconf.py", line 123, in rpc
reply = RPCReply(rsp, huge_tree=self._tty._huge_tree)
TypeError: __init__() missing 1 required positional argument: 'device_handler'
```
|
0.0
|
7898dd9ec3404265418b05ef99e50bd670966084
|
[
"test/unit/operations/test_rpc.py::TestRPC::test_generic_rpc_send",
"test/unit/operations/test_rpc.py::TestRPC::test_rpc_async",
"test/unit/operations/test_rpc.py::TestRPC::test_rpc_reply",
"test/unit/operations/test_rpc.py::TestRPC::test_rpc_reply_huge_text_node_exception",
"test/unit/operations/test_rpc.py::TestRPC::test_rpc_reply_huge_text_node_workaround",
"test/unit/operations/test_rpc.py::TestRPC::test_rpc_rpcerror",
"test/unit/operations/test_rpc.py::TestRPC::test_rpc_send",
"test/unit/operations/test_rpc.py::TestRPC::test_rpc_timeout_error"
] |
[
"test/unit/operations/test_rpc.py::TestRPC::test_rpc_capability_error",
"test/unit/operations/test_rpc.py::TestRPC::test_rpc_huge_text_node_exception",
"test/unit/operations/test_rpc.py::TestRPC::test_rpc_huge_text_node_workaround",
"test/unit/operations/test_rpc.py::TestRPC::test_rpc_rpcerror_multiple_errors",
"test/unit/operations/test_rpc.py::TestRPC::test_rpc_rpcerror_tag_to_attr"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-03-16 19:02:38+00:00
|
apache-2.0
| 4,111 |
|
nens__threedi-modelchecker-11
|
diff --git a/CHANGES.rst b/CHANGES.rst
index ffa7937..6811dc4 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -5,7 +5,13 @@ Changelog of threedi-modelchecker
0.8 (unreleased)
----------------
-- Nothing changed yet.
+- Removed threedigrid from requirements.
+
+- Configured extra checks: Pumpstation.lower_stop_level > Manhole.bottom_level.
+
+- Configured extra checks: Pipe.invert_level >= .Manhole.bottom_level.
+
+- Added additional check type: QueryCheck.
0.7 (2019-07-18)
diff --git a/requirements.txt b/requirements.txt
index b1441aa..d5ce1cd 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,4 @@
GeoAlchemy2==0.6.1
SQLAlchemy==1.3.1
-threedigrid==1.0.10
Click==7.0
psycopg2
\ No newline at end of file
diff --git a/setup.py b/setup.py
index 494f0e1..7f260cc 100644
--- a/setup.py
+++ b/setup.py
@@ -8,7 +8,6 @@ install_requires = [
"Click",
"GeoAlchemy2>=0.6",
"SQLAlchemy>=0.8",
- "threedigrid",
]
tests_require = [
diff --git a/threedi_modelchecker/checks/base.py b/threedi_modelchecker/checks/base.py
index 37fdce2..addf0f0 100644
--- a/threedi_modelchecker/checks/base.py
+++ b/threedi_modelchecker/checks/base.py
@@ -76,7 +76,7 @@ class GeneralCheck(BaseCheck):
Either specify what is valid with `criterion_valid` or what is invalid
with `criterion_invalid`.
- The criterion should be a sqlalchemy.sql.expression.BinaryExpression (https://docs.sqlalchemy.org/en/13/core/sqlelement.html#sqlalchemy.sql.expression.BinaryExpression) # noqa
+ The criterion should be a sqlalchemy.sql.expression.BinaryExpression (https://docs.sqlalchemy.org/en/13/core/sqlelement.html#sqlalchemy.sql.expression.BinaryExpression) # noqa
with operators being operators being column within `self.table.columns`
"""
@@ -143,6 +143,25 @@ class ConditionalCheck(BaseCheck):
)
+class QueryCheck(BaseCheck):
+ """Specify a sqlalchemy.orm.Query object to return invalid instances
+
+ Provides more freedom than the GeneralCheck where you need to specify a
+ sqlalchemy.sql.expression.BinaryExpression. For example, QueryCheck allows joins
+ on multiple tables"""
+
+ def __init__(self, column, invalid, message):
+ super().__init__(column)
+ self.invalid = invalid
+ self.message = message
+
+ def get_invalid(self, session):
+ return list(self.invalid.with_session(session))
+
+ def description(self):
+ return self.message
+
+
class ForeignKeyCheck(BaseCheck):
"""Check all values in `column` are in `reference_column`.
diff --git a/threedi_modelchecker/config.py b/threedi_modelchecker/config.py
index e4c0908..e98d55e 100644
--- a/threedi_modelchecker/config.py
+++ b/threedi_modelchecker/config.py
@@ -2,8 +2,9 @@ from sqlalchemy import Integer
from sqlalchemy import and_
from sqlalchemy import cast
from sqlalchemy import or_
+from sqlalchemy.orm import Query
-from .checks.base import ConditionalCheck
+from .checks.base import ConditionalCheck, QueryCheck
from .checks.base import GeneralCheck
from .checks.base import NotNullCheck
from .checks.factories import generate_enum_checks
@@ -172,6 +173,22 @@ RANGE_CHECKS = [
column=models.Weir.friction_value,
criterion_valid=models.Weir.friction_value >= 0,
),
+ GeneralCheck(
+ column=models.Manhole.bottom_level,
+ criterion_valid=models.Manhole.bottom_level >= models.Manhole.surface_level,
+ ),
+ GeneralCheck(
+ column=models.Manhole.bottom_level,
+ criterion_valid=models.Manhole.bottom_level >= models.Manhole.drain_level,
+ ),
+ GeneralCheck(
+ column=models.GlobalSetting.maximum_sim_time_step,
+ criterion_valid=models.GlobalSetting.maximum_sim_time_step >= models.GlobalSetting.sim_time_step, # noqa: E501
+ ),
+ GeneralCheck(
+ column=models.GlobalSetting.sim_time_step,
+ criterion_valid=models.GlobalSetting.sim_time_step >= models.GlobalSetting.minimum_sim_time_step, # noqa: E501
+ ),
]
OTHER_CHECKS = [
@@ -321,6 +338,58 @@ CONDITIONAL_CHECKS = [
])
)
),
+ QueryCheck(
+ column=models.Pumpstation.lower_stop_level,
+ invalid=Query(models.Pumpstation).join(
+ models.ConnectionNode,
+ models.Pumpstation.connection_node_start_id == models.ConnectionNode.id
+ ).join(
+ models.Manhole
+ ).filter(
+ models.Pumpstation.lower_stop_level <= models.Manhole.bottom_level,
+ ),
+ message="Pumpstation.lower_stop_level should be higher than "
+ "Manhole.bottom_level"
+ ),
+ QueryCheck(
+ column=models.Pumpstation.lower_stop_level,
+ invalid=Query(models.Pumpstation).join(
+ models.ConnectionNode,
+ models.Pumpstation.connection_node_end_id == models.ConnectionNode.id
+ ).join(
+ models.Manhole
+ ).filter(
+ models.Pumpstation.lower_stop_level <= models.Manhole.bottom_level,
+ ),
+ message="Pumpstation.lower_stop_level should be higher than "
+ "Manhole.bottom_level"
+ ),
+ QueryCheck(
+ column=models.Pipe.invert_level_end_point,
+ invalid=Query(models.Pipe).join(
+ models.ConnectionNode,
+ models.Pipe.connection_node_end_id == models.ConnectionNode.id
+ ).join(
+ models.Manhole
+ ).filter(
+ models.Pipe.invert_level_end_point < models.Manhole.bottom_level,
+ ),
+ message="Pipe.invert_level_end_point should be higher or equal than "
+ "Manhole.bottom_level"
+ ),
+ QueryCheck(
+ column=models.Pipe.invert_level_start_point,
+ invalid=Query(models.Pipe).join(
+ models.ConnectionNode,
+ models.Pipe.connection_node_start_id == models.ConnectionNode.id
+ ).join(
+ models.Manhole
+ ).filter(
+ models.Pipe.invert_level_start_point < models.Manhole.bottom_level, # noqa: E501
+ ),
+ message="Pipe.invert_level_start_point should be higher or equal than "
+ "Manhole.bottom_level"
+ )
]
diff --git a/threedi_modelchecker/threedi_model/models.py b/threedi_modelchecker/threedi_model/models.py
index 63ce372..8d27a29 100644
--- a/threedi_modelchecker/threedi_model/models.py
+++ b/threedi_modelchecker/threedi_model/models.py
@@ -325,9 +325,9 @@ class Manhole(Base):
shape = Column(String(4))
width = Column(Float)
length = Column(Float)
- surface_level = Column(Float)
- bottom_level = Column(Float)
- drain_level = Column(Float)
+ surface_level = Column(Float, nullable=False)
+ bottom_level = Column(Float, nullable=False)
+ drain_level = Column(Float, nullable=False)
sediment_level = Column(Float)
manhole_indicator = Column(Integer)
calculation_type = Column(IntegerEnum(constants.CalculationTypeNode))
|
nens/threedi-modelchecker
|
e4ed25acb9b2256c31dcd6e1410c9276517f24ac
|
diff --git a/tests/test_checks_base.py b/tests/test_checks_base.py
index f23d8e2..c9a8814 100644
--- a/tests/test_checks_base.py
+++ b/tests/test_checks_base.py
@@ -1,14 +1,18 @@
import factory
import pytest
-from sqlalchemy import cast
+from sqlalchemy import cast, and_
from sqlalchemy import func
from sqlalchemy import Integer
+from sqlalchemy.orm import Query
from tests import factories
-from threedi_modelchecker.checks.base import EnumCheck, ConditionalCheck, GeneralCheck
+from threedi_modelchecker.checks.base import ConditionalCheck
+from threedi_modelchecker.checks.base import EnumCheck
+from threedi_modelchecker.checks.base import GeneralCheck
from threedi_modelchecker.checks.base import ForeignKeyCheck
from threedi_modelchecker.checks.base import GeometryCheck
from threedi_modelchecker.checks.base import GeometryTypeCheck
+from threedi_modelchecker.checks.base import QueryCheck
from threedi_modelchecker.checks.base import NotNullCheck
from threedi_modelchecker.checks.base import TypeCheck
from threedi_modelchecker.checks.base import UniqueCheck
@@ -413,6 +417,106 @@ def test_conditional_check_advanced(session):
assert invalids[0].storage_area == connection_node2.storage_area
+def test_conditional_check_joining_criterion_valid(session):
+ # Joining on criterion valid fails because it takes the complement (negation)
+ # of the joins (instead of only the where statement (joins are in the where
+ # statement)).
+ connection_node1 = factories.ConnectionNodeFactory()
+ connection_node2 = factories.ConnectionNodeFactory()
+ manhole1 = factories.ManholeFactory(
+ connection_node=connection_node1, bottom_level=1.0
+ )
+ factories.ManholeFactory(
+ connection_node=connection_node2, bottom_level=-1.0
+ )
+ factories.PumpstationFactory(
+ connection_node_start=connection_node1, lower_stop_level=0.0
+ )
+ factories.PumpstationFactory(
+ connection_node_start=connection_node2, lower_stop_level=2.0
+ )
+
+ check_lower_stop_level_gt_bottom_level_compliment = GeneralCheck(
+ column=models.Manhole.bottom_level,
+ criterion_valid=and_(
+ models.Pumpstation.connection_node_start_id == models.ConnectionNode.id,
+ models.Manhole.connection_node_id == models.ConnectionNode.id,
+ models.Pumpstation.lower_stop_level > models.Manhole.bottom_level,
+ ),
+ )
+ invalids = check_lower_stop_level_gt_bottom_level_compliment.get_invalid(session)
+ assert len(invalids) != 1 # Note that 1 is what we actually want!
+ assert invalids[0].id == manhole1.id
+
+
+def test_query_check_with_joins(session):
+ connection_node1 = factories.ConnectionNodeFactory()
+ connection_node2 = factories.ConnectionNodeFactory()
+ factories.ManholeFactory(
+ connection_node=connection_node1, bottom_level=1.0
+ )
+ factories.ManholeFactory(
+ connection_node=connection_node2, bottom_level=-1.0
+ )
+ factories.PumpstationFactory(
+ connection_node_start=connection_node1, lower_stop_level=0.0
+ )
+ factories.PumpstationFactory(
+ connection_node_start=connection_node2, lower_stop_level=2.0
+ )
+
+ query = Query(models.ConnectionNode).join(
+ models.Pumpstation.connection_node_start
+ ).join(
+ models.Manhole, models.ConnectionNode.id == models.Manhole.connection_node_id
+ ).filter(
+ models.Pumpstation.lower_stop_level <= models.Manhole.bottom_level,
+ )
+ check = QueryCheck(
+ column=models.Manhole.bottom_level,
+ invalid=query,
+ message="Pumpstation.lower_stop_level should be higher than "
+ "Manhole.bottom_level"
+ )
+ invalids = check.get_invalid(session)
+ assert len(invalids) == 1
+ assert invalids[0].id == connection_node1.id
+
+
+def test_query_check_on_pumpstation(session):
+ connection_node1 = factories.ConnectionNodeFactory()
+ connection_node2 = factories.ConnectionNodeFactory()
+ factories.ManholeFactory(
+ connection_node=connection_node1, bottom_level=1.0
+ )
+ factories.ManholeFactory(
+ connection_node=connection_node2, bottom_level=-1.0
+ )
+ pumpstation_wrong = factories.PumpstationFactory(
+ connection_node_start=connection_node1, lower_stop_level=0.0
+ )
+ factories.PumpstationFactory(
+ connection_node_start=connection_node2, lower_stop_level=2.0
+ )
+
+ query = Query(models.Pumpstation).join(
+ models.ConnectionNode, models.Pumpstation.connection_node_start_id == models.ConnectionNode.id # noqa: E501
+ ).join(
+ models.Manhole, models.Manhole.connection_node_id == models.ConnectionNode.id
+ ).filter(
+ models.Pumpstation.lower_stop_level <= models.Manhole.bottom_level,
+ )
+ check = QueryCheck(
+ column=models.Pumpstation.lower_stop_level,
+ invalid=query,
+ message="Pumpstation lower_stop_level should be higher than Manhole "
+ "bottom_level"
+ )
+ invalids = check.get_invalid(session)
+ assert len(invalids) == 1
+ assert invalids[0].id == pumpstation_wrong.id
+
+
def test_get_valid(session):
factories.ConnectionNodeFactory(storage_area=1)
factories.ConnectionNodeFactory(storage_area=2)
diff --git a/tests/test_checks_factories.py b/tests/test_checks_factories.py
index 64326d8..828076f 100644
--- a/tests/test_checks_factories.py
+++ b/tests/test_checks_factories.py
@@ -22,7 +22,7 @@ def test_gen_not_unique_checks():
def test_gen_not_null_checks():
not_null_checks = generate_not_null_checks(models.Manhole.__table__)
- assert len(not_null_checks) == 4
+ assert len(not_null_checks) == 7
not_null_check_columns = [check.column for check in not_null_checks]
assert models.Manhole.id in not_null_check_columns
assert models.Manhole.code in not_null_check_columns
|
Add additional checks
The following fields of v2_manhole should not be null:
- bottom level
- surface level
- drain level
**Some extra checks:**
- manhole.bottom_level > manhole.surface_level
- manhole.bottom_level > manhole.drain_level
- global_settings.max_timestep >= global_settings.timestep
- global_settings.timestep >= global_settings.min_timestep
warning: manhole.surface_level < manhole.drain_level
**Cross-table reference checks (might be more difficult):**
- pumpstation.lower_stop_level > manhole.bottom_level
- pipe.invert_level >= manhole.bottom_level
warning: weir.crest_level > manhole.bottom_level manhole
|
0.0
|
e4ed25acb9b2256c31dcd6e1410c9276517f24ac
|
[
"tests/test_checks_base.py::test_sqlalchemy_to_sqlite_type_with_custom_type",
"tests/test_checks_factories.py::test_gen_foreign_key_checks",
"tests/test_checks_factories.py::test_gen_not_unique_checks",
"tests/test_checks_factories.py::test_gen_not_null_checks",
"tests/test_checks_factories.py::test_gen_geometry_check",
"tests/test_checks_factories.py::test_gen_enum_checks",
"tests/test_checks_factories.py::test_gen_enum_checks_varcharenum"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-09-04 08:27:07+00:00
|
mit
| 4,112 |
|
nens__threedi-modelchecker-17
|
diff --git a/CHANGES.rst b/CHANGES.rst
index a893a3e..8890c52 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -5,7 +5,7 @@ Changelog of threedi-modelchecker
0.9 (unreleased)
----------------
-- Nothing changed yet.
+- Fixed some misconfigured checks, see https://github.com/nens/threedi-modelchecker/issues/10.
0.8 (2019-11-26)
diff --git a/threedi_modelchecker/config.py b/threedi_modelchecker/config.py
index e98d55e..d51cce5 100644
--- a/threedi_modelchecker/config.py
+++ b/threedi_modelchecker/config.py
@@ -173,14 +173,6 @@ RANGE_CHECKS = [
column=models.Weir.friction_value,
criterion_valid=models.Weir.friction_value >= 0,
),
- GeneralCheck(
- column=models.Manhole.bottom_level,
- criterion_valid=models.Manhole.bottom_level >= models.Manhole.surface_level,
- ),
- GeneralCheck(
- column=models.Manhole.bottom_level,
- criterion_valid=models.Manhole.bottom_level >= models.Manhole.drain_level,
- ),
GeneralCheck(
column=models.GlobalSetting.maximum_sim_time_step,
criterion_valid=models.GlobalSetting.maximum_sim_time_step >= models.GlobalSetting.sim_time_step, # noqa: E501
@@ -210,6 +202,7 @@ OTHER_CHECKS = [
Use0DFlowCheck()
]
+
CONDITIONAL_CHECKS = [
ConditionalCheck(
criterion=(models.ConnectionNode.id == models.Manhole.connection_node_id),
@@ -226,12 +219,6 @@ CONDITIONAL_CHECKS = [
< models.CrossSectionLocation.bank_level)
)
),
- ConditionalCheck(
- criterion=(models.GlobalSetting.timestep_plus == True),
- check=NotNullCheck(
- column=models.GlobalSetting.maximum_sim_time_step,
- )
- ),
ConditionalCheck(
criterion=or_(
models.GlobalSetting.initial_groundwater_level_file != None,
@@ -374,7 +361,7 @@ CONDITIONAL_CHECKS = [
).filter(
models.Pipe.invert_level_end_point < models.Manhole.bottom_level,
),
- message="Pipe.invert_level_end_point should be higher or equal than "
+ message="Pipe.invert_level_end_point should be higher than or equal to "
"Manhole.bottom_level"
),
QueryCheck(
@@ -387,9 +374,36 @@ CONDITIONAL_CHECKS = [
).filter(
models.Pipe.invert_level_start_point < models.Manhole.bottom_level, # noqa: E501
),
- message="Pipe.invert_level_start_point should be higher or equal than "
+ message="Pipe.invert_level_start_point should be higher than or equal to "
"Manhole.bottom_level"
- )
+ ),
+ QueryCheck(
+ column=models.Manhole.bottom_level,
+ invalid=Query(models.Manhole).filter(
+ models.Manhole.drain_level < models.Manhole.bottom_level,
+ models.Manhole.calculation_type == constants.CalculationTypeNode.CONNECTED
+ ),
+ message="Manhole.drain_level >= Manhole.bottom_level when "
+ "Manhole.calculation_type is CONNECTED"
+ ),
+ QueryCheck(
+ column=models.Manhole.drain_level,
+ invalid=Query(models.Manhole).filter(
+ models.Manhole.calculation_type == constants.CalculationTypeNode.CONNECTED,
+ models.Manhole.drain_level == None
+ ),
+ message="Manhole.drain_level cannot be null when Manhole.calculation_type is "
+ "CONNECTED"
+ ),
+ QueryCheck(
+ column=models.GlobalSetting.maximum_sim_time_step,
+ invalid=Query(models.GlobalSetting).filter(
+ models.GlobalSetting.timestep_plus == True,
+ models.GlobalSetting.maximum_sim_time_step == None
+ ),
+ message="GlobalSettings.maximum_sim_time_step cannot be null when "
+ "GlobalSettings.timestep_plus is True."
+ ),
]
diff --git a/threedi_modelchecker/threedi_model/models.py b/threedi_modelchecker/threedi_model/models.py
index 284b1d4..342c9de 100644
--- a/threedi_modelchecker/threedi_model/models.py
+++ b/threedi_modelchecker/threedi_model/models.py
@@ -325,9 +325,9 @@ class Manhole(Base):
shape = Column(String(4))
width = Column(Float)
length = Column(Float)
- surface_level = Column(Float, nullable=False)
+ surface_level = Column(Float)
bottom_level = Column(Float, nullable=False)
- drain_level = Column(Float, nullable=False)
+ drain_level = Column(Float)
sediment_level = Column(Float)
manhole_indicator = Column(Integer)
calculation_type = Column(IntegerEnum(constants.CalculationTypeNode))
|
nens/threedi-modelchecker
|
5828888ad9d692b30db626f004c41be04537389f
|
diff --git a/tests/test_checks_base.py b/tests/test_checks_base.py
index c9a8814..11cfae2 100644
--- a/tests/test_checks_base.py
+++ b/tests/test_checks_base.py
@@ -599,3 +599,56 @@ def test_general_check_modulo_operator(session):
invalid = modulo_check.get_invalid(session)
assert len(invalid) == 1
assert invalid[0].id == global_settings_remainder.id
+
+
+def test_query_check_manhole_drain_level_calc_type_2(session):
+ # manhole.drain_level can be null, but if manhole.calculation_type == 2 (Connected)
+ # then manhole.drain_level >= manhole.bottom_level
+ factories.ManholeFactory(drain_level=None)
+ factories.ManholeFactory(drain_level=1)
+ m3_error = factories.ManholeFactory(
+ drain_level=None,
+ calculation_type=constants.CalculationTypeNode.CONNECTED
+ ) # drain_level cannot be null when calculation_type is CONNECTED
+ m4_error = factories.ManholeFactory(
+ drain_level=1,
+ bottom_level=2,
+ calculation_type=constants.CalculationTypeNode.CONNECTED
+ ) # bottom_level >= drain_level when calculation_type is CONNECTED
+ factories.ManholeFactory(
+ drain_level=1,
+ bottom_level=0,
+ calculation_type=constants.CalculationTypeNode.CONNECTED
+ )
+ factories.ManholeFactory(
+ drain_level=None,
+ bottom_level=0,
+ calculation_type=constants.CalculationTypeNode.EMBEDDED
+ )
+
+ query_drn_lvl_st_bttm_lvl = Query(models.Manhole).filter(
+ models.Manhole.drain_level < models.Manhole.bottom_level,
+ models.Manhole.calculation_type == constants.CalculationTypeNode.CONNECTED
+ )
+ query_invalid_not_null = Query(models.Manhole).filter(
+ models.Manhole.calculation_type == constants.CalculationTypeNode.CONNECTED,
+ models.Manhole.drain_level == None
+ )
+ check_drn_lvl_gt_bttm_lvl = QueryCheck(
+ column=models.Manhole.bottom_level,
+ invalid=query_drn_lvl_st_bttm_lvl,
+ message="Manhole.drain_level >= Manhoole.bottom_level when "
+ "Manhole.calculation_type is CONNECTED"
+ )
+ check_invalid_not_null = QueryCheck(
+ column=models.Manhole.drain_level,
+ invalid=query_invalid_not_null,
+ message="Manhole.drain_level cannot be null when Manhole.calculation_type is "
+ "CONNECTED"
+ )
+ errors1 = check_drn_lvl_gt_bttm_lvl.get_invalid(session)
+ errors2 = check_invalid_not_null.get_invalid(session)
+ assert len(errors1) == 1
+ assert len(errors2) == 1
+ assert m3_error.id == errors2[0].id
+ assert m4_error.id == errors1[0].id
diff --git a/tests/test_checks_factories.py b/tests/test_checks_factories.py
index 828076f..4086a31 100644
--- a/tests/test_checks_factories.py
+++ b/tests/test_checks_factories.py
@@ -22,7 +22,7 @@ def test_gen_not_unique_checks():
def test_gen_not_null_checks():
not_null_checks = generate_not_null_checks(models.Manhole.__table__)
- assert len(not_null_checks) == 7
+ assert len(not_null_checks) == 5
not_null_check_columns = [check.column for check in not_null_checks]
assert models.Manhole.id in not_null_check_columns
assert models.Manhole.code in not_null_check_columns
|
Add additional checks
The following fields of v2_manhole should not be null:
- bottom level
- surface level
- drain level
**Some extra checks:**
- manhole.bottom_level > manhole.surface_level
- manhole.bottom_level > manhole.drain_level
- global_settings.max_timestep >= global_settings.timestep
- global_settings.timestep >= global_settings.min_timestep
warning: manhole.surface_level < manhole.drain_level
**Cross-table reference checks (might be more difficult):**
- pumpstation.lower_stop_level > manhole.bottom_level
- pipe.invert_level >= manhole.bottom_level
warning: weir.crest_level > manhole.bottom_level (one of these checks is sketched as a query below)
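
For illustration, here is a hedged, self-contained sketch of how one of the conditional checks above can be phrased as a SQLAlchemy query, in the same spirit as the `QueryCheck` entries in the patch. The `Manhole` model and `CONNECTED` constant below are simplified stand-ins, not the project's actual `threedi_modelchecker.threedi_model.models` (assumes SQLAlchemy 1.4+):

```python
# Minimal stand-in model: drain_level is nullable (as the patch makes it),
# but it may not be NULL when calculation_type is CONNECTED.
from sqlalchemy import Column, Float, Integer, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()
CONNECTED = 2  # stand-in for constants.CalculationTypeNode.CONNECTED

class Manhole(Base):
    __tablename__ = "v2_manhole"
    id = Column(Integer, primary_key=True)
    bottom_level = Column(Float, nullable=False)
    drain_level = Column(Float)          # nullable, per the patch
    calculation_type = Column(Integer)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all([
    Manhole(id=1, bottom_level=0.0, drain_level=None, calculation_type=CONNECTED),  # invalid
    Manhole(id=2, bottom_level=0.0, drain_level=1.0, calculation_type=CONNECTED),   # valid
    Manhole(id=3, bottom_level=0.0, drain_level=None, calculation_type=0),          # valid
])

# The "invalid rows" query, mirroring the QueryCheck pattern from the patch:
invalid = session.query(Manhole).filter(
    Manhole.calculation_type == CONNECTED,
    Manhole.drain_level == None,  # noqa: E711 -- SQL NULL check, as in the patch
).all()
assert [m.id for m in invalid] == [1]
```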
|
0.0
|
5828888ad9d692b30db626f004c41be04537389f
|
[
"tests/test_checks_factories.py::test_gen_not_null_checks"
] |
[
"tests/test_checks_base.py::test_sqlalchemy_to_sqlite_type_with_custom_type",
"tests/test_checks_factories.py::test_gen_foreign_key_checks",
"tests/test_checks_factories.py::test_gen_not_unique_checks",
"tests/test_checks_factories.py::test_gen_geometry_check",
"tests/test_checks_factories.py::test_gen_enum_checks",
"tests/test_checks_factories.py::test_gen_enum_checks_varcharenum"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-11-26 13:38:37+00:00
|
mit
| 4,113 |
|
neo4j__neo4j-python-driver-498
|
diff --git a/neo4j/spatial/__init__.py b/neo4j/spatial/__init__.py
index f7085fd4..36e8d66f 100644
--- a/neo4j/spatial/__init__.py
+++ b/neo4j/spatial/__init__.py
@@ -53,7 +53,7 @@ class Point(tuple):
srid = None
def __new__(cls, iterable):
- return tuple.__new__(cls, iterable)
+ return tuple.__new__(cls, map(float, iterable))
def __repr__(self):
return "POINT(%s)" % " ".join(map(str, self))
|
neo4j/neo4j-python-driver
|
48b989955e85ff718da4e604f029275a860ab0cb
|
diff --git a/tests/unit/spatial/__init__.py b/tests/unit/spatial/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/unit/spatial/test_cartesian_point.py b/tests/unit/spatial/test_cartesian_point.py
new file mode 100644
index 00000000..ee86e5b9
--- /dev/null
+++ b/tests/unit/spatial/test_cartesian_point.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# Copyright (c) "Neo4j"
+# Neo4j Sweden AB [http://neo4j.com]
+#
+# This file is part of Neo4j.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import io
+import struct
+from unittest import TestCase
+
+from neo4j.data import DataDehydrator
+from neo4j.packstream import Packer
+from neo4j.spatial import CartesianPoint
+
+
+class CartesianPointTestCase(TestCase):
+
+ def test_alias(self):
+ x, y, z = 3.2, 4.0, -1.2
+ p = CartesianPoint((x, y, z))
+ self.assert_(hasattr(p, "x"))
+ self.assertEqual(p.x, x)
+ self.assert_(hasattr(p, "y"))
+ self.assertEqual(p.y, y)
+ self.assert_(hasattr(p, "z"))
+ self.assertEqual(p.z, z)
+
+ def test_dehydration_3d(self):
+ coordinates = (1, -2, 3.1)
+ p = CartesianPoint(coordinates)
+
+ dehydrator = DataDehydrator()
+ buffer = io.BytesIO()
+ packer = Packer(buffer)
+ packer.pack(dehydrator.dehydrate((p,))[0])
+ self.assertEqual(
+ buffer.getvalue(),
+ b"\xB4Y" +
+ b"\xC9" + struct.pack(">h", 9157) +
+ b"".join(map(lambda c: b"\xC1" + struct.pack(">d", c), coordinates))
+ )
+
+ def test_dehydration_2d(self):
+ coordinates = (.1, 0)
+ p = CartesianPoint(coordinates)
+
+ dehydrator = DataDehydrator()
+ buffer = io.BytesIO()
+ packer = Packer(buffer)
+ packer.pack(dehydrator.dehydrate((p,))[0])
+ self.assertEqual(
+ buffer.getvalue(),
+ b"\xB3X" +
+ b"\xC9" + struct.pack(">h", 7203) +
+ b"".join(map(lambda c: b"\xC1" + struct.pack(">d", c), coordinates))
+ )
diff --git a/tests/unit/spatial/test_point.py b/tests/unit/spatial/test_point.py
new file mode 100644
index 00000000..082f95c5
--- /dev/null
+++ b/tests/unit/spatial/test_point.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# Copyright (c) "Neo4j"
+# Neo4j Sweden AB [http://neo4j.com]
+#
+# This file is part of Neo4j.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import io
+import struct
+from unittest import TestCase
+
+from neo4j.data import DataDehydrator
+from neo4j.packstream import Packer
+from neo4j.spatial import (
+ Point,
+ point_type,
+)
+
+
+class PointTestCase(TestCase):
+
+ def test_wrong_type_arguments(self):
+ for argument in (("a", "b"), ({"x": 1.0, "y": 2.0})):
+ with self.subTest():
+ with self.assertRaises(ValueError):
+ Point(argument)
+
+ def test_number_arguments(self):
+ for argument in ((1, 2), (1.2, 2.1)):
+ with self.subTest():
+ p = Point(argument)
+ assert tuple(p) == argument
+
+ def test_dehydration(self):
+ MyPoint = point_type("MyPoint", ["x", "y"], {2: 1234})
+ coordinates = (.1, 0)
+ p = MyPoint(coordinates)
+
+ dehydrator = DataDehydrator()
+ buffer = io.BytesIO()
+ packer = Packer(buffer)
+ packer.pack(dehydrator.dehydrate((p,))[0])
+ self.assertEqual(
+ buffer.getvalue(),
+ b"\xB3X" +
+ b"\xC9" + struct.pack(">h", 1234) +
+ b"".join(map(lambda c: b"\xC1" + struct.pack(">d", c), coordinates))
+ )
+
+ def test_immutable_coordinates(self):
+ MyPoint = point_type("MyPoint", ["x", "y"], {2: 1234})
+ coordinates = (.1, 0)
+ p = MyPoint(coordinates)
+ with self.assertRaises(AttributeError):
+ p.x = 2.0
+ with self.assertRaises(AttributeError):
+ p.y = 2.0
+ with self.assertRaises(TypeError):
+ p[0] = 2.0
+ with self.assertRaises(TypeError):
+ p[1] = 2.0
diff --git a/tests/unit/spatial/test_wgs84_point.py b/tests/unit/spatial/test_wgs84_point.py
new file mode 100644
index 00000000..8f725a58
--- /dev/null
+++ b/tests/unit/spatial/test_wgs84_point.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# Copyright (c) "Neo4j"
+# Neo4j Sweden AB [http://neo4j.com]
+#
+# This file is part of Neo4j.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import io
+import struct
+from unittest import TestCase
+
+from neo4j.data import DataDehydrator
+from neo4j.packstream import Packer
+from neo4j.spatial import WGS84Point
+
+
+class WGS84PointTestCase(TestCase):
+
+ def test_alias(self):
+ x, y, z = 3.2, 4.0, -1.2
+ p = WGS84Point((x, y, z))
+ self.assert_(hasattr(p, "longitude"))
+ self.assertEqual(p.longitude, x)
+ self.assert_(hasattr(p, "latitude"))
+ self.assertEqual(p.latitude, y)
+ self.assert_(hasattr(p, "height"))
+ self.assertEqual(p.height, z)
+
+ def test_dehydration_3d(self):
+ coordinates = (1, -2, 3.1)
+ p = WGS84Point(coordinates)
+
+ dehydrator = DataDehydrator()
+ buffer = io.BytesIO()
+ packer = Packer(buffer)
+ packer.pack(dehydrator.dehydrate((p,))[0])
+ self.assertEqual(
+ buffer.getvalue(),
+ b"\xB4Y" +
+ b"\xC9" + struct.pack(">h", 4979) +
+ b"".join(map(lambda c: b"\xC1" + struct.pack(">d", c), coordinates))
+ )
+
+ def test_dehydration_2d(self):
+ coordinates = (.1, 0)
+ p = WGS84Point(coordinates)
+
+ dehydrator = DataDehydrator()
+ buffer = io.BytesIO()
+ packer = Packer(buffer)
+ packer.pack(dehydrator.dehydrate((p,))[0])
+ self.assertEqual(
+ buffer.getvalue(),
+ b"\xB3X" +
+ b"\xC9" + struct.pack(">h", 4326) +
+ b"".join(map(lambda c: b"\xC1" + struct.pack(">d", c), coordinates))
+ )
|
Crash when using spatial type as parameters
Hi folks!
I can't seem to get the spatial type working, just getting a crash (stack trace attached).
- Neo4j version: Enterprise 4.2.1
- Neo4j Mode: Single instance
- Python 3.9
- Driver version: driver 4.2.1
- Operating system: Ubuntu 20.04
- **Steps to reproduce**: run the following piece of code:
```
import neo4j
print(neo4j.__version__)
from neo4j.spatial import WGS84Point
from neo4j import GraphDatabase, basic_auth
driver = GraphDatabase.driver("bolt://localhost:7687", auth=basic_auth("neo4j", ""), encrypted=False)
point = WGS84Point(dict(x=2, y=48))
cypher = "WITH $point as point RETURN [point.x, point.y]"
params = {"point": point}
with driver.session() as session:
session.run(cypher, params)
```
- Expected behavior: query returns [2, 48] (or a meaningful error message in case of misuse)
- Actual behavior: `neo4j.exceptions.ServiceUnavailable: Failed to read from defunct connection IPv4Address(('localhost', 7687)) (IPv4Address(('127.0.0.1', 7687)))`
Full stack trace is attached.
[neo_spatial.log](https://github.com/neo4j/neo4j-python-driver/files/5968103/neo_spatial.log)
Also, only error in server logs is:
```
2021-02-11 18:00:06.353+0000 ERROR Client triggered an unexpected error [Neo.DatabaseError.General.UnknownError]: Unknown statement ID: -1. Existing IDs: [], reference 1c40d17c-1c03-4b40-b422-ceeef7fc49ec.
```
- Additional information:
- Same error with CartesianPoint
- Doing similar things with DateTime works well (and basically, anything else is working just fine :) )
- I haven't found an example using spatial types in the docs, but the example is taken from https://github.com/neo4j/neo4j-python-driver/blob/4.3/tests/integration/test_spatial_types.py (I think)
- Replacing the query with a `MATCH (n:Node) RETURN n.point` returns a `neo4j.spatial.WGS84Point` as expected
- Same error as https://github.com/neo4j/neo4j-python-driver/issues/484, though I'm not sure the root cause is the same.
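
For reference, a minimal stand-alone sketch modelled on the one-line fix (`map(float, iterable)`); this is a simplified `Point`, not the driver's full class. With the coercion, constructing a point from a dict fails fast with a `ValueError` (iterating a dict yields its keys, which `float()` cannot parse) instead of producing a tuple of strings that only crashes later inside packstream serialization:

```python
# Simplified Point modelled on the patch (illustration only, not driver code).
class Point(tuple):
    srid = None

    def __new__(cls, iterable):
        return tuple.__new__(cls, map(float, iterable))  # the patched line

    def __repr__(self):
        return "POINT(%s)" % " ".join(map(str, self))

print(Point((2, 48)))            # POINT(2.0 48.0) -- ints coerced to floats
try:
    Point(dict(x=2, y=48))       # iterating a dict yields its keys: "x", "y"
except ValueError as exc:
    print("rejected:", exc)      # float() cannot parse "x" -- fails fast
```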
|
0.0
|
48b989955e85ff718da4e604f029275a860ab0cb
|
[
"tests/unit/spatial/test_cartesian_point.py::CartesianPointTestCase::test_dehydration_2d",
"tests/unit/spatial/test_cartesian_point.py::CartesianPointTestCase::test_dehydration_3d",
"tests/unit/spatial/test_point.py::PointTestCase::test_dehydration",
"tests/unit/spatial/test_point.py::PointTestCase::test_wrong_type_arguments",
"tests/unit/spatial/test_wgs84_point.py::WGS84PointTestCase::test_dehydration_2d",
"tests/unit/spatial/test_wgs84_point.py::WGS84PointTestCase::test_dehydration_3d"
] |
[
"tests/unit/spatial/test_cartesian_point.py::CartesianPointTestCase::test_alias",
"tests/unit/spatial/test_point.py::PointTestCase::test_immutable_coordinates",
"tests/unit/spatial/test_point.py::PointTestCase::test_number_arguments",
"tests/unit/spatial/test_wgs84_point.py::WGS84PointTestCase::test_alias"
] |
{
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-02-15 13:41:35+00:00
|
apache-2.0
| 4,114 |
|
neo4j__neo4j-python-driver-510
|
diff --git a/neo4j/spatial/__init__.py b/neo4j/spatial/__init__.py
index f7085fd4..36e8d66f 100644
--- a/neo4j/spatial/__init__.py
+++ b/neo4j/spatial/__init__.py
@@ -53,7 +53,7 @@ class Point(tuple):
srid = None
def __new__(cls, iterable):
- return tuple.__new__(cls, iterable)
+ return tuple.__new__(cls, map(float, iterable))
def __repr__(self):
return "POINT(%s)" % " ".join(map(str, self))
diff --git a/neo4j/work/simple.py b/neo4j/work/simple.py
index ef1e6d8c..0ee46e7b 100644
--- a/neo4j/work/simple.py
+++ b/neo4j/work/simple.py
@@ -370,7 +370,7 @@ class Session(Workspace):
with driver.session() as session:
values = session.read_transaction(get_two_tx)
- :param transaction_function: a function that takes a transaction as an argument and does work with the transaction. `tx_function(tx, \*args, \*\*kwargs)`
+ :param transaction_function: a function that takes a transaction as an argument and does work with the transaction. `tx_function(tx, *args, **kwargs)`
:param args: arguments for the `transaction_function`
:param kwargs: key word arguments for the `transaction_function`
:return: a result as returned by the given unit of work
@@ -395,7 +395,7 @@ class Session(Workspace):
node_id = session.write_transaction(create_node_tx, "example")
- :param transaction_function: a function that takes a transaction as an argument and does work with the transaction. `tx_function(tx, \*args, \*\*kwargs)`
+ :param transaction_function: a function that takes a transaction as an argument and does work with the transaction. `tx_function(tx, *args, **kwargs)`
:param args: key word arguments for the `transaction_function`
:param kwargs: key word arguments for the `transaction_function`
:return: a result as returned by the given unit of work
|
neo4j/neo4j-python-driver
|
48b989955e85ff718da4e604f029275a860ab0cb
|
diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py
index fd5a4a7a..952fa0c2 100644
--- a/tests/integration/conftest.py
+++ b/tests/integration/conftest.py
@@ -189,12 +189,8 @@ def service(request):
if existing_service:
NEO4J_SERVICE = existing_service
else:
- try:
- NEO4J_SERVICE = Neo4jService(auth=NEO4J_AUTH, image=request.param, n_cores=NEO4J_CORES, n_replicas=NEO4J_REPLICAS)
- NEO4J_SERVICE.start(timeout=300)
- except urllib.error.HTTPError as error:
- # pytest.skip(str(error))
- pytest.xfail(str(error) + " " + request.param)
+ NEO4J_SERVICE = Neo4jService(auth=NEO4J_AUTH, image=request.param, n_cores=NEO4J_CORES, n_replicas=NEO4J_REPLICAS)
+ NEO4J_SERVICE.start(timeout=300)
yield NEO4J_SERVICE
if NEO4J_SERVICE is not None:
NEO4J_SERVICE.stop(timeout=300)
diff --git a/tests/integration/examples/test_driver_introduction_example.py b/tests/integration/examples/test_driver_introduction_example.py
index 5d592680..2496a27a 100644
--- a/tests/integration/examples/test_driver_introduction_example.py
+++ b/tests/integration/examples/test_driver_introduction_example.py
@@ -33,7 +33,7 @@ from neo4j.exceptions import ServiceUnavailable
from neo4j._exceptions import BoltHandshakeError
-# python -m pytest tests/integration/examples/test_aura_example.py -s -v
+# python -m pytest tests/integration/examples/test_driver_introduction_example.py -s -v
# tag::driver-introduction-example[]
class App:
@@ -91,10 +91,9 @@ class App:
if __name__ == "__main__":
- # Aura queries use an encrypted connection using the "neo4j+s" URI scheme
bolt_url = "%%BOLT_URL_PLACEHOLDER%%"
- user = "<Username for Neo4j Aura database>"
- password = "<Password for Neo4j Aura database>"
+ user = "<Username for database>"
+ password = "<Password for database>"
app = App(bolt_url, user, password)
app.create_friendship("Alice", "David")
app.find_person("Alice")
diff --git a/tests/unit/spatial/__init__.py b/tests/unit/spatial/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/unit/spatial/test_cartesian_point.py b/tests/unit/spatial/test_cartesian_point.py
new file mode 100644
index 00000000..ee86e5b9
--- /dev/null
+++ b/tests/unit/spatial/test_cartesian_point.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# Copyright (c) "Neo4j"
+# Neo4j Sweden AB [http://neo4j.com]
+#
+# This file is part of Neo4j.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import io
+import struct
+from unittest import TestCase
+
+from neo4j.data import DataDehydrator
+from neo4j.packstream import Packer
+from neo4j.spatial import CartesianPoint
+
+
+class CartesianPointTestCase(TestCase):
+
+ def test_alias(self):
+ x, y, z = 3.2, 4.0, -1.2
+ p = CartesianPoint((x, y, z))
+ self.assert_(hasattr(p, "x"))
+ self.assertEqual(p.x, x)
+ self.assert_(hasattr(p, "y"))
+ self.assertEqual(p.y, y)
+ self.assert_(hasattr(p, "z"))
+ self.assertEqual(p.z, z)
+
+ def test_dehydration_3d(self):
+ coordinates = (1, -2, 3.1)
+ p = CartesianPoint(coordinates)
+
+ dehydrator = DataDehydrator()
+ buffer = io.BytesIO()
+ packer = Packer(buffer)
+ packer.pack(dehydrator.dehydrate((p,))[0])
+ self.assertEqual(
+ buffer.getvalue(),
+ b"\xB4Y" +
+ b"\xC9" + struct.pack(">h", 9157) +
+ b"".join(map(lambda c: b"\xC1" + struct.pack(">d", c), coordinates))
+ )
+
+ def test_dehydration_2d(self):
+ coordinates = (.1, 0)
+ p = CartesianPoint(coordinates)
+
+ dehydrator = DataDehydrator()
+ buffer = io.BytesIO()
+ packer = Packer(buffer)
+ packer.pack(dehydrator.dehydrate((p,))[0])
+ self.assertEqual(
+ buffer.getvalue(),
+ b"\xB3X" +
+ b"\xC9" + struct.pack(">h", 7203) +
+ b"".join(map(lambda c: b"\xC1" + struct.pack(">d", c), coordinates))
+ )
diff --git a/tests/unit/spatial/test_point.py b/tests/unit/spatial/test_point.py
new file mode 100644
index 00000000..082f95c5
--- /dev/null
+++ b/tests/unit/spatial/test_point.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# Copyright (c) "Neo4j"
+# Neo4j Sweden AB [http://neo4j.com]
+#
+# This file is part of Neo4j.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import io
+import struct
+from unittest import TestCase
+
+from neo4j.data import DataDehydrator
+from neo4j.packstream import Packer
+from neo4j.spatial import (
+ Point,
+ point_type,
+)
+
+
+class PointTestCase(TestCase):
+
+ def test_wrong_type_arguments(self):
+ for argument in (("a", "b"), ({"x": 1.0, "y": 2.0})):
+ with self.subTest():
+ with self.assertRaises(ValueError):
+ Point(argument)
+
+ def test_number_arguments(self):
+ for argument in ((1, 2), (1.2, 2.1)):
+ with self.subTest():
+ p = Point(argument)
+ assert tuple(p) == argument
+
+ def test_dehydration(self):
+ MyPoint = point_type("MyPoint", ["x", "y"], {2: 1234})
+ coordinates = (.1, 0)
+ p = MyPoint(coordinates)
+
+ dehydrator = DataDehydrator()
+ buffer = io.BytesIO()
+ packer = Packer(buffer)
+ packer.pack(dehydrator.dehydrate((p,))[0])
+ self.assertEqual(
+ buffer.getvalue(),
+ b"\xB3X" +
+ b"\xC9" + struct.pack(">h", 1234) +
+ b"".join(map(lambda c: b"\xC1" + struct.pack(">d", c), coordinates))
+ )
+
+ def test_immutable_coordinates(self):
+ MyPoint = point_type("MyPoint", ["x", "y"], {2: 1234})
+ coordinates = (.1, 0)
+ p = MyPoint(coordinates)
+ with self.assertRaises(AttributeError):
+ p.x = 2.0
+ with self.assertRaises(AttributeError):
+ p.y = 2.0
+ with self.assertRaises(TypeError):
+ p[0] = 2.0
+ with self.assertRaises(TypeError):
+ p[1] = 2.0
diff --git a/tests/unit/spatial/test_wgs84_point.py b/tests/unit/spatial/test_wgs84_point.py
new file mode 100644
index 00000000..8f725a58
--- /dev/null
+++ b/tests/unit/spatial/test_wgs84_point.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# Copyright (c) "Neo4j"
+# Neo4j Sweden AB [http://neo4j.com]
+#
+# This file is part of Neo4j.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import io
+import struct
+from unittest import TestCase
+
+from neo4j.data import DataDehydrator
+from neo4j.packstream import Packer
+from neo4j.spatial import WGS84Point
+
+
+class WGS84PointTestCase(TestCase):
+
+ def test_alias(self):
+ x, y, z = 3.2, 4.0, -1.2
+ p = WGS84Point((x, y, z))
+ self.assert_(hasattr(p, "longitude"))
+ self.assertEqual(p.longitude, x)
+ self.assert_(hasattr(p, "latitude"))
+ self.assertEqual(p.latitude, y)
+ self.assert_(hasattr(p, "height"))
+ self.assertEqual(p.height, z)
+
+ def test_dehydration_3d(self):
+ coordinates = (1, -2, 3.1)
+ p = WGS84Point(coordinates)
+
+ dehydrator = DataDehydrator()
+ buffer = io.BytesIO()
+ packer = Packer(buffer)
+ packer.pack(dehydrator.dehydrate((p,))[0])
+ self.assertEqual(
+ buffer.getvalue(),
+ b"\xB4Y" +
+ b"\xC9" + struct.pack(">h", 4979) +
+ b"".join(map(lambda c: b"\xC1" + struct.pack(">d", c), coordinates))
+ )
+
+ def test_dehydration_2d(self):
+ coordinates = (.1, 0)
+ p = WGS84Point(coordinates)
+
+ dehydrator = DataDehydrator()
+ buffer = io.BytesIO()
+ packer = Packer(buffer)
+ packer.pack(dehydrator.dehydrate((p,))[0])
+ self.assertEqual(
+ buffer.getvalue(),
+ b"\xB3X" +
+ b"\xC9" + struct.pack(">h", 4326) +
+ b"".join(map(lambda c: b"\xC1" + struct.pack(">d", c), coordinates))
+ )
|
Prevent Deprecation Warning to users
## Users get DeprecationWarnings
- Operating system: Windows 10
Install the project and run [nox](https://nox.thea.codes/en/stable/) on the attached archive
[example_python.zip](https://github.com/neo4j/neo4j-python-driver/files/5842343/example_python.zip)
- Expected behavior
pylint should analyze the code and report user errors
- Actual behavior
pylint reports issues in the neo4j driver as well!
```
================================================= warnings summary =================================================
<unknown>:339
<unknown>:339: DeprecationWarning: invalid escape sequence \*
<unknown>:381
<unknown>:381: DeprecationWarning: invalid escape sequence \*
-- Docs: https://docs.pytest.org/en/stable/warnings.html
============================================= short test summary info ==============================================
FAILED example.py::PYLINT
```
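
A small, hedged reproduction of the warning class the patch removes (illustrative only, not driver code): `\*` inside a normal string literal is an invalid escape sequence, and compiling such source emits a warning (a `DeprecationWarning` on Python 3.6-3.11, a `SyntaxWarning` on 3.12+). The patched docstrings simply drop the backslashes; raw strings (`r"..."`) would be the alternative fix:

```python
import warnings

bad_doc = '"""tx_function(tx, \\*args, \\*\\*kwargs)"""'   # source containing \*
good_doc = '"""tx_function(tx, *args, **kwargs)"""'        # the patched wording

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    compile(bad_doc, "<docstring>", "exec")
print(any("invalid escape sequence" in str(w.message) for w in caught))  # True

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    compile(good_doc, "<docstring>", "exec")
print(len(caught))  # 0 -- the patched wording compiles cleanly
```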
|
0.0
|
48b989955e85ff718da4e604f029275a860ab0cb
|
[
"tests/unit/spatial/test_cartesian_point.py::CartesianPointTestCase::test_dehydration_2d",
"tests/unit/spatial/test_cartesian_point.py::CartesianPointTestCase::test_dehydration_3d",
"tests/unit/spatial/test_point.py::PointTestCase::test_dehydration",
"tests/unit/spatial/test_point.py::PointTestCase::test_wrong_type_arguments",
"tests/unit/spatial/test_wgs84_point.py::WGS84PointTestCase::test_dehydration_2d",
"tests/unit/spatial/test_wgs84_point.py::WGS84PointTestCase::test_dehydration_3d"
] |
[
"tests/unit/spatial/test_cartesian_point.py::CartesianPointTestCase::test_alias",
"tests/unit/spatial/test_point.py::PointTestCase::test_immutable_coordinates",
"tests/unit/spatial/test_point.py::PointTestCase::test_number_arguments",
"tests/unit/spatial/test_wgs84_point.py::WGS84PointTestCase::test_alias"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-03-02 09:23:44+00:00
|
apache-2.0
| 4,115 |
|
neo4j__neo4j-python-driver-803
|
diff --git a/neo4j/io/__init__.py b/neo4j/io/__init__.py
index fb71d5ef..862895d7 100644
--- a/neo4j/io/__init__.py
+++ b/neo4j/io/__init__.py
@@ -756,9 +756,9 @@ class IOPool:
+ self.connections_reservations[address])
can_create_new_connection = (infinite_pool_size
or pool_size < max_pool_size)
- self.connections_reservations[address] += 1
- if can_create_new_connection:
- return connection_creator
+ if can_create_new_connection:
+ self.connections_reservations[address] += 1
+ return connection_creator
def _acquire(self, address, deadline):
|
neo4j/neo4j-python-driver
|
2ecc4902b6047627fb5bd00c8efa1461d96bd9b2
|
diff --git a/tests/unit/io/test_neo4j_pool.py b/tests/unit/io/test_neo4j_pool.py
index 5853ceac..73087534 100644
--- a/tests/unit/io/test_neo4j_pool.py
+++ b/tests/unit/io/test_neo4j_pool.py
@@ -35,6 +35,7 @@ from neo4j.conf import (
RoutingConfig,
WorkspaceConfig
)
+from neo4j._deadline import Deadline
from neo4j.exceptions import (
ServiceUnavailable,
SessionExpired
@@ -271,3 +272,29 @@ def test_failing_opener_leaves_connections_in_use_alone(opener):
pool.acquire(READ_ACCESS, 30, 60, "test_db", None)
assert not cx1.closed()
+
+
+def test__acquire_new_later_with_room(opener):
+ config = PoolConfig()
+ config.max_connection_pool_size = 1
+ pool = Neo4jPool(
+ opener, config, WorkspaceConfig(), ROUTER_ADDRESS
+ )
+ assert pool.connections_reservations[READER_ADDRESS] == 0
+ creator = pool._acquire_new_later(READER_ADDRESS, Deadline(1))
+ assert pool.connections_reservations[READER_ADDRESS] == 1
+ assert callable(creator)
+
+
+def test__acquire_new_later_without_room(opener):
+ config = PoolConfig()
+ config.max_connection_pool_size = 1
+ pool = Neo4jPool(
+ opener, config, WorkspaceConfig(), ROUTER_ADDRESS
+ )
+ _ = pool.acquire(READ_ACCESS, 30, 60, "test_db", None)
+ # pool is full now
+ assert pool.connections_reservations[READER_ADDRESS] == 0
+ creator = pool._acquire_new_later(READER_ADDRESS, Deadline(1))
+ assert pool.connections_reservations[READER_ADDRESS] == 0
+ assert creator is None
|
Async driver keeping connections from pool open?
I encountered a new issue with the Async driver version 5.0.0a2.
The error states:
ERROR in app: Exception on request POST /recommend_contact
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/quart/app.py", line 1489, in handle_request
return await self.full_dispatch_request(request_context)
File "/usr/local/lib/python3.7/site-packages/quart/app.py", line 1514, in full_dispatch_request
result = await self.handle_user_exception(error)
File "/usr/local/lib/python3.7/site-packages/quart/app.py", line 964, in handle_user_exception
raise error
File "/usr/local/lib/python3.7/site-packages/quart/app.py", line 1512, in full_dispatch_request
result = await self.dispatch_request(request_context)
File "/usr/local/lib/python3.7/site-packages/quart/app.py", line 1557, in dispatch_request
return await self.ensure_async(handler)(**request_.view_args)
File "/usr/local/lib/python3.7/site-packages/quart/views.py", line 57, in view
return await current_app.ensure_async(self.dispatch_request)(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/quart_openapi/resource.py", line 71, in dispatch_request
return await handler(*args, **kwargs)
File "/api/app.py", line 119, in post
recommendations = await asyncio.gather(*tasks)
File "/api/app.py", line 108, in recommendation_task
el['offsite'])
File "/usr/local/lib/python3.7/site-packages/recommendation_engine-0.0.1-py3.7.egg/recommendation_engine/recommendation/recommender.py", line 32, in get_popular_contact
ret = await self.graph_handler.run_query_async(graph_query, contact_id=contact_id, p=p, offsite=offsite)
File "/usr/local/lib/python3.7/site-packages/recommendation_engine-0.0.1-py3.7.egg/recommendation_engine/graph_handler.py", line 636, in run_query_async
return await session.read_transaction(self.run_transaction, query, **kwargs)
File "/usr/local/lib/python3.7/site-packages/neo4j/_meta.py", line 73, in inner
return await f(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/neo4j/_async/work/session.py", line 656, in read_transaction
READ_ACCESS, transaction_function, *args, **kwargs
File "/usr/local/lib/python3.7/site-packages/neo4j/_async/work/session.py", line 478, in _run_transaction
timeout=timeout
File "/usr/local/lib/python3.7/site-packages/neo4j/_async/work/session.py", line 392, in _open_transaction
await self._connect(access_mode=access_mode)
File "/usr/local/lib/python3.7/site-packages/neo4j/_async/work/session.py", line 122, in _connect
await super()._connect(access_mode, **access_kwargs)
File "/usr/local/lib/python3.7/site-packages/neo4j/_async/work/workspace.py", line 194, in _connect
self._connection = await self._pool.acquire(**acquire_kwargs_)
File "/usr/local/lib/python3.7/site-packages/neo4j/_async/io/_pool.py", line 403, in acquire
self.address, deadline, liveness_check_timeout
File "/usr/local/lib/python3.7/site-packages/neo4j/_async/io/_pool.py", line 216, in _acquire
"{!r}s (timeout)".format(deadline.original_timeout)
neo4j.exceptions.ClientError: {code: None} {message: None}
I suspect it is an issue with connections being kept alive, or staying alive for a prolonged period of time: when I checked "/usr/local/lib/python3.7/site-packages/neo4j/_async/io/_pool.py" I noticed the error is thrown when the pool is full and there are no available connections. Also, the code and message are shown as None, so I am not sure whether that is a bug as well.
We are running Neo4j Version 4.4.3 Community edition as a single instance.
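
A toy model (an assumed simplification of `IOPool._acquire_new_later`, not the driver's code) of the bookkeeping bug the patch fixes: before the change, a reservation was counted even when the pool had no room, so every failed acquire permanently inflated the computed pool size and eventually starved all callers, matching the acquisition-timeout symptom above:

```python
from collections import defaultdict

class ToyPool:
    def __init__(self, max_size, fixed):
        self.max_size = max_size
        self.connections = defaultdict(list)
        self.reservations = defaultdict(int)
        self.fixed = fixed  # True = patched behaviour

    def acquire_new_later(self, address):
        pool_size = len(self.connections[address]) + self.reservations[address]
        can_create = pool_size < self.max_size
        if not self.fixed:
            self.reservations[address] += 1      # pre-patch: always reserved
            if can_create:
                return lambda: "connection"
        elif can_create:
            self.reservations[address] += 1      # patched: reserve only if room
            return lambda: "connection"
        return None

for fixed in (False, True):
    pool = ToyPool(max_size=1, fixed=fixed)
    pool.connections["db:7687"].append("in-use")   # pool already full
    pool.acquire_new_later("db:7687")              # must fail -> None
    print(fixed, pool.reservations["db:7687"])     # False 1 (leak) / True 0
```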
|
0.0
|
2ecc4902b6047627fb5bd00c8efa1461d96bd9b2
|
[
"tests/unit/io/test_neo4j_pool.py::test__acquire_new_later_without_room"
] |
[
"tests/unit/io/test_neo4j_pool.py::test_acquires_new_routing_table_if_deleted",
"tests/unit/io/test_neo4j_pool.py::test_acquires_new_routing_table_if_stale",
"tests/unit/io/test_neo4j_pool.py::test_removes_old_routing_table",
"tests/unit/io/test_neo4j_pool.py::test_chooses_right_connection_type[r]",
"tests/unit/io/test_neo4j_pool.py::test_chooses_right_connection_type[w]",
"tests/unit/io/test_neo4j_pool.py::test_reuses_connection",
"tests/unit/io/test_neo4j_pool.py::test_closes_stale_connections[True]",
"tests/unit/io/test_neo4j_pool.py::test_closes_stale_connections[False]",
"tests/unit/io/test_neo4j_pool.py::test_does_not_close_stale_connections_in_use",
"tests/unit/io/test_neo4j_pool.py::test_release_resets_connections",
"tests/unit/io/test_neo4j_pool.py::test_release_does_not_resets_closed_connections",
"tests/unit/io/test_neo4j_pool.py::test_release_does_not_resets_defunct_connections",
"tests/unit/io/test_neo4j_pool.py::test_multiple_broken_connections_on_close",
"tests/unit/io/test_neo4j_pool.py::test_failing_opener_leaves_connections_in_use_alone",
"tests/unit/io/test_neo4j_pool.py::test__acquire_new_later_with_room"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2022-09-16 12:35:00+00:00
|
apache-2.0
| 4,116 |
|
neogeny__TatSu-121
|
diff --git a/tatsu/contexts.py b/tatsu/contexts.py
index 29024cb..8baf4df 100644
--- a/tatsu/contexts.py
+++ b/tatsu/contexts.py
@@ -557,7 +557,7 @@ class ParseContext(object):
self._results[key] = result
initial = self._pos
- lastpos = initial
+ lastpos = initial - 1
while True:
try:
self._clear_recursion_errors()
|
neogeny/TatSu
|
bd7fea0ba31234ca49f6ce0b401677849483f329
|
diff --git a/test/grammar/left_recursion_test.py b/test/grammar/left_recursion_test.py
index 6a64e5c..6c46c0c 100644
--- a/test/grammar/left_recursion_test.py
+++ b/test/grammar/left_recursion_test.py
@@ -535,7 +535,6 @@ class LeftRecursionTests(unittest.TestCase):
assert ['a', ['a', 'a']] == parse(right_grammar, 'aaa')
- @unittest.skip('bug in calculation of nullable')
def test_nullable_void(self):
left_grammar = '''
@@left_recursion :: True
|
Void doesn't end recursion
```python
def test_nullable_void(self):
left_grammar = '''
@@left_recursion :: True
@@nameguard :: False
start = A $ ;
A = | A 'a' | () ;
'''
assert [['a', 'a'], 'a'] == parse(left_grammar, 'aaa') # warning: infinite recursion
```
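
As a hedged, abstract sketch (not TatSu's actual recursion machinery), the fix can be pictured as the termination rule of the seed-growing loop: each re-parse must advance strictly past `lastpos`. Seeding `lastpos` at `initial` rejects a zero-width first seed, which is exactly what the `()` (Void) alternative produces; seeding at `initial - 1` lets that empty seed count as progress once, so growing can start:

```python
def grow_seed(attempts, initial):
    """attempts: successive (result, end_pos) pairs from re-parsing the rule."""
    lastpos = initial - 1                    # the one-line fix (was: initial)
    best = None
    for result, newpos in attempts:
        if newpos <= lastpos:                # no progress beyond last attempt
            break
        lastpos, best = newpos, result
    return best

# A zero-width Void seed at pos 0, then two growing re-parses of 'aaa':
print(grow_seed([((), 0), (['a'], 1), ([['a'], 'a'], 2)], initial=0))
# -> [['a'], 'a']; with lastpos = initial the empty seed would be rejected
# immediately and the left-recursive parse could never get off the ground.
```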
|
0.0
|
bd7fea0ba31234ca49f6ce0b401677849483f329
|
[
"test/grammar/left_recursion_test.py::LeftRecursionTests::test_nullable_void"
] |
[
"test/grammar/left_recursion_test.py::LeftRecursionTests::test_associativity",
"test/grammar/left_recursion_test.py::LeftRecursionTests::test_calc",
"test/grammar/left_recursion_test.py::LeftRecursionTests::test_calc_indirect",
"test/grammar/left_recursion_test.py::LeftRecursionTests::test_change_start_rule",
"test/grammar/left_recursion_test.py::LeftRecursionTests::test_direct_left_recursion",
"test/grammar/left_recursion_test.py::LeftRecursionTests::test_dropped_input_bug",
"test/grammar/left_recursion_test.py::LeftRecursionTests::test_indirect_left_recursion",
"test/grammar/left_recursion_test.py::LeftRecursionTests::test_indirect_left_recursion_complex",
"test/grammar/left_recursion_test.py::LeftRecursionTests::test_indirect_left_recursion_with_cut",
"test/grammar/left_recursion_test.py::LeftRecursionTests::test_left_recursion_bug",
"test/grammar/left_recursion_test.py::LeftRecursionTests::test_left_recursion_with_right_associativity",
"test/grammar/left_recursion_test.py::LeftRecursionTests::test_leftrec_with_void",
"test/grammar/left_recursion_test.py::LeftRecursionTests::test_nested_left_recursion",
"test/grammar/left_recursion_test.py::LeftRecursionTests::test_no_left_recursion",
"test/grammar/left_recursion_test.py::LeftRecursionTests::test_not_at_top_level",
"test/grammar/left_recursion_test.py::LeftRecursionTests::test_partial_input_bug",
"test/grammar/left_recursion_test.py::LeftRecursionTests::test_with_gather"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2019-04-26 12:52:45+00:00
|
bsd-2-clause
| 4,117 |
|
neogeny__TatSu-183
|
diff --git a/.pylintrc b/.pylintrc
index 75a1593..280bbe7 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -10,7 +10,7 @@ extension-pkg-whitelist=
# Add files or directories to the blacklist. They should be base names, not
# paths.
-ignore=CVS,bootstrap.py,model.py
+ignore=CVS,bootstrap.py,model.py,tmp
# Add files or directories matching the regex patterns to the blacklist. The
# regex matches against base names, not paths.
diff --git a/README.md b/README.md
index e16689f..a3df3b6 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,7 @@
[](https://raw.githubusercontent.com/neogeny/tatsu/master/LICENSE.txt) [](https://pypi.python.org/pypi/tatsu) [](https://badge.fury.io/py/TatSu) [](https://circleci.com/gh/neogeny/TatSu) [](http://tatsu.readthedocs.io/en/stable/)
> *At least for the people who send me mail about a new language that they're designing, the general advice is: do it to learn about how to write a compiler. Don't have any expectations that anyone will use it, unless you hook up with some sort of organization in a position to push it hard. It's a lottery, and some can buy a lot of the tickets. There are plenty of beautiful languages (more beautiful than C) that didn't catch on. But someone does win the lottery, and doing a language at least teaches you something.*
->
+>
> [Dennis Ritchie](http://en.wikipedia.org/wiki/Dennis_Ritchie) (1941-2011) Creator of the [C](http://en.wikipedia.org/wiki/C_language) programming language and of [Unix](http://en.wikipedia.org/wiki/Unix)
# 竜 **TatSu**
@@ -44,21 +44,21 @@ $ pip install TatSu
竜 **TatSu** can be used as a library, much like [Python](http://python.org)'s `re`, by embedding grammars as strings and generating grammar models instead of generating [Python](http://python.org) code.
- - `tatsu.compile(grammar, name=None, **kwargs)`
-
+- `tatsu.compile(grammar, name=None, **kwargs)`
+
Compiles the grammar and generates a *model* that can subsequently be used for parsing input with.
- - `tatsu.parse(grammar, input, **kwargs)`
-
+- `tatsu.parse(grammar, input, **kwargs)`
+
Compiles the grammar and parses the given input producing an [AST](http://en.wikipedia.org/wiki/Abstract_syntax_tree) as result. The result is equivalent to calling:
-
+
model = compile(grammar)
ast = model.parse(input)
-
+
Compiled grammars are cached for efficiency.
- - `tatsu.to_python_sourcecode(grammar, name=None, filename=None, **kwargs)`
-
+- `tatsu.to_python_sourcecode(grammar, name=None, filename=None, **kwargs)`
+
Compiles the grammar to the [Python](http://python.org) sourcecode that implements the parser.
This is an example of how to use 竜 **TatSu** as a library:
@@ -150,7 +150,7 @@ For a detailed explanation of what 竜 **TatSu** is capable of, please see the [
## Questions?
-Please use the [\[tatsu\]](https://stackoverflow.com/tags/tatsu/info) tag on [StackOverflow](http://stackoverflow.com/tags/tatsu/info) for general Q\&A, and limit Github issues to bugs, enhancement proposals, and feature requests.
+Please use the [\[tatsu\]](https://stackoverflow.com/tags/tatsu/info) tag on [StackOverflow](http://stackoverflow.com/tags/tatsu/info) for general Q&A, and limit Github issues to bugs, enhancement proposals, and feature requests.
## Changes
diff --git a/tatsu/contexts.py b/tatsu/contexts.py
index a5af681..d40cf77 100644
--- a/tatsu/contexts.py
+++ b/tatsu/contexts.py
@@ -205,6 +205,7 @@ class ParseContext(object):
semantics=None,
trace=False,
whitespace=None,
+ ignorecase=None,
**kwargs):
try:
self.parseinfo = kwargs.pop('parseinfo', self.parseinfo)
@@ -215,6 +216,7 @@ class ParseContext(object):
semantics=semantics,
trace=trace if trace is not None else self.trace,
whitespace=whitespace if whitespace is not None else self.whitespace,
+ ignorecase=ignorecase,
**kwargs
)
rule = self._find_rule(rule_name)
@@ -848,7 +850,7 @@ class ParseContext(object):
def _check_name(self, name=None):
if name is None:
name = str(self.last_node)
- if self.ignorecase or self._tokenizer.ignorecase:
+ if self.ignorecase or self.tokenizer.ignorecase:
name = name.upper()
if name in self.keywords:
raise FailedKeywordSemantics('"%s" is a reserved word' % name)
diff --git a/tatsu/grammars.py b/tatsu/grammars.py
index 8a90a8d..bb61bec 100644
--- a/tatsu/grammars.py
+++ b/tatsu/grammars.py
@@ -954,6 +954,7 @@ class Grammar(Model):
semantics=None,
filename='Unknown',
whitespace=None,
+ ignorecase=None,
nameguard=None,
namechars=None,
left_recursion=None,
@@ -983,6 +984,10 @@ class Grammar(Model):
whitespace = directives.get('whitespace')
self.whitespace = whitespace
+ if ignorecase is None:
+ ignorecase = directives.get('ignorecase')
+ self.ignorecase = ignorecase
+
if nameguard is None:
nameguard = directives.get('nameguard')
self.nameguard = nameguard
@@ -1008,6 +1013,8 @@ class Grammar(Model):
self.eol_comments_re = eol_comments_re
self.keywords = keywords or set()
+ if ignorecase:
+ self.keywords = {k.upper() for k in self.keywords}
self._adopt_children(rules)
@@ -1082,6 +1089,7 @@ class Grammar(Model):
trace=False,
context=None,
whitespace=None,
+ ignorecase=None,
left_recursion=None,
comments_re=None,
eol_comments_re=None,
@@ -1106,6 +1114,7 @@ class Grammar(Model):
nameguard = notnone(nameguard, self.nameguard)
namechars = notnone(namechars, self.namechars)
whitespace = notnone(whitespace, self.whitespace)
+ ignorecase = notnone(ignorecase, self.ignorecase)
if whitespace:
whitespace = re.compile(whitespace)
@@ -1116,6 +1125,7 @@ class Grammar(Model):
semantics=semantics,
trace=trace,
whitespace=whitespace,
+ ignorecase=ignorecase,
comments_re=comments_re,
eol_comments_re=eol_comments_re,
left_recursion=left_recursion,
diff --git a/tox.ini b/tox.ini
index f26f242..372bb77 100644
--- a/tox.ini
+++ b/tox.ini
@@ -26,7 +26,7 @@ deps =
[flake8]
ignore = N802, W504, W605
max-line-length = 200
-max-complexity = 10
+max-complexity = 16
exclude =
parsers,
docs,
|
neogeny/TatSu
|
7e3a58843d8a70b6dddaf70dfbcb2abaecad07cb
|
diff --git a/test/grammar/keyword_test.py b/test/grammar/keyword_test.py
index d8574da..da65540 100644
--- a/test/grammar/keyword_test.py
+++ b/test/grammar/keyword_test.py
@@ -132,3 +132,29 @@ class KeywordTests(unittest.TestCase):
self.fail('accepted keyword "%s" as name' % k)
except FailedParse as e:
self.assertTrue('"%s" is a reserved word' % k in str(e))
+
+ def test_ignorecase_keywords(self):
+ grammar = '''
+ @@ignorecase :: True
+ @@keyword :: if
+
+ start = rule ;
+
+ @name
+ rule = @:word if_exp $ ;
+
+ if_exp = 'if' digit ;
+
+ word = /\w+/ ;
+ digit = /\d/ ;
+ '''
+
+ model = compile(grammar, 'test')
+
+ model.parse('nonIF if 1', trace=True)
+
+ with self.assertRaises(FailedParse):
+ model.parse('i rf if 1', trace=True)
+
+ with self.assertRaises(FailedParse):
+ model.parse('IF if 1', trace=True)
diff --git a/test/parser_equivalence_test.py b/test/parser_equivalence_test.py
index dacaf60..337b608 100644
--- a/test/parser_equivalence_test.py
+++ b/test/parser_equivalence_test.py
@@ -3,6 +3,8 @@ import subprocess # noqa
import py_compile # noqa
from pathlib import Path
+import pytest
+
from tatsu.tool import compile, gencode
INPUT = """
@@ -38,11 +40,13 @@ GRAMMAR = """
;
"""
+
def test_model_parse():
model = compile(grammar=GRAMMAR)
assert OUTPUT == model.parse(INPUT)
[email protected]('work in progress')
def test_codegen_parse():
init_filename = Path('./tmp/__init__.py')
init_filename.touch(exist_ok=True)
@@ -65,7 +69,7 @@ def test_codegen_parse():
# }
# ).decode()
# print(output)
- from tmp.parser import UnknownParser
+ from tmp.parser import UnknownParser # pylint: disable=all
output = UnknownParser().parse(INPUT)
assert output == OUTPUT
finally:
|
Ignorecase needs uppercase keywords when reserving
Reserving keywords and ignoring case does not work as expected:
```
@@ignorecase :: True
@@keyword :: if
@name
rule = /\w+/ if_exp $ ;
if_exp = 'if' /\d/ ;
```
**The 'if' is accepted as the name by `rule`, while it should raise an error.**
This is because in [contexts.py](https://github.com/neogeny/TatSu/blob/master/tatsu/contexts.py), you should apply either `upper` (or `lower`) to both name and keywords:
```
def _check_name(self, name=None):
if name is None:
name = str(self.last_node)
if self.ignorecase or self._tokenizer.ignorecase:
name = name.upper()
if name in self.keywords:
raise FailedKeywordSemantics('"%s" is a reserved word' % name)
```
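
A stand-alone model of the corrected check (an assumed simplification of `_check_name`, plus the keyword-set uppercasing the patch adds at grammar construction): with `@@ignorecase`, the candidate name and the keyword set must be normalized to the same case before membership is tested:

```python
def check_name(name, keywords, ignorecase):
    if ignorecase:
        name = name.upper()
        keywords = {k.upper() for k in keywords}   # what the patch adds
    if name in keywords:
        raise ValueError('"%s" is a reserved word' % name)

check_name("nonIF", {"if"}, ignorecase=True)       # accepted
try:
    check_name("IF", {"if"}, ignorecase=True)      # rejected after the fix
except ValueError as exc:
    print(exc)                                     # "IF" is a reserved word
```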
|
0.0
|
7e3a58843d8a70b6dddaf70dfbcb2abaecad07cb
|
[
"test/grammar/keyword_test.py::KeywordTests::test_ignorecase_keywords"
] |
[
"test/grammar/keyword_test.py::KeywordTests::test_check_keywords",
"test/grammar/keyword_test.py::KeywordTests::test_check_unicode_name",
"test/grammar/keyword_test.py::KeywordTests::test_define_keywords",
"test/grammar/keyword_test.py::KeywordTests::test_keywords_in_rule_names",
"test/grammar/keyword_test.py::KeywordTests::test_python_keywords_in_rule_names",
"test/grammar/keyword_test.py::KeywordTests::test_sparse_keywords",
"test/parser_equivalence_test.py::test_model_parse"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-03-20 18:34:21+00:00
|
bsd-4-clause
| 4,118 |
|
neogeny__TatSu-309
|
diff --git a/README.rst b/README.rst
index eb57f92..683c25c 100644
--- a/README.rst
+++ b/README.rst
@@ -32,12 +32,10 @@ input, much like the `re`_ module does with regular expressions, or it can gener
|TatSu| supports `left-recursive`_ rules in PEG_ grammars using the
algorithm_ by *Laurent* and *Mens*. The generated AST_ has the expected left associativity.
-Starting with version 5.9.0 |TatSu| requires Python 3.11 or later.
-While no code in |TatSu| yet depends on new language or standard library features,
+|TatSu| requires a maintained version of Python (3.11 at the moment). While no code
+in |TatSu| yet depends on new language or standard library features,
the authors don't want to be constrained by Python version comaptibility consideration
when developing features that will be part future releases.
-Therefore, to simplify version pinning for users of the library,
-they decided to proactively bump the Python minimum required version to 3.10.
|TatSu| releases in the 5.7 series closely track releases in the 5.8 series
while maintaining compatibility with Python 3.8 and later.
diff --git a/tatsu/contexts.py b/tatsu/contexts.py
index 06458c4..f5d0f96 100644
--- a/tatsu/contexts.py
+++ b/tatsu/contexts.py
@@ -569,6 +569,7 @@ class ParseContext:
@property
def memokey(self):
+ self.tokenizer.eat_whitespace()
return MemoKey(self._pos, self.rule, self.substate)
def _memoize(self, key, memo):
diff --git a/tatsu/tokenizing.py b/tatsu/tokenizing.py
index 918f082..bc7077f 100644
--- a/tatsu/tokenizing.py
+++ b/tatsu/tokenizing.py
@@ -40,6 +40,9 @@ class Tokenizer:
def token(self):
return self.current
+ def eat_whitespace(self):
+ raise NotImplementedError
+
def next(self):
raise NotImplementedError
|
neogeny/TatSu
|
51d9f70b4cf068142e719ebeabaeab766e0a9b92
|
diff --git a/test/parsing_test.py b/test/parsing_test.py
index 70b79e3..5c8a5a9 100644
--- a/test/parsing_test.py
+++ b/test/parsing_test.py
@@ -1,9 +1,10 @@
# -*- coding: utf-8 -*-
+import json
import unittest
import tempfile
import tatsu
-from tatsu.util import trim, eval_escapes
+from tatsu.util import trim, eval_escapes, asjson
from tatsu.grammars import EBNFBuffer
@@ -115,6 +116,20 @@ class ParsingTests(unittest.TestCase):
model = tatsu.compile(grammar=grammar)
model.parse('4 + 5')
+ def test_skip_whitespace(self):
+ grammar = '''
+ statement = 'FOO' subject $ ;
+ subject = name:id ;
+ id = /[a-z]+/ ;
+ '''
+ model = tatsu.compile(grammar=grammar)
+ ast = model.parse('FOO' + ' ' * 3 + 'bar', parseinfo=True)
+ print(json.dumps(asjson(ast), indent=2))
+ subject = ast[1]
+ assert subject['name'] == 'bar'
+ parseinfo = subject['parseinfo']
+ assert parseinfo.pos == parseinfo.tokenizer.text.index('bar')
+
def suite():
return unittest.TestLoader().loadTestsFromTestCase(ParsingTests)
|
Surprising whitespace handling
Given this example
```python
import tatsu
parser = tatsu.compile(r'''
statement = 'SELECT' 'FROM' table $ ;
table = name:id ;
id = /[a-z]+/ ;
''')
string = 'SELECT FROM foo'
value = parser.parse(string, parseinfo=True)
table = value[2]
assert table['name'] == 'foo'
parseinfo = table['parseinfo']
print(parseinfo.tokenizer.text)
print(f'{parseinfo.pos * " "}^')
```
I find whitespace handling a bit surprising. The whitespace between `FROM` and the table name is not skipped over before matching the `table` rule. This results in correct parsing, but the `parseinfo` for the `table` rule points at the whitespace rather than at the name. I would have expected whitespace to be skipped before attempting to match the `table` rule.
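
The change is easiest to see through a condensed version of the regression test added in the patch (runnable only against a TatSu build that includes this fix): with whitespace eaten before the memoization key is taken, `parseinfo.pos` for `subject` lands on `bar` itself rather than on the whitespace that precedes it:

```python
import tatsu

model = tatsu.compile('''
    statement = 'FOO' subject $ ;
    subject = name:id ;
    id = /[a-z]+/ ;
''')
ast = model.parse('FOO   bar', parseinfo=True)
subject = ast[1]
assert subject['name'] == 'bar'
parseinfo = subject['parseinfo']
# After the fix, pos points at 'bar', not at the whitespace before it:
assert parseinfo.pos == parseinfo.tokenizer.text.index('bar')
```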
|
0.0
|
51d9f70b4cf068142e719ebeabaeab766e0a9b92
|
[
"test/parsing_test.py::ParsingTests::test_skip_whitespace"
] |
[
"test/parsing_test.py::ParsingTests::test_escape_sequences",
"test/parsing_test.py::ParsingTests::test_include",
"test/parsing_test.py::ParsingTests::test_multiple_include",
"test/parsing_test.py::ParsingTests::test_real_include",
"test/parsing_test.py::ParsingTests::test_rule_capitalization",
"test/parsing_test.py::ParsingTests::test_start",
"test/parsing_test.py::ParsingTests::test_startrule_issue62"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-08-29 17:00:07+00:00
|
bsd-4-clause
| 4,119 |
|
neovasili__tflens-34
|
diff --git a/README.md b/README.md
index 4c1fad7..3b089dd 100644
--- a/README.md
+++ b/README.md
@@ -76,6 +76,8 @@ optional arguments:
according to the following pattern: bucket-name/tfstate-key
-m FILTER_MODULE, --filter-module FILTER_MODULE
Applies a regular expression to the module field in order to filter the resources list to output
+ -n FILTER_NAME, --filter-name FILTER_NAME
+ Applies a regular expression to the name field in order to filter the resources list to output
```
### Examples
@@ -93,6 +95,16 @@ View table of resources for a tfstate located in the file system in the director
View filtered table of resources for a tfstate located in the file system in the directory:
+```bash
+➜ tflens --filter-name "current"
+
+| provider | type | mode | name | module |
+|--------------|---------------------|---------|-------------------------------|--------|
+| provider.aws | aws_caller_identity | data | current_user | test |
+```
+
+Or:
+
```bash
➜ tflens --filter-module "test"
@@ -101,6 +113,16 @@ View filtered table of resources for a tfstate located in the file system in the
| provider.aws | aws_caller_identity | data | current_user | test |
```
+Or:
+
+```bash
+➜ tflens --filter-name "current" --filter-module "test"
+
+| provider | type | mode | name | module |
+|--------------|---------------------|---------|-------------------------------|--------|
+| provider.aws | aws_caller_identity | data | current_user | test |
+```
+
View table of resources for a tfstate located in the file system in the `dev/terraform.tfstate.json` path:
```bash
diff --git a/tflens/__main__.py b/tflens/__main__.py
index 45b72cf..8991726 100644
--- a/tflens/__main__.py
+++ b/tflens/__main__.py
@@ -43,12 +43,21 @@ parser.add_argument('-m', '--filter-module',
filter the resources list to output",
default="")
+parser.add_argument('-n', '--filter-name',
+ type=str,
+ action="store",
+ dest="filter_name",
+ help="Applies a regular expression to the name field in order to \
+ filter the resources list to output",
+ default="")
+
args = parser.parse_args()
ARGS_REMOTE = args.remote
ARGS_FILE_LOCATION = args.file_location
ARGS_OUTPUT = args.output
ARGS_FILTER_MODULE = args.filter_module
+ARGS_FILTER_NAME = args.filter_name
if not ARGS_FILE_LOCATION:
ARGS_FILE_LOCATION = "{}/terraform.tfstate".format(Path().absolute())
@@ -61,6 +70,7 @@ def main():
tfstate_controller = remote_router[ARGS_REMOTE](
file_location=ARGS_FILE_LOCATION,
+ name_filter_expression=ARGS_FILTER_NAME,
module_filter_expression=ARGS_FILTER_MODULE
)
diff --git a/tflens/controller/tfstate.py b/tflens/controller/tfstate.py
index f4a58ec..7f0fec6 100644
--- a/tflens/controller/tfstate.py
+++ b/tflens/controller/tfstate.py
@@ -11,11 +11,17 @@ from tflens.helper.filter import TfStateFilterHelper
class TfStateController():
- def __init__(self, tfstate_content: dict, module_filter_expression: str=None):
+ def __init__(
+ self,
+ tfstate_content: dict,
+ name_filter_expression: str=None,
+ module_filter_expression: str=None
+ ):
self.__tfstate = TfState(
content=tfstate_content
)
self.__resources = TfStateFilterHelper(
+ name_filter_expression=name_filter_expression,
module_filter_expression=module_filter_expression,
resources=self.__tfstate.get_resources()
).apply_filter()
@@ -43,24 +49,36 @@ class TfStateController():
class LocalTfStateController(TfStateController):
- def __init__(self, file_location: str, module_filter_expression: str=None):
+ def __init__(
+ self,
+ file_location: str,
+ module_filter_expression: str=None,
+ name_filter_expression: str=None
+ ):
self.__local_tfstate_service = LocalTfStateService(
file_location=file_location
)
super().__init__(
tfstate_content=self.__local_tfstate_service.read_content(),
+ name_filter_expression=name_filter_expression,
module_filter_expression=module_filter_expression
)
class RemoteS3TfStateController(TfStateController):
- def __init__(self, file_location: str, module_filter_expression: str=None):
+ def __init__(
+ self,
+ file_location: str,
+ module_filter_expression: str=None,
+ name_filter_expression: str=None
+ ):
self.__remote_s3_tfstate_service = RemoteS3TfStateService(
file_location=file_location
)
super().__init__(
tfstate_content=self.__remote_s3_tfstate_service.read_content(),
+ name_filter_expression=name_filter_expression,
module_filter_expression=module_filter_expression
)
diff --git a/tflens/helper/filter.py b/tflens/helper/filter.py
index 18343e2..d5cd16c 100644
--- a/tflens/helper/filter.py
+++ b/tflens/helper/filter.py
@@ -16,12 +16,26 @@ class ModuleFilterHelper(FilterHelper):
def __init__(self, filter_expression: str, resource: TfStateResource):
super().__init__(
filter_expression=filter_expression,
- object_attribute_value = resource.get_parent_module()
+ object_attribute_value=resource.get_parent_module()
+ )
+
+class NameFilterHelper(FilterHelper):
+
+ def __init__(self, filter_expression: str, resource: TfStateResource):
+ super().__init__(
+ filter_expression=filter_expression,
+ object_attribute_value=resource.get_name()
)
class TfStateFilterHelper():
- def __init__(self, module_filter_expression: str=None, resources: list=None):
+ def __init__(
+ self,
+ name_filter_expression: str=None,
+ module_filter_expression: str=None,
+ resources: list=None
+ ):
+ self.__name_filter_expression = name_filter_expression
self.__module_filter_expression = module_filter_expression
self.__resources = resources
@@ -29,13 +43,18 @@ class TfStateFilterHelper():
filtered_list = list()
for resource in self.__resources or []:
+ pass_name_filter = True
pass_module_filter = True
+ if self.__name_filter_expression:
+ filter_helper = NameFilterHelper(filter_expression=self.__name_filter_expression, resource=resource)
+ pass_name_filter = filter_helper.check_filter()
+
if self.__module_filter_expression:
filter_helper = ModuleFilterHelper(filter_expression=self.__module_filter_expression, resource=resource)
pass_module_filter = filter_helper.check_filter()
- if pass_module_filter:
+ if pass_module_filter and pass_name_filter:
filtered_list.append(resource)
return filtered_list
diff --git a/tflens/model/tfstate_resource.py b/tflens/model/tfstate_resource.py
index 09c839f..be6a07a 100644
--- a/tflens/model/tfstate_resource.py
+++ b/tflens/model/tfstate_resource.py
@@ -27,6 +27,9 @@ class TfStateResource():
self.get_parent_module()
]
+ def get_name(self):
+ return self.__name
+
def get_parent_module(self):
return self.__module.split('.')[1] if self.__module else '-'
|
neovasili/tflens
|
8de85543fed8503e823b64b112dbbb292fc90d04
|
diff --git a/tests/controllers_test.py b/tests/controllers_test.py
index e6925f8..61504b4 100644
--- a/tests/controllers_test.py
+++ b/tests/controllers_test.py
@@ -92,6 +92,28 @@ class TestLocalTfStateController(unittest.TestCase):
self.assertEqual(captured_output.getvalue().replace('\n', ''), '')
+ def test_local_show_resources_matching_name_filter(self):
+ local_tfstate_controller = LocalTfStateController(
+ file_location=self.existing_file,
+ name_filter_expression="current_user"
+ )
+ captured_output = io.StringIO()
+ sys.stdout = captured_output
+ local_tfstate_controller.show_resources()
+
+ self.assertEqual(captured_output.getvalue().replace('\n', ''), self.print_table_output)
+
+ def test_local_show_resources_not_matching_name_filter(self):
+ local_tfstate_controller = LocalTfStateController(
+ file_location=self.existing_file,
+ name_filter_expression="Current_user"
+ )
+ captured_output = io.StringIO()
+ sys.stdout = captured_output
+ local_tfstate_controller.show_resources()
+
+ self.assertEqual(captured_output.getvalue().replace('\n', ''), '')
+
def test_local_create_markdown_file(self):
local_tfstate_controller = LocalTfStateController(self.existing_file)
local_tfstate_controller.create_markdown_file()
|
[resources] Add filter resources by name support
We need to be able to filter the resources shown in the table by the name field.
The filter must accept regular expressions as the filter pattern.
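For illustration, a minimal sketch of the resulting API, using the controller and parameter names introduced in the patch above (the file path and pattern are examples):

```python
from tflens.controller.tfstate import LocalTfStateController

controller = LocalTfStateController(
    file_location="terraform.tfstate",      # illustrative path
    name_filter_expression="current_user",  # regex matched against the name field
)
controller.show_resources()                 # prints only the matching resources
```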
|
0.0
|
8de85543fed8503e823b64b112dbbb292fc90d04
|
[
"tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_matching_name_filter",
"tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_not_matching_name_filter"
] |
[
"tests/controllers_test.py::TestTfStateController::test_create_html_file",
"tests/controllers_test.py::TestTfStateController::test_create_markdown_file",
"tests/controllers_test.py::TestTfStateController::test_show_resources",
"tests/controllers_test.py::TestLocalTfStateController::test_local_create_html_file",
"tests/controllers_test.py::TestLocalTfStateController::test_local_create_markdown_file",
"tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources",
"tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_matching_module_filter",
"tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_not_matching_module_filter",
"tests/controllers_test.py::TestLocalTfStateController::test_local_tfstate_controller"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-10-26 22:05:52+00:00
|
mit
| 4,120 |
|
neovasili__tflens-35
|
diff --git a/README.md b/README.md
index 3b089dd..b4b09a0 100644
--- a/README.md
+++ b/README.md
@@ -78,6 +78,8 @@ optional arguments:
Applies a regular expression to the module field in order to filter the resources list to output
-n FILTER_NAME, --filter-name FILTER_NAME
Applies a regular expression to the name field in order to filter the resources list to output
+ -t FILTER_TYPE, --filter-type FILTER_TYPE
+ Applies a regular expression to the type field in order to filter the resources list to output
```
### Examples
diff --git a/tflens/__main__.py b/tflens/__main__.py
index 8991726..f7f87a2 100644
--- a/tflens/__main__.py
+++ b/tflens/__main__.py
@@ -51,6 +51,14 @@ parser.add_argument('-n', '--filter-name',
filter the resources list to output",
default="")
+parser.add_argument('-t', '--filter-type',
+ type=str,
+ action="store",
+ dest="filter_type",
+ help="Applies a regular expression to the type field in order to \
+ filter the resources list to output",
+ default="")
+
args = parser.parse_args()
ARGS_REMOTE = args.remote
@@ -58,6 +66,7 @@ ARGS_FILE_LOCATION = args.file_location
ARGS_OUTPUT = args.output
ARGS_FILTER_MODULE = args.filter_module
ARGS_FILTER_NAME = args.filter_name
+ARGS_FILTER_TYPE = args.filter_type
if not ARGS_FILE_LOCATION:
ARGS_FILE_LOCATION = "{}/terraform.tfstate".format(Path().absolute())
@@ -71,6 +80,7 @@ def main():
tfstate_controller = remote_router[ARGS_REMOTE](
file_location=ARGS_FILE_LOCATION,
name_filter_expression=ARGS_FILTER_NAME,
+ type_filter_expression=ARGS_FILTER_TYPE,
module_filter_expression=ARGS_FILTER_MODULE
)
diff --git a/tflens/controller/tfstate.py b/tflens/controller/tfstate.py
index 7f0fec6..478c492 100644
--- a/tflens/controller/tfstate.py
+++ b/tflens/controller/tfstate.py
@@ -15,6 +15,7 @@ class TfStateController():
self,
tfstate_content: dict,
name_filter_expression: str=None,
+ type_filter_expression: str=None,
module_filter_expression: str=None
):
self.__tfstate = TfState(
@@ -22,6 +23,7 @@ class TfStateController():
)
self.__resources = TfStateFilterHelper(
name_filter_expression=name_filter_expression,
+ type_filter_expression=type_filter_expression,
module_filter_expression=module_filter_expression,
resources=self.__tfstate.get_resources()
).apply_filter()
@@ -53,6 +55,7 @@ class LocalTfStateController(TfStateController):
self,
file_location: str,
module_filter_expression: str=None,
+ type_filter_expression: str=None,
name_filter_expression: str=None
):
self.__local_tfstate_service = LocalTfStateService(
@@ -62,6 +65,7 @@ class LocalTfStateController(TfStateController):
super().__init__(
tfstate_content=self.__local_tfstate_service.read_content(),
name_filter_expression=name_filter_expression,
+ type_filter_expression=type_filter_expression,
module_filter_expression=module_filter_expression
)
@@ -71,6 +75,7 @@ class RemoteS3TfStateController(TfStateController):
self,
file_location: str,
module_filter_expression: str=None,
+ type_filter_expression: str=None,
name_filter_expression: str=None
):
self.__remote_s3_tfstate_service = RemoteS3TfStateService(
@@ -80,5 +85,6 @@ class RemoteS3TfStateController(TfStateController):
super().__init__(
tfstate_content=self.__remote_s3_tfstate_service.read_content(),
name_filter_expression=name_filter_expression,
+ type_filter_expression=type_filter_expression,
module_filter_expression=module_filter_expression
)
diff --git a/tflens/helper/filter.py b/tflens/helper/filter.py
index d5cd16c..f6ce147 100644
--- a/tflens/helper/filter.py
+++ b/tflens/helper/filter.py
@@ -27,15 +27,25 @@ class NameFilterHelper(FilterHelper):
object_attribute_value=resource.get_name()
)
+class TypeFilterHelper(FilterHelper):
+
+ def __init__(self, filter_expression: str, resource: TfStateResource):
+ super().__init__(
+ filter_expression=filter_expression,
+ object_attribute_value=resource.get_type()
+ )
+
class TfStateFilterHelper():
def __init__(
self,
name_filter_expression: str=None,
+ type_filter_expression: str=None,
module_filter_expression: str=None,
resources: list=None
):
self.__name_filter_expression = name_filter_expression
+ self.__type_filter_expression = type_filter_expression
self.__module_filter_expression = module_filter_expression
self.__resources = resources
@@ -45,16 +55,21 @@ class TfStateFilterHelper():
for resource in self.__resources or []:
pass_name_filter = True
pass_module_filter = True
+ pass_type_filter = True
if self.__name_filter_expression:
filter_helper = NameFilterHelper(filter_expression=self.__name_filter_expression, resource=resource)
pass_name_filter = filter_helper.check_filter()
+ if self.__type_filter_expression:
+ filter_helper = TypeFilterHelper(filter_expression=self.__type_filter_expression, resource=resource)
+ pass_type_filter = filter_helper.check_filter()
+
if self.__module_filter_expression:
filter_helper = ModuleFilterHelper(filter_expression=self.__module_filter_expression, resource=resource)
pass_module_filter = filter_helper.check_filter()
- if pass_module_filter and pass_name_filter:
+ if pass_module_filter and pass_name_filter and pass_type_filter:
filtered_list.append(resource)
return filtered_list
diff --git a/tflens/model/tfstate_resource.py b/tflens/model/tfstate_resource.py
index be6a07a..afd3dbf 100644
--- a/tflens/model/tfstate_resource.py
+++ b/tflens/model/tfstate_resource.py
@@ -30,6 +30,9 @@ class TfStateResource():
def get_name(self):
return self.__name
+ def get_type(self):
+ return self.__type
+
def get_parent_module(self):
return self.__module.split('.')[1] if self.__module else '-'
|
neovasili/tflens
|
8fed041deb04b1abef816991fe185a7c0821c5ee
|
diff --git a/tests/controllers_test.py b/tests/controllers_test.py
index 61504b4..65f8078 100644
--- a/tests/controllers_test.py
+++ b/tests/controllers_test.py
@@ -114,6 +114,28 @@ class TestLocalTfStateController(unittest.TestCase):
self.assertEqual(captured_output.getvalue().replace('\n', ''), '')
+ def test_local_show_resources_matching_type_filter(self):
+ local_tfstate_controller = LocalTfStateController(
+ file_location=self.existing_file,
+ type_filter_expression="aws_caller_identity"
+ )
+ captured_output = io.StringIO()
+ sys.stdout = captured_output
+ local_tfstate_controller.show_resources()
+
+ self.assertEqual(captured_output.getvalue().replace('\n', ''), self.print_table_output)
+
+ def test_local_show_resources_not_matching_type_filter(self):
+ local_tfstate_controller = LocalTfStateController(
+ file_location=self.existing_file,
+ type_filter_expression="Aws_caller_identity"
+ )
+ captured_output = io.StringIO()
+ sys.stdout = captured_output
+ local_tfstate_controller.show_resources()
+
+ self.assertEqual(captured_output.getvalue().replace('\n', ''), '')
+
def test_local_create_markdown_file(self):
local_tfstate_controller = LocalTfStateController(self.existing_file)
local_tfstate_controller.create_markdown_file()
|
[resources] Add filter resources by resource type support
We need to be able to filter the resources shown in the table by the resource type field.
The filter must accept regular expressions as the filter pattern.
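As a hedged sketch, the same controller API extended with the new parameter (names taken from the patch above; values are examples):

```python
from tflens.controller.tfstate import LocalTfStateController

controller = LocalTfStateController(
    file_location="terraform.tfstate",             # illustrative path
    type_filter_expression="aws_caller_identity",  # regex matched against the resource type
)
controller.show_resources()
```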
|
0.0
|
8fed041deb04b1abef816991fe185a7c0821c5ee
|
[
"tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_matching_type_filter",
"tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_not_matching_type_filter"
] |
[
"tests/controllers_test.py::TestTfStateController::test_create_html_file",
"tests/controllers_test.py::TestTfStateController::test_create_markdown_file",
"tests/controllers_test.py::TestTfStateController::test_show_resources",
"tests/controllers_test.py::TestLocalTfStateController::test_local_create_html_file",
"tests/controllers_test.py::TestLocalTfStateController::test_local_create_markdown_file",
"tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources",
"tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_matching_module_filter",
"tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_matching_name_filter",
"tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_not_matching_module_filter",
"tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_not_matching_name_filter",
"tests/controllers_test.py::TestLocalTfStateController::test_local_tfstate_controller"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-10-28 00:13:57+00:00
|
mit
| 4,121 |
|
neovasili__tflens-36
|
diff --git a/README.md b/README.md
index b4b09a0..5676c16 100644
--- a/README.md
+++ b/README.md
@@ -80,6 +80,8 @@ optional arguments:
Applies a regular expression to the name field in order to filter the resources list to output
-t FILTER_TYPE, --filter-type FILTER_TYPE
Applies a regular expression to the type field in order to filter the resources list to output
+ -p FILTER_PROVIDER, --filter-provider FILTER_PROVIDER
+ Applies a regular expression to the provider field in order to filter the resources list to output
```
### Examples
diff --git a/tflens/__main__.py b/tflens/__main__.py
index f7f87a2..fe1ca3d 100644
--- a/tflens/__main__.py
+++ b/tflens/__main__.py
@@ -59,6 +59,14 @@ parser.add_argument('-t', '--filter-type',
filter the resources list to output",
default="")
+parser.add_argument('-p', '--filter-provider',
+ type=str,
+ action="store",
+ dest="filter_provider",
+ help="Applies a regular expression to the provider field in order to \
+ filter the resources list to output",
+ default="")
+
args = parser.parse_args()
ARGS_REMOTE = args.remote
@@ -67,6 +75,7 @@ ARGS_OUTPUT = args.output
ARGS_FILTER_MODULE = args.filter_module
ARGS_FILTER_NAME = args.filter_name
ARGS_FILTER_TYPE = args.filter_type
+ARGS_FILTER_PROVIDER = args.filter_provider
if not ARGS_FILE_LOCATION:
ARGS_FILE_LOCATION = "{}/terraform.tfstate".format(Path().absolute())
@@ -81,6 +90,7 @@ def main():
file_location=ARGS_FILE_LOCATION,
name_filter_expression=ARGS_FILTER_NAME,
type_filter_expression=ARGS_FILTER_TYPE,
+ provider_filter_expression=ARGS_FILTER_PROVIDER,
module_filter_expression=ARGS_FILTER_MODULE
)
diff --git a/tflens/controller/tfstate.py b/tflens/controller/tfstate.py
index 478c492..0f4c7a5 100644
--- a/tflens/controller/tfstate.py
+++ b/tflens/controller/tfstate.py
@@ -16,6 +16,7 @@ class TfStateController():
tfstate_content: dict,
name_filter_expression: str=None,
type_filter_expression: str=None,
+ provider_filter_expression: str=None,
module_filter_expression: str=None
):
self.__tfstate = TfState(
@@ -24,6 +25,7 @@ class TfStateController():
self.__resources = TfStateFilterHelper(
name_filter_expression=name_filter_expression,
type_filter_expression=type_filter_expression,
+ provider_filter_expression=provider_filter_expression,
module_filter_expression=module_filter_expression,
resources=self.__tfstate.get_resources()
).apply_filter()
@@ -56,6 +58,7 @@ class LocalTfStateController(TfStateController):
file_location: str,
module_filter_expression: str=None,
type_filter_expression: str=None,
+ provider_filter_expression: str=None,
name_filter_expression: str=None
):
self.__local_tfstate_service = LocalTfStateService(
@@ -66,6 +69,7 @@ class LocalTfStateController(TfStateController):
tfstate_content=self.__local_tfstate_service.read_content(),
name_filter_expression=name_filter_expression,
type_filter_expression=type_filter_expression,
+ provider_filter_expression=provider_filter_expression,
module_filter_expression=module_filter_expression
)
@@ -76,6 +80,7 @@ class RemoteS3TfStateController(TfStateController):
file_location: str,
module_filter_expression: str=None,
type_filter_expression: str=None,
+ provider_filter_expression: str=None,
name_filter_expression: str=None
):
self.__remote_s3_tfstate_service = RemoteS3TfStateService(
@@ -86,5 +91,6 @@ class RemoteS3TfStateController(TfStateController):
tfstate_content=self.__remote_s3_tfstate_service.read_content(),
name_filter_expression=name_filter_expression,
type_filter_expression=type_filter_expression,
+ provider_filter_expression=provider_filter_expression,
module_filter_expression=module_filter_expression
)
diff --git a/tflens/helper/filter.py b/tflens/helper/filter.py
index f6ce147..ee86ff3 100644
--- a/tflens/helper/filter.py
+++ b/tflens/helper/filter.py
@@ -35,17 +35,27 @@ class TypeFilterHelper(FilterHelper):
object_attribute_value=resource.get_type()
)
+class ProviderFilterHelper(FilterHelper):
+
+ def __init__(self, filter_expression: str, resource: TfStateResource):
+ super().__init__(
+ filter_expression=filter_expression,
+ object_attribute_value=resource.get_type()
+ )
+
class TfStateFilterHelper():
def __init__(
self,
name_filter_expression: str=None,
type_filter_expression: str=None,
+ provider_filter_expression: str=None,
module_filter_expression: str=None,
resources: list=None
):
self.__name_filter_expression = name_filter_expression
self.__type_filter_expression = type_filter_expression
+ self.__provider_filter_expression = provider_filter_expression
self.__module_filter_expression = module_filter_expression
self.__resources = resources
@@ -56,6 +66,7 @@ class TfStateFilterHelper():
pass_name_filter = True
pass_module_filter = True
pass_type_filter = True
+ pass_provider_filter = True
if self.__name_filter_expression:
filter_helper = NameFilterHelper(filter_expression=self.__name_filter_expression, resource=resource)
@@ -65,11 +76,15 @@ class TfStateFilterHelper():
filter_helper = TypeFilterHelper(filter_expression=self.__type_filter_expression, resource=resource)
pass_type_filter = filter_helper.check_filter()
+ if self.__provider_filter_expression:
+ filter_helper = ProviderFilterHelper(filter_expression=self.__provider_filter_expression, resource=resource)
+ pass_provider_filter = filter_helper.check_filter()
+
if self.__module_filter_expression:
filter_helper = ModuleFilterHelper(filter_expression=self.__module_filter_expression, resource=resource)
pass_module_filter = filter_helper.check_filter()
- if pass_module_filter and pass_name_filter and pass_type_filter:
+ if pass_module_filter and pass_name_filter and pass_type_filter and pass_provider_filter:
filtered_list.append(resource)
return filtered_list
|
neovasili/tflens
|
ba81fda814f0f1401a79f52365d9ba1d3ca5799d
|
diff --git a/tests/controllers_test.py b/tests/controllers_test.py
index 65f8078..b250a98 100644
--- a/tests/controllers_test.py
+++ b/tests/controllers_test.py
@@ -136,6 +136,28 @@ class TestLocalTfStateController(unittest.TestCase):
self.assertEqual(captured_output.getvalue().replace('\n', ''), '')
+ def test_local_show_resources_matching_provider_filter(self):
+ local_tfstate_controller = LocalTfStateController(
+ file_location=self.existing_file,
+ provider_filter_expression="aws"
+ )
+ captured_output = io.StringIO()
+ sys.stdout = captured_output
+ local_tfstate_controller.show_resources()
+
+ self.assertEqual(captured_output.getvalue().replace('\n', ''), self.print_table_output)
+
+ def test_local_show_resources_not_matching_provider_filter(self):
+ local_tfstate_controller = LocalTfStateController(
+ file_location=self.existing_file,
+ provider_filter_expression="Aws"
+ )
+ captured_output = io.StringIO()
+ sys.stdout = captured_output
+ local_tfstate_controller.show_resources()
+
+ self.assertEqual(captured_output.getvalue().replace('\n', ''), '')
+
def test_local_create_markdown_file(self):
local_tfstate_controller = LocalTfStateController(self.existing_file)
local_tfstate_controller.create_markdown_file()
|
[resources] Add filter resources by provider support
We need to be able to filter the resources shown in the table by the provider field.
The filter must accept regular expressions as the filter pattern.
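A minimal sketch mirroring the tests below (parameter name from the patch above; values are examples):

```python
from tflens.controller.tfstate import LocalTfStateController

controller = LocalTfStateController(
    file_location="terraform.tfstate",  # illustrative path
    provider_filter_expression="aws",   # regex matched against the provider field
)
controller.show_resources()
```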
|
0.0
|
ba81fda814f0f1401a79f52365d9ba1d3ca5799d
|
[
"tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_matching_provider_filter",
"tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_not_matching_provider_filter"
] |
[
"tests/controllers_test.py::TestTfStateController::test_create_html_file",
"tests/controllers_test.py::TestTfStateController::test_create_markdown_file",
"tests/controllers_test.py::TestTfStateController::test_show_resources",
"tests/controllers_test.py::TestLocalTfStateController::test_local_create_html_file",
"tests/controllers_test.py::TestLocalTfStateController::test_local_create_markdown_file",
"tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources",
"tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_matching_module_filter",
"tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_matching_name_filter",
"tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_matching_type_filter",
"tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_not_matching_module_filter",
"tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_not_matching_name_filter",
"tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_not_matching_type_filter",
"tests/controllers_test.py::TestLocalTfStateController::test_local_tfstate_controller"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-10-28 00:29:57+00:00
|
mit
| 4,122 |
|
neovasili__tflens-37
|
diff --git a/README.md b/README.md
index 5676c16..1c92c04 100644
--- a/README.md
+++ b/README.md
@@ -82,6 +82,8 @@ optional arguments:
Applies a regular expression to the type field in order to filter the resources list to output
-p FILTER_PROVIDER, --filter-provider FILTER_PROVIDER
Applies a regular expression to the provider field in order to filter the resources list to output
+ -d FILTER_MODE, --filter-mode FILTER_MODE
+ Applies a regular expression to the mode field in order to filter the resources list to output
```
### Examples
diff --git a/tflens/__main__.py b/tflens/__main__.py
index fe1ca3d..2e696e3 100644
--- a/tflens/__main__.py
+++ b/tflens/__main__.py
@@ -67,6 +67,14 @@ parser.add_argument('-p', '--filter-provider',
filter the resources list to output",
default="")
+parser.add_argument('-d', '--filter-mode',
+ type=str,
+ action="store",
+ dest="filter_mode",
+ help="Applies a regular expression to the mode field in order to \
+ filter the resources list to output",
+ default="")
+
args = parser.parse_args()
ARGS_REMOTE = args.remote
@@ -76,6 +84,7 @@ ARGS_FILTER_MODULE = args.filter_module
ARGS_FILTER_NAME = args.filter_name
ARGS_FILTER_TYPE = args.filter_type
ARGS_FILTER_PROVIDER = args.filter_provider
+ARGS_FILTER_MODE = args.filter_mode
if not ARGS_FILE_LOCATION:
ARGS_FILE_LOCATION = "{}/terraform.tfstate".format(Path().absolute())
@@ -91,7 +100,8 @@ def main():
name_filter_expression=ARGS_FILTER_NAME,
type_filter_expression=ARGS_FILTER_TYPE,
provider_filter_expression=ARGS_FILTER_PROVIDER,
- module_filter_expression=ARGS_FILTER_MODULE
+ module_filter_expression=ARGS_FILTER_MODULE,
+ mode_filter_expression=ARGS_FILTER_MODE
)
output_router = {
diff --git a/tflens/controller/tfstate.py b/tflens/controller/tfstate.py
index 0f4c7a5..bafd4cb 100644
--- a/tflens/controller/tfstate.py
+++ b/tflens/controller/tfstate.py
@@ -17,7 +17,8 @@ class TfStateController():
name_filter_expression: str=None,
type_filter_expression: str=None,
provider_filter_expression: str=None,
- module_filter_expression: str=None
+ module_filter_expression: str=None,
+ mode_filter_expression: str=None
):
self.__tfstate = TfState(
content=tfstate_content
@@ -27,6 +28,7 @@ class TfStateController():
type_filter_expression=type_filter_expression,
provider_filter_expression=provider_filter_expression,
module_filter_expression=module_filter_expression,
+ mode_filter_expression=mode_filter_expression,
resources=self.__tfstate.get_resources()
).apply_filter()
@@ -59,7 +61,8 @@ class LocalTfStateController(TfStateController):
module_filter_expression: str=None,
type_filter_expression: str=None,
provider_filter_expression: str=None,
- name_filter_expression: str=None
+ name_filter_expression: str=None,
+ mode_filter_expression: str=None
):
self.__local_tfstate_service = LocalTfStateService(
file_location=file_location
@@ -70,7 +73,8 @@ class LocalTfStateController(TfStateController):
name_filter_expression=name_filter_expression,
type_filter_expression=type_filter_expression,
provider_filter_expression=provider_filter_expression,
- module_filter_expression=module_filter_expression
+ module_filter_expression=module_filter_expression,
+ mode_filter_expression=mode_filter_expression
)
class RemoteS3TfStateController(TfStateController):
@@ -81,7 +85,8 @@ class RemoteS3TfStateController(TfStateController):
module_filter_expression: str=None,
type_filter_expression: str=None,
provider_filter_expression: str=None,
- name_filter_expression: str=None
+ name_filter_expression: str=None,
+ mode_filter_expression: str=None
):
self.__remote_s3_tfstate_service = RemoteS3TfStateService(
file_location=file_location
@@ -92,5 +97,6 @@ class RemoteS3TfStateController(TfStateController):
name_filter_expression=name_filter_expression,
type_filter_expression=type_filter_expression,
provider_filter_expression=provider_filter_expression,
- module_filter_expression=module_filter_expression
+ module_filter_expression=module_filter_expression,
+ mode_filter_expression=mode_filter_expression
)
diff --git a/tflens/helper/filter.py b/tflens/helper/filter.py
index ee86ff3..58bba51 100644
--- a/tflens/helper/filter.py
+++ b/tflens/helper/filter.py
@@ -9,7 +9,9 @@ class FilterHelper():
self.__object_attribute_value = object_attribute_value
def check_filter(self):
- return re.match(self.__filter_expression, self.__object_attribute_value)
+ compiled_pattern = re.compile(self.__filter_expression)
+
+ return compiled_pattern.search(self.__object_attribute_value)
class ModuleFilterHelper(FilterHelper):
@@ -40,7 +42,15 @@ class ProviderFilterHelper(FilterHelper):
def __init__(self, filter_expression: str, resource: TfStateResource):
super().__init__(
filter_expression=filter_expression,
- object_attribute_value=resource.get_type()
+ object_attribute_value=resource.get_provider()
+ )
+
+class ModeFilterHelper(FilterHelper):
+
+ def __init__(self, filter_expression: str, resource: TfStateResource):
+ super().__init__(
+ filter_expression=filter_expression,
+ object_attribute_value=resource.get_mode()
)
class TfStateFilterHelper():
@@ -51,12 +61,14 @@ class TfStateFilterHelper():
type_filter_expression: str=None,
provider_filter_expression: str=None,
module_filter_expression: str=None,
+ mode_filter_expression: str=None,
resources: list=None
):
self.__name_filter_expression = name_filter_expression
self.__type_filter_expression = type_filter_expression
self.__provider_filter_expression = provider_filter_expression
self.__module_filter_expression = module_filter_expression
+ self.__mode_filter_expression = mode_filter_expression
self.__resources = resources
def apply_filter(self):
@@ -67,6 +79,7 @@ class TfStateFilterHelper():
pass_module_filter = True
pass_type_filter = True
pass_provider_filter = True
+ pass_mode_filter = True
if self.__name_filter_expression:
filter_helper = NameFilterHelper(filter_expression=self.__name_filter_expression, resource=resource)
@@ -84,7 +97,11 @@ class TfStateFilterHelper():
filter_helper = ModuleFilterHelper(filter_expression=self.__module_filter_expression, resource=resource)
pass_module_filter = filter_helper.check_filter()
- if pass_module_filter and pass_name_filter and pass_type_filter and pass_provider_filter:
+ if self.__mode_filter_expression:
+ filter_helper = ModeFilterHelper(filter_expression=self.__mode_filter_expression, resource=resource)
+ pass_mode_filter = filter_helper.check_filter()
+
+ if pass_module_filter and pass_name_filter and pass_type_filter and pass_provider_filter and pass_mode_filter:
filtered_list.append(resource)
return filtered_list
diff --git a/tflens/model/tfstate_resource.py b/tflens/model/tfstate_resource.py
index afd3dbf..fa14840 100644
--- a/tflens/model/tfstate_resource.py
+++ b/tflens/model/tfstate_resource.py
@@ -33,6 +33,12 @@ class TfStateResource():
def get_type(self):
return self.__type
+ def get_provider(self):
+ return self.__provider
+
+ def get_mode(self):
+ return self.__mode
+
def get_parent_module(self):
return self.__module.split('.')[1] if self.__module else '-'
|
neovasili/tflens
|
658445946b11c2776992bec4ae31e12e829a5136
|
diff --git a/tests/controllers_test.py b/tests/controllers_test.py
index b250a98..6560a65 100644
--- a/tests/controllers_test.py
+++ b/tests/controllers_test.py
@@ -158,6 +158,58 @@ class TestLocalTfStateController(unittest.TestCase):
self.assertEqual(captured_output.getvalue().replace('\n', ''), '')
+ def test_local_show_resources_matching_mode_filter(self):
+ local_tfstate_controller = LocalTfStateController(
+ file_location=self.existing_file,
+ mode_filter_expression="data"
+ )
+ captured_output = io.StringIO()
+ sys.stdout = captured_output
+ local_tfstate_controller.show_resources()
+
+ self.assertEqual(captured_output.getvalue().replace('\n', ''), self.print_table_output)
+
+ def test_local_show_resources_not_matching_mode_filter(self):
+ local_tfstate_controller = LocalTfStateController(
+ file_location=self.existing_file,
+ mode_filter_expression="Data"
+ )
+ captured_output = io.StringIO()
+ sys.stdout = captured_output
+ local_tfstate_controller.show_resources()
+
+ self.assertEqual(captured_output.getvalue().replace('\n', ''), '')
+
+ def test_local_show_resources_matching_mixed_filter(self):
+ local_tfstate_controller = LocalTfStateController(
+ file_location=self.existing_file,
+ module_filter_expression="test",
+ name_filter_expression="current_user",
+ type_filter_expression="aws_caller_identity",
+ provider_filter_expression="aws",
+ mode_filter_expression="data"
+ )
+ captured_output = io.StringIO()
+ sys.stdout = captured_output
+ local_tfstate_controller.show_resources()
+
+ self.assertEqual(captured_output.getvalue().replace('\n', ''), self.print_table_output)
+
+ def test_local_show_resources_not_matching_mixed_filter(self):
+ local_tfstate_controller = LocalTfStateController(
+ file_location=self.existing_file,
+ module_filter_expression="test",
+ name_filter_expression="current_user",
+ type_filter_expression="aws_caller_identity",
+ provider_filter_expression="aws",
+ mode_filter_expression="Data"
+ )
+ captured_output = io.StringIO()
+ sys.stdout = captured_output
+ local_tfstate_controller.show_resources()
+
+ self.assertEqual(captured_output.getvalue().replace('\n', ''), '')
+
def test_local_create_markdown_file(self):
local_tfstate_controller = LocalTfStateController(self.existing_file)
local_tfstate_controller.create_markdown_file()
|
[resources] Add filter resources by mode support
We need to be able to filter the resources shown in the table by the mode field.
The filter must accept regular expressions as the filter pattern.
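A hedged sketch combining all five filters, mirroring the mixed-filter tests below; a resource is kept only when every supplied expression matches:

```python
from tflens.controller.tfstate import LocalTfStateController

controller = LocalTfStateController(
    file_location="terraform.tfstate",  # illustrative path
    module_filter_expression="test",
    name_filter_expression="current_user",
    type_filter_expression="aws_caller_identity",
    provider_filter_expression="aws",
    mode_filter_expression="data",      # regex matched against the mode field
)
controller.show_resources()
```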
|
0.0
|
658445946b11c2776992bec4ae31e12e829a5136
|
[
"tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_matching_mixed_filter",
"tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_matching_mode_filter",
"tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_not_matching_mixed_filter",
"tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_not_matching_mode_filter"
] |
[
"tests/controllers_test.py::TestTfStateController::test_create_html_file",
"tests/controllers_test.py::TestTfStateController::test_create_markdown_file",
"tests/controllers_test.py::TestTfStateController::test_show_resources",
"tests/controllers_test.py::TestLocalTfStateController::test_local_create_html_file",
"tests/controllers_test.py::TestLocalTfStateController::test_local_create_markdown_file",
"tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources",
"tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_matching_module_filter",
"tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_matching_name_filter",
"tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_matching_provider_filter",
"tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_matching_type_filter",
"tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_not_matching_module_filter",
"tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_not_matching_name_filter",
"tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_not_matching_provider_filter",
"tests/controllers_test.py::TestLocalTfStateController::test_local_show_resources_not_matching_type_filter",
"tests/controllers_test.py::TestLocalTfStateController::test_local_tfstate_controller"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-10-30 19:47:40+00:00
|
mit
| 4,123 |
|
neovasili__tflens-46
|
diff --git a/tflens/__main__.py b/tflens/__main__.py
index 4417c32..de432d6 100644
--- a/tflens/__main__.py
+++ b/tflens/__main__.py
@@ -2,11 +2,7 @@ from pathlib import Path
import argparse
from tflens.helper.config import VERSION
-from tflens.controller.tfstate import (
- RemoteS3TfStateController,
- RemoteHttpTfStateController,
- LocalTfStateController
-)
+from tflens.helper.remote import RemoteHelper
parser = argparse.ArgumentParser(
description='Terraform lens is a CLI tool that enables developers have a summarized view of tfstate resources.'
@@ -28,15 +24,6 @@ parser.add_argument('-o', '--output',
help="Defines output type (markdown|html). If empty outputs in terminal",
default="")
-parser.add_argument('-r', '--remote',
- type=str,
- action="store",
- dest="remote",
- help="Defines if remote (s3|http) or local tfstate file. If empty local is used. \
- When remote is defined, you also need to specify --file-location with the tfstate location \
- according to the following pattern: bucket-name/tfstate-key",
- default="")
-
parser.add_argument('-m', '--filter-module',
type=str,
action="store",
@@ -98,7 +85,6 @@ parser.add_argument('-v', '--version',
args = parser.parse_args()
-ARGS_REMOTE = args.remote
ARGS_FILE_LOCATION = args.file_location
ARGS_OUTPUT = args.output
ARGS_FILTER_MODULE = args.filter_module
@@ -113,13 +99,9 @@ if not ARGS_FILE_LOCATION:
ARGS_FILE_LOCATION = "{}/terraform.tfstate".format(Path().absolute())
def main():
- remote_router = {
- 's3': RemoteS3TfStateController,
- 'http': RemoteHttpTfStateController,
- '': LocalTfStateController
- }
+ remote_helper = RemoteHelper(ARGS_FILE_LOCATION)
- tfstate_controller = remote_router[ARGS_REMOTE](
+ tfstate_controller = remote_helper.invoke_remote_controller(
file_location=ARGS_FILE_LOCATION,
user=ARGS_HTTP_USER,
password=ARGS_HTTP_PASSWORD,
diff --git a/tflens/exception/exception.py b/tflens/exception/exception.py
index bff9917..95e47b5 100644
--- a/tflens/exception/exception.py
+++ b/tflens/exception/exception.py
@@ -36,3 +36,13 @@ class ServerUnavailable(CustomException):
def __init__(self):
super().__init__("The server is unavailable")
+
+class NotValidS3Location(CustomException):
+
+ def __init__(self):
+ super().__init__("Invalid S3 location. Must be something like 's3://bucket_name/key'")
+
+class NotValidHttpLocation(CustomException):
+
+ def __init__(self):
+ super().__init__("Invalid Http location. Must be something like 'http(s)://http_server/'")
diff --git a/tflens/helper/location.py b/tflens/helper/location.py
new file mode 100644
index 0000000..3d13d5c
--- /dev/null
+++ b/tflens/helper/location.py
@@ -0,0 +1,21 @@
+import re
+
+class LocationHelper():
+
+ def __init__(self, file_location: str, validation_pattern: str):
+ self.__file_location = file_location
+ self.__validation_pattern = validation_pattern
+ self.__compiled_pattern = re.compile(self.__validation_pattern)
+
+ def validate(self):
+ return self.__compiled_pattern.search(self.__file_location)
+
+class S3LocationHelper(LocationHelper):
+
+ def __init__(self, file_location: str):
+ super().__init__(file_location=file_location, validation_pattern="s3\:\/\/.+\/.+")
+
+class HttpLocationHelper(LocationHelper):
+
+ def __init__(self, file_location: str):
+ super().__init__(file_location=file_location, validation_pattern="http(s)?\:\/\/.+")
diff --git a/tflens/helper/remote.py b/tflens/helper/remote.py
new file mode 100644
index 0000000..a6bfbd3
--- /dev/null
+++ b/tflens/helper/remote.py
@@ -0,0 +1,26 @@
+from tflens.controller.tfstate import (
+ RemoteS3TfStateController,
+ RemoteHttpTfStateController,
+ LocalTfStateController
+)
+
+class RemoteHelper():
+
+ def __init__(self, file_location: str):
+ self.__file_location = file_location
+ self.__remote_router = {
+ 's3': RemoteS3TfStateController,
+ 'http': RemoteHttpTfStateController,
+ 'https': RemoteHttpTfStateController,
+ 'local': LocalTfStateController
+ }
+ self.__remote_type = "local"
+
+ if ":" in self.__file_location:
+ self.__remote_type = self.__file_location.split(":")[0]
+
+ def get_remote_type(self):
+ return self.__remote_type
+
+ def invoke_remote_controller(self, **kwargs):
+ return self.__remote_router[self.__remote_type](**kwargs)
diff --git a/tflens/service/tfstate.py b/tflens/service/tfstate.py
index e778a05..8972a4f 100644
--- a/tflens/service/tfstate.py
+++ b/tflens/service/tfstate.py
@@ -6,21 +6,35 @@ import requests
from botocore.exceptions import ClientError
+from tflens.helper.location import (
+ S3LocationHelper,
+ HttpLocationHelper
+)
from tflens.exception.exception import (
CannotLoadLocalFile,
CannotReadLocalFile,
CannotLoadRemoteFile,
UnauthorizedAccess,
Forbidden,
- ServerUnavailable
+ ServerUnavailable,
+ NotValidS3Location,
+ NotValidHttpLocation
)
class RemoteS3TfStateService():
def __init__(self, file_location: str):
- self.__s3_client = boto3.client('s3')
- self.__bucket_name = file_location.split('/')[0]
- self.__file_s3_key = "/".join(file_location.split('/')[1:])
+ location_helper = S3LocationHelper(file_location=file_location)
+
+ if location_helper.validate():
+ location_without_schema = file_location.split(":")[1].replace("//", "")
+
+ self.__s3_client = boto3.client('s3')
+ self.__bucket_name = location_without_schema.split('/')[0]
+ self.__file_s3_key = "/".join(location_without_schema.split('/')[1:])
+
+ else:
+ raise NotValidS3Location
def read_content(self):
try:
@@ -37,9 +51,15 @@ class RemoteS3TfStateService():
class RemoteHttpTfStateService():
def __init__(self, file_location: str, user: str=None, password: str=None):
- self.__file_location = file_location
- self.__user = user
- self.__password = password
+ location_helper = HttpLocationHelper(file_location=file_location)
+
+ if location_helper.validate():
+ self.__file_location = file_location
+ self.__user = user
+ self.__password = password
+
+ else:
+ raise NotValidHttpLocation
def read_content(self):
try:
|
neovasili/tflens
|
57ccc55027682bd2180c7e24c3463eeb6e52d4b5
|
diff --git a/tests/helpers_test.py b/tests/helpers_test.py
index b4b1447..9814a7f 100644
--- a/tests/helpers_test.py
+++ b/tests/helpers_test.py
@@ -18,6 +18,11 @@ from tflens.helper.table import (
MarkdownTableHelper,
HtmlTableHelper
)
+from tflens.helper.remote import RemoteHelper
+from tflens.helper.location import (
+ S3LocationHelper,
+ HttpLocationHelper
+)
class TestTableHelper(unittest.TestCase):
@@ -108,3 +113,83 @@ class TestHtmlTableHelper(unittest.TestCase):
html_file_content = html_file.read()
self.assertEqual(html_file_content.replace('\n', ''), self.file_htmltable_output.replace('\n', ''))
+
+class TestRemoteHelper(unittest.TestCase):
+
+ def setUp(self):
+ self.local_file_location = 'local/terraform.tfstate'
+ self.s3_file_location = 's3://local/terraform.tfstate'
+ self.http_file_location = 'http://local/terraform.tfstate'
+ self.https_file_location = 'https://local/terraform.tfstate'
+
+ def test_invoke_local_remote_controller(self):
+ remote_helper = RemoteHelper(self.local_file_location)
+
+ self.assertEqual(
+ remote_helper.get_remote_type(),
+ "local"
+ )
+
+ def test_invoke_s3_remote_controller(self):
+ remote_helper = RemoteHelper(self.s3_file_location)
+
+ self.assertEqual(
+ remote_helper.get_remote_type(),
+ "s3"
+ )
+
+ def test_invoke_http_remote_controller(self):
+ remote_helper = RemoteHelper(self.http_file_location)
+
+ self.assertEqual(
+ remote_helper.get_remote_type(),
+ "http"
+ )
+
+ def test_invoke_https_remote_controller(self):
+ remote_helper = RemoteHelper(self.https_file_location)
+
+ self.assertEqual(
+ remote_helper.get_remote_type(),
+ "https"
+ )
+
+class TestLocationHelper(unittest.TestCase):
+
+ def setUp(self):
+ self.s3_file_location = 's3://local/terraform.tfstate'
+ self.non_valid_s3_file_location = 's3:/local/terraform.tfstate'
+ self.http_file_location = 'http://local/terraform.tfstate'
+ self.non_valid_http_file_location = 'http:/local/terraform.tfstate'
+ self.https_file_location = 'https://local/terraform.tfstate'
+ self.non_valid_https_file_location = 'https:/local/terraform.tfstate'
+
+ def test_valid_s3_remote_location(self):
+ location_helper = S3LocationHelper(self.s3_file_location)
+
+ self.assertTrue(location_helper.validate())
+
+ def test_non_valid_s3_remote_location(self):
+ location_helper = S3LocationHelper(self.non_valid_s3_file_location)
+
+ self.assertFalse(location_helper.validate())
+
+ def test_valid_http_remote_location(self):
+ location_helper = HttpLocationHelper(self.http_file_location)
+
+ self.assertTrue(location_helper.validate())
+
+ def test_non_valid_http_remote_location(self):
+ location_helper = HttpLocationHelper(self.non_valid_http_file_location)
+
+ self.assertFalse(location_helper.validate())
+
+ def test_valid_https_remote_location(self):
+ location_helper = HttpLocationHelper(self.https_file_location)
+
+ self.assertTrue(location_helper.validate())
+
+ def test_non_valid_https_remote_location(self):
+ location_helper = HttpLocationHelper(self.non_valid_https_file_location)
+
+ self.assertFalse(location_helper.validate())
diff --git a/tests/services_test.py b/tests/services_test.py
index eaee6d2..5028226 100644
--- a/tests/services_test.py
+++ b/tests/services_test.py
@@ -16,7 +16,9 @@ from tflens.service.tfstate import (
from tflens.exception.exception import (
CannotLoadLocalFile,
CannotReadLocalFile,
- CannotLoadRemoteFile
+ CannotLoadRemoteFile,
+ NotValidS3Location,
+ NotValidHttpLocation
)
@mock_s3
@@ -27,8 +29,9 @@ class TestRemoteS3TfStateService(unittest.TestCase):
self.valid_content_key = 'tflens/terraform.tfstate'
self.non_valid_key = 'none'
- self.valid_tfstate_file = "{}/{}".format(self.bucket, self.valid_content_key)
- self.non_existing_tfstate_file = "{}/{}".format(self.bucket, self.non_valid_key)
+ self.valid_tfstate_file = "s3://{}/{}".format(self.bucket, self.valid_content_key)
+ self.non_existing_tfstate_file = "s3://{}/{}".format(self.bucket, self.non_valid_key)
+ self.non_valid_tfstate_location = "s3:/{}/{}".format(self.bucket, self.valid_content_key)
with mock_s3():
s3 = boto3.client("s3")
@@ -57,12 +60,20 @@ class TestRemoteS3TfStateService(unittest.TestCase):
remote_s3_tfstate_service.read_content
)
+ def test_non_valid_remote_s3_file_location(self):
+ self.assertRaises(
+ NotValidS3Location,
+ RemoteS3TfStateService,
+ self.non_valid_tfstate_location
+ )
+
class TestRemoteHttpTfStateService(unittest.TestCase):
@requests_mock.mock()
def setUp(self, mock):
self.valid_tfstate_file = "http://valid_tfstate_file"
self.non_existing_tfstate_file = "http://non_existing_tfstate_file"
+ self.non_valid_tfstate_location = "http:/non_existing_tfstate_file"
self.user = "user"
self.password = "password"
@@ -88,6 +99,13 @@ class TestRemoteHttpTfStateService(unittest.TestCase):
remote_http_tfstate_service.read_content
)
+ def test_non_valid_remote_http_file_location(self):
+ self.assertRaises(
+ NotValidHttpLocation,
+ RemoteHttpTfStateService,
+ self.non_valid_tfstate_location
+ )
+
class TestLocalTfStateService(unittest.TestCase):
def setUp(self):
|
[arguments] Remove remote argument
It is possible to remove the `remote` argument and infer whether the file is local or remote from the `--file-location` argument's schema: for instance, if the file location starts with `s3://`, the tfstate is remote and located in S3.
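A minimal sketch of the scheme-based routing this enables (class and method names are from the patch above; the locations are examples):

```python
from tflens.helper.remote import RemoteHelper

RemoteHelper("s3://bucket-name/terraform.tfstate").get_remote_type()  # "s3"
RemoteHelper("https://server/terraform.tfstate").get_remote_type()    # "https"
RemoteHelper("terraform.tfstate").get_remote_type()                   # "local"
```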
|
0.0
|
57ccc55027682bd2180c7e24c3463eeb6e52d4b5
|
[
"tests/helpers_test.py::TestTableHelper::test_non_valid_table_with_non_valid_row",
"tests/helpers_test.py::TestTableHelper::test_print_table",
"tests/helpers_test.py::TestTableHelper::test_set_table_non_valid_style",
"tests/helpers_test.py::TestTableHelper::test_set_table_valid_style",
"tests/helpers_test.py::TestTableHelper::test_valid_table_with_multiple_rows",
"tests/helpers_test.py::TestTableHelper::test_valid_table_with_one_row",
"tests/helpers_test.py::TestMarkdownTableHelper::test_print_markdowntable",
"tests/helpers_test.py::TestMarkdownTableHelper::test_write_markdowntable_file",
"tests/helpers_test.py::TestHtmlTableHelper::test_print_htmltable",
"tests/helpers_test.py::TestHtmlTableHelper::test_write_html_file",
"tests/helpers_test.py::TestRemoteHelper::test_invoke_http_remote_controller",
"tests/helpers_test.py::TestRemoteHelper::test_invoke_https_remote_controller",
"tests/helpers_test.py::TestRemoteHelper::test_invoke_local_remote_controller",
"tests/helpers_test.py::TestRemoteHelper::test_invoke_s3_remote_controller",
"tests/helpers_test.py::TestLocationHelper::test_non_valid_http_remote_location",
"tests/helpers_test.py::TestLocationHelper::test_non_valid_https_remote_location",
"tests/helpers_test.py::TestLocationHelper::test_non_valid_s3_remote_location",
"tests/helpers_test.py::TestLocationHelper::test_valid_http_remote_location",
"tests/helpers_test.py::TestLocationHelper::test_valid_https_remote_location",
"tests/helpers_test.py::TestLocationHelper::test_valid_s3_remote_location",
"tests/services_test.py::TestRemoteHttpTfStateService::test_non_valid_remote_http_file_location",
"tests/services_test.py::TestRemoteHttpTfStateService::test_read_content_remote_http_file",
"tests/services_test.py::TestLocalTfStateService::test_cannot_read_content_local_file",
"tests/services_test.py::TestLocalTfStateService::test_fail_open_non_existing_local_file",
"tests/services_test.py::TestLocalTfStateService::test_open_existing_local_file",
"tests/services_test.py::TestLocalTfStateService::test_read_content_local_file"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-11-08 17:16:47+00:00
|
mit
| 4,124 |
|
netaddr__netaddr-225
|
diff --git a/netaddr/eui/__init__.py b/netaddr/eui/__init__.py
index 5639676..07bbdc3 100644
--- a/netaddr/eui/__init__.py
+++ b/netaddr/eui/__init__.py
@@ -100,6 +100,9 @@ class OUI(BaseIdentifier):
else:
raise NotRegisteredError('OUI %r not registered!' % (oui,))
+ def __hash__(self):
+ return hash(self._value)
+
def __eq__(self, other):
if not isinstance(other, OUI):
try:
|
netaddr/netaddr
|
606a44b62ea7032f63e359aaaaabc0057e168890
|
diff --git a/netaddr/tests/eui/test_eui.py b/netaddr/tests/eui/test_eui.py
index 645a518..c17d0ce 100644
--- a/netaddr/tests/eui/test_eui.py
+++ b/netaddr/tests/eui/test_eui.py
@@ -186,6 +186,14 @@ def test_oui_constructor():
assert oui.reg_count == 3
+def test_oui_hash():
+ oui0 = OUI(0)
+ oui1 = OUI(1)
+ oui_dict = {oui0: None, oui1: None}
+
+ assert list(oui_dict.keys()) == [OUI(0), OUI(1)]
+
+
def test_eui_iab():
mac = EUI('00-50-C2-00-0F-01')
assert mac.is_iab()
|
OUI hashability
# Summary
`OUI` does not implement `__hash__`. This prevents using `OUI` instances as dictionary keys or set members.
- Python Hash documentation [link](https://docs.python.org/3/reference/datamodel.html#object.__hash__)
# Example - Break
```
from netaddr import OUI
a = OUI("002272")
try:
    {a}  # OUI defines __eq__ but not __hash__, so the instance is unhashable
except TypeError:
    print("Hash failed")
```
# Example - Fix
```
from netaddr import OUI
a = OUI("002272")
{a}  # hashable once OUI implements __hash__
```
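With `__hash__` in place (see the patch above), `OUI` values work as dictionary keys and set members, mirroring the regression test below:

```python
from netaddr import OUI

lookup = {OUI(0): "zero", OUI(1): "one"}  # usable as dict keys now
assert OUI(0) in lookup
assert len({OUI("002272"), OUI("002272")}) == 1  # equal OUIs collapse in a set
```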
|
0.0
|
606a44b62ea7032f63e359aaaaabc0057e168890
|
[
"netaddr/tests/eui/test_eui.py::test_oui_hash"
] |
[
"netaddr/tests/eui/test_eui.py::test_mac_address_properties",
"netaddr/tests/eui/test_eui.py::test_mac_address_numerical_operations",
"netaddr/tests/eui/test_eui.py::test_eui_oct_format_py3",
"netaddr/tests/eui/test_eui.py::test_eui_constructor",
"netaddr/tests/eui/test_eui.py::test_eui_dialects",
"netaddr/tests/eui/test_eui.py::test_eui_dialect_property_assignment",
"netaddr/tests/eui/test_eui.py::test_eui_format",
"netaddr/tests/eui/test_eui.py::test_eui_custom_dialect",
"netaddr/tests/eui/test_eui.py::test_eui64_dialects",
"netaddr/tests/eui/test_eui.py::test_eui64_dialect_property_assignment",
"netaddr/tests/eui/test_eui.py::test_eui64_custom_dialect",
"netaddr/tests/eui/test_eui.py::test_eui_oui_information",
"netaddr/tests/eui/test_eui.py::test_oui_constructor",
"netaddr/tests/eui/test_eui.py::test_eui_iab",
"netaddr/tests/eui/test_eui.py::test_eui64",
"netaddr/tests/eui/test_eui.py::test_mac_to_ipv6_link_local",
"netaddr/tests/eui/test_eui.py::test_iab",
"netaddr/tests/eui/test_eui.py::test_new_iab",
"netaddr/tests/eui/test_eui.py::test_eui48_vs_eui64",
"netaddr/tests/eui/test_eui.py::test_eui_sort_order",
"netaddr/tests/eui/test_eui.py::test_eui_pickle_support",
"netaddr/tests/eui/test_eui.py::test_mac_to_eui64_conversion",
"netaddr/tests/eui/test_eui.py::test_mac_to_ipv6",
"netaddr/tests/eui/test_eui.py::test_eui64_constructor"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-02-06 00:03:45+00:00
|
bsd-3-clause
| 4,125 |
|
netromdk__vermin-173
|
diff --git a/vermin/source_visitor.py b/vermin/source_visitor.py
index 26f5191..440563a 100644
--- a/vermin/source_visitor.py
+++ b/vermin/source_visitor.py
@@ -769,16 +769,17 @@ class SourceVisitor(ast.NodeVisitor):
def __add_kwargs(self, function, keyword, line=None, col=None):
if function in self.__user_defs: # pragma: no cover
self.__vvvvprint("Ignoring function '{}' because it's user-defined!".format(function))
- return
+ return False
if self.__config.is_excluded_kwarg(function, keyword):
self.__vvprint("Excluding kwarg: {}({})".format(function, keyword))
- return
+ return False
fn_kw = (function, keyword)
if fn_kw not in self.__kwargs:
self.__kwargs.append(fn_kw)
self.__add_line_col(fn_kw, line, col)
+ return True
def __add_user_func_deco(self, ufd, line=None, col=None):
if ufd in self.__user_defs:
@@ -1268,6 +1269,7 @@ class SourceVisitor(ast.NodeVisitor):
self.generic_visit(node)
def visit_keyword(self, node):
+ added = False
for func_name in self.__function_name_stack:
# kwarg related.
exp_name = func_name.split(".")
@@ -1275,28 +1277,32 @@ class SourceVisitor(ast.NodeVisitor):
# Check if function is imported from module.
if func_name in self.__import_mem_mod:
mod = self.__import_mem_mod[func_name]
- self.__add_kwargs(dotted_name([mod, func_name]), node.arg, self.__line)
+ added |= self.__add_kwargs(dotted_name([mod, func_name]), node.arg, self.__line)
# When having "ElementTree.tostringlist", for instance, and include mapping "{'ElementTree':
# 'xml.etree'}" then try piecing them together to form a match.
elif exp_name[0] in self.__import_mem_mod:
mod = self.__import_mem_mod[exp_name[0]]
- self.__add_kwargs(dotted_name([mod, func_name]), node.arg, self.__line)
+ added |= self.__add_kwargs(dotted_name([mod, func_name]), node.arg, self.__line)
# Lookup indirect names via variables.
elif exp_name[0] in self.__name_res:
res = self.__name_res[exp_name[0]]
if res in self.__import_mem_mod:
mod = self.__import_mem_mod[res]
- self.__add_kwargs(dotted_name([mod, res, exp_name[1:]]), node.arg, self.__line)
+ added |= self.__add_kwargs(dotted_name([mod, res, exp_name[1:]]), node.arg, self.__line)
# Try as FQN.
else:
- self.__add_kwargs(dotted_name([res, exp_name[1:]]), node.arg, self.__line)
+ added |= self.__add_kwargs(dotted_name([res, exp_name[1:]]), node.arg, self.__line)
# Only add direct function if not found via module/class/member.
else:
- self.__add_kwargs(func_name, node.arg, self.__line)
+ added |= self.__add_kwargs(func_name, node.arg, self.__line)
+
+ # If not excluded or ignored then visit keyword values also.
+ if added:
+ self.generic_visit(node)
def visit_Bytes(self, node):
self.__bytesv3 = True
|
netromdk/vermin
|
a2229690af9c381768e1a173b7a74ca7d578e456
|
diff --git a/tests/lang.py b/tests/lang.py
index f5adaf8..4243721 100644
--- a/tests/lang.py
+++ b/tests/lang.py
@@ -2128,3 +2128,13 @@ except OSError as ex:
def test_True_constant(self):
self.assertOnlyIn(((2, 3), (3, 0)), self.detect("True"))
+
+ def test_issue_168_keyword_values(self):
+ visitor = self.visit("""
+ret = subparser.add_parser("qemu")
+ret.add_argument("--efi", action=argparse.BooleanOptionalAction, help="...")
+""")
+
+ # `argparse.BooleanOptionalAction` requires !2, 3.9 but the keyword values weren't visited
+ # before.
+ self.assertOnlyIn((3, 9), visitor.minimum_versions())
|
python 3.9 feature not detected: argparse.BooleanOptionalAction
**Describe the bug**
`argparse.BooleanOptionalAction` is a Python 3.9 feature, but vermin doesn't detect it.
**To Reproduce**
Example code:
```python3
#!/usr/bin/env python3
ret = subparser.add_parser("qemu")
ret.add_argument("--efi", action=argparse.BooleanOptionalAction, help="...")
```
Running `vermin -t=3.7-` on it doesn't report that `argparse.BooleanOptionalAction` is only available in Python >=3.9.
See https://docs.python.org/3/library/argparse.html.
**Expected behavior**
Report that `argparse.BooleanOptionalAction` is only available in Python >=3.9.
**Environment (please complete the following information):**
vermin version: 1.5.1
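A stdlib-only sketch of where the attribute hides in the AST: the fix makes vermin's visitor recurse into each `keyword` node's value, which is where `argparse.BooleanOptionalAction` lives.

```python
import ast

call = ast.parse(
    'ret.add_argument("--efi", action=argparse.BooleanOptionalAction)'
).body[0].value                 # the ast.Call node
kw = call.keywords[0]           # the action=... keyword
print(kw.arg)                   # 'action'
print(type(kw.value).__name__)  # 'Attribute' -> argparse.BooleanOptionalAction
```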
|
0.0
|
a2229690af9c381768e1a173b7a74ca7d578e456
|
[
"tests/lang.py::VerminLanguageTests::test_issue_168_keyword_values"
] |
[
"tests/lang.py::VerminLanguageTests::test_False_constant",
"tests/lang.py::VerminLanguageTests::test_True_constant",
"tests/lang.py::VerminLanguageTests::test_ann_assign_target_ignore_user_def",
"tests/lang.py::VerminLanguageTests::test_assign_target_ignore_user_def",
"tests/lang.py::VerminLanguageTests::test_async_comprehension",
"tests/lang.py::VerminLanguageTests::test_async_for",
"tests/lang.py::VerminLanguageTests::test_async_generator",
"tests/lang.py::VerminLanguageTests::test_async_multi_withitem",
"tests/lang.py::VerminLanguageTests::test_async_with_parentheses",
"tests/lang.py::VerminLanguageTests::test_async_with_statement",
"tests/lang.py::VerminLanguageTests::test_aug_assign_target_ignore_user_def",
"tests/lang.py::VerminLanguageTests::test_await_in_comprehension",
"tests/lang.py::VerminLanguageTests::test_bare_except_handler",
"tests/lang.py::VerminLanguageTests::test_builtin_generic_type_annotation",
"tests/lang.py::VerminLanguageTests::test_bytearray_format",
"tests/lang.py::VerminLanguageTests::test_bytes_directives",
"tests/lang.py::VerminLanguageTests::test_bytes_format",
"tests/lang.py::VerminLanguageTests::test_bytes_from_type",
"tests/lang.py::VerminLanguageTests::test_bytesv3",
"tests/lang.py::VerminLanguageTests::test_class_decorators",
"tests/lang.py::VerminLanguageTests::test_class_name_ignore_user_def",
"tests/lang.py::VerminLanguageTests::test_comprehension_assign_target_ignore_user_def",
"tests/lang.py::VerminLanguageTests::test_continue_in_finally",
"tests/lang.py::VerminLanguageTests::test_coroutines_async",
"tests/lang.py::VerminLanguageTests::test_coroutines_await",
"tests/lang.py::VerminLanguageTests::test_detect_except_members",
"tests/lang.py::VerminLanguageTests::test_detect_raise_members",
"tests/lang.py::VerminLanguageTests::test_dict_comprehension",
"tests/lang.py::VerminLanguageTests::test_dict_from_type",
"tests/lang.py::VerminLanguageTests::test_dict_union",
"tests/lang.py::VerminLanguageTests::test_dict_union_merge",
"tests/lang.py::VerminLanguageTests::test_ellipsis_in_slices",
"tests/lang.py::VerminLanguageTests::test_ellipsis_out_of_slices",
"tests/lang.py::VerminLanguageTests::test_except_handler_ignore_user_def",
"tests/lang.py::VerminLanguageTests::test_except_star",
"tests/lang.py::VerminLanguageTests::test_float_from_type",
"tests/lang.py::VerminLanguageTests::test_for_target_ignore_user_def",
"tests/lang.py::VerminLanguageTests::test_format",
"tests/lang.py::VerminLanguageTests::test_frozenset_from_type",
"tests/lang.py::VerminLanguageTests::test_fstrings",
"tests/lang.py::VerminLanguageTests::test_fstrings_named_expr",
"tests/lang.py::VerminLanguageTests::test_fstrings_self_doc",
"tests/lang.py::VerminLanguageTests::test_func_arg_ignore_user_def",
"tests/lang.py::VerminLanguageTests::test_func_kwarg_ignore_user_def",
"tests/lang.py::VerminLanguageTests::test_func_kwonlyarg_ignore_user_def",
"tests/lang.py::VerminLanguageTests::test_func_name_ignore_user_def",
"tests/lang.py::VerminLanguageTests::test_func_posonlyarg_ignore_user_def",
"tests/lang.py::VerminLanguageTests::test_func_vararg_ignore_user_def",
"tests/lang.py::VerminLanguageTests::test_function_decorators",
"tests/lang.py::VerminLanguageTests::test_generalized_unpacking",
"tests/lang.py::VerminLanguageTests::test_import_as_target_ignore_user_def",
"tests/lang.py::VerminLanguageTests::test_import_from_as_target_ignore_user_def",
"tests/lang.py::VerminLanguageTests::test_infix_matrix_multiplication",
"tests/lang.py::VerminLanguageTests::test_int_from_type",
"tests/lang.py::VerminLanguageTests::test_issue_66_annotations",
"tests/lang.py::VerminLanguageTests::test_kw_only_args",
"tests/lang.py::VerminLanguageTests::test_lambda_arg_ignore_user_def",
"tests/lang.py::VerminLanguageTests::test_list_from_type",
"tests/lang.py::VerminLanguageTests::test_long_from_type",
"tests/lang.py::VerminLanguageTests::test_longv2",
"tests/lang.py::VerminLanguageTests::test_module_dir_func",
"tests/lang.py::VerminLanguageTests::test_module_getattr_func",
"tests/lang.py::VerminLanguageTests::test_multi_withitem",
"tests/lang.py::VerminLanguageTests::test_named_expr_assign_target_ignore_user_def",
"tests/lang.py::VerminLanguageTests::test_named_expressions",
"tests/lang.py::VerminLanguageTests::test_nonlocal_stmt",
"tests/lang.py::VerminLanguageTests::test_pattern_matching",
"tests/lang.py::VerminLanguageTests::test_pos_only_args",
"tests/lang.py::VerminLanguageTests::test_print_v2_v3_mixed",
"tests/lang.py::VerminLanguageTests::test_printv2",
"tests/lang.py::VerminLanguageTests::test_printv3",
"tests/lang.py::VerminLanguageTests::test_raise_cause",
"tests/lang.py::VerminLanguageTests::test_raise_from_none",
"tests/lang.py::VerminLanguageTests::test_relaxed_decorators",
"tests/lang.py::VerminLanguageTests::test_set_comprehension",
"tests/lang.py::VerminLanguageTests::test_set_from_type",
"tests/lang.py::VerminLanguageTests::test_set_literals",
"tests/lang.py::VerminLanguageTests::test_str_from_type",
"tests/lang.py::VerminLanguageTests::test_super_no_args",
"tests/lang.py::VerminLanguageTests::test_unicode_from_type",
"tests/lang.py::VerminLanguageTests::test_union_types",
"tests/lang.py::VerminLanguageTests::test_unpacking_assignment",
"tests/lang.py::VerminLanguageTests::test_with_items_ignore_user_def",
"tests/lang.py::VerminLanguageTests::test_with_parentheses",
"tests/lang.py::VerminLanguageTests::test_with_parentheses_first_match",
"tests/lang.py::VerminLanguageTests::test_with_parentheses_second_match",
"tests/lang.py::VerminLanguageTests::test_with_statement",
"tests/lang.py::VerminLanguageTests::test_yield_from"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-04-04 19:07:54+00:00
|
mit
| 4,126 |
|
netromdk__vermin-196
|
diff --git a/vermin/backports.py b/vermin/backports.py
index f10c6f8..2d32b07 100644
--- a/vermin/backports.py
+++ b/vermin/backports.py
@@ -27,6 +27,8 @@ BACKPORTS = (
("typing_extensions==4.0", ["https://pypi.org/project/typing-extensions/4.0.0/"], (None, (3, 6))),
("typing_extensions==4.3", ["https://pypi.org/project/typing-extensions/4.3.0/"], (None, (3, 7))),
("typing_extensions", ["https://pypi.org/project/typing-extensions/4.3.0/"], (None, (3, 7))),
+
+ ("zoneinfo", ["https://pypi.org/project/backports.zoneinfo/"], (None, (3, 6))),
)
class Backports:
diff --git a/vermin/rules.py b/vermin/rules.py
index f80f006..0d76ee5 100644
--- a/vermin/rules.py
+++ b/vermin/rules.py
@@ -192,7 +192,7 @@ def MOD_REQS(config):
"xmlrpclib": ((2, 2), None),
"zipapp": (None, (3, 5)),
"zipimport": ((2, 3), (3, 0)),
- "zoneinfo": (None, (3, 9)),
+ "zoneinfo": bpv("zoneinfo", (None, (3, 9)), config),
}
# Module member requirements: member -> (module, requirements)
|
netromdk/vermin
|
5824cc0036fd71946e2cfff1ba65ceddbeae5f56
|
diff --git a/tests/backports.py b/tests/backports.py
index c8061e1..e09c89a 100644
--- a/tests/backports.py
+++ b/tests/backports.py
@@ -20,6 +20,7 @@ class VerminBackportsTests(VerminTest):
"typing_extensions==4.0",
"typing_extensions==4.3",
"typing_extensions",
+ "zoneinfo",
), Backports.modules())
def test_is_backport(self):
@@ -41,7 +42,8 @@ class VerminBackportsTests(VerminTest):
typing - https://pypi.org/project/typing/ (2.7, 3.2)
typing_extensions==4.0 - https://pypi.org/project/typing-extensions/4.0.0/ (!2, 3.6)
typing_extensions==4.3 - https://pypi.org/project/typing-extensions/4.3.0/ (!2, 3.7)
- typing_extensions - https://pypi.org/project/typing-extensions/4.3.0/ (!2, 3.7)""",
+ typing_extensions - https://pypi.org/project/typing-extensions/4.3.0/ (!2, 3.7)
+ zoneinfo - https://pypi.org/project/backports.zoneinfo/ (!2, 3.6)""",
Backports.str(3))
def test_version_filter(self):
@@ -66,6 +68,7 @@ class VerminBackportsTests(VerminTest):
"statistics",
"typing",
"typing_extensions",
+ "zoneinfo",
], Backports.unversioned_filter(Backports.modules()))
def test_expand_versions(self):
|
False positive when using backported zoneinfo
**Describe the bug**
I'm trying to use `vermin` on a project that uses `zoneinfo`. The project is properly set up to install the backport on Python < 3.9.
However, vermin reports this as a violation, and won't take `--backport zoneinfo`.
**To Reproduce**
```sh
git clone https://github.com/WhyNotHugo/django-afip.git
cd django-afip
vermin --no-tips --target=3.7 --violations .
vermin --no-tips --target=3.7 --violations --backport zoneinfo .
```
**Expected behavior**
The latter should work.
**Environment (please complete the following information):**
```console
> vermin --version
1.5.1
> git --version
git version 2.40.1
```
**Additional context**
-
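For context, a minimal sketch (hypothetical, not quoted from django-afip) of the conditional-import pattern such projects use; with the patch above, `--backport zoneinfo` tells Vermin the module is satisfiable down to 3.6:
```python
# Hypothetical fallback import; backports.zoneinfo is the published PyPI
# backport referenced in the patch above.
try:
    import zoneinfo  # standard library since Python 3.9
except ImportError:
    from backports import zoneinfo  # pip install backports.zoneinfo

print(zoneinfo.ZoneInfo("UTC"))
```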
|
0.0
|
5824cc0036fd71946e2cfff1ba65ceddbeae5f56
|
[
"tests/backports.py::VerminBackportsTests::test_modules",
"tests/backports.py::VerminBackportsTests::test_str",
"tests/backports.py::VerminBackportsTests::test_unversioned_filter"
] |
[
"tests/backports.py::VerminBackportsTests::test_expand_versions",
"tests/backports.py::VerminBackportsTests::test_is_backport",
"tests/backports.py::VerminBackportsTests::test_version_filter"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-06-18 15:19:50+00:00
|
mit
| 4,127 |
|
netromdk__vermin-234
|
diff --git a/vermin/arguments.py b/vermin/arguments.py
index 71fc6e4..9fc7189 100644
--- a/vermin/arguments.py
+++ b/vermin/arguments.py
@@ -63,10 +63,11 @@ class Arguments:
print(" --target=V | -t=V\n"
" Target version that files must abide by. Can be specified once or twice.\n"
" A '-' can be appended to match target version or smaller, like '-t=3.5-'.\n"
- " If not met Vermin will exit with code 1. Note that the amount of target\n"
- " versions must match the amount of minimum required versions detected.\n"
- " However, if used in conjunction with --violations, and no rules are\n"
- " triggered, it will exit with code 0.\n")
+ " If not met Vermin will exit with code 1. Vermin will only compare target\n"
+ " versions with the same major version, so if you do not care about Python\n"
+ " 2, you can just specify one target for Python 3. However, if used in\n"
+ " conjunction with --violations, and no rules are triggered, it will exit\n"
+ " with code 0.\n")
print(" --no-target (default)\n"
" Don't expect certain target version(s).\n")
print(" --processes=N | -p=N\n"
diff --git a/vermin/main.py b/vermin/main.py
index be7a883..e0425db 100644
--- a/vermin/main.py
+++ b/vermin/main.py
@@ -7,7 +7,7 @@ from .printing import nprint, vprint
from .detection import detect_paths
from .processor import Processor
from .arguments import Arguments
-from .utility import version_strings, dotted_name
+from .utility import version_strings, dotted_name, compare_requirements
from .backports import Backports
def main():
@@ -144,16 +144,10 @@ def main():
# don't fail wrt. targets.
all_inconclusive = config.only_show_violations() and len(reqs) > 0 and \
all(req == (0, 0) for req in reqs)
- if not all_inconclusive and\
- not (len(reqs) == len(targets) and
- all(((exact and target == req) or (not exact and target >= req)) for
- ((exact, target), req) in zip(targets, reqs))):
+ if not all_inconclusive and not compare_requirements(reqs, targets):
if not parsable:
vers = ["{}{}".format(dotted_name(t), "-" if not e else "") for (e, t) in targets]
nprint("Target versions not met: {}".format(version_strings(vers)), config)
- if len(targets) < len(reqs):
- nprint("Note: Number of specified targets ({}) doesn't match number of detected minimum "
- "versions ({}).".format(len(targets), len(reqs)), config)
sys.exit(1)
sys.exit(0)
diff --git a/vermin/utility.py b/vermin/utility.py
index 61bb00a..b38ae41 100644
--- a/vermin/utility.py
+++ b/vermin/utility.py
@@ -189,3 +189,20 @@ def parse_target(target):
return None
return (exact, elms)
+
+def compare_requirements(reqs, targets):
+ maj_to_req = {ver[0]: ver for ver in reqs}
+ maj_to_target = {ver[0]: (exact, ver) for (exact, ver) in targets}
+ common_major_versions = set(maj_to_req.keys()) & set(maj_to_target.keys())
+ if not common_major_versions:
+ return False
+ if set(maj_to_target.keys()) - common_major_versions:
+ return False # target major version missing from the requirements
+ for maj in common_major_versions:
+ exact, target = maj_to_target[maj]
+ req = maj_to_req[maj]
+ if exact and target != req:
+ return False
+ if not exact and target < req:
+ return False
+ return True
|
netromdk/vermin
|
a0647aa2d315125c3c83d8789c8d3f086f23c899
|
diff --git a/tests/general.py b/tests/general.py
index f36ba6b..d540df5 100644
--- a/tests/general.py
+++ b/tests/general.py
@@ -12,6 +12,7 @@ from vermin import combine_versions, InvalidVersionException, detect_paths,\
remove_whitespace, main, sort_line_column, sort_line_column_parsable, version_strings,\
format_title_descs, DEFAULT_PROCESSES
from vermin.formats import ParsableFormat
+from vermin.utility import compare_requirements
from .testutils import VerminTest, current_version, ScopedTemporaryFile, detect, visit, touch, \
working_dir
@@ -1054,3 +1055,79 @@ three - three.one
def test_default_processes(self):
self.assertEqual(cpu_count(), DEFAULT_PROCESSES)
+
+ def test_compare_requirements_py2_and_py3_compatible(self):
+ reqs = [(2, 7), (3, 6)]
+
+ # User provides only one target
+ self.assertFalse(compare_requirements(reqs, [(True, (2, 6))]))
+ self.assertFalse(compare_requirements(reqs, [(False, (2, 6))]))
+ self.assertTrue(compare_requirements(reqs, [(True, (2, 7))]))
+ self.assertTrue(compare_requirements(reqs, [(False, (2, 7))]))
+ self.assertFalse(compare_requirements(reqs, [(True, (3, 3))]))
+ self.assertFalse(compare_requirements(reqs, [(False, (3, 3))]))
+ self.assertTrue(compare_requirements(reqs, [(True, (3, 6))]))
+ self.assertTrue(compare_requirements(reqs, [(False, (3, 6))]))
+ self.assertFalse(compare_requirements(reqs, [(True, (3, 7))]))
+ self.assertTrue(compare_requirements(reqs, [(False, (3, 7))]))
+
+ # Missing and invalid targets
+ self.assertFalse(compare_requirements(reqs, []))
+ self.assertFalse(compare_requirements(reqs, [(True, (4, 1))]))
+
+ # User provides multiple valid requirements, return true when both are
+ # satisfied.
+ self.assertTrue(compare_requirements(reqs, [(True, (2, 7)), (False, (3, 7))]))
+ self.assertFalse(compare_requirements(reqs, [(True, (2, 7)), (True, (3, 7))]))
+
+ # User provides valid along with invalid version: fail because the target
+ # major version is missing
+ self.assertFalse(compare_requirements(reqs, [(True, (2, 7)), (False, (4, 7))]))
+ self.assertFalse(compare_requirements(reqs, [(True, (2, 7)), (True, (4, 7))]))
+
+ def test_compare_requirements_py2_only(self):
+ reqs = [(2, 7)]
+
+ # Correct major version, compare against minor version
+ self.assertFalse(compare_requirements(reqs, [(True, (2, 6))]))
+ self.assertFalse(compare_requirements(reqs, [(False, (2, 6))]))
+ self.assertTrue(compare_requirements(reqs, [(True, (2, 7))]))
+ self.assertTrue(compare_requirements(reqs, [(False, (2, 7))]))
+
+ # The user specifies the wrong major version: this will always fail
+ self.assertFalse(compare_requirements(reqs, [(True, (3, 3))]))
+ self.assertFalse(compare_requirements(reqs, [(False, (3, 3))]))
+ self.assertFalse(compare_requirements(reqs, [(True, (3, 6))]))
+ self.assertFalse(compare_requirements(reqs, [(False, (3, 6))]))
+ self.assertFalse(compare_requirements(reqs, [(True, (3, 7))]))
+ self.assertFalse(compare_requirements(reqs, [(False, (3, 7))]))
+ self.assertFalse(compare_requirements(reqs, [(True, (4, 1))]))
+
+ # Missing target: fail
+ self.assertFalse(compare_requirements(reqs, []))
+
+ # Multiple targets: fail because one target major version is missing
+ self.assertFalse(compare_requirements(reqs, [(False, (2, 7)), (False, (3, 6))]))
+
+ def test_compare_requirements_py3_only(self):
+ reqs = [(3, 6)]
+ # The user specifies the wrong major version: this will always fail
+ self.assertFalse(compare_requirements(reqs, [(True, (2, 6))]))
+ self.assertFalse(compare_requirements(reqs, [(False, (2, 6))]))
+ self.assertFalse(compare_requirements(reqs, [(True, (2, 7))]))
+ self.assertFalse(compare_requirements(reqs, [(False, (2, 7))]))
+ self.assertFalse(compare_requirements(reqs, [(True, (4, 1))]))
+
+ # Correct major version, compare against minor version
+ self.assertFalse(compare_requirements(reqs, [(True, (3, 3))]))
+ self.assertFalse(compare_requirements(reqs, [(False, (3, 3))]))
+ self.assertTrue(compare_requirements(reqs, [(True, (3, 6))]))
+ self.assertTrue(compare_requirements(reqs, [(False, (3, 6))]))
+ self.assertFalse(compare_requirements(reqs, [(True, (3, 7))]))
+ self.assertTrue(compare_requirements(reqs, [(False, (3, 7))]))
+
+ # Missing and invalid requirements
+ self.assertFalse(compare_requirements(reqs, []))
+
+ # Multiple targets: fail because one target major version is missing
+ self.assertFalse(compare_requirements(reqs, [(False, (2, 7)), (False, (3, 6))]))
|
Vermin reports that code is incompatible but it is
**Describe the bug**
Vermin reports that my codebase is not compatible with 3.8, but it is. Oddly, it points out:
Minimum required versions: 2.3, 3.0
which sounds like it should be fine to me.
**To Reproduce**
```python
if sys.version_info < (3, 8): # noqa: UP036 # novermin
errstr = "khal only supports python version 3.8+. Please Upgrade.\n"
sys.stderr.write("#" * len(errstr) + '\n')
sys.stderr.write(errstr)
sys.stderr.write("#" * len(errstr) + '\n')
sys.exit(1)
```
Vermin fails, indicating that this code doesn't run for the specified version:
```console
> vermin -t=3.8 --violations setup.py
Detecting python files..
Analyzing using 24 processes..
Minimum required versions: 2.3, 3.0
Target versions not met: 3.8
Note: Number of specified targets (1) doesn't match number of detected minimum versions (2).
```
**Expected behavior**
This should work just fine.
**Environment (please complete the following information):**
- Via pre-commit, v1.5.2.
- From Alpine packages, v1.5.2
**Additional context**
Full file is here: https://github.com/pimutils/khal/blob/f5ca8883f2e3d8a403d7a6d16fa4f552b6b69283/setup.py
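For illustration, a short sketch exercising the new `compare_requirements` helper with values taken from the test patch above (targets are `(exact, version)` pairs; `exact=False` corresponds to a trailing `-` on the target):
```python
from vermin.utility import compare_requirements

reqs = [(2, 7), (3, 6)]  # detected minimums: Python 2.7 and Python 3.6

# Major versions are matched up, so one target per major is enough:
assert compare_requirements(reqs, [(False, (3, 7))])     # '-t=3.7-' accepts any 3.x minimum <= 3.7
assert not compare_requirements(reqs, [(True, (3, 7))])  # exact 3.7 != detected 3.6
assert compare_requirements(reqs, [(True, (2, 7)), (False, (3, 7))])
```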
|
0.0
|
a0647aa2d315125c3c83d8789c8d3f086f23c899
|
[
"tests/general.py::VerminGeneralTests::test_assign_rvalue_attribute",
"tests/general.py::VerminGeneralTests::test_combine_versions",
"tests/general.py::VerminGeneralTests::test_combine_versions_assert",
"tests/general.py::VerminGeneralTests::test_compare_requirements_py2_and_py3_compatible",
"tests/general.py::VerminGeneralTests::test_compare_requirements_py2_only",
"tests/general.py::VerminGeneralTests::test_compare_requirements_py3_only",
"tests/general.py::VerminGeneralTests::test_default_processes",
"tests/general.py::VerminGeneralTests::test_detect_hidden_paths",
"tests/general.py::VerminGeneralTests::test_detect_hidden_paths_incrementally",
"tests/general.py::VerminGeneralTests::test_detect_min_version",
"tests/general.py::VerminGeneralTests::test_detect_min_version_assert",
"tests/general.py::VerminGeneralTests::test_detect_nonexistent_paths_incrementally",
"tests/general.py::VerminGeneralTests::test_detect_nonexistent_paths_with_dot_incrementally",
"tests/general.py::VerminGeneralTests::test_detect_paths",
"tests/general.py::VerminGeneralTests::test_detect_paths_incrementally",
"tests/general.py::VerminGeneralTests::test_detect_top_level_paths_incrementally",
"tests/general.py::VerminGeneralTests::test_detect_vermin_min_versions",
"tests/general.py::VerminGeneralTests::test_detect_vermin_min_versions_parsable",
"tests/general.py::VerminGeneralTests::test_detect_vermin_paths_all_exts",
"tests/general.py::VerminGeneralTests::test_detect_vermin_paths_directly",
"tests/general.py::VerminGeneralTests::test_detect_vermin_paths_no_invalid_exts",
"tests/general.py::VerminGeneralTests::test_detect_without_config",
"tests/general.py::VerminGeneralTests::test_dotted_name",
"tests/general.py::VerminGeneralTests::test_dotted_name_assert",
"tests/general.py::VerminGeneralTests::test_exclude_directory_regex",
"tests/general.py::VerminGeneralTests::test_exclude_pyi_regex",
"tests/general.py::VerminGeneralTests::test_exclude_regex_relative",
"tests/general.py::VerminGeneralTests::test_format",
"tests/general.py::VerminGeneralTests::test_format_title_descs",
"tests/general.py::VerminGeneralTests::test_ignore_members_when_user_defined_classes",
"tests/general.py::VerminGeneralTests::test_ignore_members_when_user_defined_funcs",
"tests/general.py::VerminGeneralTests::test_ignore_modules_when_user_defined_classes",
"tests/general.py::VerminGeneralTests::test_ignore_modules_when_user_defined_funcs",
"tests/general.py::VerminGeneralTests::test_ignore_non_top_level_imports",
"tests/general.py::VerminGeneralTests::test_main_full_usage",
"tests/general.py::VerminGeneralTests::test_main_no_args",
"tests/general.py::VerminGeneralTests::test_main_no_paths",
"tests/general.py::VerminGeneralTests::test_main_no_rules_hit",
"tests/general.py::VerminGeneralTests::test_main_no_rules_hit_target_not_met_violations_mode",
"tests/general.py::VerminGeneralTests::test_main_parsable_dont_ignore_paths_with_colon_in_drive_part",
"tests/general.py::VerminGeneralTests::test_main_parsable_has_last_results_line",
"tests/general.py::VerminGeneralTests::test_main_print_version",
"tests/general.py::VerminGeneralTests::test_main_print_versions_range",
"tests/general.py::VerminGeneralTests::test_main_target_not_met",
"tests/general.py::VerminGeneralTests::test_member_class",
"tests/general.py::VerminGeneralTests::test_member_constant",
"tests/general.py::VerminGeneralTests::test_member_function",
"tests/general.py::VerminGeneralTests::test_member_kwargs",
"tests/general.py::VerminGeneralTests::test_mod_inverse_pow",
"tests/general.py::VerminGeneralTests::test_modules",
"tests/general.py::VerminGeneralTests::test_pessimistic_syntax_error",
"tests/general.py::VerminGeneralTests::test_probably_python_file",
"tests/general.py::VerminGeneralTests::test_process_file_not_Found",
"tests/general.py::VerminGeneralTests::test_process_file_using_backport",
"tests/general.py::VerminGeneralTests::test_process_invalid_versions",
"tests/general.py::VerminGeneralTests::test_process_syntax_error",
"tests/general.py::VerminGeneralTests::test_processor_argparse_backport_spawn_or_fork",
"tests/general.py::VerminGeneralTests::test_processor_incompatible",
"tests/general.py::VerminGeneralTests::test_processor_indent_show_output_text",
"tests/general.py::VerminGeneralTests::test_processor_maybe_annotations",
"tests/general.py::VerminGeneralTests::test_processor_maybe_annotations_default",
"tests/general.py::VerminGeneralTests::test_processor_separately_incompatible",
"tests/general.py::VerminGeneralTests::test_processor_used_novermin",
"tests/general.py::VerminGeneralTests::test_processor_value_error",
"tests/general.py::VerminGeneralTests::test_remove_whitespace",
"tests/general.py::VerminGeneralTests::test_reverse_range",
"tests/general.py::VerminGeneralTests::test_sort_line_column",
"tests/general.py::VerminGeneralTests::test_sort_line_column_parsable",
"tests/general.py::VerminGeneralTests::test_strftime_directives",
"tests/general.py::VerminGeneralTests::test_user_defined",
"tests/general.py::VerminGeneralTests::test_version_strings",
"tests/general.py::VerminGeneralTests::test_version_strings_assert",
"tests/general.py::VerminGeneralTests::test_visit_has_output_text",
"tests/general.py::VerminGeneralTests::test_visit_output_text_has_correct_lines",
"tests/general.py::VerminGeneralTests::test_visit_output_text_has_correct_lines_parsable",
"tests/general.py::VerminGeneralTests::test_visit_without_config"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-10-10 17:33:45+00:00
|
mit
| 4,128 |
|
netromdk__vermin-8
|
diff --git a/.travis.yml b/.travis.yml
index ef67b64..2726025 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -16,8 +16,8 @@ python:
install:
- pip install -U pip virtualenv
script:
-- if [[ $TRAVIS_PYTHON_VERSION != 3.2 ]]; then make setup-venv setup-coverage test-coverage; else make test; fi
-- if [[ $TRAVIS_PYTHON_VERSION > 3.2 ]]; then make setup-misc; source .venv/bin/activate; make check; fi
-- if [[ $TRAVIS_PYTHON_VERSION > 3.2 && $TRAVIS_PYTHON_VERSION < 3.7 ]]; then make setup-bandit; source .venv/bin/activate; make security-check; fi
+- if [[ $TRAVIS_PYTHON_VERSION != 3.2 && $TRAVIS_PYTHON_VERSION != 3.3 ]]; then make setup-venv setup-coverage test-coverage; else make test; fi
+- if [[ $TRAVIS_PYTHON_VERSION > 3.3 ]]; then make setup-misc; source .venv/bin/activate; make check; fi
+- if [[ $TRAVIS_PYTHON_VERSION > 3.3 && $TRAVIS_PYTHON_VERSION < 3.7 ]]; then make setup-bandit; source .venv/bin/activate; make security-check; fi
after_success:
-- if [[ $TRAVIS_PYTHON_VERSION != 3.2 ]]; then make coveralls; fi
+- if [[ $TRAVIS_PYTHON_VERSION != 3.2 && $TRAVIS_PYTHON_VERSION != 3.3 ]]; then make coveralls; fi
diff --git a/vermin/rules.py b/vermin/rules.py
index 947d5b9..d54363e 100644
--- a/vermin/rules.py
+++ b/vermin/rules.py
@@ -75,6 +75,7 @@ MOD_REQS = {
"numbers": (2.6, 3.0),
"optparse": (2.3, 3.0),
"ossaudiodev": (2.3, 3.0),
+ "pathlib": (None, 3.4),
"pickletools": (2.3, 3.0),
"pkgutil": (2.3, 3.0),
"platform": (2.3, 3.0),
|
netromdk/vermin
|
653670b33ca9a54c95bd1ae06c56ce7474197a82
|
diff --git a/tests/module.py b/tests/module.py
index b8316fa..9900459 100644
--- a/tests/module.py
+++ b/tests/module.py
@@ -349,3 +349,6 @@ class VerminModuleTests(VerminTest):
def test_venv(self):
self.assertOnlyIn(3.3, detect("import venv"))
+
+ def test_pathlib(self):
+ self.assertOnlyIn(3.4, detect("import pathlib"))
|
pathlib module missing in checks
**Describe the bug**
The pathlib module has been available since Python 3.4 but is not in the checks.
**To Reproduce**
Run Vermin on code that uses the pathlib module: it does not report a minimum version of 3.4.
**Expected behavior**
The minimum version should then be Python 3.4, with no Python 2 support.
**Environment (please complete the following information):**
Vermin 0.4.4
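For reference, a minimal sketch of how entries in the rule table above are interpreted (the table name mirrors `vermin/rules.py`; the lookup helper is hypothetical):
```python
# module -> (minimum Python 2 version or None, minimum Python 3 version or None)
MOD_REQS = {
    "pathlib": (None, 3.4),  # no Python 2 support; requires Python 3.4+
    "optparse": (2.3, 3.0),  # available on both major lines
}

def min_versions(module):
    """Hypothetical lookup: return the (py2, py3) minimums for a module."""
    return MOD_REQS.get(module)

print(min_versions("pathlib"))  # (None, 3.4)
```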
|
0.0
|
653670b33ca9a54c95bd1ae06c56ce7474197a82
|
[
"tests/module.py::VerminModuleTests::test_pathlib"
] |
[
"tests/module.py::VerminModuleTests::test_ConfigParser",
"tests/module.py::VerminModuleTests::test_DocXMLRPCServer",
"tests/module.py::VerminModuleTests::test_HTMLParser",
"tests/module.py::VerminModuleTests::test_Queue",
"tests/module.py::VerminModuleTests::test_SimpleXMLRPCServer",
"tests/module.py::VerminModuleTests::test_SocketServer",
"tests/module.py::VerminModuleTests::test_Tkinter",
"tests/module.py::VerminModuleTests::test___builtin__",
"tests/module.py::VerminModuleTests::test___future__",
"tests/module.py::VerminModuleTests::test__dummy_thread",
"tests/module.py::VerminModuleTests::test__markupbase",
"tests/module.py::VerminModuleTests::test__winreg",
"tests/module.py::VerminModuleTests::test_abc",
"tests/module.py::VerminModuleTests::test_argparse",
"tests/module.py::VerminModuleTests::test_ast",
"tests/module.py::VerminModuleTests::test_asyncio",
"tests/module.py::VerminModuleTests::test_atexit",
"tests/module.py::VerminModuleTests::test_builtins",
"tests/module.py::VerminModuleTests::test_bz2",
"tests/module.py::VerminModuleTests::test_cProfile",
"tests/module.py::VerminModuleTests::test_cgitb",
"tests/module.py::VerminModuleTests::test_collections",
"tests/module.py::VerminModuleTests::test_configparser",
"tests/module.py::VerminModuleTests::test_contextlib",
"tests/module.py::VerminModuleTests::test_cookielib",
"tests/module.py::VerminModuleTests::test_copy_reg",
"tests/module.py::VerminModuleTests::test_copyreg",
"tests/module.py::VerminModuleTests::test_csv",
"tests/module.py::VerminModuleTests::test_ctypes",
"tests/module.py::VerminModuleTests::test_datetime",
"tests/module.py::VerminModuleTests::test_dbm_io",
"tests/module.py::VerminModuleTests::test_dbm_ndbm",
"tests/module.py::VerminModuleTests::test_dbm_os",
"tests/module.py::VerminModuleTests::test_dbm_struct",
"tests/module.py::VerminModuleTests::test_dbm_sys",
"tests/module.py::VerminModuleTests::test_dbm_whichdb",
"tests/module.py::VerminModuleTests::test_decimal",
"tests/module.py::VerminModuleTests::test_difflib",
"tests/module.py::VerminModuleTests::test_dummy_thread",
"tests/module.py::VerminModuleTests::test_dummy_threading",
"tests/module.py::VerminModuleTests::test_email",
"tests/module.py::VerminModuleTests::test_email_charset",
"tests/module.py::VerminModuleTests::test_email_contentmanager",
"tests/module.py::VerminModuleTests::test_email_header",
"tests/module.py::VerminModuleTests::test_email_headerregistry",
"tests/module.py::VerminModuleTests::test_email_policy",
"tests/module.py::VerminModuleTests::test_faulthandler",
"tests/module.py::VerminModuleTests::test_fractions",
"tests/module.py::VerminModuleTests::test_functools",
"tests/module.py::VerminModuleTests::test_future_builtins",
"tests/module.py::VerminModuleTests::test_hashlib",
"tests/module.py::VerminModuleTests::test_heapq",
"tests/module.py::VerminModuleTests::test_hmac",
"tests/module.py::VerminModuleTests::test_hotshot",
"tests/module.py::VerminModuleTests::test_html",
"tests/module.py::VerminModuleTests::test_htmlentitydefs",
"tests/module.py::VerminModuleTests::test_http",
"tests/module.py::VerminModuleTests::test_http_cookiejar",
"tests/module.py::VerminModuleTests::test_importlib",
"tests/module.py::VerminModuleTests::test_inspect",
"tests/module.py::VerminModuleTests::test_io",
"tests/module.py::VerminModuleTests::test_ipaddress",
"tests/module.py::VerminModuleTests::test_itertools",
"tests/module.py::VerminModuleTests::test_json",
"tests/module.py::VerminModuleTests::test_logging",
"tests/module.py::VerminModuleTests::test_lzma",
"tests/module.py::VerminModuleTests::test_markupbase",
"tests/module.py::VerminModuleTests::test_md5",
"tests/module.py::VerminModuleTests::test_modulefinder",
"tests/module.py::VerminModuleTests::test_msilib",
"tests/module.py::VerminModuleTests::test_multiprocessing",
"tests/module.py::VerminModuleTests::test_new",
"tests/module.py::VerminModuleTests::test_numbers",
"tests/module.py::VerminModuleTests::test_optparse",
"tests/module.py::VerminModuleTests::test_ossaudiodev",
"tests/module.py::VerminModuleTests::test_pickletools",
"tests/module.py::VerminModuleTests::test_pkgutil",
"tests/module.py::VerminModuleTests::test_platform",
"tests/module.py::VerminModuleTests::test_pydoc",
"tests/module.py::VerminModuleTests::test_queue",
"tests/module.py::VerminModuleTests::test_repr",
"tests/module.py::VerminModuleTests::test_reprlib",
"tests/module.py::VerminModuleTests::test_runpy",
"tests/module.py::VerminModuleTests::test_secrets",
"tests/module.py::VerminModuleTests::test_sets",
"tests/module.py::VerminModuleTests::test_shlex",
"tests/module.py::VerminModuleTests::test_socketserver",
"tests/module.py::VerminModuleTests::test_spwd",
"tests/module.py::VerminModuleTests::test_sqlite3",
"tests/module.py::VerminModuleTests::test_ssl",
"tests/module.py::VerminModuleTests::test_string_letters",
"tests/module.py::VerminModuleTests::test_string_lowercase",
"tests/module.py::VerminModuleTests::test_string_uppercase",
"tests/module.py::VerminModuleTests::test_stringprep",
"tests/module.py::VerminModuleTests::test_subprocess",
"tests/module.py::VerminModuleTests::test_sysconfig",
"tests/module.py::VerminModuleTests::test_tarfile",
"tests/module.py::VerminModuleTests::test_textwrap",
"tests/module.py::VerminModuleTests::test_timeit",
"tests/module.py::VerminModuleTests::test_tkinter",
"tests/module.py::VerminModuleTests::test_tracemalloc",
"tests/module.py::VerminModuleTests::test_typing",
"tests/module.py::VerminModuleTests::test_unittest",
"tests/module.py::VerminModuleTests::test_unittest_mock",
"tests/module.py::VerminModuleTests::test_urllib2",
"tests/module.py::VerminModuleTests::test_uuid",
"tests/module.py::VerminModuleTests::test_venv",
"tests/module.py::VerminModuleTests::test_warnings",
"tests/module.py::VerminModuleTests::test_weakref",
"tests/module.py::VerminModuleTests::test_winreg",
"tests/module.py::VerminModuleTests::test_wsgiref",
"tests/module.py::VerminModuleTests::test_xmlrpc",
"tests/module.py::VerminModuleTests::test_xmlrpc_client",
"tests/module.py::VerminModuleTests::test_xmlrpc_server",
"tests/module.py::VerminModuleTests::test_xmlrpclib",
"tests/module.py::VerminModuleTests::test_zipimport"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-07-13 09:54:23+00:00
|
mit
| 4,129 |
|
netsiphd__netrd-264
|
diff --git a/netrd/utilities/entropy.py b/netrd/utilities/entropy.py
index dff68e2..9d6f3d7 100644
--- a/netrd/utilities/entropy.py
+++ b/netrd/utilities/entropy.py
@@ -15,7 +15,7 @@ from scipy.stats import entropy as sp_entropy
def js_divergence(P, Q):
- """Jenson-Shannon divergence between `P` and `Q`.
+ """Jensen-Shannon divergence between `P` and `Q`.
Parameters
----------
@@ -35,8 +35,10 @@ def js_divergence(P, Q):
return 0.5 * (sp_entropy(P, M, base=2) + sp_entropy(Q, M, base=2))
-def entropy(var):
- """Return the Shannon entropy of a variable.
+def entropy_from_seq(var):
+ """Return the Shannon entropy of a variable. This differs from
+ Scipy's entropy by taking a sequence of observations as input
+ rather than a histogram or probability distribution.
Parameters
----------
@@ -65,7 +67,7 @@ def joint_entropy(data):
Returns
-------
float
- Joint entrpoy of the variables of interests.
+ Joint entropy of the variables of interest.
Notes
-----
|
netsiphd/netrd
|
fa2c163376a88ed72ba15649190b1a2b23b1cb9a
|
diff --git a/tests/test_utilities.py b/tests/test_utilities.py
index 8837207..5b2ab79 100644
--- a/tests/test_utilities.py
+++ b/tests/test_utilities.py
@@ -8,7 +8,7 @@ Test utility functions.
import numpy as np
from netrd.utilities.entropy import categorized_data
-from netrd.utilities.entropy import entropy, joint_entropy, conditional_entropy
+from netrd.utilities.entropy import entropy_from_seq, joint_entropy, conditional_entropy
from netrd.utilities import threshold
@@ -89,7 +89,7 @@ def test_entropies():
"""
data = np.array([[1, 0, 0, 1, 1, 0, 1, 0], [0, 1, 0, 1, 1, 0, 1, 0]]).T
- H = entropy(data[:, 0])
+ H = entropy_from_seq(data[:, 0])
H_joint = joint_entropy(data)
H_cond = conditional_entropy(data[:, 1, np.newaxis], data[:, 0, np.newaxis])
|
Clarify usage of utilities.entropy.entropy?
When I was refactoring the Dmeasure code, I looked at using our version of entropy instead of scipy's, but one is not a drop-in replacement for the other because they take different inputs: one takes a histogram and the other takes a sequence of values. It would touch a lot of files, but renaming our `entropy` to something like `entropy_from_sequence` would remove this ambiguity.
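To make the distinction concrete, a small sketch (the netrd implementation may differ in detail) of an entropy that accepts raw observations, built on top of scipy's distribution-based `entropy`:
```python
import numpy as np
from scipy.stats import entropy as sp_entropy

# scipy's entropy expects a histogram / probability distribution:
print(sp_entropy([0.5, 0.5], base=2))  # 1.0

def entropy_from_seq(var):
    """Sketch: Shannon entropy of a sequence of discrete observations."""
    _, counts = np.unique(var, return_counts=True)
    return sp_entropy(counts, base=2)  # sp_entropy normalizes the counts

print(entropy_from_seq([1, 0, 0, 1, 1, 0, 1, 0]))  # 1.0 for a balanced binary sequence
```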
|
0.0
|
fa2c163376a88ed72ba15649190b1a2b23b1cb9a
|
[
"tests/test_utilities.py::test_thresholds",
"tests/test_utilities.py::test_categorized_data",
"tests/test_utilities.py::test_entropies"
] |
[] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2019-10-14 15:14:50+00:00
|
mit
| 4,130 |
|
networkx__networkx-2713
|
diff --git a/networkx/algorithms/community/quality.py b/networkx/algorithms/community/quality.py
index 7de690af7..e04ff260d 100644
--- a/networkx/algorithms/community/quality.py
+++ b/networkx/algorithms/community/quality.py
@@ -114,7 +114,10 @@ def inter_community_edges(G, partition):
# for block in partition))
# return sum(1 for u, v in G.edges() if aff[u] != aff[v])
#
- return nx.quotient_graph(G, partition, create_using=nx.MultiGraph()).size()
+ if G.is_directed():
+ return nx.quotient_graph(G, partition, create_using=nx.MultiDiGraph()).size()
+ else:
+ return nx.quotient_graph(G, partition, create_using=nx.MultiGraph()).size()
def inter_community_non_edges(G, partition):
diff --git a/networkx/algorithms/simple_paths.py b/networkx/algorithms/simple_paths.py
index 763fa24d7..a2ef79671 100644
--- a/networkx/algorithms/simple_paths.py
+++ b/networkx/algorithms/simple_paths.py
@@ -333,7 +333,6 @@ def shortest_simple_paths(G, source, target, weight=None):
for path in listA:
if path[:i] == root:
ignore_edges.add((path[i - 1], path[i]))
- ignore_nodes.add(root[-1])
try:
length, spur = shortest_path_func(G, root[-1], target,
ignore_nodes=ignore_nodes,
@@ -343,6 +342,7 @@ def shortest_simple_paths(G, source, target, weight=None):
listB.push(root_length + length, path)
except nx.NetworkXNoPath:
pass
+ ignore_nodes.add(root[-1])
if listB:
path = listB.pop()
@@ -447,6 +447,8 @@ def _bidirectional_pred_succ(G, source, target, ignore_nodes=None, ignore_edges=
succ is a dictionary of successors from w to the target.
"""
# does BFS from both source and target and meets in the middle
+ if ignore_nodes and (source in ignore_nodes or target in ignore_nodes):
+ raise nx.NetworkXNoPath("No path between %s and %s." % (source, target))
if target == source:
return ({target: None}, {source: None}, source)
@@ -605,6 +607,8 @@ def _bidirectional_dijkstra(G, source, target, weight='weight',
shortest_path
shortest_path_length
"""
+ if ignore_nodes and (source in ignore_nodes or target in ignore_nodes):
+ raise nx.NetworkXNoPath("No path between %s and %s." % (source, target))
if source == target:
return (0, [source])
|
networkx/networkx
|
9f6c9cd6a561d41192bc29f14fd9bc16bcaad919
|
diff --git a/networkx/algorithms/community/tests/test_quality.py b/networkx/algorithms/community/tests/test_quality.py
index 0c5b94c5a..79ce7e7f6 100644
--- a/networkx/algorithms/community/tests/test_quality.py
+++ b/networkx/algorithms/community/tests/test_quality.py
@@ -12,6 +12,7 @@ module.
"""
from __future__ import division
+from nose.tools import assert_equal
from nose.tools import assert_almost_equal
import networkx as nx
@@ -19,6 +20,7 @@ from networkx import barbell_graph
from networkx.algorithms.community import coverage
from networkx.algorithms.community import modularity
from networkx.algorithms.community import performance
+from networkx.algorithms.community.quality import inter_community_edges
class TestPerformance(object):
@@ -61,3 +63,17 @@ def test_modularity():
assert_almost_equal(-16 / (14 ** 2), modularity(G, C))
C = [{0, 1, 2}, {3, 4, 5}]
assert_almost_equal((35 * 2) / (14 ** 2), modularity(G, C))
+
+
+def test_inter_community_edges_with_digraphs():
+ G = nx.complete_graph(2, create_using = nx.DiGraph())
+ partition = [{0}, {1}]
+ assert_equal(inter_community_edges(G, partition), 2)
+
+ G = nx.complete_graph(10, create_using = nx.DiGraph())
+ partition = [{0}, {1, 2}, {3, 4, 5}, {6, 7, 8, 9}]
+ assert_equal(inter_community_edges(G, partition), 70)
+
+ G = nx.cycle_graph(4, create_using = nx.DiGraph())
+ partition = [{0, 1}, {2, 3}]
+ assert_equal(inter_community_edges(G, partition), 2)
diff --git a/networkx/algorithms/tests/test_simple_paths.py b/networkx/algorithms/tests/test_simple_paths.py
index e29255c32..4c701e487 100644
--- a/networkx/algorithms/tests/test_simple_paths.py
+++ b/networkx/algorithms/tests/test_simple_paths.py
@@ -220,6 +220,40 @@ def test_directed_weighted_shortest_simple_path():
cost = this_cost
+def test_weighted_shortest_simple_path_issue2427():
+ G = nx.Graph()
+ G.add_edge('IN', 'OUT', weight = 2)
+ G.add_edge('IN', 'A', weight = 1)
+ G.add_edge('IN', 'B', weight = 2)
+ G.add_edge('B', 'OUT', weight = 2)
+ assert_equal(list(nx.shortest_simple_paths(G, 'IN', 'OUT', weight = "weight")),
+ [['IN', 'OUT'], ['IN', 'B', 'OUT']])
+ G = nx.Graph()
+ G.add_edge('IN', 'OUT', weight = 10)
+ G.add_edge('IN', 'A', weight = 1)
+ G.add_edge('IN', 'B', weight = 1)
+ G.add_edge('B', 'OUT', weight = 1)
+ assert_equal(list(nx.shortest_simple_paths(G, 'IN', 'OUT', weight = "weight")),
+ [['IN', 'B', 'OUT'], ['IN', 'OUT']])
+
+
+def test_directed_weighted_shortest_simple_path_issue2427():
+ G = nx.DiGraph()
+ G.add_edge('IN', 'OUT', weight = 2)
+ G.add_edge('IN', 'A', weight = 1)
+ G.add_edge('IN', 'B', weight = 2)
+ G.add_edge('B', 'OUT', weight = 2)
+ assert_equal(list(nx.shortest_simple_paths(G, 'IN', 'OUT', weight = "weight")),
+ [['IN', 'OUT'], ['IN', 'B', 'OUT']])
+ G = nx.DiGraph()
+ G.add_edge('IN', 'OUT', weight = 10)
+ G.add_edge('IN', 'A', weight = 1)
+ G.add_edge('IN', 'B', weight = 1)
+ G.add_edge('B', 'OUT', weight = 1)
+ assert_equal(list(nx.shortest_simple_paths(G, 'IN', 'OUT', weight = "weight")),
+ [['IN', 'B', 'OUT'], ['IN', 'OUT']])
+
+
def test_weight_name():
G = nx.cycle_graph(7)
nx.set_edge_attributes(G, 1, 'weight')
@@ -303,6 +337,38 @@ def test_bidirectional_shortest_path_restricted_directed_cycle():
)
+def test_bidirectional_shortest_path_ignore():
+ G = nx.Graph()
+ nx.add_path(G, [1, 2])
+ nx.add_path(G, [1, 3])
+ nx.add_path(G, [1, 4])
+ assert_raises(
+ nx.NetworkXNoPath,
+ _bidirectional_shortest_path,
+ G,
+ 1, 2,
+ ignore_nodes=[1],
+ )
+ assert_raises(
+ nx.NetworkXNoPath,
+ _bidirectional_shortest_path,
+ G,
+ 1, 2,
+ ignore_nodes=[2],
+ )
+ G = nx.Graph()
+ nx.add_path(G, [1, 3])
+ nx.add_path(G, [1, 4])
+ nx.add_path(G, [3, 2])
+ assert_raises(
+ nx.NetworkXNoPath,
+ _bidirectional_shortest_path,
+ G,
+ 1, 2,
+ ignore_nodes=[1, 2],
+ )
+
+
def validate_path(G, s, t, soln_len, path):
assert_equal(path[0], s)
assert_equal(path[-1], t)
@@ -362,3 +428,30 @@ def test_bidirectional_dijkstra_no_path():
nx.add_path(G, [1, 2, 3])
nx.add_path(G, [4, 5, 6])
path = _bidirectional_dijkstra(G, 1, 6)
+
+
+def test_bidirectional_dijkstra_ignore():
+ G = nx.Graph()
+ nx.add_path(G, [1, 2, 10])
+ nx.add_path(G, [1, 3, 10])
+ assert_raises(
+ nx.NetworkXNoPath,
+ _bidirectional_dijkstra,
+ G,
+ 1, 2,
+ ignore_nodes=[1],
+ )
+ assert_raises(
+ nx.NetworkXNoPath,
+ _bidirectional_dijkstra,
+ G,
+ 1, 2,
+ ignore_nodes=[2],
+ )
+ assert_raises(
+ nx.NetworkXNoPath,
+ _bidirectional_dijkstra,
+ G,
+ 1, 2,
+ ignore_nodes=[1, 2],
+ )
|
inter_community_non_edges ignores directionality
Hi,
I think the function:
`nx.algorithms.community.quality.inter_community_non_edges()`
does not work properly for directed graphs. It always returns the non-edges of an undirected graph, basically halving the number of edges. This means that the performance function (`nx.algorithms.community.performance`) will never be higher than 50% for a directed graph.
I'm using version '2.0.dev_20170801111157', python 3.5.1
Best,
Nicolas
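A minimal reproduction mirroring the new test in the patch above:
```python
import networkx as nx
from networkx.algorithms.community.quality import inter_community_edges

G = nx.complete_graph(2, create_using=nx.DiGraph())
partition = [{0}, {1}]

# Both directed edges (0->1 and 1->0) cross the partition, so the count
# should be 2; before the patch the undirected MultiGraph quotient halved it.
print(inter_community_edges(G, partition))  # 2 with the fix
```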
|
0.0
|
9f6c9cd6a561d41192bc29f14fd9bc16bcaad919
|
[
"networkx/algorithms/community/tests/test_quality.py::TestPerformance::test_bad_partition",
"networkx/algorithms/community/tests/test_quality.py::TestPerformance::test_good_partition",
"networkx/algorithms/community/tests/test_quality.py::TestCoverage::test_bad_partition",
"networkx/algorithms/community/tests/test_quality.py::TestCoverage::test_good_partition",
"networkx/algorithms/community/tests/test_quality.py::test_modularity",
"networkx/algorithms/community/tests/test_quality.py::test_inter_community_edges_with_digraphs",
"networkx/algorithms/tests/test_simple_paths.py::TestIsSimplePath::test_empty_list",
"networkx/algorithms/tests/test_simple_paths.py::TestIsSimplePath::test_trivial_path",
"networkx/algorithms/tests/test_simple_paths.py::TestIsSimplePath::test_trivial_nonpath",
"networkx/algorithms/tests/test_simple_paths.py::TestIsSimplePath::test_simple_path",
"networkx/algorithms/tests/test_simple_paths.py::TestIsSimplePath::test_non_simple_path",
"networkx/algorithms/tests/test_simple_paths.py::TestIsSimplePath::test_cycle",
"networkx/algorithms/tests/test_simple_paths.py::TestIsSimplePath::test_missing_node",
"networkx/algorithms/tests/test_simple_paths.py::TestIsSimplePath::test_directed_path",
"networkx/algorithms/tests/test_simple_paths.py::TestIsSimplePath::test_directed_non_path",
"networkx/algorithms/tests/test_simple_paths.py::TestIsSimplePath::test_directed_cycle",
"networkx/algorithms/tests/test_simple_paths.py::TestIsSimplePath::test_multigraph",
"networkx/algorithms/tests/test_simple_paths.py::TestIsSimplePath::test_multidigraph",
"networkx/algorithms/tests/test_simple_paths.py::test_all_simple_paths",
"networkx/algorithms/tests/test_simple_paths.py::test_all_simple_paths_cutoff",
"networkx/algorithms/tests/test_simple_paths.py::test_all_simple_paths_multigraph",
"networkx/algorithms/tests/test_simple_paths.py::test_all_simple_paths_multigraph_with_cutoff",
"networkx/algorithms/tests/test_simple_paths.py::test_all_simple_paths_directed",
"networkx/algorithms/tests/test_simple_paths.py::test_all_simple_paths_empty",
"networkx/algorithms/tests/test_simple_paths.py::test_hamiltonian_path",
"networkx/algorithms/tests/test_simple_paths.py::test_cutoff_zero",
"networkx/algorithms/tests/test_simple_paths.py::test_source_missing",
"networkx/algorithms/tests/test_simple_paths.py::test_target_missing",
"networkx/algorithms/tests/test_simple_paths.py::test_shortest_simple_paths",
"networkx/algorithms/tests/test_simple_paths.py::test_shortest_simple_paths_directed",
"networkx/algorithms/tests/test_simple_paths.py::test_Greg_Bernstein",
"networkx/algorithms/tests/test_simple_paths.py::test_weighted_shortest_simple_path",
"networkx/algorithms/tests/test_simple_paths.py::test_directed_weighted_shortest_simple_path",
"networkx/algorithms/tests/test_simple_paths.py::test_weighted_shortest_simple_path_issue2427",
"networkx/algorithms/tests/test_simple_paths.py::test_directed_weighted_shortest_simple_path_issue2427",
"networkx/algorithms/tests/test_simple_paths.py::test_weight_name",
"networkx/algorithms/tests/test_simple_paths.py::test_ssp_source_missing",
"networkx/algorithms/tests/test_simple_paths.py::test_ssp_target_missing",
"networkx/algorithms/tests/test_simple_paths.py::test_ssp_multigraph",
"networkx/algorithms/tests/test_simple_paths.py::test_bidirectional_shortest_path_restricted_cycle",
"networkx/algorithms/tests/test_simple_paths.py::test_bidirectional_shortest_path_restricted_wheel",
"networkx/algorithms/tests/test_simple_paths.py::test_bidirectional_shortest_path_restricted_directed_cycle",
"networkx/algorithms/tests/test_simple_paths.py::test_bidirectional_shortest_path_ignore",
"networkx/algorithms/tests/test_simple_paths.py::test_bidirectional_dijksta_restricted",
"networkx/algorithms/tests/test_simple_paths.py::test_bidirectional_dijkstra_no_path",
"networkx/algorithms/tests/test_simple_paths.py::test_bidirectional_dijkstra_ignore"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-10-15 17:09:15+00:00
|
bsd-3-clause
| 4,131 |
|
networkx__networkx-3822
|
diff --git a/networkx/generators/random_graphs.py b/networkx/generators/random_graphs.py
index e4f2c569d..745f64e4d 100644
--- a/networkx/generators/random_graphs.py
+++ b/networkx/generators/random_graphs.py
@@ -1000,6 +1000,11 @@ def random_lobster(n, p1, p2, seed=None):
leaf nodes. A caterpillar is a tree that reduces to a path graph
when pruning all leaf nodes; setting `p2` to zero produces a caterpillar.
+ This implementation iterates on the probabilities `p1` and `p2` to add
+ edges at levels 1 and 2, respectively. Graphs are therefore constructed
+ iteratively with uniform randomness at each level rather than being selected
+ uniformly at random from the set of all possible lobsters.
+
Parameters
----------
n : int
@@ -1011,19 +1016,29 @@ def random_lobster(n, p1, p2, seed=None):
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
+
+ Raises
+ ------
+ NetworkXError
+ If the `p1` or `p2` parameter is >= 1, because the while loops would never finish.
"""
+ p1, p2 = abs(p1), abs(p2)
+ if any([p >= 1 for p in [p1, p2]]):
+ raise nx.NetworkXError("Probability values for `p1` and `p2` must both be < 1.")
+
# a necessary ingredient in any self-respecting graph library
llen = int(2 * seed.random() * n + 0.5)
L = path_graph(llen)
# build caterpillar: add edges to path graph with probability p1
current_node = llen - 1
for n in range(llen):
- if seed.random() < p1: # add fuzzy caterpillar parts
+ while seed.random() < p1: # add fuzzy caterpillar parts
current_node += 1
L.add_edge(n, current_node)
- if seed.random() < p2: # add crunchy lobster bits
+ cat_node = current_node
+ while seed.random() < p2: # add crunchy lobster bits
current_node += 1
- L.add_edge(current_node - 1, current_node)
+ L.add_edge(cat_node, current_node)
return L # voila, un lobster!
|
networkx/networkx
|
a4d024c54f06d17d2f9ab26595a0b20ed6858f5c
|
diff --git a/networkx/generators/tests/test_random_graphs.py b/networkx/generators/tests/test_random_graphs.py
index f958dceab..8f2d68415 100644
--- a/networkx/generators/tests/test_random_graphs.py
+++ b/networkx/generators/tests/test_random_graphs.py
@@ -91,7 +91,38 @@ class TestGeneratorsRandom:
constructor = [(10, 20, 0.8), (20, 40, 0.8)]
G = random_shell_graph(constructor, seed)
+ def is_caterpillar(g):
+ """
+ A tree is a caterpillar iff all nodes of degree >=3 are surrounded
+ by at most two nodes of degree two or greater.
+ ref: http://mathworld.wolfram.com/CaterpillarGraph.html
+ """
+ deg_over_3 = [n for n in g if g.degree(n) >= 3]
+ for n in deg_over_3:
+ nbh_deg_over_2 = [nbh for nbh in g.neighbors(n) if g.degree(nbh) >= 2]
+ if not len(nbh_deg_over_2) <= 2:
+ return False
+ return True
+
+ def is_lobster(g):
+ """
+ A tree is a lobster if it has the property that the removal of leaf
+ nodes leaves a caterpillar graph (Gallian 2007)
+ ref: http://mathworld.wolfram.com/LobsterGraph.html
+ """
+ non_leafs = [n for n in g if g.degree(n) > 1]
+ return is_caterpillar(g.subgraph(non_leafs))
+
G = random_lobster(10, 0.1, 0.5, seed)
+ assert max([G.degree(n) for n in G.nodes()]) > 3
+ assert is_lobster(G)
+ pytest.raises(NetworkXError, random_lobster, 10, 0.1, 1, seed)
+ pytest.raises(NetworkXError, random_lobster, 10, 1, 1, seed)
+ pytest.raises(NetworkXError, random_lobster, 10, 1, 0.5, seed)
+
+ # docstring says this should be a caterpillar
+ G = random_lobster(10, 0.1, 0.0, seed)
+ assert is_caterpillar(G)
# difficult to find seed that requires few tries
seq = random_powerlaw_tree_sequence(10, 3, seed=14, tries=1)
|
Wrong random_lobster implementation?
Hi, it seems that [networkx.random_lobster's implementation logic](https://github.com/networkx/networkx/blob/4e9771f04192e94a5cbdd71249a983d124a56593/networkx/generators/random_graphs.py#L1009) is not aligned with the common definition as given in [wolfram mathworld](http://mathworld.wolfram.com/LobsterGraph.html) or [Wikipedia](https://en.wikipedia.org/wiki/List_of_graphs#Lobster).
For example, it cannot produce the simplest lobster graph examples, such as the one pictured in the original issue (image omitted).
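A quick sketch, based on the patched generator and the checks added in the test patch, showing the new behavior (the outputs here are illustrative, not exhaustive):
```python
import networkx as nx

G = nx.random_lobster(10, 0.1, 0.5, seed=42)
# The new tests check that spine nodes can now reach degree > 3:
print(max(dict(G.degree()).values()))

# p1 or p2 >= 1 would make the new while-loops spin forever, so it raises:
try:
    nx.random_lobster(10, 1, 0.5, seed=42)
except nx.NetworkXError as err:
    print(err)
```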
|
0.0
|
a4d024c54f06d17d2f9ab26595a0b20ed6858f5c
|
[
"networkx/generators/tests/test_random_graphs.py::TestGeneratorsRandom::test_random_graph"
] |
[
"networkx/generators/tests/test_random_graphs.py::TestGeneratorsRandom::test_dual_barabasi_albert",
"networkx/generators/tests/test_random_graphs.py::TestGeneratorsRandom::test_extended_barabasi_albert",
"networkx/generators/tests/test_random_graphs.py::TestGeneratorsRandom::test_random_zero_regular_graph",
"networkx/generators/tests/test_random_graphs.py::TestGeneratorsRandom::test_gnp",
"networkx/generators/tests/test_random_graphs.py::TestGeneratorsRandom::test_gnm",
"networkx/generators/tests/test_random_graphs.py::TestGeneratorsRandom::test_watts_strogatz_big_k",
"networkx/generators/tests/test_random_graphs.py::TestGeneratorsRandom::test_random_kernel_graph"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_media"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-02-16 23:49:40+00:00
|
bsd-3-clause
| 4,132 |
|
networkx__networkx-3848
|
diff --git a/networkx/algorithms/connectivity/cuts.py b/networkx/algorithms/connectivity/cuts.py
index 846cd4729..dd59e3db9 100644
--- a/networkx/algorithms/connectivity/cuts.py
+++ b/networkx/algorithms/connectivity/cuts.py
@@ -281,7 +281,7 @@ def minimum_st_node_cut(G, s, t, flow_func=None, auxiliary=None, residual=None):
if mapping is None:
raise nx.NetworkXError('Invalid auxiliary digraph.')
if G.has_edge(s, t) or G.has_edge(t, s):
- return []
+ return {}
kwargs = dict(flow_func=flow_func, residual=residual, auxiliary=H)
# The edge cut in the auxiliary digraph corresponds to the node cut in the
|
networkx/networkx
|
3f4f9c3379a5d70fc58852154aab7b1051ff96d6
|
diff --git a/networkx/algorithms/connectivity/tests/test_cuts.py b/networkx/algorithms/connectivity/tests/test_cuts.py
index b98fbfa5e..257797a6f 100644
--- a/networkx/algorithms/connectivity/tests/test_cuts.py
+++ b/networkx/algorithms/connectivity/tests/test_cuts.py
@@ -268,7 +268,7 @@ def tests_minimum_st_node_cut():
G.add_nodes_from([0, 1, 2, 3, 7, 8, 11, 12])
G.add_edges_from([(7, 11), (1, 11), (1, 12), (12, 8), (0, 1)])
nodelist = minimum_st_node_cut(G, 7, 11)
- assert(nodelist == [])
+ assert(nodelist == {})
def test_invalid_auxiliary():
|
`minimum_st_node_cut` returns empty list instead of set for adjacent nodes
https://github.com/networkx/networkx/blob/3f4f9c3379a5d70fc58852154aab7b1051ff96d6/networkx/algorithms/connectivity/cuts.py#L284
Should read `return {}`. Was questioning my sanity for a bit there 😉
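A tiny reproduction (adjacent source and target, so no node cut exists):
```python
import networkx as nx
from networkx.algorithms.connectivity import minimum_st_node_cut

G = nx.path_graph(2)  # nodes 0 and 1 are adjacent
cut = minimum_st_node_cut(G, 0, 1)
print(cut)  # {} after the fix; previously [] (a list, inconsistent with other returns)
```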
|
0.0
|
3f4f9c3379a5d70fc58852154aab7b1051ff96d6
|
[
"networkx/algorithms/connectivity/tests/test_cuts.py::tests_minimum_st_node_cut"
] |
[
"networkx/algorithms/connectivity/tests/test_cuts.py::test_articulation_points",
"networkx/algorithms/connectivity/tests/test_cuts.py::test_brandes_erlebach_book",
"networkx/algorithms/connectivity/tests/test_cuts.py::test_white_harary_paper",
"networkx/algorithms/connectivity/tests/test_cuts.py::test_petersen_cutset",
"networkx/algorithms/connectivity/tests/test_cuts.py::test_octahedral_cutset",
"networkx/algorithms/connectivity/tests/test_cuts.py::test_icosahedral_cutset",
"networkx/algorithms/connectivity/tests/test_cuts.py::test_node_cutset_exception",
"networkx/algorithms/connectivity/tests/test_cuts.py::test_node_cutset_random_graphs",
"networkx/algorithms/connectivity/tests/test_cuts.py::test_edge_cutset_random_graphs",
"networkx/algorithms/connectivity/tests/test_cuts.py::test_empty_graphs",
"networkx/algorithms/connectivity/tests/test_cuts.py::test_unbounded",
"networkx/algorithms/connectivity/tests/test_cuts.py::test_missing_source",
"networkx/algorithms/connectivity/tests/test_cuts.py::test_missing_target",
"networkx/algorithms/connectivity/tests/test_cuts.py::test_not_weakly_connected",
"networkx/algorithms/connectivity/tests/test_cuts.py::test_not_connected",
"networkx/algorithms/connectivity/tests/test_cuts.py::tests_min_cut_complete",
"networkx/algorithms/connectivity/tests/test_cuts.py::tests_min_cut_complete_directed",
"networkx/algorithms/connectivity/tests/test_cuts.py::test_invalid_auxiliary",
"networkx/algorithms/connectivity/tests/test_cuts.py::test_interface_only_source",
"networkx/algorithms/connectivity/tests/test_cuts.py::test_interface_only_target"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-03-04 08:11:36+00:00
|
bsd-3-clause
| 4,133 |
|
networkx__networkx-4066
|
diff --git a/networkx/relabel.py b/networkx/relabel.py
index 737b5af3d..26f50d241 100644
--- a/networkx/relabel.py
+++ b/networkx/relabel.py
@@ -13,7 +13,7 @@ def relabel_nodes(G, mapping, copy=True):
mapping : dictionary
A dictionary with the old labels as keys and new labels as values.
- A partial mapping is allowed.
+ A partial mapping is allowed. Mapping 2 nodes to a single node is allowed.
copy : bool (optional, default=True)
If True return a copy, or if False relabel the nodes in place.
@@ -64,6 +64,27 @@ def relabel_nodes(G, mapping, copy=True):
>>> list(H)
[0, 1, 4]
+ In a multigraph, relabeling two or more nodes to the same new node
+ will retain all edges, but may change the edge keys in the process:
+
+ >>> G = nx.MultiGraph()
+ >>> G.add_edge(0, 1, value="a") # returns the key for this edge
+ 0
+ >>> G.add_edge(0, 2, value="b")
+ 0
+ >>> G.add_edge(0, 3, value="c")
+ 0
+ >>> mapping = {1: 4, 2: 4, 3: 4}
+ >>> H = nx.relabel_nodes(G, mapping, copy=True)
+ >>> print(H[0])
+ {4: {0: {'value': 'a'}, 1: {'value': 'b'}, 2: {'value': 'c'}}}
+
+ This works for in-place relabeling too:
+
+ >>> G = nx.relabel_nodes(G, mapping, copy=False)
+ >>> print(G[0])
+ {4: {0: {'value': 'a'}, 1: {'value': 'b'}, 2: {'value': 'c'}}}
+
Notes
-----
Only the nodes specified in the mapping will be relabeled.
@@ -77,6 +98,13 @@ def relabel_nodes(G, mapping, copy=True):
graph is not possible in-place and an exception is raised.
In that case, use copy=True.
+ If a relabel operation on a multigraph would cause two or more
+ edges to have the same source, target and key, the second edge must
+ be assigned a new key to retain all edges. The new key is set
+ to the lowest non-negative integer not already used as a key
+ for edges between these two nodes. Note that this means non-numeric
+ keys may be replaced by numeric keys.
+
See Also
--------
convert_node_labels_to_integers
@@ -136,6 +164,15 @@ def _relabel_inplace(G, mapping):
(new if old == source else source, new, key, data)
for (source, _, key, data) in G.in_edges(old, data=True, keys=True)
]
+ # Ensure new edges won't overwrite existing ones
+ seen = set()
+ for i, (source, target, key, data) in enumerate(new_edges):
+ if (target in G[source] and key in G[source][target]):
+ new_key = 0 if not isinstance(key, (int, float)) else key
+ while (new_key in G[source][target] or (target, new_key) in seen):
+ new_key += 1
+ new_edges[i] = (source, target, new_key, data)
+ seen.add((target, new_key))
else:
new_edges = [
(new, new if old == target else target, data)
@@ -156,10 +193,25 @@ def _relabel_copy(G, mapping):
H.add_nodes_from(mapping.get(n, n) for n in G)
H._node.update((mapping.get(n, n), d.copy()) for n, d in G.nodes.items())
if G.is_multigraph():
- H.add_edges_from(
+ new_edges = [
(mapping.get(n1, n1), mapping.get(n2, n2), k, d.copy())
for (n1, n2, k, d) in G.edges(keys=True, data=True)
- )
+ ]
+
+ # check for conflicting edge-keys
+ undirected = not G.is_directed()
+ seen_edges = set()
+ for i, (source, target, key, data) in enumerate(new_edges):
+ while (source, target, key) in seen_edges:
+ if not isinstance(key, (int, float)):
+ key = 0
+ key += 1
+ seen_edges.add((source, target, key))
+ if undirected:
+ seen_edges.add((target, source, key))
+ new_edges[i] = (source, target, key, data)
+
+ H.add_edges_from(new_edges)
else:
H.add_edges_from(
(mapping.get(n1, n1), mapping.get(n2, n2), d.copy())
|
networkx/networkx
|
5638e1ff3d01e21c7d950615a699eb1f99987b8d
|
diff --git a/networkx/tests/test_relabel.py b/networkx/tests/test_relabel.py
index 9b8ad2998..909904bc9 100644
--- a/networkx/tests/test_relabel.py
+++ b/networkx/tests/test_relabel.py
@@ -183,3 +183,109 @@ class TestRelabel:
G = nx.MultiDiGraph([(1, 1)])
G = nx.relabel_nodes(G, {1: 0}, copy=False)
assert_nodes_equal(G.nodes(), [0])
+
+# def test_relabel_multidigraph_inout_inplace(self):
+# pass
+ def test_relabel_multidigraph_inout_merge_nodes(self):
+ for MG in (nx.MultiGraph, nx.MultiDiGraph):
+ for cc in (True, False):
+ G = MG([(0, 4), (1, 4), (4, 2), (4, 3)])
+ G[0][4][0]["value"] = "a"
+ G[1][4][0]["value"] = "b"
+ G[4][2][0]["value"] = "c"
+ G[4][3][0]["value"] = "d"
+ G.add_edge(0, 4, key="x", value="e")
+ G.add_edge(4, 3, key="x", value="f")
+ mapping = {0: 9, 1: 9, 2: 9, 3: 9}
+ H = nx.relabel_nodes(G, mapping, copy=cc)
+ # No ordering on keys enforced
+ assert {"value": "a"} in H[9][4].values()
+ assert {"value": "b"} in H[9][4].values()
+ assert {"value": "c"} in H[4][9].values()
+ assert len(H[4][9]) == 3 if G.is_directed() else 6
+ assert {"value": "d"} in H[4][9].values()
+ assert {"value": "e"} in H[9][4].values()
+ assert {"value": "f"} in H[4][9].values()
+ assert len(H[9][4]) == 3 if G.is_directed() else 6
+
+ def test_relabel_multigraph_merge_inplace(self):
+ G = nx.MultiGraph([(0, 1), (0, 2), (0, 3), (0, 1), (0, 2), (0, 3)])
+ G[0][1][0]["value"] = "a"
+ G[0][2][0]["value"] = "b"
+ G[0][3][0]["value"] = "c"
+ mapping = {1: 4, 2: 4, 3: 4}
+ nx.relabel_nodes(G, mapping, copy=False)
+ # No ordering on keys enforced
+ assert {"value": "a"} in G[0][4].values()
+ assert {"value": "b"} in G[0][4].values()
+ assert {"value": "c"} in G[0][4].values()
+
+ def test_relabel_multidigraph_merge_inplace(self):
+ G = nx.MultiDiGraph([(0, 1), (0, 2), (0, 3)])
+ G[0][1][0]["value"] = "a"
+ G[0][2][0]["value"] = "b"
+ G[0][3][0]["value"] = "c"
+ mapping = {1: 4, 2: 4, 3: 4}
+ nx.relabel_nodes(G, mapping, copy=False)
+ # No ordering on keys enforced
+ assert {"value": "a"} in G[0][4].values()
+ assert {"value": "b"} in G[0][4].values()
+ assert {"value": "c"} in G[0][4].values()
+
+ def test_relabel_multidigraph_inout_copy(self):
+ G = nx.MultiDiGraph([(0, 4), (1, 4), (4, 2), (4, 3)])
+ G[0][4][0]["value"] = "a"
+ G[1][4][0]["value"] = "b"
+ G[4][2][0]["value"] = "c"
+ G[4][3][0]["value"] = "d"
+ G.add_edge(0, 4, key="x", value="e")
+ G.add_edge(4, 3, key="x", value="f")
+ mapping = {0: 9, 1: 9, 2: 9, 3: 9}
+ H = nx.relabel_nodes(G, mapping, copy=True)
+ # No ordering on keys enforced
+ assert {"value": "a"} in H[9][4].values()
+ assert {"value": "b"} in H[9][4].values()
+ assert {"value": "c"} in H[4][9].values()
+ assert len(H[4][9]) == 3
+ assert {"value": "d"} in H[4][9].values()
+ assert {"value": "e"} in H[9][4].values()
+ assert {"value": "f"} in H[4][9].values()
+ assert len(H[9][4]) == 3
+
+ def test_relabel_multigraph_merge_copy(self):
+ G = nx.MultiGraph([(0, 1), (0, 2), (0, 3)])
+ G[0][1][0]["value"] = "a"
+ G[0][2][0]["value"] = "b"
+ G[0][3][0]["value"] = "c"
+ mapping = {1: 4, 2: 4, 3: 4}
+ H = nx.relabel_nodes(G, mapping, copy=True)
+ assert {"value": "a"} in H[0][4].values()
+ assert {"value": "b"} in H[0][4].values()
+ assert {"value": "c"} in H[0][4].values()
+
+ def test_relabel_multidigraph_merge_copy(self):
+ G = nx.MultiDiGraph([(0, 1), (0, 2), (0, 3)])
+ G[0][1][0]["value"] = "a"
+ G[0][2][0]["value"] = "b"
+ G[0][3][0]["value"] = "c"
+ mapping = {1: 4, 2: 4, 3: 4}
+ H = nx.relabel_nodes(G, mapping, copy=True)
+ assert {"value": "a"} in H[0][4].values()
+ assert {"value": "b"} in H[0][4].values()
+ assert {"value": "c"} in H[0][4].values()
+
+ def test_relabel_multigraph_nonnumeric_key(self):
+ for MG in (nx.MultiGraph, nx.MultiDiGraph):
+ for cc in (True, False):
+ G = nx.MultiGraph()
+ G.add_edge(0, 1, key="I", value="a")
+ G.add_edge(0, 2, key="II", value="b")
+ G.add_edge(0, 3, key="II", value="c")
+ mapping = {1: 4, 2: 4, 3: 4}
+ nx.relabel_nodes(G, mapping, copy=False)
+ assert {"value": "a"} in G[0][4].values()
+ assert {"value": "b"} in G[0][4].values()
+ assert {"value": "c"} in G[0][4].values()
+ assert 0 in G[0][4]
+ assert "I" in G[0][4]
+ assert "II" in G[0][4]
|
relabel_nodes on MultiGraphs does not preserve both edges when two nodes are replaced by one
When the graph contains edges (0, 1) and (0, 2) and I relabel both 1 and 2 to 3, I expected two edges from (0, 3), but only one edge is preserved.
Multi*Graph supports parallel edges between nodes, so I expected relabeling to preserve both edges when "merging" the two old nodes together.
Tested on both networkx 2.4 and the latest version on master.
```python
import networkx as nx
G = nx.MultiDiGraph([(0,1),(0,2)])
G[0][1][0]["value"] = "a"
G[0][2][0]["value"] = "b"
print(G[0])
# Output:
# {1: {0: {'value': 'a'}}, 2: {0: {'value': 'b'}}}
mapping = {1:3, 2:3}
nx.relabel_nodes(G, mapping, copy=False)
print(G[0])
# Output:
# {3: {0: {'value': 'b'}}}
# Expected:
# {3: {0: {'value': 'a'}, 1: {'value': 'b'}}}
```
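For contrast, here is a sketch of the behavior the test patch above locks in once the fix is applied (this assumes a patched networkx install; the assertions follow the pattern of `test_relabel_multigraph_merge_inplace`):

```python
import networkx as nx

G = nx.MultiDiGraph([(0, 1), (0, 2)])
G[0][1][0]["value"] = "a"
G[0][2][0]["value"] = "b"
nx.relabel_nodes(G, {1: 3, 2: 3}, copy=False)

# Both parallel edges survive the merge, under distinct keys.
assert {"value": "a"} in G[0][3].values()
assert {"value": "b"} in G[0][3].values()
```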
|
0.0
|
5638e1ff3d01e21c7d950615a699eb1f99987b8d
|
[
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_multidigraph_inout_merge_nodes",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_multigraph_merge_inplace",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_multidigraph_merge_inplace",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_multidigraph_inout_copy",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_multigraph_merge_copy",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_multidigraph_merge_copy",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_multigraph_nonnumeric_key"
] |
[
"networkx/tests/test_relabel.py::TestRelabel::test_convert_node_labels_to_integers",
"networkx/tests/test_relabel.py::TestRelabel::test_convert_to_integers2",
"networkx/tests/test_relabel.py::TestRelabel::test_convert_to_integers_raise",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_nodes_copy",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_nodes_function",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_nodes_graph",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_nodes_orderedgraph",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_nodes_digraph",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_nodes_multigraph",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_nodes_multidigraph",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_isolated_nodes_to_same",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_nodes_missing",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_copy_name",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_toposort",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_selfloop"
] |
{
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-07-11 12:56:42+00:00
|
bsd-3-clause
| 4,134 |